author       Jan de Mooij <jdemooij@mozilla.com>
date         Sat, 15 Sep 2018 00:28:54 +0000
changeset    492375   0667570cb938dd973aaef61ccd31f0a74ce57a9b
parent       492374   371ea54455859baa1f9a5dd2d70772e04d6829bd
child        492376   8c24b93ec66fab8af2274076eee397c56ccd389e
push id      9984
push user    ffxbld-merge
push date    Mon, 15 Oct 2018 21:07:35 +0000
treeherder   mozilla-beta@183d27ea8570
reviewers    tcampbell
bugs         1490993
milestone    64.0a1
first release with      nightly linux32 / linux64 / mac / win32 / win64
last release without    nightly linux32 / linux64 / mac / win32 / win64
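
Every hunk in this changeset makes the same mechanical change: single-statement if, else, and loop bodies in js/src/jit/mips* gain braces. A minimal before/after sketch of the style being enforced (illustrative code, not taken from the patch):

    // Illustrative only: the same function in the old and new brace style.
    int SignOld(int v) {
        if (v < 0)          // pre-patch style: no braces on one-liners
            return -1;
        return v > 0 ? 1 : 0;
    }

    int SignNew(int v) {
        if (v < 0) {        // post-patch style: braces on every body
            return -1;
        }
        return v > 0 ? 1 : 0;
    }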
--- a/js/src/jit/mips-shared/Architecture-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Architecture-mips-shared.cpp
@@ -25,29 +25,33 @@ get_mips_flags()
     uint32_t flags = HWCAP_MIPS;
 
 #if defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
     flags |= HWCAP_FPU;
     flags |= HWCAP_R2;
 #else
 # ifdef __linux__
     FILE* fp = fopen("/proc/cpuinfo", "r");
-    if (!fp)
+    if (!fp) {
         return flags;
+    }
 
     char buf[1024];
     memset(buf, 0, sizeof(buf));
     (void)fread(buf, sizeof(char), sizeof(buf) - 1, fp);
     fclose(fp);
-    if (strstr(buf, "FPU"))
+    if (strstr(buf, "FPU")) {
         flags |= HWCAP_FPU;
-    if (strstr(buf, "Loongson"))
+    }
+    if (strstr(buf, "Loongson")) {
         flags |= HWCAP_LOONGSON;
-    if (strstr(buf, "mips32r2") || strstr(buf, "mips64r2"))
+    }
+    if (strstr(buf, "mips32r2") || strstr(buf, "mips64r2")) {
         flags |= HWCAP_R2;
+    }
 # endif
 #endif // JS_SIMULATOR_MIPS32 || JS_SIMULATOR_MIPS64
 
     return flags;
 }
 
 static bool check_fpu()
 {
     return mips_private::Flags & HWCAP_FPU;
@@ -70,18 +74,19 @@ namespace mips_private {
     bool isLoongson = check_loongson();
     bool hasR2 = check_r2();
 }
 
 Registers::Code
 Registers::FromName(const char* name)
 {
     for (size_t i = 0; i < Total; i++) {
-        if (strcmp(GetName(i), name) == 0)
+        if (strcmp(GetName(i), name) == 0) {
             return Code(i);
+        }
     }
 
     return Invalid;
 }
 
 } // namespace ion
 } // namespace js
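
For context on the first hunk above: get_mips_flags() detects CPU features by reading up to 1023 bytes of /proc/cpuinfo and substring-matching. A standalone sketch of that detection scheme, with hypothetical flag values standing in for the real HWCAP_* constants:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Hypothetical bit values; the real HWCAP_* constants come from the port headers.
    enum : uint32_t { kFPU = 1, kLoongson = 2, kR2 = 4 };

    static uint32_t DetectMipsFlags() {
        uint32_t flags = 0;
        FILE* fp = fopen("/proc/cpuinfo", "r");
        if (!fp) {
            return flags;  // no procfs: report no optional features
        }
        char buf[1024];
        memset(buf, 0, sizeof(buf));
        (void)fread(buf, sizeof(char), sizeof(buf) - 1, fp);
        fclose(fp);
        // Same substring probes as the patched function.
        if (strstr(buf, "FPU")) {
            flags |= kFPU;
        }
        if (strstr(buf, "Loongson")) {
            flags |= kLoongson;
        }
        if (strstr(buf, "mips32r2") || strstr(buf, "mips64r2")) {
            flags |= kR2;
        }
        return flags;
    }

    int main() {
        std::printf("flags = 0x%x\n", DetectMipsFlags());
        return 0;
    }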
--- a/js/src/jit/mips-shared/Assembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
@@ -118,18 +118,19 @@ AssemblerMIPSShared::reserve(size_t size
 bool
 AssemblerMIPSShared::swapBuffer(wasm::Bytes& bytes)
 {
     // For now, specialize to the one use case. As long as wasm::Bytes is a
     // Vector, not a linked-list of chunks, there's not much we can do other
     // than copy.
     MOZ_ASSERT(bytes.empty());
-    if (!bytes.resize(bytesNeeded()))
+    if (!bytes.resize(bytesNeeded())) {
         return false;
+    }
 
     m_buffer.executableCopy(bytes.begin());
     return true;
 }
 
 uint32_t
 AssemblerMIPSShared::actualIndex(uint32_t idx_) const
 {
     return idx_;
@@ -139,25 +140,27 @@ uint8_t*
 AssemblerMIPSShared::PatchableJumpAddress(JitCode* code, uint32_t pe_)
 {
     return code->raw() + pe_;
 }
 
 void
 AssemblerMIPSShared::copyJumpRelocationTable(uint8_t* dest)
 {
-    if (jumpRelocations_.length())
+    if (jumpRelocations_.length()) {
         memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+    }
 }
 
 void
 AssemblerMIPSShared::copyDataRelocationTable(uint8_t* dest)
 {
-    if (dataRelocations_.length())
+    if (dataRelocations_.length()) {
         memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+    }
 }
 
 AssemblerMIPSShared::Condition
 AssemblerMIPSShared::InvertCondition(Condition cond)
 {
     switch (cond) {
       case Equal:
         return NotEqual;
@@ -277,18 +280,19 @@ AssemblerMIPSShared::bytesNeeded() const
            jumpRelocationTableBytes() +
            dataRelocationTableBytes();
 }
 
 // write a blob of binary into the instruction stream
 BufferOffset
 AssemblerMIPSShared::writeInst(uint32_t x, uint32_t* dest)
 {
-    if (dest == nullptr)
+    if (dest == nullptr) {
         return m_buffer.putInt(x);
+    }
 
     WriteInstStatic(x, dest);
     return BufferOffset();
 }
 
 void
 AssemblerMIPSShared::WriteInstStatic(uint32_t x, uint32_t* dest)
 {
@@ -306,25 +310,27 @@ AssemblerMIPSShared::haltingAlign(int al
 BufferOffset
 AssemblerMIPSShared::nopAlign(int alignment)
 {
     BufferOffset ret;
     MOZ_ASSERT(m_buffer.isAligned(4));
     if (alignment == 8) {
         if (!m_buffer.isAligned(alignment)) {
             BufferOffset tmp = as_nop();
-            if (!ret.assigned())
+            if (!ret.assigned()) {
                 ret = tmp;
+            }
         }
     } else {
         MOZ_ASSERT((alignment & (alignment - 1)) == 0);
         while (size() & (alignment - 1)) {
             BufferOffset tmp = as_nop();
-            if (!ret.assigned())
+            if (!ret.assigned()) {
                 ret = tmp;
+            }
         }
     }
     return ret;
 }
 
 BufferOffset
 AssemblerMIPSShared::as_nop()
 {
@@ -408,18 +414,19 @@ AssemblerMIPSShared::as_b(BOffImm16 off)
     spew("b %d", off.decode());
     BufferOffset bo = writeInst(InstImm(op_beq, zero, zero, off).encode());
     return bo;
 }
 
 InstImm
 AssemblerMIPSShared::getBranchCode(JumpOrCall jumpOrCall)
 {
-    if (jumpOrCall == BranchIsCall)
+    if (jumpOrCall == BranchIsCall) {
         return InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+    }
 
     return InstImm(op_beq, zero, zero, BOffImm16(0));
 }
 
 InstImm
 AssemblerMIPSShared::getBranchCode(Register s, Register t, Condition c)
 {
     MOZ_ASSERT(c == AssemblerMIPSShared::Equal || c == AssemblerMIPSShared::NotEqual);
@@ -1865,18 +1872,19 @@ AssemblerMIPSShared::bind(Label* label,
     BufferOffset dest = boff.assigned() ? boff : nextOffset();
     if (label->used()) {
         int32_t next;
 
         // A used label holds a link to branch that uses it.
         BufferOffset b(label);
         do {
             // Even a 0 offset may be invalid if we're out of memory.
-            if (oom())
+            if (oom()) {
                 return;
+            }
 
             Instruction* inst = editSrc(b);
 
             // Second word holds a pointer to the next branch in label's chain.
             next = inst[1].encode();
             bind(reinterpret_cast<InstImm*>(inst), b.getOffset(), dest.getOffset());
 
             b = BufferOffset(next);
@@ -1953,18 +1961,19 @@ AssemblerMIPSShared::PatchWrite_Imm32(Co
     // end up being the call instruction.
     *(raw - 1) = imm.value;
 }
 
 uint8_t*
 AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count)
 {
     Instruction* inst = reinterpret_cast<Instruction*>(inst_);
-    if (count != nullptr)
+    if (count != nullptr) {
         *count += sizeof(Instruction);
+    }
 
     return reinterpret_cast<uint8_t*>(inst->next());
 }
 
 // Since there are no pools in MIPS implementation, this should be simple.
 Instruction*
 Instruction::next()
 {
     return this + 1;
@@ -2005,20 +2014,21 @@ InstImm AssemblerMIPSShared::invertBranc
         MOZ_CRASH("Error creating long branch.");
 
       case op_cop1:
         MOZ_ASSERT(branch.extractRS() == rs_bc1 >> RSShift);
 
         branch.setBOffImm16(skipOffset);
         rt = branch.extractRT();
-        if (rt & 0x1)
+        if (rt & 0x1) {
             branch.setRT((RTField) ((rt & ~0x1) << RTShift));
-        else
+        } else {
             branch.setRT((RTField) ((rt | 0x1) << RTShift));
+        }
         return branch;
       default:
         MOZ_CRASH("Error creating long branch.");
     }
 }
 
 void
 AssemblerMIPSShared::ToggleToJmp(CodeLocationLabel inst_)
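
The bind(Label*, ...) hunk above touches the loop that walks a label's branch chain: each pending branch stores, in its second word, the buffer offset of the next branch aimed at the same label, and binding walks that chain patching real targets in. A toy model of that walk (hypothetical types; the real code patches encoded InstImm words):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Model of the chain walked by AssemblerMIPSShared::bind: every pending
    // branch records the buffer offset of the next pending branch, and
    // binding walks the chain patching in the real destination.
    constexpr int32_t kEnd = -1;

    struct Branch { int32_t target = kEnd; int32_t next = kEnd; };

    static void Bind(std::vector<Branch>& buffer, int32_t head, int32_t dest) {
        for (int32_t b = head; b != kEnd;) {
            int32_t next = buffer[b].next;  // "second word": next link in chain
            buffer[b].target = dest;        // patch this branch
            b = next;
        }
    }

    int main() {
        std::vector<Branch> buf(3);
        buf[2].next = 0;                    // branch at 2 links to branch at 0
        Bind(buf, /*head=*/2, /*dest=*/7);
        assert(buf[0].target == 7 && buf[2].target == 7);
        return 0;
    }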
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -482,20 +482,22 @@ class BOffImm16
     explicit BOffImm16(int offset)
       : data ((offset - 4) >> 2 & Imm16Mask)
     {
         MOZ_ASSERT((offset & 0x3) == 0);
         MOZ_ASSERT(IsInRange(offset));
     }
     static bool IsInRange(int offset) {
-        if ((offset - 4) < int(unsigned(INT16_MIN) << 2))
+        if ((offset - 4) < int(unsigned(INT16_MIN) << 2)) {
             return false;
-        if ((offset - 4) > (INT16_MAX << 2))
+        }
+        if ((offset - 4) > (INT16_MAX << 2)) {
             return false;
+        }
         return true;
     }
     static const uint32_t INVALID = 0x00020000;
     BOffImm16()
       : data(INVALID)
     { }
 
     bool isInvalid() {
@@ -523,20 +525,22 @@ class JOffImm26
     explicit JOffImm26(int offset)
       : data ((offset - 4) >> 2 & Imm26Mask)
     {
         MOZ_ASSERT((offset & 0x3) == 0);
         MOZ_ASSERT(IsInRange(offset));
     }
     static bool IsInRange(int offset) {
-        if ((offset - 4) < -536870912)
+        if ((offset - 4) < -536870912) {
             return false;
-        if ((offset - 4) > 536870908)
+        }
+        if ((offset - 4) > 536870908) {
             return false;
+        }
         return true;
     }
     static const uint32_t INVALID = 0x20000000;
     JOffImm26()
       : data(INVALID)
     { }
 
     bool isInvalid() {
@@ -723,28 +727,30 @@ PatchJump(CodeLocationJump& jump_, CodeL
 static constexpr int32_t SliceSize = 1024;
 typedef js::jit::AssemblerBuffer<SliceSize, Instruction> MIPSBuffer;
 
 class MIPSBufferWithExecutableCopy : public MIPSBuffer
 {
   public:
     void executableCopy(uint8_t* buffer) {
-        if (this->oom())
+        if (this->oom()) {
             return;
+        }
 
         for (Slice* cur = head; cur != nullptr; cur = cur->getNext()) {
             memcpy(buffer, &cur->instructions, cur->length());
             buffer += cur->length();
         }
     }
 
     bool appendRawCode(const uint8_t* code, size_t numBytes) {
-        if (this->oom())
+        if (this->oom()) {
             return false;
+        }
 
         while (numBytes > SliceSize) {
             this->putBytes(SliceSize, code);
             numBytes -= SliceSize;
             code += SliceSize;
         }
         this->putBytes(numBytes, code);
         return !this->oom();
     }
@@ -896,27 +902,29 @@ class AssemblerMIPSShared : public Assem
     void writeRelocation(BufferOffset src) {
         jumpRelocations_.writeUnsigned(src.getOffset());
     }
 
     // As opposed to x86/x64 version, the data relocation has to be executed
     // before to recover the pointer, and not after.
     void writeDataRelocation(ImmGCPtr ptr) {
         if (ptr.value) {
-            if (gc::IsInsideNursery(ptr.value))
+            if (gc::IsInsideNursery(ptr.value)) {
                 embedsNurseryPointers_ = true;
+            }
             dataRelocations_.writeUnsigned(nextOffset().getOffset());
         }
     }
 
     void assertNoGCThings() const {
 #ifdef DEBUG
         MOZ_ASSERT(dataRelocations_.length() == 0);
-        for (auto& j : jumps_)
+        for (auto& j : jumps_) {
             MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
+        }
 #endif
     }
 
   public:
     bool oom() const;
 
     void setPrinter(Sprinter* sp) {
 #ifdef JS_JITSPEW
@@ -943,18 +951,19 @@ class AssemblerMIPSShared : public Assem
 #ifdef JS_JITSPEW
     MOZ_COLD void spew(const char* fmt, va_list va) MOZ_FORMAT_PRINTF(2, 0) {
         // Buffer to hold the formatted string. Note that this may contain
         // '%' characters, so do not pass it directly to printf functions.
         char buf[200];
 
         int i = VsprintfLiteral(buf, fmt, va);
         if (i > -1) {
-            if (printer)
+            if (printer) {
                 printer->printf("%s\n", buf);
+            }
             js::jit::JitSpew(js::jit::JitSpew_Codegen, "%s", buf);
         }
     }
 #endif
 
     static const Register getStackPointer() {
         return StackPointer;
     }
@@ -1282,18 +1291,19 @@ class AssemblerMIPSShared : public Assem
     static bool HasRoundInstruction(RoundingMode mode) { return false; }
 
   protected:
     InstImm invertBranch(InstImm branch, BOffImm16 skipOffset);
     void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
         enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
-        if (kind == RelocationKind::JITCODE)
+        if (kind == RelocationKind::JITCODE) {
             writeRelocation(src);
+        }
     }
 
     void addLongJump(BufferOffset src, BufferOffset dst) {
         CodeLabel cl;
         cl.patchAt()->bind(src.getOffset());
         cl.target()->bind(dst.getOffset());
         cl.setLinkMode(CodeLabel::JumpImmediate);
         addCodeLabel(std::move(cl));
@@ -1564,22 +1574,24 @@ class InstGS : public Instruction
     InstGS(Opcode op, Register rs, FloatRegister rt, Imm8 off, FunctionField ff)
       : Instruction(op | RS(rs) | RT(rt) | off.encode(6) | ff)
     { }
 };
 
 inline bool
 IsUnaligned(const wasm::MemoryAccessDesc& access)
 {
-    if (!access.align())
+    if (!access.align()) {
         return false;
+    }
 
 #ifdef JS_CODEGEN_MIPS32
-    if (access.type() == Scalar::Int64 && access.align() >= 4)
+    if (access.type() == Scalar::Int64 && access.align() >= 4) {
         return false;
+    }
 #endif
 
     return access.align() < access.byteSize();
 }
 
 } // namespace jit
 } // namespace js
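
BOffImm16::IsInRange in the first hunk above encodes a branch offset as a signed 16-bit word count relative to the instruction after the branch (hence the `- 4` and the `>> 2`). A worked check of the resulting bounds, mirroring the shown code:

    #include <cassert>
    #include <cstdint>

    // Mirrors BOffImm16::IsInRange: the offset is stored as (offset - 4) >> 2
    // in a signed 16-bit field, so the reachable byte offsets are
    // [INT16_MIN*4 + 4, INT16_MAX*4 + 4] in steps of 4.
    static bool IsInRange(int offset) {
        if ((offset - 4) < (INT16_MIN * 4)) {
            return false;
        }
        if ((offset - 4) > (INT16_MAX * 4)) {
            return false;
        }
        return true;
    }

    int main() {
        assert(IsInRange(4));                    // encodes as 0
        assert(IsInRange(INT16_MAX * 4 + 4));    // largest forward branch
        assert(IsInRange(INT16_MIN * 4 + 4));    // largest backward branch
        assert(!IsInRange(INT16_MAX * 4 + 8));   // one word too far
        return 0;
    }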
--- a/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
+++ b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
@@ -169,29 +169,31 @@ namespace js {
 namespace jit {
 
 #if defined(JS_CODEGEN_MIPS32)
 template<>
 inline int64_t
 js::jit::AtomicOperations::compareExchangeSeqCst(int64_t* addr, int64_t oldval, int64_t newval)
 {
     AddressGuard guard(addr);
     int64_t val = *addr;
-    if (val == oldval)
+    if (val == oldval) {
         *addr = newval;
+    }
     return val;
 }
 
 template<>
 inline uint64_t
 js::jit::AtomicOperations::compareExchangeSeqCst(uint64_t* addr, uint64_t oldval, uint64_t newval)
 {
     AddressGuard guard(addr);
     uint64_t val = *addr;
-    if (val == oldval)
+    if (val == oldval) {
         *addr = newval;
+    }
     return val;
 }
 #endif
 
 }
 }
 
 template<typename T>
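
Background for the hunks above: MIPS32 has no native 64-bit compare-exchange, so the specialization serializes through an AddressGuard and performs the compare and conditional store as ordinary memory operations. A sketch of the same idea, with a single std::mutex standing in for SpiderMonkey's AddressGuard:

    #include <cstdint>
    #include <mutex>

    // Stand-in for the patch's AddressGuard: one global lock instead of a
    // per-address lock table (illustration only).
    static std::mutex gAtomicLock;

    static int64_t CompareExchangeSeqCst(int64_t* addr, int64_t oldval, int64_t newval) {
        std::lock_guard<std::mutex> guard(gAtomicLock);
        int64_t val = *addr;
        if (val == oldval) {
            *addr = newval;
        }
        return val;  // always the value observed before any store
    }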
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -39,20 +39,22 @@ using JS::ToInt32;
 
 CodeGeneratorMIPSShared::CodeGeneratorMIPSShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
   : CodeGeneratorShared(gen, graph, masm)
 { }
 
 Operand
 CodeGeneratorMIPSShared::ToOperand(const LAllocation& a)
 {
-    if (a.isGeneralReg())
+    if (a.isGeneralReg()) {
         return Operand(a.toGeneralReg()->reg());
-    if (a.isFloatReg())
+    }
+    if (a.isFloatReg()) {
         return Operand(a.toFloatReg()->reg());
+    }
     return Operand(masm.getStackPointer(), ToStackOffset(&a));
 }
 
 Operand
 CodeGeneratorMIPSShared::ToOperand(const LAllocation* a)
 {
     return ToOperand(*a);
 }
@@ -78,20 +80,21 @@ CodeGeneratorMIPSShared::ToOperandOrRegi
 #endif
 
 void
 CodeGeneratorMIPSShared::branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
                                        MBasicBlock* mir, Assembler::DoubleCondition cond)
 {
     // Skip past trivial blocks.
     Label* label = skipTrivialBlocks(mir)->lir()->label();
-    if (fmt == Assembler::DoubleFloat)
+    if (fmt == Assembler::DoubleFloat) {
         masm.branchDouble(cond, lhs, rhs, label);
-    else
+    } else {
         masm.branchFloat(cond, lhs, rhs, label);
+    }
 }
 
 FrameSizeClass
 FrameSizeClass::FromDepth(uint32_t frameDepth)
 {
     return FrameSizeClass::None();
 }
 
@@ -131,30 +134,32 @@ CodeGenerator::visitCompare(LCompare* co
     const LAllocation* left = comp->getOperand(0);
     const LAllocation* right = comp->getOperand(1);
     const LDefinition* def = comp->getDef(0);
 
 #ifdef JS_CODEGEN_MIPS64
     if (mir->compareType() == MCompare::Compare_Object ||
         mir->compareType() == MCompare::Compare_Symbol)
     {
-        if (right->isGeneralReg())
+        if (right->isGeneralReg()) {
             masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right), ToRegister(def));
-        else
+        } else {
             masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+        }
         return;
     }
 #endif
 
-    if (right->isConstant())
+    if (right->isConstant()) {
         masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)), ToRegister(def));
-    else if (right->isGeneralReg())
+    } else if (right->isGeneralReg()) {
         masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
-    else
+    } else {
         masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+    }
 }
 
 void
 CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp)
 {
     MCompare* mir = comp->cmpMir();
     Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
@@ -184,18 +189,19 @@ CodeGenerator::visitCompareAndBranch(LCo
         emitBranch(ToRegister(comp->left()), ScratchRegister, cond, comp->ifTrue(),
                    comp->ifFalse());
     }
 }
 
 bool
 CodeGeneratorMIPSShared::generateOutOfLineCode()
 {
-    if (!CodeGeneratorShared::generateOutOfLineCode())
+    if (!CodeGeneratorShared::generateOutOfLineCode()) {
         return false;
+    }
 
     if (deoptLabel_.used()) {
         // All non-table-based bailouts will go here.
         masm.bind(&deoptLabel_);
 
         // Push the frame size, so the handler can recover the IonScript.
         // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk
         // We have to use 'ra' because generateBailoutTable will implicitly do
@@ -207,18 +213,19 @@ CodeGeneratorMIPSShared::generateOutOfLi
     }
 
     return !masm.oom();
 }
 
 void
 CodeGeneratorMIPSShared::bailoutFrom(Label* label, LSnapshot* snapshot)
 {
-    if (masm.bailed())
+    if (masm.bailed()) {
         return;
+    }
 
     MOZ_ASSERT_IF(!masm.oom(), label->used());
     MOZ_ASSERT_IF(!masm.oom(), !label->bound());
 
     encode(snapshot);
 
     // Though the assembler doesn't track all frame pushes, at least make sure
     // the known value makes sense. We can't use bailout tables if the stack
@@ -245,34 +252,36 @@ CodeGeneratorMIPSShared::bailout(LSnapsh
 
 void
 CodeGenerator::visitMinMaxD(LMinMaxD* ins)
 {
     FloatRegister first = ToFloatRegister(ins->first());
     FloatRegister second = ToFloatRegister(ins->second());
 
     MOZ_ASSERT(first == ToFloatRegister(ins->output()));
 
-    if (ins->mir()->isMax())
+    if (ins->mir()->isMax()) {
         masm.maxDouble(second, first, true);
-    else
+    } else {
         masm.minDouble(second, first, true);
+    }
 }
 
 void
 CodeGenerator::visitMinMaxF(LMinMaxF* ins)
 {
     FloatRegister first = ToFloatRegister(ins->first());
     FloatRegister second = ToFloatRegister(ins->second());
 
     MOZ_ASSERT(first == ToFloatRegister(ins->output()));
 
-    if (ins->mir()->isMax())
+    if (ins->mir()->isMax()) {
         masm.maxFloat32(second, first, true);
-    else
+    } else {
         masm.minFloat32(second, first, true);
+    }
 }
 
 void
 CodeGenerator::visitAbsD(LAbsD* ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     MOZ_ASSERT(input == ToFloatRegister(ins->output()));
     masm.as_absd(input, input);
@@ -308,28 +317,30 @@ CodeGenerator::visitAddI(LAddI* ins)
     const LAllocation* lhs = ins->getOperand(0);
     const LAllocation* rhs = ins->getOperand(1);
     const LDefinition* dest = ins->getDef(0);
 
     MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
 
     // If there is no snapshot, we don't need to check for overflow
     if (!ins->snapshot()) {
-        if (rhs->isConstant())
+        if (rhs->isConstant()) {
             masm.ma_addu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
-        else
+        } else {
             masm.as_addu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+        }
         return;
     }
 
     Label overflow;
-    if (rhs->isConstant())
+    if (rhs->isConstant()) {
         masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), &overflow);
-    else
+    } else {
         masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs), &overflow);
+    }
 
     bailoutFrom(&overflow, ins->snapshot());
 }
 
 void
 CodeGenerator::visitAddI64(LAddI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
@@ -351,28 +362,30 @@ CodeGenerator::visitSubI(LSubI* ins)
     const LAllocation* lhs = ins->getOperand(0);
     const LAllocation* rhs = ins->getOperand(1);
     const LDefinition* dest = ins->getDef(0);
 
     MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
 
     // If there is no snapshot, we don't need to check for overflow
     if (!ins->snapshot()) {
-        if (rhs->isConstant())
+        if (rhs->isConstant()) {
             masm.ma_subu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
-        else
+        } else {
             masm.as_subu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+        }
         return;
     }
 
     Label overflow;
-    if (rhs->isConstant())
+    if (rhs->isConstant()) {
         masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), &overflow);
-    else
+    } else {
         masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs), &overflow);
+    }
 
     bailoutFrom(&overflow, ins->snapshot());
 }
 
 void
 CodeGenerator::visitSubI64(LSubI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
@@ -405,18 +418,19 @@ CodeGenerator::visitMulI(LMulI* ins)
         // Bailout on -0.0
        if (mul->canBeNegativeZero() && constant <= 0) {
            Assembler::Condition cond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
            bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
        }
 
        switch (constant) {
          case -1:
-            if (mul->canOverflow())
+            if (mul->canOverflow()) {
                bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN), ins->snapshot());
+            }
 
            masm.ma_negu(dest, src);
            break;
          case 0:
            masm.move32(Imm32(0), dest);
            break;
          case 1:
            masm.move32(src, dest);
@@ -447,18 +461,19 @@ CodeGenerator::visitMulI(LMulI* ins)
                // If the constant cannot be encoded as (1<<C1), see if it can
                // be encoded as (1<<C1) | (1<<C2), which can be computed
                // using an add and a shift.
                uint32_t shift_rest = FloorLog2(rest);
                if (src != dest && (1u << shift_rest) == rest) {
                    masm.ma_sll(dest, src, Imm32(shift - shift_rest));
                    masm.add32(src, dest);
-                    if (shift_rest != 0)
+                    if (shift_rest != 0) {
                        masm.ma_sll(dest, dest, Imm32(shift_rest));
+                    }
                    return;
                }
            }
 
            if (mul->canOverflow() && (constant > 0) && (src != dest)) {
                // To stay on the safe side, only optimize things that are a
                // power of 2.
@@ -850,64 +865,70 @@ void
 CodeGenerator::visitBitOpI(LBitOpI* ins)
 {
     const LAllocation* lhs = ins->getOperand(0);
     const LAllocation* rhs = ins->getOperand(1);
     const LDefinition* dest = ins->getDef(0);
     // all of these bitops should be either imm32's, or integer registers.
     switch (ins->bitop()) {
       case JSOP_BITOR:
-        if (rhs->isConstant())
+        if (rhs->isConstant()) {
            masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
-        else
+        } else {
            masm.as_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+        }
        break;
      case JSOP_BITXOR:
-        if (rhs->isConstant())
+        if (rhs->isConstant()) {
            masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
-        else
+        } else {
            masm.as_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+        }
        break;
      case JSOP_BITAND:
-        if (rhs->isConstant())
+        if (rhs->isConstant()) {
            masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
-        else
+        } else {
            masm.as_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+        }
        break;
      default:
        MOZ_CRASH("unexpected binary opcode");
     }
 }
 
 void
 CodeGenerator::visitBitOpI64(LBitOpI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
     const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
 
     MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
 
     switch (lir->bitop()) {
       case JSOP_BITOR:
-        if (IsConstant(rhs))
+        if (IsConstant(rhs)) {
            masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
-        else
+        } else {
            masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+        }
        break;
      case JSOP_BITXOR:
-        if (IsConstant(rhs))
+        if (IsConstant(rhs)) {
            masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
-        else
+        } else {
            masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+        }
        break;
      case JSOP_BITAND:
-        if (IsConstant(rhs))
+        if (IsConstant(rhs)) {
            masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
-        else
+        } else {
            masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+        }
        break;
      default:
        MOZ_CRASH("unexpected binary opcode");
     }
 }
 
 void
 CodeGenerator::visitShiftI(LShiftI* ins)
@@ -915,34 +936,37 @@ CodeGenerator::visitShiftI(LShiftI* ins)
     Register lhs = ToRegister(ins->lhs());
     const LAllocation* rhs = ins->rhs();
     Register dest = ToRegister(ins->output());
 
     if (rhs->isConstant()) {
         int32_t shift = ToInt32(rhs) & 0x1F;
         switch (ins->bitop()) {
           case JSOP_LSH:
-            if (shift)
+            if (shift) {
                masm.ma_sll(dest, lhs, Imm32(shift));
-            else
+            } else {
                masm.move32(lhs, dest);
+            }
            break;
          case JSOP_RSH:
-            if (shift)
+            if (shift) {
                masm.ma_sra(dest, lhs, Imm32(shift));
-            else
+            } else {
                masm.move32(lhs, dest);
+            }
            break;
          case JSOP_URSH:
            if (shift) {
                masm.ma_srl(dest, lhs, Imm32(shift));
            } else {
                // x >>> 0 can overflow.
-                if (ins->mir()->toUrsh()->fallible())
+                if (ins->mir()->toUrsh()->fallible()) {
                    bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
+                }
                masm.move32(lhs, dest);
            }
            break;
          default:
            MOZ_CRASH("Unexpected shift op");
        }
     } else {
         // The shift amounts should be AND'ed into the 0-31 range
@@ -975,26 +999,29 @@ CodeGenerator::visitShiftI64(LShiftI64*
     LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
 
     MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
 
     if (rhs->isConstant()) {
         int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
         switch (lir->bitop()) {
           case JSOP_LSH:
-            if (shift)
+            if (shift) {
                masm.lshift64(Imm32(shift), ToRegister64(lhs));
+            }
            break;
          case JSOP_RSH:
-            if (shift)
+            if (shift) {
                masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+            }
            break;
          case JSOP_URSH:
-            if (shift)
+            if (shift) {
                masm.rshift64(Imm32(shift), ToRegister64(lhs));
+            }
            break;
          default:
            MOZ_CRASH("Unexpected shift op");
        }
        return;
     }
 
     switch (lir->bitop()) {
@@ -1029,25 +1056,27 @@ CodeGenerator::visitRotateI64(LRotateI64
     if (count->isConstant()) {
         int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
         if (!c) {
 #ifdef JS_CODEGEN_MIPS32
             masm.move64(input, output);
 #endif
             return;
         }
-        if (mir->isLeftRotate())
+        if (mir->isLeftRotate()) {
             masm.rotateLeft64(Imm32(c), input, output, temp);
-        else
+        } else {
             masm.rotateRight64(Imm32(c), input, output, temp);
+        }
     } else {
-        if (mir->isLeftRotate())
+        if (mir->isLeftRotate()) {
             masm.rotateLeft64(ToRegister(count), input, output, temp);
-        else
+        } else {
             masm.rotateRight64(ToRegister(count), input, output, temp);
+        }
     }
 }
 
 void
 CodeGenerator::visitUrshD(LUrshD* ins)
 {
     Register lhs = ToRegister(ins->lhs());
     Register temp = ToRegister(ins->temp());
@@ -1124,18 +1153,19 @@ CodeGenerator::visitPowHalfD(LPowHalfD*
     masm.as_sqrtd(output, output);
 
     masm.bind(&done);
 }
 
 MoveOperand
 CodeGeneratorMIPSShared::toMoveOperand(LAllocation a) const
 {
-    if (a.isGeneralReg())
+    if (a.isGeneralReg()) {
         return MoveOperand(ToRegister(a));
+    }
     if (a.isFloatReg()) {
         return MoveOperand(ToFloatRegister(a));
     }
     int32_t offset = ToStackOffset(a);
     MOZ_ASSERT((offset & 3) == 0);
 
     return MoveOperand(StackPointer, offset);
 }
@@ -1519,33 +1549,35 @@ CodeGenerator::visitWasmTruncateToInt32(
     MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
 
     auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
     addOutOfLineCode(ool, mir);
 
     Label* oolEntry = ool->entry();
     if (mir->isUnsigned()) {
-        if (fromType == MIRType::Double)
+        if (fromType == MIRType::Double) {
             masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(), oolEntry);
-        else if (fromType == MIRType::Float32)
+        } else if (fromType == MIRType::Float32) {
             masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(), oolEntry);
-        else
+        } else {
             MOZ_CRASH("unexpected type");
+        }
 
         masm.bind(ool->rejoin());
         return;
     }
 
-    if (fromType == MIRType::Double)
+    if (fromType == MIRType::Double) {
         masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(), oolEntry);
-    else if (fromType == MIRType::Float32)
+    } else if (fromType == MIRType::Float32) {
         masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(), oolEntry);
-    else
+    } else {
         MOZ_CRASH("unexpected type");
+    }
 
     masm.bind(ool->rejoin());
 }
 
 void
 CodeGeneratorMIPSShared::visitOutOfLineBailout(OutOfLineBailout* ool)
 {
@@ -1732,20 +1764,21 @@ CodeGenerator::visitCompareFAndBranch(LC
                       Assembler::InvertCondition(cond));
         jumpToBlock(ifTrue);
     }
 }
 
 void
 CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir)
 {
-    if (lir->right()->isConstant())
+    if (lir->right()->isConstant()) {
         masm.ma_and(ScratchRegister, ToRegister(lir->left()), Imm32(ToInt32(lir->right())));
-    else
+    } else {
         masm.as_and(ScratchRegister, ToRegister(lir->left()), ToRegister(lir->right()));
+    }
     emitBranch(ScratchRegister, ScratchRegister, lir->cond(), lir->ifTrue(),
                lir->ifFalse());
 }
 
 void
 CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir)
 {
     masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
@@ -1795,18 +1828,19 @@ CodeGenerator::visitMemoryBarrier(LMemor
 }
 
 void
 CodeGeneratorMIPSShared::generateInvalidateEpilogue()
 {
     // Ensure that there is enough space in the buffer for the OsiPoint
     // patching to occur. Otherwise, we could overwrite the invalidation
     // epilogue.
-    for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize())
+    for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
         masm.nop();
+    }
 
     masm.bind(&invalidate_);
 
     // Push the return address of the point that we bailed out at to the stack
     masm.Push(ra);
 
     // Push the Ion script onto the stack (when we determine what that
     // pointer is).
@@ -1868,18 +1902,19 @@ CodeGeneratorMIPSShared::visitOutOfLineT
 void
 CodeGeneratorMIPSShared::emitTableSwitchDispatch(MTableSwitch* mir, Register index,
                                                  Register base)
 {
     Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
 
     // Lower value with low value
-    if (mir->low() != 0)
+    if (mir->low() != 0) {
         masm.subPtr(Imm32(mir->low()), index);
+    }
 
     // Jump to default case if input is out of range
     int32_t cases = mir->numCases();
     masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);
 
     // To fill in the CodeLabels for the case entries, we need to first
     // generate the case entries (we don't yet know their offsets in the
     // instruction stream).
@@ -2029,32 +2064,34 @@ CodeGenerator::visitAsmJSLoadHeap(LAsmJS
         return;
     }
 
     Label done, outOfRange;
     masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptrReg, ToRegister(boundsCheckLimit),
                          &outOfRange);
     // Offset is ok, let's load value.
     if (isFloat) {
-        if (size == 32)
+        if (size == 32) {
             masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
-        else
+        } else {
             masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+        }
     } else {
         masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                      static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
     }
     masm.ma_b(&done, ShortJump);
     masm.bind(&outOfRange);
     // Offset is out of range. Load default values.
     if (isFloat) {
-        if (size == 32)
+        if (size == 32) {
             masm.loadConstantFloat32(float(GenericNaN()), ToFloatRegister(out));
-        else
+        } else {
             masm.loadConstantDouble(GenericNaN(), ToFloatRegister(out));
+        }
     } else {
         masm.move32(Imm32(0), ToRegister(out));
     }
     masm.bind(&done);
 }
 
 void
 CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
@@ -2082,38 +2119,40 @@ CodeGenerator::visitAsmJSStoreHeap(LAsmJ
     if (ptr->isConstant()) {
         MOZ_ASSERT(!mir->needsBoundsCheck());
         int32_t ptrImm = ptr->toConstant()->toInt32();
         MOZ_ASSERT(ptrImm >= 0);
 
         if (isFloat) {
             FloatRegister freg = ToFloatRegister(value);
             Address addr(HeapReg, ptrImm);
-            if (size == 32)
+            if (size == 32) {
                 masm.storeFloat32(freg, addr);
-            else
+            } else {
                 masm.storeDouble(freg, addr);
+            }
         } else {
             masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
                           static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
         }
         return;
     }
 
     Register ptrReg = ToRegister(ptr);
     Address dstAddr(ptrReg, 0);
 
     if (!mir->needsBoundsCheck()) {
         if (isFloat) {
             FloatRegister freg = ToFloatRegister(value);
             BaseIndex bi(HeapReg, ptrReg, TimesOne);
-            if (size == 32)
+            if (size == 32) {
                 masm.storeFloat32(freg, bi);
-            else
+            } else {
                 masm.storeDouble(freg, bi);
+            }
         } else {
             masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
                           static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
         }
         return;
     }
 
     Label outOfRange;
@@ -2223,20 +2262,21 @@ CodeGenerator::visitWasmStackArg(LWasmSt
     }
 }
 
 void
 CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins)
 {
     const MWasmStackArg* mir = ins->mir();
     Address dst(StackPointer, mir->spOffset());
-    if (IsConstant(ins->arg()))
+    if (IsConstant(ins->arg())) {
         masm.store64(Imm64(ToInt64(ins->arg())), dst);
-    else
+    } else {
         masm.store64(ToRegister64(ins->arg()), dst);
+    }
 }
 
 void
 CodeGenerator::visitWasmSelect(LWasmSelect* ins)
 {
     MIRType mirType = ins->mir()->type();
 
     Register cond = ToRegister(ins->condExpr());
@@ -2248,32 +2288,34 @@ CodeGenerator::visitWasmSele
         masm.as_movz(out, ToRegister(falseExpr), cond);
         return;
     }
 
     FloatRegister out = ToFloatRegister(ins->output());
     MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out, "true expr input is reused for output");
 
     if (falseExpr->isFloatReg()) {
-        if (mirType == MIRType::Float32)
+        if (mirType == MIRType::Float32) {
             masm.as_movz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr), cond);
-        else if (mirType == MIRType::Double)
+        } else if (mirType == MIRType::Double) {
             masm.as_movz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr), cond);
-        else
+        } else {
             MOZ_CRASH("unhandled type in visitWasmSelect!");
+        }
     } else {
         Label done;
         masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
 
-        if (mirType == MIRType::Float32)
+        if (mirType == MIRType::Float32) {
             masm.loadFloat32(ToAddress(falseExpr), out);
-        else if (mirType == MIRType::Double)
+        } else if (mirType == MIRType::Double) {
             masm.loadDouble(ToAddress(falseExpr), out);
-        else
+        } else {
             MOZ_CRASH("unhandled type in visitWasmSelect!");
+        }
 
         masm.bind(&done);
     }
 }
 
 void
 CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir)
 {
@@ -2329,24 +2371,26 @@ CodeGenerator::visitUDivOrMod(LUDivOrMod
         }
     }
 
     masm.as_divu(lhs, rhs);
     masm.as_mfhi(output);
 
     // If the remainder is > 0, bailout since this must be a double.
     if (ins->mir()->isDiv()) {
-        if (!ins->mir()->toDiv()->canTruncateRemainder())
+        if (!ins->mir()->toDiv()->canTruncateRemainder()) {
             bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
+        }
         // Get quotient
         masm.as_mflo(output);
     }
 
-    if (!ins->mir()->isTruncated())
+    if (!ins->mir()->isTruncated()) {
         bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
+    }
 
     masm.bind(&done);
 }
 
 void
 CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins)
 {
     const MEffectiveAddress* mir = ins->mir();
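
One visitMulI hunk above sits inside the strength reduction for constants of the form (1 << C1) | (1 << C2): the multiply becomes a shift, an add, and a final shift. Checking that arithmetic for a hypothetical constant of 10 in plain C++:

    #include <cassert>
    #include <cstdint>

    // Mirrors the visitMulI sequence for constant = (1 << shift) | (1 << shift_rest):
    //   dest = src << (shift - shift_rest);  dest += src;  dest <<= shift_rest;
    static int32_t MulByTen(int32_t src) {
        // 10 = (1 << 3) | (1 << 1), so shift = 3 and shift_rest = 1.
        int32_t dest = src << (3 - 1);  // src * 4
        dest += src;                    // src * 5
        dest <<= 1;                     // src * 10
        return dest;
    }

    int main() {
        assert(MulByTen(7) == 70);
        assert(MulByTen(12) == 120);
        return 0;
    }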
--- a/js/src/jit/mips-shared/LIR-mips-shared.h
+++ b/js/src/jit/mips-shared/LIR-mips-shared.h
@@ -239,31 +239,34 @@ class LUDivOrMod : public LBinaryMath<0>
     {}
 
     MBinaryArithInstruction* mir() const {
         MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
         return static_cast<MBinaryArithInstruction*>(mir_);
     }
 
     bool canBeDivideByZero() const {
-        if (mir_->isMod())
+        if (mir_->isMod()) {
             return mir_->toMod()->canBeDivideByZero();
+        }
         return mir_->toDiv()->canBeDivideByZero();
     }
 
     bool trapOnError() const {
-        if (mir_->isMod())
+        if (mir_->isMod()) {
             return mir_->toMod()->trapOnError();
+        }
         return mir_->toDiv()->trapOnError();
     }
 
     wasm::BytecodeOffset bytecodeOffset() const {
         MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
-        if (mir_->isMod())
+        if (mir_->isMod()) {
             return mir_->toMod()->bytecodeOffset();
+        }
         return mir_->toDiv()->bytecodeOffset();
     }
 };
 
 namespace details {
 
 // Base class for the int64 and non-int64 variants.
 template<size_t NumDefs>
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -80,64 +80,70 @@ LIRGeneratorMIPSShared::lowerForMulInt64
 #ifdef JS_CODEGEN_MIPS32
     needsTemp = true;
     cannotAliasRhs = true;
     if (rhs->isConstant()) {
         int64_t constant = rhs->toConstant()->toInt64();
         int32_t shift = mozilla::FloorLog2(constant);
         // See special cases in CodeGeneratorMIPSShared::visitMulI64
-        if (constant >= -1 && constant <= 2)
+        if (constant >= -1 && constant <= 2) {
             needsTemp = false;
-        if (int64_t(1) << shift == constant)
+        }
+        if (int64_t(1) << shift == constant) {
             needsTemp = false;
+        }
         if (mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant + 1)) ||
             mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant - 1)))
             reuseInput = false;
     }
 #endif
     ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
     ins->setInt64Operand(INT64_PIECES,
                          (lhs != rhs || cannotAliasRhs) ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
 
-    if (needsTemp)
+    if (needsTemp) {
         ins->setTemp(0, temp());
-    if(reuseInput)
+    }
+    if (reuseInput) {
         defineInt64ReuseInput(ins, mir, 0);
-    else
+    } else {
         defineInt64(ins, mir);
+    }
 }
 
 template<size_t Temps>
 void
 LIRGeneratorMIPSShared::lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
                                            MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
 {
 #ifdef JS_CODEGEN_MIPS32
     if (mir->isRotate()) {
-        if (!rhs->isConstant())
+        if (!rhs->isConstant()) {
             ins->setTemp(0, temp());
+        }
         ins->setInt64Operand(0, useInt64Register(lhs));
     } else {
         ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
     }
 #else
     ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
 #endif
 
     static_assert(LShiftI64::Rhs == INT64_PIECES, "Assume Rhs is located at INT64_PIECES.");
     static_assert(LRotateI64::Count == INT64_PIECES, "Assume Count is located at INT64_PIECES.");
 
     ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
 
 #ifdef JS_CODEGEN_MIPS32
-    if (mir->isRotate())
+    if (mir->isRotate()) {
         defineInt64(ins, mir);
-    else
+    } else {
         defineInt64ReuseInput(ins, mir, 0);
+    }
 #else
     defineInt64ReuseInput(ins, mir, 0);
 #endif
 }
 
 template void LIRGeneratorMIPSShared::lowerForShiftInt64(
     LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 0>* ins, MDefinition* mir,
     MDefinition* lhs, MDefinition* rhs);
@@ -201,35 +207,38 @@ LIRGeneratorMIPSShared::lowerDivI(MDiv*
         // Check for division by a positive power of two, which is an easy and
         // important case to optimize. Note that other optimizations are also
         // possible; division by negative powers of two can be optimized in a
         // similar manner as positive powers of two, and division by other
         // constants can be optimized by a reciprocal multiplication technique.
         int32_t shift = FloorLog2(rhs);
         if (rhs > 0 && 1 << shift == rhs) {
             LDivPowTwoI* lir = new(alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
-            if (div->fallible())
+            if (div->fallible()) {
                 assignSnapshot(lir, Bailout_DoubleOutput);
+            }
             define(lir, div);
             return;
         }
     }
 
     LDivI* lir = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
-    if (div->fallible())
+    if (div->fallible()) {
         assignSnapshot(lir, Bailout_DoubleOutput);
+    }
     define(lir, div);
 }
 
 void
 LIRGeneratorMIPSShared::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs)
 {
     LMulI* lir = new(alloc()) LMulI;
-    if (mul->fallible())
+    if (mul->fallible()) {
         assignSnapshot(lir, Bailout_DoubleOutput);
+    }
 
     lowerForALU(lir, mul, lhs, rhs);
 }
 
 void
 LIRGeneratorMIPSShared::lowerModI(MMod* mod)
 {
     if (mod->isUnsigned()) {
@@ -237,36 +246,39 @@ LIRGeneratorMIPSShared::lowerModI(MMod*
         return;
     }
 
     if (mod->rhs()->isConstant()) {
         int32_t rhs = mod->rhs()->toConstant()->toInt32();
         int32_t shift = FloorLog2(rhs);
         if (rhs > 0 && 1 << shift == rhs) {
             LModPowTwoI* lir = new(alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
-            if (mod->fallible())
+            if (mod->fallible()) {
                 assignSnapshot(lir, Bailout_DoubleOutput);
+            }
             define(lir, mod);
             return;
         } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
             LModMaskI* lir = new(alloc()) LModMaskI(useRegister(mod->lhs()),
                                                     temp(LDefinition::GENERAL),
                                                     temp(LDefinition::GENERAL),
                                                     shift + 1);
-            if (mod->fallible())
+            if (mod->fallible()) {
                 assignSnapshot(lir, Bailout_DoubleOutput);
+            }
             define(lir, mod);
             return;
         }
     }
     LModI* lir = new(alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
                                     temp(LDefinition::GENERAL));
 
-    if (mod->fallible())
+    if (mod->fallible()) {
         assignSnapshot(lir, Bailout_DoubleOutput);
+    }
     define(lir, mod);
 }
 
 void
 LIRGenerator::visitPowHalf(MPowHalf* ins)
 {
     MDefinition* input = ins->input();
     MOZ_ASSERT(input->type() == MIRType::Double);
@@ -317,60 +329,67 @@ LIRGenerator::visitWasmNeg(MWasmNeg* ins
 void
 LIRGenerator::visitWasmLoad(MWasmLoad* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation ptr;
 #ifdef JS_CODEGEN_MIPS32
-    if (ins->type() == MIRType::Int64)
+    if (ins->type() == MIRType::Int64) {
         ptr = useRegister(base);
-    else
+    } else {
+        ptr = useRegisterAtStart(base);
+    }
+#else
+    ptr = useRegisterAtStart(base);
 #endif
-    ptr = useRegisterAtStart(base);
 
     if (IsUnaligned(ins->access())) {
         if (ins->type() == MIRType::Int64) {
             auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, temp());
-            if (ins->access().offset())
+            if (ins->access().offset()) {
                 lir->setTemp(0, tempCopy(base, 0));
+            }
 
             defineInt64(lir, ins);
             return;
         }
 
         auto* lir = new(alloc()) LWasmUnalignedLoad(ptr, temp());
-        if (ins->access().offset())
+        if (ins->access().offset()) {
             lir->setTemp(0, tempCopy(base, 0));
+        }
 
         define(lir, ins);
         return;
     }
 
     if (ins->type() == MIRType::Int64) {
 #ifdef JS_CODEGEN_MIPS32
         if(ins->access().isAtomic()) {
             auto* lir = new(alloc()) LWasmAtomicLoadI64(ptr);
             defineInt64(lir, ins);
             return;
         }
 #endif
         auto* lir = new(alloc()) LWasmLoadI64(ptr);
-        if (ins->access().offset())
+        if (ins->access().offset()) {
             lir->setTemp(0, tempCopy(base, 0));
+        }
 
         defineInt64(lir, ins);
         return;
     }
 
     auto* lir = new(alloc()) LWasmLoad(ptr);
-    if (ins->access().offset())
+    if (ins->access().offset()) {
         lir->setTemp(0, tempCopy(base, 0));
+    }
 
     define(lir, ins);
 }
 
 void
 LIRGenerator::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
@@ -378,27 +397,29 @@ LIRGenerator::visitWasmStore(MWasmStore*
     MDefinition* value = ins->value();
 
     if (IsUnaligned(ins->access())) {
         LAllocation baseAlloc = useRegisterAtStart(base);
         if (ins->access().type() == Scalar::Int64) {
             LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
             auto* lir = new(alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
-            if (ins->access().offset())
+            if (ins->access().offset()) {
                 lir->setTemp(0, tempCopy(base, 0));
+            }
 
             add(lir, ins);
             return;
         }
 
         LAllocation valueAlloc = useRegisterAtStart(value);
         auto* lir = new(alloc()) LWasmUnalignedStore(baseAlloc, valueAlloc, temp());
-        if (ins->access().offset())
+        if (ins->access().offset()) {
             lir->setTemp(0, tempCopy(base, 0));
+        }
 
         add(lir, ins);
         return;
     }
 
     if (ins->access().type() == Scalar::Int64) {
 #ifdef JS_CODEGEN_MIPS32
@@ -407,28 +428,30 @@ LIRGenerator::visitWasmStore(MWasmStore*
             add(lir, ins);
             return;
         }
 #endif
 
         LAllocation baseAlloc = useRegisterAtStart(base);
         LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
         auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
-        if (ins->access().offset())
+        if (ins->access().offset()) {
             lir->setTemp(0, tempCopy(base, 0));
+        }
 
         add(lir, ins);
         return;
     }
 
     LAllocation baseAlloc = useRegisterAtStart(base);
     LAllocation valueAlloc = useRegisterAtStart(value);
     auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
-    if (ins->access().offset())
+    if (ins->access().offset()) {
         lir->setTemp(0, tempCopy(base, 0));
+    }
 
     add(lir, ins);
 }
 
 void
 LIRGenerator::visitWasmSelect(MWasmSelect* ins)
 {
     if (ins->type() == MIRType::Int64) {
@@ -451,33 +474,35 @@ void
 LIRGeneratorMIPSShared::lowerUDiv(MDiv* div)
 {
     MDefinition* lhs = div->getOperand(0);
     MDefinition* rhs = div->getOperand(1);
 
     LUDivOrMod* lir = new(alloc()) LUDivOrMod;
     lir->setOperand(0, useRegister(lhs));
     lir->setOperand(1, useRegister(rhs));
-    if (div->fallible())
+    if (div->fallible()) {
         assignSnapshot(lir, Bailout_DoubleOutput);
+    }
 
     define(lir, div);
 }
 
 void
 LIRGeneratorMIPSShared::lowerUMod(MMod* mod)
 {
     MDefinition* lhs = mod->getOperand(0);
     MDefinition* rhs = mod->getOperand(1);
 
     LUDivOrMod* lir = new(alloc()) LUDivOrMod;
     lir->setOperand(0, useRegister(lhs));
     lir->setOperand(1, useRegister(rhs));
-    if (mod->fallible())
+    if (mod->fallible()) {
         assignSnapshot(lir, Bailout_DoubleOutput);
+    }
 
     define(lir, mod);
 }
 
 void
 LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins)
 {
     MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
@@ -575,18 +600,19 @@ LIRGenerator::visitCompareExchangeTypedA
     const LAllocation newval = useRegister(ins->newval());
     const LAllocation oldval = useRegister(ins->oldval());
 
     LDefinition outTemp = LDefinition::BogusTemp();
     LDefinition valueTemp = LDefinition::BogusTemp();
     LDefinition offsetTemp = LDefinition::BogusTemp();
     LDefinition maskTemp = LDefinition::BogusTemp();
 
-    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
         outTemp = temp();
+    }
 
     if (Scalar::byteSize(ins->arrayType()) < 4) {
         valueTemp = temp();
         offsetTemp = temp();
         maskTemp = temp();
     }
 
     LCompareExchangeTypedArrayElement* lir =
@@ -770,18 +796,19 @@ LIRGenerator::visitAtomicTypedArrayEleme
         return;
     }
 
     // For a Uint32Array with a known double result we need a temp for
    // the intermediate output.
 
     LDefinition outTemp = LDefinition::BogusTemp();
-    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
         outTemp = temp();
+    }
 
     LAtomicTypedArrayElementBinop* lir =
         new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, outTemp,
                                                    valueTemp, offsetTemp, maskTemp);
     define(lir, ins);
 }
 
 void
@@ -790,20 +817,21 @@ LIRGenerator::visitCopySign(MCopySign* i
     MDefinition* lhs = ins->lhs();
     MDefinition* rhs = ins->rhs();
 
     MOZ_ASSERT(IsFloatingPointType(lhs->type()));
     MOZ_ASSERT(lhs->type() == rhs->type());
     MOZ_ASSERT(lhs->type() == ins->type());
 
     LInstructionHelper<1, 2, 2>* lir;
-    if (lhs->type() == MIRType::Double)
+    if (lhs->type() == MIRType::Double) {
         lir = new(alloc()) LCopySignD();
-    else
+    } else {
         lir = new(alloc()) LCopySignF();
+    }
 
     lir->setTemp(0, temp());
     lir->setTemp(1, temp());
 
     lir->setOperand(0, useRegisterAtStart(lhs));
     lir->setOperand(1, useRegister(rhs));
     defineReuseInput(lir, ins, 0);
 }
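
lowerDivI above routes division by a positive power of two (rhs > 0 && 1 << shift == rhs) to LDivPowTwoI rather than a full divide. The standard trick such a path relies on, shown here as a sketch rather than the emitted MIPS sequence, is to bias negative inputs so the arithmetic shift truncates toward zero:

    #include <cassert>
    #include <cstdint>

    // Truncating signed division by 1 << shift without a divide instruction:
    // add (divisor - 1) to negative inputs before the arithmetic shift.
    static int32_t DivPowTwo(int32_t lhs, int shift) {
        int32_t bias = (lhs < 0) ? ((1 << shift) - 1) : 0;
        return (lhs + bias) >> shift;  // arithmetic shift on negative values
    }

    int main() {
        assert(DivPowTwo(15, 3) == 1);    // 15 / 8
        assert(DivPowTwo(-15, 3) == -1);  // rounds toward zero, not to -2
        return 0;
    }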
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -226,30 +226,32 @@ MacroAssembler::mulDoublePtr(ImmPtr imm,
     movePtr(imm, ScratchRegister);
     loadDouble(Address(ScratchRegister, 0), ScratchDoubleReg);
     mulDouble(ScratchDoubleReg, dest);
 }
 
 void
 MacroAssembler::quotient32(Register rhs, Register srcDest, bool isUnsigned)
 {
-    if (isUnsigned)
+    if (isUnsigned) {
         as_divu(srcDest, rhs);
-    else
+    } else {
         as_div(srcDest, rhs);
+    }
     as_mflo(srcDest);
 }
 
 void
 MacroAssembler::remainder32(Register rhs, Register srcDest, bool isUnsigned)
 {
-    if (isUnsigned)
+    if (isUnsigned) {
         as_divu(srcDest, rhs);
-    else
+    } else {
         as_div(srcDest, rhs);
+    }
     as_mfhi(srcDest);
 }
 
 void
 MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest)
 {
     as_divs(dest, dest, src);
 }
@@ -383,33 +385,35 @@ MacroAssembler::rshift32Arithmetic(Imm32
     ma_sra(dest, dest, imm);
 }
 
 // ===============================================================
 // Rotation functions
 void
 MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest)
 {
-    if (count.value)
+    if (count.value) {
         ma_rol(dest, input, count);
-    else
+    } else {
         ma_move(dest, input);
+    }
 }
 void
 MacroAssembler::rotateLeft(Register count, Register input, Register dest)
 {
     ma_rol(dest, input, count);
 }
 void
 MacroAssembler::rotateRight(Imm32 count, Register input, Register dest)
 {
-    if (count.value)
+    if (count.value) {
         ma_ror(dest, input, count);
-    else
+    } else {
         ma_move(dest, input);
+    }
 }
 void
 MacroAssembler::rotateRight(Register count, Register input, Register dest)
 {
     ma_ror(dest, input, count);
 }
 
 // ===============================================================
@@ -1079,18 +1083,19 @@ void
 MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
 {
     ma_ss(src, addr);
 }
 
 void
 MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
 {
-    if (barrier)
+    if (barrier) {
         as_sync();
+    }
 }
 
 // ===============================================================
 // Clamping functions.
 
 void
 MacroAssembler::clampIntToUint8(Register reg)
 {
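
quotient32 and remainder32 above both issue a MIPS divide, which deposits the quotient in the LO register and the remainder in HI; mflo/mfhi then read out whichever result is wanted (and flexibleDivMod32, later in this patch, reads both). The contract modeled in portable C++ (assumed semantics):

    #include <cassert>
    #include <cstdint>

    // MIPS div/divu leave the quotient in LO and the remainder in HI.
    struct HiLo { int32_t lo; int32_t hi; };

    static HiLo Div32(int32_t srcDest, int32_t rhs, bool isUnsigned) {
        if (isUnsigned) {
            uint32_t a = uint32_t(srcDest), b = uint32_t(rhs);
            return { int32_t(a / b), int32_t(a % b) };
        }
        return { srcDest / rhs, srcDest % rhs };
    }

    int main() {
        HiLo r = Div32(17, 5, /*isUnsigned=*/false);
        assert(r.lo == 3 && r.hi == 2);  // quotient in LO, remainder in HI
        return 0;
    }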
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -559,29 +559,31 @@ MacroAssemblerMIPSShared::ma_load_unalig
         base = ScratchRegister;
         lowOffset = Imm16(0).encode();
         hiOffset = Imm16(size / 8 - 1).encode();
     }
 
     BufferOffset load;
     switch (size) {
       case SizeHalfWord:
-        if (extension != ZeroExtend)
+        if (extension != ZeroExtend) {
             load = as_lbu(temp, base, hiOffset);
-        else
+        } else {
             load = as_lb(temp, base, hiOffset);
+        }
         as_lbu(dest, base, lowOffset);
         ma_ins(dest, temp, 8, 24);
         break;
       case SizeWord:
         load = as_lwl(dest, base, hiOffset);
         as_lwr(dest, base, lowOffset);
 #ifdef JS_CODEGEN_MIPS64
-        if (extension != ZeroExtend)
+        if (extension != ZeroExtend) {
             as_dext(dest, dest, 0, 32);
+        }
 #endif
         break;
 #ifdef JS_CODEGEN_MIPS64
       case SizeDouble:
         load = as_ldl(dest, base, hiOffset);
         as_ldr(dest, base, lowOffset);
         break;
 #endif
@@ -764,22 +766,23 @@ MacroAssemblerMIPSShared::ma_b(Register
     }
 }
 
 void
 MacroAssemblerMIPSShared::ma_b(Register lhs, Imm32 imm, Label* label, Condition c, JumpKind jumpKind)
 {
     MOZ_ASSERT(c != Overflow);
     if (imm.value == 0) {
-        if (c == Always || c == AboveOrEqual)
+        if (c == Always || c == AboveOrEqual) {
             ma_b(label, jumpKind);
-        else if (c == Below)
+        } else if (c == Below) {
             ; // This condition is always false. No branch required.
-        else
+        } else {
             asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+        }
     } else {
         switch (c) {
           case Equal:
           case NotEqual:
             MOZ_ASSERT(lhs != ScratchRegister);
             ma_li(ScratchRegister, imm);
             ma_b(lhs, ScratchRegister, label, c, jumpKind);
             break;
@@ -1075,35 +1078,37 @@ void
 MacroAssemblerMIPSShared::ma_cmp_set_double(Register dest, FloatRegister lhs, FloatRegister rhs,
                                             DoubleCondition c)
 {
     FloatTestKind moveCondition;
     compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);
 
     ma_li(dest, Imm32(1));
 
-    if (moveCondition == TestForTrue)
+    if (moveCondition == TestForTrue) {
         as_movf(dest, zero);
-    else
+    } else {
         as_movt(dest, zero);
+    }
 }
 
 void
 MacroAssemblerMIPSShared::ma_cmp_set_float32(Register dest, FloatRegister lhs, FloatRegister rhs,
                                              DoubleCondition c)
 {
     FloatTestKind moveCondition;
     compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);
 
     ma_li(dest, Imm32(1));
 
-    if (moveCondition == TestForTrue)
+    if (moveCondition == TestForTrue) {
         as_movf(dest, zero);
-    else
+    } else {
         as_movt(dest, zero);
+    }
 }
 
 void
 MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Imm32 imm, Condition c)
 {
     if (imm.value == 0) {
         switch (c) {
           case Equal :
@@ -1116,24 +1121,26 @@ MacroAssemblerMIPSShared::ma_cmp_set(Reg
             break;
           case AboveOrEqual:
           case Below:
             as_ori(rd, zero, c == AboveOrEqual ? 1: 0);
             break;
          case GreaterThan:
          case LessThanOrEqual:
            as_slt(rd, zero, rs);
-            if (c == LessThanOrEqual)
+            if (c == LessThanOrEqual) {
                as_xori(rd, rd, 1);
+            }
            break;
          case LessThan:
          case GreaterThanOrEqual:
            as_slt(rd, rs, zero);
-            if (c == GreaterThanOrEqual)
+            if (c == GreaterThanOrEqual) {
                as_xori(rd, rd, 1);
+            }
            break;
          case Zero:
            as_sltiu(rd, rs, 1);
            break;
          case NonZero:
            as_sltu(rd, zero, rs);
            break;
          case Signed:
@@ -1149,20 +1156,21 @@ MacroAssemblerMIPSShared::ma_cmp_set(Reg
         return;
     }
 
     switch (c) {
       case Equal:
       case NotEqual:
         MOZ_ASSERT(rs != ScratchRegister);
         ma_xor(rd, rs, imm);
-        if (c == Equal)
+        if (c == Equal) {
             as_sltiu(rd, rd, 1);
-        else
+        } else {
             as_sltu(rd, zero, rd);
+        }
         break;
       case Zero:
       case NonZero:
       case Signed:
       case NotSigned:
         MOZ_CRASH("Invalid condition.");
       default:
         Condition cond = ma_cmp(rd, rs, imm, c);
@@ -1703,18 +1711,19 @@ MacroAssembler::pushFakeReturnAddress(Re
     addCodeLabel(cl);
 
     return retAddr;
 }
 
 void
 MacroAssembler::loadStoreBuffer(Register ptr, Register buffer)
 {
-    if (ptr != buffer)
+    if (ptr != buffer) {
         movePtr(ptr, buffer);
+    }
     orPtr(Imm32(gc::ChunkMask), buffer);
     loadPtr(Address(buffer, gc::ChunkStoreBufferOffsetFromLastByte), buffer);
 }
 
 void
 MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
                                         Label* label)
 {
@@ -1804,22 +1813,23 @@ void
 MacroAssemblerMIPSShared::outOfLineWasmTruncateToInt32Check(FloatRegister input, Register output,
                                                             MIRType fromType, TruncFlags flags,
                                                             Label* rejoin,
                                                             wasm::BytecodeOffset trapOffset)
 {
     bool isUnsigned = flags & TRUNC_UNSIGNED;
     bool isSaturating = flags & TRUNC_SATURATING;
 
-    if(isSaturating) {
-
-        if(fromType == MIRType::Double)
+    if (isSaturating) {
+
+        if (fromType == MIRType::Double) {
             asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
-        else
+        } else {
             asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
+        }
 
         if(isUnsigned) {
 
             ma_li(output, Imm32(UINT32_MAX));
 
             FloatTestKind moveCondition;
             compareFloatingPoint(fromType == MIRType::Double ? DoubleFloat : SingleFloat,
                                  input,
@@ -1854,20 +1864,21 @@ MacroAssemblerMIPSShared::outOfLineWasmT
         MOZ_ASSERT(rejoin->bound());
 
         asMasm().jump(rejoin);
         return;
     }
 
     Label inputIsNaN;
 
-    if (fromType == MIRType::Double)
+    if (fromType == MIRType::Double) {
         asMasm().branchDouble(Assembler::DoubleUnordered, input, input, &inputIsNaN);
-    else if (fromType == MIRType::Float32)
+    } else if (fromType == MIRType::Float32) {
         asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+    }
 
     asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
     asMasm().bind(&inputIsNaN);
     asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
 }
 
 void
 MacroAssemblerMIPSShared::outOfLineWasmTruncateToInt64Check(FloatRegister input,
                                                             Register64 output_,
@@ -1881,20 +1892,21 @@ MacroAssemblerMIPSShared::outOfLineWasmT
     if(isSaturating) {
 #if defined(JS_CODEGEN_MIPS32)
         // Saturating callouts don't use ool path.
         return;
 #else
         Register output = output_.reg;
 
-        if(fromType == MIRType::Double)
+        if (fromType == MIRType::Double) {
             asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
-        else
+        } else {
             asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
+        }
 
         if(isUnsigned) {
 
             asMasm().ma_li(output, ImmWord(UINT64_MAX));
 
             FloatTestKind moveCondition;
             compareFloatingPoint(fromType == MIRType::Double ? DoubleFloat : SingleFloat,
                                  input,
@@ -1931,20 +1943,21 @@ MacroAssemblerMIPSShared::outOfLineWasmT
         asMasm().jump(rejoin);
         return;
 #endif
     }
 
     Label inputIsNaN;
 
-    if (fromType == MIRType::Double)
+    if (fromType == MIRType::Double) {
         asMasm().branchDouble(Assembler::DoubleUnordered, input, input, &inputIsNaN);
-    else if (fromType == MIRType::Float32)
+    } else if (fromType == MIRType::Float32) {
         asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+    }
 
 #if defined(JS_CODEGEN_MIPS32)
 
     // Only possible valid input that produces INT64_MIN result.
     double validInput = isUnsigned ? double(uint64_t(INT64_MIN)) : double(int64_t(INT64_MIN));
 
     if (fromType == MIRType::Double) {
         asMasm().loadConstantDouble(validInput, ScratchDoubleReg);
@@ -2038,34 +2051,36 @@ MacroAssemblerMIPSShared::wasmLoadImpl(c
       case Scalar::Float32:
         isFloat = true;
         break;
       default:
         MOZ_CRASH("unexpected array type");
     }
 
     BaseIndex address(memoryBase, ptr, TimesOne);
     if (IsUnaligned(access)) {
         MOZ_ASSERT(tmp != InvalidReg);
         if (isFloat) {
-            if (byteSize == 4)
+            if (byteSize == 4) {
                 asMasm().loadUnalignedFloat32(access, address, tmp, output.fpu());
-            else
+            } else {
                 asMasm().loadUnalignedDouble(access, address, tmp, output.fpu());
+            }
         } else {
             asMasm().ma_load_unaligned(access, output.gpr(), address, tmp,
                                        static_cast<LoadStoreSize>(8 * byteSize),
                                        isSigned ? SignExtend : ZeroExtend);
         }
         return;
     }
 
     asMasm().memoryBarrierBefore(access.sync());
     if (isFloat) {
-        if (byteSize == 4)
+        if (byteSize == 4) {
             asMasm().ma_ls(output.fpu(), address);
-        else
+        } else {
             asMasm().ma_ld(output.fpu(), address);
+        }
     } else {
         asMasm().ma_load(output.gpr(), address, static_cast<LoadStoreSize>(8 * byteSize),
                          isSigned ? SignExtend : ZeroExtend);
     }
     asMasm().append(access, asMasm().size() - 4);
     asMasm().memoryBarrierAfter(access.sync());
 }
 
@@ -2100,34 +2115,36 @@ MacroAssemblerMIPSShared::wasmStoreImpl(
       case Scalar::Float32:
         isFloat = true;
         break;
       default:
         MOZ_CRASH("unexpected array type");
     }
 
     BaseIndex address(memoryBase, ptr, TimesOne);
     if (IsUnaligned(access)) {
         MOZ_ASSERT(tmp != InvalidReg);
         if (isFloat) {
-            if (byteSize == 4)
+            if (byteSize == 4) {
                 asMasm().storeUnalignedFloat32(access, value.fpu(), tmp, address);
-            else
+            } else {
                 asMasm().storeUnalignedDouble(access, value.fpu(), tmp, address);
+            }
        } else {
            asMasm().ma_store_unaligned(access, value.gpr(), address, tmp,
                                        static_cast<LoadStoreSize>(8 * byteSize),
                                        isSigned ? SignExtend : ZeroExtend);
        }
        return;
     }
 
     asMasm().memoryBarrierBefore(access.sync());
     if (isFloat) {
-        if (byteSize == 4)
+        if (byteSize == 4) {
             asMasm().ma_ss(value.fpu(), address);
-        else
+        } else {
             asMasm().ma_sd(value.fpu(), address);
+        }
     } else {
         asMasm().ma_store(value.gpr(), address, static_cast<LoadStoreSize>(8 * byteSize),
                           isSigned ? SignExtend : ZeroExtend);
     }
     // Only the last emitted instruction is a memory access.
     asMasm().append(access, asMasm().size() - 4);
     asMasm().memoryBarrierAfter(access.sync());
@@ -2863,20 +2880,21 @@ MacroAssembler::flexibleRemainder32(Regi
 {
     remainder32(rhs, srcDest, isUnsigned);
 }
 
 void
 MacroAssembler::flexibleDivMod32(Register rhs, Register srcDest, Register remOutput,
                                  bool isUnsigned, const LiveRegisterSet&)
 {
-    if (isUnsigned)
+    if (isUnsigned) {
         as_divu(srcDest, rhs);
-    else
+    } else {
         as_div(srcDest, rhs);
+    }
     as_mfhi(remOutput);
     as_mflo(srcDest);
 }
 
 // ========================================================================
 // Spectre Mitigations.
 
 void
--- a/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp
@@ -15,34 +15,36 @@ void
 MoveEmitterMIPSShared::emit(const MoveResolver& moves)
 {
     if (moves.numCycles()) {
         // Reserve stack for cycle resolution
         masm.reserveStack(moves.numCycles() * sizeof(double));
         pushedAtCycle_ = masm.framePushed();
     }
 
-    for (size_t i = 0; i < moves.numMoves(); i++)
+    for (size_t i = 0; i < moves.numMoves(); i++) {
         emit(moves.getMove(i));
+    }
 }
 
 Address
 MoveEmitterMIPSShared::cycleSlot(uint32_t slot, uint32_t subslot) const
 {
     int32_t offset = masm.framePushed() - pushedAtCycle_;
     MOZ_ASSERT(Imm16::IsInSignedRange(offset));
     return Address(StackPointer, offset + slot * sizeof(double) + subslot);
 }
 
 int32_t
 MoveEmitterMIPSShared::getAdjustedOffset(const MoveOperand& operand)
 {
     MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
-    if (operand.base() != StackPointer)
+    if (operand.base() != StackPointer) {
         return operand.disp();
+    }
 
     // Adjust offset if stack pointer has been moved.
     return operand.disp() + masm.framePushed() - pushedAtStart_;
 }
 
 Address
 MoveEmitterMIPSShared::getAdjustedAddress(const MoveOperand& operand)
 {
@@ -59,22 +61,23 @@ MoveEmitterMIPSShared::tempReg()
 void
 MoveEmitterMIPSShared::emitMove(const MoveOperand& from, const MoveOperand& to)
 {
     if (from.isGeneralReg()) {
         // Second scratch register should not be moved by MoveEmitter.
         MOZ_ASSERT(from.reg() != spilledReg_);
 
-        if (to.isGeneralReg())
+        if (to.isGeneralReg()) {
             masm.movePtr(from.reg(), to.reg());
-        else if (to.isMemory())
+        } else if (to.isMemory()) {
             masm.storePtr(from.reg(), getAdjustedAddress(to));
-        else
+        } else {
             MOZ_CRASH("Invalid emitMove arguments.");
+        }
     } else if (from.isMemory()) {
         if (to.isGeneralReg()) {
             masm.loadPtr(getAdjustedAddress(from), to.reg());
         } else if (to.isMemory()) {
             masm.loadPtr(getAdjustedAddress(from), tempReg());
             masm.storePtr(tempReg(), getAdjustedAddress(to));
         } else {
             MOZ_CRASH("Invalid emitMove arguments.");
@@ -95,22 +98,23 @@ MoveEmitterMIPSShared::emitMove(const Mo
 void
 MoveEmitterMIPSShared::emitInt32Move(const MoveOperand &from, const MoveOperand &to)
 {
     if (from.isGeneralReg()) {
         // Second scratch register should not be moved by MoveEmitter.
         MOZ_ASSERT(from.reg() != spilledReg_);
 
-        if (to.isGeneralReg())
+        if (to.isGeneralReg()) {
             masm.move32(from.reg(), to.reg());
-        else if (to.isMemory())
+        } else if (to.isMemory()) {
             masm.store32(from.reg(), getAdjustedAddress(to));
-        else
+        } else {
             MOZ_CRASH("Invalid emitInt32Move arguments.");
+        }
     } else if (from.isMemory()) {
         if (to.isGeneralReg()) {
             masm.load32(getAdjustedAddress(from), to.reg());
         } else if (to.isMemory()) {
             masm.load32(getAdjustedAddress(from), tempReg());
             masm.store32(tempReg(), getAdjustedAddress(to));
         } else {
             MOZ_CRASH("Invalid emitInt32Move arguments.");
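
getAdjustedOffset above rebases a stack-pointer-relative displacement when the assembler has pushed more data since the moves were recorded: the slot has not moved, but it is now farther from SP. The accounting, as a worked example with hypothetical numbers:

    #include <cassert>
    #include <cstdint>

    // Mirrors MoveEmitterMIPSShared::getAdjustedOffset: an SP-relative
    // displacement recorded at framePushed == pushedAtStart must grow by
    // whatever has been pushed since.
    static int32_t GetAdjustedOffset(int32_t disp, int32_t framePushed, int32_t pushedAtStart) {
        return disp + framePushed - pushedAtStart;
    }

    int main() {
        // Operand recorded at SP+8 with 32 bytes pushed; 16 more bytes have
        // since been pushed for cycle slots, so the slot is now at SP+24.
        assert(GetAdjustedOffset(8, 48, 32) == 24);
        return 0;
    }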
--- a/js/src/jit/mips32/Architecture-mips32.cpp +++ b/js/src/jit/mips32/Architecture-mips32.cpp @@ -27,38 +27,41 @@ const Registers::SetType Registers::JSCa const Registers::SetType Registers::CallMask = (1 << Registers::v0) | (1 << Registers::v1); // used for double-size returns FloatRegisters::Encoding FloatRegisters::FromName(const char* name) { for (size_t i = 0; i < RegisterIdLimit; i++) { - if (strcmp(GetName(i), name) == 0) + if (strcmp(GetName(i), name) == 0) { return Encoding(i); + } } return Invalid; } FloatRegister FloatRegister::doubleOverlay() const { MOZ_ASSERT(isNotOdd()); - if (isSingle()) + if (isSingle()) { return FloatRegister(code_, Double); + } return *this; } FloatRegister FloatRegister::singleOverlay() const { MOZ_ASSERT(isNotOdd()); - if (isDouble()) + if (isDouble()) { return FloatRegister(code_, Single); + } return *this; } FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) { LiveFloatRegisterSet mod; for (FloatRegisterIterator iter(s); iter.more(); ++iter) { @@ -74,18 +77,19 @@ FloatRegister::GetPushSizeInBytes(const FloatRegisterSet ss = s.reduceSetForPush(); uint64_t bits = ss.bits(); // We are only pushing double registers. MOZ_ASSERT((bits & 0xFFFF) == 0); uint32_t ret = mozilla::CountPopulation32(bits) * sizeof(double); // Additional space needed by MacroAssembler::PushRegsInMask to ensure // correct alignment of double values. - if (ret) + if (ret) { ret += sizeof(double); + } return ret; } uint32_t FloatRegister::getRegisterDumpOffsetInBytes() { MOZ_ASSERT(isNotOdd()); return id() * sizeof(float);
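GetPushSizeInBytes() above compresses to a popcount plus padding: the reduced set contains only double registers (hence the assert that the low 16 bits are clear), each costs sizeof(double), and one extra double is reserved so the block can be realigned at push time. A stand-alone sketch of that computation:

    #include <cstdint>

    uint32_t pushSizeInBytes(uint64_t reducedBits)
    {
        uint32_t count = 0;
        for (uint64_t b = reducedBits; b != 0; b &= b - 1) {
            count++;  // population count, as mozilla::CountPopulation32 does
        }
        uint32_t bytes = count * uint32_t(sizeof(double));
        // Alignment slack for MacroAssembler::PushRegsInMask, added only
        // when anything is pushed at all, matching the braced if above.
        return bytes ? bytes + uint32_t(sizeof(double)) : 0;
    }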
--- a/js/src/jit/mips32/Architecture-mips32.h
+++ b/js/src/jit/mips32/Architecture-mips32.h
@@ -186,35 +186,39 @@ class FloatRegister : public FloatRegist
     }
     uint32_t numAliased() const {
         MOZ_ASSERT(isNotOdd());
         return 2;
     }
     FloatRegister aliased(uint32_t aliasIdx) {
         MOZ_ASSERT(isNotOdd());
-        if (aliasIdx == 0)
+        if (aliasIdx == 0) {
             return *this;
+        }
         MOZ_ASSERT(aliasIdx == 1);
-        if (isDouble())
+        if (isDouble()) {
             return singleOverlay();
+        }
         return doubleOverlay();
     }
     uint32_t numAlignedAliased() const {
         MOZ_ASSERT(isNotOdd());
         return 2;
     }
     FloatRegister alignedAliased(uint32_t aliasIdx) {
         MOZ_ASSERT(isNotOdd());
-        if (aliasIdx == 0)
+        if (aliasIdx == 0) {
             return *this;
+        }
         MOZ_ASSERT(aliasIdx == 1);
-        if (isDouble())
+        if (isDouble()) {
             return singleOverlay();
+        }
         return doubleOverlay();
     }
     SetType alignedOrDominatedAliasedSet() const {
         MOZ_ASSERT(isNotOdd());
         return (SetType(1) << (code_ >> 1)) *
             ((1 << FloatRegisters::TotalSingle) + 1);
     }
--- a/js/src/jit/mips32/Assembler-mips32.cpp +++ b/js/src/jit/mips32/Assembler-mips32.cpp @@ -22,66 +22,70 @@ ABIArgGenerator::ABIArgGenerator() ABIArg ABIArgGenerator::next(MIRType type) { Register destReg; switch (type) { case MIRType::Int32: case MIRType::Pointer: - if (GetIntArgReg(usedArgSlots_, &destReg)) + if (GetIntArgReg(usedArgSlots_, &destReg)) { current_ = ABIArg(destReg); - else + } else { current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t)); + } usedArgSlots_++; break; case MIRType::Int64: if (!usedArgSlots_) { current_ = ABIArg(a0, a1); usedArgSlots_ = 2; } else if (usedArgSlots_ <= 2) { current_ = ABIArg(a2, a3); usedArgSlots_ = 4; } else { - if (usedArgSlots_ < NumIntArgRegs) + if (usedArgSlots_ < NumIntArgRegs) { usedArgSlots_ = NumIntArgRegs; + } usedArgSlots_ += usedArgSlots_ % 2; current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t)); usedArgSlots_ += 2; } break; case MIRType::Float32: if (!usedArgSlots_) { current_ = ABIArg(f12.asSingle()); firstArgFloatSize_ = 1; } else if (usedArgSlots_ == firstArgFloatSize_) { current_ = ABIArg(f14.asSingle()); } else if (useGPRForFloats_ && GetIntArgReg(usedArgSlots_, &destReg)) { current_ = ABIArg(destReg); } else { - if (usedArgSlots_ < NumIntArgRegs) + if (usedArgSlots_ < NumIntArgRegs) { usedArgSlots_ = NumIntArgRegs; + } current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t)); } usedArgSlots_++; break; case MIRType::Double: if (!usedArgSlots_) { current_ = ABIArg(f12); usedArgSlots_ = 2; firstArgFloatSize_ = 2; } else if (usedArgSlots_ == firstArgFloatSize_) { current_ = ABIArg(f14); usedArgSlots_ = 4; } else if (useGPRForFloats_ && usedArgSlots_ <= 2) { current_ = ABIArg(a2, a3); usedArgSlots_ = 4; } else { - if (usedArgSlots_ < NumIntArgRegs) + if (usedArgSlots_ < NumIntArgRegs) { usedArgSlots_ = NumIntArgRegs; + } usedArgSlots_ += usedArgSlots_ % 2; current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t)); usedArgSlots_ += 2; } break; default: MOZ_CRASH("Unexpected argument type"); } @@ -129,18 +133,19 @@ jit::PatchJump(CodeLocationJump& jump_, } void Assembler::executableCopy(uint8_t* buffer, bool flushICache) { MOZ_ASSERT(isFinished); m_buffer.executableCopy(buffer); - if (flushICache) + if (flushICache) { AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size()); + } } uintptr_t Assembler::GetPointer(uint8_t* instPtr) { Instruction* inst = (Instruction*)instPtr; return Assembler::ExtractLuiOriValue(inst, inst->next()); }
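ABIArgGenerator::next() above walks the O32 calling convention slot by slot; the invariant underneath is simple: argument slots 0..3 are a0..a3 (with f12/f14 shadowing the first slots for leading FP arguments), and anything past the register slots lives on the stack at slot * sizeof(intptr_t). A simplified, hypothetical helper showing just the integer-slot rule (GetIntArgReg in the real code takes a Register* out-param instead of a name):

    #include <cstddef>
    #include <cstdint>

    // Returns true and names the register for slots 0..3; otherwise
    // reports the stack offset, as the else branches above compute.
    bool intArgLocation(uint32_t usedSlots, const char** regOut,
                        uint32_t* spOffsetOut)
    {
        static const char* const kIntArgRegs[] = { "a0", "a1", "a2", "a3" };
        if (usedSlots < 4) {
            *regOut = kIntArgRegs[usedSlots];
            return true;
        }
        *spOffsetOut = uint32_t(usedSlots * sizeof(intptr_t));
        return false;
    }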
--- a/js/src/jit/mips32/Assembler-mips32.h +++ b/js/src/jit/mips32/Assembler-mips32.h @@ -34,18 +34,19 @@ class ABIArgGenerator ABIArg next(MIRType argType); ABIArg& current() { return current_; } void enforceO32ABI() { useGPRForFloats_ = true; } uint32_t stackBytesConsumedSoFar() const { - if (usedArgSlots_ <= 4) + if (usedArgSlots_ <= 4) { return ShadowStackSpace; + } return usedArgSlots_ * sizeof(intptr_t); } }; // These registers may be volatile or nonvolatile. static constexpr Register ABINonArgReg0 = t0; static constexpr Register ABINonArgReg1 = t1; @@ -219,24 +220,26 @@ GetIntArgReg(uint32_t usedArgSlots, Regi // run out too. static inline bool GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out) { // NOTE: We can't properly determine which regs are used if there are // float arguments. If this is needed, we will have to guess. MOZ_ASSERT(usedFloatArgs == 0); - if (GetIntArgReg(usedIntArgs, out)) + if (GetIntArgReg(usedIntArgs, out)) { return true; + } // Unfortunately, we have to assume things about the point at which // GetIntArgReg returns false, because we need to know how many registers it // can allocate. usedIntArgs -= NumIntArgRegs; - if (usedIntArgs >= NumCallTempNonArgRegs) + if (usedIntArgs >= NumCallTempNonArgRegs) { return false; + } *out = CallTempNonArgRegs[usedIntArgs]; return true; } static inline uint32_t GetArgStackDisp(uint32_t usedArgSlots) { MOZ_ASSERT(usedArgSlots >= NumIntArgRegs);
--- a/js/src/jit/mips32/Bailouts-mips32.h
+++ b/js/src/jit/mips32/Bailouts-mips32.h
@@ -36,30 +36,32 @@ class BailoutStack
     FrameSizeClass frameClass() const {
         return FrameSizeClass::FromClass(frameClassId_);
     }
     uintptr_t tableOffset() const {
         MOZ_ASSERT(frameClass() != FrameSizeClass::None());
         return tableOffset_;
     }
     uint32_t frameSize() const {
-        if (frameClass() == FrameSizeClass::None())
+        if (frameClass() == FrameSizeClass::None()) {
             return frameSize_;
+        }
         return frameClass().frameSize();
     }
     MachineState machine() {
         return MachineState::FromBailout(regs_, fpregs_);
     }
     SnapshotOffset snapshotOffset() const {
         MOZ_ASSERT(frameClass() == FrameSizeClass::None());
         return snapshotOffset_;
     }
     uint8_t* parentStackPointer() const {
-        if (frameClass() == FrameSizeClass::None())
+        if (frameClass() == FrameSizeClass::None()) {
             return (uint8_t*)this + sizeof(BailoutStack);
+        }
         return (uint8_t*)this + offsetof(BailoutStack, snapshotOffset_);
     }
     static size_t offsetOfFrameClass() {
         return offsetof(BailoutStack, frameClassId_);
     }
     static size_t offsetOfFrameSize() {
         return offsetof(BailoutStack, frameSize_);
     }
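The two branches of parentStackPointer() encode the variable-length tail of the bailout frame: frames with a known FrameSizeClass stop before the snapshotOffset_ word, frames without one carry it. A flattened model of that layout decision (the real class keeps frameSize_/tableOffset_ in a union and uses the real FrameSizeClass type; this sketch separates and simplifies them purely for illustration):

    #include <cstddef>
    #include <cstdint>

    struct BailoutStackModel {
        uintptr_t frameClassId_;
        uint32_t frameSize_;       // meaningful only for class "None"
        uint32_t snapshotOffset_;  // meaningful only for class "None"

        static constexpr uintptr_t NoneClass = ~uintptr_t(0);  // assumed sentinel

        const uint8_t* parentStackPointer() const {
            const uint8_t* base = reinterpret_cast<const uint8_t*>(this);
            if (frameClassId_ == NoneClass) {
                return base + sizeof(BailoutStackModel);
            }
            return base + offsetof(BailoutStackModel, snapshotOffset_);
        }
    };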
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp +++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp @@ -91,20 +91,21 @@ CodeGenerator::visitCompareB(LCompareB* const Register output = ToRegister(lir->output()); MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE); Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop()); Label notBoolean, done; masm.branchTestBoolean(Assembler::NotEqual, lhs, ¬Boolean); { - if (rhs->isConstant()) + if (rhs->isConstant()) { masm.cmp32Set(cond, lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), output); - else + } else { masm.cmp32Set(cond, lhs.payloadReg(), ToRegister(rhs), output); + } masm.jump(&done); } masm.bind(¬Boolean); { masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output); } @@ -119,21 +120,22 @@ CodeGenerator::visitCompareBAndBranch(LC const LAllocation* rhs = lir->rhs(); MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE); MBasicBlock* mirNotBoolean = (mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue(); branchToBlock(lhs.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), mirNotBoolean, Assembler::NotEqual); Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop()); - if (rhs->isConstant()) + if (rhs->isConstant()) { emitBranch(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), cond, lir->ifTrue(), lir->ifFalse()); - else + } else { emitBranch(lhs.payloadReg(), ToRegister(rhs), cond, lir->ifTrue(), lir->ifFalse()); + } } void CodeGenerator::visitCompareBitwise(LCompareBitwise* lir) { MCompare* mir = lir->mir(); Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop()); const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput); @@ -250,35 +252,37 @@ CodeGenerator::visitDivOrModI64(LDivOrMo masm.bind(&nonZero); } // Handle an integer overflow exception from INT64_MIN / -1. 
if (lir->canBeNegativeOverflow()) { Label notOverflow; masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), ¬Overflow); masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), ¬Overflow); - if (lir->mir()->isMod()) + if (lir->mir()->isMod()) { masm.xor64(output, output); - else + } else { masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset()); + } masm.jump(&done); masm.bind(¬Overflow); } masm.setupWasmABICall(); masm.passABIArg(lhs.high); masm.passABIArg(lhs.low); masm.passABIArg(rhs.high); masm.passABIArg(rhs.low); MOZ_ASSERT(gen->compilingWasm()); - if (lir->mir()->isMod()) + if (lir->mir()->isMod()) { masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64); - else + } else { masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64); + } MOZ_ASSERT(ReturnReg64 == output); masm.bind(&done); } void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) { @@ -297,20 +301,21 @@ CodeGenerator::visitUDivOrModI64(LUDivOr masm.setupWasmABICall(); masm.passABIArg(lhs.high); masm.passABIArg(lhs.low); masm.passABIArg(rhs.high); masm.passABIArg(rhs.low); MOZ_ASSERT(gen->compilingWasm()); - if (lir->mir()->isMod()) + if (lir->mir()->isMod()) { masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64); - else + } else { masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64); + } } template <typename T> void CodeGeneratorMIPS::emitWasmLoadI64(T* lir) { const MWasmLoad* mir = lir->mir(); @@ -421,34 +426,37 @@ CodeGenerator::visitWasmReinterpretToI64 } void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) { Register input = ToRegister(lir->input()); Register64 output = ToOutRegister64(lir); - if (input != output.low) + if (input != output.low) { masm.move32(input, output.low); - if (lir->mir()->isUnsigned()) + } + if (lir->mir()->isUnsigned()) { masm.move32(Imm32(0), output.high); - else + } else { masm.ma_sra(output.high, output.low, Imm32(31)); + } } void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) { const LInt64Allocation& input = lir->getInt64Operand(0); Register output = ToRegister(lir->output()); - if (lir->mir()->bottomHalf()) + if (lir->mir()->bottomHalf()) { masm.move32(ToRegister(input.low()), output); - else + } else { masm.move32(ToRegister(input.high()), output); + } } void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) { Register64 input = ToRegister64(lir->getInt64Operand(0)); Register64 output = ToOutRegister64(lir); switch (lir->mode()) { @@ -511,35 +519,37 @@ CodeGenerator::visitWasmTruncateToInt64( } if (!lir->mir()->isSaturating()) { masm.Push(input); masm.setupWasmABICall(); masm.passABIArg(arg, MoveOp::DOUBLE); - if (lir->mir()->isUnsigned()) + if (lir->mir()->isUnsigned()) { masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::TruncateDoubleToUint64); - else + } else { masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::TruncateDoubleToInt64); + } masm.Pop(input); masm.ma_xor(ScratchRegister, output.high, Imm32(0x80000000)); masm.ma_or(ScratchRegister, output.low); masm.ma_b(ScratchRegister, Imm32(0), ool->entry(), Assembler::Equal); masm.bind(ool->rejoin()); } else { masm.setupWasmABICall(); masm.passABIArg(arg, MoveOp::DOUBLE); - if (lir->mir()->isUnsigned()) + if (lir->mir()->isUnsigned()) { masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::SaturatingTruncateDoubleToUint64); - else + } else { masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::SaturatingTruncateDoubleToInt64); + } } MOZ_ASSERT(ReturnReg64 == output); } 
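The canBeNegativeOverflow() guard above handles the one signed 64-bit division that overflows: INT64_MIN / -1, whose quotient +2^63 is unrepresentable. Wasm semantics make i64.div_s trap on it, while i64.rem_s is defined to yield 0, which is what the xor64(output, output) arm produces. The same rule in portable C++ (a sketch, not the codegen):

    #include <cstdint>
    #include <optional>

    std::optional<int64_t> checkedDivI64(int64_t lhs, int64_t rhs)
    {
        if (rhs == 0) {
            return std::nullopt;  // wasm: integer divide by zero trap
        }
        if (lhs == INT64_MIN && rhs == -1) {
            return std::nullopt;  // wasm: integer overflow trap (rem yields 0)
        }
        return lhs / rhs;
    }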
void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) { @@ -548,26 +558,29 @@ CodeGenerator::visitInt64ToFloatingPoint MInt64ToFloatingPoint* mir = lir->mir(); MIRType toType = mir->type(); masm.setupWasmABICall(); masm.passABIArg(input.high); masm.passABIArg(input.low); - if (lir->mir()->isUnsigned()) - if (toType == MIRType::Double) + if (lir->mir()->isUnsigned()) { + if (toType == MIRType::Double) { masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::Uint64ToDouble, MoveOp::DOUBLE); - else + } else { masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::Uint64ToFloat32, MoveOp::FLOAT32); - else - if (toType == MIRType::Double) + } + } else { + if (toType == MIRType::Double) { masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::Int64ToDouble, MoveOp::DOUBLE); - else + } else { masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::Int64ToFloat32, MoveOp::FLOAT32); + } + } MOZ_ASSERT_IF(toType == MIRType::Double, *(&output) == ReturnDoubleReg); MOZ_ASSERT_IF(toType == MIRType::Float32, *(&output) == ReturnFloat32Reg); } void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
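The visitInt64ToFloatingPoint hunk is one place in this patch where the braces are more than style: the old body nested an unbraced if/else inside another unbraced if/else, exactly the shape where C++'s dangling-else rule bites during the next refactor. A compilable demonstration of the hazard (illustration only):

    #include <cstdio>

    void pick(bool outer, bool inner)
    {
        if (outer)
            if (inner)
                puts("outer && inner");
            else
                puts("outer && !inner");  // this else binds to the inner if
        // An else added here for the !outer case would silently pair with
        // the wrong if unless the inner if/else is braced, which is what
        // the patch now makes explicit.
    }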
--- a/js/src/jit/mips32/LIR-mips32.h +++ b/js/src/jit/mips32/LIR-mips32.h @@ -99,29 +99,32 @@ class LDivOrModI64 : public LCallInstruc } MBinaryArithInstruction* mir() const { MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); return static_cast<MBinaryArithInstruction*>(mir_); } bool canBeDivideByZero() const { - if (mir_->isMod()) + if (mir_->isMod()) { return mir_->toMod()->canBeDivideByZero(); + } return mir_->toDiv()->canBeDivideByZero(); } bool canBeNegativeOverflow() const { - if (mir_->isMod()) + if (mir_->isMod()) { return mir_->toMod()->canBeNegativeDividend(); + } return mir_->toDiv()->canBeNegativeOverflow(); } wasm::BytecodeOffset bytecodeOffset() const { MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); - if (mir_->isMod()) + if (mir_->isMod()) { return mir_->toMod()->bytecodeOffset(); + } return mir_->toDiv()->bytecodeOffset(); } }; class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0> { public: LIR_HEADER(UDivOrModI64) @@ -136,29 +139,32 @@ class LUDivOrModI64 : public LCallInstru setInt64Operand(Rhs, rhs); } MBinaryArithInstruction* mir() const { MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); return static_cast<MBinaryArithInstruction*>(mir_); } bool canBeDivideByZero() const { - if (mir_->isMod()) + if (mir_->isMod()) { return mir_->toMod()->canBeDivideByZero(); + } return mir_->toDiv()->canBeDivideByZero(); } bool canBeNegativeOverflow() const { - if (mir_->isMod()) + if (mir_->isMod()) { return mir_->toMod()->canBeNegativeDividend(); + } return mir_->toDiv()->canBeNegativeOverflow(); } wasm::BytecodeOffset bytecodeOffset() const { MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); - if (mir_->isMod()) + if (mir_->isMod()) { return mir_->toMod()->bytecodeOffset(); + } return mir_->toDiv()->bytecodeOffset(); } }; class LWasmTruncateToInt64 : public LCallInstructionHelper<INT64_PIECES, 1, 0> { public: LIR_HEADER(WasmTruncateToInt64);
--- a/js/src/jit/mips32/Lowering-mips32.cpp +++ b/js/src/jit/mips32/Lowering-mips32.cpp @@ -68,45 +68,48 @@ LIRGenerator::visitBox(MBox* box) void LIRGenerator::visitUnbox(MUnbox* unbox) { MDefinition* inner = unbox->getOperand(0); if (inner->type() == MIRType::ObjectOrNull) { LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(inner)); - if (unbox->fallible()) + if (unbox->fallible()) { assignSnapshot(lir, unbox->bailoutKind()); + } defineReuseInput(lir, unbox, 0); return; } // An unbox on mips reads in a type tag (either in memory or a register) and // a payload. Unlike most instructions consuming a box, we ask for the type // second, so that the result can re-use the first input. MOZ_ASSERT(inner->type() == MIRType::Value); ensureDefined(inner); if (IsFloatingPointType(unbox->type())) { LUnboxFloatingPoint* lir = new(alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type()); - if (unbox->fallible()) + if (unbox->fallible()) { assignSnapshot(lir, unbox->bailoutKind()); + } define(lir, unbox); return; } // Swap the order we use the box pieces so we can re-use the payload // register. LUnbox* lir = new(alloc()) LUnbox; lir->setOperand(0, usePayloadInRegisterAtStart(inner)); lir->setOperand(1, useType(inner, LUse::REGISTER)); - if (unbox->fallible()) + if (unbox->fallible()) { assignSnapshot(lir, unbox->bailoutKind()); + } // Types and payloads form two separate intervals. If the type becomes dead // before the payload, it could be used as a Value without the type being // recoverable. Unbox's purpose is to eagerly kill the definition of a type // tag, so keeping both alive (for the purpose of gcmaps) is unappealing. // Instead, we create a new virtual register. defineReuseInput(lir, unbox, 0); }
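The unbox lowering above is shaped by the 32-bit nunbox Value layout: a jsval is two machine words, a payload and a type tag, consumed as separate register operands, which is why the payload is requested first and the output can reuse its register. A sketch of that layout with illustrative names (not the real jsval types):

    #include <cstdint>

    struct NunboxValue {
        uint32_t payload;  // low word on little-endian MIPS32
        uint32_t tag;      // JSVAL_TYPE_* tag in the high word
    };

    // A fallible unbox (the assignSnapshot() case above) checks only the
    // tag; the payload passes through untouched, enabling defineReuseInput.
    uint32_t unboxPayload(const NunboxValue& v, uint32_t expectedTag, bool* ok)
    {
        *ok = (v.tag == expectedTag);
        return v.payload;
    }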
--- a/js/src/jit/mips32/MacroAssembler-mips32-inl.h +++ b/js/src/jit/mips32/MacroAssembler-mips32-inl.h @@ -42,25 +42,27 @@ MacroAssembler::moveGPR64ToDouble(Regist { moveToDoubleHi(src.high, dest); moveToDoubleLo(src.low, dest); } void MacroAssembler::move64To32(Register64 src, Register dest) { - if (src.low != dest) + if (src.low != dest) { move32(src.low, dest); + } } void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) { - if (src != dest.low) + if (src != dest.low) { move32(src, dest.low); + } move32(Imm32(0), dest.high); } void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) { move8SignExtend(src, dest.low); move32To64SignExtend(dest.low, dest); @@ -71,18 +73,19 @@ MacroAssembler::move16To64SignExtend(Reg { move16SignExtend(src, dest.low); move32To64SignExtend(dest.low, dest); } void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) { - if (src != dest.low) + if (src != dest.low) { move32(src, dest.low); + } ma_sra(dest.high, dest.low, Imm32(31)); } // =============================================================== // Logical instructions void MacroAssembler::andPtr(Register src, Register dest) @@ -94,45 +97,51 @@ void MacroAssembler::andPtr(Imm32 imm, Register dest) { ma_and(dest, imm); } void MacroAssembler::and64(Imm64 imm, Register64 dest) { - if (imm.low().value != int32_t(0xFFFFFFFF)) + if (imm.low().value != int32_t(0xFFFFFFFF)) { and32(imm.low(), dest.low); - if (imm.hi().value != int32_t(0xFFFFFFFF)) + } + if (imm.hi().value != int32_t(0xFFFFFFFF)) { and32(imm.hi(), dest.high); + } } void MacroAssembler::and64(Register64 src, Register64 dest) { and32(src.low, dest.low); and32(src.high, dest.high); } void MacroAssembler::or64(Imm64 imm, Register64 dest) { - if (imm.low().value) + if (imm.low().value) { or32(imm.low(), dest.low); - if (imm.hi().value) + } + if (imm.hi().value) { or32(imm.hi(), dest.high); + } } void MacroAssembler::xor64(Imm64 imm, Register64 dest) { - if (imm.low().value) + if (imm.low().value) { xor32(imm.low(), dest.low); - if (imm.hi().value) + } + if (imm.hi().value) { xor32(imm.hi(), dest.high); + } } void MacroAssembler::orPtr(Register src, Register dest) { ma_or(dest, src); } @@ -825,27 +834,29 @@ MacroAssembler::branch64(Condition cond, default: MOZ_CRASH("Condition code not supported"); } return; } Condition c = ma_cmp64(cond, lhs, val, SecondScratchReg); ma_b(SecondScratchReg, SecondScratchReg, success, c); - if (fail) + if (fail) { jump(fail); + } } void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail) { Condition c = ma_cmp64(cond, lhs, rhs, SecondScratchReg); ma_b(SecondScratchReg, SecondScratchReg, success, c); - if (fail) + if (fail) { jump(fail); + } } void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label) { branchPtr(cond, lhs, rhs, label); } @@ -964,20 +975,21 @@ MacroAssembler::branchTestMagic(Conditio } void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label) { MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual); Label notMagic; - if (cond == Assembler::Equal) + if (cond == Assembler::Equal) { branchTestMagic(Assembler::NotEqual, valaddr, ¬Magic); - else + } else { branchTestMagic(Assembler::NotEqual, valaddr, label); + } branch32(cond, ToPayload(valaddr), Imm32(why), label); bind(¬Magic); } void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail) { @@ -1006,18 +1018,19 @@ 
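The Imm64 logic paths above skip any half whose immediate is the identity element for the operation: all-ones for AND, zero for OR and XOR, in which case no instruction is emitted for that half at all. A portable model of the and64 case:

    #include <cstdint>

    uint64_t and64(uint64_t dest, uint64_t imm)
    {
        uint32_t lo = uint32_t(dest);
        uint32_t hi = uint32_t(dest >> 32);
        if (uint32_t(imm) != 0xFFFFFFFFu) {
            lo &= uint32_t(imm);        // and32(imm.low(), dest.low)
        }
        if (uint32_t(imm >> 32) != 0xFFFFFFFFu) {
            hi &= uint32_t(imm >> 32);  // and32(imm.hi(), dest.high)
        }
        return (uint64_t(hi) << 32) | lo;
    }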
MacroAssemblerMIPSCompat::incrementInt32 { asMasm().add32(Imm32(1), ToPayload(addr)); } void MacroAssemblerMIPSCompat::computeEffectiveAddress(const BaseIndex& address, Register dest) { computeScaledAddress(address, dest); - if (address.offset) + if (address.offset) { asMasm().addPtr(Imm32(address.offset), dest); + } } void MacroAssemblerMIPSCompat::retn(Imm32 n) { // pc <- [sp]; sp += n loadPtr(Address(StackPointer, 0), ra); asMasm().addPtr(n, StackPointer); as_jr(ra);
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp +++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp @@ -320,26 +320,28 @@ MacroAssemblerMIPS::ma_load(Register des encodedOffset = Imm16(0).encode(); } else { encodedOffset = Imm16(address.offset).encode(); base = address.base; } switch (size) { case SizeByte: - if (ZeroExtend == extension) + if (ZeroExtend == extension) { as_lbu(dest, base, encodedOffset); - else + } else { as_lb(dest, base, encodedOffset); + } break; case SizeHalfWord: - if (ZeroExtend == extension) + if (ZeroExtend == extension) { as_lhu(dest, base, encodedOffset); - else + } else { as_lh(dest, base, encodedOffset); + } break; case SizeWord: as_lw(dest, base, encodedOffset); break; default: MOZ_CRASH("Invalid argument for ma_load"); } } @@ -495,50 +497,54 @@ MacroAssemblerMIPS::ma_bal(Label* label, { spew("branch .Llabel %p\n", label); if (label->bound()) { // Generate the long jump for calls because return address has to be // the address after the reserved block. addLongJump(nextOffset(), BufferOffset(label->offset())); ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET)); as_jalr(ScratchRegister); - if (delaySlotFill == FillDelaySlot) + if (delaySlotFill == FillDelaySlot) { as_nop(); + } return; } // Second word holds a pointer to the next branch in label's chain. uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET; // Make the whole branch continous in the buffer. m_buffer.ensureSpace(4 * sizeof(uint32_t)); spew("bal .Llabel %p\n", label); BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode()); writeInst(nextInChain); - if (!oom()) + if (!oom()) { label->use(bo.getOffset()); + } // Leave space for long jump. as_nop(); - if (delaySlotFill == FillDelaySlot) + if (delaySlotFill == FillDelaySlot) { as_nop(); + } } void MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label, JumpKind jumpKind) { spew("branch .Llabel %p", label); MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode()); InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0)); if (label->bound()) { int32_t offset = label->offset() - m_buffer.nextOffset().getOffset(); - if (BOffImm16::IsInRange(offset)) + if (BOffImm16::IsInRange(offset)) { jumpKind = ShortJump; + } if (jumpKind == ShortJump) { MOZ_ASSERT(BOffImm16::IsInRange(offset)); code.setBOffImm16(BOffImm16(offset)); #ifdef JS_JITSPEW decodeBranchInstAndSpew(code); #endif writeInst(code.encode()); @@ -582,38 +588,41 @@ MacroAssemblerMIPS::branchWithCode(InstI // Indicate that this is short jump with offset 4. code.setBOffImm16(BOffImm16(4)); #ifdef JS_JITSPEW decodeBranchInstAndSpew(code); #endif BufferOffset bo = writeInst(code.encode()); writeInst(nextInChain); - if (!oom()) + if (!oom()) { label->use(bo.getOffset()); + } return; } bool conditional = code.encode() != inst_beq.encode(); // Make the whole branch continous in the buffer. m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t)); #ifdef JS_JITSPEW decodeBranchInstAndSpew(code); #endif BufferOffset bo = writeInst(code.encode()); writeInst(nextInChain); - if (!oom()) + if (!oom()) { label->use(bo.getOffset()); + } // Leave space for potential long jump. 
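ma_bal() and branchWithCode() above both append two words per unbound forward branch: the branch instruction itself and, in the following word, the buffer offset of the previous use of the same label, forming a singly linked chain that bind() later walks and patches. A stand-alone model of that chain (hypothetical types; the real code threads offsets through label->use() and label->offset()):

    #include <cstdint>
    #include <vector>

    constexpr uint32_t kInvalidOffset = 0xFFFFFFFFu;

    struct Label {
        uint32_t head = kInvalidOffset;  // newest unbound user, if any
    };

    struct Buffer {
        std::vector<uint32_t> words;

        void emitBranch(Label& l, uint32_t branchBits) {
            uint32_t site = uint32_t(words.size());
            words.push_back(branchBits);  // the branch instruction
            words.push_back(l.head);      // second word: next user in chain
            l.head = site;
        }

        void bind(Label& l, uint32_t target) {
            uint32_t site = l.head;
            while (site != kInvalidOffset) {
                uint32_t next = words[site + 1];
                words[site + 1] = target;  // stand-in for real patching
                site = next;
            }
            l.head = kInvalidOffset;
        }
    };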
as_nop(); as_nop(); - if (conditional) + if (conditional) { as_nop(); + } } void MacroAssemblerMIPSCompat::cmp64Set(Condition cond, Register64 lhs, Imm64 val, Register dest) { if (val.value == 0) { switch(cond){ @@ -625,26 +634,28 @@ MacroAssemblerMIPSCompat::cmp64Set(Condi case Assembler::NotEqual: case Assembler::Above: as_or(dest, lhs.high, lhs.low); as_sltu(dest, zero, dest); break; case Assembler::LessThan: case Assembler::GreaterThanOrEqual: as_slt(dest, lhs.high, zero); - if (cond == Assembler::GreaterThanOrEqual) + if (cond == Assembler::GreaterThanOrEqual) { as_xori(dest, dest, 1); + } break; case Assembler::GreaterThan: case Assembler::LessThanOrEqual: as_or(SecondScratchReg, lhs.high, lhs.low); as_sra(ScratchRegister, lhs.high, 31); as_sltu(dest, ScratchRegister, SecondScratchReg); - if (cond == Assembler::LessThanOrEqual) + if (cond == Assembler::LessThanOrEqual) { as_xori(dest, dest, 1); + } break; case Assembler::Below: case Assembler::AboveOrEqual: as_ori(dest, zero, cond == Assembler::AboveOrEqual ? 1 : 0); break; default: MOZ_CRASH("Condition code not supported"); break; @@ -935,33 +946,35 @@ MacroAssemblerMIPS::ma_sdc1WordAligned(F as_swc1(ft, base, off + PAYLOAD_OFFSET); as_swc1(getOddPair(ft), base, off + TAG_OFFSET); } void MacroAssemblerMIPS::ma_pop(FloatRegister f) { - if (f.isDouble()) + if (f.isDouble()) { ma_ldc1WordAligned(f, StackPointer, 0); - else + } else { as_lwc1(f, StackPointer, 0); + } as_addiu(StackPointer, StackPointer, f.size()); } void MacroAssemblerMIPS::ma_push(FloatRegister f) { as_addiu(StackPointer, StackPointer, -f.size()); - if(f.isDouble()) + if (f.isDouble()) { ma_sdc1WordAligned(f, StackPointer, 0); - else + } else { as_swc1(f, StackPointer, 0); + } } bool MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void* fakeReturnAddr) { uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), FrameType::IonJS, ExitFrameLayout::Size()); @@ -1400,18 +1413,19 @@ MacroAssemblerMIPSCompat::testUndefinedS MOZ_ASSERT(cond == Equal || cond == NotEqual); ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), cond); } // unboxing code void MacroAssemblerMIPSCompat::unboxNonDouble(const ValueOperand& operand, Register dest, JSValueType) { - if (operand.payloadReg() != dest) + if (operand.payloadReg() != dest) { ma_move(dest, operand.payloadReg()); + } } void MacroAssemblerMIPSCompat::unboxNonDouble(const Address& src, Register dest, JSValueType) { ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET)); } @@ -1514,18 +1528,19 @@ MacroAssemblerMIPSCompat::boxDouble(Floa moveFromDoubleLo(src, dest.payloadReg()); moveFromDoubleHi(src, dest.typeReg()); } void MacroAssemblerMIPSCompat::boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) { - if (src != dest.payloadReg()) + if (src != dest.payloadReg()) { ma_move(dest.payloadReg(), src); + } ma_li(dest.typeReg(), ImmType(type)); } void MacroAssemblerMIPSCompat::boolValueToDouble(const ValueOperand& operand, FloatRegister dest) { convertBoolToInt32(operand.payloadReg(), ScratchRegister); convertInt32ToDouble(ScratchRegister, dest); @@ -1636,20 +1651,21 @@ uint32_t MacroAssemblerMIPSCompat::getType(const Value& val) { return val.toNunboxTag(); } void MacroAssemblerMIPSCompat::moveData(const Value& val, Register data) { - if (val.isGCThing()) + if (val.isGCThing()) { ma_li(data, ImmGCPtr(val.toGCThing())); - else + } else { ma_li(data, Imm32(val.toNunboxPayload())); + } } CodeOffsetJump MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label) { // Only one branch per label. 
MOZ_ASSERT(!label->used()); @@ -1757,18 +1773,19 @@ MacroAssemblerMIPSCompat::loadValue(Addr } } void MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload, ValueOperand dest) { MOZ_ASSERT(payload != dest.typeReg()); ma_li(dest.typeReg(), ImmType(type)); - if (payload != dest.payloadReg()) + if (payload != dest.payloadReg()) { ma_move(dest.payloadReg(), payload); + } } void MacroAssemblerMIPSCompat::pushValue(ValueOperand val) { // Allocate stack slots for type and payload. One for each. asMasm().subPtr(Imm32(sizeof(Value)), StackPointer); // Store type and payload. @@ -2049,18 +2066,19 @@ void MacroAssemblerMIPSCompat::profilerExitFrame() { jump(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail()); } void MacroAssembler::subFromStackPtr(Imm32 imm32) { - if (imm32.value) + if (imm32.value) { asMasm().subPtr(imm32, StackPointer); + } } //{{{ check_macroassembler_style // =============================================================== // Stack manipulation functions. void MacroAssembler::PushRegsInMask(LiveRegisterSet set) @@ -2105,28 +2123,30 @@ MacroAssembler::PopRegsInMaskIgnore(Live // Read the buffer form the first aligned location. ma_addu(SecondScratchReg, sp, Imm32(reservedF)); ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1))); diffF -= sizeof(double); LiveFloatRegisterSet fpignore(ignore.fpus().reduceSetForPush()); for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) { - if (!ignore.has(*iter)) + if (!ignore.has(*iter)) { as_ldc1(*iter, SecondScratchReg, -diffF); + } diffF -= sizeof(double); } freeStack(reservedF); MOZ_ASSERT(diffF == 0); } for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) { diffG -= sizeof(intptr_t); - if (!ignore.has(*iter)) + if (!ignore.has(*iter)) { loadPtr(Address(StackPointer, diffG), *iter); + } } freeStack(reservedG); MOZ_ASSERT(diffG == 0); } void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest, Register scratch) { @@ -2198,18 +2218,19 @@ MacroAssembler::callWithABIPre(uint32_t* // Save $ra because call is going to clobber it. Restore it in // callWithABIPost. NOTE: This is needed for calls from SharedIC. // Maybe we can do this differently. storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t))); // Position all arguments. { enoughMemory_ &= moveResolver_.resolve(); - if (!enoughMemory_) + if (!enoughMemory_) { return; + } MoveEmitter emitter(*this); emitter.emit(moveResolver_); emitter.finish(); } assertStackAlignment(ABIStackAlignment); } @@ -2269,18 +2290,19 @@ MacroAssembler::moveValue(const TypedOrV return; } MIRType type = src.type(); AnyRegister reg = src.typedReg(); if (!IsFloatingPointType(type)) { mov(ImmWord(MIRTypeToTag(type)), dest.typeReg()); - if (reg.gpr() != dest.payloadReg()) + if (reg.gpr() != dest.payloadReg()) { move32(reg.gpr(), dest.payloadReg()); + } return; } ScratchDoubleScope scratch(*this); FloatRegister freg = reg.fpu(); if (type == MIRType::Float32) { convertFloat32ToDouble(freg, scratch); freg = scratch; @@ -2309,30 +2331,33 @@ MacroAssembler::moveValue(const ValueOpe move32(scratch, d0); return; } // If only one is, copy that source first. 
mozilla::Swap(s0, s1); mozilla::Swap(d0, d1); } - if (s0 != d0) + if (s0 != d0) { move32(s0, d0); - if (s1 != d1) + } + if (s1 != d1) { move32(s1, d1); + } } void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) { move32(Imm32(src.toNunboxTag()), dest.typeReg()); - if (src.isGCThing()) + if (src.isGCThing()) { movePtr(ImmGCPtr(src.toGCThing()), dest.payloadReg()); - else + } else { move32(Imm32(src.toNunboxPayload()), dest.payloadReg()); + } } // =============================================================== // Branch functions void MacroAssembler::branchValueIsNurseryCell(Condition cond, const Address& address, Register temp, Label* label) @@ -2409,24 +2434,26 @@ MacroAssembler::storeUnboxedValue(const const T& dest, MIRType slotType) { if (valueType == MIRType::Double) { storeDouble(value.reg().typedReg().fpu(), dest); return; } // Store the type tag if needed. - if (valueType != slotType) + if (valueType != slotType) { storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest); + } // Store the payload. - if (value.constant()) + if (value.constant()) { storePayload(value.value(), dest); - else + } else { storePayload(value.reg().typedReg().gpr(), dest); + } } template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, const Address& dest, MIRType slotType); template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, const BaseObjectElementIndex& dest, MIRType slotType); @@ -2567,39 +2594,41 @@ MacroAssemblerMIPSCompat::wasmLoadI64Imp BaseIndex address(memoryBase, ptr, TimesOne); MOZ_ASSERT(INT64LOW_OFFSET == 0); if (IsUnaligned(access)) { MOZ_ASSERT(tmp != InvalidReg); if (byteSize <= 4) { asMasm().ma_load_unaligned(access, output.low, address, tmp, static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend); - if (!isSigned) + if (!isSigned) { asMasm().move32(Imm32(0), output.high); - else + } else { asMasm().ma_sra(output.high, output.low, Imm32(31)); + } } else { MOZ_ASSERT(output.low != ptr); asMasm().ma_load_unaligned(access, output.low, address, tmp, SizeWord, ZeroExtend); asMasm().ma_load_unaligned(access, output.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), tmp, SizeWord, SignExtend); } return; } asMasm().memoryBarrierBefore(access.sync()); if (byteSize <= 4) { asMasm().ma_load(output.low, address, static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend); asMasm().append(access, asMasm().size() - 4); - if (!isSigned) + if (!isSigned) { asMasm().move32(Imm32(0), output.high); - else + } else { asMasm().ma_sra(output.high, output.low, Imm32(31)); + } } else { MOZ_ASSERT(output.low != ptr); asMasm().ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord); asMasm().append(access, asMasm().size() - 4); asMasm().ma_load(output.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), SizeWord); asMasm().append(access, asMasm().size() - 4); } asMasm().memoryBarrierAfter(access.sync());
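wasmLoadI64Impl() above splits every 64-bit access into word-sized pieces: sub-word loads fill output.low and then either zero output.high or copy the sign bit into it, while full 8-byte loads are two word loads at INT64LOW_OFFSET/INT64HIGH_OFFSET. A portable sketch of the same splitting, assuming a little-endian host as MIPS32el is:

    #include <cstdint>
    #include <cstring>

    int64_t loadI64(const uint8_t* p, unsigned byteSize, bool isSigned)
    {
        uint32_t lo = 0;
        uint32_t hi = 0;
        if (byteSize <= 4) {
            std::memcpy(&lo, p, byteSize);
            if (isSigned && byteSize < 4) {
                // Sign-extend within the low word, as ma_load's SignExtend does.
                unsigned shift = 32 - 8 * byteSize;
                lo = uint32_t(int32_t(lo << shift) >> shift);
            }
            // move32(Imm32(0), output.high) vs. ma_sra(output.high, .., 31)
            hi = isSigned ? uint32_t(int32_t(lo) >> 31) : 0;
        } else {
            std::memcpy(&lo, p, 4);      // INT64LOW_OFFSET == 0
            std::memcpy(&hi, p + 4, 4);  // INT64HIGH_OFFSET
        }
        return int64_t((uint64_t(hi) << 32) | lo);
    }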
--- a/js/src/jit/mips32/MacroAssembler-mips32.h +++ b/js/src/jit/mips32/MacroAssembler-mips32.h @@ -455,27 +455,29 @@ class MacroAssemblerMIPSCompat : public void moveData(const Value& val, Register data); public: void moveValue(const Value& val, Register type, Register data); CodeOffsetJump jumpWithPatch(RepatchLabel* label); void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) { - if (dest.isFloat()) + if (dest.isFloat()) { loadInt32OrDouble(address, dest.fpu()); - else + } else { ma_lw(dest.gpr(), ToPayload(address)); + } } void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) { - if (dest.isFloat()) + if (dest.isFloat()) { loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale); - else + } else { load32(ToPayload(address), dest.gpr()); + } } template <typename T> void storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest, MIRType slotType); template <typename T> void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes, JSValueType) { @@ -508,20 +510,22 @@ class MacroAssemblerMIPSCompat : public move32(ScratchRegister, d0); return; } // If only one is, copy that source first. mozilla::Swap(s0, s1); mozilla::Swap(d0, d1); } - if (s0 != d0) + if (s0 != d0) { move32(s0, d0); - if (s1 != d1) + } + if (s1 != d1) { move32(s1, d1); + } } void storeValue(ValueOperand val, Operand dst); void storeValue(ValueOperand val, const BaseIndex& dest); void storeValue(JSValueType type, Register reg, BaseIndex dest); void storeValue(ValueOperand val, const Address& dest); void storeValue(JSValueType type, Register reg, Address dest); void storeValue(const Value& val, Address dest); @@ -541,31 +545,33 @@ class MacroAssemblerMIPSCompat : public void loadValue(const BaseIndex& addr, ValueOperand val); void tagValue(JSValueType type, Register payload, ValueOperand dest); void pushValue(ValueOperand val); void popValue(ValueOperand val); #if MOZ_LITTLE_ENDIAN void pushValue(const Value& val) { push(Imm32(val.toNunboxTag())); - if (val.isGCThing()) + if (val.isGCThing()) { push(ImmGCPtr(val.toGCThing())); - else + } else { push(Imm32(val.toNunboxPayload())); + } } void pushValue(JSValueType type, Register reg) { push(ImmTag(JSVAL_TYPE_TO_TAG(type))); ma_push(reg); } #else void pushValue(const Value& val) { - if (val.isGCThing()) + if (val.isGCThing()) { push(ImmGCPtr(val.toGCThing())); - else + } else { push(Imm32(val.toNunboxPayload())); + } push(Imm32(val.toNunboxTag())); } void pushValue(JSValueType type, Register reg) { ma_push(reg); push(ImmTag(JSVAL_TYPE_TO_TAG(type))); } #endif void pushValue(const Address& addr);
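The two preprocessor variants of pushValue() above exist because a push moves the stack pointer downward, so whatever is pushed first ends up at the higher address. With the little-endian nunbox (payload in the low word, tag in the high word) the tag must be pushed first; big-endian reverses the order. A model of the little-endian case:

    #include <cstdint>

    // sp models StackPointer; pushing decrements it first, so the tag
    // lands at the higher address, i.e. the Value's high word.
    void pushValueLE(uint32_t*& sp, uint32_t tag, uint32_t payload)
    {
        *--sp = tag;
        *--sp = payload;
    }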
--- a/js/src/jit/mips32/MoveEmitter-mips32.cpp
+++ b/js/src/jit/mips32/MoveEmitter-mips32.cpp
@@ -76,18 +76,19 @@ MoveEmitterMIPS::completeCycle(const Mov
     switch (type) {
       case MoveOp::FLOAT32:
         if (to.isMemory()) {
             FloatRegister temp = ScratchFloat32Reg;
             masm.loadFloat32(cycleSlot(slotId, 0), temp);
             masm.storeFloat32(temp, getAdjustedAddress(to));
         } else {
             uint32_t offset = 0;
-            if (from.floatReg().numAlignedAliased() == 1)
+            if (from.floatReg().numAlignedAliased() == 1) {
                 offset = sizeof(float);
+            }
             masm.loadFloat32(cycleSlot(slotId, offset), to.floatReg());
         }
         break;
       case MoveOp::DOUBLE:
         if (to.isMemory()) {
             FloatRegister temp = ScratchDoubleReg;
             masm.loadDouble(cycleSlot(slotId, 0), temp);
             masm.storeDouble(temp, getAdjustedAddress(to));
--- a/js/src/jit/mips32/Simulator-mips32.cpp +++ b/js/src/jit/mips32/Simulator-mips32.cpp @@ -521,21 +521,23 @@ mozilla::Atomic<size_t, mozilla::Release SimulatorProcess* SimulatorProcess::singleton_ = nullptr; int Simulator::StopSimAt = -1; Simulator* Simulator::Create(JSContext* cx) { auto sim = MakeUnique<Simulator>(); - if (!sim) + if (!sim) { return nullptr; - - if (!sim->init()) + } + + if (!sim->init()) { return nullptr; + } char* stopAtStr = getenv("MIPS_SIM_STOP_AT"); int64_t stopAt; if (stopAtStr && sscanf(stopAtStr, "%lld", &stopAt) == 1) { fprintf(stderr, "\nStopping simulation at icount %lld\n", stopAt); Simulator::StopSimAt = stopAt; } @@ -612,18 +614,19 @@ MipsDebugger::stop(SimInstruction* instr } sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize); debug(); } int32_t MipsDebugger::getRegisterValue(int regnum) { - if (regnum == kPCRegister) + if (regnum == kPCRegister) { return sim_->get_pc(); + } return sim_->getRegister(regnum); } int32_t MipsDebugger::getFPURegisterValueInt(int regnum) { return sim_->getFpuRegister(regnum); } @@ -659,63 +662,68 @@ MipsDebugger::getValue(const char* desc, } return sscanf(desc, "%i", value) == 1; } bool MipsDebugger::setBreakpoint(SimInstruction* breakpc) { // Check if a breakpoint can be set. If not return without any side-effects. - if (sim_->break_pc_ != nullptr) + if (sim_->break_pc_ != nullptr) { return false; + } // Set the breakpoint. sim_->break_pc_ = breakpc; sim_->break_instr_ = breakpc->instructionBits(); // Not setting the breakpoint instruction in the code itself. It will be set // when the debugger shell continues. return true; } bool MipsDebugger::deleteBreakpoint(SimInstruction* breakpc) { - if (sim_->break_pc_ != nullptr) + if (sim_->break_pc_ != nullptr) { sim_->break_pc_->setInstructionBits(sim_->break_instr_); + } sim_->break_pc_ = nullptr; sim_->break_instr_ = 0; return true; } void MipsDebugger::undoBreakpoints() { - if (sim_->break_pc_) + if (sim_->break_pc_) { sim_->break_pc_->setInstructionBits(sim_->break_instr_); + } } void MipsDebugger::redoBreakpoints() { - if (sim_->break_pc_) + if (sim_->break_pc_) { sim_->break_pc_->setInstructionBits(kBreakpointInstr); + } } void MipsDebugger::printAllRegs() { int32_t value; for (uint32_t i = 0; i < Registers::Total; i++) { value = getRegisterValue(i); printf("%3s: 0x%08x %10d ", Registers::GetName(i), value, value); - if (i % 2) + if (i % 2) { printf("\n"); + } } printf("\n"); value = getRegisterValue(Simulator::LO); printf(" LO: 0x%08x %10d ", value, value); value = getRegisterValue(Simulator::HI); printf(" HI: 0x%08x %10d\n", value, value); value = getRegisterValue(Simulator::pc); @@ -764,24 +772,26 @@ ReadLine(const char* prompt) if (len > 0 && lineBuf[len - 1] == '\n') { // Since we read a new line we are done reading the line. This // will exit the loop after copying this buffer into the result. keepGoing = false; } if (!result) { // Allocate the initial result and make room for the terminating '\0' result.reset(js_pod_malloc<char>(len + 1)); - if (!result) + if (!result) { return nullptr; + } } else { // Allocate a new result with enough room for the new addition. int new_len = offset + len + 1; char* new_result = js_pod_malloc<char>(new_len); - if (!new_result) + if (!new_result) { return nullptr; + } // Copy the existing input into the new array and set the new // array as the result. memcpy(new_result, result.get(), offset * sizeof(char)); result.reset(new_result); } // Copy the newly read line into the result. 
memcpy(result.get() + offset, lineBuf, len * sizeof(char)); offset += len; @@ -797,18 +807,19 @@ DisassembleInstruction(uint32_t pc) { uint8_t* bytes = reinterpret_cast<uint8_t*>(pc); char hexbytes[256]; sprintf(hexbytes, "0x%x 0x%x 0x%x 0x%x", bytes[0], bytes[1], bytes[2], bytes[3]); char llvmcmd[1024]; sprintf(llvmcmd, "bash -c \"echo -n '%p'; echo '%s' | " "llvm-mc -disassemble -arch=mipsel -mcpu=mips32r2 | " "grep -v pure_instructions | grep -v .text\"", static_cast<void*>(bytes), hexbytes); - if (system(llvmcmd)) + if (system(llvmcmd)) { printf("Cannot disassemble instruction.\n"); + } } void MipsDebugger::debug() { intptr_t lastPC = -1; bool done = false; @@ -980,18 +991,19 @@ MipsDebugger::debug() } else if (strcmp(cmd, "gdb") == 0) { printf("relinquishing control to gdb\n"); asm("int $3"); printf("regaining control from gdb\n"); } else if (strcmp(cmd, "break") == 0) { if (argc == 2) { int32_t value; if (getValue(arg1, &value)) { - if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) + if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) { printf("setting breakpoint failed\n"); + } } else { printf("%s unrecognized\n", arg1); } } else { printf("break <address>\n"); } } else if (strcmp(cmd, "del") == 0) { if (!deleteBreakpoint(nullptr)) { @@ -1138,22 +1150,24 @@ Simulator::setLastDebuggerInput(char* in js_free(lastDebuggerInput_); lastDebuggerInput_ = input; } static CachePage* GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache, void* page) { SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page); - if (p) + if (p) { return p->value(); + } AutoEnterOOMUnsafeRegion oomUnsafe; CachePage* new_page = js_new<CachePage>(); - if (!new_page || !i_cache.add(p, page, new_page)) + if (!new_page || !i_cache.add(p, page, new_page)) { oomUnsafe.crash("Simulator CachePage"); + } return new_page; } // Flush from start up to and not including start + size. static void FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache, intptr_t start, int size) { MOZ_ASSERT(size <= CachePage::kPageSize); @@ -1269,30 +1283,32 @@ Simulator::Simulator() LLAddr_ = 0; lastLLValue_ = 0; // The ra and pc are initialized to a known bad value that will cause an // access violation if the simulator ever tries to execute it. registers_[pc] = bad_ra; registers_[ra] = bad_ra; - for (int i = 0; i < kNumExceptions; i++) + for (int i = 0; i < kNumExceptions; i++) { exceptions[i] = 0; + } lastDebuggerInput_ = nullptr; } bool Simulator::init() { // Allocate 2MB for the stack. Note that we will only use 1MB, see below. static const size_t stackSize = 2 * 1024 * 1024; stack_ = js_pod_malloc<char>(stackSize); - if (!stack_) + if (!stack_) { return false; + } // Leave a safety margin of 1MB to prevent overrunning the stack when // pushing values (total stack size is 2MB). stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024; // The sp is initialized to point to the bottom (high address) of the // allocated stack area. To be safe in potential stack underflows we leave // some buffer below. 
@@ -1370,18 +1386,19 @@ Simulator::~Simulator() { js_free(stack_); } SimulatorProcess::SimulatorProcess() : cacheLock_(mutexid::SimulatorCacheLock) , redirection_(nullptr) { - if (getenv("MIPS_SIM_ICACHE_CHECKS")) + if (getenv("MIPS_SIM_ICACHE_CHECKS")) { ICacheCheckingDisableCount = 0; + } } SimulatorProcess::~SimulatorProcess() { Redirection* r = redirection_; while (r) { Redirection* next = r->next_; js_delete(r); @@ -1441,18 +1458,19 @@ Simulator::setFpuRegisterDouble(int fpur } // Get the register from the architecture state. This function does handle // the special case of accessing the PC register. int32_t Simulator::getRegister(int reg) const { MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters)); - if (reg == 0) + if (reg == 0) { return 0; + } return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0); } double Simulator::getDoubleFromRegisterPair(int reg) { MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters) && ((reg % 2) == 0)); @@ -1530,20 +1548,21 @@ Simulator::setCallResult(int64_t res) setRegister(v0, static_cast<int32_t>(res)); setRegister(v1, static_cast<int32_t>(res >> 32)); } // Helper functions for setting and testing the FCSR register's bits. void Simulator::setFCSRBit(uint32_t cc, bool value) { - if (value) + if (value) { FCSR_ |= (1 << cc); - else + } else { FCSR_ &= ~(1 << cc); + } } bool Simulator::testFCSRBit(uint32_t cc) { return FCSR_ & (1 << cc); } @@ -1624,40 +1643,45 @@ Simulator::registerState() // WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses // using a signal handler that redirects PC to a stub that safely reports an // error. However, if the handler is hit by the simulator, the PC is in C++ code // and cannot be redirected. Therefore, we must avoid hitting the handler by // redirecting in the simulator before the real handler would have been hit. 
bool Simulator::handleWasmFault(int32_t addr, unsigned numBytes) { - if (!wasm::CodeExists) + if (!wasm::CodeExists) { return false; + } JSContext* cx = TlsContext.get(); - if (!cx->activation() || !cx->activation()->isJit()) + if (!cx->activation() || !cx->activation()->isJit()) { return false; + } JitActivation* act = cx->activation()->asJit(); void* pc = reinterpret_cast<void*>(get_pc()); uint8_t* fp = reinterpret_cast<uint8_t*>(getRegister(Register::fp)); const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc); - if (!segment || !segment->isModule()) + if (!segment || !segment->isModule()) { return false; + } const wasm::ModuleSegment* moduleSegment = segment->asModule(); wasm::Instance* instance = wasm::LookupFaultingInstance(*moduleSegment, pc, fp); - if (!instance) + if (!instance) { return false; + } MOZ_RELEASE_ASSERT(&instance->code() == &moduleSegment->code()); - if (!instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes)) + if (!instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes)) { return false; + } LLBit_ = false; wasm::Trap trap; wasm::BytecodeOffset bytecode; MOZ_ALWAYS_TRUE(moduleSegment->code().lookupTrap(pc, &trap, &bytecode)); MOZ_RELEASE_ASSERT(trap == wasm::Trap::OutOfBounds); @@ -1665,35 +1689,39 @@ Simulator::handleWasmFault(int32_t addr, act->startWasmTrap(wasm::Trap::OutOfBounds, bytecode.offset(), registerState()); set_pc(int32_t(moduleSegment->trapCode())); return true; } bool Simulator::handleWasmTrapFault() { - if (!wasm::CodeExists) + if (!wasm::CodeExists) { return false; + } JSContext* cx = TlsContext.get(); - if (!cx->activation() || !cx->activation()->isJit()) + if (!cx->activation() || !cx->activation()->isJit()) { return false; + } JitActivation* act = cx->activation()->asJit(); void* pc = reinterpret_cast<void*>(get_pc()); const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc); - if (!segment || !segment->isModule()) + if (!segment || !segment->isModule()) { return false; + } const wasm::ModuleSegment* moduleSegment = segment->asModule(); wasm::Trap trap; wasm::BytecodeOffset bytecode; - if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode)) + if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode)) { return false; + } act->startWasmTrap(trap, bytecode.offset(), registerState()); set_pc(int32_t(moduleSegment->trapCode())); return true; } // MIPS memory instructions (except lwl/r and swl/r) trap on unaligned memory // access enabling the OS to handle them via trap-and-emulate. @@ -1702,202 +1730,215 @@ Simulator::handleWasmTrapFault() // Since the host is typically IA32 it will not trap on unaligned memory access. // We assume that that executing correct generated code will not produce unaligned // memory access, so we explicitly check for address alignment and trap. // Note that trapping does not occur when executing wasm code, which requires that // unaligned memory access provides correct result. 
int Simulator::readW(uint32_t addr, SimInstruction* instr) { - if (handleWasmFault(addr, 4)) + if (handleWasmFault(addr, 4)) { return -1; + } if ((addr & kPointerAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); return *ptr; } printf("Unaligned read at 0x%08x, pc=0x%08" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); return 0; } void Simulator::writeW(uint32_t addr, int value, SimInstruction* instr) { - if (handleWasmFault(addr, 4)) + if (handleWasmFault(addr, 4)) { return; + } if ((addr & kPointerAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { intptr_t* ptr = reinterpret_cast<intptr_t*>(addr); LLBit_ = false; *ptr = value; return; } printf("Unaligned write at 0x%08x, pc=0x%08" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); } double Simulator::readD(uint32_t addr, SimInstruction* instr) { - if (handleWasmFault(addr, 8)) + if (handleWasmFault(addr, 8)) { return NAN; + } if ((addr & kDoubleAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { double* ptr = reinterpret_cast<double*>(addr); return *ptr; } printf("Unaligned (double) read at 0x%08x, pc=0x%08" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); return 0; } void Simulator::writeD(uint32_t addr, double value, SimInstruction* instr) { - if (handleWasmFault(addr, 8)) + if (handleWasmFault(addr, 8)) { return; + } if ((addr & kDoubleAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { double* ptr = reinterpret_cast<double*>(addr); LLBit_ = false; *ptr = value; return; } printf("Unaligned (double) write at 0x%08x, pc=0x%08" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); } uint16_t Simulator::readHU(uint32_t addr, SimInstruction* instr) { - if (handleWasmFault(addr, 2)) + if (handleWasmFault(addr, 2)) { return 0xffff; + } if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); return *ptr; } printf("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); return 0; } int16_t Simulator::readH(uint32_t addr, SimInstruction* instr) { - if (handleWasmFault(addr, 2)) + if (handleWasmFault(addr, 2)) { return -1; + } if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { int16_t* ptr = reinterpret_cast<int16_t*>(addr); return *ptr; } printf("Unaligned signed halfword read at 0x%08x, pc=0x%08" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); return 0; } void Simulator::writeH(uint32_t addr, uint16_t value, SimInstruction* instr) { - if (handleWasmFault(addr, 2)) + if (handleWasmFault(addr, 2)) { return; + } if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); LLBit_ = false; *ptr = value; return; } printf("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); } void Simulator::writeH(uint32_t addr, int16_t value, SimInstruction* instr) { - if (handleWasmFault(addr, 2)) + if (handleWasmFault(addr, 2)) { return; + } if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { int16_t* ptr = reinterpret_cast<int16_t*>(addr); LLBit_ = false; *ptr = value; return; } printf("Unaligned halfword write at 0x%08x, pc=0x%08" PRIxPTR "\n", 
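Each readW/writeW-style accessor above applies the same predicate before dereferencing: the address must be naturally aligned for the access size, unless the PC is currently inside compiled wasm code, which is expected to cope with (and get correct results for) unaligned accesses. Distilled into a single function:

    #include <cstdint>

    bool wordAccessAllowed(uint32_t addr, bool pcInCompiledWasm)
    {
        const uint32_t kPointerAlignmentMask = sizeof(uint32_t) - 1;
        return (addr & kPointerAlignmentMask) == 0 || pcInCompiledWasm;
    }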
addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); } uint32_t Simulator::readBU(uint32_t addr) { - if (handleWasmFault(addr, 1)) + if (handleWasmFault(addr, 1)) { return 0xff; + } uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); return *ptr; } int32_t Simulator::readB(uint32_t addr) { - if (handleWasmFault(addr, 1)) + if (handleWasmFault(addr, 1)) { return -1; + } int8_t* ptr = reinterpret_cast<int8_t*>(addr); return *ptr; } void Simulator::writeB(uint32_t addr, uint8_t value) { - if (handleWasmFault(addr, 1)) + if (handleWasmFault(addr, 1)) { return; + } uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); LLBit_ = false; *ptr = value; } void Simulator::writeB(uint32_t addr, int8_t value) { - if (handleWasmFault(addr, 1)) + if (handleWasmFault(addr, 1)) { return; + } int8_t* ptr = reinterpret_cast<int8_t*>(addr); LLBit_ = false; *ptr = value; } int Simulator::loadLinkedW(uint32_t addr, SimInstruction* instr) { if ((addr & kPointerAlignmentMask) == 0) { - if (handleWasmFault(addr, 1)) + if (handleWasmFault(addr, 1)) { return -1; + } volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr); int32_t value = *ptr; lastLLValue_ = value; LLAddr_ = addr; // Note that any memory write or "external" interrupt should reset this value to false. LLBit_ = true; return value; @@ -1950,18 +1991,19 @@ uintptr_t* Simulator::addressOfStackLimit() { return &stackLimit_; } bool Simulator::overRecursed(uintptr_t newsp) const { - if (newsp == 0) + if (newsp == 0) { newsp = getRegister(sp); + } return newsp <= stackLimit(); } bool Simulator::overRecursedWithExtra(uint32_t extra) const { uintptr_t newsp = getRegister(sp) - extra; return newsp <= stackLimit(); @@ -2056,18 +2098,19 @@ Simulator::softwareInterrupt(SimInstruct intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction()); bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0; if (!stack_aligned) { fprintf(stderr, "Runtime call with unaligned stack!\n"); MOZ_CRASH(); } - if (single_stepping_) + if (single_stepping_) { single_step_callback_(single_step_callback_arg_, this, nullptr); + } switch (redirection->type()) { case Args_General0: { Prototype_General0 target = reinterpret_cast<Prototype_General0>(external); int64_t result = target(); setCallResult(result); break; } @@ -2276,18 +2319,19 @@ Simulator::softwareInterrupt(SimInstruct double dresult = target(dval0, dval1, dval2, dval3); setCallResultDouble(dresult); break; } default: MOZ_CRASH("call"); } - if (single_stepping_) + if (single_stepping_) { single_step_callback_(single_step_callback_arg_, this, nullptr); + } setRegister(ra, saved_ra); set_pc(getRegister(ra)); #endif } else if (func == ff_break && code <= kMaxStopCode) { if (isWatchpoint(code)) { printWatchpoint(code); } else { @@ -2297,18 +2341,19 @@ Simulator::softwareInterrupt(SimInstruct } else { switch (func) { case ff_tge: case ff_tgeu: case ff_tlt: case ff_tltu: case ff_teq: case ff_tne: - if (instr->bits(15, 6) == kWasmTrapCode && handleWasmTrapFault()) + if (instr->bits(15, 6) == kWasmTrapCode && handleWasmTrapFault()) { return; + } }; // All remaining break_ codes, and all traps are handled here. MipsDebugger dbg(this); dbg.debug(); } } // Stop helper functions. 
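loadLinkedW() above arms the simulated LL/SC reservation: it records the address and value and sets LLBit_, and every ordinary store (and the wasm fault path) clears the bit, so the matching store-conditional, elided from this hunk, succeeds only if nothing intervened. A reduced model of that bookkeeping, with hypothetical names:

    #include <cstdint>

    struct LLSCModel {
        bool llBit = false;     // LLBit_
        uint32_t llAddr = 0;    // LLAddr_
        int32_t memory[64] = {};

        int32_t ll(uint32_t idx) {
            llBit = true;
            llAddr = idx;
            return memory[idx];
        }
        void plainStore(uint32_t idx, int32_t v) {
            memory[idx] = v;
            llBit = false;      // any write invalidates the reservation
        }
        bool sc(uint32_t idx, int32_t v) {
            if (!llBit || llAddr != idx) {
                return false;   // reservation lost, retry the LL/SC loop
            }
            memory[idx] = v;
            llBit = false;
            return true;
        }
    };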
@@ -2356,25 +2401,27 @@ Simulator::isEnabledStop(uint32_t code) MOZ_ASSERT(code <= kMaxStopCode); MOZ_ASSERT(code > kMaxWatchpointCode); return !(watchedStops_[code].count_ & kStopDisabledBit); } void Simulator::enableStop(uint32_t code) { - if (!isEnabledStop(code)) + if (!isEnabledStop(code)) { watchedStops_[code].count_ &= ~kStopDisabledBit; + } } void Simulator::disableStop(uint32_t code) { - if (isEnabledStop(code)) + if (isEnabledStop(code)) { watchedStops_[code].count_ |= kStopDisabledBit; + } } void Simulator::increaseStopCounter(uint32_t code) { MOZ_ASSERT(code <= kMaxStopCode); if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) { printf("Stop counter for code %i has overflowed.\n" @@ -2410,18 +2457,19 @@ Simulator::printStopInfo(uint32_t code) } } } void Simulator::signalExceptions() { for (int i = 1; i < kNumExceptions; i++) { - if (exceptions[i] != 0) + if (exceptions[i] != 0) { MOZ_CRASH("Error: Exception raised."); + } } } // Handle execution based on instruction types. void Simulator::configureTypeRegister(SimInstruction* instr, int32_t& alu_out, int64_t& i64hilo, @@ -2661,20 +2709,21 @@ Simulator::configureTypeRegister(SimInst // Interpret sa field as 5-bit lsb of extract. uint16_t lsb = sa; uint16_t size = msb + 1; uint32_t mask = (1 << size) - 1; alu_out = (rs_u & (mask << lsb)) >> lsb; break; } case ff_bshfl: { // Mips32r2 instruction. - if (16 == sa) // seb + if (16 == sa) { // seb alu_out = I32(I8(rt)); - else if (24 == sa) // seh + } else if (24 == sa) { // seh alu_out = I32(I16(rt)); + } else { MOZ_CRASH(); } break; } default: MOZ_CRASH(); } @@ -3551,18 +3600,19 @@ Simulator::decodeTypeImmediate(SimInstru // We don't check for end_sim_pc. First it should not be met as the current // pc is valid. Secondly a jump should always execute its branch delay slot. SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize); branchDelayInstructionDecode(branch_delay_instr); } // If needed update pc after the branch delay execution. - if (next_pc != bad_ra) + if (next_pc != bad_ra) { set_pc(next_pc); + } } // Type 3: instructions using a 26 bytes immediate. (e.g. j, jal). void Simulator::decodeTypeJump(SimInstruction* instr) { // Get current pc. int32_t current_pc = get_pc(); @@ -3575,18 +3625,19 @@ Simulator::decodeTypeJump(SimInstruction // We don't check for end_sim_pc. First it should not be met as the current pc // is valid. Secondly a jump should always execute its branch delay slot. SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize); branchDelayInstructionDecode(branch_delay_instr); // Update pc and ra if necessary. // Do this after the branch delay execution. - if (instr->isLinkingInstruction()) + if (instr->isLinkingInstruction()) { setRegister(31, current_pc + 2 * SimInstruction::kInstrSize); + } set_pc(next_pc); pc_modified_ = true; } // Executes the current instruction. void Simulator::instructionDecode(SimInstruction* instr) { @@ -3604,18 +3655,19 @@ Simulator::instructionDecode(SimInstruct decodeTypeImmediate(instr); break; case SimInstruction::kJumpType: decodeTypeJump(instr); break; default: UNSUPPORTED(); } - if (!pc_modified_) + if (!pc_modified_) { setRegister(pc, reinterpret_cast<int32_t>(instr) + SimInstruction::kInstrSize); + } } void Simulator::branchDelayInstructionDecode(SimInstruction* instr) { if (instr->instructionBits() == NopInst) { // Short-cut generic nop instructions. 
They are always valid and they // never change the simulator state. @@ -3635,51 +3687,55 @@ Simulator::enable_single_stepping(Single single_step_callback_ = cb; single_step_callback_arg_ = arg; single_step_callback_(single_step_callback_arg_, this, (void*)get_pc()); } void Simulator::disable_single_stepping() { - if (!single_stepping_) + if (!single_stepping_) { return; + } single_step_callback_(single_step_callback_arg_, this, (void*)get_pc()); single_stepping_ = false; single_step_callback_ = nullptr; single_step_callback_arg_ = nullptr; } template<bool enableStopSimAt> void Simulator::execute() { - if (single_stepping_) + if (single_stepping_) { single_step_callback_(single_step_callback_arg_, this, nullptr); + } // Get the PC to simulate. Cannot use the accessor here as we need the // raw PC value and not the one used as input to arithmetic instructions. int program_counter = get_pc(); while (program_counter != end_sim_pc) { if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) { MipsDebugger dbg(this); dbg.debug(); } else { - if (single_stepping_) + if (single_stepping_) { single_step_callback_(single_step_callback_arg_, this, (void*)program_counter); + } SimInstruction* instr = reinterpret_cast<SimInstruction*>(program_counter); instructionDecode(instr); icount_++; } program_counter = get_pc(); } - if (single_stepping_) + if (single_stepping_) { single_step_callback_(single_step_callback_arg_, this, nullptr); + } } void Simulator::callInternal(uint8_t* entry) { // Prepare to execute the code at entry. setRegister(pc, reinterpret_cast<int32_t>(entry)); // Put down marker for end of simulation. The simulator will stop simulation @@ -3712,20 +3768,21 @@ Simulator::callInternal(uint8_t* entry) setRegister(s4, callee_saved_value); setRegister(s5, callee_saved_value); setRegister(s6, callee_saved_value); setRegister(s7, callee_saved_value); setRegister(gp, callee_saved_value); setRegister(fp, callee_saved_value); // Start the simulation. - if (Simulator::StopSimAt != -1) + if (Simulator::StopSimAt != -1) { execute<true>(); - else + } else { execute<false>(); + } // Check that the callee-saved registers have been preserved. MOZ_ASSERT(callee_saved_value == getRegister(s0)); MOZ_ASSERT(callee_saved_value == getRegister(s1)); MOZ_ASSERT(callee_saved_value == getRegister(s2)); MOZ_ASSERT(callee_saved_value == getRegister(s3)); MOZ_ASSERT(callee_saved_value == getRegister(s4)); MOZ_ASSERT(callee_saved_value == getRegister(s5)); @@ -3752,32 +3809,34 @@ int32_t Simulator::call(uint8_t* entry, int argument_count, ...) { va_list parameters; va_start(parameters, argument_count); int original_stack = getRegister(sp); // Compute position of stack on entry to generated code. int entry_stack = original_stack; - if (argument_count > kCArgSlotCount) + if (argument_count > kCArgSlotCount) { entry_stack = entry_stack - argument_count * sizeof(int32_t); - else + } else { entry_stack = entry_stack - kCArgsSlotsSize; + } entry_stack &= ~(ABIStackAlignment - 1); intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack); // Setup the arguments. for (int i = 0; i < argument_count; i++) { js::jit::Register argReg; - if (GetIntArgReg(i, &argReg)) + if (GetIntArgReg(i, &argReg)) { setRegister(argReg.code(), va_arg(parameters, int32_t)); - else + } else { stack_argument[i] = va_arg(parameters, int32_t); + } } va_end(parameters); setRegister(sp, entry_stack); callInternal(entry); // Pop stack passed arguments.
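Simulator::execute<> above brackets the fetch/decode loop with optional single-step callbacks. A reduced model of that loop, using a plain function pointer where the JIT uses its own callback type (all names here are illustrative):

    #include <cstdint>
    #include <cstdio>

    using StepCallback = void (*)(void* arg, intptr_t pc);

    struct TinySim {
        intptr_t pc = 0;
        intptr_t endPc = 4;   // Stand-in for end_sim_pc.
        int64_t icount = 0;
        StepCallback cb = nullptr;
        void* cbArg = nullptr;

        void step() { pc += 1; }  // Stand-in for instructionDecode().

        void execute() {
            while (pc != endPc) {
                if (cb) {
                    cb(cbArg, pc);  // Fires before each simulated instruction.
                }
                step();
                icount++;
            }
        }
    };

    static void trace(void*, intptr_t pc) { printf("pc=%ld\n", (long)pc); }

    int main() {
        TinySim sim;
        sim.cb = trace;
        sim.execute();  // Prints pc=0..3, mirroring the per-instruction hook.
        return 0;
    }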
--- a/js/src/jit/mips32/Trampoline-mips32.cpp +++ b/js/src/jit/mips32/Trampoline-mips32.cpp @@ -675,18 +675,19 @@ JitRuntime::generateVMWrapper(JSContext* static_assert((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0, "Wrapper register set should be a superset of Volatile register set."); // The context is the first argument; a0 is the first argument register. Register cxreg = a0; regs.take(cxreg); // If it isn't a tail call, then the return address needs to be saved - if (f.expectTailCall == NonTailCall) + if (f.expectTailCall == NonTailCall) { masm.pushReturnAddress(); + } // We're aligned to an exit frame, so link it up. masm.loadJSContext(cxreg); masm.enterExitFrame(cxreg, regs.getAny(), &f); // Save the base of the argument set stored on the stack. Register argsBase = InvalidReg; if (f.explicitArgs) { @@ -741,18 +742,19 @@ JitRuntime::generateVMWrapper(JSContext* } // Reserve stack for double sized args that are copied to be aligned. outParamOffset += f.doubleByRefArgs() * sizeof(double); Register doubleArgs = t0; masm.reserveStack(outParamOffset); masm.movePtr(StackPointer, doubleArgs); - if (!generateTLEnterVM(masm, f)) + if (!generateTLEnterVM(masm, f)) { return false; + } masm.setupAlignedABICall(); masm.passABIArg(cxreg); size_t argDisp = 0; size_t doubleArgDisp = 0; // Copy any arguments. @@ -792,18 +794,19 @@ JitRuntime::generateVMWrapper(JSContext* // Copy the implicit outparam, if any. if (f.outParam != Type_Void) { masm.passABIArg(MoveOperand(doubleArgs, outParamOffset, MoveOperand::EFFECTIVE_ADDRESS), MoveOp::GENERAL); } masm.callWithABI(f.wrapped, MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame); - if (!generateTLExitVM(masm, f)) + if (!generateTLExitVM(masm, f)) { return false; + } // Test for failure. switch (f.failType()) { case Type_Object: masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel()); break; case Type_Bool: // Called functions return bools, which are 0/false and non-zero/true
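generateVMWrapper above pushes the return address only when the wrapper is not a tail call, since a tail call reuses the caller's return address. A schematic of that prologue decision, with strings as placeholder "instructions":

    #include <cstdio>
    #include <vector>

    enum TailCallMode { NonTailCall, TailCall };

    static void emitPrologue(TailCallMode mode, std::vector<const char*>& code) {
        if (mode == NonTailCall) {
            // Only the non-tail path needs to spill $ra before linking the
            // exit frame; a tail call keeps the caller's return address live.
            code.push_back("push $ra");
        }
        code.push_back("link exit frame");
    }

    int main() {
        std::vector<const char*> a, b;
        emitPrologue(NonTailCall, a);  // push $ra; link exit frame
        emitPrologue(TailCall, b);     // link exit frame
        printf("%zu vs %zu instructions\n", a.size(), b.size());
        return 0;
    }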
--- a/js/src/jit/mips64/Architecture-mips64.cpp +++ b/js/src/jit/mips64/Architecture-mips64.cpp @@ -27,38 +27,41 @@ const Registers::SetType Registers::JSCa const Registers::SetType Registers::CallMask = (1 << Registers::v0); FloatRegisters::Encoding FloatRegisters::FromName(const char* name) { for (size_t i = 0; i < Total; i++) { - if (strcmp(GetName(Encoding(i)), name) == 0) + if (strcmp(GetName(Encoding(i)), name) == 0) { return Encoding(i); + } } return Invalid; } FloatRegister FloatRegister::singleOverlay() const { MOZ_ASSERT(!isInvalid()); - if (kind_ == Codes::Double) + if (kind_ == Codes::Double) { return FloatRegister(reg_, Codes::Single); + } return *this; } FloatRegister FloatRegister::doubleOverlay() const { MOZ_ASSERT(!isInvalid()); - if (kind_ != Codes::Double) + if (kind_ != Codes::Double) { return FloatRegister(reg_, Codes::Double); + } return *this; } FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) { LiveFloatRegisterSet mod; for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
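singleOverlay()/doubleOverlay() above reinterpret the same hardware register at the other width rather than allocating a new one. A reduced model of that overlay relationship (a sketch, not the jit::FloatRegister class):

    #include <cassert>
    #include <cstdio>

    struct FloatReg {
        enum Kind { Single, Double };
        int reg;
        Kind kind;

        FloatReg singleOverlay() const {
            // Same encoding viewed as a single; an already-single register
            // returns itself unchanged.
            return kind == Double ? FloatReg{reg, Single} : *this;
        }
        FloatReg doubleOverlay() const {
            return kind != Double ? FloatReg{reg, Double} : *this;
        }
    };

    int main() {
        FloatReg f2{2, FloatReg::Double};
        FloatReg s = f2.singleOverlay();
        assert(s.reg == 2 && s.kind == FloatReg::Single);
        printf("f%d viewed as single\n", s.reg);
        return 0;
    }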
--- a/js/src/jit/mips64/Architecture-mips64.h +++ b/js/src/jit/mips64/Architecture-mips64.h @@ -167,30 +167,33 @@ class FloatRegister : public FloatRegist } bool aliases(const FloatRegister& other) { return reg_ == other.reg_; } uint32_t numAliased() const { return 2; } FloatRegister aliased(uint32_t aliasIdx) { - if (aliasIdx == 0) + if (aliasIdx == 0) { return *this; + } MOZ_ASSERT(aliasIdx == 1); - if (isDouble()) + if (isDouble()) { return singleOverlay(); + } return doubleOverlay(); } uint32_t numAlignedAliased() const { return 2; } FloatRegister alignedAliased(uint32_t aliasIdx) { MOZ_ASSERT(isDouble()); - if (aliasIdx == 0) + if (aliasIdx == 0) { return *this; + } MOZ_ASSERT(aliasIdx == 1); return singleOverlay(); } SetType alignedOrDominatedAliasedSet() const { return Codes::Spread << reg_; }
--- a/js/src/jit/mips64/Assembler-mips64.cpp +++ b/js/src/jit/mips64/Assembler-mips64.cpp @@ -22,35 +22,38 @@ ABIArgGenerator::ABIArgGenerator() ABIArg ABIArgGenerator::next(MIRType type) { switch (type) { case MIRType::Int32: case MIRType::Int64: case MIRType::Pointer: { Register destReg; - if (GetIntArgReg(usedArgSlots_, &destReg)) + if (GetIntArgReg(usedArgSlots_, &destReg)) { current_ = ABIArg(destReg); - else + } else { current_ = ABIArg(GetArgStackDisp(usedArgSlots_)); + } usedArgSlots_++; break; } case MIRType::Float32: case MIRType::Double: { FloatRegister destFReg; FloatRegister::ContentType contentType; - if (!usedArgSlots_) + if (!usedArgSlots_) { firstArgFloat = true; + } contentType = (type == MIRType::Double) ? FloatRegisters::Double : FloatRegisters::Single; - if (GetFloatArgReg(usedArgSlots_, &destFReg)) + if (GetFloatArgReg(usedArgSlots_, &destFReg)) { current_ = ABIArg(FloatRegister(destFReg.id(), contentType)); - else + } else { current_ = ABIArg(GetArgStackDisp(usedArgSlots_)); + } usedArgSlots_++; break; } default: MOZ_CRASH("Unexpected argument type"); } return current_; } @@ -96,18 +99,19 @@ jit::PatchJump(CodeLocationJump& jump_, } void Assembler::executableCopy(uint8_t* buffer, bool flushICache) { MOZ_ASSERT(isFinished); m_buffer.executableCopy(buffer); - if (flushICache) + if (flushICache) { AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size()); + } } uintptr_t Assembler::GetPointer(uint8_t* instPtr) { Instruction* inst = (Instruction*)instPtr; return Assembler::ExtractLoad64Value(inst); }
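ABIArgGenerator::next() above advances a single usedArgSlots_ counter for integer and floating-point arguments alike, which is how the MIPS n64 convention assigns both classes from one slot sequence. A compact model of that bookkeeping (the helper and its output encoding are illustrative):

    #include <cstdio>

    struct ArgLoc {
        bool inReg;
        int index;  // Register slot number, or stack offset in bytes.
    };

    static ArgLoc nextArg(unsigned& usedSlots) {
        const unsigned numArgRegs = 8;  // a0-a7 / f12-f19 under n64.
        ArgLoc loc = (usedSlots < numArgRegs)
            ? ArgLoc{true, int(usedSlots)}
            : ArgLoc{false, int((usedSlots - numArgRegs) * 8)};
        usedSlots++;  // One shared counter, regardless of argument class.
        return loc;
    }

    int main() {
        unsigned used = 0;
        for (int i = 0; i < 10; i++) {
            ArgLoc l = nextArg(used);
            printf("arg%d -> %s%d\n", i, l.inReg ? "reg " : "stack+", l.index);
        }
        return 0;
    }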
--- a/js/src/jit/mips64/Assembler-mips64.h +++ b/js/src/jit/mips64/Assembler-mips64.h @@ -27,18 +27,19 @@ class ABIArgGenerator ABIArg current_; public: ABIArgGenerator(); ABIArg next(MIRType argType); ABIArg& current() { return current_; } uint32_t stackBytesConsumedSoFar() const { - if (usedArgSlots_ <= 8) + if (usedArgSlots_ <= 8) { return 0; + } return (usedArgSlots_ - 8) * sizeof(int64_t); } }; // These registers may be volatile or nonvolatile. static constexpr Register ABINonArgReg0 = t0; static constexpr Register ABINonArgReg1 = t1; @@ -228,24 +229,26 @@ GetFloatArgReg(uint32_t usedArgSlots, Fl // run out too. static inline bool GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out) { // NOTE: We can't properly determine which regs are used if there are // float arguments. If this is needed, we will have to guess. MOZ_ASSERT(usedFloatArgs == 0); - if (GetIntArgReg(usedIntArgs, out)) + if (GetIntArgReg(usedIntArgs, out)) { return true; + } // Unfortunately, we have to assume things about the point at which // GetIntArgReg returns false, because we need to know how many registers it // can allocate. usedIntArgs -= NumIntArgRegs; - if (usedIntArgs >= NumCallTempNonArgRegs) + if (usedIntArgs >= NumCallTempNonArgRegs) { return false; + } *out = CallTempNonArgRegs[usedIntArgs]; return true; } static inline uint32_t GetArgStackDisp(uint32_t usedArgSlots) { MOZ_ASSERT(usedArgSlots >= NumIntArgRegs);
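stackBytesConsumedSoFar() above encodes the rule that only argument slots past the eight registers consume stack memory. The same arithmetic as a standalone checked function:

    #include <cassert>
    #include <cstdint>

    static uint32_t stackBytesConsumed(uint32_t usedArgSlots) {
        if (usedArgSlots <= 8) {
            return 0;  // Everything still fits in argument registers.
        }
        return (usedArgSlots - 8) * sizeof(int64_t);  // 8 bytes per spilled slot.
    }

    int main() {
        assert(stackBytesConsumed(3) == 0);
        assert(stackBytesConsumed(8) == 0);
        assert(stackBytesConsumed(11) == 24);  // Three slots past the registers.
        return 0;
    }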
--- a/js/src/jit/mips64/CodeGenerator-mips64.cpp +++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp @@ -127,39 +127,41 @@ CodeGenerator::visitCompareB(LCompareB* const ValueOperand lhs = ToValue(lir, LCompareB::Lhs); const LAllocation* rhs = lir->rhs(); const Register output = ToRegister(lir->output()); MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE); Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop()); // Load boxed boolean in ScratchRegister. - if (rhs->isConstant()) + if (rhs->isConstant()) { masm.moveValue(rhs->toConstant()->toJSValue(), ValueOperand(ScratchRegister)); - else + } else { masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchRegister); + } // Perform the comparison. masm.cmpPtrSet(cond, lhs.valueReg(), ScratchRegister, output); } void CodeGenerator::visitCompareBAndBranch(LCompareBAndBranch* lir) { MCompare* mir = lir->cmpMir(); const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs); const LAllocation* rhs = lir->rhs(); MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE); // Load boxed boolean in ScratchRegister. - if (rhs->isConstant()) + if (rhs->isConstant()) { masm.moveValue(rhs->toConstant()->toJSValue(), ValueOperand(ScratchRegister)); - else + } else { masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchRegister); + } // Perform the comparison. Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop()); emitBranch(lhs.valueReg(), ScratchRegister, cond, lir->ifTrue(), lir->ifFalse()); } void CodeGenerator::visitCompareBitwise(LCompareBitwise* lir) @@ -254,30 +256,32 @@ CodeGenerator::visitDivOrModI64(LDivOrMo masm.bind(&nonZero); } // Handle an integer overflow exception from INT64_MIN / -1. if (lir->canBeNegativeOverflow()) { Label notOverflow; masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), ¬Overflow); masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), ¬Overflow); - if (lir->mir()->isMod()) + if (lir->mir()->isMod()) { masm.ma_xor(output, output); - else + } else { masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset()); + } masm.jump(&done); masm.bind(¬Overflow); } masm.as_ddiv(lhs, rhs); - if (lir->mir()->isMod()) + if (lir->mir()->isMod()) { masm.as_mfhi(output); - else + } else { masm.as_mflo(output); + } masm.bind(&done); } void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) { Register lhs = ToRegister(lir->lhs()); @@ -291,20 +295,21 @@ CodeGenerator::visitUDivOrModI64(LUDivOr Label nonZero; masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero); masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset()); masm.bind(&nonZero); } masm.as_ddivu(lhs, rhs); - if (lir->mir()->isMod()) + if (lir->mir()->isMod()) { masm.as_mfhi(output); - else + } else { masm.as_mflo(output); + } masm.bind(&done); } template <typename T> void CodeGeneratorMIPS64::emitWasmLoadI64(T* lir) { @@ -406,33 +411,35 @@ CodeGenerator::visitWasmReinterpretToI64 } void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) { const LAllocation* input = lir->getOperand(0); Register output = ToRegister(lir->output()); - if (lir->mir()->isUnsigned()) + if (lir->mir()->isUnsigned()) { masm.ma_dext(output, ToRegister(input), Imm32(0), Imm32(32)); - else + } else { masm.ma_sll(output, ToRegister(input), Imm32(0)); + } } void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) { const LAllocation* input = lir->getOperand(0); Register output = ToRegister(lir->output()); if (lir->mir()->bottomHalf()) { - if (input->isMemory()) + if 
(input->isMemory()) { masm.load32(ToAddress(input), output); - else + } else { masm.ma_sll(output, ToRegister(input), Imm32(0)); + } } else { MOZ_CRASH("Not implemented."); } } void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) { @@ -492,51 +499,55 @@ CodeGenerator::visitWasmTruncateToInt64( auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output); addOutOfLineCode(ool, mir); Label* oolEntry = ool->entry(); Label* oolRejoin = ool->rejoin(); bool isSaturating = mir->isSaturating(); if (fromType == MIRType::Double) { - if (mir->isUnsigned()) + if (mir->isUnsigned()) { masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry, oolRejoin, InvalidFloatReg); - else + } else { masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry, oolRejoin, InvalidFloatReg); + } } else { - if (mir->isUnsigned()) + if (mir->isUnsigned()) { masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry, oolRejoin, InvalidFloatReg); - else + } else { masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry, oolRejoin, InvalidFloatReg); + } } } void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) { Register64 input = ToRegister64(lir->getInt64Operand(0)); FloatRegister output = ToFloatRegister(lir->output()); MIRType outputType = lir->mir()->type(); MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32); if (outputType == MIRType::Double) { - if (lir->mir()->isUnsigned()) + if (lir->mir()->isUnsigned()) { masm.convertUInt64ToDouble(input, output, Register::Invalid()); - else + } else { masm.convertInt64ToDouble(input, output); + } } else { - if (lir->mir()->isUnsigned()) + if (lir->mir()->isUnsigned()) { masm.convertUInt64ToFloat32(input, output, Register::Invalid()); - else + } else { masm.convertInt64ToFloat32(input, output); + } } } void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) { Register64 input = ToRegister64(lir->getInt64Operand(0)); MBasicBlock* ifTrue = lir->ifTrue();
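visitDivOrModI64 above emits explicit divide-by-zero and INT64_MIN / -1 guards before the hardware divide, because the MIPS ddiv instruction gives no usable diagnostics on its own. The semantics those guards enforce, as plain C++ (a sketch, not SpiderMonkey API):

    #include <cassert>
    #include <cstdint>

    enum class DivResult { Ok, DivideByZero, Overflow };

    static DivResult checkedDivMod(int64_t lhs, int64_t rhs, bool isMod,
                                   int64_t* out) {
        if (rhs == 0) {
            return DivResult::DivideByZero;  // Codegen traps IntegerDivideByZero.
        }
        if (lhs == INT64_MIN && rhs == -1) {
            if (isMod) {
                *out = 0;  // The codegen zeroes the output for mod.
                return DivResult::Ok;
            }
            return DivResult::Overflow;  // Div traps IntegerOverflow.
        }
        *out = isMod ? lhs % rhs : lhs / rhs;
        return DivResult::Ok;
    }

    int main() {
        int64_t r;
        assert(checkedDivMod(10, 3, true, &r) == DivResult::Ok && r == 1);
        assert(checkedDivMod(INT64_MIN, -1, true, &r) == DivResult::Ok && r == 0);
        assert(checkedDivMod(INT64_MIN, -1, false, &r) == DivResult::Overflow);
        assert(checkedDivMod(1, 0, false, &r) == DivResult::DivideByZero);
        return 0;
    }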
--- a/js/src/jit/mips64/LIR-mips64.h +++ b/js/src/jit/mips64/LIR-mips64.h @@ -72,29 +72,32 @@ class LDivOrModI64 : public LBinaryMath< return getTemp(0); } MBinaryArithInstruction* mir() const { MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); return static_cast<MBinaryArithInstruction*>(mir_); } bool canBeDivideByZero() const { - if (mir_->isMod()) + if (mir_->isMod()) { return mir_->toMod()->canBeDivideByZero(); + } return mir_->toDiv()->canBeDivideByZero(); } bool canBeNegativeOverflow() const { - if (mir_->isMod()) + if (mir_->isMod()) { return mir_->toMod()->canBeNegativeDividend(); + } return mir_->toDiv()->canBeNegativeOverflow(); } wasm::BytecodeOffset bytecodeOffset() const { MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); - if (mir_->isMod()) + if (mir_->isMod()) { return mir_->toMod()->bytecodeOffset(); + } return mir_->toDiv()->bytecodeOffset(); } }; class LUDivOrModI64 : public LBinaryMath<1> { public: LIR_HEADER(UDivOrModI64); @@ -114,24 +117,26 @@ class LUDivOrModI64 : public LBinaryMath return mir()->isTruncated() ? "Truncated" : nullptr; } MBinaryArithInstruction* mir() const { MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); return static_cast<MBinaryArithInstruction*>(mir_); } bool canBeDivideByZero() const { - if (mir_->isMod()) + if (mir_->isMod()) { return mir_->toMod()->canBeDivideByZero(); + } return mir_->toDiv()->canBeDivideByZero(); } wasm::BytecodeOffset bytecodeOffset() const { MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); - if (mir_->isMod()) + if (mir_->isMod()) { return mir_->toMod()->bytecodeOffset(); + } return mir_->toDiv()->bytecodeOffset(); } }; class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0> { public: LIR_HEADER(WasmTruncateToInt64);
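The LIR accessors above repeatedly dispatch on mir_->isMod() to reach either the mod node or the div node behind one shared pointer. Stripped to its shape, with plain fields standing in for the two MIR nodes:

    #include <cassert>

    struct DivOrModInfo {
        bool isMod;
        bool modCanBeZero;  // Stand-in for toMod()->canBeDivideByZero().
        bool divCanBeZero;  // Stand-in for toDiv()->canBeDivideByZero().

        bool canBeDivideByZero() const {
            if (isMod) {
                return modCanBeZero;  // Forward to the mod node's analysis.
            }
            return divCanBeZero;      // Otherwise it must be a div node.
        }
    };

    int main() {
        assert((DivOrModInfo{true, true, false}.canBeDivideByZero()));
        assert(!(DivOrModInfo{false, true, false}.canBeDivideByZero()));
        return 0;
    }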
--- a/js/src/jit/mips64/Lowering-mips64.cpp +++ b/js/src/jit/mips64/Lowering-mips64.cpp @@ -102,18 +102,19 @@ LIRGenerator::visitBox(MBox* box) void LIRGenerator::visitUnbox(MUnbox* unbox) { MDefinition* box = unbox->getOperand(0); if (box->type() == MIRType::ObjectOrNull) { LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(box)); - if (unbox->fallible()) + if (unbox->fallible()) { assignSnapshot(lir, unbox->bailoutKind()); + } defineReuseInput(lir, unbox, 0); return; } MOZ_ASSERT(box->type() == MIRType::Value); LUnbox* lir; if (IsFloatingPointType(unbox->type())) { @@ -121,18 +122,19 @@ LIRGenerator::visitUnbox(MUnbox* unbox) } else if (unbox->fallible()) { // If the unbox is fallible, load the Value in a register first to // avoid multiple loads. lir = new(alloc()) LUnbox(useRegisterAtStart(box)); } else { lir = new(alloc()) LUnbox(useAtStart(box)); } - if (unbox->fallible()) + if (unbox->fallible()) { assignSnapshot(lir, unbox->bailoutKind()); + } define(lir, unbox); } void LIRGenerator::visitReturn(MReturn* ret) { MDefinition* opd = ret->getOperand(0);
--- a/js/src/jit/mips64/MacroAssembler-mips64-inl.h +++ b/js/src/jit/mips64/MacroAssembler-mips64-inl.h @@ -428,38 +428,40 @@ MacroAssembler::rshift64Arithmetic(Regis // =============================================================== // Rotation functions void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest, Register temp) { MOZ_ASSERT(temp == InvalidReg); - if (count.value) + if (count.value) { ma_drol(dest.reg, src.reg, count); - else + } else { ma_move(dest.reg, src.reg); + } } void MacroAssembler::rotateLeft64(Register count, Register64 src, Register64 dest, Register temp) { MOZ_ASSERT(temp == InvalidReg); ma_drol(dest.reg, src.reg, count); } void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest, Register temp) { MOZ_ASSERT(temp == InvalidReg); - if (count.value) + if (count.value) { ma_dror(dest.reg, src.reg, count); - else + } else { ma_move(dest.reg, src.reg); + } } void MacroAssembler::rotateRight64(Register count, Register64 src, Register64 dest, Register temp) { MOZ_ASSERT(temp == InvalidReg); ma_dror(dest.reg, src.reg, count); } @@ -533,33 +535,35 @@ MacroAssembler::branch64(Condition cond, MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal || cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual || cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual || cond == Assembler::Below || cond == Assembler::BelowOrEqual || cond == Assembler::Above || cond == Assembler::AboveOrEqual, "other condition codes not supported"); branchPtr(cond, lhs.reg, ImmWord(val.value), success); - if (fail) + if (fail) { jump(fail); + } } void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail) { MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal || cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual || cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual || cond == Assembler::Below || cond == Assembler::BelowOrEqual || cond == Assembler::Above || cond == Assembler::AboveOrEqual, "other condition codes not supported"); branchPtr(cond, lhs.reg, rhs.reg, success); - if (fail) + if (fail) { jump(fail); + } } void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label) { MOZ_ASSERT(cond == Assembler::NotEqual, "other condition codes not supported"); @@ -577,18 +581,19 @@ MacroAssembler::branch64(Condition cond, loadPtr(rhs, scratch); branchPtr(cond, lhs, scratch, label); } void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label) { - if (rhs != ScratchRegister) + if (rhs != ScratchRegister) { movePtr(rhs, ScratchRegister); + } // Instead of unboxing lhs, box rhs and do direct comparison with lhs. rshiftPtr(Imm32(1), ScratchRegister); branchPtr(cond, lhs, ScratchRegister, label); } template <class L> void MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp, @@ -799,18 +804,19 @@ MacroAssemblerMIPS64Compat::incrementInt { asMasm().add32(Imm32(1), addr); } void MacroAssemblerMIPS64Compat::computeEffectiveAddress(const BaseIndex& address, Register dest) { computeScaledAddress(address, dest); - if (address.offset) + if (address.offset) { asMasm().addPtr(Imm32(address.offset), dest); + } } void MacroAssemblerMIPS64Compat::retn(Imm32 n) { // pc <- [sp]; sp += n loadPtr(Address(StackPointer, 0), ra); asMasm().addPtr(n, StackPointer);
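rotateLeft64/rotateRight64 above special-case a zero immediate with a plain move, since the underlying rotate would otherwise be asked for the full 64-bit width, which is not encodable. A portable rotate with the same zero-count handling:

    #include <cassert>
    #include <cstdint>

    static uint64_t rotl64(uint64_t v, unsigned count) {
        count &= 63;
        if (count == 0) {
            return v;  // Mirrors the ma_move fallback for a zero immediate.
        }
        return (v << count) | (v >> (64 - count));
    }

    int main() {
        assert(rotl64(0x1ULL, 0) == 0x1ULL);
        assert(rotl64(0x1ULL, 1) == 0x2ULL);
        assert(rotl64(0x8000000000000000ULL, 1) == 0x1ULL);  // Wraps around.
        return 0;
    }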
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp +++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp @@ -197,39 +197,43 @@ MacroAssemblerMIPS64::ma_li(Register des if (-1 == (value >> 31) || 0 == (value >> 31)) { as_lui(dest, uint16_t(value >> 16)); } else if (0 == (value >> 32)) { as_lui(dest, uint16_t(value >> 16)); as_dinsu(dest, zero, 32, 32); } else if (-1 == (value >> 47) || 0 == (value >> 47)) { as_lui(dest, uint16_t(value >> 32)); - if (uint16_t(value >> 16)) + if (uint16_t(value >> 16)) { as_ori(dest, dest, uint16_t(value >> 16)); + } as_dsll(dest, dest, 16); } else if (0 == (value >> 48)) { as_lui(dest, uint16_t(value >> 32)); as_dinsu(dest, zero, 32, 32); - if (uint16_t(value >> 16)) + if (uint16_t(value >> 16)) { as_ori(dest, dest, uint16_t(value >> 16)); + } as_dsll(dest, dest, 16); } else { as_lui(dest, uint16_t(value >> 48)); - if (uint16_t(value >> 32)) + if (uint16_t(value >> 32)) { as_ori(dest, dest, uint16_t(value >> 32)); + } if (uint16_t(value >> 16)) { as_dsll(dest, dest, 16); as_ori(dest, dest, uint16_t(value >> 16)); as_dsll(dest, dest, 16); } else { as_dsll32(dest, dest, 32); } } - if (uint16_t(value)) + if (uint16_t(value)) { as_ori(dest, dest, uint16_t(value)); + } } // This method generates lui, dsll and ori instruction block that can be modified // by UpdateLoad64Value, either during compilation (eg. Assembler::bind), or // during execution (eg. jit::PatchJump). void MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmPtr imm) { @@ -261,58 +265,63 @@ MacroAssemblerMIPS64::ma_dnegu(Register { as_dsubu(rd, zero, rs); } // Shifts void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Imm32 shift) { - if (31 < shift.value) + if (31 < shift.value) { as_dsll32(rd, rt, shift.value); - else + } else { as_dsll(rd, rt, shift.value); + } } void MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Imm32 shift) { - if (31 < shift.value) + if (31 < shift.value) { as_dsrl32(rd, rt, shift.value); - else + } else { as_dsrl(rd, rt, shift.value); + } } void MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Imm32 shift) { - if (31 < shift.value) + if (31 < shift.value) { as_dsra32(rd, rt, shift.value); - else + } else { as_dsra(rd, rt, shift.value); + } } void MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Imm32 shift) { - if (31 < shift.value) + if (31 < shift.value) { as_drotr32(rd, rt, shift.value); - else + } else { as_drotr(rd, rt, shift.value); + } } void MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Imm32 shift) { uint32_t s = 64 - shift.value; - if (31 < s) + if (31 < s) { as_drotr32(rd, rt, s); - else + } else { as_drotr(rd, rt, s); + } } void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Register shift) { as_dsllv(rd, rt, shift); } @@ -340,33 +349,35 @@ MacroAssemblerMIPS64::ma_drol(Register r as_dsubu(ScratchRegister, zero, shift); as_drotrv(rd, rt, ScratchRegister); } void MacroAssemblerMIPS64::ma_dins(Register rt, Register rs, Imm32 pos, Imm32 size) { if (pos.value >= 0 && pos.value < 32) { - if (pos.value + size.value > 32) + if (pos.value + size.value > 32) { as_dinsm(rt, rs, pos.value, size.value); - else + } else { as_dins(rt, rs, pos.value, size.value); + } } else { as_dinsu(rt, rs, pos.value, size.value); } } void MacroAssemblerMIPS64::ma_dext(Register rt, Register rs, Imm32 pos, Imm32 size) { if (pos.value >= 0 && pos.value < 32) { - if (size.value > 32) + if (size.value > 32) { as_dextm(rt, rs, pos.value, size.value); - else + } else { as_dext(rt, rs, pos.value, size.value); + } } else { as_dextu(rt, rs, pos.value, 
size.value); } } void MacroAssemblerMIPS64::ma_dctz(Register rd, Register rs) { @@ -506,32 +517,35 @@ MacroAssemblerMIPS64::ma_load(Register d encodedOffset = Imm16(0).encode(); } else { encodedOffset = Imm16(address.offset).encode(); base = address.base; } switch (size) { case SizeByte: - if (ZeroExtend == extension) + if (ZeroExtend == extension) { as_lbu(dest, base, encodedOffset); - else + } else { as_lb(dest, base, encodedOffset); + } break; case SizeHalfWord: - if (ZeroExtend == extension) + if (ZeroExtend == extension) { as_lhu(dest, base, encodedOffset); - else + } else { as_lh(dest, base, encodedOffset); + } break; case SizeWord: - if (ZeroExtend == extension) + if (ZeroExtend == extension) { as_lwu(dest, base, encodedOffset); - else + } else { as_lw(dest, base, encodedOffset); + } break; case SizeDouble: as_ld(dest, base, encodedOffset); break; default: MOZ_CRASH("Invalid argument for ma_load"); } } @@ -667,55 +681,59 @@ MacroAssemblerMIPS64::ma_bal(Label* labe { spew("branch .Llabel %p\n", label); if (label->bound()) { // Generate the long jump for calls because return address has to be // the address after the reserved block. addLongJump(nextOffset(), BufferOffset(label->offset())); ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET)); as_jalr(ScratchRegister); - if (delaySlotFill == FillDelaySlot) + if (delaySlotFill == FillDelaySlot) { as_nop(); + } return; } // Second word holds a pointer to the next branch in label's chain. uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET; // Make the whole branch continous in the buffer. The '6' // instructions are writing at below (contain delay slot). m_buffer.ensureSpace(6 * sizeof(uint32_t)); spew("bal .Llabel %p\n", label); BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode()); writeInst(nextInChain); - if (!oom()) + if (!oom()) { label->use(bo.getOffset()); + } // Leave space for long jump. as_nop(); as_nop(); as_nop(); - if (delaySlotFill == FillDelaySlot) + if (delaySlotFill == FillDelaySlot) { as_nop(); + } } void MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label, JumpKind jumpKind) { // simply output the pointer of one label as its id, // notice that after one label destructor, the pointer will be reused. spew("branch .Llabel %p", label); MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode()); InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0)); if (label->bound()) { int32_t offset = label->offset() - m_buffer.nextOffset().getOffset(); - if (BOffImm16::IsInRange(offset)) + if (BOffImm16::IsInRange(offset)) { jumpKind = ShortJump; + } if (jumpKind == ShortJump) { MOZ_ASSERT(BOffImm16::IsInRange(offset)); code.setBOffImm16(BOffImm16(offset)); #ifdef JS_JITSPEW decodeBranchInstAndSpew(code); #endif writeInst(code.encode()); @@ -759,41 +777,44 @@ MacroAssemblerMIPS64::branchWithCode(Ins // Indicate that this is short jump with offset 4. code.setBOffImm16(BOffImm16(4)); #ifdef JS_JITSPEW decodeBranchInstAndSpew(code); #endif BufferOffset bo = writeInst(code.encode()); writeInst(nextInChain); - if (!oom()) + if (!oom()) { label->use(bo.getOffset()); + } return; } bool conditional = code.encode() != inst_beq.encode(); // Make the whole branch continous in the buffer. The '7' // instructions are writing at below (contain conditional nop). 
m_buffer.ensureSpace(7 * sizeof(uint32_t)); #ifdef JS_JITSPEW decodeBranchInstAndSpew(code); #endif BufferOffset bo = writeInst(code.encode()); writeInst(nextInChain); - if (!oom()) + if (!oom()) { label->use(bo.getOffset()); + } // Leave space for potential long jump. as_nop(); as_nop(); as_nop(); as_nop(); - if (conditional) + if (conditional) { as_nop(); + } } void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm, Condition c) { if (imm.value <= INT32_MAX) { ma_cmp_set(rd, rs, Imm32(uint32_t(imm.value)), c); } else { @@ -1693,18 +1714,19 @@ MacroAssemblerMIPS64Compat::loadValue(Ad { loadPtr(Address(src.base, src.offset), val.valueReg()); } void MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload, ValueOperand dest) { MOZ_ASSERT(dest.valueReg() != ScratchRegister); - if (payload != dest.valueReg()) + if (payload != dest.valueReg()) { ma_move(dest.valueReg(), payload); + } ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type))); ma_dins(dest.valueReg(), ScratchRegister, Imm32(JSVAL_TAG_SHIFT), Imm32(64 - JSVAL_TAG_SHIFT)); } void MacroAssemblerMIPS64Compat::pushValue(ValueOperand val) { // Allocate stack slots for Value. One for each. @@ -1916,18 +1938,19 @@ void MacroAssemblerMIPS64Compat::profilerExitFrame() { jump(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail()); } void MacroAssembler::subFromStackPtr(Imm32 imm32) { - if (imm32.value) + if (imm32.value) { asMasm().subPtr(imm32, StackPointer); + } } //{{{ check_macroassembler_style // =============================================================== // Stack manipulation functions. void MacroAssembler::PushRegsInMask(LiveRegisterSet set) @@ -1952,23 +1975,25 @@ void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore) { int32_t diff = set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes(); const int32_t reserved = diff; for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) { diff -= sizeof(intptr_t); - if (!ignore.has(*iter)) + if (!ignore.has(*iter)) { loadPtr(Address(StackPointer, diff), *iter); + } } for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) { diff -= sizeof(double); - if (!ignore.has(*iter)) + if (!ignore.has(*iter)) { loadDouble(Address(StackPointer, diff), *iter); + } } MOZ_ASSERT(diff == 0); freeStack(reserved); } void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest, Register) { @@ -1986,22 +2011,23 @@ MacroAssembler::storeRegsInMask(LiveRegi } MOZ_ASSERT(diffG == 0); for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) { FloatRegister reg = *iter; diffF -= reg.size(); numFpu -= 1; dest.offset -= reg.size(); - if (reg.isDouble()) + if (reg.isDouble()) { storeDouble(reg, dest); - else if (reg.isSingle()) + } else if (reg.isSingle()) { storeFloat32(reg, dest); - else + } else { MOZ_CRASH("Unknown register type."); + } } MOZ_ASSERT(numFpu == 0); diffF -= diffF % sizeof(uintptr_t); MOZ_ASSERT(diffF == 0); } // =============================================================== // ABI function calls. @@ -2043,18 +2069,19 @@ MacroAssembler::callWithABIPre(uint32_t* // Save $ra because call is going to clobber it. Restore it in // callWithABIPost. NOTE: This is needed for calls from SharedIC. // Maybe we can do this differently. storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t))); // Position all arguments. 
{ enoughMemory_ &= moveResolver_.resolve(); - if (!enoughMemory_) + if (!enoughMemory_) { return; + } MoveEmitter emitter(*this); emitter.emit(moveResolver_); emitter.finish(); } assertStackAlignment(ABIStackAlignment); } @@ -2130,18 +2157,19 @@ MacroAssembler::moveValue(const TypedOrV freg = scratch; } boxDouble(freg, dest, scratch); } void MacroAssembler::moveValue(const ValueOperand& src, const ValueOperand& dest) { - if (src == dest) + if (src == dest) { return; + } movePtr(src.valueReg(), dest.valueReg()); } void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) { if(!src.isGCThing()) { ma_li(dest.valueReg(), ImmWord(src.asRawBits())); @@ -2231,30 +2259,32 @@ MacroAssembler::storeUnboxedValue(const return; } // For known integers and booleans, we can just store the unboxed value if // the slot has the same type. if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) { if (value.constant()) { Value val = value.value(); - if (valueType == MIRType::Int32) + if (valueType == MIRType::Int32) { store32(Imm32(val.toInt32()), dest); - else + } else { store32(Imm32(val.toBoolean() ? 1 : 0), dest); + } } else { store32(value.reg().typedReg().gpr(), dest); } return; } - if (value.constant()) + if (value.constant()) { storeValue(value.value(), dest); - else + } else { storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest); + } } template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, const Address& dest, MIRType slotType); template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, const BaseObjectElementIndex& dest, MIRType slotType); @@ -2336,18 +2366,19 @@ MacroAssembler::wasmTruncateDoubleToInt6 MOZ_ASSERT(tempDouble.isInvalid()); as_truncld(ScratchDoubleReg, input); as_cfc1(ScratchRegister, Assembler::FCSR); moveFromDouble(ScratchDoubleReg, output.reg); ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1); ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual); - if (isSaturating) + if (isSaturating) { bind(oolRejoin); + } } void MacroAssembler::wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry, Label* oolRejoin, FloatRegister tempDouble) { MOZ_ASSERT(tempDouble.isInvalid()); @@ -2376,35 +2407,37 @@ MacroAssembler::wasmTruncateDoubleToUInt // Guard against negative values that result in 0 due the precision loss. 
as_sltiu(SecondScratchReg, output, 1); ma_or(ScratchRegister, SecondScratchReg); ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual); bind(&done); - if (isSaturating) + if (isSaturating) { bind(oolRejoin); + } } void MacroAssembler::wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry, Label* oolRejoin, FloatRegister tempFloat) { MOZ_ASSERT(tempFloat.isInvalid()); as_truncls(ScratchDoubleReg, input); as_cfc1(ScratchRegister, Assembler::FCSR); moveFromDouble(ScratchDoubleReg, output.reg); ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1); ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual); - if (isSaturating) + if (isSaturating) { bind(oolRejoin); + } } void MacroAssembler::wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry, Label* oolRejoin, FloatRegister tempFloat) { MOZ_ASSERT(tempFloat.isInvalid()); @@ -2433,18 +2466,19 @@ MacroAssembler::wasmTruncateFloat32ToUIn // Guard against negative values that result in 0 due the precision loss. as_sltiu(SecondScratchReg, output, 1); ma_or(ScratchRegister, SecondScratchReg); ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual); bind(&done); - if (isSaturating) + if (isSaturating) { bind(oolRejoin); + } } void MacroAssemblerMIPS64Compat::wasmLoadI64Impl(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr, Register ptrScratch, Register64 output, Register tmp) { uint32_t offset = access.offset();
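wasmTruncateDoubleToInt64 above converts with truncld and then inspects the FCSR CauseV (invalid operation) bit to detect NaN and out-of-range inputs. A portable equivalent makes the range check explicit before converting; the bounds below are the representable int64 domain for doubles (a sketch of the semantics, not the JIT's code path):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    static bool truncDoubleToInt64(double d, int64_t* out) {
        // Reject NaN, anything >= 2^63, and anything < -2^63; every other
        // double truncates to a representable int64.
        if (std::isnan(d) || d >= 9223372036854775808.0 ||
            d < -9223372036854775808.0) {
            return false;  // Would set CauseV on MIPS; caller takes the OOL path.
        }
        *out = int64_t(std::trunc(d));
        return true;
    }

    int main() {
        int64_t v;
        assert(truncDoubleToInt64(3.9, &v) && v == 3);
        assert(truncDoubleToInt64(-3.9, &v) && v == -3);
        assert(!truncDoubleToInt64(9.3e18, &v));  // Above INT64_MAX.
        assert(!truncDoubleToInt64(NAN, &v));
        return 0;
    }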
--- a/js/src/jit/mips64/MacroAssembler-mips64.h +++ b/js/src/jit/mips64/MacroAssembler-mips64.h @@ -242,18 +242,19 @@ class MacroAssemblerMIPS64Compat : publi } void mov(Address src, Register dest) { MOZ_CRASH("NYI-IC"); } void writeDataRelocation(const Value& val) { if (val.isGCThing()) { gc::Cell* cell = val.toGCThing(); - if (cell && gc::IsInsideNursery(cell)) + if (cell && gc::IsInsideNursery(cell)) { embedsNurseryPointers_ = true; + } dataRelocations_.writeUnsigned(currentOffset()); } } void branch(JitCode* c) { BufferOffset bo = m_buffer.nextOffset(); addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE); ma_liPatchable(ScratchRegister, ImmPtr(c->raw())); @@ -501,51 +502,54 @@ class MacroAssemblerMIPS64Compat : publi Address ToPayload(Address value) { return value; } CodeOffsetJump jumpWithPatch(RepatchLabel* label); template <typename T> void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) { - if (dest.isFloat()) + if (dest.isFloat()) { loadInt32OrDouble(address, dest.fpu()); - else if (type == MIRType::ObjectOrNull) + } else if (type == MIRType::ObjectOrNull) { unboxObjectOrNull(address, dest.gpr()); - else + } else { unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type)); + } } void storeUnboxedPayload(ValueOperand value, BaseIndex address, size_t nbytes, JSValueType type) { switch (nbytes) { case 8: - if (type == JSVAL_TYPE_OBJECT) + if (type == JSVAL_TYPE_OBJECT) { unboxObjectOrNull(value, SecondScratchReg); - else + } else { unboxNonDouble(value, SecondScratchReg, type); + } computeEffectiveAddress(address, ScratchRegister); as_sd(SecondScratchReg, ScratchRegister, 0); return; case 4: store32(value.valueReg(), address); return; case 1: store8(value.valueReg(), address); return; default: MOZ_CRASH("Bad payload width"); } } void storeUnboxedPayload(ValueOperand value, Address address, size_t nbytes, JSValueType type) { switch (nbytes) { case 8: - if (type == JSVAL_TYPE_OBJECT) + if (type == JSVAL_TYPE_OBJECT) { unboxObjectOrNull(value, SecondScratchReg); - else + } else { unboxNonDouble(value, SecondScratchReg, type); + } storePtr(SecondScratchReg, address); return; case 4: store32(value.valueReg(), address); return; case 1: store8(value.valueReg(), address); return;
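storeUnboxedPayload above strips the tag from a boxed Value before storing the raw payload; the boxing scheme itself is ordinary shift-and-mask arithmetic. A reduced model using the 47-bit JSVAL_TAG_SHIFT of 64-bit builds (the tag and payload constants here are made up):

    #include <cassert>
    #include <cstdint>

    static const unsigned kTagShift = 47;  // Matches JSVAL_TAG_SHIFT on 64-bit.

    static uint64_t boxValue(uint64_t payload, uint32_t tag) {
        // Deposit the 17-bit tag into the upper bits, as ma_dins does.
        uint64_t payloadMask = (uint64_t(1) << kTagShift) - 1;
        return (uint64_t(tag) << kTagShift) | (payload & payloadMask);
    }

    static uint32_t extractTag(uint64_t boxed) {
        return uint32_t(boxed >> kTagShift);
    }

    int main() {
        uint64_t v = boxValue(0x1234, 0x1fff2);  // Hypothetical tag value.
        assert(extractTag(v) == 0x1fff2);
        assert((v & ((uint64_t(1) << kTagShift) - 1)) == 0x1234);
        return 0;
    }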
--- a/js/src/jit/mips64/MoveEmitter-mips64.cpp +++ b/js/src/jit/mips64/MoveEmitter-mips64.cpp @@ -133,19 +133,20 @@ MoveEmitterMIPS64::emitDoubleMove(const masm.moveDouble(from.floatReg(), to.floatReg()); } else if (to.isGeneralReg()) { masm.moveFromDouble(from.floatReg(), to.reg()); } else { MOZ_ASSERT(to.isMemory()); masm.storeDouble(from.floatReg(), getAdjustedAddress(to)); } } else if (to.isFloatReg()) { - if (from.isMemory()) + if (from.isMemory()) { masm.loadDouble(getAdjustedAddress(from), to.floatReg()); - else + } else { masm.moveToDouble(from.reg(), to.floatReg()); + } } else { MOZ_ASSERT(from.isMemory()); MOZ_ASSERT(to.isMemory()); masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg); masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to)); } }
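emitDoubleMove above dispatches on the source/destination shapes, and only the memory-to-memory case needs the scratch FP register. The decision table as a standalone function (strings stand in for the emitted operations):

    #include <cstdio>

    enum class Loc { FloatReg, GeneralReg, Memory };

    static const char* planDoubleMove(Loc from, Loc to) {
        if (from == Loc::FloatReg && to == Loc::FloatReg) return "moveDouble";
        if (from == Loc::FloatReg && to == Loc::GeneralReg) return "moveFromDouble";
        if (from == Loc::FloatReg) return "storeDouble";
        if (to == Loc::FloatReg)
            return from == Loc::Memory ? "loadDouble" : "moveToDouble";
        // The only remaining shape the emitter accepts is memory-to-memory,
        // which must bounce through the scratch double register.
        return "loadDouble -> scratch; storeDouble";
    }

    int main() {
        printf("%s\n", planDoubleMove(Loc::Memory, Loc::Memory));
        printf("%s\n", planDoubleMove(Loc::FloatReg, Loc::Memory));
        return 0;
    }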
--- a/js/src/jit/mips64/Simulator-mips64.cpp +++ b/js/src/jit/mips64/Simulator-mips64.cpp @@ -67,18 +67,19 @@ namespace js { namespace jit { static const Instr kCallRedirInstr = op_special | MAX_BREAK_CODE << FunctionBits | ff_break; // Utils functions. static uint32_t GetFCSRConditionBit(uint32_t cc) { - if (cc == 0) + if (cc == 0) { return 23; + } return 24 + cc; } // ----------------------------------------------------------------------------- // MIPS assembly various constants. class SimInstruction { @@ -556,21 +557,23 @@ mozilla::Atomic<size_t, mozilla::Release SimulatorProcess* SimulatorProcess::singleton_ = nullptr; int64_t Simulator::StopSimAt = -1; Simulator * Simulator::Create(JSContext* cx) { auto sim = MakeUnique<Simulator>(); - if (!sim) + if (!sim) { return nullptr; - - if (!sim->init()) + } + + if (!sim->init()) { return nullptr; + } int64_t stopAt; char* stopAtStr = getenv("MIPS_SIM_STOP_AT"); if (stopAtStr && sscanf(stopAtStr, "%" PRIi64, &stopAt) == 1) { fprintf(stderr, "\nStopping simulation at icount %" PRIi64 "\n", stopAt); Simulator::StopSimAt = stopAt; } @@ -630,32 +633,35 @@ void MipsDebugger::stop(SimInstruction* instr) { // Get the stop code. uint32_t code = instr->bits(25, 6); // Retrieve the encoded address, which comes just after this stop. char* msg = *reinterpret_cast<char**>(sim_->get_pc() + SimInstruction::kInstrSize); // Update this stop description. - if (!sim_->watchedStops_[code].desc_) + if (!sim_->watchedStops_[code].desc_) { sim_->watchedStops_[code].desc_ = msg; + } // Print the stop message and code if it is not the default code. - if (code != kMaxStopCode) + if (code != kMaxStopCode) { printf("Simulator hit stop %u: %s\n", code, msg); - else + } else { printf("Simulator hit %s\n", msg); + } sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize); debug(); } int64_t MipsDebugger::getRegisterValue(int regnum) { - if (regnum == kPCRegister) + if (regnum == kPCRegister) { return sim_->get_pc(); + } return sim_->getRegister(regnum); } int64_t MipsDebugger::getFPURegisterValueLong(int regnum) { return sim_->getFpuRegister(regnum); } @@ -676,72 +682,78 @@ bool MipsDebugger::getValue(const char* desc, int64_t* value) { Register reg = Register::FromName(desc); if (reg != InvalidReg) { *value = getRegisterValue(reg.code()); return true; } - if (strncmp(desc, "0x", 2) == 0) + if (strncmp(desc, "0x", 2) == 0) { return sscanf(desc, "%" PRIu64, reinterpret_cast<uint64_t*>(value)) == 1; + } return sscanf(desc, "%" PRIi64, value) == 1; } bool MipsDebugger::setBreakpoint(SimInstruction* breakpc) { // Check if a breakpoint can be set. If not return without any side-effects. - if (sim_->break_pc_ != nullptr) + if (sim_->break_pc_ != nullptr) { return false; + } // Set the breakpoint. sim_->break_pc_ = breakpc; sim_->break_instr_ = breakpc->instructionBits(); // Not setting the breakpoint instruction in the code itself. It will be set // when the debugger shell continues. 
return true; } bool MipsDebugger::deleteBreakpoint(SimInstruction* breakpc) { - if (sim_->break_pc_ != nullptr) + if (sim_->break_pc_ != nullptr) { sim_->break_pc_->setInstructionBits(sim_->break_instr_); + } sim_->break_pc_ = nullptr; sim_->break_instr_ = 0; return true; } void MipsDebugger::undoBreakpoints() { - if (sim_->break_pc_) + if (sim_->break_pc_) { sim_->break_pc_->setInstructionBits(sim_->break_instr_); + } } void MipsDebugger::redoBreakpoints() { - if (sim_->break_pc_) + if (sim_->break_pc_) { sim_->break_pc_->setInstructionBits(kBreakpointInstr); + } } void MipsDebugger::printAllRegs() { int64_t value; for (uint32_t i = 0; i < Registers::Total; i++) { value = getRegisterValue(i); printf("%3s: 0x%016" PRIx64 " %20" PRIi64 " ", Registers::GetName(i), value, value); - if (i % 2) + if (i % 2) { printf("\n"); + } } printf("\n"); value = getRegisterValue(Simulator::LO); printf(" LO: 0x%016" PRIx64 " %20" PRIi64 " ", value, value); value = getRegisterValue(Simulator::HI); printf(" HI: 0x%016" PRIx64 " %20" PRIi64 "\n", value, value); value = getRegisterValue(Simulator::pc); @@ -782,24 +794,26 @@ ReadLine(const char* prompt) if (len > 0 && lineBuf[len - 1] == '\n') { // Since we read a new line we are done reading the line. This // will exit the loop after copying this buffer into the result. keepGoing = false; } if (!result) { // Allocate the initial result and make room for the terminating '\0' result.reset(js_pod_malloc<char>(len + 1)); - if (!result) + if (!result) { return nullptr; + } } else { // Allocate a new result with enough room for the new addition. int new_len = offset + len + 1; char* new_result = js_pod_malloc<char>(new_len); - if (!new_result) + if (!new_result) { return nullptr; + } // Copy the existing input into the new array and set the new // array as the result. memcpy(new_result, result.get(), offset * sizeof(char)); result.reset(new_result); } // Copy the newly read line into the result. 
memcpy(result.get() + offset, lineBuf, len * sizeof(char)); offset += len; @@ -815,18 +829,19 @@ DisassembleInstruction(uint64_t pc) { uint8_t* bytes = reinterpret_cast<uint8_t*>(pc); char hexbytes[256]; sprintf(hexbytes, "0x%x 0x%x 0x%x 0x%x", bytes[0], bytes[1], bytes[2], bytes[3]); char llvmcmd[1024]; sprintf(llvmcmd, "bash -c \"echo -n '%p'; echo '%s' | " "llvm-mc -disassemble -arch=mips64el -mcpu=mips64r2 | " "grep -v pure_instructions | grep -v .text\"", static_cast<void*>(bytes), hexbytes); - if (system(llvmcmd)) + if (system(llvmcmd)) { printf("Cannot disassemble instruction.\n"); + } } void MipsDebugger::debug() { intptr_t lastPC = -1; bool done = false; @@ -991,18 +1006,19 @@ MipsDebugger::debug() } else if (strcmp(cmd, "gdb") == 0) { printf("relinquishing control to gdb\n"); asm("int $3"); printf("regaining control from gdb\n"); } else if (strcmp(cmd, "break") == 0) { if (argc == 2) { int64_t value; if (getValue(arg1, &value)) { - if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) + if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) { printf("setting breakpoint failed\n"); + } } else { printf("%s unrecognized\n", arg1); } } else { printf("break <address>\n"); } } else if (strcmp(cmd, "del") == 0) { if (!deleteBreakpoint(nullptr)) { @@ -1149,22 +1165,24 @@ Simulator::setLastDebuggerInput(char* in js_free(lastDebuggerInput_); lastDebuggerInput_ = input; } static CachePage* GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache, void* page) { SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page); - if (p) + if (p) { return p->value(); + } AutoEnterOOMUnsafeRegion oomUnsafe; CachePage* new_page = js_new<CachePage>(); - if (!new_page || !i_cache.add(p, page, new_page)) + if (!new_page || !i_cache.add(p, page, new_page)) { oomUnsafe.crash("Simulator CachePage"); + } return new_page; } // Flush from start up to and not including start + size. static void FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache, intptr_t start, int size) { MOZ_ASSERT(size <= CachePage::kPageSize); @@ -1190,18 +1208,19 @@ FlushICacheLocked(SimulatorProcess::ICac while (!AllOnOnePage(start, size - 1)) { int bytes_to_flush = CachePage::kPageSize - offset; FlushOnePageLocked(i_cache, start, bytes_to_flush); start += bytes_to_flush; size -= bytes_to_flush; MOZ_ASSERT((start & CachePage::kPageMask) == 0); offset = 0; } - if (size != 0) + if (size != 0) { FlushOnePageLocked(i_cache, start, size); + } } /* static */ void SimulatorProcess::checkICacheLocked(SimInstruction* instr) { intptr_t address = reinterpret_cast<intptr_t>(instr); void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask)); void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask)); @@ -1263,44 +1282,48 @@ Simulator::Simulator() break_pc_ = nullptr; break_instr_ = 0; single_stepping_ = false; single_step_callback_ = nullptr; single_step_callback_arg_ = nullptr; // Set up architecture state. // All registers are initialized to zero to start with. - for (int i = 0; i < Register::kNumSimuRegisters; i++) + for (int i = 0; i < Register::kNumSimuRegisters; i++) { registers_[i] = 0; - for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) + } + for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) { FPUregisters_[i] = 0; + } FCSR_ = 0; LLBit_ = false; LLAddr_ = 0; lastLLValue_ = 0; // The ra and pc are initialized to a known bad value that will cause an // access violation if the simulator ever tries to execute it. 
registers_[pc] = bad_ra; registers_[ra] = bad_ra; - for (int i = 0; i < kNumExceptions; i++) + for (int i = 0; i < kNumExceptions; i++) { exceptions[i] = 0; + } lastDebuggerInput_ = nullptr; } bool Simulator::init() { // Allocate 2MB for the stack. Note that we will only use 1MB, see below. static const size_t stackSize = 2 * 1024 * 1024; stack_ = js_pod_malloc<char>(stackSize); - if (!stack_) + if (!stack_) { return false; + } // Leave a safety margin of 1MB to prevent overrunning the stack when // pushing values (total stack size is 2MB). stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024; // The sp is initialized to point to the bottom (high address) of the // allocated stack area. To be safe in potential stack underflows we leave // some buffer below. @@ -1378,18 +1401,19 @@ Simulator::~Simulator() { js_free(stack_); } SimulatorProcess::SimulatorProcess() : cacheLock_(mutexid::SimulatorCacheLock) , redirection_(nullptr) { - if (getenv("MIPS_SIM_ICACHE_CHECKS")) + if (getenv("MIPS_SIM_ICACHE_CHECKS")) { ICacheCheckingDisableCount = 0; + } } SimulatorProcess::~SimulatorProcess() { Redirection* r = redirection_; while (r) { Redirection* next = r->next_; js_delete(r); @@ -1414,18 +1438,19 @@ Simulator::Current() } // Sets the register in the architecture state. It will also deal with updating // Simulator internal state for special registers such as PC. void Simulator::setRegister(int reg, int64_t value) { MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters)); - if (reg == pc) + if (reg == pc) { pc_modified_ = true; + } // Zero register always holds 0. registers_[reg] = (reg == 0) ? 0 : value; } void Simulator::setFpuRegister(int fpureg, int64_t value) { @@ -1462,18 +1487,19 @@ Simulator::setFpuRegisterDouble(int fpur } // Get the register from the architecture state. This function does handle // the special case of accessing the PC register. int64_t Simulator::getRegister(int reg) const { MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters)); - if (reg == 0) + if (reg == 0) { return 0; + } return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0); } int64_t Simulator::getFpuRegister(int fpureg) const { MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)); return FPUregisters_[fpureg]; @@ -1531,20 +1557,21 @@ Simulator::setCallResult(__int128_t res) setRegister(v0, I64(res)); setRegister(v1, I64(res >> 64)); } // Helper functions for setting and testing the FCSR register's bits. void Simulator::setFCSRBit(uint32_t cc, bool value) { - if (value) + if (value) { FCSR_ |= (1 << cc); - else + } else { FCSR_ &= ~(1 << cc); + } } bool Simulator::testFCSRBit(uint32_t cc) { return FCSR_ & (1 << cc); } @@ -1628,40 +1655,45 @@ Simulator::registerState() // WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses // using a signal handler that redirects PC to a stub that safely reports an // error. However, if the handler is hit by the simulator, the PC is in C++ code // and cannot be redirected. Therefore, we must avoid hitting the handler by // redirecting in the simulator before the real handler would have been hit. 
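handleWasmFault below is a chain of early-outs: any lookup that fails means the access was not a wasm guard-page hit, and the simulator lets it proceed (and crash) normally. The shape of that chain, with booleans standing in for the runtime lookups (illustrative names throughout):

    #include <cassert>

    struct FaultContext {
        bool codeExists, inJitActivation, inModuleSegment,
             haveInstance, inGuardRegion;
    };

    static bool redirectIfWasmFault(const FaultContext& c, long* pc,
                                    long trapStub) {
        if (!c.codeExists) return false;
        if (!c.inJitActivation) return false;
        if (!c.inModuleSegment) return false;
        if (!c.haveInstance) return false;
        if (!c.inGuardRegion) return false;
        *pc = trapStub;  // Redirect the simulated PC instead of faulting.
        return true;
    }

    int main() {
        long pc = 0x1000;
        FaultContext hit{true, true, true, true, true};
        FaultContext miss{true, true, false, false, false};
        assert(redirectIfWasmFault(hit, &pc, 0x2000) && pc == 0x2000);
        assert(!redirectIfWasmFault(miss, &pc, 0x3000) && pc == 0x2000);
        return 0;
    }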
bool Simulator::handleWasmFault(uint64_t addr, unsigned numBytes) { - if (!wasm::CodeExists) + if (!wasm::CodeExists) { return false; + } JSContext* cx = TlsContext.get(); - if (!cx->activation() || !cx->activation()->isJit()) + if (!cx->activation() || !cx->activation()->isJit()) { return false; + } JitActivation* act = cx->activation()->asJit(); void* pc = reinterpret_cast<void*>(get_pc()); uint8_t* fp = reinterpret_cast<uint8_t*>(getRegister(Register::fp)); const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc); - if (!segment || !segment->isModule()) + if (!segment || !segment->isModule()) { return false; + } const wasm::ModuleSegment* moduleSegment = segment->asModule(); wasm::Instance* instance = wasm::LookupFaultingInstance(*moduleSegment, pc, fp); - if (!instance) + if (!instance) { return false; + } MOZ_RELEASE_ASSERT(&instance->code() == &moduleSegment->code()); - if (!instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes)) + if (!instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes)) { return false; + } LLBit_ = false; wasm::Trap trap; wasm::BytecodeOffset bytecode; MOZ_ALWAYS_TRUE(moduleSegment->code().lookupTrap(pc, &trap, &bytecode)); MOZ_RELEASE_ASSERT(trap == wasm::Trap::OutOfBounds); @@ -1669,35 +1701,39 @@ Simulator::handleWasmFault(uint64_t addr act->startWasmTrap(wasm::Trap::OutOfBounds, bytecode.offset(), registerState()); set_pc(int64_t(moduleSegment->trapCode())); return true; } bool Simulator::handleWasmTrapFault() { - if (!wasm::CodeExists) + if (!wasm::CodeExists) { return false; + } JSContext* cx = TlsContext.get(); - if (!cx->activation() || !cx->activation()->isJit()) + if (!cx->activation() || !cx->activation()->isJit()) { return false; + } JitActivation* act = cx->activation()->asJit(); void* pc = reinterpret_cast<void*>(get_pc()); const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc); - if (!segment || !segment->isModule()) + if (!segment || !segment->isModule()) { return false; + } const wasm::ModuleSegment* moduleSegment = segment->asModule(); wasm::Trap trap; wasm::BytecodeOffset bytecode; - if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode)) + if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode)) { return false; + } act->startWasmTrap(trap, bytecode.offset(), registerState()); set_pc(int64_t(moduleSegment->trapCode())); return true; } // MIPS memory instructions (except lw(d)l/r, sw(d)l/r) trap on unaligned memory // access, enabling the OS to handle them via trap-and-emulate. @@ -1707,239 +1743,255 @@ Simulator::handleWasmTrapFault() // We assume that executing correct generated code will not produce unaligned // memory accesses, so we explicitly check for address alignment and trap. // Note that trapping does not occur when executing wasm code, which requires that // unaligned memory accesses produce correct results.
uint8_t Simulator::readBU(uint64_t addr, SimInstruction* instr) { - if (handleWasmFault(addr, 1)) + if (handleWasmFault(addr, 1)) { return 0xff; + } uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); return* ptr; } int8_t Simulator::readB(uint64_t addr, SimInstruction* instr) { - if (handleWasmFault(addr, 1)) + if (handleWasmFault(addr, 1)) { return -1; + } int8_t* ptr = reinterpret_cast<int8_t*>(addr); return* ptr; } void Simulator::writeB(uint64_t addr, uint8_t value, SimInstruction* instr) { - if (handleWasmFault(addr, 1)) + if (handleWasmFault(addr, 1)) { return; + } uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); *ptr = value; } void Simulator::writeB(uint64_t addr, int8_t value, SimInstruction* instr) { - if (handleWasmFault(addr, 1)) + if (handleWasmFault(addr, 1)) { return; + } int8_t* ptr = reinterpret_cast<int8_t*>(addr); *ptr = value; } uint16_t Simulator::readHU(uint64_t addr, SimInstruction* instr) { - if (handleWasmFault(addr, 2)) + if (handleWasmFault(addr, 2)) { return 0xffff; + } if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); return *ptr; } printf("Unaligned unsigned halfword read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); return 0; } int16_t Simulator::readH(uint64_t addr, SimInstruction* instr) { - if (handleWasmFault(addr, 2)) + if (handleWasmFault(addr, 2)) { return -1; + } if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { int16_t* ptr = reinterpret_cast<int16_t*>(addr); return *ptr; } printf("Unaligned signed halfword read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); return 0; } void Simulator::writeH(uint64_t addr, uint16_t value, SimInstruction* instr) { - if (handleWasmFault(addr, 2)) + if (handleWasmFault(addr, 2)) { return; + } if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); LLBit_ = false; *ptr = value; return; } printf("Unaligned unsigned halfword write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); } void Simulator::writeH(uint64_t addr, int16_t value, SimInstruction* instr) { - if (handleWasmFault(addr, 2)) + if (handleWasmFault(addr, 2)) { return; + } if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { int16_t* ptr = reinterpret_cast<int16_t*>(addr); LLBit_ = false; *ptr = value; return; } printf("Unaligned halfword write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); } uint32_t Simulator::readWU(uint64_t addr, SimInstruction* instr) { - if (handleWasmFault(addr, 4)) + if (handleWasmFault(addr, 4)) { return -1; + } if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { uint32_t* ptr = reinterpret_cast<uint32_t*>(addr); return *ptr; } printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); return 0; } int32_t Simulator::readW(uint64_t addr, SimInstruction* instr) { - if (handleWasmFault(addr, 4)) + if (handleWasmFault(addr, 4)) { return -1; + } if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) { int32_t* ptr = reinterpret_cast<int32_t*>(addr); return *ptr; } printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr, reinterpret_cast<intptr_t>(instr)); MOZ_CRASH(); 
     return 0;
 }
 
 void
 Simulator::writeW(uint64_t addr, uint32_t value, SimInstruction* instr)
 {
-    if (handleWasmFault(addr, 4))
+    if (handleWasmFault(addr, 4)) {
         return;
+    }
 
     if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
         LLBit_ = false;
         *ptr = value;
         return;
     }
     printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
 }
 
 void
 Simulator::writeW(uint64_t addr, int32_t value, SimInstruction* instr)
 {
-    if (handleWasmFault(addr, 4))
+    if (handleWasmFault(addr, 4)) {
         return;
+    }
 
     if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         int32_t* ptr = reinterpret_cast<int32_t*>(addr);
         LLBit_ = false;
         *ptr = value;
         return;
     }
     printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
 }
 
 int64_t
 Simulator::readDW(uint64_t addr, SimInstruction* instr)
 {
-    if (handleWasmFault(addr, 8))
+    if (handleWasmFault(addr, 8)) {
         return -1;
+    }
 
     if ((addr & kPointerAlignmentMask) == 0 ||
         wasm::InCompiledCode(reinterpret_cast<void *>(get_pc())))
     {
         intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
         return *ptr;
     }
     printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
     return 0;
 }
 
 void
 Simulator::writeDW(uint64_t addr, int64_t value, SimInstruction* instr)
 {
-    if (handleWasmFault(addr, 8))
+    if (handleWasmFault(addr, 8)) {
         return;
+    }
 
     if ((addr & kPointerAlignmentMask) == 0 ||
         wasm::InCompiledCode(reinterpret_cast<void *>(get_pc())))
     {
         int64_t* ptr = reinterpret_cast<int64_t*>(addr);
         LLBit_ = false;
         *ptr = value;
         return;
     }
     printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
 }
 
 double
 Simulator::readD(uint64_t addr, SimInstruction* instr)
 {
-    if (handleWasmFault(addr, 8))
+    if (handleWasmFault(addr, 8)) {
         return NAN;
+    }
 
     if ((addr & kDoubleAlignmentMask) == 0 ||
         wasm::InCompiledCode(reinterpret_cast<void *>(get_pc())))
     {
         double* ptr = reinterpret_cast<double*>(addr);
         return *ptr;
     }
     printf("Unaligned (double) read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
     return 0;
 }
 
 void
 Simulator::writeD(uint64_t addr, double value, SimInstruction* instr)
 {
-    if (handleWasmFault(addr, 8))
+    if (handleWasmFault(addr, 8)) {
         return;
+    }
 
     if ((addr & kDoubleAlignmentMask) == 0 ||
         wasm::InCompiledCode(reinterpret_cast<void *>(get_pc())))
     {
         double* ptr = reinterpret_cast<double*>(addr);
         LLBit_ = false;
         *ptr = value;
         return;
     }
     printf("Unaligned (double) write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
@@ -1947,18 +1999,19 @@ Simulator::writeD(uint64_t addr, double
     MOZ_CRASH();
 }
 
 int
 Simulator::loadLinkedW(uint64_t addr, SimInstruction* instr)
 {
     if ((addr & 3) == 0) {
-        if (handleWasmFault(addr, 4))
+        if (handleWasmFault(addr, 4)) {
             return -1;
+        }
 
         volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
         int32_t value = *ptr;
         lastLLValue_ = value;
         LLAddr_ = addr;
         // Note that any memory write or "external" interrupt should reset this value to false.
         LLBit_ = true;
         return value;
@@ -1999,18 +2052,19 @@ Simulator::storeConditionalW(uint64_t ad
     return 0;
 }
 
 int64_t
 Simulator::loadLinkedD(uint64_t addr, SimInstruction* instr)
 {
     if ((addr & kPointerAlignmentMask) == 0) {
-        if (handleWasmFault(addr, 8))
+        if (handleWasmFault(addr, 8)) {
             return -1;
+        }
 
         volatile int64_t* ptr = reinterpret_cast<volatile int64_t*>(addr);
         int64_t value = *ptr;
         lastLLValue_ = value;
         LLAddr_ = addr;
         // Note that any memory write or "external" interrupt should reset this value to false.
         LLBit_ = true;
         return value;
@@ -2061,18 +2115,19 @@
 uintptr_t*
 Simulator::addressOfStackLimit()
 {
     return &stackLimit_;
 }
 
 bool
 Simulator::overRecursed(uintptr_t newsp) const
 {
-    if (newsp == 0)
+    if (newsp == 0) {
         newsp = getRegister(sp);
+    }
     return newsp <= stackLimit();
 }
 
 bool
 Simulator::overRecursedWithExtra(uint32_t extra) const
 {
     uintptr_t newsp = getRegister(sp) - extra;
     return newsp <= stackLimit();
@@ -2154,18 +2209,19 @@ Simulator::softwareInterrupt(SimInstruct
         intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
 
         bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
         if (!stack_aligned) {
             fprintf(stderr, "Runtime call with unaligned stack!\n");
             MOZ_CRASH();
         }
 
-        if (single_stepping_)
+        if (single_stepping_) {
             single_step_callback_(single_step_callback_arg_, this, nullptr);
+        }
 
         switch (redirection->type()) {
           case Args_General0: {
             Prototype_General0 target = reinterpret_cast<Prototype_General0>(external);
             int64_t result = target();
             setCallResult(result);
             break;
           }
@@ -2179,18 +2235,19 @@ Simulator::softwareInterrupt(SimInstruct
             Prototype_General2 target = reinterpret_cast<Prototype_General2>(external);
             int64_t result = target(arg0, arg1);
             setCallResult(result);
             break;
           }
           case Args_General3: {
             Prototype_General3 target = reinterpret_cast<Prototype_General3>(external);
             int64_t result = target(arg0, arg1, arg2);
-            if (external == intptr_t(&js::wasm::Instance::wake))
+            if (external == intptr_t(&js::wasm::Instance::wake)) {
                 result = int32_t(result);
+            }
             setCallResult(result);
             break;
           }
           case Args_General4: {
             Prototype_General4 target = reinterpret_cast<Prototype_General4>(external);
             int64_t result = target(arg0, arg1, arg2, arg3);
             setCallResult(result);
             break;
@@ -2227,36 +2284,39 @@ Simulator::softwareInterrupt(SimInstruct
             double dresult = target();
             setCallResultDouble(dresult);
             break;
           }
           case Args_Int_Double: {
             double dval0 = getFpuRegisterDouble(12);
             Prototype_Int_Double target = reinterpret_cast<Prototype_Int_Double>(external);
             int64_t result = target(dval0);
-            if (external == intptr_t((int32_t(*)(double))JS::ToInt32))
+            if (external == intptr_t((int32_t(*)(double))JS::ToInt32)) {
                 result = int32_t(result);
+            }
             setRegister(v0, result);
             break;
           }
           case Args_Int_GeneralGeneralGeneralInt64: {
             Prototype_Int_GeneralGeneralGeneralInt64 target =
                 reinterpret_cast<Prototype_Int_GeneralGeneralGeneralInt64>(external);
             int64_t result = target(arg0, arg1, arg2, arg3);
-            if (external == intptr_t(&js::wasm::Instance::wait_i32))
+            if (external == intptr_t(&js::wasm::Instance::wait_i32)) {
                 result = int32_t(result);
+            }
             setRegister(v0, result);
             break;
           }
           case Args_Int_GeneralGeneralInt64Int64: {
             Prototype_Int_GeneralGeneralInt64Int64 target =
                 reinterpret_cast<Prototype_Int_GeneralGeneralInt64Int64>(external);
             int64_t result = target(arg0, arg1, arg2, arg3);
-            if (external == intptr_t(&js::wasm::Instance::wait_i64))
+            if (external == intptr_t(&js::wasm::Instance::wait_i64)) {
                 result = int32_t(result);
+            }
             setRegister(v0, result);
             break;
           }
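The loadLinked/storeConditional pair above emulates MIPS ll/sc: loadLinked remembers the address and sets LLBit_, any intervening write (or handled wasm fault) clears it, and the conditional store only succeeds while the reservation is intact. A self-contained toy model of that protocol — the member names echo the diff, but this is an illustration, not the real Simulator class:

    #include <cstdint>
    #include <cstdio>

    struct LLSC {
        bool llBit = false;
        uint64_t llAddr = 0;

        int32_t loadLinked(int32_t* p) {
            llBit = true;                          // open a reservation
            llAddr = reinterpret_cast<uint64_t>(p);
            return *p;
        }
        void plainWrite(int32_t* p, int32_t v) {
            llBit = false;                         // any write kills the reservation
            *p = v;
        }
        // Returns 1 on success (value stored), 0 on failure, like sc.
        int storeConditional(int32_t* p, int32_t v) {
            if (!llBit || llAddr != reinterpret_cast<uint64_t>(p)) {
                return 0;
            }
            llBit = false;
            *p = v;
            return 1;
        }
    };

    int main() {
        int32_t cell = 5;
        LLSC sim;
        int32_t old = sim.loadLinked(&cell);
        printf("sc after ll: %d\n", sim.storeConditional(&cell, old + 1));           // 1
        sim.loadLinked(&cell);
        sim.plainWrite(&cell, 42);
        printf("sc after intervening write: %d\n", sim.storeConditional(&cell, 7));  // 0
    }

This is also why the memory-write helpers earlier all clear LLBit_ before storing: a store between ll and sc must make the sc fail.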
          case Args_Int_DoubleIntInt: {
            double dval = getFpuRegisterDouble(12);
            Prototype_Int_DoubleIntInt target = reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
            int64_t result = target(dval, arg1, arg2);
            setRegister(v0, result);
@@ -2355,18 +2415,19 @@ Simulator::softwareInterrupt(SimInstruct
             double dresult = target(dval0, dval1, dval2, dval3);
             setCallResultDouble(dresult);
             break;
           }
           default:
             MOZ_CRASH("call");
         }
 
-        if (single_stepping_)
+        if (single_stepping_) {
             single_step_callback_(single_step_callback_arg_, this, nullptr);
+        }
 
         setRegister(ra, saved_ra);
         set_pc(getRegister(ra));
 #endif
     } else if (func == ff_break && code <= kMaxStopCode) {
         if (isWatchpoint(code)) {
             printWatchpoint(code);
         } else {
@@ -2376,18 +2437,19 @@ Simulator::softwareInterrupt(SimInstruct
     } else {
         switch (func) {
           case ff_tge:
           case ff_tgeu:
           case ff_tlt:
           case ff_tltu:
           case ff_teq:
           case ff_tne:
-            if (instr->bits(15, 6) == kWasmTrapCode && handleWasmTrapFault())
+            if (instr->bits(15, 6) == kWasmTrapCode && handleWasmTrapFault()) {
                 return;
+            }
         };
         // All remaining break_ codes, and all traps are handled here.
         MipsDebugger dbg(this);
         dbg.debug();
     }
 }
 
 // Stop helper functions.
@@ -2434,25 +2496,27 @@ Simulator::isEnabledStop(uint32_t code)
     MOZ_ASSERT(code <= kMaxStopCode);
     MOZ_ASSERT(code > kMaxWatchpointCode);
     return !(watchedStops_[code].count_ & kStopDisabledBit);
 }
 
 void
 Simulator::enableStop(uint32_t code)
 {
-    if (!isEnabledStop(code))
+    if (!isEnabledStop(code)) {
         watchedStops_[code].count_ &= ~kStopDisabledBit;
+    }
 }
 
 void
 Simulator::disableStop(uint32_t code)
 {
-    if (isEnabledStop(code))
+    if (isEnabledStop(code)) {
         watchedStops_[code].count_ |= kStopDisabledBit;
+    }
 }
 
 void
 Simulator::increaseStopCounter(uint32_t code)
 {
     MOZ_ASSERT(code <= kMaxStopCode);
     if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) {
         printf("Stop counter for code %i has overflowed.\n"
@@ -2488,18 +2552,19 @@ Simulator::printStopInfo(uint32_t code)
         }
     }
 }
 
 void
 Simulator::signalExceptions()
 {
     for (int i = 1; i < kNumExceptions; i++) {
-        if (exceptions[i] != 0)
+        if (exceptions[i] != 0) {
             MOZ_CRASH("Error: Exception raised.");
+        }
     }
 }
 
 // Helper function for decodeTypeRegister.
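The stop bookkeeping above packs an enabled/disabled flag into the top bit of each per-code counter, which is why increaseStopCounter masks with ~(1 << 31) before comparing against 0x7fffffff. A standalone sketch of the same encoding; the constant value mirrors that masking, but the struct is illustrative:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t kStopDisabledBit = 1u << 31;

    struct Stop { uint32_t count = 0; };

    static bool isEnabled(const Stop& s) { return !(s.count & kStopDisabledBit); }
    static void disable(Stop& s) { s.count |= kStopDisabledBit; }
    static void enable(Stop& s) { s.count &= ~kStopDisabledBit; }
    static void hit(Stop& s) { s.count++; }  // low 31 bits count the hits

    int main() {
        Stop s;
        hit(s); hit(s);
        disable(s);
        printf("enabled=%d hits=%u\n", isEnabled(s), s.count & ~kStopDisabledBit);
        enable(s);
        printf("enabled=%d hits=%u\n", isEnabled(s), s.count & ~kStopDisabledBit);
    }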
 void
 Simulator::configureTypeRegister(SimInstruction* instr,
                                  int64_t& alu_out,
                                  __int128& i128hilo,
@@ -2677,42 +2742,46 @@ Simulator::configureTypeRegister(SimInst
           case ff_multu:
             u128hilo = U64(U32(I32_CHECK(rs))) * U64(U32(I32_CHECK(rt)));
             break;
           case ff_dmultu:
             u128hilo = U128(rs) * U128(rt);
             break;
           case ff_add:
             alu_out = I32_CHECK(rs) + I32_CHECK(rt);
-            if ((alu_out << 32) != (alu_out << 31))
+            if ((alu_out << 32) != (alu_out << 31)) {
                 exceptions[kIntegerOverflow] = 1;
+            }
             alu_out = I32(alu_out);
             break;
           case ff_dadd:
             temp = I128(rs) + I128(rt);
-            if ((temp << 64) != (temp << 63))
+            if ((temp << 64) != (temp << 63)) {
                 exceptions[kIntegerOverflow] = 1;
+            }
             alu_out = I64(temp);
             break;
           case ff_addu:
             alu_out = I32(I32_CHECK(rs) + I32_CHECK(rt));
             break;
           case ff_daddu:
             alu_out = rs + rt;
             break;
           case ff_sub:
             alu_out = I32_CHECK(rs) - I32_CHECK(rt);
-            if ((alu_out << 32) != (alu_out << 31))
+            if ((alu_out << 32) != (alu_out << 31)) {
                 exceptions[kIntegerUnderflow] = 1;
+            }
             alu_out = I32(alu_out);
             break;
           case ff_dsub:
             temp = I128(rs) - I128(rt);
-            if ((temp << 64) != (temp << 63))
+            if ((temp << 64) != (temp << 63)) {
                 exceptions[kIntegerUnderflow] = 1;
+            }
             alu_out = I64(temp);
             break;
           case ff_subu:
             alu_out = I32(I32_CHECK(rs) - I32_CHECK(rt));
             break;
           case ff_dsubu:
             alu_out = rs - rt;
             break;
@@ -2819,33 +2888,35 @@ Simulator::configureTypeRegister(SimInst
         switch (instr->functionFieldRaw()) {
           case ff_ins: {    // Mips64r2 instruction.
             // Interpret rd field as 5-bit msb of insert.
             uint16_t msb = rd_reg;
             // Interpret sa field as 5-bit lsb of insert.
             uint16_t lsb = sa;
             uint16_t size = msb - lsb + 1;
             uint32_t mask = (1 << size) - 1;
-            if (lsb > msb)
+            if (lsb > msb) {
                 alu_out = Unpredictable;
-            else
+            } else {
                 alu_out = (U32(I32_CHECK(rt)) & ~(mask << lsb)) |
                           ((U32(I32_CHECK(rs)) & mask) << lsb);
+            }
             break;
           }
           case ff_dins: {    // Mips64r2 instruction.
             // Interpret rd field as 5-bit msb of insert.
             uint16_t msb = rd_reg;
             // Interpret sa field as 5-bit lsb of insert.
             uint16_t lsb = sa;
             uint16_t size = msb - lsb + 1;
             uint64_t mask = (1ul << size) - 1;
-            if (lsb > msb)
+            if (lsb > msb) {
                 alu_out = Unpredictable;
-            else
+            } else {
                 alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb);
+            }
             break;
           }
           case ff_dinsm: {    // Mips64r2 instruction.
             // Interpret rd field as 5-bit msb of insert.
             uint16_t msb = rd_reg;
             // Interpret sa field as 5-bit lsb of insert.
             uint16_t lsb = sa;
             uint16_t size = msb - lsb + 33;
@@ -2855,33 +2926,35 @@ Simulator::configureTypeRegister(SimInst
           }
           case ff_dinsu: {    // Mips64r2 instruction.
             // Interpret rd field as 5-bit msb of insert.
             uint16_t msb = rd_reg;
             // Interpret sa field as 5-bit lsb of insert.
             uint16_t lsb = sa + 32;
             uint16_t size = msb - lsb + 33;
             uint64_t mask = (1ul << size) - 1;
-            if (sa > msb)
+            if (sa > msb) {
                 alu_out = Unpredictable;
-            else
+            } else {
                 alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb);
+            }
             break;
           }
           case ff_ext: {    // Mips64r2 instruction.
             // Interpret rd field as 5-bit msb of extract.
             uint16_t msb = rd_reg;
             // Interpret sa field as 5-bit lsb of extract.
             uint16_t lsb = sa;
             uint16_t size = msb + 1;
             uint32_t mask = (1 << size) - 1;
-            if ((lsb + msb) > 31)
+            if ((lsb + msb) > 31) {
                 alu_out = Unpredictable;
-            else
+            } else {
                 alu_out = (U32(I32_CHECK(rs)) & (mask << lsb)) >> lsb;
+            }
             break;
           }
           case ff_dext: {    // Mips64r2 instruction.
             // Interpret rd field as 5-bit msb of extract.
             uint16_t msb = rd_reg;
             // Interpret sa field as 5-bit lsb of extract.
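The ff_add/ff_sub cases are the trapping MIPS add/sub: they set exceptions[kIntegerOverflow] (checked later by signalExceptions) when the 64-bit sum of two int32 operands no longer fits in int32. Here is a standalone check of that condition using the conventional sign-extension comparison; the diff's shift-pair test aims at the same property, and this form sidesteps signed-shift pitfalls:

    #include <cstdint>
    #include <cstdio>

    // A 64-bit value fits int32 exactly when truncating to int32 and
    // sign-extending back reproduces it.
    static bool fitsInt32(int64_t sum) {
        return sum == static_cast<int32_t>(sum);
    }

    int main() {
        int64_t a = INT32_MAX;
        printf("%d\n", fitsInt32(a));                        // 1: INT32_MAX fits
        printf("%d\n", fitsInt32(a + 1));                    // 0: would overflow add
        printf("%d\n", fitsInt32(INT32_MIN - int64_t(1)));   // 0: would underflow sub
    }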
             uint16_t lsb = sa;
             uint16_t size = msb + 1;
@@ -2891,40 +2964,43 @@ Simulator::configureTypeRegister(SimInst
           }
           case ff_dextm: {    // Mips64r2 instruction.
             // Interpret rd field as 5-bit msb of extract.
             uint16_t msb = rd_reg;
             // Interpret sa field as 5-bit lsb of extract.
             uint16_t lsb = sa;
             uint16_t size = msb + 33;
             uint64_t mask = (1ul << size) - 1;
-            if ((lsb + msb + 32 + 1) > 64)
+            if ((lsb + msb + 32 + 1) > 64) {
                 alu_out = Unpredictable;
-            else
+            } else {
                 alu_out = (U64(rs) & (mask << lsb)) >> lsb;
+            }
             break;
           }
           case ff_dextu: {    // Mips64r2 instruction.
             // Interpret rd field as 5-bit msb of extract.
             uint16_t msb = rd_reg;
             // Interpret sa field as 5-bit lsb of extract.
             uint16_t lsb = sa + 32;
             uint16_t size = msb + 1;
             uint64_t mask = (1ul << size) - 1;
-            if ((lsb + msb + 1) > 64)
+            if ((lsb + msb + 1) > 64) {
                 alu_out = Unpredictable;
-            else
+            } else {
                 alu_out = (U64(rs) & (mask << lsb)) >> lsb;
+            }
             break;
           }
           case ff_bshfl: {   // Mips32r2 instruction.
-            if (16 == sa)  // seb
+            if (16 == sa) {  // seb
                 alu_out = I64(I8(I32_CHECK(rt)));
-            else if (24 == sa)  // seh
+            } else if (24 == sa) {  // seh
                 alu_out = I64(I16(I32_CHECK(rt)));
+            }
             break;
           }
           default:
             MOZ_CRASH();
         };
         break;
       default:
         MOZ_CRASH();
@@ -3080,80 +3156,88 @@ Simulator::decodeTypeRegister(SimInstruc
                 float rounded = std::floor(fs_value + 0.5);
                 int32_t result = I32(rounded);
                 if ((result & 1) != 0 && result - fs_value == 0.5) {
                     // If the number is halfway between two integers,
                     // round to the even one.
                     result--;
                 }
                 setFpuRegisterLo(fd_reg, result);
-                if (setFCSRRoundError<int32_t>(fs_value, rounded))
+                if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
                     setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+                }
                 break;
               }
               case ff_trunc_w_fmt: {  // Truncate float to word (round towards 0).
                 float rounded = truncf(fs_value);
                 int32_t result = I32(rounded);
                 setFpuRegisterLo(fd_reg, result);
-                if (setFCSRRoundError<int32_t>(fs_value, rounded))
+                if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
                     setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+                }
                 break;
               }
               case ff_floor_w_fmt: {  // Round float to word towards negative infinity.
                 float rounded = std::floor(fs_value);
                 int32_t result = I32(rounded);
                 setFpuRegisterLo(fd_reg, result);
-                if (setFCSRRoundError<int32_t>(fs_value, rounded))
+                if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
                     setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+                }
                 break;
               }
               case ff_ceil_w_fmt: {  // Round double to word towards positive infinity.
                 float rounded = std::ceil(fs_value);
                 int32_t result = I32(rounded);
                 setFpuRegisterLo(fd_reg, result);
-                if (setFCSRRoundError<int32_t>(fs_value, rounded))
+                if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
                     setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+                }
                 break;
               }
               case ff_cvt_l_fmt:  // Mips64r2: Truncate float to 64-bit long-word.
                 // Rounding modes are not yet supported.
                 MOZ_ASSERT((FCSR_ & 3) == 0);
                 // In rounding mode 0 it should behave like ROUND.
                 MOZ_FALLTHROUGH;
               case ff_round_l_fmt: {  // Mips64r2 instruction.
                 float rounded =
                     fs_value > 0 ? std::floor(fs_value + 0.5) : std::ceil(fs_value - 0.5);
                 i64 = I64(rounded);
                 setFpuRegister(fd_reg, i64);
-                if (setFCSRRoundError<int64_t>(fs_value, rounded))
+                if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
                     setFpuRegister(fd_reg, kFPUInvalidResult64);
+                }
                 break;
               }
               case ff_trunc_l_fmt: {  // Mips64r2 instruction.
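The ext/ins family (and the 64-bit d* variants) is pure mask arithmetic: build a run of `size` ones, shift it to `lsb`, then either pull the field out or splice a new field in. Standalone 32-bit equivalents of the alu_out expressions above, with the size == 32 edge case handled explicitly:

    #include <cstdint>
    #include <cstdio>

    static uint32_t ext32(uint32_t rs, unsigned lsb, unsigned size) {
        uint32_t mask = (size >= 32) ? ~0u : ((1u << size) - 1);
        return (rs & (mask << lsb)) >> lsb;            // extract the field
    }

    static uint32_t ins32(uint32_t rt, uint32_t rs, unsigned lsb, unsigned size) {
        uint32_t mask = (size >= 32) ? ~0u : ((1u << size) - 1);
        return (rt & ~(mask << lsb)) |                 // clear the target field
               ((rs & mask) << lsb);                   // splice in the new bits
    }

    int main() {
        uint32_t v = 0xdeadbeef;
        printf("0x%x\n", ext32(v, 8, 8));        // 0xbe
        printf("0x%x\n", ins32(v, 0x42, 8, 8));  // 0xdead42ef
    }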
                 float rounded = truncf(fs_value);
                 i64 = I64(rounded);
                 setFpuRegister(fd_reg, i64);
-                if (setFCSRRoundError<int64_t>(fs_value, rounded))
+                if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
                     setFpuRegister(fd_reg, kFPUInvalidResult64);
+                }
                 break;
               }
               case ff_floor_l_fmt: {  // Mips64r2 instruction.
                 float rounded = std::floor(fs_value);
                 i64 = I64(rounded);
                 setFpuRegister(fd_reg, i64);
-                if (setFCSRRoundError<int64_t>(fs_value, rounded))
+                if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
                     setFpuRegister(fd_reg, kFPUInvalidResult64);
+                }
                 break;
               }
               case ff_ceil_l_fmt: {  // Mips64r2 instruction.
                 float rounded = std::ceil(fs_value);
                 i64 = I64(rounded);
                 setFpuRegister(fd_reg, i64);
-                if (setFCSRRoundError<int64_t>(fs_value, rounded))
+                if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
                     setFpuRegister(fd_reg, kFPUInvalidResult64);
+                }
                 break;
               }
               case ff_cvt_ps_s:
               case ff_c_f_fmt:
                 MOZ_CRASH();
                 break;
               case ff_movf_fmt:
                 if (testFCSRBit(fcsr_cc)) {
@@ -3238,83 +3322,91 @@ Simulator::decodeTypeRegister(SimInstruc
                 double rounded = std::floor(ds_value + 0.5);
                 int32_t result = I32(rounded);
                 if ((result & 1) != 0 && result - ds_value == 0.5) {
                     // If the number is halfway between two integers,
                     // round to the even one.
                     result--;
                 }
                 setFpuRegisterLo(fd_reg, result);
-                if (setFCSRRoundError<int32_t>(ds_value, rounded))
+                if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
                     setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+                }
                 break;
               }
               case ff_trunc_w_fmt: {  // Truncate double to word (round towards 0).
                 double rounded = trunc(ds_value);
                 int32_t result = I32(rounded);
                 setFpuRegisterLo(fd_reg, result);
-                if (setFCSRRoundError<int32_t>(ds_value, rounded))
+                if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
                     setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+                }
                 break;
               }
               case ff_floor_w_fmt: {  // Round double to word towards negative infinity.
                 double rounded = std::floor(ds_value);
                 int32_t result = I32(rounded);
                 setFpuRegisterLo(fd_reg, result);
-                if (setFCSRRoundError<int32_t>(ds_value, rounded))
+                if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
                     setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+                }
                 break;
               }
               case ff_ceil_w_fmt: {  // Round double to word towards positive infinity.
                 double rounded = std::ceil(ds_value);
                 int32_t result = I32(rounded);
                 setFpuRegisterLo(fd_reg, result);
-                if (setFCSRRoundError<int32_t>(ds_value, rounded))
+                if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
                     setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+                }
                 break;
               }
               case ff_cvt_s_fmt:  // Convert double to float (single).
                 setFpuRegisterFloat(fd_reg, static_cast<float>(ds_value));
                 break;
               case ff_cvt_l_fmt:  // Mips64r2: Truncate double to 64-bit long-word.
                 // Rounding modes are not yet supported.
                 MOZ_ASSERT((FCSR_ & 3) == 0);
                 // In rounding mode 0 it should behave like ROUND.
                 MOZ_FALLTHROUGH;
               case ff_round_l_fmt: {  // Mips64r2 instruction.
                 double rounded =
                     ds_value > 0 ? std::floor(ds_value + 0.5) : std::ceil(ds_value - 0.5);
                 i64 = I64(rounded);
                 setFpuRegister(fd_reg, i64);
-                if (setFCSRRoundError<int64_t>(ds_value, rounded))
+                if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
                     setFpuRegister(fd_reg, kFPUInvalidResult64);
+                }
                 break;
               }
               case ff_trunc_l_fmt: {  // Mips64r2 instruction.
                 double rounded = trunc(ds_value);
                 i64 = I64(rounded);
                 setFpuRegister(fd_reg, i64);
-                if (setFCSRRoundError<int64_t>(ds_value, rounded))
+                if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
                     setFpuRegister(fd_reg, kFPUInvalidResult64);
+                }
                 break;
               }
               case ff_floor_l_fmt: {  // Mips64r2 instruction.
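The round.w cases implement round-to-nearest, ties-to-even, by rounding half-up first and then stepping back one when the tie landed on an odd integer — the "result--" branch above. The same logic, extracted into a self-contained function:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    static int32_t roundHalfToEven(double x) {
        double rounded = std::floor(x + 0.5);          // round half up
        int32_t result = static_cast<int32_t>(rounded);
        if ((result & 1) != 0 && result - x == 0.5) {
            result--;                                  // tie: prefer the even neighbour
        }
        return result;
    }

    int main() {
        printf("%d %d %d %d\n",
               roundHalfToEven(2.5),    // 2, not 3
               roundHalfToEven(3.5),    // 4
               roundHalfToEven(-2.5),   // -2
               roundHalfToEven(2.4));   // 2
    }

setFCSRRoundError then flags the cases (NaN, out-of-range) where the converted value is invalid, in which case the register is overwritten with kFPUInvalidResult.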
                 double rounded = std::floor(ds_value);
                 i64 = I64(rounded);
                 setFpuRegister(fd_reg, i64);
-                if (setFCSRRoundError<int64_t>(ds_value, rounded))
+                if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
                     setFpuRegister(fd_reg, kFPUInvalidResult64);
+                }
                 break;
               }
               case ff_ceil_l_fmt: {  // Mips64r2 instruction.
                 double rounded = std::ceil(ds_value);
                 i64 = I64(rounded);
                 setFpuRegister(fd_reg, i64);
-                if (setFCSRRoundError<int64_t>(ds_value, rounded))
+                if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
                     setFpuRegister(fd_reg, kFPUInvalidResult64);
+                }
                 break;
               }
               case ff_c_f_fmt:
                 MOZ_CRASH();
                 break;
               case ff_movz_fmt:
                 if (rt == 0) {
                     setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
@@ -3456,34 +3548,38 @@ Simulator::decodeTypeRegister(SimInstruc
           case ff_teq:
           case ff_tne:
             if (do_interrupt) {
                 softwareInterrupt(instr);
             }
             break;
             // Conditional moves.
           case ff_movn:
-            if (rt)
+            if (rt) {
                 setRegister(rd_reg, rs);
+            }
             break;
           case ff_movci: {
             uint32_t cc = instr->fbccValue();
             uint32_t fcsr_cc = GetFCSRConditionBit(cc);
             if (instr->bit(16)) {  // Read Tf bit.
-                if (testFCSRBit(fcsr_cc))
+                if (testFCSRBit(fcsr_cc)) {
                     setRegister(rd_reg, rs);
+                }
             } else {
-                if (!testFCSRBit(fcsr_cc))
+                if (!testFCSRBit(fcsr_cc)) {
                     setRegister(rd_reg, rs);
+                }
             }
             break;
           }
           case ff_movz:
-            if (!rt)
+            if (!rt) {
                 setRegister(rd_reg, rs);
+            }
             break;
           default:  // For other special opcodes we do the default operation.
             setRegister(rd_reg, alu_out);
         };
         break;
       case op_special2:
         switch (instr->functionFieldRaw()) {
           case ff_mul:
@@ -3573,20 +3669,21 @@ Simulator::decodeTypeImmediate(SimInstru
     switch (instr->rsFieldRaw()) {
       case rs_bc1:  // Branch on coprocessor condition.
         cc = instr->fbccValue();
         fcsr_cc = GetFCSRConditionBit(cc);
         cc_value = testFCSRBit(fcsr_cc);
         do_branch = (instr->fbtrueValue()) ? cc_value : !cc_value;
         execute_branch_delay_instruction = true;
         // Set next_pc.
-        if (do_branch)
+        if (do_branch) {
             next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
-        else
+        } else {
             next_pc = current_pc + kBranchReturnOffset;
+        }
         break;
       default:
         MOZ_CRASH();
     };
     break;
     // ------------- op_regimm class.
   case op_regimm:
     switch (instr->rtFieldRaw()) {
@@ -3610,18 +3707,19 @@ Simulator::decodeTypeImmediate(SimInstru
       case rt_bltzal:
       case rt_bgez:
       case rt_bgezal:
         // Branch instructions common part.
         execute_branch_delay_instruction = true;
         // Set next_pc.
         if (do_branch) {
             next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
-            if (instr->isLinkingInstruction())
+            if (instr->isLinkingInstruction()) {
                 setRegister(31, current_pc + kBranchReturnOffset);
+            }
         } else {
             next_pc = current_pc + kBranchReturnOffset;
         }
         break;
       default:
         break;
     };
     break;  // case op_regimm.
@@ -3638,24 +3736,26 @@ Simulator::decodeTypeImmediate(SimInstru
         do_branch = rs <= 0;
         break;
       case op_bgtz:
         do_branch = rs > 0;
         break;
         // ------------- Arithmetic instructions.
       case op_addi:
         alu_out = I32_CHECK(rs) + se_imm16;
-        if ((alu_out << 32) != (alu_out << 31))
+        if ((alu_out << 32) != (alu_out << 31)) {
             exceptions[kIntegerOverflow] = 1;
+        }
         alu_out = I32_CHECK(alu_out);
         break;
       case op_daddi:
         temp = alu_out = rs + se_imm16;
-        if ((temp << 64) != (temp << 63))
+        if ((temp << 64) != (temp << 63)) {
             exceptions[kIntegerOverflow] = 1;
+        }
         alu_out = I64(temp);
         break;
       case op_addiu:
         alu_out = I32(I32_CHECK(rs) + se_imm16);
         break;
       case op_daddiu:
         alu_out = rs + se_imm16;
         break;
@@ -3929,18 +4029,19 @@ Simulator::decodeTypeImmediate(SimInstru
         // We don't check for end_sim_pc. First it should not be met as the current
         // pc is valid. Secondly a jump should always execute its branch delay slot.
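The conditional moves above are one-liners once spelled out: movn writes rd only when rt is non-zero, movz only when rt is zero, and movci gates on an FCSR condition bit instead of a register. Standalone equivalents of the first two, matching the switch arms:

    #include <cstdint>
    #include <cstdio>

    static void movn(int64_t& rd, int64_t rs, int64_t rt) { if (rt) rd = rs; }
    static void movz(int64_t& rd, int64_t rs, int64_t rt) { if (!rt) rd = rs; }

    int main() {
        int64_t rd = 0;
        movn(rd, 42, 1);  // condition holds: rd becomes 42
        movz(rd, 7, 1);   // condition fails: rd stays 42
        printf("%lld\n", (long long)rd);
    }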
         SimInstruction* branch_delay_instr =
             reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize);
         branchDelayInstructionDecode(branch_delay_instr);
     }
 
     // If needed update pc after the branch delay execution.
-    if (next_pc != bad_ra)
+    if (next_pc != bad_ra) {
         set_pc(next_pc);
+    }
 }
 
 // Type 3: instructions using a 26 bits immediate. (e.g. j, jal).
 void
 Simulator::decodeTypeJump(SimInstruction* instr)
 {
     // Get current pc.
     int64_t current_pc = get_pc();
@@ -3953,18 +4054,19 @@ Simulator::decodeTypeJump(SimInstruction
     // We don't check for end_sim_pc. First it should not be met as the current pc
     // is valid. Secondly a jump should always execute its branch delay slot.
     SimInstruction* branch_delay_instr =
         reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize);
     branchDelayInstructionDecode(branch_delay_instr);
 
     // Update pc and ra if necessary.
     // Do this after the branch delay execution.
-    if (instr->isLinkingInstruction())
+    if (instr->isLinkingInstruction()) {
         setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+    }
     set_pc(next_pc);
     pc_modified_ = true;
 }
 
 // Executes the current instruction.
 void
 Simulator::instructionDecode(SimInstruction* instr)
 {
@@ -3982,18 +4084,19 @@ Simulator::instructionDecode(SimInstruct
         decodeTypeImmediate(instr);
         break;
       case SimInstruction::kJumpType:
         decodeTypeJump(instr);
         break;
       default:
         UNSUPPORTED();
     }
-    if (!pc_modified_)
+    if (!pc_modified_) {
         setRegister(pc, reinterpret_cast<int64_t>(instr) + SimInstruction::kInstrSize);
+    }
 }
 
 void
 Simulator::branchDelayInstructionDecode(SimInstruction* instr)
 {
     if (instr->instructionBits() == NopInst) {
         // Short-cut generic nop instructions. They are always valid and they
         // never change the simulator state.
@@ -4013,51 +4116,55 @@ Simulator::enable_single_stepping(Single
     single_step_callback_ = cb;
     single_step_callback_arg_ = arg;
     single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
 }
 
 void
 Simulator::disable_single_stepping()
 {
-    if (!single_stepping_)
+    if (!single_stepping_) {
         return;
+    }
     single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
     single_stepping_ = false;
     single_step_callback_ = nullptr;
     single_step_callback_arg_ = nullptr;
 }
 
 template<bool enableStopSimAt>
 void
 Simulator::execute()
 {
-    if (single_stepping_)
+    if (single_stepping_) {
         single_step_callback_(single_step_callback_arg_, this, nullptr);
+    }
 
     // Get the PC to simulate. Cannot use the accessor here as we need the
     // raw PC value and not the one used as input to arithmetic instructions.
     int64_t program_counter = get_pc();
 
     while (program_counter != end_sim_pc) {
         if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
             MipsDebugger dbg(this);
             dbg.debug();
         } else {
-            if (single_stepping_)
+            if (single_stepping_) {
                 single_step_callback_(single_step_callback_arg_, this, (void*)program_counter);
+            }
             SimInstruction* instr = reinterpret_cast<SimInstruction *>(program_counter);
             instructionDecode(instr);
             icount_++;
         }
         program_counter = get_pc();
     }
 
-    if (single_stepping_)
+    if (single_stepping_) {
         single_step_callback_(single_step_callback_arg_, this, nullptr);
+    }
 }
 
 void
 Simulator::callInternal(uint8_t* entry)
 {
     // Prepare to execute the code at entry.
     setRegister(pc, reinterpret_cast<int64_t>(entry));
 
     // Put down marker for end of simulation. The simulator will stop simulation
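Two MIPS facts drive the jump decoding above: the instruction in the delay slot (pc + 4) executes before control transfers, and a linking jump stores pc + 8 — the address after the delay slot — into ra, which is exactly the `current_pc + 2 * SimInstruction::kInstrSize` in the diff. A toy trace of that ordering; Jump and simulateJump are illustrative:

    #include <cstdint>
    #include <cstdio>

    static const int64_t kInstrSize = 4;  // MIPS instructions are 4 bytes

    struct Jump {
        int64_t nextPc;  // jump target
        bool linking;    // jal-style jump?
    };

    static void simulateJump(int64_t& pc, int64_t& ra, const Jump& j) {
        int64_t delaySlot = pc + kInstrSize;
        printf("execute delay slot at 0x%llx\n", (long long)delaySlot);
        if (j.linking) {
            ra = pc + 2 * kInstrSize;  // return lands after the delay slot
        }
        pc = j.nextPc;
    }

    int main() {
        int64_t pc = 0x1000, ra = 0;
        simulateJump(pc, ra, Jump{0x2000, true});
        printf("pc=0x%llx ra=0x%llx\n", (long long)pc, (long long)ra);  // 0x2000, 0x1008
    }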
@@ -4090,20 +4197,21 @@ Simulator::callInternal(uint8_t* entry)
     setRegister(s4, callee_saved_value);
     setRegister(s5, callee_saved_value);
     setRegister(s6, callee_saved_value);
     setRegister(s7, callee_saved_value);
     setRegister(gp, callee_saved_value);
     setRegister(fp, callee_saved_value);
 
     // Start the simulation.
-    if (Simulator::StopSimAt != -1)
+    if (Simulator::StopSimAt != -1) {
         execute<true>();
-    else
+    } else {
         execute<false>();
+    }
 
     // Check that the callee-saved registers have been preserved.
     MOZ_ASSERT(callee_saved_value == getRegister(s0));
     MOZ_ASSERT(callee_saved_value == getRegister(s1));
     MOZ_ASSERT(callee_saved_value == getRegister(s2));
     MOZ_ASSERT(callee_saved_value == getRegister(s3));
     MOZ_ASSERT(callee_saved_value == getRegister(s4));
     MOZ_ASSERT(callee_saved_value == getRegister(s5));
@@ -4130,32 +4238,34 @@
 int64_t
 Simulator::call(uint8_t* entry, int argument_count, ...)
 {
     va_list parameters;
     va_start(parameters, argument_count);
 
     int64_t original_stack = getRegister(sp);
     // Compute position of stack on entry to generated code.
     int64_t entry_stack = original_stack;
-    if (argument_count > kCArgSlotCount)
+    if (argument_count > kCArgSlotCount) {
         entry_stack = entry_stack - argument_count * sizeof(int64_t);
-    else
+    } else {
         entry_stack = entry_stack - kCArgsSlotsSize;
+    }
 
     entry_stack &= ~U64(ABIStackAlignment - 1);
 
     intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
 
     // Setup the arguments.
     for (int i = 0; i < argument_count; i++) {
         js::jit::Register argReg;
-        if (GetIntArgReg(i, &argReg))
+        if (GetIntArgReg(i, &argReg)) {
             setRegister(argReg.code(), va_arg(parameters, int64_t));
-        else
+        } else {
             stack_argument[i] = va_arg(parameters, int64_t);
+        }
     }
 
     va_end(parameters);
 
     setRegister(sp, entry_stack);
 
     callInternal(entry);
 
     // Pop stack passed arguments.
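Simulator::call stages arguments the way the native ABI would see them: the first few ride in integer argument registers via GetIntArgReg, every argument still gets an indexed stack slot, and the stack pointer is aligned down before entry. A toy model of that split under stated assumptions — kIntArgRegs and kAlign are illustrative values (MIPS64 n64 passes 8 integer arguments in registers; the alignment stands in for ABIStackAlignment):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static const size_t kIntArgRegs = 8;
    static const uint64_t kAlign = 16;

    int main() {
        std::vector<int64_t> args = {10, 20, 30, 40, 50, 60, 70, 80, 90};
        uint64_t sp = 0x7fff0008;

        // Reserve one 8-byte slot per argument, then align down, mirroring
        // the entry_stack computation in the diff.
        sp -= args.size() * sizeof(int64_t);
        sp &= ~(kAlign - 1);

        for (size_t i = 0; i < args.size(); i++) {
            if (i < kIntArgRegs) {
                printf("arg%zu = %lld -> register a%zu\n", i, (long long)args[i], i);
            } else {
                printf("arg%zu = %lld -> stack slot %zu (sp+%zu)\n",
                       i, (long long)args[i], i, i * sizeof(int64_t));
            }
        }
        printf("aligned entry sp = 0x%llx\n", (unsigned long long)sp);
    }

Note the slot index is the overall argument index, as in `stack_argument[i]` above: register-passed arguments keep a (here unused) shadow slot, which keeps the layout uniform regardless of how many arguments spilled.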
--- a/js/src/jit/mips64/Trampoline-mips64.cpp
+++ b/js/src/jit/mips64/Trampoline-mips64.cpp
@@ -654,18 +654,19 @@ JitRuntime::generateVMWrapper(JSContext*
     static_assert((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
                   "Wrapper register set should be a superset of Volatile register set.");
 
     // The context is the first argument; a0 is the first argument register.
     Register cxreg = a0;
     regs.take(cxreg);
 
     // If it isn't a tail call, then the return address needs to be saved
-    if (f.expectTailCall == NonTailCall)
+    if (f.expectTailCall == NonTailCall) {
         masm.pushReturnAddress();
+    }
 
     // We're aligned to an exit frame, so link it up.
     masm.loadJSContext(cxreg);
     masm.enterExitFrame(cxreg, regs.getAny(), &f);
 
     // Save the base of the argument set stored on the stack.
     Register argsBase = InvalidReg;
     if (f.explicitArgs) {
@@ -709,54 +710,58 @@ JitRuntime::generateVMWrapper(JSContext*
         masm.movePtr(StackPointer, outReg);
         break;
 
       default:
         MOZ_ASSERT(f.outParam == Type_Void);
         break;
     }
 
-    if (!generateTLEnterVM(masm, f))
+    if (!generateTLEnterVM(masm, f)) {
         return false;
+    }
 
     masm.setupUnalignedABICall(regs.getAny());
     masm.passABIArg(cxreg);
 
     size_t argDisp = 0;
 
     // Copy any arguments.
     for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
         switch (f.argProperties(explicitArg)) {
           case VMFunction::WordByValue:
-            if (f.argPassedInFloatReg(explicitArg))
+            if (f.argPassedInFloatReg(explicitArg)) {
                 masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
-            else
+            } else {
                 masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+            }
             argDisp += sizeof(void*);
             break;
           case VMFunction::WordByRef:
             masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
                             MoveOp::GENERAL);
             argDisp += sizeof(void*);
             break;
          case VMFunction::DoubleByValue:
          case VMFunction::DoubleByRef:
            MOZ_CRASH("NYI: MIPS64 callVM should not be used with 128bits values.");
            break;
        }
     }
 
     // Copy the implicit outparam, if any.
-    if (InvalidReg != outReg)
+    if (InvalidReg != outReg) {
         masm.passABIArg(outReg);
+    }
 
     masm.callWithABI(f.wrapped, MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
 
-    if (!generateTLExitVM(masm, f))
+    if (!generateTLExitVM(masm, f)) {
         return false;
+    }
 
     // Test for failure.
     switch (f.failType()) {
       case Type_Object:
         masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel());
         break;
       case Type_Bool:
         // Called functions return bools, which are 0/false and non-zero/true