author | Sean Stangl <sstangl@mozilla.com> |
Tue, 14 Jul 2015 12:20:02 -0700 | |
changeset 256251 | d18e3a6d5149099a25c54f51a8b3972b395dee6b |
parent 256250 | a220d2e6b9047a8c004b4a93c656ec862d4ee0f3 |
child 256252 | 1d7026c52dbba3c0c89df3a81e3059052a128682 |
push id | 29171 |
push user | cbook@mozilla.com |
push date | Wed, 05 Aug 2015 11:13:29 +0000 |
treeherder | mozilla-central@b12a261ee32e [default view] [failures only] |
perfherder | [talos] [build metrics] [platform microbench] (compared to previous push) |
reviewers | h4writer |
bugs | 1183842 |
milestone | 42.0a1 |
first release with | nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
|
last release without | nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
|
--- a/js/src/jit/arm64/MacroAssembler-arm64.h +++ b/js/src/jit/arm64/MacroAssembler-arm64.h @@ -1552,49 +1552,46 @@ class MacroAssemblerCompat : public vixl void retn(Imm32 n) { // ip0 <- [sp]; sp += n; ret ip0 Ldr(vixl::ip0, MemOperand(GetStackPointer64(), ptrdiff_t(n.value), vixl::PostIndex)); syncStackPtr(); // SP is always used to transmit the stack between calls. Ret(vixl::ip0); } - void j(Condition code, Label* dest) { - b(dest, code); - } - void j(Label* dest) { - b(dest, Always); + void j(Condition cond, Label* dest) { + B(dest, cond); } void branch(Condition cond, Label* label) { - b(label, cond); + B(label, cond); } void branch(JitCode* target) { syncStackPtr(); addPendingJump(nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE); b(-1); // The jump target will be patched by executableCopy(). } void branch32(Condition cond, const Operand& lhs, Register rhs, Label* label) { // since rhs is an operand, do the compare backwards Cmp(ARMRegister(rhs, 32), lhs); - b(label, Assembler::InvertCmpCondition(cond)); + B(label, Assembler::InvertCmpCondition(cond)); } void branch32(Condition cond, const Operand& lhs, Imm32 rhs, Label* label) { ARMRegister l = lhs.reg(); Cmp(l, Operand(rhs.value)); - b(label, cond); + B(label, cond); } void branch32(Condition cond, Register lhs, Register rhs, Label* label) { cmp32(lhs, rhs); - b(label, cond); + B(label, cond); } void branch32(Condition cond, Register lhs, Imm32 imm, Label* label) { cmp32(lhs, imm); - b(label, cond); + B(label, cond); } void branch32(Condition cond, const Address& lhs, Register rhs, Label* label) { vixl::UseScratchRegisterScope temps(this); const Register scratch = temps.AcquireX().asUnsized(); MOZ_ASSERT(scratch != lhs.base); MOZ_ASSERT(scratch != rhs); load32(lhs, scratch); branch32(cond, scratch, rhs, label); @@ -1671,17 +1668,17 @@ class MacroAssemblerCompat : public vixl vixl::UseScratchRegisterScope temps(this); const ARMRegister scratch64 = temps.AcquireX(); load_bo = immPool64(scratch64, 
(uint64_t)label, &pe); } MOZ_ASSERT(!label->bound()); if (cond != Always) { Label notTaken; - b(&notTaken, Assembler::InvertCondition(cond)); + B(&notTaken, Assembler::InvertCondition(cond)); branch_bo = b(-1); bind(&notTaken); } else { nop(); branch_bo = b(-1); } label->use(branch_bo.getOffset()); return CodeOffsetJump(load_bo.getOffset(), pe.index()); @@ -2531,17 +2528,17 @@ class MacroAssemblerCompat : public vixl branch(Zero, label); branch(Overflow, label); } else { // truthy values are non-zero and not nan. // If it is overflow Label onFalse; branch(Zero, &onFalse); branch(Overflow, &onFalse); - b(label); + B(label); bind(&onFalse); } } Condition testBooleanTruthy(bool truthy, const ValueOperand& operand) { ARMRegister payload32(operand.valueReg(), 32); Tst(payload32, payload32); return truthy ? NonZero : Zero;
--- a/js/src/jit/arm64/vixl/Instructions-vixl.cpp +++ b/js/src/jit/arm64/vixl/Instructions-vixl.cpp @@ -206,16 +206,39 @@ LSDataSize CalcLSPairDataSize(LoadStoreP case LDP_x: case STP_d: case LDP_d: return LSDoubleWord; default: return LSWord; } } +int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) { + switch (branch_type) { + case UncondBranchType: + return ImmUncondBranch_width; + case CondBranchType: + return ImmCondBranch_width; + case CompareBranchType: + return ImmCmpBranch_width; + case TestBranchType: + return ImmTestBranch_width; + default: + VIXL_UNREACHABLE(); + return 0; + } +} + + +bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type, + int32_t offset) { + return is_intn(ImmBranchRangeBitwidth(branch_type), offset); +} + + const Instruction* Instruction::ImmPCOffsetTarget() const { const Instruction * base = this; ptrdiff_t offset; if (IsPCRelAddressing()) { // ADR and ADRP. offset = ImmPCRel(); if (Mask(PCRelAddressingMask) == ADRP) { base = AlignDown(base, kPageSize);
--- a/js/src/jit/arm64/vixl/Instructions-vixl.h +++ b/js/src/jit/arm64/vixl/Instructions-vixl.h @@ -266,16 +266,19 @@ class Instruction { ptrdiff_t ImmPCRawOffset() const; void SetBits32(int msb, int lsb, unsigned value); #define DEFINE_SETTERS(Name, HighBit, LowBit, Func) \ inline void Set##Name(unsigned n) { SetBits32(HighBit, LowBit, n); } INSTRUCTION_FIELDS_LIST(DEFINE_SETTERS) #undef DEFINE_SETTERS + static int ImmBranchRangeBitwidth(ImmBranchType branch_type); + static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset); + // Indicate whether Rd can be the stack pointer or the zero register. This // does not check that the instruction actually has an Rd field. Reg31Mode RdMode() const { // The following instructions use sp or wsp as Rd: // Add/sub (immediate) when not setting the flags. // Add/sub (extended) when not setting the flags. // Logical (immediate) when not setting the flags. // Otherwise, r31 is the zero register.
--- a/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp +++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp @@ -58,16 +58,35 @@ void MacroAssembler::B(Label* label, Bra break; default: VIXL_UNREACHABLE(); } } } +void MacroAssembler::B(Label* label, Condition cond) { + VIXL_ASSERT((cond != al) && (cond != nv)); + + if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) { + // If the label is out of range, invert the condition to use an + // unconditional branch, which has 26 bits instead of 19. + Label done; + b(&done, InvertCondition(cond)); + b(label); + bind(&done); + } else { + // TODO: Need to register a slot in a literal pool, so that we can + // write a branch instruction there and use that to branch in case + // the unbound label winds up being out of range. + b(label, cond); + } +} + + void MacroAssembler::And(const Register& rd, const Register& rn, const Operand& operand) { LogicalMacro(rd, rn, operand, AND); } void MacroAssembler::Ands(const Register& rd, const Register& rn, const Operand& operand) { LogicalMacro(rd, rn, operand, ANDS); }
--- a/js/src/jit/arm64/vixl/MacroAssembler-vixl.h +++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h @@ -323,20 +323,17 @@ class MacroAssembler : public js::jit::A return static_cast<BranchType>(type ^ 1); } void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1); void B(Label* label) { b(label); } - void B(Label* label, Condition cond) { - VIXL_ASSERT((cond != al) && (cond != nv)); - b(label, cond); - } + void B(Label* label, Condition cond); void B(Condition cond, Label* label) { B(label, cond); } void Bfi(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { VIXL_ASSERT(!rd.IsZero()); VIXL_ASSERT(!rn.IsZero()); bfi(rd, rn, lsb, width); } @@ -1071,16 +1068,20 @@ class MacroAssembler : public js::jit::A const CPURegister& dst2, const CPURegister& dst3); // Perform necessary maintenance operations before a push or pop. // // Note that size is per register, and is specified in bytes. void PrepareForPush(int count, int size); void PrepareForPop(int count, int size); + bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) { + return !Instruction::IsValidImmPCOffset(branch_type, nextOffset().diffB<int32_t>(label)); + } + #if DEBUG // Tell whether any of the macro instruction can be used. When false the // MacroAssembler will assert if a method which can emit a variable number // of instructions is called. #endif // The register to use as a stack pointer for stack operations. Register sp_;