Bug 1279248 - Part 7: Implement the 64bit variant of Add on x86, r=lth
author: Hannes Verschore <hv1989@gmail.com>
Fri, 29 Jul 2016 16:51:41 +0200
changeset 347328 e3e8bb6b8d812c9ec3f2003fe1cf0c7e2af8d3f2
parent 347327 02f604c9ad7330732c13792141aa24dc5f0c4d92
child 347329 5512359e559fa64dc8bfb0245766d9ebdd2540fc
push id: 6389
push user: raliiev@mozilla.com
push date: Mon, 19 Sep 2016 13:38:22 +0000
treeherder: mozilla-beta@01d67bfe6c81
reviewers: lth
bugs: 1279248
milestone: 50.0a1
Bug 1279248 - Part 7: Implement the 64bit variant of Add on x86, r=lth
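
This part moves 64-bit add code generation from CodeGeneratorX64 into the shared x86 code generator, so x64 and x86 use a single visitAddI64. LAddI64 gains Lhs/Rhs operand indices, and MacroAssembler gains add64(Imm64, Register64) (x86 and x64) and add64(const Operand&, Register64) (x64 only). The output register pair is expected to alias the lhs, so the add is performed in place; a constant rhs goes through the Imm64 overload, any other rhs through ToOperandOrRegister64.

On x64 the add is a single addq (addPtr(ImmWord) for immediates). On x86 it is split across the 32-bit halves, roughly (illustrative registers):

    addl $0x89abcdef, %eax   # low word, sets the carry flag
    adcl $0x01234567, %edx   # high word plus the carry

To support arbitrary high words, adcl_ir no longer asserts that its immediate fits in a sign-extended byte; it now falls back to the 4-byte immediate form.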
js/src/jit/MacroAssembler.h
js/src/jit/shared/LIR-shared.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/CodeGenerator-x64.h
js/src/jit/x64/MacroAssembler-x64-inl.h
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.h
js/src/jit/x86/BaseAssembler-x86.h
js/src/jit/x86/MacroAssembler-x86-inl.h
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -748,16 +748,18 @@ class MacroAssembler : public MacroAssem
     inline void addPtr(ImmWord imm, Register dest) PER_ARCH;
     inline void addPtr(ImmPtr imm, Register dest);
     inline void addPtr(Imm32 imm, const Address& dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
     inline void addPtr(Imm32 imm, const AbsoluteAddress& dest) DEFINED_ON(x86, x64);
     inline void addPtr(const Address& src, Register dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
 
     inline void add64(Register64 src, Register64 dest) PER_ARCH;
     inline void add64(Imm32 imm, Register64 dest) PER_ARCH;
+    inline void add64(Imm64 imm, Register64 dest) DEFINED_ON(x86, x64);
+    inline void add64(const Operand& src, Register64 dest) DEFINED_ON(x64);
 
     inline void addFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
 
     inline void addDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
     inline void addConstantDouble(double d, FloatRegister dest) DEFINED_ON(x86);
 
     inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH;
     inline void sub32(Register src, Register dest) PER_SHARED_ARCH;
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -3767,16 +3767,19 @@ class LAddI : public LBinaryMath<0>
         return mir_->toAdd();
     }
 };
 
 class LAddI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>
 {
   public:
     LIR_HEADER(AddI64)
+
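+    // Operand indices of the two int64 inputs; each input occupies INT64_PIECES slots.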
+    static const size_t Lhs = 0;
+    static const size_t Rhs = INT64_PIECES;
 };
 
 // Subtracts two integers, returning an integer value.
 class LSubI : public LBinaryMath<0>
 {
     bool recoversInput_;
 
   public:
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -282,30 +282,16 @@ CodeGeneratorX64::visitRotate64(LRotate6
         if (mir->isLeftRotate())
             masm.rolq_cl(input);
         else
             masm.rorq_cl(input);
     }
 }
 
 void
-CodeGeneratorX64::visitAddI64(LAddI64* lir)
-{
-    Register lhs = ToRegister(lir->getOperand(0));
-    const LAllocation* rhs = lir->getOperand(1);
-
-    MOZ_ASSERT(ToRegister(lir->getDef(0)) == lhs);
-
-    if (rhs->isConstant())
-        masm.addPtr(ImmWord(ToInt64(rhs)), lhs);
-    else
-        masm.addq(ToOperand(rhs), lhs);
-}
-
-void
 CodeGeneratorX64::visitSubI64(LSubI64* lir)
 {
     Register lhs = ToRegister(lir->getOperand(0));
     const LAllocation* rhs = lir->getOperand(1);
 
     MOZ_ASSERT(ToRegister(lir->getDef(0)) == lhs);
 
     if (rhs->isConstant())
--- a/js/src/jit/x64/CodeGenerator-x64.h
+++ b/js/src/jit/x64/CodeGenerator-x64.h
@@ -48,17 +48,16 @@ class CodeGeneratorX64 : public CodeGene
     void visitUnbox(LUnbox* unbox);
     void visitCompareB(LCompareB* lir);
     void visitCompareBAndBranch(LCompareBAndBranch* lir);
     void visitCompareBitwise(LCompareBitwise* lir);
     void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
     void visitCompareI64(LCompareI64* lir);
     void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
     void visitRotate64(LRotate64* lir);
-    void visitAddI64(LAddI64* lir);
     void visitSubI64(LSubI64* lir);
     void visitMulI64(LMulI64* lir);
     void visitDivOrModI64(LDivOrModI64* lir);
     void visitUDivOrMod64(LUDivOrMod64* lir);
     void visitNotI64(LNotI64* lir);
     void visitClzI64(LClzI64* lir);
     void visitCtzI64(LCtzI64* lir);
     void visitPopcntI64(LPopcntI64* lir);
--- a/js/src/jit/x64/MacroAssembler-x64-inl.h
+++ b/js/src/jit/x64/MacroAssembler-x64-inl.h
@@ -173,28 +173,40 @@ MacroAssembler::addPtr(Imm32 imm, const 
 
 void
 MacroAssembler::addPtr(const Address& src, Register dest)
 {
     addq(Operand(src), dest);
 }
 
 void
+MacroAssembler::add64(const Operand& src, Register64 dest)
+{
+    addq(src, dest.reg);
+}
+
+void
 MacroAssembler::add64(Register64 src, Register64 dest)
 {
     addq(src.reg, dest.reg);
 }
 
 void
 MacroAssembler::add64(Imm32 imm, Register64 dest)
 {
     addq(imm, dest.reg);
 }
 
 void
+MacroAssembler::add64(Imm64 imm, Register64 dest)
+{
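+    // The 64-bit immediate is folded into an addq, going through a scratch
+    // register when it does not fit in a signed 32-bit immediate.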
+    addPtr(ImmWord(imm.value), dest.reg);
+}
+
+void
 MacroAssembler::subPtr(Register src, Register dest)
 {
     subq(src, dest);
 }
 
 void
 MacroAssembler::subPtr(Register src, const Address& dest)
 {
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -890,16 +890,32 @@ CodeGeneratorX86Shared::visitAddI(LAddI*
             masm.j(Assembler::Overflow, ool->entry());
         } else {
             bailoutIf(Assembler::Overflow, ins->snapshot());
         }
     }
 }
 
 void
+CodeGeneratorX86Shared::visitAddI64(LAddI64* lir)
+{
+    const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+    const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
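+    // The output is expected to reuse the lhs registers, so the add is performed in place.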
+    MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+    if (IsConstant(rhs)) {
+        masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+        return;
+    }
+
+    masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void
 CodeGeneratorX86Shared::visitSubI(LSubI* ins)
 {
     if (ins->rhs()->isConstant())
         masm.subl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
     else
         masm.subl(ToOperand(ins->rhs()), ToRegister(ins->lhs()));
 
     if (ins->snapshot()) {
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
@@ -233,16 +233,17 @@ class CodeGeneratorX86Shared : public Co
     virtual void visitAbsF(LAbsF* ins);
     virtual void visitClzI(LClzI* ins);
     virtual void visitCtzI(LCtzI* ins);
     virtual void visitPopcntI(LPopcntI* ins);
     virtual void visitSqrtD(LSqrtD* ins);
     virtual void visitSqrtF(LSqrtF* ins);
     virtual void visitPowHalfD(LPowHalfD* ins);
     virtual void visitAddI(LAddI* ins);
+    virtual void visitAddI64(LAddI64* ins);
     virtual void visitSubI(LSubI* ins);
     virtual void visitMulI(LMulI* ins);
     virtual void visitDivI(LDivI* ins);
     virtual void visitDivPowTwoI(LDivPowTwoI* ins);
     virtual void visitDivOrModConstantI(LDivOrModConstantI* ins);
     virtual void visitModI(LModI* ins);
     virtual void visitModPowTwoI(LModPowTwoI* ins);
     virtual void visitBitNotI(LBitNotI* ins);
--- a/js/src/jit/x86/BaseAssembler-x86.h
+++ b/js/src/jit/x86/BaseAssembler-x86.h
@@ -18,19 +18,23 @@ class BaseAssemblerX86 : public BaseAsse
 {
   public:
 
     // Arithmetic operations:
 
     void adcl_ir(int32_t imm, RegisterID dst)
     {
         spew("adcl       $%d, %s", imm, GPReg32Name(dst));
-        MOZ_ASSERT(CAN_SIGN_EXTEND_8_32(imm));
-        m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_ADC);
-        m_formatter.immediate8s(imm);
+        if (CAN_SIGN_EXTEND_8_32(imm)) {
+            m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_ADC);
+            m_formatter.immediate8s(imm);
+        } else {
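+            // Immediate does not fit in a sign-extended byte; use the 4-byte immediate form.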
+            m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_ADC);
+            m_formatter.immediate32(imm);
+        }
     }
 
     void adcl_im(int32_t imm, const void* addr)
     {
         spew("adcl       %d, %p", imm, addr);
         if (CAN_SIGN_EXTEND_8_32(imm)) {
             m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_ADC);
             m_formatter.immediate8s(imm);
--- a/js/src/jit/x86/MacroAssembler-x86-inl.h
+++ b/js/src/jit/x86/MacroAssembler-x86-inl.h
@@ -166,16 +166,27 @@ MacroAssembler::add64(Register64 src, Re
 void
 MacroAssembler::add64(Imm32 imm, Register64 dest)
 {
     addl(imm, dest.low);
     adcl(Imm32(0), dest.high);
 }
 
 void
+MacroAssembler::add64(Imm64 imm, Register64 dest)
+{
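+    // If the low word of the immediate is zero, adding it cannot generate a
+    // carry, so only the high word needs updating.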
+    if (imm.low().value == 0) {
+        addl(imm.hi(), dest.high);
+        return;
+    }
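+    // Add the low words first, then fold the carry into the high-word add.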
+    addl(imm.low(), dest.low);
+    adcl(imm.hi(), dest.high);
+}
+
+void
 MacroAssembler::addConstantDouble(double d, FloatRegister dest)
 {
     Double* dbl = getDouble(d);
     if (!dbl)
         return;
     masm.vaddsd_mr(nullptr, dest.encoding(), dest.encoding());
     propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
 }