Bug 1279248 - Part 4: Implement the 64bit variant of Compare on x86, r=nbp
author Hannes Verschore <hv1989@gmail.com>
Fri, 29 Jul 2016 16:51:41 +0200
changeset 332359 8bc3e8bbbf524ea9888df6c341db15c2df62e555
parent 332358 c65db91b411f3bad4a0675c457c6a52e90fb630b
child 332360 27826b22e1406ba39e5bc6f11546f411b7ad7324
push id 9858
push user jlund@mozilla.com
push date Mon, 01 Aug 2016 14:37:10 +0000
treeherder mozilla-aurora@203106ef6cb6
reviewers nbp
bugs 1279248
milestone 50.0a1
js/src/jit/Lowering.cpp
js/src/jit/MacroAssembler.h
js/src/jit/shared/LIR-shared.h
js/src/jit/shared/LOpcodes-shared.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/CodeGenerator-x64.h
js/src/jit/x64/MacroAssembler-x64-inl.h
js/src/jit/x86-shared/Assembler-x86-shared.cpp
js/src/jit/x86-shared/Assembler-x86-shared.h
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/CodeGenerator-x86.h
js/src/jit/x86/MacroAssembler-x86-inl.h
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -847,20 +847,20 @@ LIRGenerator::visitTest(MTest* test)
             return;
         }
 
         // Compare and branch Int64.
         if (comp->compareType() == MCompare::Compare_Int64 ||
             comp->compareType() == MCompare::Compare_UInt64)
         {
             JSOp op = ReorderComparison(comp->jsop(), &left, &right);
-            LCompare64AndBranch* lir = new(alloc()) LCompare64AndBranch(comp, op,
-                                                                        useInt64Register(left),
-                                                                        useInt64OrConstant(right),
-                                                                        ifTrue, ifFalse);
+            LCompareI64AndBranch* lir = new(alloc()) LCompareI64AndBranch(comp, op,
+                                                                          useInt64Register(left),
+                                                                          useInt64OrConstant(right),
+                                                                          ifTrue, ifFalse);
             add(lir, test);
             return;
         }
 
         // Compare and branch doubles.
         if (comp->isDoubleComparison()) {
             LAllocation lhs = useRegister(left);
             LAllocation rhs = useRegister(right);
@@ -1093,17 +1093,17 @@ LIRGenerator::visitCompare(MCompare* com
         return;
     }
 
     // Compare Int64.
     if (comp->compareType() == MCompare::Compare_Int64 ||
         comp->compareType() == MCompare::Compare_UInt64)
     {
         JSOp op = ReorderComparison(comp->jsop(), &left, &right);
-        define(new(alloc()) LCompare64(op, useInt64Register(left), useInt64OrConstant(right)),
+        define(new(alloc()) LCompareI64(op, useInt64Register(left), useInt64OrConstant(right)),
                comp);
         return;
     }
 
     // Compare doubles.
     if (comp->isDoubleComparison()) {
         define(new(alloc()) LCompareD(useRegister(left), useRegister(right)), comp);
         return;
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -905,16 +905,23 @@ class MacroAssembler : public MacroAssem
     inline void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
 
     inline void branch32(Condition cond, const Operand& lhs, Register rhs, Label* label) DEFINED_ON(x86_shared);
     inline void branch32(Condition cond, const Operand& lhs, Imm32 rhs, Label* label) DEFINED_ON(x86_shared);
 
     inline void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
         DEFINED_ON(arm, arm64, mips_shared, x86, x64);
 
+    // The supported conditions are Equal, NotEqual, LessThan(orEqual), GreaterThan(orEqual),
+    // Below(orEqual) and Above(orEqual).
+    inline void branch64(Condition cond, Register64 lhs, Imm64 val, Label* label)
+        DEFINED_ON(x86, x64);
+    inline void branch64(Condition cond, Register64 lhs, Register64 rhs, Label* label)
+        DEFINED_ON(x86, x64);
+    // Only the NotEqual condition is allowed for the branch64 variants with Address as lhs.
     inline void branch64(Condition cond, const Address& lhs, Imm64 val, Label* label) PER_ARCH;
 
     // Compare the value at |lhs| with the value at |rhs|.  The scratch
     // register *must not* be the base of |lhs| or |rhs|.
     inline void branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
                          Label* label) PER_ARCH;
 
     inline void branchPtr(Condition cond, Register lhs, Register rhs, Label* label) PER_SHARED_ARCH;
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -2501,27 +2501,27 @@ class LCompare : public LInstructionHelp
     MCompare* mir() {
         return mir_->toCompare();
     }
     const char* extraName() const {
         return CodeName[jsop_];
     }
 };
 
-class LCompare64 : public LInstructionHelper<1, 2 * INT64_PIECES, 0>
+class LCompareI64 : public LInstructionHelper<1, 2 * INT64_PIECES, 0>
 {
     JSOp jsop_;
 
   public:
-    LIR_HEADER(Compare64)
+    LIR_HEADER(CompareI64)
 
     static const size_t Lhs = 0;
     static const size_t Rhs = INT64_PIECES;
 
-    LCompare64(JSOp jsop, const LInt64Allocation& left, const LInt64Allocation& right)
+    LCompareI64(JSOp jsop, const LInt64Allocation& left, const LInt64Allocation& right)
       : jsop_(jsop)
     {
         setInt64Operand(Lhs, left);
         setInt64Operand(Rhs, right);
     }
 
     JSOp jsop() const {
         return jsop_;
@@ -2529,30 +2529,30 @@ class LCompare64 : public LInstructionHe
     MCompare* mir() {
         return mir_->toCompare();
     }
     const char* extraName() const {
         return CodeName[jsop_];
     }
 };
 
-class LCompare64AndBranch : public LControlInstructionHelper<2, 2 * INT64_PIECES, 0>
+class LCompareI64AndBranch : public LControlInstructionHelper<2, 2 * INT64_PIECES, 0>
 {
     MCompare* cmpMir_;
     JSOp jsop_;
 
   public:
-    LIR_HEADER(Compare64AndBranch)
+    LIR_HEADER(CompareI64AndBranch)
 
     static const size_t Lhs = 0;
     static const size_t Rhs = INT64_PIECES;
 
-    LCompare64AndBranch(MCompare* cmpMir, JSOp jsop,
-                        const LInt64Allocation& left, const LInt64Allocation& right,
-                        MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+    LCompareI64AndBranch(MCompare* cmpMir, JSOp jsop,
+                         const LInt64Allocation& left, const LInt64Allocation& right,
+                         MBasicBlock* ifTrue, MBasicBlock* ifFalse)
       : cmpMir_(cmpMir), jsop_(jsop)
     {
         setInt64Operand(Lhs, left);
         setInt64Operand(Rhs, right);
         setSuccessor(0, ifTrue);
         setSuccessor(1, ifFalse);
     }
 
--- a/js/src/jit/shared/LOpcodes-shared.h
+++ b/js/src/jit/shared/LOpcodes-shared.h
@@ -121,18 +121,18 @@
     _(TestDAndBranch)               \
     _(TestFAndBranch)               \
     _(TestVAndBranch)               \
     _(TestOAndBranch)               \
     _(FunctionDispatch)             \
     _(ObjectGroupDispatch)          \
     _(Compare)                      \
     _(CompareAndBranch)             \
-    _(Compare64)                    \
-    _(Compare64AndBranch)           \
+    _(CompareI64)                   \
+    _(CompareI64AndBranch)          \
     _(CompareD)                     \
     _(CompareDAndBranch)            \
     _(CompareF)                     \
     _(CompareFAndBranch)            \
     _(CompareS)                     \
     _(CompareStrictS)               \
     _(CompareB)                     \
     _(CompareBAndBranch)            \
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -211,48 +211,57 @@ CodeGeneratorX64::visitCompareBitwiseAnd
     MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
                mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
 
     masm.cmpPtr(lhs.valueReg(), rhs.valueReg());
     emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
 }
 
 void
-CodeGeneratorX64::visitCompare64(LCompare64* lir)
+CodeGeneratorX64::visitCompareI64(LCompareI64* lir)
 {
     MCompare* mir = lir->mir();
     MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
                mir->compareType() == MCompare::Compare_UInt64);
 
-    Register lhs = ToRegister(lir->getOperand(0));
-    const LAllocation* rhs = lir->getOperand(1);
+    const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+    const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+    Register lhsReg = ToRegister64(lhs).reg;
+    Register output = ToRegister(lir->output());
 
-    if (rhs->isConstant())
-        masm.cmpPtr(lhs, ImmWord(ToInt64(rhs)));
-    else
-        masm.cmpPtr(lhs, ToOperand(rhs));
+    if (IsConstant(rhs)) {
+        ImmWord imm = ImmWord(ToInt64(rhs));
+        masm.cmpPtr(lhsReg, imm);
+    } else {
+        Register rhsReg = ToRegister64(rhs).reg;
+        masm.cmpPtr(lhsReg, Operand(rhsReg));
+    }
 
     bool isSigned = mir->compareType() == MCompare::Compare_Int64;
-    masm.emitSet(JSOpToCondition(lir->jsop(), isSigned), ToRegister(lir->output()));
+    masm.emitSet(JSOpToCondition(lir->jsop(), isSigned), output);
 }
 
 void
-CodeGeneratorX64::visitCompare64AndBranch(LCompare64AndBranch* lir)
+CodeGeneratorX64::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
 {
     MCompare* mir = lir->cmpMir();
     MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
                mir->compareType() == MCompare::Compare_UInt64);
 
-    Register lhs = ToRegister(lir->getOperand(0));
-    const LAllocation* rhs = lir->getOperand(1);
+    const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64AndBranch::Lhs);
+    const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64AndBranch::Rhs);
+    Register lhsReg = ToRegister64(lhs).reg;
 
-    if (rhs->isConstant())
-        masm.cmpPtr(lhs, ImmWord(ToInt64(rhs)));
-    else
-        masm.cmpPtr(lhs, ToOperand(rhs));
+    if (IsConstant(rhs)) {
+        ImmWord imm = ImmWord(ToInt64(rhs));
+        masm.cmpPtr(lhsReg, imm);
+    } else {
+        Register rhsReg = ToRegister64(rhs).reg;
+        masm.cmpPtr(lhsReg, Operand(rhsReg));
+    }
 
     bool isSigned = mir->compareType() == MCompare::Compare_Int64;
     emitBranch(JSOpToCondition(lir->jsop(), isSigned), lir->ifTrue(), lir->ifFalse());
 }
 
 void
 CodeGeneratorX64::visitBitOpI64(LBitOpI64* lir)
 {
--- a/js/src/jit/x64/CodeGenerator-x64.h
+++ b/js/src/jit/x64/CodeGenerator-x64.h
@@ -45,18 +45,18 @@ class CodeGeneratorX64 : public CodeGene
   public:
     void visitValue(LValue* value);
     void visitBox(LBox* box);
     void visitUnbox(LUnbox* unbox);
     void visitCompareB(LCompareB* lir);
     void visitCompareBAndBranch(LCompareBAndBranch* lir);
     void visitCompareBitwise(LCompareBitwise* lir);
     void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
-    void visitCompare64(LCompare64* lir);
-    void visitCompare64AndBranch(LCompare64AndBranch* lir);
+    void visitCompareI64(LCompareI64* lir);
+    void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
     void visitBitOpI64(LBitOpI64* lir);
     void visitShiftI64(LShiftI64* lir);
     void visitRotate64(LRotate64* lir);
     void visitAddI64(LAddI64* lir);
     void visitSubI64(LSubI64* lir);
     void visitMulI64(LMulI64* lir);
     void visitDivOrModI64(LDivOrModI64* lir);
     void visitUDivOrMod64(LUDivOrMod64* lir);
--- a/js/src/jit/x64/MacroAssembler-x64-inl.h
+++ b/js/src/jit/x64/MacroAssembler-x64-inl.h
@@ -378,16 +378,42 @@ void
 MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
 {
     ScratchRegisterScope scratch(*this);
     mov(lhs, scratch);
     branch32(cond, Address(scratch, 0), rhs, label);
 }
 
 void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* label)
+{
+    MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+               cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual ||
+               cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual ||
+               cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+               cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+               "other condition codes not supported");
+
+    branchPtr(cond, lhs.reg, ImmWord(val.value), label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* label)
+{
+    MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+               cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual ||
+               cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual ||
+               cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+               cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+               "other condition codes not supported");
+
+    branchPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+void
 MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label)
 {
     MOZ_ASSERT(cond == Assembler::NotEqual,
                "other condition codes not supported");
 
     branchPtr(cond, lhs, ImmWord(val.value), label);
 }
 
--- a/js/src/jit/x86-shared/Assembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.cpp
@@ -171,16 +171,61 @@ AssemblerX86Shared::InvertCondition(Cond
         return AboveOrEqual;
       case BelowOrEqual:
         return Above;
       default:
         MOZ_CRASH("unexpected condition");
     }
 }
 
+AssemblerX86Shared::Condition
+AssemblerX86Shared::UnsignedCondition(Condition cond)
+{
+    switch (cond) {
+      case Zero:
+      case NonZero:
+        return cond;
+      case LessThan:
+      case Below:
+        return Below;
+      case LessThanOrEqual:
+      case BelowOrEqual:
+        return BelowOrEqual;
+      case GreaterThan:
+      case Above:
+        return Above;
+      case GreaterThanOrEqual:
+      case AboveOrEqual:
+        return AboveOrEqual;
+      default:
+        MOZ_CRASH("unexpected condition");
+    }
+}
+
+AssemblerX86Shared::Condition
+AssemblerX86Shared::ConditionWithoutEqual(Condition cond)
+{
+    switch (cond) {
+      case LessThan:
+      case LessThanOrEqual:
+        return LessThan;
+      case Below:
+      case BelowOrEqual:
+        return Below;
+      case GreaterThan:
+      case GreaterThanOrEqual:
+        return GreaterThan;
+      case Above:
+      case AboveOrEqual:
+        return Above;
+      default:
+        MOZ_CRASH("unexpected condition");
+    }
+}
+
 void
 AssemblerX86Shared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
                                                 const Disassembler::HeapAccess& heapAccess)
 {
 #ifdef DEBUG
     Disassembler::VerifyHeapAccess(masm.data() + begin, masm.data() + end, heapAccess);
 #endif
 }
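
The two new helpers split a 64-bit relational comparison into a pair of 32-bit ones: the high words carry the sign, so they are compared with the strict form of the requested condition (ConditionWithoutEqual drops the OrEqual half, because equal high words must fall through to the low words rather than decide the result), while the low words are pure magnitude and must always be compared unsigned (UnsignedCondition), whatever the signedness of the original comparison. A minimal standalone sketch, not part of the patch, of the pitfall the unsigned low-word compare avoids:

#include <cstdint>
#include <cstdio>

int main() {
    // Two positive 64-bit values whose high words are both zero: the result
    // of a signed 64-bit comparison is decided entirely by the low words, and
    // those must be compared as unsigned 32-bit quantities.
    int64_t a = 0x0000000080000000LL;  // high = 0x00000000, low = 0x80000000
    int64_t b = 0x0000000000000001LL;  // high = 0x00000000, low = 0x00000001

    uint32_t aLow = uint32_t(a), bLow = uint32_t(b);

    bool reference = a < b;                            // false: 2^31 is not less than 1
    bool unsignedLow = aLow < bLow;                    // false: matches the reference
    bool signedLow = int32_t(aLow) < int32_t(bLow);    // true: 0x80000000 reads as INT32_MIN

    printf("a < b: %d  unsigned low: %d  signed low: %d\n",
           reference, unsignedLow, signedLow);
    return 0;
}
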
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -376,16 +376,18 @@ class AssemblerX86Shared : public Assemb
 
     static void StaticAsserts() {
         // DoubleConditionBits should not interfere with x86 condition codes.
         JS_STATIC_ASSERT(!((Equal | NotEqual | Above | AboveOrEqual | Below |
                             BelowOrEqual | Parity | NoParity) & DoubleConditionBits));
     }
 
     static Condition InvertCondition(Condition cond);
+    static Condition UnsignedCondition(Condition cond);
+    static Condition ConditionWithoutEqual(Condition cond);
 
     // Return the primary condition to test. Some primary conditions may not
     // handle NaNs properly and may therefore require a secondary condition.
     // Use NaNCondFromDoubleCondition to determine what else is needed.
     static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
         return static_cast<Condition>(cond & ~DoubleConditionBits);
     }
 
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -1183,8 +1183,128 @@ CodeGeneratorX86::visitWasmTruncateToInt
         masm.bind(&done);
         return;
     }
 
     emitWasmSignedTruncateToInt32(ool, output);
 
     masm.bind(ool->rejoin());
 }
+
+void
+CodeGeneratorX86::visitCompareI64(LCompareI64* lir)
+{
+    MCompare* mir = lir->mir();
+    MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+               mir->compareType() == MCompare::Compare_UInt64);
+
+    const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+    const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+    Register64 lhsRegs = ToRegister64(lhs);
+    Register output = ToRegister(lir->output());
+
+    bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+    Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+    Label done;
+
+    masm.move32(Imm32(1), output);
+
+    if (IsConstant(rhs)) {
+        Imm64 imm = Imm64(ToInt64(rhs));
+        masm.branch64(condition, lhsRegs, imm, &done);
+    } else {
+        Register64 rhsRegs = ToRegister64(rhs);
+        masm.branch64(condition, lhsRegs, rhsRegs, &done);
+    }
+
+    masm.xorl(output, output);
+    masm.bind(&done);
+}
+
+void
+CodeGeneratorX86::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
+{
+    MCompare* mir = lir->cmpMir();
+    MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+               mir->compareType() == MCompare::Compare_UInt64);
+
+    const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64AndBranch::Lhs);
+    const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64AndBranch::Rhs);
+    Register64 lhsRegs = ToRegister64(lhs);
+
+    bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+    Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+    if (IsConstant(rhs)) {
+        Imm64 imm = Imm64(ToInt64(rhs));
+        switch (lir->jsop()) {
+          case JSOP_EQ:
+          case JSOP_STRICTEQ:
+            masm.cmp32(lhsRegs.high, imm.hi());
+            jumpToBlock(lir->ifFalse(), Assembler::NotEqual);
+            masm.cmp32(lhsRegs.low, imm.low());
+            emitBranch(condition, lir->ifTrue(), lir->ifFalse());
+            break;
+          case JSOP_NE:
+          case JSOP_STRICTNE:
+            masm.cmp32(lhsRegs.high, imm.hi());
+            jumpToBlock(lir->ifTrue(), Assembler::NotEqual);
+            masm.cmp32(lhsRegs.low, imm.low());
+            emitBranch(condition, lir->ifTrue(), lir->ifFalse());
+            break;
+          case JSOP_LT:
+          case JSOP_LE:
+          case JSOP_GT:
+          case JSOP_GE: {
+            Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(condition);
+            Assembler::Condition cond2 = Assembler::ConditionWithoutEqual(Assembler::InvertCondition(condition));
+            Assembler::Condition cond3 = Assembler::UnsignedCondition(condition);
+
+            masm.cmp32(lhsRegs.high, imm.hi());
+            jumpToBlock(lir->ifTrue(), cond1);
+            jumpToBlock(lir->ifFalse(), cond2);
+            masm.cmp32(lhsRegs.low, imm.low());
+            jumpToBlock(lir->ifTrue(), cond3);
+            jumpToBlock(lir->ifFalse());
+            break;
+          }
+          default:
+            MOZ_CRASH("unexpected op");
+        }
+        return;
+    }
+
+    Register64 rhsRegs = ToRegister64(rhs);
+    switch (lir->jsop()) {
+      case JSOP_EQ:
+      case JSOP_STRICTEQ:
+        masm.cmp32(lhsRegs.high, rhsRegs.high);
+        jumpToBlock(lir->ifFalse(), Assembler::NotEqual);
+        masm.cmp32(lhsRegs.low, rhsRegs.low);
+        emitBranch(condition, lir->ifTrue(), lir->ifFalse());
+        break;
+      case JSOP_NE:
+      case JSOP_STRICTNE:
+        masm.cmp32(lhsRegs.high, rhsRegs.high);
+        jumpToBlock(lir->ifTrue(), Assembler::NotEqual);
+        masm.cmp32(lhsRegs.low, rhsRegs.low);
+        emitBranch(condition, lir->ifTrue(), lir->ifFalse());
+        break;
+      case JSOP_LT:
+      case JSOP_LE:
+      case JSOP_GT:
+      case JSOP_GE: {
+        Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(condition);
+        Assembler::Condition cond2 = Assembler::ConditionWithoutEqual(Assembler::InvertCondition(condition));
+        Assembler::Condition cond3 = Assembler::UnsignedCondition(condition);
+
+        masm.cmp32(lhsRegs.high, rhsRegs.high);
+        jumpToBlock(lir->ifTrue(), cond1);
+        jumpToBlock(lir->ifFalse(), cond2);
+        masm.cmp32(lhsRegs.low, rhsRegs.low);
+        jumpToBlock(lir->ifTrue(), cond3);
+        jumpToBlock(lir->ifFalse());
+        break;
+      }
+      default:
+        MOZ_CRASH("unexpected op");
+    }
+}
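
In the hunk above, visitCompareI64 materializes its result by loading 1 into the output register and taking the 64-bit branch over the clearing xorl, while visitCompareI64AndBranch branches on the two 32-bit halves directly. For JSOP_EQ/JSOP_STRICTEQ a mismatch in the high words settles the branch immediately and only equal high words reach the low-word compare; the ordered operators follow the same three-condition scheme as the new MacroAssembler::branch64 (see the sketch at the end of this page). A standalone model, not part of the patch, of the equality path; the goto labels are illustrative stand-ins for the LIR successor blocks:

#include <cstdint>
#include <cstdio>

static const char*
equal64(uint64_t lhs, uint64_t rhs)
{
    uint32_t lhsHigh = uint32_t(lhs >> 32), rhsHigh = uint32_t(rhs >> 32);
    uint32_t lhsLow = uint32_t(lhs), rhsLow = uint32_t(rhs);

    // masm.cmp32(lhsRegs.high, rhsRegs.high); jumpToBlock(ifFalse, NotEqual);
    if (lhsHigh != rhsHigh)
        goto ifFalse;
    // masm.cmp32(lhsRegs.low, rhsRegs.low); emitBranch(Equal, ifTrue, ifFalse);
    if (lhsLow == rhsLow)
        goto ifTrue;
    goto ifFalse;

  ifTrue:
    return "ifTrue";
  ifFalse:
    return "ifFalse";
}

int
main()
{
    printf("%s\n", equal64(0x100000000ull, 0x100000000ull)); // ifTrue
    printf("%s\n", equal64(0x100000000ull, 0x000000000ull)); // ifFalse
    printf("%s\n", equal64(0x000000000ull, 0x000000001ull)); // ifFalse
    return 0;
}
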
--- a/js/src/jit/x86/CodeGenerator-x86.h
+++ b/js/src/jit/x86/CodeGenerator-x86.h
@@ -68,16 +68,19 @@ class CodeGeneratorX86 : public CodeGene
     void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
     void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
     void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
     void visitWasmTruncateToInt32(LWasmTruncateToInt32* ins);
 
     void visitOutOfLineTruncate(OutOfLineTruncate* ool);
     void visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32* ool);
 
+    void visitCompareI64(LCompareI64* lir);
+    void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
+
   private:
     void asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg,
                                    const MWasmMemoryAccess* access);
 };
 
 typedef CodeGeneratorX86 CodeGeneratorSpecific;
 
 } // namespace jit
--- a/js/src/jit/x86/MacroAssembler-x86-inl.h
+++ b/js/src/jit/x86/MacroAssembler-x86-inl.h
@@ -304,16 +304,100 @@ MacroAssembler::branch32(Condition cond,
 void
 MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
 {
     cmpl(rhs, lhs);
     j(cond, label);
 }
 
 void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* label)
+{
+    Label fail;
+
+    switch (cond) {
+      case Assembler::Equal:
+        branch32(Assembler::NotEqual, lhs.low, val.low(), &fail);
+        branch32(Assembler::Equal, lhs.high, val.hi(), label);
+        break;
+      case Assembler::NotEqual:
+        branch32(Assembler::NotEqual, lhs.low, val.low(), label);
+        branch32(Assembler::NotEqual, lhs.high, val.hi(), label);
+        break;
+      case Assembler::LessThan:
+      case Assembler::LessThanOrEqual:
+      case Assembler::GreaterThan:
+      case Assembler::GreaterThanOrEqual:
+      case Assembler::Below:
+      case Assembler::BelowOrEqual:
+      case Assembler::Above:
+      case Assembler::AboveOrEqual: {
+        Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+        Assembler::Condition cond2 =
+            Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+        Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
+        cmp32(lhs.high, val.hi());
+        j(cond1, label);
+        j(cond2, &fail);
+        cmp32(lhs.low, val.low());
+        j(cond3, label);
+        break;
+      }
+      default:
+        MOZ_CRASH("Condition code not supported");
+        break;
+    }
+
+    bind(&fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* label)
+{
+    Label fail;
+
+    switch (cond) {
+      case Assembler::Equal:
+        branch32(Assembler::NotEqual, lhs.low, rhs.low, &fail);
+        branch32(Assembler::Equal, lhs.high, rhs.high, label);
+        break;
+      case Assembler::NotEqual:
+        branch32(Assembler::NotEqual, lhs.low, rhs.low, label);
+        branch32(Assembler::NotEqual, lhs.high, rhs.high, label);
+        break;
+      case Assembler::LessThan:
+      case Assembler::LessThanOrEqual:
+      case Assembler::GreaterThan:
+      case Assembler::GreaterThanOrEqual:
+      case Assembler::Below:
+      case Assembler::BelowOrEqual:
+      case Assembler::Above:
+      case Assembler::AboveOrEqual: {
+        Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+        Assembler::Condition cond2 =
+            Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+        Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
+        cmp32(lhs.high, rhs.high);
+        j(cond1, label);
+        j(cond2, &fail);
+        cmp32(lhs.low, rhs.low);
+        j(cond3, label);
+        break;
+      }
+      default:
+        MOZ_CRASH("Condition code not supported");
+        break;
+    }
+
+    bind(&fail);
+}
+
+void
 MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label)
 {
     MOZ_ASSERT(cond == Assembler::NotEqual,
                "other condition codes not supported");
 
     branch32(cond, lhs, val.firstHalf(), label);
     branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), val.secondHalf(), label);
 }
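
For the ordered conditions the new x86 branch64 emits three jumps: accept on the strict form of the condition applied to the high words, reject on the strict form of the inverted condition, and fall through to an always-unsigned compare of the low words when the high words are equal. The standalone model below, which is not part of the patch, mirrors that jump sequence and checks it against direct 64-bit comparisons; the Cond enum and the three helper functions are simplified stand-ins for Assembler::Condition, ConditionWithoutEqual, InvertCondition and UnsignedCondition.

#include <cassert>
#include <cstdint>

enum Cond { LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual,
            Below, BelowOrEqual, Above, AboveOrEqual };

static Cond ConditionWithoutEqual(Cond c) {
    switch (c) {
      case LessThan: case LessThanOrEqual:       return LessThan;
      case GreaterThan: case GreaterThanOrEqual: return GreaterThan;
      case Below: case BelowOrEqual:             return Below;
      default:                                   return Above;
    }
}

static Cond InvertCondition(Cond c) {
    switch (c) {
      case LessThan:           return GreaterThanOrEqual;
      case LessThanOrEqual:    return GreaterThan;
      case GreaterThan:        return LessThanOrEqual;
      case GreaterThanOrEqual: return LessThan;
      case Below:              return AboveOrEqual;
      case BelowOrEqual:       return Above;
      case Above:              return BelowOrEqual;
      default:                 return Below;     // AboveOrEqual
    }
}

static Cond UnsignedCondition(Cond c) {
    switch (c) {
      case LessThan: case Below:               return Below;
      case LessThanOrEqual: case BelowOrEqual: return BelowOrEqual;
      case GreaterThan: case Above:            return Above;
      default:                                 return AboveOrEqual;
    }
}

// What cmp32 followed by j(cond) decides for one pair of 32-bit words.
static bool holds32(uint32_t a, uint32_t b, Cond c) {
    int32_t sa = int32_t(a), sb = int32_t(b);
    switch (c) {
      case LessThan:           return sa < sb;
      case LessThanOrEqual:    return sa <= sb;
      case GreaterThan:        return sa > sb;
      case GreaterThanOrEqual: return sa >= sb;
      case Below:              return a < b;
      case BelowOrEqual:       return a <= b;
      case Above:              return a > b;
      default:                 return a >= b;   // AboveOrEqual
    }
}

// The jump sequence branch64 emits for the ordered conditions.
static bool branch64Taken(uint64_t lhs, uint64_t rhs, Cond c) {
    uint32_t lhsHigh = uint32_t(lhs >> 32), rhsHigh = uint32_t(rhs >> 32);
    if (holds32(lhsHigh, rhsHigh, ConditionWithoutEqual(c)))                  // j(cond1, label)
        return true;
    if (holds32(lhsHigh, rhsHigh, ConditionWithoutEqual(InvertCondition(c)))) // j(cond2, &fail)
        return false;
    return holds32(uint32_t(lhs), uint32_t(rhs), UnsignedCondition(c));       // j(cond3, label)
}

int main() {
    const uint64_t vals[] = { 0, 1, 0x7fffffffull, 0x80000000ull, 0xffffffffull,
                              0x100000000ull, 0x8000000000000000ull,
                              0xffffffffffffffffull };
    for (uint64_t a : vals) {
        for (uint64_t b : vals) {
            int64_t sa = int64_t(a), sb = int64_t(b);
            assert(branch64Taken(a, b, LessThan)           == (sa < sb));
            assert(branch64Taken(a, b, LessThanOrEqual)    == (sa <= sb));
            assert(branch64Taken(a, b, GreaterThan)        == (sa > sb));
            assert(branch64Taken(a, b, GreaterThanOrEqual) == (sa >= sb));
            assert(branch64Taken(a, b, Below)              == (a < b));
            assert(branch64Taken(a, b, AboveOrEqual)       == (a >= b));
        }
    }
    return 0;
}
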