Bug 1289054: Part 25: Don't reuse input during lowering for int 64 values on 32 bit platforms, r=bbouvier
author: Hannes Verschore <hv1989@gmail.com>
date: Fri, 29 Jul 2016 16:53:50 +0200
changeset: 349444 5b6fd86e965ec386e93ac060375dd8639bd99944
parent: 349443 0f0fe678a40ae291bf1ee4baec03da61e63635cf
child: 349445 3fcedf633473cbfd56e0a192a700b02b89955aac
push id: 1230
push user: jlund@mozilla.com
push date: Mon, 31 Oct 2016 18:13:35 +0000
treeherder: mozilla-release@5e06e3766db2
reviewers: bbouvier
bugs: 1289054
milestone: 50.0a1
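In short, the patch switches the int64 lowerings used on 32-bit targets (ARM and x86) from defineInt64ReuseInput, which forces the output register pair to alias the left-hand input, to defineInt64, which gives the output its own pair; the code generators then copy the lhs into the output (masm.move64) before operating on it. On x64, a 64-bit target, the reuse-input lowering is kept: lowerForShiftInt64 and visitAsmSelect move out of the shared x86 code into Lowering-x64.cpp unchanged, while the new x86 and ARM versions use defineInt64. Below is a minimal, self-contained C++ sketch contrasting the two codegen patterns; Register64 and the helpers are toy stand-ins for illustration, not SpiderMonkey's real MacroAssembler or jit classes.

// Toy illustration of the two patterns; not SpiderMonkey code.
#include <cassert>
#include <cstdint>

struct Register64 { uint32_t lo; uint32_t hi; };  // int64 held in two 32-bit registers

// Old pattern (defineInt64ReuseInput): codegen asserts out == lhs and
// operates on the lhs pair in place.
void addI64ReuseInput(Register64& lhsIsOut, const Register64& rhs)
{
    uint64_t a = (uint64_t(lhsIsOut.hi) << 32) | lhsIsOut.lo;
    uint64_t b = (uint64_t(rhs.hi) << 32) | rhs.lo;
    uint64_t r = a + b;
    lhsIsOut = { uint32_t(r), uint32_t(r >> 32) };
}

// New pattern (defineInt64): out may differ from lhs, so codegen first
// copies lhs into out (masm.move64 in the patch) and then adds into out.
void addI64SeparateOut(const Register64& lhs, const Register64& rhs, Register64& out)
{
    out = lhs;                                     // masm.move64(ToRegister64(lhs), out)
    uint64_t a = (uint64_t(out.hi) << 32) | out.lo;
    uint64_t b = (uint64_t(rhs.hi) << 32) | rhs.lo;
    uint64_t r = a + b;
    out = { uint32_t(r), uint32_t(r >> 32) };      // masm.add64(rhs, out)
}

int main()
{
    Register64 lhs{3, 0}, rhs{0xffffffffu, 0}, out{};
    addI64SeparateOut(lhs, rhs, out);
    assert(out.lo == 2 && out.hi == 1);            // 3 + 0xffffffff = 0x1'00000002
    return 0;
}

Note that the x86-shared code generator guards the copy with "if (ToRegister64(lhs) != out)", whereas the ARM version performs the move unconditionally.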
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/Lowering-arm.cpp
js/src/jit/shared/Lowering-shared-inl.h
js/src/jit/x64/Lowering-x64.cpp
js/src/jit/x64/Lowering-x64.h
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86-shared/Lowering-x86-shared.cpp
js/src/jit/x86-shared/Lowering-x86-shared.h
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/Lowering-x86.cpp
js/src/jit/x86/Lowering-x86.h
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -292,25 +292,26 @@ CodeGeneratorARM::visitAddI(LAddI* ins)
         bailoutIf(Assembler::Overflow, ins->snapshot());
 }
 
 void
 CodeGeneratorARM::visitAddI64(LAddI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
     const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
-
-    MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+    Register64 out = ToOutRegister64(lir);
+
+    masm.move64(ToRegister64(lhs), out);
 
     if (IsConstant(rhs)) {
-        masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+        masm.add64(Imm64(ToInt64(rhs)), out);
         return;
     }
 
-    masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+    masm.add64(ToOperandOrRegister64(rhs), out);
 }
 
 void
 CodeGeneratorARM::visitSubI(LSubI* ins)
 {
     const LAllocation* lhs = ins->getOperand(0);
     const LAllocation* rhs = ins->getOperand(1);
     const LDefinition* dest = ins->getDef(0);
@@ -326,25 +327,26 @@ CodeGeneratorARM::visitSubI(LSubI* ins)
         bailoutIf(Assembler::Overflow, ins->snapshot());
 }
 
 void
 CodeGeneratorARM::visitSubI64(LSubI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
     const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
-
-    MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+    Register64 out = ToOutRegister64(lir);
+
+    masm.move64(ToRegister64(lhs), out);
 
     if (IsConstant(rhs)) {
-        masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+        masm.sub64(Imm64(ToInt64(rhs)), out);
         return;
     }
 
-    masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+    masm.sub64(ToOperandOrRegister64(rhs), out);
 }
 
 void
 CodeGeneratorARM::visitMulI(LMulI* ins)
 {
     const LAllocation* lhs = ins->getOperand(0);
     const LAllocation* rhs = ins->getOperand(1);
     const LDefinition* dest = ins->getDef(0);
@@ -458,49 +460,50 @@ CodeGeneratorARM::visitMulI(LMulI* ins)
     }
 }
 
 void
 CodeGeneratorARM::visitMulI64(LMulI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
     const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
-
-    MOZ_ASSERT(ToRegister64(lhs) == ToOutRegister64(lir));
+    Register64 out = ToOutRegister64(lir);
+
+    masm.move64(ToRegister64(lhs), out);
 
     if (IsConstant(rhs)) {
         int64_t constant = ToInt64(rhs);
         switch (constant) {
           case -1:
-            masm.neg64(ToRegister64(lhs));
+            masm.neg64(out);
             return;
           case 0:
-            masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+            masm.xor64(out, out);
             return;
           case 1:
             // nop
             return;
           case 2:
-            masm.add64(ToRegister64(lhs), ToRegister64(lhs));
+            masm.add64(out, out);
             return;
           default:
             if (constant > 0) {
                 // Use shift if constant is power of 2.
                 int32_t shift = mozilla::FloorLog2(constant);
                 if (int64_t(1) << shift == constant) {
-                    masm.lshift64(Imm32(shift), ToRegister64(lhs));
+                    masm.lshift64(Imm32(shift), out);
                     return;
                 }
             }
             Register temp = ToTempRegisterOrInvalid(lir->temp());
-            masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+            masm.mul64(Imm64(constant), out, temp);
         }
     } else {
         Register temp = ToTempRegisterOrInvalid(lir->temp());
-        masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+        masm.mul64(ToOperandOrRegister64(rhs), out, temp);
     }
 }
 
 void
 CodeGeneratorARM::divICommon(MDiv* mir, Register lhs, Register rhs, Register output,
                              LSnapshot* snapshot, Label& done)
 {
     if (mir->canBeNegativeOverflow()) {
@@ -3424,112 +3427,116 @@ CodeGeneratorARM::visitCompareI64AndBran
     }
 }
 
 void
 CodeGeneratorARM::visitShiftI64(LShiftI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
     LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
-
-    MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+    Register64 out = ToOutRegister64(lir);
+
+    masm.move64(ToRegister64(lhs), out);
 
     if (rhs->isConstant()) {
         int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
         switch (lir->bitop()) {
           case JSOP_LSH:
             if (shift)
-                masm.lshift64(Imm32(shift), ToRegister64(lhs));
+                masm.lshift64(Imm32(shift), out);
             break;
           case JSOP_RSH:
             if (shift)
-                masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+                masm.rshift64Arithmetic(Imm32(shift), out);
             break;
           case JSOP_URSH:
             if (shift)
-                masm.rshift64(Imm32(shift), ToRegister64(lhs));
+                masm.rshift64(Imm32(shift), out);
             break;
           default:
             MOZ_CRASH("Unexpected shift op");
         }
         return;
     }
 
     switch (lir->bitop()) {
       case JSOP_LSH:
-        masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
+        masm.lshift64(ToRegister(rhs), out);
         break;
       case JSOP_RSH:
-        masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
+        masm.rshift64Arithmetic(ToRegister(rhs), out);
         break;
       case JSOP_URSH:
-        masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
+        masm.rshift64(ToRegister(rhs), out);
         break;
       default:
         MOZ_CRASH("Unexpected shift op");
     }
 }
 
 void
 CodeGeneratorARM::visitBitOpI64(LBitOpI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
     const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
-
-    MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+    Register64 out = ToOutRegister64(lir);
+
+    masm.move64(ToRegister64(lhs), out);
 
     switch (lir->bitop()) {
       case JSOP_BITOR:
         if (IsConstant(rhs))
-            masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+            masm.or64(Imm64(ToInt64(rhs)), out);
         else
-            masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+            masm.or64(ToOperandOrRegister64(rhs), out);
         break;
       case JSOP_BITXOR:
         if (IsConstant(rhs))
-            masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+            masm.xor64(Imm64(ToInt64(rhs)), out);
         else
-            masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+            masm.xor64(ToOperandOrRegister64(rhs), out);
         break;
       case JSOP_BITAND:
         if (IsConstant(rhs))
-            masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+            masm.and64(Imm64(ToInt64(rhs)), out);
         else
-            masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+            masm.and64(ToOperandOrRegister64(rhs), out);
         break;
       default:
         MOZ_CRASH("unexpected binary opcode");
     }
 }
 
 void
 CodeGeneratorARM::visitRotateI64(LRotateI64* lir)
 {
     MRotate* mir = lir->mir();
     LAllocation* count = lir->count();
 
     Register64 input = ToRegister64(lir->input());
     Register64 output = ToOutRegister64(lir);
     Register temp = ToTempRegisterOrInvalid(lir->temp());
 
+    masm.move64(input, output);
+
     if (count->isConstant()) {
         int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
         if (!c) {
             masm.move64(input, output);
             return;
         }
         if (mir->isLeftRotate())
-            masm.rotateLeft64(Imm32(c), input, output, temp);
+            masm.rotateLeft64(Imm32(c), output, output, temp);
         else
-            masm.rotateRight64(Imm32(c), input, output, temp);
+            masm.rotateRight64(Imm32(c), output, output, temp);
     } else {
         if (mir->isLeftRotate())
-            masm.rotateLeft64(ToRegister(count), input, output, temp);
+            masm.rotateLeft64(ToRegister(count), output, output, temp);
         else
-            masm.rotateRight64(ToRegister(count), input, output, temp);
+            masm.rotateRight64(ToRegister(count), output, output, temp);
     }
 }
 
 void
 CodeGeneratorARM::visitAsmJSPassStackArgI64(LAsmJSPassStackArgI64* ins)
 {
     const MAsmJSPassStackArg* mir = ins->mir();
     Address dst(StackPointer, mir->spOffset());
@@ -3538,20 +3545,21 @@ CodeGeneratorARM::visitAsmJSPassStackArg
     else
         masm.store64(ToRegister64(ins->arg()), dst);
 }
 
 void
 CodeGeneratorARM::visitAsmSelectI64(LAsmSelectI64* lir)
 {
     Register cond = ToRegister(lir->condExpr());
+    const LInt64Allocation trueExpr = lir->trueExpr();
     const LInt64Allocation falseExpr = lir->falseExpr();
-
     Register64 out = ToOutRegister64(lir);
-    MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out, "true expr is reused for input");
+
+    masm.move64(ToRegister64(trueExpr), out);
 
     masm.ma_cmp(cond, Imm32(0));
     if (falseExpr.low().isRegister()) {
         masm.ma_mov(ToRegister(falseExpr.low()), out.low, LeaveCC, Assembler::Equal);
         masm.ma_mov(ToRegister(falseExpr.high()), out.high, LeaveCC, Assembler::Equal);
     } else {
         masm.ma_ldr(ToAddress(falseExpr.low()), out.low, Offset, Assembler::Equal);
         masm.ma_ldr(ToAddress(falseExpr.high()), out.high, Offset, Assembler::Equal);
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -191,42 +191,40 @@ LIRGeneratorARM::lowerForALU(LInstructio
                                          useRegisterOrConstantAtStart(rhs));
     define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
 }
 
 void
 LIRGeneratorARM::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
                                   MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
 {
-    ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
-    ins->setInt64Operand(INT64_PIECES,
-                         lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
-    defineInt64ReuseInput(ins, mir, 0);
+    ins->setInt64Operand(0, useInt64Register(lhs));
+    ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+    defineInt64(ins, mir);
 }
 
 void
 LIRGeneratorARM::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs)
 {
     bool constantNeedTemp = true;
     if (rhs->isConstant()) {
         int64_t constant = rhs->toConstant()->toInt64();
         int32_t shift = mozilla::FloorLog2(constant);
         // See special cases in CodeGeneratorARM::visitMulI64
         if (constant >= -1 && constant <= 2)
             constantNeedTemp = false;
         if (int64_t(1) << shift == constant)
             constantNeedTemp = false;
     }
 
-    ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
-    ins->setInt64Operand(INT64_PIECES,
-                         lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
+    ins->setInt64Operand(0, useInt64Register(lhs));
+    ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
     if (constantNeedTemp)
         ins->setTemp(0, temp());
-    defineInt64ReuseInput(ins, mir, 0);
+    defineInt64(ins, mir);
 }
 
 void
 LIRGeneratorARM::lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input)
 {
     ins->setOperand(0, useRegisterAtStart(input));
     define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
 }
@@ -293,19 +291,19 @@ LIRGeneratorARM::lowerForShift(LInstruct
 template<size_t Temps>
 void
 LIRGeneratorARM::lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
                                     MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
 {
     if (mir->isRotate() && !rhs->isConstant())
         ins->setTemp(0, temp());
 
-    ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+    ins->setInt64Operand(0, useInt64Register(lhs));
     ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
-    defineInt64ReuseInput(ins, mir, 0);
+    defineInt64(ins, mir);
 }
 
 template void LIRGeneratorARM::lowerForShiftInt64(
     LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 0>* ins, MDefinition* mir,
     MDefinition* lhs, MDefinition* rhs);
 template void LIRGeneratorARM::lowerForShiftInt64(
     LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 1>* ins, MDefinition* mir,
     MDefinition* lhs, MDefinition* rhs);
@@ -506,22 +504,22 @@ LIRGeneratorARM::lowerUrshD(MUrsh* mir)
     LUrshD* lir = new(alloc()) LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
     define(lir, mir);
 }
 
 void
 LIRGeneratorARM::visitAsmSelect(MAsmSelect* ins)
 {
     if (ins->type() == MIRType::Int64) {
-        auto* lir = new(alloc()) LAsmSelectI64(useInt64RegisterAtStart(ins->trueExpr()),
-                                               useInt64(ins->falseExpr()),
+        auto* lir = new(alloc()) LAsmSelectI64(useInt64Register(ins->trueExpr()),
+                                               useInt64Register(ins->falseExpr()),
                                                useRegister(ins->condExpr())
                                               );
 
-        defineInt64ReuseInput(lir, ins, LAsmSelectI64::TrueExprIndex);
+        defineInt64(lir, ins);
         return;
     }
 
     auto* lir = new(alloc()) LAsmSelect(useRegisterAtStart(ins->trueExpr()),
                                         useRegister(ins->falseExpr()),
                                         useRegister(ins->condExpr())
                                        );
 
--- a/js/src/jit/shared/Lowering-shared-inl.h
+++ b/js/src/jit/shared/Lowering-shared-inl.h
@@ -121,16 +121,20 @@ LIRGeneratorShared::defineReuseInput(LIn
 
     define(lir, mir, def);
 }
 
 template <size_t Ops, size_t Temps> void
 LIRGeneratorShared::defineInt64ReuseInput(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir,
                                           MDefinition* mir, uint32_t operand)
 {
+#if JS_BITS_PER_WORD == 32
+    MOZ_CRASH("Temporarily disabled due to bug 1290450.");
+#endif
+
     // Note: Any other operand that is not the same as this operand should be
     // marked as not being "atStart". The regalloc cannot handle those and can
     // overwrite the inputs!
 
     // The input should be used at the start of the instruction, to avoid moves.
     MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
 #if JS_BITS_PER_WORD == 32
     MOZ_ASSERT(lir->getOperand(operand + 1)->toUse()->usedAtStart());
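The shared lowering keeps defineInt64ReuseInput for 64-bit targets, but the hunk above makes it crash immediately when JS_BITS_PER_WORD is 32, so any remaining 32-bit caller fails loudly at lowering time (the message cites bug 1290450) rather than silently taking the reuse-input path. A hypothetical stand-alone sketch of the same guard pattern, not the real LIRGeneratorShared code:

// Hypothetical illustration of a word-size guard; not SpiderMonkey code.
#include <cstdio>
#include <cstdlib>

#define JS_BITS_PER_WORD 32   // pretend this is a 32-bit build for the example

void defineInt64ReuseInputSketch()
{
#if JS_BITS_PER_WORD == 32
    std::fprintf(stderr, "Temporarily disabled due to bug 1290450.\n");
    std::abort();             // stands in for MOZ_CRASH
#else
    // ... reuse-input definition would go here on 64-bit builds ...
#endif
}

int main()
{
    defineInt64ReuseInputSketch();  // aborts in this 32-bit configuration
    return 0;
}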
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -62,16 +62,63 @@ LIRGeneratorX64::lowerForMulInt64(LMulI6
 {
     // X64 doesn't need a temp for 64bit multiplication.
     ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
     ins->setInt64Operand(INT64_PIECES,
                          lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
     defineInt64ReuseInput(ins, mir, 0);
 }
 
+template<size_t Temps>
+void
+LIRGeneratorX64::lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+                                    MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+    ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+
+    // shift operator should be constant or in register ecx
+    // x86 can't shift a non-ecx register
+    if (rhs->isConstant()) {
+        ins->setOperand(INT64_PIECES, useOrConstantAtStart(rhs));
+    } else {
+        // The operands are int64, but we only care about the lower 32 bits of
+        // the RHS. On 32-bit, the code below will load that part in ecx and
+        // will discard the upper half.
+        ensureDefined(rhs);
+        bool useAtStart = (lhs == rhs);
+        LUse use(ecx, useAtStart);
+        use.setVirtualRegister(rhs->virtualRegister());
+        ins->setOperand(INT64_PIECES, use);
+    }
+
+    defineInt64ReuseInput(ins, mir, 0);
+}
+
+template void LIRGeneratorX64::lowerForShiftInt64(
+    LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 0>* ins, MDefinition* mir,
+    MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorX64::lowerForShiftInt64(
+    LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 1>* ins, MDefinition* mir,
+    MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorX64::visitAsmSelect(MAsmSelect* ins)
+{
+    if (ins->type() != MIRType::Int64) {
+        lowerAsmSelect(ins);
+        return;
+    }
+
+    auto* lir = new(alloc()) LAsmSelectI64(useInt64RegisterAtStart(ins->trueExpr()),
+                                           useInt64(ins->falseExpr()),
+                                           useRegister(ins->condExpr())
+                                          );
+    defineInt64ReuseInput(lir, ins, LAsmSelectI64::TrueExprIndex);
+}
+
 void
 LIRGeneratorX64::visitBox(MBox* box)
 {
     MDefinition* opd = box->getOperand(0);
 
     // If the operand is a constant, emit near its uses.
     if (opd->isConstant() && box->canEmitAtUses()) {
         emitAtUses(box);
--- a/js/src/jit/x64/Lowering-x64.h
+++ b/js/src/jit/x64/Lowering-x64.h
@@ -23,16 +23,20 @@ class LIRGeneratorX64 : public LIRGenera
     void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
     void defineUntypedPhi(MPhi* phi, size_t lirIndex);
     void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
     void defineInt64Phi(MPhi* phi, size_t lirIndex);
 
     void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
                           MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
     void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs);
+    template<size_t Temps>
+    void lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+                            MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
 
     // Returns a box allocation. reg2 is ignored on 64-bit platforms.
     LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register, bool useAtStart = false);
 
     // x86 has constraints on what registers can be formatted for 1-byte
     // stores and loads; on x64 all registers are okay.
     LAllocation useByteOpRegister(MDefinition* mir);
     LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
@@ -57,16 +61,17 @@ class LIRGeneratorX64 : public LIRGenera
     void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
     void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins);
     void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins);
     void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
     void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
     void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
     void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
     void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
+    void visitAsmSelect(MAsmSelect* ins);
     void visitWasmStore(MWasmStore* ins);
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
     void visitSubstr(MSubstr* ins);
     void visitRandom(MRandom* ins);
     void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
     void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
     void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
 };
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -900,25 +900,27 @@ CodeGeneratorX86Shared::visitAddI(LAddI*
     }
 }
 
 void
 CodeGeneratorX86Shared::visitAddI64(LAddI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
     const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
-
-    MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+    Register64 out = ToOutRegister64(lir);
+
+    if (ToRegister64(lhs) != out)
+        masm.move64(ToRegister64(lhs), out);
 
     if (IsConstant(rhs)) {
-        masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+        masm.add64(Imm64(ToInt64(rhs)), out);
         return;
     }
 
-    masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+    masm.add64(ToOperandOrRegister64(rhs), out);
 }
 
 void
 CodeGeneratorX86Shared::visitSubI(LSubI* ins)
 {
     if (ins->rhs()->isConstant())
         masm.subl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
     else
@@ -935,25 +937,27 @@ CodeGeneratorX86Shared::visitSubI(LSubI*
     }
 }
 
 void
 CodeGeneratorX86Shared::visitSubI64(LSubI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
     const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
-
-    MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+    Register64 out = ToOutRegister64(lir);
+
+    if (ToRegister64(lhs) != out)
+        masm.move64(ToRegister64(lhs), out);
 
     if (IsConstant(rhs)) {
-        masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+        masm.sub64(Imm64(ToInt64(rhs)), out);
         return;
     }
 
-    masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+    masm.sub64(ToOperandOrRegister64(rhs), out);
 }
 
 void
 CodeGeneratorX86Shared::visitOutOfLineUndoALUOperation(OutOfLineUndoALUOperation* ool)
 {
     LInstruction* ins = ool->ins();
     Register reg = ToRegister(ins->getDef(0));
 
@@ -1066,49 +1070,51 @@ CodeGeneratorX86Shared::visitMulI(LMulI*
     }
 }
 
 void
 CodeGeneratorX86Shared::visitMulI64(LMulI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
     const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
-
-    MOZ_ASSERT(ToRegister64(lhs) == ToOutRegister64(lir));
+    Register64 out = ToOutRegister64(lir);
+
+    if (ToRegister64(lhs) != out)
+        masm.move64(ToRegister64(lhs), out);
 
     if (IsConstant(rhs)) {
         int64_t constant = ToInt64(rhs);
         switch (constant) {
           case -1:
-            masm.neg64(ToRegister64(lhs));
+            masm.neg64(out);
             return;
           case 0:
-            masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+            masm.xor64(out, out);
             return;
           case 1:
             // nop
             return;
           case 2:
-            masm.add64(ToRegister64(lhs), ToRegister64(lhs));
+            masm.add64(out, out);
             return;
           default:
             if (constant > 0) {
                 // Use shift if constant is power of 2.
                 int32_t shift = mozilla::FloorLog2(constant);
                 if (int64_t(1) << shift == constant) {
-                    masm.lshift64(Imm32(shift), ToRegister64(lhs));
+                    masm.lshift64(Imm32(shift), out);
                     return;
                 }
             }
             Register temp = ToTempRegisterOrInvalid(lir->temp());
-            masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+            masm.mul64(Imm64(constant), out, temp);
         }
     } else {
         Register temp = ToTempRegisterOrInvalid(lir->temp());
-        masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+        masm.mul64(ToOperandOrRegister64(rhs), out, temp);
     }
 }
 
 class ReturnZero : public OutOfLineCodeBase<CodeGeneratorX86Shared>
 {
     Register reg_;
 
   public:
@@ -1745,37 +1751,39 @@ CodeGeneratorX86Shared::visitBitOpI(LBit
     }
 }
 
 void
 CodeGeneratorX86Shared::visitBitOpI64(LBitOpI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
     const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
-
-    MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+    Register64 out = ToOutRegister64(lir);
+
+    if (ToRegister64(lhs) != out)
+        masm.move64(ToRegister64(lhs), out);
 
     switch (lir->bitop()) {
       case JSOP_BITOR:
         if (IsConstant(rhs))
-            masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+            masm.or64(Imm64(ToInt64(rhs)), out);
         else
-            masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+            masm.or64(ToOperandOrRegister64(rhs), out);
         break;
       case JSOP_BITXOR:
         if (IsConstant(rhs))
-            masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+            masm.xor64(Imm64(ToInt64(rhs)), out);
         else
-            masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+            masm.xor64(ToOperandOrRegister64(rhs), out);
         break;
       case JSOP_BITAND:
         if (IsConstant(rhs))
-            masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+            masm.and64(Imm64(ToInt64(rhs)), out);
         else
-            masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+            masm.and64(ToOperandOrRegister64(rhs), out);
         break;
       default:
         MOZ_CRASH("unexpected binary opcode");
     }
 }
 
 void
 CodeGeneratorX86Shared::visitShiftI(LShiftI* ins)
@@ -1829,50 +1837,52 @@ CodeGeneratorX86Shared::visitShiftI(LShi
     }
 }
 
 void
 CodeGeneratorX86Shared::visitShiftI64(LShiftI64* lir)
 {
     const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
     LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
-
-    MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+    Register64 out = ToOutRegister64(lir);
+
+    if (ToRegister64(lhs) != out)
+        masm.move64(ToRegister64(lhs), out);
 
     if (rhs->isConstant()) {
         int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
         switch (lir->bitop()) {
           case JSOP_LSH:
             if (shift)
-                masm.lshift64(Imm32(shift), ToRegister64(lhs));
+                masm.lshift64(Imm32(shift), out);
             break;
           case JSOP_RSH:
             if (shift)
-                masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+                masm.rshift64Arithmetic(Imm32(shift), out);
             break;
           case JSOP_URSH:
             if (shift)
-                masm.rshift64(Imm32(shift), ToRegister64(lhs));
+                masm.rshift64(Imm32(shift), out);
             break;
           default:
             MOZ_CRASH("Unexpected shift op");
         }
         return;
     }
 
     MOZ_ASSERT(ToRegister(rhs) == ecx);
     switch (lir->bitop()) {
       case JSOP_LSH:
-        masm.lshift64(ecx, ToRegister64(lhs));
+        masm.lshift64(ecx, out);
         break;
       case JSOP_RSH:
-        masm.rshift64Arithmetic(ecx, ToRegister64(lhs));
+        masm.rshift64Arithmetic(ecx, out);
         break;
       case JSOP_URSH:
-        masm.rshift64(ecx, ToRegister64(lhs));
+        masm.rshift64(ecx, out);
         break;
       default:
         MOZ_CRASH("Unexpected shift op");
     }
 }
 
 void
 CodeGeneratorX86Shared::visitUrshD(LUrshD* ins)
@@ -4810,31 +4820,32 @@ CodeGeneratorX86Shared::visitRotateI64(L
 {
     MRotate* mir = lir->mir();
     LAllocation* count = lir->count();
 
     Register64 input = ToRegister64(lir->input());
     Register64 output = ToOutRegister64(lir);
     Register temp = ToTempRegisterOrInvalid(lir->temp());
 
-    MOZ_ASSERT(input == output);
+    if (input != output)
+        masm.move64(input, output);
 
     if (count->isConstant()) {
         int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
         if (!c)
             return;
         if (mir->isLeftRotate())
-            masm.rotateLeft64(Imm32(c), input, output, temp);
+            masm.rotateLeft64(Imm32(c), output, output, temp);
         else
-            masm.rotateRight64(Imm32(c), input, output, temp);
+            masm.rotateRight64(Imm32(c), output, output, temp);
     } else {
         if (mir->isLeftRotate())
-            masm.rotateLeft64(ToRegister(count), input, output, temp);
+            masm.rotateLeft64(ToRegister(count), output, output, temp);
         else
-            masm.rotateRight64(ToRegister(count), input, output, temp);
+            masm.rotateRight64(ToRegister(count), output, output, temp);
     }
 }
 
 void
 CodeGeneratorX86Shared::visitPopcntI64(LPopcntI64* lir)
 {
     Register64 input = ToRegister64(lir->getInt64Operand(0));
     Register64 output = ToOutRegister64(lir);
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -75,52 +75,16 @@ LIRGeneratorX86Shared::lowerForShift(LIn
     if (rhs->isConstant())
         ins->setOperand(1, useOrConstantAtStart(rhs));
     else
         ins->setOperand(1, lhs != rhs ? useFixed(rhs, ecx) : useFixedAtStart(rhs, ecx));
 
     defineReuseInput(ins, mir, 0);
 }
 
-template<size_t Temps>
-void
-LIRGeneratorX86Shared::lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
-                                          MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
-{
-    ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
-#if defined(JS_NUNBOX32)
-    if (mir->isRotate())
-        ins->setTemp(0, temp());
-#endif
-
-    // shift operator should be constant or in register ecx
-    // x86 can't shift a non-ecx register
-    if (rhs->isConstant()) {
-        ins->setOperand(INT64_PIECES, useOrConstantAtStart(rhs));
-    } else {
-        // The operands are int64, but we only care about the lower 32 bits of
-        // the RHS. On 32-bit, the code below will load that part in ecx and
-        // will discard the upper half.
-        ensureDefined(rhs);
-        bool useAtStart = (lhs == rhs);
-        LUse use(ecx, useAtStart);
-        use.setVirtualRegister(rhs->virtualRegister());
-        ins->setOperand(INT64_PIECES, use);
-    }
-
-    defineInt64ReuseInput(ins, mir, 0);
-}
-
-template void LIRGeneratorX86Shared::lowerForShiftInt64(
-    LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 0>* ins, MDefinition* mir,
-    MDefinition* lhs, MDefinition* rhs);
-template void LIRGeneratorX86Shared::lowerForShiftInt64(
-    LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 1>* ins, MDefinition* mir,
-    MDefinition* lhs, MDefinition* rhs);
-
 void
 LIRGeneratorX86Shared::lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
                                    MDefinition* input)
 {
     ins->setOperand(0, useRegisterAtStart(input));
     defineReuseInput(ins, mir, 0);
 }
 
@@ -281,28 +245,19 @@ LIRGeneratorX86Shared::lowerModI(MMod* m
                                     useRegister(mod->rhs()),
                                     tempFixed(eax));
     if (mod->fallible())
         assignSnapshot(lir, Bailout_DoubleOutput);
     defineFixed(lir, mod, LAllocation(AnyRegister(edx)));
 }
 
 void
-LIRGeneratorX86Shared::visitAsmSelect(MAsmSelect* ins)
+LIRGeneratorX86Shared::lowerAsmSelect(MAsmSelect* ins)
 {
-    if (ins->type() == MIRType::Int64) {
-        auto* lir = new(alloc()) LAsmSelectI64(useInt64RegisterAtStart(ins->trueExpr()),
-                                               useInt64(ins->falseExpr()),
-                                               useRegister(ins->condExpr())
-                                              );
-
-        defineInt64ReuseInput(lir, ins, LAsmSelectI64::TrueExprIndex);
-        return;
-    }
-
+    MOZ_ASSERT(ins->type() != MIRType::Int64);
     auto* lir = new(alloc()) LAsmSelect(useRegisterAtStart(ins->trueExpr()),
                                         use(ins->falseExpr()),
                                         useRegister(ins->condExpr())
                                        );
 
     defineReuseInput(lir, ins, LAsmSelect::TrueExprIndex);
 }
 
--- a/js/src/jit/x86-shared/Lowering-x86-shared.h
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.h
@@ -28,32 +28,28 @@ class LIRGeneratorX86Shared : public LIR
     void visitPowHalf(MPowHalf* ins);
     void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
                        MDefinition* rhs);
     void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, MDefinition* input);
     void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
                      MDefinition* rhs);
 
     template<size_t Temps>
-    void lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
-                            MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
-
-    template<size_t Temps>
     void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir, MDefinition* lhs,
                      MDefinition* rhs);
     void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
                          MDefinition* lhs, MDefinition* rhs);
     void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
                          MDefinition* lhs, MDefinition* rhs);
     void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
                                  MDefinition* lhs, MDefinition* rhs);
     void visitAsmJSNeg(MAsmJSNeg* ins);
     void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
     void lowerWasmLoad(MWasmLoad* ins);
-    void visitAsmSelect(MAsmSelect* ins);
+    void lowerAsmSelect(MAsmSelect* ins);
     void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
     void lowerDivI(MDiv* div);
     void lowerModI(MMod* mod);
     void lowerUDiv(MDiv* div);
     void lowerUMod(MMod* mod);
     void lowerUrshD(MUrsh* mir);
     void lowerTruncateDToInt32(MTruncateToInt32* ins);
     void lowerTruncateFToInt32(MTruncateToInt32* ins);
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -1507,20 +1507,21 @@ CodeGeneratorX86::visitUDivOrModI64(LUDi
 }
 
 void
 CodeGeneratorX86::visitAsmSelectI64(LAsmSelectI64* lir)
 {
     MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
 
     Register cond = ToRegister(lir->condExpr());
+    Register64 trueExpr = ToRegister64(lir->trueExpr());
     Register64 falseExpr = ToRegister64(lir->falseExpr());
     Register64 out = ToOutRegister64(lir);
 
-    MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out, "true expr is reused for input");
+    masm.move64(trueExpr, out);
 
     Label done;
     masm.branchTest32(Assembler::NonZero, cond, cond, &done);
     masm.movl(falseExpr.low, out.low);
     masm.movl(falseExpr.high, out.high);
     masm.bind(&done);
 }
 
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -197,20 +197,19 @@ LIRGeneratorX86::lowerInt64PhiInput(MPhi
     low->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
     high->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
 }
 
 void
 LIRGeneratorX86::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
                                   MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
 {
-    ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
-    ins->setInt64Operand(INT64_PIECES,
-                         lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
-    defineInt64ReuseInput(ins, mir, 0);
+    ins->setInt64Operand(0, useInt64Register(lhs));
+    ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+    defineInt64(ins, mir);
 }
 
 void
 LIRGeneratorX86::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs)
 {
     bool constantNeedTemp = true;
     if (rhs->isConstant()) {
         int64_t constant = rhs->toConstant()->toInt64();
@@ -227,16 +226,65 @@ LIRGeneratorX86::lowerForMulInt64(LMulI6
     ins->setInt64Operand(INT64_PIECES,
             lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
     if (constantNeedTemp)
         ins->setTemp(0, temp());
     defineInt64Fixed(ins, mir, LInt64Allocation(LAllocation(AnyRegister(edx)),
                                                 LAllocation(AnyRegister(eax))));
 }
 
+template<size_t Temps>
+void
+LIRGeneratorX86::lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+                                    MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+    ins->setInt64Operand(0, useInt64Register(lhs));
+    if (mir->isRotate())
+        ins->setTemp(0, temp());
+
+    // shift operator should be constant or in register ecx
+    // x86 can't shift a non-ecx register
+    if (rhs->isConstant()) {
+        ins->setOperand(INT64_PIECES, useOrConstant(rhs));
+    } else {
+        // The operands are int64, but we only care about the lower 32 bits of
+        // the RHS. The code below will load that part in ecx and
+        // will discard the upper half.
+        ensureDefined(rhs);
+        bool useAtStart = (lhs == rhs);
+        LUse use(ecx, useAtStart);
+        use.setVirtualRegister(rhs->virtualRegister());
+        ins->setOperand(INT64_PIECES, use);
+    }
+
+    defineInt64(ins, mir);
+}
+
+template void LIRGeneratorX86::lowerForShiftInt64(
+    LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 0>* ins, MDefinition* mir,
+    MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorX86::lowerForShiftInt64(
+    LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 1>* ins, MDefinition* mir,
+    MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorX86::visitAsmSelect(MAsmSelect* ins)
+{
+    if (ins->type() != MIRType::Int64) {
+        lowerAsmSelect(ins);
+        return;
+    }
+
+    auto* lir = new(alloc()) LAsmSelectI64(useInt64Register(ins->trueExpr()),
+                                           useInt64(ins->falseExpr()),
+                                           useRegister(ins->condExpr())
+                                          );
+    defineInt64(lir, ins);
+}
+
 void
 LIRGeneratorX86::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
 {
     lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ true);
 }
 
 void
 LIRGeneratorX86::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
--- a/js/src/jit/x86/Lowering-x86.h
+++ b/js/src/jit/x86/Lowering-x86.h
@@ -43,16 +43,19 @@ class LIRGeneratorX86 : public LIRGenera
     void defineUntypedPhi(MPhi* phi, size_t lirIndex);
 
     void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
     void defineInt64Phi(MPhi* phi, size_t lirIndex);
 
     void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
                           MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
     void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs);
+    template<size_t Temps>
+    void lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+                            MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
 
     void lowerDivI64(MDiv* div);
     void lowerModI64(MMod* mod);
     void lowerUDivI64(MDiv* div);
     void lowerUModI64(MMod* mod);
 
   public:
     void visitWasmLoad(MWasmLoad* ins);
@@ -64,16 +67,17 @@ class LIRGeneratorX86 : public LIRGenera
     void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
     void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins);
     void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins);
     void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
     void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
     void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
     void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
     void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
+    void visitAsmSelect(MAsmSelect* ins);
     void visitWasmStore(MWasmStore* ins);
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
     void visitSubstr(MSubstr* ins);
     void visitRandom(MRandom* ins);
     void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
     void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
     void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
     void lowerPhi(MPhi* phi);