Bug 1532286 - Wasm/Ion: generate better code for wasmSelect in some cases. r=jandem.
author	Julian Seward <jseward@acm.org>
Fri, 15 Mar 2019 17:11:06 +0100
changeset 470539 5e18acc4a42b127d90ef83658aa132bb139d7483
parent 470538 7a1707c388c7c5cd787d8b0747aed226dbea773e
child 470540 4b0811d7b8e1591d4e48a26bdb8666a1bc5ccf61
child 470705 139558138e3eb0ab692bfa8bc60067437b5b43e0
push id	35907
push user	aciure@mozilla.com
push date	Tue, 23 Apr 2019 22:16:10 +0000
treeherder	mozilla-central@4b0811d7b8e1
reviewers	jandem
bugs	1532286
milestone	68.0a1
Bug 1532286 - Wasm/Ion: generate better code for wasmSelect in some cases. r=jandem.

This patch creates better code, when compiling Wasm via Ion, for 32-bit
integer selects which are guarded by a 32-bit integer compare.  No other
type combinations are optimised at present.

Changes:

* LIR-shared.h: Add new LIR node LWasmCompareAndSelect, holding the values
  to compare, the values to select over, and the comparison details.

* CanEmitCompareAtUses:
  - Rewrite (transform mechanically) to remove hard-to-reason-about control
    flow (a loop which probably only iterates once, and a boolean control
    variable).  These are both removed and replaced by obvious straight-line
    code.
  - Also allow deferred emission when there is a single user and it is a
    WasmSelect.

* LIRGenerator::visitWasmSelect, for both arm and x86-shared:
  - All target-specific variants have been removed and replaced by a shared
    version.
  - If the condition is a compare, marked 'may be emitted at use point', and
    has the correct types, bundle up the comparison arguments and the values
    to be selected over into a single LWasmCompareAndSelect node.

* CodeGenerator::visitWasmCompareAndSelect, for both arm and x86-shared:
  - All target-specific variants have been removed and replaced by a shared
    version.
  - From an LWasmCompareAndSelect node, generate the desired optimal
    sequence: a compare immediately followed by a conditional move.

* CodeGenerator::generateBody(), ifdef JS_JITSPEW, one-liner ridealong:
  print "# " before the LIR instruction name, so as to make reading
  IONFLAGS=codegen output easier.  No (release-)functional change.

* New test case select-int32.js.

Apart from the removal of aarch64-specific lowering and codegen routines,
none of the changes apply to aarch64, because wasm-via-Ion on aarch64 is
not currently supported.

Differential Revision: https://phabricator.services.mozilla.com/D27071
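For illustration, a minimal sketch of the kind of module this patch targets,
written for a SpiderMonkey JS shell providing the wasmEvalText and assertEq
testing helpers that the new test also uses (the export name "sel" and the
constants are chosen arbitrarily).  With this patch, the i32 compare feeding
the i32 select is emitted at its use point as a compare immediately followed
by a conditional move, rather than first being materialised as a 0/1 value:

// A minimal sketch, assuming a SpiderMonkey shell with the wasmEvalText and
// assertEq helpers (as select-int32.js below does).
const inst = wasmEvalText(`(module
  (func (export "sel") (param i32) (param i32) (result i32)
    (select (i32.const 42)
            (i32.const 1337)
            (i32.lt_u (get_local 0) (get_local 1)))))`);
assertEq(inst.exports.sel(1, 2), 42);    // 1 <u 2 holds, so the first operand
assertEq(inst.exports.sel(2, 1), 1337);  // otherwise, the second operand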
js/src/jit-test/tests/wasm/select-int32.js
js/src/jit/CodeGenerator.cpp
js/src/jit/Lowering.cpp
js/src/jit/MacroAssembler.h
js/src/jit/arm/Lowering-arm.cpp
js/src/jit/arm/MacroAssembler-arm-inl.h
js/src/jit/arm64/Lowering-arm64.cpp
js/src/jit/arm64/MacroAssembler-arm64-inl.h
js/src/jit/shared/LIR-shared.h
js/src/jit/x86-shared/Lowering-x86-shared.cpp
js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/select-int32.js
@@ -0,0 +1,123 @@
+
+// Checks that optimised handling, via Ion, of select-of-I32-values as driven
+// by comparison-of-I32-values, works correctly.  See bug 1532286 comment 7.
+
+function moduleText(cmp_kind /* "eq", "ne", "le_u" etc */)
+{
+  return `(module
+          (func (export "cmpI32_selI32")
+              (result i32)
+              (param i32) (param i32) (param i32) (param i32)
+              (select (get_local 2)
+                      (get_local 3)
+                      (i32.${cmp_kind} (get_local 0) (get_local 1))
+              )
+          ))`;
+}
+
+// Make sure both endpoints and the middlepoint are covered, with one value
+// either side, so as to expose both signedness and off-by-one failures.
+// Ditto quarter points for good measure.
+const vals = [0x0, 0x1,
+              0x3fffffff, 0x40000000, 0x40000001,
+              0x7fffffff, 0x80000000, 0x80000001,
+              0xbfffffff, 0xC0000000, 0xC0000001,
+              0xfffffffe, 0xffffffff];
+
+const testNames
+      = ["eq", "ne", "lt_s", "lt_u", "gt_s", "gt_u",
+                     "le_s", "le_u", "ge_s", "ge_u"];
+
+const expected_eq
+      = [0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,
+         1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,
+         0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,
+         1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,
+         0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0];
+
+const expected_ne
+      = [1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+         0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
+         1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+         0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
+         1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1];
+
+const expected_lt_s
+      = [1,0,0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,
+         1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,
+         1,1,1,1,1,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,
+         0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1,
+         1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1];
+
+const expected_lt_u
+      = [1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,
+         0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,
+         1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,1,
+         1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,
+         1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1];
+
+const expected_gt_s
+      = [1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,
+         0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
+         1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,
+         1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,
+         1,1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,1];
+
+const expected_gt_u
+      = [1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,
+         1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,
+         1,1,1,1,1,1,1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,0,
+         0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
+         1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1];
+
+const expected_le_s
+      = [0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,
+         1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,
+         0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+         0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,1,1,1,1,
+         0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0];
+
+const expected_le_u
+      = [0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,
+         0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1,1,
+         0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,1,
+         1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,
+         0,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0];
+
+const expected_ge_s
+      = [0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,
+         0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
+         0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,
+         1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,
+         0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0];
+
+const expected_ge_u
+      = [0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,
+         1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,
+         0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,0,
+         0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,
+         0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0];
+
+const testExpVecs
+      = [expected_eq, expected_ne,
+         expected_lt_s, expected_lt_u, expected_gt_s, expected_gt_u,
+         expected_le_s, expected_le_u, expected_ge_s, expected_ge_u]
+
+assertEq(testNames.length, testExpVecs.length);
+
+for (i in testNames) {
+    const testExp = testExpVecs[i];
+    assertEq(testExp.length, vals.length * vals.length);
+
+    const inst = wasmEvalText(moduleText(testNames[i]));
+
+    let ctr = 0;
+    for (let a of vals) {
+        for (let b of vals) {
+            const actual = inst.exports.cmpI32_selI32(a, b, 42, 1337);
+            const expected = testExp[ctr] == 0 ? 42 : 1337;
+            assertEq(expected, actual);
+            ctr++;
+        }
+    }
+}
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -6211,17 +6211,17 @@ bool CodeGenerator::generateBody() {
 
     for (LInstructionIterator iter = current->begin(); iter != current->end();
          iter++) {
       if (!alloc().ensureBallast()) {
         return false;
       }
 
 #ifdef JS_JITSPEW
-      JitSpewStart(JitSpew_Codegen, "instruction %s", iter->opName());
+      JitSpewStart(JitSpew_Codegen, "# instruction %s", iter->opName());
       if (const char* extra = iter->getExtraName()) {
         JitSpewCont(JitSpew_Codegen, ":%s", extra);
       }
       JitSpewFin(JitSpew_Codegen);
 #endif
 
       if (counts) {
         blockCounts->visitInstruction(*iter);
@@ -14005,13 +14005,52 @@ void CodeGenerator::visitWasmNullConstan
   masm.xorPtr(ToRegister(lir->output()), ToRegister(lir->output()));
 }
 
 void CodeGenerator::visitIsNullPointer(LIsNullPointer* lir) {
   masm.cmpPtrSet(Assembler::Equal, ToRegister(lir->value()), ImmWord(0),
                  ToRegister(lir->output()));
 }
 
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+  bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
+                    ins->compareType() == MCompare::Compare_UInt32;
+  bool selIs32bit = ins->mir()->type() == MIRType::Int32;
+
+  if (cmpIs32bit && selIs32bit) {
+    Register out = ToRegister(ins->output());
+    MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == out,
+               "true expr input is reused for output");
+
+    Assembler::Condition cond = Assembler::InvertCondition(JSOpToCondition(
+                                    ins->compareType(), ins->jsop()));
+    const LAllocation* rhs = ins->rightExpr();
+    const LAllocation* falseExpr = ins->ifFalseExpr();
+    Register lhs = ToRegister(ins->leftExpr());
+
+    if (rhs->isRegister()) {
+      if (falseExpr->isRegister()) {
+        // On arm32, this is the only one of the four cases that can actually
+        // happen, since |rhs| and |falseExpr| are marked useAny() by
+        // LIRGenerator::visitWasmSelect, and useAny() means "register only"
+        // on arm32.
+        masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr), out);
+      } else {
+        masm.cmp32Load32(cond, lhs, ToRegister(rhs), ToAddress(falseExpr), out);
+      }
+    } else {
+      if (falseExpr->isRegister()) {
+        masm.cmp32Move32(cond, lhs, ToAddress(rhs), ToRegister(falseExpr), out);
+      } else {
+        masm.cmp32Load32(cond, lhs, ToAddress(rhs), ToAddress(falseExpr), out);
+      }
+    }
+    return;
+  }
+
+  MOZ_CRASH("in CodeGenerator::visitWasmCompareAndSelect: unexpected types");
+}
+
 static_assert(!std::is_polymorphic<CodeGenerator>::value,
               "CodeGenerator should not have any virtual methods");
 
 }  // namespace jit
 }  // namespace js
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -899,31 +899,38 @@ void LIRGenerator::visitObjectGroupDispa
   add(lir, ins);
 }
 
 static inline bool CanEmitCompareAtUses(MInstruction* ins) {
   if (!ins->canEmitAtUses()) {
     return false;
   }
 
-  bool foundTest = false;
-  for (MUseIterator iter(ins->usesBegin()); iter != ins->usesEnd(); iter++) {
-    MNode* node = iter->consumer();
-    if (!node->isDefinition()) {
-      return false;
-    }
-    if (!node->toDefinition()->isTest()) {
-      return false;
-    }
-    if (foundTest) {
-      return false;
-    }
-    foundTest = true;
-  }
-  return true;
+  // If the result is never used, we can usefully defer emission to the use
+  // point, since that will never happen.
+  MUseIterator iter(ins->usesBegin());
+  if (iter == ins->usesEnd()) {
+    return true;
+  }
+
+  // If the first use isn't of the expected form, the answer is No.
+  MNode* node = iter->consumer();
+  if (!node->isDefinition()) {
+    return false;
+  }
+
+  MDefinition* use = node->toDefinition();
+  if (!use->isTest() && !use->isWasmSelect()) {
+    return false;
+  }
+
+  // Emission can be deferred to the first use point, but only if there are no
+  // other use points.
+  iter++;
+  return iter == ins->usesEnd();
 }
 
 void LIRGenerator::visitCompare(MCompare* comp) {
   MDefinition* left = comp->lhs();
   MDefinition* right = comp->rhs();
 
   // Try to fold the comparison so that we don't have to handle all cases.
   bool result;
@@ -5044,10 +5051,56 @@ void LIRGenerator::visitIonToWasmCall(MI
         MOZ_CRASH("Uninitialized ABIArg kind");
     }
   }
 
   defineReturn(lir, ins);
   assignSafepoint(lir, ins);
 }
 
+void LIRGenerator::visitWasmSelect(MWasmSelect* ins) {
+  MDefinition* condExpr = ins->condExpr();
+
+  // Pick off specific cases that we can do with LWasmCompareAndSelect.
+  // Currently only{U,}Int32 selection driven by a comparison of {U,}Int32
+  // values.
+  if (condExpr->isCompare() && condExpr->isEmittedAtUses() &&
+      ins->type() == MIRType::Int32) {
+    MCompare* comp = condExpr->toCompare();
+    JSOp jsop = comp->jsop();
+    MCompare::CompareType compTy = comp->compareType();
+    // We don't currently generate any other JSOPs for the comparison, and if
+    // that changes, we want to know about it.  Hence this assertion.
+    MOZ_ASSERT(jsop == JSOP_EQ || jsop == JSOP_NE || jsop == JSOP_LT ||
+               jsop == JSOP_GT || jsop == JSOP_LE || jsop == JSOP_GE);
+    if (compTy == MCompare::Compare_Int32 ||
+        compTy == MCompare::Compare_UInt32) {
+      auto* lir = new (alloc())
+        LWasmCompareAndSelect(useRegister(comp->lhs()), useAny(comp->rhs()),
+                              compTy, jsop,
+                              useRegisterAtStart(ins->trueExpr()),
+                              useAny(ins->falseExpr()));
+
+      defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
+      return;
+    }
+    // Otherwise fall through to normal handling, which appears to emit the
+    // condexpr itself anyway.
+  }
+
+  if (ins->type() == MIRType::Int64) {
+    auto* lir = new (alloc()) LWasmSelectI64(
+        useInt64RegisterAtStart(ins->trueExpr()), useInt64(ins->falseExpr()),
+        useRegister(ins->condExpr()));
+
+    defineInt64ReuseInput(lir, ins, LWasmSelectI64::TrueExprIndex);
+    return;
+  }
+
+  auto* lir = new (alloc())
+      LWasmSelect(useRegisterAtStart(ins->trueExpr()), useAny(ins->falseExpr()),
+                  useRegister(ins->condExpr()));
+
+  defineReuseInput(lir, ins, LWasmSelect::TrueExprIndex);
+}
+
 static_assert(!std::is_polymorphic<LIRGenerator>::value,
               "LIRGenerator should not have any virtual methods");
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1627,16 +1627,24 @@ class MacroAssembler : public MacroAssem
   inline void cmp32Move32(Condition cond, Register lhs, Register rhs,
                           Register src, Register dest)
       DEFINED_ON(arm, arm64, mips_shared, x86_shared);
 
   inline void cmp32Move32(Condition cond, Register lhs, const Address& rhs,
                           Register src, Register dest)
       DEFINED_ON(arm, arm64, mips_shared, x86_shared);
 
+  inline void cmp32Load32(Condition cond, Register lhs, const Address& rhs,
+                          const Address& src, Register dest)
+      DEFINED_ON(arm, arm64, x86_shared);
+
+  inline void cmp32Load32(Condition cond, Register lhs, Register rhs,
+                          const Address& src, Register dest)
+      DEFINED_ON(arm, arm64, x86_shared);
+
   inline void cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
                            Register src, Register dest)
       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
 
   inline void test32LoadPtr(Condition cond, const Address& addr, Imm32 mask,
                             const Address& src, Register dest)
       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
 
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -466,33 +466,16 @@ void LIRGeneratorARM::lowerUrshD(MUrsh* 
   MOZ_ASSERT(lhs->type() == MIRType::Int32);
   MOZ_ASSERT(rhs->type() == MIRType::Int32);
 
   LUrshD* lir = new (alloc())
       LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
   define(lir, mir);
 }
 
-void LIRGenerator::visitWasmSelect(MWasmSelect* ins) {
-  if (ins->type() == MIRType::Int64) {
-    auto* lir = new (alloc()) LWasmSelectI64(
-        useInt64RegisterAtStart(ins->trueExpr()), useInt64(ins->falseExpr()),
-        useRegister(ins->condExpr()));
-
-    defineInt64ReuseInput(lir, ins, LWasmSelectI64::TrueExprIndex);
-    return;
-  }
-
-  auto* lir = new (alloc())
-      LWasmSelect(useRegisterAtStart(ins->trueExpr()),
-                  useRegister(ins->falseExpr()), useRegister(ins->condExpr()));
-
-  defineReuseInput(lir, ins, LWasmSelect::TrueExprIndex);
-}
-
 void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
   if (ins->type() == MIRType::Int32) {
     define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
   } else if (ins->type() == MIRType::Float32) {
     define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
   } else {
     MOZ_ASSERT(ins->type() == MIRType::Double);
     define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
--- a/js/src/jit/arm/MacroAssembler-arm-inl.h
+++ b/js/src/jit/arm/MacroAssembler-arm-inl.h
@@ -1894,16 +1894,30 @@ void MacroAssembler::cmp32Move32(Conditi
                                  const Address& rhs, Register src,
                                  Register dest) {
   ScratchRegisterScope scratch(*this);
   SecondScratchRegisterScope scratch2(*this);
   ma_ldr(rhs, scratch, scratch2);
   cmp32Move32(cond, lhs, scratch, src, dest);
 }
 
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+                                 const Address& rhs, const Address& src,
+                                 Register dest) {
+  // This is never used, but must be present to facilitate linking on arm.
+  MOZ_CRASH("No known use cases");
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+                                 Register rhs, const Address& src,
+                                 Register dest) {
+  // This is never used, but must be present to facilitate linking on arm.
+  MOZ_CRASH("No known use cases");
+}
+
 void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
                                    Imm32 mask, const Address& src,
                                    Register dest) {
   MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
   test32(addr, mask);
   ScratchRegisterScope scratch(*this);
   ma_ldr(src, dest, scratch, Offset, cond);
 }
--- a/js/src/jit/arm64/Lowering-arm64.cpp
+++ b/js/src/jit/arm64/Lowering-arm64.cpp
@@ -303,20 +303,16 @@ void LIRGenerator::visitWasmNeg(MWasmNeg
     case MIRType::Double:
       define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
       break;
     default:
       MOZ_CRASH("unexpected type");
   }
 }
 
-void LIRGenerator::visitWasmSelect(MWasmSelect* ins) {
-  MOZ_CRASH("visitWasmSelect");
-}
-
 void LIRGeneratorARM64::lowerUDiv(MDiv* div) {
   LAllocation lhs = useRegister(div->lhs());
   // TODO (Bug 1523568): Implement the division-avoidance paths when rhs is
   // constant.
 
   // Generate UDiv
   LAllocation rhs = useRegister(div->rhs());
   LDefinition remainder = LDefinition::BogusTemp();
--- a/js/src/jit/arm64/MacroAssembler-arm64-inl.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
@@ -1584,16 +1584,28 @@ void MacroAssembler::cmp32Move32(Conditi
 void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
                                  const Address& rhs, Register src,
                                  Register dest) {
   cmp32(lhs, rhs);
   Csel(ARMRegister(dest, 32), ARMRegister(src, 32), ARMRegister(dest, 32),
        cond);
 }
 
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+                                 const Address& rhs, const Address& src,
+                                 Register dest) {
+  MOZ_CRASH("NYI");
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+                                 Register rhs, const Address& src,
+                                 Register dest) {
+  MOZ_CRASH("NYI");
+}
+
 void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
                                   Register src, Register dest) {
   cmp32(lhs, rhs);
   Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64),
        cond);
 }
 
 void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -6193,16 +6193,49 @@ class LWasmSelectI64
     setOperand(CondIndex, cond);
   }
 
   const LInt64Allocation trueExpr() { return getInt64Operand(TrueExprIndex); }
   const LInt64Allocation falseExpr() { return getInt64Operand(FalseExprIndex); }
   const LAllocation* condExpr() { return getOperand(CondIndex); }
 };
 
+class LWasmCompareAndSelect : public LWasmSelectBase<1, 4> {
+  MCompare::CompareType compareType_;
+  JSOp jsop_;
+
+ public:
+  LIR_HEADER(WasmCompareAndSelect);
+
+  static const size_t LeftExprIndex = 0;
+  static const size_t RightExprIndex = 1;
+  static const size_t IfTrueExprIndex = 2;
+  static const size_t IfFalseExprIndex = 3;
+
+  LWasmCompareAndSelect(const LAllocation& leftExpr,
+                        const LAllocation& rightExpr,
+                        MCompare::CompareType compareType, JSOp jsop,
+                        const LAllocation& ifTrueExpr,
+                        const LAllocation& ifFalseExpr)
+      : LWasmSelectBase(classOpcode), compareType_(compareType), jsop_(jsop) {
+    setOperand(LeftExprIndex, leftExpr);
+    setOperand(RightExprIndex, rightExpr);
+    setOperand(IfTrueExprIndex, ifTrueExpr);
+    setOperand(IfFalseExprIndex, ifFalseExpr);
+  }
+
+  const LAllocation* leftExpr() { return getOperand(LeftExprIndex); }
+  const LAllocation* rightExpr() { return getOperand(RightExprIndex); }
+  const LAllocation* ifTrueExpr() { return getOperand(IfTrueExprIndex); }
+  const LAllocation* ifFalseExpr() { return getOperand(IfFalseExprIndex); }
+
+  MCompare::CompareType compareType() { return compareType_; }
+  JSOp jsop() { return jsop_; }
+};
+
 class LWasmAddOffset : public LInstructionHelper<1, 1, 0> {
  public:
   LIR_HEADER(WasmAddOffset);
   explicit LWasmAddOffset(const LAllocation& base)
       : LInstructionHelper(classOpcode) {
     setOperand(0, base);
   }
   MWasmAddOffset* mir() const { return mir_->toWasmAddOffset(); }
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -241,33 +241,16 @@ void LIRGeneratorX86Shared::lowerModI(MM
   LModI* lir = new (alloc())
       LModI(useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(eax));
   if (mod->fallible()) {
     assignSnapshot(lir, Bailout_DoubleOutput);
   }
   defineFixed(lir, mod, LAllocation(AnyRegister(edx)));
 }
 
-void LIRGenerator::visitWasmSelect(MWasmSelect* ins) {
-  if (ins->type() == MIRType::Int64) {
-    auto* lir = new (alloc()) LWasmSelectI64(
-        useInt64RegisterAtStart(ins->trueExpr()), useInt64(ins->falseExpr()),
-        useRegister(ins->condExpr()));
-
-    defineInt64ReuseInput(lir, ins, LWasmSelectI64::TrueExprIndex);
-    return;
-  }
-
-  auto* lir = new (alloc())
-      LWasmSelect(useRegisterAtStart(ins->trueExpr()), use(ins->falseExpr()),
-                  useRegister(ins->condExpr()));
-
-  defineReuseInput(lir, ins, LWasmSelect::TrueExprIndex);
-}
-
 void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
   switch (ins->type()) {
     case MIRType::Int32:
       defineReuseInput(new (alloc()) LNegI(useRegisterAtStart(ins->input())),
                        ins, 0);
       break;
     case MIRType::Float32:
       defineReuseInput(new (alloc()) LNegF(useRegisterAtStart(ins->input())),
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -987,16 +987,30 @@ void MacroAssembler::cmp32Move32(Conditi
 
 void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
                                  const Address& rhs, Register src,
                                  Register dest) {
   cmp32(lhs, Operand(rhs));
   cmovCCl(cond, src, dest);
 }
 
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+                                 const Address& rhs, const Address& src,
+                                 Register dest) {
+  cmp32(lhs, Operand(rhs));
+  cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+                                 Register rhs, const Address& src,
+                                 Register dest) {
+  cmp32(lhs, rhs);
+  cmovCCl(cond, Operand(src), dest);
+}
+
 void MacroAssembler::spectreZeroRegister(Condition cond, Register scratch,
                                          Register dest) {
   // Note: use movl instead of move32/xorl to ensure flags are not clobbered.
   movl(Imm32(0), scratch);
   spectreMovePtr(cond, scratch, dest);
 }
 
 // ========================================================================