Bug 1640669 - Part 3: Hide platform-specific bits better. r=jseward
author: Lars T Hansen <lhansen@mozilla.com>
Fri, 31 Jul 2020 15:07:39 +0000
changeset 542870 684e8b6a41091c9b221bba91696182e558e56182
parent 542869 ce7b1451c1009774c89f46b808eec9e3590ac6d4
child 542871 24a69cc00e3bc9b688fb40cb6e789bdd2c9ccff5
push id: 37657
push user: nerli@mozilla.com
push date: Sat, 01 Aug 2020 09:48:10 +0000
treeherder: mozilla-central@750bc4c5c4ad [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: jseward
bugs: 1640669
milestone: 81.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1640669 - Part 3: Hide platform-specific bits better. r=jseward Factor a predicate that determines whether we should optimize a reduce+branch operation. Differential Revision: https://phabricator.services.mozilla.com/D85261
js/src/jit/Lowering.cpp
js/src/jit/x86-shared/Lowering-x86-shared.cpp
js/src/jit/x86-shared/Lowering-x86-shared.h
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -858,34 +858,24 @@ void LIRGenerator::visitTest(MTest* test
   }
 
 #ifdef ENABLE_WASM_SIMD
   // Check if the operand for this test is an any_true/all_true SIMD operation.
   // If it is, we want to emit an LWasmReduceAndBranchSimd128 node to avoid
   // generating an intermediate boolean result.
   if (opd->isWasmReduceSimd128() && opd->isEmittedAtUses()) {
     MWasmReduceSimd128* node = opd->toWasmReduceSimd128();
-    switch (node->simdOp()) {
-      case wasm::SimdOp::I8x16AnyTrue:
-      case wasm::SimdOp::I16x8AnyTrue:
-      case wasm::SimdOp::I32x4AnyTrue:
-      case wasm::SimdOp::I8x16AllTrue:
-      case wasm::SimdOp::I16x8AllTrue:
-      case wasm::SimdOp::I32x4AllTrue:
-      case wasm::SimdOp::I16x8Bitmask: {
+    if (canFoldReduceSimd128AndBranch(node->simdOp())) {
 #  ifdef DEBUG
-        js::wasm::ReportSimdAnalysis("simd128-to-scalar-and-branch -> folded");
+      js::wasm::ReportSimdAnalysis("simd128-to-scalar-and-branch -> folded");
 #  endif
-        auto* lir = new (alloc()) LWasmReduceAndBranchSimd128(
-            useRegister(node->input()), node->simdOp(), ifTrue, ifFalse);
-        add(lir, test);
-        return;
-      }
-      default:
-        break;
+      auto* lir = new (alloc()) LWasmReduceAndBranchSimd128(
+          useRegister(node->input()), node->simdOp(), ifTrue, ifFalse);
+      add(lir, test);
+      return;
     }
   }
 #endif
 
   if (opd->isIsObject() && opd->isEmittedAtUses()) {
     MDefinition* input = opd->toIsObject()->input();
     MOZ_ASSERT(input->type() == MIRType::Value);
 
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -1675,35 +1675,42 @@ void LIRGenerator::visitWasmUnarySimd128
       break;
   }
 
   LWasmUnarySimd128* lir =
       new (alloc()) LWasmUnarySimd128(useRegister(ins->input()), tempReg);
   define(lir, ins);
 }
 
-static bool CanEmitWasmReduceSimd128AtUses(MWasmReduceSimd128* ins) {
+bool LIRGeneratorX86Shared::canFoldReduceSimd128AndBranch(wasm::SimdOp op) {
+  switch (op) {
+    case wasm::SimdOp::I8x16AnyTrue:
+    case wasm::SimdOp::I16x8AnyTrue:
+    case wasm::SimdOp::I32x4AnyTrue:
+    case wasm::SimdOp::I8x16AllTrue:
+    case wasm::SimdOp::I16x8AllTrue:
+    case wasm::SimdOp::I32x4AllTrue:
+    case wasm::SimdOp::I16x8Bitmask:
+      return true;
+    default:
+      return false;
+  }
+}
+
+bool LIRGeneratorX86Shared::canEmitWasmReduceSimd128AtUses(
+    MWasmReduceSimd128* ins) {
   if (!ins->canEmitAtUses()) {
     return false;
   }
   // Only specific ops generating int32.
   if (ins->type() != MIRType::Int32) {
     return false;
   }
-  switch (ins->simdOp()) {
-    case wasm::SimdOp::I8x16AnyTrue:
-    case wasm::SimdOp::I16x8AnyTrue:
-    case wasm::SimdOp::I32x4AnyTrue:
-    case wasm::SimdOp::I8x16AllTrue:
-    case wasm::SimdOp::I16x8AllTrue:
-    case wasm::SimdOp::I32x4AllTrue:
-    case wasm::SimdOp::I16x8Bitmask:
-      break;
-    default:
-      return false;
+  if (!canFoldReduceSimd128AndBranch(ins->simdOp())) {
+    return false;
   }
   // If never used then defer (it will be removed).
   MUseIterator iter(ins->usesBegin());
   if (iter == ins->usesEnd()) {
     return true;
   }
   // We require an MTest consumer.
   MNode* node = iter->consumer();
@@ -1711,17 +1718,17 @@ static bool CanEmitWasmReduceSimd128AtUs
     return false;
   }
   // Defer only if there's only one use.
   iter++;
   return iter == ins->usesEnd();
 }
 
 void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
-  if (CanEmitWasmReduceSimd128AtUses(ins)) {
+  if (canEmitWasmReduceSimd128AtUses(ins)) {
     emitAtUses(ins);
     return;
   }
   if (ins->type() == MIRType::Int64) {
     auto* lir =
         new (alloc()) LWasmReduceSimd128ToInt64(useRegister(ins->input()));
     defineInt64(lir, ins);
   } else {
--- a/js/src/jit/x86-shared/Lowering-x86-shared.h
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.h
@@ -49,14 +49,19 @@ class LIRGeneratorX86Shared : public LIR
   void lowerTruncateDToInt32(MTruncateToInt32* ins);
   void lowerTruncateFToInt32(MTruncateToInt32* ins);
   void lowerCompareExchangeTypedArrayElement(
       MCompareExchangeTypedArrayElement* ins, bool useI386ByteRegisters);
   void lowerAtomicExchangeTypedArrayElement(
       MAtomicExchangeTypedArrayElement* ins, bool useI386ByteRegisters);
   void lowerAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins,
                                          bool useI386ByteRegisters);
+
+#ifdef ENABLE_WASM_SIMD
+  bool canFoldReduceSimd128AndBranch(wasm::SimdOp op);
+  bool canEmitWasmReduceSimd128AtUses(MWasmReduceSimd128* ins);
+#endif
 };
 
 }  // namespace jit
 }  // namespace js
 
 #endif /* jit_x86_shared_Lowering_x86_shared_h */