Bug 1432345 - Baldr: add index masking for 32-bit wasm loads and stores (r=jandem)
author Luke Wagner <luke@mozilla.com>
Mon, 26 Feb 2018 13:40:01 -0600
changeset 405305 bf401fe9c95c34f150ae187613e755d718f86973
parent 405304 a8e5b45eedde64ef114e7a0a5e3d6fafac3f723a
child 405306 6568fdf9c0ee96c681a781b3d1741cd093ca07d8
push id 100212
push user lwagner@mozilla.com
push date Mon, 26 Feb 2018 19:43:53 +0000
treeherder mozilla-inbound@bf401fe9c95c
reviewers jandem
bugs 1432345
milestone 60.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1432345 - Baldr: add index masking for 32-bit wasm loads and stores (r=jandem)
js/src/jit/Lowering.cpp
js/src/jit/MIR.h
js/src/jit/MacroAssembler.h
js/src/jit/WasmBCE.cpp
js/src/jit/arm/MacroAssembler-arm-inl.h
js/src/jit/shared/LIR-shared.h
js/src/jit/x86/MacroAssembler-x86-inl.h
js/src/wasm/WasmIonCompile.cpp
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -4527,19 +4527,25 @@ LIRGenerator::visitWasmBoundsCheck(MWasm
     MOZ_ASSERT(!ins->isRedundant());
 
     MDefinition* index = ins->index();
     MOZ_ASSERT(index->type() == MIRType::Int32);
 
     MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
     MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
 
-    auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(index),
-                                              useRegisterAtStart(boundsCheckLimit));
-    add(lir, ins);
+    if (JitOptions.spectreIndexMasking) {
+        auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(index),
+                                                  useRegister(boundsCheckLimit));
+        defineReuseInput(lir, ins, 0);
+    } else {
+        auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(index),
+                                                  useRegisterAtStart(boundsCheckLimit));
+        add(lir, ins);
+    }
 #endif
 }
 
 void
 LIRGenerator::visitWasmAlignmentCheck(MWasmAlignmentCheck* ins)
 {
     MDefinition* index = ins->index();
     MOZ_ASSERT(index->type() == MIRType::Int32);
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -14231,16 +14231,19 @@ class MWasmBoundsCheck
 
     explicit MWasmBoundsCheck(MDefinition* index, MDefinition* boundsCheckLimit,
                               wasm::BytecodeOffset bytecodeOffset)
       : MBinaryInstruction(classOpcode, index, boundsCheckLimit),
         bytecodeOffset_(bytecodeOffset)
     {
         // Bounds check is effectful: it throws for OOB.
         setGuard();
+
+        if (JitOptions.spectreIndexMasking)
+            setResultType(MIRType::Int32);
     }
 
   public:
     INSTRUCTION_HEADER(WasmBoundsCheck)
     TRIVIAL_NEW_WRAPPERS
     NAMED_OPERANDS((0, index), (1, boundsCheckLimit))
 
     AliasSet getAliasSet() const override {
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1465,18 +1465,20 @@ class MacroAssembler : public MacroAssem
   public:
     // ========================================================================
     // wasm support
 
     CodeOffset wasmTrapInstruction() PER_SHARED_ARCH;
 
     void wasmTrap(wasm::Trap trap, wasm::BytecodeOffset bytecodeOffset);
 
-    // Emit a bounds check against the wasm heap limit, jumping to 'label' if 'cond' holds.
-    // Required when WASM_HUGE_MEMORY is not defined.
+    // Emit a bounds check against the wasm heap limit, jumping to 'label' if
+    // 'cond' holds. Required when WASM_HUGE_MEMORY is not defined. If
+    // JitOptions.spectreIndexMasking is true, in speculative executions 'index'
+    // is saturated in-place to 'boundsCheckLimit'.
     template <class L>
     inline void wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit, L label)
         DEFINED_ON(arm, arm64, mips32, mips64, x86);
 
     template <class L>
     inline void wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit, L label)
         DEFINED_ON(arm, arm64, mips32, mips64, x86);
 
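The comment above describes the whole trick. As an illustration only (not part of this patch), its architectural and speculative behaviour can be sketched in plain C++; trapOutOfBounds() is a hypothetical stand-in for the wasm trap path:

    #include <cstdint>

    [[noreturn]] void trapOutOfBounds();  // hypothetical trap helper

    // Architecturally an out-of-bounds index always reaches the trap, so the
    // clamp below is dead on real execution paths. It matters only when the
    // CPU mispredicts the trapping branch: the clamped index keeps any
    // speculative load inside linear memory, which is what defeats Spectre
    // bounds-check bypass.
    uint32_t boundsCheckedIndex(uint32_t index, uint32_t boundsCheckLimit) {
        if (index >= boundsCheckLimit)
            trapOutOfBounds();
        return index < boundsCheckLimit ? index : boundsCheckLimit;  // the cmov
    }
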
--- a/js/src/jit/WasmBCE.cpp
+++ b/js/src/jit/WasmBCE.cpp
@@ -55,23 +55,33 @@ jit::EliminateBoundsChecks(MIRGenerator*
                 MOZ_ASSERT(wasm::MaxMemoryAccessSize < wasm::GuardSize,
                            "Guard page handles partial out-of-bounds");
 #endif
 
                 if (addr->isConstant() && addr->toConstant()->type() == MIRType::Int32 &&
                     uint32_t(addr->toConstant()->toInt32()) < mir->minWasmHeapLength())
                 {
                     bc->setRedundant();
+                    if (JitOptions.spectreIndexMasking)
+                        bc->replaceAllUsesWith(addr);
+                    else
+                        MOZ_ASSERT(!bc->hasUses());
                 }
                 else
                 {
                     LastSeenMap::AddPtr ptr = lastSeen.lookupForAdd(addr->id());
                     if (ptr) {
-                        if (ptr->value()->block()->dominates(block))
+                        MDefinition* prevCheckOrPhi = ptr->value();
+                        if (prevCheckOrPhi->block()->dominates(block)) {
                             bc->setRedundant();
+                            if (JitOptions.spectreIndexMasking)
+                                bc->replaceAllUsesWith(prevCheckOrPhi);
+                            else
+                                MOZ_ASSERT(!bc->hasUses());
+                        }
                     } else {
                         if (!lastSeen.add(ptr, addr->id(), def))
                             return false;
                     }
                 }
                 break;
               }
               case MDefinition::Opcode::Phi: {
@@ -85,16 +95,23 @@ jit::EliminateBoundsChecks(MIRGenerator*
                 // phi node checked.
                 //
                 // Note that any phi that is part of a cycle
                 // will not be "safe" since the value coming on the backedge
                 // cannot be in lastSeen because its block hasn't been traversed yet.
                 for (int i = 0, nOps = phi->numOperands(); i < nOps; i++) {
                     MDefinition* src = phi->getOperand(i);
 
+                    if (JitOptions.spectreIndexMasking) {
+                        if (src->isWasmBoundsCheck())
+                            src = src->toWasmBoundsCheck()->index();
+                    } else {
+                        MOZ_ASSERT(!src->isWasmBoundsCheck());
+                    }
+
                     LastSeenMap::Ptr checkPtr = lastSeen.lookup(src->id());
                     if (!checkPtr || !checkPtr->value()->block()->dominates(block)) {
                         phiChecked = false;
                         break;
                     }
                 }
 
                 if (phiChecked) {
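Because a bounds check now produces the (possibly clamped) index, BCE can no longer simply mark a dominated check redundant and drop it: every use of the eliminated check has to be rerouted to the dominating check's result, which is what the replaceAllUsesWith() calls above do. A schematic of the rewrite, on assumed MIR shapes rather than anything taken from this patch:

    // With spectreIndexMasking enabled:
    //
    //   before BCE                            after BCE
    //   ----------                            ---------
    //   c1 = WasmBoundsCheck idx, limit       c1 = WasmBoundsCheck idx, limit
    //   v1 = WasmLoad c1                      v1 = WasmLoad c1
    //   c2 = WasmBoundsCheck idx, limit       (c2 eliminated)
    //   v2 = WasmLoad c2                      v2 = WasmLoad c1

The phi case unwraps a WasmBoundsCheck operand back to its index() for the same reason: lastSeen is keyed on the underlying index's id, and with masking enabled the operand reaching the phi is often the check itself rather than the raw index.
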
--- a/js/src/jit/arm/MacroAssembler-arm-inl.h
+++ b/js/src/jit/arm/MacroAssembler-arm-inl.h
@@ -2296,27 +2296,31 @@ MacroAssembler::clampIntToUint8(Register
 // wasm support
 
 template <class L>
 void
 MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit, L label)
 {
     as_cmp(index, O2Reg(boundsCheckLimit));
     as_b(label, cond);
+    if (JitOptions.spectreIndexMasking)
+        ma_mov(boundsCheckLimit, index, LeaveCC, cond);
 }
 
 template <class L>
 void
 MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit, L label)
 {
     ScratchRegisterScope scratch(*this);
     MOZ_ASSERT(boundsCheckLimit.offset == offsetof(wasm::TlsData, boundsCheckLimit));
     ma_ldr(DTRAddr(boundsCheckLimit.base, DtrOffImm(boundsCheckLimit.offset)), scratch);
     as_cmp(index, O2Reg(scratch));
     as_b(label, cond);
+    if (JitOptions.spectreIndexMasking)
+        ma_mov(scratch, index, LeaveCC, cond);
 }
 
 //}}} check_macroassembler_style
 // ===============================================================
 
 void
 MacroAssemblerARMCompat::incrementInt32Value(const Address& addr)
 {
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -8423,17 +8423,17 @@ class LWasmAddOffset : public LInstructi
     MWasmAddOffset* mir() const {
         return mir_->toWasmAddOffset();
     }
     const LAllocation* base() {
         return getOperand(0);
     }
 };
 
-class LWasmBoundsCheck : public LInstructionHelper<0, 2, 0>
+class LWasmBoundsCheck : public LInstructionHelper<1, 2, 0>
 {
   public:
     LIR_HEADER(WasmBoundsCheck);
     explicit LWasmBoundsCheck(const LAllocation& ptr,
                               const LAllocation& boundsCheckLimit = LAllocation())
     {
         setOperand(0, ptr);
         setOperand(1, boundsCheckLimit);
--- a/js/src/jit/x86/MacroAssembler-x86-inl.h
+++ b/js/src/jit/x86/MacroAssembler-x86-inl.h
@@ -1104,24 +1104,28 @@ MacroAssembler::truncateDoubleToUInt64(A
 // wasm support
 
 template <class L>
 void
 MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit, L label)
 {
     cmp32(index, boundsCheckLimit);
     j(cond, label);
+    if (JitOptions.spectreIndexMasking)
+        cmovCCl(cond, Operand(boundsCheckLimit), index);
 }
 
 template <class L>
 void
 MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit, L label)
 {
     cmp32(index, Operand(boundsCheckLimit));
     j(cond, label);
+    if (JitOptions.spectreIndexMasking)
+        cmovCCl(cond, Operand(boundsCheckLimit), index);
 }
 
 //}}} check_macroassembler_style
 // ===============================================================
 
 // Note: this function clobbers the source register.
 void
 MacroAssemblerX86::convertUInt32ToDouble(Register src, FloatRegister dest)
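Concretely, with spectreIndexMasking enabled the register form above boils down to a three-instruction sequence along these lines ('cond' is AboveOrEqual at the call sites; register names are illustrative):

    cmp    index, boundsCheckLimit   ; 32-bit unsigned compare
    jae    .oob                      ; architectural path: OOB always traps
    cmovae index, boundsCheckLimit   ; dead unless speculating past the jae

A conditional move is used instead of a second branch because cmov resolves as a data dependency rather than predicted control flow, so the clamp still takes effect while the branch predictor is speculating down the wrong path.
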
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -827,18 +827,22 @@ class FunctionCompiler
             *base = computeEffectiveAddress(*base, access);
 
         if (alignmentCheck) {
             curBlock_->add(MWasmAlignmentCheck::New(alloc(), *base, access->byteSize(),
                                                     bytecodeOffset()));
         }
 
         MWasmLoadTls* boundsCheckLimit = maybeLoadBoundsCheckLimit();
-        if (boundsCheckLimit)
-            curBlock_->add(MWasmBoundsCheck::New(alloc(), *base, boundsCheckLimit, bytecodeOffset()));
+        if (boundsCheckLimit) {
+            auto* ins = MWasmBoundsCheck::New(alloc(), *base, boundsCheckLimit, bytecodeOffset());
+            curBlock_->add(ins);
+            if (JitOptions.spectreIndexMasking)
+                *base = ins;
+        }
     }
 
     bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
         if (result == ValType::I64 && access->byteSize() <= 4) {
             // These smaller accesses should all be zero-extending.
             MOZ_ASSERT(!isSignedIntType(access->type()));
             return true;
         }
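
The net effect in the generated MIR is that the memory access consumes the bounds check's result rather than the raw pointer, so the clamped index is what actually reaches the load or store. A schematic for a simple load with masking enabled (node names illustrative):

    base  = <effective address>
    limit = WasmLoadTls tls, offsetof(TlsData, boundsCheckLimit)
    chk   = WasmBoundsCheck base, limit    // now produces a value: the clamped base
    val   = WasmLoad chk                   // uses chk, not the raw base

This is also why Lowering.cpp switches to defineReuseInput() when masking is on: the check's output reuses the index register, so the clamp happens in place and costs no extra register.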