Bug 1578031 - Correct return type for `GetOffsetGuardLimit`. r=lth a=lizzard
author Ryan Hunt <rhunt@eqrion.net>
Tue, 03 Sep 2019 02:53:40 +0000
changeset 551609 6cdaee1535a1b66ad6bd44c7f78ea77ccfdfa4ac
parent 551608 02bc71fde278c94227ad2f05503e72a96abb62b5
child 551610 62a886e8b21ea1508f8bb41492d6e034c84afd2d
push id 11888
push user btara@mozilla.com
push date Wed, 04 Sep 2019 21:04:46 +0000
treeherder mozilla-beta@d5b25a250b5c
reviewers lth, lizzard
bugs 1578031
milestone 70.0
Bug 1578031 - Correct return type for `GetOffsetGuardLimit`. r=lth a=lizzard

Differential Revision: https://phabricator.services.mozilla.com/D44311
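Why the one-word return-type change matters: in C++, returning a `size_t` from a function declared to return `bool` implicitly converts any nonzero limit to `true`, which widens back to 1 at the call site, so the computed guard limit was silently collapsed. A minimal sketch of the pre-patch failure mode, not taken from the tree (the constant values and the name `GetOffsetGuardLimitBroken` are illustrative only):

    #include <stddef.h>
    #include <stdio.h>

    // Hypothetical stand-ins for the real wasm constants.
    static const size_t OffsetGuardLimit = 4096 - 8;
    static const size_t HugeOffsetGuardLimit = size_t(1) << 31;

    // Pre-patch shape: the declared return type is bool, so the
    // size_t constant is implicitly converted to true on the way out.
    static constexpr bool GetOffsetGuardLimitBroken(bool hugeMemory) {
      return hugeMemory ? HugeOffsetGuardLimit : OffsetGuardLimit;
    }

    int main() {
      size_t limit = GetOffsetGuardLimitBroken(false);
      printf("%zu\n", limit);  // prints 1 (true), not 4088
      return 0;
    }

Besides fixing the return type to `size_t`, the patch introduces `MaxOffsetGuardLimit` — the largest value `GetOffsetGuardLimit` can return for the current build configuration — so that the per-backend assertions in the diff below can check `offset` against a single compile-time bound without needing to know whether huge memory is in use.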
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm64/MacroAssembler-arm64.cpp
js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
js/src/jit/mips32/MacroAssembler-mips32.cpp
js/src/jit/mips64/MacroAssembler-mips64.cpp
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/wasm/WasmTypes.h
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -5923,17 +5923,17 @@ void MacroAssemblerARM::outOfLineWasmTru
 
 void MacroAssemblerARM::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
                                      Register memoryBase, Register ptr,
                                      Register ptrScratch, AnyRegister output,
                                      Register64 out64) {
   MOZ_ASSERT(ptr == ptrScratch);
 
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   Scalar::Type type = access.type();
 
   // Maybe add the offset.
   if (offset || type == Scalar::Int64) {
     ScratchRegisterScope scratch(asMasm());
     if (offset) {
       ma_add(Imm32(offset), ptr, scratch);
@@ -5994,17 +5994,17 @@ void MacroAssemblerARM::wasmLoadImpl(con
 
 void MacroAssemblerARM::wasmStoreImpl(const wasm::MemoryAccessDesc& access,
                                       AnyRegister value, Register64 val64,
                                       Register memoryBase, Register ptr,
                                       Register ptrScratch) {
   MOZ_ASSERT(ptr == ptrScratch);
 
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   unsigned byteSize = access.byteSize();
   Scalar::Type type = access.type();
 
   // Maybe add the offset.
   if (offset || type == Scalar::Int64) {
     ScratchRegisterScope scratch(asMasm());
     if (offset) {
@@ -6055,17 +6055,17 @@ void MacroAssemblerARM::wasmStoreImpl(co
 void MacroAssemblerARM::wasmUnalignedLoadImpl(
     const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
     Register ptrScratch, AnyRegister outAny, Register64 out64, Register tmp,
     Register tmp2, Register tmp3) {
   MOZ_ASSERT(ptr == ptrScratch);
   MOZ_ASSERT(tmp != ptr);
 
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   if (offset) {
     ScratchRegisterScope scratch(asMasm());
     ma_add(Imm32(offset), ptr, scratch);
   }
 
   // Add memoryBase to ptr, so we can use base+index addressing in the byte
   // loads.
@@ -6147,17 +6147,17 @@ void MacroAssemblerARM::wasmUnalignedSto
   MOZ_ASSERT(ptr == ptrScratch);
   // They can't both be valid, but they can both be invalid.
   MOZ_ASSERT(floatValue.isInvalid() || val64 == Register64::Invalid());
   // Don't try extremely clever optimizations.
   MOZ_ASSERT_IF(val64 != Register64::Invalid(),
                 valOrTmp != val64.high && valOrTmp != val64.low);
 
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   unsigned byteSize = access.byteSize();
   MOZ_ASSERT(byteSize == 8 || byteSize == 4 || byteSize == 2);
 
   if (offset) {
     ScratchRegisterScope scratch(asMasm());
     ma_add(Imm32(offset), ptr, scratch);
   }
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -290,17 +290,17 @@ static inline ARMFPRegister SelectFPReg(
   return ARMFPRegister(any.fpu(), size);
 }
 
 void MacroAssemblerCompat::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
                                         Register memoryBase_, Register ptr_,
                                         Register ptrScratch_,
                                         AnyRegister outany, Register64 out64) {
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   MOZ_ASSERT(ptr_ == ptrScratch_);
 
   ARMRegister memoryBase(memoryBase_, 64);
   ARMRegister ptr(ptr_, 64);
   if (offset) {
     Add(ptr, ptr, Operand(offset));
   }
@@ -359,17 +359,17 @@ void MacroAssemblerCompat::wasmLoadImpl(
   asMasm().memoryBarrierAfter(access.sync());
 }
 
 void MacroAssemblerCompat::wasmStoreImpl(const wasm::MemoryAccessDesc& access,
                                          AnyRegister valany, Register64 val64,
                                          Register memoryBase_, Register ptr_,
                                          Register ptrScratch_) {
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   MOZ_ASSERT(ptr_ == ptrScratch_);
 
   ARMRegister memoryBase(memoryBase_, 64);
   ARMRegister ptr(ptr_, 64);
   if (offset) {
     Add(ptr, ptr, Operand(offset));
   }
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -1836,17 +1836,17 @@ void MacroAssembler::wasmUnalignedStoreF
   wasmStoreImpl(access, AnyRegister(floatValue), memoryBase, ptr, ptrScratch,
                 tmp);
 }
 
 void MacroAssemblerMIPSShared::wasmLoadImpl(
     const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
     Register ptrScratch, AnyRegister output, Register tmp) {
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
   MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
 
   // Maybe add the offset.
   if (offset) {
     asMasm().addPtr(Imm32(offset), ptrScratch);
     ptr = ptrScratch;
   }
 
@@ -1915,17 +1915,17 @@ void MacroAssemblerMIPSShared::wasmLoadI
   asMasm().append(access, asMasm().size() - 4);
   asMasm().memoryBarrierAfter(access.sync());
 }
 
 void MacroAssemblerMIPSShared::wasmStoreImpl(
     const wasm::MemoryAccessDesc& access, AnyRegister value,
     Register memoryBase, Register ptr, Register ptrScratch, Register tmp) {
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
   MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
 
   // Maybe add the offset.
   if (offset) {
     asMasm().addPtr(Imm32(offset), ptrScratch);
     ptr = ptrScratch;
   }
 
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -2453,17 +2453,17 @@ void MacroAssemblerMIPSCompat::wasmLoadI
   }
   asMasm().memoryBarrierAfter(access.sync());
 }
 
 void MacroAssemblerMIPSCompat::wasmStoreI64Impl(
     const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
     Register ptr, Register ptrScratch, Register tmp) {
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
   MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
 
   // Maybe add the offset.
   if (offset) {
     asMasm().addPtr(Imm32(offset), ptrScratch);
     ptr = ptrScratch;
   }
 
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -2246,17 +2246,17 @@ void MacroAssembler::wasmTruncateFloat32
     bind(oolRejoin);
   }
 }
 
 void MacroAssemblerMIPS64Compat::wasmLoadI64Impl(
     const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
     Register ptrScratch, Register64 output, Register tmp) {
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
   MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
 
   // Maybe add the offset.
   if (offset) {
     asMasm().addPtr(Imm32(offset), ptrScratch);
     ptr = ptrScratch;
   }
 
@@ -2305,17 +2305,17 @@ void MacroAssemblerMIPS64Compat::wasmLoa
   asMasm().append(access, asMasm().size() - 4);
   asMasm().memoryBarrierAfter(access.sync());
 }
 
 void MacroAssemblerMIPS64Compat::wasmStoreI64Impl(
     const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
     Register ptr, Register ptrScratch, Register tmp) {
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
   MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
 
   // Maybe add the offset.
   if (offset) {
     asMasm().addPtr(Imm32(offset), ptrScratch);
     ptr = ptrScratch;
   }
 
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -395,17 +395,17 @@ void CodeGeneratorX64::wasmStore(const w
   }
 }
 
 template <typename T>
 void CodeGeneratorX64::emitWasmLoad(T* ins) {
   const MWasmLoad* mir = ins->mir();
 
   uint32_t offset = mir->access().offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   const LAllocation* ptr = ins->ptr();
   Operand srcAddr = ptr->isBogus()
                         ? Operand(HeapReg, offset)
                         : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);
 
   if (mir->type() == MIRType::Int64) {
     masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
@@ -419,17 +419,17 @@ void CodeGenerator::visitWasmLoad(LWasmL
 void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* ins) { emitWasmLoad(ins); }
 
 template <typename T>
 void CodeGeneratorX64::emitWasmStore(T* ins) {
   const MWasmStore* mir = ins->mir();
   const wasm::MemoryAccessDesc& access = mir->access();
 
   uint32_t offset = access.offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   const LAllocation* value = ins->getOperand(ins->ValueIndex);
   const LAllocation* ptr = ins->ptr();
   Operand dstAddr = ptr->isBogus()
                         ? Operand(HeapReg, offset)
                         : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);
 
   wasmStore(access, value, dstAddr);
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -233,17 +233,17 @@ void CodeGenerator::visitWasmUint32ToFlo
   masm.convertUInt32ToFloat32(temp, output);
 }
 
 template <typename T>
 void CodeGeneratorX86::emitWasmLoad(T* ins) {
   const MWasmLoad* mir = ins->mir();
 
   uint32_t offset = mir->access().offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   const LAllocation* ptr = ins->ptr();
   const LAllocation* memoryBase = ins->memoryBase();
 
   // Lowering has set things up so that we can use a BaseIndex form if the
   // pointer is constant and the offset is zero, or if the pointer is zero.
 
   Operand srcAddr =
@@ -265,17 +265,17 @@ void CodeGenerator::visitWasmLoad(LWasmL
 
 void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* ins) { emitWasmLoad(ins); }
 
 template <typename T>
 void CodeGeneratorX86::emitWasmStore(T* ins) {
   const MWasmStore* mir = ins->mir();
 
   uint32_t offset = mir->access().offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   const LAllocation* ptr = ins->ptr();
   const LAllocation* memoryBase = ins->memoryBase();
 
   // Lowering has set things up so that we can use a BaseIndex form if the
   // pointer is constant and the offset is zero, or if the pointer is zero.
 
   Operand dstAddr =
@@ -380,34 +380,34 @@ void CodeGenerator::visitWasmAtomicBinop
   } else {
     masm.wasmAtomicEffectOp(mir->access(), op, ToRegister(value), memAddr,
                             InvalidReg);
   }
 }
 
 void CodeGenerator::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* ins) {
   uint32_t offset = ins->mir()->access().offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   const LAllocation* memoryBase = ins->memoryBase();
   const LAllocation* ptr = ins->ptr();
   BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
 
   MOZ_ASSERT(ToRegister(ins->t1()) == ecx);
   MOZ_ASSERT(ToRegister(ins->t2()) == ebx);
   MOZ_ASSERT(ToOutRegister64(ins).high == edx);
   MOZ_ASSERT(ToOutRegister64(ins).low == eax);
 
   masm.wasmAtomicLoad64(ins->mir()->access(), srcAddr, Register64(ecx, ebx),
                         Register64(edx, eax));
 }
 
 void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* ins) {
   uint32_t offset = ins->mir()->access().offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   const LAllocation* memoryBase = ins->memoryBase();
   const LAllocation* ptr = ins->ptr();
   Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
 
   MOZ_ASSERT(ToRegister64(ins->expected()).low == eax);
   MOZ_ASSERT(ToRegister64(ins->expected()).high == edx);
   MOZ_ASSERT(ToRegister64(ins->replacement()).low == ebx);
@@ -417,17 +417,17 @@ void CodeGenerator::visitWasmCompareExch
 
   masm.append(ins->mir()->access(), masm.size());
   masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
 }
 
 template <typename T>
 void CodeGeneratorX86::emitWasmStoreOrExchangeAtomicI64(
     T* ins, const wasm::MemoryAccessDesc& access) {
-  MOZ_ASSERT(access.offset() < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(access.offset() < wasm::MaxOffsetGuardLimit);
 
   const LAllocation* memoryBase = ins->memoryBase();
   const LAllocation* ptr = ins->ptr();
   Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne,
                   access.offset());
 
   DebugOnly<const LInt64Allocation> value = ins->value();
   MOZ_ASSERT(ToRegister64(value).low == ebx);
@@ -457,17 +457,17 @@ void CodeGenerator::visitWasmAtomicExcha
   MOZ_ASSERT(ToOutRegister64(ins).high == edx);
   MOZ_ASSERT(ToOutRegister64(ins).low == eax);
 
   emitWasmStoreOrExchangeAtomicI64(ins, ins->access());
 }
 
 void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* ins) {
   uint32_t offset = ins->access().offset();
-  MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+  MOZ_ASSERT(offset < wasm::MaxOffsetGuardLimit);
 
   const LAllocation* memoryBase = ins->memoryBase();
   const LAllocation* ptr = ins->ptr();
 
   BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
 
   MOZ_ASSERT(ToRegister(memoryBase) == esi || ToRegister(memoryBase) == edi);
   MOZ_ASSERT(ToRegister(ptr) == esi || ToRegister(ptr) == edi);
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -2325,24 +2325,30 @@ static_assert(HugeOffsetGuardLimit < UIN
 static const size_t OffsetGuardLimit = PageSize - MaxMemoryAccessSize;
 static const size_t GuardSize = PageSize;
 
 static_assert(MaxMemoryAccessSize < GuardSize,
               "Guard page handles partial out-of-bounds");
 static_assert(OffsetGuardLimit < UINT32_MAX,
               "checking for overflow against OffsetGuardLimit is enough.");
 
-static constexpr bool GetOffsetGuardLimit(bool hugeMemory) {
+static constexpr size_t GetOffsetGuardLimit(bool hugeMemory) {
 #ifdef WASM_SUPPORTS_HUGE_MEMORY
   return hugeMemory ? HugeOffsetGuardLimit : OffsetGuardLimit;
 #else
   return OffsetGuardLimit;
 #endif
 }
 
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+static const size_t MaxOffsetGuardLimit = HugeOffsetGuardLimit;
+#else
+static const size_t MaxOffsetGuardLimit = OffsetGuardLimit;
+#endif
+
 // Return whether the given immediate satisfies the constraints of the platform
 // (viz. that, on ARM, IsValidARMImmediate).
 
 extern bool IsValidBoundsCheckImmediate(uint32_t i);
 
 // For a given WebAssembly/asm.js max size, return the number of bytes to
 // map which will necessarily be a multiple of the system page size and greater
 // than maxSize. For a returned mappedSize: