Bug 1518210 - Wasm: Conditionally create huge memories based on wasm::IsHugeMemoryEnabled. r=lth
(backed out by changeset ea9924171afd)
author: Ryan Hunt <rhunt@eqrion.net>
date: Fri, 30 Aug 2019 02:38:20 +0000
changeset: 554552 b88d66dddefff7b557143585fb73f2cf9d3c6648
parent: 554551 40e3f38af193cf11426ed48d89780c1768deb05d
child: 554553 39fc18ada840d9de69c45ca8484361a58ce0449b
push id: 2165
push user: ffxbld-merge
push date: Mon, 14 Oct 2019 16:30:58 +0000
treeherder: mozilla-release@0eae18af659f
reviewers: lth
bugs: 1518210
milestone: 70.0a1
Bug 1518210 - Wasm: Conditionally create huge memories based on wasm::IsHugeMemoryEnabled. r=lth

This commit modifies WasmMemoryObject, ArrayBufferObject, and SharedArrayBufferObject to conditionally use huge memory based on the global flag. The memory logic is fairly involved and entangled, making this change a bit tricky. The following changes were made (a distilled sketch of the new creation flow follows the file list below):

* Stopped conditionally compiling the huge-memory constants and prefixed them with `Huge`
* Stopped conditionally compiling `ExtendBufferMapping` and `wasmMovingGrowToSize`
* Renamed `CreateBuffer` to `CreateSpecificWasmBuffer`, for clarity
* Moved maxSize clamping into `CreateSpecificWasmBuffer`, keeping a single call site to `wasm::IsHugeMemoryEnabled` during memory creation
* Moved the mappedSize computation out of `RawbufT::Allocate` into `CreateSpecificWasmBuffer`, for the same reason
* Moved the `boundsCheckLimit` computation from `ArrayBufferObjectMaybeShared` to `WasmMemoryObject`, making WasmMemoryObject responsible for knowing whether it is 'huge'
* Added a method to determine whether a `WasmMemoryObject` is huge, which lets us know whether we support `movingGrow` or have a `boundsCheckLimit`
* Refactored `WasmMemoryObject::grow` to have a single call site to `wasmMovingGrowToSize`, for clarity
* Added a release assert in `Module::instantiateMemory`, ensuring we have huge memory or bounds checks where needed

Differential Revision: https://phabricator.services.mozilla.com/D41869
js/src/vm/ArrayBufferObject.cpp
js/src/vm/ArrayBufferObject.h
js/src/vm/SharedArrayObject.cpp
js/src/vm/SharedArrayObject.h
js/src/wasm/WasmBaselineCompile.cpp
js/src/wasm/WasmCraneliftCompile.cpp
js/src/wasm/WasmInstance.cpp
js/src/wasm/WasmIonCompile.cpp
js/src/wasm/WasmJS.cpp
js/src/wasm/WasmJS.h
js/src/wasm/WasmModule.cpp
js/src/wasm/WasmTypes.cpp
js/src/wasm/WasmTypes.h
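For orientation before the diff: the patch reduces memory creation to a single query of the runtime flag. The following is a minimal sketch of that decision, not the real SpiderMonkey code — IsHugeMemoryEnabled is a stand-in for the runtime-configured flag, std::optional replaces mozilla::Maybe, and only the non-ARM path of ComputeMappedSize is modeled.

// Sketch: the shape of the new CreateSpecificWasmBuffer decision.
#include <cstdint>
#include <optional>

static const uint32_t WasmPageSize = 64 * 1024;
static const uint64_t HugeMappedSize =
    (uint64_t(UINT32_MAX) + 1) +   // HugeIndexRange: any 32-bit index
    (uint64_t(INT32_MAX) + 1) +    // HugeOffsetGuardLimit: any folded offset
    WasmPageSize;                  // HugeUnalignedGuardPage

// Stand-in for wasm::IsHugeMemoryEnabled(), which is runtime-configured.
static bool IsHugeMemoryEnabled() { return sizeof(void*) == 8; }

// Non-ARM path of wasm::ComputeMappedSize: bounds-check limit + guard page.
static uint64_t ComputeMappedSize(uint32_t maxSize) {
  return uint64_t(maxSize) + WasmPageSize;
}

// The flag is consulted once; the chosen mapped size then flows into
// RawbufT::Allocate as an explicit argument instead of being recomputed.
static uint64_t PickMappedSize(uint32_t initialSize,
                               std::optional<uint32_t> maxSize) {
  if (IsHugeMemoryEnabled()) {
    return HugeMappedSize;
  }
  return ComputeMappedSize(maxSize.value_or(initialSize));
}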
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -222,43 +222,41 @@ bool js::CommitBufferMemory(void* dataEn
 #if defined(MOZ_VALGRIND) && \
     defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
   VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, delta);
 #endif
 
   return true;
 }
 
-#ifndef WASM_HUGE_MEMORY
 bool js::ExtendBufferMapping(void* dataPointer, size_t mappedSize,
                              size_t newMappedSize) {
   MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
   MOZ_ASSERT(newMappedSize % gc::SystemPageSize() == 0);
   MOZ_ASSERT(newMappedSize >= mappedSize);
 
-#  ifdef XP_WIN
+#ifdef XP_WIN
   void* mappedEnd = (char*)dataPointer + mappedSize;
   uint32_t delta = newMappedSize - mappedSize;
   if (!VirtualAlloc(mappedEnd, delta, MEM_RESERVE, PAGE_NOACCESS)) {
     return false;
   }
   return true;
-#  elif defined(XP_LINUX)
+#elif defined(XP_LINUX)
   // Note this will not move memory (no MREMAP_MAYMOVE specified)
   if (MAP_FAILED == mremap(dataPointer, mappedSize, newMappedSize, 0)) {
     return false;
   }
   return true;
-#  else
+#else
   // No mechanism for remapping on MacOS and other Unices. Luckily we
   // shouldn't need it here as most of these are 64-bit.
   return false;
-#  endif
+#endif
 }
-#endif
 
 void js::UnmapBufferMemory(void* base, size_t mappedSize) {
   MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
 
 #ifdef XP_WIN
   VirtualFree(base, 0, MEM_RELEASE);
 #else   // XP_WIN
   munmap(base, mappedSize);
@@ -606,40 +604,31 @@ class js::WasmArrayRawBuffer {
   WasmArrayRawBuffer(uint8_t* buffer, const Maybe<uint32_t>& maxSize,
                      size_t mappedSize)
       : maxSize_(maxSize), mappedSize_(mappedSize) {
     MOZ_ASSERT(buffer == dataPointer());
   }
 
  public:
   static WasmArrayRawBuffer* Allocate(uint32_t numBytes,
-                                      const Maybe<uint32_t>& maxSize);
+                                      const Maybe<uint32_t>& maxSize,
+                                      const Maybe<size_t>& mappedSize);
   static void Release(void* mem);
 
   uint8_t* dataPointer() {
     uint8_t* ptr = reinterpret_cast<uint8_t*>(this);
     return ptr + sizeof(WasmArrayRawBuffer);
   }
 
   uint8_t* basePointer() { return dataPointer() - gc::SystemPageSize(); }
 
   size_t mappedSize() const { return mappedSize_; }
 
   Maybe<uint32_t> maxSize() const { return maxSize_; }
 
-#ifndef WASM_HUGE_MEMORY
-  uint32_t boundsCheckLimit() const {
-    MOZ_ASSERT(mappedSize_ <= UINT32_MAX);
-    MOZ_ASSERT(mappedSize_ >= wasm::GuardSize);
-    MOZ_ASSERT(
-        wasm::IsValidBoundsCheckImmediate(mappedSize_ - wasm::GuardSize));
-    return mappedSize_ - wasm::GuardSize;
-  }
-#endif
-
   MOZ_MUST_USE bool growToSizeInPlace(uint32_t oldSize, uint32_t newSize) {
     MOZ_ASSERT(newSize >= oldSize);
     MOZ_ASSERT_IF(maxSize(), newSize <= maxSize().value());
     MOZ_ASSERT(newSize <= mappedSize());
 
     uint32_t delta = newSize - oldSize;
     MOZ_ASSERT(delta % wasm::PageSize == 0);
 
@@ -648,17 +637,16 @@ class js::WasmArrayRawBuffer {
 
     if (delta && !CommitBufferMemory(dataEnd, delta)) {
       return false;
     }
 
     return true;
   }
 
-#ifndef WASM_HUGE_MEMORY
   bool extendMappedSize(uint32_t maxSize) {
     size_t newMappedSize = wasm::ComputeMappedSize(maxSize);
     MOZ_ASSERT(mappedSize_ <= newMappedSize);
     if (mappedSize_ == newMappedSize) {
       return true;
     }
 
     if (!ExtendBufferMapping(dataPointer(), mappedSize_, newMappedSize)) {
@@ -678,30 +666,27 @@ class js::WasmArrayRawBuffer {
     MOZ_ASSERT(newMaxSize.value() % wasm::PageSize == 0);
 
     if (!extendMappedSize(newMaxSize.value())) {
       return;
     }
 
     maxSize_ = Some(newMaxSize.value());
   }
-#endif  // WASM_HUGE_MEMORY
 };
 
 /* static */
-WasmArrayRawBuffer* WasmArrayRawBuffer::Allocate(
-    uint32_t numBytes, const Maybe<uint32_t>& maxSize) {
+WasmArrayRawBuffer* WasmArrayRawBuffer::Allocate(uint32_t numBytes,
+                                                 const Maybe<uint32_t>& maxSize,
+                                                 const Maybe<size_t>& mapped) {
   MOZ_RELEASE_ASSERT(numBytes <= ArrayBufferObject::MaxBufferByteLength);
 
-  size_t mappedSize;
-#ifdef WASM_HUGE_MEMORY
-  mappedSize = wasm::HugeMappedSize;
-#else
-  mappedSize = wasm::ComputeMappedSize(maxSize.valueOr(numBytes));
-#endif
+  size_t mappedSize = mapped.isSome()
+                          ? *mapped
+                          : wasm::ComputeMappedSize(maxSize.valueOr(numBytes));
 
   MOZ_RELEASE_ASSERT(mappedSize <= SIZE_MAX - gc::SystemPageSize());
   MOZ_RELEASE_ASSERT(numBytes <= maxSize.valueOr(UINT32_MAX));
   MOZ_ASSERT(numBytes % gc::SystemPageSize() == 0);
   MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
 
   uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize();
   uint64_t numBytesWithHeader = numBytes + gc::SystemPageSize();
@@ -731,57 +716,92 @@ void WasmArrayRawBuffer::Release(void* m
 }
 
 WasmArrayRawBuffer* ArrayBufferObject::BufferContents::wasmBuffer() const {
   MOZ_RELEASE_ASSERT(kind_ == WASM);
   return (WasmArrayRawBuffer*)(data_ - sizeof(WasmArrayRawBuffer));
 }
 
 template <typename ObjT, typename RawbufT>
-static bool CreateBuffer(
+static bool CreateSpecificWasmBuffer(
     JSContext* cx, uint32_t initialSize, const Maybe<uint32_t>& maxSize,
     MutableHandleArrayBufferObjectMaybeShared maybeSharedObject) {
 #define ROUND_UP(v, a) ((v) % (a) == 0 ? (v) : v + a - ((v) % (a)))
 
-  RawbufT* buffer = RawbufT::Allocate(initialSize, maxSize);
+  bool useHugeMemory = wasm::IsHugeMemoryEnabled();
+
+  Maybe<uint32_t> clampedMaxSize = maxSize;
+  if (clampedMaxSize) {
+#ifdef JS_64BIT
+    // On 64-bit platforms, when we aren't using huge memory, clamp
+    // clampedMaxSize to a smaller value that satisfies the 32-bit invariants:
+    // clampedMaxSize + wasm::PageSize < UINT32_MAX and
+    // clampedMaxSize % wasm::PageSize == 0.
+    if (!useHugeMemory &&
+        clampedMaxSize.value() >= (UINT32_MAX - wasm::PageSize)) {
+      uint32_t clamp = (wasm::MaxMemoryMaximumPages - 2) * wasm::PageSize;
+      MOZ_ASSERT(clamp < UINT32_MAX);
+      MOZ_ASSERT(initialSize <= clamp);
+      clampedMaxSize = Some(clamp);
+    }
+#else
+    static_assert(sizeof(uintptr_t) == 4, "assuming not 64 bit implies 32 bit");
+
+    // On 32-bit platforms, prevent applications specifying a large max
+    // (like UINT32_MAX) from unintentionally OOMing the browser: they just
+    // want "a lot of memory". Maintain the invariant that
+    // initialSize <= clampedMaxSize.
+    static const uint32_t OneGiB = 1 << 30;
+    uint32_t clamp = Max(OneGiB, initialSize);
+    clampedMaxSize = Some(Min(clamp, *clampedMaxSize));
+#endif
+  }
+
+  Maybe<size_t> mappedSize;
+
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+  if (useHugeMemory) {
+    mappedSize = Some(wasm::HugeMappedSize);
+  }
+#endif
+
+  RawbufT* buffer = RawbufT::Allocate(initialSize, clampedMaxSize, mappedSize);
   if (!buffer) {
-#ifdef WASM_HUGE_MEMORY
-    wasm::Log(cx, "huge Memory allocation failed");
-    ReportOutOfMemory(cx);
-    return false;
-#else
-    // If we fail, and have a maxSize, try to reserve the biggest chunk in
-    // the range [initialSize, maxSize) using log backoff.
-    if (!maxSize) {
+    if (useHugeMemory) {
+      wasm::Log(cx, "huge Memory allocation failed");
+      ReportOutOfMemory(cx);
+      return false;
+    }
+
+    // If we fail, and have a clampedMaxSize, try to reserve the biggest
+    // chunk in the range [initialSize, clampedMaxSize) using log backoff.
+    if (!clampedMaxSize) {
       wasm::Log(cx, "new Memory({initial=%u bytes}) failed", initialSize);
       ReportOutOfMemory(cx);
       return false;
     }
 
-    uint32_t cur = maxSize.value() / 2;
+    uint32_t cur = clampedMaxSize.value() / 2;
 
     for (; cur > initialSize; cur /= 2) {
-      buffer = RawbufT::Allocate(initialSize,
-                                 mozilla::Some(ROUND_UP(cur, wasm::PageSize)));
+      uint32_t clampedMaxSize = ROUND_UP(cur, wasm::PageSize);
+      buffer = RawbufT::Allocate(initialSize, Some(clampedMaxSize), mappedSize);
       if (buffer) {
         break;
       }
     }
 
     if (!buffer) {
       wasm::Log(cx, "new Memory({initial=%u bytes}) failed", initialSize);
       ReportOutOfMemory(cx);
       return false;
     }
 
     // Try to grow our chunk as much as possible.
     for (size_t d = cur / 2; d >= wasm::PageSize; d /= 2) {
       buffer->tryGrowMaxSizeInPlace(ROUND_UP(d, wasm::PageSize));
     }
-#endif
   }
 
 #undef ROUND_UP
 
   // ObjT::createFromNewRawBuffer assumes ownership of |buffer| even in case
   // of failure.
   ObjT* object = ObjT::createFromNewRawBuffer(cx, buffer, initialSize);
   if (!object) {
@@ -800,77 +820,55 @@ static bool CreateBuffer(
     if (allocatedSinceLastTrigger > AllocatedBuffersPerTrigger) {
       Unused << cx->runtime()->gc.triggerGC(JS::GCReason::TOO_MUCH_WASM_MEMORY);
       allocatedSinceLastTrigger = 0;
     }
   } else {
     allocatedSinceLastTrigger = 0;
   }
 
-  if (maxSize) {
-#ifdef WASM_HUGE_MEMORY
-    wasm::Log(cx, "new Memory({initial:%u bytes, maximum:%u bytes}) succeeded",
-              unsigned(initialSize), unsigned(*maxSize));
-#else
-    wasm::Log(cx,
-              "new Memory({initial:%u bytes, maximum:%u bytes}) succeeded "
-              "with internal maximum of %u",
-              unsigned(initialSize), unsigned(*maxSize),
-              unsigned(object->wasmMaxSize().value()));
-#endif
+  if (clampedMaxSize) {
+    if (useHugeMemory) {
+      wasm::Log(cx,
+                "new Memory({initial:%u bytes, maximum:%u bytes}) succeeded",
+                unsigned(initialSize), unsigned(*clampedMaxSize));
+    } else {
+      wasm::Log(cx,
+                "new Memory({initial:%u bytes, maximum:%u bytes}) succeeded "
+                "with internal maximum of %u",
+                unsigned(initialSize), unsigned(*clampedMaxSize),
+                unsigned(object->wasmMaxSize().value()));
+    }
   } else {
     wasm::Log(cx, "new Memory({initial:%u bytes}) succeeded",
               unsigned(initialSize));
   }
 
   return true;
 }
 
 bool js::CreateWasmBuffer(JSContext* cx, const wasm::Limits& memory,
                           MutableHandleArrayBufferObjectMaybeShared buffer) {
   MOZ_ASSERT(memory.initial % wasm::PageSize == 0);
   MOZ_RELEASE_ASSERT(cx->wasmHaveSignalHandlers);
   MOZ_RELEASE_ASSERT((memory.initial / wasm::PageSize) <=
                      wasm::MaxMemoryInitialPages);
 
-  // Prevent applications specifying a large max (like UINT32_MAX) from
-  // unintentially OOMing the browser on 32-bit: they just want "a lot of
-  // memory". Maintain the invariant that initialSize <= maxSize.
-
-  Maybe<uint32_t> maxSize = memory.maximum;
-  if (sizeof(void*) == 4 && maxSize) {
-    static const uint32_t OneGiB = 1 << 30;
-    uint32_t clamp = Max(OneGiB, memory.initial);
-    maxSize = Some(Min(clamp, *maxSize));
-  }
-
-#ifndef WASM_HUGE_MEMORY
-  if (sizeof(void*) == 8 && maxSize &&
-      maxSize.value() >= (UINT32_MAX - wasm::PageSize)) {
-    // On 64-bit platforms that don't define WASM_HUGE_MEMORY
-    // clamp maxSize to smaller value that satisfies the 32-bit invariants
-    // maxSize + wasm::PageSize < UINT32_MAX and maxSize % wasm::PageSize == 0
-    uint32_t clamp = (wasm::MaxMemoryMaximumPages - 2) * wasm::PageSize;
-    MOZ_ASSERT(clamp < UINT32_MAX);
-    MOZ_ASSERT(memory.initial <= clamp);
-    maxSize = Some(clamp);
-  }
-#endif
-
   if (memory.shared == wasm::Shareable::True) {
     if (!cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled()) {
       JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                                 JSMSG_WASM_NO_SHMEM_LINK);
       return false;
     }
-    return CreateBuffer<SharedArrayBufferObject, SharedArrayRawBuffer>(
-        cx, memory.initial, maxSize, buffer);
+    return CreateSpecificWasmBuffer<SharedArrayBufferObject,
+                                    SharedArrayRawBuffer>(
+        cx, memory.initial, memory.maximum, buffer);
   }
-  return CreateBuffer<ArrayBufferObject, WasmArrayRawBuffer>(cx, memory.initial,
-                                                             maxSize, buffer);
+  return CreateSpecificWasmBuffer<ArrayBufferObject, WasmArrayRawBuffer>(
+      cx, memory.initial, memory.maximum, buffer);
 }
 
 bool ArrayBufferObject::prepareForAsmJS() {
   MOZ_ASSERT(byteLength() % wasm::PageSize == 0,
              "prior size checking should have guaranteed page-size multiple");
   MOZ_ASSERT(byteLength() > 0,
              "prior size checking should have excluded empty buffers");
 
@@ -1091,79 +1089,55 @@ bool ArrayBufferObject::wasmGrowToSizeIn
 
   // Set |newBuf|'s contents to |oldBuf|'s original contents.
   newBuf->initialize(newSize, oldContents);
   AddCellMemory(newBuf, newSize, MemoryUse::ArrayBufferContents);
 
   return true;
 }
 
-#ifndef WASM_HUGE_MEMORY
 /* static */
 bool ArrayBufferObject::wasmMovingGrowToSize(
     uint32_t newSize, HandleArrayBufferObject oldBuf,
     MutableHandleArrayBufferObject newBuf, JSContext* cx) {
   // On failure, do not throw and ensure that the original buffer is
   // unmodified and valid.
 
   if (newSize > ArrayBufferObject::MaxBufferByteLength) {
     return false;
   }
 
-  if (newSize <= oldBuf->wasmBoundsCheckLimit() ||
+  if (wasm::ComputeMappedSize(newSize) <= oldBuf->wasmMappedSize() ||
       oldBuf->contents().wasmBuffer()->extendMappedSize(newSize)) {
     return wasmGrowToSizeInPlace(newSize, oldBuf, newBuf, cx);
   }
 
   newBuf.set(ArrayBufferObject::createEmpty(cx));
   if (!newBuf) {
     cx->clearPendingException();
     return false;
   }
 
   WasmArrayRawBuffer* newRawBuf =
-      WasmArrayRawBuffer::Allocate(newSize, Nothing());
+      WasmArrayRawBuffer::Allocate(newSize, Nothing(), Nothing());
   if (!newRawBuf) {
     return false;
   }
 
   AddCellMemory(newBuf, newSize, MemoryUse::ArrayBufferContents);
 
   BufferContents contents =
       BufferContents::createWasm(newRawBuf->dataPointer());
   newBuf->initialize(newSize, contents);
 
   memcpy(newBuf->dataPointer(), oldBuf->dataPointer(), oldBuf->byteLength());
   ArrayBufferObject::detach(cx, oldBuf);
   return true;
 }
 
-uint32_t ArrayBufferObject::wasmBoundsCheckLimit() const {
-  if (isWasm()) {
-    return contents().wasmBuffer()->boundsCheckLimit();
-  }
-  return byteLength();
-}
-
-uint32_t ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const {
-  if (is<ArrayBufferObject>()) {
-    return as<ArrayBufferObject>().wasmBoundsCheckLimit();
-  }
-  return as<SharedArrayBufferObject>().wasmBoundsCheckLimit();
-}
-#else
-uint32_t ArrayBufferObject::wasmBoundsCheckLimit() const {
-  return byteLength();
-}
-
-uint32_t ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const {
-  return byteLength();
-}
-#endif
-
 uint32_t ArrayBufferObject::flags() const {
   return uint32_t(getFixedSlot(FLAGS_SLOT).toInt32());
 }
 
 void ArrayBufferObject::setFlags(uint32_t flags) {
   setFixedSlot(FLAGS_SLOT, Int32Value(flags));
 }
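The log-backoff reservation in CreateSpecificWasmBuffer above can be read in isolation. A minimal self-contained sketch follows; tryReserve and tryGrow are hypothetical stand-ins for RawbufT::Allocate and tryGrowMaxSizeInPlace, and error handling is reduced to return values.

#include <cstdint>

static const uint32_t PageSize = 64 * 1024;  // wasm::PageSize

static uint32_t RoundUp(uint32_t v, uint32_t a) {
  return v % a == 0 ? v : v + a - v % a;
}

// Returns the max size actually reserved, or 0 on failure.
static uint32_t ReserveWithBackoff(uint32_t initialSize, uint32_t maxSize,
                                   bool (*tryReserve)(uint32_t),
                                   bool (*tryGrow)(uint32_t)) {
  if (tryReserve(maxSize)) {
    return maxSize;
  }
  // Log backoff: halve until a reservation in [initialSize, maxSize) works.
  uint32_t cur = maxSize / 2;
  for (; cur > initialSize; cur /= 2) {
    if (tryReserve(RoundUp(cur, PageSize))) {
      break;
    }
  }
  if (cur <= initialSize) {
    return 0;
  }
  // Climb back up: try to grow the reservation by successively halved deltas.
  uint32_t reserved = RoundUp(cur, PageSize);
  for (uint32_t d = cur / 2; d >= PageSize; d /= 2) {
    if (tryGrow(RoundUp(d, PageSize))) {
      reserved += RoundUp(d, PageSize);
    }
  }
  return reserved;
}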
 
--- a/js/src/vm/ArrayBufferObject.h
+++ b/js/src/vm/ArrayBufferObject.h
@@ -30,25 +30,23 @@ class WasmArrayRawBuffer;
 void* MapBufferMemory(size_t mappedSize, size_t initialCommittedSize);
 
 // Commit additional memory in an existing mapping.  `dataEnd` must be the
 // correct value for the end of the existing committed area, and `delta` must be
 // a byte amount to grow the mapping by, and must be a multiple of the page
 // size.  Returns false on failure.
 bool CommitBufferMemory(void* dataEnd, uint32_t delta);
 
-#ifndef WASM_HUGE_MEMORY
 // Extend an existing mapping by adding uncommitted pages to it.  `dataStart`
 // must be the pointer to the start of the existing mapping, `mappedSize` the
 // size of the existing mapping, and `newMappedSize` the size of the extended
 // mapping (sizes in bytes), with `mappedSize` <= `newMappedSize`.  Both sizes
 // must be divisible by the page size.  Returns false on failure.
 bool ExtendBufferMapping(void* dataStart, size_t mappedSize,
                          size_t newMappedSize);
-#endif
 
 // Remove an existing mapping.  `dataStart` must be the pointer to the start of
 // the mapping, and `mappedSize` the size of that mapping.
 void UnmapBufferMemory(void* dataStart, size_t mappedSize);
 
 // Return the number of currently live mapped buffers.
 int32_t LiveMappedBufferCount();
 
@@ -117,17 +115,16 @@ class ArrayBufferObjectMaybeShared : pub
   // WebAssembly support:
   // Note: the eventual goal is to remove this from ArrayBuffer and have
   // (Shared)ArrayBuffers alias memory owned by some wasm::Memory object.
 
   mozilla::Maybe<uint32_t> wasmMaxSize() const {
     return WasmArrayBufferMaxSize(this);
   }
   size_t wasmMappedSize() const { return WasmArrayBufferMappedSize(this); }
-  uint32_t wasmBoundsCheckLimit() const;
 
   inline bool isPreparedForAsmJS() const;
   inline bool isWasm() const;
 };
 
 typedef Rooted<ArrayBufferObjectMaybeShared*>
     RootedArrayBufferObjectMaybeShared;
 typedef Handle<ArrayBufferObjectMaybeShared*>
@@ -415,22 +412,19 @@ class ArrayBufferObject : public ArrayBu
    */
   MOZ_MUST_USE bool prepareForAsmJS();
 
   size_t wasmMappedSize() const;
   mozilla::Maybe<uint32_t> wasmMaxSize() const;
   static MOZ_MUST_USE bool wasmGrowToSizeInPlace(
       uint32_t newSize, Handle<ArrayBufferObject*> oldBuf,
       MutableHandle<ArrayBufferObject*> newBuf, JSContext* cx);
-#ifndef WASM_HUGE_MEMORY
   static MOZ_MUST_USE bool wasmMovingGrowToSize(
       uint32_t newSize, Handle<ArrayBufferObject*> oldBuf,
       MutableHandle<ArrayBufferObject*> newBuf, JSContext* cx);
-#endif
-  uint32_t wasmBoundsCheckLimit() const;
 
   static void finalize(JSFreeOp* fop, JSObject* obj);
 
   static BufferContents createMappedContents(int fd, size_t offset,
                                              size_t length);
 
   static size_t offsetOfDataSlot() { return getFixedSlotOffset(DATA_SLOT); }
 
--- a/js/src/vm/SharedArrayObject.cpp
+++ b/js/src/vm/SharedArrayObject.cpp
@@ -24,86 +24,83 @@
 #include "vm/NativeObject-inl.h"
 
 using mozilla::CheckedInt;
 using mozilla::Maybe;
 using mozilla::Nothing;
 
 using namespace js;
 
-static size_t SharedArrayMappedSizeForWasm(size_t declaredMaxSize) {
-#ifdef WASM_HUGE_MEMORY
-  return wasm::HugeMappedSize;
-#else
-  return wasm::ComputeMappedSize(declaredMaxSize);
-#endif
-}
-
 static uint32_t SharedArrayAccessibleSize(uint32_t length) {
   return AlignBytes(length, gc::SystemPageSize());
 }
 
-// `max` must be something for wasm, nothing for other cases.
+// `maxSize` must be something for wasm, nothing for other cases.
 SharedArrayRawBuffer* SharedArrayRawBuffer::Allocate(
-    uint32_t length, const Maybe<uint32_t>& max) {
+    uint32_t length, const Maybe<uint32_t>& maxSize,
+    const Maybe<size_t>& mappedSize) {
   MOZ_RELEASE_ASSERT(length <= ArrayBufferObject::MaxBufferByteLength);
 
-  bool preparedForWasm = max.isSome();
-
   uint32_t accessibleSize = SharedArrayAccessibleSize(length);
   if (accessibleSize < length) {
     return nullptr;
   }
 
-  uint32_t maxSize = max.isSome() ? *max : accessibleSize;
+  bool preparedForWasm = maxSize.isSome();
+  uint32_t computedMaxSize;
+  size_t computedMappedSize;
 
-  size_t mappedSize;
   if (preparedForWasm) {
-    mappedSize = SharedArrayMappedSizeForWasm(maxSize);
+    computedMaxSize = *maxSize;
+    computedMappedSize = mappedSize.isSome()
+                             ? *mappedSize
+                             : wasm::ComputeMappedSize(computedMaxSize);
   } else {
-    mappedSize = accessibleSize;
+    computedMappedSize = accessibleSize;
+    computedMaxSize = accessibleSize;
   }
 
-  uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize();
+  MOZ_ASSERT(accessibleSize <= computedMaxSize);
+  MOZ_ASSERT(accessibleSize <= computedMappedSize);
+
+  uint64_t mappedSizeWithHeader = computedMappedSize + gc::SystemPageSize();
   uint64_t accessibleSizeWithHeader = accessibleSize + gc::SystemPageSize();
 
   void* p = MapBufferMemory(mappedSizeWithHeader, accessibleSizeWithHeader);
   if (!p) {
     return nullptr;
   }
 
   uint8_t* buffer = reinterpret_cast<uint8_t*>(p) + gc::SystemPageSize();
   uint8_t* base = buffer - sizeof(SharedArrayRawBuffer);
   SharedArrayRawBuffer* rawbuf = new (base) SharedArrayRawBuffer(
-      buffer, length, maxSize, mappedSize, preparedForWasm);
+      buffer, length, computedMaxSize, computedMappedSize, preparedForWasm);
   MOZ_ASSERT(rawbuf->length_ == length);  // Deallocation needs this
   return rawbuf;
 }
 
-#ifndef WASM_HUGE_MEMORY
 void SharedArrayRawBuffer::tryGrowMaxSizeInPlace(uint32_t deltaMaxSize) {
   CheckedInt<uint32_t> newMaxSize = maxSize_;
   newMaxSize += deltaMaxSize;
   MOZ_ASSERT(newMaxSize.isValid());
   MOZ_ASSERT(newMaxSize.value() % wasm::PageSize == 0);
 
-  size_t newMappedSize = SharedArrayMappedSizeForWasm(newMaxSize.value());
+  size_t newMappedSize = wasm::ComputeMappedSize(newMaxSize.value());
   MOZ_ASSERT(mappedSize_ <= newMappedSize);
   if (mappedSize_ == newMappedSize) {
     return;
   }
 
   if (!ExtendBufferMapping(basePointer(), mappedSize_, newMappedSize)) {
     return;
   }
 
   mappedSize_ = newMappedSize;
   maxSize_ = newMaxSize.value();
 }
-#endif
 
 bool SharedArrayRawBuffer::wasmGrowToSizeInPlace(const Lock&,
                                                  uint32_t newLength) {
   if (newLength > ArrayBufferObject::MaxBufferByteLength) {
     return false;
   }
 
   MOZ_ASSERT(newLength >= length_);
@@ -220,17 +217,17 @@ bool SharedArrayBufferObject::class_cons
   args.rval().setObject(*bufobj);
   return true;
 }
 
 SharedArrayBufferObject* SharedArrayBufferObject::New(JSContext* cx,
                                                       uint32_t length,
                                                       HandleObject proto) {
   SharedArrayRawBuffer* buffer =
-      SharedArrayRawBuffer::Allocate(length, Nothing());
+      SharedArrayRawBuffer::Allocate(length, Nothing(), Nothing());
   if (!buffer) {
     return nullptr;
   }
 
   SharedArrayBufferObject* obj = New(cx, buffer, length, proto);
   if (!obj) {
     buffer->dropReference();
     return nullptr;
@@ -283,25 +280,16 @@ void SharedArrayBufferObject::Finalize(J
   // which causes a SharedArrayRawBuffer to never be attached.
   Value v = buf.getReservedSlot(RAWBUF_SLOT);
   if (!v.isUndefined()) {
     buf.rawBufferObject()->dropReference();
     buf.dropRawBuffer();
   }
 }
 
-#ifndef WASM_HUGE_MEMORY
-uint32_t SharedArrayBufferObject::wasmBoundsCheckLimit() const {
-  if (isWasm()) {
-    return rawBufferObject()->boundsCheckLimit();
-  }
-  return byteLength();
-}
-#endif
-
 /* static */
 void SharedArrayBufferObject::addSizeOfExcludingThis(
     JSObject* obj, mozilla::MallocSizeOf mallocSizeOf, JS::ClassInfo* info) {
   // Divide the buffer size by the refcount to get the fraction of the buffer
   // owned by this thread. It's conceivable that the refcount might change in
   // the middle of memory reporting, in which case the amount reported for
   // some threads might be too high (if the refcount goes up) or too low (if
   // the refcount goes down). But that's unlikely and hard to avoid, so we
--- a/js/src/vm/SharedArrayObject.h
+++ b/js/src/vm/SharedArrayObject.h
@@ -86,18 +86,19 @@ class SharedArrayRawBuffer {
     SharedArrayRawBuffer* buf;
 
    public:
     explicit Lock(SharedArrayRawBuffer* buf) : buf(buf) { buf->lock_.lock(); }
     ~Lock() { buf->lock_.unlock(); }
   };
 
-  // max must be Something for wasm, Nothing for other uses
+  // maxSize must be Something for wasm, Nothing for other uses
-  static SharedArrayRawBuffer* Allocate(uint32_t initial,
-                                        const mozilla::Maybe<uint32_t>& max);
+  static SharedArrayRawBuffer* Allocate(
+      uint32_t length, const mozilla::Maybe<uint32_t>& maxSize,
+      const mozilla::Maybe<size_t>& mappedSize);
 
   // This may be called from multiple threads.  The caller must take
   // care of mutual exclusion.
   FutexWaiter* waiters() const { return waiters_; }
 
   // This may be called from multiple threads.  The caller must take
   // care of mutual exclusion.
   void setWaiters(FutexWaiter* waiters) { waiters_ = waiters; }
@@ -109,25 +110,19 @@ class SharedArrayRawBuffer {
   }
 
   uint32_t byteLength(const Lock&) const { return length_; }
 
   uint32_t maxSize() const { return maxSize_; }
 
   size_t mappedSize() const { return mappedSize_; }
 
-#ifndef WASM_HUGE_MEMORY
-  uint32_t boundsCheckLimit() const { return mappedSize_ - wasm::GuardSize; }
-#endif
-
   bool isWasm() const { return preparedForWasm_; }
 
-#ifndef WASM_HUGE_MEMORY
   void tryGrowMaxSizeInPlace(uint32_t deltaMaxSize);
-#endif
 
   bool wasmGrowToSizeInPlace(const Lock&, uint32_t newLength);
 
   uint32_t refcount() const { return refcount_; }
 
   MOZ_MUST_USE bool addReference();
   void dropReference();
 
@@ -228,20 +223,16 @@ class SharedArrayBufferObject : public A
       JSContext* cx, SharedArrayRawBuffer* buffer, uint32_t initialSize);
 
   mozilla::Maybe<uint32_t> wasmMaxSize() const {
     return mozilla::Some(rawBufferObject()->maxSize());
   }
 
   size_t wasmMappedSize() const { return rawBufferObject()->mappedSize(); }
 
-#ifndef WASM_HUGE_MEMORY
-  uint32_t wasmBoundsCheckLimit() const;
-#endif
-
  private:
   void acceptRawBuffer(SharedArrayRawBuffer* buffer, uint32_t length);
   void dropRawBuffer();
 };
 
 bool IsSharedArrayBuffer(HandleValue v);
 bool IsSharedArrayBuffer(HandleObject o);
 bool IsSharedArrayBuffer(JSObject* o);
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -5240,18 +5240,20 @@ class BaseCompiler final : public BaseCo
   // Heap access.
 
   void bceCheckLocal(MemoryAccessDesc* access, AccessCheck* check,
                      uint32_t local) {
     if (local >= sizeof(BCESet) * 8) {
       return;
     }
 
+    uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
+
     if ((bceSafe_ & (BCESet(1) << local)) &&
-        access->offset() < wasm::OffsetGuardLimit) {
+        access->offset() < offsetGuardLimit) {
       check->omitBoundsCheck = true;
     }
 
     // The local becomes safe even if the offset is beyond the guard limit.
     bceSafe_ |= (BCESet(1) << local);
   }
 
   void bceLocalIsUpdated(uint32_t local) {
@@ -5259,19 +5261,20 @@ class BaseCompiler final : public BaseCo
       return;
     }
 
     bceSafe_ &= ~(BCESet(1) << local);
   }
 
   void prepareMemoryAccess(MemoryAccessDesc* access, AccessCheck* check,
                            RegI32 tls, RegI32 ptr) {
+    uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
+
     // Fold offset if necessary for further computations.
-
-    if (access->offset() >= OffsetGuardLimit ||
+    if (access->offset() >= offsetGuardLimit ||
         (access->isAtomic() && !check->omitAlignmentCheck &&
          !check->onlyPointerAlignment)) {
       Label ok;
       masm.branchAdd32(Assembler::CarryClear, Imm32(access->offset()), ptr,
                        &ok);
       masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset());
       masm.bind(&ok);
       access->clearOffset();
@@ -9331,19 +9334,20 @@ RegI32 BaseCompiler::popMemoryAccess(Mem
                                      AccessCheck* check) {
   check->onlyPointerAlignment =
       (access->offset() & (access->byteSize() - 1)) == 0;
 
   int32_t addrTemp;
   if (popConstI32(&addrTemp)) {
     uint32_t addr = addrTemp;
 
+    uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
+
     uint64_t ea = uint64_t(addr) + uint64_t(access->offset());
-    uint64_t limit =
-        uint64_t(env_.minMemoryLength) + uint64_t(wasm::OffsetGuardLimit);
+    uint64_t limit = uint64_t(env_.minMemoryLength) + offsetGuardLimit;
 
     check->omitBoundsCheck = ea < limit;
     check->omitAlignmentCheck = (ea & (access->byteSize() - 1)) == 0;
 
     // Fold the offset into the pointer if we can, as this is always
     // beneficial.
 
     if (ea <= UINT32_MAX) {
--- a/js/src/wasm/WasmCraneliftCompile.cpp
+++ b/js/src/wasm/WasmCraneliftCompile.cpp
@@ -228,16 +228,19 @@ class AutoCranelift {
  public:
   explicit AutoCranelift(const ModuleEnvironment& env)
       : env_(env), compiler_(nullptr) {
 #ifdef WASM_SUPPORTS_HUGE_MEMORY
     if (env.hugeMemoryEnabled()) {
       // In the huge memory configuration, we always reserve the full 4 GB
       // index space for a heap.
       staticEnv_.staticMemoryBound = HugeIndexRange;
+      staticEnv_.memoryGuardSize = HugeOffsetGuardLimit;
+    } else {
+      staticEnv_.memoryGuardSize = OffsetGuardLimit;
     }
 #endif
     // Otherwise, heap bounds are stored in the `boundsCheckLimit` field
     // of TlsData.
   }
   bool init() {
     compiler_ = cranelift_compiler_create(&staticEnv_, &env_);
     return !!compiler_;
@@ -284,17 +287,17 @@ CraneliftStaticEnvironment::CraneliftSta
       hasLzcnt(false),
 #endif
 #if defined(XP_WIN)
       platformIsWindows(true),
 #else
       platformIsWindows(false),
 #endif
       staticMemoryBound(0),
-      memoryGuardSize(OffsetGuardLimit),
+      memoryGuardSize(0),
       instanceTlsOffset(offsetof(TlsData, instance)),
       interruptTlsOffset(offsetof(TlsData, interrupt)),
       cxTlsOffset(offsetof(TlsData, cx)),
       realmCxOffset(JSContext::offsetOfRealm()),
       realmTlsOffset(offsetof(TlsData, realm)),
       realmFuncImportTlsOffset(offsetof(FuncImportTls, realm)) {
 }
 
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -1218,18 +1218,17 @@ Instance::Instance(JSContext* cx, Handle
   for (auto t : code_->tiers()) {
     MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length());
   }
 #endif
   MOZ_ASSERT(tables_.length() == metadata().tables.length());
 
   tlsData()->memoryBase =
       memory ? memory->buffer().dataPointerEither().unwrap() : nullptr;
-  tlsData()->boundsCheckLimit =
-      memory ? memory->buffer().wasmBoundsCheckLimit() : 0;
+  tlsData()->boundsCheckLimit = memory ? memory->boundsCheckLimit() : 0;
   tlsData()->instance = this;
   tlsData()->realm = realm_;
   tlsData()->cx = cx;
   tlsData()->resetInterrupt(cx);
   tlsData()->jumpTable = code_->tieringJumpTable();
   tlsData()->addressOfNeedsIncrementalBarrier =
       (uint8_t*)cx->compartment()->zone()->addressOfNeedsIncrementalBarrier();
 
@@ -1890,17 +1889,17 @@ void Instance::ensureProfilingLabels(boo
 }
 
 void Instance::onMovingGrowMemory() {
   MOZ_ASSERT(!isAsmJS());
   MOZ_ASSERT(!memory_->isShared());
 
   ArrayBufferObject& buffer = memory_->buffer().as<ArrayBufferObject>();
   tlsData()->memoryBase = buffer.dataPointer();
-  tlsData()->boundsCheckLimit = buffer.wasmBoundsCheckLimit();
+  tlsData()->boundsCheckLimit = memory_->boundsCheckLimit();
 }
 
 void Instance::onMovingGrowTable(const Table* theTable) {
   MOZ_ASSERT(!isAsmJS());
 
   // `theTable` has grown and we must update cached data for it.  Importantly,
   // we can have cached those data in more than one location: we'll have
   // cached them once for each time the table was imported into this instance.
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -609,44 +609,42 @@ class FunctionCompiler {
     *mustAdd = (access->offset() & (access->byteSize() - 1)) != 0;
     return true;
   }
 
   void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
                                         MDefinition** base) {
     MOZ_ASSERT(!inDeadCode());
 
+    uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
+
     // Fold a constant base into the offset (so the base is 0 in which case
     // the codegen is optimized), if it doesn't wrap or trigger an
     // MWasmAddOffset.
     if ((*base)->isConstant()) {
       uint32_t basePtr = (*base)->toConstant()->toInt32();
       uint32_t offset = access->offset();
 
-      static_assert(
-          OffsetGuardLimit < UINT32_MAX,
-          "checking for overflow against OffsetGuardLimit is enough.");
-
-      if (offset < OffsetGuardLimit && basePtr < OffsetGuardLimit - offset) {
+      if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
         auto* ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
         curBlock_->add(ins);
         *base = ins;
         access->setOffset(access->offset() + basePtr);
       }
     }
 
     bool mustAdd = false;
     bool alignmentCheck = needAlignmentCheck(access, *base, &mustAdd);
 
     // If the offset is bigger than the guard region, a separate instruction
     // is necessary to add the offset to the base and check for overflow.
     //
     // Also add the offset if we have a Wasm atomic access that needs
     // alignment checking and the offset affects alignment.
-    if (access->offset() >= OffsetGuardLimit || mustAdd ||
+    if (access->offset() >= offsetGuardLimit || mustAdd ||
         !JitOptions.wasmFoldOffsets) {
       *base = computeEffectiveAddress(*base, access);
     }
 
     if (alignmentCheck) {
       curBlock_->add(MWasmAlignmentCheck::New(
           alloc(), *base, access->byteSize(), bytecodeOffset()));
     }
--- a/js/src/wasm/WasmJS.cpp
+++ b/js/src/wasm/WasmJS.cpp
@@ -1819,24 +1819,41 @@ WasmMemoryObject::InstanceSet* WasmMemor
 
     InitReservedSlot(this, OBSERVERS_SLOT, observers.release(),
                      MemoryUse::WasmMemoryObservers);
   }
 
   return &observers();
 }
 
-bool WasmMemoryObject::movingGrowable() const {
-#ifdef WASM_HUGE_MEMORY
+bool WasmMemoryObject::isHuge() const {
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+  static_assert(ArrayBufferObject::MaxBufferByteLength < HugeMappedSize,
+                "Non-huge buffer may be confused as huge");
+  return buffer().wasmMappedSize() >= HugeMappedSize;
+#else
   return false;
-#else
-  return !buffer().wasmMaxSize();
 #endif
 }
 
+bool WasmMemoryObject::movingGrowable() const {
+  return !isHuge() && !buffer().wasmMaxSize();
+}
+
+uint32_t WasmMemoryObject::boundsCheckLimit() const {
+  if (!buffer().isWasm() || isHuge()) {
+    return buffer().byteLength();
+  }
+  size_t mappedSize = buffer().wasmMappedSize();
+  MOZ_ASSERT(mappedSize <= UINT32_MAX);
+  MOZ_ASSERT(mappedSize >= wasm::GuardSize);
+  MOZ_ASSERT(wasm::IsValidBoundsCheckImmediate(mappedSize - wasm::GuardSize));
+  return mappedSize - wasm::GuardSize;
+}
+
 bool WasmMemoryObject::addMovingGrowObserver(JSContext* cx,
                                              WasmInstanceObject* instance) {
   MOZ_ASSERT(movingGrowable());
 
   InstanceSet* observers = getOrCreateObservers(cx);
   if (!observers) {
     return false;
   }
@@ -1895,38 +1912,33 @@ uint32_t WasmMemoryObject::grow(HandleWa
   newSize += delta;
   newSize *= PageSize;
   if (!newSize.isValid()) {
     return -1;
   }
 
   RootedArrayBufferObject newBuf(cx);
 
-  if (Maybe<uint32_t> maxSize = oldBuf->wasmMaxSize()) {
-    if (newSize.value() > maxSize.value()) {
+  if (memory->movingGrowable()) {
+    MOZ_ASSERT(!memory->isHuge());
+    if (!ArrayBufferObject::wasmMovingGrowToSize(newSize.value(), oldBuf,
+                                                 &newBuf, cx)) {
       return -1;
     }
+  } else {
+    if (Maybe<uint32_t> maxSize = oldBuf->wasmMaxSize()) {
+      if (newSize.value() > maxSize.value()) {
+        return -1;
+      }
+    }
 
     if (!ArrayBufferObject::wasmGrowToSizeInPlace(newSize.value(), oldBuf,
                                                   &newBuf, cx)) {
       return -1;
     }
-  } else {
-#ifdef WASM_HUGE_MEMORY
-    if (!ArrayBufferObject::wasmGrowToSizeInPlace(newSize.value(), oldBuf,
-                                                  &newBuf, cx)) {
-      return -1;
-    }
-#else
-    MOZ_ASSERT(memory->movingGrowable());
-    if (!ArrayBufferObject::wasmMovingGrowToSize(newSize.value(), oldBuf,
-                                                 &newBuf, cx)) {
-      return -1;
-    }
-#endif
   }
 
   memory->setReservedSlot(BUFFER_SLOT, ObjectValue(*newBuf));
 
   // Only notify moving-grow-observers after the BUFFER_SLOT has been updated
   // since observers will call buffer().
   if (memory->hasObservers()) {
     for (InstanceSet::Range r = memory->observers().all(); !r.empty();
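The isHuge() test added above works because huge and non-huge mappings occupy disjoint size ranges — the patch asserts statically that MaxBufferByteLength < HugeMappedSize — so no flag has to be stored on the memory object. A minimal sketch with illustrative stand-in constants:

#include <cstdint>

static const uint64_t GuardSize = 64 * 1024;            // wasm::GuardSize
static const uint64_t MaxBufferByteLength = INT32_MAX;  // illustrative stand-in
static const uint64_t HugeMappedSize =                  // mirrors WasmTypes.h
    (uint64_t(UINT32_MAX) + 1) + (uint64_t(INT32_MAX) + 1) + 64 * 1024;
static_assert(MaxBufferByteLength < HugeMappedSize,
              "mapped size alone must distinguish huge from non-huge");

static bool IsHuge(uint64_t mappedSize) {
  // Only a huge reservation can be this large, so the mapped size alone
  // identifies it.
  return mappedSize >= HugeMappedSize;
}

static uint32_t BoundsCheckLimit(uint64_t mappedSize, uint32_t byteLength) {
  if (IsHuge(mappedSize)) {
    return byteLength;  // huge memories rely on guard pages, not this limit
  }
  return uint32_t(mappedSize - GuardSize);  // keep the trailing guard page
}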
--- a/js/src/wasm/WasmJS.h
+++ b/js/src/wasm/WasmJS.h
@@ -324,17 +324,19 @@ class WasmMemoryObject : public NativeOb
   ArrayBufferObjectMaybeShared& buffer() const;
 
   // The current length of the memory.  In the case of shared memory, the
   // length can change at any time.  Also note that this will acquire a lock
   // for shared memory, so do not call this from a signal handler.
   uint32_t volatileMemoryLength() const;
 
   bool isShared() const;
+  bool isHuge() const;
   bool movingGrowable() const;
+  uint32_t boundsCheckLimit() const;
 
   // If isShared() is true then obtain the underlying buffer object.
   SharedArrayRawBuffer* sharedArrayRawBuffer() const;
 
   bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance);
   static uint32_t grow(HandleWasmMemoryObject memory, uint32_t delta,
                        JSContext* cx);
 };
--- a/js/src/wasm/WasmModule.cpp
+++ b/js/src/wasm/WasmModule.cpp
@@ -788,16 +788,18 @@ bool Module::instantiateMemory(JSContext
     RootedObject proto(
         cx, &cx->global()->getPrototype(JSProto_WasmMemory).toObject());
     memory.set(WasmMemoryObject::create(cx, buffer, proto));
     if (!memory) {
       return false;
     }
   }
 
+  MOZ_RELEASE_ASSERT(memory->isHuge() == metadata().omitsBoundsChecks);
+
   return true;
 }
 
 bool Module::instantiateImportedTable(JSContext* cx, const TableDesc& td,
                                       Handle<WasmTableObject*> tableObj,
                                       WasmTableObjectVector* tableObjs,
                                       SharedTableVector* tables) const {
   MOZ_ASSERT(tableObj);
--- a/js/src/wasm/WasmTypes.cpp
+++ b/js/src/wasm/WasmTypes.cpp
@@ -51,16 +51,21 @@ static_assert(MaxMemoryInitialPages <=
 // All plausible targets must be able to do at least IEEE754 double
 // loads/stores, hence the lower limit of 8.  Some Intel processors support
 // AVX-512 loads/stores, hence the upper limit of 64.
 static_assert(MaxMemoryAccessSize >= 8, "MaxMemoryAccessSize too low");
 static_assert(MaxMemoryAccessSize <= 64, "MaxMemoryAccessSize too high");
 static_assert((MaxMemoryAccessSize & (MaxMemoryAccessSize - 1)) == 0,
               "MaxMemoryAccessSize is not a power of two");
 
+#if defined(WASM_SUPPORTS_HUGE_MEMORY)
+static_assert(HugeMappedSize > ArrayBufferObject::MaxBufferByteLength,
+              "Normal array buffer could be confused with huge memory");
+#endif
+
 Val::Val(const LitVal& val) {
   type_ = val.type();
   switch (type_.code()) {
     case ValType::I32:
       u.i32_ = val.i32();
       return;
     case ValType::F32:
       u.f32_ = val.f32();
@@ -580,47 +585,43 @@ uint32_t wasm::RoundUpToNextValidARMImme
     i = (i + 0x00ffffff) & ~0x00ffffff;
   }
 
   MOZ_ASSERT(IsValidARMImmediate(i));
 
   return i;
 }
 
-#ifndef WASM_HUGE_MEMORY
-
 bool wasm::IsValidBoundsCheckImmediate(uint32_t i) {
-#  ifdef JS_CODEGEN_ARM
+#ifdef JS_CODEGEN_ARM
   return IsValidARMImmediate(i);
-#  else
+#else
   return true;
-#  endif
+#endif
 }
 
 size_t wasm::ComputeMappedSize(uint32_t maxSize) {
   MOZ_ASSERT(maxSize % PageSize == 0);
 
   // It is the bounds-check limit, not the mapped size, that gets baked into
   // code. Thus round up the maxSize to the next valid immediate value
   // *before* adding in the guard page.
 
-#  ifdef JS_CODEGEN_ARM
+#ifdef JS_CODEGEN_ARM
   uint32_t boundsCheckLimit = RoundUpToNextValidARMImmediate(maxSize);
-#  else
+#else
   uint32_t boundsCheckLimit = maxSize;
-#  endif
+#endif
   MOZ_ASSERT(IsValidBoundsCheckImmediate(boundsCheckLimit));
 
   MOZ_ASSERT(boundsCheckLimit % gc::SystemPageSize() == 0);
   MOZ_ASSERT(GuardSize % gc::SystemPageSize() == 0);
   return boundsCheckLimit + GuardSize;
 }
 
-#endif  // WASM_HUGE_MEMORY
-
 /* static */
 DebugFrame* DebugFrame::from(Frame* fp) {
   MOZ_ASSERT(fp->tls->instance->code().metadata().debugEnabled);
   auto* df =
       reinterpret_cast<DebugFrame*>((uint8_t*)fp - DebugFrame::offsetOfFrame());
   MOZ_ASSERT(fp->instance() == df->instance());
   return df;
 }
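To make the ComputeMappedSize contract concrete, here is a small worked example assuming the non-ARM path (where the bounds-check limit equals maxSize) and the real constants: a wasm page is 64 KiB and GuardSize is one page.

#include <cassert>
#include <cstdint>
#include <cstdio>

static const uint32_t PageSize = 64 * 1024;
static const size_t GuardSize = PageSize;

static size_t ComputeMappedSize(uint32_t maxSize) {
  assert(maxSize % PageSize == 0);
  uint32_t boundsCheckLimit = maxSize;  // ARM rounds up to a valid immediate
  return size_t(boundsCheckLimit) + GuardSize;
}

int main() {
  // A 2-page (128 KiB) maximum maps 3 pages: the accessible region plus one
  // trailing guard page; boundsCheckLimit = mappedSize - GuardSize = 128 KiB.
  printf("%zu\n", ComputeMappedSize(2 * PageSize));  // prints 196608
}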
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -2288,61 +2288,74 @@ static const unsigned PageSize = 64 * 10
 // check limit. If the memory access is unaligned, this means that, even if the
 // bounds check succeeds, a few bytes of the access can extend past the end of
 // memory. To guard against this, extra space is included in the guard region to
 // catch the overflow. MaxMemoryAccessSize is a conservative approximation of
 // the maximum guard space needed to catch all unaligned overflows.
 
 static const unsigned MaxMemoryAccessSize = LitVal::sizeofLargestValue();
 
-#ifdef WASM_HUGE_MEMORY
-
-// On WASM_HUGE_MEMORY platforms, every asm.js or WebAssembly memory
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+
+// On WASM_SUPPORTS_HUGE_MEMORY platforms, every asm.js or WebAssembly memory
 // unconditionally allocates a huge region of virtual memory of size
 // wasm::HugeMappedSize. This allows all memory resizing to work without
 // reallocation and provides enough guard space for all offsets to be folded
 // into memory accesses.
 
-static const uint64_t IndexRange = uint64_t(UINT32_MAX) + 1;
-static const uint64_t OffsetGuardLimit = uint64_t(INT32_MAX) + 1;
-static const uint64_t UnalignedGuardPage = PageSize;
+static const uint64_t HugeIndexRange = uint64_t(UINT32_MAX) + 1;
+static const uint64_t HugeOffsetGuardLimit = uint64_t(INT32_MAX) + 1;
+static const uint64_t HugeUnalignedGuardPage = PageSize;
 static const uint64_t HugeMappedSize =
-    IndexRange + OffsetGuardLimit + UnalignedGuardPage;
-
-static_assert(MaxMemoryAccessSize <= UnalignedGuardPage,
+    HugeIndexRange + HugeOffsetGuardLimit + HugeUnalignedGuardPage;
+
+static_assert(MaxMemoryAccessSize <= HugeUnalignedGuardPage,
               "rounded up to static page size");
-
-#else  // !WASM_HUGE_MEMORY
-
-// On !WASM_HUGE_MEMORY platforms:
+static_assert(HugeOffsetGuardLimit < UINT32_MAX,
+              "checking for overflow against HugeOffsetGuardLimit is enough.");
+
+#endif
+
+// On !WASM_SUPPORTS_HUGE_MEMORY platforms, or when huge memory is disabled:
 //  - To avoid OOM in ArrayBuffer::prepareForAsmJS, asm.js continues to use the
 //    original ArrayBuffer allocation which has no guard region at all.
 //  - For WebAssembly memories, an additional GuardSize is mapped after the
 //    accessible region of the memory to catch folded (base+offset) accesses
 //    where `offset < OffsetGuardLimit` as well as the overflow from unaligned
 //    accesses, as described above for MaxMemoryAccessSize.
 
 static const size_t OffsetGuardLimit = PageSize - MaxMemoryAccessSize;
 static const size_t GuardSize = PageSize;
 
+static_assert(MaxMemoryAccessSize < GuardSize,
+              "Guard page handles partial out-of-bounds");
+static_assert(OffsetGuardLimit < UINT32_MAX,
+              "checking for overflow against OffsetGuardLimit is enough.");
+
+static constexpr size_t GetOffsetGuardLimit(bool hugeMemory) {
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+  return hugeMemory ? HugeOffsetGuardLimit : OffsetGuardLimit;
+#else
+  return OffsetGuardLimit;
+#endif
+}
+
 // Return whether the given immediate satisfies the constraints of the platform
 // (viz. that, on ARM, IsValidARMImmediate).
 
 extern bool IsValidBoundsCheckImmediate(uint32_t i);
 
 // For a given WebAssembly/asm.js max size, return the number of bytes to
 // map which will necessarily be a multiple of the system page size and greater
 // than maxSize. For a returned mappedSize:
 //   boundsCheckLimit = mappedSize - GuardSize
 //   IsValidBoundsCheckImmediate(boundsCheckLimit)
 
 extern size_t ComputeMappedSize(uint32_t maxSize);
 
-#endif  // WASM_HUGE_MEMORY
-
 // wasm::Frame represents the bytes pushed by the call instruction and the fixed
 // prologue generated by wasm::GenerateCallablePrologue.
 //
 // Across all architectures it is assumed that, before the call instruction, the
 // stack pointer is WasmStackAlignment-aligned. Thus after the prologue, and
 // before the function has made its stack reservation, the stack alignment is
 // sizeof(Frame) % WasmStackAlignment.
 //