Backed out 9 changesets (bug 1518210) for hazard failure on ArrayBufferObject.cpp CLOSED TREE
author Bogdan Tara <btara@mozilla.com>
Fri, 30 Aug 2019 06:12:06 +0300
changeset 551303 ea9924171afd99fe6f60ea4f61ac7a90bad2a0fd
parent 551302 80bb50e715a30879981baac975767e26f42a7a87
child 551304 cb477711fa64b193a78e910895807cb9952ec7a0
push id 11865
push user btara@mozilla.com
push date Mon, 02 Sep 2019 08:54:37 +0000
treeherder mozilla-beta@37f59c4671b3 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
bugs 1518210
milestone 70.0a1
backs out 6e2e9274465d755271a5c1ffc32e1a42a608d879
39fc18ada840d9de69c45ca8484361a58ce0449b
b88d66dddefff7b557143585fb73f2cf9d3c6648
40e3f38af193cf11426ed48d89780c1768deb05d
777aa22c9e8a19fc1df9e575db3b46702805bf23
eb3fbf8bfb2b344b83c483a35f221b61bfcc76e2
dc63fd0bbe584922c065ea58c368c6bfa601e24f
12ea41537e05b07fccf665e4fe69d692804b5b70
4c8fe76ad293573a6a0e9eac934b99028e6da20d
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 9 changesets (bug 1518210) for hazard failure on ArrayBufferObject.cpp CLOSED TREE Backed out changeset 6e2e9274465d (bug 1518210) Backed out changeset 39fc18ada840 (bug 1518210) Backed out changeset b88d66dddeff (bug 1518210) Backed out changeset 40e3f38af193 (bug 1518210) Backed out changeset 777aa22c9e8a (bug 1518210) Backed out changeset eb3fbf8bfb2b (bug 1518210) Backed out changeset dc63fd0bbe58 (bug 1518210) Backed out changeset 12ea41537e05 (bug 1518210) Backed out changeset 4c8fe76ad293 (bug 1518210)
js/src/builtin/TestingFunctions.cpp
js/src/jit-test/tests/wasm/bench/directives.txt
js/src/jit-test/tests/wasm/directives.txt
js/src/jit-test/tests/wasm/gc/directives.txt
js/src/jit-test/tests/wasm/regress/directives.txt
js/src/jit-test/tests/wasm/spec/directives.txt
js/src/jit-test/tests/wasm/timeout/directives.txt
js/src/jit/CodeGenerator.cpp
js/src/jit/Lowering.cpp
js/src/jit/MacroAssembler.h
js/src/jit/WasmBCE.cpp
js/src/jit/arm64/MacroAssembler-arm64-inl.h
js/src/jit/arm64/MacroAssembler-arm64.cpp
js/src/js-config.mozbuild
js/src/jsapi.cpp
js/src/jsapi.h
js/src/shell/fuzz-flags.txt
js/src/shell/js.cpp
js/src/tests/lib/jittests.py
js/src/vm/ArrayBufferObject.cpp
js/src/vm/ArrayBufferObject.h
js/src/vm/MutexIDs.h
js/src/vm/SharedArrayObject.cpp
js/src/vm/SharedArrayObject.h
js/src/wasm/AsmJS.cpp
js/src/wasm/Makefile
js/src/wasm/WasmBaselineCompile.cpp
js/src/wasm/WasmCode.cpp
js/src/wasm/WasmCode.h
js/src/wasm/WasmCompile.cpp
js/src/wasm/WasmCompile.h
js/src/wasm/WasmCraneliftCompile.cpp
js/src/wasm/WasmGenerator.cpp
js/src/wasm/WasmInstance.cpp
js/src/wasm/WasmIonCompile.cpp
js/src/wasm/WasmJS.cpp
js/src/wasm/WasmJS.h
js/src/wasm/WasmModule.cpp
js/src/wasm/WasmModule.h
js/src/wasm/WasmProcess.cpp
js/src/wasm/WasmProcess.h
js/src/wasm/WasmTypes.cpp
js/src/wasm/WasmTypes.h
js/src/wasm/WasmValidate.cpp
js/src/wasm/WasmValidate.h
js/xpconnect/src/XPCJSContext.cpp
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -692,26 +692,16 @@ static bool WasmStreamingIsSupported(JSC
 }
 
 static bool WasmCachingIsSupported(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
   args.rval().setBoolean(wasm::HasCachingSupport(cx));
   return true;
 }
 
-static bool WasmHugeMemoryIsSupported(JSContext* cx, unsigned argc, Value* vp) {
-  CallArgs args = CallArgsFromVp(argc, vp);
-#ifdef WASM_SUPPORTS_HUGE_MEMORY
-  args.rval().setBoolean(true);
-#else
-  args.rval().setBoolean(false);
-#endif
-  return true;
-}
-
 static bool WasmUsesCranelift(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
 #ifdef ENABLE_WASM_CRANELIFT
   bool usesCranelift = cx->options().wasmCranelift();
 #else
   bool usesCranelift = false;
 #endif
   args.rval().setBoolean(usesCranelift);
@@ -6370,21 +6360,16 @@ gc::ZealModeHelpText),
     JS_FN_HELP("wasmStreamingIsSupported", WasmStreamingIsSupported, 0, 0,
 "wasmStreamingIsSupported()",
 "  Returns a boolean indicating whether WebAssembly caching is supported by the runtime."),
 
     JS_FN_HELP("wasmCachingIsSupported", WasmCachingIsSupported, 0, 0,
 "wasmCachingIsSupported()",
 "  Returns a boolean indicating whether WebAssembly caching is supported by the runtime."),
 
-    JS_FN_HELP("wasmHugeMemoryIsSupported", WasmHugeMemoryIsSupported, 0, 0,
-"wasmHugeMemoryIsSupported()",
-"  Returns a boolean indicating whether WebAssembly supports using a large"
-"  virtual memory reservation in order to elide bounds checks on this platform."),
-
     JS_FN_HELP("wasmUsesCranelift", WasmUsesCranelift, 0, 0,
 "wasmUsesCranelift()",
 "  Returns a boolean indicating whether Cranelift is currently enabled for backend\n"
 "  compilation. This doesn't necessarily mean a module will be compiled with \n"
 "  Cranelift (e.g. when baseline is also enabled)."),
 
     JS_FN_HELP("wasmThreadsSupported", WasmThreadsSupported, 0, 0,
 "wasmThreadsSupported()",
--- a/js/src/jit-test/tests/wasm/bench/directives.txt
+++ b/js/src/jit-test/tests/wasm/bench/directives.txt
@@ -1,1 +1,1 @@
-|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemoryIsSupported(); include:wasm.js
+|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; include:wasm.js
--- a/js/src/jit-test/tests/wasm/directives.txt
+++ b/js/src/jit-test/tests/wasm/directives.txt
@@ -1,1 +1,1 @@
-|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemoryIsSupported(); include:wasm.js
+|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; include:wasm.js
--- a/js/src/jit-test/tests/wasm/gc/directives.txt
+++ b/js/src/jit-test/tests/wasm/gc/directives.txt
@@ -1,1 +1,1 @@
-|jit-test| test-also=--wasm-gc; test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--wasm-gc --wasm-compiler=baseline; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemoryIsSupported(); include:wasm.js
+|jit-test| test-also=--wasm-gc; test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--wasm-gc --wasm-compiler=baseline; include:wasm.js
--- a/js/src/jit-test/tests/wasm/regress/directives.txt
+++ b/js/src/jit-test/tests/wasm/regress/directives.txt
@@ -1,1 +1,1 @@
-|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemoryIsSupported(); include:wasm.js
+|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; include:wasm.js
--- a/js/src/jit-test/tests/wasm/spec/directives.txt
+++ b/js/src/jit-test/tests/wasm/spec/directives.txt
@@ -1,1 +1,1 @@
-|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; include:wasm-testharness.js
+|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; include:wasm-testharness.js
--- a/js/src/jit-test/tests/wasm/timeout/directives.txt
+++ b/js/src/jit-test/tests/wasm/timeout/directives.txt
@@ -1,2 +1,2 @@
-|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; skip-variant-if: --disable-wasm-huge-memory, !wasmHugeMemoryIsSupported();
+|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2
 
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -13281,23 +13281,27 @@ void CodeGenerator::visitWasmInterruptCh
 void CodeGenerator::visitWasmTrap(LWasmTrap* lir) {
   MOZ_ASSERT(gen->compilingWasm());
   const MWasmTrap* mir = lir->mir();
 
   masm.wasmTrap(mir->trap(), mir->bytecodeOffset());
 }
 
 void CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) {
+#ifdef WASM_HUGE_MEMORY
+  MOZ_CRASH("No wasm bounds check for huge memory");
+#else
   const MWasmBoundsCheck* mir = ins->mir();
   Register ptr = ToRegister(ins->ptr());
   Register boundsCheckLimit = ToRegister(ins->boundsCheckLimit());
   Label ok;
   masm.wasmBoundsCheck(Assembler::Below, ptr, boundsCheckLimit, &ok);
   masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
   masm.bind(&ok);
+#endif
 }
 
 void CodeGenerator::visitWasmAlignmentCheck(LWasmAlignmentCheck* ins) {
   const MWasmAlignmentCheck* mir = ins->mir();
   Register ptr = ToRegister(ins->ptr());
   Label ok;
   masm.branchTest32(Assembler::Zero, ptr, Imm32(mir->byteSize() - 1), &ok);
   masm.wasmTrap(wasm::Trap::UnalignedAccess, mir->bytecodeOffset());
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -4201,16 +4201,19 @@ void LIRGenerator::visitWasmAddOffset(MW
 }
 
 void LIRGenerator::visitWasmLoadTls(MWasmLoadTls* ins) {
   auto* lir = new (alloc()) LWasmLoadTls(useRegisterAtStart(ins->tlsPtr()));
   define(lir, ins);
 }
 
 void LIRGenerator::visitWasmBoundsCheck(MWasmBoundsCheck* ins) {
+#ifdef WASM_HUGE_MEMORY
+  MOZ_CRASH("No bounds checking on huge memory");
+#else
   MOZ_ASSERT(!ins->isRedundant());
 
   MDefinition* index = ins->index();
   MOZ_ASSERT(index->type() == MIRType::Int32);
 
   MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
   MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
 
@@ -4218,16 +4221,17 @@ void LIRGenerator::visitWasmBoundsCheck(
     auto* lir = new (alloc()) LWasmBoundsCheck(useRegisterAtStart(index),
                                                useRegister(boundsCheckLimit));
     defineReuseInput(lir, ins, 0);
   } else {
     auto* lir = new (alloc()) LWasmBoundsCheck(
         useRegisterAtStart(index), useRegisterAtStart(boundsCheckLimit));
     add(lir, ins);
   }
+#endif
 }
 
 void LIRGenerator::visitWasmAlignmentCheck(MWasmAlignmentCheck* ins) {
   MDefinition* index = ins->index();
   MOZ_ASSERT(index->type() == MIRType::Int32);
 
   auto* lir = new (alloc()) LWasmAlignmentCheck(useRegisterAtStart(index));
   add(lir, ins);
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1111,19 +1111,16 @@ class MacroAssembler : public MacroAssem
 
   template <class L>
   inline void branch32(Condition cond, Register lhs, Register rhs,
                        L label) PER_SHARED_ARCH;
   template <class L>
   inline void branch32(Condition cond, Register lhs, Imm32 rhs,
                        L label) PER_SHARED_ARCH;
 
-  inline void branch32(Condition cond, Register lhs, const Address& rhs,
-                       Label* label) DEFINED_ON(arm64);
-
   inline void branch32(Condition cond, const Address& lhs, Register rhs,
                        Label* label) PER_SHARED_ARCH;
   inline void branch32(Condition cond, const Address& lhs, Imm32 rhs,
                        Label* label) PER_SHARED_ARCH;
 
   inline void branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs,
                        Label* label)
       DEFINED_ON(arm, arm64, mips_shared, x86, x64);
--- a/js/src/jit/WasmBCE.cpp
+++ b/js/src/jit/WasmBCE.cpp
@@ -43,16 +43,21 @@ bool jit::EliminateBoundsChecks(MIRGener
           MDefinition* addr = bc->index();
 
           // Eliminate constant-address bounds checks to addresses below
           // the heap minimum.
           //
           // The payload of the MConstant will be Double if the constant
           // result is above 2^31-1, but we don't care about that for BCE.
 
+#ifndef WASM_HUGE_MEMORY
+          MOZ_ASSERT(wasm::MaxMemoryAccessSize < wasm::GuardSize,
+                     "Guard page handles partial out-of-bounds");
+#endif
+
           if (addr->isConstant() &&
               addr->toConstant()->type() == MIRType::Int32 &&
               uint32_t(addr->toConstant()->toInt32()) <
                   mir->minWasmHeapLength()) {
             bc->setRedundant();
             if (JitOptions.spectreIndexMasking) {
               bc->replaceAllUsesWith(addr);
             } else {
--- a/js/src/jit/arm64/MacroAssembler-arm64-inl.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
@@ -795,26 +795,16 @@ void MacroAssembler::branch32(Condition 
 
 template <class L>
 void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
                               L label) {
   cmp32(lhs, imm);
   B(label, cond);
 }
 
-void MacroAssembler::branch32(Condition cond, Register lhs, const Address& rhs,
-                              Label* label) {
-  vixl::UseScratchRegisterScope temps(this);
-  const Register scratch = temps.AcquireX().asUnsized();
-  MOZ_ASSERT(scratch != lhs);
-  MOZ_ASSERT(scratch != rhs.base);
-  load32(rhs, scratch);
-  branch32(cond, lhs, scratch, label);
-}
-
 void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
                               Label* label) {
   vixl::UseScratchRegisterScope temps(this);
   const Register scratch = temps.AcquireX().asUnsized();
   MOZ_ASSERT(scratch != lhs.base);
   MOZ_ASSERT(scratch != rhs);
   load32(lhs, scratch);
   branch32(cond, scratch, rhs, label);
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -1086,31 +1086,24 @@ CodeOffset MacroAssembler::wasmTrapInstr
                              /* max number of instructions in scope = */ 1);
   CodeOffset offs(currentOffset());
   Unreachable();
   return offs;
 }
 
 void MacroAssembler::wasmBoundsCheck(Condition cond, Register index,
                                      Register boundsCheckLimit, Label* label) {
-  branch32(cond, index, boundsCheckLimit, label);
-  if (JitOptions.spectreIndexMasking) {
-    csel(ARMRegister(index, 32), vixl::wzr, ARMRegister(index, 32), cond);
-  }
+  // Not used on ARM64, we rely on signal handling instead
+  MOZ_CRASH("NYI - wasmBoundsCheck");
 }
 
 void MacroAssembler::wasmBoundsCheck(Condition cond, Register index,
                                      Address boundsCheckLimit, Label* label) {
-  MOZ_ASSERT(boundsCheckLimit.offset ==
-             offsetof(wasm::TlsData, boundsCheckLimit));
-
-  branch32(cond, index, boundsCheckLimit, label);
-  if (JitOptions.spectreIndexMasking) {
-    csel(ARMRegister(index, 32), vixl::wzr, ARMRegister(index, 32), cond);
-  }
+  // Not used on ARM64, we rely on signal handling instead
+  MOZ_CRASH("NYI - wasmBoundsCheck");
 }
 
 // FCVTZU behaves as follows:
 //
 // on NaN it produces zero
 // on too large it produces UINT_MAX (for appropriate type)
 // on too small it produces zero
 //
--- a/js/src/js-config.mozbuild
+++ b/js/src/js-config.mozbuild
@@ -2,17 +2,17 @@
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # Some huge-mapping optimization instead of bounds checks on supported
 # platforms.
 if CONFIG['JS_CODEGEN_X64'] or CONFIG['JS_CODEGEN_ARM64']:
-    DEFINES['WASM_SUPPORTS_HUGE_MEMORY'] = True
+    DEFINES['WASM_HUGE_MEMORY'] = True
 
 # Enables CACHEIR_LOGS to diagnose IC coverage, and
 # Structured spewer for diagnostics
 if CONFIG['MOZ_DEBUG'] or CONFIG['NIGHTLY_BUILD']:
     DEFINES['JS_CACHEIR_SPEW'] = True
     DEFINES['JS_STRUCTURED_SPEW'] = True
 
 # CTypes
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -89,17 +89,16 @@
 #include "vm/SavedStacks.h"
 #include "vm/SelfHosting.h"
 #include "vm/Shape.h"
 #include "vm/StringType.h"
 #include "vm/SymbolType.h"
 #include "vm/WrapperObject.h"
 #include "vm/Xdr.h"
 #include "wasm/WasmModule.h"
-#include "wasm/WasmProcess.h"
 
 #include "debugger/DebugAPI-inl.h"
 #include "vm/Compartment-inl.h"
 #include "vm/Interpreter-inl.h"
 #include "vm/JSAtom-inl.h"
 #include "vm/JSFunction-inl.h"
 #include "vm/JSScript-inl.h"
 #include "vm/NativeObject-inl.h"
@@ -5868,17 +5867,20 @@ bool JS::IsWasmModuleObject(HandleObject
 }
 
 JS_PUBLIC_API RefPtr<JS::WasmModule> JS::GetWasmModule(HandleObject obj) {
   MOZ_ASSERT(JS::IsWasmModuleObject(obj));
   WasmModuleObject& mobj = obj->unwrapAs<WasmModuleObject>();
   return const_cast<wasm::Module*>(&mobj.module());
 }
 
-bool JS::DisableWasmHugeMemory() { return wasm::DisableHugeMemory(); }
+JS_PUBLIC_API RefPtr<JS::WasmModule> JS::DeserializeWasmModule(
+    const uint8_t* bytecode, size_t bytecodeLength) {
+  return wasm::DeserializeModule(bytecode, bytecodeLength);
+}
 
 JS_PUBLIC_API void JS::SetProcessLargeAllocationFailureCallback(
     JS::LargeAllocationFailureCallback lafc) {
   MOZ_ASSERT(!OnLargeAllocationFailure);
   OnLargeAllocationFailure = lafc;
 }
 
 JS_PUBLIC_API void JS::SetOutOfMemoryCallback(JSContext* cx,
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -3028,22 +3028,22 @@ struct WasmModule : js::AtomicRefCounted
   virtual JSObject* createObject(JSContext* cx) = 0;
 };
 
 extern JS_PUBLIC_API bool IsWasmModuleObject(HandleObject obj);
 
 extern JS_PUBLIC_API RefPtr<WasmModule> GetWasmModule(HandleObject obj);
 
 /**
- * Attempt to disable Wasm's usage of reserving a large virtual memory
- * allocation to avoid bounds checking overhead. This must be called before any
- * Wasm module or memory is created in this process, or else this function will
- * fail.
+ * This function will be removed when bug 1487479 expunges the last remaining
+ * bits of wasm IDB support.
  */
-extern JS_PUBLIC_API MOZ_MUST_USE bool DisableWasmHugeMemory();
+
+extern JS_PUBLIC_API RefPtr<WasmModule> DeserializeWasmModule(
+    const uint8_t* bytecode, size_t bytecodeLength);
 
 /**
  * If a large allocation fails when calling pod_{calloc,realloc}CanGC, the JS
  * engine may call the large-allocation-failure callback, if set, to allow the
  * embedding to flush caches, possibly perform shrinking GCs, etc. to make some
  * room. The allocation will then be retried (and may still fail.) This callback
  * can be called on any thread and must be set at most once in a process.
  */
--- a/js/src/shell/fuzz-flags.txt
+++ b/js/src/shell/fuzz-flags.txt
@@ -61,17 +61,16 @@
 --no-incremental-gc
 
 # wasm flags
 --wasm-gc
 --wasm-compiler=ion
 --wasm-compiler=baseline
 --wasm-compiler=cranelift
 --test-wasm-await-tier2
---wasm-disable-huge-memory
 
 # CPU instruction set-related
 --enable-avx
 --no-avx
 --no-sse3
 --no-sse4
 
 # arm specific, no-ops on other platforms.
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -5813,17 +5813,17 @@ class AutoPipe {
     fds_[1] = -1;
   }
 };
 
 static int sArgc;
 static char** sArgv;
 static const char sWasmCompileAndSerializeFlag[] =
     "--wasm-compile-and-serialize";
-static Vector<const char*, 5, js::SystemAllocPolicy> sCompilerProcessFlags;
+static Vector<const char*, 4, js::SystemAllocPolicy> sCompilerProcessFlags;
 
 static bool CompileAndSerializeInSeparateProcess(JSContext* cx,
                                                  const uint8_t* bytecode,
                                                  size_t bytecodeLength,
                                                  wasm::Bytes* serialized) {
   AutoPipe stdIn, stdOut;
   if (!stdIn.init() || !stdOut.init()) {
     return false;
@@ -10934,18 +10934,16 @@ int main(int argc, char** argv, char** e
       !op.addBoolOption('\0', "no-ion", "Disable IonMonkey") ||
       !op.addBoolOption('\0', "no-asmjs", "Disable asm.js compilation") ||
       !op.addStringOption(
           '\0', "wasm-compiler", "[option]",
           "Choose to enable a subset of the wasm compilers (valid options are "
           "none/baseline/ion/cranelift/baseline+ion/baseline+cranelift)") ||
       !op.addBoolOption('\0', "wasm-verbose",
                         "Enable WebAssembly verbose logging") ||
-      !op.addBoolOption('\0', "disable-wasm-huge-memory",
-                        "Disable WebAssembly huge memory") ||
       !op.addBoolOption('\0', "test-wasm-await-tier2",
                         "Forcibly activate tiering and block "
                         "instantiation on completion of tier2") ||
 #ifdef ENABLE_WASM_GC
       !op.addBoolOption('\0', "wasm-gc",
                         "Enable experimental wasm GC features") ||
 #else
       !op.addBoolOption('\0', "wasm-gc", "No-op") ||
@@ -11357,24 +11355,16 @@ int main(int argc, char** argv, char** e
 #endif
 
   js::SetPreserveWrapperCallback(cx, DummyPreserveWrapperCallback);
 
   JS::SetModuleResolveHook(cx->runtime(), ShellModuleResolveHook);
   JS::SetModuleDynamicImportHook(cx->runtime(), ShellModuleDynamicImportHook);
   JS::SetModuleMetadataHook(cx->runtime(), CallModuleMetadataHook);
 
-  if (op.getBoolOption("disable-wasm-huge-memory")) {
-    if (!sCompilerProcessFlags.append("--disable-wasm-huge-memory")) {
-      return EXIT_FAILURE;
-    }
-    bool disabledHugeMemory = JS::DisableWasmHugeMemory();
-    MOZ_RELEASE_ASSERT(disabledHugeMemory);
-  }
-
   result = Shell(cx, &op, envp);
 
 #ifdef DEBUG
   if (OOM_printAllocationCount) {
     printf("OOM max count: %" PRIu64 "\n", js::oom::simulator.counter());
   }
 #endif
 
--- a/js/src/tests/lib/jittests.py
+++ b/js/src/tests/lib/jittests.py
@@ -81,23 +81,16 @@ def js_quote(quote, s):
             result += c
     result += quote
     return result
 
 
 os.path.relpath = _relpath
 
 
-def extend_condition(condition, value):
-    if condition:
-        condition += " || "
-    condition += "({})".format(value)
-    return condition
-
-
 class JitTest:
 
     VALGRIND_CMD = []
     paths = (d for d in os.environ['PATH'].split(os.pathsep))
     valgrinds = (os.path.join(d, 'valgrind') for d in paths)
     if any(os.path.exists(p) for p in valgrinds):
         VALGRIND_CMD = [
             'valgrind', '-q', '--smc-check=all-non-file',
@@ -149,17 +142,16 @@ class JitTest:
         self.is_module = False
         self.is_binast = False
         # Reflect.stringify implementation to test
         self.test_reflect_stringify = None
 
         # Skip-if condition. We don't have a xulrunner, but we can ask the shell
         # directly.
         self.skip_if_cond = ''
-        self.skip_variant_if_cond = {}
 
         # Expected by the test runner. Always true for jit-tests.
         self.enable = True
 
     def copy(self):
         t = JitTest(self.path)
         t.jitflags = self.jitflags[:]
         t.slow = self.slow
@@ -174,25 +166,21 @@ class JitTest:
         t.expect_error = self.expect_error
         t.expect_status = self.expect_status
         t.expect_crash = self.expect_crash
         t.test_reflect_stringify = self.test_reflect_stringify
         t.enable = True
         t.is_module = self.is_module
         t.is_binast = self.is_binast
         t.skip_if_cond = self.skip_if_cond
-        t.skip_variant_if_cond = self.skip_variant_if_cond
         return t
 
     def copy_and_extend_jitflags(self, variant):
         t = self.copy()
         t.jitflags.extend(variant)
-        for flags in variant:
-            if flags in self.skip_variant_if_cond:
-                t.skip_if_cond = extend_condition(t.skip_if_cond, self.skip_variant_if_cond[flags])
         return t
 
     def copy_variants(self, variants):
         # Append variants to be tested in addition to the current set of tests.
         variants = variants + self.test_also
 
         # For each existing variant, duplicates it for each list of options in
         # test_join.  This will multiply the number of variants by 2 for set of
@@ -280,25 +268,20 @@ class JitTest:
                             test.jitflags.append('--thread-count={}'.format(
                                 int(value, 0)))
                         except ValueError:
                             print("warning: couldn't parse thread-count"
                                   " {}".format(value))
                     elif name == 'include':
                         test.other_includes.append(value)
                     elif name == 'skip-if':
-                        test.skip_if_cond = extend_condition(test.skip_if_cond, value)
-                    elif name == 'skip-variant-if':
-                        try:
-                            [variant, condition] = value.split(',')
-                            test.skip_variant_if_cond[variant] = extend_condition(
-                                test.skip_if_cond,
-                                condition)
-                        except ValueError:
-                            print("warning: couldn't parse skip-variant-if")
+                        # Ensure that skip-ifs are composable
+                        if test.skip_if_cond:
+                            test.skip_if_cond += " || "
+                        test.skip_if_cond += "({})".format(value)
                     else:
                         print('{}: warning: unrecognized |jit-test| attribute'
                               ' {}'.format(path, part))
                 else:
                     if name == 'slow':
                         test.slow = True
                     elif name == 'allow-oom':
                         test.allow_oom = True
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -222,41 +222,43 @@ bool js::CommitBufferMemory(void* dataEn
 #if defined(MOZ_VALGRIND) && \
     defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
   VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, delta);
 #endif
 
   return true;
 }
 
+#ifndef WASM_HUGE_MEMORY
 bool js::ExtendBufferMapping(void* dataPointer, size_t mappedSize,
                              size_t newMappedSize) {
   MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
   MOZ_ASSERT(newMappedSize % gc::SystemPageSize() == 0);
   MOZ_ASSERT(newMappedSize >= mappedSize);
 
-#ifdef XP_WIN
+#  ifdef XP_WIN
   void* mappedEnd = (char*)dataPointer + mappedSize;
   uint32_t delta = newMappedSize - mappedSize;
   if (!VirtualAlloc(mappedEnd, delta, MEM_RESERVE, PAGE_NOACCESS)) {
     return false;
   }
   return true;
-#elif defined(XP_LINUX)
+#  elif defined(XP_LINUX)
   // Note this will not move memory (no MREMAP_MAYMOVE specified)
   if (MAP_FAILED == mremap(dataPointer, mappedSize, newMappedSize, 0)) {
     return false;
   }
   return true;
-#else
+#  else
   // No mechanism for remapping on MacOS and other Unices. Luckily
   // shouldn't need it here as most of these are 64-bit.
   return false;
+#  endif
+}
 #endif
-}
 
 void js::UnmapBufferMemory(void* base, size_t mappedSize) {
   MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
 
 #ifdef XP_WIN
   VirtualFree(base, 0, MEM_RELEASE);
 #else   // XP_WIN
   munmap(base, mappedSize);
@@ -604,31 +606,40 @@ class js::WasmArrayRawBuffer {
   WasmArrayRawBuffer(uint8_t* buffer, const Maybe<uint32_t>& maxSize,
                      size_t mappedSize)
       : maxSize_(maxSize), mappedSize_(mappedSize) {
     MOZ_ASSERT(buffer == dataPointer());
   }
 
  public:
   static WasmArrayRawBuffer* Allocate(uint32_t numBytes,
-                                      const Maybe<uint32_t>& maxSize,
-                                      const Maybe<size_t>& mappedSize);
+                                      const Maybe<uint32_t>& maxSize);
   static void Release(void* mem);
 
   uint8_t* dataPointer() {
     uint8_t* ptr = reinterpret_cast<uint8_t*>(this);
     return ptr + sizeof(WasmArrayRawBuffer);
   }
 
   uint8_t* basePointer() { return dataPointer() - gc::SystemPageSize(); }
 
   size_t mappedSize() const { return mappedSize_; }
 
   Maybe<uint32_t> maxSize() const { return maxSize_; }
 
+#ifndef WASM_HUGE_MEMORY
+  uint32_t boundsCheckLimit() const {
+    MOZ_ASSERT(mappedSize_ <= UINT32_MAX);
+    MOZ_ASSERT(mappedSize_ >= wasm::GuardSize);
+    MOZ_ASSERT(
+        wasm::IsValidBoundsCheckImmediate(mappedSize_ - wasm::GuardSize));
+    return mappedSize_ - wasm::GuardSize;
+  }
+#endif
+
   MOZ_MUST_USE bool growToSizeInPlace(uint32_t oldSize, uint32_t newSize) {
     MOZ_ASSERT(newSize >= oldSize);
     MOZ_ASSERT_IF(maxSize(), newSize <= maxSize().value());
     MOZ_ASSERT(newSize <= mappedSize());
 
     uint32_t delta = newSize - oldSize;
     MOZ_ASSERT(delta % wasm::PageSize == 0);
 
@@ -637,16 +648,17 @@ class js::WasmArrayRawBuffer {
 
     if (delta && !CommitBufferMemory(dataEnd, delta)) {
       return false;
     }
 
     return true;
   }
 
+#ifndef WASM_HUGE_MEMORY
   bool extendMappedSize(uint32_t maxSize) {
     size_t newMappedSize = wasm::ComputeMappedSize(maxSize);
     MOZ_ASSERT(mappedSize_ <= newMappedSize);
     if (mappedSize_ == newMappedSize) {
       return true;
     }
 
     if (!ExtendBufferMapping(dataPointer(), mappedSize_, newMappedSize)) {
@@ -666,27 +678,30 @@ class js::WasmArrayRawBuffer {
     MOZ_ASSERT(newMaxSize.value() % wasm::PageSize == 0);
 
     if (!extendMappedSize(newMaxSize.value())) {
       return;
     }
 
     maxSize_ = Some(newMaxSize.value());
   }
+#endif  // WASM_HUGE_MEMORY
 };
 
 /* static */
-WasmArrayRawBuffer* WasmArrayRawBuffer::Allocate(uint32_t numBytes,
-                                                 const Maybe<uint32_t>& maxSize,
-                                                 const Maybe<size_t>& mapped) {
+WasmArrayRawBuffer* WasmArrayRawBuffer::Allocate(
+    uint32_t numBytes, const Maybe<uint32_t>& maxSize) {
   MOZ_RELEASE_ASSERT(numBytes <= ArrayBufferObject::MaxBufferByteLength);
 
-  size_t mappedSize = mapped.isSome()
-                          ? *mapped
-                          : wasm::ComputeMappedSize(maxSize.valueOr(numBytes));
+  size_t mappedSize;
+#ifdef WASM_HUGE_MEMORY
+  mappedSize = wasm::HugeMappedSize;
+#else
+  mappedSize = wasm::ComputeMappedSize(maxSize.valueOr(numBytes));
+#endif
 
   MOZ_RELEASE_ASSERT(mappedSize <= SIZE_MAX - gc::SystemPageSize());
   MOZ_RELEASE_ASSERT(numBytes <= maxSize.valueOr(UINT32_MAX));
   MOZ_ASSERT(numBytes % gc::SystemPageSize() == 0);
   MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
 
   uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize();
   uint64_t numBytesWithHeader = numBytes + gc::SystemPageSize();
@@ -716,92 +731,57 @@ void WasmArrayRawBuffer::Release(void* m
 }
 
 WasmArrayRawBuffer* ArrayBufferObject::BufferContents::wasmBuffer() const {
   MOZ_RELEASE_ASSERT(kind_ == WASM);
   return (WasmArrayRawBuffer*)(data_ - sizeof(WasmArrayRawBuffer));
 }
 
 template <typename ObjT, typename RawbufT>
-static bool CreateSpecificWasmBuffer(
+static bool CreateBuffer(
     JSContext* cx, uint32_t initialSize, const Maybe<uint32_t>& maxSize,
     MutableHandleArrayBufferObjectMaybeShared maybeSharedObject) {
 #define ROUND_UP(v, a) ((v) % (a) == 0 ? (v) : v + a - ((v) % (a)))
 
-  bool useHugeMemory = wasm::IsHugeMemoryEnabled();
-
-  Maybe<uint32_t> clampedMaxSize = maxSize;
-  if (clampedMaxSize) {
-#ifdef JS_64BIT
-    // On 64-bit platforms when we aren't using huge memory, clamp clampedMaxSize to
-    // a smaller value that satisfies the 32-bit invariants
-    // clampedMaxSize + wasm::PageSize < UINT32_MAX and clampedMaxSize % wasm::PageSize == 0
-    if (!useHugeMemory && clampedMaxSize.value() >= (UINT32_MAX - wasm::PageSize)) {
-      uint32_t clamp = (wasm::MaxMemoryMaximumPages - 2) * wasm::PageSize;
-      MOZ_ASSERT(clamp < UINT32_MAX);
-      MOZ_ASSERT(initialSize <= clamp);
-      clampedMaxSize = Some(clamp);
-    }
+  RawbufT* buffer = RawbufT::Allocate(initialSize, maxSize);
+  if (!buffer) {
+#ifdef WASM_HUGE_MEMORY
+    wasm::Log(cx, "huge Memory allocation failed");
+    ReportOutOfMemory(cx);
+    return false;
 #else
-    static_assert(sizeof(uintptr_t) == 4, "assuming not 64 bit implies 32 bit");
-
-    // On 32-bit platforms, prevent applications specifying a large max
-    // (like UINT32_MAX) from unintentially OOMing the browser: they just
-    // want "a lot of memory". Maintain the invariant that
-    // initialSize <= clampedMaxSize.
-    static const uint32_t OneGiB = 1 << 30;
-    uint32_t clamp = Max(OneGiB, initialSize);
-    clampedMaxSize = Some(Min(clamp, *clampedMaxSize));
-#endif
-  }
-
-  Maybe<size_t> mappedSize;
-
-#ifdef WASM_SUPPORTS_HUGE_MEMORY
-  if (useHugeMemory) {
-    mappedSize = Some(wasm::HugeMappedSize);
-  }
-#endif
-
-  RawbufT* buffer = RawbufT::Allocate(initialSize, clampedMaxSize, mappedSize);
-  if (!buffer) {
-    if (useHugeMemory) {
-      wasm::Log(cx, "huge Memory allocation failed");
-      ReportOutOfMemory(cx);
-      return false;
-    }
-
-    // If we fail, and have a clampedMaxSize, try to reserve the biggest chunk in
-    // the range [initialSize, clampedMaxSize) using log backoff.
-    if (!clampedMaxSize) {
+    // If we fail, and have a maxSize, try to reserve the biggest chunk in
+    // the range [initialSize, maxSize) using log backoff.
+    if (!maxSize) {
       wasm::Log(cx, "new Memory({initial=%u bytes}) failed", initialSize);
       ReportOutOfMemory(cx);
       return false;
     }
 
-    uint32_t cur = clampedMaxSize.value() / 2;
+    uint32_t cur = maxSize.value() / 2;
 
     for (; cur > initialSize; cur /= 2) {
-      uint32_t clampedMaxSize = ROUND_UP(cur, wasm::PageSize);
-      buffer = RawbufT::Allocate(initialSize, Some(clampedMaxSize), mappedSize);
+      buffer = RawbufT::Allocate(initialSize,
+                                 mozilla::Some(ROUND_UP(cur, wasm::PageSize)));
       if (buffer) {
         break;
       }
     }
 
     if (!buffer) {
       wasm::Log(cx, "new Memory({initial=%u bytes}) failed", initialSize);
       ReportOutOfMemory(cx);
       return false;
     }
 
     // Try to grow our chunk as much as possible.
     for (size_t d = cur / 2; d >= wasm::PageSize; d /= 2) {
       buffer->tryGrowMaxSizeInPlace(ROUND_UP(d, wasm::PageSize));
     }
+#endif
   }
 
 #undef ROUND_UP
 
   // ObjT::createFromNewRawBuffer assumes ownership of |buffer| even in case
   // of failure.
   ObjT* object = ObjT::createFromNewRawBuffer(cx, buffer, initialSize);
   if (!object) {
@@ -820,55 +800,77 @@ static bool CreateSpecificWasmBuffer(
     if (allocatedSinceLastTrigger > AllocatedBuffersPerTrigger) {
       Unused << cx->runtime()->gc.triggerGC(JS::GCReason::TOO_MUCH_WASM_MEMORY);
       allocatedSinceLastTrigger = 0;
     }
   } else {
     allocatedSinceLastTrigger = 0;
   }
 
-  if (clampedMaxSize) {
-    if (useHugeMemory) {
-      wasm::Log(cx,
-                "new Memory({initial:%u bytes, maximum:%u bytes}) succeeded",
-                unsigned(initialSize), unsigned(*clampedMaxSize));
-    } else {
-      wasm::Log(cx,
-                "new Memory({initial:%u bytes, maximum:%u bytes}) succeeded "
-                "with internal maximum of %u",
-                unsigned(initialSize), unsigned(*clampedMaxSize),
-                unsigned(object->wasmMaxSize().value()));
-    }
+  if (maxSize) {
+#ifdef WASM_HUGE_MEMORY
+    wasm::Log(cx, "new Memory({initial:%u bytes, maximum:%u bytes}) succeeded",
+              unsigned(initialSize), unsigned(*maxSize));
+#else
+    wasm::Log(cx,
+              "new Memory({initial:%u bytes, maximum:%u bytes}) succeeded "
+              "with internal maximum of %u",
+              unsigned(initialSize), unsigned(*maxSize),
+              unsigned(object->wasmMaxSize().value()));
+#endif
   } else {
     wasm::Log(cx, "new Memory({initial:%u bytes}) succeeded",
               unsigned(initialSize));
   }
 
   return true;
 }
 
 bool js::CreateWasmBuffer(JSContext* cx, const wasm::Limits& memory,
                           MutableHandleArrayBufferObjectMaybeShared buffer) {
   MOZ_ASSERT(memory.initial % wasm::PageSize == 0);
   MOZ_RELEASE_ASSERT(cx->wasmHaveSignalHandlers);
   MOZ_RELEASE_ASSERT((memory.initial / wasm::PageSize) <=
                      wasm::MaxMemoryInitialPages);
 
+  // Prevent applications specifying a large max (like UINT32_MAX) from
+  // unintentially OOMing the browser on 32-bit: they just want "a lot of
+  // memory". Maintain the invariant that initialSize <= maxSize.
+
+  Maybe<uint32_t> maxSize = memory.maximum;
+  if (sizeof(void*) == 4 && maxSize) {
+    static const uint32_t OneGiB = 1 << 30;
+    uint32_t clamp = Max(OneGiB, memory.initial);
+    maxSize = Some(Min(clamp, *maxSize));
+  }
+
+#ifndef WASM_HUGE_MEMORY
+  if (sizeof(void*) == 8 && maxSize &&
+      maxSize.value() >= (UINT32_MAX - wasm::PageSize)) {
+    // On 64-bit platforms that don't define WASM_HUGE_MEMORY
+    // clamp maxSize to smaller value that satisfies the 32-bit invariants
+    // maxSize + wasm::PageSize < UINT32_MAX and maxSize % wasm::PageSize == 0
+    uint32_t clamp = (wasm::MaxMemoryMaximumPages - 2) * wasm::PageSize;
+    MOZ_ASSERT(clamp < UINT32_MAX);
+    MOZ_ASSERT(memory.initial <= clamp);
+    maxSize = Some(clamp);
+  }
+#endif
+
   if (memory.shared == wasm::Shareable::True) {
     if (!cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled()) {
       JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                                 JSMSG_WASM_NO_SHMEM_LINK);
       return false;
     }
-    return CreateSpecificWasmBuffer<SharedArrayBufferObject,
-                                    SharedArrayRawBuffer>(
-        cx, memory.initial, memory.maximum, buffer);
+    return CreateBuffer<SharedArrayBufferObject, SharedArrayRawBuffer>(
+        cx, memory.initial, maxSize, buffer);
   }
-  return CreateSpecificWasmBuffer<ArrayBufferObject, WasmArrayRawBuffer>(
-      cx, memory.initial, memory.maximum, buffer);
+  return CreateBuffer<ArrayBufferObject, WasmArrayRawBuffer>(cx, memory.initial,
+                                                             maxSize, buffer);
 }
 
 bool ArrayBufferObject::prepareForAsmJS() {
   MOZ_ASSERT(byteLength() % wasm::PageSize == 0,
              "prior size checking should have guaranteed page-size multiple");
   MOZ_ASSERT(byteLength() > 0,
              "prior size checking should have excluded empty buffers");
 
@@ -1089,55 +1091,79 @@ bool ArrayBufferObject::wasmGrowToSizeIn
 
   // Set |newBuf|'s contents to |oldBuf|'s original contents.
   newBuf->initialize(newSize, oldContents);
   AddCellMemory(newBuf, newSize, MemoryUse::ArrayBufferContents);
 
   return true;
 }
 
+#ifndef WASM_HUGE_MEMORY
 /* static */
 bool ArrayBufferObject::wasmMovingGrowToSize(
     uint32_t newSize, HandleArrayBufferObject oldBuf,
     MutableHandleArrayBufferObject newBuf, JSContext* cx) {
   // On failure, do not throw and ensure that the original buffer is
   // unmodified and valid.
 
   if (newSize > ArrayBufferObject::MaxBufferByteLength) {
     return false;
   }
 
-  if (wasm::ComputeMappedSize(newSize) <= oldBuf->wasmMappedSize() ||
+  if (newSize <= oldBuf->wasmBoundsCheckLimit() ||
       oldBuf->contents().wasmBuffer()->extendMappedSize(newSize)) {
     return wasmGrowToSizeInPlace(newSize, oldBuf, newBuf, cx);
   }
 
   newBuf.set(ArrayBufferObject::createEmpty(cx));
   if (!newBuf) {
     cx->clearPendingException();
     return false;
   }
 
   WasmArrayRawBuffer* newRawBuf =
-      WasmArrayRawBuffer::Allocate(newSize, Nothing(), Nothing());
+      WasmArrayRawBuffer::Allocate(newSize, Nothing());
   if (!newRawBuf) {
     return false;
   }
 
   AddCellMemory(newBuf, newSize, MemoryUse::ArrayBufferContents);
 
   BufferContents contents =
       BufferContents::createWasm(newRawBuf->dataPointer());
   newBuf->initialize(newSize, contents);
 
   memcpy(newBuf->dataPointer(), oldBuf->dataPointer(), oldBuf->byteLength());
   ArrayBufferObject::detach(cx, oldBuf);
   return true;
 }
 
+uint32_t ArrayBufferObject::wasmBoundsCheckLimit() const {
+  if (isWasm()) {
+    return contents().wasmBuffer()->boundsCheckLimit();
+  }
+  return byteLength();
+}
+
+uint32_t ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const {
+  if (is<ArrayBufferObject>()) {
+    return as<ArrayBufferObject>().wasmBoundsCheckLimit();
+  }
+  return as<SharedArrayBufferObject>().wasmBoundsCheckLimit();
+}
+#else
+uint32_t ArrayBufferObject::wasmBoundsCheckLimit() const {
+  return byteLength();
+}
+
+uint32_t ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const {
+  return byteLength();
+}
+#endif
+
 uint32_t ArrayBufferObject::flags() const {
   return uint32_t(getFixedSlot(FLAGS_SLOT).toInt32());
 }
 
 void ArrayBufferObject::setFlags(uint32_t flags) {
   setFixedSlot(FLAGS_SLOT, Int32Value(flags));
 }
 
--- a/js/src/vm/ArrayBufferObject.h
+++ b/js/src/vm/ArrayBufferObject.h
@@ -30,23 +30,25 @@ class WasmArrayRawBuffer;
 void* MapBufferMemory(size_t mappedSize, size_t initialCommittedSize);
 
 // Commit additional memory in an existing mapping.  `dataEnd` must be the
 // correct value for the end of the existing committed area, and `delta` must be
 // a byte amount to grow the mapping by, and must be a multiple of the page
 // size.  Returns false on failure.
 bool CommitBufferMemory(void* dataEnd, uint32_t delta);
 
+#ifndef WASM_HUGE_MEMORY
 // Extend an existing mapping by adding uncommited pages to it.  `dataStart`
 // must be the pointer to the start of the existing mapping, `mappedSize` the
 // size of the existing mapping, and `newMappedSize` the size of the extended
 // mapping (sizes in bytes), with `mappedSize` <= `newMappedSize`.  Both sizes
 // must be divisible by the page size.  Returns false on failure.
 bool ExtendBufferMapping(void* dataStart, size_t mappedSize,
                          size_t newMappedSize);
+#endif
 
 // Remove an existing mapping.  `dataStart` must be the pointer to the start of
 // the mapping, and `mappedSize` the size of that mapping.
 void UnmapBufferMemory(void* dataStart, size_t mappedSize);
 
 // Return the number of currently live mapped buffers.
 int32_t LiveMappedBufferCount();
 
@@ -115,16 +117,17 @@ class ArrayBufferObjectMaybeShared : pub
   // WebAssembly support:
   // Note: the eventual goal is to remove this from ArrayBuffer and have
   // (Shared)ArrayBuffers alias memory owned by some wasm::Memory object.
 
   mozilla::Maybe<uint32_t> wasmMaxSize() const {
     return WasmArrayBufferMaxSize(this);
   }
   size_t wasmMappedSize() const { return WasmArrayBufferMappedSize(this); }
+  uint32_t wasmBoundsCheckLimit() const;
 
   inline bool isPreparedForAsmJS() const;
   inline bool isWasm() const;
 };
 
 typedef Rooted<ArrayBufferObjectMaybeShared*>
     RootedArrayBufferObjectMaybeShared;
 typedef Handle<ArrayBufferObjectMaybeShared*>
@@ -412,19 +415,22 @@ class ArrayBufferObject : public ArrayBu
    */
   MOZ_MUST_USE bool prepareForAsmJS();
 
   size_t wasmMappedSize() const;
   mozilla::Maybe<uint32_t> wasmMaxSize() const;
   static MOZ_MUST_USE bool wasmGrowToSizeInPlace(
       uint32_t newSize, Handle<ArrayBufferObject*> oldBuf,
       MutableHandle<ArrayBufferObject*> newBuf, JSContext* cx);
+#ifndef WASM_HUGE_MEMORY
   static MOZ_MUST_USE bool wasmMovingGrowToSize(
       uint32_t newSize, Handle<ArrayBufferObject*> oldBuf,
       MutableHandle<ArrayBufferObject*> newBuf, JSContext* cx);
+#endif
+  uint32_t wasmBoundsCheckLimit() const;
 
   static void finalize(JSFreeOp* fop, JSObject* obj);
 
   static BufferContents createMappedContents(int fd, size_t offset,
                                              size_t length);
 
   static size_t offsetOfDataSlot() { return getFixedSlotOffset(DATA_SLOT); }
 
--- a/js/src/vm/MutexIDs.h
+++ b/js/src/vm/MutexIDs.h
@@ -51,17 +51,16 @@
   _(WasmFuncTypeIdSet, 500)           \
   _(WasmCodeProfilingLabels, 500)     \
   _(WasmCompileTaskState, 500)        \
   _(WasmCodeBytesEnd, 500)            \
   _(WasmStreamEnd, 500)               \
   _(WasmStreamStatus, 500)            \
   _(WasmRuntimeInstances, 500)        \
   _(WasmSignalInstallState, 500)      \
-  _(WasmHugeMemoryEnabled, 500)       \
   _(MemoryTracker, 500)               \
                                       \
   _(ThreadId, 600)                    \
   _(WasmCodeSegmentMap, 600)          \
   _(TraceLoggerGraphState, 600)       \
   _(VTuneLock, 600)
 
 namespace js {
--- a/js/src/vm/SharedArrayObject.cpp
+++ b/js/src/vm/SharedArrayObject.cpp
@@ -24,83 +24,86 @@
 #include "vm/NativeObject-inl.h"
 
 using mozilla::CheckedInt;
 using mozilla::Maybe;
 using mozilla::Nothing;
 
 using namespace js;
 
+static size_t SharedArrayMappedSizeForWasm(size_t declaredMaxSize) {
+#ifdef WASM_HUGE_MEMORY
+  return wasm::HugeMappedSize;
+#else
+  return wasm::ComputeMappedSize(declaredMaxSize);
+#endif
+}
+
 static uint32_t SharedArrayAccessibleSize(uint32_t length) {
   return AlignBytes(length, gc::SystemPageSize());
 }
 
-// `maxSize` must be something for wasm, nothing for other cases.
+// `max` must be something for wasm, nothing for other cases.
 SharedArrayRawBuffer* SharedArrayRawBuffer::Allocate(
-    uint32_t length, const Maybe<uint32_t>& maxSize,
-    const Maybe<size_t>& mappedSize) {
+    uint32_t length, const Maybe<uint32_t>& max) {
   MOZ_RELEASE_ASSERT(length <= ArrayBufferObject::MaxBufferByteLength);
 
+  bool preparedForWasm = max.isSome();
+
   uint32_t accessibleSize = SharedArrayAccessibleSize(length);
   if (accessibleSize < length) {
     return nullptr;
   }
 
-  bool preparedForWasm = maxSize.isSome();
-  uint32_t computedMaxSize;
-  size_t computedMappedSize;
+  uint32_t maxSize = max.isSome() ? *max : accessibleSize;
 
+  size_t mappedSize;
   if (preparedForWasm) {
-    computedMaxSize = *maxSize;
-    computedMappedSize = mappedSize.isSome()
-                             ? *mappedSize
-                             : wasm::ComputeMappedSize(computedMaxSize);
+    mappedSize = SharedArrayMappedSizeForWasm(maxSize);
   } else {
-    computedMappedSize = accessibleSize;
-    computedMaxSize = accessibleSize;
+    mappedSize = accessibleSize;
   }
 
-  MOZ_ASSERT(accessibleSize <= computedMaxSize);
-  MOZ_ASSERT(accessibleSize <= computedMappedSize);
-
-  uint64_t mappedSizeWithHeader = computedMappedSize + gc::SystemPageSize();
+  uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize();
   uint64_t accessibleSizeWithHeader = accessibleSize + gc::SystemPageSize();
 
   void* p = MapBufferMemory(mappedSizeWithHeader, accessibleSizeWithHeader);
   if (!p) {
     return nullptr;
   }
 
   uint8_t* buffer = reinterpret_cast<uint8_t*>(p) + gc::SystemPageSize();
   uint8_t* base = buffer - sizeof(SharedArrayRawBuffer);
   SharedArrayRawBuffer* rawbuf = new (base) SharedArrayRawBuffer(
-      buffer, length, computedMaxSize, computedMappedSize, preparedForWasm);
+      buffer, length, maxSize, mappedSize, preparedForWasm);
   MOZ_ASSERT(rawbuf->length_ == length);  // Deallocation needs this
   return rawbuf;
 }
 
+#ifndef WASM_HUGE_MEMORY
 void SharedArrayRawBuffer::tryGrowMaxSizeInPlace(uint32_t deltaMaxSize) {
   CheckedInt<uint32_t> newMaxSize = maxSize_;
   newMaxSize += deltaMaxSize;
   MOZ_ASSERT(newMaxSize.isValid());
   MOZ_ASSERT(newMaxSize.value() % wasm::PageSize == 0);
 
-  size_t newMappedSize = wasm::ComputeMappedSize(newMaxSize.value());
+  size_t newMappedSize = SharedArrayMappedSizeForWasm(newMaxSize.value());
   MOZ_ASSERT(mappedSize_ <= newMappedSize);
   if (mappedSize_ == newMappedSize) {
     return;
   }
 
   if (!ExtendBufferMapping(basePointer(), mappedSize_, newMappedSize)) {
     return;
   }
 
   mappedSize_ = newMappedSize;
   maxSize_ = newMaxSize.value();
 }
+#endif
 
 bool SharedArrayRawBuffer::wasmGrowToSizeInPlace(const Lock&,
                                                  uint32_t newLength) {
   if (newLength > ArrayBufferObject::MaxBufferByteLength) {
     return false;
   }
 
   MOZ_ASSERT(newLength >= length_);
@@ -217,17 +220,17 @@ bool SharedArrayBufferObject::class_cons
   args.rval().setObject(*bufobj);
   return true;
 }
 
 SharedArrayBufferObject* SharedArrayBufferObject::New(JSContext* cx,
                                                       uint32_t length,
                                                       HandleObject proto) {
   SharedArrayRawBuffer* buffer =
-      SharedArrayRawBuffer::Allocate(length, Nothing(), Nothing());
+      SharedArrayRawBuffer::Allocate(length, Nothing());
   if (!buffer) {
     return nullptr;
   }
 
   SharedArrayBufferObject* obj = New(cx, buffer, length, proto);
   if (!obj) {
     buffer->dropReference();
     return nullptr;
@@ -280,16 +283,25 @@ void SharedArrayBufferObject::Finalize(J
   // which causes a SharedArrayRawBuffer to never be attached.
   Value v = buf.getReservedSlot(RAWBUF_SLOT);
   if (!v.isUndefined()) {
     buf.rawBufferObject()->dropReference();
     buf.dropRawBuffer();
   }
 }
 
+#ifndef WASM_HUGE_MEMORY
+uint32_t SharedArrayBufferObject::wasmBoundsCheckLimit() const {
+  if (isWasm()) {
+    return rawBufferObject()->boundsCheckLimit();
+  }
+  return byteLength();
+}
+#endif
+
 /* static */
 void SharedArrayBufferObject::addSizeOfExcludingThis(
     JSObject* obj, mozilla::MallocSizeOf mallocSizeOf, JS::ClassInfo* info) {
   // Divide the buffer size by the refcount to get the fraction of the buffer
   // owned by this thread. It's conceivable that the refcount might change in
   // the middle of memory reporting, in which case the amount reported for
   // some threads might be to high (if the refcount goes up) or too low (if
   // the refcount goes down). But that's unlikely and hard to avoid, so we
--- a/js/src/vm/SharedArrayObject.h
+++ b/js/src/vm/SharedArrayObject.h
@@ -86,19 +86,18 @@ class SharedArrayRawBuffer {
     SharedArrayRawBuffer* buf;
 
    public:
     explicit Lock(SharedArrayRawBuffer* buf) : buf(buf) { buf->lock_.lock(); }
     ~Lock() { buf->lock_.unlock(); }
   };
 
   // max must be Something for wasm, Nothing for other uses
-  static SharedArrayRawBuffer* Allocate(
-      uint32_t length, const mozilla::Maybe<uint32_t>& maxSize,
-      const mozilla::Maybe<size_t>& mappedSize);
+  static SharedArrayRawBuffer* Allocate(uint32_t initial,
+                                        const mozilla::Maybe<uint32_t>& max);
 
   // This may be called from multiple threads.  The caller must take
   // care of mutual exclusion.
   FutexWaiter* waiters() const { return waiters_; }
 
   // This may be called from multiple threads.  The caller must take
   // care of mutual exclusion.
   void setWaiters(FutexWaiter* waiters) { waiters_ = waiters; }
@@ -110,19 +109,25 @@ class SharedArrayRawBuffer {
   }
 
   uint32_t byteLength(const Lock&) const { return length_; }
 
   uint32_t maxSize() const { return maxSize_; }
 
   size_t mappedSize() const { return mappedSize_; }
 
+#ifndef WASM_HUGE_MEMORY
+  uint32_t boundsCheckLimit() const { return mappedSize_ - wasm::GuardSize; }
+#endif
+
   bool isWasm() const { return preparedForWasm_; }
 
+#ifndef WASM_HUGE_MEMORY
   void tryGrowMaxSizeInPlace(uint32_t deltaMaxSize);
+#endif
 
   bool wasmGrowToSizeInPlace(const Lock&, uint32_t newLength);
 
   uint32_t refcount() const { return refcount_; }
 
   MOZ_MUST_USE bool addReference();
   void dropReference();
 
@@ -223,16 +228,20 @@ class SharedArrayBufferObject : public A
       JSContext* cx, SharedArrayRawBuffer* buffer, uint32_t initialSize);
 
   mozilla::Maybe<uint32_t> wasmMaxSize() const {
     return mozilla::Some(rawBufferObject()->maxSize());
   }
 
   size_t wasmMappedSize() const { return rawBufferObject()->mappedSize(); }
 
+#ifndef WASM_HUGE_MEMORY
+  uint32_t wasmBoundsCheckLimit() const;
+#endif
+
  private:
   void acceptRawBuffer(SharedArrayRawBuffer* buffer, uint32_t length);
   void dropRawBuffer();
 };
 
 bool IsSharedArrayBuffer(HandleValue v);
 bool IsSharedArrayBuffer(HandleObject o);
 bool IsSharedArrayBuffer(JSObject* o);
--- a/js/src/wasm/AsmJS.cpp
+++ b/js/src/wasm/AsmJS.cpp
@@ -1350,17 +1350,17 @@ class MOZ_STACK_CLASS JS_HAZ_ROOTED Modu
         funcDefs_(cx),
         tables_(cx),
         globalMap_(cx),
         sigSet_(cx),
         funcImportMap_(cx),
         arrayViews_(cx),
         compilerEnv_(CompileMode::Once, Tier::Optimized, OptimizedBackend::Ion,
                      DebugEnabled::False, /* ref types */ false,
-                     /* gc types */ false, /* huge memory */ false),
+                     /* gc types */ false),
         env_(&compilerEnv_, Shareable::False, ModuleKind::AsmJS) {
     compilerEnv_.computeParameters(/* gc types */ false);
     env_.minMemoryLength = RoundUpToNextValidAsmJSHeapLength(0);
   }
 
  protected:
   MOZ_MUST_USE bool addStandardLibraryMathInfo() {
     static constexpr struct {
--- a/js/src/wasm/Makefile
+++ b/js/src/wasm/Makefile
@@ -18,17 +18,17 @@ help:
 
 update:
 	[ -d ./spec ] || git clone https://github.com/webassembly/spec ./spec
 	(cd ./spec/interpreter && make)
 	./spec/test/build.py \
 		--use-sync \
 		--js ../jit-test/tests/wasm/spec \
 		--html ../../../testing/web-platform/mozilla/tests/wasm
-	echo "|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; test-also=--disable-wasm-huge-memory; include:wasm-testharness.js" > ../jit-test/tests/wasm/spec/directives.txt
+	echo "|jit-test| test-also=--wasm-compiler=ion; test-also=--wasm-compiler=baseline; test-also=--test-wasm-await-tier2; include:wasm-testharness.js" > ../jit-test/tests/wasm/spec/directives.txt
 	echo "|jit-test| skip-if:true" > ../jit-test/tests/wasm/spec/harness/directives.txt
 
 run:
 	@[ -z $(MOZCONFIG) ] && echo "You need to define the MOZCONFIG env variable first."
 	@[ -z $(MOZCONFIG) ] || ../../../mach wpt /_mozilla/wasm
 
 expectations:
 	@[ -z $(MOZCONFIG) ] && echo "You need to define the MOZCONFIG env variable first." || true
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -5240,20 +5240,18 @@ class BaseCompiler final : public BaseCo
   // Heap access.
 
   void bceCheckLocal(MemoryAccessDesc* access, AccessCheck* check,
                      uint32_t local) {
     if (local >= sizeof(BCESet) * 8) {
       return;
     }
 
-    uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
-
     if ((bceSafe_ & (BCESet(1) << local)) &&
-        access->offset() < offsetGuardLimit) {
+        access->offset() < wasm::OffsetGuardLimit) {
       check->omitBoundsCheck = true;
     }
 
     // The local becomes safe even if the offset is beyond the guard limit.
     bceSafe_ |= (BCESet(1) << local);
   }
 
   void bceLocalIsUpdated(uint32_t local) {
@@ -5261,20 +5259,19 @@ class BaseCompiler final : public BaseCo
       return;
     }
 
     bceSafe_ &= ~(BCESet(1) << local);
   }
 
   void prepareMemoryAccess(MemoryAccessDesc* access, AccessCheck* check,
                            RegI32 tls, RegI32 ptr) {
-    uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
-
     // Fold offset if necessary for further computations.
-    if (access->offset() >= offsetGuardLimit ||
+
+    if (access->offset() >= OffsetGuardLimit ||
         (access->isAtomic() && !check->omitAlignmentCheck &&
          !check->onlyPointerAlignment)) {
       Label ok;
       masm.branchAdd32(Assembler::CarryClear, Imm32(access->offset()), ptr,
                        &ok);
       masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset());
       masm.bind(&ok);
       access->clearOffset();
@@ -5290,36 +5287,38 @@ class BaseCompiler final : public BaseCo
       masm.branchTest32(Assembler::Zero, ptr, Imm32(access->byteSize() - 1),
                         &ok);
       masm.wasmTrap(Trap::UnalignedAccess, bytecodeOffset());
       masm.bind(&ok);
     }
 
     // Ensure no tls if we don't need it.
 
-    if (env_.hugeMemoryEnabled()) {
-      // We have HeapReg and no bounds checking and need load neither
-      // memoryBase nor boundsCheckLimit from tls.
-      MOZ_ASSERT_IF(check->omitBoundsCheck, tls.isInvalid());
-    }
+#ifdef WASM_HUGE_MEMORY
+    // We have HeapReg and no bounds checking and need load neither
+    // memoryBase nor boundsCheckLimit from tls.
+    MOZ_ASSERT_IF(check->omitBoundsCheck, tls.isInvalid());
+#endif
 #ifdef JS_CODEGEN_ARM
     // We have HeapReg on ARM and don't need to load the memoryBase from tls.
     MOZ_ASSERT_IF(check->omitBoundsCheck, tls.isInvalid());
 #endif
 
     // Bounds check if required.
 
-    if (!env_.hugeMemoryEnabled() && !check->omitBoundsCheck) {
+#ifndef WASM_HUGE_MEMORY
+    if (!check->omitBoundsCheck) {
       Label ok;
       masm.wasmBoundsCheck(Assembler::Below, ptr,
                            Address(tls, offsetof(TlsData, boundsCheckLimit)),
                            &ok);
       masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset());
       masm.bind(&ok);
     }
+#endif
   }
 
 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) ||      \
     defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
     defined(JS_CODEGEN_MIPS64)
   BaseIndex prepareAtomicMemoryAccess(MemoryAccessDesc* access,
                                       AccessCheck* check, RegI32 tls,
                                       RegI32 ptr) {
@@ -5364,21 +5363,23 @@ class BaseCompiler final : public BaseCo
       }
     }
 #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
     *temp1 = needI32();
 #endif
   }
 
   MOZ_MUST_USE bool needTlsForAccess(const AccessCheck& check) {
-#if defined(JS_CODEGEN_X86)
-    // x86 requires Tls for memory base
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
+    defined(JS_CODEGEN_MIPS64)
+    return !check.omitBoundsCheck;
+#elif defined(JS_CODEGEN_X86)
     return true;
 #else
-    return !env_.hugeMemoryEnabled() && !check.omitBoundsCheck;
+    return false;
 #endif
   }
 
   // ptr and dest may be the same iff dest is I32.
   // This may destroy ptr even if ptr and dest are not the same.
   MOZ_MUST_USE bool load(MemoryAccessDesc* access, AccessCheck* check,
                          RegI32 tls, RegI32 ptr, AnyReg dest, RegI32 temp1,
                          RegI32 temp2, RegI32 temp3) {
@@ -9334,20 +9335,19 @@ RegI32 BaseCompiler::popMemoryAccess(Mem
                                      AccessCheck* check) {
   check->onlyPointerAlignment =
       (access->offset() & (access->byteSize() - 1)) == 0;
 
   int32_t addrTemp;
   if (popConstI32(&addrTemp)) {
     uint32_t addr = addrTemp;
 
-    uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
-
     uint64_t ea = uint64_t(addr) + uint64_t(access->offset());
-    uint64_t limit = uint64_t(env_.minMemoryLength) + offsetGuardLimit;
+    uint64_t limit =
+        uint64_t(env_.minMemoryLength) + uint64_t(wasm::OffsetGuardLimit);
 
     check->omitBoundsCheck = ea < limit;
     check->omitAlignmentCheck = (ea & (access->byteSize() - 1)) == 0;
 
     // Fold the offset into the pointer if we can, as this is always
     // beneficial.
 
     if (ea <= UINT32_MAX) {
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -886,50 +886,45 @@ bool MetadataTier::clone(const MetadataT
 
   return true;
 }
 
 size_t Metadata::serializedSize() const {
   return sizeof(pod()) + SerializedVectorSize(funcTypeIds) +
          SerializedPodVectorSize(globals) + SerializedPodVectorSize(tables) +
          sizeof(moduleName) + SerializedPodVectorSize(funcNames) +
-         filename.serializedSize() + sourceMapURL.serializedSize() +
-         sizeof(uint8_t);
+         filename.serializedSize() + sourceMapURL.serializedSize();
 }
 
 uint8_t* Metadata::serialize(uint8_t* cursor) const {
   MOZ_ASSERT(!debugEnabled && debugFuncArgTypes.empty() &&
              debugFuncReturnTypes.empty());
   cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
   cursor = SerializeVector(cursor, funcTypeIds);
   cursor = SerializePodVector(cursor, globals);
   cursor = SerializePodVector(cursor, tables);
   cursor = WriteBytes(cursor, &moduleName, sizeof(moduleName));
   cursor = SerializePodVector(cursor, funcNames);
   cursor = filename.serialize(cursor);
   cursor = sourceMapURL.serialize(cursor);
-  cursor = WriteScalar(cursor, uint8_t(omitsBoundsChecks));
   return cursor;
 }
 
 /* static */ const uint8_t* Metadata::deserialize(const uint8_t* cursor) {
-  uint8_t scalarOmitsBoundsChecks = 0;
   (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
       (cursor = DeserializeVector(cursor, &funcTypeIds)) &&
       (cursor = DeserializePodVector(cursor, &globals)) &&
       (cursor = DeserializePodVector(cursor, &tables)) &&
       (cursor = ReadBytes(cursor, &moduleName, sizeof(moduleName))) &&
       (cursor = DeserializePodVector(cursor, &funcNames)) &&
       (cursor = filename.deserialize(cursor)) &&
-      (cursor = sourceMapURL.deserialize(cursor)) &&
-      (cursor = ReadScalar<uint8_t>(cursor, &scalarOmitsBoundsChecks));
+      (cursor = sourceMapURL.deserialize(cursor));
   debugEnabled = false;
   debugFuncArgTypes.clear();
   debugFuncReturnTypes.clear();
-  omitsBoundsChecks = !!scalarOmitsBoundsChecks;
   return cursor;
 }
 
 size_t Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
   return SizeOfVectorExcludingThis(funcTypeIds, mallocSizeOf) +
          globals.sizeOfExcludingThis(mallocSizeOf) +
          tables.sizeOfExcludingThis(mallocSizeOf) +
          funcNames.sizeOfExcludingThis(mallocSizeOf) +
--- a/js/src/wasm/WasmCode.h
+++ b/js/src/wasm/WasmCode.h
@@ -334,17 +334,16 @@ typedef Vector<ValTypeVector, 0, SystemA
 typedef Vector<ExprType, 0, SystemAllocPolicy> FuncReturnTypesVector;
 
 struct Metadata : public ShareableBase<Metadata>, public MetadataCacheablePod {
   FuncTypeWithIdVector funcTypeIds;
   GlobalDescVector globals;
   TableDescVector tables;
   CacheableChars filename;
   CacheableChars sourceMapURL;
-  bool omitsBoundsChecks;
 
   // namePayload points at the name section's CustomSection::payload so that
   // the Names (which are use payload-relative offsets) can be used
   // independently of the Module without duplicating the name section.
   SharedBytes namePayload;
   Maybe<Name> moduleName;
   NameVector funcNames;
 
--- a/js/src/wasm/WasmCompile.cpp
+++ b/js/src/wasm/WasmCompile.cpp
@@ -23,17 +23,16 @@
 
 #include "jit/ProcessExecutableMemory.h"
 #include "util/Text.h"
 #include "wasm/WasmBaselineCompile.h"
 #include "wasm/WasmCraneliftCompile.h"
 #include "wasm/WasmGenerator.h"
 #include "wasm/WasmIonCompile.h"
 #include "wasm/WasmOpIter.h"
-#include "wasm/WasmProcess.h"
 #include "wasm/WasmSignalHandlers.h"
 #include "wasm/WasmValidate.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
 uint32_t wasm::ObservedCPUFeatures() {
@@ -137,17 +136,16 @@ SharedCompileArgs CompileArgs::build(JSC
 
   target->baselineEnabled = baseline;
   target->ionEnabled = ion;
   target->craneliftEnabled = cranelift;
   target->debugEnabled = debug;
   target->sharedMemoryEnabled = sharedMemory;
   target->forceTiering = forceTiering;
   target->gcEnabled = gc;
-  target->hugeMemory = wasm::IsHugeMemoryEnabled();
 
   return target;
 }
 
 // Classify the current system as one of a set of recognizable classes.  This
 // really needs to get our tier-1 systems right.
 //
 // TODO: We don't yet have a good measure of how fast a system is.  We
@@ -433,26 +431,24 @@ static bool TieringBeneficial(uint32_t c
 
 CompilerEnvironment::CompilerEnvironment(const CompileArgs& args)
     : state_(InitialWithArgs), args_(&args) {}
 
 CompilerEnvironment::CompilerEnvironment(CompileMode mode, Tier tier,
                                          OptimizedBackend optimizedBackend,
                                          DebugEnabled debugEnabled,
                                          bool refTypesConfigured,
-                                         bool gcTypesConfigured,
-                                         bool hugeMemory)
+                                         bool gcTypesConfigured)
     : state_(InitialWithModeTierDebug),
       mode_(mode),
       tier_(tier),
       optimizedBackend_(optimizedBackend),
       debug_(debugEnabled),
       refTypes_(refTypesConfigured),
-      gcTypes_(gcTypesConfigured),
-      hugeMemory_(hugeMemory) {}
+      gcTypes_(gcTypesConfigured) {}
 
 void CompilerEnvironment::computeParameters(bool gcFeatureOptIn) {
   MOZ_ASSERT(state_ == InitialWithModeTierDebug);
 
   if (gcTypes_) {
     gcTypes_ = gcFeatureOptIn;
   }
   state_ = Computed;
@@ -467,17 +463,16 @@ void CompilerEnvironment::computeParamet
   }
 
   bool gcEnabled = args_->gcEnabled && gcFeatureOptIn;
   bool baselineEnabled = args_->baselineEnabled;
   bool ionEnabled = args_->ionEnabled;
   bool debugEnabled = args_->debugEnabled;
   bool craneliftEnabled = args_->craneliftEnabled;
   bool forceTiering = args_->forceTiering;
-  bool hugeMemory = args_->hugeMemory;
 
   bool hasSecondTier = ionEnabled || craneliftEnabled;
   MOZ_ASSERT_IF(gcEnabled || debugEnabled, baselineEnabled);
   MOZ_ASSERT_IF(forceTiering, baselineEnabled && hasSecondTier);
 
   // HasCompilerSupport() should prevent failure here
   MOZ_RELEASE_ASSERT(baselineEnabled || ionEnabled || craneliftEnabled);
 
@@ -498,17 +493,16 @@ void CompilerEnvironment::computeParamet
   }
 
   optimizedBackend_ =
       craneliftEnabled ? OptimizedBackend::Cranelift : OptimizedBackend::Ion;
 
   debug_ = debugEnabled ? DebugEnabled::True : DebugEnabled::False;
   gcTypes_ = gcEnabled;
   refTypes_ = !craneliftEnabled;
-  hugeMemory_ = hugeMemory;
   state_ = Computed;
 }
 
 template <class DecoderT>
 static bool DecodeFunctionBody(DecoderT& d, ModuleGenerator& mg,
                                uint32_t funcIndex) {
   uint32_t bodySize;
   if (!d.readVarU32(&bodySize)) {
@@ -605,18 +599,17 @@ void wasm::CompileTier2(const CompileArg
   bool gcTypesConfigured = false;  // No optimized backend support yet
   bool refTypesConfigured = !args.craneliftEnabled;
   OptimizedBackend optimizedBackend = args.craneliftEnabled
                                           ? OptimizedBackend::Cranelift
                                           : OptimizedBackend::Ion;
 
   CompilerEnvironment compilerEnv(CompileMode::Tier2, Tier::Optimized,
                                   optimizedBackend, DebugEnabled::False,
-                                  refTypesConfigured, gcTypesConfigured,
-                                  args.hugeMemory);
+                                  refTypesConfigured, gcTypesConfigured);
 
   ModuleEnvironment env(&compilerEnv, args.sharedMemoryEnabled
                                           ? Shareable::True
                                           : Shareable::False);
   if (!DecodeModuleEnvironment(d, &env)) {
     return;
   }
 
--- a/js/src/wasm/WasmCompile.h
+++ b/js/src/wasm/WasmCompile.h
@@ -52,17 +52,16 @@ struct CompileArgs : ShareableBase<Compi
 
   bool baselineEnabled;
   bool ionEnabled;
   bool craneliftEnabled;
   bool debugEnabled;
   bool sharedMemoryEnabled;
   bool forceTiering;
   bool gcEnabled;
-  bool hugeMemory;
 
   // CompileArgs has two constructors:
   //
   // - one through a factory function `build`, which checks that flags are
   // consistent with each other.
   // - one that gives complete access to underlying fields.
   //
   // You should use the first one in general, unless you have a very good
@@ -74,18 +73,17 @@ struct CompileArgs : ShareableBase<Compi
   explicit CompileArgs(ScriptedCaller&& scriptedCaller)
       : scriptedCaller(std::move(scriptedCaller)),
         baselineEnabled(false),
         ionEnabled(false),
         craneliftEnabled(false),
         debugEnabled(false),
         sharedMemoryEnabled(false),
         forceTiering(false),
-        gcEnabled(false),
-        hugeMemory(false) {}
+        gcEnabled(false) {}
 };
 
 // Return the estimated compiled (machine) code size for the given bytecode size
 // compiled at the given tier.
 
 double EstimateCompiledCodeSize(Tier tier, size_t bytecodeSize);
 
 // Compile the given WebAssembly bytecode with the given arguments into a
--- a/js/src/wasm/WasmCraneliftCompile.cpp
+++ b/js/src/wasm/WasmCraneliftCompile.cpp
@@ -222,30 +222,17 @@ static bool GenerateCraneliftCode(WasmMa
 
 class AutoCranelift {
   CraneliftStaticEnvironment staticEnv_;
   CraneliftModuleEnvironment env_;
   CraneliftCompiler* compiler_;
 
  public:
   explicit AutoCranelift(const ModuleEnvironment& env)
-      : env_(env), compiler_(nullptr) {
-#ifdef WASM_SUPPORTS_HUGE_MEMORY
-    if (env.hugeMemoryEnabled()) {
-      // In the huge memory configuration, we always reserve the full 4 GB
-      // index space for a heap.
-      staticEnv_.staticMemoryBound = HugeIndexRange;
-      staticEnv_.memoryGuardSize = HugeOffsetGuardLimit;
-    } else {
-      staticEnv_.memoryGuardSize = OffsetGuardLimit;
-    }
-#endif
-    // Otherwise, heap bounds are stored in the `boundsCheckLimit` field
-    // of TlsData.
-  }
+      : env_(env), compiler_(nullptr) {}
   bool init() {
     compiler_ = cranelift_compiler_create(&staticEnv_, &env_);
     return !!compiler_;
   }
   ~AutoCranelift() {
     if (compiler_) {
       cranelift_compiler_destroy(compiler_);
     }
@@ -255,18 +242,20 @@ class AutoCranelift {
 
 CraneliftFuncCompileInput::CraneliftFuncCompileInput(
     const FuncCompileInput& func)
     : bytecode(func.begin),
       bytecodeSize(func.end - func.begin),
       index(func.index),
       offset_in_module(func.lineOrBytecode) {}
 
+#ifndef WASM_HUGE_MEMORY
 static_assert(offsetof(TlsData, boundsCheckLimit) == sizeof(size_t),
               "fix make_heap() in wasm2clif.rs");
+#endif
 
 CraneliftStaticEnvironment::CraneliftStaticEnvironment()
     :
 #ifdef JS_CODEGEN_X64
       hasSse2(Assembler::HasSSE2()),
       hasSse3(Assembler::HasSSE3()),
       hasSse41(Assembler::HasSSE41()),
       hasSse42(Assembler::HasSSE42()),
@@ -286,18 +275,28 @@ CraneliftStaticEnvironment::CraneliftSta
       hasBmi2(false),
       hasLzcnt(false),
 #endif
 #if defined(XP_WIN)
       platformIsWindows(true),
 #else
       platformIsWindows(false),
 #endif
-      staticMemoryBound(0),
-      memoryGuardSize(0),
+      staticMemoryBound(
+#ifdef WASM_HUGE_MEMORY
+          // In the huge memory configuration, we always reserve the full 4 GB
+          // index space for a heap.
+          IndexRange
+#else
+          // Otherwise, heap bounds are stored in the `boundsCheckLimit` field
+          // of TlsData.
+          0
+#endif
+          ),
+      memoryGuardSize(OffsetGuardLimit),
       instanceTlsOffset(offsetof(TlsData, instance)),
       interruptTlsOffset(offsetof(TlsData, interrupt)),
       cxTlsOffset(offsetof(TlsData, cx)),
       realmCxOffset(JSContext::offsetOfRealm()),
       realmTlsOffset(offsetof(TlsData, realm)),
       realmFuncImportTlsOffset(offsetof(FuncImportTls, realm)) {
 }
 
--- a/js/src/wasm/WasmGenerator.cpp
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -1070,17 +1070,16 @@ SharedMetadata ModuleGenerator::finishMe
   metadata_->minMemoryLength = env_->minMemoryLength;
   metadata_->maxMemoryLength = env_->maxMemoryLength;
   metadata_->startFuncIndex = env_->startFuncIndex;
   metadata_->tables = std::move(env_->tables);
   metadata_->globals = std::move(env_->globals);
   metadata_->nameCustomSectionIndex = env_->nameCustomSectionIndex;
   metadata_->moduleName = env_->moduleName;
   metadata_->funcNames = std::move(env_->funcNames);
-  metadata_->omitsBoundsChecks = env_->hugeMemoryEnabled();
 
   // Copy over additional debug information.
 
   if (env_->debugEnabled()) {
     metadata_->debugEnabled = true;
 
     const size_t numFuncTypes = env_->funcTypes.length();
     if (!metadata_->debugFuncArgTypes.resize(numFuncTypes)) {
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -1218,17 +1218,18 @@ Instance::Instance(JSContext* cx, Handle
   for (auto t : code_->tiers()) {
     MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length());
   }
 #endif
   MOZ_ASSERT(tables_.length() == metadata().tables.length());
 
   tlsData()->memoryBase =
       memory ? memory->buffer().dataPointerEither().unwrap() : nullptr;
-  tlsData()->boundsCheckLimit = memory ? memory->boundsCheckLimit() : 0;
+  tlsData()->boundsCheckLimit =
+      memory ? memory->buffer().wasmBoundsCheckLimit() : 0;
   tlsData()->instance = this;
   tlsData()->realm = realm_;
   tlsData()->cx = cx;
   tlsData()->resetInterrupt(cx);
   tlsData()->jumpTable = code_->tieringJumpTable();
   tlsData()->addressOfNeedsIncrementalBarrier =
       (uint8_t*)cx->compartment()->zone()->addressOfNeedsIncrementalBarrier();
 
@@ -1889,17 +1890,17 @@ void Instance::ensureProfilingLabels(boo
 }
 
 void Instance::onMovingGrowMemory() {
   MOZ_ASSERT(!isAsmJS());
   MOZ_ASSERT(!memory_->isShared());
 
   ArrayBufferObject& buffer = memory_->buffer().as<ArrayBufferObject>();
   tlsData()->memoryBase = buffer.dataPointer();
-  tlsData()->boundsCheckLimit = memory_->boundsCheckLimit();
+  tlsData()->boundsCheckLimit = buffer.wasmBoundsCheckLimit();
 }
 
 void Instance::onMovingGrowTable(const Table* theTable) {
   MOZ_ASSERT(!isAsmJS());
 
   // `theTable` has grown and we must update cached data for it.  Importantly,
   // we can have cached those data in more than one location: we'll have
   // cached them once for each time the table was imported into this instance.
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -570,19 +570,21 @@ class FunctionCompiler {
                              offsetof(wasm::TlsData, memoryBase),
                              MIRType::Pointer, aliases);
     curBlock_->add(load);
 #endif
     return load;
   }
 
   MWasmLoadTls* maybeLoadBoundsCheckLimit() {
-    if (env_.hugeMemoryEnabled()) {
+#ifdef WASM_HUGE_MEMORY
+    if (!env_.isAsmJS()) {
       return nullptr;
     }
+#endif
     AliasSet aliases = env_.maxMemoryLength.isSome()
                            ? AliasSet::None()
                            : AliasSet::Load(AliasSet::WasmHeapMeta);
     auto load = MWasmLoadTls::New(alloc(), tlsPointer_,
                                   offsetof(wasm::TlsData, boundsCheckLimit),
                                   MIRType::Int32, aliases);
     curBlock_->add(load);
     return load;
@@ -609,42 +611,44 @@ class FunctionCompiler {
     *mustAdd = (access->offset() & (access->byteSize() - 1)) != 0;
     return true;
   }
 
   void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
                                         MDefinition** base) {
     MOZ_ASSERT(!inDeadCode());
 
-    uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
-
     // Fold a constant base into the offset (so the base is 0 in which case
     // the codegen is optimized), if it doesn't wrap or trigger an
     // MWasmAddOffset.
     if ((*base)->isConstant()) {
       uint32_t basePtr = (*base)->toConstant()->toInt32();
       uint32_t offset = access->offset();
 
-      if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
+      static_assert(
+          OffsetGuardLimit < UINT32_MAX,
+          "checking for overflow against OffsetGuardLimit is enough.");
+
+      if (offset < OffsetGuardLimit && basePtr < OffsetGuardLimit - offset) {
         auto* ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
         curBlock_->add(ins);
         *base = ins;
         access->setOffset(access->offset() + basePtr);
       }
     }
 
     bool mustAdd = false;
     bool alignmentCheck = needAlignmentCheck(access, *base, &mustAdd);
 
     // If the offset is bigger than the guard region, a separate instruction
     // is necessary to add the offset to the base and check for overflow.
     //
     // Also add the offset if we have a Wasm atomic access that needs
     // alignment checking and the offset affects alignment.
-    if (access->offset() >= offsetGuardLimit || mustAdd ||
+    if (access->offset() >= OffsetGuardLimit || mustAdd ||
         !JitOptions.wasmFoldOffsets) {
       *base = computeEffectiveAddress(*base, access);
     }
 
     if (alignmentCheck) {
       curBlock_->add(MWasmAlignmentCheck::New(
           alloc(), *base, access->byteSize(), bytecodeOffset()));
     }
--- a/js/src/wasm/WasmJS.cpp
+++ b/js/src/wasm/WasmJS.cpp
@@ -36,17 +36,16 @@
 #include "vm/Interpreter.h"
 #include "vm/StringType.h"
 #include "wasm/WasmBaselineCompile.h"
 #include "wasm/WasmCompile.h"
 #include "wasm/WasmCraneliftCompile.h"
 #include "wasm/WasmInstance.h"
 #include "wasm/WasmIonCompile.h"
 #include "wasm/WasmModule.h"
-#include "wasm/WasmProcess.h"
 #include "wasm/WasmSignalHandlers.h"
 #include "wasm/WasmStubs.h"
 #include "wasm/WasmValidate.h"
 
 #include "vm/ArrayBufferObject-inl.h"
 #include "vm/JSObject-inl.h"
 #include "vm/NativeObject-inl.h"
 
@@ -490,20 +489,16 @@ bool wasm::CompileAndSerialize(const Sha
   }
 
   // The caller has ensured HasCachingSupport(). Moreover, we want to ensure
   // we go straight to tier-2 so that we synchronously call
   // JS::OptimizedEncodingListener::storeOptimizedEncoding().
   compileArgs->baselineEnabled = false;
   compileArgs->ionEnabled = true;
 
-  // The caller must ensure that huge memory support is configured the same in
-  // the receiving process of this serialized module.
-  compileArgs->hugeMemory = wasm::IsHugeMemoryEnabled();
-
   SerializeListener listener(serialized);
 
   UniqueChars error;
   UniqueCharsVector warnings;
   SharedModule module =
       CompileBuffer(*compileArgs, bytecode, &error, &warnings, &listener);
   if (!module) {
     fprintf(stderr, "Compilation error: %s\n", error ? error.get() : "oom");
@@ -1819,41 +1814,24 @@ WasmMemoryObject::InstanceSet* WasmMemor
 
     InitReservedSlot(this, OBSERVERS_SLOT, observers.release(),
                      MemoryUse::WasmMemoryObservers);
   }
 
   return &observers();
 }
 
-bool WasmMemoryObject::isHuge() const {
-#ifdef WASM_SUPPORTS_HUGE_MEMORY
-  static_assert(ArrayBufferObject::MaxBufferByteLength < HugeMappedSize,
-                "Non-huge buffer may be confused as huge");
-  return buffer().wasmMappedSize() >= HugeMappedSize;
+bool WasmMemoryObject::movingGrowable() const {
+#ifdef WASM_HUGE_MEMORY
+  return false;
 #else
-  return false;
+  return !buffer().wasmMaxSize();
 #endif
 }
 
-bool WasmMemoryObject::movingGrowable() const {
-  return !isHuge() && !buffer().wasmMaxSize();
-}
-
-uint32_t WasmMemoryObject::boundsCheckLimit() const {
-  if (!buffer().isWasm() || isHuge()) {
-    return buffer().byteLength();
-  }
-  size_t mappedSize = buffer().wasmMappedSize();
-  MOZ_ASSERT(mappedSize <= UINT32_MAX);
-  MOZ_ASSERT(mappedSize >= wasm::GuardSize);
-  MOZ_ASSERT(wasm::IsValidBoundsCheckImmediate(mappedSize - wasm::GuardSize));
-  return mappedSize - wasm::GuardSize;
-}
-
 bool WasmMemoryObject::addMovingGrowObserver(JSContext* cx,
                                              WasmInstanceObject* instance) {
   MOZ_ASSERT(movingGrowable());
 
   InstanceSet* observers = getOrCreateObservers(cx);
   if (!observers) {
     return false;
   }
@@ -1912,33 +1890,38 @@ uint32_t WasmMemoryObject::grow(HandleWa
   newSize += delta;
   newSize *= PageSize;
   if (!newSize.isValid()) {
     return -1;
   }
 
   RootedArrayBufferObject newBuf(cx);
 
-  if (memory->movingGrowable()) {
-    MOZ_ASSERT(!memory->isHuge());
-    if (!ArrayBufferObject::wasmMovingGrowToSize(newSize.value(), oldBuf,
-                                                 &newBuf, cx)) {
+  if (Maybe<uint32_t> maxSize = oldBuf->wasmMaxSize()) {
+    if (newSize.value() > maxSize.value()) {
       return -1;
     }
-  } else {
-    if (Maybe<uint32_t> maxSize = oldBuf->wasmMaxSize()) {
-      if (newSize.value() > maxSize.value()) {
-        return -1;
-      }
-    }
 
     if (!ArrayBufferObject::wasmGrowToSizeInPlace(newSize.value(), oldBuf,
                                                   &newBuf, cx)) {
       return -1;
     }
+  } else {
+#ifdef WASM_HUGE_MEMORY
+    if (!ArrayBufferObject::wasmGrowToSizeInPlace(newSize.value(), oldBuf,
+                                                  &newBuf, cx)) {
+      return -1;
+    }
+#else
+    MOZ_ASSERT(memory->movingGrowable());
+    if (!ArrayBufferObject::wasmMovingGrowToSize(newSize.value(), oldBuf,
+                                                 &newBuf, cx)) {
+      return -1;
+    }
+#endif
   }
 
   memory->setReservedSlot(BUFFER_SLOT, ObjectValue(*newBuf));
 
   // Only notify moving-grow-observers after the BUFFER_SLOT has been updated
   // since observers will call buffer().
   if (memory->hasObservers()) {
     for (InstanceSet::Range r = memory->observers().all(); !r.empty();
--- a/js/src/wasm/WasmJS.h
+++ b/js/src/wasm/WasmJS.h
@@ -324,19 +324,17 @@ class WasmMemoryObject : public NativeOb
   ArrayBufferObjectMaybeShared& buffer() const;
 
   // The current length of the memory.  In the case of shared memory, the
   // length can change at any time.  Also note that this will acquire a lock
   // for shared memory, so do not call this from a signal handler.
   uint32_t volatileMemoryLength() const;
 
   bool isShared() const;
-  bool isHuge() const;
   bool movingGrowable() const;
-  uint32_t boundsCheckLimit() const;
 
   // If isShared() is true then obtain the underlying buffer object.
   SharedArrayRawBuffer* sharedArrayRawBuffer() const;
 
   bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance);
   static uint32_t grow(HandleWasmMemoryObject memory, uint32_t delta,
                        JSContext* cx);
 };
--- a/js/src/wasm/WasmModule.cpp
+++ b/js/src/wasm/WasmModule.cpp
@@ -340,32 +340,81 @@ bool wasm::GetOptimizedEncodingBuildId(J
   // and cpu-id.
 
   if (!GetBuildId || !GetBuildId(buildId)) {
     return false;
   }
 
   uint32_t cpu = ObservedCPUFeatures();
 
-  if (!buildId->reserve(buildId->length() +
-                        12 /* "()" + 8 nibbles + "m[+-]" */)) {
+  if (!buildId->reserve(buildId->length() + 10 /* "()" + 8 nibbles */)) {
     return false;
   }
 
   buildId->infallibleAppend('(');
   while (cpu) {
     buildId->infallibleAppend('0' + (cpu & 0xf));
     cpu >>= 4;
   }
   buildId->infallibleAppend(')');
 
-  buildId->infallibleAppend('m');
-  buildId->infallibleAppend(wasm::IsHugeMemoryEnabled() ? '+' : '-');
+  return true;
+}
+
+RefPtr<JS::WasmModule> wasm::DeserializeModule(const uint8_t* bytecode,
+                                               size_t bytecodeLength) {
+  // We have to compile new code here so if we're fundamentally unable to
+  // compile, we have to fail. If you change this code, update the
+  // MutableCompileArgs setting below.
+  if (!BaselineCanCompile() && !IonCanCompile()) {
+    return nullptr;
+  }
+
+  MutableBytes bytecodeCopy = js_new<ShareableBytes>();
+  if (!bytecodeCopy ||
+      !bytecodeCopy->bytes.initLengthUninitialized(bytecodeLength)) {
+    return nullptr;
+  }
+
+  memcpy(bytecodeCopy->bytes.begin(), bytecode, bytecodeLength);
+
+  ScriptedCaller scriptedCaller;
+  scriptedCaller.filename = nullptr;
+  scriptedCaller.line = 0;
 
-  return true;
+  MutableCompileArgs args = js_new<CompileArgs>(std::move(scriptedCaller));
+  if (!args) {
+    return nullptr;
+  }
+
+  // The true answer to whether various flags are enabled is provided by
+  // the JSContext that originated the call that caused this deserialization
+  // attempt to happen. We don't have that context here, so we assume that
+  // shared memory is enabled; we will catch a wrong assumption later, during
+  // instantiation.
+  //
+  // (We would prefer to store this value with the Assumptions when
+  // serializing, and for the caller of the deserialization machinery to
+  // provide the value from the originating context.)
+  //
+  // Note this is guarded at the top of this function.
+
+  args->ionEnabled = IonCanCompile();
+  args->baselineEnabled = BaselineCanCompile();
+  args->sharedMemoryEnabled = true;
+
+  UniqueChars error;
+  UniqueCharsVector warnings;
+  SharedModule module = CompileBuffer(*args, *bytecodeCopy, &error, &warnings);
+  if (!module) {
+    return nullptr;
+  }
+
+  // The public interface is effectively const.
+  return RefPtr<JS::WasmModule>(const_cast<Module*>(module.get()));
 }
 
 /* virtual */
 void Module::addSizeOfMisc(MallocSizeOf mallocSizeOf,
                            Metadata::SeenSet* seenMetadata,
                            Code::SeenSet* seenCode, size_t* code,
                            size_t* data) const {
   code_->addSizeOfMiscIfNotSeen(mallocSizeOf, seenMetadata, seenCode, code,
@@ -792,18 +841,16 @@ bool Module::instantiateMemory(JSContext
     RootedObject proto(
         cx, &cx->global()->getPrototype(JSProto_WasmMemory).toObject());
     memory.set(WasmMemoryObject::create(cx, buffer, proto));
     if (!memory) {
       return false;
     }
   }
 
-  MOZ_RELEASE_ASSERT(memory->isHuge() == metadata().omitsBoundsChecks);
-
   return true;
 }
 
 bool Module::instantiateImportedTable(JSContext* cx, const TableDesc& td,
                                       Handle<WasmTableObject*> tableObj,
                                       WasmTableObjectVector* tableObjs,
                                       SharedTableVector* tables) const {
   MOZ_ASSERT(tableObj);
--- a/js/src/wasm/WasmModule.h
+++ b/js/src/wasm/WasmModule.h
@@ -230,12 +230,15 @@ class Module : public JS::WasmModule {
 
 typedef RefPtr<Module> MutableModule;
 typedef RefPtr<const Module> SharedModule;
 
 // JS API implementations:
 
 MOZ_MUST_USE bool GetOptimizedEncodingBuildId(JS::BuildIdCharVector* buildId);
 
+RefPtr<JS::WasmModule> DeserializeModule(const uint8_t* bytecode,
+                                         size_t bytecodeLength);
+
 }  // namespace wasm
 }  // namespace js
 
 #endif  // wasm_module_h
--- a/js/src/wasm/WasmProcess.cpp
+++ b/js/src/wasm/WasmProcess.cpp
@@ -16,17 +16,16 @@
  * limitations under the License.
  */
 
 #include "wasm/WasmProcess.h"
 
 #include "mozilla/BinarySearch.h"
 #include "mozilla/ScopeExit.h"
 
-#include "threading/ExclusiveData.h"
 #include "vm/MutexIDs.h"
 #include "wasm/cranelift/clifapi.h"
 #include "wasm/WasmBuiltins.h"
 #include "wasm/WasmCode.h"
 #include "wasm/WasmInstance.h"
 
 using namespace js;
 using namespace wasm;
@@ -278,66 +277,19 @@ bool wasm::InCompiledCode(void* pc) {
     return true;
   }
 
   const CodeRange* codeRange;
   uint8_t* codeBase;
   return LookupBuiltinThunk(pc, &codeRange, &codeBase);
 }
 
-/**
- * ReadLockFlag maintains a flag that can be mutated multiple times before it
- * is read, at which point it maintains the same value.
- */
-class ReadLockFlag {
- private:
-  bool enabled_;
-  bool read_;
-
- public:
-  ReadLockFlag() : enabled_(false), read_(false) {}
-
-  bool get() {
-    read_ = true;
-    return enabled_;
-  }
-
-  bool set(bool enabled) {
-    if (read_) {
-      return false;
-    }
-    enabled_ = enabled;
-    return true;
-  }
-};
-
-ExclusiveData<ReadLockFlag> sHugeMemoryEnabled(mutexid::WasmHugeMemoryEnabled);
-
-bool wasm::IsHugeMemoryEnabled() {
-  auto state = sHugeMemoryEnabled.lock();
-  return state->get();
-}
-
-bool wasm::DisableHugeMemory() {
-  auto state = sHugeMemoryEnabled.lock();
-  return state->set(false);
-}
-
 bool wasm::Init() {
   MOZ_RELEASE_ASSERT(!sProcessCodeSegmentMap);
 
-#ifdef WASM_SUPPORTS_HUGE_MEMORY
-  {
-    auto state = sHugeMemoryEnabled.lock();
-    if (!state->set(true)) {
-      return false;
-    }
-  }
-#endif
-
 #ifdef ENABLE_WASM_CRANELIFT
   cranelift_initialize();
 #endif
 
   ProcessCodeSegmentMap* map = js_new<ProcessCodeSegmentMap>();
   if (!map) {
     return false;
   }
--- a/js/src/wasm/WasmProcess.h
+++ b/js/src/wasm/WasmProcess.h
@@ -15,17 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #ifndef wasm_process_h
 #define wasm_process_h
 
 #include "mozilla/Atomics.h"
-#include "mozilla/Attributes.h"
 
 namespace js {
 namespace wasm {
 
 class Code;
 class CodeRange;
 class CodeSegment;
 
@@ -49,22 +48,16 @@ extern mozilla::Atomic<bool> CodeExists;
 
 // These methods allow to (un)register CodeSegments so they can be looked up
 // via pc in the methods described above.
 
 bool RegisterCodeSegment(const CodeSegment* cs);
 
 void UnregisterCodeSegment(const CodeSegment* cs);
 
-// Whether this process is configured to use huge memory or not.
-
-bool IsHugeMemoryEnabled();
-
-MOZ_MUST_USE bool DisableHugeMemory();
-
 // Called once before/after the last VM execution which could execute or compile
 // wasm.
 
 bool Init();
 
 void ShutDown();
 
 }  // namespace wasm
--- a/js/src/wasm/WasmTypes.cpp
+++ b/js/src/wasm/WasmTypes.cpp
@@ -29,19 +29,25 @@
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
 using mozilla::IsPowerOfTwo;
 using mozilla::MakeEnumeratedRange;
 
-// We have only tested huge memory on x64 and arm64.
+// We have only tested x64 with WASM_HUGE_MEMORY.
 
-#if defined(WASM_SUPPORTS_HUGE_MEMORY)
+#if defined(JS_CODEGEN_X64) && !defined(WASM_HUGE_MEMORY)
+#  error "Not an expected configuration"
+#endif
+
+// We have only tested WASM_HUGE_MEMORY on x64 and arm64.
+
+#if defined(WASM_HUGE_MEMORY)
 #  if !(defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64))
 #    error "Not an expected configuration"
 #  endif
 #endif
 
 // More sanity checks.
 
 static_assert(MaxMemoryInitialPages <=
@@ -51,21 +57,16 @@ static_assert(MaxMemoryInitialPages <=
 // All plausible targets must be able to do at least IEEE754 double
 // loads/stores, hence the lower limit of 8.  Some Intel processors support
 // AVX-512 loads/stores, hence the upper limit of 64.
 static_assert(MaxMemoryAccessSize >= 8, "MaxMemoryAccessSize too low");
 static_assert(MaxMemoryAccessSize <= 64, "MaxMemoryAccessSize too high");
 static_assert((MaxMemoryAccessSize & (MaxMemoryAccessSize - 1)) == 0,
               "MaxMemoryAccessSize is not a power of two");
 
-#if defined(WASM_SUPPORTS_HUGE_MEMORY)
-static_assert(HugeMappedSize > ArrayBufferObject::MaxBufferByteLength,
-              "Normal array buffer could be confused with huge memory");
-#endif
-
 Val::Val(const LitVal& val) {
   type_ = val.type();
   switch (type_.code()) {
     case ValType::I32:
       u.i32_ = val.i32();
       return;
     case ValType::F32:
       u.f32_ = val.f32();
@@ -585,43 +586,47 @@ uint32_t wasm::RoundUpToNextValidARMImme
     i = (i + 0x00ffffff) & ~0x00ffffff;
   }
 
   MOZ_ASSERT(IsValidARMImmediate(i));
 
   return i;
 }
 
+#ifndef WASM_HUGE_MEMORY
+
 bool wasm::IsValidBoundsCheckImmediate(uint32_t i) {
-#ifdef JS_CODEGEN_ARM
+#  ifdef JS_CODEGEN_ARM
   return IsValidARMImmediate(i);
-#else
+#  else
   return true;
-#endif
+#  endif
 }
 
 size_t wasm::ComputeMappedSize(uint32_t maxSize) {
   MOZ_ASSERT(maxSize % PageSize == 0);
 
   // It is the bounds-check limit, not the mapped size, that gets baked into
   // code. Thus round up the maxSize to the next valid immediate value
   // *before* adding in the guard page.
 
-#ifdef JS_CODEGEN_ARM
+#  ifdef JS_CODEGEN_ARM
   uint32_t boundsCheckLimit = RoundUpToNextValidARMImmediate(maxSize);
-#else
+#  else
   uint32_t boundsCheckLimit = maxSize;
-#endif
+#  endif
   MOZ_ASSERT(IsValidBoundsCheckImmediate(boundsCheckLimit));
 
   MOZ_ASSERT(boundsCheckLimit % gc::SystemPageSize() == 0);
   MOZ_ASSERT(GuardSize % gc::SystemPageSize() == 0);
   return boundsCheckLimit + GuardSize;
 }
 
+#endif  // WASM_HUGE_MEMORY
+
 /* static */
 DebugFrame* DebugFrame::from(Frame* fp) {
   MOZ_ASSERT(fp->tls->instance->code().metadata().debugEnabled);
   auto* df =
       reinterpret_cast<DebugFrame*>((uint8_t*)fp - DebugFrame::offsetOfFrame());
   MOZ_ASSERT(fp->instance() == df->instance());
   return df;
 }
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -2288,74 +2288,61 @@ static const unsigned PageSize = 64 * 10
 // check limit. If the memory access is unaligned, this means that, even if the
 // bounds check succeeds, a few bytes of the access can extend past the end of
 // memory. To guard against this, extra space is included in the guard region to
 // catch the overflow. MaxMemoryAccessSize is a conservative approximation of
 // the maximum guard space needed to catch all unaligned overflows.
 
 static const unsigned MaxMemoryAccessSize = LitVal::sizeofLargestValue();
 
-#ifdef WASM_SUPPORTS_HUGE_MEMORY
-
-// On WASM_SUPPORTS_HUGE_MEMORY platforms, every asm.js or WebAssembly memory
+#ifdef WASM_HUGE_MEMORY
+
+// On WASM_HUGE_MEMORY platforms, every asm.js or WebAssembly memory
 // unconditionally allocates a huge region of virtual memory of size
 // wasm::HugeMappedSize. This allows all memory resizing to work without
 // reallocation and provides enough guard space for all offsets to be folded
 // into memory accesses.
 
-static const uint64_t HugeIndexRange = uint64_t(UINT32_MAX) + 1;
-static const uint64_t HugeOffsetGuardLimit = uint64_t(INT32_MAX) + 1;
-static const uint64_t HugeUnalignedGuardPage = PageSize;
+static const uint64_t IndexRange = uint64_t(UINT32_MAX) + 1;
+static const uint64_t OffsetGuardLimit = uint64_t(INT32_MAX) + 1;
+static const uint64_t UnalignedGuardPage = PageSize;
 static const uint64_t HugeMappedSize =
-    HugeIndexRange + HugeOffsetGuardLimit + HugeUnalignedGuardPage;
-
-static_assert(MaxMemoryAccessSize <= HugeUnalignedGuardPage,
+    IndexRange + OffsetGuardLimit + UnalignedGuardPage;
+
+static_assert(MaxMemoryAccessSize <= UnalignedGuardPage,
               "rounded up to static page size");
-static_assert(HugeOffsetGuardLimit < UINT32_MAX,
-              "checking for overflow against OffsetGuardLimit is enough.");
-
-#endif
-
-// On !WASM_SUPPORTS_HUGE_MEMORY platforms:
+
+#else  // !WASM_HUGE_MEMORY
+
+// On !WASM_HUGE_MEMORY platforms:
 //  - To avoid OOM in ArrayBuffer::prepareForAsmJS, asm.js continues to use the
 //    original ArrayBuffer allocation which has no guard region at all.
 //  - For WebAssembly memories, an additional GuardSize is mapped after the
 //    accessible region of the memory to catch folded (base+offset) accesses
 //    where `offset < OffsetGuardLimit` as well as the overflow from unaligned
 //    accesses, as described above for MaxMemoryAccessSize.
 
 static const size_t OffsetGuardLimit = PageSize - MaxMemoryAccessSize;
 static const size_t GuardSize = PageSize;
 
-static_assert(MaxMemoryAccessSize < GuardSize,
-              "Guard page handles partial out-of-bounds");
-static_assert(OffsetGuardLimit < UINT32_MAX,
-              "checking for overflow against OffsetGuardLimit is enough.");
-
-static constexpr bool GetOffsetGuardLimit(bool hugeMemory) {
-#ifdef WASM_SUPPORTS_HUGE_MEMORY
-  return hugeMemory ? HugeOffsetGuardLimit : OffsetGuardLimit;
-#else
-  return OffsetGuardLimit;
-#endif
-}
-
 // Return whether the given immediate satisfies the constraints of the platform
 // (viz. that, on ARM, IsValidARMImmediate).
 
 extern bool IsValidBoundsCheckImmediate(uint32_t i);
 
 // For a given WebAssembly/asm.js max size, return the number of bytes to
 // map which will necessarily be a multiple of the system page size and greater
 // than maxSize. For a returned mappedSize:
 //   boundsCheckLimit = mappedSize - GuardSize
 //   IsValidBoundsCheckImmediate(boundsCheckLimit)
 
 extern size_t ComputeMappedSize(uint32_t maxSize);
 
+#endif  // WASM_HUGE_MEMORY
+
 // wasm::Frame represents the bytes pushed by the call instruction and the fixed
 // prologue generated by wasm::GenerateCallablePrologue.
 //
 // Across all architectures it is assumed that, before the call instruction, the
 // stack pointer is WasmStackAlignment-aligned. Thus after the prologue, and
 // before the function has made its stack reservation, the stack alignment is
 // sizeof(Frame) % WasmStackAlignment.
 //
--- a/js/src/wasm/WasmValidate.cpp
+++ b/js/src/wasm/WasmValidate.cpp
@@ -2896,21 +2896,20 @@ bool wasm::DecodeModuleTail(Decoder& d, 
 // Validate algorithm.
 
 bool wasm::Validate(JSContext* cx, const ShareableBytes& bytecode,
                     UniqueChars* error) {
   Decoder d(bytecode.bytes, 0, error);
 
   bool gcTypesConfigured = HasGcSupport(cx);
   bool refTypesConfigured = HasReftypesSupport(cx);
-  bool hugeMemory = false;
-
-  CompilerEnvironment compilerEnv(
-      CompileMode::Once, Tier::Optimized, OptimizedBackend::Ion,
-      DebugEnabled::False, refTypesConfigured, gcTypesConfigured, hugeMemory);
+
+  CompilerEnvironment compilerEnv(CompileMode::Once, Tier::Optimized,
+                                  OptimizedBackend::Ion, DebugEnabled::False,
+                                  refTypesConfigured, gcTypesConfigured);
   ModuleEnvironment env(
       &compilerEnv,
       cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled()
           ? Shareable::True
           : Shareable::False);
   if (!DecodeModuleEnvironment(d, &env)) {
     return false;
   }
--- a/js/src/wasm/WasmValidate.h
+++ b/js/src/wasm/WasmValidate.h
@@ -66,32 +66,31 @@ struct CompilerEnvironment {
     // Value in the other two states.
     struct {
       CompileMode mode_;
       Tier tier_;
       OptimizedBackend optimizedBackend_;
       DebugEnabled debug_;
       bool refTypes_;
       bool gcTypes_;
-      bool hugeMemory_;
     };
   };
 
  public:
   // Retain a reference to the CompileArgs. A subsequent computeParameters()
   // will compute all parameters from the CompileArgs and additional values.
   explicit CompilerEnvironment(const CompileArgs& args);
 
   // Save the provided values for mode, tier, and debug, and the initial value
   // for gcTypes/refTypes. A subsequent computeParameters() will compute the
   // final value of gcTypes/refTypes.
   CompilerEnvironment(CompileMode mode, Tier tier,
                       OptimizedBackend optimizedBackend,
                       DebugEnabled debugEnabled, bool refTypesConfigured,
-                      bool gcTypesConfigured, bool hugeMemory);
+                      bool gcTypesConfigured);
 
   // Compute any remaining compilation parameters.
   void computeParameters(Decoder& d, bool gcFeatureOptIn);
 
   // Compute any remaining compilation parameters.  Only use this method if
   // the CompilerEnvironment was created with values for mode, tier, and
   // debug.
   void computeParameters(bool gcFeatureOptIn);
@@ -116,20 +115,16 @@ struct CompilerEnvironment {
   bool gcTypes() const {
     MOZ_ASSERT(isComputed());
     return gcTypes_;
   }
   bool refTypes() const {
     MOZ_ASSERT(isComputed());
     return refTypes_;
   }
-  bool hugeMemory() const {
-    MOZ_ASSERT(isComputed());
-    return hugeMemory_;
-  }
 };
 
 // ModuleEnvironment contains all the state necessary to process or render
 // functions, and all of the state necessary to validate all aspects of the
 // functions.
 //
 // A ModuleEnvironment is created by decoding all the sections before the wasm
 // code section and then used immutably during. When compiling a module using a
@@ -210,19 +205,16 @@ struct ModuleEnvironment {
   bool gcTypesEnabled() const { return compilerEnv->gcTypes(); }
   bool refTypesEnabled() const { return compilerEnv->refTypes(); }
   bool usesMemory() const { return memoryUsage != MemoryUsage::None; }
   bool usesSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }
   bool isAsmJS() const { return kind == ModuleKind::AsmJS; }
   bool debugEnabled() const {
     return compilerEnv->debug() == DebugEnabled::True;
   }
-  bool hugeMemoryEnabled() const {
-    return !isAsmJS() && compilerEnv->hugeMemory();
-  }
   bool funcIsImport(uint32_t funcIndex) const {
     return funcIndex < funcImportGlobalDataOffsets.length();
   }
   bool isRefSubtypeOf(ValType one, ValType two) const {
     MOZ_ASSERT(one.isReference());
     MOZ_ASSERT(two.isReference());
 #if defined(ENABLE_WASM_REFTYPES)
 #  if defined(ENABLE_WASM_GC)
--- a/js/xpconnect/src/XPCJSContext.cpp
+++ b/js/xpconnect/src/XPCJSContext.cpp
@@ -820,19 +820,16 @@ static void LoadStartupJSPrefs(XPCJSCont
       JS_OPTIONS_DOT_STR "spectre.object_mitigations.misc");
   bool spectreStringMitigations =
       Preferences::GetBool(JS_OPTIONS_DOT_STR "spectre.string_mitigations");
   bool spectreValueMasking =
       Preferences::GetBool(JS_OPTIONS_DOT_STR "spectre.value_masking");
   bool spectreJitToCxxCalls =
       Preferences::GetBool(JS_OPTIONS_DOT_STR "spectre.jit_to_C++_calls");
 
-  bool disableWasmHugeMemory =
-      Preferences::GetBool(JS_OPTIONS_DOT_STR "wasm_disable_huge_memory");
-
   nsCOMPtr<nsIXULRuntime> xr = do_GetService("@mozilla.org/xre/runtime;1");
   if (xr) {
     bool safeMode = false;
     xr->GetInSafeMode(&safeMode);
     if (safeMode) {
       useBaselineInterp = false;
       useBaselineJit = false;
       useIon = false;
@@ -877,20 +874,16 @@ static void LoadStartupJSPrefs(XPCJSCont
                                 JSJITCOMPILER_SPECTRE_OBJECT_MITIGATIONS_MISC,
                                 spectreObjectMitigationsMisc);
   JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_SPECTRE_STRING_MITIGATIONS,
                                 spectreStringMitigations);
   JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_SPECTRE_VALUE_MASKING,
                                 spectreValueMasking);
   JS_SetGlobalJitCompilerOption(cx, JSJITCOMPILER_SPECTRE_JIT_TO_CXX_CALLS,
                                 spectreJitToCxxCalls);
-  if (disableWasmHugeMemory) {
-    bool disabledHugeMemory = JS::DisableWasmHugeMemory();
-    MOZ_RELEASE_ASSERT(disabledHugeMemory);
-  }
 }
 
 static void ReloadPrefsCallback(const char* pref, XPCJSContext* xpccx) {
   // Note: Prefs that require a restart are handled in LoadStartupJSPrefs above.
 
   JSContext* cx = xpccx->Context();
 
   bool useAsmJS = Preferences::GetBool(JS_OPTIONS_DOT_STR "asmjs");