Bug 1287967 - Baldr: Add current_memory and grow_memory (r=luke,sunfish)
authorDimo <dbounov@mozilla.com>
Mon, 29 Aug 2016 21:30:04 -0500
changeset 311792 f6fddb22a8b58e5f3dcb6264e6765044b2e0d538
parent 311791 396585308c33461063ee9ab01c7063c6e526874b
child 311793 0b638e24de7ef09f9165a56b0f65c8ccef5218bd
push id: unknown
push user: unknown
push date: unknown
reviewers: luke, sunfish
bugs: 1287967
milestone: 51.0a1
Bug 1287967 - Baldr: Add current_memory and grow_memory (r=luke,sunfish) MozReview-Commit-ID: 8whwVTSYV9a
CLOBBER
dom/promise/tests/test_webassembly_compile.html
js/src/asmjs/AsmJS.cpp
js/src/asmjs/WasmAST.h
js/src/asmjs/WasmBaselineCompile.cpp
js/src/asmjs/WasmBinaryIterator.cpp
js/src/asmjs/WasmBinaryIterator.h
js/src/asmjs/WasmBinaryToAST.cpp
js/src/asmjs/WasmBinaryToExperimentalText.cpp
js/src/asmjs/WasmBinaryToText.cpp
js/src/asmjs/WasmCode.cpp
js/src/asmjs/WasmCode.h
js/src/asmjs/WasmCompile.cpp
js/src/asmjs/WasmGenerator.h
js/src/asmjs/WasmInstance.cpp
js/src/asmjs/WasmInstance.h
js/src/asmjs/WasmIonCompile.cpp
js/src/asmjs/WasmJS.cpp
js/src/asmjs/WasmModule.cpp
js/src/asmjs/WasmSignalHandlers.cpp
js/src/asmjs/WasmSignalHandlers.h
js/src/asmjs/WasmTextToBinary.cpp
js/src/asmjs/WasmTypes.cpp
js/src/asmjs/WasmTypes.h
js/src/builtin/TestingFunctions.cpp
js/src/jit-test/tests/asm.js/testHeapAccess.js
js/src/jit-test/tests/asm.js/testJumpRange.js
js/src/jit-test/tests/asm.js/testTimeout-deactivate-reactivate-signals.js
js/src/jit-test/tests/asm.js/testTimeout1-nosignals.js
js/src/jit-test/tests/asm.js/testTimeout2-nosignals.js
js/src/jit-test/tests/asm.js/testTimeout3-nosignals.js
js/src/jit-test/tests/asm.js/testTimeout4-nosignals.js
js/src/jit-test/tests/ion/iloop-nosignaling.js
js/src/jit-test/tests/wasm/basic-grow-memory.js
js/src/jit-test/tests/wasm/basic-memory.js
js/src/jit-test/tests/wasm/import-export.js
js/src/jit-test/tests/wasm/jsapi.js
js/src/jit-test/tests/wasm/signals-enabled.js
js/src/jit-test/tests/wasm/spec/grow-memory.wast
js/src/jit-test/tests/wasm/spec/grow-memory.wast.js
js/src/jit/JitOptions.cpp
js/src/jit/JitOptions.h
js/src/jit/Lowering.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MIRGenerator.h
js/src/jit/MIRGraph.cpp
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/RegisterSets.h
js/src/jit/arm/Simulator-arm.cpp
js/src/jit/arm/Simulator-arm.h
js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
js/src/jit/arm64/vixl/Simulator-vixl.h
js/src/jit/mips32/Simulator-mips32.cpp
js/src/jit/mips32/Simulator-mips32.h
js/src/jit/mips64/Simulator-mips64.cpp
js/src/jit/mips64/Simulator-mips64.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jsapi.cpp
js/src/jsapi.h
js/src/old-configure.in
js/src/vm/ArrayBufferObject-inl.h
js/src/vm/ArrayBufferObject.cpp
js/src/vm/ArrayBufferObject.h
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
js/src/vm/SharedArrayObject.cpp
--- a/CLOBBER
+++ b/CLOBBER
@@ -17,9 +17,9 @@
 #
 # Modifying this file will now automatically clobber the buildbot machines \o/
 #
 
 # Are you updating CLOBBER because you think it's needed for your WebIDL
 # changes to stick? As of bug 928195, this shouldn't be necessary! Please
 # don't change CLOBBER for WebIDL changes any more.
 
-Bug 1294660 - CSS properties regeneration needs a clobber
+Bug 1287967 - Changing js/src/old-configure.in seems to require clobber
--- a/dom/promise/tests/test_webassembly_compile.html
+++ b/dom/promise/tests/test_webassembly_compile.html
@@ -6,33 +6,37 @@
 <head>
   <title>WebAssembly.compile Test</title>
   <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
 </head>
 <body>
 <script>
 const wasmTextToBinary = SpecialPowers.unwrap(SpecialPowers.Cu.getJSTestingFunctions().wasmTextToBinary);
+const wasmIsSupported = SpecialPowers.Cu.getJSTestingFunctions().wasmIsSupported
 const fooModuleCode = wasmTextToBinary(`(module
   (func $foo (result i32) (i32.const 42))
   (export "foo" $foo)
 )`, 'new-format');
 
 function checkFooModule(m) {
   ok(m instanceof WebAssembly.Module, "got a module");
   var i = new WebAssembly.Instance(m);
   ok(i instanceof WebAssembly.Instance, "got an instance");
   ok(i.exports.foo() === 42, "got 42");
 }
 
 function propertiesExist() {
   ok(WebAssembly, "WebAssembly object should exist");
   ok(WebAssembly.compile, "WebAssembly.compile function should exist");
 
-  runTest();
+  if (!wasmIsSupported())
+    SimpleTest.finish();
+  else
+    runTest();
 }
 
 function compileFail() {
   WebAssembly.compile().then(
     () => { ok(false, "should have failed"); runTest() }
   ).catch(
     err => { ok(err instanceof TypeError, "empty compile failed"); runTest() }
   );
--- a/js/src/asmjs/AsmJS.cpp
+++ b/js/src/asmjs/AsmJS.cpp
@@ -1767,17 +1767,17 @@ class MOZ_STACK_CLASS ModuleValidator
             if (!scriptedCaller.filename)
                 return false;
         }
 
         CompileArgs args;
         if (!args.initFromContext(cx_, Move(scriptedCaller)))
             return false;
 
-        auto genData = MakeUnique<ModuleGeneratorData>(args.assumptions.usesSignal, ModuleKind::AsmJS);
+        auto genData = MakeUnique<ModuleGeneratorData>(ModuleKind::AsmJS);
         if (!genData ||
             !genData->sigs.resize(MaxSigs) ||
             !genData->funcSigs.resize(MaxFuncs) ||
             !genData->funcImports.resize(MaxImports) ||
             !genData->tables.resize(MaxTables) ||
             !genData->asmJSSigToTableIndex.resize(MaxSigs))
         {
             return false;
@@ -7801,18 +7801,17 @@ CheckBuffer(JSContext* cx, const AsmJSMe
                         metadata.minMemoryLength));
         if (!msg)
             return false;
         return LinkFail(cx, msg.get());
     }
 
     if (buffer->is<ArrayBufferObject>()) {
         Rooted<ArrayBufferObject*> abheap(cx, &buffer->as<ArrayBufferObject>());
-        bool useSignalHandlers = metadata.assumptions.usesSignal.forOOB;
-        if (!ArrayBufferObject::prepareForAsmJS(cx, abheap, useSignalHandlers))
+        if (!ArrayBufferObject::prepareForAsmJS(cx, abheap))
             return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
     }
 
     return true;
 }
 
 static bool
 GetImports(JSContext* cx, const AsmJSMetadata& metadata, HandleValue globalVal,
@@ -8812,44 +8811,29 @@ js::AsmJSFunctionToString(JSContext* cx,
             return nullptr;
         if (!out.append(src))
             return nullptr;
     }
 
     return out.finishString();
 }
 
-/*****************************************************************************/
-// asm.js heap
-
+// The asm.js valid heap lengths are precisely the WASM valid heap lengths for ARM
+// that are greater than or equal to MinHeapLength.
 static const size_t MinHeapLength = PageSize;
 
-// From the asm.js spec Linking section:
-//  the heap object's byteLength must be either
-//    2^n for n in [12, 24)
-//  or
-//    2^24 * n for n >= 1.
-
 bool
 js::IsValidAsmJSHeapLength(uint32_t length)
 {
-    bool valid = length >= MinHeapLength &&
-                 (IsPowerOfTwo(length) ||
-                  (length & 0x00ffffff) == 0);
-
-    MOZ_ASSERT_IF(valid, length % PageSize == 0);
-    MOZ_ASSERT_IF(valid, length == RoundUpToNextValidAsmJSHeapLength(length));
-
-    return valid;
+    if (length < MinHeapLength)
+        return false;
+
+    return wasm::IsValidARMLengthImmediate(length);
 }
 
 uint32_t
 js::RoundUpToNextValidAsmJSHeapLength(uint32_t length)
 {
     if (length <= MinHeapLength)
         return MinHeapLength;
 
-    if (length <= 16 * 1024 * 1024)
-        return mozilla::RoundUpPow2(length);
-
-    MOZ_ASSERT(length <= 0xff000000);
-    return (length + 0x00ffffff) & ~0x00ffffff;
-}
+    return wasm::RoundUpToNextValidARMLengthImmediate(length);
+}
--- a/js/src/asmjs/WasmAST.h
+++ b/js/src/asmjs/WasmAST.h
@@ -194,23 +194,23 @@ enum class AstExprKind
     CallIndirect,
     ComparisonOperator,
     Const,
     ConversionOperator,
     GetGlobal,
     GetLocal,
     If,
     Load,
-    Nop,
     Return,
     SetGlobal,
     SetLocal,
     Store,
     TernaryOperator,
     UnaryOperator,
+    NullaryOperator,
     Unreachable
 };
 
 class AstExpr : public AstNode
 {
     const AstExprKind kind_;
 
   protected:
@@ -223,24 +223,16 @@ class AstExpr : public AstNode
 
     template <class T>
     T& as() {
         MOZ_ASSERT(kind() == T::Kind);
         return static_cast<T&>(*this);
     }
 };
 
-struct AstNop : AstExpr
-{
-    static const AstExprKind Kind = AstExprKind::Nop;
-    AstNop()
-      : AstExpr(AstExprKind::Nop)
-    {}
-};
-
 struct AstUnreachable : AstExpr
 {
     static const AstExprKind Kind = AstExprKind::Unreachable;
     AstUnreachable()
       : AstExpr(AstExprKind::Unreachable)
     {}
 };
 
@@ -828,16 +820,30 @@ class AstModule : public AstNode
     bool append(AstGlobal* glob) {
         return globals_.append(glob);
     }
     const AstGlobalVector& globals() const {
         return globals_;
     }
 };
 
+class AstNullaryOperator final : public AstExpr
+{
+    Expr expr_;
+
+  public:
+    static const AstExprKind Kind = AstExprKind::NullaryOperator;
+    explicit AstNullaryOperator(Expr expr)
+      : AstExpr(Kind),
+        expr_(expr)
+    {}
+
+    Expr expr() const { return expr_; }
+};
+
 class AstUnaryOperator final : public AstExpr
 {
     Expr expr_;
     AstExpr* op_;
 
   public:
     static const AstExprKind Kind = AstExprKind::UnaryOperator;
     explicit AstUnaryOperator(Expr expr, AstExpr* op)
--- a/js/src/asmjs/WasmBaselineCompile.cpp
+++ b/js/src/asmjs/WasmBaselineCompile.cpp
@@ -91,16 +91,17 @@
  *   Or something like that.  Wasm makes this simple.  Static
  *   assignments are desirable because they are not flushed to memory
  *   by the pre-block sync() call.)
  */
 
 #include "asmjs/WasmBaselineCompile.h"
 #include "asmjs/WasmBinaryIterator.h"
 #include "asmjs/WasmGenerator.h"
+#include "asmjs/WasmSignalHandlers.h"
 #include "jit/AtomicOp.h"
 #include "jit/IonTypes.h"
 #include "jit/JitAllocPolicy.h"
 #include "jit/Label.h"
 #include "jit/MacroAssembler.h"
 #include "jit/MIR.h"
 #include "jit/Registers.h"
 #include "jit/RegisterSets.h"
@@ -445,16 +446,18 @@ class BaseCompiler
     int32_t                     localSize_;      // Size of local area in bytes (stable after beginFunction)
     int32_t                     varLow_;         // Low byte offset of local area for true locals (not parameters)
     int32_t                     varHigh_;        // High byte offset + 1 of local area for true locals
     int32_t                     maxFramePushed_; // Max value of masm.framePushed() observed
     bool                        deadCode_;       // Flag indicating we should decode & discard the opcode
     ValTypeVector               SigDD_;
     ValTypeVector               SigD_;
     ValTypeVector               SigF_;
+    ValTypeVector               SigI_;
+    ValTypeVector               Sig_;
     Label                       returnLabel_;
     Label                       outOfLinePrologue_;
     Label                       bodyLabel_;
 
     FuncCompileResults&         compileResults_;
     MacroAssembler&             masm;            // No '_' suffix - too tedious...
 
     AllocatableGeneralRegisterSet availGPR_;
@@ -2006,16 +2009,20 @@ class BaseCompiler
                 MOZ_CRASH("BaseCompiler platform hook: startCallArgs");
 #endif
             }
         } else if (stackArgAreaSize > 0) {
             masm.reserveStack(stackArgAreaSize);
         }
     }
 
+    const ABIArg reserveArgument(FunctionCall& call) {
+        return call.abi_.next(MIRType::Pointer);
+    }
+
     // TODO / OPTIMIZE: Note passArg is used only in one place.  I'm
     // not saying we should manually inline it, but we could hoist the
     // dispatch into the caller and have type-specific implementations
     // of passArg: passArgI32(), etc.  Then those might be inlined, at
     // least in PGO builds.
     //
     // The bulk of the work here (60%) is in the next() call, though.
     //
@@ -2175,27 +2182,31 @@ class BaseCompiler
         masm.loadWasmPinnedRegsFromTls();
     }
 
     void builtinCall(SymbolicAddress builtin, const FunctionCall& call)
     {
         callSymbolic(builtin, call);
     }
 
+    void builtinInstanceMethodCall(SymbolicAddress builtin, const ABIArg& instanceArg,
+                                   const FunctionCall& call)
+    {
+        CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Register);
+        masm.wasmCallBuiltinInstanceMethod(instanceArg, builtin);
+    }
+
     //////////////////////////////////////////////////////////////////////
     //
     // Sundry low-level code generators.
 
     void addInterruptCheck()
     {
-        if (mg_.usesSignal.forInterrupt)
-            return;
-
-        // FIXME - implement this.
-        MOZ_CRASH("Only interrupting signal handlers supported");
+        // Always use signals for interrupts with Asm.JS/Wasm
+        MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
     }
 
     void jumpTable(LabelVector& labels) {
 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
         for (uint32_t i = 0; i < labels.length(); i++) {
             CodeLabel cl;
             masm.writeCodePointer(cl.patchAt());
             cl.target()->bind(labels[i]->offset());
@@ -2897,52 +2908,38 @@ class BaseCompiler
 
     bool needsBoundsCheckBranch(const MWasmMemoryAccess& access) const {
         // A heap access needs a bounds-check branch if we're not relying on signal
         // handlers to catch errors, and if it's not proven to be within bounds.
         // We use signal-handlers on x64, but on x86 there isn't enough address
         // space for a guard region.  Also, on x64 the atomic loads and stores
         // can't (yet) use the signal handlers.
 
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-        if (mg_.usesSignal.forOOB && !access.isAtomicAccess())
-            return false;
+#ifdef WASM_HUGE_MEMORY
+        return false;
+#else
+        return access.needsBoundsCheck();
 #endif
-
-        return access.needsBoundsCheck();
     }
 
     bool throwOnOutOfBounds(const MWasmMemoryAccess& access) {
-        return access.isAtomicAccess() || !isCompilingAsmJS();
+        return !isCompilingAsmJS();
     }
 
     // For asm.js code only: If we have a non-zero offset, it's possible that
     // |ptr| itself is out of bounds, while adding the offset computes an
     // in-bounds address. To catch this case, we need a second branch, which we
     // emit out of line since it's unlikely to be needed in normal programs.
     // For this, we'll generate an OffsetBoundsCheck OOL stub.
 
     bool needsOffsetBoundsCheck(const MWasmMemoryAccess& access) const {
         return isCompilingAsmJS() && access.offset() != 0;
     }
 
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-
-# if defined(JS_CODEGEN_X64)
-    // TODO / CLEANUP - copied from CodeGenerator-x64.cpp, should share.
-
-    MemoryAccess
-    WasmMemoryAccess(uint32_t before)
-    {
-        if (isCompilingAsmJS())
-            return MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset);
-        return MemoryAccess(before, MemoryAccess::Throw, MemoryAccess::DontWrapOffset);
-    }
-# endif
-
     class OffsetBoundsCheck : public OutOfLineCode
     {
         Label* maybeOutOfBounds;
         Register ptrReg;
         int32_t offset;
 
       public:
         OffsetBoundsCheck(Label* maybeOutOfBounds, Register ptrReg, int32_t offset)
@@ -3141,17 +3138,18 @@ class BaseCompiler
               case Scalar::Uint32:    masm.movl(srcAddr, dest.i32().reg); break;
               case Scalar::Float32:   masm.loadFloat32(srcAddr, dest.f32().reg); break;
               case Scalar::Float64:   masm.loadDouble(srcAddr, dest.f64().reg); break;
               default:
                 MOZ_CRASH("Compiler bug: Unexpected array type");
             }
         }
 
-        masm.append(WasmMemoryAccess(before));
+        if (isCompilingAsmJS())
+            masm.append(MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset));
         // TODO: call verifyHeapAccessDisassembly somehow
 # elif defined(JS_CODEGEN_X86)
         Operand srcAddr(ptr.reg, access.offset());
 
         if (dest.tag == AnyReg::I64)
             MOZ_CRASH("Not implemented: I64 support");
 
         bool mustMove = access.byteSize() == 1 && !singleByteRegs_.has(dest.i32().reg);
@@ -3221,17 +3219,18 @@ class BaseCompiler
           case Scalar::Uint32:       masm.movl(intReg, dstAddr); break;
           case Scalar::Int64:        masm.movq(intReg, dstAddr); break;
           case Scalar::Float32:      masm.storeFloat32(src.f32().reg, dstAddr); break;
           case Scalar::Float64:      masm.storeDouble(src.f64().reg, dstAddr); break;
           default:
             MOZ_CRASH("Compiler bug: Unexpected array type");
         }
 
-        masm.append(WasmMemoryAccess(before));
+        if (isCompilingAsmJS())
+            masm.append(MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset));
         // TODO: call verifyHeapAccessDisassembly somehow
 # elif defined(JS_CODEGEN_X86)
         Operand dstAddr(ptr.reg, access.offset());
 
         if (src.tag == AnyReg::I64)
             MOZ_CRASH("Not implemented: I64 support");
 
         bool didMove = false;
@@ -3492,16 +3491,18 @@ class BaseCompiler
     void emitConvertU64ToF32();
     void emitConvertF32ToF64();
     void emitConvertI32ToF64();
     void emitConvertU32ToF64();
     void emitConvertI64ToF64();
     void emitConvertU64ToF64();
     void emitReinterpretI32AsF32();
     void emitReinterpretI64AsF64();
+    MOZ_MUST_USE bool emitGrowMemory(uint32_t callOffset);
+    MOZ_MUST_USE bool emitCurrentMemory(uint32_t callOffset);
 };
 
 void
 BaseCompiler::emitAddI32()
 {
     int32_t c;
     if (popConstI32(c)) {
         RegI32 r = popI32();
@@ -6081,16 +6082,82 @@ BaseCompiler::emitStoreWithCoercion(ValT
     }
     else
         MOZ_CRASH("unexpected coerced store");
 
     return true;
 }
 
 bool
+BaseCompiler::emitGrowMemory(uint32_t callOffset)
+{
+    if (deadCode_)
+        return skipCall(SigI_, ExprType::I32);
+
+    uint32_t lineOrBytecode = readCallSiteLineOrBytecode(callOffset);
+
+    sync();
+
+    uint32_t numArgs = 1;
+    size_t stackSpace = stackConsumed(numArgs);
+
+    FunctionCall baselineCall(lineOrBytecode);
+    beginCall(baselineCall, EscapesSandbox(true), IsBuiltinCall(true));
+
+    ABIArg instanceArg = reserveArgument(baselineCall);
+
+    if (!emitCallArgs(SigI_, baselineCall))
+        return false;
+
+    if (!iter_.readCallReturn(ExprType::I32))
+        return false;
+
+    builtinInstanceMethodCall(SymbolicAddress::GrowMemory, instanceArg, baselineCall);
+
+    endCall(baselineCall);
+
+    popValueStackBy(numArgs);
+    masm.freeStack(stackSpace);
+
+    pushReturned(baselineCall, ExprType::I32);
+
+    return true;
+}
+
+bool
+BaseCompiler::emitCurrentMemory(uint32_t callOffset)
+{
+    if (deadCode_)
+        return skipCall(Sig_, ExprType::I32);
+
+    uint32_t lineOrBytecode = readCallSiteLineOrBytecode(callOffset);
+
+    sync();
+
+    FunctionCall baselineCall(lineOrBytecode);
+    beginCall(baselineCall, EscapesSandbox(true), IsBuiltinCall(true));
+
+    ABIArg instanceArg = reserveArgument(baselineCall);
+
+    if (!emitCallArgs(Sig_, baselineCall))
+        return false;
+
+    if (!iter_.readCallReturn(ExprType::I32))
+        return false;
+
+    builtinInstanceMethodCall(SymbolicAddress::CurrentMemory, instanceArg, baselineCall);
+
+    endCall(baselineCall);
+
+    pushReturned(baselineCall, ExprType::I32);
+
+    return true;
+}
+
+bool
 BaseCompiler::emitBody()
 {
     uint32_t overhead = 0;
 
     for (;;) {
 
         Nothing unused_a, unused_b;
 
@@ -6139,17 +6206,17 @@ BaseCompiler::emitBody()
         uint32_t exprOffset = iter_.currentOffset();
 
         Expr expr;
         CHECK(iter_.readExpr(&expr));
 
         switch (expr) {
           // Control opcodes
           case Expr::Nop:
-            CHECK(iter_.readNullary());
+            CHECK(iter_.readNullary(ExprType::Void));
             if (!deadCode_)
                 pushVoid();
             NEXT();
           case Expr::Block:
             CHECK_NEXT(emitBlock());
           case Expr::Loop:
             CHECK_NEXT(emitLoop());
           case Expr::If:
@@ -6629,21 +6696,21 @@ BaseCompiler::emitBody()
           // Atomics
           case Expr::I32AtomicsLoad:
           case Expr::I32AtomicsStore:
           case Expr::I32AtomicsBinOp:
           case Expr::I32AtomicsCompareExchange:
           case Expr::I32AtomicsExchange:
             MOZ_CRASH("Unimplemented Atomics");
 
-          // Future opcodes
+          // Memory Related
+          case Expr::GrowMemory:
+            CHECK_NEXT(emitGrowMemory(exprOffset));
           case Expr::CurrentMemory:
-            MOZ_CRASH("Unimplemented CurrentMemory");
-          case Expr::GrowMemory:
-            MOZ_CRASH("Unimplemented GrowMemory");
+            CHECK_NEXT(emitCurrentMemory(exprOffset));
 
           case Expr::Limit:;
         }
 
         MOZ_CRASH("unexpected wasm opcode");
 
 #undef CHECK
 #undef NEXT
@@ -6779,16 +6846,18 @@ bool
 BaseCompiler::init()
 {
     if (!SigDD_.append(ValType::F64) || !SigDD_.append(ValType::F64))
         return false;
     if (!SigD_.append(ValType::F64))
         return false;
     if (!SigF_.append(ValType::F32))
         return false;
+    if (!SigI_.append(ValType::I32))
+        return false;
 
     const ValTypeVector& args = func_.sig().args();
 
     // localInfo_ contains an entry for every local in locals_, followed by
     // entries for special locals. Currently the only special local is the TLS
     // pointer.
     tlsSlot_ = locals_.length();
     if (!localInfo_.resize(locals_.length() + 1))
@@ -6881,20 +6950,21 @@ volatileReturnGPR()
 LiveRegisterSet BaseCompiler::VolatileReturnGPR = volatileReturnGPR();
 
 } // wasm
 } // js
 
 bool
 js::wasm::BaselineCanCompile(const FunctionGenerator* fg)
 {
+    // On all platforms we require signals for AsmJS/Wasm.
+    // If we made it this far we must have signals.
+    MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
+
 #if defined(JS_CODEGEN_X64)
-    if (!fg->usesSignalsForInterrupts())
-        return false;
-
     if (fg->usesAtomics())
         return false;
 
     if (fg->usesSimd())
         return false;
 
     return true;
 #else
--- a/js/src/asmjs/WasmBinaryIterator.cpp
+++ b/js/src/asmjs/WasmBinaryIterator.cpp
@@ -103,16 +103,17 @@ wasm::Classify(Expr expr)
       case Expr::F32x4neg:
       case Expr::F32x4sqrt:
       case Expr::F32x4abs:
       case Expr::F32x4reciprocalApproximation:
       case Expr::F32x4reciprocalSqrtApproximation:
       case Expr::B8x16not:
       case Expr::B16x8not:
       case Expr::B32x4not:
+      case Expr::GrowMemory:
         return ExprKind::Unary;
       case Expr::I32Add:
       case Expr::I32Sub:
       case Expr::I32Mul:
       case Expr::I32DivS:
       case Expr::I32DivU:
       case Expr::I32RemS:
       case Expr::I32RemU:
@@ -466,14 +467,13 @@ wasm::Classify(Expr expr)
       case Expr::F32x4equal:
       case Expr::F32x4notEqual:
       case Expr::F32x4greaterThan:
       case Expr::F32x4greaterThanOrEqual:
       case Expr::F32x4lessThan:
       case Expr::F32x4lessThanOrEqual:
         return ExprKind::SimdComparison;
       case Expr::CurrentMemory:
-      case Expr::GrowMemory:
-        break;
+        return ExprKind::Nullary;
     }
     MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unimplemented opcode");
 }
 #endif
--- a/js/src/asmjs/WasmBinaryIterator.h
+++ b/js/src/asmjs/WasmBinaryIterator.h
@@ -449,17 +449,17 @@ class MOZ_STACK_CLASS ExprIter : private
     MOZ_MUST_USE bool readUnary(ValType operandType, Value* input);
     MOZ_MUST_USE bool readConversion(ValType operandType, ValType resultType, Value* input);
     MOZ_MUST_USE bool readBinary(ValType operandType, Value* lhs, Value* rhs);
     MOZ_MUST_USE bool readComparison(ValType operandType, Value* lhs, Value* rhs);
     MOZ_MUST_USE bool readLoad(ValType resultType, uint32_t byteSize,
                                LinearMemoryAddress<Value>* addr);
     MOZ_MUST_USE bool readStore(ValType resultType, uint32_t byteSize,
                                 LinearMemoryAddress<Value>* addr, Value* value);
-    MOZ_MUST_USE bool readNullary();
+    MOZ_MUST_USE bool readNullary(ExprType retType);
     MOZ_MUST_USE bool readSelect(ExprType* type,
                                  Value* trueValue, Value* falseValue, Value* condition);
     MOZ_MUST_USE bool readGetLocal(const ValTypeVector& locals, uint32_t* id);
     MOZ_MUST_USE bool readSetLocal(const ValTypeVector& locals, uint32_t* id, Value* value);
     MOZ_MUST_USE bool readGetGlobal(const GlobalDescVector& globals, uint32_t* id);
     MOZ_MUST_USE bool readSetGlobal(const GlobalDescVector& globals, uint32_t* id, Value* value);
     MOZ_MUST_USE bool readI32Const(int32_t* i32);
     MOZ_MUST_USE bool readI64Const(int64_t* i64);
@@ -1113,21 +1113,21 @@ ExprIter<Policy>::readStore(ValType resu
 
     infalliblePush(TypeAndValue<Value>(ToExprType(resultType), Output ? *value : Value()));
 
     return true;
 }
 
 template <typename Policy>
 inline bool
-ExprIter<Policy>::readNullary()
+ExprIter<Policy>::readNullary(ExprType retType)
 {
     MOZ_ASSERT(Classify(expr_) == ExprKind::Nullary);
 
-    return push(ExprType::Void);
+    return push(retType);
 }
 
 template <typename Policy>
 inline bool
 ExprIter<Policy>::readSelect(ExprType* type, Value* trueValue, Value* falseValue, Value* condition)
 {
     MOZ_ASSERT(Classify(expr_) == ExprKind::Select);
 
--- a/js/src/asmjs/WasmBinaryToAST.cpp
+++ b/js/src/asmjs/WasmBinaryToAST.cpp
@@ -509,16 +509,30 @@ AstDecodeUnary(AstDecodeContext& c, ValT
     if (!unary)
         return false;
 
     c.iter().setResult(AstDecodeStackItem(unary, 1));
     return true;
 }
 
 static bool
+AstDecodeNullary(AstDecodeContext& c, ExprType type, Expr expr)
+{
+    if (!c.iter().readNullary(type))
+        return false;
+
+    AstNullaryOperator* nullary = new(c.lifo) AstNullaryOperator(expr);
+    if (!nullary)
+        return false;
+
+    c.iter().setResult(AstDecodeStackItem(nullary, 0));
+    return true;
+}
+
+static bool
 AstDecodeBinary(AstDecodeContext& c, ValType type, Expr expr)
 {
     AstDecodeStackItem lhs;
     AstDecodeStackItem rhs;
     if (!c.iter().readBinary(type, &lhs, &rhs))
         return false;
 
     AstBinaryOperator* binary = new(c.lifo) AstBinaryOperator(expr, lhs.expr, rhs.expr);
@@ -708,22 +722,18 @@ AstDecodeExpr(AstDecodeContext& c)
     uint32_t exprOffset = c.iter().currentOffset();
     Expr expr;
     if (!c.iter().readExpr(&expr))
         return false;
 
     AstExpr* tmp;
     switch (expr) {
       case Expr::Nop:
-        if (!c.iter().readNullary())
+        if (!AstDecodeNullary(c, ExprType::Void, expr))
             return false;
-        tmp = new(c.lifo) AstNop();
-        if (!tmp)
-            return false;
-        c.iter().setResult(AstDecodeStackItem(tmp));
         break;
       case Expr::Call:
         if (!AstDecodeCall(c))
             return false;
         break;
       case Expr::CallIndirect:
         if (!AstDecodeCallIndirect(c))
             return false;
@@ -795,16 +805,17 @@ AstDecodeExpr(AstDecodeContext& c)
         break;
       case Expr::End:
         if (!AstDecodeEnd(c))
             return false;
         break;
       case Expr::I32Clz:
       case Expr::I32Ctz:
       case Expr::I32Popcnt:
+      case Expr::GrowMemory:
         if (!AstDecodeUnary(c, ValType::I32, expr))
             return false;
         break;
       case Expr::I64Clz:
       case Expr::I64Ctz:
       case Expr::I64Popcnt:
         if (!AstDecodeUnary(c, ValType::I64, expr))
             return false;
@@ -1079,16 +1090,20 @@ AstDecodeExpr(AstDecodeContext& c)
       case Expr::BrTable:
         if (!AstDecodeBrTable(c))
             return false;
         break;
       case Expr::Return:
         if (!AstDecodeReturn(c))
             return false;
         break;
+      case Expr::CurrentMemory:
+        if (!AstDecodeNullary(c, ExprType::I32, expr))
+            return false;
+        break;
       case Expr::Unreachable:
         if (!c.iter().readUnreachable())
             return false;
         tmp = new(c.lifo) AstUnreachable();
         if (!tmp)
             return false;
         c.iter().setResult(AstDecodeStackItem(tmp));
         break;
--- a/js/src/asmjs/WasmBinaryToExperimentalText.cpp
+++ b/js/src/asmjs/WasmBinaryToExperimentalText.cpp
@@ -151,17 +151,18 @@ IsDropValueExpr(AstExpr& expr)
     // the function return type.
     switch (expr.kind()) {
       case AstExprKind::Branch:
         return !expr.as<AstBranch>().maybeValue();
       case AstExprKind::BranchTable:
         return !expr.as<AstBranchTable>().maybeValue();
       case AstExprKind::If:
         return !expr.as<AstIf>().hasElse();
-      case AstExprKind::Nop:
+      case AstExprKind::NullaryOperator:
+        return expr.as<AstNullaryOperator>().expr() == Expr::Nop;
       case AstExprKind::Unreachable:
       case AstExprKind::Return:
         return true;
       default:
         return false;
     }
 }
 
@@ -352,19 +353,27 @@ PrintBlockLevelExpr(WasmPrintContext& c,
     }
     return c.buffer.append('\n');
 }
 
 /*****************************************************************************/
 // binary format parsing and rendering
 
 static bool
-PrintNop(WasmPrintContext& c, AstNop& nop)
+PrintNullaryOperator(WasmPrintContext& c, AstNullaryOperator& op)
 {
-    return c.buffer.append("nop");
+    const char* opStr;
+
+    switch (op.expr()) {
+        case Expr::Nop:             opStr = "nop"; break;
+        case Expr::CurrentMemory:   opStr = "current_memory"; break;
+        default:  return false;
+    }
+
+    return c.buffer.append(opStr, strlen(opStr));
 }
 
 static bool
 PrintUnreachable(WasmPrintContext& c, AstUnreachable& unreachable)
 {
     return c.buffer.append("unreachable");
 }
 
@@ -636,16 +645,17 @@ PrintUnaryOperator(WasmPrintContext& c, 
       case Expr::F32Sqrt:    opStr = "f32.sqrt"; break;
       case Expr::F32Trunc:   opStr = "f32.trunc"; break;
       case Expr::F32Nearest: opStr = "f32.nearest"; break;
       case Expr::F64Abs:     opStr = "f64.abs"; break;
       case Expr::F64Neg:     opStr = "f64.neg"; prefixStr = "-"; precedence = NegatePrecedence; break;
       case Expr::F64Ceil:    opStr = "f64.ceil"; break;
       case Expr::F64Floor:   opStr = "f64.floor"; break;
       case Expr::F64Sqrt:    opStr = "f64.sqrt"; break;
+      case Expr::GrowMemory: opStr = "grow_memory"; break;
       default: return false;
     }
 
     if (c.f.allowAsciiOperators && prefixStr) {
         if (!c.f.reduceParens || lastPrecedence > precedence) {
             if (!c.buffer.append("("))
                 return false;
         }
@@ -1332,18 +1342,18 @@ PrintExpr(WasmPrintContext& c, AstExpr& 
     if (c.maybeSourceMap) {
         uint32_t lineno = c.buffer.lineno();
         uint32_t column = c.buffer.column();
         if (!c.maybeSourceMap->exprlocs().emplaceBack(lineno, column, expr.offset()))
             return false;
     }
 
     switch (expr.kind()) {
-      case AstExprKind::Nop:
-        return PrintNop(c, expr.as<AstNop>());
+      case AstExprKind::NullaryOperator:
+        return PrintNullaryOperator(c, expr.as<AstNullaryOperator>());
       case AstExprKind::Unreachable:
         return PrintUnreachable(c, expr.as<AstUnreachable>());
       case AstExprKind::Call:
         return PrintCall(c, expr.as<AstCall>());
       case AstExprKind::CallIndirect:
         return PrintCallIndirect(c, expr.as<AstCallIndirect>());
       case AstExprKind::Const:
         return PrintConst(c, expr.as<AstConst>());
--- a/js/src/asmjs/WasmBinaryToText.cpp
+++ b/js/src/asmjs/WasmBinaryToText.cpp
@@ -217,22 +217,16 @@ RenderFullLine(WasmRenderContext& c, Ast
         return false;
     return c.buffer.append('\n');
 }
 
 /*****************************************************************************/
 // binary format parsing and rendering
 
 static bool
-RenderNop(WasmRenderContext& c, AstNop& nop)
-{
-    return c.buffer.append("(nop)");
-}
-
-static bool
 RenderUnreachable(WasmRenderContext& c, AstUnreachable& unreachable)
 {
     return c.buffer.append("(trap)");
 }
 
 static bool
 RenderCallArgs(WasmRenderContext& c, const AstExprVector& args)
 {
@@ -406,16 +400,35 @@ RenderBlock(WasmRenderContext& c, AstBlo
     if (!RenderExprList(c, block.exprs()))
         return false;
     c.indent--;
 
     return c.buffer.append(")");
 }
 
 static bool
+RenderNullaryOperator(WasmRenderContext& c, AstNullaryOperator& op)
+{
+    // Render a nullary (no-operand) operator as "(<name>)". Returns false on
+    // OOM or if op is not actually a nullary operator.
+    if (!c.buffer.append("("))
+      return false;
+
+    const char* opStr;
+    switch (op.expr()) {
+      case Expr::Nop:               opStr = "nop"; break;
+      case Expr::CurrentMemory:     opStr = "current_memory"; break;
+      default: return false;
+    }
+
+    if (!c.buffer.append(opStr, strlen(opStr)))
+        return false;
+
+    return c.buffer.append(")");
+}
+
+static bool
 RenderUnaryOperator(WasmRenderContext& c, AstUnaryOperator& op)
 {
     if (!c.buffer.append("("))
       return false;
 
     const char* opStr;
     switch (op.expr()) {
       case Expr::I32Eqz:     opStr = "i32.eqz"; break;
@@ -432,16 +445,17 @@ RenderUnaryOperator(WasmRenderContext& c
       case Expr::F32Sqrt:    opStr = "f32.sqrt"; break;
       case Expr::F32Trunc:   opStr = "f32.trunc"; break;
       case Expr::F32Nearest: opStr = "f32.nearest"; break;
       case Expr::F64Abs:     opStr = "f64.abs"; break;
       case Expr::F64Neg:     opStr = "f64.neg"; break;
       case Expr::F64Ceil:    opStr = "f64.ceil"; break;
       case Expr::F64Floor:   opStr = "f64.floor"; break;
       case Expr::F64Sqrt:    opStr = "f64.sqrt"; break;
+      case Expr::GrowMemory: opStr = "grow_memory"; break;
       default: return false;
     }
     if (!c.buffer.append(opStr, strlen(opStr)))
         return false;
 
     if (!c.buffer.append(" "))
         return false;
 
@@ -950,18 +964,18 @@ RenderReturn(WasmRenderContext& c, AstRe
 
     return c.buffer.append(")");
 }
 
 static bool
 RenderExpr(WasmRenderContext& c, AstExpr& expr)
 {
     switch (expr.kind()) {
-      case AstExprKind::Nop:
-        return RenderNop(c, expr.as<AstNop>());
+      case AstExprKind::NullaryOperator:
+        return RenderNullaryOperator(c, expr.as<AstNullaryOperator>());
       case AstExprKind::Unreachable:
         return RenderUnreachable(c, expr.as<AstUnreachable>());
       case AstExprKind::Call:
         return RenderCall(c, expr.as<AstCall>());
       case AstExprKind::CallIndirect:
         return RenderCallIndirect(c, expr.as<AstCallIndirect>());
       case AstExprKind::Const:
         return RenderConst(c, expr.as<AstConst>());
--- a/js/src/asmjs/WasmCode.cpp
+++ b/js/src/asmjs/WasmCode.cpp
@@ -103,18 +103,24 @@ StaticallyLink(CodeSegment& cs, const Li
 
     *(double*)(cs.globalData() + NaN64GlobalDataOffset) = GenericNaN();
     *(float*)(cs.globalData() + NaN32GlobalDataOffset) = GenericNaN();
 }
 
 static void
 SpecializeToMemory(CodeSegment& cs, const Metadata& metadata, HandleWasmMemoryObject memory)
 {
-    for (const BoundsCheck& check : metadata.boundsChecks)
-        Assembler::UpdateBoundsCheck(check.patchAt(cs.base()), memory->buffer().byteLength());
+    if (!metadata.boundsChecks.empty()) {
+        uint32_t length = memory->buffer().wasmBoundsCheckLimit();
+        MOZ_RELEASE_ASSERT(length == LegalizeMapLength(length));
+        MOZ_RELEASE_ASSERT(length >= memory->buffer().wasmActualByteLength());
+
+        for (const BoundsCheck& check : metadata.boundsChecks)
+            Assembler::UpdateBoundsCheck(check.patchAt(cs.base()), length);
+    }
 
 #if defined(JS_CODEGEN_X86)
     uint8_t* base = memory->buffer().dataPointerEither().unwrap();
     for (const MemoryAccess& access : metadata.memoryAccesses) {
         // Patch memory pointer immediate.
         void* addr = access.patchMemoryPtrImmAt(cs.base());
         uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
         MOZ_ASSERT(disp <= INT32_MAX);
@@ -596,17 +602,17 @@ Code::lookupRange(void* pc) const
 
     size_t match;
     if (!BinarySearch(metadata_->codeRanges, lowerBound, upperBound, target, &match))
         return nullptr;
 
     return &metadata_->codeRanges[match];
 }
 
-#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS
+#ifdef WASM_HUGE_MEMORY
 struct MemoryAccessOffset
 {
     const MemoryAccessVector& accesses;
     explicit MemoryAccessOffset(const MemoryAccessVector& accesses) : accesses(accesses) {}
     uintptr_t operator[](size_t index) const {
         return accesses[index].insnOffset();
     }
 };
@@ -621,17 +627,17 @@ Code::lookupMemoryAccess(void* pc) const
     size_t upperBound = metadata_->memoryAccesses.length();
 
     size_t match;
     if (!BinarySearch(MemoryAccessOffset(metadata_->memoryAccesses), lowerBound, upperBound, target, &match))
         return nullptr;
 
     return &metadata_->memoryAccesses[match];
 }
-#endif // ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
+#endif
 
 bool
 Code::getFuncName(JSContext* cx, uint32_t funcIndex, TwoByteName* name) const
 {
     const Bytes* maybeBytecode = maybeBytecode_ ? &maybeBytecode_.get()->bytes : nullptr;
     return metadata_->getFuncName(cx, maybeBytecode, funcIndex, name);
 }
 
--- a/js/src/asmjs/WasmCode.h
+++ b/js/src/asmjs/WasmCode.h
@@ -412,17 +412,17 @@ class MetadataCacheablePod
     static_assert(NO_START_FUNCTION > MaxFuncs, "sentinel value");
 
     uint32_t              startFuncIndex_;
 
   public:
     ModuleKind            kind;
     MemoryUsage           memoryUsage;
     uint32_t              minMemoryLength;
-    uint32_t              maxMemoryLength;
+    Maybe<uint32_t>       maxMemoryLength;
 
     explicit MetadataCacheablePod(ModuleKind kind) {
         mozilla::PodZero(this);
         startFuncIndex_ = NO_START_FUNCTION;
         this->kind = kind;
     }
 
     bool hasStartFunction() const {
@@ -517,17 +517,17 @@ class Code
 
     const CodeSegment& segment() const { return *segment_; }
     const Metadata& metadata() const { return *metadata_; }
 
     // Frame iterator support:
 
     const CallSite* lookupCallSite(void* returnAddress) const;
     const CodeRange* lookupRange(void* pc) const;
-#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS
+#ifdef WASM_HUGE_MEMORY
     const MemoryAccess* lookupMemoryAccess(void* pc) const;
 #endif
 
     // Return the name associated with a given function index, or generate one
     // if none was given by the module.
 
     bool getFuncName(JSContext* cx, uint32_t funcIndex, TwoByteName* name) const;
     JSAtom* getFuncAtom(JSContext* cx, uint32_t funcIndex) const;
--- a/js/src/asmjs/WasmCompile.cpp
+++ b/js/src/asmjs/WasmCompile.cpp
@@ -19,16 +19,17 @@
 #include "asmjs/WasmCompile.h"
 
 #include "mozilla/CheckedInt.h"
 
 #include "jsprf.h"
 
 #include "asmjs/WasmBinaryIterator.h"
 #include "asmjs/WasmGenerator.h"
+#include "asmjs/WasmSignalHandlers.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
 using mozilla::CheckedInt;
 using mozilla::IsNaN;
 
@@ -192,17 +193,17 @@ static bool
 DecodeExpr(FunctionDecoder& f)
 {
     Expr expr;
     if (!f.iter().readExpr(&expr))
         return false;
 
     switch (expr) {
       case Expr::Nop:
-        return f.iter().readNullary();
+        return f.iter().readNullary(ExprType::Void);
       case Expr::Call:
         return DecodeCall(f);
       case Expr::CallIndirect:
         return DecodeCallIndirect(f);
       case Expr::CallImport:
         return DecodeCallImport(f);
       case Expr::I32Const:
         return f.iter().readI32Const(nullptr);
@@ -434,16 +435,22 @@ DecodeExpr(FunctionDecoder& f)
         return f.checkHasMemory() &&
                f.iter().readStore(ValType::I64, 8, nullptr, nullptr);
       case Expr::F32Store:
         return f.checkHasMemory() &&
                f.iter().readStore(ValType::F32, 4, nullptr, nullptr);
       case Expr::F64Store:
         return f.checkHasMemory() &&
                f.iter().readStore(ValType::F64, 8, nullptr, nullptr);
+      case Expr::GrowMemory:
+        return f.checkHasMemory() &&
+               f.iter().readUnary(ValType::I32, nullptr);
+      case Expr::CurrentMemory:
+        return f.checkHasMemory() &&
+               f.iter().readNullary(ExprType::I32);
       case Expr::Br:
         return f.iter().readBr(nullptr, nullptr, nullptr);
       case Expr::BrIf:
         return f.iter().readBrIf(nullptr, nullptr, nullptr, nullptr);
       case Expr::BrTable:
         return DecodeBrTable(f);
       case Expr::Return:
         return f.iter().readReturn(nullptr);
@@ -664,19 +671,17 @@ DecodeResizableMemory(Decoder& d, Module
     init->minMemoryLength = initialBytes.value();
 
     if (resizable.maximum) {
         CheckedInt<uint32_t> maximumBytes = *resizable.maximum;
         maximumBytes *= PageSize;
         if (!maximumBytes.isValid())
             return Fail(d, "maximum memory size too big");
 
-        init->maxMemoryLength = maximumBytes.value();
-    } else {
-        init->maxMemoryLength = UINT32_MAX;
+        init->maxMemoryLength = Some(maximumBytes.value());
     }
 
     return true;
 }
 
 static bool
 DecodeResizableTable(Decoder& d, ModuleGeneratorData* init)
 {
@@ -933,17 +938,17 @@ DecodeMemorySection(Decoder& d, bool new
         uint8_t u8;
         if (!d.readFixedU8(&u8))
             return Fail(d, "expected exported byte");
 
         *exported = u8;
         MOZ_ASSERT(init->memoryUsage == MemoryUsage::None);
         init->memoryUsage = MemoryUsage::Unshared;
         init->minMemoryLength = initialSize.value();
-        init->maxMemoryLength = maxSize.value();
+        init->maxMemoryLength = Some(maxSize.value());
     }
 
     if (!d.finishSection(sectionStart, sectionSize))
         return Fail(d, "memory section byte size mismatch");
 
     return true;
 }
 
@@ -1529,19 +1534,21 @@ CompileArgs::initFromContext(ExclusiveCo
     alwaysBaseline = cx->options().wasmAlwaysBaseline();
     this->scriptedCaller = Move(scriptedCaller);
     return assumptions.initBuildIdFromContext(cx);
 }
 
 SharedModule
 wasm::Compile(const ShareableBytes& bytecode, const CompileArgs& args, UniqueChars* error)
 {
+    MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
+
     bool newFormat = args.assumptions.newFormat;
 
-    auto init = js::MakeUnique<ModuleGeneratorData>(args.assumptions.usesSignal);
+    auto init = js::MakeUnique<ModuleGeneratorData>();
     if (!init)
         return nullptr;
 
     Decoder d(bytecode.begin(), bytecode.end(), error);
 
     if (!DecodePreamble(d))
         return nullptr;
 
--- a/js/src/asmjs/WasmGenerator.h
+++ b/js/src/asmjs/WasmGenerator.h
@@ -45,34 +45,31 @@ struct FuncImportGenDesc
     explicit FuncImportGenDesc(const SigWithId* sig) : sig(sig), globalDataOffset(0) {}
 };
 
 typedef Vector<FuncImportGenDesc, 0, SystemAllocPolicy> FuncImportGenDescVector;
 
 struct ModuleGeneratorData
 {
     ModuleKind                kind;
-    SignalUsage               usesSignal;
     MemoryUsage               memoryUsage;
     mozilla::Atomic<uint32_t> minMemoryLength;
-    uint32_t                  maxMemoryLength;
+    Maybe<uint32_t>           maxMemoryLength;
 
     SigWithIdVector           sigs;
     SigWithIdPtrVector        funcSigs;
     FuncImportGenDescVector   funcImports;
     GlobalDescVector          globals;
     TableDescVector           tables;
     Uint32Vector              asmJSSigToTableIndex;
 
-    explicit ModuleGeneratorData(SignalUsage usesSignal, ModuleKind kind = ModuleKind::Wasm)
+    explicit ModuleGeneratorData(ModuleKind kind = ModuleKind::Wasm)
       : kind(kind),
-        usesSignal(usesSignal),
         memoryUsage(MemoryUsage::None),
-        minMemoryLength(0),
-        maxMemoryLength(UINT32_MAX)
+        minMemoryLength(0)
     {}
 
     bool isAsmJS() const {
         return kind == ModuleKind::AsmJS;
     }
 };
 
 typedef UniquePtr<ModuleGeneratorData> UniqueModuleGeneratorData;
@@ -140,17 +137,16 @@ class MOZ_STACK_CLASS ModuleGenerator
   public:
     explicit ModuleGenerator(ImportVector&& imports);
     ~ModuleGenerator();
 
     MOZ_MUST_USE bool init(UniqueModuleGeneratorData shared, const CompileArgs& args,
                            Metadata* maybeAsmJSMetadata = nullptr);
 
     bool isAsmJS() const { return metadata_->kind == ModuleKind::AsmJS; }
-    SignalUsage usesSignal() const { return metadata_->assumptions.usesSignal; }
     jit::MacroAssembler& masm() { return masm_; }
 
     // Memory:
     bool usesMemory() const { return UsesMemory(shared_->memoryUsage); }
     uint32_t minMemoryLength() const { return shared_->minMemoryLength; }
 
     // Tables:
     uint32_t numTables() const { return numTables_; }
@@ -245,20 +241,16 @@ class MOZ_STACK_CLASS FunctionGenerator
 
     bool usesAtomics() const {
         return usesAtomics_;
     }
     void setUsesAtomics() {
         usesAtomics_ = true;
     }
 
-    bool usesSignalsForInterrupts() const {
-        return m_->usesSignal().forInterrupt;
-    }
-
     Bytes& bytes() {
         return bytes_;
     }
     MOZ_MUST_USE bool addCallSiteLineNum(uint32_t lineno) {
         return callSiteLineNums_.append(lineno);
     }
 };
 
--- a/js/src/asmjs/WasmInstance.cpp
+++ b/js/src/asmjs/WasmInstance.cpp
@@ -275,16 +275,64 @@ Instance::callImport_f64(Instance* insta
     JSContext* cx = instance->cx();
     RootedValue rval(cx);
     if (!instance->callImport(cx, funcImportIndex, argc, argv, &rval))
         return false;
 
     return ToNumber(cx, rval, (double*)argv);
 }
 
+/* static */ uint32_t
+Instance::growMemory_i32(Instance* instance, uint32_t delta)
+{
+    // Builtin entry point for wasm grow_memory, invoked from jitted code via
+    // SymbolicAddress::GrowMemory; forwards to the instance method.
+    return instance->growMemory(delta);
+}
+
+/* static */ uint32_t
+Instance::currentMemory_i32(Instance* instance)
+{
+    // Builtin entry point for wasm current_memory, invoked from jitted code
+    // via SymbolicAddress::CurrentMemory; forwards to the instance method.
+    return instance->currentMemory();
+}
+
+uint32_t
+Instance::growMemory(uint32_t delta)
+{
+    // Grow the memory by |delta| wasm pages. On success, returns the
+    // previous size in pages; on failure, returns uint32_t(-1).
+    MOZ_RELEASE_ASSERT(memory_);
+
+    // Compute page counts in uint64_t so the addition below cannot overflow.
+    uint64_t curNumPages = currentMemory();
+    uint64_t newNumPages = curNumPages + (uint64_t) delta;
+
+    if (metadata().maxMemoryLength) {
+        ArrayBufferObject &buf = memory_->buffer().as<ArrayBufferObject>();
+        // Guaranteed by instantiateMemory
+        MOZ_RELEASE_ASSERT(buf.wasmMaxSize() && buf.wasmMaxSize() <= metadata().maxMemoryLength);
+
+        // Fail if the new size would exceed the buffer's declared maximum.
+        if (newNumPages * wasm::PageSize > buf.wasmMaxSize().value())
+            return uint32_t(-1);
+
+        // Try to grow the memory
+        if (!buf.growForWasm(delta))
+            return uint32_t(-1);
+    } else {
+        return uint32_t(-1); // TODO: implement grow_memory w/o max when we add realloc
+    }
+
+    return curNumPages;
+}
+
+uint32_t
+Instance::currentMemory()
+{
+    // Returns the current memory size in wasm pages.
+    MOZ_RELEASE_ASSERT(memory_);
+    uint32_t curMemByteLen = memory_->buffer().wasmActualByteLength();
+    // Wasm memory sizes are always a whole number of pages.
+    MOZ_ASSERT(curMemByteLen % wasm::PageSize == 0);
+    return curMemByteLen / wasm::PageSize;
+}
+
 Instance::Instance(JSContext* cx,
                    Handle<WasmInstanceObject*> object,
                    UniqueCode code,
                    HandleWasmMemoryObject memory,
                    SharedTableVector&& tables,
                    Handle<FunctionVector> funcImports,
                    const ValVector& globalImports)
   : compartment_(cx->compartment()),
@@ -398,16 +446,38 @@ Instance::~Instance()
 
         for (const SigWithId& sig : metadata().sigIds) {
             if (const void* sigId = *addressOfSigId(sig.id))
                 lockedSigIdSet->deallocateSigId(sig, sigId);
         }
     }
 }
 
+size_t
+Instance::memoryMappedSize() const
+{
+    // Size of the memory's full mapping, which may extend past the currently
+    // accessible length (see memoryAccessInGuardRegion).
+    return memory_->buffer().wasmMappedSize();
+}
+
+bool
+Instance::memoryAccessInGuardRegion(uint8_t* addr, unsigned numBytes) const
+{
+    // Returns true iff the last byte of the access [addr, addr + numBytes)
+    // lies at or past the accessible memory length but still within the
+    // mapped region, i.e. the access faulted in the guard area.
+    MOZ_ASSERT(numBytes > 0);
+
+    if (!metadata().usesMemory())
+        return false;
+
+    uint8_t* base = memoryBase().unwrap(/* comparison */);
+    if (addr < base)
+        return false;
+
+    size_t lastByteOffset = addr - base + (numBytes - 1);
+    return lastByteOffset >= memoryLength() && lastByteOffset < memoryMappedSize();
+}
+
 void
 Instance::tracePrivate(JSTracer* trc)
 {
     // This method is only called from WasmInstanceObject so the only reason why
     // TraceEdge is called is so that the pointer can be updated during a moving
     // GC. TraceWeakEdge may sound better, but it is less efficient given that
     // we know object_ is already marked.
     MOZ_ASSERT(!gc::IsAboutToBeFinalized(&object_));
@@ -439,17 +509,17 @@ Instance::memoryBase() const
     MOZ_ASSERT(metadata().usesMemory());
     MOZ_ASSERT(tlsData_.memoryBase == memory_->buffer().dataPointerEither());
     return memory_->buffer().dataPointerEither();
 }
 
 size_t
 Instance::memoryLength() const
 {
-    return memory_->buffer().byteLength();
+    return memory_->buffer().wasmActualByteLength();
 }
 
 template<typename T>
 static JSObject*
 CreateCustomNaNObject(JSContext* cx, T* addr)
 {
     MOZ_ASSERT(IsNaN(*addr));
 
--- a/js/src/asmjs/WasmInstance.h
+++ b/js/src/asmjs/WasmInstance.h
@@ -51,19 +51,23 @@ class Instance
     FuncImportTls& funcImportTls(const FuncImport& fi);
 
     // Import call slow paths which are called directly from wasm code.
     friend void* AddressOf(SymbolicAddress, ExclusiveContext*);
     static int32_t callImport_void(Instance*, int32_t, int32_t, uint64_t*);
     static int32_t callImport_i32(Instance*, int32_t, int32_t, uint64_t*);
     static int32_t callImport_i64(Instance*, int32_t, int32_t, uint64_t*);
     static int32_t callImport_f64(Instance*, int32_t, int32_t, uint64_t*);
+    static uint32_t growMemory_i32(Instance* instance, uint32_t delta);
+    static uint32_t currentMemory_i32(Instance* instance);
 
     bool callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, const uint64_t* argv,
                     MutableHandleValue rval);
+    uint32_t growMemory(uint32_t delta);
+    uint32_t currentMemory();
 
     // Only WasmInstanceObject can call the private trace function.
     friend class js::WasmInstanceObject;
     void tracePrivate(JSTracer* trc);
 
   public:
     Instance(JSContext* cx,
              HandleWasmInstanceObject object,
@@ -82,16 +86,18 @@ class Instance
     const Code& code() const { return *code_; }
     const CodeSegment& codeSegment() const { return code_->segment(); }
     uint8_t* codeBase() const { return code_->segment().base(); }
     const Metadata& metadata() const { return code_->metadata(); }
     bool isAsmJS() const { return metadata().isAsmJS(); }
     const SharedTableVector& tables() const { return tables_; }
     SharedMem<uint8_t*> memoryBase() const;
     size_t memoryLength() const;
+    size_t memoryMappedSize() const;
+    bool memoryAccessInGuardRegion(uint8_t* addr, unsigned numBytes) const;
     TlsData& tlsData() { return tlsData_; }
 
     // This method returns a pointer to the GC object that owns this Instance.
     // Instances may be reached via weak edges (e.g., Compartment::instances_)
     // so this perform a read-barrier on the returned object.
 
     WasmInstanceObject* object() const;
 
@@ -102,16 +108,17 @@ class Instance
 
     // Initially, calls to imports in wasm code call out through the generic
     // callImport method. If the imported callee gets JIT compiled and the types
     // match up, callImport will patch the code to instead call through a thunk
     // directly into the JIT code. If the JIT code is released, the Instance must
     // be notified so it can go back to the generic callImport.
 
     void deoptimizeImportExit(uint32_t funcImportIndex);
+    bool memoryAccessWouldFault(uint8_t* addr, unsigned numBytes);
 
     // See Code::ensureProfilingState comment.
 
     MOZ_MUST_USE bool ensureProfilingState(JSContext* cx, bool enabled);
 
     // about:memory reporting:
 
     void addSizeOfMisc(MallocSizeOf mallocSizeOf,
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -16,16 +16,17 @@
  * limitations under the License.
  */
 
 #include "asmjs/WasmIonCompile.h"
 
 #include "asmjs/WasmBaselineCompile.h"
 #include "asmjs/WasmBinaryIterator.h"
 #include "asmjs/WasmGenerator.h"
+#include "asmjs/WasmSignalHandlers.h"
 
 #include "jit/CodeGenerator.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
 using mozilla::DebugOnly;
@@ -79,16 +80,19 @@ class CallCompileState
     // Set by FunctionCompiler::finishCall(), tells a potentially-inter-module
     // call the offset of the reserved space in which it can save the caller's
     // WasmTlsReg.
     uint32_t tlsStackOffset_;
 
     // Accumulates the register arguments while compiling arguments.
     MWasmCall::Args regArgs_;
 
+    // Reserved argument for passing Instance* to builtin instance method calls.
+    ABIArg instanceArg_;
+
     // Accumulates the stack arguments while compiling arguments. This is only
     // necessary to track when childClobbers_ is true so that the stack offsets
     // can be updated.
     Vector<MAsmJSPassStackArg*, 0, SystemAllocPolicy> stackArgs_;
 
     // Set by child calls (i.e., calls that execute while evaluating a parent's
     // operands) to indicate that the child and parent call cannot reuse the
     // same stack space -- the parent must store its stack arguments below the
@@ -701,18 +705,20 @@ class FunctionCompiler
     // False means we're sure to be out-of-bounds after this bounds check.
     bool maybeAddBoundsCheck(MDefinition* base, const MWasmMemoryAccess& access)
     {
         if (access.offset() > uint32_t(INT32_MAX)) {
             curBlock_->end(MWasmTrap::New(alloc(), Trap::OutOfBounds));
             curBlock_ = nullptr;
             return false;
         }
-        if (!mg().usesSignal.forOOB)
-            curBlock_->add(MWasmBoundsCheck::New(alloc(), base, access));
+
+#ifndef WASM_HUGE_MEMORY
+        curBlock_->add(MWasmBoundsCheck::New(alloc(), base, access));
+#endif
         return true;
     }
 
     MDefinition* loadHeapPrivate(MDefinition* base, const MWasmMemoryAccess& access,
                                  bool isInt64 = false)
     {
         if (inDeadCode())
             return nullptr;
@@ -826,31 +832,18 @@ class FunctionCompiler
     {
         if (inDeadCode())
             return;
         curBlock_->add(MWasmStoreGlobalVar::New(alloc(), globalDataOffset, v));
     }
 
     void addInterruptCheck()
     {
-        if (mg_.usesSignal.forInterrupt)
-            return;
-
-        if (inDeadCode())
-            return;
-
-        // WasmHandleExecutionInterrupt takes 0 arguments and the stack is
-        // always ABIStackAlignment-aligned, but don't forget to account for
-        // ShadowStackSpace and any other ABI warts.
-        ABIArgGenerator abi;
-
-        propagateMaxStackArgBytes(abi.stackBytesConsumedSoFar());
-
-        CallSiteDesc callDesc(0, CallSiteDesc::Relative);
-        curBlock_->add(MAsmJSInterruptCheck::New(alloc()));
+        // We rely on signal handlers for interrupts on Asm.JS/Wasm
+        MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
     }
 
     MDefinition* extractSimdElement(unsigned lane, MDefinition* base, MIRType type, SimdSign sign)
     {
         if (inDeadCode())
             return nullptr;
 
         MOZ_ASSERT(IsSimdType(base->type()));
@@ -894,16 +887,27 @@ class FunctionCompiler
 
     bool startCall(CallCompileState* call)
     {
         // Always push calls to maintain the invariant that if we're inDeadCode
         // in finishCall, we have something to pop.
         return callStack_.append(call);
     }
 
+    bool passInstance(CallCompileState* args)
+    {
+        // Reserve the next ABI argument slot for the Instance* that builtin
+        // instance method calls receive (see builtinInstanceMethodCall).
+        if (inDeadCode())
+            return true;
+
+        // Should only pass an instance once.
+        MOZ_ASSERT(args->instanceArg_ == ABIArg());
+        args->instanceArg_ = args->abi_.next(MIRType::Pointer);
+        return true;
+    }
+
     bool passArg(MDefinition* argDef, ValType type, CallCompileState* call)
     {
         if (inDeadCode())
             return true;
 
         ABIArg arg = call->abi_.next(ToMIRType(type));
         switch (arg.kind()) {
 #ifdef JS_CODEGEN_REGISTER_PAIR
@@ -968,16 +972,23 @@ class FunctionCompiler
             call->tlsStackOffset_ = stackBytes;
             stackBytes += sizeof(void*);
         }
 
         if (call->childClobbers_) {
             call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, AsmJSStackAlignment);
             for (MAsmJSPassStackArg* stackArg : call->stackArgs_)
                 stackArg->incrementOffset(call->spIncrement_);
+
+            // If instanceArg_ is not initialized then instanceArg_.kind() != ABIArg::Stack
+            if (call->instanceArg_.kind() == ABIArg::Stack) {
+                call->instanceArg_ = ABIArg(call->instanceArg_.offsetFromArgBase() +
+                                            call->spIncrement_);
+            }
+
             stackBytes += call->spIncrement_;
         } else {
             call->spIncrement_ = 0;
             stackBytes = Max(stackBytes, call->maxChildStackBytes_);
         }
 
         propagateMaxStackArgBytes(stackBytes);
         return true;
@@ -1086,16 +1097,36 @@ class FunctionCompiler
         if (!ins)
             return false;
 
         curBlock_->add(ins);
         *def = ins;
         return true;
     }
 
+    bool builtinInstanceMethodCall(SymbolicAddress builtin, const CallCompileState& call,
+                                   ValType ret, MDefinition** def)
+    {
+        // Emit a call to a builtin that takes the calling Instance* as an
+        // argument; the slot for it must have been reserved via passInstance.
+        if (inDeadCode()) {
+            *def = nullptr;
+            return true;
+        }
+
+        CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Register);
+        auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(alloc(), desc, builtin,
+                                                            call.instanceArg_, call.regArgs_,
+                                                            ToMIRType(ret), call.spIncrement_);
+        if (!ins)
+            return false;
+
+        curBlock_->add(ins);
+        *def = ins;
+        return true;
+    }
+
     /*********************************************** Control flow generation */
 
     inline bool inDeadCode() const {
         return curBlock_ == nullptr;
     }
 
     void returnExpr(MDefinition* expr)
     {
@@ -2958,31 +2989,86 @@ EmitSimdOp(FunctionCompiler& f, ValType 
         return EmitSimdBitcast(f, ValType::F32x4, type);
       case SimdOperation::Fn_fromFloat64x2Bits:
         MOZ_CRASH("NYI");
     }
     MOZ_CRASH("unexpected opcode");
 }
 
 static bool
+EmitGrowMemory(FunctionCompiler& f, uint32_t callOffset)
+{
+    // Compile grow_memory as a call to the builtin instance method
+    // Instance::growMemory, passing the Instance* as a hidden argument.
+    uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset);
+
+    CallCompileState args(f, lineOrBytecode);
+    if (!f.startCall(&args))
+        return false;
+
+    if (!f.passInstance(&args))
+        return false;
+
+    MDefinition* delta;
+    if (!f.iter().readUnary(ValType::I32, &delta))
+        return false;
+
+    if (!f.passArg(delta, ValType::I32, &args))
+        return false;
+
+    // finishCall is fallible; propagate failure instead of ignoring it.
+    if (!f.finishCall(&args, PassTls::False, InterModule::False))
+        return false;
+
+    MDefinition* ret;
+    if (!f.builtinInstanceMethodCall(SymbolicAddress::GrowMemory, args, ValType::I32, &ret))
+        return false;
+
+    f.iter().setResult(ret);
+    return true;
+}
+
+static bool
+EmitCurrentMemory(FunctionCompiler& f, uint32_t callOffset)
+{
+    // Compile current_memory as a call to the builtin instance method
+    // Instance::currentMemory, passing the Instance* as a hidden argument.
+    uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset);
+
+    CallCompileState args(f, lineOrBytecode);
+
+    if (!f.iter().readNullary(ExprType::I32))
+        return false;
+
+    if (!f.startCall(&args))
+        return false;
+
+    if (!f.passInstance(&args))
+        return false;
+
+    // finishCall is fallible; propagate failure instead of ignoring it.
+    if (!f.finishCall(&args, PassTls::False, InterModule::False))
+        return false;
+
+    MDefinition* ret;
+    if (!f.builtinInstanceMethodCall(SymbolicAddress::CurrentMemory, args, ValType::I32, &ret))
+        return false;
+
+    f.iter().setResult(ret);
+    return true;
+}
+
+static bool
 EmitExpr(FunctionCompiler& f)
 {
     if (!f.mirGen().ensureBallast())
         return false;
 
     uint32_t exprOffset = f.iter().currentOffset();
 
     Expr expr;
     if (!f.iter().readExpr(&expr))
         return false;
 
     switch (expr) {
       // Control opcodes
       case Expr::Nop:
-        return f.iter().readNullary();
+        return f.iter().readNullary(ExprType::Void);
       case Expr::Block:
         return EmitBlock(f);
       case Expr::Loop:
         return EmitLoop(f);
       case Expr::If:
         return EmitIf(f);
       case Expr::Else:
         return EmitElse(f);
@@ -3527,21 +3613,21 @@ EmitExpr(FunctionCompiler& f)
       case Expr::I32AtomicsStore:
         return EmitAtomicsStore(f);
       case Expr::I32AtomicsBinOp:
         return EmitAtomicsBinOp(f);
       case Expr::I32AtomicsCompareExchange:
         return EmitAtomicsCompareExchange(f);
       case Expr::I32AtomicsExchange:
         return EmitAtomicsExchange(f);
-
-      // Future opcodes
+      // Memory Operators
+      case Expr::GrowMemory:
+        return EmitGrowMemory(f, exprOffset);
       case Expr::CurrentMemory:
-      case Expr::GrowMemory:
-        MOZ_CRASH("NYI");
+        return EmitCurrentMemory(f, exprOffset);
       case Expr::Limit:;
     }
 
     MOZ_CRASH("unexpected wasm opcode");
 }
 
 bool
 wasm::IonCompileFunction(IonCompileTask* task)
@@ -3564,17 +3650,16 @@ wasm::IonCompileFunction(IonCompileTask*
     // Set up for Ion compilation.
 
     JitContext jitContext(&results.alloc());
     const JitCompileOptions options;
     MIRGraph graph(&results.alloc());
     CompileInfo compileInfo(locals.length());
     MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo,
                      IonOptimizations.get(OptimizationLevel::AsmJS));
-    mir.initUsesSignalHandlersForAsmJSOOB(task->mg().usesSignal.forOOB);
     mir.initMinAsmJSHeapLength(task->mg().minMemoryLength);
 
     // Build MIR graph
     {
         FunctionCompiler f(task->mg(), d, func, locals, mir, results);
         if (!f.init())
             return false;
 
--- a/js/src/asmjs/WasmJS.cpp
+++ b/js/src/asmjs/WasmJS.cpp
@@ -13,40 +13,47 @@
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #include "asmjs/WasmJS.h"
 
+#include "mozilla/Maybe.h"
+
 #include "asmjs/WasmCompile.h"
 #include "asmjs/WasmInstance.h"
 #include "asmjs/WasmModule.h"
+#include "asmjs/WasmSignalHandlers.h"
 #include "builtin/Promise.h"
 #include "jit/JitOptions.h"
 #include "vm/Interpreter.h"
 
 #include "jsobjinlines.h"
 
 #include "vm/NativeObject-inl.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
+using mozilla::Nothing;
 
 bool
 wasm::HasCompilerSupport(ExclusiveContext* cx)
 {
     if (!cx->jitSupportsFloatingPoint())
         return false;
 
     if (!cx->jitSupportsUnalignedAccesses())
         return false;
 
+    if (!wasm::HaveSignalHandlers())
+        return false;
+
 #if defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_ARM64)
     return false;
 #else
     return true;
 #endif
 }
 
 static bool
@@ -741,35 +748,60 @@ WasmMemoryObject::construct(JSContext* c
     if (!args.get(0).isObject()) {
         JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_DESC_ARG, "memory");
         return false;
     }
 
     JSAtom* initialAtom = Atomize(cx, "initial", strlen("initial"));
     if (!initialAtom)
         return false;
+    RootedId initialId(cx, AtomToId(initialAtom));
+
+    JSAtom* maximumAtom = Atomize(cx, "maximum", strlen("maximum"));
+    if (!maximumAtom)
+        return false;
+    RootedId maximumId(cx, AtomToId(maximumAtom));
 
     RootedObject obj(cx, &args[0].toObject());
-    RootedId id(cx, AtomToId(initialAtom));
     RootedValue initialVal(cx);
-    if (!GetProperty(cx, obj, obj, id, &initialVal))
+    if (!GetProperty(cx, obj, obj, initialId, &initialVal))
         return false;
 
     double initialDbl;
     if (!ToInteger(cx, initialVal, &initialDbl))
         return false;
 
     if (initialDbl < 0 || initialDbl > INT32_MAX / PageSize) {
         JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_SIZE, "Memory", "initial");
         return false;
     }
 
-    uint32_t bytes = uint32_t(initialDbl) * PageSize;
-    bool signalsForOOB = SignalUsage().forOOB;
-    RootedArrayBufferObject buffer(cx, ArrayBufferObject::createForWasm(cx, bytes, signalsForOOB));
+    Maybe<uint32_t> maxSize;
+
+    bool found;
+    if (HasProperty(cx, obj, maximumId, &found) && found) {
+        RootedValue maxVal(cx);
+        if (!GetProperty(cx, obj, obj, maximumId, &maxVal))
+            return false;
+
+        double maxDbl;
+        if (!ToInteger(cx, maxVal, &maxDbl))
+            return false;
+
+        if (maxDbl < initialDbl || maxDbl > UINT32_MAX / PageSize) {
+            JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_SIZE, "Memory",
+                                 "maximum");
+            return false;
+        }
+
+        maxSize = Some<uint32_t>(uint32_t(maxDbl) * PageSize);
+    }
+
+    uint32_t initialSize = uint32_t(initialDbl) * PageSize;
+    RootedArrayBufferObject buffer(cx, ArrayBufferObject::createForWasm(cx, initialSize, maxSize));
     if (!buffer)
         return false;
 
     RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmMemory).toObject());
     RootedWasmMemoryObject memoryObj(cx, WasmMemoryObject::create(cx, buffer, proto));
     if (!memoryObj)
         return false;
 
--- a/js/src/asmjs/WasmModule.cpp
+++ b/js/src/asmjs/WasmModule.cpp
@@ -512,33 +512,52 @@ Module::instantiateMemory(JSContext* cx,
         MOZ_ASSERT(!memory);
         MOZ_ASSERT(dataSegments_.empty());
         return true;
     }
 
     RootedArrayBufferObjectMaybeShared buffer(cx);
     if (memory) {
         buffer = &memory->buffer();
-        uint32_t length = buffer->byteLength();
-        if (length < metadata_->minMemoryLength || length > metadata_->maxMemoryLength) {
+        uint32_t length = buffer->wasmActualByteLength();
+        uint32_t declaredMaxLength = metadata_->maxMemoryLength.valueOr(UINT32_MAX);
+
+        // It's not an error to import a memory whose mapped size is less than
+        // the maxMemoryLength required for the module. This is the same as trying to
+        // map up to maxMemoryLength but actually getting less.
+        if (length < metadata_->minMemoryLength || length > declaredMaxLength) {
             JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMP_SIZE, "Memory");
             return false;
         }
 
-        // This can't happen except via the shell toggling signals.enabled.
-        if (metadata_->assumptions.usesSignal.forOOB &&
-            !buffer->is<SharedArrayBufferObject>() &&
-            !buffer->as<ArrayBufferObject>().isWasmMapped())
-        {
-            JS_ReportError(cx, "can't access same buffer with and without signals enabled");
-            return false;
+        // For asm.js, maxMemoryLength doesn't play a role since we can't grow memory.
+        // For wasm we require that either both memory and module don't specify a max size
+        // OR that the memory's max size is no larger than the module's.
+        if (!metadata_->isAsmJS()) {
+            Maybe<uint32_t> memMaxSize =
+                buffer->as<ArrayBufferObject>().wasmMaxSize();
+
+            if (metadata_->maxMemoryLength.isSome() != memMaxSize.isSome() ||
+                metadata_->maxMemoryLength < memMaxSize) {
+                JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMP_SIZE,
+                                     "Memory");
+                return false;
+            }
         }
+
+        MOZ_RELEASE_ASSERT(buffer->is<SharedArrayBufferObject>() ||
+                           buffer->as<ArrayBufferObject>().isWasm());
+
+        // We currently assume SharedArrayBuffer => asm.js. Can remove this
+        // once wasmMaxSize/mappedSize/growForWasm have been implemented in SAB.
+        MOZ_ASSERT_IF(buffer->is<SharedArrayBufferObject>(), metadata_->isAsmJS());
     } else {
         buffer = ArrayBufferObject::createForWasm(cx, metadata_->minMemoryLength,
-                                                  metadata_->assumptions.usesSignal.forOOB);
+                                                  metadata_->maxMemoryLength);
+
         if (!buffer)
             return false;
 
         RootedObject proto(cx);
         if (metadata_->assumptions.newFormat)
             proto = &cx->global()->getPrototype(JSProto_WasmMemory).toObject();
 
         memory.set(WasmMemoryObject::create(cx, buffer, proto));
--- a/js/src/asmjs/WasmSignalHandlers.cpp
+++ b/js/src/asmjs/WasmSignalHandlers.cpp
@@ -314,17 +314,17 @@ enum { REG_EIP = 14 };
 #if !defined(XP_WIN)
 # define CONTEXT ucontext_t
 #endif
 
 // Define a context type for use in the emulator code. This is usually just
 // the same as CONTEXT, but on Mac we use a different structure since we call
 // into the emulator code from a Mach exception handler rather than a
 // sigaction-style signal handler.
-#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
+#if defined(XP_DARWIN)
 # if defined(JS_CODEGEN_X64)
 struct macos_x64_context {
     x86_thread_state64_t thread;
     x86_float_state64_t float_;
 };
 #  define EMULATOR_CONTEXT macos_x64_context
 # elif defined(JS_CODEGEN_X86)
 struct macos_x86_context {
@@ -362,18 +362,16 @@ ContextToPC(CONTEXT* context)
 {
 #ifdef JS_CODEGEN_NONE
     MOZ_CRASH();
 #else
     return reinterpret_cast<uint8_t**>(&PC_sig(context));
 #endif
 }
 
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-
 #if defined(JS_CODEGEN_X64)
 MOZ_COLD static void
 SetFPRegToNaN(size_t size, void* fp_reg)
 {
     MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
     memset(fp_reg, 0, Simd128DataSize);
     switch (size) {
       case 4: *static_cast<float*>(fp_reg) = GenericNaN(); break;
@@ -533,17 +531,16 @@ AddressOfGPRegisterSlot(EMULATOR_CONTEXT
       case X86Encoding::r13: return &context->thread.__r13;
       case X86Encoding::r14: return &context->thread.__r14;
       case X86Encoding::r15: return &context->thread.__r15;
       default: break;
     }
     MOZ_CRASH();
 }
 # endif  // !XP_DARWIN
-#endif // JS_CODEGEN_X64
 
 MOZ_COLD static void
 SetRegisterToCoercedUndefined(EMULATOR_CONTEXT* context, size_t size,
                               const Disassembler::OtherOperand& value)
 {
     if (value.kind() == Disassembler::OtherOperand::FPR)
         SetFPRegToNaN(size, AddressOfFPRegisterSlot(context, value.fpr()));
     else
@@ -604,28 +601,26 @@ ComputeAccessAddress(EMULATOR_CONTEXT* c
     return reinterpret_cast<uint8_t*>(result);
 }
 
 MOZ_COLD static uint8_t*
 EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
                   const MemoryAccess* memoryAccess, const Instance& instance)
 {
     MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(pc));
-    MOZ_RELEASE_ASSERT(instance.metadata().assumptions.usesSignal.forOOB);
     MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - instance.codeBase()));
 
     // Disassemble the instruction which caused the trap so that we can extract
     // information about it and decide what to do.
     Disassembler::HeapAccess access;
     uint8_t* end = Disassembler::DisassembleHeapAccess(pc, &access);
     const Disassembler::ComplexAddress& address = access.address();
     MOZ_RELEASE_ASSERT(end > pc);
     MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(end));
 
-#if defined(JS_CODEGEN_X64)
     // Check x64 asm.js heap access invariants.
     MOZ_RELEASE_ASSERT(address.disp() >= 0);
     MOZ_RELEASE_ASSERT(address.base() == HeapReg.code());
     MOZ_RELEASE_ASSERT(!address.hasIndex() || address.index() != HeapReg.code());
     MOZ_RELEASE_ASSERT(address.scale() == 0);
     if (address.hasBase()) {
         uintptr_t base;
         StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
@@ -633,31 +628,32 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
         MOZ_RELEASE_ASSERT(reinterpret_cast<uint8_t*>(base) == instance.memoryBase());
     }
     if (address.hasIndex()) {
         uintptr_t index;
         StoreValueFromGPReg(SharedMem<void*>::unshared(&index), sizeof(uintptr_t),
                             AddressOfGPRegisterSlot(context, address.index()));
         MOZ_RELEASE_ASSERT(uint32_t(index) == index);
     }
-#endif
 
     // Determine the actual effective address of the faulting access. We can't
     // rely on the faultingAddress given to us by the OS, because we need the
     // address of the start of the access, and the OS may sometimes give us an
     // address somewhere in the middle of the heap access.
     uint8_t* accessAddress = ComputeAccessAddress(context, address);
     MOZ_RELEASE_ASSERT(size_t(faultingAddress - accessAddress) < access.size(),
                        "Given faulting address does not appear to be within computed "
                        "faulting address range");
     MOZ_RELEASE_ASSERT(accessAddress >= instance.memoryBase(),
                        "Access begins outside the asm.js heap");
-    MOZ_RELEASE_ASSERT(accessAddress + access.size() <= instance.memoryBase() + MappedSize,
+    MOZ_RELEASE_ASSERT(accessAddress + access.size() <= instance.memoryBase() +
+                       instance.memoryMappedSize(),
                        "Access extends beyond the asm.js heap guard region");
-    MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.memoryBase() + instance.memoryLength(),
+    MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.memoryBase() +
+                       instance.memoryLength(),
                        "Computed access address is not actually out of bounds");
 
     // Wasm loads/stores don't wrap offsets at all, so hitting the guard page
     // means we are out of bounds in any cases.
     if (!memoryAccess->wrapOffset()) {
         MOZ_ASSERT(memoryAccess->throwOnOOB());
         return instance.codeSegment().outOfBoundsCode();
     }
@@ -737,42 +733,23 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
             MOZ_CRASH("no int64 accesses in asm.js");
           case Disassembler::HeapAccess::Unknown:
             MOZ_CRASH("Failed to disassemble instruction");
         }
     }
 
     return end;
 }
-
-#elif defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
-
-MOZ_COLD static uint8_t*
-EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
-                  const MemoryAccess* memoryAccess, const Instance& instance)
-{
-    // We forbid ARM instruction sets below ARMv7, so that solves unaligned
-    // integer memory accesses. So the only way to land here is because of a
-    // non-default configured kernel or an unaligned floating-point access.
-    // TODO Handle FPU unaligned accesses on ARM (bug 1283121).
-    return instance.codeSegment().unalignedAccessCode();
-}
-
-#endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
-
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
+#endif // JS_CODEGEN_X64
 
 MOZ_COLD static bool
 IsHeapAccessAddress(const Instance &instance, uint8_t* faultingAddress)
 {
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-    size_t accessLimit = MappedSize;
-#elif defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
-    size_t accessLimit = instance.memoryLength();
-#endif
+    size_t accessLimit = instance.memoryMappedSize();
+
     return instance.metadata().usesMemory() &&
            faultingAddress >= instance.memoryBase() &&
            faultingAddress < instance.memoryBase() + accessLimit;
 }
 
 #if defined(XP_WIN)
 
 static bool
@@ -817,25 +794,28 @@ HandleFault(PEXCEPTION_POINTERS exceptio
         // to InterruptRunningCode's use of SuspendThread. When this happens,
         // after ResumeThread, the exception handler is called with pc equal to
         // instance.interrupt, which is logically wrong. The Right Thing would
         // be for the OS to make fault-handling atomic (so that CONTEXT.pc was
         // always the logically-faulting pc). Fortunately, we can detect this
         // case and silence the exception ourselves (the exception will
         // retrigger after the interrupt jumps back to resumePC).
         return pc == instance->codeSegment().interruptCode() &&
-               instance->codeSegment().containsFunctionPC(activation->resumePC()) &&
-               instance->code().lookupMemoryAccess(activation->resumePC());
+               instance->codeSegment().containsFunctionPC(activation->resumePC());
     }
 
+#ifdef WASM_HUGE_MEMORY
     const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
     if (!memoryAccess)
-        return false;
-
-    *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
+        *ppc = instance->codeSegment().outOfBoundsCode();
+    else
+        *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
+#else
+    *ppc = instance->codeSegment().outOfBoundsCode();
+#endif
     return true;
 }
 
 static LONG WINAPI
 AsmJSFaultHandler(LPEXCEPTION_POINTERS exception)
 {
     if (HandleFault(exception))
         return EXCEPTION_CONTINUE_EXECUTION;
@@ -948,21 +928,25 @@ HandleMachException(JSRuntime* rt, const
 
     uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(request.body.code[1]);
 
     // This check isn't necessary, but, since we can, check anyway to make
     // sure we aren't covering up a real bug.
     if (!IsHeapAccessAddress(*instance, faultingAddress))
         return false;
 
+#ifdef WASM_HUGE_MEMORY
     const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
     if (!memoryAccess)
-        return false;
-
-    *ppc = EmulateHeapAccess(&context, pc, faultingAddress, memoryAccess, *instance);
+        *ppc = instance->codeSegment().outOfBoundsCode();
+    else
+        *ppc = EmulateHeapAccess(&context, pc, faultingAddress, memoryAccess, *instance);
+#else
+    *ppc = instance->codeSegment().outOfBoundsCode();
+#endif
 
     // Update the thread state with the new pc and register values.
     kret = thread_set_state(rtThread, float_state, (thread_state_t)&context.float_, float_state_count);
     if (kret != KERN_SUCCESS)
         return false;
     kret = thread_set_state(rtThread, thread_state, (thread_state_t)&context.thread, thread_state_count);
     if (kret != KERN_SUCCESS)
         return false;
@@ -1163,21 +1147,33 @@ HandleFault(int signum, siginfo_t* info,
 
     uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(info->si_addr);
 
     // This check isn't necessary, but, since we can, check anyway to make
     // sure we aren't covering up a real bug.
     if (!IsHeapAccessAddress(*instance, faultingAddress))
         return false;
 
+#ifdef WASM_HUGE_MEMORY
+    MOZ_RELEASE_ASSERT(signal == Signal::SegFault);
     const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
-    if (signal == Signal::SegFault && !memoryAccess)
-        return false;
-
-    *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
+    if (!memoryAccess)
+        *ppc = instance->codeSegment().outOfBoundsCode();
+    else
+        *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
+#elif defined(JS_CODEGEN_ARM)
+    MOZ_RELEASE_ASSERT(signal == Signal::BusError || signal == Signal::SegFault);
+    if (signal == Signal::BusError)
+        *ppc = instance->codeSegment().unalignedAccessCode();
+    else
+        *ppc = instance->codeSegment().outOfBoundsCode();
+#else
+    MOZ_RELEASE_ASSERT(signal == Signal::SegFault);
+    *ppc = instance->codeSegment().outOfBoundsCode();
+#endif
 
     return true;
 }
 
 static struct sigaction sPrevSEGVHandler;
 static struct sigaction sPrevSIGBUSHandler;
 
 template<Signal signal>
@@ -1206,17 +1202,16 @@ AsmJSFaultHandler(int signum, siginfo_t*
     if (previousSignal->sa_flags & SA_SIGINFO)
         previousSignal->sa_sigaction(signum, info, context);
     else if (previousSignal->sa_handler == SIG_DFL || previousSignal->sa_handler == SIG_IGN)
         sigaction(signum, previousSignal, nullptr);
     else
         previousSignal->sa_handler(signum);
 }
 # endif // XP_WIN || XP_DARWIN || assume unix
-#endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
 
 static void
 RedirectIonBackedgesToInterruptCheck(JSRuntime* rt)
 {
     if (jit::JitRuntime* jitRuntime = rt->jitRuntime()) {
         // If the backedge list is being mutated, the pc must be in C++ code and
         // thus not in a JIT iloop. We assume that the interrupt flag will be
         // checked at least once before entering JIT code (if not, no big deal;
@@ -1271,25 +1266,26 @@ JitInterruptHandler(int signum, siginfo_
 {
     if (JSRuntime* rt = RuntimeForCurrentThread()) {
         RedirectJitCodeToInterruptCheck(rt, (CONTEXT*)context);
         rt->finishHandlingJitInterrupt();
     }
 }
 #endif
 
+static bool sTriedInstallSignalHandlers = false;
+static bool sHaveSignalHandlers = false;
+
 static bool
 ProcessHasSignalHandlers()
 {
     // We assume that there are no races creating the first JSRuntime of the process.
-    static bool sTried = false;
-    static bool sResult = false;
-    if (sTried)
-        return sResult;
-    sTried = true;
+    if (sTriedInstallSignalHandlers)
+        return sHaveSignalHandlers;
+    sTriedInstallSignalHandlers = true;
 
     // Developers might want to forcibly disable signals to avoid seeing
     // spurious SIGSEGVs in the debugger.
     if (getenv("JS_DISABLE_SLOW_SCRIPT_SIGNALS") || getenv("JS_NO_SIGNALS"))
         return false;
 
 #if defined(ANDROID)
     // Before Android 4.4 (SDK version 19), there is a bug
@@ -1328,83 +1324,73 @@ ProcessHasSignalHandlers()
     // doing to avoid problematic interference.
     if ((prev.sa_flags & SA_SIGINFO && prev.sa_sigaction) ||
         (prev.sa_handler != SIG_DFL && prev.sa_handler != SIG_IGN))
     {
         MOZ_CRASH("contention for interrupt signal");
     }
 #endif // defined(XP_WIN)
 
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
     // Install a SIGSEGV handler to handle safely-out-of-bounds asm.js heap
     // access and/or unaligned accesses.
 # if defined(XP_WIN)
     if (!AddVectoredExceptionHandler(/* FirstHandler = */ true, AsmJSFaultHandler))
         return false;
 # elif defined(XP_DARWIN)
     // OSX handles seg faults via the Mach exception handler above, so don't
     // install AsmJSFaultHandler.
 # else
     // SA_NODEFER allows us to reenter the signal handler if we crash while
     // handling the signal, and fall through to the Breakpad handler by testing
     // handlingSegFault.
 
-#  if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
+    // Allow handling OOB faults with signals on all architectures.
     struct sigaction faultHandler;
     faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER;
     faultHandler.sa_sigaction = &AsmJSFaultHandler<Signal::SegFault>;
     sigemptyset(&faultHandler.sa_mask);
     if (sigaction(SIGSEGV, &faultHandler, &sPrevSEGVHandler))
         MOZ_CRASH("unable to install segv handler");
-#  elif defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
+
+#  if defined(JS_CODEGEN_ARM)
+    // On ARM, also handle unaligned accesses via the SIGBUS handler.
     struct sigaction busHandler;
     busHandler.sa_flags = SA_SIGINFO | SA_NODEFER;
     busHandler.sa_sigaction = &AsmJSFaultHandler<Signal::BusError>;
     sigemptyset(&busHandler.sa_mask);
     if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler))
         MOZ_CRASH("unable to install sigbus handler");
 #  endif
 # endif
-#endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
 
-    sResult = true;
+    sHaveSignalHandlers = true;
     return true;
 }
 
 bool
 wasm::EnsureSignalHandlers(JSRuntime* rt)
 {
     // Nothing to do if the platform doesn't support it.
     if (!ProcessHasSignalHandlers())
         return true;
 
-#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
+#if defined(XP_DARWIN)
     // On OSX, each JSRuntime gets its own handler thread.
     if (!rt->wasmMachExceptionHandler.installed() && !rt->wasmMachExceptionHandler.install(rt))
         return false;
 #endif
 
     return true;
 }
 
-static bool sHandlersSuppressedForTesting = false;
-
 bool
 wasm::HaveSignalHandlers()
 {
-    if (!ProcessHasSignalHandlers())
-        return false;
-
-    return !sHandlersSuppressedForTesting;
-}
-
-void
-wasm::SuppressSignalHandlersForTesting(bool suppress)
-{
-    sHandlersSuppressedForTesting = suppress;
+    MOZ_ASSERT(sTriedInstallSignalHandlers);
+    return sHaveSignalHandlers;
 }
 
 // JSRuntime::requestInterrupt sets interrupt_ (which is checked frequently by
 // C++ code at every Baseline JIT loop backedge) and jitStackLimit_ (which is
 // checked at every Baseline and Ion JIT function prologue). The remaining
 // sources of potential iloops (Ion loop backedges and all wasm code) are
 // handled by this function:
 //  1. Ion loop backedges are patched to instead point to a stub that handles
--- a/js/src/asmjs/WasmSignalHandlers.h
+++ b/js/src/asmjs/WasmSignalHandlers.h
@@ -16,17 +16,17 @@
  * limitations under the License.
  */
 
 #ifndef wasm_signal_handlers_h
 #define wasm_signal_handlers_h
 
 #include "mozilla/Attributes.h"
 
-#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
+#if defined(XP_DARWIN)
 # include <mach/mach.h>
 #endif
 #include "threading/Thread.h"
 
 struct JSRuntime;
 
 namespace js {
 
@@ -37,27 +37,22 @@ InterruptRunningJitCode(JSRuntime* rt);
 namespace wasm {
 
 // Ensure the given JSRuntime is set up to use signals. Failure to enable signal
 // handlers indicates some catastrophic failure and creation of the runtime must
 // fail.
 MOZ_MUST_USE bool
 EnsureSignalHandlers(JSRuntime* rt);
 
-// Return whether signals can be used in this process for interrupts or, ifdef
-// ASMJS_MAY_USE_SIGNAL_HANDLERS, asm.js/wasm out-of-bounds. This value can
-// change over time solely due to DisableSignalHandlersForTesting.
+// Return whether signals can be used in this process for interrupts or
+// asm.js/wasm out-of-bounds.
 bool
 HaveSignalHandlers();
 
-// Artificially suppress signal handler support, for testing purposes.
-void
-SuppressSignalHandlersForTesting(bool suppress);
-
-#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
+#if defined(XP_DARWIN)
 // On OSX we are forced to use the lower-level Mach exception mechanism instead
 // of Unix signals. Mach exceptions are not handled on the victim's stack but
 // rather require an extra thread. For simplicity, we create one such thread
 // per JSRuntime (upon the first use of asm.js in the JSRuntime). This thread
 // and related resources are owned by AsmJSMachExceptionHandler which is owned
 // by JSRuntime.
 class MachExceptionHandler
 {
--- a/js/src/asmjs/WasmTextToBinary.cpp
+++ b/js/src/asmjs/WasmTextToBinary.cpp
@@ -95,17 +95,16 @@ class WasmToken
         Index,
         Memory,
         NegativeZero,
         Load,
         Local,
         Loop,
         Module,
         Name,
-        Nop,
         Offset,
         OpenParen,
         Param,
         Resizable,
         Result,
         Return,
         Segment,
         SetGlobal,
@@ -113,16 +112,17 @@ class WasmToken
         SignedInteger,
         Start,
         Store,
         Table,
         TernaryOpcode,
         Text,
         Then,
         Type,
+        NullaryOpcode,
         UnaryOpcode,
         Unreachable,
         UnsignedInteger,
         ValueType
     };
   private:
     Kind kind_;
     const char16_t* begin_;
@@ -190,17 +190,17 @@ class WasmToken
     explicit WasmToken(Kind kind, Expr expr, const char16_t* begin, const char16_t* end)
       : kind_(kind),
         begin_(begin),
         end_(end)
     {
         MOZ_ASSERT(begin != end);
         MOZ_ASSERT(kind_ == UnaryOpcode || kind_ == BinaryOpcode || kind_ == TernaryOpcode ||
                    kind_ == ComparisonOpcode || kind_ == ConversionOpcode ||
-                   kind_ == Load || kind_ == Store);
+                   kind_ == Load || kind_ == Store || kind_ == NullaryOpcode);
         u.expr_ = expr;
     }
     explicit WasmToken(const char16_t* begin)
       : kind_(Error),
         begin_(begin),
         end_(begin)
     {}
     Kind kind() const {
@@ -240,17 +240,17 @@ class WasmToken
     }
     ValType valueType() const {
         MOZ_ASSERT(kind_ == ValueType || kind_ == Const);
         return u.valueType_;
     }
     Expr expr() const {
         MOZ_ASSERT(kind_ == UnaryOpcode || kind_ == BinaryOpcode || kind_ == TernaryOpcode ||
                    kind_ == ComparisonOpcode || kind_ == ConversionOpcode ||
-                   kind_ == Load || kind_ == Store);
+                   kind_ == Load || kind_ == Store || kind_ == NullaryOpcode);
         return u.expr_;
     }
 };
 
 } // end anonymous namespace
 
 static bool
 IsWasmNewLine(char16_t c)
@@ -752,16 +752,18 @@ WasmTokenStream::next()
       case 'c':
         if (consume(u"call")) {
             if (consume(u"_indirect"))
                 return WasmToken(WasmToken::CallIndirect, begin, cur_);
             if (consume(u"_import"))
                 return WasmToken(WasmToken::CallImport, begin, cur_);
             return WasmToken(WasmToken::Call, begin, cur_);
         }
+        if (consume(u"current_memory"))
+            return WasmToken(WasmToken::NullaryOpcode, Expr::CurrentMemory, begin, cur_);
         break;
 
       case 'd':
         if (consume(u"data"))
             return WasmToken(WasmToken::Data, begin, cur_);
         break;
 
       case 'e':
@@ -983,16 +985,18 @@ WasmTokenStream::next()
 
       case 'g':
         if (consume(u"get_global"))
             return WasmToken(WasmToken::GetGlobal, begin, cur_);
         if (consume(u"get_local"))
             return WasmToken(WasmToken::GetLocal, begin, cur_);
         if (consume(u"global"))
             return WasmToken(WasmToken::Global, begin, cur_);
+        if (consume(u"grow_memory"))
+            return WasmToken(WasmToken::UnaryOpcode, Expr::GrowMemory, begin, cur_);
         break;
 
       case 'i':
         if (consume(u"i32")) {
             if (!consume(u"."))
                 return WasmToken(WasmToken::ValueType, ValType::I32, begin, cur_);
 
             switch (*cur_) {
@@ -1297,17 +1301,17 @@ WasmTokenStream::next()
         if (consume(u"memory"))
             return WasmToken(WasmToken::Memory, begin, cur_);
         break;
 
       case 'n':
         if (consume(u"nan"))
             return nan(begin);
         if (consume(u"nop"))
-            return WasmToken(WasmToken::Nop, begin, cur_);
+            return WasmToken(WasmToken::NullaryOpcode, Expr::Nop, begin, cur_);
         break;
 
       case 'o':
         if (consume(u"offset"))
             return WasmToken(WasmToken::Offset, begin, cur_);
         break;
 
       case 'p':
@@ -1909,16 +1913,22 @@ ParseUnaryOperator(WasmParseContext& c, 
 {
     AstExpr* op = ParseExpr(c);
     if (!op)
         return nullptr;
 
     return new(c.lifo) AstUnaryOperator(expr, op);
 }
 
+static AstNullaryOperator*
+ParseNullaryOperator(WasmParseContext& c, Expr expr)
+{
+    return new(c.lifo) AstNullaryOperator(expr);
+}
+
 static AstBinaryOperator*
 ParseBinaryOperator(WasmParseContext& c, Expr expr)
 {
     AstExpr* lhs = ParseExpr(c);
     if (!lhs)
         return nullptr;
 
     AstExpr* rhs = ParseExpr(c);
@@ -2178,18 +2188,16 @@ ParseBranchTable(WasmParseContext& c, Wa
 }
 
 static AstExpr*
 ParseExprInsideParens(WasmParseContext& c)
 {
     WasmToken token = c.ts.get();
 
     switch (token.kind()) {
-      case WasmToken::Nop:
-        return new(c.lifo) AstNop;
       case WasmToken::Unreachable:
         return new(c.lifo) AstUnreachable;
       case WasmToken::BinaryOpcode:
         return ParseBinaryOperator(c, token.expr());
       case WasmToken::Block:
         return ParseBlock(c, Expr::Block);
       case WasmToken::Br:
         return ParseBranch(c, Expr::Br);
@@ -2226,16 +2234,18 @@ ParseExprInsideParens(WasmParseContext& 
       case WasmToken::SetLocal:
         return ParseSetLocal(c);
       case WasmToken::Store:
         return ParseStore(c, token.expr());
       case WasmToken::TernaryOpcode:
         return ParseTernaryOperator(c, token.expr());
       case WasmToken::UnaryOpcode:
         return ParseUnaryOperator(c, token.expr());
+      case WasmToken::NullaryOpcode:
+        return ParseNullaryOperator(c, token.expr());
       default:
         c.ts.generateError(token, c.error);
         return nullptr;
     }
 }
 
 static bool
 ParseValueTypeList(WasmParseContext& c, AstValTypeVector* vec)
@@ -3112,17 +3122,17 @@ ResolveBranchTable(Resolver& r, AstBranc
 
     return ResolveExpr(r, bt.index());
 }
 
 static bool
 ResolveExpr(Resolver& r, AstExpr& expr)
 {
     switch (expr.kind()) {
-      case AstExprKind::Nop:
+      case AstExprKind::NullaryOperator:
       case AstExprKind::Unreachable:
         return true;
       case AstExprKind::BinaryOperator:
         return ResolveBinaryOperator(r, expr.as<AstBinaryOperator>());
       case AstExprKind::Block:
         return ResolveBlock(r, expr.as<AstBlock>());
       case AstExprKind::Branch:
         return ResolveBranch(r, expr.as<AstBranch>());
@@ -3439,16 +3449,22 @@ EncodeSetGlobal(Encoder& e, AstSetGlobal
 static bool
 EncodeUnaryOperator(Encoder& e, AstUnaryOperator& b)
 {
     return EncodeExpr(e, *b.op()) &&
            e.writeExpr(b.expr());
 }
 
 static bool
+EncodeNullaryOperator(Encoder& e, AstNullaryOperator& b)
+{
+    return e.writeExpr(b.expr());
+}
+
+static bool
 EncodeBinaryOperator(Encoder& e, AstBinaryOperator& b)
 {
     return EncodeExpr(e, *b.lhs()) &&
            EncodeExpr(e, *b.rhs()) &&
            e.writeExpr(b.expr());
 }
 
 static bool
@@ -3575,18 +3591,16 @@ EncodeBranchTable(Encoder& e, AstBranchT
 
     return true;
 }
 
 static bool
 EncodeExpr(Encoder& e, AstExpr& expr)
 {
     switch (expr.kind()) {
-      case AstExprKind::Nop:
-        return e.writeExpr(Expr::Nop);
       case AstExprKind::Unreachable:
         return e.writeExpr(Expr::Unreachable);
       case AstExprKind::BinaryOperator:
         return EncodeBinaryOperator(e, expr.as<AstBinaryOperator>());
       case AstExprKind::Block:
         return EncodeBlock(e, expr.as<AstBlock>());
       case AstExprKind::Branch:
         return EncodeBranch(e, expr.as<AstBranch>());
@@ -3617,16 +3631,18 @@ EncodeExpr(Encoder& e, AstExpr& expr)
       case AstExprKind::Store:
         return EncodeStore(e, expr.as<AstStore>());
       case AstExprKind::BranchTable:
         return EncodeBranchTable(e, expr.as<AstBranchTable>());
       case AstExprKind::TernaryOperator:
         return EncodeTernaryOperator(e, expr.as<AstTernaryOperator>());
       case AstExprKind::UnaryOperator:
         return EncodeUnaryOperator(e, expr.as<AstUnaryOperator>());
+      case AstExprKind::NullaryOperator:
+        return EncodeNullaryOperator(e, expr.as<AstNullaryOperator>());
     }
     MOZ_CRASH("Bad expr kind");
 }
 
 /*****************************************************************************/
 // wasm AST binary serialization
 
 static bool
--- a/js/src/asmjs/WasmTypes.cpp
+++ b/js/src/asmjs/WasmTypes.cpp
@@ -346,44 +346,27 @@ wasm::AddressOf(SymbolicAddress imm, Exc
       case SymbolicAddress::ExpD:
         return FuncCast<double (double)>(fdlibm::exp, Args_Double_Double);
       case SymbolicAddress::LogD:
         return FuncCast<double (double)>(fdlibm::log, Args_Double_Double);
       case SymbolicAddress::PowD:
         return FuncCast(ecmaPow, Args_Double_DoubleDouble);
       case SymbolicAddress::ATan2D:
         return FuncCast(ecmaAtan2, Args_Double_DoubleDouble);
+      case SymbolicAddress::GrowMemory:
+        return FuncCast<uint32_t (Instance*, uint32_t)>(Instance::growMemory_i32, Args_General2);
+      case SymbolicAddress::CurrentMemory:
+        return FuncCast<uint32_t (Instance*)>(Instance::currentMemory_i32, Args_General1);
       case SymbolicAddress::Limit:
         break;
     }
 
     MOZ_CRASH("Bad SymbolicAddress");
 }
 
-SignalUsage::SignalUsage()
-  :
-#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
-    // Signal-handling is only used to eliminate bounds checks when the OS page
-    // size is an even divisor of the WebAssembly page size.
-    forOOB(HaveSignalHandlers() &&
-           gc::SystemPageSize() <= PageSize &&
-           PageSize % gc::SystemPageSize() == 0 &&
-           !JitOptions.wasmExplicitBoundsChecks),
-#else
-    forOOB(false),
-#endif
-    forInterrupt(HaveSignalHandlers())
-{}
-
-bool
-SignalUsage::operator==(SignalUsage rhs) const
-{
-    return forOOB == rhs.forOOB && forInterrupt == rhs.forInterrupt;
-}
-
 static uint32_t
 GetCPUID()
 {
     enum Arch {
         X86 = 0x1,
         X64 = 0x2,
         ARM = 0x3,
         MIPS = 0x4,
@@ -560,25 +543,23 @@ SigWithId::deserialize(const uint8_t* cu
 
 size_t
 SigWithId::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return Sig::sizeOfExcludingThis(mallocSizeOf);
 }
 
 Assumptions::Assumptions(JS::BuildIdCharVector&& buildId)
-  : usesSignal(),
-    cpuId(GetCPUID()),
+  : cpuId(GetCPUID()),
     buildId(Move(buildId)),
     newFormat(false)
 {}
 
 Assumptions::Assumptions()
-  : usesSignal(),
-    cpuId(GetCPUID()),
+  : cpuId(GetCPUID()),
     buildId(),
     newFormat(false)
 {}
 
 bool
 Assumptions::initBuildIdFromContext(ExclusiveContext* cx)
 {
     if (!cx->buildIdOp() || !cx->buildIdOp()(&buildId)) {
@@ -586,58 +567,107 @@ Assumptions::initBuildIdFromContext(Excl
         return false;
     }
     return true;
 }
 
 bool
 Assumptions::clone(const Assumptions& other)
 {
-    usesSignal = other.usesSignal;
     cpuId = other.cpuId;
     newFormat = other.newFormat;
     return buildId.appendAll(other.buildId);
 }
 
 bool
 Assumptions::operator==(const Assumptions& rhs) const
 {
-    return usesSignal == rhs.usesSignal &&
-           cpuId == rhs.cpuId &&
+    return cpuId == rhs.cpuId &&
            buildId.length() == rhs.buildId.length() &&
            PodEqual(buildId.begin(), rhs.buildId.begin(), buildId.length()) &&
            newFormat == rhs.newFormat;
 }
 
 size_t
 Assumptions::serializedSize() const
 {
-    return sizeof(usesSignal) +
-           sizeof(uint32_t) +
+    return sizeof(uint32_t) +
            SerializedPodVectorSize(buildId) +
            sizeof(bool);
 }
 
 uint8_t*
 Assumptions::serialize(uint8_t* cursor) const
 {
-    cursor = WriteBytes(cursor, &usesSignal, sizeof(usesSignal));
     cursor = WriteScalar<uint32_t>(cursor, cpuId);
     cursor = SerializePodVector(cursor, buildId);
     cursor = WriteScalar<bool>(cursor, newFormat);
     return cursor;
 }
 
 const uint8_t*
 Assumptions::deserialize(const uint8_t* cursor)
 {
-    (cursor = ReadBytes(cursor, &usesSignal, sizeof(usesSignal))) &&
     (cursor = ReadScalar<uint32_t>(cursor, &cpuId)) &&
     (cursor = DeserializePodVector(cursor, &buildId)) &&
     (cursor = ReadScalar<bool>(cursor, &newFormat));
     return cursor;
 }
 
 size_t
 Assumptions::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return buildId.sizeOfExcludingThis(mallocSizeOf);
 }
+
+//  Heap length on ARM should fit in an ARM immediate. We approximate the set
+//  of valid ARM immediates with the predicate:
+//    2^n for n in [16, 24)
+//  or
+//    2^24 * n for n >= 1.
+bool
+wasm::IsValidARMLengthImmediate(uint32_t length)
+{
+    bool valid = (IsPowerOfTwo(length) ||
+                  (length & 0x00ffffff) == 0);
+
+    MOZ_ASSERT_IF(valid, length % PageSize == 0);
+
+    return valid;
+}
+
+uint32_t
+wasm::RoundUpToNextValidARMLengthImmediate(uint32_t length)
+{
+    MOZ_ASSERT(length <= 0xff000000);
+
+    if (length <= 16 * 1024 * 1024)
+        length = length ? mozilla::RoundUpPow2(length) : 0;
+    else
+        length = (length + 0x00ffffff) & ~0x00ffffff;
+
+    MOZ_ASSERT(IsValidARMLengthImmediate(length));
+
+    return length;
+}
+
+size_t
+wasm::LegalizeMapLength(size_t requestedSize)
+{
+#ifdef WASM_HUGE_MEMORY
+    // On 64-bit platforms just give us a 4G guard region
+    return wasm::MappedSize;
+#else
+    uint32_t res = requestedSize;
+
+    // On 32-bit platforms clamp down to 1GB
+    uint32_t MaxMappedSize = (1 << 30);
+    res = Min(res, MaxMappedSize);
+
+# ifdef JS_CODEGEN_ARM
+    // On ARM, round so that it fits in a single instruction
+    res = RoundUpToNextValidARMLengthImmediate(res);
+    MOZ_RELEASE_ASSERT(res <= MaxMappedSize);
+# endif
+
+    return res;
+#endif
+}
--- a/js/src/asmjs/WasmTypes.h
+++ b/js/src/asmjs/WasmTypes.h
@@ -805,19 +805,19 @@ class BoundsCheck
     void offsetBy(uint32_t offset) { cmpOffset_ += offset; }
 
   private:
     uint32_t cmpOffset_; // absolute offset of the comparison
 };
 
 // Summarizes a heap access made by wasm code that needs to be patched later
 // and/or looked up by the wasm signal handlers. Different architectures need
-// to know different things (x64: offset and length, ARM: where to patch in
-// heap length, x86: where to patch in heap length and base).
-
+// to know different things (x64: instruction offset, wrapping and failure
+// behavior, ARM: nothing, x86: offset of end of instruction (heap length to
+// patch is last 4 bytes of instruction)).
 #if defined(JS_CODEGEN_X86)
 class MemoryAccess
 {
     uint32_t nextInsOffset_;
 
   public:
     MemoryAccess() = default;
 
@@ -863,17 +863,17 @@ class MemoryAccess
     bool throwOnOOB() const { return throwOnOOB_; }
     bool wrapOffset() const { return wrapOffset_; }
 
     void offsetBy(uint32_t offset) { insnOffset_ += offset; }
 };
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
       defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
       defined(JS_CODEGEN_NONE)
-// Nothing! We just want bounds checks on these platforms.
+// Nothing! We don't patch or emulate memory accesses on these platforms.
 class MemoryAccess {
   public:
     void offsetBy(uint32_t) { MOZ_CRASH(); }
     uint32_t insnOffset() const { MOZ_CRASH(); }
 };
 #endif
 
 WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector)
@@ -933,16 +933,18 @@ enum class SymbolicAddress
     DivI64,
     UDivI64,
     ModI64,
     UModI64,
     TruncateDoubleToInt64,
     TruncateDoubleToUint64,
     Uint64ToFloatingPoint,
     Int64ToFloatingPoint,
+    GrowMemory,
+    CurrentMemory,
     Limit
 };
 
 void*
 AddressOf(SymbolicAddress imm, ExclusiveContext* cx);
 
 // A wasm::Trap is a reason for why we reached a trap in executed code. Each
 // different trap is mapped to a different error message.
@@ -990,38 +992,22 @@ enum class JumpTarget
     // Non-traps
     StackOverflow,
     Throw,
     Limit
 };
 
 typedef EnumeratedArray<JumpTarget, JumpTarget::Limit, Uint32Vector> JumpSiteArray;
 
-// The SignalUsage struct captures global parameters that affect all wasm code
-// generation. It also currently is the single source of truth for whether or
-// not to use signal handlers for different purposes.
-
-struct SignalUsage
-{
-    // NB: these fields are serialized as a POD in Assumptions.
-    bool forOOB;
-    bool forInterrupt;
-
-    SignalUsage();
-    bool operator==(SignalUsage rhs) const;
-    bool operator!=(SignalUsage rhs) const { return !(*this == rhs); }
-};
-
 // Assumptions captures ambient state that must be the same when compiling and
 // deserializing a module for the compiled code to be valid. If it's not, then
 // the module must be recompiled from scratch.
 
 struct Assumptions
 {
-    SignalUsage           usesSignal;
     uint32_t              cpuId;
     JS::BuildIdCharVector buildId;
     bool                  newFormat;
 
     explicit Assumptions(JS::BuildIdCharVector&& buildId);
 
     // If Assumptions is constructed without arguments, initBuildIdFromContext()
     // must be called to complete initialization.
@@ -1068,17 +1054,19 @@ struct TableDesc
 WASM_DECLARE_POD_VECTOR(TableDesc, TableDescVector)
 
 // CalleeDesc describes how to compile one of the variety of asm.js/wasm calls.
 // This is hoisted into WasmTypes.h for sharing between Ion and Baseline.
 
 class CalleeDesc
 {
   public:
-    enum Which { Internal, Import, WasmTable, AsmJSTable, Builtin };
+    // Unlike Builtin, BuiltinInstanceMethod expects an implicit Instance*
+    // as its first argument. (e.g. see Instance::growMemory)
+    enum Which { Internal, Import, WasmTable, AsmJSTable, Builtin, BuiltinInstanceMethod };
 
   private:
     Which which_;
     union U {
         U() {}
         uint32_t internalFuncIndex_;
         struct {
             uint32_t globalDataOffset_;
@@ -1118,16 +1106,22 @@ class CalleeDesc
         return c;
     }
     static CalleeDesc builtin(SymbolicAddress callee) {
         CalleeDesc c;
         c.which_ = Builtin;
         c.u.builtin_ = callee;
         return c;
     }
+    static CalleeDesc builtinInstanceMethod(SymbolicAddress callee) {
+        CalleeDesc c;
+        c.which_ = BuiltinInstanceMethod;
+        c.u.builtin_ = callee;
+        return c;
+    }
     Which which() const {
         return which_;
     }
     uint32_t internalFuncIndex() const {
         MOZ_ASSERT(which_ == Internal);
         return u.internalFuncIndex_;
     }
     uint32_t importGlobalDataOffset() const {
@@ -1149,17 +1143,17 @@ class CalleeDesc
         MOZ_ASSERT(which_ == WasmTable);
         return u.table.desc_.external;
     }
     SigIdDesc wasmTableSigId() const {
         MOZ_ASSERT(which_ == WasmTable);
         return u.table.sigId_;
     }
     SymbolicAddress builtin() const {
-        MOZ_ASSERT(which_ == Builtin);
+        MOZ_ASSERT(which_ == Builtin || which_ == BuiltinInstanceMethod);
         return u.builtin_;
     }
 };
 
 // ExportArg holds the unboxed operands to the wasm entry trampoline which can
 // be called through an ExportFuncPtr.
 
 struct ExportArg
@@ -1247,21 +1241,26 @@ struct ExternalTableElem
 };
 
 // Constants:
 
 // The WebAssembly spec hard-codes the virtual page size to be 64KiB and
 // requires linear memory to always be a multiple of 64KiB.
 static const unsigned PageSize = 64 * 1024;
 
-#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
+#ifdef JS_CODEGEN_X64
+#define WASM_HUGE_MEMORY
 static const uint64_t Uint32Range = uint64_t(UINT32_MAX) + 1;
 static const uint64_t MappedSize = 2 * Uint32Range + PageSize;
 #endif
 
+bool IsValidARMLengthImmediate(uint32_t length);
+uint32_t RoundUpToNextValidARMLengthImmediate(uint32_t length);
+size_t LegalizeMapLength(size_t requestedSize);
+
 static const unsigned NaN64GlobalDataOffset       = 0;
 static const unsigned NaN32GlobalDataOffset       = NaN64GlobalDataOffset + sizeof(double);
 static const unsigned InitialGlobalDataBytes      = NaN32GlobalDataOffset + sizeof(float);
 
 static const unsigned MaxSigs                     =        4 * 1024;
 static const unsigned MaxFuncs                    =      512 * 1024;
 static const unsigned MaxGlobals                  =        4 * 1024;
 static const unsigned MaxLocals                   =       64 * 1024;
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -509,38 +509,16 @@ static bool
 WasmIsSupported(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     args.rval().setBoolean(wasm::HasCompilerSupport(cx) && cx->options().wasm());
     return true;
 }
 
 static bool
-WasmUsesSignalForOOB(JSContext* cx, unsigned argc, Value* vp)
-{
-    CallArgs args = CallArgsFromVp(argc, vp);
-    args.rval().setBoolean(wasm::SignalUsage().forOOB);
-    return true;
-}
-
-static bool
-SuppressSignalHandlers(JSContext* cx, unsigned argc, Value* vp)
-{
-    CallArgs args = CallArgsFromVp(argc, vp);
-
-    if (!args.requireAtLeast(cx, "suppressSignalHandlers", 1))
-        return false;
-
-    wasm::SuppressSignalHandlersForTesting(ToBoolean(args[0]));
-
-    args.rval().setUndefined();
-    return true;
-}
-
-static bool
 WasmTextToBinary(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     RootedObject callee(cx, &args.callee());
 
     if (!args.requireAtLeast(cx, "wasmTextToBinary", 1))
         return false;
 
@@ -3830,25 +3808,16 @@ gc::ZealModeHelpText),
 "isAsmJSFunction(fn)",
 "  Returns whether the given value is a nested function in an asm.js module that has been\n"
 "  both compile- and link-time validated."),
 
     JS_FN_HELP("wasmIsSupported", WasmIsSupported, 0, 0,
 "wasmIsSupported()",
 "  Returns a boolean indicating whether WebAssembly is supported on the current device."),
 
-    JS_FN_HELP("wasmUsesSignalForOOB", WasmUsesSignalForOOB, 0, 0,
-"wasmUsesSignalForOOB()",
-"  Return whether wasm and asm.js use a signal handler for detecting out-of-bounds."),
-
-    JS_FN_HELP("suppressSignalHandlers", SuppressSignalHandlers, 1, 0,
-"suppressSignalHandlers(suppress)",
-"  This function allows artificially suppressing signal handler support, even if the underlying "
-"  platform supports it."),
-
     JS_FN_HELP("wasmTextToBinary", WasmTextToBinary, 1, 0,
 "wasmTextToBinary(str)",
 "  Translates the given text wasm module into its binary encoding."),
 
     JS_FN_HELP("wasmBinaryToText", WasmBinaryToText, 1, 0,
 "wasmBinaryToText(bin)",
 "  Translates binary encoding to text format"),
 
--- a/js/src/jit-test/tests/asm.js/testHeapAccess.js
+++ b/js/src/jit-test/tests/asm.js/testHeapAccess.js
@@ -20,34 +20,27 @@ setCachingEnabled(true);
 
 var code = asmCompile('glob', 'imp', 'b', USE_ASM + HEAP_IMPORTS + 'function f(i) {i=i|0; i32[0] = i; return i8[0]|0}; return f');
 var f = asmLink(code, this, null, new ArrayBuffer(BUF_MIN));
 assertEq(f(0),0);
 assertEq(f(0x7f),0x7f);
 assertEq(f(0xff),-1);
 assertEq(f(0x100),0);
 
-// Test signal handlers deactivation
-if (wasmUsesSignalForOOB()) {
-    suppressSignalHandlers(true);
-    assertEq(wasmUsesSignalForOOB(), false);
+{
+      var buf = new ArrayBuffer(BUF_MIN);
+      var code = asmCompile('glob', 'imp', 'b', USE_ASM + HEAP_IMPORTS + '/* not a clone */ function f(i) {i=i|0; i32[0] = i; return i8[0]|0}; return f');
+      var f = asmLink(code, this, null, buf);
+      assertEq(f(0),0);
+      assertEq(f(0x7f),0x7f);
+      assertEq(f(0xff),-1);
+      assertEq(f(0x100),0);
 
-    var buf = new ArrayBuffer(BUF_MIN);
-    var code = asmCompile('glob', 'imp', 'b', USE_ASM + HEAP_IMPORTS + '/* not a clone */ function f(i) {i=i|0; i32[0] = i; return i8[0]|0}; return f');
-    var f = asmLink(code, this, null, buf);
-    assertEq(f(0),0);
-    assertEq(f(0x7f),0x7f);
-    assertEq(f(0xff),-1);
-    assertEq(f(0x100),0);
-
-    // Bug 1088655
-    assertEq(asmLink(asmCompile('stdlib', 'foreign', 'heap', USE_ASM + 'var i32=new stdlib.Int32Array(heap); function f(i) {i=i|0;var j=0x10000;return (i32[j>>2] = i)|0 } return f'), this, null, buf)(1), 1);
-
-    suppressSignalHandlers(false);
-    assertEq(wasmUsesSignalForOOB(), true);
+      // Bug 1088655
+      assertEq(asmLink(asmCompile('stdlib', 'foreign', 'heap', USE_ASM + 'var i32=new stdlib.Int32Array(heap); function f(i) {i=i|0;var j=0x10000;return (i32[j>>2] = i)|0 } return f'), this, null, buf)(1), 1);
 }
 
 setCachingEnabled(false);
 
 var code = asmCompile('glob', 'imp', 'b', USE_ASM + HEAP_IMPORTS + 'function f(i) {i=i|0; i32[0] = i; return u8[0]|0}; return f');
 var f = asmLink(code, this, null, new ArrayBuffer(BUF_MIN));
 assertEq(f(0),0);
 assertEq(f(0x7f),0x7f);
--- a/js/src/jit-test/tests/asm.js/testJumpRange.js
+++ b/js/src/jit-test/tests/asm.js/testJumpRange.js
@@ -2,50 +2,45 @@ load(libdir + "asm.js");
 load(libdir + "asserts.js");
 
 var fatFunc = USE_ASM + '\n';
 for (var i = 0; i < 100; i++)
     fatFunc += "function f" + i + "() { return ((f" + (i+1) + "()|0)+1)|0 }\n";
 fatFunc += "function f100() { return 42 }\n";
 fatFunc += "return f0";
 
-for (var signals = 0; signals <= 1; signals++) {
-    suppressSignalHandlers(Boolean(signals));
-
-    for (let threshold of [0, 50, 100, 5000, -1]) {
-        setJitCompilerOption("jump-threshold", threshold);
+for (let threshold of [0, 50, 100, 5000, -1]) {
+    setJitCompilerOption("jump-threshold", threshold);
 
-        assertEq(asmCompile(
-            USE_ASM + `
-                function h() { return ((g()|0)+2)|0 }
-                function g() { return ((f()|0)+1)|0 }
-                function f() { return 42 }
-                return h
-            `)()(), 45);
+    assertEq(asmCompile(
+        USE_ASM + `
+            function h() { return ((g()|0)+2)|0 }
+            function g() { return ((f()|0)+1)|0 }
+            function f() { return 42 }
+            return h
+        `)()(), 45);
 
-        if (isSimdAvailable() && this.SIMD) {
-            var buf = new ArrayBuffer(BUF_MIN);
-            new Int32Array(buf)[0] = 10;
-            new Float32Array(buf)[1] = 42;
-            assertEq(asmCompile('stdlib', 'ffis', 'buf',
-                USE_ASM + `
-                    var H = new stdlib.Uint8Array(buf);
-                    var i4 = stdlib.SIMD.Int32x4;
-                    var f4 = stdlib.SIMD.Float32x4;
-                    var i4load = i4.load;
-                    var f4load = f4.load;
-                    var toi4 = i4.fromFloat32x4;
-                    var i4ext = i4.extractLane;
-                    function f(i) { i=i|0; return i4ext(i4load(H, i), 0)|0 }
-                    function g(i) { i=i|0; return (i4ext(toi4(f4load(H, i)),1) + (f(i)|0))|0 }
-                    function h(i) { i=i|0; return g(i)|0 }
-                    return h
-                `)(this, null, buf)(0), 52);
-        }
+    if (isSimdAvailable() && this.SIMD) {
+        var buf = new ArrayBuffer(BUF_MIN);
+        new Int32Array(buf)[0] = 10;
+        new Float32Array(buf)[1] = 42;
+        assertEq(asmCompile('stdlib', 'ffis', 'buf',
+            USE_ASM + `
+                var H = new stdlib.Uint8Array(buf);
+                var i4 = stdlib.SIMD.Int32x4;
+                var f4 = stdlib.SIMD.Float32x4;
+                var i4load = i4.load;
+                var f4load = f4.load;
+                var toi4 = i4.fromFloat32x4;
+                var i4ext = i4.extractLane;
+                function f(i) { i=i|0; return i4ext(i4load(H, i), 0)|0 }
+                function g(i) { i=i|0; return (i4ext(toi4(f4load(H, i)),1) + (f(i)|0))|0 }
+                function h(i) { i=i|0; return g(i)|0 }
+                return h
+            `)(this, null, buf)(0), 52);
+    }
 
-        enableSPSProfiling();
-        asmLink(asmCompile(USE_ASM + 'function f() {} function g() { f() } function h() { g() } return h'))();
-        disableSPSProfiling();
+    enableSPSProfiling();
+    asmLink(asmCompile(USE_ASM + 'function f() {} function g() { f() } function h() { g() } return h'))();
+    disableSPSProfiling();
 
-        assertEq(asmCompile(fatFunc)()(), 142);
-    }
+    assertEq(asmCompile(fatFunc)()(), 142);
 }
-
deleted file mode 100644
--- a/js/src/jit-test/tests/asm.js/testTimeout-deactivate-reactivate-signals.js
+++ /dev/null
@@ -1,30 +0,0 @@
-// |jit-test| exitstatus: 6;
-
-load(libdir + "asm.js");
-
-setCachingEnabled(true);
-
-var jco = getJitCompilerOptions();
-if (!isCachingEnabled() || !isAsmJSCompilationAvailable())
-    quit(6);
-
-// Modules compiled without signal handlers should still work even if signal
-// handlers have been reactivated.
-suppressSignalHandlers(true);
-
-var code = USE_ASM + "function f() {} function g() { while(1) { f() } } return g";
-
-var m = asmCompile(code);
-assertEq(isAsmJSModule(m), true);
-assertEq(isAsmJSModuleLoadedFromCache(m), false);
-
-suppressSignalHandlers(false);
-
-var m = asmCompile(code);
-assertEq(isAsmJSModule(m), true);
-assertEq(isAsmJSModuleLoadedFromCache(m), false);
-
-var g = asmLink(m);
-timeout(1);
-g();
-assertEq(true, false);
deleted file mode 100644
--- a/js/src/jit-test/tests/asm.js/testTimeout1-nosignals.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// |jit-test| exitstatus: 6;
-
-load(libdir + "asm.js");
-
-suppressSignalHandlers(true);
-var g = asmLink(asmCompile(USE_ASM + "function f() {} function g() { while(1) { f() } } return g"));
-timeout(1);
-g();
-assertEq(true, false);
deleted file mode 100644
--- a/js/src/jit-test/tests/asm.js/testTimeout2-nosignals.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// |jit-test| exitstatus: 6;
-
-load(libdir + "asm.js");
-
-suppressSignalHandlers(true);
-var g = asmLink(asmCompile(USE_ASM + "function g() { while(1) {} } return g"));
-timeout(1);
-g();
-assertEq(true, false);
deleted file mode 100644
--- a/js/src/jit-test/tests/asm.js/testTimeout3-nosignals.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// |jit-test| exitstatus: 6;
-
-load(libdir + "asm.js");
-
-suppressSignalHandlers(true);
-var f = asmLink(asmCompile(USE_ASM + "function f(i) { i=i|0; if (!i) return; f((i-1)|0); f((i-1)|0); f((i-1)|0); f((i-1)|0); f((i-1)|0); } return f"));
-timeout(1);
-f(100);
-assertEq(true, false);
deleted file mode 100644
--- a/js/src/jit-test/tests/asm.js/testTimeout4-nosignals.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// |jit-test| exitstatus: 6;
-
-load(libdir + "asm.js");
-
-suppressSignalHandlers(true);
-var g = asmLink(asmCompile(USE_ASM + "function f(d) { d=+d; d=d*.1; d=d/.4; return +d } function g() { while(1) { +f(1.1) } } return g"));
-timeout(1);
-g();
-assertEq(true, false);
--- a/js/src/jit-test/tests/ion/iloop-nosignaling.js
+++ b/js/src/jit-test/tests/ion/iloop-nosignaling.js
@@ -1,5 +1,5 @@
 // |jit-test| exitstatus: 6;
 
-suppressSignalHandlers(true);
+setJitCompilerOption('ion.interrupt-without-signals', 1);
 timeout(1);
 for(;;);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/basic-grow-memory.js
@@ -0,0 +1,52 @@
+// |jit-test| test-also-wasm-baseline
+load(libdir + "wasm.js");
+
+function linearModule(min, max, ops) {
+  var opsText = ops.map(function (op) {
+    if (op[0] == "CM") {
+      res = `(if (i32.ne (current_memory) (i32.const ${op[1]}))
+                  (i32.load offset=10 (i32.const 4294967295))
+                  (i32.const 0))`
+    } else if (op[0] == "GM") {
+      res = `(if (i32.ne (grow_memory (i32.const ${op[1]})) (i32.const ${op[2]}))
+                 (i32.load offset=10 (i32.const 4294967295))
+                 (i32.const 0))`
+    } else if (op[0] == "L") {
+      var type = op[1];
+      var ext = op[2];
+      var off = op[3];
+      var loc = op[4]
+      var align = 0;
+      res = `(${type}.load${ext} offset=${off} (i32.const ${op[4]}))`;
+    } else if (op[0] == "S") {
+      var type = op[1];
+      var ext = op[2];
+      var off = op[3];
+      var loc = op[4]
+      var align = 0;
+      res = `(${type}.store${ext} offset=${off} (i32.const ${op[4]}) (i32.const 42))`;
+    }
+    return res;
+  }).join("\n")
+
+  text =
+    `(module
+       (memory ${min} ${max}` +
+         (min != 0 ? `(segment 0 "\\00\\01\\02\\03\\04\\05\\06\\07\\08\\09\\0a\\0b\\0c\\0d\\0e\\0f")
+                      (segment 16 "\\f0\\f1\\f2\\f3\\f4\\f5\\f6\\f7\\f8\\f9\\fa\\fb\\fc\\fd\\fe\\ff")`
+                      : "") +
+       `)
+       (func (result i32)
+        ` + opsText + `
+         (current_memory)
+       ) (export "" 0))`
+
+  return wasmEvalText(text);
+}
+
+function assertOOB(lambda) {
+  assertErrorMessage(lambda, Error, /invalid or out-of-range index/);
+}
+
+// Just grow some memory
+assertEq(linearModule(3,5, [["CM", 3]])(), 3);
--- a/js/src/jit-test/tests/wasm/basic-memory.js
+++ b/js/src/jit-test/tests/wasm/basic-memory.js
@@ -143,19 +143,24 @@ for (let [type, ext] of [
     ['i64', '32'],
     ['f32', ''],
     ['f64', ''],
 ])
 {
     assertErrorMessage(() => badStoreModule(type, ext), TypeError, /can't touch memory/);
 }
 
-for (var ind = 0; ind < 2; ind++) {
+for (var ind = 0; ind < 1; ind++) {
+    /*
+     * TODO: wasm.explicit-bounds-check option is being deprecated. We will be adding a
+     * new option that treats all offset as "non-foldable". When that is added trigger
+     * it here when ind == 1.
     if (ind == 1)
         setJitCompilerOption('wasm.explicit-bounds-checks', 1);
+    */
 
     testLoad('i32', '', 0, 0, 0, 0x03020100);
     testLoad('i32', '', 1, 0, 1, 0x04030201);
     testLoad('i32', '', 0, 4, 0, 0x07060504);
     testLoad('i32', '', 1, 3, 4, 0x07060504);
     testLoad('f32', '', 0, 0, 0, 3.820471434542632e-37);
     testLoad('f32', '', 1, 0, 1, 1.539989614439558e-36);
     testLoad('f32', '', 0, 4, 0, 1.0082513512365273e-34);
--- a/js/src/jit-test/tests/wasm/import-export.js
+++ b/js/src/jit-test/tests/wasm/import-export.js
@@ -3,60 +3,76 @@ load(libdir + 'wasm.js');
 load(libdir + 'asserts.js');
 
 const Module = WebAssembly.Module;
 const Instance = WebAssembly.Instance;
 const Memory = WebAssembly.Memory;
 const Table = WebAssembly.Table;
 
 const mem1Page = new Memory({initial:1});
+const mem1PageMax1 = new Memory({initial:1, maximum: 1});
 const mem2Page = new Memory({initial:2});
+const mem2PageMax2 = new Memory({initial:2, maximum: 2});
+const mem2PageMax3 = new Memory({initial:2, maximum: 3});
+const mem2PageMax4 = new Memory({initial:2, maximum: 4});
 const mem3Page = new Memory({initial:3});
+const mem3PageMax3 = new Memory({initial:3, maximum: 3});
 const mem4Page = new Memory({initial:4});
+const mem4PageMax4 = new Memory({initial:4, maximum: 4});
 const tab1Elem = new Table({initial:1, element:"anyfunc"});
 const tab2Elem = new Table({initial:2, element:"anyfunc"});
 const tab3Elem = new Table({initial:3, element:"anyfunc"});
 const tab4Elem = new Table({initial:4, element:"anyfunc"});
 
 // Explicitly opt into the new binary format for imports and exports until it
 // is used by default everywhere.
 const textToBinary = str => wasmTextToBinary(str, 'new-format');
 
+assertErrorMessage(() => new Memory({initial:2, maximum:1}), TypeError, /bad Memory maximum size/);
+
 const m1 = new Module(textToBinary('(module (import "foo" "bar") (import "baz" "quux"))'));
 assertErrorMessage(() => new Instance(m1), TypeError, /no import object given/);
 assertErrorMessage(() => new Instance(m1, {foo:null}), TypeError, /import object field is not an Object/);
 assertErrorMessage(() => new Instance(m1, {foo:{bar:{}}}), TypeError, /import object field is not a Function/);
 assertErrorMessage(() => new Instance(m1, {foo:{bar:()=>{}}, baz:null}), TypeError, /import object field is not an Object/);
 assertErrorMessage(() => new Instance(m1, {foo:{bar:()=>{}}, baz:{}}), TypeError, /import object field is not a Function/);
 assertEq(new Instance(m1, {foo:{bar:()=>{}}, baz:{quux:()=>{}}}) instanceof Instance, true);
 
 const m2 = new Module(textToBinary('(module (import "x" "y" (memory 2 3)))'));
 assertErrorMessage(() => new Instance(m2), TypeError, /no import object given/);
 assertErrorMessage(() => new Instance(m2, {x:null}), TypeError, /import object field is not an Object/);
 assertErrorMessage(() => new Instance(m2, {x:{y:{}}}), TypeError, /import object field is not a Memory/);
 assertErrorMessage(() => new Instance(m2, {x:{y:mem1Page}}), TypeError, /imported Memory with incompatible size/);
+assertErrorMessage(() => new Instance(m2, {x:{y:mem1PageMax1}}), TypeError, /imported Memory with incompatible size/);
 assertErrorMessage(() => new Instance(m2, {x:{y:mem4Page}}), TypeError, /imported Memory with incompatible size/);
-assertEq(new Instance(m2, {x:{y:mem2Page}}) instanceof Instance, true);
-assertEq(new Instance(m2, {x:{y:mem3Page}}) instanceof Instance, true);
+assertErrorMessage(() => new Instance(m2, {x:{y:mem4PageMax4}}), TypeError, /imported Memory with incompatible size/);
+assertErrorMessage(() => new Instance(m2, {x:{y:mem2Page}}), TypeError, /imported Memory with incompatible size/);
+assertEq(new Instance(m2, {x:{y:mem2PageMax2}}) instanceof Instance, true);
+assertErrorMessage(() => new Instance(m2, {x:{y:mem3Page}}), TypeError, /imported Memory with incompatible size/);
+assertEq(new Instance(m2, {x:{y:mem3PageMax3}}) instanceof Instance, true);
+assertEq(new Instance(m2, {x:{y:mem2PageMax3}}) instanceof Instance, true);
+assertErrorMessage(() => new Instance(m2, {x:{y:mem2PageMax4}}), TypeError, /imported Memory with incompatible size/);
 
 const m3 = new Module(textToBinary('(module (import "foo" "bar" (memory 1 1)) (import "baz" "quux"))'));
 assertErrorMessage(() => new Instance(m3), TypeError, /no import object given/);
 assertErrorMessage(() => new Instance(m3, {foo:null}), TypeError, /import object field is not an Object/);
 assertErrorMessage(() => new Instance(m3, {foo:{bar:{}}}), TypeError, /import object field is not a Memory/);
 assertErrorMessage(() => new Instance(m3, {foo:{bar:mem1Page}, baz:null}), TypeError, /import object field is not an Object/);
 assertErrorMessage(() => new Instance(m3, {foo:{bar:mem1Page}, baz:{quux:mem1Page}}), TypeError, /import object field is not a Function/);
-assertEq(new Instance(m3, {foo:{bar:mem1Page}, baz:{quux:()=>{}}}) instanceof Instance, true);
+assertErrorMessage(() => new Instance(m3, {foo:{bar:mem1Page}, baz:{quux:()=>{}}}), TypeError, /imported Memory with incompatible size/);
+assertEq(new Instance(m3, {foo:{bar:mem1PageMax1}, baz:{quux:()=>{}}}) instanceof Instance, true);
 
 const m4 = new Module(textToBinary('(module (import "baz" "quux") (import "foo" "bar" (memory 1 1)))'));
 assertErrorMessage(() => new Instance(m4), TypeError, /no import object given/);
 assertErrorMessage(() => new Instance(m4, {baz:null}), TypeError, /import object field is not an Object/);
 assertErrorMessage(() => new Instance(m4, {baz:{quux:{}}}), TypeError, /import object field is not a Function/);
 assertErrorMessage(() => new Instance(m4, {baz:{quux:()=>{}}, foo:null}), TypeError, /import object field is not an Object/);
 assertErrorMessage(() => new Instance(m4, {baz:{quux:()=>{}}, foo:{bar:()=>{}}}), TypeError, /import object field is not a Memory/);
-assertEq(new Instance(m3, {baz:{quux:()=>{}}, foo:{bar:mem1Page}}) instanceof Instance, true);
+assertErrorMessage(() => new Instance(m4, {baz:{quux:()=>{}}, foo:{bar:mem1Page}}), TypeError, /imported Memory with incompatible size/);
+assertEq(new Instance(m3, {baz:{quux:()=>{}}, foo:{bar:mem1PageMax1}}) instanceof Instance, true);
 
 const m5 = new Module(textToBinary('(module (import "a" "b" (memory 2)))'));
 assertErrorMessage(() => new Instance(m5, {a:{b:mem1Page}}), TypeError, /imported Memory with incompatible size/);
 assertEq(new Instance(m5, {a:{b:mem2Page}}) instanceof Instance, true);
 assertEq(new Instance(m5, {a:{b:mem3Page}}) instanceof Instance, true);
 assertEq(new Instance(m5, {a:{b:mem4Page}}) instanceof Instance, true);
 
 const m6 = new Module(textToBinary('(module (import "a" "b" (table 2)))'));
@@ -120,17 +136,17 @@ var importObj = {
 };
 assertEq(new Instance(m1, importObj) instanceof Instance, true);
 assertEq(arr.join(), "foo,bar,baz,quux");
 
 var arr = [];
 var importObj = {
     get foo() {
         arr.push("foo");
-        return { get bar() { arr.push("bar"); return new WebAssembly.Memory({initial:1}) } }
+        return { get bar() { arr.push("bar"); return new WebAssembly.Memory({initial:1, maximum:1}) } }
     },
     get baz() {
         arr.push("baz");
         return { get quux() { arr.push("quux"); return () => arr.push("bad") } }
     }
 };
 assertEq(new Instance(m3, importObj) instanceof Instance, true);
 assertEq(arr.join(), "foo,bar,baz,quux");
@@ -259,17 +275,17 @@ assertEq(e.tbl1.get(1), e.f3);
 e.tbl1.set(1, null);
 assertEq(e.tbl1.get(1), null);
 e.tbl1.set(3, e.f1);
 assertEq(e.tbl1.get(0), e.tbl1.get(3));
 
 // Re-exports and Identity:
 
 var code = textToBinary('(module (import "a" "b" (memory 1 1)) (export "foo" memory) (export "bar" memory))');
-var mem = new Memory({initial:1});
+var mem = new Memory({initial:1, maximum:1});
 var e = new Instance(new Module(code), {a:{b:mem}}).exports;
 assertEq(mem, e.foo);
 assertEq(mem, e.bar);
 
 var code = textToBinary('(module (import "a" "b" (table 1 1)) (export "foo" table) (export "bar" table))');
 var tbl = new Table({initial:1, element:"anyfunc"});
 var e = new Instance(new Module(code), {a:{b:tbl}}).exports;
 assertEq(tbl, e.foo);
@@ -315,17 +331,17 @@ var m = new Module(textToBinary(`
     (module
         (import "a" "b" (memory 1 1))
         (data 0 "\\0a\\0b")
         (data 100 "\\0c\\0d")
         (func $get (param $p i32) (result i32)
             (i32.load8_u (get_local $p)))
         (export "get" $get))
 `));
-var mem = new Memory({initial:1});
+var mem = new Memory({initial:1, maximum:1});
 var {get} = new Instance(m, {a:{b:mem}}).exports;
 assertEq(get(0), 0xa);
 assertEq(get(1), 0xb);
 assertEq(get(2), 0x0);
 assertEq(get(100), 0xc);
 assertEq(get(101), 0xd);
 assertEq(get(102), 0x0);
 var i8 = new Uint8Array(mem.buffer);
--- a/js/src/jit-test/tests/wasm/jsapi.js
+++ b/js/src/jit-test/tests/wasm/jsapi.js
@@ -118,16 +118,19 @@ const Memory = WebAssembly.Memory;
 assertEq(Memory, memoryDesc.value);
 assertEq(Memory.length, 1);
 assertEq(Memory.name, "Memory");
 assertErrorMessage(() => Memory(), TypeError, /constructor without new is forbidden/);
 assertErrorMessage(() => new Memory(1), TypeError, "first argument must be a memory descriptor");
 assertErrorMessage(() => new Memory({initial:{valueOf() { throw new Error("here")}}}), Error, "here");
 assertErrorMessage(() => new Memory({initial:-1}), TypeError, /bad Memory initial size/);
 assertErrorMessage(() => new Memory({initial:Math.pow(2,32)}), TypeError, /bad Memory initial size/);
+assertErrorMessage(() => new Memory({initial:1, maximum: Math.pow(2,32)/Math.pow(2,14) }), TypeError, /bad Memory maximum size/);
+assertErrorMessage(() => new Memory({initial:2, maximum: 1 }), TypeError, /bad Memory maximum size/);
+assertErrorMessage(() => new Memory({maximum: -1 }), TypeError, /bad Memory maximum size/);
 assertEq(new Memory({initial:1}) instanceof Memory, true);
 assertEq(new Memory({initial:1.5}).buffer.byteLength, 64*1024);
 
 // 'WebAssembly.Memory.prototype' property
 const memoryProtoDesc = Object.getOwnPropertyDescriptor(Memory, 'prototype');
 assertEq(typeof memoryProtoDesc.value, "object");
 assertEq(memoryProtoDesc.writable, false);
 assertEq(memoryProtoDesc.enumerable, false);
deleted file mode 100644
--- a/js/src/jit-test/tests/wasm/signals-enabled.js
+++ /dev/null
@@ -1,29 +0,0 @@
-load(libdir + 'wasm.js');
-load(libdir + 'asserts.js');
-
-// Explicitly opt into the new binary format for imports and exports until it
-// is used by default everywhere.
-const textToBinary = str => wasmTextToBinary(str, 'new-format');
-
-if (!wasmUsesSignalForOOB())
-    quit();
-
-const Module = WebAssembly.Module;
-const Instance = WebAssembly.Instance;
-const Memory = WebAssembly.Memory;
-const code = textToBinary('(module (import "x" "y" (memory 1 1)))');
-
-suppressSignalHandlers(true);
-var mem = new Memory({initial:1});
-suppressSignalHandlers(false);
-var m = new Module(code);
-suppressSignalHandlers(true);
-assertErrorMessage(() => new Instance(m, {x:{y:mem}}), Error, /signals/);
-var m = new Module(code);
-suppressSignalHandlers(false);
-assertEq(new Instance(m, {x:{y:mem}}) instanceof Instance, true);
-var mem = new Memory({initial:1});
-suppressSignalHandlers(true);
-var m = new Module(code);
-suppressSignalHandlers(false);
-assertEq(new Instance(m, {x:{y:mem}}) instanceof Instance, true);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/spec/grow-memory.wast
@@ -0,0 +1,127 @@
+;; Without a memory, can't use current_memory and grow_memory.
+(assert_invalid
+  (module
+    (func $cm (result i32)
+      (current_memory)
+    )
+  )
+  "memory operators require a memory section"
+)
+
+(assert_invalid
+  (module
+    (func $gm (param i32) (result i32)
+      (grow_memory (get_local 0))
+    )
+  )
+  "memory operators require a memory section"
+)
+
+;; Test Current Memory
+(module (memory 0 10)
+  (func $gm (param i32) (result i32)
+    (grow_memory (get_local 0))
+  )
+
+  (func $cm (result i32)
+    (current_memory)
+  )
+
+  (func $ldst8 (param i32) (param i32) (result i32)
+    (block
+      (i32.store8 (get_local 0) (get_local 1))
+      (i32.load8_u (get_local 0))
+    )
+  )
+
+  (func $ldst16 (param i32) (param i32) (result i32)
+    (block
+      (i32.store16 (get_local 0) (get_local 1))
+      (i32.load16_u (get_local 0))
+    )
+  )
+
+  (func $ldst32 (param i32) (param i32) (result i32)
+    (block
+      (i32.store (get_local 0) (get_local 1))
+      (i32.load (get_local 0))
+    )
+  )
+
+  (func $ldst64 (param i32) (param i64) (result i64)
+    (block
+      (i64.store (get_local 0) (get_local 1))
+      (i64.load (get_local 0))
+    )
+  )
+
+  (export "cm" $cm)
+  (export "gm" $gm)
+  (export "ldst8" $ldst8)
+  (export "ldst16" $ldst16)
+  (export "ldst32" $ldst32)
+  (export "ldst64" $ldst64)
+)
+
+;; Call current_memory on 0-sized memory
+(assert_return (invoke "cm") (i32.const 0))
+
+;; Growing by 0 is ok and doesn't map any new pages
+(assert_return (invoke "gm" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "cm") (i32.const 0))
+(assert_trap (invoke "ldst8" (i32.const 0) (i32.const 42)) "out of bounds memory access")
+
+;; Can't grow by more than what's allowed
+(assert_return (invoke "gm" (i32.const 11)) (i32.const -1))
+(assert_return (invoke "cm") (i32.const 0))
+(assert_trap (invoke "ldst8" (i32.const 0) (i32.const 42)) "out of bounds memory access")
+
+;; Growing by X enables exactly X pages
+(assert_return (invoke "gm" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "cm") (i32.const 1))
+(assert_return (invoke "ldst8" (i32.const 0) (i32.const 42)) (i32.const 42))
+(assert_return (invoke "ldst8" (i32.const 65535) (i32.const 42)) (i32.const 42))
+(assert_return (invoke "ldst16" (i32.const 65534) (i32.const 42)) (i32.const 42))
+(assert_return (invoke "ldst32" (i32.const 65532) (i32.const 42)) (i32.const 42))
+(assert_return (invoke "ldst64" (i32.const 65528) (i64.const 42)) (i64.const 42))
+(assert_trap (invoke "ldst8" (i32.const 65536) (i32.const 42)) "out of bounds memory access")
+(assert_trap (invoke "ldst16" (i32.const 65535) (i32.const 42)) "out of bounds memory access")
+(assert_trap (invoke "ldst32" (i32.const 65533) (i32.const 42)) "out of bounds memory access")
+(assert_trap (invoke "ldst64" (i32.const 65529) (i64.const 42)) "out of bounds memory access")
+
+;; grow_memory returns the previous memory size in pages, and again we've added only as many pages as requested.
+(assert_return (invoke "gm" (i32.const 2)) (i32.const 1))
+(assert_return (invoke "cm") (i32.const 3))
+
+;; and again we have only allocated 2 additional pages.
+(assert_return (invoke "ldst8" (i32.const 0) (i32.const 42)) (i32.const 42))
+(assert_return (invoke "ldst8" (i32.const 196607) (i32.const 42)) (i32.const 42))
+(assert_return (invoke "ldst16" (i32.const 196606) (i32.const 42)) (i32.const 42))
+(assert_return (invoke "ldst32" (i32.const 196604) (i32.const 42)) (i32.const 42))
+(assert_return (invoke "ldst64" (i32.const 196600) (i64.const 42)) (i64.const 42))
+(assert_trap (invoke "ldst8" (i32.const 196608) (i32.const 42)) "out of bounds memory access")
+(assert_trap (invoke "ldst16" (i32.const 196607) (i32.const 42)) "out of bounds memory access")
+(assert_trap (invoke "ldst32" (i32.const 196605) (i32.const 42)) "out of bounds memory access")
+(assert_trap (invoke "ldst64" (i32.const 196601) (i64.const 42)) "out of bounds memory access")
+
+;; One more time: can't grow by more than what's allowed, and a failed grow doesn't add new pages
+(assert_return (invoke "gm" (i32.const 8)) (i32.const -1))
+(assert_return (invoke "cm") (i32.const 3))
+(assert_return (invoke "ldst8" (i32.const 196607) (i32.const 42)) (i32.const 42))
+(assert_return (invoke "ldst16" (i32.const 196606) (i32.const 42)) (i32.const 42))
+(assert_return (invoke "ldst32" (i32.const 196604) (i32.const 42)) (i32.const 42))
+(assert_return (invoke "ldst64" (i32.const 196600) (i64.const 42)) (i64.const 42))
+(assert_trap (invoke "ldst8" (i32.const 196608) (i32.const 42)) "out of bounds memory access")
+(assert_trap (invoke "ldst16" (i32.const 196607) (i32.const 42)) "out of bounds memory access")
+(assert_trap (invoke "ldst32" (i32.const 196605) (i32.const 42)) "out of bounds memory access")
+(assert_trap (invoke "ldst64" (i32.const 196601) (i64.const 42)) "out of bounds memory access")
+
+;; Can't grow by number of pages that would overflow UINT32 when scaled by the wasm page size
+(assert_return (invoke "gm" (i32.const 65534)) (i32.const -1))
+(assert_return (invoke "cm") (i32.const 3))
+(assert_return (invoke "gm" (i32.const 65535)) (i32.const -1))
+(assert_return (invoke "cm") (i32.const 3))
+(assert_return (invoke "gm" (i32.const 65536)) (i32.const -1))
+(assert_return (invoke "cm") (i32.const 3))
+(assert_return (invoke "gm" (i32.const 65537)) (i32.const -1))
+(assert_return (invoke "cm") (i32.const 3))
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/spec/grow-memory.wast.js
@@ -0,0 +1,2 @@
+// |jit-test| test-also-wasm-baseline
+var importedArgs = ['grow-memory.wast']; load(scriptdir + '../spec.js');
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -219,19 +219,19 @@ DefaultJitOptions::DefaultJitOptions()
     }
 
     // Toggles whether unboxed plain objects can be created by the VM.
     SET_DEFAULT(disableUnboxedObjects, false);
 
     // Test whether wasm int64 / double NaN bits testing is enabled.
     SET_DEFAULT(wasmTestMode, false);
 
-    // Determines whether explicit bounds check will be used for OOB
-    // instead of signals (even when signals are available).
-    SET_DEFAULT(wasmExplicitBoundsChecks, false);
+    // Determines whether we suppress using signal handlers
+    // for interrupting jit-ed code. This is used only for testing.
+    SET_DEFAULT(ionInterruptWithoutSignals, false);
 }
 
 bool
 DefaultJitOptions::isSmallFunction(JSScript* script) const
 {
     return script->length() <= smallFunctionMaxBytecodeLength_;
 }
 
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -65,17 +65,17 @@ struct DefaultJitOptions
     bool disableSharedStubs;
     bool disableSincos;
     bool disableSink;
     bool eagerCompilation;
     bool forceInlineCaches;
     bool limitScriptSize;
     bool osr;
     bool wasmTestMode;
-    bool wasmExplicitBoundsChecks;
+    bool ionInterruptWithoutSignals;
     uint32_t baselineWarmUpThreshold;
     uint32_t exceptionBailoutThreshold;
     uint32_t frequentBailoutThreshold;
     uint32_t maxStackArgs;
     uint32_t osrPcMismatchesBeforeRecompile;
     uint32_t smallFunctionMaxBytecodeLength_;
     uint32_t jumpThreshold;
     uint32_t branchPruningHitCountFactor;
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -89,17 +89,17 @@ LIRGenerator::visitIsConstructing(MIsCon
 {
     define(new(alloc()) LIsConstructing(), ins);
 }
 
 static void
 TryToUseImplicitInterruptCheck(MIRGraph& graph, MBasicBlock* backedge)
 {
     // Implicit interrupt checks require asm.js signal handlers to be installed.
-    if (!wasm::HaveSignalHandlers())
+    if (!wasm::HaveSignalHandlers() || JitOptions.ionInterruptWithoutSignals)
         return;
 
     // To avoid triggering expensive interrupts (backedge patching) in
     // requestMajorGC and requestMinorGC, use an implicit interrupt check only
     // if the loop body can not trigger GC or affect GC state like the store
     // buffer. We do this by checking there are no safepoints attached to LIR
     // instructions inside the loop.
 
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -5412,16 +5412,34 @@ MWasmCall::New(TempAllocator& alloc, con
     for (size_t i = 0; i < call->argRegs_.length(); i++)
         call->initOperand(i, args[i].def);
     if (callee.isTable())
         call->initOperand(call->argRegs_.length(), tableIndex);
 
     return call;
 }
 
+MWasmCall*
+MWasmCall::NewBuiltinInstanceMethodCall(TempAllocator& alloc,
+                                        const wasm::CallSiteDesc& desc,
+                                        const wasm::SymbolicAddress builtin,
+                                        const ABIArg& instanceArg,
+                                        const Args& args,
+                                        MIRType resultType,
+                                        uint32_t spIncrement)
+{
+    auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin);
+    MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, resultType, spIncrement,
+                                     MWasmCall::DontSaveTls, nullptr);
+
+    MOZ_ASSERT(instanceArg != ABIArg()); // instanceArg must be initialized.
+    call->instanceArg_ = instanceArg;
+    return call;
+}
+
 void
 MSqrt::trySpecializeFloat32(TempAllocator& alloc) {
     if (!input()->canProduceFloat32() || !CheckUsesAreFloat32Consumers(this)) {
         if (input()->type() == MIRType::Float32)
             ConvertDefinitionToDouble<0>(alloc, input(), this);
         return;
     }
 
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -13605,16 +13605,17 @@ class MWasmCall final
   : public MVariadicInstruction,
     public NoTypePolicy::Data
 {
     wasm::CallSiteDesc desc_;
     wasm::CalleeDesc callee_;
     FixedList<AnyRegister> argRegs_;
     uint32_t spIncrement_;
     uint32_t tlsStackOffset_;
+    ABIArg instanceArg_;
 
     MWasmCall(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee, uint32_t spIncrement,
               uint32_t tlsStackOffset)
       : desc_(desc),
         callee_(callee),
         spIncrement_(spIncrement),
         tlsStackOffset_(tlsStackOffset)
     { }
@@ -13631,16 +13632,24 @@ class MWasmCall final
 
     static const uint32_t DontSaveTls = UINT32_MAX;
 
     static MWasmCall* New(TempAllocator& alloc, const wasm::CallSiteDesc& desc,
                           const wasm::CalleeDesc& callee, const Args& args, MIRType resultType,
                           uint32_t spIncrement, uint32_t tlsStackOffset,
                           MDefinition* tableIndex = nullptr);
 
+    static MWasmCall* NewBuiltinInstanceMethodCall(TempAllocator& alloc,
+                                                   const wasm::CallSiteDesc& desc,
+                                                   const wasm::SymbolicAddress builtin,
+                                                   const ABIArg& instanceArg,
+                                                   const Args& args,
+                                                   MIRType resultType,
+                                                   uint32_t spIncrement);
+
     size_t numArgs() const {
         return argRegs_.length();
     }
     AnyRegister registerForArg(size_t index) const {
         MOZ_ASSERT(index < numArgs());
         return argRegs_[index];
     }
     const wasm::CallSiteDesc& desc() const {
@@ -13658,16 +13667,20 @@ class MWasmCall final
     uint32_t tlsStackOffset() const {
         MOZ_ASSERT(saveTls());
         return tlsStackOffset_;
     }
 
     bool possiblyCalls() const override {
         return true;
     }
+
+    const ABIArg& instanceArg() const {
+        return instanceArg_;
+    }
 };
 
 class MAsmSelect
   : public MTernaryInstruction,
     public NoTypePolicy::Data
 {
     MAsmSelect(MDefinition* trueExpr, MDefinition* falseExpr, MDefinition *condExpr)
       : MTernaryInstruction(trueExpr, falseExpr, condExpr)
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -34,21 +34,16 @@ class OptimizationInfo;
 
 class MIRGenerator
 {
   public:
     MIRGenerator(CompileCompartment* compartment, const JitCompileOptions& options,
                  TempAllocator* alloc, MIRGraph* graph,
                  const CompileInfo* info, const OptimizationInfo* optimizationInfo);
 
-    void initUsesSignalHandlersForAsmJSOOB(bool init) {
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-        usesSignalHandlersForAsmJSOOB_ = init;
-#endif
-    }
     void initMinAsmJSHeapLength(uint32_t init) {
         minAsmJSHeapLength_ = init;
     }
 
     TempAllocator& alloc() {
         return *alloc_;
     }
     MIRGraph& graph() {
@@ -195,19 +190,16 @@ class MIRGenerator
     bool modifiesFrameArguments_;
 
     bool instrumentedProfiling_;
     bool instrumentedProfilingIsCached_;
     bool safeForMinorGC_;
 
     void addAbortedPreliminaryGroup(ObjectGroup* group);
 
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-    bool usesSignalHandlersForAsmJSOOB_;
-#endif
     uint32_t minAsmJSHeapLength_;
 
     void setForceAbort() {
         shouldForceAbort_ = true;
     }
     bool shouldForceAbort() {
         return shouldForceAbort_;
     }
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -34,19 +34,16 @@ MIRGenerator::MIRGenerator(CompileCompar
     wasmMaxStackArgBytes_(0),
     performsCall_(false),
     usesSimd_(false),
     cachedUsesSimd_(false),
     modifiesFrameArguments_(false),
     instrumentedProfiling_(false),
     instrumentedProfilingIsCached_(false),
     safeForMinorGC_(true),
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-    usesSignalHandlersForAsmJSOOB_(false),
-#endif
     minAsmJSHeapLength_(0),
     options(options),
     gs_(alloc)
 { }
 
 bool
 MIRGenerator::usesSimd()
 {
@@ -111,56 +108,52 @@ MIRGenerator::addAbortedPreliminaryGroup
 bool
 MIRGenerator::needsBoundsCheckBranch(const MWasmMemoryAccess* access) const
 {
     // A heap access needs a bounds-check branch if we're not relying on signal
     // handlers to catch errors, and if it's not proven to be within bounds.
     // We use signal-handlers on x64, but on x86 there isn't enough address
     // space for a guard region.  Also, on x64 the atomic loads and stores
     // can't (yet) use the signal handlers.
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-    if (usesSignalHandlersForAsmJSOOB_ && !access->isAtomicAccess())
-        return false;
+#ifdef WASM_HUGE_MEMORY
+    return false;
+#else
+    return access->needsBoundsCheck();
 #endif
-    return access->needsBoundsCheck();
 }
 
 size_t
 MIRGenerator::foldableOffsetRange(const MWasmMemoryAccess* access) const
 {
     // This determines whether it's ok to fold up to WasmImmediateRange
     // offsets, instead of just WasmCheckedImmediateRange.
 
     static_assert(WasmCheckedImmediateRange <= WasmImmediateRange,
                   "WasmImmediateRange should be the size of an unconstrained "
                   "address immediate");
 
-#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
+#ifdef WASM_HUGE_MEMORY
     static_assert(wasm::Uint32Range + WasmImmediateRange + sizeof(wasm::Val) < wasm::MappedSize,
                   "When using signal handlers for bounds checking, a uint32 is added to the base "
                   "address followed by an immediate in the range [0, WasmImmediateRange). An "
                   "unaligned access (whose size is conservatively approximated by wasm::Val) may "
                   "spill over, so ensure a space at the end.");
-
-    // Signal-handling can be dynamically disabled by OS bugs or flags.
-    // Bug 1254935: Atomic accesses can't be handled with signal handlers yet.
-    if (usesSignalHandlersForAsmJSOOB_ && !access->isAtomicAccess())
-        return WasmImmediateRange;
-#endif
-
+    return WasmImmediateRange;
+#else
     // On 32-bit platforms, if we've proven the access is in bounds after
     // 32-bit wrapping, we can fold full offsets because they're added with
     // 32-bit arithmetic.
     if (sizeof(intptr_t) == sizeof(int32_t) && !access->needsBoundsCheck())
         return WasmImmediateRange;
 
     // Otherwise, only allow the checked size. This is always less than the
     // minimum heap length, and allows explicit bounds checks to fold in the
     // offset without overflow.
     return WasmCheckedImmediateRange;
+#endif
 }
 
 void
 MIRGraph::addBlock(MBasicBlock* block)
 {
     MOZ_ASSERT(block);
     block->setId(blockIdGen_++);
     blocks_.pushBack(block);
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -2744,16 +2744,36 @@ MacroAssembler::wasmCallImport(const was
     // Switch to the callee's TLS and pinned registers and make the call.
     loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, tls), WasmTlsReg);
     loadWasmPinnedRegsFromTls();
 
     call(desc, ABINonArgReg0);
 }
 
 void
+MacroAssembler::wasmCallBuiltinInstanceMethod(const ABIArg& instanceArg,
+                                              wasm::SymbolicAddress builtin)
+{
+    MOZ_ASSERT(instanceArg != ABIArg());
+
+    if (instanceArg.kind() == ABIArg::GPR) {
+        loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), instanceArg.gpr());
+    } else if (instanceArg.kind() == ABIArg::Stack) {
+        // Safe to use ABINonArgReg0 since it's the last thing before the call
+        Register scratch = ABINonArgReg0;
+        loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), scratch);
+        storePtr(scratch, Address(getStackPointer(), instanceArg.offsetFromArgBase()));
+    } else {
+        MOZ_CRASH("Unknown abi passing style for pointer");
+    }
+
+    call(builtin);
+}
+
+void
 MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee)
 {
     Register scratch = WasmTableCallScratchReg;
     Register index = WasmTableCallIndexReg;
 
     if (callee.which() == wasm::CalleeDesc::AsmJSTable) {
         // asm.js tables require no signature check, have had their index masked
         // into range and thus need no bounds check and cannot be external.
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1328,16 +1328,22 @@ class MacroAssembler : public MacroAssem
 
     // This function takes care of loading the callee's TLS and pinned regs but
     // it is the caller's responsibility to save/restore TLS or pinned regs.
     void wasmCallImport(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee);
 
     // WasmTableCallIndexReg must contain the index of the indirect call.
     void wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee);
 
+    // This function takes care of loading the pointer to the current instance
+    // as the implicit first argument. It preserves TLS and pinned registers.
+    // (TLS & pinned regs are non-volatile registers in the system ABI).
+    void wasmCallBuiltinInstanceMethod(const ABIArg& instanceArg,
+                                       wasm::SymbolicAddress builtin);
+
     //}}} check_macroassembler_style
   public:
 
     // Emits a test of a value against all types in a TypeSet. A scratch
     // register is required.
     template <typename Source>
     void guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind, Register scratch, Label* miss);
 
--- a/js/src/jit/RegisterSets.h
+++ b/js/src/jit/RegisterSets.h
@@ -1266,16 +1266,36 @@ class ABIArg
         MOZ_ASSERT(isGeneralRegPair());
         return Register::FromCode(u.gpr_ + 1);
     }
     FloatRegister fpu() const { MOZ_ASSERT(kind() == FPU); return FloatRegister::FromCode(u.fpu_); }
     uint32_t offsetFromArgBase() const { MOZ_ASSERT(kind() == Stack); return u.offset_; }
 
     bool argInRegister() const { return kind() != Stack; }
     AnyRegister reg() const { return kind_ == GPR ? AnyRegister(gpr()) : AnyRegister(fpu()); }
+
+    bool operator==(const ABIArg& rhs) const {
+        if (kind_ != rhs.kind_)
+            return false;
+
+        switch((int8_t)kind_) {
+            case GPR:   return u.gpr_ == rhs.u.gpr_;
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+            case GPR_PAIR: return u.gpr_ == rhs.u.gpr_;
+#endif
+            case FPU:   return u.fpu_ == rhs.u.fpu_;
+            case Stack: return u.offset_ == rhs.u.offset_;
+            case -1:    return true;
+            default:    MOZ_CRASH("Invalid value for ABIArg kind");
+        }
+    }
+
+    bool operator!=(const ABIArg& rhs) const {
+        return !(*this == rhs);
+    }
 };
 
 // Get the set of registers which should be saved by a block of code which
 // clobbers all registers besides |unused|, but does not clobber floating point
 // registers.
 inline LiveGeneralRegisterSet
 SavedNonVolatileRegisters(AllocatableGeneralRegisterSet unused)
 {
--- a/js/src/jit/arm/Simulator-arm.cpp
+++ b/js/src/jit/arm/Simulator-arm.cpp
@@ -30,16 +30,17 @@
 
 #include "mozilla/Casting.h"
 #include "mozilla/DebugOnly.h"
 #include "mozilla/FloatingPoint.h"
 #include "mozilla/Likely.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/SizePrintfMacros.h"
 
+#include "asmjs/WasmInstance.h"
 #include "asmjs/WasmSignalHandlers.h"
 #include "jit/arm/Assembler-arm.h"
 #include "jit/arm/disasm/Constants-arm.h"
 #include "jit/AtomicOperations.h"
 #include "threading/LockGuard.h"
 #include "vm/Runtime.h"
 #include "vm/SharedMem.h"
 
@@ -385,19 +386,19 @@ class AutoLockSimulatorCache : public Lo
     Simulator* const sim_;
 };
 
 bool Simulator::ICacheCheckingEnabled = false;
 
 int64_t Simulator::StopSimAt = -1L;
 
 Simulator*
-Simulator::Create()
+Simulator::Create(JSContext* cx)
 {
-    Simulator* sim = js_new<Simulator>();
+    Simulator* sim = js_new<Simulator>(cx);
     if (!sim)
         return nullptr;
 
     if (!sim->init()) {
         js_delete(sim);
         return nullptr;
     }
 
@@ -1070,17 +1071,18 @@ Simulator::FlushICache(void* start_addr,
         Simulator* sim = Simulator::Current();
 
         AutoLockSimulatorCache als(sim);
 
         js::jit::FlushICacheLocked(sim->icache(), start_addr, size);
     }
 }
 
-Simulator::Simulator()
+Simulator::Simulator(JSContext* cx)
+  : cx_(cx)
 {
     // Set up simulator support first. Some of this information is needed to
     // setup the architecture state.
 
     // Note, allocation and anything that depends on allocated memory is
     // deferred until init(), in order to handle OOM properly.
 
     stack_ = nullptr;
@@ -1491,19 +1493,44 @@ Simulator::exclusiveMonitorGetAndClear(b
 }
 
 void
 Simulator::exclusiveMonitorClear()
 {
     exclusiveMonitorHeld_ = false;
 }
 
+// WebAssembly memories contain an extra region of guard pages (see
+// WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses
+// using a signal handler that redirects PC to a stub that safely reports an
+// error. However, if the handler is hit by the simulator, the PC is in C++ code
+// and cannot be redirected. Therefore, we must avoid hitting the handler by
+// redirecting in the simulator before the real handler would have been hit.
+bool
+Simulator::handleWasmFault(int32_t addr, unsigned numBytes)
+{
+    WasmActivation* act = cx_->wasmActivationStack();
+    if (!act)
+        return false;
+
+    void* pc = reinterpret_cast<void*>(get_pc());
+    wasm::Instance* instance = act->compartment()->wasm.lookupInstanceDeprecated(pc);
+    if (!instance || !instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
+        return false;
+
+    set_pc(int32_t(instance->codeSegment().outOfBoundsCode()));
+    return true;
+}
+
 int
 Simulator::readW(int32_t addr, SimInstruction* instr, UnalignedPolicy f)
 {
+    if (handleWasmFault(addr, 4))
+        return -1;
+
     if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
         intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
         return *ptr;
     }
 
     // In WebAssembly, we want unaligned accesses to either raise a signal or
     // do the right thing. Making this simulator properly emulate the behavior
     // of raising a signal is complex, so as a special-case, when in wasm code,
@@ -1517,16 +1544,19 @@ Simulator::readW(int32_t addr, SimInstru
 
     printf("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
     MOZ_CRASH();
 }
 
 void
 Simulator::writeW(int32_t addr, int value, SimInstruction* instr, UnalignedPolicy f)
 {
+    if (handleWasmFault(addr, 4))
+        return;
+
     if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
         intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
         *ptr = value;
         return;
     }
 
     // See the comments above in readW.
     if (wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
@@ -1589,16 +1619,19 @@ Simulator::writeExW(int32_t addr, int va
 
     printf("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
     MOZ_CRASH();
 }
 
 uint16_t
 Simulator::readHU(int32_t addr, SimInstruction* instr)
 {
+    if (handleWasmFault(addr, 2))
+        return UINT16_MAX;
+
     // The regexp engine emits unaligned loads, so we don't check for them here
     // like most of the other methods do.
     if ((addr & 1) == 0 || !HasAlignmentFault()) {
         uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
         return *ptr;
     }
 
     // See comments in readW.
@@ -1612,16 +1645,19 @@ Simulator::readHU(int32_t addr, SimInstr
     printf("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
     MOZ_CRASH();
     return 0;
 }
 
 int16_t
 Simulator::readH(int32_t addr, SimInstruction* instr)
 {
+    if (handleWasmFault(addr, 2))
+        return -1;
+
     if ((addr & 1) == 0 || !HasAlignmentFault()) {
         int16_t* ptr = reinterpret_cast<int16_t*>(addr);
         return *ptr;
     }
 
     // See comments in readW.
     if (wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
         char* ptr = reinterpret_cast<char*>(addr);
@@ -1633,16 +1669,19 @@ Simulator::readH(int32_t addr, SimInstru
     printf("Unaligned signed halfword read at 0x%08x\n", addr);
     MOZ_CRASH();
     return 0;
 }
 
 void
 Simulator::writeH(int32_t addr, uint16_t value, SimInstruction* instr)
 {
+    if (handleWasmFault(addr, 2))
+        return;
+
     if ((addr & 1) == 0 || !HasAlignmentFault()) {
         uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
         *ptr = value;
         return;
     }
 
     // See the comments above in readW.
     if (wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
@@ -1653,16 +1692,19 @@ Simulator::writeH(int32_t addr, uint16_t
 
     printf("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
     MOZ_CRASH();
 }
 
 void
 Simulator::writeH(int32_t addr, int16_t value, SimInstruction* instr)
 {
+    if (handleWasmFault(addr, 2))
+        return;
+
     if ((addr & 1) == 0 || !HasAlignmentFault()) {
         int16_t* ptr = reinterpret_cast<int16_t*>(addr);
         *ptr = value;
         return;
     }
 
     // See the comments above in readW.
     if (wasm::IsPCInWasmCode(reinterpret_cast<void *>(get_pc()))) {
@@ -1706,16 +1748,19 @@ Simulator::writeExH(int32_t addr, uint16
         printf("Unaligned atomic unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
         MOZ_CRASH();
     }
 }
 
 uint8_t
 Simulator::readBU(int32_t addr)
 {
+    if (handleWasmFault(addr, 1))
+        return UINT8_MAX;
+
     uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
     return *ptr;
 }
 
 uint8_t
 Simulator::readExBU(int32_t addr)
 {
     SharedMem<uint8_t*> ptr = SharedMem<uint8_t*>::shared(reinterpret_cast<uint8_t*>(addr));
@@ -1734,30 +1779,39 @@ Simulator::writeExB(int32_t addr, uint8_
         return 1;
     uint8_t old = compareExchangeRelaxed(ptr, expected, value);
     return old != expected;
 }
 
 int8_t
 Simulator::readB(int32_t addr)
 {
+    if (handleWasmFault(addr, 1))
+        return -1;
+
     int8_t* ptr = reinterpret_cast<int8_t*>(addr);
     return *ptr;
 }
 
 void
 Simulator::writeB(int32_t addr, uint8_t value)
 {
+    if (handleWasmFault(addr, 1))
+        return;
+
     uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
     *ptr = value;
 }
 
 void
 Simulator::writeB(int32_t addr, int8_t value)
 {
+    if (handleWasmFault(addr, 1))
+        return;
+
     int8_t* ptr = reinterpret_cast<int8_t*>(addr);
     *ptr = value;
 }
 
 int32_t*
 Simulator::readDW(int32_t addr)
 {
     if ((addr & 3) == 0) {
--- a/js/src/jit/arm/Simulator-arm.h
+++ b/js/src/jit/arm/Simulator-arm.h
@@ -95,22 +95,22 @@ class Simulator
         d24, d25, d26, d27, d28, d29, d30, d31,
         num_d_registers = 32,
         q0 = 0, q1, q2, q3, q4, q5, q6, q7,
         q8, q9, q10, q11, q12, q13, q14, q15,
         num_q_registers = 16
     };
 
     // Returns nullptr on OOM.
-    static Simulator* Create();
+    static Simulator* Create(JSContext* cx);
 
     static void Destroy(Simulator* simulator);
 
     // Constructor/destructor are for internal use only; use the static methods above.
-    Simulator();
+    explicit Simulator(JSContext* cx);
     ~Simulator();
 
     // The currently executing Simulator instance. Potentially there can be one
     // for each native thread.
     static Simulator* Current();
 
     static inline uintptr_t StackLimit() {
         return Simulator::Current()->stackLimit();
@@ -254,16 +254,19 @@ class Simulator
     inline bool isStopInstruction(SimInstruction* instr);
     inline bool isWatchedStop(uint32_t bkpt_code);
     inline bool isEnabledStop(uint32_t bkpt_code);
     inline void enableStop(uint32_t bkpt_code);
     inline void disableStop(uint32_t bkpt_code);
     inline void increaseStopCounter(uint32_t bkpt_code);
     void printStopInfo(uint32_t code);
 
+    // Handle any wasm faults, returning true if the fault was handled.
+    inline bool handleWasmFault(int32_t addr, unsigned numBytes);
+
     // Read and write memory.
     inline uint8_t readBU(int32_t addr);
     inline int8_t readB(int32_t addr);
     inline void writeB(int32_t addr, uint8_t value);
     inline void writeB(int32_t addr, int8_t value);
 
     inline uint8_t readExBU(int32_t addr);
     inline int32_t writeExB(int32_t addr, uint8_t value);
@@ -344,16 +347,18 @@ class Simulator
     template<class ReturnType, int register_size>
     void getFromVFPRegister(int reg_index, ReturnType* out);
 
     template<class InputType, int register_size>
     void setVFPRegister(int reg_index, const InputType& value);
 
     void callInternal(uint8_t* entry);
 
+    JSContext* const cx_;
+
     // Architecture state.
     // Saturating instructions require a Q flag to indicate saturation.
     // There is currently no way to read the CPSR directly, and thus read the Q
     // flag, so this is left unimplemented.
     int32_t registers_[16];
     bool n_flag_;
     bool z_flag_;
     bool c_flag_;
--- a/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
@@ -148,17 +148,17 @@ void Simulator::init(Decoder* decoder, F
 }
 
 
 Simulator* Simulator::Current() {
   return js::TlsPerThreadData.get()->simulator();
 }
 
 
-Simulator* Simulator::Create() {
+Simulator* Simulator::Create(JSContext* cx) {
   Decoder *decoder = js_new<vixl::Decoder>();
   if (!decoder)
     return nullptr;
 
   // FIXME: This just leaks the Decoder object for now, which is probably OK.
   // FIXME: We should free it at some point.
   // FIXME: Note that it can't be stored in the SimulatorRuntime due to lifetime conflicts.
   Simulator *sim;
--- a/js/src/jit/arm64/vixl/Simulator-vixl.h
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.h
@@ -708,17 +708,17 @@ class Simulator : public DecoderVisitor 
 
  public:
   explicit Simulator(Decoder* decoder, FILE* stream = stdout);
   ~Simulator();
 
   // Moz changes.
   void init(Decoder* decoder, FILE* stream);
   static Simulator* Current();
-  static Simulator* Create();
+  static Simulator* Create(JSContext* cx);
   static void Destroy(Simulator* sim);
   uintptr_t stackLimit() const;
   uintptr_t* addressOfStackLimit();
   bool overRecursed(uintptr_t newsp = 0) const;
   bool overRecursedWithExtra(uint32_t extra) const;
   int64_t call(uint8_t* entry, int argument_count, ...);
   void setRedirection(Redirection* redirection);
   Redirection* redirection() const;
--- a/js/src/jit/mips32/Simulator-mips32.cpp
+++ b/js/src/jit/mips32/Simulator-mips32.cpp
@@ -513,17 +513,17 @@ class AutoLockSimulatorCache : public Lo
     Simulator* const sim_;
 };
 
 bool Simulator::ICacheCheckingEnabled = false;
 
 int Simulator::StopSimAt = -1;
 
 Simulator*
-Simulator::Create()
+Simulator::Create(JSContext* cx)
 {
     Simulator* sim = js_new<Simulator>();
     if (!sim)
         return nullptr;
 
     if (!sim->init()) {
         js_delete(sim);
         return nullptr;
--- a/js/src/jit/mips32/Simulator-mips32.h
+++ b/js/src/jit/mips32/Simulator-mips32.h
@@ -137,17 +137,17 @@ class Simulator {
         f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
         f12, f13, f14, f15,   // f12 and f14 are arguments FPURegisters.
         f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
         f26, f27, f28, f29, f30, f31,
         kNumFPURegisters
     };
 
     // Returns nullptr on OOM.
-    static Simulator* Create();
+    static Simulator* Create(JSContext* cx);
 
     static void Destroy(Simulator* simulator);
 
     // Constructor/destructor are for internal use only; use the static methods above.
     Simulator();
     ~Simulator();
 
     // The currently executing Simulator instance. Potentially there can be one
--- a/js/src/jit/mips64/Simulator-mips64.cpp
+++ b/js/src/jit/mips64/Simulator-mips64.cpp
@@ -551,17 +551,17 @@ class AutoLockSimulatorCache : public Lo
     Simulator* const sim_;
 };
 
 bool Simulator::ICacheCheckingEnabled = false;
 
 int64_t Simulator::StopSimAt = -1;
 
 Simulator *
-Simulator::Create()
+Simulator::Create(JSContext* cx)
 {
     Simulator* sim = js_new<Simulator>();
     if (!sim)
         return nullptr;
 
     if (!sim->init()) {
         js_delete(sim);
         return nullptr;
--- a/js/src/jit/mips64/Simulator-mips64.h
+++ b/js/src/jit/mips64/Simulator-mips64.h
@@ -142,17 +142,17 @@ class Simulator {
     enum FPURegister {
         f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
         f12, f13, f14, f15, f16, f17, f18, f19, f20, f21,
         f22, f23, f24, f25, f26, f27, f28, f29, f30, f31,
         kNumFPURegisters
     };
 
     // Returns nullptr on OOM.
-    static Simulator* Create();
+    static Simulator* Create(JSContext* cx);
 
     static void Destroy(Simulator* simulator);
 
     // Constructor/destructor are for internal use only; use the static methods above.
     Simulator();
     ~Simulator();
 
     // The currently executing Simulator instance. Potentially there can be one
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1514,16 +1514,19 @@ CodeGeneratorShared::emitWasmCallBase(LW
         break;
       case wasm::CalleeDesc::WasmTable:
       case wasm::CalleeDesc::AsmJSTable:
         masm.wasmCallIndirect(desc, callee);
         break;
       case wasm::CalleeDesc::Builtin:
         masm.call(callee.builtin());
         break;
+      case wasm::CalleeDesc::BuiltinInstanceMethod:
+        masm.wasmCallBuiltinInstanceMethod(mir->instanceArg(), callee.builtin());
+        break;
     }
 
     // After return, restore the caller's TLS and pinned registers.
     if (mir->saveTls()) {
         masm.loadPtr(Address(masm.getStackPointer(), mir->tlsStackOffset()), WasmTlsReg);
         masm.loadWasmPinnedRegsFromTls();
     }
 
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -465,24 +465,16 @@ CodeGeneratorX64::loadSimd(Scalar::Type 
 static wasm::MemoryAccess
 AsmJSMemoryAccess(uint32_t before, wasm::MemoryAccess::OutOfBoundsBehavior throwBehavior,
                   uint32_t offsetWithinWholeSimdVector = 0)
 {
     return wasm::MemoryAccess(before, throwBehavior, wasm::MemoryAccess::WrapOffset,
                               offsetWithinWholeSimdVector);
 }
 
-static wasm::MemoryAccess
-WasmMemoryAccess(uint32_t before)
-{
-    return wasm::MemoryAccess(before,
-                              wasm::MemoryAccess::Throw,
-                              wasm::MemoryAccess::DontWrapOffset);
-}
-
 void
 CodeGeneratorX64::load(Scalar::Type type, const Operand& srcAddr, AnyRegister out)
 {
     switch (type) {
       case Scalar::Int8:      masm.movsbl(srcAddr, out.gpr()); break;
       case Scalar::Uint8:     masm.movzbl(srcAddr, out.gpr()); break;
       case Scalar::Int16:     masm.movswl(srcAddr, out.gpr()); break;
       case Scalar::Uint16:    masm.movzwl(srcAddr, out.gpr()); break;
@@ -549,18 +541,16 @@ CodeGeneratorX64::emitWasmLoad(T* ins)
     if (isInt64)
         loadI64(accessType, srcAddr, ToOutRegister64(ins));
     else
         load(accessType, srcAddr, ToAnyRegister(ins->output()));
     uint32_t after = masm.size();
 
     verifyLoadDisassembly(before, after, isInt64, accessType, /* numElems */ 0, srcAddr,
                           *ins->output()->output());
-
-    masm.append(WasmMemoryAccess(before));
 }
 
 void
 CodeGeneratorX64::visitWasmLoad(LWasmLoad* ins)
 {
     emitWasmLoad(ins);
 }
 
@@ -588,18 +578,16 @@ CodeGeneratorX64::emitWasmStore(T* ins)
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
     uint32_t before = masm.size();
     store(accessType, value, dstAddr);
     uint32_t after = masm.size();
 
     verifyStoreDisassembly(before, after, mir->value()->type() == MIRType::Int64,
                            accessType, /* numElems */ 0, dstAddr, *value);
-
-    masm.append(WasmMemoryAccess(before));
 }
 
 void
 CodeGeneratorX64::visitWasmStore(LWasmStore* ins)
 {
     emitWasmStore(ins);
 }
 
@@ -694,17 +682,20 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAs
     if (ool) {
         MOZ_ASSERT(hasBoundsCheck);
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
         masm.bind(ool->rejoin());
     }
 
     memoryBarrier(mir->barrierAfter());
 
-    masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
+    // We cannot emulate atomic accesses currently.
+    masm.append(AsmJSMemoryAccess(before, (mir->isAtomicAccess() ?
+                                           wasm::MemoryAccess::Throw :
+                                           wasm::MemoryAccess::CarryOn)));
 }
 
 void
 CodeGeneratorX64::store(Scalar::Type type, const LAllocation* value, const Operand& dstAddr)
 {
     if (value->isConstant()) {
         const MConstant* mir = value->toConstant();
         Imm32 cst = Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64());
@@ -897,17 +888,20 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LA
     if (rejoin) {
         MOZ_ASSERT(hasBoundsCheck);
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
         masm.bind(rejoin);
     }
 
     memoryBarrier(mir->barrierAfter());
 
-    masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
+    // See comment in visitAsmJSLoadHeap
+    masm.append(AsmJSMemoryAccess(before, (mir->isAtomicAccess() ?
+                                           wasm::MemoryAccess::Throw :
+                                           wasm::MemoryAccess::CarryOn)));
 }
 
 void
 CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     MAsmJSCompareExchangeHeap* mir = ins->mir();
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -536,17 +536,17 @@ CodeGeneratorX86Shared::visitWasmTruncat
 
     masm.bind(ool->rejoin());
 }
 
 void
 CodeGeneratorX86Shared::maybeEmitWasmBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr,
                                                        bool redundant)
 {
-    if (!mir->needsBoundsCheck())
+    if (!gen->needsBoundsCheckBranch(mir))
         return;
 
     MOZ_ASSERT(mir->endOffset() >= 1,
                "need to subtract 1 to use JAE, see also AssemblerX86Shared::UpdateBoundsCheck");
 
     // TODO: See 1287224 Unify MWasmBoundsCheck::redunant_ and needsBoundsCheck
     if (!redundant) {
         uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(1 - mir->endOffset())).offset();
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -6166,18 +6166,18 @@ JS_SetGlobalJitCompilerOption(JSContext*
             jit::DefaultJitOptions defaultValues;
             value = defaultValues.jumpThreshold;
         }
         jit::JitOptions.jumpThreshold = value;
         break;
       case JSJITCOMPILER_WASM_TEST_MODE:
         jit::JitOptions.wasmTestMode = !!value;
         break;
-      case JSJITCOMPILER_WASM_EXPLICIT_BOUNDS_CHECKS:
-        jit::JitOptions.wasmExplicitBoundsChecks = !!value;
+      case JSJITCOMPILER_ION_INTERRUPT_WITHOUT_SIGNAL:
+        jit::JitOptions.ionInterruptWithoutSignals = !!value;
         break;
       default:
         break;
     }
 }
 
 JS_PUBLIC_API(int)
 JS_GetGlobalJitCompilerOption(JSContext* cx, JSJitCompilerOption opt)
@@ -6196,18 +6196,18 @@ JS_GetGlobalJitCompilerOption(JSContext*
       case JSJITCOMPILER_ION_ENABLE:
         return JS::ContextOptionsRef(cx).ion();
       case JSJITCOMPILER_BASELINE_ENABLE:
         return JS::ContextOptionsRef(cx).baseline();
       case JSJITCOMPILER_OFFTHREAD_COMPILATION_ENABLE:
         return rt->canUseOffthreadIonCompilation();
       case JSJITCOMPILER_WASM_TEST_MODE:
         return jit::JitOptions.wasmTestMode ? 1 : 0;
-      case JSJITCOMPILER_WASM_EXPLICIT_BOUNDS_CHECKS:
-        return jit::JitOptions.wasmExplicitBoundsChecks ? 1 : 0;
+      case JSJITCOMPILER_ION_INTERRUPT_WITHOUT_SIGNAL:
+        return jit::JitOptions.ionInterruptWithoutSignals ? 1 : 0;
       default:
         break;
     }
 #endif
     return 0;
 }
 
 /************************************************************************/
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -5614,21 +5614,21 @@ extern JS_PUBLIC_API(void)
 JS_SetOffthreadIonCompilationEnabled(JSContext* cx, bool enabled);
 
 #define JIT_COMPILER_OPTIONS(Register)                                     \
     Register(BASELINE_WARMUP_TRIGGER, "baseline.warmup.trigger")           \
     Register(ION_WARMUP_TRIGGER, "ion.warmup.trigger")                     \
     Register(ION_GVN_ENABLE, "ion.gvn.enable")                             \
     Register(ION_FORCE_IC, "ion.forceinlineCaches")                        \
     Register(ION_ENABLE, "ion.enable")                                     \
+    Register(ION_INTERRUPT_WITHOUT_SIGNAL, "ion.interrupt-without-signals") \
     Register(BASELINE_ENABLE, "baseline.enable")                           \
     Register(OFFTHREAD_COMPILATION_ENABLE, "offthread-compilation.enable") \
     Register(JUMP_THRESHOLD, "jump-threshold")                             \
-    Register(WASM_TEST_MODE, "wasm.test-mode")                             \
-    Register(WASM_EXPLICIT_BOUNDS_CHECKS, "wasm.explicit-bounds-checks")
+    Register(WASM_TEST_MODE, "wasm.test-mode")
 
 typedef enum JSJitCompilerOption {
 #define JIT_COMPILER_DECLARE(key, str) \
     JSJITCOMPILER_ ## key,
 
     JIT_COMPILER_OPTIONS(JIT_COMPILER_DECLARE)
 #undef JIT_COMPILER_DECLARE
 
--- a/js/src/old-configure.in
+++ b/js/src/old-configure.in
@@ -1763,37 +1763,26 @@ elif test "$JS_SIMULATOR" = mips64; then
     JS_SIMULATOR_MIPS64=1
     JS_CODEGEN_MIPS64=1
 elif test "$CPU_ARCH" = "x86"; then
     AC_DEFINE(JS_CODEGEN_X86)
     JS_CODEGEN_X86=1
 elif test "$CPU_ARCH" = "x86_64"; then
     AC_DEFINE(JS_CODEGEN_X64)
     JS_CODEGEN_X64=1
-
-    dnl Signal-handler OOM checking requires large mprotected guard regions, so
-    dnl currently it is only implemented on x64.
-    AC_DEFINE(ASMJS_MAY_USE_SIGNAL_HANDLERS)
-    AC_DEFINE(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-    ASMJS_MAY_USE_SIGNAL_HANDLERS=1
-    ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB=1
 elif test "$CPU_ARCH" = "arm"; then
     AC_DEFINE(JS_CODEGEN_ARM)
     JS_CODEGEN_ARM=1
     if test -n "$MOZ_DEBUG"; then
         AC_DEFINE(JS_DISASM_ARM)
         JS_DISASM_ARM=1
     fi
 
     dnl ARM platforms may trap on unaligned accesses; catch the signal and
     dnl recover.
-    AC_DEFINE(ASMJS_MAY_USE_SIGNAL_HANDLERS)
-    AC_DEFINE(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
-    ASMJS_MAY_USE_SIGNAL_HANDLERS=1
-    ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED=1
 elif test "$CPU_ARCH" = "mips32"; then
     AC_DEFINE(JS_CODEGEN_MIPS32)
     JS_CODEGEN_MIPS32=1
 elif test "$CPU_ARCH" = "mips64"; then
     AC_DEFINE(JS_CODEGEN_MIPS64)
     JS_CODEGEN_MIPS64=1
 fi
 
@@ -1805,19 +1794,16 @@ AC_SUBST(JS_SIMULATOR_MIPS64)
 AC_SUBST(JS_CODEGEN_ARM)
 AC_SUBST(JS_CODEGEN_ARM64)
 AC_SUBST(JS_CODEGEN_MIPS32)
 AC_SUBST(JS_CODEGEN_MIPS64)
 AC_SUBST(JS_CODEGEN_X86)
 AC_SUBST(JS_CODEGEN_X64)
 AC_SUBST(JS_CODEGEN_NONE)
 AC_SUBST(JS_DISASM_ARM)
-AC_SUBST(ASMJS_MAY_USE_SIGNAL_HANDLERS)
-AC_SUBST(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-AC_SUBST(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
 
 dnl ========================================================
 dnl instruments
 dnl ========================================================
 if test -n "$MOZ_INSTRUMENTS"; then
     LIBS="$LIBS -framework CoreFoundation"
 fi
 
--- a/js/src/vm/ArrayBufferObject-inl.h
+++ b/js/src/vm/ArrayBufferObject-inl.h
@@ -38,16 +38,36 @@ ArrayBufferObjectMaybeShared::isDetached
 inline uint32_t
 AnyArrayBufferByteLength(const ArrayBufferObjectMaybeShared* buf)
 {
     if (buf->is<ArrayBufferObject>())
         return buf->as<ArrayBufferObject>().byteLength();
     return buf->as<SharedArrayBufferObject>().byteLength();
 }
 
+inline size_t
+WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf)
+{
+    if (buf->is<ArrayBufferObject>())
+        return buf->as<ArrayBufferObject>().wasmMappedSize();
+#ifdef WASM_HUGE_MEMORY
+    return wasm::MappedSize;
+#else
+    return buf->as<SharedArrayBufferObject>().byteLength();
+#endif
+}
+
+inline uint32_t
+WasmArrayBufferActualByteLength(const ArrayBufferObjectMaybeShared* buf)
+{
+    if (buf->is<ArrayBufferObject>())
+        return buf->as<ArrayBufferObject>().wasmActualByteLength();
+    return buf->as<SharedArrayBufferObject>().byteLength();
+}
+
 inline ArrayBufferObjectMaybeShared&
 AsAnyArrayBuffer(HandleValue val)
 {
     if (val.toObject().is<ArrayBufferObject>())
         return val.toObject().as<ArrayBufferObject>();
     return val.toObject().as<SharedArrayBufferObject>();
 }
 
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -1,18 +1,21 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+#include "vm/ArrayBufferObject-inl.h"
 #include "vm/ArrayBufferObject.h"
 
 #include "mozilla/Alignment.h"
+#include "mozilla/CheckedInt.h"
 #include "mozilla/FloatingPoint.h"
+#include "mozilla/Maybe.h"
 #include "mozilla/PodOperations.h"
 #include "mozilla/TaggedAnonymousMemory.h"
 
 #include <string.h>
 #ifndef XP_WIN
 # include <sys/mman.h>
 #endif
 
@@ -29,16 +32,17 @@
 #include "jsobj.h"
 #include "jstypes.h"
 #include "jsutil.h"
 #ifdef XP_WIN
 # include "jswin.h"
 #endif
 #include "jswrapper.h"
 
+#include "asmjs/WasmSignalHandlers.h"
 #include "asmjs/WasmTypes.h"
 #include "gc/Barrier.h"
 #include "gc/Marking.h"
 #include "gc/Memory.h"
 #include "js/Conversions.h"
 #include "js/MemoryMetrics.h"
 #include "vm/GlobalObject.h"
 #include "vm/Interpreter.h"
@@ -48,16 +52,19 @@
 #include "jsatominlines.h"
 
 #include "vm/NativeObject-inl.h"
 #include "vm/Shape-inl.h"
 
 using JS::ToInt32;
 
 using mozilla::DebugOnly;
+using mozilla::CheckedInt;
+using mozilla::Some;
+using mozilla::Maybe;
 
 using namespace js;
 using namespace js::gc;
 
 /*
  * Convert |v| to an array index for an array of length |length| per
  * the Typed Array Specification section 7.0, |subarray|. If successful,
  * the output value is in the range [0, length].
@@ -367,169 +374,383 @@ ArrayBufferObject::changeContents(JSCont
     if (InnerViewTable::ViewVector* views = innerViews.maybeViewsUnbarriered(this)) {
         for (size_t i = 0; i < views->length(); i++)
             changeViewContents(cx, (*views)[i], oldDataPointer, newContents);
     }
     if (firstView())
         changeViewContents(cx, firstView(), oldDataPointer, newContents);
 }
 
-#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
+/*
+ * Wasm Raw Buf Linear Memory Structure
+ *
+ * The linear heap in Wasm is an mmaped array buffer. Several
+ * constants manage its lifetime:
+ *
+ *  - length - the wasm-visible current length of the buffer. Accesses in the
+ *  range [0, length] succeed. May only increase.
+ *
+ *  - boundsCheckLimit - size against which we perform bounds checks. It is
+ *  always a constant offset smaller than mapped_size. Currently that constant
+ *  offset is 0.
+ *
+ *  - max - the optional declared limit on how much length can grow.
+ *
+ *  - mapped_size - the actual mmaped size. Access in the range
+ *  [0, mapped_size] will either succeed, or be handled by the wasm signal
+ *  handlers.
+ *
+ * The below diagram shows the layout of the wasm heap. The wasm-visible
+ * portion of the heap starts at 0. There is one extra page prior to the
+ * start of the wasm heap which contains the WasmArrayRawBuffer struct at
+ * its end. (i.e. right before the start of the WASM heap).
+ *
+ *  WasmArrayRawBuffer
+ *      \    ArrayBufferObject::dataPointer()
+ *       \  /
+ *        \ |
+ *  ______|_|____________________________________________________________
+ * |______|_|______________|___________________|____________|____________|
+ *          0          length              maxSize  boundsCheckLimit  mappedSize
+ *
+ * \_______________________/
+ *          COMMITED
+ *                          \____________________________________________/
+ *                                           SLOP
+ * \_____________________________________________________________________/
+ *                         MAPPED
+ *
+ * Invariants:
+ *  - length only increases
+ *  - 0 <= length <= maxSize (if present) <= boundsCheckLimit <= mappedSize
+ *  - on ARM boundsCheckLimit must be a valid ARM immediate.
+ *  - if maxSize is not specified, boundsCheckLimit/mappedSize may grow. They are
+ *    otherwise constant.
+ *
+ * NOTE: For asm.js on non-x64 we guarantee that
+ *
+ * length == maxSize == boundsCheckLimit == mappedSize
+ *
+ * That is, signal handlers will not be invoked, since they cannot emulate
+ * asm.js accesses on non-x64 architectures.
+ *
+ * The region between length and mappedSize is the SLOP - an area where we use
+ * signal handlers to catch things that slip by bounds checks. Logically it has
+ * two parts:
+ *
+ *  - from length to boundsCheckLimit - this part of the SLOP serves to catch
+ *  accesses to memory we have reserved but not yet grown into. This allows us
+ *  to grow memory up to max (when present) without having to patch/update the
+ *  bounds checks.
+ *
+ *  - from boundsCheckLimit to mappedSize - (Note: In current patch 0) - this
+ *  part of the SLOP allows us to bounds check against base pointers and fold
+ *  some constant offsets inside loads. This enables better Bounds
+ *  Check Elimination.
+ *
+ */
+
+class js::WasmArrayRawBuffer
+{
+    uint32_t length_;
+    Maybe<uint32_t> maxSize_;
+    size_t mappedSize_;
+
+  protected:
+    WasmArrayRawBuffer(uint8_t* buffer, uint32_t length, Maybe<uint32_t> maxSize, size_t mappedSize)
+      : length_(length), maxSize_(maxSize), mappedSize_(mappedSize)
+    {
+        MOZ_ASSERT(buffer == dataPointer());
+    }
+
+  public:
+    static WasmArrayRawBuffer* Allocate(uint32_t numBytes, Maybe<uint32_t> maxSize);
+    static void Release(void* mem);
+
+    uint8_t* dataPointer() {
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(this);
+        return ptr + sizeof(WasmArrayRawBuffer);
+    }
+
+    uint8_t* basePointer() {
+        return dataPointer() - gc::SystemPageSize();
+    }
+
+    // TODO: actualByteLength in WasmArrayRawBuffer is a temporary hack to allow
+    // keeping track of the size of dynamically growing WASM memory. We can't
+    // keep it in the containing ArrayBufferObject's byte length field since those
+    // are immutable. This will be removed in a followup resizing patch.
+    uint32_t actualByteLength() const {
+        return length_;
+    }
+
+    size_t mappedSize() const {
+        return mappedSize_;
+    }
+
+    Maybe<uint32_t> maxSize() const {
+        return maxSize_;
+    }
+
+    size_t allocatedBytes() const {
+        return mappedSize_ + gc::SystemPageSize();
+    }
+
+    uint32_t boundsCheckLimit() const {
+#ifdef WASM_HUGE_MEMORY
+        MOZ_CRASH();
+        return 0;
+#else
+        return (uint32_t) mappedSize_;
+#endif
+    }
+
+    MOZ_MUST_USE bool growLength(uint32_t deltaLength)
+    {
+        // This should be guaranteed by Instance::growMemory
+        MOZ_ASSERT(maxSize_);
+        MOZ_ASSERT(deltaLength % wasm::PageSize == 0);
+
+        CheckedInt<uint32_t> curLength = actualByteLength();
+        CheckedInt<uint32_t> newLength = curLength + deltaLength;
+        MOZ_RELEASE_ASSERT(newLength.isValid());
+        MOZ_ASSERT(newLength.value() <= maxSize_.value());
+
+        uint8_t* dataEnd = dataPointer() + curLength.value();
+        MOZ_ASSERT(((intptr_t)dataEnd) % gc::SystemPageSize() == 0);
 # ifdef XP_WIN
-static void*
-AllocateWasmMappedMemory(uint32_t numBytes)
+        if (deltaLength && !VirtualAlloc(dataEnd, deltaLength, MEM_COMMIT, PAGE_READWRITE))
+            return false;
+# else  // XP_WIN
+        if (deltaLength && mprotect(dataEnd, deltaLength, PROT_READ | PROT_WRITE))
+            return false;
+# endif  // !XP_WIN
+
+#  if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
+        VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, deltaLength);
+#  endif
+
+        MemProfiler::SampleNative(dataEnd, deltaLength);
+
+        length_ = newLength.value();
+        return true;
+    }
+
+    // Try and grow the mapped region of memory. Does not change current or
+    // max size. Does not move memory if no space to grow.
+    void tryGrowMaxSize(uint32_t deltaMaxSize)
+    {
+        MOZ_ASSERT(maxSize_);
+        MOZ_RELEASE_ASSERT(deltaMaxSize % wasm::PageSize  == 0);
+
+        CheckedInt<uint32_t> curMax = maxSize_.value();
+        CheckedInt<uint32_t> newMax = curMax + deltaMaxSize;
+        MOZ_RELEASE_ASSERT(newMax.isValid());
+        MOZ_RELEASE_ASSERT(newMax.value() % wasm::PageSize == 0);
+
+        size_t newMapped = wasm::LegalizeMapLength(newMax.value());
+
+# ifdef XP_WIN
+        if (!VirtualAlloc(dataPointer(), newMapped, MEM_RESERVE, PAGE_NOACCESS))
+            return;
+# elif defined(XP_DARWIN)
+        // No mechanism for remapping on macOS. Luckily shouldn't need it here
+        // as most macOS configs are 64 bit.
+        return;
+#else // Unix
+        // Note this will not move memory (no MREMAP_MAYMOVE specified)
+        if (MAP_FAILED == mremap(dataPointer(), mappedSize_, newMapped, 0))
+            return;
+# endif  // !XP_WIN
+
+        mappedSize_ = newMapped;
+        maxSize_ = Some(newMax.value());
+        return;
+    }
+};
+
+/* static */ WasmArrayRawBuffer*
+WasmArrayRawBuffer::Allocate(uint32_t numBytes, Maybe<uint32_t> maxSize)
 {
-    MOZ_ASSERT(numBytes % wasm::PageSize == 0);
+    size_t mappedSize = wasm::LegalizeMapLength(maxSize.valueOr(numBytes));
 
-    void* data = VirtualAlloc(nullptr, wasm::MappedSize, MEM_RESERVE, PAGE_NOACCESS);
+    MOZ_RELEASE_ASSERT(mappedSize <= SIZE_MAX - gc::SystemPageSize());
+    MOZ_RELEASE_ASSERT(numBytes <= maxSize.valueOr(UINT32_MAX));
+    MOZ_ASSERT(numBytes % gc::SystemPageSize() == 0);
+    MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
+
+    uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize();
+    uint64_t numBytesWithHeader = numBytes + gc::SystemPageSize();
+
+# ifdef XP_WIN
+    void* data = VirtualAlloc(nullptr, (size_t) mappedSizeWithHeader, MEM_RESERVE, PAGE_NOACCESS);
     if (!data)
         return nullptr;
 
-    if (numBytes && !VirtualAlloc(data, numBytes, MEM_COMMIT, PAGE_READWRITE)) {
+    if (!VirtualAlloc(data, numBytesWithHeader, MEM_COMMIT, PAGE_READWRITE)) {
         VirtualFree(data, 0, MEM_RELEASE);
         return nullptr;
     }
-
-    MemProfiler::SampleNative(data, numBytes);
-
-    return data;
-}
-
-static void
-ReleaseWasmMappedMemory(void* base)
-{
-    VirtualFree(base, 0, MEM_RELEASE);
-    MemProfiler::RemoveNative(base);
-}
 # else  // XP_WIN
-static void*
-AllocateWasmMappedMemory(uint32_t numBytes)
-{
-    void* data = MozTaggedAnonymousMmap(nullptr, wasm::MappedSize, PROT_NONE,
+    void* data = MozTaggedAnonymousMmap(nullptr, (size_t) mappedSizeWithHeader, PROT_NONE,
                                         MAP_PRIVATE | MAP_ANON, -1, 0, "wasm-reserved");
     if (data == MAP_FAILED)
         return nullptr;
 
-    if (numBytes && mprotect(data, numBytes, PROT_READ | PROT_WRITE)) {
-        munmap(data, wasm::MappedSize);
+    // Note we will waste a page on zero-sized memories here
+    if (mprotect(data, numBytesWithHeader, PROT_READ | PROT_WRITE)) {
+        munmap(data, mappedSizeWithHeader);
         return nullptr;
     }
-
-    MemProfiler::SampleNative(data, numBytes);
+# endif  // !XP_WIN
+    MemProfiler::SampleNative(data, numBytesWithHeader);
 
 #  if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
-    VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)data + numBytes,
-                                                   wasm::MappedSize - numBytes);
+    VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)data + numBytesWithHeader,
+                                                   mappedSizeWithHeader - numBytesWithHeader);
 #  endif
 
-    return data;
+    uint8_t* base = reinterpret_cast<uint8_t*>(data) + gc::SystemPageSize();
+    uint8_t* header = base - sizeof(WasmArrayRawBuffer);
+
+    auto rawBuf = new (header) WasmArrayRawBuffer(base, numBytes, maxSize, mappedSize);
+    return rawBuf;
 }
 
-static void
-ReleaseWasmMappedMemory(void* base)
+/* static */ void
+WasmArrayRawBuffer::Release(void* mem)
 {
-    munmap(base, wasm::MappedSize);
+    WasmArrayRawBuffer* header = (WasmArrayRawBuffer*)((uint8_t*)mem - sizeof(WasmArrayRawBuffer));
+    uint8_t* base = header->basePointer();
+    MOZ_RELEASE_ASSERT(header->mappedSize() <= SIZE_MAX - gc::SystemPageSize());
+    size_t mappedSizeWithHeader = header->mappedSize() + gc::SystemPageSize();
+
+# ifdef XP_WIN
+    VirtualFree(base, 0, MEM_RELEASE);
+# else  // XP_WIN
+    munmap(base, mappedSizeWithHeader);
+# endif  // !XP_WIN
     MemProfiler::RemoveNative(base);
 
 #  if defined(MOZ_VALGRIND) && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
-    VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(base, wasm::MappedSize);
+    VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(base, mappedSizeWithHeader);
 #  endif
 }
-# endif  // !XP_WIN
-#endif  // ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
+
+WasmArrayRawBuffer*
+ArrayBufferObject::BufferContents::wasmBuffer() const
+{
+    MOZ_RELEASE_ASSERT(kind_ == WASM_MAPPED);
+    return (WasmArrayRawBuffer*)(data_ - sizeof(WasmArrayRawBuffer));
+}
+
+#define ROUND_UP(v, a) ((v) % (a) == 0 ? (v) : v + a - ((v) % (a)))
 
 /* static */ ArrayBufferObject*
-ArrayBufferObject::createForWasm(JSContext* cx, uint32_t numBytes, bool signalsForOOB)
+ArrayBufferObject::createForWasm(JSContext* cx, uint32_t numBytes, Maybe<uint32_t> maxSize)
 {
     MOZ_ASSERT(numBytes % wasm::PageSize == 0);
+    MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
 
-    if (signalsForOOB) {
-#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
-        void* data = AllocateWasmMappedMemory(numBytes);
-        if (!data) {
+    // First try to map the maximum requested memory
+    WasmArrayRawBuffer* wasmBuf = WasmArrayRawBuffer::Allocate(numBytes, maxSize);
+    if (!wasmBuf) {
+#ifdef WASM_HUGE_MEMORY
+        ReportOutOfMemory(cx);
+        return nullptr;
+#else
+        // If we fail, and have a maxSize, try to reserve the biggest chunk in
+        // the range [numBytes, maxSize) using log backoff.
+        if (!maxSize) {
             ReportOutOfMemory(cx);
             return nullptr;
         }
 
-        BufferContents contents = BufferContents::create<WASM_MAPPED>(data);
-        ArrayBufferObject* buffer = ArrayBufferObject::create(cx, numBytes, contents);
-        if (!buffer) {
-            ReleaseWasmMappedMemory(data);
+        uint32_t cur = maxSize.value() / 2;
+
+        for (; cur > numBytes; cur = cur / 2) {
+            wasmBuf = WasmArrayRawBuffer::Allocate(numBytes, Some(ROUND_UP(cur, wasm::PageSize)));
+            if (wasmBuf)
+                break;
+        }
+
+        if (!wasmBuf) {
+            ReportOutOfMemory(cx);
             return nullptr;
         }
 
-        return buffer;
-#else
-        MOZ_CRASH("shouldn't be using signal handlers for out-of-bounds");
+        // Try to grow our chunk as much as possible.
+        for (size_t d = cur / 2; d >= wasm::PageSize; d /= 2)
+            wasmBuf->tryGrowMaxSize(ROUND_UP(d, wasm::PageSize));
 #endif
     }
 
-    auto* buffer = ArrayBufferObject::create(cx, numBytes);
-    if (!buffer)
+    void* data = wasmBuf->dataPointer();
+    BufferContents contents = BufferContents::create<WASM_MAPPED>(data);
+    ArrayBufferObject* buffer = ArrayBufferObject::create(cx, numBytes, contents);
+    if (!buffer) {
+        ReportOutOfMemory(cx);
+        WasmArrayRawBuffer::Release(data);
         return nullptr;
+    }
 
-    buffer->setIsWasmMalloced();
     return buffer;
 }
 
 /* static */ bool
-ArrayBufferObject::prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer, bool signalsForOOB)
+ArrayBufferObject::prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer)
 {
     MOZ_ASSERT(buffer->byteLength() % wasm::PageSize == 0);
-
-    if (signalsForOOB) {
-#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
-        if (buffer->isWasmMapped())
-            return true;
-
-        // This can't happen except via the shell toggling signals.enabled.
-        if (buffer->isWasmMalloced()) {
-            JS_ReportError(cx, "can't access same buffer with and without signals enabled");
-            return false;
-        }
-
-        if (buffer->forInlineTypedObject()) {
-            JS_ReportError(cx, "ArrayBuffer can't be used by asm.js");
-            return false;
-        }
-
-        void* data = AllocateWasmMappedMemory(buffer->byteLength());
-        if (!data) {
-            ReportOutOfMemory(cx);
-            return false;
-        }
-
-        // Copy over the current contents of the typed array.
-        memcpy(data, buffer->dataPointer(), buffer->byteLength());
-
-        // Swap the new elements into the ArrayBufferObject. Mark the
-        // ArrayBufferObject so we don't do this again.
-        BufferContents newContents = BufferContents::create<WASM_MAPPED>(data);
-        buffer->changeContents(cx, newContents);
-        MOZ_ASSERT(data == buffer->dataPointer());
-        return true;
-#else
-        MOZ_CRASH("shouldn't be using signal handlers for out-of-bounds");
-#endif  // ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
-    }
+    MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
 
     if (buffer->forInlineTypedObject()) {
         JS_ReportError(cx, "ArrayBuffer can't be used by asm.js");
         return false;
     }
 
+#ifdef WASM_HUGE_MEMORY
+    if (buffer->isWasmMapped())
+        return true;
+
+    uint32_t length = buffer->byteLength();
+    // Since asm.js doesn't grow, assume max is same as length.
+    WasmArrayRawBuffer* wasmBuf = WasmArrayRawBuffer::Allocate(length, Some(length));
+    if (!wasmBuf) {
+        // Note - we don't need the same backoff search as in WASM, since we don't over-map to
+        // allow growth in asm.js
+        ReportOutOfMemory(cx);
+        return false;
+    }
+
+    void* data = wasmBuf->dataPointer();
+
+    // Copy over the current contents of the typed array.
+    memcpy(data, buffer->dataPointer(), length);
+
+    // Swap the new elements into the ArrayBufferObject. Mark the
+    // ArrayBufferObject so we don't do this again.
+    BufferContents newContents = BufferContents::create<WASM_MAPPED>(data);
+    buffer->changeContents(cx, newContents);
+    MOZ_ASSERT(data == buffer->dataPointer());
+    return true;
+#else
     if (!buffer->ownsData()) {
         BufferContents contents = AllocateArrayBufferContents(cx, buffer->byteLength());
         if (!contents)
             return false;
         memcpy(contents.data(), buffer->dataPointer(), buffer->byteLength());
         buffer->changeContents(cx, contents);
     }
 
-    buffer->setIsWasmMalloced();
+    buffer->setIsAsmJSMalloced();
+
+    // On non-x64 architectures we can't yet emulate asm.js heap access.
+    MOZ_RELEASE_ASSERT(buffer->wasmActualByteLength() == buffer->wasmMappedSize());
+    MOZ_RELEASE_ASSERT(buffer->wasmActualByteLength() == buffer->wasmBoundsCheckLimit());
+#endif
     return true;
 }
 
 ArrayBufferObject::BufferContents
 ArrayBufferObject::createMappedContents(int fd, size_t offset, size_t length)
 {
     void* data = AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT);
     MemProfiler::SampleNative(data, length);
@@ -556,29 +777,25 @@ ArrayBufferObject::dataPointerShared() c
 
 void
 ArrayBufferObject::releaseData(FreeOp* fop)
 {
     MOZ_ASSERT(ownsData());
 
     switch (bufferKind()) {
       case PLAIN:
-      case WASM_MALLOCED:
+      case ASMJS_MALLOCED:
         fop->free_(dataPointer());
         break;
       case MAPPED:
         MemProfiler::RemoveNative(dataPointer());
         DeallocateMappedContent(dataPointer(), byteLength());
         break;
       case WASM_MAPPED:
-#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
-        ReleaseWasmMappedMemory(dataPointer());
-#else
-        MOZ_CRASH("shouldn't have wasm mapped ArrayBuffer");
-#endif
+        WasmArrayRawBuffer::Release(dataPointer());
         break;
     }
 }
 
 void
 ArrayBufferObject::setDataPointer(BufferContents contents, OwnsState ownsData)
 {
     setSlot(DATA_SLOT, PrivateValue(contents.data()));
@@ -594,16 +811,82 @@ ArrayBufferObject::byteLength() const
 
 void
 ArrayBufferObject::setByteLength(uint32_t length)
 {
     MOZ_ASSERT(length <= INT32_MAX);
     setSlot(BYTE_LENGTH_SLOT, Int32Value(length));
 }
 
+size_t
+ArrayBufferObject::wasmMappedSize() const
+{
+    if (isWasmMapped()) {
+        return contents().wasmBuffer()->mappedSize();
+    } else {
+        // Can use byteLength() instead of actualByteLength since if !wasmMapped()
+        // then this is an asm.js buffer, and thus cannot grow.
+        return byteLength();
+    }
+}
+
+Maybe<uint32_t>
+ArrayBufferObject::wasmMaxSize() const
+{
+    if (isWasmMapped())
+        return contents().wasmBuffer()->maxSize();
+    else
+        return Some<uint32_t>(byteLength());
+}
+
+uint32_t
+ArrayBufferObject::wasmBoundsCheckLimit() const
+{
+    if (isWasmMapped())
+        return contents().wasmBuffer()->boundsCheckLimit();
+    else
+        return byteLength();
+}
+
+uint32_t
+ArrayBufferObject::wasmActualByteLength() const
+{
+    if (isWasmMapped())
+        return contents().wasmBuffer()->actualByteLength();
+    else
+        return byteLength();
+}
+
+uint32_t
+ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const
+{
+    if (this->is<ArrayBufferObject>())
+        return this->as<ArrayBufferObject>().wasmBoundsCheckLimit();
+
+    // TODO: When SharedArrayBuffer can be used from wasm, this should be
+    // replaced by SharedArrayBufferObject::wasmBoundsCheckLimit().
+    return wasmMappedSize();
+}
+
+bool
+ArrayBufferObject::growForWasm(uint32_t delta)
+{
+    MOZ_ASSERT(isWasmMapped());
+
+    if (delta == 0)
+        return true;
+
+    // Should be guaranteed by Instance::growMemory
+    CheckedInt<uint32_t> curSize = wasmActualByteLength();
+    CheckedInt<uint32_t> newSize = curSize + CheckedInt<uint32_t>(delta) * wasm::PageSize;
+    MOZ_RELEASE_ASSERT(newSize.isValid());
+
+    return contents().wasmBuffer()->growLength(delta * wasm::PageSize);
+}
+
 uint32_t
 ArrayBufferObject::flags() const
 {
     return uint32_t(getSlot(FLAGS_SLOT).toInt32());
 }
 
 void
 ArrayBufferObject::setFlags(uint32_t flags)
@@ -628,20 +911,18 @@ ArrayBufferObject::create(JSContext* cx,
     size_t nslots = reservedSlots;
     bool allocated = false;
     if (contents) {
         if (ownsState == OwnsData) {
             // The ABO is taking ownership, so account the bytes against the zone.
             size_t nAllocated = nbytes;
             if (contents.kind() == MAPPED)
                 nAllocated = JS_ROUNDUP(nbytes, js::gc::SystemPageSize());
-#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
             else if (contents.kind() == WASM_MAPPED)
-                nAllocated = wasm::MappedSize;
-#endif
+                nAllocated = contents.wasmBuffer()->allocatedBytes();
             cx->zone()->updateMallocCounter(nAllocated);
         }
     } else {
         MOZ_ASSERT(ownsState == OwnsData);
         size_t usableSlots = NativeObject::MAX_FIXED_SLOTS - reservedSlots;
         if (nbytes <= usableSlots * sizeof(Value)) {
             int newSlots = (nbytes - 1) / sizeof(Value) + 1;
             MOZ_ASSERT(int(nbytes) <= newSlots * int(sizeof(Value)));
@@ -767,17 +1048,17 @@ ArrayBufferObject::addSizeOfExcludingThi
 
     switch (buffer.bufferKind()) {
       case PLAIN:
         info->objectsMallocHeapElementsNormal += mallocSizeOf(buffer.dataPointer());
         break;
       case MAPPED:
         info->objectsNonHeapElementsNormal += buffer.byteLength();
         break;
-      case WASM_MALLOCED:
+      case ASMJS_MALLOCED:
         info->objectsMallocHeapElementsAsmJS += mallocSizeOf(buffer.dataPointer());
         break;
       case WASM_MAPPED:
         info->objectsNonHeapElementsAsmJS += buffer.byteLength();
         break;
     }
 }
 
--- a/js/src/vm/ArrayBufferObject.h
+++ b/js/src/vm/ArrayBufferObject.h
@@ -2,28 +2,31 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef vm_ArrayBufferObject_h
 #define vm_ArrayBufferObject_h
 
+#include "mozilla/Maybe.h"
+
 #include "jsobj.h"
 
 #include "builtin/TypedObjectConstants.h"
 #include "js/GCHashTable.h"
 #include "vm/Runtime.h"
 #include "vm/SharedMem.h"
 
 typedef struct JSProperty JSProperty;
 
 namespace js {
 
 class ArrayBufferViewObject;
+class WasmArrayRawBuffer;
 
 // The inheritance hierarchy for the various classes relating to typed arrays
 // is as follows.
 //
 // - NativeObject
 //   - ArrayBufferObjectMaybeShared
 //     - ArrayBufferObject
 //     - SharedArrayBufferObject
@@ -66,25 +69,37 @@ class ArrayBufferViewObject;
 // that (3) may only be pointed to by the typed array the data is inline with.
 //
 // During a minor GC, (3) and (4) may move. During a compacting GC, (2), (3),
 // and (4) may move.
 
 class ArrayBufferObjectMaybeShared;
 
 uint32_t AnyArrayBufferByteLength(const ArrayBufferObjectMaybeShared* buf);
+uint32_t WasmArrayBufferActualByteLength(const ArrayBufferObjectMaybeShared* buf);
+size_t WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf);
+bool WasmArrayBufferGrowForWasm(ArrayBufferObjectMaybeShared* buf, uint32_t delta);
 ArrayBufferObjectMaybeShared& AsAnyArrayBuffer(HandleValue val);
 
 class ArrayBufferObjectMaybeShared : public NativeObject
 {
   public:
     uint32_t byteLength() {
         return AnyArrayBufferByteLength(this);
     }
 
+    size_t wasmMappedSize() const {
+        return WasmArrayBufferMappedSize(this);
+    }
+
+    uint32_t wasmBoundsCheckLimit() const;
+    uint32_t wasmActualByteLength() const {
+        return WasmArrayBufferActualByteLength(this);
+    }
+
     inline bool isDetached() const;
 
     inline SharedMem<uint8_t*> dataPointerEither();
 };
 
 typedef Rooted<ArrayBufferObjectMaybeShared*> RootedArrayBufferObjectMaybeShared;
 typedef Handle<ArrayBufferObjectMaybeShared*> HandleArrayBufferObjectMaybeShared;
 typedef MutableHandle<ArrayBufferObjectMaybeShared*> MutableHandleArrayBufferObjectMaybeShared;
@@ -126,17 +141,17 @@ class ArrayBufferObject : public ArrayBu
 
     enum OwnsState {
         DoesntOwnData = 0,
         OwnsData = 1,
     };
 
     enum BufferKind {
         PLAIN               = 0, // malloced or inline data
-        WASM_MALLOCED       = 1,
+        ASMJS_MALLOCED      = 1,
         WASM_MAPPED         = 2,
         MAPPED              = 3,
 
         KIND_MASK           = 0x3
     };
 
   protected:
 
@@ -194,16 +209,17 @@ class ArrayBufferObject : public ArrayBu
         {
             return BufferContents(static_cast<uint8_t*>(data), PLAIN);
         }
 
         uint8_t* data() const { return data_; }
         BufferKind kind() const { return kind_; }
 
         explicit operator bool() const { return data_ != nullptr; }
+        WasmArrayRawBuffer* wasmBuffer() const;
     };
 
     static const Class class_;
 
     static const Class protoClass;
     static const JSFunctionSpec jsfuncs[];
     static const JSFunctionSpec jsstaticfuncs[];
     static const JSPropertySpec jsstaticprops[];
@@ -250,17 +266,17 @@ class ArrayBufferObject : public ArrayBu
     bool hasStealableContents() const {
         // Inline elements strictly adhere to the corresponding buffer.
         return ownsData();
     }
 
     // Return whether the buffer is allocated by js_malloc and should be freed
     // with js_free.
     bool hasMallocedContents() const {
-        return (ownsData() && isPlain()) || isWasmMalloced();
+        return (ownsData() && isPlain()) || isAsmJSMalloced();
     }
 
     static void addSizeOfExcludingThis(JSObject* obj, mozilla::MallocSizeOf mallocSizeOf,
                                        JS::ClassInfo* info);
 
     // ArrayBufferObjects (strongly) store the first view added to them, while
     // later views are (weakly) stored in the compartment's InnerViewTable
     // below. Buffers usually only have one view, so this slot optimizes for
@@ -284,16 +300,21 @@ class ArrayBufferObject : public ArrayBu
     void setFirstView(ArrayBufferViewObject* view);
 
     uint8_t* inlineDataPointer() const;
 
   public:
     uint8_t* dataPointer() const;
     SharedMem<uint8_t*> dataPointerShared() const;
     uint32_t byteLength() const;
+    uint32_t wasmActualByteLength() const;
+    size_t wasmMappedSize() const;
+    uint32_t wasmBoundsCheckLimit() const;
+    mozilla::Maybe<uint32_t> wasmMaxSize() const;
+    MOZ_MUST_USE bool growForWasm(uint32_t delta);
     BufferContents contents() const {
         return BufferContents(dataPointer(), bufferKind());
     }
     bool hasInlineData() const {
         return dataPointer() == inlineDataPointer();
     }
 
     void releaseData(FreeOp* fop);
@@ -304,23 +325,24 @@ class ArrayBufferObject : public ArrayBu
      */
     bool hasData() const {
         return getClass() == &class_;
     }
 
     BufferKind bufferKind() const { return BufferKind(flags() & BUFFER_KIND_MASK); }
     bool isPlain() const { return bufferKind() == PLAIN; }
     bool isWasmMapped() const { return bufferKind() == WASM_MAPPED; }
-    bool isWasmMalloced() const { return bufferKind() == WASM_MALLOCED; }
-    bool isWasm() const { return isWasmMapped() || isWasmMalloced(); }
+    bool isAsmJSMalloced() const { return bufferKind() == ASMJS_MALLOCED; }
+    bool isWasm() const { return isWasmMapped() || isAsmJSMalloced(); }
     bool isMapped() const { return bufferKind() == MAPPED; }
     bool isDetached() const { return flags() & DETACHED; }
 
-    static ArrayBufferObject* createForWasm(JSContext* cx, uint32_t numBytes, bool signalsForOOB);
-    static bool prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer, bool signalsForOOB);
+    static ArrayBufferObject* createForWasm(JSContext* cx, uint32_t numBytes,
+                                            mozilla::Maybe<uint32_t> maxSize);
+    static bool prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer);
 
     static void finalize(FreeOp* fop, JSObject* obj);
 
     static BufferContents createMappedContents(int fd, size_t offset, size_t length);
 
     static size_t offsetOfFlagsSlot() {
         return getFixedSlotOffset(FLAGS_SLOT);
     }
@@ -346,17 +368,17 @@ class ArrayBufferObject : public ArrayBu
 
     bool ownsData() const { return flags() & OWNS_DATA; }
     void setOwnsData(OwnsState owns) {
         setFlags(owns ? (flags() | OWNS_DATA) : (flags() & ~OWNS_DATA));
     }
 
     bool hasTypedObjectViews() const { return flags() & TYPED_OBJECT_VIEWS; }
 
-    void setIsWasmMalloced() { setFlags((flags() & ~KIND_MASK) | WASM_MALLOCED); }
+    void setIsAsmJSMalloced() { setFlags((flags() & ~KIND_MASK) | ASMJS_MALLOCED); }
     void setIsDetached() { setFlags(flags() | DETACHED); }
 
     void initialize(size_t byteLength, BufferContents contents, OwnsState ownsState) {
         setByteLength(byteLength);
         setFlags(0);
         setFirstView(nullptr);
         setDataPointer(contents, ownsState);
     }
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -325,17 +325,17 @@ JSRuntime::init(uint32_t maxbytes, uint3
     gcInitialized = true;
 
     if (!InitRuntimeNumberState(this))
         return false;
 
     JS::ResetTimeZone();
 
 #ifdef JS_SIMULATOR
-    simulator_ = js::jit::Simulator::Create();
+    simulator_ = js::jit::Simulator::Create(contextFromMainThread());
     if (!simulator_)
         return false;
 #endif
 
     jitSupportsFloatingPoint = js::jit::JitSupportsFloatingPoint();
     jitSupportsUnalignedAccesses = js::jit::JitSupportsUnalignedAccesses();
     jitSupportsSimd = js::jit::JitSupportsSimd();
 
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -915,17 +915,17 @@ struct JSRuntime : public JS::shadow::Ru
     mozilla::LinkedList<js::Debugger> debuggerList;
 
     /*
      * Head of circular list of all enabled Debuggers that have
      * onNewGlobalObject handler methods established.
      */
     JSCList             onNewGlobalObjectWatchers;
 
-#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
+#if defined(XP_DARWIN)
     js::wasm::MachExceptionHandler wasmMachExceptionHandler;
 #endif
 
   private:
     js::FreeOp*         defaultFreeOp_;
 
   public:
     js::FreeOp* defaultFreeOp() {
--- a/js/src/vm/SharedArrayObject.cpp
+++ b/js/src/vm/SharedArrayObject.cpp
@@ -68,17 +68,17 @@ MarkValidRegion(void* addr, size_t len)
     return true;
 #else
     if (mprotect(addr, len, PROT_READ | PROT_WRITE))
         return false;
     return true;
 #endif
 }
 
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
+#if defined(WASM_HUGE_MEMORY)
 // Since this SharedArrayBuffer will likely be used for asm.js code, prepare it
 // for asm.js by mapping the 4gb protected zone described in WasmTypes.h.
 // Since we want to put the SharedArrayBuffer header immediately before the
 // heap but keep the heap page-aligned, allocate an extra page before the heap.
 static uint64_t
 SharedArrayMappedSize()
 {
     MOZ_RELEASE_ASSERT(sizeof(SharedArrayRawBuffer) < gc::SystemPageSize());
@@ -108,23 +108,23 @@ SharedArrayRawBuffer::New(JSContext* cx,
     // The value (uint32_t)-1 is used as a signal in various places,
     // so guard against it on principle.
     MOZ_ASSERT(length != (uint32_t)-1);
 
     // Add a page for the header and round to a page boundary.
     uint32_t allocSize = SharedArrayAllocSize(length);
     if (allocSize <= length)
         return nullptr;
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
     void* p = nullptr;
     if (!IsValidAsmJSHeapLength(length)) {
         p = MapMemory(allocSize, true);
         if (!p)
             return nullptr;
     } else {
+#ifdef WASM_HUGE_MEMORY
         // Test >= to guard against the case where multiple extant runtimes
         // race to allocate.
         if (++numLive >= maxLive) {
             JSRuntime* rt = cx->runtime();
             if (rt->largeAllocationFailureCallback)
                 rt->largeAllocationFailureCallback(rt->largeAllocationFailureCallbackData);
             if (numLive >= maxLive) {
                 numLive--;
@@ -143,22 +143,22 @@ SharedArrayRawBuffer::New(JSContext* cx,
             numLive--;
             return nullptr;
         }
 #   if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
         // Tell Valgrind/Memcheck to not report accesses in the inaccessible region.
         VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)p + allocSize,
                                                        SharedArrayMappedSize() - allocSize);
 #   endif
+#else
+        p = MapMemory(allocSize, true);
+        if (!p)
+            return nullptr;
+#endif
     }
-#else
-    void* p = MapMemory(allocSize, true);
-    if (!p)
-        return nullptr;
-#endif
     uint8_t* buffer = reinterpret_cast<uint8_t*>(p) + gc::SystemPageSize();
     uint8_t* base = buffer - sizeof(SharedArrayRawBuffer);
     SharedArrayRawBuffer* rawbuf = new (base) SharedArrayRawBuffer(buffer, length);
     MOZ_ASSERT(rawbuf->length == length); // Deallocation needs this
     return rawbuf;
 }
 
 void
@@ -177,33 +177,33 @@ SharedArrayRawBuffer::dropReference()
     // If this was the final reference, release the buffer.
     if (refcount == 0) {
         SharedMem<uint8_t*> p = this->dataPointerShared() - gc::SystemPageSize();
 
         MOZ_ASSERT(p.asValue() % gc::SystemPageSize() == 0);
 
         uint8_t* address = p.unwrap(/*safe - only reference*/);
         uint32_t allocSize = SharedArrayAllocSize(this->length);
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
         if (!IsValidAsmJSHeapLength(this->length)) {
             UnmapMemory(address, allocSize);
         } else {
+#if defined(WASM_HUGE_MEMORY)
             numLive--;
             UnmapMemory(address, SharedArrayMappedSize());
 #       if defined(MOZ_VALGRIND) \
            && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
             // Tell Valgrind/Memcheck to recommence reporting accesses in the
             // previously-inaccessible region.
             VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(address,
                                                           SharedArrayMappedSize());
 #       endif
+#else
+            UnmapMemory(address, allocSize);
+#endif
         }
-#else
-        UnmapMemory(address, allocSize);
-#endif
     }
 }
 
 const JSFunctionSpec SharedArrayBufferObject::jsfuncs[] = {
     /* Nothing yet */
     JS_FS_END
 };