Bug 1495149 - Baldr: don't use signal handlers for asm.js bounds checks (r=lth,bbouvier)
author Luke Wagner <luke@mozilla.com>
Wed, 03 Oct 2018 15:43:14 -0500
changeset 495233 d1094983384c30f5aa7860e7d5ff49be625fb47a
parent 495232 52ad4fb549815521961b743e6ad797a49b3f049d
child 495234 8b6344a8c25c48a2180b6a5ba7050e2773f22f89
push id 9984
push user ffxbld-merge
push date Mon, 15 Oct 2018 21:07:35 +0000
treeherder mozilla-beta@183d27ea8570 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers lth, bbouvier
bugs 1495149
milestone64.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1495149 - Baldr: don't use signal handlers for asm.js bounds checks (r=lth,bbouvier)
js/src/jit/Disassembler.cpp
js/src/jit/Disassembler.h
js/src/jit/EffectiveAddressAnalysis.cpp
js/src/jit/EffectiveAddressAnalysis.h
js/src/jit/Lowering.cpp
js/src/jit/MIR.h
js/src/jit/MacroAssembler.h
js/src/jit/arm64/Disassembler-arm64.cpp
js/src/jit/shared/CodeGenerator-shared-inl.h
js/src/jit/shared/CodeGenerator-shared.h
js/src/jit/shared/LIR-shared.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/Lowering-x64.cpp
js/src/jit/x86-shared/Assembler-x86-shared.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86-shared/Disassembler-x86-shared.cpp
js/src/jit/x86-shared/Lowering-x86-shared.cpp
js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/Lowering-x86.cpp
js/src/jit/x86/MacroAssembler-x86.cpp
js/src/moz.build
js/src/vm/ArrayBufferObject-inl.h
js/src/vm/ArrayBufferObject.cpp
js/src/vm/ArrayBufferObject.h
js/src/vm/Stack.cpp
js/src/wasm/AsmJS.cpp
js/src/wasm/WasmInstance.cpp
js/src/wasm/WasmIonCompile.cpp
js/src/wasm/WasmSignalHandlers.cpp
js/src/wasm/WasmTypes.h
deleted file mode 100644
--- a/js/src/jit/Disassembler.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "jit/Disassembler.h"
-
-using namespace js;
-using namespace js::jit;
-using namespace js::jit::Disassembler;
-
-#ifdef DEBUG
-bool
-Disassembler::ComplexAddress::operator==(const ComplexAddress& other) const
-{
-    return base_ == other.base_ &&
-           index_ == other.index_ &&
-           scale_ == other.scale_ &&
-           disp_ == other.disp_ &&
-           isPCRelative_ == other.isPCRelative_;
-}
-
-bool
-Disassembler::ComplexAddress::operator!=(const ComplexAddress& other) const
-{
-    return !operator==(other);
-}
-
-bool
-Disassembler::OtherOperand::operator==(const OtherOperand& other) const
-{
-    if (kind_ != other.kind_) {
-        return false;
-    }
-    switch (kind_) {
-      case Imm: return u_.imm == other.u_.imm;
-      case GPR: return u_.gpr == other.u_.gpr;
-      case FPR: return u_.fpr == other.u_.fpr;
-    }
-    MOZ_CRASH("Unexpected OtherOperand kind");
-}
-
-bool
-Disassembler::OtherOperand::operator!=(const OtherOperand& other) const
-{
-    return !operator==(other);
-}
-
-bool
-Disassembler::HeapAccess::operator==(const HeapAccess& other) const
-{
-    return kind_ == other.kind_ &&
-           size_ == other.size_ &&
-           address_ == other.address_ &&
-           otherOperand_ == other.otherOperand_;
-}
-
-bool
-Disassembler::HeapAccess::operator!=(const HeapAccess& other) const
-{
-    return !operator==(other);
-}
-
-#endif
deleted file mode 100644
--- a/js/src/jit/Disassembler.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef jit_Disassembler_h
-#define jit_Disassembler_h
-
-#include "jit/MacroAssembler.h"
-#include "jit/Registers.h"
-
-namespace js {
-namespace jit {
-
-namespace Disassembler {
-
-class ComplexAddress {
-    int32_t disp_;
-    Register::Encoding base_ : 8;
-    Register::Encoding index_ : 8;
-    int8_t scale_; // log2 encoding
-    bool isPCRelative_;
-
-  public:
-    ComplexAddress()
-      : disp_(0),
-        base_(Registers::Invalid),
-        index_(Registers::Invalid),
-        scale_(0),
-        isPCRelative_(false)
-    {
-        MOZ_ASSERT(*this == *this);
-    }
-
-    ComplexAddress(int32_t disp, Register::Encoding base)
-      : disp_(disp),
-        base_(base),
-        index_(Registers::Invalid),
-        scale_(0),
-        isPCRelative_(false)
-    {
-        MOZ_ASSERT(*this == *this);
-        MOZ_ASSERT(base != Registers::Invalid);
-        MOZ_ASSERT(base_ == base);
-    }
-
-    ComplexAddress(int32_t disp, Register::Encoding base, Register::Encoding index, int scale)
-      : disp_(disp),
-        base_(base),
-        index_(index),
-        scale_(scale),
-        isPCRelative_(false)
-    {
-        MOZ_ASSERT(scale >= 0 && scale < 4);
-        MOZ_ASSERT_IF(index == Registers::Invalid, scale == 0);
-        MOZ_ASSERT(*this == *this);
-        MOZ_ASSERT(base_ == base);
-        MOZ_ASSERT(index_ == index);
-    }
-
-    explicit ComplexAddress(const void* addr)
-      : disp_(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr))),
-        base_(Registers::Invalid),
-        index_(Registers::Invalid),
-        scale_(0),
-        isPCRelative_(false)
-    {
-        MOZ_ASSERT(*this == *this);
-        MOZ_ASSERT(reinterpret_cast<const void*>(uintptr_t(disp_)) == addr);
-    }
-
-    explicit ComplexAddress(const Operand& op) {
-#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
-        switch (op.kind()) {
-          case Operand::MEM_REG_DISP:
-            *this = ComplexAddress(op.disp(), op.base());
-            return;
-          case Operand::MEM_SCALE:
-            *this = ComplexAddress(op.disp(), op.base(), op.index(), op.scale());
-            return;
-          case Operand::MEM_ADDRESS32:
-            *this = ComplexAddress(op.address());
-            return;
-          default:
-            break;
-        }
-#endif
-        MOZ_CRASH("Unexpected Operand kind");
-    }
-
-    bool isPCRelative() const {
-        return isPCRelative_;
-    }
-
-    int32_t disp() const {
-        return disp_;
-    }
-
-    bool hasBase() const {
-        return base_ != Registers::Invalid;
-    }
-
-    Register::Encoding base() const {
-        MOZ_ASSERT(hasBase());
-        return base_;
-    }
-
-    bool hasIndex() const {
-        return index_ != Registers::Invalid;
-    }
-
-    Register::Encoding index() const {
-        MOZ_ASSERT(hasIndex());
-        return index_;
-    }
-
-    uint32_t scale() const {
-        return scale_;
-    }
-
-#ifdef DEBUG
-    bool operator==(const ComplexAddress& other) const;
-    bool operator!=(const ComplexAddress& other) const;
-#endif
-};
-
-// An operand other than a memory operand -- a register or an immediate.
-class OtherOperand {
-  public:
-    enum Kind {
-        Imm,
-        GPR,
-        FPR,
-    };
-
-  private:
-    Kind kind_;
-    union {
-        int32_t imm;
-        Register::Encoding gpr;
-        FloatRegister::Encoding fpr;
-    } u_;
-
-  public:
-    OtherOperand()
-      : kind_(Imm)
-    {
-        u_.imm = 0;
-        MOZ_ASSERT(*this == *this);
-    }
-
-    explicit OtherOperand(int32_t imm)
-      : kind_(Imm)
-    {
-        u_.imm = imm;
-        MOZ_ASSERT(*this == *this);
-    }
-
-    explicit OtherOperand(Register::Encoding gpr)
-      : kind_(GPR)
-    {
-        u_.gpr = gpr;
-        MOZ_ASSERT(*this == *this);
-    }
-
-    explicit OtherOperand(FloatRegister::Encoding fpr)
-      : kind_(FPR)
-    {
-        u_.fpr = fpr;
-        MOZ_ASSERT(*this == *this);
-    }
-
-    Kind kind() const {
-        return kind_;
-    }
-
-    int32_t imm() const {
-        MOZ_ASSERT(kind_ == Imm);
-        return u_.imm;
-    }
-
-    Register::Encoding gpr() const {
-        MOZ_ASSERT(kind_ == GPR);
-        return u_.gpr;
-    }
-
-    FloatRegister::Encoding fpr() const {
-        MOZ_ASSERT(kind_ == FPR);
-        return u_.fpr;
-    }
-
-#ifdef DEBUG
-    bool operator==(const OtherOperand& other) const;
-    bool operator!=(const OtherOperand& other) const;
-#endif
-};
-
-class HeapAccess {
-  public:
-    enum Kind {
-        Unknown,
-        Load,       // any bits not covered by the load are zeroed
-        LoadSext32, // like Load, but sign-extend to 32 bits
-        LoadSext64, // like Load, but sign-extend to 64 bits
-        Store
-    };
-
-  private:
-    Kind kind_;
-    size_t size_; // The number of bytes of memory accessed
-    ComplexAddress address_;
-    OtherOperand otherOperand_;
-
-  public:
-    HeapAccess()
-      : kind_(Unknown),
-        size_(0)
-    {
-        MOZ_ASSERT(*this == *this);
-    }
-
-    HeapAccess(Kind kind, size_t size, const ComplexAddress& address, const OtherOperand& otherOperand)
-      : kind_(kind),
-        size_(size),
-        address_(address),
-        otherOperand_(otherOperand)
-    {
-        MOZ_ASSERT(kind != Unknown);
-        MOZ_ASSERT_IF(kind == LoadSext32, otherOperand.kind() != OtherOperand::FPR);
-        MOZ_ASSERT_IF(kind == Load || kind == LoadSext32, otherOperand.kind() != OtherOperand::Imm);
-        MOZ_ASSERT(*this == *this);
-    }
-
-    Kind kind() const {
-        return kind_;
-    }
-
-    size_t size() const {
-        MOZ_ASSERT(kind_ != Unknown);
-        return size_;
-    }
-
-    const ComplexAddress& address() const {
-        return address_;
-    }
-
-    const OtherOperand& otherOperand() const {
-        return otherOperand_;
-    }
-
-#ifdef DEBUG
-    bool operator==(const HeapAccess& other) const;
-    bool operator!=(const HeapAccess& other) const;
-#endif
-};
-
-MOZ_COLD uint8_t* DisassembleHeapAccess(uint8_t* ptr, HeapAccess* access);
-
-#ifdef DEBUG
-void DumpHeapAccess(const HeapAccess& access);
-
-inline void
-VerifyHeapAccess(uint8_t* begin, uint8_t* end, const HeapAccess& expected)
-{
-    HeapAccess disassembled;
-    uint8_t* e = DisassembleHeapAccess(begin, &disassembled);
-    MOZ_ASSERT(e == end);
-    MOZ_ASSERT(disassembled == expected);
-}
-#endif
-
-} // namespace Disassembler
-
-} // namespace jit
-} // namespace js
-
-#endif /* jit_Disassembler_h */
--- a/js/src/jit/EffectiveAddressAnalysis.cpp
+++ b/js/src/jit/EffectiveAddressAnalysis.cpp
@@ -190,83 +190,31 @@ AnalyzeLoadUnboxedScalar(MLoadUnboxedSca
     if (!add->hasLiveDefUses() && DeadIfUnused(add) && add->canRecoverOnBailout()) {
         JitSpew(JitSpew_EAA, "mark as recovered on bailout: %s%u",
                 add->opName(), add->id());
         add->setRecoveredOnBailoutUnchecked();
     }
 }
 
 template<typename AsmJSMemoryAccess>
-bool
-EffectiveAddressAnalysis::tryAddDisplacement(AsmJSMemoryAccess* ins, int32_t o)
-{
-#ifdef WASM_HUGE_MEMORY
-    // Compute the new offset. Check for overflow.
-    uint32_t oldOffset = ins->offset();
-    uint32_t newOffset = oldOffset + o;
-    if (o < 0 ? (newOffset >= oldOffset) : (newOffset < oldOffset)) {
-        return false;
-    }
-
-    // The offset must ultimately be written into the offset immediate of a load
-    // or store instruction so don't allow folding of the offset is bigger.
-    if (newOffset >= wasm::OffsetGuardLimit) {
-        return false;
-    }
-
-    // Everything checks out. This is the new offset.
-    ins->setOffset(newOffset);
-    return true;
-#else
-    return false;
-#endif
-}
-
-template<typename AsmJSMemoryAccess>
 void
 EffectiveAddressAnalysis::analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins)
 {
     MDefinition* base = ins->base();
 
     if (base->isConstant()) {
-        // Look for heap[i] where i is a constant offset, and fold the offset.
-        // By doing the folding now, we simplify the task of codegen; the offset
-        // is always the address mode immediate. This also allows it to avoid
-        // a situation where the sum of a constant pointer value and a non-zero
-        // offset doesn't actually fit into the address mode immediate.
-        int32_t imm = base->toConstant()->toInt32();
-        if (imm != 0 && tryAddDisplacement(ins, imm)) {
-            MInstruction* zero = MConstant::New(graph_.alloc(), Int32Value(0));
-            ins->block()->insertBefore(ins, zero);
-            ins->replaceBase(zero);
-        }
-
         // If the index is within the minimum heap length, we can optimize
         // away the bounds check.
+        int32_t imm = base->toConstant()->toInt32();
         if (imm >= 0) {
             int32_t end = (uint32_t)imm + ins->byteSize();
             if (end >= imm && (uint32_t)end <= mir_->minWasmHeapLength()) {
                  ins->removeBoundsCheck();
             }
         }
-    } else if (base->isAdd()) {
-        // Look for heap[a+i] where i is a constant offset, and fold the offset.
-        // Alignment masks have already been moved out of the way by the
-        // Alignment Mask Analysis pass.
-        MDefinition* op0 = base->toAdd()->getOperand(0);
-        MDefinition* op1 = base->toAdd()->getOperand(1);
-        if (op0->isConstant()) {
-            mozilla::Swap(op0, op1);
-        }
-        if (op1->isConstant()) {
-            int32_t imm = op1->toConstant()->toInt32();
-            if (tryAddDisplacement(ins, imm)) {
-                ins->replaceBase(op0);
-            }
-        }
     }
 }
 
 // This analysis converts patterns of the form:
 //   truncate(x + (y << {0,1,2,3}))
 //   truncate(x + (y << {0,1,2,3}) + imm32)
 // into a single lea instruction, and patterns of the form:
 //   asmload(x + imm32)
--- a/js/src/jit/EffectiveAddressAnalysis.h
+++ b/js/src/jit/EffectiveAddressAnalysis.h
@@ -15,19 +15,16 @@ namespace jit {
 class MIRGraph;
 
 class EffectiveAddressAnalysis
 {
     MIRGenerator* mir_;
     MIRGraph& graph_;
 
     template <typename AsmJSMemoryAccess>
-    MOZ_MUST_USE bool tryAddDisplacement(AsmJSMemoryAccess* ins, int32_t o);
-
-    template <typename AsmJSMemoryAccess>
     void analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins);
 
   public:
     EffectiveAddressAnalysis(MIRGenerator* mir, MIRGraph& graph)
       : mir_(mir), graph_(graph)
     {}
 
     MOZ_MUST_USE bool analyze();
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -4631,21 +4631,16 @@ LIRGenerator::visitWasmAddOffset(MWasmAd
     MOZ_ASSERT(ins->type() == MIRType::Int32);
     MOZ_ASSERT(ins->offset());
     define(new(alloc()) LWasmAddOffset(useRegisterAtStart(ins->base())), ins);
 }
 
 void
 LIRGenerator::visitWasmLoadTls(MWasmLoadTls* ins)
 {
-#ifdef WASM_HUGE_MEMORY
-    // This will disappear once we remove HeapReg and replace it with a load
-    // from Tls, but in the mean time it keeps us sane.
-    MOZ_CRASH("No WasmLoadTls here at the moment");
-#endif
     auto* lir = new(alloc()) LWasmLoadTls(useRegisterAtStart(ins->tlsPtr()));
     define(lir, ins);
 }
 
 void
 LIRGenerator::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
 {
 #ifdef WASM_HUGE_MEMORY
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -13513,124 +13513,112 @@ class MAsmJSMemoryAccess
 };
 
 class MAsmJSLoadHeap
   : public MVariadicInstruction, // 1 plus optional memoryBase and boundsCheckLimit
     public MAsmJSMemoryAccess,
     public NoTypePolicy::Data
 {
     uint32_t memoryBaseIndex_;
-    uint32_t boundsCheckIndex_;
-
-    explicit MAsmJSLoadHeap(uint32_t memoryBaseIndex, uint32_t boundsCheckIndex,
-                            Scalar::Type accessType)
+
+    explicit MAsmJSLoadHeap(uint32_t memoryBaseIndex, Scalar::Type accessType)
       : MVariadicInstruction(classOpcode),
         MAsmJSMemoryAccess(accessType),
-        memoryBaseIndex_(memoryBaseIndex),
-        boundsCheckIndex_(boundsCheckIndex)
+        memoryBaseIndex_(memoryBaseIndex)
     {
         setResultType(ScalarTypeToMIRType(accessType));
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSLoadHeap)
 
     static MAsmJSLoadHeap* New(TempAllocator& alloc,
                                MDefinition* memoryBase,
                                MDefinition* base,
                                MDefinition* boundsCheckLimit,
                                Scalar::Type accessType)
     {
-        uint32_t nextIndex = 1;
+        uint32_t nextIndex = 2;
         uint32_t memoryBaseIndex = memoryBase ? nextIndex++ : UINT32_MAX;
-        uint32_t boundsCheckIndex = boundsCheckLimit ? nextIndex++ : UINT32_MAX;
-
-        MAsmJSLoadHeap* load = new(alloc) MAsmJSLoadHeap(memoryBaseIndex, boundsCheckIndex,
-                                                         accessType);
+
+        MAsmJSLoadHeap* load = new(alloc) MAsmJSLoadHeap(memoryBaseIndex, accessType);
         if (!load->init(alloc, nextIndex)) {
             return nullptr;
         }
 
         load->initOperand(0, base);
+        load->initOperand(1, boundsCheckLimit);
         if (memoryBase) {
             load->initOperand(memoryBaseIndex, memoryBase);
         }
-        if (boundsCheckLimit) {
-            load->initOperand(boundsCheckIndex, boundsCheckLimit);
-        }
 
         return load;
     }
 
     MDefinition* base() const { return getOperand(0); }
     void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); }
-    MDefinition* memoryBase() const { return getOperand(memoryBaseIndex_); }
-    MDefinition* boundsCheckLimit() const { return getOperand(boundsCheckIndex_); }
+    bool hasMemoryBase() const { return memoryBaseIndex_ != UINT32_MAX; }
+    MDefinition* memoryBase() const { MOZ_ASSERT(hasMemoryBase()); return getOperand(memoryBaseIndex_); }
+    MDefinition* boundsCheckLimit() const { return getOperand(1); }
 
     bool congruentTo(const MDefinition* ins) const override;
     AliasSet getAliasSet() const override {
         return AliasSet::Load(AliasSet::WasmHeap);
     }
     AliasType mightAlias(const MDefinition* def) const override;
 };
 
 class MAsmJSStoreHeap
   : public MVariadicInstruction, // 2 plus optional memoryBase and boundsCheckLimit
     public MAsmJSMemoryAccess,
     public NoTypePolicy::Data
 {
     uint32_t memoryBaseIndex_;
-    uint32_t boundsCheckIndex_;
-
-    explicit MAsmJSStoreHeap(uint32_t memoryBaseIndex, uint32_t boundsCheckIndex,
-                             Scalar::Type accessType)
+
+    explicit MAsmJSStoreHeap(uint32_t memoryBaseIndex, Scalar::Type accessType)
       : MVariadicInstruction(classOpcode),
         MAsmJSMemoryAccess(accessType),
-        memoryBaseIndex_(memoryBaseIndex),
-        boundsCheckIndex_(boundsCheckIndex)
+        memoryBaseIndex_(memoryBaseIndex)
     {
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSStoreHeap)
 
     static MAsmJSStoreHeap* New(TempAllocator& alloc,
                                 MDefinition* memoryBase,
                                 MDefinition* base,
                                 MDefinition* boundsCheckLimit,
                                 Scalar::Type accessType,
                                 MDefinition* v)
     {
-        uint32_t nextIndex = 2;
+        uint32_t nextIndex = 3;
         uint32_t memoryBaseIndex = memoryBase ? nextIndex++ : UINT32_MAX;
-        uint32_t boundsCheckIndex = boundsCheckLimit ? nextIndex++ : UINT32_MAX;
-
-        MAsmJSStoreHeap* store = new(alloc) MAsmJSStoreHeap(memoryBaseIndex, boundsCheckIndex,
-                                                            accessType);
+
+        MAsmJSStoreHeap* store = new(alloc) MAsmJSStoreHeap(memoryBaseIndex, accessType);
         if (!store->init(alloc, nextIndex)) {
             return nullptr;
         }
 
         store->initOperand(0, base);
         store->initOperand(1, v);
+        store->initOperand(2, boundsCheckLimit);
         if (memoryBase) {
             store->initOperand(memoryBaseIndex, memoryBase);
         }
-        if (boundsCheckLimit) {
-            store->initOperand(boundsCheckIndex, boundsCheckLimit);
-        }
 
         return store;
     }
 
     MDefinition* base() const { return getOperand(0); }
     void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); }
     MDefinition* value() const { return getOperand(1); }
-    MDefinition* memoryBase() const { return getOperand(memoryBaseIndex_); }
-    MDefinition* boundsCheckLimit() const { return getOperand(boundsCheckIndex_); }
+    bool hasMemoryBase() const { return memoryBaseIndex_ != UINT32_MAX; }
+    MDefinition* memoryBase() const { MOZ_ASSERT(hasMemoryBase()); return getOperand(memoryBaseIndex_); }
+    MDefinition* boundsCheckLimit() const { return getOperand(2); }
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::WasmHeap);
     }
 };
 
 class MWasmCompareExchangeHeap
   : public MVariadicInstruction,
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1492,24 +1492,23 @@ class MacroAssembler : public MacroAssem
 
     CodeOffset wasmTrapInstruction() PER_SHARED_ARCH;
 
     void wasmTrap(wasm::Trap trap, wasm::BytecodeOffset bytecodeOffset);
     void wasmInterruptCheck(Register tls, wasm::BytecodeOffset bytecodeOffset);
     void wasmReserveStackChecked(uint32_t amount, wasm::BytecodeOffset trapOffset);
 
     // Emit a bounds check against the wasm heap limit, jumping to 'label' if
-    // 'cond' holds. Required when WASM_HUGE_MEMORY is not defined. If
-    // JitOptions.spectreMaskIndex is true, in speculative executions 'index' is
-    // saturated in-place to 'boundsCheckLimit'.
+    // 'cond' holds. If JitOptions.spectreMaskIndex is true, in speculative
+    // executions 'index' is saturated in-place to 'boundsCheckLimit'.
     void wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit, Label* label)
-        DEFINED_ON(arm, arm64, mips32, mips64, x86);
+        DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
 
     void wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit, Label* label)
-        DEFINED_ON(arm, arm64, mips32, mips64, x86);
+        DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
 
     // Each wasm load/store instruction appends its own wasm::Trap::OutOfBounds.
     void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out) DEFINED_ON(x86, x64);
     void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out) DEFINED_ON(x86, x64);
     void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64);
     void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr) DEFINED_ON(x86);
 
     // For all the ARM and ARM64 wasmLoad and wasmStore functions, `ptr` MUST
deleted file mode 100644
--- a/js/src/jit/arm64/Disassembler-arm64.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- *
- * Copyright 2018 Mozilla Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jit/Disassembler.h"
-
-MOZ_COLD uint8_t*
-js::jit::Disassembler::DisassembleHeapAccess(uint8_t*, js::jit::Disassembler::HeapAccess*)
-{
-    MOZ_CRASH("NYI - asm.js not supported yet on this platform");
-}
--- a/js/src/jit/shared/CodeGenerator-shared-inl.h
+++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
@@ -3,17 +3,16 @@
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_shared_CodeGenerator_shared_inl_h
 #define jit_shared_CodeGenerator_shared_inl_h
 
 #include "jit/shared/CodeGenerator-shared.h"
-#include "jit/Disassembler.h"
 
 #include "jit/MacroAssembler-inl.h"
 
 namespace js {
 namespace jit {
 
 static inline bool
 IsConstant(const LInt64Allocation& a)
@@ -342,90 +341,16 @@ CodeGeneratorShared::restoreLiveVolatile
 {
     MOZ_ASSERT(!ins->isCall());
     LSafepoint* safepoint = ins->safepoint();
     LiveRegisterSet regs;
     regs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(), RegisterSet::Volatile());
     masm.PopRegsInMask(regs);
 }
 
-void
-CodeGeneratorShared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad,
-                                                 Scalar::Type type, Operand mem, LAllocation alloc)
-{
-#ifdef DEBUG
-    using namespace Disassembler;
-
-    Disassembler::HeapAccess::Kind kind = isLoad ? HeapAccess::Load : HeapAccess::Store;
-    switch (type) {
-      case Scalar::Int8:
-      case Scalar::Int16:
-        if (kind == HeapAccess::Load) {
-            kind = HeapAccess::LoadSext32;
-        }
-        break;
-      default:
-        break;
-    }
-
-    OtherOperand op;
-    switch (type) {
-      case Scalar::Int8:
-      case Scalar::Uint8:
-      case Scalar::Int16:
-      case Scalar::Uint16:
-      case Scalar::Int32:
-      case Scalar::Uint32:
-        if (!alloc.isConstant()) {
-            op = OtherOperand(ToRegister(alloc).encoding());
-        } else {
-            // x86 doesn't allow encoding an imm64 to memory move; the value
-            // is wrapped anyways.
-            int32_t i = ToInt32(&alloc);
-
-            // Sign-extend the immediate value out to 32 bits. We do this even
-            // for unsigned element types so that we match what the disassembly
-            // code does, as it doesn't know about signedness of stores.
-            unsigned shift = 32 - TypedArrayElemSize(type) * 8;
-            i = int32_t(uint32_t(i) << shift) >> shift;
-            op = OtherOperand(i);
-        }
-        break;
-      case Scalar::Int64:
-        // Can't encode an imm64-to-memory move.
-        op = OtherOperand(ToRegister(alloc).encoding());
-        break;
-      case Scalar::Float32:
-      case Scalar::Float64:
-        op = OtherOperand(ToFloatRegister(alloc).encoding());
-        break;
-      case Scalar::Uint8Clamped:
-      case Scalar::MaxTypedArrayViewType:
-        MOZ_CRASH("Unexpected array type");
-    }
-
-    HeapAccess access(kind, TypedArrayElemSize(type), ComplexAddress(mem), op);
-    masm.verifyHeapAccessDisassembly(begin, end, access);
-#endif
-}
-
-void
-CodeGeneratorShared::verifyLoadDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
-                                           Operand mem, LAllocation alloc)
-{
-    verifyHeapAccessDisassembly(begin, end, true, type, mem, alloc);
-}
-
-void
-CodeGeneratorShared::verifyStoreDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
-                                            Operand mem, LAllocation alloc)
-{
-    verifyHeapAccessDisassembly(begin, end, false, type, mem, alloc);
-}
-
 inline bool
 CodeGeneratorShared::isGlobalObject(JSObject* object)
 {
     // Calling object->is<GlobalObject>() is racy because this relies on
     // checking the group and this can be changed while we are compiling off the
     // main thread. Note that we only check for the script realm's global here.
     return object == gen->realm->maybeGlobal();
 }
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -517,26 +517,16 @@ class CodeGeneratorShared : public LElem
     void emitTracelogStartEvent(uint32_t textId) {}
     void emitTracelogStopEvent(uint32_t textId) {}
     void emitTracelogStartEvent(const char* text, TraceLoggerTextId enabledTextId) {}
     void emitTracelogStopEvent(const char* text, TraceLoggerTextId enabledTextId) {}
     void emitTracelogIonStart() {}
     void emitTracelogIonStop() {}
 #endif
 
-  protected:
-    inline void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad,
-                                            Scalar::Type type, Operand mem, LAllocation alloc);
-
-  public:
-    inline void verifyLoadDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
-                                      Operand mem, LAllocation alloc);
-    inline void verifyStoreDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
-                                       Operand mem, LAllocation alloc);
-
     bool isGlobalObject(JSObject* object);
 };
 
 // An out-of-line path is generated at the end of the function.
 class OutOfLineCode : public TempObject
 {
     Label entry_;
     Label rejoin_;
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -8731,17 +8731,18 @@ class LWasmStoreI64 : public LInstructio
         return getInt64Operand(ValueIndex);
     }
 };
 
 class LAsmJSLoadHeap : public LInstructionHelper<1, 3, 0>
 {
   public:
     LIR_HEADER(AsmJSLoadHeap);
-    explicit LAsmJSLoadHeap(const LAllocation& ptr, const LAllocation& boundsCheckLimit = LAllocation(),
+    explicit LAsmJSLoadHeap(const LAllocation& ptr,
+                            const LAllocation& boundsCheckLimit,
                             const LAllocation& memoryBase = LAllocation())
       : LInstructionHelper(classOpcode)
     {
         setOperand(0, ptr);
         setOperand(1, boundsCheckLimit);
         setOperand(2, memoryBase);
     }
     MAsmJSLoadHeap* mir() const {
@@ -8757,18 +8758,19 @@ class LAsmJSLoadHeap : public LInstructi
         return getOperand(2);
     }
 };
 
 class LAsmJSStoreHeap : public LInstructionHelper<0, 4, 0>
 {
   public:
     LIR_HEADER(AsmJSStoreHeap);
-    LAsmJSStoreHeap(const LAllocation& ptr, const LAllocation& value,
-                    const LAllocation& boundsCheckLimit = LAllocation(),
+    LAsmJSStoreHeap(const LAllocation& ptr,
+                    const LAllocation& value,
+                    const LAllocation& boundsCheckLimit,
                     const LAllocation& memoryBase = LAllocation())
       : LInstructionHelper(classOpcode)
     {
         setOperand(0, ptr);
         setOperand(1, value);
         setOperand(2, boundsCheckLimit);
         setOperand(3, memoryBase);
     }
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -488,60 +488,16 @@ CodeGenerator::visitWasmStore(LWasmStore
 
 void
 CodeGenerator::visitWasmStoreI64(LWasmStoreI64* ins)
 {
     emitWasmStore(ins);
 }
 
 void
-CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
-{
-    const MAsmJSLoadHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
-
-    const LAllocation* ptr = ins->ptr();
-    const LDefinition* out = ins->output();
-
-    Scalar::Type accessType = mir->access().type();
-
-    Operand srcAddr = ptr->isBogus()
-                      ? Operand(HeapReg, mir->offset())
-                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
-
-    uint32_t before = masm.size();
-    masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(out));
-    uint32_t after = masm.size();
-    verifyLoadDisassembly(before, after, accessType, srcAddr, *out->output());
-}
-
-void
-CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
-{
-    const MAsmJSStoreHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
-
-    const LAllocation* ptr = ins->ptr();
-    const LAllocation* value = ins->value();
-
-    Scalar::Type accessType = mir->access().type();
-
-    canonicalizeIfDeterministic(accessType, value);
-
-    Operand dstAddr = ptr->isBogus()
-                      ? Operand(HeapReg, mir->offset())
-                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
-
-    uint32_t before = masm.size();
-    wasmStore(mir->access(), value, dstAddr);
-    uint32_t after = masm.size();
-    verifyStoreDisassembly(before, after, accessType, dstAddr, *value);
-}
-
-void
 CodeGenerator::visitWasmCompareExchangeHeap(LWasmCompareExchangeHeap* ins)
 {
     MWasmCompareExchangeHeap* mir = ins->mir();
 
     Register ptr = ToRegister(ins->ptr());
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -247,55 +247,16 @@ LIRGenerator::visitWasmStore(MWasmStore*
     }
 
     LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
     auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
     add(lir, ins);
 }
 
 void
-LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
-{
-    MDefinition* base = ins->base();
-    MOZ_ASSERT(base->type() == MIRType::Int32);
-
-    define(new(alloc()) LAsmJSLoadHeap(useRegisterOrZeroAtStart(base)), ins);
-}
-
-void
-LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
-{
-    MDefinition* base = ins->base();
-    MOZ_ASSERT(base->type() == MIRType::Int32);
-
-    LAsmJSStoreHeap* lir = nullptr;  // initialize to silence GCC warning
-    switch (ins->access().type()) {
-      case Scalar::Int8:
-      case Scalar::Uint8:
-      case Scalar::Int16:
-      case Scalar::Uint16:
-      case Scalar::Int32:
-      case Scalar::Uint32:
-        lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
-                                           useRegisterOrConstantAtStart(ins->value()));
-        break;
-      case Scalar::Float32:
-      case Scalar::Float64:
-        lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
-                                           useRegisterAtStart(ins->value()));
-        break;
-      case Scalar::Int64:
-      case Scalar::Uint8Clamped:
-      case Scalar::MaxTypedArrayViewType:
-        MOZ_CRASH("unexpected array type");
-    }
-    add(lir, ins);
-}
-
-void
 LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     // The output may not be used but will be clobbered regardless, so
     // pin the output to eax.
     //
--- a/js/src/jit/x86-shared/Assembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.cpp
@@ -1,16 +1,15 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "gc/Marking.h"
-#include "jit/Disassembler.h"
 #include "jit/JitRealm.h"
 #if defined(JS_CODEGEN_X86)
 # include "jit/x86/MacroAssembler-x86.h"
 #elif defined(JS_CODEGEN_X64)
 # include "jit/x64/MacroAssembler-x64.h"
 #else
 # error "Wrong architecture. Only x86 and x64 should build this file!"
 #endif
@@ -228,29 +227,16 @@ AssemblerX86Shared::InvertCondition(Doub
         return DoubleLessThanOrUnordered;
       case DoubleGreaterThanOrEqualOrUnordered:
         return DoubleLessThan;
       default:
         MOZ_CRASH("unexpected condition");
     }
 }
 
-void
-AssemblerX86Shared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
-                                                const Disassembler::HeapAccess& heapAccess)
-{
-#ifdef DEBUG
-    if (masm.oom()) {
-        return;
-    }
-    unsigned char* code = masm.data();
-    Disassembler::VerifyHeapAccess(code + begin, code + end, heapAccess);
-#endif
-}
-
 CPUInfo::SSEVersion CPUInfo::maxSSEVersion = UnknownSSE;
 CPUInfo::SSEVersion CPUInfo::maxEnabledSSEVersion = UnknownSSE;
 bool CPUInfo::avxPresent = false;
 bool CPUInfo::avxEnabled = false;
 bool CPUInfo::popcntPresent = false;
 bool CPUInfo::needAmdBugWorkaround = false;
 
 static uintptr_t
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -395,16 +395,56 @@ CodeGenerator::visitWasmReinterpret(LWas
       case MIRType::Int64:
         MOZ_CRASH("not handled by this LIR opcode");
       default:
         MOZ_CRASH("unexpected WasmReinterpret");
     }
 }
 
 void
+CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
+{
+    const MAsmJSLoadHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->access().offset() == 0);
+
+    const LAllocation* ptr = ins->ptr();
+    const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+    AnyRegister out = ToAnyRegister(ins->output());
+
+    Scalar::Type accessType = mir->accessType();
+
+    OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
+    if (mir->needsBoundsCheck()) {
+        ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
+        addOutOfLineCode(ool, mir);
+
+        masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), ToRegister(boundsCheckLimit),
+                             ool->entry());
+    }
+
+#ifdef JS_CODEGEN_X86
+    const LAllocation* memoryBase = ins->memoryBase();
+    Operand srcAddr = ptr->isBogus()
+                    ? Operand(ToRegister(memoryBase), 0)
+                    : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne);
+#else
+    MOZ_ASSERT(!mir->hasMemoryBase());
+    Operand srcAddr = ptr->isBogus()
+                    ? Operand(HeapReg, 0)
+                    : Operand(HeapReg, ToRegister(ptr), TimesOne);
+#endif
+
+    masm.wasmLoad(mir->access(), srcAddr, out);
+
+    if (ool) {
+        masm.bind(ool->rejoin());
+    }
+}
+
+void
 CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds* ool)
 {
     switch (ool->viewType()) {
       case Scalar::Int64:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
       case Scalar::Float32:
         masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
@@ -422,16 +462,54 @@ CodeGeneratorX86Shared::visitOutOfLineLo
         Register destReg = ool->dest().gpr();
         masm.mov(ImmWord(0), destReg);
         break;
     }
     masm.jmp(ool->rejoin());
 }
 
 void
+CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
+{
+    const MAsmJSStoreHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+
+    const LAllocation* ptr = ins->ptr();
+    const LAllocation* value = ins->value();
+    const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+    Scalar::Type accessType = mir->accessType();
+    canonicalizeIfDeterministic(accessType, value);
+
+    Label rejoin;
+    if (mir->needsBoundsCheck()) {
+        masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), ToRegister(boundsCheckLimit),
+                             &rejoin);
+    }
+
+#ifdef JS_CODEGEN_X86
+    const LAllocation* memoryBase = ins->memoryBase();
+    Operand dstAddr = ptr->isBogus()
+                      ? Operand(ToRegister(memoryBase), 0)
+                      : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne);
+#else
+    MOZ_ASSERT(!mir->hasMemoryBase());
+    Operand dstAddr = ptr->isBogus()
+                      ? Operand(HeapReg, 0)
+                      : Operand(HeapReg, ToRegister(ptr), TimesOne);
+#endif
+
+    masm.wasmStore(mir->access(), ToAnyRegister(value), dstAddr);
+
+    if (rejoin.used()) {
+        masm.bind(&rejoin);
+    }
+}
+
+void
 CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir)
 {
     MWasmAddOffset* mir = lir->mir();
     Register base = ToRegister(lir->base());
     Register out = ToRegister(lir->output());
 
     if (base != out) {
         masm.move32(base, out);
deleted file mode 100644
--- a/js/src/jit/x86-shared/Disassembler-x86-shared.cpp
+++ /dev/null
@@ -1,578 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "jit/Disassembler.h"
-
-#include "jit/x86-shared/Encoding-x86-shared.h"
-
-using namespace js;
-using namespace js::jit;
-using namespace js::jit::X86Encoding;
-using namespace js::jit::Disassembler;
-
-MOZ_COLD static bool REX_W(uint8_t rex) { return (rex >> 3) & 0x1; }
-MOZ_COLD static bool REX_R(uint8_t rex) { return (rex >> 2) & 0x1; }
-MOZ_COLD static bool REX_X(uint8_t rex) { return (rex >> 1) & 0x1; }
-MOZ_COLD static bool REX_B(uint8_t rex) { return (rex >> 0) & 0x1; }
-
-MOZ_COLD static uint8_t
-MakeREXFlags(bool w, bool r, bool x, bool b)
-{
-    uint8_t rex = (w << 3) | (r << 2) | (x << 1) | (b << 0);
-    MOZ_RELEASE_ASSERT(REX_W(rex) == w);
-    MOZ_RELEASE_ASSERT(REX_R(rex) == r);
-    MOZ_RELEASE_ASSERT(REX_X(rex) == x);
-    MOZ_RELEASE_ASSERT(REX_B(rex) == b);
-    return rex;
-}
-
-MOZ_COLD static ModRmMode
-ModRM_Mode(uint8_t modrm)
-{
-    return ModRmMode((modrm >> 6) & 0x3);
-}
-
-MOZ_COLD static uint8_t
-ModRM_Reg(uint8_t modrm)
-{
-    return (modrm >> 3) & 0x7;
-}
-
-MOZ_COLD static uint8_t
-ModRM_RM(uint8_t modrm)
-{
-    return (modrm >> 0) & 0x7;
-}
-
-MOZ_COLD static bool
-ModRM_hasSIB(uint8_t modrm)
-{
-    return ModRM_Mode(modrm) != ModRmRegister && ModRM_RM(modrm) == hasSib;
-}
-MOZ_COLD static bool
-ModRM_hasDisp8(uint8_t modrm)
-{
-    return ModRM_Mode(modrm) == ModRmMemoryDisp8;
-}
-MOZ_COLD static bool
-ModRM_hasRIP(uint8_t modrm)
-{
-#ifdef JS_CODEGEN_X64
-    return ModRM_Mode(modrm) == ModRmMemoryNoDisp && ModRM_RM(modrm) == noBase;
-#else
-    return false;
-#endif
-}
-MOZ_COLD static bool
-ModRM_hasDisp32(uint8_t modrm)
-{
-    return ModRM_Mode(modrm) == ModRmMemoryDisp32 ||
-           ModRM_hasRIP(modrm);
-}
-
-MOZ_COLD static uint8_t
-SIB_SS(uint8_t sib)
-{
-    return (sib >> 6) & 0x3;
-}
-
-MOZ_COLD static uint8_t
-SIB_Index(uint8_t sib)
-{
-    return (sib >> 3) & 0x7;
-}
-
-MOZ_COLD static uint8_t
-SIB_Base(uint8_t sib)
-{
-    return (sib >> 0) & 0x7;
-}
-
-MOZ_COLD static bool
-SIB_hasRIP(uint8_t sib)
-{
-    return SIB_Base(sib) == noBase && SIB_Index(sib) == noIndex;
-}
-
-MOZ_COLD static bool
-HasRIP(uint8_t modrm, uint8_t sib, uint8_t rex)
-{
-    return ModRM_hasRIP(modrm) && SIB_hasRIP(sib);
-}
-
-MOZ_COLD static bool
-HasDisp8(uint8_t modrm)
-{
-    return ModRM_hasDisp8(modrm);
-}
-
-MOZ_COLD static bool
-HasDisp32(uint8_t modrm, uint8_t sib)
-{
-    return ModRM_hasDisp32(modrm) ||
-           (SIB_Base(sib) == noBase &&
-            SIB_Index(sib) == noIndex &&
-            ModRM_Mode(modrm) == ModRmMemoryNoDisp);
-}
-
-MOZ_COLD static uint32_t
-Reg(uint8_t modrm, uint8_t sib, uint8_t rex)
-{
-    return ModRM_Reg(modrm) | (REX_R(rex) << 3);
-}
-
-MOZ_COLD static bool
-HasBase(uint8_t modrm, uint8_t sib)
-{
-    return !ModRM_hasSIB(modrm) ||
-           SIB_Base(sib) != noBase ||
-           SIB_Index(sib) != noIndex ||
-           ModRM_Mode(modrm) != ModRmMemoryNoDisp;
-}
-
-MOZ_COLD static RegisterID
-DecodeBase(uint8_t modrm, uint8_t sib, uint8_t rex)
-{
-    return HasBase(modrm, sib)
-           ? RegisterID((ModRM_hasSIB(modrm) ? SIB_Base(sib) : ModRM_RM(modrm)) | (REX_B(rex) << 3))
-           : invalid_reg;
-}
-
-MOZ_COLD static RegisterID
-DecodeIndex(uint8_t modrm, uint8_t sib, uint8_t rex)
-{
-    RegisterID index = RegisterID(SIB_Index(sib) | (REX_X(rex) << 3));
-    return ModRM_hasSIB(modrm) && index != noIndex ? index : invalid_reg;
-}
-
-MOZ_COLD static uint32_t
-DecodeScale(uint8_t modrm, uint8_t sib, uint8_t rex)
-{
-    return ModRM_hasSIB(modrm) ? SIB_SS(sib) : 0;
-}
-
-#define PackOpcode(op0, op1, op2) ((op0) | ((op1) << 8) | ((op2) << 16))
-#define Pack2ByteOpcode(op1) PackOpcode(OP_2BYTE_ESCAPE, op1, 0)
-#define Pack3ByteOpcode(op1, op2) PackOpcode(OP_2BYTE_ESCAPE, op1, op2)
-
-uint8_t*
-js::jit::Disassembler::DisassembleHeapAccess(uint8_t* ptr, HeapAccess* access)
-{
-    VexOperandType type = VEX_PS;
-    uint32_t opcode = OP_NOP_00;
-    uint8_t modrm = 0;
-    uint8_t sib = 0;
-    uint8_t rex = 0;
-    int32_t disp = 0;
-    int32_t imm = 0;
-    bool haveImm = false;
-    int opsize = 4;
-
-    // Legacy prefixes
-    switch (*ptr) {
-      case PRE_LOCK:
-      case PRE_PREDICT_BRANCH_NOT_TAKEN: // (obsolete), aka %cs
-      case 0x3E: // aka predict-branch-taken (obsolete)
-      case 0x36: // %ss
-      case 0x26: // %es
-      case 0x64: // %fs
-      case 0x65: // %gs
-      case 0x67: // address-size override
-        MOZ_CRASH("Unable to disassemble instruction");
-      case PRE_SSE_F2: // aka REPNZ/REPNE
-        type = VEX_SD;
-        ptr++;
-        break;
-      case PRE_SSE_F3: // aka REP/REPE/REPZ
-        type = VEX_SS;
-        ptr++;
-        break;
-      case PRE_SSE_66: // aka PRE_OPERAND_SIZE
-        type = VEX_PD;
-        opsize = 2;
-        ptr++;
-        break;
-      default:
-        break;
-    }
-
-    // REX and VEX prefixes
-    {
-        int x = 0, b = 0, m = 1, w = 0;
-        int r, l, p;
-        switch (*ptr) {
-#ifdef JS_CODEGEN_X64
-          case PRE_REX | 0x0: case PRE_REX | 0x1: case PRE_REX | 0x2: case PRE_REX | 0x3:
-          case PRE_REX | 0x4: case PRE_REX | 0x5: case PRE_REX | 0x6: case PRE_REX | 0x7:
-          case PRE_REX | 0x8: case PRE_REX | 0x9: case PRE_REX | 0xa: case PRE_REX | 0xb:
-          case PRE_REX | 0xc: case PRE_REX | 0xd: case PRE_REX | 0xe: case PRE_REX | 0xf:
-            rex = *ptr++ & 0xf;
-            goto rex_done;
-#endif
-          case PRE_VEX_C4: {
-            if (type != VEX_PS) {
-                MOZ_CRASH("Unable to disassemble instruction");
-            }
-            ++ptr;
-            uint8_t c4a = *ptr++ ^ 0xe0;
-            uint8_t c4b = *ptr++ ^ 0x78;
-            r = (c4a >> 7) & 0x1;
-            x = (c4a >> 6) & 0x1;
-            b = (c4a >> 5) & 0x1;
-            m = (c4a >> 0) & 0x1f;
-            w = (c4b >> 7) & 0x1;
-            l = (c4b >> 2) & 0x1;
-            p = (c4b >> 0) & 0x3;
-            break;
-          }
-          case PRE_VEX_C5: {
-            if (type != VEX_PS) {
-              MOZ_CRASH("Unable to disassemble instruction");
-            }
-            ++ptr;
-            uint8_t c5 = *ptr++ ^ 0xf8;
-            r = (c5 >> 7) & 0x1;
-            l = (c5 >> 2) & 0x1;
-            p = (c5 >> 0) & 0x3;
-            break;
-          }
-          default:
-            goto rex_done;
-        }
-        if (l != 0) { // 256-bit SIMD
-            MOZ_CRASH("Unable to disassemble instruction");
-        }
-        type = VexOperandType(p);
-        rex = MakeREXFlags(w, r, x, b);
-        switch (m) {
-          case 0x1:
-            opcode = Pack2ByteOpcode(*ptr++);
-            goto opcode_done;
-          case 0x2:
-            opcode = Pack3ByteOpcode(ESCAPE_38, *ptr++);
-            goto opcode_done;
-          case 0x3:
-            opcode = Pack3ByteOpcode(ESCAPE_3A, *ptr++);
-            goto opcode_done;
-          default:
-            MOZ_CRASH("Unable to disassemble instruction");
-        }
-    }
-  rex_done:;
-    if (REX_W(rex)) {
-        opsize = 8;
-    }
-
-    // Opcode.
-    opcode = *ptr++;
-    switch (opcode) {
-#ifdef JS_CODEGEN_X64
-      case OP_PUSH_EAX + 0: case OP_PUSH_EAX + 1: case OP_PUSH_EAX + 2: case OP_PUSH_EAX + 3:
-      case OP_PUSH_EAX + 4: case OP_PUSH_EAX + 5: case OP_PUSH_EAX + 6: case OP_PUSH_EAX + 7:
-      case OP_POP_EAX + 0: case OP_POP_EAX + 1: case OP_POP_EAX + 2: case OP_POP_EAX + 3:
-      case OP_POP_EAX + 4: case OP_POP_EAX + 5: case OP_POP_EAX + 6: case OP_POP_EAX + 7:
-      case OP_PUSH_Iz:
-      case OP_PUSH_Ib:
-        opsize = 8;
-        break;
-#endif
-      case OP_2BYTE_ESCAPE:
-        opcode |= *ptr << 8;
-        switch (*ptr++) {
-          case ESCAPE_38:
-          case ESCAPE_3A:
-            opcode |= *ptr++ << 16;
-            break;
-          default:
-            break;
-        }
-        break;
-      default:
-        break;
-    }
-  opcode_done:;
-
-    // ModR/M
-    modrm = *ptr++;
-
-    // SIB
-    if (ModRM_hasSIB(modrm)) {
-        sib = *ptr++;
-    }
-
-    // Address Displacement
-    if (HasDisp8(modrm)) {
-        disp = int8_t(*ptr++);
-    } else if (HasDisp32(modrm, sib)) {
-        memcpy(&disp, ptr, sizeof(int32_t));
-        ptr += sizeof(int32_t);
-    }
-
-    // Immediate operand
-    switch (opcode) {
-      case OP_PUSH_Ib:
-      case OP_IMUL_GvEvIb:
-      case OP_GROUP1_EbIb:
-      case OP_GROUP1_EvIb:
-      case OP_TEST_EAXIb:
-      case OP_GROUP2_EvIb:
-      case OP_GROUP11_EvIb:
-      case OP_GROUP3_EbIb:
-      case Pack2ByteOpcode(OP2_PSHUFD_VdqWdqIb):
-      case Pack2ByteOpcode(OP2_PSLLD_UdqIb): // aka OP2_PSRAD_UdqIb, aka OP2_PSRLD_UdqIb
-      case Pack2ByteOpcode(OP2_PEXTRW_GdUdIb):
-      case Pack2ByteOpcode(OP2_SHUFPS_VpsWpsIb):
-      case Pack3ByteOpcode(ESCAPE_3A, OP3_PEXTRD_EdVdqIb):
-      case Pack3ByteOpcode(ESCAPE_3A, OP3_BLENDPS_VpsWpsIb):
-      case Pack3ByteOpcode(ESCAPE_3A, OP3_PINSRD_VdqEdIb):
-        // 8-bit signed immediate
-        imm = int8_t(*ptr++);
-        haveImm = true;
-        break;
-      case OP_RET_Iz:
-        // 16-bit unsigned immediate
-        memcpy(&imm, ptr, sizeof(int16_t));
-        ptr += sizeof(int16_t);
-        haveImm = true;
-        break;
-      case OP_ADD_EAXIv:
-      case OP_OR_EAXIv:
-      case OP_AND_EAXIv:
-      case OP_SUB_EAXIv:
-      case OP_XOR_EAXIv:
-      case OP_CMP_EAXIv:
-      case OP_PUSH_Iz:
-      case OP_IMUL_GvEvIz:
-      case OP_GROUP1_EvIz:
-      case OP_TEST_EAXIv:
-      case OP_MOV_EAXIv:
-      case OP_GROUP3_EvIz:
-        // 32-bit signed immediate
-        memcpy(&imm, ptr, sizeof(int32_t));
-        ptr += sizeof(int32_t);
-        haveImm = true;
-        break;
-      case OP_GROUP11_EvIz:
-        // opsize-sized signed immediate
-        memcpy(&imm, ptr, opsize);
-        imm = int32_t(uint32_t(imm) << (32 - opsize * 8)) >> (32 - opsize * 8);
-        ptr += opsize;
-        haveImm = true;
-        break;
-      default:
-        break;
-    }
-
-    // Interpret the opcode.
-    if (HasRIP(modrm, sib, rex)) {
-        MOZ_CRASH("Unable to disassemble instruction");
-    }
-
-    size_t memSize = 0;
-    OtherOperand otherOperand(imm);
-    HeapAccess::Kind kind = HeapAccess::Unknown;
-    RegisterID gpr(RegisterID(Reg(modrm, sib, rex)));
-    XMMRegisterID xmm(XMMRegisterID(Reg(modrm, sib, rex)));
-    ComplexAddress addr(disp,
-                        DecodeBase(modrm, sib, rex),
-                        DecodeIndex(modrm, sib, rex),
-                        DecodeScale(modrm, sib, rex));
-    switch (opcode) {
-      case OP_GROUP11_EvIb:
-        if (gpr != RegisterID(GROUP11_MOV)) {
-            MOZ_CRASH("Unable to disassemble instruction");
-        }
-        MOZ_RELEASE_ASSERT(haveImm);
-        memSize = 1;
-        kind = HeapAccess::Store;
-        break;
-      case OP_GROUP11_EvIz:
-        if (gpr != RegisterID(GROUP11_MOV)) {
-            MOZ_CRASH("Unable to disassemble instruction");
-        }
-        MOZ_RELEASE_ASSERT(haveImm);
-        memSize = opsize;
-        kind = HeapAccess::Store;
-        break;
-      case OP_MOV_GvEv:
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(gpr);
-        memSize = opsize;
-        kind = HeapAccess::Load;
-        break;
-      case OP_MOV_GvEb:
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(gpr);
-        memSize = 1;
-        kind = HeapAccess::Load;
-        break;
-      case OP_MOV_EvGv:
-        if (!haveImm) {
-            otherOperand = OtherOperand(gpr);
-        }
-        memSize = opsize;
-        kind = HeapAccess::Store;
-        break;
-      case OP_MOV_EbGv:
-        if (!haveImm) {
-            otherOperand = OtherOperand(gpr);
-        }
-        memSize = 1;
-        kind = HeapAccess::Store;
-        break;
-      case Pack2ByteOpcode(OP2_MOVZX_GvEb):
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(gpr);
-        memSize = 1;
-        kind = HeapAccess::Load;
-        break;
-      case Pack2ByteOpcode(OP2_MOVZX_GvEw):
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(gpr);
-        memSize = 2;
-        kind = HeapAccess::Load;
-        break;
-      case Pack2ByteOpcode(OP2_MOVSX_GvEb):
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(gpr);
-        memSize = 1;
-        kind = opsize == 8 ? HeapAccess::LoadSext64 : HeapAccess::LoadSext32;
-        break;
-      case Pack2ByteOpcode(OP2_MOVSX_GvEw):
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(gpr);
-        memSize = 2;
-        kind = opsize == 8 ? HeapAccess::LoadSext64 : HeapAccess::LoadSext32;
-        break;
-#ifdef JS_CODEGEN_X64
-      case OP_MOVSXD_GvEv:
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(gpr);
-        memSize = 4;
-        kind = HeapAccess::LoadSext64;
-        break;
-#endif // JS_CODEGEN_X64
-      case Pack2ByteOpcode(OP2_MOVDQ_VdqWdq): // aka OP2_MOVDQ_VsdWsd
-      case Pack2ByteOpcode(OP2_MOVAPS_VsdWsd):
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(xmm);
-        memSize = 16;
-        kind = HeapAccess::Load;
-        break;
-      case Pack2ByteOpcode(OP2_MOVSD_VsdWsd): // aka OP2_MOVPS_VpsWps
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(xmm);
-        switch (type) {
-          case VEX_SS: memSize = 4; break;
-          case VEX_SD: memSize = 8; break;
-          case VEX_PS:
-          case VEX_PD: memSize = 16; break;
-          default: MOZ_CRASH("Unexpected VEX type");
-        }
-        kind = HeapAccess::Load;
-        break;
-      case Pack2ByteOpcode(OP2_MOVDQ_WdqVdq):
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(xmm);
-        memSize = 16;
-        kind = HeapAccess::Store;
-        break;
-      case Pack2ByteOpcode(OP2_MOVSD_WsdVsd): // aka OP2_MOVPS_WpsVps
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(xmm);
-        switch (type) {
-          case VEX_SS: memSize = 4; break;
-          case VEX_SD: memSize = 8; break;
-          case VEX_PS:
-          case VEX_PD: memSize = 16; break;
-          default: MOZ_CRASH("Unexpected VEX type");
-        }
-        kind = HeapAccess::Store;
-        break;
-      case Pack2ByteOpcode(OP2_MOVD_VdEd):
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(xmm);
-        switch (type) {
-          case VEX_PD: memSize = 4; break;
-          default: MOZ_CRASH("Unexpected VEX type");
-        }
-        kind = HeapAccess::Load;
-        break;
-      case Pack2ByteOpcode(OP2_MOVQ_WdVd):
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(xmm);
-        switch (type) {
-          case VEX_PD: memSize = 8; break;
-          default: MOZ_CRASH("Unexpected VEX type");
-        }
-        kind = HeapAccess::Store;
-        break;
-      case Pack2ByteOpcode(OP2_MOVD_EdVd): // aka OP2_MOVQ_VdWd
-        MOZ_RELEASE_ASSERT(!haveImm);
-        otherOperand = OtherOperand(xmm);
-        switch (type) {
-          case VEX_SS: memSize = 8; kind = HeapAccess::Load; break;
-          case VEX_PD: memSize = 4; kind = HeapAccess::Store; break;
-          default: MOZ_CRASH("Unexpected VEX type");
-        }
-        break;
-      default:
-        MOZ_CRASH("Unable to disassemble instruction");
-    }
-
-    *access = HeapAccess(kind, memSize, addr, otherOperand);
-    return ptr;
-}
-
-#ifdef DEBUG
-void
-js::jit::Disassembler::DumpHeapAccess(const HeapAccess& access)
-{
-    switch (access.kind()) {
-      case HeapAccess::Store:      fprintf(stderr, "store"); break;
-      case HeapAccess::Load:       fprintf(stderr, "load"); break;
-      case HeapAccess::LoadSext32: fprintf(stderr, "loadSext32"); break;
-      case HeapAccess::LoadSext64: fprintf(stderr, "loadSext64"); break;
-      default:                     fprintf(stderr, "unknown"); break;
-    }
-    fprintf(stderr, "%u ", unsigned(access.size()));
-
-    switch (access.otherOperand().kind()) {
-      case OtherOperand::Imm:
-        fprintf(stderr, "imm %d", access.otherOperand().imm());
-        break;
-      case OtherOperand::GPR:
-        fprintf(stderr, "gpr %s", X86Encoding::GPRegName(access.otherOperand().gpr()));
-        break;
-      case OtherOperand::FPR:
-        fprintf(stderr, "fpr %s", X86Encoding::XMMRegName(access.otherOperand().fpr()));
-        break;
-      default: fprintf(stderr, "unknown");
-    }
-
-    fprintf(stderr, " @ ");
-
-    if (access.address().isPCRelative()) {
-        fprintf(stderr, MEM_o32r " ", ADDR_o32r(access.address().disp()));
-    } else if (access.address().hasIndex()) {
-        if (access.address().hasBase()) {
-            fprintf(stderr, MEM_obs " ",
-                    ADDR_obs(access.address().disp(), access.address().base(),
-                             access.address().index(), access.address().scale()));
-        } else {
-            fprintf(stderr, MEM_os " ",
-                    ADDR_os(access.address().disp(),
-                            access.address().index(), access.address().scale()));
-        }
-    } else if (access.address().hasBase()) {
-        fprintf(stderr, MEM_ob " ", ADDR_ob(access.address().disp(), access.address().base()));
-    } else {
-        fprintf(stderr, MEM_o " ", ADDR_o(access.address().disp()));
-    }
-
-    fprintf(stderr, "\n");
-}
-#endif
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -283,16 +283,94 @@ LIRGenerator::visitWasmNeg(MWasmNeg* ins
         defineReuseInput(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins, 0);
         break;
       default:
         MOZ_CRASH();
     }
 }
 
 void
+LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
+{
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType::Int32);
+
+    MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+    MOZ_ASSERT_IF(ins->needsBoundsCheck(), boundsCheckLimit->type() == MIRType::Int32);
+
+    // For simplicity, require a register if we're going to emit a bounds-check
+    // branch, so that we don't have special cases for constants. This should
+    // only happen in rare constant-folding cases since asm.js sets the minimum
+    // heap size based on the largest constant index used to access the heap.
+    LAllocation baseAlloc = ins->needsBoundsCheck()
+                            ? useRegisterAtStart(base)
+                            : useRegisterOrZeroAtStart(base);
+
+    LAllocation limitAlloc = ins->needsBoundsCheck()
+                           ? useRegisterAtStart(boundsCheckLimit)
+                           : LAllocation();
+    LAllocation memoryBaseAlloc = ins->hasMemoryBase()
+                                ? useRegisterAtStart(ins->memoryBase())
+                                : LAllocation();
+
+    auto* lir = new(alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, memoryBaseAlloc);
+    define(lir, ins);
+}
+
+void
+LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
+{
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType::Int32);
+
+    MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+    MOZ_ASSERT_IF(ins->needsBoundsCheck(), boundsCheckLimit->type() == MIRType::Int32);
+
+    // For simplicity, require a register if we're going to emit a bounds-check
+    // branch, so that we don't have special cases for constants. This should
+    // only happen in rare constant-folding cases since asm.js sets the minimum
+    // heap size based on the largest constant index used to access the heap.
+    LAllocation baseAlloc = ins->needsBoundsCheck()
+                            ? useRegisterAtStart(base)
+                            : useRegisterOrZeroAtStart(base);
+
+    LAllocation limitAlloc = ins->needsBoundsCheck()
+                           ? useRegisterAtStart(boundsCheckLimit)
+                           : LAllocation();
+    LAllocation memoryBaseAlloc = ins->hasMemoryBase()
+                                ? useRegisterAtStart(ins->memoryBase())
+                                : LAllocation();
+
+    LAsmJSStoreHeap* lir = nullptr;
+    switch (ins->access().type()) {
+      case Scalar::Int8: case Scalar::Uint8:
+#ifdef JS_CODEGEN_X86
+        // See comment for LIRGeneratorX86::useByteOpRegister.
+        lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useFixed(ins->value(), eax),
+                                           limitAlloc, memoryBaseAlloc);
+        break;
+#endif
+      case Scalar::Int16: case Scalar::Uint16:
+      case Scalar::Int32: case Scalar::Uint32:
+      case Scalar::Float32: case Scalar::Float64:
+        // For now, don't allow constant values. The immediate operand affects
+        // instruction layout which affects patching.
+        lir = new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
+                                            limitAlloc, memoryBaseAlloc);
+        break;
+      case Scalar::Int64:
+        MOZ_CRASH("NYI");
+      case Scalar::Uint8Clamped:
+      case Scalar::MaxTypedArrayViewType:
+        MOZ_CRASH("unexpected array type");
+    }
+    add(lir, ins);
+}
+
+void
 LIRGeneratorX86Shared::lowerUDiv(MDiv* div)
 {
     if (div->rhs()->isConstant()) {
         uint32_t rhs = div->rhs()->toConstant()->toInt32();
         int32_t shift = FloorLog2(rhs);
 
         LAllocation lhs = useRegisterAtStart(div->lhs());
         if (rhs != 0 && uint32_t(1) << shift == rhs) {
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -786,16 +786,36 @@ MacroAssembler::pushFakeReturnAddress(Re
 // WebAssembly
 
 CodeOffset
 MacroAssembler::wasmTrapInstruction()
 {
     return ud2();
 }
 
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit, Label* label)
+{
+    cmp32(index, boundsCheckLimit);
+    j(cond, label);
+    if (JitOptions.spectreIndexMasking) {
+        cmovCCl(cond, Operand(boundsCheckLimit), index);
+    }
+}
+
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit, Label* label)
+{
+    cmp32(index, Operand(boundsCheckLimit));
+    j(cond, label);
+    if (JitOptions.spectreIndexMasking) {
+        cmovCCl(cond, Operand(boundsCheckLimit), index);
+    }
+}
+
 // RAII class that generates the jumps to traps when it's destructed, to
 // prevent some code duplication in the outOfLineWasmTruncateXtoY methods.
 struct MOZ_RAII AutoHandleWasmTruncateToIntErrors
 {
     MacroAssembler& masm;
     Label inputIsNaN;
     Label intOverflow;
     wasm::BytecodeOffset off;
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -339,80 +339,16 @@ CodeGenerator::visitWasmStore(LWasmStore
 
 void
 CodeGenerator::visitWasmStoreI64(LWasmStoreI64* ins)
 {
     emitWasmStore(ins);
 }
 
 void
-CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
-{
-    const MAsmJSLoadHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->access().offset() == 0);
-
-    const LAllocation* ptr = ins->ptr();
-    const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
-    const LAllocation* memoryBase = ins->memoryBase();
-    AnyRegister out = ToAnyRegister(ins->output());
-
-    Scalar::Type accessType = mir->accessType();
-
-    OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
-    if (mir->needsBoundsCheck()) {
-        ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
-        addOutOfLineCode(ool, mir);
-
-        masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), ToRegister(boundsCheckLimit),
-                             ool->entry());
-    }
-
-    Operand srcAddr = ptr->isBogus()
-                      ? Operand(ToRegister(memoryBase), 0)
-                      : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne);
-
-    masm.wasmLoad(mir->access(), srcAddr, out);
-
-    if (ool) {
-        masm.bind(ool->rejoin());
-    }
-}
-
-void
-CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
-{
-    const MAsmJSStoreHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
-
-    const LAllocation* ptr = ins->ptr();
-    const LAllocation* value = ins->value();
-    const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
-    const LAllocation* memoryBase = ins->memoryBase();
-
-    Scalar::Type accessType = mir->accessType();
-    canonicalizeIfDeterministic(accessType, value);
-
-    Operand dstAddr = ptr->isBogus()
-                      ? Operand(ToRegister(memoryBase), 0)
-                      : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne);
-
-    Label rejoin;
-    if (mir->needsBoundsCheck()) {
-        masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), ToRegister(boundsCheckLimit),
-                             &rejoin);
-    }
-
-    masm.wasmStore(mir->access(), ToAnyRegister(value), dstAddr);
-
-    if (rejoin.used()) {
-        masm.bind(&rejoin);
-    }
-}
-
-void
 CodeGenerator::visitWasmCompareExchangeHeap(LWasmCompareExchangeHeap* ins)
 {
     MWasmCompareExchangeHeap* mir = ins->mir();
 
     Register ptrReg = ToRegister(ins->ptr());
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
     Register addrTemp = ToRegister(ins->addrTemp());
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -396,86 +396,16 @@ LIRGenerator::visitWasmStore(MWasmStore*
         MOZ_CRASH("unexpected array type");
     }
 
     auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
     add(lir, ins);
 }
 
 void
-LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
-{
-    MDefinition* base = ins->base();
-    MOZ_ASSERT(base->type() == MIRType::Int32);
-
-    MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
-    MOZ_ASSERT_IF(ins->needsBoundsCheck(), boundsCheckLimit->type() == MIRType::Int32);
-
-    MDefinition* memoryBase = ins->memoryBase();
-    MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
-
-    // For simplicity, require a register if we're going to emit a bounds-check
-    // branch, so that we don't have special cases for constants.
-    LAllocation baseAlloc = ins->needsBoundsCheck()
-                            ? useRegisterAtStart(base)
-                            : useRegisterOrZeroAtStart(base);
-    LAllocation limitAlloc = ins->needsBoundsCheck()
-                           ? useRegisterAtStart(boundsCheckLimit)
-                           : LAllocation();
-
-    auto* lir = new(alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, useRegisterAtStart(memoryBase));
-    define(lir, ins);
-}
-
-void
-LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
-{
-    MDefinition* base = ins->base();
-    MOZ_ASSERT(base->type() == MIRType::Int32);
-
-    MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
-    MOZ_ASSERT_IF(ins->needsBoundsCheck(), boundsCheckLimit->type() == MIRType::Int32);
-
-    MDefinition* memoryBase = ins->memoryBase();
-    MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
-
-    // For simplicity, require a register if we're going to emit a bounds-check
-    // branch, so that we don't have special cases for constants.
-    LAllocation baseAlloc = ins->needsBoundsCheck()
-                            ? useRegisterAtStart(base)
-                            : useRegisterOrZeroAtStart(base);
-    LAllocation limitAlloc = ins->needsBoundsCheck()
-                           ? useRegisterAtStart(boundsCheckLimit)
-                           : LAllocation();
-
-    LAsmJSStoreHeap* lir = nullptr;
-    switch (ins->access().type()) {
-      case Scalar::Int8: case Scalar::Uint8:
-        // See comment for LIRGeneratorX86::useByteOpRegister.
-        lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useFixed(ins->value(), eax),
-                                           limitAlloc, useRegisterAtStart(memoryBase));
-        break;
-      case Scalar::Int16: case Scalar::Uint16:
-      case Scalar::Int32: case Scalar::Uint32:
-      case Scalar::Float32: case Scalar::Float64:
-        // For now, don't allow constant values. The immediate operand affects
-        // instruction layout which affects patching.
-        lir = new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
-                                            limitAlloc, useRegisterAtStart(memoryBase));
-        break;
-      case Scalar::Int64:
-        MOZ_CRASH("NYI");
-      case Scalar::Uint8Clamped:
-      case Scalar::MaxTypedArrayViewType:
-        MOZ_CRASH("unexpected array type");
-    }
-    add(lir, ins);
-}
-
-void
 LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     MDefinition* memoryBase = ins->memoryBase();
     MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
 
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -620,36 +620,16 @@ MacroAssembler::storeUnboxedValue(const 
                                   const Address& dest, MIRType slotType);
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                   const BaseObjectElementIndex& dest, MIRType slotType);
 
 // wasm specific methods, used in both the wasm baseline compiler and ion.
 
 void
-MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit, Label* label)
-{
-    cmp32(index, boundsCheckLimit);
-    j(cond, label);
-    if (JitOptions.spectreIndexMasking) {
-        cmovCCl(cond, Operand(boundsCheckLimit), index);
-    }
-}
-
-void
-MacroAssembler::wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit, Label* label)
-{
-    cmp32(index, Operand(boundsCheckLimit));
-    j(cond, label);
-    if (JitOptions.spectreIndexMasking) {
-        cmovCCl(cond, Operand(boundsCheckLimit), index);
-    }
-}
-
-void
 MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out)
 {
     MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP || srcAddr.kind() == Operand::MEM_SCALE);
 
     memoryBarrierBefore(access.sync());
 
     append(access, size());
     switch (access.type()) {
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -282,17 +282,16 @@ UNIFIED_SOURCES += [
     'jit/BitSet.cpp',
     'jit/BytecodeAnalysis.cpp',
     'jit/C1Spewer.cpp',
     'jit/CacheIR.cpp',
     'jit/CacheIRCompiler.cpp',
     'jit/CacheIRSpewer.cpp',
     'jit/CodeGenerator.cpp',
     'jit/CompileWrappers.cpp',
-    'jit/Disassembler.cpp',
     'jit/EdgeCaseAnalysis.cpp',
     'jit/EffectiveAddressAnalysis.cpp',
     'jit/ExecutableAllocator.cpp',
     'jit/FoldLinearArithConstants.cpp',
     'jit/InstructionReordering.cpp',
     'jit/Ion.cpp',
     'jit/IonAnalysis.cpp',
     'jit/IonBuilder.cpp',
@@ -514,19 +513,16 @@ elif CONFIG['JS_CODEGEN_X86'] or CONFIG[
         'jit/x86-shared/Assembler-x86-shared.cpp',
         'jit/x86-shared/AssemblerBuffer-x86-shared.cpp',
         'jit/x86-shared/CodeGenerator-x86-shared.cpp',
         'jit/x86-shared/Lowering-x86-shared.cpp',
         'jit/x86-shared/MacroAssembler-x86-shared-SIMD.cpp',
         'jit/x86-shared/MacroAssembler-x86-shared.cpp',
         'jit/x86-shared/MoveEmitter-x86-shared.cpp',
     ]
-    SOURCES += [
-        'jit/x86-shared/Disassembler-x86-shared.cpp',  # using namespace js::jit::X86Encoding;
-    ]
     if CONFIG['JS_CODEGEN_X64']:
         LOpcodesGenerated.inputs += ['jit/x64/LIR-x64.h']
         UNIFIED_SOURCES += [
             'jit/x64/Assembler-x64.cpp',
             'jit/x64/Bailouts-x64.cpp',
             'jit/x64/CodeGenerator-x64.cpp',
             'jit/x64/Lowering-x64.cpp',
             'jit/x64/MacroAssembler-x64.cpp',
@@ -567,17 +563,16 @@ elif CONFIG['JS_CODEGEN_ARM']:
         ]
 elif CONFIG['JS_CODEGEN_ARM64']:
     LOpcodesGenerated.inputs += ['jit/arm64/LIR-arm64.h']
     UNIFIED_SOURCES += [
         'jit/arm64/Architecture-arm64.cpp',
         'jit/arm64/Assembler-arm64.cpp',
         'jit/arm64/Bailouts-arm64.cpp',
         'jit/arm64/CodeGenerator-arm64.cpp',
-        'jit/arm64/Disassembler-arm64.cpp',
         'jit/arm64/Lowering-arm64.cpp',
         'jit/arm64/MacroAssembler-arm64.cpp',
         'jit/arm64/MoveEmitter-arm64.cpp',
         'jit/arm64/Trampoline-arm64.cpp',
         'jit/arm64/vixl/Assembler-vixl.cpp',
         'jit/arm64/vixl/Cpu-vixl.cpp',
         'jit/arm64/vixl/Decoder-vixl.cpp',
         'jit/arm64/vixl/Disasm-vixl.cpp',
--- a/js/src/vm/ArrayBufferObject-inl.h
+++ b/js/src/vm/ArrayBufferObject-inl.h
@@ -42,17 +42,17 @@ AnyArrayBufferByteLength(const ArrayBuff
 {
     if (buf->is<ArrayBufferObject>()) {
         return buf->as<ArrayBufferObject>().byteLength();
     }
     return buf->as<SharedArrayBufferObject>().byteLength();
 }
 
 inline uint32_t
-ArrayBufferObjectMaybeShared::byteLength()
+ArrayBufferObjectMaybeShared::byteLength() const
 {
     return AnyArrayBufferByteLength(this);
 }
 
 inline bool
 AnyArrayBufferIsPreparedForAsmJS(const ArrayBufferObjectMaybeShared* buf)
 {
     if (buf->is<ArrayBufferObject>()) {
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -610,19 +610,19 @@ ArrayBufferObject::changeContents(JSCont
  * Wasm Raw Buf Linear Memory Structure
  *
  * The linear heap in Wasm is an mmaped array buffer. Several
  * constants manage its lifetime:
  *
  *  - length - the wasm-visible current length of the buffer. Accesses in the
  *    range [0, length] succeed. May only increase.
  *
- *  - boundsCheckLimit - when !WASM_HUGE_MEMORY, the size against which we
- *    perform bounds checks. It is always a constant offset smaller than
- *    mappedSize. Currently that constant offset is 64k (wasm::GuardSize).
+ *  - boundsCheckLimit - the size against which we perform bounds checks. It is
+ *    always a constant offset smaller than mappedSize. Currently that constant
+ *    offset is 64k (wasm::GuardSize).
  *
  *  - maxSize - the optional declared limit on how much length can grow.
  *
  *  - mappedSize - the actual mmaped size. Access in the range
  *    [0, mappedSize] will either succeed, or be handled by the wasm signal
  *    handlers.
  *
  * The below diagram shows the layout of the wasm heap. The wasm-visible
@@ -930,59 +930,25 @@ js::CreateWasmBuffer(JSContext* cx, cons
                                                                buffer);
 }
 
 // Note this function can return false with or without an exception pending. The
 // asm.js caller checks cx->isExceptionPending before propagating failure.
 // Returning false without throwing means that asm.js linking will fail which
 // will recompile as non-asm.js.
 /* static */ bool
-ArrayBufferObject::prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer, bool needGuard)
+ArrayBufferObject::prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer)
 {
-#ifdef WASM_HUGE_MEMORY
-    MOZ_ASSERT(needGuard);
-#endif
     MOZ_ASSERT(buffer->byteLength() % wasm::PageSize == 0);
     MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
 
     if (buffer->forInlineTypedObject()) {
         return false;
     }
 
-    if (needGuard) {
-        if (buffer->isWasm() && buffer->isPreparedForAsmJS()) {
-            return true;
-        }
-
-        // Non-prepared-for-asm.js wasm buffers can be detached at any time.
-        // This error can only be triggered for Atomics on !WASM_HUGE_MEMORY
-        // so this error is only visible in testing.
-        if (buffer->isWasm() || buffer->isPreparedForAsmJS()) {
-            return false;
-        }
-
-        uint32_t length = buffer->byteLength();
-        WasmArrayRawBuffer* wasmBuf = WasmArrayRawBuffer::Allocate(length, Some(length));
-        if (!wasmBuf) {
-            ReportOutOfMemory(cx);
-            return false;
-        }
-
-        void* data = wasmBuf->dataPointer();
-        memcpy(data, buffer->dataPointer(), length);
-
-        // Swap the new elements into the ArrayBufferObject. Mark the
-        // ArrayBufferObject so we don't do this again.
-        buffer->changeContents(cx, BufferContents::create<WASM>(data), OwnsData);
-        buffer->setIsPreparedForAsmJS();
-        MOZ_ASSERT(data == buffer->dataPointer());
-        cx->updateMallocCounter(wasmBuf->mappedSize());
-        return true;
-    }
-
     if (!buffer->isWasm() && buffer->isPreparedForAsmJS()) {
         return true;
     }
 
     // Non-prepared-for-asm.js wasm buffers can be detached at any time.
     if (buffer->isWasm()) {
         return false;
     }
@@ -1206,16 +1172,28 @@ ArrayBufferObject::wasmBoundsCheckLimit(
 uint32_t
 ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const
 {
     if (is<ArrayBufferObject>()) {
         return as<ArrayBufferObject>().wasmBoundsCheckLimit();
     }
     return as<SharedArrayBufferObject>().wasmBoundsCheckLimit();
 }
+#else
+uint32_t
+ArrayBufferObject::wasmBoundsCheckLimit() const
+{
+    return byteLength();
+}
+
+uint32_t
+ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const
+{
+    return byteLength();
+}
 #endif
 
 uint32_t
 ArrayBufferObject::flags() const
 {
     return uint32_t(getFixedSlot(FLAGS_SLOT).toInt32());
 }
 
--- a/js/src/vm/ArrayBufferObject.h
+++ b/js/src/vm/ArrayBufferObject.h
@@ -100,33 +100,31 @@ int32_t LiveMappedBufferCount();
 class ArrayBufferObjectMaybeShared;
 
 mozilla::Maybe<uint32_t> WasmArrayBufferMaxSize(const ArrayBufferObjectMaybeShared* buf);
 size_t WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf);
 
 class ArrayBufferObjectMaybeShared : public NativeObject
 {
   public:
-    inline uint32_t byteLength();
+    inline uint32_t byteLength() const;
     inline bool isDetached() const;
     inline SharedMem<uint8_t*> dataPointerEither();
 
     // WebAssembly support:
     // Note: the eventual goal is to remove this from ArrayBuffer and have
     // (Shared)ArrayBuffers alias memory owned by some wasm::Memory object.
 
     mozilla::Maybe<uint32_t> wasmMaxSize() const {
         return WasmArrayBufferMaxSize(this);
     }
     size_t wasmMappedSize() const {
         return WasmArrayBufferMappedSize(this);
     }
-#ifndef WASM_HUGE_MEMORY
     uint32_t wasmBoundsCheckLimit() const;
-#endif
 
     inline bool isPreparedForAsmJS() const;
     inline bool isWasm() const;
 };
 
 typedef Rooted<ArrayBufferObjectMaybeShared*> RootedArrayBufferObjectMaybeShared;
 typedef Handle<ArrayBufferObjectMaybeShared*> HandleArrayBufferObjectMaybeShared;
 typedef MutableHandle<ArrayBufferObjectMaybeShared*> MutableHandleArrayBufferObjectMaybeShared;
@@ -382,31 +380,30 @@ class ArrayBufferObject : public ArrayBu
     bool isPlain() const { return bufferKind() == PLAIN; }
     bool isWasm() const { return bufferKind() == WASM; }
     bool isMapped() const { return bufferKind() == MAPPED; }
     bool isExternal() const { return bufferKind() == EXTERNAL; }
     bool isDetached() const { return flags() & DETACHED; }
     bool isPreparedForAsmJS() const { return flags() & FOR_ASMJS; }
 
     // WebAssembly support:
-    static MOZ_MUST_USE bool prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer,
-                                             bool needGuard);
+    static MOZ_MUST_USE bool prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer);
     size_t wasmMappedSize() const;
     mozilla::Maybe<uint32_t> wasmMaxSize() const;
     static MOZ_MUST_USE bool wasmGrowToSizeInPlace(uint32_t newSize,
                                                    Handle<ArrayBufferObject*> oldBuf,
                                                    MutableHandle<ArrayBufferObject*> newBuf,
                                                    JSContext* cx);
 #ifndef WASM_HUGE_MEMORY
     static MOZ_MUST_USE bool wasmMovingGrowToSize(uint32_t newSize,
                                                   Handle<ArrayBufferObject*> oldBuf,
                                                   MutableHandle<ArrayBufferObject*> newBuf,
                                                   JSContext* cx);
+#endif
     uint32_t wasmBoundsCheckLimit() const;
-#endif
 
     static void finalize(FreeOp* fop, JSObject* obj);
 
     static BufferContents createMappedContents(int fd, size_t offset, size_t length);
 
     static size_t offsetOfFlagsSlot() {
         return getFixedSlotOffset(FLAGS_SLOT);
     }
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -1855,17 +1855,17 @@ jit::JitActivation::traceIonRecovery(JST
 void
 jit::JitActivation::startWasmTrap(wasm::Trap trap, uint32_t bytecodeOffset,
                                   const wasm::RegisterState& state)
 {
     MOZ_ASSERT(!isWasmTrapping());
 
     bool unwound;
     wasm::UnwindState unwindState;
-    MOZ_ALWAYS_TRUE(wasm::StartUnwinding(state, &unwindState, &unwound));
+    MOZ_RELEASE_ASSERT(wasm::StartUnwinding(state, &unwindState, &unwound));
     MOZ_ASSERT(unwound == (trap == wasm::Trap::IndirectCallBadSig));
 
     void* pc = unwindState.pc;
     wasm::Frame* fp = unwindState.fp;
 
     const wasm::Code& code = fp->tls->instance->code();
     MOZ_RELEASE_ASSERT(&code == wasm::LookupCode(pc));
 
--- a/js/src/wasm/AsmJS.cpp
+++ b/js/src/wasm/AsmJS.cpp
@@ -6550,27 +6550,18 @@ CheckBuffer(JSContext* cx, const AsmJSMe
                         metadata.minMemoryLength));
         if (!msg) {
             return false;
         }
         return LinkFail(cx, msg.get());
     }
 
     if (buffer->is<ArrayBufferObject>()) {
-        // On 64-bit, bounds checks are statically removed so the huge guard
-        // region is always necessary. On 32-bit, allocating a guard page
-        // requires reallocating the incoming ArrayBuffer which could trigger
-        // OOM. Thus, don't ask for a guard page in this case;
-#ifdef WASM_HUGE_MEMORY
-        bool needGuard = true;
-#else
-        bool needGuard = false;
-#endif
         Rooted<ArrayBufferObject*> arrayBuffer(cx, &buffer->as<ArrayBufferObject>());
-        if (!ArrayBufferObject::prepareForAsmJS(cx, arrayBuffer, needGuard)) {
+        if (!ArrayBufferObject::prepareForAsmJS(cx, arrayBuffer)) {
             return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
         }
     } else {
         return LinkFail(cx, "Unable to prepare SharedArrayBuffer for asm.js use");
     }
 
     MOZ_ASSERT(buffer->isPreparedForAsmJS());
     return true;
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -792,19 +792,17 @@ Instance::Instance(JSContext* cx,
 #ifdef DEBUG
     for (auto t : code_->tiers()) {
         MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length());
     }
 #endif
     MOZ_ASSERT(tables_.length() == metadata().tables.length());
 
     tlsData()->memoryBase = memory ? memory->buffer().dataPointerEither().unwrap() : nullptr;
-#ifndef WASM_HUGE_MEMORY
     tlsData()->boundsCheckLimit = memory ? memory->buffer().wasmBoundsCheckLimit() : 0;
-#endif
     tlsData()->instance = this;
     tlsData()->realm = realm_;
     tlsData()->cx = cx;
     tlsData()->resetInterrupt(cx);
     tlsData()->jumpTable = code_->tieringJumpTable();
     tlsData()->addressOfNeedsIncrementalBarrier =
         (uint8_t*)cx->compartment()->zone()->addressOfNeedsIncrementalBarrier();
 
@@ -1232,19 +1230,17 @@ Instance::ensureProfilingLabels(bool pro
 void
 Instance::onMovingGrowMemory(uint8_t* prevMemoryBase)
 {
     MOZ_ASSERT(!isAsmJS());
     MOZ_ASSERT(!memory_->isShared());
 
     ArrayBufferObject& buffer = memory_->buffer().as<ArrayBufferObject>();
     tlsData()->memoryBase = buffer.dataPointer();
-#ifndef WASM_HUGE_MEMORY
     tlsData()->boundsCheckLimit = buffer.wasmBoundsCheckLimit();
-#endif
 }
 
 void
 Instance::onMovingGrowTable()
 {
     MOZ_ASSERT(!isAsmJS());
     MOZ_ASSERT(tables_.length() == 1);
     TableTls& table = tableTls(metadata().tables[0]);
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -591,24 +591,26 @@ class FunctionCompiler
         load = MWasmLoadTls::New(alloc(), tlsPointer_, offsetof(wasm::TlsData, memoryBase),
                                  MIRType::Pointer, aliases);
         curBlock_->add(load);
 #endif
         return load;
     }
 
     MWasmLoadTls* maybeLoadBoundsCheckLimit() {
-        MWasmLoadTls* load = nullptr;
-#ifndef WASM_HUGE_MEMORY
+#ifdef WASM_HUGE_MEMORY
+        if (!env_.isAsmJS()) {
+            return nullptr;
+        }
+#endif
         AliasSet aliases = env_.maxMemoryLength.isSome() ? AliasSet::None()
                                                          : AliasSet::Load(AliasSet::WasmHeapMeta);
-        load = MWasmLoadTls::New(alloc(), tlsPointer_, offsetof(wasm::TlsData, boundsCheckLimit),
-                                 MIRType::Int32, aliases);
+        auto load = MWasmLoadTls::New(alloc(), tlsPointer_, offsetof(wasm::TlsData, boundsCheckLimit),
+                                      MIRType::Int32, aliases);
         curBlock_->add(load);
-#endif
         return load;
     }
 
     // Only sets *mustAdd if it also returns true.
     bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base, bool* mustAdd) {
         MOZ_ASSERT(!*mustAdd);
 
         // asm.js accesses are always aligned and need no checks.
--- a/js/src/wasm/WasmSignalHandlers.cpp
+++ b/js/src/wasm/WasmSignalHandlers.cpp
@@ -14,27 +14,32 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #include "wasm/WasmSignalHandlers.h"
 
 #include "mozilla/DebugOnly.h"
-#include "mozilla/PodOperations.h"
 #include "mozilla/ScopeExit.h"
 #include "mozilla/ThreadLocal.h"
 
-#include "jit/AtomicOperations.h"
-#include "jit/Disassembler.h"
 #include "vm/Runtime.h"
-#include "wasm/WasmBuiltins.h"
 #include "wasm/WasmInstance.h"
 
-#include "vm/ArrayBufferObject-inl.h"
+using namespace js;
+using namespace js::wasm;
+
+using mozilla::DebugOnly;
+
+// =============================================================================
+// The following pile of macros and includes defines the ToRegisterState() and
+// the ContextTo{PC,FP,SP,LR}() functions from the (highly) platform-specific
+// CONTEXT struct which is provided to the signal handler.
+// =============================================================================
 
 #if defined(XP_WIN)
 # include "util/Windows.h"
 #else
 # include <signal.h>
 # include <sys/mman.h>
 #endif
 
@@ -46,86 +51,35 @@
 # if defined(__DragonFly__)
 #  include <machine/npx.h> // for union savefpu
 # elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
        defined(__NetBSD__) || defined(__OpenBSD__)
 #  include <machine/fpu.h> // for struct savefpu/fxsave64
 # endif
 #endif
 
-using namespace js;
-using namespace js::jit;
-using namespace js::wasm;
-
-using JS::GenericNaN;
-using mozilla::DebugOnly;
-
-// Crashing inside the signal handler can cause the handler to be recursively
-// invoked, eventually blowing the stack without actually showing a crash
-// report dialog via Breakpad. To guard against this we watch for such
-// recursion and fall through to the next handler immediately rather than
-// trying to handle it.
-
-static MOZ_THREAD_LOCAL(bool) sAlreadyInSignalHandler;
-
-struct AutoSignalHandler
-{
-    explicit AutoSignalHandler()
-    {
-        MOZ_ASSERT(!sAlreadyInSignalHandler.get());
-        sAlreadyInSignalHandler.set(true);
-    }
-
-    ~AutoSignalHandler() {
-        MOZ_ASSERT(sAlreadyInSignalHandler.get());
-        sAlreadyInSignalHandler.set(false);
-    }
-};
-
 #if defined(XP_WIN)
-# define XMM_sig(p,i) ((p)->Xmm##i)
 # define EIP_sig(p) ((p)->Eip)
 # define EBP_sig(p) ((p)->Ebp)
 # define ESP_sig(p) ((p)->Esp)
 # define RIP_sig(p) ((p)->Rip)
-# define RAX_sig(p) ((p)->Rax)
-# define RCX_sig(p) ((p)->Rcx)
-# define RDX_sig(p) ((p)->Rdx)
-# define RBX_sig(p) ((p)->Rbx)
 # define RSP_sig(p) ((p)->Rsp)
 # define RBP_sig(p) ((p)->Rbp)
-# define RSI_sig(p) ((p)->Rsi)
-# define RDI_sig(p) ((p)->Rdi)
-# define R8_sig(p) ((p)->R8)
-# define R9_sig(p) ((p)->R9)
-# define R10_sig(p) ((p)->R10)
 # define R11_sig(p) ((p)->R11)
-# define R12_sig(p) ((p)->R12)
 # define R13_sig(p) ((p)->R13)
 # define R14_sig(p) ((p)->R14)
 # define R15_sig(p) ((p)->R15)
 #elif defined(__OpenBSD__)
-# define XMM_sig(p,i) ((p)->sc_fpstate->fx_xmm[i])
 # define EIP_sig(p) ((p)->sc_eip)
 # define EBP_sig(p) ((p)->sc_ebp)
 # define ESP_sig(p) ((p)->sc_esp)
 # define RIP_sig(p) ((p)->sc_rip)
-# define RAX_sig(p) ((p)->sc_rax)
-# define RCX_sig(p) ((p)->sc_rcx)
-# define RDX_sig(p) ((p)->sc_rdx)
-# define RBX_sig(p) ((p)->sc_rbx)
 # define RSP_sig(p) ((p)->sc_rsp)
 # define RBP_sig(p) ((p)->sc_rbp)
-# define RSI_sig(p) ((p)->sc_rsi)
-# define RDI_sig(p) ((p)->sc_rdi)
-# define R8_sig(p) ((p)->sc_r8)
-# define R9_sig(p) ((p)->sc_r9)
-# define R10_sig(p) ((p)->sc_r10)
 # define R11_sig(p) ((p)->sc_r11)
-# define R12_sig(p) ((p)->sc_r12)
 # if defined(__arm__)
 #  define R13_sig(p) ((p)->sc_usr_sp)
 #  define R14_sig(p) ((p)->sc_usr_lr)
 #  define R15_sig(p) ((p)->sc_pc)
 # else
 #  define R13_sig(p) ((p)->sc_r13)
 #  define R14_sig(p) ((p)->sc_r14)
 #  define R15_sig(p) ((p)->sc_r15)
@@ -137,39 +91,27 @@ struct AutoSignalHandler
 #  define R31_sig(p) ((p)->sc_sp)
 # endif
 # if defined(__mips__)
 #  define EPC_sig(p) ((p)->sc_pc)
 #  define RFP_sig(p) ((p)->sc_regs[30])
 # endif
 #elif defined(__linux__) || defined(__sun)
 # if defined(__linux__)
-#  define XMM_sig(p,i) ((p)->uc_mcontext.fpregs->_xmm[i])
 #  define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP])
 #  define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
 #  define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
 # else
-#  define XMM_sig(p,i) ((p)->uc_mcontext.fpregs.fp_reg_set.fpchip_state.xmm[i])
 #  define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_PC])
 #  define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
 #  define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
 # endif
 # define RIP_sig(p) ((p)->uc_mcontext.gregs[REG_RIP])
-# define RAX_sig(p) ((p)->uc_mcontext.gregs[REG_RAX])
-# define RCX_sig(p) ((p)->uc_mcontext.gregs[REG_RCX])
-# define RDX_sig(p) ((p)->uc_mcontext.gregs[REG_RDX])
-# define RBX_sig(p) ((p)->uc_mcontext.gregs[REG_RBX])
 # define RSP_sig(p) ((p)->uc_mcontext.gregs[REG_RSP])
 # define RBP_sig(p) ((p)->uc_mcontext.gregs[REG_RBP])
-# define RSI_sig(p) ((p)->uc_mcontext.gregs[REG_RSI])
-# define RDI_sig(p) ((p)->uc_mcontext.gregs[REG_RDI])
-# define R8_sig(p) ((p)->uc_mcontext.gregs[REG_R8])
-# define R9_sig(p) ((p)->uc_mcontext.gregs[REG_R9])
-# define R10_sig(p) ((p)->uc_mcontext.gregs[REG_R10])
-# define R12_sig(p) ((p)->uc_mcontext.gregs[REG_R12])
 # if defined(__linux__) && defined(__arm__)
 #  define R11_sig(p) ((p)->uc_mcontext.arm_fp)
 #  define R13_sig(p) ((p)->uc_mcontext.arm_sp)
 #  define R14_sig(p) ((p)->uc_mcontext.arm_lr)
 #  define R15_sig(p) ((p)->uc_mcontext.arm_pc)
 # else
 #  define R11_sig(p) ((p)->uc_mcontext.gregs[REG_R11])
 #  define R13_sig(p) ((p)->uc_mcontext.gregs[REG_R13])
@@ -190,75 +132,47 @@ struct AutoSignalHandler
 # endif
 # if defined(__linux__) && (defined(__sparc__) && defined(__arch64__))
 #  define PC_sig(p) ((p)->uc_mcontext.mc_gregs[MC_PC])
 #  define FP_sig(p) ((p)->uc_mcontext.mc_fp)
 #  define SP_sig(p) ((p)->uc_mcontext.mc_i7)
 # endif
 # if defined(__linux__) && \
      (defined(__ppc64__) ||  defined (__PPC64__) || defined(__ppc64le__) || defined (__PPC64LE__))
-// powerpc stack frame pointer (SFP or SP or FP)
 #  define R01_sig(p) ((p)->uc_mcontext.gp_regs[1])
-// powerpc next instruction pointer (NIP or PC)
 #  define R32_sig(p) ((p)->uc_mcontext.gp_regs[32])
 # endif
 #elif defined(__NetBSD__)
-# define XMM_sig(p,i) (((struct fxsave64*)(p)->uc_mcontext.__fpregs)->fx_xmm[i])
 # define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP])
 # define EBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EBP])
 # define ESP_sig(p) ((p)->uc_mcontext.__gregs[_REG_ESP])
 # define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP])
-# define RAX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RAX])
-# define RCX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RCX])
-# define RDX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDX])
-# define RBX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBX])
 # define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP])
 # define RBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBP])
-# define RSI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSI])
-# define RDI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDI])
-# define R8_sig(p) ((p)->uc_mcontext.__gregs[_REG_R8])
-# define R9_sig(p) ((p)->uc_mcontext.__gregs[_REG_R9])
-# define R10_sig(p) ((p)->uc_mcontext.__gregs[_REG_R10])
 # define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
-# define R12_sig(p) ((p)->uc_mcontext.__gregs[_REG_R12])
 # define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
 # define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
 # define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
 # if defined(__aarch64__)
 #  define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC])
 #  define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_X29])
 #  define RLR_sig(p) ((p)->uc_mcontext.__gregs[_REG_X30])
 #  define R31_sig(p) ((p)->uc_mcontext.__gregs[_REG_SP])
 # endif
 # if defined(__mips__)
 #  define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_EPC])
 #  define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_S8])
 # endif
 #elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
-# if defined(__DragonFly__)
-#  define XMM_sig(p,i) (((union savefpu*)(p)->uc_mcontext.mc_fpregs)->sv_xmm.sv_xmm[i])
-# else
-#  define XMM_sig(p,i) (((struct savefpu*)(p)->uc_mcontext.mc_fpstate)->sv_xmm[i])
-# endif
 # define EIP_sig(p) ((p)->uc_mcontext.mc_eip)
 # define EBP_sig(p) ((p)->uc_mcontext.mc_ebp)
 # define ESP_sig(p) ((p)->uc_mcontext.mc_esp)
 # define RIP_sig(p) ((p)->uc_mcontext.mc_rip)
-# define RAX_sig(p) ((p)->uc_mcontext.mc_rax)
-# define RCX_sig(p) ((p)->uc_mcontext.mc_rcx)
-# define RDX_sig(p) ((p)->uc_mcontext.mc_rdx)
-# define RBX_sig(p) ((p)->uc_mcontext.mc_rbx)
 # define RSP_sig(p) ((p)->uc_mcontext.mc_rsp)
 # define RBP_sig(p) ((p)->uc_mcontext.mc_rbp)
-# define RSI_sig(p) ((p)->uc_mcontext.mc_rsi)
-# define RDI_sig(p) ((p)->uc_mcontext.mc_rdi)
-# define R8_sig(p) ((p)->uc_mcontext.mc_r8)
-# define R9_sig(p) ((p)->uc_mcontext.mc_r9)
-# define R10_sig(p) ((p)->uc_mcontext.mc_r10)
-# define R12_sig(p) ((p)->uc_mcontext.mc_r12)
 # if defined(__FreeBSD__) && defined(__arm__)
 #  define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
 #  define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
 #  define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
 #  define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
 # else
 #  define R11_sig(p) ((p)->uc_mcontext.mc_r11)
 #  define R13_sig(p) ((p)->uc_mcontext.mc_r13)
@@ -477,508 +391,105 @@ ToRegisterState(CONTEXT* context)
 #if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
     state.lr = ContextToLR(context);
 #else
     state.lr = (void*)UINTPTR_MAX;
 #endif
     return state;
 }
 
-#if defined(WASM_HUGE_MEMORY)
-MOZ_COLD static void
-SetFPRegToNaN(size_t size, void* fp_reg)
-{
-    MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
-    memset(fp_reg, 0, Simd128DataSize);
-    switch (size) {
-      case 4: *static_cast<float*>(fp_reg) = GenericNaN(); break;
-      case 8: *static_cast<double*>(fp_reg) = GenericNaN(); break;
-      default:
-        // All SIMD accesses throw on OOB.
-        MOZ_CRASH("unexpected size in SetFPRegToNaN");
-    }
-}
-
-MOZ_COLD static void
-SetGPRegToZero(void* gp_reg)
-{
-    memset(gp_reg, 0, sizeof(intptr_t));
-}
-
-MOZ_COLD static void
-SetFPRegToLoadedValue(SharedMem<void*> addr, size_t size, void* fp_reg)
-{
-    MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
-    memset(fp_reg, 0, Simd128DataSize);
-    AtomicOperations::memcpySafeWhenRacy(fp_reg, addr, size);
-}
-
-MOZ_COLD static void
-SetGPRegToLoadedValue(SharedMem<void*> addr, size_t size, void* gp_reg)
-{
-    MOZ_RELEASE_ASSERT(size <= sizeof(void*));
-    memset(gp_reg, 0, sizeof(void*));
-    AtomicOperations::memcpySafeWhenRacy(gp_reg, addr, size);
-}
-
-MOZ_COLD static void
-SetGPRegToLoadedValueSext32(SharedMem<void*> addr, size_t size, void* gp_reg)
-{
-    MOZ_RELEASE_ASSERT(size <= sizeof(int32_t));
-    int8_t msb = AtomicOperations::loadSafeWhenRacy(addr.cast<uint8_t*>() + (size - 1));
-    memset(gp_reg, 0, sizeof(void*));
-    memset(gp_reg, msb >> 7, sizeof(int32_t));
-    AtomicOperations::memcpySafeWhenRacy(gp_reg, addr, size);
-}
+// =============================================================================
+// All signals/exceptions funnel down to this one trap-handling function which
+// tests whether the pc is in a wasm module and, if so, whether there is
+// actually a trap expected at this pc. These tests both avoid real bugs being
+// silently converted to wasm traps and provide the trapping wasm bytecode
+// offset we need to report in the error.
+//
+// Crashing inside wasm trap handling (due to a bug in trap handling or exposed
+// during trap handling) must be reported like a normal crash, not cause the
+// crash report to be lost. On Windows and non-Mach Unix, a crash during the
+// handler reenters the handler, possibly repeatedly until exhausting the stack,
+// and so we prevent recursion with the thread-local sAlreadyHandlingTrap. On
+// Mach, the wasm exception handler has its own thread and is installed only on
+// the thread-level debugging ports of JSRuntime threads, so a crash on
+// exception handler thread will not recurse; it will bubble up to the
+// process-level debugging ports (where Breakpad is installed).
+// =============================================================================
 
-MOZ_COLD static void
-StoreValueFromFPReg(SharedMem<void*> addr, size_t size, const void* fp_reg)
-{
-    MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
-    AtomicOperations::memcpySafeWhenRacy(addr, const_cast<void*>(fp_reg), size);
-}
-
-MOZ_COLD static void
-StoreValueFromGPReg(SharedMem<void*> addr, size_t size, const void* gp_reg)
-{
-    MOZ_RELEASE_ASSERT(size <= sizeof(void*));
-    AtomicOperations::memcpySafeWhenRacy(addr, const_cast<void*>(gp_reg), size);
-}
-
-MOZ_COLD static void
-StoreValueFromGPImm(SharedMem<void*> addr, size_t size, int32_t imm)
-{
-    MOZ_RELEASE_ASSERT(size <= sizeof(imm));
-    AtomicOperations::memcpySafeWhenRacy(addr, static_cast<void*>(&imm), size);
-}
+static MOZ_THREAD_LOCAL(bool) sAlreadyHandlingTrap;
 
-#if defined(JS_CODEGEN_X64)
-# if !defined(XP_DARWIN)
-MOZ_COLD static void*
-AddressOfFPRegisterSlot(CONTEXT* context, FloatRegisters::Encoding encoding)
-{
-    switch (encoding) {
-      case X86Encoding::xmm0:  return &XMM_sig(context, 0);
-      case X86Encoding::xmm1:  return &XMM_sig(context, 1);
-      case X86Encoding::xmm2:  return &XMM_sig(context, 2);
-      case X86Encoding::xmm3:  return &XMM_sig(context, 3);
-      case X86Encoding::xmm4:  return &XMM_sig(context, 4);
-      case X86Encoding::xmm5:  return &XMM_sig(context, 5);
-      case X86Encoding::xmm6:  return &XMM_sig(context, 6);
-      case X86Encoding::xmm7:  return &XMM_sig(context, 7);
-      case X86Encoding::xmm8:  return &XMM_sig(context, 8);
-      case X86Encoding::xmm9:  return &XMM_sig(context, 9);
-      case X86Encoding::xmm10: return &XMM_sig(context, 10);
-      case X86Encoding::xmm11: return &XMM_sig(context, 11);
-      case X86Encoding::xmm12: return &XMM_sig(context, 12);
-      case X86Encoding::xmm13: return &XMM_sig(context, 13);
-      case X86Encoding::xmm14: return &XMM_sig(context, 14);
-      case X86Encoding::xmm15: return &XMM_sig(context, 15);
-      default: break;
-    }
-    MOZ_CRASH();
-}
-
-MOZ_COLD static void*
-AddressOfGPRegisterSlot(CONTEXT* context, Registers::Code code)
+struct AutoHandlingTrap
 {
-    switch (code) {
-      case X86Encoding::rax: return &RAX_sig(context);
-      case X86Encoding::rcx: return &RCX_sig(context);
-      case X86Encoding::rdx: return &RDX_sig(context);
-      case X86Encoding::rbx: return &RBX_sig(context);
-      case X86Encoding::rsp: return &RSP_sig(context);
-      case X86Encoding::rbp: return &RBP_sig(context);
-      case X86Encoding::rsi: return &RSI_sig(context);
-      case X86Encoding::rdi: return &RDI_sig(context);
-      case X86Encoding::r8:  return &R8_sig(context);
-      case X86Encoding::r9:  return &R9_sig(context);
-      case X86Encoding::r10: return &R10_sig(context);
-      case X86Encoding::r11: return &R11_sig(context);
-      case X86Encoding::r12: return &R12_sig(context);
-      case X86Encoding::r13: return &R13_sig(context);
-      case X86Encoding::r14: return &R14_sig(context);
-      case X86Encoding::r15: return &R15_sig(context);
-      default: break;
-    }
-    MOZ_CRASH();
-}
-# else
-MOZ_COLD static void*
-AddressOfFPRegisterSlot(CONTEXT* context, FloatRegisters::Encoding encoding)
-{
-    switch (encoding) {
-      case X86Encoding::xmm0:  return &context->float_.__fpu_xmm0;
-      case X86Encoding::xmm1:  return &context->float_.__fpu_xmm1;
-      case X86Encoding::xmm2:  return &context->float_.__fpu_xmm2;
-      case X86Encoding::xmm3:  return &context->float_.__fpu_xmm3;
-      case X86Encoding::xmm4:  return &context->float_.__fpu_xmm4;
-      case X86Encoding::xmm5:  return &context->float_.__fpu_xmm5;
-      case X86Encoding::xmm6:  return &context->float_.__fpu_xmm6;
-      case X86Encoding::xmm7:  return &context->float_.__fpu_xmm7;
-      case X86Encoding::xmm8:  return &context->float_.__fpu_xmm8;
-      case X86Encoding::xmm9:  return &context->float_.__fpu_xmm9;
-      case X86Encoding::xmm10: return &context->float_.__fpu_xmm10;
-      case X86Encoding::xmm11: return &context->float_.__fpu_xmm11;
-      case X86Encoding::xmm12: return &context->float_.__fpu_xmm12;
-      case X86Encoding::xmm13: return &context->float_.__fpu_xmm13;
-      case X86Encoding::xmm14: return &context->float_.__fpu_xmm14;
-      case X86Encoding::xmm15: return &context->float_.__fpu_xmm15;
-      default: break;
-    }
-    MOZ_CRASH();
-}
-
-MOZ_COLD static void*
-AddressOfGPRegisterSlot(CONTEXT* context, Registers::Code code)
-{
-    switch (code) {
-      case X86Encoding::rax: return &context->thread.__rax;
-      case X86Encoding::rcx: return &context->thread.__rcx;
-      case X86Encoding::rdx: return &context->thread.__rdx;
-      case X86Encoding::rbx: return &context->thread.__rbx;
-      case X86Encoding::rsp: return &context->thread.__rsp;
-      case X86Encoding::rbp: return &context->thread.__rbp;
-      case X86Encoding::rsi: return &context->thread.__rsi;
-      case X86Encoding::rdi: return &context->thread.__rdi;
-      case X86Encoding::r8:  return &context->thread.__r8;
-      case X86Encoding::r9:  return &context->thread.__r9;
-      case X86Encoding::r10: return &context->thread.__r10;
-      case X86Encoding::r11: return &context->thread.__r11;
-      case X86Encoding::r12: return &context->thread.__r12;
-      case X86Encoding::r13: return &context->thread.__r13;
-      case X86Encoding::r14: return &context->thread.__r14;
-      case X86Encoding::r15: return &context->thread.__r15;
-      default: break;
-    }
-    MOZ_CRASH();
-}
-# endif  // !XP_DARWIN
-#elif defined(JS_CODEGEN_ARM64)
-MOZ_COLD static void*
-AddressOfFPRegisterSlot(CONTEXT* context, FloatRegisters::Encoding encoding)
-{
-    MOZ_CRASH("NYI - asm.js not supported yet on this platform");
-}
-
-MOZ_COLD static void*
-AddressOfGPRegisterSlot(CONTEXT* context, Registers::Code code)
-{
-    MOZ_CRASH("NYI - asm.js not supported yet on this platform");
-}
-#endif
-
-MOZ_COLD static void
-SetRegisterToCoercedUndefined(CONTEXT* context, size_t size,
-                              const Disassembler::OtherOperand& value)
-{
-    if (value.kind() == Disassembler::OtherOperand::FPR) {
-        SetFPRegToNaN(size, AddressOfFPRegisterSlot(context, value.fpr()));
-    } else {
-        SetGPRegToZero(AddressOfGPRegisterSlot(context, value.gpr()));
-    }
-}
-
-MOZ_COLD static void
-SetRegisterToLoadedValue(CONTEXT* context, SharedMem<void*> addr, size_t size,
-                         const Disassembler::OtherOperand& value)
-{
-    if (value.kind() == Disassembler::OtherOperand::FPR) {
-        SetFPRegToLoadedValue(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
-    } else {
-        SetGPRegToLoadedValue(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
-    }
-}
-
-MOZ_COLD static void
-SetRegisterToLoadedValueSext32(CONTEXT* context, SharedMem<void*> addr, size_t size,
-                               const Disassembler::OtherOperand& value)
-{
-    SetGPRegToLoadedValueSext32(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
-}
-
-MOZ_COLD static void
-StoreValueFromRegister(CONTEXT* context, SharedMem<void*> addr, size_t size,
-                       const Disassembler::OtherOperand& value)
-{
-    if (value.kind() == Disassembler::OtherOperand::FPR) {
-        StoreValueFromFPReg(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
-    } else if (value.kind() == Disassembler::OtherOperand::GPR) {
-        StoreValueFromGPReg(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
-    } else {
-        StoreValueFromGPImm(addr, size, value.imm());
-    }
-}
-
-MOZ_COLD static uint8_t*
-ComputeAccessAddress(CONTEXT* context, const Disassembler::ComplexAddress& address)
-{
-    MOZ_RELEASE_ASSERT(!address.isPCRelative(), "PC-relative addresses not supported yet");
-
-    uintptr_t result = address.disp();
-
-    if (address.hasBase()) {
-        uintptr_t base;
-        StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
-                            AddressOfGPRegisterSlot(context, address.base()));
-        result += base;
+    AutoHandlingTrap() {
+        MOZ_ASSERT(!sAlreadyHandlingTrap.get());
+        sAlreadyHandlingTrap.set(true);
     }
 
-    if (address.hasIndex()) {
-        uintptr_t index;
-        StoreValueFromGPReg(SharedMem<void*>::unshared(&index), sizeof(uintptr_t),
-                            AddressOfGPRegisterSlot(context, address.index()));
-        MOZ_ASSERT(address.scale() < 32, "address shift overflow");
-        result += index * (uintptr_t(1) << address.scale());
-    }
-
-    return reinterpret_cast<uint8_t*>(result);
-}
-#endif // WASM_HUGE_MEMORY
-
-MOZ_COLD static MOZ_MUST_USE bool
-HandleOutOfBounds(CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
-                  const ModuleSegment* segment, const Instance& instance, JitActivation* activation,
-                  uint8_t** ppc)
-{
-    MOZ_RELEASE_ASSERT(segment->code().containsCodePC(pc));
-
-    Trap trap;
-    BytecodeOffset bytecode;
-    MOZ_ALWAYS_TRUE(segment->code().lookupTrap(pc, &trap, &bytecode));
-
-    if (trap != Trap::OutOfBounds) {
-        return false;
-    }
-
-    if (bytecode.isValid()) {
-        activation->startWasmTrap(Trap::OutOfBounds, bytecode.offset(), ToRegisterState(context));
-        *ppc = segment->trapCode();
-        return true;
+    ~AutoHandlingTrap() {
+        MOZ_ASSERT(sAlreadyHandlingTrap.get());
+        sAlreadyHandlingTrap.set(false);
     }
-
-#ifndef WASM_HUGE_MEMORY
-    return false;
-#else
-    // On WASM_HUGE_MEMORY platforms, asm.js code may fault. asm.js does not
-    // trap on fault and so has no trap out-of-line path. Instead, stores are
-    // silently ignored (by advancing the pc past the store and resuming) and
-    // loads silently succeed with a JS-semantics-determined value.
-    MOZ_RELEASE_ASSERT(instance.isAsmJS());
-
-    // Asm.JS memory cannot grow or shrink - only wasm can grow or shrink it,
-    // and asm.js is not allowed to use wasm memory.  On this Asm.JS-only path
-    // we therefore need not worry about memory growing or shrinking while the
-    // signal handler is executing, and we can read the length without locking
-    // the memory.  Indeed, the buffer's byteLength always holds the correct
-    // value.
-    uint32_t memoryLength = instance.memory()->buffer().byteLength();
-
-    // Disassemble the instruction which caused the trap so that we can extract
-    // information about it and decide what to do.
-    Disassembler::HeapAccess access;
-    uint8_t* end = Disassembler::DisassembleHeapAccess(pc, &access);
-    const Disassembler::ComplexAddress& address = access.address();
-    MOZ_RELEASE_ASSERT(end > pc);
-    MOZ_RELEASE_ASSERT(segment->containsCodePC(end));
-
-    // Check x64 asm.js heap access invariants.
-    MOZ_RELEASE_ASSERT(address.disp() >= 0);
-    MOZ_RELEASE_ASSERT(address.base() == HeapReg.code());
-    MOZ_RELEASE_ASSERT(!address.hasIndex() || address.index() != HeapReg.code());
-    MOZ_RELEASE_ASSERT(address.scale() == 0);
-    if (address.hasBase()) {
-        uintptr_t base;
-        StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
-                            AddressOfGPRegisterSlot(context, address.base()));
-        MOZ_RELEASE_ASSERT(reinterpret_cast<uint8_t*>(base) == instance.memoryBase());
-    }
-    if (address.hasIndex()) {
-        uintptr_t index;
-        StoreValueFromGPReg(SharedMem<void*>::unshared(&index), sizeof(uintptr_t),
-                            AddressOfGPRegisterSlot(context, address.index()));
-        MOZ_RELEASE_ASSERT(uint32_t(index) == index);
-    }
-
-    // Determine the actual effective address of the faulting access. We can't
-    // rely on the faultingAddress given to us by the OS, because we need the
-    // address of the start of the access, and the OS may sometimes give us an
-    // address somewhere in the middle of the heap access.
-    uint8_t* accessAddress = ComputeAccessAddress(context, address);
-    MOZ_RELEASE_ASSERT(size_t(faultingAddress - accessAddress) < access.size(),
-                       "Given faulting address does not appear to be within computed "
-                       "faulting address range");
-    MOZ_RELEASE_ASSERT(accessAddress >= instance.memoryBase(),
-                       "Access begins outside the asm.js heap");
-    MOZ_RELEASE_ASSERT(accessAddress + access.size() <= instance.memoryBase() +
-                       instance.memoryMappedSize(),
-                       "Access extends beyond the asm.js heap guard region");
-    MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.memoryBase() +
-                       memoryLength,
-                       "Computed access address is not actually out of bounds");
+};
 
-    // The basic sandbox model is that all heap accesses are a heap base
-    // register plus an index, and the index is always computed with 32-bit
-    // operations, so we know it can only be 4 GiB off of the heap base.
-    //
-    // However, we wish to support the optimization of folding immediates
-    // and scaled indices into addresses, and any address arithmetic we fold
-    // gets done at full pointer width, so it doesn't get properly wrapped.
-    // We support this by extending HugeMappedSize to the greatest size that
-    // could be reached by such an unwrapped address, and then when we arrive
-    // here in the signal handler for such an access, we compute the fully
-    // wrapped address, and perform the load or store on it.
-    //
-    // Taking a signal is really slow, but in theory programs really shouldn't
-    // be hitting this anyway.
-    intptr_t unwrappedOffset = accessAddress - instance.memoryBase().unwrap(/* for value */);
-    uint32_t wrappedOffset = uint32_t(unwrappedOffset);
-    size_t size = access.size();
-    MOZ_RELEASE_ASSERT(wrappedOffset + size > wrappedOffset);
-    bool inBounds = wrappedOffset + size < memoryLength;
+static MOZ_MUST_USE bool
+HandleTrap(CONTEXT* context, JSContext* cx)
+{
+    MOZ_ASSERT(sAlreadyHandlingTrap.get());
 
-    if (inBounds) {
-        // We now know that this is an access that is actually in bounds when
-        // properly wrapped. Complete the load or store with the wrapped
-        // address.
-        SharedMem<uint8_t*> wrappedAddress = instance.memoryBase() + wrappedOffset;
-        MOZ_RELEASE_ASSERT(wrappedAddress >= instance.memoryBase());
-        MOZ_RELEASE_ASSERT(wrappedAddress + size > wrappedAddress);
-        MOZ_RELEASE_ASSERT(wrappedAddress + size <= instance.memoryBase() + memoryLength);
-        switch (access.kind()) {
-          case Disassembler::HeapAccess::Load:
-            SetRegisterToLoadedValue(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
-            break;
-          case Disassembler::HeapAccess::LoadSext32:
-            SetRegisterToLoadedValueSext32(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
-            break;
-          case Disassembler::HeapAccess::Store:
-            StoreValueFromRegister(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
-            break;
-          case Disassembler::HeapAccess::LoadSext64:
-            MOZ_CRASH("no int64 accesses in asm.js");
-          case Disassembler::HeapAccess::Unknown:
-            MOZ_CRASH("Failed to disassemble instruction");
-        }
-    } else {
-        // We now know that this is an out-of-bounds access made by an asm.js
-        // load/store that we should handle.
-        switch (access.kind()) {
-          case Disassembler::HeapAccess::Load:
-          case Disassembler::HeapAccess::LoadSext32:
-            // Assign the JS-defined result value to the destination register
-            // (ToInt32(undefined) or ToNumber(undefined), determined by the
-            // type of the destination register). Very conveniently, we can
-            // infer the type from the register class, so the only types using
-            // FP registers are float32 and double.
-            SetRegisterToCoercedUndefined(context, access.size(), access.otherOperand());
-            break;
-          case Disassembler::HeapAccess::Store:
-            // Do nothing.
-            break;
-          case Disassembler::HeapAccess::LoadSext64:
-            MOZ_CRASH("no int64 accesses in asm.js");
-          case Disassembler::HeapAccess::Unknown:
-            MOZ_CRASH("Failed to disassemble instruction");
-        }
-    }
-
-    *ppc = end;
-    return true;
-#endif // WASM_HUGE_MEMORY
-}
-
-MOZ_COLD static bool
-IsHeapAccessAddress(const Instance &instance, uint8_t* faultingAddress)
-{
-    size_t accessLimit = instance.memoryMappedSize();
-
-    return instance.metadata().usesMemory() &&
-           faultingAddress >= instance.memoryBase() &&
-           faultingAddress < instance.memoryBase() + accessLimit;
-}
-
-#if defined(XP_WIN)
-
-static bool
-HandleFault(PEXCEPTION_POINTERS exception)
-{
-    EXCEPTION_RECORD* record = exception->ExceptionRecord;
-    CONTEXT* context = exception->ContextRecord;
-
-    if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION &&
-        record->ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION)
-    {
-        return false;
-    }
-
-    uint8_t** ppc = ContextToPC(context);
-    uint8_t* pc = *ppc;
-
+    uint8_t* pc = *ContextToPC(context);
     const CodeSegment* codeSegment = LookupCodeSegment(pc);
     if (!codeSegment || !codeSegment->isModule()) {
         return false;
     }
 
-    const ModuleSegment* moduleSegment = codeSegment->asModule();
-
-    JitActivation* activation = TlsContext.get()->activation()->asJit();
-    MOZ_ASSERT(activation);
-
-    const Instance* instance = LookupFaultingInstance(*moduleSegment, pc, ContextToFP(context));
-    if (!instance) {
-        return false;
-    }
+    const ModuleSegment& segment = *codeSegment->asModule();
 
-    if (record->ExceptionCode == EXCEPTION_ILLEGAL_INSTRUCTION) {
-        Trap trap;
-        BytecodeOffset bytecode;
-        if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode)) {
-            return false;
-        }
-
-        activation->startWasmTrap(trap, bytecode.offset(), ToRegisterState(context));
-        *ppc = moduleSegment->trapCode();
-        return true;
-    }
-
-    MOZ_RELEASE_ASSERT(&instance->code() == &moduleSegment->code());
-
-    if (record->NumberParameters < 2) {
+    Trap trap;
+    BytecodeOffset bytecode;
+    if (!segment.code().lookupTrap(pc, &trap, &bytecode)) {
         return false;
     }
 
-    uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(record->ExceptionInformation[1]);
+    // We have a safe, expected wasm trap. Call startWasmTrap() to store enough
+    // register state at the point of the trap to allow stack unwinding or
+    // resumption, both of which will call finishWasmTrap().
+    jit::JitActivation* activation = cx->activation()->asJit();
+    activation->startWasmTrap(trap, bytecode.offset(), ToRegisterState(context));
+    *ContextToPC(context) = segment.trapCode();
+    return true;
+}
 
-    // This check isn't necessary, but, since we can, check anyway to make
-    // sure we aren't covering up a real bug.
-    if (!IsHeapAccessAddress(*instance, faultingAddress)) {
-        return false;
-    }
+// =============================================================================
+// The following platform specific signal/exception handlers are installed by
+// wasm::EnsureSignalHandlers() and funnel all potential wasm traps into
+// HandleTrap() above.
+// =============================================================================
 
-    MOZ_ASSERT(activation->compartment() == instance->realm()->compartment());
-
-    return HandleOutOfBounds(context, pc, faultingAddress, moduleSegment, *instance, activation, ppc);
-}
+#if defined(XP_WIN)
 
 static LONG WINAPI
-WasmFaultHandler(LPEXCEPTION_POINTERS exception)
+WasmTrapHandler(LPEXCEPTION_POINTERS exception)
 {
-    // Before anything else, prevent handler recursion.
-    if (sAlreadyInSignalHandler.get()) {
+    if (sAlreadyHandlingTrap.get()) {
         return EXCEPTION_CONTINUE_SEARCH;
     }
-    AutoSignalHandler ash;
+    AutoHandlingTrap aht;
 
-    if (HandleFault(exception)) {
-        return EXCEPTION_CONTINUE_EXECUTION;
+    EXCEPTION_RECORD* record = exception->ExceptionRecord;
+    if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION &&
+        record->ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION)
+    {
+        return EXCEPTION_CONTINUE_SEARCH;
     }
 
-    // No need to worry about calling other handlers, the OS does this for us.
-    return EXCEPTION_CONTINUE_SEARCH;
+    if (!HandleTrap(exception->ContextRecord, TlsContext.get())) {
+        return EXCEPTION_CONTINUE_SEARCH;
+    }
+
+    return EXCEPTION_CONTINUE_EXECUTION;
 }
 
 #elif defined(XP_DARWIN)
 # include <mach/exc.h>
 
 // This definition was generated by mig (the Mach Interface Generator) for the
 // routine 'exception_raise' (exc.defs).
 #pragma pack(4)
@@ -1036,70 +547,26 @@ HandleMachException(JSContext* cx, const
         return false;
     }
     kret = thread_get_state(cxThread, float_state,
                             (thread_state_t)&context.float_, &float_state_count);
     if (kret != KERN_SUCCESS) {
         return false;
     }
 
-    uint8_t** ppc = ContextToPC(&context);
-    uint8_t* pc = *ppc;
-
     if (request.body.exception != EXC_BAD_ACCESS &&
         request.body.exception != EXC_BAD_INSTRUCTION)
     {
         return false;
     }
 
-    // The faulting thread is suspended so we can access cx fields that can
-    // normally only be accessed by the cx's main thread.
-    AutoNoteSingleThreadedRegion anstr;
-
-    const CodeSegment* codeSegment = LookupCodeSegment(pc);
-    if (!codeSegment || !codeSegment->isModule()) {
-        return false;
-    }
-
-    const ModuleSegment* moduleSegment = codeSegment->asModule();
-
-    const Instance* instance = LookupFaultingInstance(*moduleSegment, pc, ContextToFP(&context));
-    if (!instance) {
-        return false;
-    }
-
-    JitActivation* activation = cx->activation()->asJit();
-    MOZ_ASSERT(activation->compartment() == instance->realm()->compartment());
-
-    if (request.body.exception == EXC_BAD_INSTRUCTION) {
-        Trap trap;
-        BytecodeOffset bytecode;
-        if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode)) {
-            return false;
-        }
-
-        activation->startWasmTrap(trap, bytecode.offset(), ToRegisterState(&context));
-        *ppc = moduleSegment->trapCode();
-    } else {
-        MOZ_RELEASE_ASSERT(&instance->code() == &moduleSegment->code());
-
-        MOZ_ASSERT(request.body.exception == EXC_BAD_ACCESS);
-        if (request.body.codeCnt != 2) {
-            return false;
-        }
-
-        uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(request.body.code[1]);
-
-        // This check isn't necessary, but, since we can, check anyway to make
-        // sure we aren't covering up a real bug.
-        if (!IsHeapAccessAddress(*instance, faultingAddress)) {
-            return false;
-        }
-
-        if (!HandleOutOfBounds(&context, pc, faultingAddress, moduleSegment, *instance, activation, ppc)) {
+    {
+        AutoNoteSingleThreadedRegion anstr;
+        AutoHandlingTrap aht;
+        if (!HandleTrap(&context, cx)) {
             return false;
         }
     }
 
     // Update the thread state with the new pc and register values.
     kret = thread_set_state(cxThread, float_state, (thread_state_t)&context.float_, float_state_count);
     if (kret != KERN_SUCCESS) {
         return false;
@@ -1264,108 +731,34 @@ MachExceptionHandler::install(JSContext*
     installed_ = true;
     onFailure.release();
     return true;
 }
 
 #else  // If not Windows or Mac, assume Unix
 
 #ifdef __mips__
-    static const uint32_t kWasmTrapSignal = SIGFPE;
+static const uint32_t kWasmTrapSignal = SIGFPE;
 #else
-    static const uint32_t kWasmTrapSignal = SIGILL;
+static const uint32_t kWasmTrapSignal = SIGILL;
 #endif
 
-// Be very cautious and default to not handling; we don't want to accidentally
-// silence real crashes from real bugs.
-static bool
-HandleFault(int signum, siginfo_t* info, void* ctx)
-{
-    // Before anything else, prevent handler recursion.
-    if (sAlreadyInSignalHandler.get()) {
-        return false;
-    }
-    AutoSignalHandler ash;
-
-    MOZ_RELEASE_ASSERT(signum == SIGSEGV || signum == SIGBUS || signum == kWasmTrapSignal);
-
-    CONTEXT* context = (CONTEXT*)ctx;
-    uint8_t** ppc = ContextToPC(context);
-    uint8_t* pc = *ppc;
-
-    const CodeSegment* segment = LookupCodeSegment(pc);
-    if (!segment || !segment->isModule()) {
-        return false;
-    }
-
-    const ModuleSegment* moduleSegment = segment->asModule();
-
-    const Instance* instance = LookupFaultingInstance(*moduleSegment, pc, ContextToFP(context));
-    if (!instance) {
-        return false;
-    }
-
-    JitActivation* activation = TlsContext.get()->activation()->asJit();
-    MOZ_ASSERT(activation->compartment() == instance->realm()->compartment());
-
-    if (signum == kWasmTrapSignal) {
-        // Wasm traps for MIPS raise only integer overflow fp exception.
-#ifdef __mips__
-        if (info->si_code != FPE_INTOVF) {
-            return false;
-        }
-#endif
-        Trap trap;
-        BytecodeOffset bytecode;
-        if (!moduleSegment->code().lookupTrap(pc, &trap, &bytecode)) {
-            return false;
-        }
-
-        activation->startWasmTrap(trap, bytecode.offset(), ToRegisterState(context));
-        *ppc = moduleSegment->trapCode();
-        return true;
-    }
-
-    MOZ_RELEASE_ASSERT(&instance->code() == &moduleSegment->code());
-
-    uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(info->si_addr);
-
-    // Although it's not strictly necessary, to make sure we're not covering up
-    // any real bugs, check that the faulting address is indeed in the
-    // instance's memory.
-    if (!faultingAddress) {
-        // On some Linux systems, the kernel apparently sometimes "gives up" and
-        // passes a null faultingAddress with si_code set to SI_KERNEL.
-        // This is observed on some automation machines for some out-of-bounds
-        // atomic accesses on x86/64.
-#ifdef SI_KERNEL
-        if (info->si_code != SI_KERNEL) {
-            return false;
-        }
-#else
-        return false;
-#endif
-    } else {
-        if (!IsHeapAccessAddress(*instance, faultingAddress)) {
-            return false;
-        }
-    }
-
-    return HandleOutOfBounds(context, pc, faultingAddress, moduleSegment, *instance, activation, ppc);
-}
-
 static struct sigaction sPrevSEGVHandler;
 static struct sigaction sPrevSIGBUSHandler;
 static struct sigaction sPrevWasmTrapHandler;
 
 static void
-WasmFaultHandler(int signum, siginfo_t* info, void* context)
+WasmTrapHandler(int signum, siginfo_t* info, void* context)
 {
-    if (HandleFault(signum, info, context)) {
-        return;
+    if (!sAlreadyHandlingTrap.get()) {
+        AutoHandlingTrap aht;
+        MOZ_RELEASE_ASSERT(signum == SIGSEGV || signum == SIGBUS || signum == kWasmTrapSignal);
+        if (HandleTrap((CONTEXT*)context, TlsContext.get())) {
+            return;
+        }
     }
 
     struct sigaction* previousSignal = nullptr;
     switch (signum) {
       case SIGSEGV: previousSignal = &sPrevSEGVHandler; break;
       case SIGBUS: previousSignal = &sPrevSIGBUSHandler; break;
       case kWasmTrapSignal: previousSignal = &sPrevWasmTrapHandler; break;
     }
@@ -1421,69 +814,69 @@ ProcessHasSignalHandlers()
 
 #if defined(ANDROID) && defined(MOZ_LINKER)
     // Signal handling is broken on some android systems.
     if (IsSignalHandlingBroken()) {
         return false;
     }
 #endif
 
-    // Initalize ThreadLocal flag used by WasmFaultHandler
-    sAlreadyInSignalHandler.infallibleInit();
+    // Initialize ThreadLocal flag used by WasmTrapHandler
+    sAlreadyHandlingTrap.infallibleInit();
 
     // Install a SIGSEGV handler to handle safely-out-of-bounds asm.js heap
     // access and/or unaligned accesses.
 #if defined(XP_WIN)
 # if defined(MOZ_ASAN)
     // Under ASan we need to let the ASan runtime's ShadowExceptionHandler stay
     // in the first handler position. This requires some coordination with
     // MemoryProtectionExceptionHandler::isDisabled().
     const bool firstHandler = false;
 # else
-    // Otherwise, WasmFaultHandler needs to go first, so that we can recover
+    // Otherwise, WasmTrapHandler needs to go first, so that we can recover
     // from wasm faults and continue execution without triggering handlers
     // such as MemoryProtectionExceptionHandler that assume we are crashing.
     const bool firstHandler = true;
 # endif
-    if (!AddVectoredExceptionHandler(firstHandler, WasmFaultHandler)) {
+    if (!AddVectoredExceptionHandler(firstHandler, WasmTrapHandler)) {
         return false;
     }
 #elif defined(XP_DARWIN)
     // OSX handles seg faults via the Mach exception handler above, so don't
-    // install WasmFaultHandler.
+    // install WasmTrapHandler.
 #else
     // SA_NODEFER allows us to reenter the signal handler if we crash while
     // handling the signal, and fall through to the Breakpad handler by testing
     // handlingSegFault.
 
     // Allow handling OOB with signals on all architectures
     struct sigaction faultHandler;
     faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
-    faultHandler.sa_sigaction = WasmFaultHandler;
+    faultHandler.sa_sigaction = WasmTrapHandler;
     sigemptyset(&faultHandler.sa_mask);
     if (sigaction(SIGSEGV, &faultHandler, &sPrevSEGVHandler)) {
         MOZ_CRASH("unable to install segv handler");
     }
 
 # if defined(JS_CODEGEN_ARM)
     // On Arm Handle Unaligned Accesses
     struct sigaction busHandler;
     busHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
-    busHandler.sa_sigaction = WasmFaultHandler;
+    busHandler.sa_sigaction = WasmTrapHandler;
     sigemptyset(&busHandler.sa_mask);
     if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler)) {
         MOZ_CRASH("unable to install sigbus handler");
     }
 # endif
 
     // Install a handler to handle the instructions that are emitted to implement
     // wasm traps.
     struct sigaction wasmTrapHandler;
     wasmTrapHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
-    wasmTrapHandler.sa_sigaction = WasmFaultHandler;
+    wasmTrapHandler.sa_sigaction = WasmTrapHandler;
     sigemptyset(&wasmTrapHandler.sa_mask);
     if (sigaction(kWasmTrapSignal, &wasmTrapHandler, &sPrevWasmTrapHandler)) {
         MOZ_CRASH("unable to install wasm trap handler");
     }
 #endif
 
     sHaveSignalHandlers = true;
     return true;
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -1980,20 +1980,18 @@ typedef Vector<TableDesc, 0, SystemAlloc
 //
 // After the TlsData struct follows the module's declared TLS variables.
 
 struct TlsData
 {
     // Pointer to the base of the default memory (or null if there is none).
     uint8_t* memoryBase;
 
-#ifndef WASM_HUGE_MEMORY
     // Bounds check limit of memory, in bytes (or zero if there is no memory).
     uint32_t boundsCheckLimit;
-#endif
 
     // Pointer to the Instance that contains this TLS data.
     Instance* instance;
 
     // Equal to instance->realm_.
     JS::Realm* realm;
 
     // The containing JSContext.