Bug 1268024: split HeapAccess into MemoryAccess and BoundsCheck; r=luke
author Benjamin Bouvier <benj@benj.me>
Mon, 13 Jun 2016 10:24:51 +0200
changeset 341729 a134a50729fadcdd7b0804862946c533a60bcd7e
parent 341728 032b92aa4c442fa736bfcb5655daa19b9dee8a3f
child 341730 dcd9e1d4254a732ed1b6e707bc5f42c940e41fa9
push id 6389
push user raliiev@mozilla.com
push date Mon, 19 Sep 2016 13:38:22 +0000
reviewers luke
bugs 1268024
milestone 50.0a1
Bug 1268024: split HeapAccess into MemoryAccess and BoundsCheck; r=luke
MozReview-Commit-ID: 5F3fFNACx7u
js/src/asmjs/WasmCode.cpp
js/src/asmjs/WasmCode.h
js/src/asmjs/WasmGenerator.cpp
js/src/asmjs/WasmInstance.cpp
js/src/asmjs/WasmInstance.h
js/src/asmjs/WasmModule.h
js/src/asmjs/WasmSignalHandlers.cpp
js/src/asmjs/WasmTypes.h
js/src/jit-test/tests/asm.js/testAtomics.js
js/src/jit-test/tests/asm.js/testBug1164391.js
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm64/Assembler-arm64.cpp
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
js/src/jit/mips32/Assembler-mips32.cpp
js/src/jit/mips32/Assembler-mips32.h
js/src/jit/mips64/Assembler-mips64.cpp
js/src/jit/mips64/Assembler-mips64.h
js/src/jit/none/MacroAssembler-none.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x86-shared/Assembler-x86-shared.h
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.h
js/src/jit/x86/CodeGenerator-x86.cpp
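
In outline, the patch replaces the single HeapAccess record with two smaller ones and threads them from code generation through to link time. A condensed sketch of that flow, stitched together from the hunks below (the names are the patch's own; the fragments are abridged, not verbatim):

    // 1. Code generators record the two kinds of sites separately:
    uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();      // ARM helper shown
    masm.append(wasm::BoundsCheck(cmpOffset));                         // explicit length check to patch later
    masm.append(wasm::MemoryAccess(before, wasm::MemoryAccess::Throw,  // patchable/faulting access (x64 form shown)
                                   wasm::MemoryAccess::WrapOffset));

    // 2. ModuleGenerator::finish moves them into the module Metadata:
    metadata_->memoryAccesses = masm_.extractMemoryAccesses();
    metadata_->boundsChecks = masm_.extractBoundsChecks();

    // 3. At link time, SpecializeToHeap patches each record against the heap:
    for (const wasm::BoundsCheck& check : metadata.boundsChecks)
        Assembler::UpdateBoundsCheck(check.patchAt(cs.code()), heapLength);
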
--- a/js/src/asmjs/WasmCode.cpp
+++ b/js/src/asmjs/WasmCode.cpp
@@ -103,53 +103,27 @@ StaticallyLink(CodeSegment& cs, const Li
         for (size_t i = 0; i < table.elemOffsets.length(); i++)
             array[i] = cs.code() + table.elemOffsets[i];
     }
 }
 
 static void
 SpecializeToHeap(CodeSegment& cs, const Metadata& metadata, uint8_t* heapBase, uint32_t heapLength)
 {
-#if defined(JS_CODEGEN_X86)
+    for (const BoundsCheck& check : metadata.boundsChecks)
+        Assembler::UpdateBoundsCheck(check.patchAt(cs.code()), heapLength);
 
-    // An access is out-of-bounds iff
-    //      ptr + offset + data-type-byte-size > heapLength
-    // i.e. ptr > heapLength - data-type-byte-size - offset. data-type-byte-size
-    // and offset are already included in the addend so we
-    // just have to add the heap length here.
-    for (const HeapAccess& access : metadata.heapAccesses) {
-        if (access.hasLengthCheck())
-            X86Encoding::AddInt32(access.patchLengthAt(cs.code()), heapLength);
+#if defined(JS_CODEGEN_X86)
+    for (const MemoryAccess& access : metadata.memoryAccesses) {
+        // Patch memory pointer immediate.
         void* addr = access.patchHeapPtrImmAt(cs.code());
         uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
         MOZ_ASSERT(disp <= INT32_MAX);
         X86Encoding::SetPointer(addr, (void*)(heapBase + disp));
     }
-
-#elif defined(JS_CODEGEN_X64)
-
-    // Even with signal handling being used for most bounds checks, there may be
-    // atomic operations that depend on explicit checks.
-    //
-    // If we have any explicit bounds checks, we need to patch the heap length
-    // checks at the right places. All accesses that have been recorded are the
-    // only ones that need bound checks (see also
-    // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
-    for (const HeapAccess& access : metadata.heapAccesses) {
-        // See comment above for x86 codegen.
-        if (access.hasLengthCheck())
-            X86Encoding::AddInt32(access.patchLengthAt(cs.code()), heapLength);
-    }
-
-#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
-      defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-
-    for (const HeapAccess& access : metadata.heapAccesses)
-        Assembler::UpdateBoundsCheck(heapLength, (Instruction*)(access.insnOffset() + cs.code()));
-
 #endif
 }
 
 static bool
 SendCodeRangesToProfiler(ExclusiveContext* cx, CodeSegment& cs, const Metadata& metadata)
 {
     bool enabled = false;
 #ifdef JS_ION_PERF
@@ -498,60 +472,64 @@ CacheableChars::sizeOfExcludingThis(Mall
 }
 
 size_t
 Metadata::serializedSize() const
 {
     return sizeof(pod()) +
            SerializedVectorSize(imports) +
            SerializedVectorSize(exports) +
-           SerializedPodVectorSize(heapAccesses) +
+           SerializedPodVectorSize(memoryAccesses) +
+           SerializedPodVectorSize(boundsChecks) +
            SerializedPodVectorSize(codeRanges) +
            SerializedPodVectorSize(callSites) +
            SerializedPodVectorSize(callThunks) +
            SerializedVectorSize(funcNames) +
            filename.serializedSize();
 }
 
 uint8_t*
 Metadata::serialize(uint8_t* cursor) const
 {
     cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
     cursor = SerializeVector(cursor, imports);
     cursor = SerializeVector(cursor, exports);
-    cursor = SerializePodVector(cursor, heapAccesses);
+    cursor = SerializePodVector(cursor, memoryAccesses);
+    cursor = SerializePodVector(cursor, boundsChecks);
     cursor = SerializePodVector(cursor, codeRanges);
     cursor = SerializePodVector(cursor, callSites);
     cursor = SerializePodVector(cursor, callThunks);
     cursor = SerializeVector(cursor, funcNames);
     cursor = filename.serialize(cursor);
     return cursor;
 }
 
 /* static */ const uint8_t*
 Metadata::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
 {
     (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
     (cursor = DeserializeVector(cx, cursor, &imports)) &&
     (cursor = DeserializeVector(cx, cursor, &exports)) &&
-    (cursor = DeserializePodVector(cx, cursor, &heapAccesses)) &&
+    (cursor = DeserializePodVector(cx, cursor, &memoryAccesses)) &&
+    (cursor = DeserializePodVector(cx, cursor, &boundsChecks)) &&
     (cursor = DeserializePodVector(cx, cursor, &codeRanges)) &&
     (cursor = DeserializePodVector(cx, cursor, &callSites)) &&
     (cursor = DeserializePodVector(cx, cursor, &callThunks)) &&
     (cursor = DeserializeVector(cx, cursor, &funcNames)) &&
     (cursor = filename.deserialize(cx, cursor));
     return cursor;
 }
 
 size_t
 Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return SizeOfVectorExcludingThis(imports, mallocSizeOf) +
            SizeOfVectorExcludingThis(exports, mallocSizeOf) +
-           heapAccesses.sizeOfExcludingThis(mallocSizeOf) +
+           memoryAccesses.sizeOfExcludingThis(mallocSizeOf) +
+           boundsChecks.sizeOfExcludingThis(mallocSizeOf) +
            codeRanges.sizeOfExcludingThis(mallocSizeOf) +
            callSites.sizeOfExcludingThis(mallocSizeOf) +
            callThunks.sizeOfExcludingThis(mallocSizeOf) +
            SizeOfVectorExcludingThis(funcNames, mallocSizeOf) +
            filename.sizeOfExcludingThis(mallocSizeOf);
 }
 
 const char*
--- a/js/src/asmjs/WasmCode.h
+++ b/js/src/asmjs/WasmCode.h
@@ -404,17 +404,18 @@ struct Metadata : ShareableBase<Metadata
 {
     virtual ~Metadata() {}
 
     MetadataCacheablePod& pod() { return *this; }
     const MetadataCacheablePod& pod() const { return *this; }
 
     ImportVector          imports;
     ExportVector          exports;
-    HeapAccessVector      heapAccesses;
+    MemoryAccessVector    memoryAccesses;
+    BoundsCheckVector     boundsChecks;
     CodeRangeVector       codeRanges;
     CallSiteVector        callSites;
     CallThunkVector       callThunks;
     CacheableCharsVector  funcNames;
     CacheableChars        filename;
 
     bool usesHeap() const { return UsesHeap(heapUsage); }
     bool hasSharedHeap() const { return heapUsage == HeapUsage::Shared; }
--- a/js/src/asmjs/WasmGenerator.cpp
+++ b/js/src/asmjs/WasmGenerator.cpp
@@ -910,21 +910,23 @@ ModuleGenerator::finish(ImportNameVector
     memset(code.begin() + bytesNeeded, 0, padding);
 
     // Convert the CallSiteAndTargetVector (needed during generation) to a
     // CallSiteVector (what is stored in the Module).
     if (!metadata_->callSites.appendAll(masm_.callSites()))
         return nullptr;
 
     // The MacroAssembler has accumulated all the heap accesses during codegen.
-    metadata_->heapAccesses = masm_.extractHeapAccesses();
+    metadata_->memoryAccesses = masm_.extractMemoryAccesses();
+    metadata_->boundsChecks = masm_.extractBoundsChecks();
 
     // These Vectors can get large and the excess capacity can be significant,
     // so realloc them down to size.
-    metadata_->heapAccesses.podResizeToFit();
+    metadata_->memoryAccesses.podResizeToFit();
+    metadata_->boundsChecks.podResizeToFit();
     metadata_->codeRanges.podResizeToFit();
     metadata_->callSites.podResizeToFit();
     metadata_->callThunks.podResizeToFit();
 
     // Assert CodeRanges are sorted.
 #ifdef DEBUG
     uint32_t lastEnd = 0;
     for (const CodeRange& codeRange : metadata_->codeRanges) {
--- a/js/src/asmjs/WasmInstance.cpp
+++ b/js/src/asmjs/WasmInstance.cpp
@@ -829,40 +829,42 @@ Instance::lookupCodeRange(void* pc) cons
 
     size_t match;
     if (!BinarySearch(metadata_->codeRanges, lowerBound, upperBound, target, &match))
         return nullptr;
 
     return &metadata_->codeRanges[match];
 }
 
-struct HeapAccessOffset
+#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS
+struct MemoryAccessOffset
 {
-    const HeapAccessVector& accesses;
-    explicit HeapAccessOffset(const HeapAccessVector& accesses) : accesses(accesses) {}
+    const MemoryAccessVector& accesses;
+    explicit MemoryAccessOffset(const MemoryAccessVector& accesses) : accesses(accesses) {}
     uintptr_t operator[](size_t index) const {
         return accesses[index].insnOffset();
     }
 };
 
-const HeapAccess*
-Instance::lookupHeapAccess(void* pc) const
+const MemoryAccess*
+Instance::lookupMemoryAccess(void* pc) const
 {
     MOZ_ASSERT(codeSegment_->containsFunctionPC(pc));
 
     uint32_t target = ((uint8_t*)pc) - codeSegment_->code();
     size_t lowerBound = 0;
-    size_t upperBound = metadata_->heapAccesses.length();
+    size_t upperBound = metadata_->memoryAccesses.length();
 
     size_t match;
-    if (!BinarySearch(HeapAccessOffset(metadata_->heapAccesses), lowerBound, upperBound, target, &match))
+    if (!BinarySearch(MemoryAccessOffset(metadata_->memoryAccesses), lowerBound, upperBound, target, &match))
         return nullptr;
 
-    return &metadata_->heapAccesses[match];
+    return &metadata_->memoryAccesses[match];
 }
+#endif // ASMJS_MAY_USE_SIGNAL_HANDLERS
 
 void
 Instance::addSizeOfMisc(MallocSizeOf mallocSizeOf,
                         Metadata::SeenSet* seenMetadata,
                         ShareableBytes::SeenSet* seenBytes,
                         size_t* code, size_t* data) const
 {
     *code += codeSegment_->codeLength();
--- a/js/src/asmjs/WasmInstance.h
+++ b/js/src/asmjs/WasmInstance.h
@@ -137,17 +137,19 @@ class Instance
     // be notified so it can go back to the generic callImport.
 
     void deoptimizeImportExit(uint32_t importIndex);
 
     // Stack frame iterator support:
 
     const CallSite* lookupCallSite(void* returnAddress) const;
     const CodeRange* lookupCodeRange(void* pc) const;
-    const HeapAccess* lookupHeapAccess(void* pc) const;
+#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS
+    const MemoryAccess* lookupMemoryAccess(void* pc) const;
+#endif
 
     // about:memory reporting:
 
     void addSizeOfMisc(MallocSizeOf mallocSizeOf,
                        Metadata::SeenSet* seenMetadata,
                        ShareableBytes::SeenSet* seenBytes,
                        size_t* code, size_t* data) const;
 };
--- a/js/src/asmjs/WasmModule.h
+++ b/js/src/asmjs/WasmModule.h
@@ -130,17 +130,17 @@ static const uint32_t MemoryExport = UIN
 struct ExportMap
 {
     CacheableCharsVector fieldNames;
     Uint32Vector fieldsToExports;
 
     WASM_DECLARE_SERIALIZABLE(ExportMap)
 };
 
-// Module represents a compiled wasm module and primarily provides two 
+// Module represents a compiled wasm module and primarily provides two
 // operations: instantiation and serialization. A Module can be instantiated any
 // number of times to produce new Instance objects. A Module can be serialized
 // any number of times such that the serialized bytes can be deserialized later
 // to produce a new, equivalent Module.
 //
 // Since fully linked-and-instantiated code (represented by CodeSegment) cannot
 // be shared between instances, Module stores an unlinked, uninstantiated copy
 // of the code (represented by the Bytes) and creates a new CodeSegment each
--- a/js/src/asmjs/WasmSignalHandlers.cpp
+++ b/js/src/asmjs/WasmSignalHandlers.cpp
@@ -597,22 +597,21 @@ ComputeAccessAddress(EMULATOR_CONTEXT* c
         result += index * (uintptr_t(1) << address.scale());
     }
 
     return reinterpret_cast<uint8_t*>(result);
 }
 
 MOZ_COLD static uint8_t*
 EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
-                  const HeapAccess* heapAccess, const Instance& instance)
+                  const MemoryAccess* memoryAccess, const Instance& instance)
 {
     MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(pc));
     MOZ_RELEASE_ASSERT(instance.metadata().compileArgs.useSignalHandlersForOOB);
-    MOZ_RELEASE_ASSERT(!heapAccess->hasLengthCheck());
-    MOZ_RELEASE_ASSERT(heapAccess->insnOffset() == (pc - instance.codeSegment().code()));
+    MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - instance.codeSegment().code()));
 
     // Disassemble the instruction which caused the trap so that we can extract
     // information about it and decide what to do.
     Disassembler::HeapAccess access;
     uint8_t* end = Disassembler::DisassembleHeapAccess(pc, &access);
     const Disassembler::ComplexAddress& address = access.address();
     MOZ_RELEASE_ASSERT(end > pc);
     MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(end));
@@ -669,18 +668,18 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
     intptr_t unwrappedOffset = accessAddress - instance.heap().unwrap(/*safe - for value*/);
     uint32_t wrappedOffset = uint32_t(unwrappedOffset);
     size_t size = access.size();
     MOZ_RELEASE_ASSERT(wrappedOffset + size > wrappedOffset);
     bool inBounds = wrappedOffset + size < instance.heapLength();
 
     // If this is storing Z of an XYZ, check whether X is also in bounds, so
     // that we don't store anything before throwing.
-    MOZ_RELEASE_ASSERT(unwrappedOffset > heapAccess->offsetWithinWholeSimdVector());
-    uint32_t wrappedBaseOffset = uint32_t(unwrappedOffset - heapAccess->offsetWithinWholeSimdVector());
+    MOZ_RELEASE_ASSERT(unwrappedOffset > memoryAccess->offsetWithinWholeSimdVector());
+    uint32_t wrappedBaseOffset = uint32_t(unwrappedOffset - memoryAccess->offsetWithinWholeSimdVector());
     if (wrappedBaseOffset >= instance.heapLength())
         inBounds = false;
 
     if (inBounds) {
         // We now know that this is an access that is actually in bounds when
         // properly wrapped. Complete the load or store with the wrapped
         // address.
         SharedMem<uint8_t*> wrappedAddress = instance.heap() + wrappedOffset;
@@ -699,17 +698,17 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
             break;
           case Disassembler::HeapAccess::Unknown:
             MOZ_CRASH("Failed to disassemble instruction");
         }
     } else {
         // We now know that this is an out-of-bounds access made by an asm.js
         // load/store that we should handle.
 
-        if (heapAccess->throwOnOOB())
+        if (memoryAccess->throwOnOOB())
             return instance.codeSegment().outOfBoundsCode();
 
         switch (access.kind()) {
           case Disassembler::HeapAccess::Load:
           case Disassembler::HeapAccess::LoadSext32:
             // Assign the JS-defined result value to the destination register
             // (ToInt32(undefined) or ToNumber(undefined), determined by the
             // type of the destination register). Very conveniently, we can
@@ -728,17 +727,17 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
 
     return end;
 }
 
 #elif defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
 
 MOZ_COLD static uint8_t*
 EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
-                  const HeapAccess* heapAccess, const Instance& instance)
+                  const MemoryAccess* memoryAccess, const Instance& instance)
 {
     // TODO: Implement unaligned accesses.
     return instance.codeSegment().outOfBoundsCode();
 }
 
 #endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
 
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
@@ -799,24 +798,24 @@ HandleFault(PEXCEPTION_POINTERS exceptio
         // after ResumeThread, the exception handler is called with pc equal to
         // instance.interrupt, which is logically wrong. The Right Thing would
         // be for the OS to make fault-handling atomic (so that CONTEXT.pc was
         // always the logically-faulting pc). Fortunately, we can detect this
         // case and silence the exception ourselves (the exception will
         // retrigger after the interrupt jumps back to resumePC).
         return pc == instance.codeSegment().interruptCode() &&
                instance.codeSegment().containsFunctionPC(activation->resumePC()) &&
-               instance.lookupHeapAccess(activation->resumePC());
+               instance.lookupMemoryAccess(activation->resumePC());
     }
 
-    const HeapAccess* heapAccess = instance.lookupHeapAccess(pc);
-    if (!heapAccess)
+    const MemoryAccess* memoryAccess = instance.lookupMemoryAccess(pc);
+    if (!memoryAccess)
         return false;
 
-    *ppc = EmulateHeapAccess(context, pc, faultingAddress, heapAccess, instance);
+    *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, instance);
     return true;
 }
 
 static LONG WINAPI
 AsmJSFaultHandler(LPEXCEPTION_POINTERS exception)
 {
     if (HandleFault(exception))
         return EXCEPTION_CONTINUE_EXECUTION;
@@ -929,21 +928,21 @@ HandleMachException(JSRuntime* rt, const
 
     uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(request.body.code[1]);
 
     // This check isn't necessary, but, since we can, check anyway to make
     // sure we aren't covering up a real bug.
     if (!IsHeapAccessAddress(instance, faultingAddress))
         return false;
 
-    const HeapAccess* heapAccess = instance.lookupHeapAccess(pc);
-    if (!heapAccess)
+    const MemoryAccess* memoryAccess = instance.lookupMemoryAccess(pc);
+    if (!memoryAccess)
         return false;
 
-    *ppc = EmulateHeapAccess(&context, pc, faultingAddress, heapAccess, instance);
+    *ppc = EmulateHeapAccess(&context, pc, faultingAddress, memoryAccess, instance);
 
     // Update the thread state with the new pc and register values.
     kret = thread_set_state(rtThread, float_state, (thread_state_t)&context.float_, float_state_count);
     if (kret != KERN_SUCCESS)
         return false;
     kret = thread_set_state(rtThread, thread_state, (thread_state_t)&context.thread, thread_state_count);
     if (kret != KERN_SUCCESS)
         return false;
@@ -1131,21 +1130,21 @@ HandleFault(int signum, siginfo_t* info,
 
     uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(info->si_addr);
 
     // This check isn't necessary, but, since we can, check anyway to make
     // sure we aren't covering up a real bug.
     if (!IsHeapAccessAddress(instance, faultingAddress))
         return false;
 
-    const HeapAccess* heapAccess = instance.lookupHeapAccess(pc);
-    if (!heapAccess)
+    const MemoryAccess* memoryAccess = instance.lookupMemoryAccess(pc);
+    if (!memoryAccess)
         return false;
 
-    *ppc = EmulateHeapAccess(context, pc, faultingAddress, heapAccess, instance);
+    *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, instance);
 
     return true;
 }
 
 static struct sigaction sPrevSEGVHandler;
 
 static void
 AsmJSFaultHandler(int signum, siginfo_t* info, void* context)
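
How the surviving MemoryAccess records are consumed differs per architecture (summarized from the hunks in this file and the class definitions in WasmTypes.h below): x86 patches each access's heap-pointer immediate at link time via patchHeapPtrImmAt(); x64 keeps MemoryAccess solely so the handlers above can map a faulting pc back to the access it belongs to; ARM, ARM64 and MIPS record only BoundsChecks, so their MemoryAccess is an empty placeholder. A condensed version of the x64 fault path:

    const wasm::MemoryAccess* memoryAccess = instance.lookupMemoryAccess(pc);
    if (!memoryAccess)
        return false;   // not a recorded wasm heap access
    // EmulateHeapAccess consults throwOnOOB() and offsetWithinWholeSimdVector()
    // to decide whether to jump to the out-of-bounds stub or to complete the
    // access with a wrapped offset.
    *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, instance);
    return true;
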
--- a/js/src/asmjs/WasmTypes.h
+++ b/js/src/asmjs/WasmTypes.h
@@ -574,116 +574,104 @@ class CallSiteAndTarget : public CallSit
     static const uint32_t NOT_INTERNAL = UINT32_MAX;
 
     bool isInternal() const { return targetIndex_ != NOT_INTERNAL; }
     uint32_t targetIndex() const { MOZ_ASSERT(isInternal()); return targetIndex_; }
 };
 
 typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
 
+// Metadata for a bounds check that may need patching later.
+
+class BoundsCheck
+{
+  public:
+    BoundsCheck() = default;
+
+    explicit BoundsCheck(uint32_t cmpOffset)
+      : cmpOffset_(cmpOffset)
+    { }
+
+    uint8_t* patchAt(uint8_t* code) const { return code + cmpOffset_; }
+    void offsetBy(uint32_t offset) { cmpOffset_ += offset; }
+
+  private:
+    uint32_t cmpOffset_; // absolute offset of the comparison
+};
+
 // Summarizes a heap access made by wasm code that needs to be patched later
 // and/or looked up by the wasm signal handlers. Different architectures need
 // to know different things (x64: offset and length, ARM: where to patch in
 // heap length, x86: where to patch in heap length and base).
 
 #if defined(JS_CODEGEN_X86)
-class HeapAccess
+class MemoryAccess
 {
-    uint32_t insnOffset_;
-    uint8_t opLength_;  // the length of the load/store instruction
-    uint8_t cmpDelta_;  // the number of bytes from the cmp to the load/store instruction
+    uint32_t nextInsOffset_;
 
   public:
-    HeapAccess() = default;
-    static const uint32_t NoLengthCheck = UINT32_MAX;
+    MemoryAccess() = default;
 
-    // If 'cmp' equals 'insnOffset' or if it is not supplied then the
-    // cmpDelta_ is zero indicating that there is no length to patch.
-    HeapAccess(uint32_t insnOffset, uint32_t after, uint32_t cmp = NoLengthCheck) {
-        mozilla::PodZero(this);  // zero padding for Valgrind
-        insnOffset_ = insnOffset;
-        opLength_ = after - insnOffset;
-        cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
-    }
+    explicit MemoryAccess(uint32_t nextInsOffset)
+      : nextInsOffset_(nextInsOffset)
+    { }
 
-    uint32_t insnOffset() const { return insnOffset_; }
-    void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
-    void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
-    void* patchHeapPtrImmAt(uint8_t* code) const { return code + (insnOffset_ + opLength_); }
-    bool hasLengthCheck() const { return cmpDelta_ > 0; }
-    void* patchLengthAt(uint8_t* code) const {
-        MOZ_ASSERT(hasLengthCheck());
-        return code + (insnOffset_ - cmpDelta_);
-    }
+    void* patchHeapPtrImmAt(uint8_t* code) const { return code + nextInsOffset_; }
+    void offsetBy(uint32_t offset) { nextInsOffset_ += offset; }
 };
 #elif defined(JS_CODEGEN_X64)
-class HeapAccess
+class MemoryAccess
 {
-  public:
-    enum WhatToDoOnOOB {
-        CarryOn, // loads return undefined, stores do nothing.
-        Throw    // throw a RangeError
-    };
-
-  private:
     uint32_t insnOffset_;
     uint8_t offsetWithinWholeSimdVector_; // if is this e.g. the Z of an XYZ
     bool throwOnOOB_;                     // should we throw on OOB?
-    uint8_t cmpDelta_;                    // the number of bytes from the cmp to the load/store instruction
+    bool wrapOffset_;                     // should we wrap the offset on OOB?
 
   public:
-    HeapAccess() = default;
-    static const uint32_t NoLengthCheck = UINT32_MAX;
+    enum OutOfBoundsBehavior {
+        Throw,
+        CarryOn,
+    };
+    enum WrappingBehavior {
+        WrapOffset,
+        DontWrapOffset,
+    };
 
-    // If 'cmp' equals 'insnOffset' or if it is not supplied then the
-    // cmpDelta_ is zero indicating that there is no length to patch.
-    HeapAccess(uint32_t insnOffset, WhatToDoOnOOB oob,
-               uint32_t cmp = NoLengthCheck,
-               uint32_t offsetWithinWholeSimdVector = 0)
+    MemoryAccess() = default;
+
+    MemoryAccess(uint32_t insnOffset, OutOfBoundsBehavior onOOB, WrappingBehavior onWrap,
+                 uint32_t offsetWithinWholeSimdVector = 0)
+      : insnOffset_(insnOffset),
+        offsetWithinWholeSimdVector_(offsetWithinWholeSimdVector),
+        throwOnOOB_(onOOB == OutOfBoundsBehavior::Throw),
+        wrapOffset_(onWrap == WrappingBehavior::WrapOffset)
     {
-        mozilla::PodZero(this);  // zero padding for Valgrind
-        insnOffset_ = insnOffset;
-        offsetWithinWholeSimdVector_ = offsetWithinWholeSimdVector;
-        throwOnOOB_ = oob == Throw;
-        cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
-        MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector);
+        MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector, "fits in uint8");
     }
 
     uint32_t insnOffset() const { return insnOffset_; }
-    void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
-    void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
+    uint32_t offsetWithinWholeSimdVector() const { return offsetWithinWholeSimdVector_; }
     bool throwOnOOB() const { return throwOnOOB_; }
-    uint32_t offsetWithinWholeSimdVector() const { return offsetWithinWholeSimdVector_; }
-    bool hasLengthCheck() const { return cmpDelta_ > 0; }
-    void* patchLengthAt(uint8_t* code) const {
-        MOZ_ASSERT(hasLengthCheck());
-        return code + (insnOffset_ - cmpDelta_);
-    }
+    bool wrapOffset() const { return wrapOffset_; }
+
+    void offsetBy(uint32_t offset) { insnOffset_ += offset; }
 };
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
-      defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-class HeapAccess
-{
-    uint32_t insnOffset_;
+      defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+      defined(JS_CODEGEN_NONE)
+// Nothing! We just want bounds checks on these platforms.
+class MemoryAccess {
   public:
-    HeapAccess() = default;
-    explicit HeapAccess(uint32_t insnOffset) : insnOffset_(insnOffset) {}
-    uint32_t insnOffset() const { return insnOffset_; }
-    void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
-    void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
-};
-#elif defined(JS_CODEGEN_NONE)
-class HeapAccess {
-  public:
-    void offsetInsnOffsetBy(uint32_t) { MOZ_CRASH(); }
+    void offsetBy(uint32_t) { MOZ_CRASH(); }
     uint32_t insnOffset() const { MOZ_CRASH(); }
 };
 #endif
 
-WASM_DECLARE_POD_VECTOR(HeapAccess, HeapAccessVector)
+WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector)
+WASM_DECLARE_POD_VECTOR(BoundsCheck, BoundsCheckVector)
 
 // A wasm::SymbolicAddress represents a pointer to a well-known function or
 // object that is embedded in wasm code. Since wasm code is serialized and
 // later deserialized into a different address space, symbolic addresses must be
 // used for *all* pointers into the address space. The MacroAssembler records a
 // list of all SymbolicAddresses and the offsets of their use in the code for
 // later patching during static linking.
 
--- a/js/src/jit-test/tests/asm.js/testAtomics.js
+++ b/js/src/jit-test/tests/asm.js/testAtomics.js
@@ -1362,17 +1362,17 @@ var loadModule_int8_code =
 
 var loadModule_int8 = asmCompile('stdlib', 'foreign', 'heap', loadModule_int8_code);
 
 function test_int8(heap) {
     var i8a = new Int8Array(heap);
     var i8m = loadModule_int8(this, {}, heap);
 
     for ( var i=0 ; i < i8a.length ; i++ )
-	i8a[i] = 0;
+        i8a[i] = 0;
 
     var size = Int8Array.BYTES_PER_ELEMENT;
 
     i8a[0] = 123;
     assertEq(i8m.load(), 123);
     assertEq(i8m.load_i(0), 123);
 
     assertEq(i8m.store(), 37);
--- a/js/src/jit-test/tests/asm.js/testBug1164391.js
+++ b/js/src/jit-test/tests/asm.js/testBug1164391.js
@@ -4,23 +4,22 @@ if (!this.SharedArrayBuffer)
 load(libdir + "asserts.js");
 
 function m(stdlib, ffi, heap) {
     "use asm";
     var HEAP32 = new stdlib.Int32Array(heap);
     var add = stdlib.Atomics.add;
     var load = stdlib.Atomics.load;
     function add_sharedEv(i1) {
-	i1 = i1 | 0;
-	load(HEAP32, i1 >> 2);
-	add(HEAP32, i1 >> 2, 1);
-	load(HEAP32, i1 >> 2);
+        i1 = i1 | 0;
+        load(HEAP32, i1 >> 2);
+        add(HEAP32, i1 >> 2, 1);
+        load(HEAP32, i1 >> 2);
     }
     return {add_sharedEv:add_sharedEv};
 }
 
 if (isAsmJSCompilationAvailable())
     assertEq(isAsmJSModule(m), true);
 
 var sab = new SharedArrayBuffer(65536);
 var {add_sharedEv} = m(this, {}, sab);
 assertErrorMessage(() => add_sharedEv(sab.byteLength), RangeError, /out-of-range index/);
-
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -3301,30 +3301,29 @@ Assembler::BailoutTableStart(uint8_t* co
 {
     Instruction* inst = (Instruction*)code;
     // Skip a pool with an artificial guard or NOP fill.
     inst = inst->skipPool();
     MOZ_ASSERT(inst->is<InstBLImm>());
     return (uint8_t*) inst;
 }
 
-void Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
+void
+Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
 {
+    Instruction* inst = (Instruction*) patchAt;
     MOZ_ASSERT(inst->is<InstCMP>());
     InstCMP* cmp = inst->as<InstCMP>();
 
     Register index;
     cmp->extractOp1(&index);
 
-#ifdef DEBUG
-    Operand2 op = cmp->extractOp2();
-    MOZ_ASSERT(op.isImm8());
-#endif
-
-    Imm8 imm8 = Imm8(heapSize);
+    MOZ_ASSERT(cmp->extractOp2().isImm8());
+
+    Imm8 imm8 = Imm8(heapLength);
     MOZ_ASSERT(!imm8.invalid);
 
     *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);
     // NOTE: we don't update the Auto Flush Cache!  this function is currently
     // only called from within ModuleGenerator::finish, which does that
     // for us. Don't call this!
 }
 
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -1947,17 +1947,17 @@ class Assembler : public AssemblerShared
     static void ToggleToJmp(CodeLocationLabel inst_);
     static void ToggleToCmp(CodeLocationLabel inst_);
 
     static uint8_t* BailoutTableStart(uint8_t* code);
 
     static size_t ToggledCallSize(uint8_t* code);
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
 
-    static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst);
+    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
     void processCodeLabels(uint8_t* rawCode);
 
     bool bailed() {
         return m_buffer.bail();
     }
 
     void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
                                      const Disassembler::HeapAccess& heapAccess)
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2272,17 +2272,19 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAs
         } else {
             masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg,
                                   ToRegister(ins->output()), Offset, Assembler::Always);
         }
         memoryBarrier(mir->barrierAfter());
         return;
     }
 
-    BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
+    uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
+    masm.append(wasm::BoundsCheck(cmpOffset));
+
     if (isFloat) {
         FloatRegister dst = ToFloatRegister(ins->output());
         VFPRegister vd(dst);
         if (size == 32) {
             masm.ma_vldr(Address(GlobalReg, wasm::NaN32GlobalDataOffset - AsmJSGlobalRegBias),
                          vd.singleOverlay(), Assembler::AboveOrEqual);
             masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below);
         } else {
@@ -2294,17 +2296,16 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAs
         Register d = ToRegister(ins->output());
         if (mir->isAtomicAccess())
             masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
         else
             masm.ma_mov(Imm32(0), d, Assembler::AboveOrEqual);
         masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, d, Offset, Assembler::Below);
     }
     memoryBarrier(mir->barrierAfter());
-    masm.append(wasm::HeapAccess(bo.getOffset()));
 }
 
 void
 CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     bool isSigned;
     int size;
@@ -2355,57 +2356,55 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LA
         } else {
             masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg,
                                   ToRegister(ins->value()), Offset, Assembler::Always);
         }
         memoryBarrier(mir->barrierAfter());
         return;
     }
 
-    BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
+    uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
+    masm.append(wasm::BoundsCheck(cmpOffset));
+
     if (isFloat) {
         VFPRegister vd(ToFloatRegister(ins->value()));
         if (size == 32)
             masm.ma_vstr(vd.singleOverlay(), HeapReg, ptrReg, 0, 0, Assembler::Below);
         else
             masm.ma_vstr(vd, HeapReg, ptrReg, 0, 0, Assembler::Below);
     } else {
         if (mir->isAtomicAccess())
             masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
         masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg,
                               ToRegister(ins->value()), Offset, Assembler::Below);
     }
     memoryBarrier(mir->barrierAfter());
-    masm.append(wasm::HeapAccess(bo.getOffset()));
 }
 
 void
 CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->accessType();
     const LAllocation* ptr = ins->ptr();
     Register ptrReg = ToRegister(ptr);
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
 
-    uint32_t maybeCmpOffset = 0;
     if (mir->needsBoundsCheck()) {
-        BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
-        maybeCmpOffset = bo.getOffset();
+        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
         masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
+        masm.append(wasm::BoundsCheck(cmpOffset));
     }
     masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                         srcAddr, oldval, newval, InvalidReg,
                                         ToAnyRegister(ins->output()));
-    if (mir->needsBoundsCheck())
-        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorARM::visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout* ins)
 {
     const MAsmJSCompareExchangeHeap* mir = ins->mir();
     Scalar::Type viewType = mir->accessType();
     Register ptr = ToRegister(ins->ptr());
@@ -2431,28 +2430,24 @@ CodeGeneratorARM::visitAsmJSAtomicExchan
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    uint32_t maybeCmpOffset = 0;
     if (mir->needsBoundsCheck()) {
-        BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
-        maybeCmpOffset = bo.getOffset();
+        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
         masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
+        masm.append(wasm::BoundsCheck(cmpOffset));
     }
 
     masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
-
-    if (mir->needsBoundsCheck())
-        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* ins)
 {
     const MAsmJSAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type viewType = mir->accessType();
     Register ptr = ToRegister(ins->ptr());
@@ -2482,34 +2477,30 @@ CodeGeneratorARM::visitAsmJSAtomicBinopH
     Scalar::Type vt = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
     Register flagTemp = ToRegister(ins->flagTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
 
-    uint32_t maybeCmpOffset = 0;
     if (mir->needsBoundsCheck()) {
-        BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
-        maybeCmpOffset = bo.getOffset();
+        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
         masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
+        masm.append(wasm::BoundsCheck(cmpOffset));
     }
 
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                    Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
                                    ToAnyRegister(ins->output()));
     else
         atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                    ToRegister(value), srcAddr, flagTemp, InvalidReg,
                                    ToAnyRegister(ins->output()));
-
-    if (mir->needsBoundsCheck())
-        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MOZ_ASSERT(!ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
@@ -2517,30 +2508,26 @@ CodeGeneratorARM::visitAsmJSAtomicBinopH
     Scalar::Type vt = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
     Register flagTemp = ToRegister(ins->flagTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
 
-    uint32_t maybeCmpOffset = 0;
     if (mir->needsBoundsCheck()) {
-        BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
-        maybeCmpOffset = bo.getOffset();
+        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
         masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
+        masm.append(wasm::BoundsCheck(cmpOffset));
     }
 
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp);
     else
         atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp);
-
-    if (mir->needsBoundsCheck())
-        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicBinopCallout(LAsmJSAtomicBinopCallout* ins)
 {
     const MAsmJSAtomicBinopHeap* mir = ins->mir();
     Scalar::Type viewType = mir->accessType();
     Register ptr = ToRegister(ins->ptr());
--- a/js/src/jit/arm64/Assembler-arm64.cpp
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -630,19 +630,20 @@ Assembler::FixupNurseryObjects(JSContext
 
 void
 Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
 {
     MOZ_CRASH("PatchInstructionImmediate()");
 }
 
 void
-Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
+Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
 {
-    int32_t mask = ~(heapSize - 1);
+    Instruction* inst = (Instruction*) patchAt;
+    int32_t mask = ~(heapLength - 1);
     unsigned n, imm_s, imm_r;
     if (!IsImmLogical(mask, 32, &n, &imm_s, &imm_r))
         MOZ_CRASH("Could not encode immediate!?");
 
     inst->SetImmR(imm_r);
     inst->SetImmS(imm_s);
     inst->SetBitN(n);
 }
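
A worked example of the encoding above, for illustration only (not part of the patch): with the minimum 64 KiB asm.js heap,

    heapLength = 0x10000
    mask       = ~(heapLength - 1) = 0xFFFF0000

0xFFFF0000 is a contiguous run of 16 ones in a 32-bit pattern, so IsImmLogical() can encode it and UpdateBoundsCheck rewrites only the N/immr/imms fields of the existing masking instruction in place; a heap length that is not a power of two would fail to encode and hit the MOZ_CRASH.
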
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -368,17 +368,17 @@ class Assembler : public vixl::Assembler
             return reinterpret_cast<Instruction*>(&ldr);
         }
     };
 
     // Offset of the patchable target for the given entry.
     static const size_t OffsetOfJumpTableEntryPointer = 8;
 
   public:
-    static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst);
+    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
 
     void writeCodePointer(AbsoluteLabel* absoluteLabel) {
         MOZ_ASSERT(!absoluteLabel->bound());
         uintptr_t x = LabelBase::INVALID_OFFSET;
         BufferOffset off = EmitData(&x, sizeof(uintptr_t));
 
         // The x86/x64 makes general use of AbsoluteLabel and weaves a linked list
         // of uses of an AbsoluteLabel through the assembly. ARM only uses labels
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1807,17 +1807,17 @@ CodeGeneratorMIPSShared::visitAsmJSLoadH
         if (mir->isAtomicAccess())
             masm.ma_b(wasm::JumpTarget::OutOfBounds);
         else
             masm.move32(Imm32(0), ToRegister(out));
     }
     masm.bind(&done);
 
     memoryBarrier(mir->barrierAfter());
-    masm.append(wasm::HeapAccess(bo.getOffset()));
+    masm.append(wasm::BoundsCheck(bo.getOffset()));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     const LAllocation* value = ins->value();
     const LAllocation* ptr = ins->ptr();
@@ -1895,17 +1895,17 @@ CodeGeneratorMIPSShared::visitAsmJSStore
     masm.ma_b(&done, ShortJump);
     masm.bind(&outOfRange);
     // Offset is out of range.
     if (mir->isAtomicAccess())
         masm.ma_b(wasm::JumpTarget::OutOfBounds);
     masm.bind(&done);
 
     memoryBarrier(mir->barrierAfter());
-    masm.append(wasm::HeapAccess(bo.getOffset()));
+    masm.append(wasm::BoundsCheck(bo.getOffset()));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->accessType();
     const LAllocation* ptr = ins->ptr();
@@ -1925,17 +1925,17 @@ CodeGeneratorMIPSShared::visitAsmJSCompa
         maybeCmpOffset = bo.getOffset();
         masm.ma_b(ptrReg, ScratchRegister, wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
     }
     masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                         srcAddr, oldval, newval, InvalidReg,
                                         valueTemp, offsetTemp, maskTemp,
                                         ToAnyRegister(ins->output()));
     if (mir->needsBoundsCheck())
-        masm.append(wasm::HeapAccess(maybeCmpOffset));
+        masm.append(wasm::BoundsCheck(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
@@ -1952,17 +1952,17 @@ CodeGeneratorMIPSShared::visitAsmJSAtomi
         BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
         maybeCmpOffset = bo.getOffset();
         masm.ma_b(ptrReg, ScratchRegister, wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
     }
     masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr, value, InvalidReg, valueTemp,
                                        offsetTemp, maskTemp, ToAnyRegister(ins->output()));
     if (mir->needsBoundsCheck())
-        masm.append(wasm::HeapAccess(maybeCmpOffset));
+        masm.append(wasm::BoundsCheck(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
     MOZ_ASSERT(ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
@@ -1990,17 +1990,17 @@ CodeGeneratorMIPSShared::visitAsmJSAtomi
                                    valueTemp, offsetTemp, maskTemp,
                                    ToAnyRegister(ins->output()));
     else
         atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                    ToRegister(value), srcAddr, flagTemp, InvalidReg,
                                    valueTemp, offsetTemp, maskTemp,
                                    ToAnyRegister(ins->output()));
     if (mir->needsBoundsCheck())
-        masm.append(wasm::HeapAccess(maybeCmpOffset));
+        masm.append(wasm::BoundsCheck(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MOZ_ASSERT(!ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
@@ -2026,17 +2026,17 @@ CodeGeneratorMIPSShared::visitAsmJSAtomi
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp,
                                    valueTemp, offsetTemp, maskTemp);
     else
         atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp,
                                    valueTemp, offsetTemp, maskTemp);
 
     if (mir->needsBoundsCheck())
-        masm.append(wasm::HeapAccess(maybeCmpOffset));
+        masm.append(wasm::BoundsCheck(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSPassStackArg(LAsmJSPassStackArg* ins)
 {
     const MAsmJSPassStackArg* mir = ins->mir();
     if (ins->arg()->isConstant()) {
         masm.storePtr(ImmWord(ToInt32(ins->arg())), Address(StackPointer, mir->spOffset()));
--- a/js/src/jit/mips32/Assembler-mips32.cpp
+++ b/js/src/jit/mips32/Assembler-mips32.cpp
@@ -444,16 +444,17 @@ Assembler::ToggleCall(CodeLocationLabel 
         InstNOP nop;
         *i2 = nop;
     }
 
     AutoFlushICache::flush(uintptr_t(i2), 4);
 }
 
 void
-Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
+Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
 {
+    Instruction* inst = (Instruction*) patchAt;
     InstImm* i0 = (InstImm*) inst;
     InstImm* i1 = (InstImm*) i0->next();
 
     // Replace with new value
-    Assembler::UpdateLuiOriValue(i0, i1, heapSize);
+    Assembler::UpdateLuiOriValue(i0, i1, heapLength);
 }
--- a/js/src/jit/mips32/Assembler-mips32.h
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -161,17 +161,17 @@ class Assembler : public AssemblerMIPSSh
     static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                         PatchedImmPtr expectedValue);
 
     static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
     static uint32_t ExtractInstructionImmediate(uint8_t* code);
 
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
 
-    static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst);
+    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
 }; // Assembler
 
 static const uint32_t NumIntArgRegs = 4;
 
 static inline bool
 GetIntArgReg(uint32_t usedArgSlots, Register* out)
 {
     if (usedArgSlots < NumIntArgRegs) {
--- a/js/src/jit/mips64/Assembler-mips64.cpp
+++ b/js/src/jit/mips64/Assembler-mips64.cpp
@@ -485,13 +485,13 @@ Assembler::ToggleCall(CodeLocationLabel 
         InstNOP nop;
         *i4 = nop;
     }
 
     AutoFlushICache::flush(uintptr_t(i4), sizeof(uint32_t));
 }
 
 void
-Assembler::UpdateBoundsCheck(uint64_t heapSize, Instruction* inst)
+Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
 {
     // Replace with new value
-    Assembler::UpdateLoad64Value(inst, heapSize);
+    Assembler::UpdateLoad64Value((Instruction*) patchAt, heapLength);
 }
--- a/js/src/jit/mips64/Assembler-mips64.h
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -163,17 +163,17 @@ class Assembler : public AssemblerMIPSSh
     static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                         PatchedImmPtr expectedValue);
 
     static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
     static uint64_t ExtractInstructionImmediate(uint8_t* code);
 
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
 
-    static void UpdateBoundsCheck(uint64_t logHeapSize, Instruction* inst);
+    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
 }; // Assembler
 
 static const uint32_t NumIntArgRegs = 8;
 static const uint32_t NumFloatArgRegs = NumIntArgRegs;
 
 static inline bool
 GetIntArgReg(uint32_t usedArgSlots, Register* out)
 {
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -142,16 +142,18 @@ class Assembler : public AssemblerShared
     static void PatchWrite_NearCall(CodeLocationLabel, CodeLocationLabel) { MOZ_CRASH(); }
     static uint32_t PatchWrite_NearCallSize() { MOZ_CRASH(); }
     static void PatchInstructionImmediate(uint8_t*, PatchedImmPtr) { MOZ_CRASH(); }
 
     static void ToggleToJmp(CodeLocationLabel) { MOZ_CRASH(); }
     static void ToggleToCmp(CodeLocationLabel) { MOZ_CRASH(); }
     static void ToggleCall(CodeLocationLabel, bool) { MOZ_CRASH(); }
 
+    static void UpdateBoundsCheck(uint8_t*, uint32_t) { MOZ_CRASH(); }
+
     static uintptr_t GetPointer(uint8_t*) { MOZ_CRASH(); }
 
     void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
                                      const Disassembler::HeapAccess& heapAccess)
     {
         MOZ_CRASH();
     }
 };
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -702,17 +702,18 @@ struct AsmJSAbsoluteAddress
     wasm::SymbolicAddress target;
 };
 
 // The base class of all Assemblers for all archs.
 class AssemblerShared
 {
     wasm::CallSiteAndTargetVector callsites_;
     wasm::JumpSiteArray jumpsites_;
-    wasm::HeapAccessVector heapAccesses_;
+    wasm::MemoryAccessVector memoryAccesses_;
+    wasm::BoundsCheckVector boundsChecks_;
     Vector<AsmJSGlobalAccess, 0, SystemAllocPolicy> asmJSGlobalAccesses_;
     Vector<AsmJSAbsoluteAddress, 0, SystemAllocPolicy> asmJSAbsoluteAddresses_;
 
   protected:
     Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
 
     bool enoughMemory_;
     bool embedsNurseryPointers_;
@@ -750,18 +751,21 @@ class AssemblerShared
     wasm::CallSiteAndTargetVector& callSites() { return callsites_; }
 
     void append(wasm::JumpTarget target, uint32_t offset) {
         enoughMemory_ &= jumpsites_[target].append(offset);
     }
     const wasm::JumpSiteArray& jumpSites() { return jumpsites_; }
     void clearJumpSites() { for (auto& v : jumpsites_) v.clear(); }
 
-    void append(wasm::HeapAccess access) { enoughMemory_ &= heapAccesses_.append(access); }
-    wasm::HeapAccessVector&& extractHeapAccesses() { return Move(heapAccesses_); }
+    void append(wasm::MemoryAccess access) { enoughMemory_ &= memoryAccesses_.append(access); }
+    wasm::MemoryAccessVector&& extractMemoryAccesses() { return Move(memoryAccesses_); }
+
+    void append(wasm::BoundsCheck check) { enoughMemory_ &= boundsChecks_.append(check); }
+    wasm::BoundsCheckVector&& extractBoundsChecks() { return Move(boundsChecks_); }
 
     void append(AsmJSGlobalAccess access) { enoughMemory_ &= asmJSGlobalAccesses_.append(access); }
     size_t numAsmJSGlobalAccesses() const { return asmJSGlobalAccesses_.length(); }
     AsmJSGlobalAccess asmJSGlobalAccess(size_t i) const { return asmJSGlobalAccesses_[i]; }
 
     void append(AsmJSAbsoluteAddress link) { enoughMemory_ &= asmJSAbsoluteAddresses_.append(link); }
     size_t numAsmJSAbsoluteAddresses() const { return asmJSAbsoluteAddresses_.length(); }
     AsmJSAbsoluteAddress asmJSAbsoluteAddress(size_t i) const { return asmJSAbsoluteAddresses_[i]; }
@@ -789,20 +793,25 @@ class AssemblerShared
         for (wasm::JumpTarget target : mozilla::MakeEnumeratedRange(wasm::JumpTarget::Limit)) {
             wasm::Uint32Vector& offsets = jumpsites_[target];
             i = offsets.length();
             enoughMemory_ &= offsets.appendAll(other.jumpsites_[target]);
             for (; i < offsets.length(); i++)
                 offsets[i] += delta;
         }
 
-        i = heapAccesses_.length();
-        enoughMemory_ &= heapAccesses_.appendAll(other.heapAccesses_);
-        for (; i < heapAccesses_.length(); i++)
-            heapAccesses_[i].offsetInsnOffsetBy(delta);
+        i = memoryAccesses_.length();
+        enoughMemory_ &= memoryAccesses_.appendAll(other.memoryAccesses_);
+        for (; i < memoryAccesses_.length(); i++)
+            memoryAccesses_[i].offsetBy(delta);
+
+        i = boundsChecks_.length();
+        enoughMemory_ &= boundsChecks_.appendAll(other.boundsChecks_);
+        for (; i < boundsChecks_.length(); i++)
+            boundsChecks_[i].offsetBy(delta);
 
         i = asmJSGlobalAccesses_.length();
         enoughMemory_ &= asmJSGlobalAccesses_.appendAll(other.asmJSGlobalAccesses_);
         for (; i < asmJSGlobalAccesses_.length(); i++)
             asmJSGlobalAccesses_[i].patchAt.offsetBy(delta);
 
         i = asmJSAbsoluteAddresses_.length();
         enoughMemory_ &= asmJSAbsoluteAddresses_.appendAll(other.asmJSAbsoluteAddresses_);
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -615,68 +615,76 @@ CodeGeneratorX64::loadSimd(Scalar::Type 
       case Scalar::Float32:
       case Scalar::Float64:
       case Scalar::Uint8Clamped:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("should only handle SIMD types");
     }
 }
 
+static wasm::MemoryAccess
+AsmJSMemoryAccess(uint32_t before, wasm::MemoryAccess::OutOfBoundsBehavior throwBehavior,
+                  uint32_t offsetWithinWholeSimdVector = 0)
+{
+    return wasm::MemoryAccess(before, throwBehavior, wasm::MemoryAccess::WrapOffset,
+                              offsetWithinWholeSimdVector);
+}
+
 void
 CodeGeneratorX64::emitSimdLoad(LAsmJSLoadHeap* ins)
 {
     const MAsmJSLoadHeap* mir = ins->mir();
     Scalar::Type type = mir->accessType();
     FloatRegister out = ToFloatRegister(ins->output());
     const LAllocation* ptr = ins->ptr();
     Operand srcAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
-    uint32_t maybeCmpOffset = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
+    bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
 
     unsigned numElems = mir->numSimdElems();
     if (numElems == 3) {
         MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
 
         Operand srcAddrZ =
             ptr->isBogus()
             ? Operand(HeapReg, 2 * sizeof(float) + mir->offset())
             : Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float) + mir->offset());
 
         // Load XY
         uint32_t before = masm.size();
         loadSimd(type, 2, srcAddr, out);
         uint32_t after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, 2, srcAddr,
                                     *ins->output()->output());
-        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
+        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
 
         // Load Z (W is zeroed)
         // This is still in bounds, as we've checked with a manual bounds check
         // or we had enough space for sure when removing the bounds check.
         before = after;
         loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg);
         after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, 1, srcAddrZ,
                                     LFloatReg(ScratchSimd128Reg));
-        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw,
-                                     wasm::HeapAccess::NoLengthCheck, 8));
+        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw, 8));
 
         // Move ZW atop XY
         masm.vmovlhps(ScratchSimd128Reg, out, out);
     } else {
         uint32_t before = masm.size();
         loadSimd(type, numElems, srcAddr, out);
         uint32_t after = masm.size();
-        verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, numElems, srcAddr, *ins->output()->output());
-        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
+        verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, numElems, srcAddr,
+                                    *ins->output()->output());
+        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
     }
 
-    if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
+    if (hasBoundsCheck)
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
 }
 
 void
 CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
 {
     const MAsmJSLoadHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
@@ -688,17 +696,17 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAs
     const LDefinition* out = ins->output();
     Operand srcAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
     memoryBarrier(mir->barrierBefore());
 
     OutOfLineLoadTypedArrayOutOfBounds* ool;
-    uint32_t maybeCmpOffset = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);
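+    // The bounds-check flag is only consulted in debug builds, to assert
+    // below that having an out-of-line path implies a check was emitted.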
+    DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);
 
     uint32_t before = masm.size();
     switch (accessType) {
       case Scalar::Int8:      masm.movsbl(srcAddr, ToRegister(out)); break;
       case Scalar::Uint8:     masm.movzbl(srcAddr, ToRegister(out)); break;
       case Scalar::Int16:     masm.movswl(srcAddr, ToRegister(out)); break;
       case Scalar::Uint16:    masm.movzwl(srcAddr, ToRegister(out)); break;
       case Scalar::Int32:
@@ -713,23 +721,24 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAs
       case Scalar::MaxTypedArrayViewType:
           MOZ_CRASH("unexpected array type");
     }
     uint32_t after = masm.size();
 
     verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, accessType, 0, srcAddr, *out->output());
 
     if (ool) {
+        MOZ_ASSERT(hasBoundsCheck);
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
         masm.bind(ool->rejoin());
     }
 
     memoryBarrier(mir->barrierAfter());
 
-    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::CarryOn, maybeCmpOffset));
+    masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
 }
 
 void
 CodeGeneratorX64::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in,
                             const Operand& dstAddr)
 {
     switch (type) {
       case Scalar::Float32x4: {
@@ -782,55 +791,55 @@ CodeGeneratorX64::emitSimdStore(LAsmJSSt
     const MAsmJSStoreHeap* mir = ins->mir();
     Scalar::Type type = mir->accessType();
     FloatRegister in = ToFloatRegister(ins->value());
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
-    uint32_t maybeCmpOffset = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
+    bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
 
     unsigned numElems = mir->numSimdElems();
     if (numElems == 3) {
         MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
 
         Operand dstAddrZ =
             ptr->isBogus()
             ? Operand(HeapReg, 2 * sizeof(float) + mir->offset())
             : Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float) + mir->offset());
 
         // It's possible that the Z could be out of bounds when the XY is in
         // bounds. To avoid storing the XY before the exception is thrown, we
-        // store the Z first, and record its offset in the HeapAccess so
+        // store the Z first, and record its offset in the MemoryAccess so
         // that the signal handler knows to check the bounds of the full
         // access, rather than just the Z.
         masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg);
         uint32_t before = masm.size();
         storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ);
         uint32_t after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, 1, dstAddrZ,
                                     LFloatReg(ScratchSimd128Reg));
-        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset, 8));
+        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw, 8));
 
         // Store XY
         before = after;
         storeSimd(type, 2, in, dstAddr);
         after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, 2, dstAddr, *ins->value());
-        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw));
+        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
     } else {
         uint32_t before = masm.size();
         storeSimd(type, numElems, in, dstAddr);
         uint32_t after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, numElems, dstAddr, *ins->value());
-        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
+        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
     }
 
-    if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
+    if (hasBoundsCheck)
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
 }
 
 void
 CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
@@ -844,17 +853,17 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LA
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
     memoryBarrier(mir->barrierBefore());
 
     Label* rejoin;
-    uint32_t maybeCmpOffset = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin);
+    DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin);
 
     uint32_t before = masm.size();
     if (value->isConstant()) {
         switch (accessType) {
           case Scalar::Int8:
           case Scalar::Uint8:        masm.movb(Imm32(ToInt32(value)), dstAddr); break;
           case Scalar::Int16:
           case Scalar::Uint16:       masm.movw(Imm32(ToInt32(value)), dstAddr); break;
@@ -900,167 +909,147 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LA
             MOZ_CRASH("unexpected array type");
         }
     }
     uint32_t after = masm.size();
 
     verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, accessType, 0, dstAddr, *value);
 
     if (rejoin) {
+        MOZ_ASSERT(hasBoundsCheck);
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
         masm.bind(rejoin);
     }
 
     memoryBarrier(mir->barrierAfter());
 
-    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::CarryOn, maybeCmpOffset));
+    masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
+}
+
+static void
+MaybeAddAtomicsBoundsCheck(MacroAssemblerX64& masm, MAsmJSHeapAccess* mir, Register ptr)
+{
+    if (!mir->needsBoundsCheck())
+        return;
+
+    // Note that we can't use the same machinery as for normal asm.js
+    // loads/stores, since signal-handler bounds checking is not yet
+    // implemented for atomic accesses.
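+    //
+    // The comparison immediate starts out as -endOffset and is patched to
+    // heapLength - endOffset by Assembler::UpdateBoundsCheck, so an unsigned
+    // "Above" branch catches any access past the end of the heap.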
+    uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(-mir->endOffset())).offset();
+    masm.append(wasm::BoundsCheck(cmpOffset));
+    masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
 }
 
 void
 CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
+    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
     MAsmJSCompareExchangeHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
-    const LAllocation* ptr = ins->ptr();
 
-    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
-    MOZ_ASSERT(ptr->isRegister());
-    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
-
+    Register ptr = ToRegister(ins->ptr());
+    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
 
-    // Note that we can't use the same machinery as normal asm.js loads/stores
-    // since signal-handler bounds checking is not yet implemented for atomic accesses.
-    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
-    if (mir->needsBoundsCheck()) {
-        maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
-        masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
-    }
-    uint32_t before = masm.size();
+    MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
+
     masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                         srcAddr,
                                         oldval,
                                         newval,
                                         InvalidReg,
                                         ToAnyRegister(ins->output()));
     MOZ_ASSERT(mir->offset() == 0,
                "The AsmJS signal handler doesn't yet support emulating "
                "atomic accesses in the case of a fault from an unwrapped offset");
-    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
+    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+    MOZ_ASSERT(ins->mir()->accessType() <= Scalar::Uint32);
+
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
-    const LAllocation* ptr = ins->ptr();
 
-    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
-    MOZ_ASSERT(ptr->isRegister());
-    MOZ_ASSERT(accessType <= Scalar::Uint32);
-
-    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
+    Register ptr = ToRegister(ins->ptr());
+    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
     Register value = ToRegister(ins->value());
 
-    // Note that we can't use the same machinery as normal asm.js loads/stores
-    // since signal-handler bounds checking is not yet implemented for atomic accesses.
-    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
-    if (mir->needsBoundsCheck()) {
-        maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
-        masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
-    }
-    uint32_t before = masm.size();
+    MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
+
     masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        srcAddr,
                                        value,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
     MOZ_ASSERT(mir->offset() == 0,
                "The AsmJS signal handler doesn't yet support emulating "
                "atomic accesses in the case of a fault from an unwrapped offset");
-    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
     MOZ_ASSERT(ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     MAsmJSAtomicBinopHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
-    Register ptrReg = ToRegister(ins->ptr());
-    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
-    const LAllocation* value = ins->value();
+    accessType = accessType == Scalar::Uint32 ? Scalar::Int32 : accessType;
     AtomicOp op = mir->operation();
 
-    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->offset());
+    Register ptr = ToRegister(ins->ptr());
+    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
+    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
 
-    // Note that we can't use the same machinery as normal asm.js loads/stores
-    // since signal-handler bounds checking is not yet implemented for atomic accesses.
-    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
-    if (mir->needsBoundsCheck()) {
-        maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
-        masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
-    }
-    uint32_t before = masm.size();
+    const LAllocation* value = ins->value();
+
+    MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
+
+    AnyRegister output = ToAnyRegister(ins->output());
     if (value->isConstant()) {
-        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
-                                   Imm32(ToInt32(value)),
-                                   srcAddr,
-                                   temp,
-                                   InvalidReg,
-                                   ToAnyRegister(ins->output()));
+        atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
+                                   output);
     } else {
-        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
-                                   ToRegister(value),
-                                   srcAddr,
-                                   temp,
-                                   InvalidReg,
-                                   ToAnyRegister(ins->output()));
+        atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr, temp, InvalidReg,
+                                   output);
     }
+
     MOZ_ASSERT(mir->offset() == 0,
                "The AsmJS signal handler doesn't yet support emulating "
                "atomic accesses in the case of a fault from an unwrapped offset");
-    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MOZ_ASSERT(!ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     MAsmJSAtomicBinopHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
-    Register ptrReg = ToRegister(ins->ptr());
-    const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
-    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->offset());
+    Register ptr = ToRegister(ins->ptr());
+    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
+    const LAllocation* value = ins->value();
 
-    // Note that we can't use the same machinery as normal asm.js loads/stores
-    // since signal-handler bounds checking is not yet implemented for atomic accesses.
-    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
-    if (mir->needsBoundsCheck()) {
-        maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
-        masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
-    }
+    MaybeAddAtomicsBoundsCheck(masm, mir, ptr);
 
-    uint32_t before = masm.size();
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
     else
         atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
     MOZ_ASSERT(mir->offset() == 0,
                "The AsmJS signal handler doesn't yet support emulating "
                "atomic accesses in the case of a fault from an unwrapped offset");
-    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX64::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
 {
     MAsmJSLoadGlobalVar* mir = ins->mir();
 
     MIRType type = mir->type();
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -1074,16 +1074,31 @@ class AssemblerX86Shared : public Assemb
     }
     static void patchTwoByteNopToJump(uint8_t* jump, uint8_t* target) {
         X86Encoding::BaseAssembler::patchTwoByteNopToJump(jump, target);
     }
     static void patchJumpToTwoByteNop(uint8_t* jump) {
         X86Encoding::BaseAssembler::patchJumpToTwoByteNop(jump);
     }
 
+    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength) {
+        // An access is out-of-bounds iff
+        //          ptr + offset + data-type-byte-size > heapLength
+        //     i.e. ptr > heapLength - data-type-byte-size - offset.
+        // The data-type byte size and the offset are already included in the
+        // addend, so we just have to add the heap length here.
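+        // For example, for a 4-byte access at constant offset 16 into a
+        // 64 KiB heap, the addend starts out as -(16 + 4), so adding the heap
+        // length turns the check into ptr > 65536 - 20, i.e. ptr > 65516.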
+        //
+        // On x64, even though signal handling is used for most bounds checks,
+        // there may be atomic operations that depend on explicit checks. Only
+        // the accesses recorded here need bounds checks (see also
+        // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap).
+        X86Encoding::AddInt32(patchAt, heapLength);
+    }
+
     void breakpoint() {
         masm.int3();
     }
 
     static bool HasSSE2() { return CPUInfo::IsSSE2Present(); }
     static bool HasSSE3() { return CPUInfo::IsSSE3Present(); }
     static bool HasSSSE3() { return CPUInfo::IsSSSE3Present(); }
     static bool HasSSE41() { return CPUInfo::IsSSE41Present(); }
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -419,17 +419,17 @@ CodeGeneratorX86Shared::visitOffsetBound
     // pointer to 32-bits. We'll zero out the sign extension immediately
     // after the access to restore asm.js invariants.
     masm.movslq(oolCheck->ptrReg(), oolCheck->ptrReg());
 #endif
 
     masm.jmp(oolCheck->rejoin());
 }
 
-uint32_t
+void
 CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access,
                                                    const MInstruction* mir,
                                                    Register ptr, Label* maybeFail)
 {
     // Emit a bounds-checking branch for |access|.
 
     MOZ_ASSERT(gen->needsAsmJSBoundsCheckBranch(access));
 
@@ -455,65 +455,68 @@ CodeGeneratorX86Shared::emitAsmJSBoundsC
     if (maybeFail)
         masm.j(Assembler::Above, maybeFail);
     else
         masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
 
     if (pass)
         masm.bind(pass);
 
-    return cmpOffset;
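+    // Record the comparison so that its immediate can later be patched with
+    // the heap length (see Assembler::UpdateBoundsCheck).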
+    masm.append(wasm::BoundsCheck(cmpOffset));
 }
 
-uint32_t
+bool
 CodeGeneratorX86Shared::maybeEmitThrowingAsmJSBoundsCheck(const MAsmJSHeapAccess* access,
                                                           const MInstruction* mir,
                                                           const LAllocation* ptr)
 {
     if (!gen->needsAsmJSBoundsCheckBranch(access))
-        return wasm::HeapAccess::NoLengthCheck;
-
-    return emitAsmJSBoundsCheckBranch(access, mir, ToRegister(ptr), nullptr);
+        return false;
+
+    emitAsmJSBoundsCheckBranch(access, mir, ToRegister(ptr), nullptr);
+    return true;
 }
 
-uint32_t
+bool
 CodeGeneratorX86Shared::maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins,
                                                       OutOfLineLoadTypedArrayOutOfBounds** ool)
 {
     MOZ_ASSERT(!Scalar::isSimdType(mir->accessType()));
     *ool = nullptr;
 
     if (!gen->needsAsmJSBoundsCheckBranch(mir))
-        return wasm::HeapAccess::NoLengthCheck;
-
-    if (mir->isAtomicAccess())
-        return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), nullptr);
-
-    *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(ins->output()),
-                                                           mir->accessType());
-
-    addOutOfLineCode(*ool, mir);
-    return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), (*ool)->entry());
+        return false;
+
+    Label* rejoin = nullptr;
+    if (!mir->isAtomicAccess()) {
+        *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(ins->output()),
+                                                               mir->accessType());
+        addOutOfLineCode(*ool, mir);
+        rejoin = (*ool)->entry();
+    }
+
+    emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), rejoin);
+    return true;
 }
 
-uint32_t
+bool
 CodeGeneratorX86Shared::maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins,
                                                        Label** rejoin)
 {
     MOZ_ASSERT(!Scalar::isSimdType(mir->accessType()));
+
     *rejoin = nullptr;
-
     if (!gen->needsAsmJSBoundsCheckBranch(mir))
-        return wasm::HeapAccess::NoLengthCheck;
-
-    if (mir->isAtomicAccess())
-        return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), nullptr);
-
-    *rejoin = alloc().lifoAlloc()->newInfallible<Label>();
-    return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), *rejoin);
+        return false;
+
+    if (!mir->isAtomicAccess())
+        *rejoin = alloc().lifoAlloc()->newInfallible<Label>();
+
+    emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), *rejoin);
+    return true;
 }
 
 void
 CodeGeneratorX86Shared::cleanupAfterAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access,
                                                            Register ptr)
 {
     // Clean up after performing a heap access checked by a branch.
 
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
@@ -89,35 +89,34 @@ class CodeGeneratorX86Shared : public Co
         LInstruction* ins() const { return ins_; }
 
         void accept(CodeGeneratorX86Shared* codegen) {
             codegen->visitOutOfLineSimdFloatToIntCheck(this);
         }
     };
 
   private:
-    MOZ_MUST_USE uint32_t
+    void
     emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* mir, const MInstruction* ins,
                                Register ptr, Label* fail);
 
   public:
     // For SIMD and atomic loads and stores (which throw on out-of-bounds):
-    MOZ_MUST_USE uint32_t
+    bool
     maybeEmitThrowingAsmJSBoundsCheck(const MAsmJSHeapAccess* mir, const MInstruction* ins,
                                       const LAllocation* ptr);
 
     // For asm.js plain and atomic loads that possibly require a bounds check:
-    MOZ_MUST_USE uint32_t
+    bool
     maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins,
                                   OutOfLineLoadTypedArrayOutOfBounds** ool);
 
     // For asm.js plain and atomic stores that possibly require a bounds check:
-    MOZ_MUST_USE uint32_t
-    maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins,
-                                   Label** rejoin);
+    bool
+    maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins, Label** rejoin);
 
     void cleanupAfterAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* mir, Register ptr);
 
     NonAssertingLabel deoptLabel_;
 
     Operand ToOperand(const LAllocation& a);
     Operand ToOperand(const LAllocation* a);
     Operand ToOperand(const LDefinition* def);
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -400,51 +400,45 @@ CodeGeneratorX86::emitSimdLoad(LAsmJSLoa
     const MAsmJSLoadHeap* mir = ins->mir();
     Scalar::Type type = mir->accessType();
     FloatRegister out = ToFloatRegister(ins->output());
     const LAllocation* ptr = ins->ptr();
     Operand srcAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress(mir->offset()))
                       : Operand(ToRegister(ptr), mir->offset());
 
-    uint32_t maybeCmpOffset = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
+    bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
 
     unsigned numElems = mir->numSimdElems();
     if (numElems == 3) {
         MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
 
         Operand srcAddrZ =
             ptr->isBogus()
             ? Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset()))
             : Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());
 
         // Load XY
-        uint32_t before = masm.size();
         loadSimd(type, 2, srcAddr, out);
-        uint32_t after = masm.size();
-        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
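+        // On x86, each heap access records a MemoryAccess at the end of its
+        // instruction so that the heap base can later be added to the
+        // address immediate.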
+        masm.append(wasm::MemoryAccess(masm.size()));
 
         // Load Z (W is zeroed)
         // This is still in bounds, as we've checked with a manual bounds check
         // or we had enough space for sure when removing the bounds check.
-        before = after;
         loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg);
-        after = masm.size();
-        masm.append(wasm::HeapAccess(before, after));
+        masm.append(wasm::MemoryAccess(masm.size()));
 
         // Move ZW atop XY
         masm.vmovlhps(ScratchSimd128Reg, out, out);
     } else {
-        uint32_t before = masm.size();
         loadSimd(type, numElems, srcAddr, out);
-        uint32_t after = masm.size();
-        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
+        masm.append(wasm::MemoryAccess(masm.size()));
     }
 
-    if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
+    if (hasBoundsCheck)
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
 }
 
 void
 CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
 {
     const MAsmJSLoadHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
@@ -456,30 +450,30 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAs
     const LDefinition* out = ins->output();
     Operand srcAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress(mir->offset()))
                       : Operand(ToRegister(ptr), mir->offset());
 
     memoryBarrier(mir->barrierBefore());
 
     OutOfLineLoadTypedArrayOutOfBounds* ool;
-    uint32_t maybeCmpOffset = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);
+    DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);
 
-    uint32_t before = masm.size();
     load(accessType, srcAddr, out);
     uint32_t after = masm.size();
 
     if (ool) {
+        MOZ_ASSERT(hasBoundsCheck);
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
         masm.bind(ool->rejoin());
     }
 
     memoryBarrier(mir->barrierAfter());
 
-    masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
+    masm.append(wasm::MemoryAccess(after));
 }
 
 void
 CodeGeneratorX86::store(Scalar::Type accessType, const LAllocation* value, const Operand& dstAddr)
 {
     switch (accessType) {
       case Scalar::Int8:
       case Scalar::Uint8Clamped:
@@ -598,50 +592,44 @@ CodeGeneratorX86::emitSimdStore(LAsmJSSt
     const MAsmJSStoreHeap* mir = ins->mir();
     Scalar::Type type = mir->accessType();
     FloatRegister in = ToFloatRegister(ins->value());
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress(mir->offset()))
                       : Operand(ToRegister(ptr), mir->offset());
 
-    uint32_t maybeCmpOffset = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
+    bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
 
     unsigned numElems = mir->numSimdElems();
     if (numElems == 3) {
         MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
 
         Operand dstAddrZ =
             ptr->isBogus()
             ? Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset()))
             : Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());
 
         // Store XY
-        uint32_t before = masm.size();
         storeSimd(type, 2, in, dstAddr);
-        uint32_t after = masm.size();
-        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
+        masm.append(wasm::MemoryAccess(masm.size()));
 
         masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg);
 
         // Store Z (W is zeroed)
         // This is still in bounds, as we've checked with a manual bounds check
         // or we had enough space for sure when removing the bounds check.
-        before = masm.size();
         storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ);
-        after = masm.size();
-        masm.append(wasm::HeapAccess(before, after));
+        masm.append(wasm::MemoryAccess(masm.size()));
     } else {
-        uint32_t before = masm.size();
         storeSimd(type, numElems, in, dstAddr);
-        uint32_t after = masm.size();
-        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
+        masm.append(wasm::MemoryAccess(masm.size()));
     }
 
-    if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
+    if (hasBoundsCheck)
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
 }
 
 void
 CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
@@ -655,30 +643,30 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LA
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress(mir->offset()))
                       : Operand(ToRegister(ptr), mir->offset());
 
     memoryBarrier(mir->barrierBefore());
 
     Label* rejoin;
-    uint32_t maybeCmpOffset = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin);
+    DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin);
 
-    uint32_t before = masm.size();
     store(accessType, value, dstAddr);
     uint32_t after = masm.size();
 
     if (rejoin) {
+        MOZ_ASSERT(hasBoundsCheck);
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
         masm.bind(rejoin);
     }
 
     memoryBarrier(mir->barrierAfter());
 
-    masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
+    masm.append(wasm::MemoryAccess(after));
 }
 
 void
 CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
@@ -701,30 +689,27 @@ CodeGeneratorX86::visitAsmJSCompareExcha
 // Perform bounds checking on the access if necessary; if it fails,
 // jump to out-of-line code that throws.  If the bounds check passes,
 // set up the heap address in addrTemp.
 
 void
 CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg, bool boundsCheck,
                                             uint32_t offset, uint32_t endOffset)
 {
-    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
-
     if (boundsCheck) {
-        maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-endOffset)).offset();
+        uint32_t cmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-endOffset)).offset();
         masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
+        masm.append(wasm::BoundsCheck(cmpOffset));
     }
 
     // Add in the actual heap pointer explicitly, to avoid opening up
     // the abstraction that is atomicBinopToTypedIntArray at this time.
     masm.movl(ptrReg, addrTemp);
-    uint32_t before = masm.size();
     masm.addlWithPatch(Imm32(offset), addrTemp);
-    uint32_t after = masm.size();
-    masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
+    masm.append(wasm::MemoryAccess(masm.size()));
 }
 
 void
 CodeGeneratorX86::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());