Bug 1277973 - Baldr: hoist wasm::MemoryAccessDesc (r=bbouvier)
author:    Luke Wagner <luke@mozilla.com>
date:      Thu, 13 Oct 2016 13:17:55 -0500
changeset: 317903:959f1e7b26fa630e9b6f842cf96934b2b1d15b6b
parent:    317902:33b295d58244ce40eda37a5ce57a14ff189b30b3
child:     317904:6b8136cb9a4d195df6db0ffd784caf6150e4416a
push id:   33170
push user: cbook@mozilla.com
push date: Fri, 14 Oct 2016 10:37:07 +0000
reviewers: bbouvier
bugs:      1277973
milestone: 52.0a1
Bug 1277973 - Baldr: hoist wasm::MemoryAccessDesc (r=bbouvier) MozReview-Commit-ID: J5rFi5dPnfP
js/src/asmjs/WasmBaselineCompile.cpp
js/src/asmjs/WasmIonCompile.cpp
js/src/jit/MIR.h
js/src/jit/MacroAssembler.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/CodeGenerator-arm.h
js/src/jit/arm/Lowering-arm.cpp
js/src/jit/arm/MacroAssembler-arm-inl.h
js/src/jit/arm64/MacroAssembler-arm64-inl.h
js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
js/src/jit/mips-shared/CodeGenerator-mips-shared.h
js/src/jit/mips-shared/Lowering-mips-shared.cpp
js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
js/src/jit/mips32/CodeGenerator-mips32.cpp
js/src/jit/mips64/CodeGenerator-mips64.cpp
js/src/jit/mips64/MacroAssembler-mips64.cpp
js/src/jit/shared/Assembler-shared.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/CodeGenerator-x64.h
js/src/jit/x64/Lowering-x64.cpp
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/CodeGenerator-x86.h
js/src/jit/x86/Lowering-x86.cpp
js/src/jit/x86/MacroAssembler-x86.cpp
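
Before the per-file hunks, a sketch of the hoisted type may help. This is reconstructed from the MWasmMemoryAccess class this patch deletes from MIR.h (below) and from the renamed accessors at the call sites (accessType() -> type(), isSimdAccess() -> isSimd(), isAtomicAccess() -> isAtomic()); the actual definition is not shown in these hunks and presumably lands in jit/shared/Assembler-shared.h, which appears in the file list. The jit:: qualifications are an assumption about how the class reads once it lives in namespace wasm.

    namespace wasm {

    // Hypothetical reconstruction of the hoisted descriptor; field-for-field
    // it mirrors the MWasmMemoryAccess removed from MIR.h in this patch.
    class MemoryAccessDesc
    {
        uint32_t offset_;
        uint32_t align_;
        Scalar::Type type_;
        unsigned numSimdElems_;
        jit::MemoryBarrierBits barrierBefore_;
        jit::MemoryBarrierBits barrierAfter_;

      public:
        explicit MemoryAccessDesc(Scalar::Type type, uint32_t align, uint32_t offset,
                                  unsigned numSimdElems = 0,
                                  jit::MemoryBarrierBits barrierBefore = jit::MembarNobits,
                                  jit::MemoryBarrierBits barrierAfter = jit::MembarNobits)
          : offset_(offset), align_(align), type_(type), numSimdElems_(numSimdElems),
            barrierBefore_(barrierBefore), barrierAfter_(barrierAfter)
        {
            MOZ_ASSERT(numSimdElems <= jit::ScalarTypeToLength(type));
            MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
        }

        uint32_t offset() const { return offset_; }
        uint32_t align() const { return align_; }
        Scalar::Type type() const { return type_; }
        unsigned byteSize() const {
            return Scalar::isSimdType(type())
                   ? Scalar::scalarByteSize(type()) * numSimdElems()
                   : jit::TypedArrayElemSize(type());
        }
        unsigned numSimdElems() const { return numSimdElems_; }
        jit::MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
        jit::MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
        bool isAtomic() const { return (barrierBefore_ | barrierAfter_) != jit::MembarNobits; }
        bool isSimd() const { return Scalar::isSimdType(type_); }
        bool isUnaligned() const { return align() && align() < byteSize(); }
        void clearOffset() { offset_ = 0; }
    };

    } // namespace wasm
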
--- a/js/src/asmjs/WasmBaselineCompile.cpp
+++ b/js/src/asmjs/WasmBaselineCompile.cpp
@@ -3015,18 +3015,18 @@ class BaseCompiler
 
     //////////////////////////////////////////////////////////////////////
     //
     // Heap access.
 
     // Return true only for real asm.js (HEAP[i>>2]|0) accesses which have the
     // peculiar property of not throwing on out-of-bounds. Everything else
     // (wasm, SIMD.js, Atomics) throws on out-of-bounds.
-    bool isAsmJSAccess(const MWasmMemoryAccess& access) {
-        return isCompilingAsmJS() && !access.isSimdAccess() && !access.isAtomicAccess();
+    bool isAsmJSAccess(const MemoryAccessDesc& access) {
+        return isCompilingAsmJS() && !access.isSimd() && !access.isAtomic();
     }
 
 #ifndef WASM_HUGE_MEMORY
     class AsmJSLoadOOB : public OutOfLineCode
     {
         Scalar::Type viewType;
         AnyRegister dest;
 
@@ -3069,115 +3069,115 @@ class BaseCompiler
             Unused << dest;
             MOZ_CRASH("Compiler bug: Unexpected platform.");
 #endif
         }
     };
 #endif
 
   private:
-    void checkOffset(MWasmMemoryAccess* access, RegI32 ptr) {
+    void checkOffset(MemoryAccessDesc* access, RegI32 ptr) {
         if (access->offset() >= OffsetGuardLimit) {
             masm.branchAdd32(Assembler::CarrySet,
                              Imm32(access->offset()), ptr.reg,
                              JumpTarget::OutOfBounds);
             access->clearOffset();
         }
     }
 
   public:
     MOZ_MUST_USE
-    bool load(MWasmMemoryAccess access, RegI32 ptr, AnyReg dest) {
+    bool load(MemoryAccessDesc access, RegI32 ptr, AnyReg dest) {
         checkOffset(&access, ptr);
 
         OutOfLineCode* ool = nullptr;
 #ifndef WASM_HUGE_MEMORY
         if (isAsmJSAccess(access)) {
-            ool = new (alloc_) AsmJSLoadOOB(access.accessType(), dest.any());
+            ool = new (alloc_) AsmJSLoadOOB(access.type(), dest.any());
             if (!addOutOfLineCode(ool))
                 return false;
 
             masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, ool->entry());
         } else {
             masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, JumpTarget::OutOfBounds);
         }
 #endif
 
 # if defined(JS_CODEGEN_X64)
         Operand srcAddr(HeapReg, ptr.reg, TimesOne, access.offset());
 
         uint32_t before = masm.size();
         if (dest.tag == AnyReg::I64)
-            masm.wasmLoadI64(access.accessType(), srcAddr, dest.i64().reg);
+            masm.wasmLoadI64(access, srcAddr, dest.i64().reg);
         else
-            masm.wasmLoad(access.accessType(), 0, srcAddr, dest.any());
+            masm.wasmLoad(access, srcAddr, dest.any());
 
         if (isAsmJSAccess(access))
             masm.append(MemoryAccess(before));
 # elif defined(JS_CODEGEN_X86)
         Operand srcAddr(ptr.reg, access.offset());
 
         if (dest.tag == AnyReg::I64) {
-            masm.wasmLoadI64(access.accessType(), srcAddr, dest.i64().reg);
+            masm.wasmLoadI64(access, srcAddr, dest.i64().reg);
         } else {
             bool byteRegConflict = access.byteSize() == 1 && !singleByteRegs_.has(dest.i32().reg);
             AnyRegister out = byteRegConflict ? AnyRegister(ScratchRegX86) : dest.any();
 
-            masm.wasmLoad(access.accessType(), 0, srcAddr, out);
+            masm.wasmLoad(access, srcAddr, out);
 
             if (byteRegConflict)
                 masm.mov(ScratchRegX86, dest.i32().reg);
         }
 # else
         MOZ_CRASH("Compiler bug: Unexpected platform.");
 # endif
 
         if (ool)
             masm.bind(ool->rejoin());
         return true;
     }
 
     MOZ_MUST_USE
-    bool store(MWasmMemoryAccess access, RegI32 ptr, AnyReg src) {
+    bool store(MemoryAccessDesc access, RegI32 ptr, AnyReg src) {
         checkOffset(&access, ptr);
 
         Label rejoin;
 #ifndef WASM_HUGE_MEMORY
         if (isAsmJSAccess(access))
             masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, &rejoin);
         else
             masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, JumpTarget::OutOfBounds);
 #endif
 
         // Emit the store
 # if defined(JS_CODEGEN_X64)
         Operand dstAddr(HeapReg, ptr.reg, TimesOne, access.offset());
 
         uint32_t before = masm.size();
-        masm.wasmStore(access.accessType(), 0, src.any(), dstAddr);
+        masm.wasmStore(access, src.any(), dstAddr);
 
         if (isCompilingAsmJS())
             masm.append(MemoryAccess(before));
 # elif defined(JS_CODEGEN_X86)
         Operand dstAddr(ptr.reg, access.offset());
 
-        if (access.accessType() == Scalar::Int64) {
-            masm.wasmStoreI64(src.i64().reg, dstAddr);
+        if (access.type() == Scalar::Int64) {
+            masm.wasmStoreI64(access, src.i64().reg, dstAddr);
         } else {
             AnyRegister value;
             if (src.tag == AnyReg::I64) {
                 value = AnyRegister(src.i64().reg.low);
             } else if (access.byteSize() == 1 && !singleByteRegs_.has(src.i32().reg)) {
                 masm.mov(src.i32().reg, ScratchRegX86);
                 value = AnyRegister(ScratchRegX86);
             } else {
                 value = src.any();
             }
 
-            masm.wasmStore(access.accessType(), 0, value, dstAddr);
+            masm.wasmStore(access, value, dstAddr);
         }
 # else
         MOZ_CRASH("Compiler bug: unexpected platform");
 # endif
 
         if (rejoin.used())
             masm.bind(&rejoin);
 
@@ -5790,17 +5790,17 @@ BaseCompiler::emitLoad(ValType type, Sca
         return false;
 
     if (deadCode_)
         return true;
 
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset);
 
     switch (type) {
       case ValType::I32: {
         RegI32 rp = popI32();
         if (!load(access, rp, AnyReg(rp)))
             return false;
         pushI32(rp);
         break;
@@ -5856,17 +5856,17 @@ BaseCompiler::emitStore(ValType resultTy
         return false;
 
     if (deadCode_)
         return true;
 
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset);
 
     switch (resultType) {
       case ValType::I32: {
         RegI32 rp, rv;
         pop2xI32(&rp, &rv);
         if (!store(access, rp, AnyReg(rv)))
             return false;
         freeI32(rp);
@@ -5916,17 +5916,17 @@ BaseCompiler::emitTeeStore(ValType resul
         return false;
 
     if (deadCode_)
         return true;
 
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset);
 
     switch (resultType) {
       case ValType::I32: {
         RegI32 rp, rv;
         pop2xI32(&rp, &rv);
         if (!store(access, rp, AnyReg(rv)))
             return false;
         freeI32(rp);
@@ -6199,17 +6199,17 @@ BaseCompiler::emitTeeStoreWithCoercion(V
         return false;
 
     if (deadCode_)
         return true;
 
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset);
 
     if (resultType == ValType::F32 && viewType == Scalar::Float64) {
         RegF32 rv = popF32();
         RegF64 rw = needF64();
         masm.convertFloat32ToDouble(rv.reg, rw.reg);
         RegI32 rp = popI32();
         if (!store(access, rp, AnyReg(rw)))
             return false;
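
That is the whole pattern in the baseline compiler: call sites stop exploding the access into loose (type, numSimdElems) arguments and pass the descriptor through intact. Schematically, from the x64 load path above:

    // Before this patch: access type and SIMD element count were passed
    // separately, so alignment and barrier bits were invisible to the
    // MacroAssembler.
    masm.wasmLoad(access.accessType(), /* numSimdElems = */ 0, srcAddr, dest.any());

    // After: the whole descriptor travels to the masm layer.
    masm.wasmLoad(access, srcAddr, dest.any());
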
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -697,17 +697,17 @@ class FunctionCompiler
     void assign(unsigned slot, MDefinition* def)
     {
         if (inDeadCode())
             return;
         curBlock_->setSlot(info().localSlot(slot), def);
     }
 
   private:
-    void checkOffsetAndBounds(MWasmMemoryAccess* access, MDefinition** base)
+    void checkOffsetAndBounds(MemoryAccessDesc* access, MDefinition** base)
     {
         // If the offset is bigger than the guard region, a separate instruction
         // is necessary to add the offset to the base and check for overflow.
         if (access->offset() >= OffsetGuardLimit || !JitOptions.wasmFoldOffsets) {
             auto* ins = MWasmAddOffset::New(alloc(), *base, access->offset());
             curBlock_->add(ins);
 
             *base = ins;
@@ -717,82 +717,82 @@ class FunctionCompiler
 #ifndef WASM_HUGE_MEMORY
         curBlock_->add(MWasmBoundsCheck::New(alloc(), *base));
 #endif
     }
 
     // Return true only for real asm.js (HEAP[i>>2]|0) accesses which have the
     // peculiar property of not throwing on out-of-bounds. Everything else
     // (wasm, SIMD.js, Atomics) throws on out-of-bounds.
-    bool isAsmJSAccess(const MWasmMemoryAccess& access) {
-        return mg().isAsmJS() && !access.isSimdAccess() && !access.isAtomicAccess();
+    bool isAsmJSAccess(const MemoryAccessDesc& access) {
+        return mg().isAsmJS() && !access.isSimd() && !access.isAtomic();
     }
 
   public:
-    MDefinition* load(MDefinition* base, MWasmMemoryAccess access, ValType result)
+    MDefinition* load(MDefinition* base, MemoryAccessDesc access, ValType result)
     {
         if (inDeadCode())
             return nullptr;
 
         MInstruction* load = nullptr;
         if (isAsmJSAccess(access)) {
             MOZ_ASSERT(access.offset() == 0);
-            load = MAsmJSLoadHeap::New(alloc(), base, access.accessType());
+            load = MAsmJSLoadHeap::New(alloc(), base, access.type());
         } else {
             checkOffsetAndBounds(&access, &base);
             load = MWasmLoad::New(alloc(), base, access, ToMIRType(result));
         }
 
         curBlock_->add(load);
         return load;
     }
 
-    void store(MDefinition* base, MWasmMemoryAccess access, MDefinition* v)
+    void store(MDefinition* base, MemoryAccessDesc access, MDefinition* v)
     {
         if (inDeadCode())
             return;
 
         MInstruction* store = nullptr;
         if (isAsmJSAccess(access)) {
             MOZ_ASSERT(access.offset() == 0);
-            store = MAsmJSStoreHeap::New(alloc(), base, access.accessType(), v);
+            store = MAsmJSStoreHeap::New(alloc(), base, access.type(), v);
         } else {
             checkOffsetAndBounds(&access, &base);
             store = MWasmStore::New(alloc(), base, access, v);
         }
 
         curBlock_->add(store);
     }
 
-    MDefinition* atomicCompareExchangeHeap(MDefinition* base, MWasmMemoryAccess access,
+    MDefinition* atomicCompareExchangeHeap(MDefinition* base, MemoryAccessDesc access,
                                            MDefinition* oldv, MDefinition* newv)
     {
         if (inDeadCode())
             return nullptr;
 
         checkOffsetAndBounds(&access, &base);
         auto* cas = MAsmJSCompareExchangeHeap::New(alloc(), base, access, oldv, newv, tlsPointer_);
         curBlock_->add(cas);
         return cas;
     }
 
-    MDefinition* atomicExchangeHeap(MDefinition* base, MWasmMemoryAccess access,
+    MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc access,
                                     MDefinition* value)
     {
         if (inDeadCode())
             return nullptr;
 
         checkOffsetAndBounds(&access, &base);
         auto* cas = MAsmJSAtomicExchangeHeap::New(alloc(), base, access, value, tlsPointer_);
         curBlock_->add(cas);
         return cas;
     }
 
     MDefinition* atomicBinopHeap(js::jit::AtomicOp op,
-                                 MDefinition* base, MWasmMemoryAccess access,
+                                 MDefinition* base, MemoryAccessDesc access,
                                  MDefinition* v)
     {
         if (inDeadCode())
             return nullptr;
 
         checkOffsetAndBounds(&access, &base);
         auto* binop = MAsmJSAtomicBinopHeap::New(alloc(), op, base, access, v, tlsPointer_);
         curBlock_->add(binop);
@@ -2372,43 +2372,43 @@ EmitSelect(FunctionCompiler& f)
 
 static bool
 EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType)
 {
     LinearMemoryAddress<MDefinition*> addr;
     if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset);
     f.iter().setResult(f.load(addr.base, access, type));
     return true;
 }
 
 static bool
 EmitStore(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
 {
     LinearMemoryAddress<MDefinition*> addr;
     MDefinition* value;
     if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset);
     f.store(addr.base, access, value);
     return true;
 }
 
 static bool
 EmitTeeStore(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
 {
     LinearMemoryAddress<MDefinition*> addr;
     MDefinition* value;
     if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset);
     f.store(addr.base, access, value);
     return true;
 }
 
 static bool
 EmitTeeStoreWithCoercion(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
 {
     LinearMemoryAddress<MDefinition*> addr;
@@ -2418,17 +2418,17 @@ EmitTeeStoreWithCoercion(FunctionCompile
 
     if (resultType == ValType::F32 && viewType == Scalar::Float64)
         value = f.unary<MToDouble>(value);
     else if (resultType == ValType::F64 && viewType == Scalar::Float32)
         value = f.unary<MToFloat32>(value);
     else
         MOZ_CRASH("unexpected coerced store");
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset);
     f.store(addr.base, access, value);
     return true;
 }
 
 static bool
 EmitUnaryMathBuiltinCall(FunctionCompiler& f, uint32_t callOffset, SymbolicAddress callee,
                          ValType operandType)
 {
@@ -2491,78 +2491,78 @@ EmitBinaryMathBuiltinCall(FunctionCompil
 static bool
 EmitAtomicsLoad(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     if (!f.iter().readAtomicLoad(&addr, &viewType))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset, 0,
-                             MembarBeforeLoad, MembarAfterLoad);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset, 0,
+                            MembarBeforeLoad, MembarAfterLoad);
     f.iter().setResult(f.load(addr.base, access, ValType::I32));
     return true;
 }
 
 static bool
 EmitAtomicsStore(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     MDefinition* value;
     if (!f.iter().readAtomicStore(&addr, &viewType, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset, 0,
-                             MembarBeforeStore, MembarAfterStore);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset, 0,
+                            MembarBeforeStore, MembarAfterStore);
     f.store(addr.base, access, value);
     f.iter().setResult(value);
     return true;
 }
 
 static bool
 EmitAtomicsBinOp(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     jit::AtomicOp op;
     MDefinition* value;
     if (!f.iter().readAtomicBinOp(&addr, &viewType, &op, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset);
     f.iter().setResult(f.atomicBinopHeap(op, addr.base, access, value));
     return true;
 }
 
 static bool
 EmitAtomicsCompareExchange(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     MDefinition* oldValue;
     MDefinition* newValue;
     if (!f.iter().readAtomicCompareExchange(&addr, &viewType, &oldValue, &newValue))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset);
     f.iter().setResult(f.atomicCompareExchangeHeap(addr.base, access, oldValue, newValue));
     return true;
 }
 
 static bool
 EmitAtomicsExchange(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     MDefinition* value;
     if (!f.iter().readAtomicExchange(&addr, &viewType, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset);
     f.iter().setResult(f.atomicExchangeHeap(addr.base, access, value));
     return true;
 }
 
 static bool
 EmitSimdUnary(FunctionCompiler& f, ValType type, SimdOperation simdOp)
 {
     MSimdUnaryArith::Operation op;
@@ -2774,17 +2774,17 @@ EmitSimdLoad(FunctionCompiler& f, ValTyp
 
     if (!numElems)
         numElems = defaultNumElems;
 
     LinearMemoryAddress<MDefinition*> addr;
     if (!f.iter().readLoad(resultType, Scalar::byteSize(viewType), &addr))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset, numElems);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset, numElems);
     f.iter().setResult(f.load(addr.base, access, resultType));
     return true;
 }
 
 static bool
 EmitSimdStore(FunctionCompiler& f, ValType resultType, unsigned numElems)
 {
     unsigned defaultNumElems;
@@ -2793,17 +2793,17 @@ EmitSimdStore(FunctionCompiler& f, ValTy
     if (!numElems)
         numElems = defaultNumElems;
 
     LinearMemoryAddress<MDefinition*> addr;
     MDefinition* value;
     if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, addr.align, addr.offset, numElems);
+    MemoryAccessDesc access(viewType, addr.align, addr.offset, numElems);
     f.store(addr.base, access, value);
     return true;
 }
 
 static bool
 EmitSimdSelect(FunctionCompiler& f, ValType simdType)
 {
     MDefinition* trueValue;
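
The Ion emitters construct the same descriptor in three shapes, matching the constructor defaults sketched earlier: plain wasm/asm.js loads and stores, Atomics accesses carrying barrier bits, and partial SIMD accesses carrying an element count. Condensed from the emitters above (not a standalone compilable excerpt):

    MemoryAccessDesc plain(viewType, addr.align, addr.offset);
    MemoryAccessDesc atomic(viewType, addr.align, addr.offset, /* numSimdElems = */ 0,
                            MembarBeforeLoad, MembarAfterLoad);
    MemoryAccessDesc simd(viewType, addr.align, addr.offset, numElems);
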
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -13514,103 +13514,70 @@ class MWasmAddOffset
         return AliasSet::None();
     }
 
     uint32_t offset() const {
         return offset_;
     }
 };
 
-class MWasmMemoryAccess
-{
-    uint32_t offset_;
-    uint32_t align_;
-    Scalar::Type accessType_ : 8;
-    unsigned numSimdElems_;
-    MemoryBarrierBits barrierBefore_;
-    MemoryBarrierBits barrierAfter_;
-
-  public:
-    explicit MWasmMemoryAccess(Scalar::Type accessType, uint32_t align, uint32_t offset,
-                               unsigned numSimdElems = 0,
-                               MemoryBarrierBits barrierBefore = MembarNobits,
-                               MemoryBarrierBits barrierAfter = MembarNobits)
-      : offset_(offset),
-        align_(align),
-        accessType_(accessType),
-        numSimdElems_(numSimdElems),
-        barrierBefore_(barrierBefore),
-        barrierAfter_(barrierAfter)
-    {
-        MOZ_ASSERT(numSimdElems <= ScalarTypeToLength(accessType));
-        MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
-    }
-
-    uint32_t offset() const { return offset_; }
-    uint32_t align() const { return align_; }
-    Scalar::Type accessType() const { return accessType_; }
-    unsigned byteSize() const {
-        return Scalar::isSimdType(accessType())
-               ? Scalar::scalarByteSize(accessType()) * numSimdElems()
-               : TypedArrayElemSize(accessType());
-    }
-    unsigned numSimdElems() const { return numSimdElems_; }
-    MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
-    MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
-    bool isAtomicAccess() const { return (barrierBefore_ | barrierAfter_) != MembarNobits; }
-    bool isSimdAccess() const { return Scalar::isSimdType(accessType_); }
-    bool isUnaligned() const { return align() && align() < byteSize(); }
-
-    void clearOffset() { offset_ = 0; }
-};
-
 class MWasmLoad
   : public MUnaryInstruction,
-    public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
-    MWasmLoad(MDefinition* base, const MWasmMemoryAccess& access, MIRType resultType)
+    wasm::MemoryAccessDesc access_;
+
+    MWasmLoad(MDefinition* base, const wasm::MemoryAccessDesc& access, MIRType resultType)
       : MUnaryInstruction(base),
-        MWasmMemoryAccess(access)
+        access_(access)
     {
         setGuard();
         setResultType(resultType);
     }
 
   public:
     INSTRUCTION_HEADER(WasmLoad)
     TRIVIAL_NEW_WRAPPERS
     NAMED_OPERANDS((0, base))
 
+    const wasm::MemoryAccessDesc& access() const {
+        return access_;
+    }
+
     AliasSet getAliasSet() const override {
         // When a barrier is needed, make the instruction effectful by giving
         // it a "store" effect.
-        if (isAtomicAccess())
+        if (access_.isAtomic())
             return AliasSet::Store(AliasSet::AsmJSHeap);
         return AliasSet::Load(AliasSet::AsmJSHeap);
     }
 };
 
 class MWasmStore
   : public MBinaryInstruction,
-    public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
-    MWasmStore(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* value)
+    wasm::MemoryAccessDesc access_;
+
+    MWasmStore(MDefinition* base, const wasm::MemoryAccessDesc& access, MDefinition* value)
       : MBinaryInstruction(base, value),
-        MWasmMemoryAccess(access)
+        access_(access)
     {
         setGuard();
     }
 
   public:
     INSTRUCTION_HEADER(WasmStore)
     TRIVIAL_NEW_WRAPPERS
     NAMED_OPERANDS((0, base), (1, value))
 
+    const wasm::MemoryAccessDesc& access() const {
+        return access_;
+    }
+
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSMemoryAccess
 {
     uint32_t offset_;
@@ -13628,16 +13595,20 @@ class MAsmJSMemoryAccess
     }
 
     uint32_t offset() const { return offset_; }
     uint32_t endOffset() const { return offset() + byteSize(); }
     Scalar::Type accessType() const { return accessType_; }
     unsigned byteSize() const { return TypedArrayElemSize(accessType()); }
     bool needsBoundsCheck() const { return needsBoundsCheck_; }
 
+    wasm::MemoryAccessDesc access() const {
+        return wasm::MemoryAccessDesc(accessType_, Scalar::byteSize(accessType_), offset_);
+    }
+
     void removeBoundsCheck() { needsBoundsCheck_ = false; }
     void setOffset(uint32_t o) { offset_ = o; }
 };
 
 class MAsmJSLoadHeap
   : public MUnaryInstruction,
     public MAsmJSMemoryAccess,
     public NoTypePolicy::Data
@@ -13683,91 +13654,99 @@ class MAsmJSStoreHeap
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSCompareExchangeHeap
   : public MQuaternaryInstruction,
-    public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSCompareExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
+    wasm::MemoryAccessDesc access_;
+
+    MAsmJSCompareExchangeHeap(MDefinition* base, const wasm::MemoryAccessDesc& access,
                               MDefinition* oldv, MDefinition* newv, MDefinition* tls)
         : MQuaternaryInstruction(base, oldv, newv, tls),
-          MWasmMemoryAccess(access)
+          access_(access)
     {
         setGuard();             // Not removable
         setResultType(MIRType::Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSCompareExchangeHeap)
     TRIVIAL_NEW_WRAPPERS
 
+    const wasm::MemoryAccessDesc& access() const { return access_; }
+
     MDefinition* base() const { return getOperand(0); }
     MDefinition* oldValue() const { return getOperand(1); }
     MDefinition* newValue() const { return getOperand(2); }
     MDefinition* tls() const { return getOperand(3); }
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSAtomicExchangeHeap
   : public MTernaryInstruction,
-    public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSAtomicExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
+    wasm::MemoryAccessDesc access_;
+
+    MAsmJSAtomicExchangeHeap(MDefinition* base, const wasm::MemoryAccessDesc& access,
                              MDefinition* value, MDefinition* tls)
         : MTernaryInstruction(base, value, tls),
-          MWasmMemoryAccess(access)
+          access_(access)
     {
         setGuard();             // Not removable
         setResultType(MIRType::Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSAtomicExchangeHeap)
     TRIVIAL_NEW_WRAPPERS
 
+    const wasm::MemoryAccessDesc& access() const { return access_; }
+
     MDefinition* base() const { return getOperand(0); }
     MDefinition* value() const { return getOperand(1); }
     MDefinition* tls() const { return getOperand(2); }
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSAtomicBinopHeap
   : public MTernaryInstruction,
-    public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
     AtomicOp op_;
-
-    MAsmJSAtomicBinopHeap(AtomicOp op, MDefinition* base, const MWasmMemoryAccess& access,
+    wasm::MemoryAccessDesc access_;
+
+    MAsmJSAtomicBinopHeap(AtomicOp op, MDefinition* base, const wasm::MemoryAccessDesc& access,
                           MDefinition* v, MDefinition* tls)
         : MTernaryInstruction(base, v, tls),
-          MWasmMemoryAccess(access),
-          op_(op)
+          op_(op),
+          access_(access)
     {
         setGuard();         // Not removable
         setResultType(MIRType::Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSAtomicBinopHeap)
     TRIVIAL_NEW_WRAPPERS
 
     AtomicOp operation() const { return op_; }
+    const wasm::MemoryAccessDesc& access() const { return access_; }
+
     MDefinition* base() const { return getOperand(0); }
     MDefinition* value() const { return getOperand(1); }
     MDefinition* tls() const { return getOperand(2); }
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
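
The MIR side of the change is uniform: each heap-access node drops MWasmMemoryAccess from its base-class list and instead holds a wasm::MemoryAccessDesc by value, exposed through a const access() accessor. Reduced to its skeleton (details elided):

    // Before: accessors such as offset() and accessType() were inherited
    // from the MWasmMemoryAccess mixin.
    class MWasmLoad : public MUnaryInstruction,
                      public MWasmMemoryAccess,   // mixin removed by this patch
                      public NoTypePolicy::Data
    { /* ... */ };

    // After: plain composition; every consumer goes through access().
    class MWasmLoad : public MUnaryInstruction,
                      public NoTypePolicy::Data
    {
        wasm::MemoryAccessDesc access_;
      public:
        const wasm::MemoryAccessDesc& access() const { return access_; }
        /* ... */
    };
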
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1301,16 +1301,18 @@ class MacroAssembler : public MacroAssem
 
     inline void storeFloat32x3(FloatRegister src, const Address& dest) PER_SHARED_ARCH;
     inline void storeFloat32x3(FloatRegister src, const BaseIndex& dest) PER_SHARED_ARCH;
 
     template <typename T>
     void storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, const T& dest,
                            MIRType slotType) PER_ARCH;
 
+    inline void memoryBarrier(MemoryBarrierBits barrier) PER_SHARED_ARCH;
+
   public:
     // ========================================================================
     // Truncate floating point.
 
     // Undefined behaviour when truncation is outside Int64 range.
     // Needs a temp register if SSE3 is not present.
     inline void truncateFloat32ToInt64(Address src, Address dest, Register temp)
         DEFINED_ON(x86_shared);
@@ -1335,20 +1337,20 @@ class MacroAssembler : public MacroAssem
     // Called after compilation completes to patch the given limit into the
     // given instruction's immediate.
     static inline void wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) PER_ARCH;
 
     // On x86, each instruction adds its own wasm::MemoryAccess's to the
     // wasm::MemoryAccessVector (there can be multiple when i64 is involved).
     // On x64, only some asm.js accesses need a wasm::MemoryAccess so the caller
     // is responsible for doing this instead.
-    void wasmLoad(Scalar::Type type, unsigned numSimdElems, Operand srcAddr, AnyRegister out) DEFINED_ON(x86, x64);
-    void wasmLoadI64(Scalar::Type type, Operand srcAddr, Register64 out) DEFINED_ON(x86, x64);
-    void wasmStore(Scalar::Type type, unsigned numSimdElems, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64);
-    void wasmStoreI64(Register64 value, Operand dstAddr) DEFINED_ON(x86);
+    void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out) DEFINED_ON(x86, x64);
+    void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out) DEFINED_ON(x86, x64);
+    void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64);
+    void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr) DEFINED_ON(x86);
 
     // wasm specific methods, used in both the wasm baseline compiler and ion.
     void wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64);
     void wasmTruncateDoubleToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared);
     void outOfLineWasmTruncateDoubleToInt32(FloatRegister input, bool isUnsigned, Label* rejoin) DEFINED_ON(x86_shared);
 
     void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64);
     void wasmTruncateFloat32ToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared);
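
The comment above can be seen in action in the baseline-compiler hunk: on x64 the caller records the wasm::MemoryAccess itself, bracketing the emitted load with a size probe, while on x86 the wasmLoad/wasmStore methods append their own entries. From BaseCompiler::load() above:

    uint32_t before = masm.size();
    masm.wasmLoad(access, srcAddr, dest.any());
    if (isAsmJSAccess(access))
        masm.append(MemoryAccess(before));
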
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2358,38 +2358,38 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAs
 }
 
 template <typename T>
 void
 CodeGeneratorARM::emitWasmLoad(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
 
-    uint32_t offset = mir->offset();
+    uint32_t offset = mir->access().offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptr());
-    Scalar::Type type = mir->accessType();
+    Scalar::Type type = mir->access().type();
 
     // Maybe add the offset.
     if (offset || type == Scalar::Int64) {
         ScratchRegisterScope scratch(masm);
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
         if (offset)
             masm.ma_add(Imm32(offset), ptrPlusOffset, scratch);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
     bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
                     type == Scalar::Int64;
-    unsigned byteSize = mir->byteSize();
-
-    memoryBarrier(mir->barrierBefore());
+    unsigned byteSize = mir->access().byteSize();
+
+    masm.memoryBarrier(mir->access().barrierBefore());
 
     if (mir->type() == MIRType::Int64) {
         Register64 output = ToOutRegister64(lir);
         if (type == Scalar::Int64) {
             MOZ_ASSERT(INT64LOW_OFFSET == 0);
             masm.ma_dataTransferN(IsLoad, 32, /* signed = */ false, HeapReg, ptr, output.low);
             masm.as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
             masm.ma_dataTransferN(IsLoad, 32, isSigned, HeapReg, ptr, output.high);
@@ -2408,17 +2408,17 @@ CodeGeneratorARM::emitWasmLoad(T* lir)
             ScratchRegisterScope scratch(masm);
             masm.ma_add(HeapReg, ptr, scratch);
             masm.ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), output.fpu());
         } else {
             masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
         }
     }
 
-    memoryBarrier(mir->barrierAfter());
+    masm.memoryBarrier(mir->access().barrierAfter());
 }
 
 void
 CodeGeneratorARM::visitWasmLoad(LWasmLoad* lir)
 {
     emitWasmLoad(lir);
 }
 
@@ -2429,30 +2429,30 @@ CodeGeneratorARM::visitWasmLoadI64(LWasm
 }
 
 template<typename T>
 void
 CodeGeneratorARM::emitWasmUnalignedLoad(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
 
-    uint32_t offset = mir->offset();
+    uint32_t offset = mir->access().offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptrCopy());
     if (offset) {
         ScratchRegisterScope scratch(masm);
         masm.ma_add(Imm32(offset), ptr, scratch);
     }
 
     // Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
     masm.ma_add(HeapReg, ptr);
 
-    unsigned byteSize = mir->byteSize();
-    Scalar::Type type = mir->accessType();
+    unsigned byteSize = mir->access().byteSize();
+    Scalar::Type type = mir->access().type();
     bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
                     type == Scalar::Int64;
 
     MIRType mirType = mir->type();
 
     Register tmp = ToRegister(lir->getTemp(1));
 
     Register low;
@@ -2461,17 +2461,17 @@ CodeGeneratorARM::emitWasmUnalignedLoad(
     else if (mirType == MIRType::Int64)
         low = ToOutRegister64(lir).low;
     else
         low = ToRegister(lir->output());
 
     MOZ_ASSERT(low != tmp);
     MOZ_ASSERT(low != ptr);
 
-    memoryBarrier(mir->barrierBefore());
+    masm.memoryBarrier(mir->access().barrierBefore());
 
     masm.emitUnalignedLoad(isSigned, Min(byteSize, 4u), ptr, tmp, low);
 
     if (IsFloatingPointType(mirType)) {
         FloatRegister output = ToFloatRegister(lir->output());
         if (byteSize == 4) {
             MOZ_ASSERT(output.isSingle());
             masm.ma_vxfer(low, output);
@@ -2492,17 +2492,17 @@ CodeGeneratorARM::emitWasmUnalignedLoad(
             // Propagate sign.
             if (isSigned)
                 masm.ma_asr(Imm32(31), output.low, output.high);
             else
                 masm.ma_mov(Imm32(0), output.high);
         }
     }
 
-    memoryBarrier(mir->barrierAfter());
+    masm.memoryBarrier(mir->access().barrierAfter());
 }
 
 void
 CodeGeneratorARM::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir)
 {
     emitWasmUnalignedLoad(lir);
 }
 
@@ -2525,35 +2525,35 @@ CodeGeneratorARM::visitWasmAddOffset(LWa
 }
 
 template <typename T>
 void
 CodeGeneratorARM::emitWasmStore(T* lir)
 {
     const MWasmStore* mir = lir->mir();
 
-    uint32_t offset = mir->offset();
+    uint32_t offset = mir->access().offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptr());
-    unsigned byteSize = mir->byteSize();
-    Scalar::Type type = mir->accessType();
+    unsigned byteSize = mir->access().byteSize();
+    Scalar::Type type = mir->access().type();
 
     // Maybe add the offset.
     if (offset || type == Scalar::Int64) {
         ScratchRegisterScope scratch(masm);
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
         if (offset)
             masm.ma_add(Imm32(offset), ptrPlusOffset, scratch);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
-    memoryBarrier(mir->barrierBefore());
+    masm.memoryBarrier(mir->access().barrierBefore());
 
     if (type == Scalar::Int64) {
         MOZ_ASSERT(INT64LOW_OFFSET == 0);
 
         Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
         masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ false, HeapReg, ptr, value.low);
         masm.as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
         masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ true, HeapReg, ptr, value.high);
@@ -2567,17 +2567,17 @@ CodeGeneratorARM::emitWasmStore(T* lir)
             masm.ma_vstr(val, Operand(Address(scratch, 0)).toVFPAddr());
         } else {
             bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // see AsmJSStoreHeap;
             Register val = value.gpr();
             masm.ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val);
         }
     }
 
-    memoryBarrier(mir->barrierAfter());
+    masm.memoryBarrier(mir->access().barrierAfter());
 }
 
 void
 CodeGeneratorARM::visitWasmStore(LWasmStore* lir)
 {
     emitWasmStore(lir);
 }
 
@@ -2588,42 +2588,42 @@ CodeGeneratorARM::visitWasmStoreI64(LWas
 }
 
 template<typename T>
 void
 CodeGeneratorARM::emitWasmUnalignedStore(T* lir)
 {
     const MWasmStore* mir = lir->mir();
 
-    uint32_t offset = mir->offset();
+    uint32_t offset = mir->access().offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptrCopy());
     if (offset) {
         ScratchRegisterScope scratch(masm);
         masm.ma_add(Imm32(offset), ptr, scratch);
     }
 
     // Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
     masm.ma_add(HeapReg, ptr);
 
     MIRType mirType = mir->value()->type();
 
-    memoryBarrier(mir->barrierAfter());
+    masm.memoryBarrier(mir->access().barrierAfter());
 
     Register val = ToRegister(lir->valueHelper());
     if (IsFloatingPointType(mirType)) {
         masm.ma_vxfer(ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex)), val);
     } else if (mirType == MIRType::Int64) {
         Register64 input = ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex));
         if (input.low != val)
             masm.ma_mov(input.low, val);
     }
 
-    unsigned byteSize = mir->byteSize();
+    unsigned byteSize = mir->access().byteSize();
     masm.emitUnalignedStore(Min(byteSize, 4u), ptr, val);
 
     if (byteSize > 4) {
         // It's a double or an int64 load.
         // Load the high 32 bits when counter == 4.
         if (IsFloatingPointType(mirType)) {
             FloatRegister fp = ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex));
             MOZ_ASSERT(fp.isDouble());
@@ -2631,17 +2631,17 @@ CodeGeneratorARM::emitWasmUnalignedStore
             masm.ma_vxfer(fp, scratch, val);
         } else {
             MOZ_ASSERT(mirType == MIRType::Int64);
             masm.ma_mov(ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex)).high, val);
         }
         masm.emitUnalignedStore(4, ptr, val, /* offset */ 4);
     }
 
-    memoryBarrier(mir->barrierBefore());
+    masm.memoryBarrier(mir->access().barrierBefore());
 }
 
 void
 CodeGeneratorARM::visitWasmUnalignedStore(LWasmUnalignedStore* lir)
 {
     emitWasmUnalignedStore(lir);
 }
 
@@ -2715,19 +2715,19 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LA
         }
     }
 }
 
 void
 CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
-
-    Scalar::Type vt = mir->accessType();
+    MOZ_ASSERT(mir->access().offset() == 0);
+
+    Scalar::Type vt = mir->access().type();
     const LAllocation* ptr = ins->ptr();
     Register ptrReg = ToRegister(ptr);
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
 
@@ -2735,88 +2735,88 @@ CodeGeneratorARM::visitAsmJSCompareExcha
                                         srcAddr, oldval, newval, InvalidReg,
                                         ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorARM::visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout* ins)
 {
     const MAsmJSCompareExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
 
     Register ptr = ToRegister(ins->ptr());
     Register oldval = ToRegister(ins->oldval());
     Register newval = ToRegister(ins->newval());
     Register tls = ToRegister(ins->tls());
     Register instance = ToRegister(ins->getTemp(0));
     Register viewType = ToRegister(ins->getTemp(1));
 
     MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
 
     masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
-    masm.ma_mov(Imm32(mir->accessType()), viewType);
+    masm.ma_mov(Imm32(mir->access().type()), viewType);
 
     masm.setupAlignedABICall();
     masm.passABIArg(instance);
     masm.passABIArg(viewType);
     masm.passABIArg(ptr);
     masm.passABIArg(oldval);
     masm.passABIArg(newval);
     masm.callWithABI(wasm::SymbolicAddress::AtomicCmpXchg);
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
-
-    Scalar::Type vt = mir->accessType();
+    MOZ_ASSERT(mir->access().offset() == 0);
+
+    Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* ins)
 {
     const MAsmJSAtomicExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
 
     Register ptr = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     Register tls = ToRegister(ins->tls());
     Register instance = ToRegister(ins->getTemp(0));
     Register viewType = ToRegister(ins->getTemp(1));
 
     MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
 
     masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
-    masm.ma_mov(Imm32(mir->accessType()), viewType);
+    masm.ma_mov(Imm32(mir->access().type()), viewType);
 
     masm.setupAlignedABICall();
     masm.passABIArg(instance);
     masm.passABIArg(viewType);
     masm.passABIArg(ptr);
     masm.passABIArg(value);
     masm.callWithABI(wasm::SymbolicAddress::AtomicXchg);
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
     MAsmJSAtomicBinopHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
     MOZ_ASSERT(mir->hasUses());
 
-    Scalar::Type vt = mir->accessType();
+    Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register flagTemp = ToRegister(ins->flagTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
 
@@ -2830,20 +2830,20 @@ CodeGeneratorARM::visitAsmJSAtomicBinopH
                                    ToAnyRegister(ins->output()));
     }
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MAsmJSAtomicBinopHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
     MOZ_ASSERT(!mir->hasUses());
 
-    Scalar::Type vt = mir->accessType();
+    Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register flagTemp = ToRegister(ins->flagTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
 
@@ -2852,26 +2852,26 @@ CodeGeneratorARM::visitAsmJSAtomicBinopH
     else
         atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp);
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicBinopCallout(LAsmJSAtomicBinopCallout* ins)
 {
     const MAsmJSAtomicBinopHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
 
     Register ptr = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     Register tls = ToRegister(ins->tls());
     Register instance = ToRegister(ins->getTemp(0));
     Register viewType = ToRegister(ins->getTemp(1));
 
     masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
-    masm.move32(Imm32(mir->accessType()), viewType);
+    masm.move32(Imm32(mir->access().type()), viewType);
 
     masm.setupAlignedABICall();
     masm.passABIArg(instance);
     masm.passABIArg(viewType);
     masm.passABIArg(ptr);
     masm.passABIArg(value);
 
     switch (mir->operation()) {
@@ -3144,33 +3144,19 @@ CodeGeneratorARM::visitNegD(LNegD* ins)
 void
 CodeGeneratorARM::visitNegF(LNegF* ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     masm.ma_vneg_f32(input, ToFloatRegister(ins->output()));
 }
 
 void
-CodeGeneratorARM::memoryBarrier(MemoryBarrierBits barrier)
-{
-    // On ARMv6 the optional argument (BarrierST, etc) is ignored.
-    if (barrier == (MembarStoreStore|MembarSynchronizing))
-        masm.ma_dsb(masm.BarrierST);
-    else if (barrier & MembarSynchronizing)
-        masm.ma_dsb();
-    else if (barrier == MembarStoreStore)
-        masm.ma_dmb(masm.BarrierST);
-    else if (barrier)
-        masm.ma_dmb();
-}
-
-void
 CodeGeneratorARM::visitMemoryBarrier(LMemoryBarrier* ins)
 {
-    memoryBarrier(ins->type());
+    masm.memoryBarrier(ins->type());
 }
 
 void
 CodeGeneratorARM::setReturnDoubleRegs(LiveRegisterSet* regs)
 {
     MOZ_ASSERT(ReturnFloat32Reg.code_ == FloatRegisters::s0);
     MOZ_ASSERT(ReturnDoubleReg.code_ == FloatRegisters::s0);
     FloatRegister s1 = {FloatRegisters::s1, VFPRegister::Single};
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -204,18 +204,16 @@ class CodeGeneratorARM : public CodeGene
     // Functions for LTestVAndBranch.
     Register splitTagForTest(const ValueOperand& value);
 
     void divICommon(MDiv* mir, Register lhs, Register rhs, Register output, LSnapshot* snapshot,
                     Label& done);
     void modICommon(MMod* mir, Register lhs, Register rhs, Register output, LSnapshot* snapshot,
                     Label& done);
 
-    void memoryBarrier(MemoryBarrierBits barrier);
-
   public:
     CodeGeneratorARM(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
 
   public:
     void visitBox(LBox* box);
     void visitBoxFloatingPoint(LBoxFloatingPoint* box);
     void visitUnbox(LUnbox* unbox);
     void visitValue(LValue* value);
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -610,17 +610,17 @@ LIRGeneratorARM::visitAsmJSUnsignedToFlo
 void
 LIRGeneratorARM::visitWasmLoad(MWasmLoad* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation ptr = useRegisterAtStart(base);
 
-    if (ins->isUnaligned()) {
+    if (ins->access().isUnaligned()) {
         // Unaligned access expected! Revert to a byte load.
         LDefinition ptrCopy = tempCopy(base, 0);
 
         LDefinition noTemp = LDefinition::BogusTemp();
         if (ins->type() == MIRType::Int64) {
             auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, ptrCopy, temp(), noTemp, noTemp);
             defineInt64(lir, ins);
             return;
@@ -638,38 +638,38 @@ LIRGeneratorARM::visitWasmLoad(MWasmLoad
 
         auto* lir = new(alloc()) LWasmUnalignedLoad(ptr, ptrCopy, temp(), temp2, temp3);
         define(lir, ins);
         return;
     }
 
     if (ins->type() == MIRType::Int64) {
         auto* lir = new(alloc()) LWasmLoadI64(ptr);
-        if (ins->offset() || ins->accessType() == Scalar::Int64)
+        if (ins->access().offset() || ins->access().type() == Scalar::Int64)
             lir->setTemp(0, tempCopy(base, 0));
         defineInt64(lir, ins);
         return;
     }
 
     auto* lir = new(alloc()) LWasmLoad(ptr);
-    if (ins->offset())
+    if (ins->access().offset())
         lir->setTemp(0, tempCopy(base, 0));
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorARM::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation ptr = useRegisterAtStart(base);
 
-    if (ins->isUnaligned()) {
+    if (ins->access().isUnaligned()) {
         // Unaligned access expected! Revert to a byte store.
         LDefinition ptrCopy = tempCopy(base, 0);
 
         MIRType valueType = ins->value()->type();
         if (valueType == MIRType::Int64) {
             LInt64Allocation value = useInt64RegisterAtStart(ins->value());
             auto* lir = new(alloc()) LWasmUnalignedStoreI64(ptr, value, ptrCopy, temp());
             add(lir, ins);
@@ -684,26 +684,26 @@ LIRGeneratorARM::visitWasmStore(MWasmSto
         auto* lir = new(alloc()) LWasmUnalignedStore(ptr, value, ptrCopy, valueHelper);
         add(lir, ins);
         return;
     }
 
     if (ins->value()->type() == MIRType::Int64) {
         LInt64Allocation value = useInt64RegisterAtStart(ins->value());
         auto* lir = new(alloc()) LWasmStoreI64(ptr, value);
-        if (ins->offset() || ins->accessType() == Scalar::Int64)
+        if (ins->access().offset() || ins->access().type() == Scalar::Int64)
             lir->setTemp(0, tempCopy(base, 0));
         add(lir, ins);
         return;
     }
 
     LAllocation value = useRegisterAtStart(ins->value());
     auto* lir = new(alloc()) LWasmStore(ptr, value);
 
-    if (ins->offset())
+    if (ins->access().offset())
         lir->setTemp(0, tempCopy(base, 0));
 
     add(lir, ins);
 }
 
 void
 LIRGeneratorARM::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
 {
@@ -867,23 +867,23 @@ LIRGeneratorARM::visitCompareExchangeTyp
         new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, tempDef);
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
 {
-    MOZ_ASSERT(ins->accessType() < Scalar::Float32);
-    MOZ_ASSERT(ins->offset() == 0);
+    MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+    MOZ_ASSERT(ins->access().offset() == 0);
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
-    if (byteSize(ins->accessType()) != 4 && !HasLDSTREXBHD()) {
+    if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
         LAsmJSCompareExchangeCallout* lir =
             new(alloc()) LAsmJSCompareExchangeCallout(useRegisterAtStart(base),
                                                       useRegisterAtStart(ins->oldValue()),
                                                       useRegisterAtStart(ins->newValue()),
                                                       useFixed(ins->tls(), WasmTlsReg),
                                                       temp(), temp());
         defineReturn(lir, ins);
         return;
@@ -896,43 +896,43 @@ LIRGeneratorARM::visitAsmJSCompareExchan
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorARM::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
 {
     MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
-    MOZ_ASSERT(ins->accessType() < Scalar::Float32);
-    MOZ_ASSERT(ins->offset() == 0);
+    MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+    MOZ_ASSERT(ins->access().offset() == 0);
 
     const LAllocation base = useRegisterAtStart(ins->base());
     const LAllocation value = useRegisterAtStart(ins->value());
 
-    if (byteSize(ins->accessType()) < 4 && !HasLDSTREXBHD()) {
+    if (byteSize(ins->access().type()) < 4 && !HasLDSTREXBHD()) {
         // Call out on ARMv6.
         defineReturn(new(alloc()) LAsmJSAtomicExchangeCallout(base, value,
                                                               useFixed(ins->tls(), WasmTlsReg),
                                                               temp(), temp()), ins);
         return;
     }
 
     define(new(alloc()) LAsmJSAtomicExchangeHeap(base, value), ins);
 }
 
 void
 LIRGeneratorARM::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
 {
-    MOZ_ASSERT(ins->accessType() < Scalar::Float32);
-    MOZ_ASSERT(ins->offset() == 0);
+    MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+    MOZ_ASSERT(ins->access().offset() == 0);
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
-    if (byteSize(ins->accessType()) != 4 && !HasLDSTREXBHD()) {
+    if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
         LAsmJSAtomicBinopCallout* lir =
             new(alloc()) LAsmJSAtomicBinopCallout(useRegisterAtStart(base),
                                                   useRegisterAtStart(ins->value()),
                                                   useFixed(ins->tls(), WasmTlsReg),
                                                   temp(), temp());
         defineReturn(lir, ins);
         return;
     }
--- a/js/src/jit/arm/MacroAssembler-arm-inl.h
+++ b/js/src/jit/arm/MacroAssembler-arm-inl.h
@@ -2042,16 +2042,30 @@ MacroAssembler::storeFloat32x3(FloatRegi
     MOZ_CRASH("NYI");
 }
 void
 MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
 {
     MOZ_CRASH("NYI");
 }
 
+void
+MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
+{
+    // On ARMv6 the optional argument (BarrierST, etc) is ignored.
+    if (barrier == (MembarStoreStore|MembarSynchronizing))
+        ma_dsb(BarrierST);
+    else if (barrier & MembarSynchronizing)
+        ma_dsb();
+    else if (barrier == MembarStoreStore)
+        ma_dmb(BarrierST);
+    else if (barrier)
+        ma_dmb();
+}
+
 // ===============================================================
 // Clamping functions.
 
 void
 MacroAssembler::clampIntToUint8(Register reg)
 {
     // Look at (reg >> 8) if it is 0, then reg shouldn't be clamped if it is
     // <0, then we want to clamp to 0, otherwise, we wish to clamp to 255
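
With memoryBarrier() hoisted from the per-port code generators into the MacroAssembler (see the PER_SHARED_ARCH declaration in the MacroAssembler.h hunk above), both compilers can bracket an access with barriers taken straight from the descriptor, as the ARM and MIPS emitWasmLoad/emitWasmStore hunks do:

    masm.memoryBarrier(mir->access().barrierBefore());
    // ... emit the load or store itself ...
    masm.memoryBarrier(mir->access().barrierAfter());
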
--- a/js/src/jit/arm64/MacroAssembler-arm64-inl.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
@@ -1523,16 +1523,22 @@ MacroAssembler::storeFloat32x3(FloatRegi
     MOZ_CRASH("NYI");
 }
 void
 MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
 {
     MOZ_CRASH("NYI");
 }
 
+void
+MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
+{
+    MOZ_CRASH("NYI");
+}
+
 // ===============================================================
 // Clamping functions.
 
 void
 MacroAssembler::clampIntToUint8(Register reg)
 {
     vixl::UseScratchRegisterScope temps(this);
     const ARMRegister scratch32 = temps.AcquireW();
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1817,30 +1817,17 @@ CodeGeneratorMIPSShared::visitGuardClass
     masm.loadObjClass(obj, tmp);
     bailoutCmpPtr(Assembler::NotEqual, tmp, ImmPtr(guard->mir()->getClass()),
                   guard->snapshot());
 }
 
 void
 CodeGeneratorMIPSShared::visitMemoryBarrier(LMemoryBarrier* ins)
 {
-    memoryBarrier(ins->type());
-}
-
-void
-CodeGeneratorMIPSShared::memoryBarrier(MemoryBarrierBits barrier)
-{
-    if (barrier == MembarLoadLoad)
-        masm.as_sync(19);
-    else if (barrier == MembarStoreStore)
-        masm.as_sync(4);
-    else if (barrier & MembarSynchronizing)
-        masm.as_sync();
-    else if (barrier)
-        masm.as_sync(16);
+    masm.memoryBarrier(ins->type());
 }
 
 void
 CodeGeneratorMIPSShared::generateInvalidateEpilogue()
 {
     // Ensure that there is enough space in the buffer for the OsiPoint
     // patching to occur. Otherwise, we could overwrite the invalidation
     // epilogue.
@@ -1889,80 +1876,80 @@ CodeGeneratorMIPSShared::visitWasmCallI6
 }
 
 template <typename T>
 void
 CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
 
-    uint32_t offset = mir->offset();
+    uint32_t offset = mir->access().offset();
     MOZ_ASSERT(offset <= INT32_MAX);
 
     Register ptr = ToRegister(lir->ptr());
 
     // Maybe add the offset.
     if (offset) {
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
         masm.addPtr(Imm32(offset), ptrPlusOffset);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
-    unsigned byteSize = mir->byteSize();
+    unsigned byteSize = mir->access().byteSize();
     bool isSigned;
     bool isFloat = false;
 
-    switch (mir->accessType()) {
+    switch (mir->access().type()) {
       case Scalar::Int8:    isSigned = true;  break;
       case Scalar::Uint8:   isSigned = false; break;
       case Scalar::Int16:   isSigned = true;  break;
       case Scalar::Uint16:  isSigned = false; break;
       case Scalar::Int32:   isSigned = true;  break;
       case Scalar::Uint32:  isSigned = false; break;
       case Scalar::Float64: isFloat  = true;  break;
       case Scalar::Float32: isFloat  = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
-    memoryBarrier(mir->barrierBefore());
+    masm.memoryBarrier(mir->access().barrierBefore());
 
     BaseIndex address(HeapReg, ptr, TimesOne);
 
-    if (mir->isUnaligned()) {
+    if (mir->access().isUnaligned()) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (isFloat) {
             if (byteSize == 4)
                 masm.loadUnalignedFloat32(address, temp, ToFloatRegister(lir->output()));
             else
                 masm.loadUnalignedDouble(address, temp, ToFloatRegister(lir->output()));
         } else {
             masm.ma_load_unaligned(ToRegister(lir->output()), address, temp,
                                    static_cast<LoadStoreSize>(8 * byteSize),
                                    isSigned ? SignExtend : ZeroExtend);
         }
 
-        memoryBarrier(mir->barrierAfter());
+        masm.memoryBarrier(mir->access().barrierAfter());
         return;
     }
 
     if (isFloat) {
         if (byteSize == 4)
             masm.loadFloat32(address, ToFloatRegister(lir->output()));
         else
             masm.loadDouble(address, ToFloatRegister(lir->output()));
     } else {
         masm.ma_load(ToRegister(lir->output()), address,
                      static_cast<LoadStoreSize>(8 * byteSize),
                      isSigned ? SignExtend : ZeroExtend);
     }
 
-    memoryBarrier(mir->barrierAfter());
+    masm.memoryBarrier(mir->access().barrierAfter());
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmLoad(LWasmLoad* lir)
 {
     emitWasmLoad(lir);
 }
 
@@ -1973,81 +1960,81 @@ CodeGeneratorMIPSShared::visitWasmUnalig
 }
 
 template <typename T>
 void
 CodeGeneratorMIPSShared::emitWasmStore(T* lir)
 {
     const MWasmStore* mir = lir->mir();
 
-    uint32_t offset = mir->offset();
+    uint32_t offset = mir->access().offset();
     MOZ_ASSERT(offset <= INT32_MAX);
 
     Register ptr = ToRegister(lir->ptr());
 
     // Maybe add the offset.
     if (offset) {
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
         masm.addPtr(Imm32(offset), ptrPlusOffset);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
-    unsigned byteSize = mir->byteSize();
+    unsigned byteSize = mir->access().byteSize();
     bool isSigned;
     bool isFloat = false;
 
-    switch (mir->accessType()) {
+    switch (mir->access().type()) {
       case Scalar::Int8:    isSigned = true;  break;
       case Scalar::Uint8:   isSigned = false; break;
       case Scalar::Int16:   isSigned = true;  break;
       case Scalar::Uint16:  isSigned = false; break;
       case Scalar::Int32:   isSigned = true;  break;
       case Scalar::Uint32:  isSigned = false; break;
       case Scalar::Int64:   isSigned = true;  break;
       case Scalar::Float64: isFloat  = true;  break;
       case Scalar::Float32: isFloat  = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
-    memoryBarrier(mir->barrierBefore());
+    masm.memoryBarrier(mir->access().barrierBefore());
 
     BaseIndex address(HeapReg, ptr, TimesOne);
 
-    if (mir->isUnaligned()) {
+    if (mir->access().isUnaligned()) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (isFloat) {
             if (byteSize == 4)
                 masm.storeUnalignedFloat32(ToFloatRegister(lir->value()), temp, address);
             else
                 masm.storeUnalignedDouble(ToFloatRegister(lir->value()), temp, address);
         } else {
             masm.ma_store_unaligned(ToRegister(lir->value()), address, temp,
                                     static_cast<LoadStoreSize>(8 * byteSize),
                                     isSigned ? SignExtend : ZeroExtend);
         }
 
-        memoryBarrier(mir->barrierAfter());
+        masm.memoryBarrier(mir->access().barrierAfter());
         return;
     }
 
     if (isFloat) {
         if (byteSize == 4)
             masm.storeFloat32(ToFloatRegister(lir->value()), address);
         else
             masm.storeDouble(ToFloatRegister(lir->value()), address);
     } else {
         masm.ma_store(ToRegister(lir->value()), address,
                       static_cast<LoadStoreSize>(8 * byteSize),
                       isSigned ? SignExtend : ZeroExtend);
     }
 
-    memoryBarrier(mir->barrierAfter());
+    masm.memoryBarrier(mir->access().barrierAfter());
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmStore(LWasmStore* lir)
 {
     emitWasmStore(lir);
 }
 
@@ -2062,17 +2049,17 @@ CodeGeneratorMIPSShared::visitAsmJSLoadH
 {
     const MAsmJSLoadHeap* mir = ins->mir();
     const LAllocation* ptr = ins->ptr();
     const LDefinition* out = ins->output();
 
     bool isSigned;
     int size;
     bool isFloat = false;
-    switch (mir->accessType()) {
+    switch (mir->access().type()) {
       case Scalar::Int8:    isSigned = true;  size =  8; break;
       case Scalar::Uint8:   isSigned = false; size =  8; break;
       case Scalar::Int16:   isSigned = true;  size = 16; break;
       case Scalar::Uint16:  isSigned = false; size = 16; break;
       case Scalar::Int32:   isSigned = true;  size = 32; break;
       case Scalar::Uint32:  isSigned = false; size = 32; break;
       case Scalar::Float64: isFloat  = true;  size = 64; break;
       case Scalar::Float32: isFloat  = true;  size = 32; break;
@@ -2149,17 +2136,17 @@ CodeGeneratorMIPSShared::visitAsmJSStore
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     const LAllocation* value = ins->value();
     const LAllocation* ptr = ins->ptr();
 
     bool isSigned;
     int size;
     bool isFloat = false;
-    switch (mir->accessType()) {
+    switch (mir->access().type()) {
       case Scalar::Int8:    isSigned = true;  size =  8; break;
       case Scalar::Uint8:   isSigned = false; size =  8; break;
       case Scalar::Int16:   isSigned = true;  size = 16; break;
       case Scalar::Uint16:  isSigned = false; size = 16; break;
       case Scalar::Int32:   isSigned = true;  size = 32; break;
       case Scalar::Uint32:  isSigned = false; size = 32; break;
       case Scalar::Float64: isFloat  = true;  size = 64; break;
       case Scalar::Float32: isFloat  = true;  size = 32; break;
@@ -2222,17 +2209,17 @@ CodeGeneratorMIPSShared::visitAsmJSStore
     masm.bind(&outOfRange);
     masm.append(wasm::BoundsCheck(bo.getOffset()));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
-    Scalar::Type vt = mir->accessType();
+    Scalar::Type vt = mir->access().type();
     const LAllocation* ptr = ins->ptr();
     Register ptrReg = ToRegister(ptr);
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
     Register valueTemp = ToRegister(ins->valueTemp());
@@ -2244,17 +2231,17 @@ CodeGeneratorMIPSShared::visitAsmJSCompa
                                         valueTemp, offsetTemp, maskTemp,
                                         ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
-    Scalar::Type vt = mir->accessType();
+    Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     Register valueTemp = ToRegister(ins->valueTemp());
     Register offsetTemp = ToRegister(ins->offsetTemp());
     Register maskTemp = ToRegister(ins->maskTemp());
@@ -2266,17 +2253,17 @@ CodeGeneratorMIPSShared::visitAsmJSAtomi
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
     MOZ_ASSERT(ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     MAsmJSAtomicBinopHeap* mir = ins->mir();
-    Scalar::Type vt = mir->accessType();
+    Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register flagTemp = ToRegister(ins->flagTemp());
     Register valueTemp = ToRegister(ins->valueTemp());
     Register offsetTemp = ToRegister(ins->offsetTemp());
     Register maskTemp = ToRegister(ins->maskTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
@@ -2296,17 +2283,17 @@ CodeGeneratorMIPSShared::visitAsmJSAtomi
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MOZ_ASSERT(!ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     MAsmJSAtomicBinopHeap* mir = ins->mir();
-    Scalar::Type vt = mir->accessType();
+    Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register flagTemp = ToRegister(ins->flagTemp());
     Register valueTemp = ToRegister(ins->valueTemp());
     Register offsetTemp = ToRegister(ins->offsetTemp());
     Register maskTemp = ToRegister(ins->maskTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
@@ -193,17 +193,16 @@ class CodeGeneratorMIPSShared : public C
     // Out of line visitors.
     virtual void visitOutOfLineBailout(OutOfLineBailout* ool) = 0;
     void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
     void visitCopySignD(LCopySignD* ins);
     void visitCopySignF(LCopySignF* ins);
 
   protected:
     virtual ValueOperand ToOutValue(LInstruction* ins) = 0;
-    void memoryBarrier(MemoryBarrierBits barrier);
 
   public:
     CodeGeneratorMIPSShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
 
     void visitValue(LValue* value);
     void visitDouble(LDouble* ins);
     void visitFloat32(LFloat32* ins);
 
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -319,92 +319,92 @@ LIRGeneratorMIPSShared::visitAsmJSNeg(MA
 void
 LIRGeneratorMIPSShared::visitWasmLoad(MWasmLoad* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation ptr = useRegisterAtStart(base);
 
-    if (ins->isUnaligned()) {
+    if (ins->access().isUnaligned()) {
         if (ins->type() == MIRType::Int64) {
             auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, temp());
-            if (ins->offset())
+            if (ins->access().offset())
                 lir->setTemp(0, tempCopy(base, 0));
 
             defineInt64(lir, ins);
             return;
         }
 
         auto* lir = new(alloc()) LWasmUnalignedLoad(ptr, temp());
-        if (ins->offset())
+        if (ins->access().offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         define(lir, ins);
         return;
     }
 
     if (ins->type() == MIRType::Int64) {
         auto* lir = new(alloc()) LWasmLoadI64(ptr);
-        if (ins->offset())
+        if (ins->access().offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         defineInt64(lir, ins);
         return;
     }
 
     auto* lir = new(alloc()) LWasmLoad(ptr);
-    if (ins->offset())
+    if (ins->access().offset())
         lir->setTemp(0, tempCopy(base, 0));
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     MDefinition* value = ins->value();
     LAllocation baseAlloc = useRegisterAtStart(base);
 
-    if (ins->isUnaligned()) {
+    if (ins->access().isUnaligned()) {
         if (ins->type() == MIRType::Int64) {
             LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
             auto* lir = new(alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
-            if (ins->offset())
+            if (ins->access().offset())
                 lir->setTemp(0, tempCopy(base, 0));
 
             add(lir, ins);
             return;
         }
 
         LAllocation valueAlloc = useRegisterAtStart(value);
         auto* lir = new(alloc()) LWasmUnalignedStore(baseAlloc, valueAlloc, temp());
-        if (ins->offset())
+        if (ins->access().offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         add(lir, ins);
         return;
     }
 
     if (ins->type() == MIRType::Int64) {
         LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
         auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
-        if (ins->offset())
+        if (ins->access().offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         add(lir, ins);
         return;
     }
 
     LAllocation valueAlloc = useRegisterAtStart(value);
     auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
-    if (ins->offset())
+    if (ins->access().offset())
         lir->setTemp(0, tempCopy(base, 0));
 
     add(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAsmSelect(MAsmSelect* ins)
 {
@@ -470,17 +470,17 @@ LIRGeneratorMIPSShared::visitAsmJSUnsign
     MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
     LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
 {
-    MOZ_ASSERT(ins->offset() == 0);
+    MOZ_ASSERT(ins->access().offset() == 0);
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
     LAllocation baseAlloc;
 
     // For MIPS it is best to keep the 'base' in a register if a bounds check
     // is needed.
     if (base->isConstant() && !ins->needsBoundsCheck()) {
@@ -491,17 +491,17 @@ LIRGeneratorMIPSShared::visitAsmJSLoadHe
         baseAlloc = useRegisterAtStart(base);
 
     define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
 {
-    MOZ_ASSERT(ins->offset() == 0);
+    MOZ_ASSERT(ins->access().offset() == 0);
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
     LAllocation baseAlloc;
 
     if (base->isConstant() && !ins->needsBoundsCheck()) {
         MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
         baseAlloc = LAllocation(base->toConstant());
@@ -586,18 +586,18 @@ LIRGeneratorMIPSShared::visitAtomicExcha
                                                       /* maskTemp= */ temp());
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
 {
-    MOZ_ASSERT(ins->accessType() < Scalar::Float32);
-    MOZ_ASSERT(ins->offset() == 0);
+    MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+    MOZ_ASSERT(ins->access().offset() == 0);
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAsmJSCompareExchangeHeap* lir =
         new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base),
                                                useRegister(ins->oldValue()),
                                                useRegister(ins->newValue()),
@@ -607,17 +607,17 @@ LIRGeneratorMIPSShared::visitAsmJSCompar
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
 {
     MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
-    MOZ_ASSERT(ins->offset() == 0);
+    MOZ_ASSERT(ins->access().offset() == 0);
 
     const LAllocation base = useRegister(ins->base());
     const LAllocation value = useRegister(ins->value());
 
     // The output may not be used but will be clobbered regardless,
     // so ignore the case where we're not using the value and just
     // use the output register as a temp.
 
@@ -627,18 +627,18 @@ LIRGeneratorMIPSShared::visitAsmJSAtomic
                                               /* offsetTemp= */ temp(),
                                               /* maskTemp= */ temp());
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
 {
-    MOZ_ASSERT(ins->accessType() < Scalar::Float32);
-    MOZ_ASSERT(ins->offset() == 0);
+    MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+    MOZ_ASSERT(ins->access().offset() == 0);
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     if (!ins->hasUses()) {
         LAsmJSAtomicBinopHeapForEffect* lir =
             new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base),
                                                         useRegister(ins->value()),
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -985,16 +985,29 @@ MacroAssembler::storeFloat32x3(FloatRegi
     MOZ_CRASH("NYI");
 }
 void
 MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
 {
     MOZ_CRASH("NYI");
 }
 
+void
+MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
+{
+    if (barrier == MembarLoadLoad)
+        as_sync(19);
+    else if (barrier == MembarStoreStore)
+        as_sync(4);
+    else if (barrier & MembarSynchronizing)
+        as_sync();
+    else if (barrier)
+        as_sync(16);
+}
+
 // ===============================================================
 // Clamping functions.
 
 void
 MacroAssembler::clampIntToUint8(Register reg)
 {
     // If reg is < 0, then we want to clamp to 0.
     as_slti(ScratchRegister, reg, 0);
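
The magic operands in the hoisted MIPS helper are the standard SYNC
"stype" encodings from the MIPS32 ISA (a reading of the branches above;
a bare as_sync() is stype 0, the full completion barrier):

    masm.memoryBarrier(MembarLoadLoad);       // sync 0x13 (SYNC_RMB)
    masm.memoryBarrier(MembarStoreStore);     // sync 0x04 (SYNC_WMB)
    masm.memoryBarrier(MembarSynchronizing);  // sync 0x00 (full completion)
    masm.memoryBarrier(MembarStoreLoad);      // sync 0x10 (SYNC_MB)
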
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -455,46 +455,46 @@ CodeGeneratorMIPS::visitUDivOrModI64(LUD
 
 template <typename T>
 void
 CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
     Register64 output = ToOutRegister64(lir);
 
-    uint32_t offset = mir->offset();
+    uint32_t offset = mir->access().offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptr());
 
     if (offset) {
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
         masm.addPtr(Imm32(offset), ptrPlusOffset);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
-    unsigned byteSize = mir->byteSize();
+    unsigned byteSize = mir->access().byteSize();
     bool isSigned;
-    switch (mir->accessType()) {
+    switch (mir->access().type()) {
         case Scalar::Int8:   isSigned = true; break;
         case Scalar::Uint8:  isSigned = false; break;
         case Scalar::Int16:  isSigned = true; break;
         case Scalar::Uint16: isSigned = false; break;
         case Scalar::Int32:  isSigned = true; break;
         case Scalar::Uint32: isSigned = false; break;
         case Scalar::Int64:  isSigned = true; break;
         default: MOZ_CRASH("unexpected array type");
     }
 
-    memoryBarrier(mir->barrierBefore());
+    masm.memoryBarrier(mir->access().barrierBefore());
 
     MOZ_ASSERT(INT64LOW_OFFSET == 0);
-    if (mir->isUnaligned()) {
+    if (mir->access().isUnaligned()) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (byteSize <= 4) {
             masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
                                    temp, static_cast<LoadStoreSize>(8 * byteSize),
                                    isSigned ? SignExtend : ZeroExtend);
             if (!isSigned)
                 masm.move32(Imm32(0), output.high);
@@ -520,17 +520,17 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* li
             masm.ma_sra(output.high, output.low, Imm32(31));
     } else {
         ScratchRegisterScope scratch(masm);
         masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
         masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
         masm.ma_load(output.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
     }
 
-    memoryBarrier(mir->barrierAfter());
+    masm.memoryBarrier(mir->access().barrierAfter());
 }
 
 void
 CodeGeneratorMIPS::visitWasmLoadI64(LWasmLoadI64* lir)
 {
     emitWasmLoadI64(lir);
 }
 
@@ -542,46 +542,46 @@ CodeGeneratorMIPS::visitWasmUnalignedLoa
 
 template <typename T>
 void
 CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
 {
     const MWasmStore* mir = lir->mir();
     Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
 
-    uint32_t offset = mir->offset();
+    uint32_t offset = mir->access().offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptr());
 
     if (offset) {
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
         masm.addPtr(Imm32(offset), ptrPlusOffset);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
-    unsigned byteSize = mir->byteSize();
+    unsigned byteSize = mir->access().byteSize();
     bool isSigned;
-    switch (mir->accessType()) {
+    switch (mir->access().type()) {
         case Scalar::Int8:   isSigned = true; break;
         case Scalar::Uint8:  isSigned = false; break;
         case Scalar::Int16:  isSigned = true; break;
         case Scalar::Uint16: isSigned = false; break;
         case Scalar::Int32:  isSigned = true; break;
         case Scalar::Uint32: isSigned = false; break;
         case Scalar::Int64:  isSigned = true; break;
         default: MOZ_CRASH("unexpected array type");
     }
 
-    memoryBarrier(mir->barrierBefore());
+    masm.memoryBarrier(mir->access().barrierBefore());
 
     MOZ_ASSERT(INT64LOW_OFFSET == 0);
-    if (mir->isUnaligned()) {
+    if (mir->access().isUnaligned()) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (byteSize <= 4) {
             masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
                                     temp, static_cast<LoadStoreSize>(8 * byteSize),
                                     isSigned ? SignExtend : ZeroExtend);
         } else {
             ScratchRegisterScope scratch(masm);
@@ -599,17 +599,17 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* l
                       static_cast<LoadStoreSize>(8 * byteSize));
     } else {
         ScratchRegisterScope scratch(masm);
         masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
         masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
         masm.ma_store(value.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
     }
 
-    memoryBarrier(mir->barrierAfter());
+    masm.memoryBarrier(mir->access().barrierAfter());
 }
 
 void
 CodeGeneratorMIPS::visitWasmStoreI64(LWasmStoreI64* lir)
 {
     emitWasmStoreI64(lir);
 }
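
Since mips32 has no 64-bit integer registers, the aligned paths above
split every Int64 access into two word-sized ones; on a little-endian
build (where INT64LOW_OFFSET == 0 and INT64HIGH_OFFSET == 4) the load
half boils down to:

    ScratchRegisterScope scratch(masm);
    masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
    masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));  // ptr + 4
    masm.ma_load(output.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);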
 
--- a/js/src/jit/mips64/CodeGenerator-mips64.cpp
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -413,59 +413,59 @@ CodeGeneratorMIPS64::visitUDivOrModI64(L
 template <typename T>
 void
 CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
 
     MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
 
-    uint32_t offset = mir->offset();
+    uint32_t offset = mir->access().offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptr());
 
     // Maybe add the offset.
     if (offset) {
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
         masm.addPtr(Imm32(offset), ptrPlusOffset);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
-    unsigned byteSize = mir->byteSize();
+    unsigned byteSize = mir->access().byteSize();
     bool isSigned;
 
-    switch (mir->accessType()) {
+    switch (mir->access().type()) {
       case Scalar::Int8:    isSigned = true;  break;
       case Scalar::Uint8:   isSigned = false; break;
       case Scalar::Int16:   isSigned = true;  break;
       case Scalar::Uint16:  isSigned = false; break;
       case Scalar::Int32:   isSigned = true;  break;
       case Scalar::Uint32:  isSigned = false; break;
       case Scalar::Int64:   isSigned = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
-    memoryBarrier(mir->barrierBefore());
+    masm.memoryBarrier(mir->access().barrierBefore());
 
-    if (mir->isUnaligned()) {
+    if (mir->access().isUnaligned()) {
         Register temp = ToRegister(lir->getTemp(1));
 
         masm.ma_load_unaligned(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
                                temp, static_cast<LoadStoreSize>(8 * byteSize),
                                isSigned ? SignExtend : ZeroExtend);
         return;
     }
 
     masm.ma_load(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
                  static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
 
-    memoryBarrier(mir->barrierAfter());
+    masm.memoryBarrier(mir->access().barrierAfter());
 }
 
 void
 CodeGeneratorMIPS64::visitWasmLoadI64(LWasmLoadI64* lir)
 {
     emitWasmLoadI64(lir);
 }
 
@@ -478,58 +478,58 @@ CodeGeneratorMIPS64::visitWasmUnalignedL
 template <typename T>
 void
 CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
 {
     const MWasmStore* mir = lir->mir();
 
     MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
 
-    uint32_t offset = mir->offset();
+    uint32_t offset = mir->access().offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptr());
 
     // Maybe add the offset.
     if (offset) {
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
         masm.addPtr(Imm32(offset), ptrPlusOffset);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
-    unsigned byteSize = mir->byteSize();
+    unsigned byteSize = mir->access().byteSize();
     bool isSigned;
 
-    switch (mir->accessType()) {
+    switch (mir->access().type()) {
       case Scalar::Int8:    isSigned = true;  break;
       case Scalar::Uint8:   isSigned = false; break;
       case Scalar::Int16:   isSigned = true;  break;
       case Scalar::Uint16:  isSigned = false; break;
       case Scalar::Int32:   isSigned = true;  break;
       case Scalar::Uint32:  isSigned = false; break;
       case Scalar::Int64:   isSigned = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
-    memoryBarrier(mir->barrierBefore());
+    masm.memoryBarrier(mir->access().barrierBefore());
 
-    if (mir->isUnaligned()) {
+    if (mir->access().isUnaligned()) {
         Register temp = ToRegister(lir->getTemp(1));
 
         masm.ma_store_unaligned(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
                                 temp, static_cast<LoadStoreSize>(8 * byteSize),
                                 isSigned ? SignExtend : ZeroExtend);
         return;
     }
     masm.ma_store(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
                   static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
 
-    memoryBarrier(mir->barrierAfter());
+    masm.memoryBarrier(mir->access().barrierAfter());
 }
 
 void
 CodeGeneratorMIPS64::visitWasmStoreI64(LWasmStoreI64* lir)
 {
     emitWasmStoreI64(lir);
 }
 
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -86,18 +86,16 @@ MacroAssemblerMIPS64Compat::convertInt64
 {
     as_dmtc1(src, dest);
     as_cvtsl(dest, dest);
 }
 
 void
 MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register src, FloatRegister dest)
 {
-    MOZ_ASSERT(temp == Register::Invalid());
-
     Label positive, done;
     ma_b(src, src, &positive, NotSigned, ShortJump);
 
     MOZ_ASSERT(src != ScratchRegister);
     MOZ_ASSERT(src != SecondScratchReg);
 
     ma_and(ScratchRegister, src, Imm32(1));
     ma_dsrl(SecondScratchReg, src, Imm32(1));
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -7,16 +7,17 @@
 #ifndef jit_shared_Assembler_shared_h
 #define jit_shared_Assembler_shared_h
 
 #include "mozilla/PodOperations.h"
 
 #include <limits.h>
 
 #include "asmjs/WasmTypes.h"
+#include "jit/AtomicOp.h"
 #include "jit/JitAllocPolicy.h"
 #include "jit/Label.h"
 #include "jit/Registers.h"
 #include "jit/RegisterSets.h"
 #include "vm/HelperThreads.h"
 
 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
     defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
@@ -691,16 +692,60 @@ struct AsmJSAbsoluteAddress
     CodeOffset patchAt;
     wasm::SymbolicAddress target;
 };
 
 } // namespace jit
 
 namespace wasm {
 
+class MemoryAccessDesc
+{
+    uint32_t offset_;
+    uint32_t align_;
+    Scalar::Type type_;
+    unsigned numSimdElems_;
+    jit::MemoryBarrierBits barrierBefore_;
+    jit::MemoryBarrierBits barrierAfter_;
+
+  public:
+    explicit MemoryAccessDesc(Scalar::Type type, uint32_t align, uint32_t offset,
+                              unsigned numSimdElems = 0,
+                              jit::MemoryBarrierBits barrierBefore = jit::MembarNobits,
+                              jit::MemoryBarrierBits barrierAfter = jit::MembarNobits)
+      : offset_(offset),
+        align_(align),
+        type_(type),
+        numSimdElems_(numSimdElems),
+        barrierBefore_(barrierBefore),
+        barrierAfter_(barrierAfter)
+    {
+        MOZ_ASSERT_IF(!Scalar::isSimdType(type), numSimdElems == 0);
+        MOZ_ASSERT(numSimdElems <= jit::ScalarTypeToLength(type));
+        MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
+    }
+
+    uint32_t offset() const { return offset_; }
+    uint32_t align() const { return align_; }
+    Scalar::Type type() const { return type_; }
+    unsigned byteSize() const {
+        return Scalar::isSimdType(type())
+               ? Scalar::scalarByteSize(type()) * numSimdElems()
+               : Scalar::byteSize(type());
+    }
+    unsigned numSimdElems() const { MOZ_ASSERT(isSimd()); return numSimdElems_; }
+    jit::MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
+    jit::MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
+    bool isAtomic() const { return (barrierBefore_ | barrierAfter_) != jit::MembarNobits; }
+    bool isSimd() const { return Scalar::isSimdType(type_); }
+    bool isUnaligned() const { return align() && align() < byteSize(); }
+
+    void clearOffset() { offset_ = 0; }
+};
+
 // Summarizes a global access for a mutable (in asm.js) or immutable value (in
 // asm.js or the MVP) that needs to get patched later.
 
 struct GlobalAccess
 {
     GlobalAccess(jit::CodeOffset patchAt, unsigned globalDataOffset)
       : patchAt(patchAt), globalDataOffset(globalDataOffset)
     {}
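
To make the new descriptor concrete, a minimal usage sketch (the values
here are hypothetical; the constructor and accessors are the ones
defined above):

    // An atomic 4-byte Int32 access at offset 16, bracketed by barriers.
    wasm::MemoryAccessDesc access(Scalar::Int32, /* align = */ 4, /* offset = */ 16,
                                  /* numSimdElems = */ 0,
                                  jit::MembarStoreStore, jit::MembarStoreLoad);
    MOZ_ASSERT(access.isAtomic());      // any barrier bits imply an atomic access
    MOZ_ASSERT(!access.isSimd());       // Int32 is not a SIMD type
    MOZ_ASSERT(!access.isUnaligned());  // align (4) is not below byteSize() (4)
    MOZ_ASSERT(access.byteSize() == 4);
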
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -408,30 +408,27 @@ CodeGeneratorX64::visitWasmCall(LWasmCal
 
 void
 CodeGeneratorX64::visitWasmCallI64(LWasmCallI64* ins)
 {
     emitWasmCallBase(ins);
 }
 
 void
-CodeGeneratorX64::memoryBarrier(MemoryBarrierBits barrier)
-{
-    if (barrier & MembarStoreLoad)
-        masm.storeLoadFence();
-}
-
-void
-CodeGeneratorX64::wasmStore(Scalar::Type type, unsigned numSimdElems, const LAllocation* value,
+CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access, const LAllocation* value,
                             Operand dstAddr)
 {
     if (value->isConstant()) {
+        MOZ_ASSERT(!access.isSimd());
+
+        masm.memoryBarrier(access.barrierBefore());
+
         const MConstant* mir = value->toConstant();
         Imm32 cst = Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64());
-        switch (type) {
+        switch (access.type()) {
           case Scalar::Int8:
           case Scalar::Uint8:
             masm.movb(cst, dstAddr);
             break;
           case Scalar::Int16:
           case Scalar::Uint16:
             masm.movw(cst, dstAddr);
             break;
@@ -445,41 +442,41 @@ CodeGeneratorX64::wasmStore(Scalar::Type
           case Scalar::Float32x4:
           case Scalar::Int8x16:
           case Scalar::Int16x8:
           case Scalar::Int32x4:
           case Scalar::Uint8Clamped:
           case Scalar::MaxTypedArrayViewType:
             MOZ_CRASH("unexpected array type");
         }
+
+        masm.memoryBarrier(access.barrierAfter());
     } else {
-        masm.wasmStore(type, numSimdElems, ToAnyRegister(value), dstAddr);
+        masm.wasmStore(access, ToAnyRegister(value), dstAddr);
     }
 }
 
 template <typename T>
 void
 CodeGeneratorX64::emitWasmLoad(T* ins)
 {
     const MWasmLoad* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
+
+    uint32_t offset = mir->access().offset();
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     const LAllocation* ptr = ins->ptr();
     Operand srcAddr = ptr->isBogus()
-                      ? Operand(HeapReg, mir->offset())
-                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
-
-    memoryBarrier(mir->barrierBefore());
+                      ? Operand(HeapReg, offset)
+                      : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);
 
     if (mir->type() == MIRType::Int64)
-        masm.wasmLoadI64(mir->accessType(), srcAddr, ToOutRegister64(ins));
+        masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
     else
-        masm.wasmLoad(mir->accessType(), mir->numSimdElems(), srcAddr, ToAnyRegister(ins->output()));
-
-    memoryBarrier(mir->barrierAfter());
+        masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorX64::visitWasmLoad(LWasmLoad* ins)
 {
     emitWasmLoad(ins);
 }
 
@@ -489,27 +486,27 @@ CodeGeneratorX64::visitWasmLoadI64(LWasm
     emitWasmLoad(ins);
 }
 
 template <typename T>
 void
 CodeGeneratorX64::emitWasmStore(T* ins)
 {
     const MWasmStore* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
+
+    uint32_t offset = mir->access().offset();
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     const LAllocation* value = ins->getOperand(ins->ValueIndex);
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
-                      ? Operand(HeapReg, mir->offset())
-                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
+                      ? Operand(HeapReg, offset)
+                      : Operand(HeapReg, ToRegister(ptr), TimesOne, offset);
 
-    memoryBarrier(mir->barrierBefore());
-    wasmStore(mir->accessType(), mir->numSimdElems(), value, dstAddr);
-    memoryBarrier(mir->barrierAfter());
+    wasmStore(mir->access(), value, dstAddr);
 }
 
 void
 CodeGeneratorX64::visitWasmStore(LWasmStore* ins)
 {
     emitWasmStore(ins);
 }
 
@@ -523,115 +520,115 @@ void
 CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
 {
     const MAsmJSLoadHeap* mir = ins->mir();
     MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
 
     const LAllocation* ptr = ins->ptr();
     const LDefinition* out = ins->output();
 
-    Scalar::Type accessType = mir->accessType();
+    Scalar::Type accessType = mir->access().type();
     MOZ_ASSERT(!Scalar::isSimdType(accessType));
 
     Operand srcAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
     uint32_t before = masm.size();
-    masm.wasmLoad(accessType, 0, srcAddr, ToAnyRegister(out));
+    masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(out));
     uint32_t after = masm.size();
 
     verifyLoadDisassembly(before, after, accessType, srcAddr, *out->output());
     masm.append(wasm::MemoryAccess(before));
 }
 
 void
 CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
 
     const LAllocation* ptr = ins->ptr();
     const LAllocation* value = ins->value();
 
-    Scalar::Type accessType = mir->accessType();
+    Scalar::Type accessType = mir->access().type();
     MOZ_ASSERT(!Scalar::isSimdType(accessType));
 
     canonicalizeIfDeterministic(accessType, value);
 
     Operand dstAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
     uint32_t before = masm.size();
-    wasmStore(accessType, 0, value, dstAddr);
+    wasmStore(mir->access(), value, dstAddr);
     uint32_t after = masm.size();
 
     verifyStoreDisassembly(before, after, accessType, dstAddr, *value);
     masm.append(wasm::MemoryAccess(before));
 }
 
 void
 CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
 
     Register ptr = ToRegister(ins->ptr());
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    Scalar::Type accessType = mir->accessType();
+    Scalar::Type accessType = mir->access().type();
     BaseIndex srcAddr(HeapReg, ptr, TimesOne);
 
     masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                         srcAddr,
                                         oldval,
                                         newval,
                                         InvalidReg,
                                         ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
 
     Register ptr = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    Scalar::Type accessType = mir->accessType();
+    Scalar::Type accessType = mir->access().type();
     MOZ_ASSERT(accessType <= Scalar::Uint32);
 
     BaseIndex srcAddr(HeapReg, ptr, TimesOne);
 
     masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        srcAddr,
                                        value,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
     MAsmJSAtomicBinopHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
     MOZ_ASSERT(mir->hasUses());
 
     Register ptr = ToRegister(ins->ptr());
     const LAllocation* value = ins->value();
     Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
     AnyRegister output = ToAnyRegister(ins->output());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    Scalar::Type accessType = mir->accessType();
+    Scalar::Type accessType = mir->access().type();
     if (accessType == Scalar::Uint32)
         accessType = Scalar::Int32;
 
     AtomicOp op = mir->operation();
     BaseIndex srcAddr(HeapReg, ptr, TimesOne);
 
     if (value->isConstant()) {
         atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
@@ -641,24 +638,24 @@ CodeGeneratorX64::visitAsmJSAtomicBinopH
                                    output);
     }
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MAsmJSAtomicBinopHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
     MOZ_ASSERT(!mir->hasUses());
 
     Register ptr = ToRegister(ins->ptr());
     const LAllocation* value = ins->value();
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    Scalar::Type accessType = mir->accessType();
+    Scalar::Type accessType = mir->access().type();
     AtomicOp op = mir->operation();
 
     BaseIndex srcAddr(HeapReg, ptr, TimesOne);
 
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
     else
         atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
--- a/js/src/jit/x64/CodeGenerator-x64.h
+++ b/js/src/jit/x64/CodeGenerator-x64.h
@@ -21,19 +21,18 @@ class CodeGeneratorX64 : public CodeGene
   protected:
     Operand ToOperand64(const LInt64Allocation& a);
     ValueOperand ToValue(LInstruction* ins, size_t pos);
     ValueOperand ToOutValue(LInstruction* ins);
     ValueOperand ToTempValue(LInstruction* ins, size_t pos);
 
     void storeUnboxedValue(const LAllocation* value, MIRType valueType,
                            Operand dest, MIRType slotType);
-    void memoryBarrier(MemoryBarrierBits barrier);
 
-    void wasmStore(Scalar::Type type, unsigned numSimdElems, const LAllocation* value, Operand dstAddr);
+    void wasmStore(const wasm::MemoryAccessDesc& access, const LAllocation* value, Operand dstAddr);
     template <typename T> void emitWasmLoad(T* ins);
     template <typename T> void emitWasmStore(T* ins);
 
   public:
     CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
 
   public:
     void visitValue(LValue* value);
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -211,17 +211,17 @@ LIRGeneratorX64::visitWasmLoad(MWasmLoad
 void
 LIRGeneratorX64::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     MDefinition* value = ins->value();
     LAllocation valueAlloc;
-    switch (ins->accessType()) {
+    switch (ins->access().type()) {
       case Scalar::Int8:
       case Scalar::Uint8:
       case Scalar::Int16:
       case Scalar::Uint16:
       case Scalar::Int32:
       case Scalar::Uint32:
         valueAlloc = useRegisterOrConstantAtStart(value);
         break;
@@ -261,17 +261,17 @@ LIRGeneratorX64::visitAsmJSLoadHeap(MAsm
 
 void
 LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAsmJSStoreHeap* lir = nullptr;  // initialize to silence GCC warning
-    switch (ins->accessType()) {
+    switch (ins->access().type()) {
       case Scalar::Int8:
       case Scalar::Uint8:
       case Scalar::Int16:
       case Scalar::Uint16:
       case Scalar::Int32:
       case Scalar::Uint32:
         lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
                                            useRegisterOrConstantAtStart(ins->value()));
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -656,19 +656,21 @@ MacroAssembler::storeUnboxedValue(const 
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                   const BaseIndex& dest, MIRType slotType);
 
 // ========================================================================
 // wasm support
 
 void
-MacroAssembler::wasmLoad(Scalar::Type type, unsigned numSimdElems, Operand srcAddr, AnyRegister out)
+MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out)
 {
-    switch (type) {
+    memoryBarrier(access.barrierBefore());
+
+    switch (access.type()) {
       case Scalar::Int8:
         movsbl(srcAddr, out.gpr());
         break;
       case Scalar::Uint8:
         movzbl(srcAddr, out.gpr());
         break;
       case Scalar::Int16:
         movswl(srcAddr, out.gpr());
@@ -682,55 +684,60 @@ MacroAssembler::wasmLoad(Scalar::Type ty
         break;
       case Scalar::Float32:
         loadFloat32(srcAddr, out.fpu());
         break;
       case Scalar::Float64:
         loadDouble(srcAddr, out.fpu());
         break;
       case Scalar::Float32x4:
-        switch (numSimdElems) {
+        switch (access.numSimdElems()) {
           // In memory-to-register mode, movss zeroes out the high lanes.
           case 1: loadFloat32(srcAddr, out.fpu()); break;
           // See comment above, which also applies to movsd.
           case 2: loadDouble(srcAddr, out.fpu()); break;
           case 4: loadUnalignedSimd128Float(srcAddr, out.fpu()); break;
           default: MOZ_CRASH("unexpected size for partial load");
         }
         break;
       case Scalar::Int32x4:
-        switch (numSimdElems) {
+        switch (access.numSimdElems()) {
           // In memory-to-register mode, movd zeroes out the high lanes.
           case 1: vmovd(srcAddr, out.fpu()); break;
           // See comment above, which also applies to movq.
           case 2: vmovq(srcAddr, out.fpu()); break;
           case 4: loadUnalignedSimd128Int(srcAddr, out.fpu()); break;
           default: MOZ_CRASH("unexpected size for partial load");
         }
         break;
       case Scalar::Int8x16:
-        MOZ_ASSERT(numSimdElems == 16, "unexpected partial load");
+        MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial load");
         loadUnalignedSimd128Int(srcAddr, out.fpu());
         break;
       case Scalar::Int16x8:
-        MOZ_ASSERT(numSimdElems == 8, "unexpected partial load");
+        MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial load");
         loadUnalignedSimd128Int(srcAddr, out.fpu());
         break;
       case Scalar::Int64:
         MOZ_CRASH("int64 loads must use load64");
       case Scalar::Uint8Clamped:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
+
+    memoryBarrier(access.barrierAfter());
 }
 
 void
-MacroAssembler::wasmLoadI64(Scalar::Type type, Operand srcAddr, Register64 out)
+MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out)
 {
-    switch (type) {
+    MOZ_ASSERT(!access.isAtomic());
+    MOZ_ASSERT(!access.isSimd());
+
+    switch (access.type()) {
       case Scalar::Int8:
         movsbq(srcAddr, out.reg);
         break;
       case Scalar::Uint8:
         movzbq(srcAddr, out.reg);
         break;
       case Scalar::Int16:
         movswq(srcAddr, out.reg);
@@ -757,20 +764,21 @@ MacroAssembler::wasmLoadI64(Scalar::Type
         MOZ_CRASH("non-int64 loads should use load()");
       case Scalar::Uint8Clamped:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
 }
 
 void
-MacroAssembler::wasmStore(Scalar::Type type, unsigned numSimdElems, AnyRegister value,
-                          Operand dstAddr)
+MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr)
 {
-    switch (type) {
+    memoryBarrier(access.barrierBefore());
+
+    switch (access.type()) {
       case Scalar::Int8:
       case Scalar::Uint8:
         movb(value.gpr(), dstAddr);
         break;
       case Scalar::Int16:
       case Scalar::Uint16:
         movw(value.gpr(), dstAddr);
         break;
@@ -783,47 +791,49 @@ MacroAssembler::wasmStore(Scalar::Type t
         break;
       case Scalar::Float32:
         storeUncanonicalizedFloat32(value.fpu(), dstAddr);
         break;
       case Scalar::Float64:
         storeUncanonicalizedDouble(value.fpu(), dstAddr);
         break;
       case Scalar::Float32x4:
-        switch (numSimdElems) {
+        switch (access.numSimdElems()) {
           // In memory-to-register mode, movss zeroes out the high lanes.
           case 1: storeUncanonicalizedFloat32(value.fpu(), dstAddr); break;
           // See comment above, which also applies to movsd.
           case 2: storeUncanonicalizedDouble(value.fpu(), dstAddr); break;
           case 4: storeUnalignedSimd128Float(value.fpu(), dstAddr); break;
           default: MOZ_CRASH("unexpected size for partial load");
         }
         break;
       case Scalar::Int32x4:
-        switch (numSimdElems) {
+        switch (access.numSimdElems()) {
           // In memory-to-register mode, movd zeroes out the high lanes.
           case 1: vmovd(value.fpu(), dstAddr); break;
           // See comment above, which also applies to movq.
           case 2: vmovq(value.fpu(), dstAddr); break;
           case 4: storeUnalignedSimd128Int(value.fpu(), dstAddr); break;
           default: MOZ_CRASH("unexpected size for partial load");
         }
         break;
       case Scalar::Int8x16:
-        MOZ_ASSERT(numSimdElems == 16, "unexpected partial store");
+        MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial store");
         storeUnalignedSimd128Int(value.fpu(), dstAddr);
         break;
       case Scalar::Int16x8:
-        MOZ_ASSERT(numSimdElems == 8, "unexpected partial store");
+        MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial store");
         storeUnalignedSimd128Int(value.fpu(), dstAddr);
         break;
       case Scalar::Uint8Clamped:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
+
+    memoryBarrier(access.barrierAfter());
 }
 
 void
 MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry)
 {
     vcvttsd2sq(input, output);
 
     // Check that the result is in the uint32_t range.
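
The net effect at the x64 call sites, side by side (both forms are taken
from the code generator hunks above):

    // Before: every code generator bracketed the access by hand.
    memoryBarrier(mir->barrierBefore());
    masm.wasmLoad(mir->accessType(), mir->numSimdElems(), srcAddr, ToAnyRegister(ins->output()));
    memoryBarrier(mir->barrierAfter());

    // After: the descriptor travels with the access and the
    // MacroAssembler emits any barriers itself.
    masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
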
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -1164,16 +1164,22 @@ MacroAssembler::storeFloat32x3(FloatRegi
     BaseIndex destZ(dest);
     destZ.offset += 2 * sizeof(int32_t);
     storeDouble(src, dest);
     ScratchSimd128Scope scratch(*this);
     vmovhlps(src, scratch, scratch);
     storeFloat32(scratch, destZ);
 }
 
+void
+MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
+{
+    if (barrier & MembarStoreLoad)
+        storeLoadFence();
+}
 
 // ========================================================================
 // Truncate floating point.
 
 void
 MacroAssembler::truncateFloat32ToInt64(Address src, Address dest, Register temp)
 {
     if (Assembler::HasSSE3()) {
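
On x86 only the StoreLoad pair can be visibly reordered under the TSO
memory model, so the shared helper needs at most one fence:

    masm.memoryBarrier(MembarStoreLoad);  // storeLoadFence() (mfence or equivalent)
    masm.memoryBarrier(MembarLoadLoad);   // nothing; already ordered on x86
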
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -337,44 +337,35 @@ CodeGeneratorX86::visitWasmCall(LWasmCal
 }
 
 void
 CodeGeneratorX86::visitWasmCallI64(LWasmCallI64* ins)
 {
     emitWasmCall(ins);
 }
 
-void
-CodeGeneratorX86::memoryBarrier(MemoryBarrierBits barrier)
-{
-    if (barrier & MembarStoreLoad)
-        masm.storeLoadFence();
-}
-
 template <typename T>
 void
 CodeGeneratorX86::emitWasmLoad(T* ins)
 {
     const MWasmLoad* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
+
+    uint32_t offset = mir->access().offset();
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     const LAllocation* ptr = ins->ptr();
 
     Operand srcAddr = ptr->isBogus()
-                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
-                      : Operand(ToRegister(ptr), mir->offset());
-
-    memoryBarrier(mir->barrierBefore());
+                      ? Operand(PatchedAbsoluteAddress(offset))
+                      : Operand(ToRegister(ptr), offset);
 
     if (mir->type() == MIRType::Int64)
-        masm.wasmLoadI64(mir->accessType(), srcAddr, ToOutRegister64(ins));
+        masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
     else
-        masm.wasmLoad(mir->accessType(), mir->numSimdElems(), srcAddr, ToAnyRegister(ins->output()));
-
-    memoryBarrier(mir->barrierAfter());
+        masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorX86::visitWasmLoad(LWasmLoad* ins)
 {
     emitWasmLoad(ins);
 }
 
@@ -384,34 +375,32 @@ CodeGeneratorX86::visitWasmLoadI64(LWasm
     emitWasmLoad(ins);
 }
 
 template <typename T>
 void
 CodeGeneratorX86::emitWasmStore(T* ins)
 {
     const MWasmStore* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
+
+    uint32_t offset = mir->access().offset();
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
-                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
-                      : Operand(ToRegister(ptr), mir->offset());
+                      ? Operand(PatchedAbsoluteAddress(offset))
+                      : Operand(ToRegister(ptr), offset);
 
-    memoryBarrier(mir->barrierBefore());
-
-    if (mir->accessType() == Scalar::Int64) {
+    if (mir->access().type() == Scalar::Int64) {
         Register64 value = ToRegister64(ins->getInt64Operand(LWasmStoreI64::ValueIndex));
-        masm.wasmStoreI64(value, dstAddr);
+        masm.wasmStoreI64(mir->access(), value, dstAddr);
     } else {
         AnyRegister value = ToAnyRegister(ins->getOperand(LWasmStore::ValueIndex));
-        masm.wasmStore(mir->accessType(), mir->numSimdElems(), value, dstAddr);
+        masm.wasmStore(mir->access(), value, dstAddr);
     }
-
-    memoryBarrier(mir->barrierBefore());
 }
 
 void
 CodeGeneratorX86::visitWasmStore(LWasmStore* ins)
 {
     emitWasmStore(ins);
 }
 
@@ -420,17 +409,17 @@ CodeGeneratorX86::visitWasmStoreI64(LWas
 {
     emitWasmStore(ins);
 }
 
 void
 CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
 {
     const MAsmJSLoadHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
 
     const LAllocation* ptr = ins->ptr();
     AnyRegister out = ToAnyRegister(ins->output());
 
     Scalar::Type accessType = mir->accessType();
     MOZ_ASSERT(!Scalar::isSimdType(accessType));
 
     OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
@@ -440,17 +429,17 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAs
 
         masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), ool->entry());
     }
 
     Operand srcAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress())
                       : Operand(ToRegister(ptr), 0);
 
-    masm.wasmLoad(accessType, 0, srcAddr, out);
+    masm.wasmLoad(mir->access(), srcAddr, out);
 
     if (ool)
         masm.bind(ool->rejoin());
 }
 
 void
 CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
 {
@@ -516,17 +505,17 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LA
     Operand dstAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress())
                       : Operand(ToRegister(ptr), 0);
 
     Label rejoin;
     if (mir->needsBoundsCheck())
         masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), &rejoin);
 
-    masm.wasmStore(accessType, 0, ToAnyRegister(value), dstAddr);
+    masm.wasmStore(mir->access(), ToAnyRegister(value), dstAddr);
 
     if (rejoin.used())
         masm.bind(&rejoin);
 }
 
 // Perform bounds checking on the access if necessary; if it fails,
 // jump to out-of-line code that throws.  If the bounds check passes,
 // set up the heap address in addrTemp.
@@ -540,19 +529,19 @@ CodeGeneratorX86::asmJSAtomicComputeAddr
     masm.addlWithPatch(Imm32(0), addrTemp);
     masm.append(wasm::MemoryAccess(masm.size()));
 }
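
Only the tail of asmJSAtomicComputeAddress is inside this hunk. A presumed full shape, following the comment above it (steps 1-2 are assumptions reconstructed from that comment; steps 3-4 are the visible lines):

    // 1. bounds-check ptrReg if needed, jumping to throwing out-of-line code on failure;
    // 2. copy ptrReg into addrTemp;
    // 3. addlWithPatch(Imm32(0), addrTemp);   // the 0 is patched to the heap base at link time
    // 4. append(wasm::MemoryAccess(size()));  // record the patch site for the linker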
 
 void
 CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
 
-    Scalar::Type accessType = mir->accessType();
+    Scalar::Type accessType = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
     Register addrTemp = ToRegister(ins->addrTemp());
 
     asmJSAtomicComputeAddress(addrTemp, ptrReg);
 
     Address memAddr(addrTemp, 0);
@@ -563,19 +552,19 @@ CodeGeneratorX86::visitAsmJSCompareExcha
                                         InvalidReg,
                                         ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorX86::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
 
-    Scalar::Type accessType = mir->accessType();
+    Scalar::Type accessType = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     Register addrTemp = ToRegister(ins->addrTemp());
 
     asmJSAtomicComputeAddress(addrTemp, ptrReg);
 
     Address memAddr(addrTemp, 0);
     masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
@@ -584,19 +573,19 @@ CodeGeneratorX86::visitAsmJSAtomicExchan
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
 }
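
The Scalar::Uint32 -> Scalar::Int32 narrowing in the call above keeps the result in an int32 register: a true Uint32 result could exceed INT32_MAX and would otherwise have to be boxed as a double, which asm.js callers never observe because they re-coerce the result anyway. A hypothetical caller-side view (illustration, not code from this patch):

    // var HEAPU32 = new stdlib.Uint32Array(buffer);
    // (Atomics.exchange(HEAPU32, i, v)|0)   // the |0 coercion means an
    //                                       // int32-typed output register suffices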
 
 void
 CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
     MAsmJSAtomicBinopHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
 
-    Scalar::Type accessType = mir->accessType();
+    Scalar::Type accessType = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
     Register addrTemp = ToRegister(ins->addrTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
     asmJSAtomicComputeAddress(addrTemp, ptrReg);
 
@@ -617,20 +606,20 @@ CodeGeneratorX86::visitAsmJSAtomicBinopH
                                    ToAnyRegister(ins->output()));
     }
 }
 
 void
 CodeGeneratorX86::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MAsmJSAtomicBinopHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->access().offset() == 0);
     MOZ_ASSERT(!mir->hasUses());
 
-    Scalar::Type accessType = mir->accessType();
+    Scalar::Type accessType = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register addrTemp = ToRegister(ins->addrTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
     asmJSAtomicComputeAddress(addrTemp, ptrReg);
 
     Address memAddr(addrTemp, 0);
--- a/js/src/jit/x86/CodeGenerator-x86.h
+++ b/js/src/jit/x86/CodeGenerator-x86.h
@@ -23,18 +23,16 @@ class CodeGeneratorX86 : public CodeGene
         return this;
     }
 
   protected:
     ValueOperand ToValue(LInstruction* ins, size_t pos);
     ValueOperand ToOutValue(LInstruction* ins);
     ValueOperand ToTempValue(LInstruction* ins, size_t pos);
 
-    void memoryBarrier(MemoryBarrierBits barrier);
-
     template <typename T> void emitWasmLoad(T* ins);
     template <typename T> void emitWasmStore(T* ins);
 
   public:
     CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
 
   public:
     void visitBox(LBox* box);
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -280,17 +280,17 @@ LIRGeneratorX86::visitWasmLoad(MWasmLoad
         return;
     }
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
 
-    Scalar::Type accessType = ins->accessType();
+    Scalar::Type accessType = ins->access().type();
     if (accessType == Scalar::Int8 || accessType == Scalar::Int16 || accessType == Scalar::Int32) {
         // We use cdq to sign-extend the result and cdq demands these registers.
         defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(edx)),
                                                     LAllocation(AnyRegister(eax))));
         return;
     }
 
     defineInt64(lir, ins);
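
The cdq constraint mentioned in the comment above: cdq sign-extends eax into edx:eax in a single instruction, so a signed 8/16/32-bit load can produce a full int64 result only if the pair edx:eax is preassigned. In C++ terms, and matching the wasmLoadI64 hunk further down:

    // What the movsbl/cdq sequence computes:
    //   int32_t eax = int32_t(loadedByte);   // movsbl: load + sign-extend to 32 bits
    //   int64_t result = int64_t(eax);       // cdq: edx = eax >> 31, result in edx:eax
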
@@ -300,17 +300,17 @@ void
 LIRGeneratorX86::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
 
     LAllocation valueAlloc;
-    switch (ins->accessType()) {
+    switch (ins->access().type()) {
       case Scalar::Int8: case Scalar::Uint8:
         // See comment for LIRGeneratorX86::useByteOpRegister.
         valueAlloc = useFixed(ins->value(), eax);
         break;
       case Scalar::Int16: case Scalar::Uint16:
       case Scalar::Int32: case Scalar::Uint32:
       case Scalar::Float32: case Scalar::Float64:
       case Scalar::Float32x4:
@@ -359,17 +359,17 @@ LIRGeneratorX86::visitAsmJSStoreHeap(MAs
 
     // For simplicity, require a register if we're going to emit a bounds-check
     // branch, so that we don't have special cases for constants.
     LAllocation baseAlloc = ins->needsBoundsCheck()
                             ? useRegisterAtStart(base)
                             : useRegisterOrZeroAtStart(base);
 
     LAsmJSStoreHeap* lir = nullptr;
-    switch (ins->accessType()) {
+    switch (ins->access().type()) {
       case Scalar::Int8: case Scalar::Uint8:
         // See comment for LIRGeneratorX86::useByteOpRegister.
         lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useFixed(ins->value(), eax));
         break;
       case Scalar::Int16: case Scalar::Uint16:
       case Scalar::Int32: case Scalar::Uint32:
       case Scalar::Float32: case Scalar::Float64:
       case Scalar::Float32x4:
@@ -411,22 +411,22 @@ LIRGeneratorX86::visitStoreTypedArrayEle
     }
 
     add(lir, ins);
 }
 
 void
 LIRGeneratorX86::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
 {
-    MOZ_ASSERT(ins->accessType() < Scalar::Float32);
+    MOZ_ASSERT(ins->access().type() < Scalar::Float32);
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
-    bool byteArray = byteSize(ins->accessType()) == 1;
+    bool byteArray = byteSize(ins->access().type()) == 1;
 
     // Register allocation:
     //
     // The output may not be used, but eax will be clobbered regardless
     // so pin the output to eax.
     //
     // oldval must be in a register.
     //
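
The eax pinning described above comes straight from the ISA: x86 CMPXCHG hard-codes eax as both the expected value and the result.

    // lock cmpxchg reg, mem   ; if (eax == mem) mem = reg; else eax = mem
    //                         ; eax is written on both paths
    // Hence the lowering fixes the (possibly unused) output to eax.
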
@@ -453,31 +453,31 @@ LIRGeneratorX86::visitAsmJSAtomicExchang
 
     const LAllocation base = useRegister(ins->base());
     const LAllocation value = useRegister(ins->value());
 
     LAsmJSAtomicExchangeHeap* lir =
         new(alloc()) LAsmJSAtomicExchangeHeap(base, value);
 
     lir->setAddrTemp(temp());
-    if (byteSize(ins->accessType()) == 1)
+    if (byteSize(ins->access().type()) == 1)
         defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
     else
         define(lir, ins);
 }
 
 void
 LIRGeneratorX86::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
 {
-    MOZ_ASSERT(ins->accessType() < Scalar::Float32);
+    MOZ_ASSERT(ins->access().type() < Scalar::Float32);
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
-    bool byteArray = byteSize(ins->accessType()) == 1;
+    bool byteArray = byteSize(ins->access().type()) == 1;
 
     // Case 1: the result of the operation is not used.
     //
     // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
     // LOCK OR, or LOCK XOR.  These can all take an immediate.
 
     if (!ins->hasUses()) {
         LAllocation value;
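
Case 1 above in machine terms: when nothing reads the old value, the whole read-modify-write collapses to a single locked instruction, and an immediate operand is legal. Case 2 (result used) lies beyond this hunk; on x86 it generally needs LOCK XADD (add/sub) or a CMPXCHG loop (and/or/xor), which is why it constrains registers much like the compare-exchange lowering above. Illustrative instruction shapes (assumed, not quoted from the patch):

    // Case 1: Atomics.add(HEAP32, i, 1) with the result discarded:
    //   lock addl $1, (mem)          ; one instruction, immediate allowed
    // Case 2 for add/sub, result used:
    //   movl value, %eax
    //   lock xaddl %eax, (mem)       ; eax receives the old value
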
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -553,19 +553,21 @@ MacroAssembler::storeUnboxedValue(const 
                                   const Address& dest, MIRType slotType);
 template void
 MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
                                   const BaseIndex& dest, MIRType slotType);
 
 // wasm specific methods, used in both the wasm baseline compiler and ion.
 
 void
-MacroAssembler::wasmLoad(Scalar::Type type, unsigned numSimdElems, Operand srcAddr, AnyRegister out)
+MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out)
 {
-    switch (type) {
+    memoryBarrier(access.barrierBefore());
+
+    switch (access.type()) {
       case Scalar::Int8:
         movsblWithPatch(srcAddr, out.gpr());
         break;
       case Scalar::Uint8:
         movzblWithPatch(srcAddr, out.gpr());
         break;
       case Scalar::Int16:
         movswlWithPatch(srcAddr, out.gpr());
@@ -579,55 +581,60 @@ MacroAssembler::wasmLoad(Scalar::Type ty
         break;
       case Scalar::Float32:
         vmovssWithPatch(srcAddr, out.fpu());
         break;
       case Scalar::Float64:
         vmovsdWithPatch(srcAddr, out.fpu());
         break;
       case Scalar::Float32x4:
-        switch (numSimdElems) {
+        switch (access.numSimdElems()) {
           // In memory-to-register mode, movss zeroes out the high lanes.
           case 1: vmovssWithPatch(srcAddr, out.fpu()); break;
           // See comment above, which also applies to movsd.
           case 2: vmovsdWithPatch(srcAddr, out.fpu()); break;
           case 4: vmovupsWithPatch(srcAddr, out.fpu()); break;
           default: MOZ_CRASH("unexpected size for partial load");
         }
         break;
       case Scalar::Int32x4:
-        switch (numSimdElems) {
+        switch (access.numSimdElems()) {
           // In memory-to-register mode, movd zeroes out the high lanes.
           case 1: vmovdWithPatch(srcAddr, out.fpu()); break;
           // See comment above, which also applies to movq.
           case 2: vmovqWithPatch(srcAddr, out.fpu()); break;
           case 4: vmovdquWithPatch(srcAddr, out.fpu()); break;
           default: MOZ_CRASH("unexpected size for partial load");
         }
         break;
       case Scalar::Int8x16:
-        MOZ_ASSERT(numSimdElems == 16, "unexpected partial load");
+        MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial load");
         vmovdquWithPatch(srcAddr, out.fpu());
         break;
       case Scalar::Int16x8:
-        MOZ_ASSERT(numSimdElems == 8, "unexpected partial load");
+        MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial load");
         vmovdquWithPatch(srcAddr, out.fpu());
         break;
       case Scalar::Int64:
       case Scalar::Uint8Clamped:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected type");
     }
     append(wasm::MemoryAccess(size()));
+
+    memoryBarrier(access.barrierAfter());
 }
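
The zeroing called out in the comments above is what makes partial SIMD loads safe: every lane of the output register is defined even when fewer than four elements are read from memory.

    // Float32x4 partial loads, lane by lane (illustration):
    //   vmovss  (mem), %xmm0   ; xmm0 = (m0,  0,  0,  0)
    //   vmovsd  (mem), %xmm0   ; xmm0 = (m0, m1,  0,  0)
    //   vmovups (mem), %xmm0   ; xmm0 = (m0, m1, m2, m3)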
 
 void
-MacroAssembler::wasmLoadI64(Scalar::Type type, Operand srcAddr, Register64 out)
+MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out)
 {
-    switch (type) {
+    MOZ_ASSERT(!access.isAtomic());
+    MOZ_ASSERT(!access.isSimd());
+
+    switch (access.type()) {
       case Scalar::Int8:
         MOZ_ASSERT(out == Register64(edx, eax));
         movsblWithPatch(srcAddr, out.low);
         append(wasm::MemoryAccess(size()));
         cdq();
         break;
       case Scalar::Uint8:
         movzblWithPatch(srcAddr, out.low);
@@ -694,20 +701,21 @@ MacroAssembler::wasmLoadI64(Scalar::Type
         MOZ_CRASH("non-int64 loads should use load()");
       case Scalar::Uint8Clamped:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
 }
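
Note the pattern in the Int8 case above: every patched instruction is immediately followed by its own append(wasm::MemoryAccess(size())), so each patch site is recorded separately. For Scalar::Int64 itself (elided from this hunk) the natural x86-32 shape presumably mirrors wasmStoreI64 below, splitting the load into two recorded 32-bit moves:

    // Presumed Int64 path (assumption -- mirrors the wasmStoreI64 hunk below):
    //   movlWithPatch(lowAddr, out.low);
    //   append(wasm::MemoryAccess(size()));
    //   movlWithPatch(highAddr, out.high);
    //   append(wasm::MemoryAccess(size()));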
 
 void
-MacroAssembler::wasmStore(Scalar::Type type, unsigned numSimdElems, AnyRegister value,
-                          Operand dstAddr)
+MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr)
 {
-    switch (type) {
+    memoryBarrier(access.barrierBefore());
+
+    switch (access.type()) {
       case Scalar::Int8:
       case Scalar::Uint8Clamped:
       case Scalar::Uint8:
         movbWithPatch(value.gpr(), dstAddr);
         break;
       case Scalar::Int16:
       case Scalar::Uint16:
         movwWithPatch(value.gpr(), dstAddr);
@@ -718,54 +726,59 @@ MacroAssembler::wasmStore(Scalar::Type t
         break;
       case Scalar::Float32:
         vmovssWithPatch(value.fpu(), dstAddr);
         break;
       case Scalar::Float64:
         vmovsdWithPatch(value.fpu(), dstAddr);
         break;
       case Scalar::Float32x4:
-        switch (numSimdElems) {
+        switch (access.numSimdElems()) {
          // In register-to-memory mode, movss writes only the low lane.
          case 1: vmovssWithPatch(value.fpu(), dstAddr); break;
          // Likewise movsd, which writes only the two low lanes.
          case 2: vmovsdWithPatch(value.fpu(), dstAddr); break;
          case 4: vmovupsWithPatch(value.fpu(), dstAddr); break;
          default: MOZ_CRASH("unexpected size for partial store");
         }
         break;
       case Scalar::Int32x4:
-        switch (numSimdElems) {
+        switch (access.numSimdElems()) {
          // In register-to-memory mode, movd writes only the low lane.
          case 1: vmovdWithPatch(value.fpu(), dstAddr); break;
          // Likewise movq, which writes only the two low lanes.
          case 2: vmovqWithPatch(value.fpu(), dstAddr); break;
          case 4: vmovdquWithPatch(value.fpu(), dstAddr); break;
          default: MOZ_CRASH("unexpected size for partial store");
         }
         break;
       case Scalar::Int8x16:
-        MOZ_ASSERT(numSimdElems == 16, "unexpected partial store");
+        MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial store");
         vmovdquWithPatch(value.fpu(), dstAddr);
         break;
       case Scalar::Int16x8:
-        MOZ_ASSERT(numSimdElems == 8, "unexpected partial store");
+        MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial store");
         vmovdquWithPatch(value.fpu(), dstAddr);
         break;
       case Scalar::Int64:
         MOZ_CRASH("Should be handled in storeI64.");
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected type");
     }
     append(wasm::MemoryAccess(size()));
+
+    memoryBarrier(access.barrierAfter());
 }
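
A note on cost: under x86's TSO memory model only the StoreLoad edge needs code (an mfence), so the two memoryBarrier() calls added here compile to nothing for ordinary, non-atomic wasm accesses whose barrier bits are empty. Sketch of the usual x86 shape (an assumption about memoryBarrier's body, not quoted from this patch):

    // void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
    // {
    //     if (barrier & MembarStoreLoad)
    //         mfence();
    // }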
 
 void
-MacroAssembler::wasmStoreI64(Register64 value, Operand dstAddr)
+MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr)
 {
+    MOZ_ASSERT(!access.isAtomic());
+    MOZ_ASSERT(!access.isSimd());
+
     if (dstAddr.kind() == Operand::MEM_ADDRESS32) {
         Operand low(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64LOW_OFFSET));
         Operand high(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64HIGH_OFFSET));
 
         movlWithPatch(value.low, low);
         append(wasm::MemoryAccess(size()));
         movlWithPatch(value.high, high);
         append(wasm::MemoryAccess(size()));