Bug 1268024: Rename MAsmJSHeapAccess to MWasmMemoryAccess; r=luke
author Benjamin Bouvier <benj@benj.me>
Fri, 17 Jun 2016 17:19:42 +0200
changeset 303424 38c84afa8cde50514366e23d4405457c276361da
parent 303423 25d979f7c932e272ff80dfc5dd303674bcd6f21a
child 303425 d1abee3e755dcc38ce5b5a72f702b950f333853d
push id 30388
push user cbook@mozilla.com
push date Sat, 02 Jul 2016 09:15:23 +0000
treeherder mozilla-central@39dffbba7642
reviewers luke
bugs 1268024
milestone 50.0a1
Bug 1268024: Rename MAsmJSHeapAccess to MWasmMemoryAccess; r=luke MozReview-Commit-ID: 1N1UlhhkFSu
js/src/asmjs/WasmBaselineCompile.cpp
js/src/asmjs/WasmIonCompile.cpp
js/src/jit/EffectiveAddressAnalysis.cpp
js/src/jit/EffectiveAddressAnalysis.h
js/src/jit/MIR.h
js/src/jit/MIRGenerator.h
js/src/jit/MIRGraph.cpp
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.h
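
The patch is a mechanical rename: the descriptor's shape and its users are unchanged. For orientation, a minimal usage sketch of the renamed class, assuming (as in FunctionCompiler below) a TempAllocator alloc(), a base MDefinition*, and a current block are in scope:

    // Minimal usage sketch of the renamed descriptor; see the MIR.h hunk
    // below for its definition. alloc(), base, and curBlock_ are assumed.
    MWasmMemoryAccess access(Scalar::Int32);   // align defaults to byteSize(Int32)
    access.setOffset(addr.offset);             // constant byte offset of the access
    access.setAlign(addr.align);               // asserted to be a power of two
    MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
    curBlock_->add(load);
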
--- a/js/src/asmjs/WasmBaselineCompile.cpp
+++ b/js/src/asmjs/WasmBaselineCompile.cpp
@@ -2686,17 +2686,17 @@ class BaseCompiler
             masm.storeLoadFence();
 #else
         MOZ_CRASH("BaseCompiler platform hook: memoryBarrier");
 #endif
     }
 
     // Cloned from MIRGraph.cpp, merge somehow?
 
-    bool needsBoundsCheckBranch(const MAsmJSHeapAccess& access) const {
+    bool needsBoundsCheckBranch(const MWasmMemoryAccess& access) const {
         // A heap access needs a bounds-check branch if we're not relying on signal
         // handlers to catch errors, and if it's not proven to be within bounds.
         // We use signal-handlers on x64, but on x86 there isn't enough address
         // space for a guard region.  Also, on x64 the atomic loads and stores
         // can't (yet) use the signal handlers.
 
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
         if (mg_.args.useSignalHandlersForOOB && !access.isAtomicAccess())
@@ -2714,17 +2714,17 @@ class BaseCompiler
 #ifdef DEBUG
         // TODO / MISSING: this needs to be adapted from what's in the
         // platform's CodeGenerator; that code takes an LAllocation as
         // the last arg now.
 #endif
     }
 #endif
 
-    void loadHeap(const MAsmJSHeapAccess& access, RegI32 ptr, AnyReg dest) {
+    void loadHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg dest) {
 #if defined(JS_CODEGEN_X64)
         // CodeGeneratorX64::visitAsmJSLoadHeap()
 
         if (needsBoundsCheckBranch(access))
             MOZ_CRASH("BaseCompiler platform hook: bounds checking");
 
         Operand srcAddr(HeapReg, ptr.reg, TimesOne, access.offset());
 
@@ -2745,17 +2745,17 @@ class BaseCompiler
 
         masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
         verifyHeapAccessDisassembly(before, after, IsLoad(true), access.accessType(), 0, srcAddr, dest);
 #else
         MOZ_CRASH("BaseCompiler platform hook: loadHeap");
 #endif
     }
 
-    void storeHeap(const MAsmJSHeapAccess& access, RegI32 ptr, AnyReg src) {
+    void storeHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg src) {
 #if defined(JS_CODEGEN_X64)
         // CodeGeneratorX64::visitAsmJSStoreHeap()
 
         if (needsBoundsCheckBranch(access))
             MOZ_CRASH("BaseCompiler platform hook: bounds checking");
 
         Operand dstAddr(HeapReg, ptr.reg, TimesOne, access.offset());
 
@@ -5021,17 +5021,17 @@ BaseCompiler::emitLoad(ValType type, Sca
 {
     LinearMemoryAddress<Nothing> addr;
     if (!iter_.readLoad(type, Scalar::byteSize(viewType), &addr))
         return false;
 
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MAsmJSHeapAccess access(viewType);
+    MWasmMemoryAccess access(viewType);
     access.setOffset(addr.offset);
     access.setAlign(addr.align);
 
     switch (type) {
       case ValType::I32: {
         RegI32 rp = popI32();
         loadHeap(access, rp, AnyReg(rp));
         pushI32(rp);
@@ -5069,17 +5069,17 @@ BaseCompiler::emitStore(ValType resultTy
     LinearMemoryAddress<Nothing> addr;
     Nothing unused_value;
     if (!iter_.readStore(resultType, Scalar::byteSize(viewType), &addr, &unused_value))
         return false;
 
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MAsmJSHeapAccess access(viewType);
+    MWasmMemoryAccess access(viewType);
     access.setOffset(addr.offset);
     access.setAlign(addr.align);
 
     switch (resultType) {
       case ValType::I32: {
         RegI32 rp, rv;
         pop2xI32(&rp, &rv);
         storeHeap(access, rp, AnyReg(rv));
@@ -5344,17 +5344,17 @@ BaseCompiler::emitStoreWithCoercion(ValT
     LinearMemoryAddress<Nothing> addr;
     Nothing unused_value;
     if (!iter_.readStore(resultType, Scalar::byteSize(viewType), &addr, &unused_value))
         return false;
 
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MAsmJSHeapAccess access(viewType);
+    MWasmMemoryAccess access(viewType);
     access.setOffset(addr.offset);
     access.setAlign(addr.align);
 
     if (resultType == ValType::F32 && viewType == Scalar::Float64) {
         RegF32 rv = popF32();
         RegF64 rw = needF64();
         masm.convertFloat32ToDouble(rv.reg, rw.reg);
         RegI32 rp = popI32();
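
Note that the baseline compiler sets the offset on the descriptor directly and leaves bounds checking to MOZ_CRASH platform hooks, whereas the Ion path in the next file folds the offset through SetHeapAccessOffset() first. A condensed sketch of the baseline i32 load path from emitLoad() above:

    // Condensed sketch of the baseline i32 load path shown in emitLoad().
    MWasmMemoryAccess access(viewType);
    access.setOffset(addr.offset);
    access.setAlign(addr.align);
    RegI32 rp = popI32();              // pop the linear-memory index
    loadHeap(access, rp, AnyReg(rp));  // the result reuses the index register
    pushI32(rp);                       // push the loaded value
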
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -584,107 +584,107 @@ class FunctionCompiler
 
     void assign(unsigned slot, MDefinition* def)
     {
         if (inDeadCode())
             return;
         curBlock_->setSlot(info().localSlot(slot), def);
     }
 
-    MDefinition* loadHeap(MDefinition* base, const MAsmJSHeapAccess& access)
+    MDefinition* loadHeap(MDefinition* base, const MWasmMemoryAccess& access)
     {
         if (inDeadCode())
             return nullptr;
 
         MOZ_ASSERT(!Scalar::isSimdType(access.accessType()), "SIMD loads should use loadSimdHeap");
         MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
         curBlock_->add(load);
         return load;
     }
 
-    MDefinition* loadSimdHeap(MDefinition* base, const MAsmJSHeapAccess& access)
+    MDefinition* loadSimdHeap(MDefinition* base, const MWasmMemoryAccess& access)
     {
         if (inDeadCode())
             return nullptr;
 
         MOZ_ASSERT(Scalar::isSimdType(access.accessType()),
                    "loadSimdHeap can only load from a SIMD view");
         MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
         curBlock_->add(load);
         return load;
     }
 
-    void storeHeap(MDefinition* base, const MAsmJSHeapAccess& access, MDefinition* v)
+    void storeHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
     {
         if (inDeadCode())
             return;
 
         MOZ_ASSERT(!Scalar::isSimdType(access.accessType()),
                    "SIMD stores should use storeSimdHeap");
         MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), base, access, v);
         curBlock_->add(store);
     }
 
-    void storeSimdHeap(MDefinition* base, const MAsmJSHeapAccess& access, MDefinition* v)
+    void storeSimdHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
     {
         if (inDeadCode())
             return;
 
         MOZ_ASSERT(Scalar::isSimdType(access.accessType()),
                    "storeSimdHeap can only load from a SIMD view");
         MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), base, access, v);
         curBlock_->add(store);
     }
 
-    MDefinition* atomicLoadHeap(MDefinition* base, const MAsmJSHeapAccess& access)
+    MDefinition* atomicLoadHeap(MDefinition* base, const MWasmMemoryAccess& access)
     {
         if (inDeadCode())
             return nullptr;
 
         MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
         curBlock_->add(load);
         return load;
     }
 
-    void atomicStoreHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+    void atomicStoreHeap(MDefinition* base, const MWasmMemoryAccess& access,
                          MDefinition* v)
     {
         if (inDeadCode())
             return;
 
         MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), base, access, v);
         curBlock_->add(store);
     }
 
-    MDefinition* atomicCompareExchangeHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+    MDefinition* atomicCompareExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
                                            MDefinition* oldv, MDefinition* newv)
     {
         if (inDeadCode())
             return nullptr;
 
         MAsmJSCompareExchangeHeap* cas =
             MAsmJSCompareExchangeHeap::New(alloc(), base, access, oldv, newv);
         curBlock_->add(cas);
         return cas;
     }
 
-    MDefinition* atomicExchangeHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+    MDefinition* atomicExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
                                     MDefinition* value)
     {
         if (inDeadCode())
             return nullptr;
 
         MAsmJSAtomicExchangeHeap* cas =
             MAsmJSAtomicExchangeHeap::New(alloc(), base, access, value);
         curBlock_->add(cas);
         return cas;
     }
 
     MDefinition* atomicBinopHeap(js::jit::AtomicOp op,
-                                 MDefinition* base, const MAsmJSHeapAccess& access,
+                                 MDefinition* base, const MWasmMemoryAccess& access,
                                  MDefinition* v)
     {
         if (inDeadCode())
             return nullptr;
 
         MAsmJSAtomicBinopHeap* binop =
             MAsmJSAtomicBinopHeap::New(alloc(), op, base, access, v);
         curBlock_->add(binop);
@@ -2075,18 +2075,18 @@ EmitSelect(FunctionCompiler& f)
 }
 
 enum class IsAtomic {
     No = false,
     Yes = true
 };
 
 static bool
-SetHeapAccessOffset(FunctionCompiler& f, uint32_t offset, MAsmJSHeapAccess* access, MDefinition** base,
-                    IsAtomic atomic = IsAtomic::No)
+SetHeapAccessOffset(FunctionCompiler& f, uint32_t offset, MWasmMemoryAccess* access,
+                    MDefinition** base, IsAtomic atomic = IsAtomic::No)
 {
     // TODO Remove this after implementing non-wraparound offset semantics.
     uint32_t endOffset = offset + access->byteSize();
     if (endOffset < offset)
         return false;
 
     // Assume worst case.
     if (endOffset > f.mirGen().foldableOffsetRange(/* bounds check */ true, bool(atomic))) {
@@ -2102,17 +2102,17 @@ SetHeapAccessOffset(FunctionCompiler& f,
 
 static bool
 EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType)
 {
     LinearMemoryAddress<MDefinition*> addr;
     if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr))
         return false;
 
-    MAsmJSHeapAccess access(viewType);
+    MWasmMemoryAccess access(viewType);
     access.setAlign(addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
         return false;
 
     f.iter().setResult(f.loadHeap(base, access));
     return true;
@@ -2121,17 +2121,17 @@ EmitLoad(FunctionCompiler& f, ValType ty
 static bool
 EmitStore(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
 {
     LinearMemoryAddress<MDefinition*> addr;
     MDefinition* value;
     if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value))
         return false;
 
-    MAsmJSHeapAccess access(viewType);
+    MWasmMemoryAccess access(viewType);
     access.setAlign(addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
         return false;
 
     f.storeHeap(base, access, value);
     return true;
@@ -2147,17 +2147,17 @@ EmitStoreWithCoercion(FunctionCompiler& 
 
     if (resultType == ValType::F32 && viewType == Scalar::Float64)
         value = f.unary<MToDouble>(value);
     else if (resultType == ValType::F64 && viewType == Scalar::Float32)
         value = f.unary<MToFloat32>(value);
     else
         MOZ_CRASH("unexpected coerced store");
 
-    MAsmJSHeapAccess access(viewType);
+    MWasmMemoryAccess access(viewType);
     access.setAlign(addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
         return false;
 
     f.storeHeap(base, access, value);
     return true;
@@ -2224,17 +2224,17 @@ EmitBinaryMathBuiltinCall(FunctionCompil
 static bool
 EmitAtomicsLoad(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     if (!f.iter().readAtomicLoad(&addr, &viewType))
         return false;
 
-    MAsmJSHeapAccess access(viewType, 0, MembarBeforeLoad, MembarAfterLoad);
+    MWasmMemoryAccess access(viewType, 0, MembarBeforeLoad, MembarAfterLoad);
     access.setAlign(addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
         return false;
 
     f.iter().setResult(f.atomicLoadHeap(base, access));
     return true;
@@ -2244,17 +2244,17 @@ static bool
 EmitAtomicsStore(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     MDefinition* value;
     if (!f.iter().readAtomicStore(&addr, &viewType, &value))
         return false;
 
-    MAsmJSHeapAccess access(viewType, 0, MembarBeforeStore, MembarAfterStore);
+    MWasmMemoryAccess access(viewType, 0, MembarBeforeStore, MembarAfterStore);
     access.setAlign(addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
         return false;
 
     f.atomicStoreHeap(base, access, value);
     f.iter().setResult(value);
@@ -2266,17 +2266,17 @@ EmitAtomicsBinOp(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     jit::AtomicOp op;
     MDefinition* value;
     if (!f.iter().readAtomicBinOp(&addr, &viewType, &op, &value))
         return false;
 
-    MAsmJSHeapAccess access(viewType);
+    MWasmMemoryAccess access(viewType);
     access.setAlign(addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
         return false;
 
     f.iter().setResult(f.atomicBinopHeap(op, base, access, value));
     return true;
@@ -2287,17 +2287,17 @@ EmitAtomicsCompareExchange(FunctionCompi
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     MDefinition* oldValue;
     MDefinition* newValue;
     if (!f.iter().readAtomicCompareExchange(&addr, &viewType, &oldValue, &newValue))
         return false;
 
-    MAsmJSHeapAccess access(viewType);
+    MWasmMemoryAccess access(viewType);
     access.setAlign(addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
         return false;
 
     f.iter().setResult(f.atomicCompareExchangeHeap(base, access, oldValue, newValue));
     return true;
@@ -2307,17 +2307,17 @@ static bool
 EmitAtomicsExchange(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     MDefinition* value;
     if (!f.iter().readAtomicExchange(&addr, &viewType, &value))
         return false;
 
-    MAsmJSHeapAccess access(viewType);
+    MWasmMemoryAccess access(viewType);
     access.setAlign(addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
         return false;
 
     f.iter().setResult(f.atomicExchangeHeap(base, access, value));
     return true;
@@ -2535,17 +2535,17 @@ EmitSimdLoad(FunctionCompiler& f, ValTyp
 
     if (!numElems)
         numElems = defaultNumElems;
 
     LinearMemoryAddress<MDefinition*> addr;
     if (!f.iter().readLoad(resultType, Scalar::byteSize(viewType), &addr))
         return false;
 
-    MAsmJSHeapAccess access(viewType, numElems);
+    MWasmMemoryAccess access(viewType, numElems);
     access.setAlign(addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
         return false;
 
     f.iter().setResult(f.loadSimdHeap(base, access));
     return true;
@@ -2560,17 +2560,17 @@ EmitSimdStore(FunctionCompiler& f, ValTy
     if (!numElems)
         numElems = defaultNumElems;
 
     LinearMemoryAddress<MDefinition*> addr;
     MDefinition* value;
     if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value))
         return false;
 
-    MAsmJSHeapAccess access(viewType, numElems);
+    MWasmMemoryAccess access(viewType, numElems);
     access.setAlign(addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
         return false;
 
     f.storeSimdHeap(base, access, value);
     return true;
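
The one subtle piece of logic in this file is the wraparound guard in SetHeapAccessOffset(). A self-contained sketch of the arithmetic with a concrete failing input; foldableRange stands in for f.mirGen().foldableOffsetRange(...), and where the real code falls back to an explicit add when the end offset exceeds the foldable range (that branch is truncated in the hunk above), the sketch simply reports foldability:

    #include <cstdint>
    #include <cstdio>

    // Sketch of the wraparound guard in SetHeapAccessOffset() above.
    static bool EndOffsetFoldable(uint32_t offset, uint32_t byteSize,
                                  size_t foldableRange)
    {
        uint32_t endOffset = offset + byteSize;
        if (endOffset < offset)          // uint32_t wrapped, e.g. 0xFFFFFFFCu + 8
            return false;
        return size_t(endOffset) <= foldableRange;
    }

    int main()
    {
        printf("%d\n", EndOffsetFoldable(0xFFFFFFFCu, 8, 4096));  // 0: wraps around
        printf("%d\n", EndOffsetFoldable(16, 8, 4096));           // 1: foldable
    }
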
--- a/js/src/jit/EffectiveAddressAnalysis.cpp
+++ b/js/src/jit/EffectiveAddressAnalysis.cpp
@@ -95,19 +95,19 @@ AnalyzeLsh(TempAllocator& alloc, MLsh* l
     if (base->isRecoveredOnBailout())
         return;
 
     MEffectiveAddress* eaddr = MEffectiveAddress::New(alloc, base, index, scale, displacement);
     last->replaceAllUsesWith(eaddr);
     last->block()->insertAfter(last, eaddr);
 }
 
-template<typename MAsmJSHeapAccessType>
+template<typename MWasmMemoryAccessType>
 bool
-EffectiveAddressAnalysis::tryAddDisplacement(MAsmJSHeapAccessType* ins, int32_t o)
+EffectiveAddressAnalysis::tryAddDisplacement(MWasmMemoryAccessType* ins, int32_t o)
 {
     // Compute the new offset. Check for overflow.
     uint32_t oldOffset = ins->offset();
     uint32_t newOffset = oldOffset + o;
     if (o < 0 ? (newOffset >= oldOffset) : (newOffset < oldOffset))
         return false;
 
     // Compute the new offset to the end of the access. Check for overflow
@@ -122,19 +122,19 @@ EffectiveAddressAnalysis::tryAddDisplace
     if (size_t(newEnd) > range)
         return false;
 
     // Everything checks out. This is the new offset.
     ins->setOffset(newOffset);
     return true;
 }
 
-template<typename MAsmJSHeapAccessType>
+template<typename MWasmMemoryAccessType>
 void
-EffectiveAddressAnalysis::analyzeAsmHeapAccess(MAsmJSHeapAccessType* ins)
+EffectiveAddressAnalysis::analyzeAsmHeapAccess(MWasmMemoryAccessType* ins)
 {
     MDefinition* base = ins->base();
 
     if (base->isConstant()) {
         // Look for heap[i] where i is a constant offset, and fold the offset.
         // By doing the folding now, we simplify the task of codegen; the offset
         // is always the address mode immediate. This also allows it to avoid
         // a situation where the sum of a constant pointer value and a non-zero
--- a/js/src/jit/EffectiveAddressAnalysis.h
+++ b/js/src/jit/EffectiveAddressAnalysis.h
@@ -14,21 +14,21 @@ namespace jit {
 
 class MIRGraph;
 
 class EffectiveAddressAnalysis
 {
     MIRGenerator* mir_;
     MIRGraph& graph_;
 
-    template<typename MAsmJSHeapAccessType>
-    MOZ_MUST_USE bool tryAddDisplacement(MAsmJSHeapAccessType* ins, int32_t o);
+    template<typename MWasmMemoryAccessType>
+    MOZ_MUST_USE bool tryAddDisplacement(MWasmMemoryAccessType* ins, int32_t o);
 
-    template<typename MAsmJSHeapAccessType>
-    void analyzeAsmHeapAccess(MAsmJSHeapAccessType* ins);
+    template<typename MWasmMemoryAccessType>
+    void analyzeAsmHeapAccess(MWasmMemoryAccessType* ins);
 
   public:
     EffectiveAddressAnalysis(MIRGenerator* mir, MIRGraph& graph)
       : mir_(mir), graph_(graph)
     {}
 
     MOZ_MUST_USE bool analyze();
 };
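
tryAddDisplacement() in the .cpp above adds a signed displacement to the unsigned 32-bit offset, and its overflow test needs no wider integer type: for a negative displacement the sum must strictly decrease, for a non-negative one it must not decrease. A standalone sketch of that test:

    #include <cstdint>

    // Sketch of the overflow test in tryAddDisplacement(): anything that
    // violates the direction of the expected change wrapped around.
    static bool AddDisplacement(uint32_t oldOffset, int32_t o, uint32_t* out)
    {
        uint32_t newOffset = oldOffset + uint32_t(o);
        if (o < 0 ? (newOffset >= oldOffset) : (newOffset < oldOffset))
            return false;  // 32-bit wraparound
        *out = newOffset;
        return true;
    }
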
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -12907,30 +12907,30 @@ class MAsmJSNeg
 
   public:
     INSTRUCTION_HEADER(AsmJSNeg)
     static MAsmJSNeg* NewAsmJS(TempAllocator& alloc, MDefinition* op, MIRType type) {
         return new(alloc) MAsmJSNeg(op, type);
     }
 };
 
-class MAsmJSHeapAccess
+class MWasmMemoryAccess
 {
     uint32_t offset_;
     uint32_t align_;
     Scalar::Type accessType_ : 8;
     bool needsBoundsCheck_;
     unsigned numSimdElems_;
     MemoryBarrierBits barrierBefore_;
     MemoryBarrierBits barrierAfter_;
 
   public:
-    explicit MAsmJSHeapAccess(Scalar::Type accessType, unsigned numSimdElems = 0,
-                              MemoryBarrierBits barrierBefore = MembarNobits,
-                              MemoryBarrierBits barrierAfter = MembarNobits)
+    explicit MWasmMemoryAccess(Scalar::Type accessType, unsigned numSimdElems = 0,
+                               MemoryBarrierBits barrierBefore = MembarNobits,
+                               MemoryBarrierBits barrierAfter = MembarNobits)
       : offset_(0),
         align_(Scalar::byteSize(accessType)),
         accessType_(accessType),
         needsBoundsCheck_(true),
         numSimdElems_(numSimdElems),
         barrierBefore_(barrierBefore),
         barrierAfter_(barrierAfter)
     {
@@ -12953,22 +12953,22 @@ class MAsmJSHeapAccess
     void setAlign(uint32_t a) { MOZ_ASSERT(mozilla::IsPowerOfTwo(a)); align_ = a; }
     MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
     MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
     bool isAtomicAccess() const { return (barrierBefore_|barrierAfter_) != MembarNobits; }
 };
 
 class MAsmJSLoadHeap
   : public MUnaryInstruction,
-    public MAsmJSHeapAccess,
+    public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSLoadHeap(MDefinition* base, const MAsmJSHeapAccess& access)
+    MAsmJSLoadHeap(MDefinition* base, const MWasmMemoryAccess& access)
       : MUnaryInstruction(base),
-        MAsmJSHeapAccess(access)
+        MWasmMemoryAccess(access)
     {
         if (access.barrierBefore()|access.barrierAfter())
             setGuard();         // Not removable
         else
             setMovable();
 
         MOZ_ASSERT(access.accessType() != Scalar::Uint8Clamped,
                    "unexpected load heap in asm.js");
@@ -12990,23 +12990,23 @@ class MAsmJSLoadHeap
             return AliasSet::Store(AliasSet::AsmJSHeap);
         return AliasSet::Load(AliasSet::AsmJSHeap);
     }
     AliasType mightAlias(const MDefinition* def) const override;
 };
 
 class MAsmJSStoreHeap
   : public MBinaryInstruction,
-    public MAsmJSHeapAccess,
+    public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSStoreHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+    MAsmJSStoreHeap(MDefinition* base, const MWasmMemoryAccess& access,
                     MDefinition* v)
       : MBinaryInstruction(base, v),
-        MAsmJSHeapAccess(access)
+        MWasmMemoryAccess(access)
     {
         if (access.barrierBefore()|access.barrierAfter())
             setGuard();         // Not removable
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSStoreHeap)
     TRIVIAL_NEW_WRAPPERS
@@ -13017,23 +13017,23 @@ class MAsmJSStoreHeap
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSCompareExchangeHeap
   : public MTernaryInstruction,
-    public MAsmJSHeapAccess,
+    public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSCompareExchangeHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+    MAsmJSCompareExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
                               MDefinition* oldv, MDefinition* newv)
         : MTernaryInstruction(base, oldv, newv),
-          MAsmJSHeapAccess(access)
+          MWasmMemoryAccess(access)
     {
         setGuard();             // Not removable
         setResultType(MIRType::Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSCompareExchangeHeap)
     TRIVIAL_NEW_WRAPPERS
@@ -13044,23 +13044,23 @@ class MAsmJSCompareExchangeHeap
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSAtomicExchangeHeap
   : public MBinaryInstruction,
-    public MAsmJSHeapAccess,
+    public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSAtomicExchangeHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+    MAsmJSAtomicExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
                              MDefinition* value)
         : MBinaryInstruction(base, value),
-          MAsmJSHeapAccess(access)
+          MWasmMemoryAccess(access)
     {
         setGuard();             // Not removable
         setResultType(MIRType::Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSAtomicExchangeHeap)
     TRIVIAL_NEW_WRAPPERS
@@ -13070,25 +13070,25 @@ class MAsmJSAtomicExchangeHeap
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSAtomicBinopHeap
   : public MBinaryInstruction,
-    public MAsmJSHeapAccess,
+    public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
     AtomicOp op_;
 
-    MAsmJSAtomicBinopHeap(AtomicOp op, MDefinition* base, const MAsmJSHeapAccess& access,
+    MAsmJSAtomicBinopHeap(AtomicOp op, MDefinition* base, const MWasmMemoryAccess& access,
                           MDefinition* v)
         : MBinaryInstruction(base, v),
-          MAsmJSHeapAccess(access),
+          MWasmMemoryAccess(access),
           op_(op)
     {
         setGuard();         // Not removable
         setResultType(MIRType::Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSAtomicBinopHeap)
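
A brief usage sketch of the invariants the descriptor above enforces. The align() and offset() getters are elided at the hunk boundary but are implied by their setters:

    // Usage sketch of MWasmMemoryAccess invariants from the definition above.
    MWasmMemoryAccess access(Scalar::Float64);   // no barriers requested
    MOZ_ASSERT(access.align() == 8);             // defaults to byteSize(Float64)
    MOZ_ASSERT(!access.isAtomicAccess());        // (before|after) == MembarNobits
    access.setOffset(16);
    access.setAlign(4);                          // power of two, so the assert passes

    MWasmMemoryAccess atomic(Scalar::Int32, 0, MembarBeforeLoad, MembarAfterLoad);
    MOZ_ASSERT(atomic.isAtomicAccess());         // any barrier bit implies atomicity
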
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -217,18 +217,18 @@ class MIRGenerator
 
   public:
     AsmJSPerfSpewer& perfSpewer() { return asmJSPerfSpewer_; }
 #endif
 
   public:
     const JitCompileOptions options;
 
-    bool needsAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access) const;
-    size_t foldableOffsetRange(const MAsmJSHeapAccess* access) const;
+    bool needsAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access) const;
+    size_t foldableOffsetRange(const MWasmMemoryAccess* access) const;
     size_t foldableOffsetRange(bool accessNeedsBoundsCheck, bool atomic) const;
 
   private:
     GraphSpewer gs_;
 
   public:
     GraphSpewer& graphSpewer() {
         return gs_;
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -104,32 +104,32 @@ MIRGenerator::addAbortedPreliminaryGroup
             return;
     }
     AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!abortedPreliminaryGroups_.append(group))
         oomUnsafe.crash("addAbortedPreliminaryGroup");
 }
 
 bool
-MIRGenerator::needsAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access) const
+MIRGenerator::needsAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access) const
 {
     // A heap access needs a bounds-check branch if we're not relying on signal
     // handlers to catch errors, and if it's not proven to be within bounds.
     // We use signal-handlers on x64, but on x86 there isn't enough address
     // space for a guard region.  Also, on x64 the atomic loads and stores
     // can't (yet) use the signal handlers.
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
     if (usesSignalHandlersForAsmJSOOB_ && !access->isAtomicAccess())
         return false;
 #endif
     return access->needsBoundsCheck();
 }
 
 size_t
-MIRGenerator::foldableOffsetRange(const MAsmJSHeapAccess* access) const
+MIRGenerator::foldableOffsetRange(const MWasmMemoryAccess* access) const
 {
     return foldableOffsetRange(access->needsBoundsCheck(), access->isAtomicAccess());
 }
 
 size_t
 MIRGenerator::foldableOffsetRange(bool accessNeedsBoundsCheck, bool atomic) const
 {
     // This determines whether it's ok to fold up to WasmImmediateRange
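
The decision needsAsmJSBoundsCheckBranch() encodes can be read as a small predicate over the access and the platform; a hedged restatement as standalone code:

    // Restatement of needsAsmJSBoundsCheckBranch() above. On x64 a reserved
    // guard region plus a SIGSEGV handler catches OOB without a branch,
    // except (for now) for atomic accesses.
    static bool NeedsExplicitBoundsCheck(bool usesSignalHandlersForOOB,
                                         bool isAtomic, bool needsBoundsCheck)
    {
    #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
        if (usesSignalHandlersForOOB && !isAtomic)
            return false;   // signal-handler OOB handling suffices
    #endif
        return needsBoundsCheck;
    }
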
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -920,17 +920,17 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LA
     }
 
     memoryBarrier(mir->barrierAfter());
 
     masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
 }
 
 static void
-MaybeAddAtomicsBoundsCheck(MacroAssemblerX64& masm, MAsmJSHeapAccess* mir, Register ptr)
+MaybeAddAtomicsBoundsCheck(MacroAssemblerX64& masm, MWasmMemoryAccess* mir, Register ptr)
 {
     if (!mir->needsBoundsCheck())
         return;
 
     // Note that we can't use the same machinery as normal asm.js loads/stores
     // since signal-handler bounds checking is not yet implemented for atomic
     // accesses.
     uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(-mir->endOffset())).offset();
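
MaybeAddAtomicsBoundsCheck() uses the patched-compare idiom: the heap length is unknown until link time, so the compare immediate is emitted as -endOffset() and patched later. A hedged sketch of the flow; the conditional branch and the exact patching step are elided in the hunk above and are assumptions here:

    // Hedged sketch of the patched-compare idiom in MaybeAddAtomicsBoundsCheck().
    uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(-mir->endOffset())).offset();
    // ... a conditional branch to the OOB path follows (elided in the hunk) ...
    masm.append(wasm::BoundsCheck(cmpOffset));  // assumed: records cmpOffset so the
                                                // immediate can later be patched to
                                                // heapLength - endOffset
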
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -423,17 +423,17 @@ CodeGeneratorX86Shared::visitOffsetBound
     // after the access to restore asm.js invariants.
     masm.movslq(oolCheck->ptrReg(), oolCheck->ptrReg());
 #endif
 
     masm.jmp(oolCheck->rejoin());
 }
 
 void
-CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access,
+CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access,
                                                    const MInstruction* mir,
                                                    Register ptr, Label* maybeFail)
 {
     // Emit a bounds-checking branch for |access|.
 
     MOZ_ASSERT(gen->needsAsmJSBoundsCheckBranch(access));
 
     Label* pass = nullptr;
@@ -462,17 +462,17 @@ CodeGeneratorX86Shared::emitAsmJSBoundsC
 
     if (pass)
         masm.bind(pass);
 
     masm.append(wasm::BoundsCheck(cmpOffset));
 }
 
 bool
-CodeGeneratorX86Shared::maybeEmitThrowingAsmJSBoundsCheck(const MAsmJSHeapAccess* access,
+CodeGeneratorX86Shared::maybeEmitThrowingAsmJSBoundsCheck(const MWasmMemoryAccess* access,
                                                           const MInstruction* mir,
                                                           const LAllocation* ptr)
 {
     if (!gen->needsAsmJSBoundsCheckBranch(access))
         return false;
 
     emitAsmJSBoundsCheckBranch(access, mir, ToRegister(ptr), nullptr);
     return true;
@@ -513,17 +513,17 @@ CodeGeneratorX86Shared::maybeEmitAsmJSSt
     if (!mir->isAtomicAccess())
         *rejoin = alloc().lifoAlloc()->newInfallible<Label>();
 
     emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), *rejoin);
     return true;
 }
 
 void
-CodeGeneratorX86Shared::cleanupAfterAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access,
+CodeGeneratorX86Shared::cleanupAfterAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access,
                                                            Register ptr)
 {
     // Clean up after performing a heap access checked by a branch.
 
     MOZ_ASSERT(gen->needsAsmJSBoundsCheckBranch(access));
 
 #ifdef JS_CODEGEN_X64
     // If the offset is 0, we don't use an OffsetBoundsCheck.
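
Together these helpers form a check/access/cleanup bracket around each heap access. A hedged sketch of a visitor-side call sequence, where emitTheAccess is a hypothetical stand-in for the per-instruction emission:

    // Hedged sketch of the bracket the helpers above implement.
    Register ptr = ToRegister(ins->ptr());
    bool checked = gen->needsAsmJSBoundsCheckBranch(access);
    if (checked)
        emitAsmJSBoundsCheckBranch(access, mir, ptr, nullptr);  // throwing variant
    emitTheAccess(ptr);                                         // hypothetical
    if (checked)
        cleanupAfterAsmJSBoundsCheckBranch(access, ptr);        // restore invariants
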
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
@@ -90,35 +90,35 @@ class CodeGeneratorX86Shared : public Co
 
         void accept(CodeGeneratorX86Shared* codegen) {
             codegen->visitOutOfLineSimdFloatToIntCheck(this);
         }
     };
 
   private:
     void
-    emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* mir, const MInstruction* ins,
+    emitAsmJSBoundsCheckBranch(const MWasmMemoryAccess* mir, const MInstruction* ins,
                                Register ptr, Label* fail);
 
   public:
     // For SIMD and atomic loads and stores (which throw on out-of-bounds):
     bool
-    maybeEmitThrowingAsmJSBoundsCheck(const MAsmJSHeapAccess* mir, const MInstruction* ins,
+    maybeEmitThrowingAsmJSBoundsCheck(const MWasmMemoryAccess* mir, const MInstruction* ins,
                                       const LAllocation* ptr);
 
     // For asm.js plain and atomic loads that possibly require a bounds check:
     bool
     maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins,
                                   OutOfLineLoadTypedArrayOutOfBounds** ool);
 
     // For asm.js plain and atomic stores that possibly require a bounds check:
     bool
     maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins, Label** rejoin);
 
-    void cleanupAfterAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* mir, Register ptr);
+    void cleanupAfterAsmJSBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr);
 
     NonAssertingLabel deoptLabel_;
 
     Operand ToOperand(const LAllocation& a);
     Operand ToOperand(const LAllocation* a);
     Operand ToOperand(const LDefinition* def);
 
     MoveOperand toMoveOperand(LAllocation a) const;