Bug 1253115 - BaldrMonkey: Refactor AsmJS load/store infrastructure. r=luke
author     Dan Gohman <sunfish@mozilla.com>
date       Fri, 04 Mar 2016 07:57:29 -0800
changeset  323128  590ae9fe87592f60b50597c68dd8a0d338d90925
parent     323127  ac2f7ef7566d6b99e880b0fc436439c976ce7bad
child      323129  c7f58f4320c518f1c00b2d3cfc2ef111bbf9f527
js/src/asmjs/AsmJS.cpp
js/src/asmjs/WasmIonCompile.cpp
js/src/jit/AlignmentMaskAnalysis.cpp
js/src/jit/EffectiveAddressAnalysis.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MIRGenerator.h
js/src/jit/MIRGraph.cpp
js/src/jit/arm/Lowering-arm.cpp
js/src/jit/mips-shared/Lowering-mips-shared.cpp
js/src/jit/x64/Lowering-x64.cpp
js/src/jit/x86/Lowering-x86.cpp
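
The heart of the refactoring: rather than threading an access type, a SIMD
element count, and memory-barrier bits through every heap-access constructor
as separate parameters, the patch bundles them (together with the new offset
and alignment fields) into a single MAsmJSHeapAccess descriptor passed by
const reference alongside the base pointer. A minimal before/after sketch of
one call site, taken from the atomicLoadHeap change below:

    // Before: every property is its own parameter.
    MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr,
                                               /* numElems */ 0,
                                               MembarBeforeLoad, MembarAfterLoad);

    // After: one descriptor carries type, SIMD width, barriers, offset, align.
    MAsmJSHeapAccess access(accessType, /* numSimdElems = */ 0,
                            MembarBeforeLoad, MembarAfterLoad);
    MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
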
--- a/js/src/asmjs/AsmJS.cpp
+++ b/js/src/asmjs/AsmJS.cpp
@@ -3601,34 +3601,37 @@ IsLiteralOrConstInt(FunctionValidator& f
     NumLit lit;
     if (!IsLiteralOrConst(f, pn, &lit))
         return false;
 
     return IsLiteralInt(lit, u32);
 }
 
 static const int32_t NoMask = -1;
+static const bool YesSimd = true;
+static const bool NoSimd = false;
 
 static bool
 CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
-                 Scalar::Type* viewType, int32_t* mask)
+                 bool isSimd, Scalar::Type* viewType, int32_t* mask)
 {
     if (!viewName->isKind(PNK_NAME))
         return f.fail(viewName, "base of array access must be a typed array view name");
 
     const ModuleValidator::Global* global = f.lookupGlobal(viewName->name());
     if (!global || !global->isAnyArrayView())
         return f.fail(viewName, "base of array access must be a typed array view name");
 
     *viewType = global->viewType();
 
     uint32_t index;
     if (IsLiteralOrConstInt(f, indexExpr, &index)) {
         uint64_t byteOffset = uint64_t(index) << TypedArrayShift(*viewType);
-        if (!f.m().tryConstantAccess(byteOffset, TypedArrayElemSize(*viewType)))
+        uint64_t width = isSimd ? Simd128DataSize : TypedArrayElemSize(*viewType);
+        if (!f.m().tryConstantAccess(byteOffset, width))
             return f.fail(indexExpr, "constant index out of range");
 
         *mask = NoMask;
         return f.writeInt32Lit(byteOffset);
     }
 
     // Mask off the low bits to account for the clearing effect of a right shift
     // followed by the left shift implicit in the array access. E.g., H32[i>>2]
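
(The hunk cuts the comment off here. For reference, a worked sketch of the
mask it describes, not part of the patch: for a view with element shift s,
the validator masks with ~((1 << s) - 1), since the right shift in the source
and the implicit left shift in the access clear the low s bits.)

    // Illustrative only: for H32[i>>2], shift == 2, so the access reads
    //   (i >> 2) << 2  ==  i & ~((1 << 2) - 1)  ==  i & 0xFFFFFFFC
    static int32_t MaskForShift(unsigned shift) {
        return ~((int32_t(1) << shift) - 1);
    }
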
@@ -3650,85 +3653,85 @@ CheckArrayAccess(FunctionValidator& f, P
 
         Type pointerType;
         if (!CheckExpr(f, pointerNode, &pointerType))
             return false;
 
         if (!pointerType.isIntish())
             return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
     } else {
-        // For legacy compatibility, accept Int8/Uint8 accesses with no shift.
+        // For SIMD accesses, and for legacy scalar-access compatibility,
+        // accept Int8/Uint8 accesses with no shift.
         if (TypedArrayShift(*viewType) != 0)
             return f.fail(indexExpr, "index expression isn't shifted; must be an Int8/Uint8 access");
 
         MOZ_ASSERT(*mask == NoMask);
-        bool folded = false;
 
         ParseNode* pointerNode = indexExpr;
 
         Type pointerType;
         if (!CheckExpr(f, pointerNode, &pointerType))
             return false;
 
-        if (folded) {
+        if (isSimd) {
             if (!pointerType.isIntish())
                 return f.failf(pointerNode, "%s is not a subtype of intish", pointerType.toChars());
         } else {
             if (!pointerType.isInt())
                 return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
         }
     }
 
     return true;
 }
 
 static bool
 CheckAndPrepareArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
-                           Scalar::Type* viewType, int32_t* mask)
+                           bool isSimd, Scalar::Type* viewType)
 {
     // asm.js doesn't have constant offsets, so just encode a 0.
     if (!f.encoder().writeVarU32(0))
         return false;
 
     size_t alignAt;
     if (!f.encoder().writePatchableVarU8(&alignAt))
         return false;
 
     size_t prepareAt;
     if (!f.encoder().writePatchableExpr(&prepareAt))
         return false;
 
-    if (!CheckArrayAccess(f, viewName, indexExpr, viewType, mask))
+    int32_t mask;
+    if (!CheckArrayAccess(f, viewName, indexExpr, isSimd, viewType, &mask))
         return false;
 
     // asm.js only has naturally-aligned accesses.
     f.encoder().patchVarU8(alignAt, TypedArrayElemSize(*viewType));
 
     // Don't generate the mask op if there is no need for it, which can
     // happen for a shift of zero or a SIMD access.
-    if (*mask != NoMask) {
+    if (mask != NoMask) {
         f.encoder().patchExpr(prepareAt, Expr::I32And);
-        return f.writeInt32Lit(*mask);
+        return f.writeInt32Lit(mask);
     }
 
     f.encoder().patchExpr(prepareAt, Expr::Id);
     return true;
 }
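
For a masked scalar access such as H32[i>>2], the patchable writes above
produce this expression-stream layout (a sketch inferred from the calls in
this function, not a normative encoding):

    //   varU32 0           offset: asm.js has no constant offsets
    //   varU8  4           align, patched to TypedArrayElemSize(viewType)
    //   Expr::I32And       patched in; Expr::Id when no mask is needed
    //     <index expr>     emitted inside CheckArrayAccess
    //     i32 literal ~3   the mask, written by writeInt32Lit(mask)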
 
 static bool
 CheckLoadArray(FunctionValidator& f, ParseNode* elem, Type* type)
 {
     Scalar::Type viewType;
-    int32_t mask;
 
     size_t opcodeAt;
     if (!f.encoder().writePatchableExpr(&opcodeAt))
         return false;
 
-    if (!CheckAndPrepareArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType, &mask))
+    if (!CheckAndPrepareArrayAccess(f, ElemBase(elem), ElemIndex(elem), NoSimd, &viewType))
         return false;
 
     switch (viewType) {
       case Scalar::Int8:    f.encoder().patchExpr(opcodeAt, Expr::I32LoadMem8S);  break;
       case Scalar::Uint8:   f.encoder().patchExpr(opcodeAt, Expr::I32LoadMem8U);  break;
       case Scalar::Int16:   f.encoder().patchExpr(opcodeAt, Expr::I32LoadMem16S); break;
       case Scalar::Uint16:  f.encoder().patchExpr(opcodeAt, Expr::I32LoadMem16U); break;
       case Scalar::Uint32:
@@ -3762,18 +3765,17 @@ CheckLoadArray(FunctionValidator& f, Par
 static bool
 CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type)
 {
     size_t opcodeAt;
     if (!f.encoder().writePatchableExpr(&opcodeAt))
         return false;
 
     Scalar::Type viewType;
-    int32_t mask;
-    if (!CheckAndPrepareArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType, &mask))
+    if (!CheckAndPrepareArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), NoSimd, &viewType))
         return false;
 
     Type rhsType;
     if (!CheckExpr(f, rhs, &rhsType))
         return false;
 
     switch (viewType) {
       case Scalar::Int8:
@@ -4058,19 +4060,19 @@ CheckMathMinMax(FunctionValidator& f, Pa
             return f.failf(nextArg, "%s is not a subtype of %s", nextType.toChars(), firstType.toChars());
     }
 
     return true;
 }
 
 static bool
 CheckSharedArrayAtomicAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
-                             Scalar::Type* viewType, int32_t* mask)
-{
-    if (!CheckAndPrepareArrayAccess(f, viewName, indexExpr, viewType, mask))
+                             Scalar::Type* viewType)
+{
+    if (!CheckAndPrepareArrayAccess(f, viewName, indexExpr, NoSimd, viewType))
         return false;
 
     // The global will be sane; CheckArrayAccess checks it.
     const ModuleValidator::Global* global = f.lookupGlobal(viewName->name());
     if (global->which() != ModuleValidator::Global::ArrayView)
         return f.fail(viewName, "base of array access must be a typed array view");
 
     MOZ_ASSERT(f.m().atomicsPresent());
@@ -4116,18 +4118,17 @@ CheckAtomicsLoad(FunctionValidator& f, P
     ParseNode* arrayArg = CallArgList(call);
     ParseNode* indexArg = NextNode(arrayArg);
 
     size_t viewTypeAt;
     if (!WriteAtomicOperator(f, Expr::I32AtomicsLoad, &viewTypeAt))
         return false;
 
     Scalar::Type viewType;
-    int32_t mask;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &mask))
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
         return false;
 
     f.encoder().patchU8(viewTypeAt, uint8_t(viewType));
 
     *type = Type::Int;
     return true;
 }
 
@@ -4141,18 +4142,17 @@ CheckAtomicsStore(FunctionValidator& f, 
     ParseNode* indexArg = NextNode(arrayArg);
     ParseNode* valueArg = NextNode(indexArg);
 
     size_t viewTypeAt;
     if (!WriteAtomicOperator(f, Expr::I32AtomicsStore, &viewTypeAt))
         return false;
 
     Scalar::Type viewType;
-    int32_t mask;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &mask))
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
         return false;
 
     Type rhsType;
     if (!CheckExpr(f, valueArg, &rhsType))
         return false;
 
     if (!rhsType.isIntish())
         return f.failf(arrayArg, "%s is not a subtype of intish", rhsType.toChars());
@@ -4175,18 +4175,17 @@ CheckAtomicsBinop(FunctionValidator& f, 
 
     size_t viewTypeAt;
     if (!WriteAtomicOperator(f, Expr::I32AtomicsBinOp, &viewTypeAt))
         return false;
     if (!f.encoder().writeU8(uint8_t(op)))
         return false;
 
     Scalar::Type viewType;
-    int32_t mask;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &mask))
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
         return false;
 
     Type valueArgType;
     if (!CheckExpr(f, valueArg, &valueArgType))
         return false;
 
     if (!valueArgType.isIntish())
         return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars());
@@ -4224,18 +4223,17 @@ CheckAtomicsCompareExchange(FunctionVali
     ParseNode* oldValueArg = NextNode(indexArg);
     ParseNode* newValueArg = NextNode(oldValueArg);
 
     size_t viewTypeAt;
     if (!WriteAtomicOperator(f, Expr::I32AtomicsCompareExchange, &viewTypeAt))
         return false;
 
     Scalar::Type viewType;
-    int32_t mask;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &mask))
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
         return false;
 
     Type oldValueArgType;
     if (!CheckExpr(f, oldValueArg, &oldValueArgType))
         return false;
 
     Type newValueArgType;
     if (!CheckExpr(f, newValueArg, &newValueArgType))
@@ -4263,18 +4261,17 @@ CheckAtomicsExchange(FunctionValidator& 
     ParseNode* indexArg = NextNode(arrayArg);
     ParseNode* valueArg = NextNode(indexArg);
 
     size_t viewTypeAt;
     if (!WriteAtomicOperator(f, Expr::I32AtomicsExchange, &viewTypeAt))
         return false;
 
     Scalar::Type viewType;
-    int32_t mask;
-    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &mask))
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
         return false;
 
     Type valueArgType;
     if (!CheckExpr(f, valueArg, &valueArgType))
         return false;
 
     if (!valueArgType.isIntish())
         return f.failf(arrayArg, "%s is not a subtype of intish", valueArgType.toChars());
@@ -5112,38 +5109,24 @@ CheckSimdShuffle(FunctionValidator& f, P
 
 static bool
 CheckSimdLoadStoreArgs(FunctionValidator& f, ParseNode* call)
 {
     ParseNode* view = CallArgList(call);
     if (!view->isKind(PNK_NAME))
         return f.fail(view, "expected Uint8Array view as SIMD.*.load/store first argument");
 
-    const ModuleValidator::Global* global = f.lookupGlobal(view->name());
-    if (!global ||
-        global->which() != ModuleValidator::Global::ArrayView ||
-        global->viewType() != Scalar::Uint8)
-    {
+    ParseNode* indexExpr = NextNode(view);
+
+    Scalar::Type viewType;
+    if (!CheckAndPrepareArrayAccess(f, view, indexExpr, YesSimd, &viewType))
+        return false;
+
+    if (viewType != Scalar::Uint8)
         return f.fail(view, "expected Uint8Array view as SIMD.*.load/store first argument");
-    }
-
-    ParseNode* indexExpr = NextNode(view);
-    uint32_t indexLit;
-    if (IsLiteralOrConstInt(f, indexExpr, &indexLit)) {
-        if (!f.m().tryConstantAccess(indexLit, Simd128DataSize))
-            return f.fail(indexExpr, "constant index out of range");
-        return f.writeInt32Lit(indexLit);
-    }
-
-    Type indexType;
-    if (!CheckExpr(f, indexExpr, &indexType))
-        return false;
-
-    if (!indexType.isIntish())
-        return f.failf(indexExpr, "%s is not a subtype of intish", indexType.toChars());
 
     return true;
 }
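
Routing SIMD accesses through the shared path widens the constant-index range
check from the element size to the full vector. A worked example (assuming
Simd128DataSize is 16 bytes):

    // For SIMD.Float32x4.load(HU8, 1024) the validator now checks
    //   tryConstantAccess(/* byteOffset = */ 1024,
    //                     /* width = */ Simd128DataSize)  // bytes 1024..1039
    // whereas a plain HU8[1024] access checks only a one-byte width.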
 
 static bool
 CheckSimdLoad(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
               Type* type)
 {
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -482,120 +482,123 @@ class FunctionCompiler
 
     void assign(unsigned slot, MDefinition* def)
     {
         if (inDeadCode())
             return;
         curBlock_->setSlot(info().localSlot(slot), def);
     }
 
-    MDefinition* loadHeap(Scalar::Type accessType, MDefinition* ptr)
+    MDefinition* loadHeap(MDefinition* base,
+                          const MAsmJSHeapAccess& access)
     {
         if (inDeadCode())
             return nullptr;
 
-        MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD loads should use loadSimdHeap");
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr);
+        MOZ_ASSERT(!Scalar::isSimdType(access.accessType()), "SIMD loads should use loadSimdHeap");
+        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
         curBlock_->add(load);
         return load;
     }
 
-    MDefinition* loadSimdHeap(Scalar::Type accessType, MDefinition* ptr, unsigned numElems)
+    MDefinition* loadSimdHeap(MDefinition* base, const MAsmJSHeapAccess& access)
     {
         if (inDeadCode())
             return nullptr;
 
-        MOZ_ASSERT(Scalar::isSimdType(accessType), "loadSimdHeap can only load from a SIMD view");
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, numElems);
+        MOZ_ASSERT(Scalar::isSimdType(access.accessType()),
+                   "loadSimdHeap can only load from a SIMD view");
+        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
         curBlock_->add(load);
         return load;
     }
 
-    void storeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v)
+    void storeHeap(MDefinition* base, const MAsmJSHeapAccess& access, MDefinition* v)
     {
         if (inDeadCode())
             return;
 
-        MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD stores should use storeSimdHeap");
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v);
+        MOZ_ASSERT(!Scalar::isSimdType(access.accessType()),
+                   "SIMD stores should use storeSimdHeap");
+        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), base, access, v);
         curBlock_->add(store);
     }
 
-    void storeSimdHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v,
-                       unsigned numElems)
+    void storeSimdHeap(MDefinition* base, const MAsmJSHeapAccess& access, MDefinition* v)
     {
         if (inDeadCode())
             return;
 
-        MOZ_ASSERT(Scalar::isSimdType(accessType), "storeSimdHeap can only load from a SIMD view");
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, numElems);
+        MOZ_ASSERT(Scalar::isSimdType(access.accessType()),
+                   "storeSimdHeap can only load from a SIMD view");
+        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), base, access, v);
         curBlock_->add(store);
     }
 
     void memoryBarrier(MemoryBarrierBits type)
     {
         if (inDeadCode())
             return;
         MMemoryBarrier* ins = MMemoryBarrier::New(alloc(), type);
         curBlock_->add(ins);
     }
 
-    MDefinition* atomicLoadHeap(Scalar::Type accessType, MDefinition* ptr)
+    MDefinition* atomicLoadHeap(MDefinition* base, const MAsmJSHeapAccess& access)
     {
         if (inDeadCode())
             return nullptr;
 
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, /* numElems */ 0,
-                                                   MembarBeforeLoad, MembarAfterLoad);
+        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
         curBlock_->add(load);
         return load;
     }
 
-    void atomicStoreHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v)
+    void atomicStoreHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+                         MDefinition* v)
     {
         if (inDeadCode())
             return;
 
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v,
-                                                      /* numElems = */ 0,
-                                                      MembarBeforeStore, MembarAfterStore);
+        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), base, access, v);
         curBlock_->add(store);
     }
 
-    MDefinition* atomicCompareExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* oldv,
-                                           MDefinition* newv)
+    MDefinition* atomicCompareExchangeHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+                                           MDefinition* oldv, MDefinition* newv)
     {
         if (inDeadCode())
             return nullptr;
 
         MAsmJSCompareExchangeHeap* cas =
-            MAsmJSCompareExchangeHeap::New(alloc(), accessType, ptr, oldv, newv);
+            MAsmJSCompareExchangeHeap::New(alloc(), base, access, oldv, newv);
         curBlock_->add(cas);
         return cas;
     }
 
-    MDefinition* atomicExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* value)
+    MDefinition* atomicExchangeHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+                                    MDefinition* value)
     {
         if (inDeadCode())
             return nullptr;
 
         MAsmJSAtomicExchangeHeap* cas =
-            MAsmJSAtomicExchangeHeap::New(alloc(), accessType, ptr, value);
+            MAsmJSAtomicExchangeHeap::New(alloc(), base, access, value);
         curBlock_->add(cas);
         return cas;
     }
 
-    MDefinition* atomicBinopHeap(js::jit::AtomicOp op, Scalar::Type accessType, MDefinition* ptr,
+    MDefinition* atomicBinopHeap(js::jit::AtomicOp op,
+                                 MDefinition* base, const MAsmJSHeapAccess& access,
                                  MDefinition* v)
     {
         if (inDeadCode())
             return nullptr;
 
         MAsmJSAtomicBinopHeap* binop =
-            MAsmJSAtomicBinopHeap::New(alloc(), op, accessType, ptr, v);
+            MAsmJSAtomicBinopHeap::New(alloc(), op, base, access, v);
         curBlock_->add(binop);
         return binop;
     }
 
     MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst, MIRType type)
     {
         if (inDeadCode())
             return nullptr;
@@ -1354,58 +1357,64 @@ EmitLoadGlobal(FunctionCompiler& f, Expr
     *def = f.loadGlobalVar(global.globalDataOffset, global.isConst, ToMIRType(global.type));
     MOZ_ASSERT_IF(type != ExprType::Void, global.type == type);
     return true;
 }
 
 static bool EmitExpr(FunctionCompiler&, ExprType, MDefinition**);
 
 static bool
-EmitLoadStoreAddress(FunctionCompiler& f, Scalar::Type viewType, uint32_t* offset,
-                     uint32_t* align, MDefinition** base)
+EmitHeapAddress(FunctionCompiler& f, MDefinition** base, MAsmJSHeapAccess* access)
 {
-    *offset = f.readVarU32();
-    MOZ_ASSERT(*offset == 0, "Non-zero offsets not supported yet");
-
-    *align = f.readVarU32();
+    uint32_t offset = f.readVarU32();
+    MOZ_ASSERT(offset == 0, "Non-zero offsets not supported yet");
+    access->setOffset(offset);
+
+    uint32_t align = f.readVarU32();
+    access->setAlign(align);
 
     if (!EmitExpr(f, ExprType::I32, base))
         return false;
 
     // TODO Remove this check after implementing unaligned loads/stores.
-    if (f.mg().isAsmJS())
+    if (f.mg().isAsmJS()) {
+        MOZ_ASSERT(offset == 0 && "asm.js validation does not produce load/store offsets");
         return true;
-
-    int32_t maskVal = ~(Scalar::byteSize(viewType) - 1);
+    }
+
+    int32_t maskVal = ~(Scalar::byteSize(access->accessType()) - 1);
     if (maskVal == -1)
         return true;
 
+    offset &= maskVal;
+    access->setOffset(offset);
+
     MDefinition* mask = f.constant(Int32Value(maskVal), MIRType_Int32);
     *base = f.bitwise<MBitAnd>(*base, mask, MIRType_Int32);
     return true;
 }
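
For wasm (non-asm.js) input, the tail of EmitHeapAddress forces natural
alignment by masking both the offset and the base. A worked example, not part
of the patch:

    // For a Scalar::Int32 access, byteSize is 4, so maskVal == ~3:
    //   offset' = offset & ~3
    //   base'   = MBitAnd(base, constant(~3))
    // A byte-sized access yields maskVal == -1 and emits no mask at all.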
 
 static bool
 EmitLoad(FunctionCompiler& f, Scalar::Type viewType, MDefinition** def)
 {
-    uint32_t offset, align;
-    MDefinition* ptr;
-    if (!EmitLoadStoreAddress(f, viewType, &offset, &align, &ptr))
+    MDefinition* base;
+    MAsmJSHeapAccess access(viewType);
+    if (!EmitHeapAddress(f, &base, &access))
         return false;
-    *def = f.loadHeap(viewType, ptr);
+    *def = f.loadHeap(base, access);
     return true;
 }
 
 static bool
 EmitStore(FunctionCompiler& f, Scalar::Type viewType, MDefinition** def)
 {
-    uint32_t offset, align;
-    MDefinition* ptr;
-    if (!EmitLoadStoreAddress(f, viewType, &offset, &align, &ptr))
+    MDefinition* base;
+    MAsmJSHeapAccess access(viewType);
+    if (!EmitHeapAddress(f, &base, &access))
         return false;
 
     MDefinition* rhs = nullptr;
     switch (viewType) {
       case Scalar::Int8:
       case Scalar::Int16:
       case Scalar::Int32:
         if (!EmitExpr(f, ExprType::I32, &rhs))
@@ -1417,45 +1426,45 @@ EmitStore(FunctionCompiler& f, Scalar::T
         break;
       case Scalar::Float64:
         if (!EmitExpr(f, ExprType::F64, &rhs))
             return false;
         break;
       default: MOZ_CRASH("unexpected scalar type");
     }
 
-    f.storeHeap(viewType, ptr, rhs);
+    f.storeHeap(base, access, rhs);
     *def = rhs;
     return true;
 }
 
 static bool
 EmitStoreWithCoercion(FunctionCompiler& f, Scalar::Type rhsType, Scalar::Type viewType,
                       MDefinition **def)
 {
-    uint32_t offset, align;
-    MDefinition* ptr;
-    if (!EmitLoadStoreAddress(f, viewType, &offset, &align, &ptr))
+    MDefinition* base;
+    MAsmJSHeapAccess access(viewType);
+    if (!EmitHeapAddress(f, &base, &access))
         return false;
 
     MDefinition* rhs = nullptr;
     MDefinition* coerced = nullptr;
     if (rhsType == Scalar::Float32 && viewType == Scalar::Float64) {
         if (!EmitExpr(f, ExprType::F32, &rhs))
             return false;
         coerced = f.unary<MToDouble>(rhs);
     } else if (rhsType == Scalar::Float64 && viewType == Scalar::Float32) {
         if (!EmitExpr(f, ExprType::F64, &rhs))
             return false;
         coerced = f.unary<MToFloat32>(rhs);
     } else {
         MOZ_CRASH("unexpected coerced store");
     }
 
-    f.storeHeap(viewType, ptr, coerced);
+    f.storeHeap(base, access, coerced);
     *def = rhs;
     return true;
 }
 
 static bool
 EmitSetLocal(FunctionCompiler& f, ExprType expected, MDefinition** def)
 {
     uint32_t slot = f.readVarU32();
@@ -1499,95 +1508,95 @@ EmitMathMinMax(FunctionCompiler& f, Expr
     return true;
 }
 
 static bool
 EmitAtomicsLoad(FunctionCompiler& f, MDefinition** def)
 {
     Scalar::Type viewType = Scalar::Type(f.readU8());
 
-    uint32_t offset, align;
-    MDefinition* index;
-    if (!EmitLoadStoreAddress(f, viewType, &offset, &align, &index))
+    MDefinition* base;
+    MAsmJSHeapAccess access(viewType, 0, MembarBeforeLoad, MembarAfterLoad);
+    if (!EmitHeapAddress(f, &base, &access))
         return false;
 
-    *def = f.atomicLoadHeap(viewType, index);
+    *def = f.atomicLoadHeap(base, access);
     return true;
 }
 
 static bool
 EmitAtomicsStore(FunctionCompiler& f, MDefinition** def)
 {
     Scalar::Type viewType = Scalar::Type(f.readU8());
 
-    uint32_t offset, align;
-    MDefinition* index;
-    if (!EmitLoadStoreAddress(f, viewType, &offset, &align, &index))
+    MDefinition* base;
+    MAsmJSHeapAccess access(viewType, 0, MembarBeforeStore, MembarAfterStore);
+    if (!EmitHeapAddress(f, &base, &access))
         return false;
 
     MDefinition* value;
     if (!EmitExpr(f, ExprType::I32, &value))
         return false;
-    f.atomicStoreHeap(viewType, index, value);
+    f.atomicStoreHeap(base, access, value);
     *def = value;
     return true;
 }
 
 static bool
 EmitAtomicsBinOp(FunctionCompiler& f, MDefinition** def)
 {
     Scalar::Type viewType = Scalar::Type(f.readU8());
     js::jit::AtomicOp op = js::jit::AtomicOp(f.readU8());
 
-    uint32_t offset, align;
-    MDefinition* index;
-    if (!EmitLoadStoreAddress(f, viewType, &offset, &align, &index))
+    MDefinition* base;
+    MAsmJSHeapAccess access(viewType);
+    if (!EmitHeapAddress(f, &base, &access))
         return false;
 
     MDefinition* value;
     if (!EmitExpr(f, ExprType::I32, &value))
         return false;
-    *def = f.atomicBinopHeap(op, viewType, index, value);
+    *def = f.atomicBinopHeap(op, base, access, value);
     return true;
 }
 
 static bool
 EmitAtomicsCompareExchange(FunctionCompiler& f, MDefinition** def)
 {
     Scalar::Type viewType = Scalar::Type(f.readU8());
 
-    uint32_t offset, align;
-    MDefinition* index;
-    if (!EmitLoadStoreAddress(f, viewType, &offset, &align, &index))
+    MDefinition* base;
+    MAsmJSHeapAccess access(viewType);
+    if (!EmitHeapAddress(f, &base, &access))
         return false;
 
     MDefinition* oldValue;
     if (!EmitExpr(f, ExprType::I32, &oldValue))
         return false;
     MDefinition* newValue;
     if (!EmitExpr(f, ExprType::I32, &newValue))
         return false;
-    *def = f.atomicCompareExchangeHeap(viewType, index, oldValue, newValue);
+    *def = f.atomicCompareExchangeHeap(base, access, oldValue, newValue);
     return true;
 }
 
 static bool
 EmitAtomicsExchange(FunctionCompiler& f, MDefinition** def)
 {
     Scalar::Type viewType = Scalar::Type(f.readU8());
 
-    uint32_t offset, align;
-    MDefinition* index;
-    if (!EmitLoadStoreAddress(f, viewType, &offset, &align, &index))
+    MDefinition* base;
+    MAsmJSHeapAccess access(viewType);
+    if (!EmitHeapAddress(f, &base, &access))
         return false;
 
     MDefinition* value;
     if (!EmitExpr(f, ExprType::I32, &value))
         return false;
-    *def = f.atomicExchangeHeap(viewType, index, value);
+    *def = f.atomicExchangeHeap(base, access, value);
     return true;
 }
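
Each atomics emitter above decodes the same prefix; only the trailing value
operands differ. A sketch of the consumed stream, inferred from the reads in
this file:

    //   u8      viewType       f.readU8()
    //   varU32  offset         read in EmitHeapAddress; must be 0 for now
    //   varU32  align          read in EmitHeapAddress
    //   <expr>  base address   EmitExpr(ExprType::I32, ...)
    //   <expr>  value(s), e.g. oldValue then newValue for compareExchange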
 
 static bool
 EmitCallArgs(FunctionCompiler& f, const Sig& sig, FunctionCompiler::Call* call)
 {
     f.startCallArgs(call);
     for (ValType argType : sig.args()) {
@@ -1946,42 +1955,44 @@ static bool
 EmitSimdLoad(FunctionCompiler& f, ExprType type, unsigned numElems, MDefinition** def)
 {
     unsigned defaultNumElems;
     Scalar::Type viewType = SimdExprTypeToViewType(type, &defaultNumElems);
 
     if (!numElems)
         numElems = defaultNumElems;
 
-    MDefinition* index;
-    if (!EmitExpr(f, ExprType::I32, &index))
+    MDefinition* base;
+    MAsmJSHeapAccess access(viewType, numElems);
+    if (!EmitHeapAddress(f, &base, &access))
         return false;
 
-    *def = f.loadSimdHeap(viewType, index, numElems);
+    *def = f.loadSimdHeap(base, access);
     return true;
 }
 
 static bool
 EmitSimdStore(FunctionCompiler& f, ExprType type, unsigned numElems, MDefinition** def)
 {
     unsigned defaultNumElems;
     Scalar::Type viewType = SimdExprTypeToViewType(type, &defaultNumElems);
 
     if (!numElems)
         numElems = defaultNumElems;
 
-    MDefinition* index;
-    if (!EmitExpr(f, ExprType::I32, &index))
+    MDefinition* base;
+    MAsmJSHeapAccess access(viewType, numElems);
+    if (!EmitHeapAddress(f, &base, &access))
         return false;
 
     MDefinition* vec;
     if (!EmitExpr(f, type, &vec))
         return false;
 
-    f.storeSimdHeap(viewType, index, vec, numElems);
+    f.storeSimdHeap(base, access, vec);
     *def = vec;
     return true;
 }
 
 static bool
 EmitSimdSelect(FunctionCompiler& f, ExprType type, MDefinition** def)
 {
     MDefinition* mask;
--- a/js/src/jit/AlignmentMaskAnalysis.cpp
+++ b/js/src/jit/AlignmentMaskAnalysis.cpp
@@ -77,15 +77,15 @@ bool
 AlignmentMaskAnalysis::analyze()
 {
     for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
         for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
             // Note that we don't check for MAsmJSCompareExchangeHeap
             // or MAsmJSAtomicBinopHeap, because the backend and the OOB
             // mechanism don't support non-zero offsets for them yet.
             if (i->isAsmJSLoadHeap())
-                AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->ptr(), graph_);
+                AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->base(), graph_);
             else if (i->isAsmJSStoreHeap())
-                AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->ptr(), graph_);
+                AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->base(), graph_);
         }
     }
     return true;
 }
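
AnalyzeAsmHeapAddress itself is unchanged here; it reassociates alignment
masks so that constant displacements become visible to the effective-address
analysis. A worked illustration of the rewrite it performs (an assumption
based on the pass's stated purpose):

    //   H32[(i + 4) >> 2]  reaches MIR as a load from  (i + 4) & ~3;
    //   the pass rewrites this to                      (i & ~3) + 4,
    //   valid because 4 is a multiple of the alignment, letting the +4
    //   be folded into the access offset below.
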
--- a/js/src/jit/EffectiveAddressAnalysis.cpp
+++ b/js/src/jit/EffectiveAddressAnalysis.cpp
@@ -126,50 +126,50 @@ EffectiveAddressAnalysis::tryAddDisplace
     ins->setOffset(newOffset);
     return true;
 }
 
 template<typename MAsmJSHeapAccessType>
 void
 EffectiveAddressAnalysis::analyzeAsmHeapAccess(MAsmJSHeapAccessType* ins)
 {
-    MDefinition* ptr = ins->ptr();
+    MDefinition* base = ins->base();
 
-    if (ptr->isConstant()) {
+    if (base->isConstant()) {
         // Look for heap[i] where i is a constant offset, and fold the offset.
         // By doing the folding now, we simplify the task of codegen; the offset
         // is always the address mode immediate. This also allows it to avoid
         // a situation where the sum of a constant pointer value and a non-zero
         // offset doesn't actually fit into the address mode immediate.
-        int32_t imm = ptr->toConstant()->toInt32();
+        int32_t imm = base->toConstant()->toInt32();
         if (imm != 0 && tryAddDisplacement(ins, imm)) {
             MInstruction* zero = MConstant::New(graph_.alloc(), Int32Value(0));
             ins->block()->insertBefore(ins, zero);
-            ins->replacePtr(zero);
+            ins->replaceBase(zero);
         }
 
         // If the index is within the minimum heap length, we can optimize
         // away the bounds check.
         if (imm >= 0) {
             int32_t end = (uint32_t)imm + ins->byteSize();
             if (end >= imm && (uint32_t)end <= mir_->minAsmJSHeapLength())
                  ins->removeBoundsCheck();
         }
-    } else if (ptr->isAdd()) {
+    } else if (base->isAdd()) {
         // Look for heap[a+i] where i is a constant offset, and fold the offset.
         // Alignment masks have already been moved out of the way by the
         // Alignment Mask Analysis pass.
-        MDefinition* op0 = ptr->toAdd()->getOperand(0);
-        MDefinition* op1 = ptr->toAdd()->getOperand(1);
+        MDefinition* op0 = base->toAdd()->getOperand(0);
+        MDefinition* op1 = base->toAdd()->getOperand(1);
         if (op0->isConstant())
             mozilla::Swap(op0, op1);
         if (op1->isConstant()) {
             int32_t imm = op1->toConstant()->toInt32();
             if (tryAddDisplacement(ins, imm))
-                ins->replacePtr(op0);
+                ins->replaceBase(op0);
         }
     }
 }
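
A worked example of the constant-base case above, not part of the patch:

    //   H32[1024 >> 2] validates to a constant pointer, so base arrives
    //   as MConstant(4096). tryAddDisplacement folds 4096 into
    //   ins->offset() and the base is replaced with a constant 0; if
    //   4096 + byteSize() fits in the minimum heap length, the bounds
    //   check is removed as well.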
 
 // This analysis converts patterns of the form:
 //   truncate(x + (y << {0,1,2,3}))
 //   truncate(x + (y << {0,1,2,3}) + imm32)
 // into a single lea instruction, and patterns of the form:
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -4547,20 +4547,20 @@ MLoadFixedSlot::foldsTo(TempAllocator& a
 
 bool
 MAsmJSLoadHeap::mightAlias(const MDefinition* def) const
 {
     if (def->isAsmJSStoreHeap()) {
         const MAsmJSStoreHeap* store = def->toAsmJSStoreHeap();
         if (store->accessType() != accessType())
             return true;
-        if (!ptr()->isConstant() || !store->ptr()->isConstant())
+        if (!base()->isConstant() || !store->base()->isConstant())
             return true;
-        const MConstant* otherPtr = store->ptr()->toConstant();
-        return ptr()->toConstant()->equals(otherPtr);
+        const MConstant* otherBase = store->base()->toConstant();
+        return base()->toConstant()->equals(otherBase);
     }
     return true;
 }
 
 bool
 MAsmJSLoadHeap::congruentTo(const MDefinition* ins) const
 {
     if (!ins->isAsmJSLoadHeap())
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -13878,72 +13878,77 @@ class MAsmJSNeg
     static MAsmJSNeg* NewAsmJS(TempAllocator& alloc, MDefinition* op, MIRType type) {
         return new(alloc) MAsmJSNeg(op, type);
     }
 };
 
 class MAsmJSHeapAccess
 {
     uint32_t offset_;
+    uint32_t align_;
     Scalar::Type accessType_ : 8;
     bool needsBoundsCheck_;
     unsigned numSimdElems_;
     MemoryBarrierBits barrierBefore_;
     MemoryBarrierBits barrierAfter_;
 
   public:
     explicit MAsmJSHeapAccess(Scalar::Type accessType, unsigned numSimdElems = 0,
                               MemoryBarrierBits barrierBefore = MembarNobits,
                               MemoryBarrierBits barrierAfter = MembarNobits)
       : offset_(0),
+        align_(Scalar::byteSize(accessType)),
         accessType_(accessType),
         needsBoundsCheck_(true),
         numSimdElems_(numSimdElems),
         barrierBefore_(barrierBefore),
         barrierAfter_(barrierAfter)
     {
         MOZ_ASSERT(numSimdElems <= ScalarTypeToLength(accessType));
     }
 
     uint32_t offset() const { return offset_; }
     uint32_t endOffset() const { return offset() + byteSize(); }
+    uint32_t align() const { return align_; }
     Scalar::Type accessType() const { return accessType_; }
     unsigned byteSize() const {
         return Scalar::isSimdType(accessType())
                ? Scalar::scalarByteSize(accessType()) * numSimdElems()
                : TypedArrayElemSize(accessType());
     }
     bool needsBoundsCheck() const { return needsBoundsCheck_; }
     void removeBoundsCheck() { needsBoundsCheck_ = false; }
     unsigned numSimdElems() const { MOZ_ASSERT(Scalar::isSimdType(accessType_)); return numSimdElems_; }
     void setOffset(uint32_t o) {
-        MOZ_ASSERT(o >= 0);
         offset_ = o;
     }
+    void setAlign(uint32_t a) {
+        MOZ_ASSERT(mozilla::IsPowerOfTwo(a));
+        align_ = a;
+    }
     MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
     MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
     bool isAtomicAccess() const { return (barrierBefore_|barrierAfter_) != MembarNobits; }
 };
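
A sketch of constructing and threading the descriptor, mirroring
EmitAtomicsLoad in WasmIonCompile.cpp (alloc and base stand in for the
caller's state):

    MAsmJSHeapAccess access(Scalar::Int32, /* numSimdElems = */ 0,
                            MembarBeforeLoad, MembarAfterLoad);
    access.setOffset(0);                  // asm.js accesses carry no offset
    MOZ_ASSERT(access.align() == 4);      // defaults to byteSize(Int32)
    MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc, base, access);
    MOZ_ASSERT(load->isAtomicAccess());   // nonzero barriers => guarded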
 
 class MAsmJSLoadHeap
   : public MUnaryInstruction,
     public MAsmJSHeapAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSLoadHeap(Scalar::Type accessType, MDefinition* ptr, unsigned numSimdElems,
-                   MemoryBarrierBits before, MemoryBarrierBits after)
-      : MUnaryInstruction(ptr),
-        MAsmJSHeapAccess(accessType, numSimdElems, before, after)
-    {
-        if (before|after)
+    MAsmJSLoadHeap(MDefinition* base, const MAsmJSHeapAccess& access)
+      : MUnaryInstruction(base),
+        MAsmJSHeapAccess(access)
+    {
+        if (access.barrierBefore()|access.barrierAfter())
             setGuard();         // Not removable
         else
             setMovable();
 
-        switch (accessType) {
+        switch (access.accessType()) {
           case Scalar::Int8:
           case Scalar::Uint8:
           case Scalar::Int16:
           case Scalar::Uint16:
           case Scalar::Int32:
           case Scalar::Uint32:
             setResultType(MIRType_Int32);
             break;
@@ -13963,27 +13968,24 @@ class MAsmJSLoadHeap
           case Scalar::MaxTypedArrayViewType:
             MOZ_CRASH("unexpected load heap in asm.js");
         }
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSLoadHeap)
 
-    static MAsmJSLoadHeap* New(TempAllocator& alloc, Scalar::Type accessType,
-                               MDefinition* ptr, unsigned numSimdElems = 0,
-                               MemoryBarrierBits barrierBefore = MembarNobits,
-                               MemoryBarrierBits barrierAfter = MembarNobits)
-    {
-        return new(alloc) MAsmJSLoadHeap(accessType, ptr, numSimdElems,
-                                         barrierBefore, barrierAfter);
-    }
-
-    MDefinition* ptr() const { return getOperand(0); }
-    void replacePtr(MDefinition* newPtr) { replaceOperand(0, newPtr); }
+    static MAsmJSLoadHeap* New(TempAllocator& alloc, MDefinition* base,
+                               const MAsmJSHeapAccess& access)
+    {
+        return new(alloc) MAsmJSLoadHeap(base, access);
+    }
+
+    MDefinition* base() const { return getOperand(0); }
+    void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); }
 
     bool congruentTo(const MDefinition* ins) const override;
     AliasSet getAliasSet() const override {
         // When a barrier is needed make the instruction effectful by
         // giving it a "store" effect.
         if (isAtomicAccess())
             return AliasSet::Store(AliasSet::AsmJSHeap);
         return AliasSet::Load(AliasSet::AsmJSHeap);
@@ -13991,136 +13993,138 @@ class MAsmJSLoadHeap
     bool mightAlias(const MDefinition* def) const override;
 };
 
 class MAsmJSStoreHeap
   : public MBinaryInstruction,
     public MAsmJSHeapAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSStoreHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* v,
-                    unsigned numSimdElems, MemoryBarrierBits before, MemoryBarrierBits after)
-      : MBinaryInstruction(ptr, v),
-        MAsmJSHeapAccess(accessType, numSimdElems, before, after)
-    {
-        if (before|after)
+    MAsmJSStoreHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+                    MDefinition* v)
+      : MBinaryInstruction(base, v),
+        MAsmJSHeapAccess(access)
+    {
+        if (access.barrierBefore()|access.barrierAfter())
             setGuard();         // Not removable
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSStoreHeap)
 
-    static MAsmJSStoreHeap* New(TempAllocator& alloc, Scalar::Type accessType,
-                                MDefinition* ptr, MDefinition* v, unsigned numSimdElems = 0,
-                                MemoryBarrierBits barrierBefore = MembarNobits,
-                                MemoryBarrierBits barrierAfter = MembarNobits)
-    {
-        return new(alloc) MAsmJSStoreHeap(accessType, ptr, v, numSimdElems,
-                                          barrierBefore, barrierAfter);
-    }
-
-    MDefinition* ptr() const { return getOperand(0); }
-    void replacePtr(MDefinition* newPtr) { replaceOperand(0, newPtr); }
+    static MAsmJSStoreHeap* New(TempAllocator& alloc,
+                                MDefinition* base, const MAsmJSHeapAccess& access,
+                                MDefinition* v)
+    {
+        return new(alloc) MAsmJSStoreHeap(base, access, v);
+    }
+
+    MDefinition* base() const { return getOperand(0); }
+    void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); }
     MDefinition* value() const { return getOperand(1); }
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSCompareExchangeHeap
   : public MTernaryInstruction,
     public MAsmJSHeapAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSCompareExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* oldv,
-                              MDefinition* newv)
-        : MTernaryInstruction(ptr, oldv, newv),
-          MAsmJSHeapAccess(accessType)
+    MAsmJSCompareExchangeHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+                              MDefinition* oldv, MDefinition* newv)
+        : MTernaryInstruction(base, oldv, newv),
+          MAsmJSHeapAccess(access)
     {
         setGuard();             // Not removable
         setResultType(MIRType_Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSCompareExchangeHeap)
 
-    static MAsmJSCompareExchangeHeap* New(TempAllocator& alloc, Scalar::Type accessType,
-                                          MDefinition* ptr, MDefinition* oldv,
-                                          MDefinition* newv)
-    {
-        return new(alloc) MAsmJSCompareExchangeHeap(accessType, ptr, oldv, newv);
-    }
-
-    MDefinition* ptr() const { return getOperand(0); }
+    static MAsmJSCompareExchangeHeap* New(TempAllocator& alloc,
+                                          MDefinition* base, const MAsmJSHeapAccess& access,
+                                          MDefinition* oldv, MDefinition* newv)
+    {
+        return new(alloc) MAsmJSCompareExchangeHeap(base, access, oldv, newv);
+    }
+
+    MDefinition* base() const { return getOperand(0); }
     MDefinition* oldValue() const { return getOperand(1); }
     MDefinition* newValue() const { return getOperand(2); }
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSAtomicExchangeHeap
   : public MBinaryInstruction,
     public MAsmJSHeapAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSAtomicExchangeHeap(Scalar::Type accessType, MDefinition* ptr, MDefinition* value)
-        : MBinaryInstruction(ptr, value),
-          MAsmJSHeapAccess(accessType)
+    MAsmJSAtomicExchangeHeap(MDefinition* base, const MAsmJSHeapAccess& access,
+                             MDefinition* value)
+        : MBinaryInstruction(base, value),
+          MAsmJSHeapAccess(access)
     {
         setGuard();             // Not removable
         setResultType(MIRType_Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSAtomicExchangeHeap)
 
-    static MAsmJSAtomicExchangeHeap* New(TempAllocator& alloc, Scalar::Type accessType,
-                                         MDefinition* ptr, MDefinition* value)
-    {
-        return new(alloc) MAsmJSAtomicExchangeHeap(accessType, ptr, value);
-    }
-
-    MDefinition* ptr() const { return getOperand(0); }
+    static MAsmJSAtomicExchangeHeap* New(TempAllocator& alloc,
+                                         MDefinition* base, const MAsmJSHeapAccess& access,
+                                         MDefinition* value)
+    {
+        return new(alloc) MAsmJSAtomicExchangeHeap(base, access, value);
+    }
+
+    MDefinition* base() const { return getOperand(0); }
     MDefinition* value() const { return getOperand(1); }
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSAtomicBinopHeap
   : public MBinaryInstruction,
     public MAsmJSHeapAccess,
     public NoTypePolicy::Data
 {
     AtomicOp op_;
 
-    MAsmJSAtomicBinopHeap(AtomicOp op, Scalar::Type accessType, MDefinition* ptr, MDefinition* v)
-        : MBinaryInstruction(ptr, v),
-          MAsmJSHeapAccess(accessType),
+    MAsmJSAtomicBinopHeap(AtomicOp op, MDefinition* base, const MAsmJSHeapAccess& access,
+                          MDefinition* v)
+        : MBinaryInstruction(base, v),
+          MAsmJSHeapAccess(access),
           op_(op)
     {
         setGuard();         // Not removable
         setResultType(MIRType_Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSAtomicBinopHeap)
 
-    static MAsmJSAtomicBinopHeap* New(TempAllocator& alloc, AtomicOp op, Scalar::Type accessType,
-                                      MDefinition* ptr, MDefinition* v)
-    {
-        return new(alloc) MAsmJSAtomicBinopHeap(op, accessType, ptr, v);
+    static MAsmJSAtomicBinopHeap* New(TempAllocator& alloc, AtomicOp op,
+                                      MDefinition* base, const MAsmJSHeapAccess& access,
+                                      MDefinition* v)
+    {
+        return new(alloc) MAsmJSAtomicBinopHeap(op, base, access, v);
     }
 
     AtomicOp operation() const { return op_; }
-    MDefinition* ptr() const { return getOperand(0); }
+    MDefinition* base() const { return getOperand(0); }
     MDefinition* value() const { return getOperand(1); }
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSLoadGlobalVar : public MNullaryInstruction
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -224,16 +224,17 @@ class MIRGenerator
     AsmJSPerfSpewer& perfSpewer() { return asmJSPerfSpewer_; }
 #endif
 
   public:
     const JitCompileOptions options;
 
     bool needsAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access) const;
     size_t foldableOffsetRange(const MAsmJSHeapAccess* access) const;
+    size_t foldableOffsetRange(bool accessNeedsBoundsCheck) const;
 
   private:
     GraphSpewer gs_;
 
   public:
     GraphSpewer& graphSpewer() {
         return gs_;
     }
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -121,16 +121,22 @@ MIRGenerator::needsAsmJSBoundsCheckBranc
         return false;
 #endif
     return access->needsBoundsCheck();
 }
 
 size_t
 MIRGenerator::foldableOffsetRange(const MAsmJSHeapAccess* access) const
 {
+    return foldableOffsetRange(access->needsBoundsCheck());
+}
+
+size_t
+MIRGenerator::foldableOffsetRange(bool accessNeedsBoundsCheck) const
+{
     // This determines whether it's ok to fold up to WasmImmediateSize
     // offsets, instead of just WasmCheckedImmediateSize.
 
     static_assert(WasmCheckedImmediateRange <= WasmImmediateRange,
                   "WasmImmediateRange should be the size of an unconstrained "
                   "address immediate");
 
 #ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
@@ -143,17 +149,17 @@ MIRGenerator::foldableOffsetRange(const 
     // Signal-handling can be dynamically disabled by OS bugs or flags.
     if (usesSignalHandlersForAsmJSOOB_)
         return WasmImmediateRange;
 #endif
 
     // On 32-bit platforms, if we've proven the access is in bounds after
     // 32-bit wrapping, we can fold full offsets because they're added with
     // 32-bit arithmetic.
-    if (sizeof(intptr_t) == sizeof(int32_t) && !access->needsBoundsCheck())
+    if (sizeof(intptr_t) == sizeof(int32_t) && !accessNeedsBoundsCheck)
         return WasmImmediateRange;
 
     // Otherwise, only allow the checked size. This is always less than the
     // minimum heap length, and allows explicit bounds checks to fold in the
     // offset without overflow.
     return WasmCheckedImmediateRange;
 }
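
The new overload exists so a caller that has not yet built an MIR node can
ask the same question; the MAsmJSHeapAccess* form simply forwards to it. A
hypothetical use (sketch):

    // Equivalent queries; the second needs no MAsmJSHeapAccess in hand.
    size_t a = mir.foldableOffsetRange(access);
    size_t b = mir.foldableOffsetRange(access->needsBoundsCheck());
    MOZ_ASSERT(a == b);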
 
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -481,47 +481,51 @@ LIRGeneratorARM::visitAsmJSUnsignedToFlo
     MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
     LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
     define(lir, ins);
 }
 
 void
 LIRGeneratorARM::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
 {
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
-    LAllocation ptrAlloc;
+    MOZ_ASSERT(ins->offset() == 0);
+
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
+    LAllocation baseAlloc;
 
-    // For the ARM it is best to keep the 'ptr' in a register if a bounds check is needed.
-    if (ptr->isConstant() && !ins->needsBoundsCheck()) {
+    // For the ARM it is best to keep the 'base' in a register if a bounds check is needed.
+    if (base->isConstant() && !ins->needsBoundsCheck()) {
         // A bounds check is only skipped for a positive index.
-        MOZ_ASSERT(ptr->toConstant()->toInt32() >= 0);
-        ptrAlloc = LAllocation(ptr->toConstant());
+        MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+        baseAlloc = LAllocation(base->toConstant());
     } else {
-        ptrAlloc = useRegisterAtStart(ptr);
+        baseAlloc = useRegisterAtStart(base);
     }
 
-    define(new(alloc()) LAsmJSLoadHeap(ptrAlloc), ins);
+    define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
 }
 
 void
 LIRGeneratorARM::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
 {
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
-    LAllocation ptrAlloc;
+    MOZ_ASSERT(ins->offset() == 0);
+
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
+    LAllocation baseAlloc;
 
-    if (ptr->isConstant() && !ins->needsBoundsCheck()) {
-        MOZ_ASSERT(ptr->toConstant()->toInt32() >= 0);
-        ptrAlloc = LAllocation(ptr->toConstant());
+    if (base->isConstant() && !ins->needsBoundsCheck()) {
+        MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+        baseAlloc = LAllocation(base->toConstant());
     } else {
-        ptrAlloc = useRegisterAtStart(ptr);
+        baseAlloc = useRegisterAtStart(base);
     }
 
-    add(new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value())), ins);
+    add(new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value())), ins);
 }
 
 void
 LIRGeneratorARM::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr* ins)
 {
     define(new(alloc()) LAsmJSLoadFuncPtr(useRegister(ins->index()), temp()), ins);
 }
 
@@ -673,82 +677,85 @@ LIRGeneratorARM::visitCompareExchangeTyp
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
 {
     MOZ_ASSERT(ins->accessType() < Scalar::Float32);
+    MOZ_ASSERT(ins->offset() == 0);
 
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     if (byteSize(ins->accessType()) != 4 && !HasLDSTREXBHD()) {
         LAsmJSCompareExchangeCallout* lir =
-            new(alloc()) LAsmJSCompareExchangeCallout(useRegisterAtStart(ptr),
+            new(alloc()) LAsmJSCompareExchangeCallout(useRegisterAtStart(base),
                                                       useRegisterAtStart(ins->oldValue()),
                                                       useRegisterAtStart(ins->newValue()));
         defineReturn(lir, ins);
         return;
     }
 
     LAsmJSCompareExchangeHeap* lir =
-        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(ptr),
+        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base),
                                                useRegister(ins->oldValue()),
                                                useRegister(ins->newValue()));
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorARM::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
 {
-    MOZ_ASSERT(ins->ptr()->type() == MIRType_Int32);
+    MOZ_ASSERT(ins->base()->type() == MIRType_Int32);
     MOZ_ASSERT(ins->accessType() < Scalar::Float32);
+    MOZ_ASSERT(ins->offset() == 0);
 
-    const LAllocation ptr = useRegisterAtStart(ins->ptr());
+    const LAllocation base = useRegisterAtStart(ins->base());
     const LAllocation value = useRegisterAtStart(ins->value());
 
     if (byteSize(ins->accessType()) < 4 && !HasLDSTREXBHD()) {
         // Call out on ARMv6.
-        defineReturn(new(alloc()) LAsmJSAtomicExchangeCallout(ptr, value), ins);
+        defineReturn(new(alloc()) LAsmJSAtomicExchangeCallout(base, value), ins);
         return;
     }
 
-    define(new(alloc()) LAsmJSAtomicExchangeHeap(ptr, value), ins);
+    define(new(alloc()) LAsmJSAtomicExchangeHeap(base, value), ins);
 }
 
 void
 LIRGeneratorARM::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
 {
     MOZ_ASSERT(ins->accessType() < Scalar::Float32);
+    MOZ_ASSERT(ins->offset() == 0);
 
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     if (byteSize(ins->accessType()) != 4 && !HasLDSTREXBHD()) {
         LAsmJSAtomicBinopCallout* lir =
-            new(alloc()) LAsmJSAtomicBinopCallout(useRegisterAtStart(ptr),
+            new(alloc()) LAsmJSAtomicBinopCallout(useRegisterAtStart(base),
                                                   useRegisterAtStart(ins->value()));
         defineReturn(lir, ins);
         return;
     }
 
     if (!ins->hasUses()) {
         LAsmJSAtomicBinopHeapForEffect* lir =
-            new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(ptr),
+            new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base),
                                                         useRegister(ins->value()),
                                                         /* flagTemp= */ temp());
         add(lir, ins);
         return;
     }
 
     LAsmJSAtomicBinopHeap* lir =
-        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(ptr),
+        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base),
                                            useRegister(ins->value()),
                                            /* temp = */ LDefinition::BogusTemp(),
                                            /* flagTemp= */ temp());
     define(lir, ins);
 }
 
 void
 LIRGeneratorARM::visitSubstr(MSubstr* ins)
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -317,46 +317,50 @@ LIRGeneratorMIPSShared::visitAsmJSUnsign
     MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
     LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
 {
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
-    LAllocation ptrAlloc;
+    MOZ_ASSERT(ins->offset() == 0);
 
-    // For MIPS it is best to keep the 'ptr' in a register if a bounds check
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
+    LAllocation baseAlloc;
+
+    // For MIPS it is best to keep the 'base' in a register if a bounds check
     // is needed.
-    if (ptr->isConstant() && !ins->needsBoundsCheck()) {
+    if (base->isConstant() && !ins->needsBoundsCheck()) {
         // A bounds check is only skipped for a positive index.
-        MOZ_ASSERT(ptr->toConstant()->toInt32() >= 0);
-        ptrAlloc = LAllocation(ptr->toConstant());
+        MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+        baseAlloc = LAllocation(base->toConstant());
     } else
-        ptrAlloc = useRegisterAtStart(ptr);
+        baseAlloc = useRegisterAtStart(base);
 
-    define(new(alloc()) LAsmJSLoadHeap(ptrAlloc), ins);
+    define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
 {
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
-    LAllocation ptrAlloc;
+    MOZ_ASSERT(ins->offset() == 0);
+
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
+    LAllocation baseAlloc;
 
-    if (ptr->isConstant() && !ins->needsBoundsCheck()) {
-        MOZ_ASSERT(ptr->toConstant()->toInt32() >= 0);
-        ptrAlloc = LAllocation(ptr->toConstant());
+    if (base->isConstant() && !ins->needsBoundsCheck()) {
+        MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+        baseAlloc = LAllocation(base->toConstant());
     } else
-        ptrAlloc = useRegisterAtStart(ptr);
+        baseAlloc = useRegisterAtStart(base);
 
-    add(new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value())), ins);
+    add(new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value())), ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr* ins)
 {
     define(new(alloc()) LAsmJSLoadFuncPtr(useRegister(ins->index())), ins);
 }
 
@@ -403,16 +407,17 @@ LIRGeneratorMIPSShared::visitSimdValueX4
     MOZ_CRASH("NYI");
 }
 
 void
 LIRGeneratorMIPSShared::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
 {
     MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
     MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+    MOZ_ASSERT(ins->offset() == 0);
 
     MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
     MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
 
     const LUse elements = useRegister(ins->elements());
     const LAllocation index = useRegisterOrConstant(ins->index());
 
     // If the target is a floating register then we need a temp at the
@@ -431,16 +436,17 @@ LIRGeneratorMIPSShared::visitCompareExch
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
 {
     MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+    MOZ_ASSERT(ins->offset() == 0);
 
     MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
     MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
 
     const LUse elements = useRegister(ins->elements());
     const LAllocation index = useRegisterOrConstant(ins->index());
 
     // If the target is a floating register then we need a temp at the
@@ -460,73 +466,76 @@ LIRGeneratorMIPSShared::visitAtomicExcha
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
 {
     MOZ_ASSERT(ins->accessType() < Scalar::Float32);
+    MOZ_ASSERT(ins->offset() == 0);
 
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     LAsmJSCompareExchangeHeap* lir =
-        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(ptr),
+        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base),
                                                useRegister(ins->oldValue()),
                                                useRegister(ins->newValue()),
                                                /* valueTemp= */ temp(),
                                                /* offsetTemp= */ temp(),
                                                /* maskTemp= */ temp());
 
     define(lir, ins);
 }
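
(The valueTemp/offsetTemp/maskTemp trio exists because MIPS LL/SC operate only on aligned words: a byte or halfword compare-exchange must load the containing word, then shift and mask the relevant lane. Below is a rough C++ rendering of that technique for a byte lane, using a word-wide CAS as a stand-in for the LL/SC loop. This is a conceptual sketch only; the real code works at the instruction level, and the pointer aliasing here would not be valid portable C++.)

    #include <atomic>
    #include <cstdint>

    // Conceptual sketch of a byte-wide compare-exchange built from a
    // word-wide CAS, the technique the valueTemp/offsetTemp/maskTemp
    // registers support on MIPS (where LL/SC are word-granular).
    // Little-endian byte order is assumed.
    uint8_t byteCompareExchange(uint8_t* addr, uint8_t oldval, uint8_t newval) {
        // Address of the aligned word containing the byte.
        auto* word = reinterpret_cast<std::atomic<uint32_t>*>(
            reinterpret_cast<uintptr_t>(addr) & ~uintptr_t(3));
        // Bit offset of the byte within the word ("offsetTemp").
        unsigned shift = (reinterpret_cast<uintptr_t>(addr) & 3) * 8;
        uint32_t mask = uint32_t(0xff) << shift;  // "maskTemp"

        uint32_t cur = word->load(std::memory_order_relaxed);
        for (;;) {
            uint8_t lane = uint8_t((cur & mask) >> shift);
            if (lane != oldval)
                return lane;  // comparison failed; report the observed byte
            // Splice the new byte into the word ("valueTemp").
            uint32_t desired = (cur & ~mask) | (uint32_t(newval) << shift);
            if (word->compare_exchange_weak(cur, desired,
                                            std::memory_order_seq_cst,
                                            std::memory_order_relaxed))
                return oldval;
            // cur was reloaded by compare_exchange_weak; retry.
        }
    }
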
 
 void
 LIRGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
 {
-    MOZ_ASSERT(ins->ptr()->type() == MIRType_Int32);
+    MOZ_ASSERT(ins->base()->type() == MIRType_Int32);
+    MOZ_ASSERT(ins->offset() == 0);
 
-    const LAllocation ptr = useRegister(ins->ptr());
+    const LAllocation base = useRegister(ins->base());
     const LAllocation value = useRegister(ins->value());
 
    // The output may not be used, but it will be clobbered regardless,
    // so don't special-case an unused result: just let the output
    // register double as a temp.
 
     LAsmJSAtomicExchangeHeap* lir =
-        new(alloc()) LAsmJSAtomicExchangeHeap(ptr, value,
+        new(alloc()) LAsmJSAtomicExchangeHeap(base, value,
                                               /* valueTemp= */ temp(),
                                               /* offsetTemp= */ temp(),
                                               /* maskTemp= */ temp());
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
 {
     MOZ_ASSERT(ins->accessType() < Scalar::Float32);
+    MOZ_ASSERT(ins->offset() == 0);
 
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     if (!ins->hasUses()) {
         LAsmJSAtomicBinopHeapForEffect* lir =
-            new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(ptr),
+            new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base),
                                                         useRegister(ins->value()),
                                                         /* flagTemp= */ temp(),
                                                         /* valueTemp= */ temp(),
                                                         /* offsetTemp= */ temp(),
                                                         /* maskTemp= */ temp());
         add(lir, ins);
         return;
     }
 
     LAsmJSAtomicBinopHeap* lir =
-        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(ptr),
+        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base),
                                            useRegister(ins->value()),
                                            /* temp= */ LDefinition::BogusTemp(),
                                            /* flagTemp= */ temp(),
                                            /* valueTemp= */ temp(),
                                            /* offsetTemp= */ temp(),
                                            /* maskTemp= */ temp());
 
     define(lir, ins);
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -153,114 +153,114 @@ LIRGeneratorX64::visitAsmJSUnsignedToFlo
     MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
     LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
     define(lir, ins);
 }
 
 void
 LIRGeneratorX64::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
 {
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     // For simplicity, require a register if we're going to emit a bounds-check
     // branch, so that we don't have special cases for constants.
-    LAllocation ptrAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
-                           ? useRegisterAtStart(ptr)
-                           : useRegisterOrZeroAtStart(ptr);
+    LAllocation baseAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
+                            ? useRegisterAtStart(base)
+                            : useRegisterOrZeroAtStart(base);
 
-    define(new(alloc()) LAsmJSLoadHeap(ptrAlloc), ins);
+    define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
 }
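
(useRegisterOrZeroAtStart lets a constant-zero base vanish into the addressing mode, since the x64 access is effectively heapBase + base plus a displacement; only an explicit bounds-check branch forces the base into a register for the compare. A hedged sketch of the computation being lowered, with heapBase and heapLength as illustrative names:)

    #include <cstdint>
    #include <cstring>

    // Illustrative shape of an asm.js heap load on x64: the untrusted
    // 32-bit 'base' is added to the trusted heap pointer. A constant zero
    // base folds away entirely; a dynamic base must sit in a register both
    // for the add and for the explicit bounds-check compare.
    int32_t loadHeapInt32(const uint8_t* heapBase, uint32_t heapLength,
                          uint32_t base, bool needsBoundsCheck) {
        if (needsBoundsCheck && uint64_t(base) + sizeof(int32_t) > heapLength)
            return 0;  // stand-in for the out-of-line path
        int32_t result;
        memcpy(&result, heapBase + base, sizeof(result));
        return result;
    }
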
 
 void
 LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
 {
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     // For simplicity, require a register if we're going to emit a bounds-check
     // branch, so that we don't have special cases for constants.
-    LAllocation ptrAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
-                           ? useRegisterAtStart(ptr)
-                           : useRegisterOrZeroAtStart(ptr);
+    LAllocation baseAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
+                            ? useRegisterAtStart(base)
+                            : useRegisterOrZeroAtStart(base);
 
     LAsmJSStoreHeap* lir = nullptr;  // initialize to silence GCC warning
     switch (ins->accessType()) {
       case Scalar::Int8:
       case Scalar::Uint8:
       case Scalar::Int16:
       case Scalar::Uint16:
       case Scalar::Int32:
       case Scalar::Uint32:
-        lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterOrConstantAtStart(ins->value()));
+        lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterOrConstantAtStart(ins->value()));
         break;
       case Scalar::Float32:
       case Scalar::Float64:
       case Scalar::Float32x4:
       case Scalar::Int32x4:
-        lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
+        lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()));
         break;
       case Scalar::Uint8Clamped:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
     add(lir, ins);
 }
 
 void
 LIRGeneratorX64::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
 {
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     // The output may not be used but will be clobbered regardless, so
     // pin the output to eax.
     //
     // The input values must both be in registers.
 
     const LAllocation oldval = useRegister(ins->oldValue());
     const LAllocation newval = useRegister(ins->newValue());
 
     LAsmJSCompareExchangeHeap* lir =
-        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(ptr), oldval, newval);
+        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base), oldval, newval);
 
     defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
 }
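
(The eax pinning matches the x86 CMPXCHG contract: the expected old value goes in, and the value actually observed in memory comes back out, in eax/rax. In C++ terms the operation being lowered behaves like the following semantic sketch, not the emitted code itself:)

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Semantics of the lowered compare-exchange: returns the value that
    // was in memory, which equals 'oldval' exactly when the swap happened.
    // On x86 this maps onto LOCK CMPXCHG, whose in/out operand is eax.
    int32_t compareExchangeHeap(std::atomic<int32_t>* cell,
                                int32_t oldval, int32_t newval) {
        int32_t expected = oldval;             // occupies eax
        cell->compare_exchange_strong(expected, newval);
        return expected;                       // observed value, back in eax
    }

    int main() {
        std::atomic<int32_t> cell{5};
        printf("%d\n", compareExchangeHeap(&cell, 5, 9));  // 5; cell is now 9
        printf("%d\n", compareExchangeHeap(&cell, 5, 7));  // 9; cell unchanged
    }
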
 
 void
 LIRGeneratorX64::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
 {
-    MOZ_ASSERT(ins->ptr()->type() == MIRType_Int32);
+    MOZ_ASSERT(ins->base()->type() == MIRType_Int32);
 
-    const LAllocation ptr = useRegister(ins->ptr());
+    const LAllocation base = useRegister(ins->base());
     const LAllocation value = useRegister(ins->value());
 
    // The output may not be used, but it will be clobbered regardless,
    // so don't special-case an unused result: just let the output
    // register double as a temp.
 
     LAsmJSAtomicExchangeHeap* lir =
-        new(alloc()) LAsmJSAtomicExchangeHeap(ptr, value);
+        new(alloc()) LAsmJSAtomicExchangeHeap(base, value);
     define(lir, ins);
 }
 
 void
 LIRGeneratorX64::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
 {
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     // Case 1: the result of the operation is not used.
     //
     // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
     // LOCK OR, or LOCK XOR.
 
     if (!ins->hasUses()) {
         LAsmJSAtomicBinopHeapForEffect* lir =
-            new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(ptr),
+            new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base),
                                                         useRegisterOrConstant(ins->value()));
         add(lir, ins);
         return;
     }
 
     // Case 2: the result of the operation is used.
     //
     // For ADD and SUB we'll use XADD with word and byte ops as
@@ -291,17 +291,17 @@ LIRGeneratorX64::visitAsmJSAtomicBinopHe
     if (bitOp || ins->value()->isConstant()) {
         value = useRegisterOrConstant(ins->value());
     } else {
         reuseInput = true;
         value = useRegisterAtStart(ins->value());
     }
 
     LAsmJSAtomicBinopHeap* lir =
-        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(ptr),
+        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base),
                                            value,
                                            bitOp ? temp() : LDefinition::BogusTemp());
 
     if (reuseInput)
         defineReuseInput(lir, ins, LAsmJSAtomicBinopHeap::valueOp);
     else if (bitOp)
         defineFixed(lir, ins, LAllocation(AnyRegister(rax)));
     else
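
(The split between the two cases mirrors what x86 offers: when the result is ignored, a single LOCK-prefixed read-modify-write instruction suffices; when it is used, ADD and SUB can use XADD, but AND/OR/XOR have no fetching form and fall back to a CMPXCHG loop, which is why the bitOp path takes a temp and pins eax. A semantic sketch in C++; compilers make the same choice:)

    #include <atomic>
    #include <cstdint>

    void addForEffect(std::atomic<int32_t>* cell, int32_t v) {
        cell->fetch_add(v, std::memory_order_seq_cst);          // result ignored: ~ LOCK ADD
    }
    int32_t addForValue(std::atomic<int32_t>* cell, int32_t v) {
        return cell->fetch_add(v, std::memory_order_seq_cst);   // ~ XADD
    }
    int32_t andForValue(std::atomic<int32_t>* cell, int32_t v) {
        return cell->fetch_and(v, std::memory_order_seq_cst);   // ~ CMPXCHG loop
    }
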
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -201,53 +201,53 @@ LIRGeneratorX86::visitAsmJSUnsignedToFlo
     MOZ_ASSERT(ins->input()->type() == MIRType_Int32);
     LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()), temp());
     define(lir, ins);
 }
 
 void
 LIRGeneratorX86::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
 {
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     // For simplicity, require a register if we're going to emit a bounds-check
     // branch, so that we don't have special cases for constants.
-    LAllocation ptrAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
-                           ? useRegisterAtStart(ptr)
-                           : useRegisterOrZeroAtStart(ptr);
+    LAllocation baseAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
+                            ? useRegisterAtStart(base)
+                            : useRegisterOrZeroAtStart(base);
 
-    define(new(alloc()) LAsmJSLoadHeap(ptrAlloc), ins);
+    define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
 }
 
 void
 LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
 {
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     // For simplicity, require a register if we're going to emit a bounds-check
     // branch, so that we don't have special cases for constants.
-    LAllocation ptrAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
-                           ? useRegisterAtStart(ptr)
-                           : useRegisterOrZeroAtStart(ptr);
+    LAllocation baseAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
+                            ? useRegisterAtStart(base)
+                            : useRegisterOrZeroAtStart(base);
 
     LAsmJSStoreHeap* lir = nullptr;
     switch (ins->accessType()) {
       case Scalar::Int8: case Scalar::Uint8:
         // See comment for LIRGeneratorX86::useByteOpRegister.
-        lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useFixed(ins->value(), eax));
+        lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useFixed(ins->value(), eax));
         break;
       case Scalar::Int16: case Scalar::Uint16:
       case Scalar::Int32: case Scalar::Uint32:
       case Scalar::Float32: case Scalar::Float64:
       case Scalar::Float32x4: case Scalar::Int32x4:
        // For now, don't allow constant values. The immediate operand
        // affects instruction layout, which in turn affects patching.
-        lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
+        lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()));
         break;
       case Scalar::Uint8Clamped:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
     add(lir, ins);
 }
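
(The patching remark deserves unpacking: on x86 the heap address is encoded in the instruction's displacement and rewritten whenever the heap is moved or relinked, so the displacement must stay at a fixed byte offset within the instruction; an immediate source operand would change the encoding length and shift it. A toy illustration of offset-based patching; the buffer layout here is invented purely for illustration:)

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Toy model of code patching: a 4-byte displacement lives at a
    // recorded offset inside an encoded instruction. Patching blindly
    // rewrites those 4 bytes, so every encoding reaching this point must
    // keep the displacement in the same place; an extra immediate operand
    // would change the instruction length and break this.
    struct PatchSite { uint8_t* code; size_t dispOffset; };

    void patchHeapAddress(PatchSite site, uint32_t newAddr) {
        memcpy(site.code + site.dispOffset, &newAddr, sizeof(newAddr));
    }

    int main() {
        uint8_t insn[8] = {0x89, 0x81, 0, 0, 0, 0};  // e.g. mov [ecx+disp32], eax
        patchHeapAddress({insn, 2}, 0x1000);
        printf("%02x %02x\n", insn[2], insn[3]);     // 00 10 (little-endian)
    }
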
 
@@ -275,18 +275,18 @@ LIRGeneratorX86::visitStoreTypedArrayEle
     add(lir, ins);
 }
 
 void
 LIRGeneratorX86::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
 {
     MOZ_ASSERT(ins->accessType() < Scalar::Float32);
 
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     bool byteArray = byteSize(ins->accessType()) == 1;
 
     // Register allocation:
     //
    // The output may not be used, but eax will be clobbered regardless,
    // so pin the output to eax.
     //
@@ -297,63 +297,63 @@ LIRGeneratorX86::visitAsmJSCompareExchan
     // be ebx, ecx, or edx (eax is taken).
     //
     // Bug #1077036 describes some optimization opportunities.
 
     const LAllocation oldval = useRegister(ins->oldValue());
     const LAllocation newval = byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());
 
     LAsmJSCompareExchangeHeap* lir =
-        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(ptr), oldval, newval);
+        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base), oldval, newval);
 
     lir->setAddrTemp(temp());
     defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
 }
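
(The ebx pinning for byte arrays reflects a 32-bit x86 quirk: only eax, ebx, ecx, and edx have byte-addressable subregisters (al through dl), and with eax already reserved for CMPXCHG's output, the byte-sized newval is parked in ebx. GCC and Clang expose the same register class via the "q" constraint; a minimal sketch, x86 GCC/Clang only:)

    #include <cstdint>

    // 'q' requests a register with a byte subregister (a/b/c/d on x86-32),
    // the same restriction the lowering above satisfies by fixing ebx.
    uint8_t storeByte(uint8_t* p, uint8_t v) {
        asm volatile("movb %1, %0" : "=m"(*p) : "q"(v));
        return *p;
    }
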
 
 void
 LIRGeneratorX86::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
 {
-    MOZ_ASSERT(ins->ptr()->type() == MIRType_Int32);
+    MOZ_ASSERT(ins->base()->type() == MIRType_Int32);
 
-    const LAllocation ptr = useRegister(ins->ptr());
+    const LAllocation base = useRegister(ins->base());
     const LAllocation value = useRegister(ins->value());
 
     LAsmJSAtomicExchangeHeap* lir =
-        new(alloc()) LAsmJSAtomicExchangeHeap(ptr, value);
+        new(alloc()) LAsmJSAtomicExchangeHeap(base, value);
 
     lir->setAddrTemp(temp());
     if (byteSize(ins->accessType()) == 1)
         defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
     else
         define(lir, ins);
 }
 
 void
 LIRGeneratorX86::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
 {
     MOZ_ASSERT(ins->accessType() < Scalar::Float32);
 
-    MDefinition* ptr = ins->ptr();
-    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType_Int32);
 
     bool byteArray = byteSize(ins->accessType()) == 1;
 
     // Case 1: the result of the operation is not used.
     //
     // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
     // LOCK OR, or LOCK XOR.  These can all take an immediate.
 
     if (!ins->hasUses()) {
         LAllocation value;
         if (byteArray && !ins->value()->isConstant())
             value = useFixed(ins->value(), ebx);
         else
             value = useRegisterOrConstant(ins->value());
         LAsmJSAtomicBinopHeapForEffect* lir =
-            new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(ptr), value);
+            new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base), value);
         lir->setAddrTemp(temp());
         add(lir, ins);
         return;
     }
 
     // Case 2: the result of the operation is used.
     //
     // For ADD and SUB we'll use XADD:
@@ -398,17 +398,17 @@ LIRGeneratorX86::visitAsmJSAtomicBinopHe
         value = useRegisterOrConstant(ins->value());
         if (bitOp)
             tempDef = temp();
     } else {
         value = useRegisterAtStart(ins->value());
     }
 
     LAsmJSAtomicBinopHeap* lir =
-        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(ptr), value, tempDef);
+        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base), value, tempDef);
 
     lir->setAddrTemp(temp());
     if (byteArray || bitOp)
         defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
     else if (ins->value()->isConstant())
         define(lir, ins);
     else
         defineReuseInput(lir, ins, LAsmJSAtomicBinopHeap::valueOp);
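
(Here defineReuseInput encodes XADD's destructive register behavior: XADD adds its source register into memory and writes the old memory value back into that same register, so the value operand and the result can share one register. A single-threaded stand-in for those semantics:)

    #include <cstdint>

    // XADD semantics: *mem receives *mem + reg, and reg receives the old
    // *mem. The input register is destroyed, which is exactly what
    // defineReuseInput communicates to the register allocator.
    void xaddSketch(int32_t* mem, int32_t& reg) {
        int32_t old = *mem;
        *mem = old + reg;
        reg = old;
    }

    // Usage: int32_t m = 10, r = 3; xaddSketch(&m, r);  // m == 13, r == 10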