Bug 1284155 - Baldr: rename 'heap' to 'memory' to better match wasm terminology (r=bbouvier)
authorLuke Wagner <luke@mozilla.com>
Wed, 06 Jul 2016 18:40:35 -0500
changeset 343997 71449118bfe810e93207624e0e7a553fd63ebf2e
parent 343996 7711cc8ad793c2fb27cb70a4e65d37d9089be88d
child 343998 5b783da72df1ae9f4c144e460327154c95ebd557
push id6389
push userraliiev@mozilla.com
push dateMon, 19 Sep 2016 13:38:22 +0000
treeherdermozilla-beta@01d67bfe6c81 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersbbouvier
bugs1284155
milestone50.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1284155 - Baldr: rename 'heap' to 'memory' to better match wasm terminology (r=bbouvier) MozReview-Commit-ID: 8IQta6uCaKh
js/src/asmjs/AsmJS.cpp
js/src/asmjs/WasmCode.cpp
js/src/asmjs/WasmCode.h
js/src/asmjs/WasmCompile.cpp
js/src/asmjs/WasmGenerator.cpp
js/src/asmjs/WasmGenerator.h
js/src/asmjs/WasmInstance.cpp
js/src/asmjs/WasmInstance.h
js/src/asmjs/WasmIonCompile.cpp
js/src/asmjs/WasmModule.cpp
js/src/asmjs/WasmSignalHandlers.cpp
js/src/asmjs/WasmTypes.h
js/src/builtin/AtomicsObject.cpp
--- a/js/src/asmjs/AsmJS.cpp
+++ b/js/src/asmjs/AsmJS.cpp
@@ -262,17 +262,16 @@ enum class CacheResult
 
 // Holds the immutable guts of an AsmJSModule.
 //
 // AsmJSMetadata is built incrementally by ModuleValidator and then shared
 // immutably between AsmJSModules.
 
 struct AsmJSMetadataCacheablePod
 {
-    uint32_t                minHeapLength;
     uint32_t                numFFIs;
     uint32_t                srcLength;
     uint32_t                srcLengthWithRightBrace;
 
     AsmJSMetadataCacheablePod() { PodZero(this); }
 };
 
 struct js::AsmJSMetadata : Metadata, AsmJSMetadataCacheablePod
@@ -1677,17 +1676,16 @@ class MOZ_STACK_CLASS ModuleValidator
             ReportOverRecursed(cx_);
     }
 
     bool init() {
         asmJSMetadata_ = cx_->new_<AsmJSMetadata>();
         if (!asmJSMetadata_)
             return false;
 
-        asmJSMetadata_->minHeapLength = RoundUpToNextValidAsmJSHeapLength(0);
         asmJSMetadata_->srcStart = moduleFunctionNode_->pn_body->pn_pos.begin;
         asmJSMetadata_->srcBodyStart = parser_.tokenStream.currentToken().pos.end;
         asmJSMetadata_->strict = parser_.pc->sc->strict() && !parser_.pc->sc->hasExplicitUseStrict();
         asmJSMetadata_->scriptSource.reset(parser_.ss);
 
         if (!globalMap_.init() || !sigMap_.init() || !importMap_.init())
             return false;
 
@@ -1762,42 +1760,42 @@ class MOZ_STACK_CLASS ModuleValidator
             !genData->sigs.resize(MaxSigs) ||
             !genData->funcSigs.resize(MaxFuncs) ||
             !genData->imports.resize(MaxImports) ||
             !genData->asmJSSigToTable.resize(MaxTables))
         {
             return false;
         }
 
+        genData->minMemoryLength = RoundUpToNextValidAsmJSHeapLength(0);
+
         if (parser_.ss->filename()) {
             args.filename = DuplicateString(parser_.ss->filename());
             if (!args.filename)
                 return false;
         }
 
         if (!mg_.init(Move(genData), Move(args), asmJSMetadata_.get()))
             return false;
 
-        mg_.bumpMinHeapLength(asmJSMetadata_->minHeapLength);
-
         return true;
     }
 
     ExclusiveContext* cx() const             { return cx_; }
     PropertyName* moduleFunctionName() const { return moduleFunctionName_; }
     PropertyName* globalArgumentName() const { return globalArgumentName_; }
     PropertyName* importArgumentName() const { return importArgumentName_; }
     PropertyName* bufferArgumentName() const { return bufferArgumentName_; }
     ModuleGenerator& mg()                    { return mg_; }
     AsmJSParser& parser() const              { return parser_; }
     TokenStream& tokenStream() const         { return parser_.tokenStream; }
     RootedFunction& dummyFunction()          { return dummyFunction_; }
     bool supportsSimd() const                { return cx_->jitSupportsSimd(); }
     bool atomicsPresent() const              { return atomicsPresent_; }
-    uint32_t minHeapLength() const           { return asmJSMetadata_->minHeapLength; }
+    uint32_t minMemoryLength() const         { return mg_.minMemoryLength(); }
 
     void initModuleFunctionName(PropertyName* name) {
         MOZ_ASSERT(!moduleFunctionName_);
         moduleFunctionName_ = name;
     }
     MOZ_MUST_USE bool initGlobalArgumentName(PropertyName* n) {
         MOZ_ASSERT(n->isTenured());
         globalArgumentName_ = n;
@@ -2133,20 +2131,18 @@ class MOZ_STACK_CLASS ModuleValidator
     }
 
     bool tryConstantAccess(uint64_t start, uint64_t width) {
         MOZ_ASSERT(UINT64_MAX - start > width);
         uint64_t len = start + width;
         if (len > uint64_t(INT32_MAX) + 1)
             return false;
         len = RoundUpToNextValidAsmJSHeapLength(len);
-        if (len > asmJSMetadata_->minHeapLength) {
-            asmJSMetadata_->minHeapLength = len;
-            mg_.bumpMinHeapLength(len);
-        }
+        if (len > mg_.minMemoryLength())
+            mg_.bumpMinMemoryLength(len);
         return true;
     }
 
     // Error handling.
     bool hasAlreadyFailed() const {
         return !!errorString_;
     }
 
@@ -2269,17 +2265,17 @@ class MOZ_STACK_CLASS ModuleValidator
     bool startFunctionBodies() {
         return mg_.startFuncDefs();
     }
     bool finishFunctionBodies() {
         return mg_.finishFuncDefs();
     }
     UniqueModule finish() {
         if (!arrayViews_.empty())
-            mg_.initHeapUsage(atomicsPresent_ ? HeapUsage::Shared : HeapUsage::Unshared);
+            mg_.initMemoryUsage(atomicsPresent_ ? MemoryUsage::Shared : MemoryUsage::Unshared);
 
         MOZ_ASSERT(asmJSMetadata_->asmJSFuncNames.empty());
         for (const Func* func : functions_) {
             CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func->name());
             if (!funcName || !asmJSMetadata_->asmJSFuncNames.emplaceBack(Move(funcName)))
                 return nullptr;
         }
 
@@ -7756,47 +7752,47 @@ ValidateConstant(JSContext* cx, const As
 
     return true;
 }
 
 static bool
 CheckBuffer(JSContext* cx, const AsmJSMetadata& metadata, HandleValue bufferVal,
             MutableHandle<ArrayBufferObjectMaybeShared*> buffer)
 {
-    if (metadata.heapUsage == HeapUsage::Shared) {
+    if (metadata.memoryUsage == MemoryUsage::Shared) {
         if (!IsSharedArrayBuffer(bufferVal))
             return LinkFail(cx, "shared views can only be constructed onto SharedArrayBuffer");
     } else {
         if (!IsArrayBuffer(bufferVal))
             return LinkFail(cx, "unshared views can only be constructed onto ArrayBuffer");
     }
 
     buffer.set(&AsAnyArrayBuffer(bufferVal));
-    uint32_t heapLength = buffer->byteLength();
-
-    if (!IsValidAsmJSHeapLength(heapLength)) {
+    uint32_t memoryLength = buffer->byteLength();
+
+    if (!IsValidAsmJSHeapLength(memoryLength)) {
         UniqueChars msg(
             JS_smprintf("ArrayBuffer byteLength 0x%x is not a valid heap length. The next "
                         "valid length is 0x%x",
-                        heapLength,
-                        RoundUpToNextValidAsmJSHeapLength(heapLength)));
+                        memoryLength,
+                        RoundUpToNextValidAsmJSHeapLength(memoryLength)));
         if (!msg)
             return false;
         return LinkFail(cx, msg.get());
     }
 
     // This check is sufficient without considering the size of the loaded datum because heap
     // loads and stores start on an aligned boundary and the heap byteLength has larger alignment.
-    MOZ_ASSERT((metadata.minHeapLength - 1) <= INT32_MAX);
-    if (heapLength < metadata.minHeapLength) {
+    MOZ_ASSERT((metadata.minMemoryLength - 1) <= INT32_MAX);
+    if (memoryLength < metadata.minMemoryLength) {
         UniqueChars msg(
             JS_smprintf("ArrayBuffer byteLength of 0x%x is less than 0x%x (the size implied "
                         "by const heap accesses).",
-                        heapLength,
-                        metadata.minHeapLength));
+                        memoryLength,
+                        metadata.minMemoryLength));
         if (!msg)
             return false;
         return LinkFail(cx, msg.get());
     }
 
     if (buffer->is<ArrayBufferObject>()) {
         Rooted<ArrayBufferObject*> abheap(cx, &buffer->as<ArrayBufferObject>());
         bool useSignalHandlers = metadata.assumptions.usesSignal.forOOB;
@@ -7811,17 +7807,17 @@ static bool
 TryInstantiate(JSContext* cx, CallArgs args, Module& module, const AsmJSMetadata& metadata,
                MutableHandleWasmInstanceObject instanceObj)
 {
     HandleValue globalVal = args.get(0);
     HandleValue importVal = args.get(1);
     HandleValue bufferVal = args.get(2);
 
     Rooted<ArrayBufferObjectMaybeShared*> heap(cx);
-    if (module.metadata().usesHeap() && !CheckBuffer(cx, metadata, bufferVal, &heap))
+    if (module.metadata().usesMemory() && !CheckBuffer(cx, metadata, bufferVal, &heap))
         return false;
 
     Vector<Val> valImports(cx);
 
     Rooted<FunctionVector> ffis(cx, FunctionVector(cx));
     if (!ffis.resize(metadata.numFFIs))
         return false;
 
--- a/js/src/asmjs/WasmCode.cpp
+++ b/js/src/asmjs/WasmCode.cpp
@@ -102,28 +102,28 @@ StaticallyLink(CodeSegment& cs, const Li
     for (const LinkData::FuncTable& table : linkData.funcTables) {
         auto array = reinterpret_cast<void**>(cs.globalData() + table.globalDataOffset);
         for (size_t i = 0; i < table.elemOffsets.length(); i++)
             array[i] = cs.code() + table.elemOffsets[i];
     }
 }
 
 static void
-SpecializeToHeap(CodeSegment& cs, const Metadata& metadata, uint8_t* heapBase, uint32_t heapLength)
+SpecializeToMemory(CodeSegment& cs, const Metadata& metadata, uint8_t* base, uint32_t length)
 {
     for (const BoundsCheck& check : metadata.boundsChecks)
-        Assembler::UpdateBoundsCheck(check.patchAt(cs.code()), heapLength);
+        Assembler::UpdateBoundsCheck(check.patchAt(cs.code()), length);
 
 #if defined(JS_CODEGEN_X86)
     for (const MemoryAccess& access : metadata.memoryAccesses) {
         // Patch memory pointer immediate.
-        void* addr = access.patchHeapPtrImmAt(cs.code());
+        void* addr = access.patchMemoryPtrImmAt(cs.code());
         uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
         MOZ_ASSERT(disp <= INT32_MAX);
-        X86Encoding::SetPointer(addr, (void*)(heapBase + disp));
+        X86Encoding::SetPointer(addr, (void*)(base + disp));
     }
 #endif
 }
 
 static bool
 SendCodeRangesToProfiler(JSContext* cx, CodeSegment& cs, const Bytes& bytecode,
                          const Metadata& metadata)
 {
@@ -189,18 +189,18 @@ SendCodeRangesToProfiler(JSContext* cx, 
     return true;
 }
 
 /* static */ UniqueCodeSegment
 CodeSegment::create(JSContext* cx,
                     const Bytes& bytecode,
                     const LinkData& linkData,
                     const Metadata& metadata,
-                    uint8_t* heapBase,
-                    uint32_t heapLength)
+                    uint8_t* memoryBase,
+                    uint32_t memoryLength)
 {
     MOZ_ASSERT(bytecode.length() % gc::SystemPageSize() == 0);
     MOZ_ASSERT(linkData.globalDataLength % gc::SystemPageSize() == 0);
     MOZ_ASSERT(linkData.functionCodeLength < bytecode.length());
 
     auto cs = cx->make_unique<CodeSegment>();
     if (!cs)
         return nullptr;
@@ -218,17 +218,17 @@ CodeSegment::create(JSContext* cx,
 
     {
         JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
         AutoFlushICache afc("CodeSegment::create");
         AutoFlushICache::setRange(uintptr_t(cs->code()), cs->codeLength());
 
         memcpy(cs->code(), bytecode.begin(), bytecode.length());
         StaticallyLink(*cs, linkData, cx);
-        SpecializeToHeap(*cs, metadata, heapBase, heapLength);
+        SpecializeToMemory(*cs, metadata, memoryBase, memoryLength);
     }
 
     if (!ExecutableAllocator::makeExecutable(cs->code(), cs->codeLength())) {
         ReportOutOfMemory(cx);
         return nullptr;
     }
 
     if (!SendCodeRangesToProfiler(cx, *cs, bytecode, metadata))
--- a/js/src/asmjs/WasmCode.h
+++ b/js/src/asmjs/WasmCode.h
@@ -64,18 +64,18 @@ class CodeSegment
     void operator=(const CodeSegment&) = delete;
     void operator=(CodeSegment&&) = delete;
 
   public:
     static UniqueCodeSegment create(JSContext* cx,
                                     const Bytes& code,
                                     const LinkData& linkData,
                                     const Metadata& metadata,
-                                    uint8_t* heapBase,
-                                    uint32_t heapLength);
+                                    uint8_t* memoryBase,
+                                    uint32_t memoryLength);
     ~CodeSegment();
 
     uint8_t* code() const { return bytes_; }
     uint8_t* globalData() const { return bytes_ + codeLength_; }
     uint32_t codeLength() const { return codeLength_; }
     uint32_t globalDataLength() const { return globalDataLength_; }
     uint32_t totalLength() const { return codeLength_ + globalDataLength_; }
 
@@ -366,30 +366,30 @@ struct CacheableChars : UniqueChars
     CacheableChars() = default;
     explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
     MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
     WASM_DECLARE_SERIALIZABLE(CacheableChars)
 };
 
 typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
 
-// A wasm module can either use no heap, a unshared heap (ArrayBuffer) or shared
-// heap (SharedArrayBuffer).
+// A wasm module can either use no memory, an unshared memory (ArrayBuffer) or
+// shared memory (SharedArrayBuffer).
 
-enum class HeapUsage
+enum class MemoryUsage
 {
     None = false,
     Unshared = 1,
     Shared = 2
 };
 
 static inline bool
-UsesHeap(HeapUsage heapUsage)
+UsesMemory(MemoryUsage memoryUsage)
 {
-    return bool(heapUsage);
+    return bool(memoryUsage);
 }
 
 // NameInBytecode represents a name that is embedded in the wasm bytecode.
 // The presence of NameInBytecode implies that bytecode has been kept.
 
 struct NameInBytecode
 {
     uint32_t offset;
@@ -407,18 +407,18 @@ typedef Vector<char16_t, 64> TwoByteName
 // instantiate a module).
 //
 // Metadata is built incrementally by ModuleGenerator and then shared immutably
 // between modules.
 
 struct MetadataCacheablePod
 {
     ModuleKind            kind;
-    HeapUsage             heapUsage;
-    uint32_t              initialHeapLength;
+    MemoryUsage           memoryUsage;
+    uint32_t              minMemoryLength;
 
     MetadataCacheablePod() { mozilla::PodZero(this); }
 };
 
 struct Metadata : ShareableBase<Metadata>, MetadataCacheablePod
 {
     virtual ~Metadata() {}
 
@@ -431,18 +431,18 @@ struct Metadata : ShareableBase<Metadata
     BoundsCheckVector     boundsChecks;
     CodeRangeVector       codeRanges;
     CallSiteVector        callSites;
     CallThunkVector       callThunks;
     NameInBytecodeVector  funcNames;
     CacheableChars        filename;
     Assumptions           assumptions;
 
-    bool usesHeap() const { return UsesHeap(heapUsage); }
-    bool hasSharedHeap() const { return heapUsage == HeapUsage::Shared; }
+    bool usesMemory() const { return UsesMemory(memoryUsage); }
+    bool hasSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }
 
     // AsmJSMetadata derives Metadata iff isAsmJS(). Mostly this distinction is
     // encapsulated within AsmJS.cpp, but the additional virtual functions allow
     // asm.js to override wasm behavior in the handful of cases that can't be
     // easily encapsulated by AsmJS.cpp.
 
     bool isAsmJS() const {
         return kind == ModuleKind::AsmJS;
--- a/js/src/asmjs/WasmCompile.cpp
+++ b/js/src/asmjs/WasmCompile.cpp
@@ -754,17 +754,17 @@ DecodeImportSection(Decoder& d, bool new
 
     if (!d.finishSection(sectionStart, sectionSize))
         return Fail(d, "import section byte size mismatch");
 
     return true;
 }
 
 static bool
-DecodeMemorySection(Decoder& d, bool newFormat, ModuleGenerator& mg)
+DecodeMemorySection(Decoder& d, bool newFormat, ModuleGeneratorData* init, bool* exported)
 {
     uint32_t sectionStart, sectionSize;
     if (!d.startSection(MemorySectionId, &sectionStart, &sectionSize))
         return Fail(d, "failed to start section");
     if (sectionStart == Decoder::NotStarted)
         return true;
 
     uint32_t initialSizePages;
@@ -788,31 +788,29 @@ DecodeMemorySection(Decoder& d, bool new
     maxSize *= PageSize;
     if (!maxSize.isValid())
         return Fail(d, "maximum memory size too big");
 
     if (maxSize.value() < initialSize.value())
         return Fail(d, "maximum memory size less than initial memory size");
 
     if (!newFormat) {
-        uint8_t exported;
-        if (!d.readFixedU8(&exported))
+        uint8_t u8;
+        if (!d.readFixedU8(&u8))
             return Fail(d, "expected exported byte");
 
-        if (exported) {
-            UniqueChars fieldName = DuplicateString("memory");
-            if (!fieldName || !mg.addMemoryExport(Move(fieldName)))
-                return false;
-        }
+        *exported = u8;
     }
 
     if (!d.finishSection(sectionStart, sectionSize))
         return Fail(d, "memory section byte size mismatch");
 
-    mg.initHeapUsage(HeapUsage::Unshared, initialSize.value());
+    MOZ_ASSERT(init->memoryUsage == MemoryUsage::None);
+    init->memoryUsage = MemoryUsage::Unshared;
+    init->minMemoryLength = initialSize.value();
     return true;
 }
 
 typedef HashSet<const char*, CStringHasher, SystemAllocPolicy> CStringSet;
 
 static UniqueChars
 DecodeExportName(Decoder& d, CStringSet* dupSet)
 {
@@ -890,18 +888,24 @@ DecodeExport(Decoder& d, bool newFormat,
       default:
         return Fail(d, "unexpected export kind");
     }
 
     MOZ_CRASH("unreachable");
 }
 
 static bool
-DecodeExportSection(Decoder& d, bool newFormat, ModuleGenerator& mg)
+DecodeExportSection(Decoder& d, bool newFormat, bool memoryExported, ModuleGenerator& mg)
 {
+    if (!newFormat && memoryExported) {
+        UniqueChars fieldName = DuplicateString("memory");
+        if (!fieldName || !mg.addMemoryExport(Move(fieldName)))
+            return false;
+    }
+
     uint32_t sectionStart, sectionSize;
     if (!d.startSection(ExportSectionId, &sectionStart, &sectionSize))
         return Fail(d, "failed to start section");
     if (sectionStart == Decoder::NotStarted)
         return true;
 
     CStringSet dupSet;
     if (!dupSet.init())
@@ -1018,27 +1022,27 @@ static bool
 DecodeDataSection(Decoder& d, ModuleGenerator& mg)
 {
     uint32_t sectionStart, sectionSize;
     if (!d.startSection(DataSectionId, &sectionStart, &sectionSize))
         return Fail(d, "failed to start section");
     if (sectionStart == Decoder::NotStarted)
         return true;
 
-    if (!mg.usesHeap())
+    if (!mg.usesMemory())
         return Fail(d, "data section requires a memory section");
 
     uint32_t numSegments;
     if (!d.readVarU32(&numSegments))
         return Fail(d, "failed to read number of data segments");
 
     if (numSegments > MaxDataSegments)
         return Fail(d, "too many data segments");
 
-    uint32_t max = mg.initialHeapLength();
+    uint32_t max = mg.minMemoryLength();
     for (uint32_t i = 0, prevEnd = 0; i < numSegments; i++) {
         DataSegment seg;
 
         if (!d.readVarU32(&seg.memoryOffset))
             return Fail(d, "expected segment destination offset");
 
         if (seg.memoryOffset < prevEnd)
             return Fail(d, "data segments must be disjoint and ordered");
@@ -1172,24 +1176,25 @@ wasm::Compile(Bytes&& bytecode, CompileA
         return nullptr;
 
     if (!DecodeFunctionSection(d, init.get()))
         return nullptr;
 
     if (!DecodeTableSection(d, init.get()))
         return nullptr;
 
+    bool memoryExported = false;
+    if (!DecodeMemorySection(d, newFormat, init.get(), &memoryExported))
+        return nullptr;
+
     ModuleGenerator mg;
     if (!mg.init(Move(init), Move(args)))
         return nullptr;
 
-    if (!DecodeMemorySection(d, newFormat, mg))
-        return nullptr;
-
-    if (!DecodeExportSection(d, newFormat, mg))
+    if (!DecodeExportSection(d, newFormat, memoryExported, mg))
         return nullptr;
 
     if (!DecodeCodeSection(d, mg))
         return nullptr;
 
     if (!DecodeDataSection(d, mg))
         return nullptr;
 
--- a/js/src/asmjs/WasmGenerator.cpp
+++ b/js/src/asmjs/WasmGenerator.cpp
@@ -109,17 +109,16 @@ ModuleGenerator::init(UniqueModuleGenera
         metadata_ = maybeAsmJSMetadata;
     } else {
         metadata_ = js_new<Metadata>();
         if (!metadata_)
             return false;
     }
 
     metadata_->kind = shared->kind;
-    metadata_->heapUsage = HeapUsage::None;
     metadata_->filename = Move(args.filename);
     metadata_->assumptions = Move(args.assumptions);
 
     shared_ = Move(shared);
 
     // For asm.js, the Vectors in ModuleGeneratorData are max-sized reservations
     // and will be initialized in a linear order via init* functions as the
     // module is generated. For wasm, the Vectors are correctly-sized and
@@ -347,25 +346,25 @@ ModuleGenerator::finishCodegen()
 
     {
         TempAllocator alloc(&lifo_);
         MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc);
 
         if (!entries.resize(numExports()))
             return false;
         for (uint32_t i = 0; i < numExports(); i++)
-            entries[i] = GenerateEntry(masm, metadata_->exports[i], usesHeap());
+            entries[i] = GenerateEntry(masm, metadata_->exports[i], usesMemory());
 
         if (!interpExits.resize(numImports()))
             return false;
         if (!jitExits.resize(numImports()))
             return false;
         for (uint32_t i = 0; i < numImports(); i++) {
             interpExits[i] = GenerateInterpExit(masm, metadata_->imports[i], i);
-            jitExits[i] = GenerateJitExit(masm, metadata_->imports[i], usesHeap());
+            jitExits[i] = GenerateJitExit(masm, metadata_->imports[i], usesMemory());
         }
 
         for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit))
             jumpTargets[target] = GenerateJumpTarget(masm, target);
 
         interruptExit = GenerateInterruptStub(masm);
 
         if (masm.oom() || !masm_.asmMergeWith(masm))
@@ -582,41 +581,16 @@ ModuleGenerator::allocateGlobal(ValType 
     if (!allocateGlobalBytes(width, width, &offset))
         return false;
 
     *index = shared_->globals.length();
     return shared_->globals.append(GlobalDesc(type, offset, isConst));
 }
 
 void
-ModuleGenerator::initHeapUsage(HeapUsage heapUsage, uint32_t initialHeapLength)
-{
-    MOZ_ASSERT(metadata_->heapUsage == HeapUsage::None);
-    metadata_->heapUsage = heapUsage;
-    metadata_->initialHeapLength = initialHeapLength;
-    if (isAsmJS())
-        MOZ_ASSERT(initialHeapLength == 0);
-    else
-        shared_->minHeapLength = initialHeapLength;
-}
-
-bool
-ModuleGenerator::usesHeap() const
-{
-    return UsesHeap(metadata_->heapUsage);
-}
-
-uint32_t
-ModuleGenerator::initialHeapLength() const
-{
-    MOZ_ASSERT(!isAsmJS());
-    return metadata_->initialHeapLength;
-}
-
-void
 ModuleGenerator::initSig(uint32_t sigIndex, Sig&& sig)
 {
     MOZ_ASSERT(isAsmJS());
     MOZ_ASSERT(sigIndex == numSigs_);
     numSigs_++;
 
     MOZ_ASSERT(shared_->sigs[sigIndex] == Sig());
     shared_->sigs[sigIndex] = Move(sig);
@@ -634,22 +608,31 @@ ModuleGenerator::initFuncSig(uint32_t fu
 {
     MOZ_ASSERT(isAsmJS());
     MOZ_ASSERT(!shared_->funcSigs[funcIndex]);
 
     shared_->funcSigs[funcIndex] = &shared_->sigs[sigIndex];
 }
 
 void
-ModuleGenerator::bumpMinHeapLength(uint32_t newMinHeapLength)
+ModuleGenerator::initMemoryUsage(MemoryUsage memoryUsage)
 {
     MOZ_ASSERT(isAsmJS());
-    MOZ_ASSERT(newMinHeapLength >= shared_->minHeapLength);
+    MOZ_ASSERT(shared_->memoryUsage == MemoryUsage::None);
+
+    shared_->memoryUsage = memoryUsage;
+}
 
-    shared_->minHeapLength = newMinHeapLength;
+void
+ModuleGenerator::bumpMinMemoryLength(uint32_t newMinMemoryLength)
+{
+    MOZ_ASSERT(isAsmJS());
+    MOZ_ASSERT(newMinMemoryLength >= shared_->minMemoryLength);
+
+    shared_->minMemoryLength = newMinMemoryLength;
 }
 
 const DeclaredSig&
 ModuleGenerator::funcSig(uint32_t funcIndex) const
 {
     MOZ_ASSERT(shared_->funcSigs[funcIndex]);
     return *shared_->funcSigs[funcIndex];
 }
@@ -924,28 +907,32 @@ ModuleGenerator::finish(ImportNameVector
     // Zero the padding, since we used resizeUninitialized above.
     memset(code.begin() + bytesNeeded, 0, padding);
 
     // Convert the CallSiteAndTargetVector (needed during generation) to a
     // CallSiteVector (what is stored in the Module).
     if (!metadata_->callSites.appendAll(masm_.callSites()))
         return nullptr;
 
-    // The MacroAssembler has accumulated all the heap accesses during codegen.
+    // The MacroAssembler has accumulated all the memory accesses during codegen.
     metadata_->memoryAccesses = masm_.extractMemoryAccesses();
     metadata_->boundsChecks = masm_.extractBoundsChecks();
 
     // These Vectors can get large and the excess capacity can be significant,
     // so realloc them down to size.
     metadata_->memoryAccesses.podResizeToFit();
     metadata_->boundsChecks.podResizeToFit();
     metadata_->codeRanges.podResizeToFit();
     metadata_->callSites.podResizeToFit();
     metadata_->callThunks.podResizeToFit();
 
+    // Copy over data from the ModuleGeneratorData.
+    metadata_->memoryUsage = shared_->memoryUsage;
+    metadata_->minMemoryLength = shared_->minMemoryLength;
+
     // Assert CodeRanges are sorted.
 #ifdef DEBUG
     uint32_t lastEnd = 0;
     for (const CodeRange& codeRange : metadata_->codeRanges) {
         MOZ_ASSERT(codeRange.begin() >= lastEnd);
         lastEnd = codeRange.end();
     }
 #endif
--- a/js/src/asmjs/WasmGenerator.h
+++ b/js/src/asmjs/WasmGenerator.h
@@ -59,32 +59,33 @@ struct ImportModuleGeneratorData
 };
 
 typedef Vector<ImportModuleGeneratorData, 0, SystemAllocPolicy> ImportModuleGeneratorDataVector;
 
 struct ModuleGeneratorData
 {
     ModuleKind                      kind;
     SignalUsage                     usesSignal;
-    mozilla::Atomic<uint32_t>       minHeapLength;
+    MemoryUsage                     memoryUsage;
+    mozilla::Atomic<uint32_t>       minMemoryLength;
 
     DeclaredSigVector               sigs;
     DeclaredSigPtrVector            funcSigs;
     ImportModuleGeneratorDataVector imports;
     GlobalDescVector                globals;
 
     TableModuleGeneratorData        wasmTable;
     TableModuleGeneratorDataVector  asmJSSigToTable;
 
     uint32_t funcSigIndex(uint32_t funcIndex) const {
         return funcSigs[funcIndex] - sigs.begin();
     }
 
     explicit ModuleGeneratorData(SignalUsage usesSignal, ModuleKind kind = ModuleKind::Wasm)
-      : kind(kind), usesSignal(usesSignal), minHeapLength(0)
+      : kind(kind), usesSignal(usesSignal), memoryUsage(MemoryUsage::None), minMemoryLength(0)
     {}
 };
 
 typedef UniquePtr<ModuleGeneratorData> UniqueModuleGeneratorData;
 
 // A ModuleGenerator encapsulates the creation of a wasm module. During the
 // lifetime of a ModuleGenerator, a sequence of FunctionGenerators are created
 // and destroyed to compile the individual function bodies. After generating all
@@ -146,20 +147,19 @@ class MOZ_STACK_CLASS ModuleGenerator
 
     MOZ_MUST_USE bool init(UniqueModuleGeneratorData shared, CompileArgs&& args,
                            Metadata* maybeAsmJSMetadata = nullptr);
 
     bool isAsmJS() const { return metadata_->kind == ModuleKind::AsmJS; }
     SignalUsage usesSignal() const { return metadata_->assumptions.usesSignal; }
     jit::MacroAssembler& masm() { return masm_; }
 
-    // Heap usage:
-    void initHeapUsage(HeapUsage heapUsage, uint32_t initialHeapLength = 0);
-    bool usesHeap() const;
-    uint32_t initialHeapLength() const;
+    // Memory:
+    bool usesMemory() const { return UsesMemory(shared_->memoryUsage); }
+    uint32_t minMemoryLength() const { return shared_->minMemoryLength; }
     MOZ_MUST_USE bool addDataSegment(DataSegment s) { return dataSegments_.append(s); }
 
     // Signatures:
     uint32_t numSigs() const { return numSigs_; }
     const DeclaredSig& sig(uint32_t sigIndex) const;
 
     // Function declarations:
     uint32_t numFuncSigs() const { return shared_->funcSigs.length(); }
@@ -189,17 +189,18 @@ class MOZ_STACK_CLASS ModuleGenerator
     void setFuncNames(NameInBytecodeVector&& funcNames);
 
     // asm.js lazy initialization:
     void initSig(uint32_t sigIndex, Sig&& sig);
     void initFuncSig(uint32_t funcIndex, uint32_t sigIndex);
     MOZ_MUST_USE bool initImport(uint32_t importIndex, uint32_t sigIndex);
     MOZ_MUST_USE bool initSigTableLength(uint32_t sigIndex, uint32_t numElems);
     void initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices);
-    void bumpMinHeapLength(uint32_t newMinHeapLength);
+    void initMemoryUsage(MemoryUsage memoryUsage);
+    void bumpMinMemoryLength(uint32_t newMinMemoryLength);
 
     // Finish compilation, provided the list of imported names and source
     // bytecode. Both these Vectors may be empty (viz., b/c asm.js does
     // different things for imports and source).
     UniqueModule finish(ImportNameVector&& importNames, const ShareableBytes& bytecode);
 };
 
 // A FunctionGenerator encapsulates the generation of a single function body.
--- a/js/src/asmjs/WasmInstance.cpp
+++ b/js/src/asmjs/WasmInstance.cpp
@@ -38,17 +38,17 @@
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 using mozilla::BinarySearch;
 using mozilla::Swap;
 
 uint8_t**
-Instance::addressOfHeapPtr() const
+Instance::addressOfMemoryBase() const
 {
     return (uint8_t**)(codeSegment_->globalData() + HeapGlobalDataOffset);
 }
 
 ImportExit&
 Instance::importToExit(const Import& import)
 {
     return *(ImportExit*)(codeSegment_->globalData() + import.exitGlobalDataOffset());
@@ -380,31 +380,31 @@ NewExportedFunction(JSContext* cx, Handl
     fun->setExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT, ObjectValue(*instanceObj));
     fun->setExtendedSlot(FunctionExtended::WASM_EXPORT_INDEX_SLOT, Int32Value(exportIndex));
     return fun;
 }
 
 static bool
 CreateExportObject(JSContext* cx,
                    HandleWasmInstanceObject instanceObj,
-                   HandleArrayBufferObjectMaybeShared heap,
+                   HandleArrayBufferObjectMaybeShared memoryObj,
                    const ExportMap& exportMap,
                    const ExportVector& exports,
                    MutableHandleObject exportObj)
 {
     MOZ_ASSERT(exportMap.fieldNames.length() == exportMap.fieldsToExports.length());
 
     for (size_t fieldIndex = 0; fieldIndex < exportMap.fieldNames.length(); fieldIndex++) {
         const char* fieldName = exportMap.fieldNames[fieldIndex].get();
         if (!*fieldName) {
             MOZ_ASSERT(!exportObj);
             uint32_t exportIndex = exportMap.fieldsToExports[fieldIndex];
             if (exportIndex == MemoryExport) {
-                MOZ_ASSERT(heap);
-                exportObj.set(heap);
+                MOZ_ASSERT(memoryObj);
+                exportObj.set(memoryObj);
             } else {
                 exportObj.set(NewExportedFunction(cx, instanceObj, exportIndex));
                 if (!exportObj)
                     return false;
             }
             break;
         }
     }
@@ -430,17 +430,17 @@ CreateExportObject(JSContext* cx,
         JSAtom* atom = AtomizeUTF8Chars(cx, fieldName, strlen(fieldName));
         if (!atom)
             return false;
 
         RootedId id(cx, AtomToId(atom));
         RootedValue val(cx);
         uint32_t exportIndex = exportMap.fieldsToExports[fieldIndex];
         if (exportIndex == MemoryExport)
-            val = ObjectValue(*heap);
+            val = ObjectValue(*memoryObj);
         else
             val = vals[exportIndex];
 
         if (!JS_DefinePropertyById(cx, exportObj, id, val, JSPROP_ENUMERATE))
             return false;
     }
 
     return true;
@@ -491,17 +491,17 @@ Instance::create(JSContext* cx,
         const Import& import = metadata.imports[i];
         ImportExit& exit = instance.importToExit(import);
         exit.code = instance.codeSegment().code() + import.interpExitCodeOffset();
         exit.fun = funcImports[i];
         exit.baselineScript = nullptr;
     }
 
     if (heap)
-        *instance.addressOfHeapPtr() = heap->dataPointerEither().unwrap(/* wasm heap pointer */);
+        *instance.addressOfMemoryBase() = heap->dataPointerEither().unwrap(/* wasm memory base */);
 
     // Create the export object
 
     RootedObject exportObj(cx);
     if (!CreateExportObject(cx, instanceObj, heap, exportMap, metadata.exports, &exportObj))
         return false;
 
     // Attach the export object to the instance object
@@ -537,25 +537,25 @@ void
 Instance::trace(JSTracer* trc)
 {
     for (const Import& import : metadata_->imports)
         TraceNullableEdge(trc, &importToExit(import).fun, "wasm function import");
     TraceNullableEdge(trc, &heap_, "wasm buffer");
 }
 
 SharedMem<uint8_t*>
-Instance::heap() const
+Instance::memoryBase() const
 {
-    MOZ_ASSERT(metadata_->usesHeap());
-    MOZ_ASSERT(*addressOfHeapPtr() == heap_->dataPointerEither());
+    MOZ_ASSERT(metadata_->usesMemory());
+    MOZ_ASSERT(*addressOfMemoryBase() == heap_->dataPointerEither());
     return heap_->dataPointerEither();
 }
 
 size_t
-Instance::heapLength() const
+Instance::memoryLength() const
 {
     return heap_->byteLength();
 }
 
 bool
 Instance::callExport(JSContext* cx, uint32_t exportIndex, CallArgs args)
 {
     const Export& exp = metadata_->exports[exportIndex];
--- a/js/src/asmjs/WasmInstance.h
+++ b/js/src/asmjs/WasmInstance.h
@@ -61,17 +61,17 @@ class Instance
     const SharedBytes                    maybeBytecode_;
     const TypedFuncTableVector           typedFuncTables_;
     GCPtr<ArrayBufferObjectMaybeShared*> heap_;
 
     bool                                 profilingEnabled_;
     CacheableCharsVector                 funcLabels_;
 
     // Internal helpers:
-    uint8_t** addressOfHeapPtr() const;
+    uint8_t** addressOfMemoryBase() const;
     ImportExit& importToExit(const Import& import);
     MOZ_MUST_USE bool toggleProfiling(JSContext* cx);
 
     // An instance keeps track of its innermost WasmActivation. A WasmActivation
     // is pushed for the duration of each call of an export.
     friend class js::WasmActivation;
     WasmActivation*& activation();
 
@@ -101,18 +101,18 @@ class Instance
                        Handle<FunctionVector> funcImports,
                        const ExportMap& exports,
                        HandleWasmInstanceObject instanceObj);
     ~Instance();
     void trace(JSTracer* trc);
 
     const CodeSegment& codeSegment() const { return *codeSegment_; }
     const Metadata& metadata() const { return *metadata_; }
-    SharedMem<uint8_t*> heap() const;
-    size_t heapLength() const;
+    SharedMem<uint8_t*> memoryBase() const;
+    size_t memoryLength() const;
 
     // Execute the given export given the JS call arguments, storing the return
     // value in args.rval.
 
     MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t exportIndex, CallArgs args);
 
     // An instance has a profiling mode that is updated to match the runtime's
     // profiling mode when calling an instance's exports when there are no other
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -3384,17 +3384,17 @@ wasm::IonCompileFunction(IonCompileTask*
 
     JitContext jitContext(&results.alloc());
     const JitCompileOptions options;
     MIRGraph graph(&results.alloc());
     CompileInfo compileInfo(locals.length());
     MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo,
                      IonOptimizations.get(OptimizationLevel::AsmJS));
     mir.initUsesSignalHandlersForAsmJSOOB(task->mg().usesSignal.forOOB);
-    mir.initMinAsmJSHeapLength(task->mg().minHeapLength);
+    mir.initMinAsmJSHeapLength(task->mg().minMemoryLength);
 
     // Build MIR graph
     {
         FunctionCompiler f(task->mg(), d, func, locals, mir, results);
         if (!f.init())
             return false;
 
         if (!f.iter().readFunctionStart())
--- a/js/src/asmjs/WasmModule.cpp
+++ b/js/src/asmjs/WasmModule.cpp
@@ -331,19 +331,19 @@ Module::instantiate(JSContext* cx,
 {
     MOZ_ASSERT(funcImports.length() == metadata_->imports.length());
     MOZ_ASSERT_IF(asmJSHeap, metadata_->isAsmJS());
 
     // asm.js module instantiation supplies its own heap, but for wasm, create
     // and initialize the heap if one is requested.
 
     Rooted<ArrayBufferObjectMaybeShared*> heap(cx, asmJSHeap);
-    if (metadata_->usesHeap() && !heap) {
+    if (metadata_->usesMemory() && !heap) {
         MOZ_ASSERT(!metadata_->isAsmJS());
-        heap = ArrayBufferObject::createForWasm(cx, metadata_->initialHeapLength,
+        heap = ArrayBufferObject::createForWasm(cx, metadata_->minMemoryLength,
                                                 metadata_->assumptions.usesSignal.forOOB);
         if (!heap)
             return false;
     }
 
     uint8_t* memoryBase = heap ? heap->dataPointerEither().unwrap(/* code patching */) : nullptr;
     uint32_t memoryLength = heap ? heap->byteLength() : 0;
 
--- a/js/src/asmjs/WasmSignalHandlers.cpp
+++ b/js/src/asmjs/WasmSignalHandlers.cpp
@@ -620,17 +620,17 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
     MOZ_RELEASE_ASSERT(address.disp() >= 0);
     MOZ_RELEASE_ASSERT(address.base() == HeapReg.code());
     MOZ_RELEASE_ASSERT(!address.hasIndex() || address.index() != HeapReg.code());
     MOZ_RELEASE_ASSERT(address.scale() == 0);
     if (address.hasBase()) {
         uintptr_t base;
         StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
                             AddressOfGPRegisterSlot(context, address.base()));
-        MOZ_RELEASE_ASSERT(reinterpret_cast<uint8_t*>(base) == instance.heap());
+        MOZ_RELEASE_ASSERT(reinterpret_cast<uint8_t*>(base) == instance.memoryBase());
     }
     if (address.hasIndex()) {
         uintptr_t index;
         StoreValueFromGPReg(SharedMem<void*>::unshared(&index), sizeof(uintptr_t),
                             AddressOfGPRegisterSlot(context, address.index()));
         MOZ_RELEASE_ASSERT(uint32_t(index) == index);
     }
 #endif
@@ -638,58 +638,58 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
     // Determine the actual effective address of the faulting access. We can't
     // rely on the faultingAddress given to us by the OS, because we need the
     // address of the start of the access, and the OS may sometimes give us an
     // address somewhere in the middle of the heap access.
     uint8_t* accessAddress = ComputeAccessAddress(context, address);
     MOZ_RELEASE_ASSERT(size_t(faultingAddress - accessAddress) < access.size(),
                        "Given faulting address does not appear to be within computed "
                        "faulting address range");
-    MOZ_RELEASE_ASSERT(accessAddress >= instance.heap(),
+    MOZ_RELEASE_ASSERT(accessAddress >= instance.memoryBase(),
                        "Access begins outside the asm.js heap");
-    MOZ_RELEASE_ASSERT(accessAddress + access.size() <= instance.heap() + MappedSize,
+    MOZ_RELEASE_ASSERT(accessAddress + access.size() <= instance.memoryBase() + MappedSize,
                        "Access extends beyond the asm.js heap guard region");
-    MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.heap() + instance.heapLength(),
+    MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.memoryBase() + instance.memoryLength(),
                        "Computed access address is not actually out of bounds");
 
     // The basic sandbox model is that all heap accesses are a heap base
     // register plus an index, and the index is always computed with 32-bit
     // operations, so we know it can only be 4 GiB off of the heap base.
     //
     // However, we wish to support the optimization of folding immediates
     // and scaled indices into addresses, and any address arithmetic we fold
     // gets done at full pointer width, so it doesn't get properly wrapped.
     // We support this by extending MappedSize to the greatest size that could
     // be reached by such an unwrapped address, and then when we arrive here in
     // the signal handler for such an access, we compute the fully wrapped
     // address, and perform the load or store on it.
     //
     // Taking a signal is really slow, but in theory programs really shouldn't
     // be hitting this anyway.
-    intptr_t unwrappedOffset = accessAddress - instance.heap().unwrap(/*safe - for value*/);
+    intptr_t unwrappedOffset = accessAddress - instance.memoryBase().unwrap(/* for value */);
     uint32_t wrappedOffset = uint32_t(unwrappedOffset);
     size_t size = access.size();
     MOZ_RELEASE_ASSERT(wrappedOffset + size > wrappedOffset);
-    bool inBounds = wrappedOffset + size < instance.heapLength();
+    bool inBounds = wrappedOffset + size < instance.memoryLength();
 
     // If this is storing Z of an XYZ, check whether X is also in bounds, so
     // that we don't store anything before throwing.
     MOZ_RELEASE_ASSERT(unwrappedOffset > memoryAccess->offsetWithinWholeSimdVector());
     uint32_t wrappedBaseOffset = uint32_t(unwrappedOffset - memoryAccess->offsetWithinWholeSimdVector());
-    if (wrappedBaseOffset >= instance.heapLength())
+    if (wrappedBaseOffset >= instance.memoryLength())
         inBounds = false;
 
     if (inBounds) {
         // We now know that this is an access that is actually in bounds when
         // properly wrapped. Complete the load or store with the wrapped
         // address.
-        SharedMem<uint8_t*> wrappedAddress = instance.heap() + wrappedOffset;
-        MOZ_RELEASE_ASSERT(wrappedAddress >= instance.heap());
+        SharedMem<uint8_t*> wrappedAddress = instance.memoryBase() + wrappedOffset;
+        MOZ_RELEASE_ASSERT(wrappedAddress >= instance.memoryBase());
         MOZ_RELEASE_ASSERT(wrappedAddress + size > wrappedAddress);
-        MOZ_RELEASE_ASSERT(wrappedAddress + size <= instance.heap() + instance.heapLength());
+        MOZ_RELEASE_ASSERT(wrappedAddress + size <= instance.memoryBase() + instance.memoryLength());
         switch (access.kind()) {
           case Disassembler::HeapAccess::Load:
             SetRegisterToLoadedValue(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
             break;
           case Disassembler::HeapAccess::LoadSext32:
             SetRegisterToLoadedValueSext32(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
             break;
           case Disassembler::HeapAccess::Store:
@@ -742,21 +742,21 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
 
 MOZ_COLD static bool
 IsHeapAccessAddress(const Instance &instance, uint8_t* faultingAddress)
 {
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
     size_t accessLimit = MappedSize;
 #elif defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
-    size_t accessLimit = instance.heapLength();
+    size_t accessLimit = instance.memoryLength();
 #endif
-    return instance.metadata().usesHeap() &&
-           faultingAddress >= instance.heap() &&
-           faultingAddress < instance.heap() + accessLimit;
+    return instance.metadata().usesMemory() &&
+           faultingAddress >= instance.memoryBase() &&
+           faultingAddress < instance.memoryBase() + accessLimit;
 }
 
 #if defined(XP_WIN)
 
 static bool
 HandleFault(PEXCEPTION_POINTERS exception)
 {
     EXCEPTION_RECORD* record = exception->ExceptionRecord;
--- a/js/src/asmjs/WasmTypes.h
+++ b/js/src/asmjs/WasmTypes.h
@@ -601,17 +601,17 @@ class MemoryAccess
 
   public:
     MemoryAccess() = default;
 
     explicit MemoryAccess(uint32_t nextInsOffset)
       : nextInsOffset_(nextInsOffset)
     { }
 
-    void* patchHeapPtrImmAt(uint8_t* code) const { return code + nextInsOffset_; }
+    void* patchMemoryPtrImmAt(uint8_t* code) const { return code + nextInsOffset_; }
     void offsetBy(uint32_t offset) { nextInsOffset_ += offset; }
 };
 #elif defined(JS_CODEGEN_X64)
 class MemoryAccess
 {
     uint32_t insnOffset_;
     uint8_t offsetWithinWholeSimdVector_; // if is this e.g. the Z of an XYZ
     bool throwOnOOB_;                     // should we throw on OOB?
--- a/js/src/builtin/AtomicsObject.cpp
+++ b/js/src/builtin/AtomicsObject.cpp
@@ -528,18 +528,18 @@ js::atomics_isLockFree(JSContext* cx, un
 // simulator build with ARMHWCAP=vfp set.  Do not set any other flags; other
 // vfp/neon flags force ARMv7 to be set.
 
 static void
 GetCurrentAsmJSHeap(SharedMem<void*>* heap, size_t* length)
 {
     JSRuntime* rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
     wasm::Instance& instance = rt->wasmActivationStack()->instance();
-    *heap = instance.heap().cast<void*>();
-    *length = instance.heapLength();
+    *heap = instance.memoryBase().cast<void*>();
+    *length = instance.memoryLength();
 }
 
 int32_t
 js::atomics_add_asm_callout(int32_t vt, int32_t offset, int32_t value)
 {
     SharedMem<void*> heap;
     size_t heapLength;
     GetCurrentAsmJSHeap(&heap, &heapLength);