Bug 1359027 - Split tier-invariant and tier-variant parts of wasm LinkData apart. r=luke
author: Lars T Hansen <lhansen@mozilla.com>
Fri, 19 May 2017 10:37:06 +0200
changeset 408086 b3e049afffcc2f87cfea98c3a197cade1a07b4fa
parent 408085 22e4abf1e4e7e330fc9eb855f5a2e4ace44774f3
child 408087 b644b0a89fab8cd70fbe022c77d7923c1eb142b1
push id: 7391
push user: mtabara@mozilla.com
push date: Mon, 12 Jun 2017 13:08:53 +0000
treeherder: mozilla-beta@2191d7f87e2e [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: luke
bugs: 1359027
milestone: 55.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1359027 - Split tier-invariant and tier-variant parts of wasm LinkData apart. r=luke
js/src/wasm/WasmCode.cpp
js/src/wasm/WasmCode.h
js/src/wasm/WasmDebug.h
js/src/wasm/WasmGenerator.cpp
js/src/wasm/WasmGenerator.h
js/src/wasm/WasmModule.cpp
js/src/wasm/WasmModule.h
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -81,19 +81,19 @@ CodeSegment::FreeCode::operator()(uint8_
 
 #ifdef MOZ_VTUNE
     vtune::UnmarkBytes(bytes, codeLength);
 #endif
     DeallocateExecutableMemory(bytes, codeLength);
 }
 
 static bool
-StaticallyLink(const CodeSegment& cs, const LinkData& linkData)
+StaticallyLink(const CodeSegment& cs, const LinkDataTier& linkData)
 {
-    for (LinkData::InternalLink link : linkData.internalLinks) {
+    for (LinkDataTier::InternalLink link : linkData.internalLinks) {
         uint8_t* patchAt = cs.base() + link.patchAtOffset;
         void* target = cs.base() + link.targetOffset;
         if (link.isRawPointerPatch())
             *(void**)(patchAt) = target;
         else
             Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
     }
 
@@ -113,19 +113,19 @@ StaticallyLink(const CodeSegment& cs, co
                                                PatchedImmPtr((void*)-1));
         }
     }
 
     return true;
 }
 
 static void
-StaticallyUnlink(uint8_t* base, const LinkData& linkData)
+StaticallyUnlink(uint8_t* base, const LinkDataTier& linkData)
 {
-    for (LinkData::InternalLink link : linkData.internalLinks) {
+    for (LinkDataTier::InternalLink link : linkData.internalLinks) {
         uint8_t* patchAt = base + link.patchAtOffset;
         void* target = 0;
         if (link.isRawPointerPatch())
             *(void**)(patchAt) = target;
         else
             Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
     }
 
@@ -190,17 +190,17 @@ SendCodeRangesToProfiler(const CodeSegme
     }
 
     return;
 }
 
 /* static */ UniqueConstCodeSegment
 CodeSegment::create(MacroAssembler& masm,
                     const ShareableBytes& bytecode,
-                    const LinkData& linkData,
+                    const LinkDataTier& linkData,
                     const Metadata& metadata)
 {
     // Round up the code size to page size since this is eventually required by
     // the executable-code allocator and for setting memory protection.
     uint32_t bytesNeeded = masm.bytesNeeded();
     uint32_t padding = ComputeByteAlignment(bytesNeeded, gc::SystemPageSize());
     uint32_t codeLength = bytesNeeded + padding;
 
@@ -217,17 +217,17 @@ CodeSegment::create(MacroAssembler& masm
     memset(codeBytes.get() + bytesNeeded, 0, padding);
 
     return create(Move(codeBytes), codeLength, bytecode, linkData, metadata);
 }
 
 /* static */ UniqueConstCodeSegment
 CodeSegment::create(const Bytes& unlinkedBytes,
                     const ShareableBytes& bytecode,
-                    const LinkData& linkData,
+                    const LinkDataTier& linkData,
                     const Metadata& metadata)
 {
     // The unlinked bytes are a snapshot of the MacroAssembler's contents so
     // round up just like in the MacroAssembler overload above.
     uint32_t padding = ComputeByteAlignment(unlinkedBytes.length(), gc::SystemPageSize());
     uint32_t codeLength = unlinkedBytes.length() + padding;
 
     UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
@@ -239,17 +239,17 @@ CodeSegment::create(const Bytes& unlinke
 
     return create(Move(codeBytes), codeLength, bytecode, linkData, metadata);
 }
 
 /* static */ UniqueConstCodeSegment
 CodeSegment::create(UniqueCodeBytes codeBytes,
                     uint32_t codeLength,
                     const ShareableBytes& bytecode,
-                    const LinkData& linkData,
+                    const LinkDataTier& linkData,
                     const Metadata& metadata)
 {
     // These should always exist and should never be first in the code segment.
     MOZ_ASSERT(linkData.interruptOffset != 0);
     MOZ_ASSERT(linkData.outOfBoundsOffset != 0);
     MOZ_ASSERT(linkData.unalignedAccessOffset != 0);
 
     auto cs = js::MakeUnique<CodeSegment>();
@@ -261,17 +261,17 @@ CodeSegment::create(UniqueCodeBytes code
 
     return UniqueConstCodeSegment(cs.release());
 }
 
 bool
 CodeSegment::initialize(UniqueCodeBytes codeBytes,
                         uint32_t codeLength,
                         const ShareableBytes& bytecode,
-                        const LinkData& linkData,
+                        const LinkDataTier& linkData,
                         const Metadata& metadata)
 {
     MOZ_ASSERT(bytes_ == nullptr);
 
     bytes_ = Move(codeBytes);
     functionLength_ = linkData.functionCodeLength;
     length_ = codeLength;
     interruptCode_ = bytes_.get() + linkData.interruptOffset;
@@ -301,28 +301,28 @@ CodeSegment::serializedSize() const
 void
 CodeSegment::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const
 {
     *data += mallocSizeOf(this);
     *code += RoundupCodeLength(length_);
 }
 
 uint8_t*
-CodeSegment::serialize(uint8_t* cursor, const LinkData& linkData) const
+CodeSegment::serialize(uint8_t* cursor, const LinkDataTier& linkData) const
 {
     cursor = WriteScalar<uint32_t>(cursor, length_);
     uint8_t* base = cursor;
     cursor = WriteBytes(cursor, bytes_.get(), length_);
     StaticallyUnlink(base, linkData);
     return cursor;
 }
 
 const uint8_t*
 CodeSegment::deserialize(const uint8_t* cursor, const ShareableBytes& bytecode,
-                         const LinkData& linkData, const Metadata& metadata)
+                         const LinkDataTier& linkData, const Metadata& metadata)
 {
     uint32_t length;
     cursor = ReadScalar<uint32_t>(cursor, &length);
     if (!cursor)
         return nullptr;
 
     MOZ_ASSERT(length_ % gc::SystemPageSize() == 0);
     UniqueCodeBytes bytes = AllocateCodeBytes(length);
@@ -627,17 +627,17 @@ Code::serializedSize() const
 }
 
 uint8_t*
 Code::serialize(uint8_t* cursor, const LinkData& linkData) const
 {
     MOZ_RELEASE_ASSERT(!metadata().debugEnabled);
 
     cursor = metadata().serialize(cursor);
-    cursor = segmentTier().serialize(cursor, linkData);
+    cursor = segmentTier().serialize(cursor, linkData.tier());
     return cursor;
 }
 
 const uint8_t*
 Code::deserialize(const uint8_t* cursor, const SharedBytes& bytecode, const LinkData& linkData,
                   Metadata* maybeMetadata)
 {
     MutableMetadata metadata;
@@ -656,17 +656,17 @@ Code::deserialize(const uint8_t* cursor,
     cursor = metadata->deserialize(cursor);
     if (!cursor)
         return nullptr;
 
     UniqueCodeSegment codeSegment = js::MakeUnique<CodeSegment>();
     if (!codeSegment)
         return nullptr;
 
-    cursor = codeSegment->deserialize(cursor, *bytecode, linkData, *metadata);
+    cursor = codeSegment->deserialize(cursor, *bytecode, linkData.tier(), *metadata);
     if (!cursor)
         return nullptr;
 
     tier_ = UniqueConstCodeSegment(codeSegment.release());
     metadata_ = metadata;
 
     return cursor;
 }
--- a/js/src/wasm/WasmCode.h
+++ b/js/src/wasm/WasmCode.h
@@ -26,16 +26,17 @@
 namespace js {
 
 struct AsmJSMetadata;
 class WasmInstanceObject;
 
 namespace wasm {
 
 struct LinkData;
+struct LinkDataTier;
 struct Metadata;
 struct MetadataTier;
 class FrameIterator;
 
 // ShareableBytes is a reference-counted Vector of bytes.
 
 struct ShareableBytes : ShareableBase<ShareableBytes>
 {
@@ -80,44 +81,44 @@ class CodeSegment
     // signal-handler control-flow transfer.
     uint8_t* interruptCode_;
     uint8_t* outOfBoundsCode_;
     uint8_t* unalignedAccessCode_;
 
     bool initialize(UniqueCodeBytes bytes,
                     uint32_t codeLength,
                     const ShareableBytes& bytecode,
-                    const LinkData& linkData,
+                    const LinkDataTier& linkData,
                     const Metadata& metadata);
 
     static UniqueConstCodeSegment create(UniqueCodeBytes bytes,
                                          uint32_t codeLength,
                                          const ShareableBytes& bytecode,
-                                         const LinkData& linkData,
+                                         const LinkDataTier& linkData,
                                          const Metadata& metadata);
   public:
     CodeSegment(const CodeSegment&) = delete;
     void operator=(const CodeSegment&) = delete;
 
     CodeSegment()
       : functionLength_(0),
         length_(0),
         interruptCode_(nullptr),
         outOfBoundsCode_(nullptr),
         unalignedAccessCode_(nullptr)
     {}
 
     static UniqueConstCodeSegment create(jit::MacroAssembler& masm,
                                          const ShareableBytes& bytecode,
-                                         const LinkData& linkData,
+                                         const LinkDataTier& linkData,
                                          const Metadata& metadata);
 
     static UniqueConstCodeSegment create(const Bytes& unlinkedBytes,
                                          const ShareableBytes& bytecode,
-                                         const LinkData& linkData,
+                                         const LinkDataTier& linkData,
                                          const Metadata& metadata);
 
     uint8_t* base() const { return bytes_.get(); }
     uint32_t length() const { return length_; }
 
     uint8_t* interruptCode() const { return interruptCode_; }
     uint8_t* outOfBoundsCode() const { return outOfBoundsCode_; }
     uint8_t* unalignedAccessCode() const { return unalignedAccessCode_; }
@@ -133,19 +134,19 @@ class CodeSegment
     }
     bool containsCodePC(const void* pc) const {
         return pc >= base() && pc < (base() + length_);
     }
 
     // Structured clone support:
 
     size_t serializedSize() const;
-    uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
+    uint8_t* serialize(uint8_t* cursor, const LinkDataTier& linkData) const;
     const uint8_t* deserialize(const uint8_t* cursor, const ShareableBytes& bytecode,
-                               const LinkData& linkData, const Metadata& metadata);
+                               const LinkDataTier& linkData, const Metadata& metadata);
 
     void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, size_t* data) const;
 };
 
 // A FuncExport represents a single function definition inside a wasm Module
 // that has been exported one or more times. A FuncExport represents an
 // internal entry point that can be called via function definition index by
 // Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by
@@ -317,23 +318,25 @@ typedef Vector<ExprType, 0, SystemAllocP
 // subsystem subclasses the Metadata, adding more tier-invariant data, some of
 // which is serialized.  See AsmJS.cpp.
 
 struct MetadataCacheablePod
 {
     ModuleKind            kind;
     MemoryUsage           memoryUsage;
     uint32_t              minMemoryLength;
+    uint32_t              globalDataLength;
     Maybe<uint32_t>       maxMemoryLength;
     Maybe<uint32_t>       startFuncIndex;
 
     explicit MetadataCacheablePod(ModuleKind kind)
       : kind(kind),
         memoryUsage(MemoryUsage::None),
-        minMemoryLength(0)
+        minMemoryLength(0),
+        globalDataLength(0)
     {}
 };
 
 typedef uint8_t ModuleHash[8];
 
 struct MetadataTier
 {
     MemoryAccessVector    memoryAccesses;
--- a/js/src/wasm/WasmDebug.h
+++ b/js/src/wasm/WasmDebug.h
@@ -29,16 +29,17 @@ class Debugger;
 class WasmActivation;
 class WasmBreakpoint;
 class WasmBreakpointSite;
 class WasmInstanceObject;
 
 namespace wasm {
 
 struct LinkData;
+struct LinkDataTier;
 struct MetadataTier;
 class FrameIterator;
 
 // The generated source location for the AST node/expression. The offset field refers
 // an offset in an binary format file.
 
 struct ExprLoc
 {
--- a/js/src/wasm/WasmGenerator.cpp
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -43,16 +43,18 @@ using mozilla::MakeEnumeratedRange;
 
 static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
 static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
 static const uint32_t BAD_CODE_RANGE = UINT32_MAX;
 
 ModuleGenerator::ModuleGenerator(UniqueChars* error)
   : compileMode_(CompileMode(-1)),
     error_(error),
+    linkDataTier_(nullptr),
+    metadataTier_(nullptr),
     numSigs_(0),
     numTables_(0),
     lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
     masmAlloc_(&lifo_),
     masm_(MacroAssembler::WasmToken(), masmAlloc_),
     lastPatchedCallsite_(0),
     startOfUnpatchedCallsites_(0),
     parallel_(false),
@@ -104,16 +106,20 @@ ModuleGenerator::~ModuleGenerator()
     MOZ_ASSERT_IF(finishedFuncDefs_, !currentTask_);
 }
 
 bool
 ModuleGenerator::initAsmJS(Metadata* asmJSMetadata)
 {
     MOZ_ASSERT(env_->isAsmJS());
 
+    if (!linkData_.initTier())
+        return false;
+    linkDataTier_ = &linkData_.tier();
+
     metadataTier_ = &asmJSMetadata->tier();
     metadata_ = asmJSMetadata;
     MOZ_ASSERT(isAsmJS());
 
     // Enabling debugging requires baseline and baseline is only enabled for
     // wasm (since the baseline does not currently support Atomics or SIMD).
 
     metadata_->debugEnabled = false;
@@ -130,16 +136,20 @@ ModuleGenerator::initAsmJS(Metadata* asm
     return true;
 }
 
 bool
 ModuleGenerator::initWasm(const CompileArgs& args)
 {
     MOZ_ASSERT(!env_->isAsmJS());
 
+    if (!linkData_.initTier())
+        return false;
+    linkDataTier_ = &linkData_.tier();
+
     auto metadataTier = js::MakeUnique<MetadataTier>();
     if (!metadataTier)
         return false;
 
     metadata_ = js_new<Metadata>(Move(metadataTier));
     if (!metadata_)
         return false;
 
@@ -154,18 +164,18 @@ ModuleGenerator::initWasm(const CompileA
                    : CompileMode::Ion;
 
     // For wasm, the Vectors are correctly-sized and already initialized.
 
     numSigs_ = env_->sigs.length();
     numTables_ = env_->tables.length();
 
     for (size_t i = 0; i < env_->funcImportGlobalDataOffsets.length(); i++) {
-        env_->funcImportGlobalDataOffsets[i] = linkData_.globalDataLength;
-        linkData_.globalDataLength += sizeof(FuncImportTls);
+        env_->funcImportGlobalDataOffsets[i] = metadata_->globalDataLength;
+        metadata_->globalDataLength += sizeof(FuncImportTls);
         if (!addFuncImport(*env_->funcSigs[i], env_->funcImportGlobalDataOffsets[i]))
             return false;
     }
 
     for (TableDesc& table : env_->tables) {
         if (!allocateGlobalBytes(sizeof(TableTls), sizeof(void*), &table.globalDataOffset))
             return false;
     }
@@ -226,18 +236,16 @@ ModuleGenerator::initWasm(const CompileA
 }
 
 bool
 ModuleGenerator::init(UniqueModuleEnvironment env, const CompileArgs& args,
                       Metadata* maybeAsmJSMetadata)
 {
     env_ = Move(env);
 
-    linkData_.globalDataLength = 0;
-
     if (!funcToCodeRange_.appendN(BAD_CODE_RANGE, env_->funcSigs.length()))
         return false;
 
     if (!assumptions_.clone(args.assumptions))
         return false;
 
     if (!exportedFuncs_.init())
         return false;
@@ -633,19 +641,19 @@ ModuleGenerator::finishCodegen()
         return false;
 
     debugTrapStub.offsetBy(offsetInWhole);
     if (!metadataTier_->codeRanges.emplaceBack(CodeRange::DebugTrap, debugTrapStub))
         return false;
 
     // Fill in LinkData with the offsets of these stubs.
 
-    linkData_.unalignedAccessOffset = unalignedAccessExit.begin;
-    linkData_.outOfBoundsOffset = outOfBoundsExit.begin;
-    linkData_.interruptOffset = interruptExit.begin;
+    linkDataTier_->unalignedAccessOffset = unalignedAccessExit.begin;
+    linkDataTier_->outOfBoundsOffset = outOfBoundsExit.begin;
+    linkDataTier_->interruptOffset = interruptExit.begin;
 
     // Now that all other code has been emitted, patch all remaining callsites
     // then far jumps. Patching callsites can generate far jumps so there is an
     // ordering dependency.
 
     if (!patchCallSites())
         return false;
 
@@ -658,36 +666,36 @@ ModuleGenerator::finishCodegen()
     return !masm_.oom();
 }
 
 bool
 ModuleGenerator::finishLinkData()
 {
     // Inflate the global bytes up to page size so that the total bytes are a
     // page size (as required by the allocator functions).
-    linkData_.globalDataLength = AlignBytes(linkData_.globalDataLength, gc::SystemPageSize());
+    metadata_->globalDataLength = AlignBytes(metadata_->globalDataLength, gc::SystemPageSize());
 
     // Add links to absolute addresses identified symbolically.
     for (size_t i = 0; i < masm_.numSymbolicAccesses(); i++) {
         SymbolicAccess src = masm_.symbolicAccess(i);
-        if (!linkData_.symbolicLinks[src.target].append(src.patchAt.offset()))
+        if (!linkDataTier_->symbolicLinks[src.target].append(src.patchAt.offset()))
             return false;
     }
 
     // Relative link metadata: absolute addresses that refer to another point within
     // the asm.js module.
 
     // CodeLabels are used for switch cases and loads from floating-point /
     // SIMD values in the constant pool.
     for (size_t i = 0; i < masm_.numCodeLabels(); i++) {
         CodeLabel cl = masm_.codeLabel(i);
-        LinkData::InternalLink inLink(LinkData::InternalLink::CodeLabel);
+        LinkDataTier::InternalLink inLink(LinkDataTier::InternalLink::CodeLabel);
         inLink.patchAtOffset = masm_.labelToPatchOffset(*cl.patchAt());
         inLink.targetOffset = cl.target()->offset();
-        if (!linkData_.internalLinks.append(inLink))
+        if (!linkDataTier_->internalLinks.append(inLink))
             return false;
     }
 
     return true;
 }
 
 bool
 ModuleGenerator::addFuncImport(const Sig& sig, uint32_t globalDataOffset)
@@ -699,29 +707,29 @@ ModuleGenerator::addFuncImport(const Sig
         return false;
 
     return metadataTier_->funcImports.emplaceBack(Move(copy), globalDataOffset);
 }
 
 bool
 ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset)
 {
-    CheckedInt<uint32_t> newGlobalDataLength(linkData_.globalDataLength);
+    CheckedInt<uint32_t> newGlobalDataLength(metadata_->globalDataLength);
 
     newGlobalDataLength += ComputeByteAlignment(newGlobalDataLength.value(), align);
     if (!newGlobalDataLength.isValid())
         return false;
 
     *globalDataOffset = newGlobalDataLength.value();
     newGlobalDataLength += bytes;
 
     if (!newGlobalDataLength.isValid())
         return false;
 
-    linkData_.globalDataLength = newGlobalDataLength.value();
+    metadata_->globalDataLength = newGlobalDataLength.value();
     return true;
 }
 
 bool
 ModuleGenerator::allocateGlobal(GlobalDesc* global)
 {
     MOZ_ASSERT(!startedFuncDefs_);
     unsigned width = 0;
@@ -1002,17 +1010,17 @@ ModuleGenerator::finishFuncDefs()
     if (currentTask_ && !launchBatchCompile())
         return false;
 
     while (outstanding_ > 0) {
         if (!finishOutstandingTask())
             return false;
     }
 
-    linkData_.functionCodeLength = masm_.size();
+    linkDataTier_->functionCodeLength = masm_.size();
     finishedFuncDefs_ = true;
 
     // Generate wrapper functions for every import. These wrappers turn imports
     // into plain functions so they can be put into tables and re-exported.
     // asm.js cannot do either and so no wrappers are generated.
 
     if (!isAsmJS()) {
         for (size_t funcIndex = 0; funcIndex < numFuncImports(); funcIndex++) {
@@ -1185,17 +1193,17 @@ ModuleGenerator::finish(const ShareableB
     }
 #endif
 
     if (!finishLinkData())
         return nullptr;
 
     generateBytecodeHash(bytecode);
 
-    UniqueConstCodeSegment codeSegment = CodeSegment::create(masm_, bytecode, linkData_, *metadata_);
+    UniqueConstCodeSegment codeSegment = CodeSegment::create(masm_, bytecode, *linkDataTier_, *metadata_);
     if (!codeSegment)
         return nullptr;
 
     UniqueConstBytes maybeDebuggingBytes;
     if (metadata_->debugEnabled) {
         Bytes bytes;
         if (!bytes.resize(masm_.bytesNeeded()))
             return nullptr;
--- a/js/src/wasm/WasmGenerator.h
+++ b/js/src/wasm/WasmGenerator.h
@@ -215,16 +215,17 @@ class MOZ_STACK_CLASS ModuleGenerator
     typedef EnumeratedArray<Trap, Trap::Limit, CallableOffsets> TrapExitOffsetArray;
 
     // Constant parameters
     CompileMode                     compileMode_;
     UniqueChars*                    error_;
 
     // Data that is moved into the result of finish()
     Assumptions                     assumptions_;
+    LinkDataTier*                   linkDataTier_; // Owned by linkData_
     LinkData                        linkData_;
     MetadataTier*                   metadataTier_; // Owned by metadata_
     MutableMetadata                 metadata_;
 
     // Data scoped to the ModuleGenerator's lifetime
     UniqueModuleEnvironment         env_;
     uint32_t                        numSigs_;
     uint32_t                        numTables_;
--- a/js/src/wasm/WasmModule.cpp
+++ b/js/src/wasm/WasmModule.cpp
@@ -37,111 +37,145 @@ using namespace js::wasm;
 
 using mozilla::IsNaN;
 
 const char wasm::InstanceExportField[] = "exports";
 
 #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
 // On MIPS, CodeLabels are instruction immediates so InternalLinks only
 // patch instruction immediates.
-LinkData::InternalLink::InternalLink(Kind kind)
+LinkDataTier::InternalLink::InternalLink(Kind kind)
 {
     MOZ_ASSERT(kind == CodeLabel || kind == InstructionImmediate);
 }
 
 bool
-LinkData::InternalLink::isRawPointerPatch()
+LinkDataTier::InternalLink::isRawPointerPatch()
 {
     return false;
 }
 #else
 // On the rest, CodeLabels are raw pointers so InternalLinks only patch
 // raw pointers.
-LinkData::InternalLink::InternalLink(Kind kind)
+LinkDataTier::InternalLink::InternalLink(Kind kind)
 {
     MOZ_ASSERT(kind == CodeLabel || kind == RawPointer);
 }
 
 bool
-LinkData::InternalLink::isRawPointerPatch()
+LinkDataTier::InternalLink::isRawPointerPatch()
 {
     return true;
 }
 #endif
 
 size_t
-LinkData::SymbolicLinkArray::serializedSize() const
+LinkDataTier::SymbolicLinkArray::serializedSize() const
 {
     size_t size = 0;
     for (const Uint32Vector& offsets : *this)
         size += SerializedPodVectorSize(offsets);
     return size;
 }
 
 uint8_t*
-LinkData::SymbolicLinkArray::serialize(uint8_t* cursor) const
+LinkDataTier::SymbolicLinkArray::serialize(uint8_t* cursor) const
 {
     for (const Uint32Vector& offsets : *this)
         cursor = SerializePodVector(cursor, offsets);
     return cursor;
 }
 
 const uint8_t*
-LinkData::SymbolicLinkArray::deserialize(const uint8_t* cursor)
+LinkDataTier::SymbolicLinkArray::deserialize(const uint8_t* cursor)
 {
     for (Uint32Vector& offsets : *this) {
         cursor = DeserializePodVector(cursor, &offsets);
         if (!cursor)
             return nullptr;
     }
     return cursor;
 }
 
 size_t
-LinkData::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+LinkDataTier::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     size_t size = 0;
     for (const Uint32Vector& offsets : *this)
         size += offsets.sizeOfExcludingThis(mallocSizeOf);
     return size;
 }
 
 size_t
-LinkData::serializedSize() const
+LinkDataTier::serializedSize() const
 {
     return sizeof(pod()) +
            SerializedPodVectorSize(internalLinks) +
            symbolicLinks.serializedSize();
 }
 
 uint8_t*
-LinkData::serialize(uint8_t* cursor) const
+LinkDataTier::serialize(uint8_t* cursor) const
 {
     cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
     cursor = SerializePodVector(cursor, internalLinks);
     cursor = symbolicLinks.serialize(cursor);
     return cursor;
 }
 
 const uint8_t*
-LinkData::deserialize(const uint8_t* cursor)
+LinkDataTier::deserialize(const uint8_t* cursor)
 {
     (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
     (cursor = DeserializePodVector(cursor, &internalLinks)) &&
     (cursor = symbolicLinks.deserialize(cursor));
     return cursor;
 }
 
 size_t
-LinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+LinkDataTier::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return internalLinks.sizeOfExcludingThis(mallocSizeOf) +
            symbolicLinks.sizeOfExcludingThis(mallocSizeOf);
 }
 
+bool
+LinkData::initTier()
+{
+    MOZ_ASSERT(!tier_);
+    tier_ = js::MakeUnique<LinkDataTier>();
+    return tier_ != nullptr;
+}
+
+size_t
+LinkData::serializedSize() const
+{
+    return tier_->serializedSize();
+}
+
+uint8_t*
+LinkData::serialize(uint8_t* cursor) const
+{
+    cursor = tier_->serialize(cursor);
+    return cursor;
+}
+
+const uint8_t*
+LinkData::deserialize(const uint8_t* cursor)
+{
+    (cursor = tier_->deserialize(cursor));
+    return cursor;
+}
+
+size_t
+LinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    return tier_->sizeOfExcludingThis(mallocSizeOf);
+}
+
 /* virtual */ void
 Module::serializedSize(size_t* maybeBytecodeSize, size_t* maybeCompiledSize) const
 {
     if (maybeBytecodeSize)
         *maybeBytecodeSize = bytecode_->bytes.length();
 
     // The compiled debug code must not be saved, set compiled size to 0,
     // so Module::assumptionsMatch will return false during assumptions
@@ -220,16 +254,19 @@ Module::deserialize(const uint8_t* bytec
     memcpy(bytecode->bytes.begin(), bytecodeBegin, bytecodeSize);
 
     Assumptions assumptions;
     const uint8_t* cursor = assumptions.deserialize(compiledBegin, compiledSize);
     if (!cursor)
         return nullptr;
 
     LinkData linkData;
+    if (!linkData.initTier())
+        return nullptr;
+
     cursor = linkData.deserialize(cursor);
     if (!cursor)
         return nullptr;
 
     ImportVector imports;
     cursor = DeserializeVector(cursor, &imports);
     if (!cursor)
         return nullptr;
@@ -864,30 +901,30 @@ Module::instantiate(JSContext* cx,
     if (!instantiateMemory(cx, &memory))
         return false;
 
     RootedWasmTableObject table(cx, tableImport);
     SharedTableVector tables;
     if (!instantiateTable(cx, &table, &tables))
         return false;
 
-    auto globalSegment = GlobalSegment::create(linkData_.globalDataLength);
+    auto globalSegment = GlobalSegment::create(metadata().globalDataLength);
     if (!globalSegment)
         return false;
 
     SharedCode code(code_);
 
     if (metadata().debugEnabled) {
         // The first time through, use the pre-linked code in the module but
         // mark it as busy. Subsequently, instantiate the copy of the code
         // bytes that we keep around for debugging instead, because the debugger
         // may patch the pre-linked code at any time.
         if (!codeIsBusy_.compareExchange(false, true)) {
             auto codeSegment = CodeSegment::create(*unlinkedCodeForDebugging_, *bytecode_,
-                                                   linkData_, metadata());
+                                                   linkData_.tier(), metadata());
             if (!codeSegment)
                 return false;
 
             code = js_new<Code>(Move(codeSegment), metadata());
             if (!code)
                 return false;
         }
     }
--- a/js/src/wasm/WasmModule.h
+++ b/js/src/wasm/WasmModule.h
@@ -28,31 +28,30 @@ namespace js {
 namespace wasm {
 
 // LinkData contains all the metadata necessary to patch all the locations
 // that depend on the absolute address of a CodeSegment.
 //
 // LinkData is built incrementing by ModuleGenerator and then stored immutably
 // in Module.
 
-struct LinkDataCacheablePod
+struct LinkDataTierCacheablePod
 {
     uint32_t functionCodeLength;
-    uint32_t globalDataLength;
     uint32_t interruptOffset;
     uint32_t outOfBoundsOffset;
     uint32_t unalignedAccessOffset;
 
-    LinkDataCacheablePod() { mozilla::PodZero(this); }
+    LinkDataTierCacheablePod() { mozilla::PodZero(this); }
 };
 
-struct LinkData : LinkDataCacheablePod
+struct LinkDataTier : LinkDataTierCacheablePod
 {
-    LinkDataCacheablePod& pod() { return *this; }
-    const LinkDataCacheablePod& pod() const { return *this; }
+    LinkDataTierCacheablePod& pod() { return *this; }
+    const LinkDataTierCacheablePod& pod() const { return *this; }
 
     struct InternalLink {
         enum Kind {
             RawPointer,
             CodeLabel,
             InstructionImmediate
         };
         MOZ_INIT_OUTSIDE_CTOR uint32_t patchAtOffset;
@@ -69,18 +68,34 @@ struct LinkData : LinkDataCacheablePod
     };
 
     InternalLinkVector  internalLinks;
     SymbolicLinkArray   symbolicLinks;
 
     WASM_DECLARE_SERIALIZABLE(LinkData)
 };
 
-typedef UniquePtr<LinkData> UniqueLinkData;
-typedef UniquePtr<const LinkData> UniqueConstLinkData;
+typedef UniquePtr<LinkDataTier> UniqueLinkDataTier;
+
+struct LinkData
+{
+    // `tier_` and the means of accessing it will become more complicated once
+    // tiering is implemented.
+    UniqueLinkDataTier tier_;
+
+    LinkData() : tier_(nullptr) {}
+
+    // Construct the tier_ object.
+    bool initTier();
+
+    const LinkDataTier& tier() const { MOZ_ASSERT(tier_); return *tier_; }
+    LinkDataTier& tier() { MOZ_ASSERT(tier_); return *tier_; }
+
+    WASM_DECLARE_SERIALIZABLE(LinkData)
+};
 
 // Module represents a compiled wasm module and primarily provides two
 // operations: instantiation and serialization. A Module can be instantiated any
 // number of times to produce new Instance objects. A Module can be serialized
 // any number of times such that the serialized bytes can be deserialized later
 // to produce a new, equivalent Module.
 //
 // Fully linked-and-instantiated code (represented by Code and its owned