Bug 1276028 - Baldr: split out CodeSegment and Metadata from Module (r=bbouvier)
author: Luke Wagner <luke@mozilla.com>
Mon, 06 Jun 2016 17:21:31 -0500
changeset 340808 a35b9743df321f79b1d669517fae209b46eb0cd7
parent 340807 525966b4be239563d28f15d5b261ea31ffbc1f43
child 340809 d6aea1ba992f47371228e66b7774761cb5326bae
push id: 6389
push user: raliiev@mozilla.com
push date: Mon, 19 Sep 2016 13:38:22 +0000
treeherdermozilla-beta@01d67bfe6c81 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: bbouvier
bugs: 1276028
milestone: 50.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1276028 - Baldr: split out CodeSegment and Metadata from Module (r=bbouvier) MozReview-Commit-ID: AnITPZYpgp1
js/src/asmjs/AsmJS.cpp
js/src/asmjs/Wasm.cpp
js/src/asmjs/WasmCode.cpp
js/src/asmjs/WasmCode.h
js/src/asmjs/WasmGenerator.cpp
js/src/asmjs/WasmGenerator.h
js/src/asmjs/WasmModule.cpp
js/src/asmjs/WasmModule.h
js/src/asmjs/WasmSerialize.h
js/src/asmjs/WasmTypes.h
js/src/moz.build
--- a/js/src/asmjs/AsmJS.cpp
+++ b/js/src/asmjs/AsmJS.cpp
@@ -291,150 +291,160 @@ class AsmJSExport
     }
     uint32_t endOffsetInModule() const {
         return endOffsetInModule_;
     }
 };
 
 typedef Vector<AsmJSExport, 0, SystemAllocPolicy> AsmJSExportVector;
 
-// Holds the trivially-memcpy()able, serializable portion of AsmJSModuleData.
-struct AsmJSModuleCacheablePod
+enum class CacheResult
+{
+    Hit,
+    Miss
+};
+
+// Holds the immutable guts of an AsmJSModule.
+//
+// AsmJSMetadata is built incrementally by ModuleValidator and then shared
+// immutably between AsmJSModules.
+
+struct AsmJSMetadataCacheablePod
 {
     uint32_t                minHeapLength;
     uint32_t                numFFIs;
     uint32_t                srcLength;
     uint32_t                srcLengthWithRightBrace;
+
+    AsmJSMetadataCacheablePod() { PodZero(this); }
 };
 
-// Holds the immutable guts of an AsmJSModule. This struct is mutably built up
-// by ModuleValidator and then handed over to the AsmJSModule constructor in
-// finish().
-struct AsmJSModuleData : AsmJSModuleCacheablePod
+struct AsmJSMetadata : RefCounted<AsmJSMetadata>, AsmJSMetadataCacheablePod
 {
     AsmJSGlobalVector       globals;
     AsmJSImportVector       imports;
     AsmJSExportVector       exports;
     PropertyName*           globalArgumentName;
     PropertyName*           importArgumentName;
     PropertyName*           bufferArgumentName;
 
+    CacheResult             cacheResult;
+
     // These values are not serialized since they are relative to the
     // containing script which can be different between serialization and
     // deserialization contexts. Thus, they must be set explicitly using the
     // ambient Parser/ScriptSource after deserialization. Cloning, however,
     // preserves the same exact parsing context and can copy these values.
     uint32_t                srcStart;
     uint32_t                srcBodyStart;
     bool                    strict;
     ScriptSourceHolder      scriptSource;
 
-    AsmJSModuleData()
+    AsmJSMetadata()
       : globalArgumentName(nullptr),
         importArgumentName(nullptr),
         bufferArgumentName(nullptr),
+        cacheResult(CacheResult::Miss),
         srcStart(0),
         srcBodyStart(0),
         strict(false)
-    {
-        PodZero(&pod());
-    }
-
-    AsmJSModuleCacheablePod& pod() { return *this; }
-    const AsmJSModuleCacheablePod& pod() const { return *this; }
+    {}
+
+    AsmJSMetadataCacheablePod& pod() { return *this; }
+    const AsmJSMetadataCacheablePod& pod() const { return *this; }
 
     void trace(JSTracer* trc) const {
         for (const AsmJSGlobal& global : globals)
             global.trace(trc);
         TraceNameField(trc, &globalArgumentName, "asm.js global argument name");
         TraceNameField(trc, &importArgumentName, "asm.js import argument name");
         TraceNameField(trc, &bufferArgumentName, "asm.js buffer argument name");
     }
 
-    WASM_DECLARE_SERIALIZABLE(AsmJSModuleData)
+    WASM_DECLARE_SERIALIZABLE(AsmJSMetadata)
 };
 
-typedef UniquePtr<AsmJSModuleData> UniqueAsmJSModuleData;
+typedef RefPtr<AsmJSMetadata> MutableAsmJSMetadata;
+typedef RefPtr<const AsmJSMetadata> SharedAsmJSMetadata;
 
 // An AsmJSModule is-a Module with the extra persistent state necessary to
 // represent a compiled asm.js module.
 class js::AsmJSModule final : public Module
 {
-    typedef UniquePtr<const AsmJSModuleData> UniqueConstAsmJSModuleData;
-    typedef UniquePtr<const StaticLinkData> UniqueConstStaticLinkData;
-
-    const UniqueConstStaticLinkData  link_;
-    const UniqueExportMap            exportMap_;
-    const UniqueConstAsmJSModuleData module_;
+    const SharedStaticLinkData staticLinkData_;
+    const SharedExportMap      exportMap_;
+    const SharedAsmJSMetadata  asmJSMetadata_;
 
   public:
-    AsmJSModule(UniqueModuleData base,
-                UniqueStaticLinkData link,
-                UniqueExportMap exportMap,
-                UniqueAsmJSModuleData module)
-      : Module(Move(base)),
-        link_(Move(link)),
-        exportMap_(Move(exportMap)),
-        module_(Move(module))
+    AsmJSModule(UniqueCodeSegment code,
+                const Metadata& metadata,
+                const StaticLinkData& staticLinkData,
+                const ExportMap& exportMap,
+                const AsmJSMetadata& asmJSMetadata)
+      : Module(Move(code), metadata),
+        staticLinkData_(&staticLinkData),
+        exportMap_(&exportMap),
+        asmJSMetadata_(&asmJSMetadata)
     {}
 
     virtual void trace(JSTracer* trc) override {
         Module::trace(trc);
-        module_->trace(trc);
+        asmJSMetadata_->trace(trc);
     }
     virtual void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, size_t* data) override {
         Module::addSizeOfMisc(mallocSizeOf, code, data);
-        *data += mallocSizeOf(link_.get()) + link_->sizeOfExcludingThis(mallocSizeOf);
+        *data += mallocSizeOf(staticLinkData_.get()) + staticLinkData_->sizeOfExcludingThis(mallocSizeOf);
         *data += mallocSizeOf(exportMap_.get()) + exportMap_->sizeOfExcludingThis(mallocSizeOf);
-        *data += mallocSizeOf(module_.get()) + module_->sizeOfExcludingThis(mallocSizeOf);
+        *data += mallocSizeOf(asmJSMetadata_.get()) + asmJSMetadata_->sizeOfExcludingThis(mallocSizeOf);
     }
     virtual bool mutedErrors() const override {
         return scriptSource()->mutedErrors();
     }
     virtual const char16_t* displayURL() const override {
         return scriptSource()->hasDisplayURL() ? scriptSource()->displayURL() : nullptr;
     }
     virtual ScriptSource* maybeScriptSource() const override {
         return scriptSource();
     }
 
-    uint32_t minHeapLength() const { return module_->minHeapLength; }
-    uint32_t numFFIs() const { return module_->numFFIs; }
-    bool strict() const { return module_->strict; }
-    ScriptSource* scriptSource() const { return module_->scriptSource.get(); }
-    const AsmJSGlobalVector& asmJSGlobals() const { return module_->globals; }
-    const AsmJSImportVector& asmJSImports() const { return module_->imports; }
-    const AsmJSExportVector& asmJSExports() const { return module_->exports; }
-    PropertyName* globalArgumentName() const { return module_->globalArgumentName; }
-    PropertyName* importArgumentName() const { return module_->importArgumentName; }
-    PropertyName* bufferArgumentName() const { return module_->bufferArgumentName; }
+    uint32_t minHeapLength() const { return asmJSMetadata_->minHeapLength; }
+    uint32_t numFFIs() const { return asmJSMetadata_->numFFIs; }
+    bool strict() const { return asmJSMetadata_->strict; }
+    ScriptSource* scriptSource() const { return asmJSMetadata_->scriptSource.get(); }
+    const AsmJSGlobalVector& asmJSGlobals() const { return asmJSMetadata_->globals; }
+    const AsmJSImportVector& asmJSImports() const { return asmJSMetadata_->imports; }
+    const AsmJSExportVector& asmJSExports() const { return asmJSMetadata_->exports; }
+    PropertyName* globalArgumentName() const { return asmJSMetadata_->globalArgumentName; }
+    PropertyName* importArgumentName() const { return asmJSMetadata_->importArgumentName; }
+    PropertyName* bufferArgumentName() const { return asmJSMetadata_->bufferArgumentName; }
+    bool loadedFromCache() const { return asmJSMetadata_->cacheResult == CacheResult::Hit; }
 
     // srcStart() refers to the offset in the ScriptSource to the beginning of
     // the asm.js module function. If the function has been created with the
     // Function constructor, this will be the first character in the function
     // source. Otherwise, it will be the opening parenthesis of the arguments
     // list.
     uint32_t srcStart() const {
-        return module_->srcStart;
+        return asmJSMetadata_->srcStart;
     }
     uint32_t srcEndBeforeCurly() const {
-        return module_->srcStart + module_->srcLength;
+        return asmJSMetadata_->srcStart + asmJSMetadata_->srcLength;
     }
     uint32_t srcEndAfterCurly() const {
-        return module_->srcStart + module_->srcLengthWithRightBrace;
+        return asmJSMetadata_->srcStart + asmJSMetadata_->srcLengthWithRightBrace;
     }
 
     // srcBodyStart() refers to the offset in the ScriptSource to the end
     // of the 'use asm' string-literal token.
     uint32_t srcBodyStart() const {
-        return module_->srcBodyStart;
+        return asmJSMetadata_->srcBodyStart;
     }
 
     bool staticallyLink(ExclusiveContext* cx) {
-        return Module::staticallyLink(cx, *link_);
+        return Module::staticallyLink(cx, *staticLinkData_);
     }
     bool dynamicallyLink(JSContext* cx,
                          Handle<WasmModuleObject*> moduleObj,
                          Handle<ArrayBufferObjectMaybeShared*> heap,
                          Handle<FunctionVector> imports,
                          MutableHandleObject exportObj) {
         return Module::dynamicallyLink(cx, moduleObj, heap, imports, *exportMap_, exportObj);
     }
@@ -1680,17 +1690,17 @@ class MOZ_STACK_CLASS ModuleValidator
     GlobalMap             globalMap_;
     SigMap                sigMap_;
     ImportMap             importMap_;
     ArrayViewVector       arrayViews_;
     bool                  atomicsPresent_;
 
     // State used to build the AsmJSModule in finish():
     ModuleGenerator       mg_;
-    UniqueAsmJSModuleData module_;
+    MutableAsmJSMetadata  asmJSMetadata_;
 
     // Error reporting:
     UniqueChars           errorString_;
     uint32_t              errorOffset_;
     bool                  errorOverRecursed_;
 
     // Helpers:
     bool addStandardLibraryMathName(const char* name, AsmJSMathBuiltinFunction func) {
@@ -1771,25 +1781,25 @@ class MOZ_STACK_CLASS ModuleValidator
                                            JSMSG_USE_ASM_TYPE_FAIL,
                                            errorString_.get());
         }
         if (errorOverRecursed_)
             ReportOverRecursed(cx_);
     }
 
     bool init() {
-        module_ = cx_->make_unique<AsmJSModuleData>();
-        if (!module_)
-            return false;
-
-        module_->minHeapLength = RoundUpToNextValidAsmJSHeapLength(0);
-        module_->srcStart = moduleFunctionNode_->pn_body->pn_pos.begin;
-        module_->srcBodyStart = parser_.tokenStream.currentToken().pos.end;
-        module_->strict = parser_.pc->sc->strict() && !parser_.pc->sc->hasExplicitUseStrict();
-        module_->scriptSource.reset(parser_.ss);
+        asmJSMetadata_ = cx_->new_<AsmJSMetadata>();
+        if (!asmJSMetadata_)
+            return false;
+
+        asmJSMetadata_->minHeapLength = RoundUpToNextValidAsmJSHeapLength(0);
+        asmJSMetadata_->srcStart = moduleFunctionNode_->pn_body->pn_pos.begin;
+        asmJSMetadata_->srcBodyStart = parser_.tokenStream.currentToken().pos.end;
+        asmJSMetadata_->strict = parser_.pc->sc->strict() && !parser_.pc->sc->hasExplicitUseStrict();
+        asmJSMetadata_->scriptSource.reset(parser_.ss);
 
         if (!globalMap_.init() || !sigMap_.init() || !importMap_.init())
             return false;
 
         if (!standardLibraryMathNames_.init() ||
             !addStandardLibraryMathName("sin", AsmJSMathBuiltin_sin) ||
             !addStandardLibraryMathName("cos", AsmJSMathBuiltin_cos) ||
             !addStandardLibraryMathName("tan", AsmJSMathBuiltin_tan) ||
@@ -1866,49 +1876,49 @@ class MOZ_STACK_CLASS ModuleValidator
             filename = DuplicateString(parser_.ss->filename());
             if (!filename)
                 return false;
         }
 
         if (!mg_.init(Move(genData), Move(filename)))
             return false;
 
-        mg_.bumpMinHeapLength(module_->minHeapLength);
+        mg_.bumpMinHeapLength(asmJSMetadata_->minHeapLength);
 
         return true;
     }
 
     ExclusiveContext* cx() const             { return cx_; }
     PropertyName* moduleFunctionName() const { return moduleFunctionName_; }
-    PropertyName* globalArgumentName() const { return module_->globalArgumentName; }
-    PropertyName* importArgumentName() const { return module_->importArgumentName; }
-    PropertyName* bufferArgumentName() const { return module_->bufferArgumentName; }
+    PropertyName* globalArgumentName() const { return asmJSMetadata_->globalArgumentName; }
+    PropertyName* importArgumentName() const { return asmJSMetadata_->importArgumentName; }
+    PropertyName* bufferArgumentName() const { return asmJSMetadata_->bufferArgumentName; }
     ModuleGenerator& mg()                    { return mg_; }
     AsmJSParser& parser() const              { return parser_; }
     TokenStream& tokenStream() const         { return parser_.tokenStream; }
     RootedFunction& dummyFunction()          { return dummyFunction_; }
     bool supportsSimd() const                { return cx_->jitSupportsSimd(); }
     bool atomicsPresent() const              { return atomicsPresent_; }
-    uint32_t minHeapLength() const           { return module_->minHeapLength; }
+    uint32_t minHeapLength() const           { return asmJSMetadata_->minHeapLength; }
 
     void initModuleFunctionName(PropertyName* name) {
         MOZ_ASSERT(!moduleFunctionName_);
         moduleFunctionName_ = name;
     }
     void initGlobalArgumentName(PropertyName* n) {
         MOZ_ASSERT(n->isTenured());
-        module_->globalArgumentName = n;
+        asmJSMetadata_->globalArgumentName = n;
     }
     void initImportArgumentName(PropertyName* n) {
         MOZ_ASSERT(n->isTenured());
-        module_->importArgumentName = n;
+        asmJSMetadata_->importArgumentName = n;
     }
     void initBufferArgumentName(PropertyName* n) {
         MOZ_ASSERT(n->isTenured());
-        module_->bufferArgumentName = n;
+        asmJSMetadata_->bufferArgumentName = n;
     }
     bool addGlobalVarInit(PropertyName* var, const NumLit& lit, Type type, bool isConst)
     {
         MOZ_ASSERT(type.isGlobalVarType());
         MOZ_ASSERT(type == Type::canonicalize(Type::lit(lit)));
 
         uint32_t index;
         if (!mg_.allocateGlobal(type.canonicalToValType(), isConst, &index))
@@ -1924,17 +1934,17 @@ class MOZ_STACK_CLASS ModuleValidator
             global->u.varOrConst.literalValue_ = lit;
         if (!globalMap_.putNew(var, global))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::Variable, nullptr);
         g.pod.u.var.initKind_ = AsmJSGlobal::InitConstant;
         g.pod.u.var.u.val_ = lit.value();
         g.pod.u.var.globalDataOffset_ = mg_.global(index).globalDataOffset;
-        return module_->globals.append(g);
+        return asmJSMetadata_->globals.append(g);
     }
     bool addGlobalVarImport(PropertyName* var, PropertyName* field, Type type, bool isConst) {
         MOZ_ASSERT(type.isGlobalVarType());
 
         uint32_t index;
         ValType valType = type.canonicalToValType();
         if (!mg_.allocateGlobal(valType, isConst, &index))
             return false;
@@ -1947,46 +1957,46 @@ class MOZ_STACK_CLASS ModuleValidator
         global->u.varOrConst.type_ = type.which();
         if (!globalMap_.putNew(var, global))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::Variable, field);
         g.pod.u.var.initKind_ = AsmJSGlobal::InitImport;
         g.pod.u.var.u.importType_ = valType;
         g.pod.u.var.globalDataOffset_ = mg_.global(index).globalDataOffset;
-        return module_->globals.append(g);
+        return asmJSMetadata_->globals.append(g);
     }
     bool addArrayView(PropertyName* var, Scalar::Type vt, PropertyName* maybeField) {
         if (!arrayViews_.append(ArrayView(var, vt)))
             return false;
 
         Global* global = validationLifo_.new_<Global>(Global::ArrayView);
         if (!global)
             return false;
         global->u.viewInfo.viewType_ = vt;
         if (!globalMap_.putNew(var, global))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::ArrayView, maybeField);
         g.pod.u.viewType_ = vt;
-        return module_->globals.append(g);
+        return asmJSMetadata_->globals.append(g);
     }
     bool addMathBuiltinFunction(PropertyName* var, AsmJSMathBuiltinFunction func,
                                 PropertyName* field)
     {
         Global* global = validationLifo_.new_<Global>(Global::MathBuiltinFunction);
         if (!global)
             return false;
         global->u.mathBuiltinFunc_ = func;
         if (!globalMap_.putNew(var, global))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::MathBuiltinFunction, field);
         g.pod.u.mathBuiltinFunc_ = func;
-        return module_->globals.append(g);
+        return asmJSMetadata_->globals.append(g);
     }
   private:
     bool addGlobalDoubleConstant(PropertyName* var, double constant) {
         Global* global = validationLifo_.new_<Global>(Global::ConstantLiteral);
         if (!global)
             return false;
         global->u.varOrConst.type_ = Type::Double;
         global->u.varOrConst.literalValue_ = NumLit(NumLit::Double, DoubleValue(constant));
@@ -1995,97 +2005,97 @@ class MOZ_STACK_CLASS ModuleValidator
   public:
     bool addMathBuiltinConstant(PropertyName* var, double constant, PropertyName* field) {
         if (!addGlobalDoubleConstant(var, constant))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::Constant, field);
         g.pod.u.constant.value_ = constant;
         g.pod.u.constant.kind_ = AsmJSGlobal::MathConstant;
-        return module_->globals.append(g);
+        return asmJSMetadata_->globals.append(g);
     }
     bool addGlobalConstant(PropertyName* var, double constant, PropertyName* field) {
         if (!addGlobalDoubleConstant(var, constant))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::Constant, field);
         g.pod.u.constant.value_ = constant;
         g.pod.u.constant.kind_ = AsmJSGlobal::GlobalConstant;
-        return module_->globals.append(g);
+        return asmJSMetadata_->globals.append(g);
     }
     bool addAtomicsBuiltinFunction(PropertyName* var, AsmJSAtomicsBuiltinFunction func,
                                    PropertyName* field)
     {
         atomicsPresent_ = true;
 
         Global* global = validationLifo_.new_<Global>(Global::AtomicsBuiltinFunction);
         if (!global)
             return false;
         global->u.atomicsBuiltinFunc_ = func;
         if (!globalMap_.putNew(var, global))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::AtomicsBuiltinFunction, field);
         g.pod.u.atomicsBuiltinFunc_ = func;
-        return module_->globals.append(g);
+        return asmJSMetadata_->globals.append(g);
     }
     bool addSimdCtor(PropertyName* var, SimdType type, PropertyName* field) {
         Global* global = validationLifo_.new_<Global>(Global::SimdCtor);
         if (!global)
             return false;
         global->u.simdCtorType_ = type;
         if (!globalMap_.putNew(var, global))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::SimdCtor, field);
         g.pod.u.simdCtorType_ = type;
-        return module_->globals.append(g);
+        return asmJSMetadata_->globals.append(g);
     }
     bool addSimdOperation(PropertyName* var, SimdType type, SimdOperation op, PropertyName* opName)
     {
         Global* global = validationLifo_.new_<Global>(Global::SimdOp);
         if (!global)
             return false;
         global->u.simdOp.type_ = type;
         global->u.simdOp.which_ = op;
         if (!globalMap_.putNew(var, global))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::SimdOp, opName);
         g.pod.u.simdOp.type_ = type;
         g.pod.u.simdOp.which_ = op;
-        return module_->globals.append(g);
+        return asmJSMetadata_->globals.append(g);
     }
     bool addArrayViewCtor(PropertyName* var, Scalar::Type vt, PropertyName* field) {
         Global* global = validationLifo_.new_<Global>(Global::ArrayViewCtor);
         if (!global)
             return false;
         global->u.viewInfo.viewType_ = vt;
         if (!globalMap_.putNew(var, global))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::ArrayViewCtor, field);
         g.pod.u.viewType_ = vt;
-        return module_->globals.append(g);
+        return asmJSMetadata_->globals.append(g);
     }
     bool addFFI(PropertyName* var, PropertyName* field) {
-        if (module_->numFFIs == UINT32_MAX)
-            return false;
-        uint32_t ffiIndex = module_->numFFIs++;
+        if (asmJSMetadata_->numFFIs == UINT32_MAX)
+            return false;
+        uint32_t ffiIndex = asmJSMetadata_->numFFIs++;
 
         Global* global = validationLifo_.new_<Global>(Global::FFI);
         if (!global)
             return false;
         global->u.ffiIndex_ = ffiIndex;
         if (!globalMap_.putNew(var, global))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::FFI, field);
         g.pod.u.ffiIndex_ = ffiIndex;
-        return module_->globals.append(g);
+        return asmJSMetadata_->globals.append(g);
     }
     bool addExportField(ParseNode* pn, const Func& func, PropertyName* maybeFieldName) {
         // Record the field name of this export.
         CacheableChars fieldName;
         if (maybeFieldName)
             fieldName = StringToNewUTF8CharsZ(cx_, *maybeFieldName);
         else
             fieldName = DuplicateString("");
@@ -2095,20 +2105,20 @@ class MOZ_STACK_CLASS ModuleValidator
         // Declare which function is exported which gives us an index into the
         // module ExportVector.
         uint32_t exportIndex;
         if (!mg_.declareExport(Move(fieldName), func.index(), &exportIndex))
             return false;
 
         // The exported function might have already been exported in which case
         // the index will refer into the range of AsmJSExports.
-        MOZ_ASSERT(exportIndex <= module_->exports.length());
-        return exportIndex < module_->exports.length() ||
-               module_->exports.emplaceBack(func.srcBegin() - module_->srcStart,
-                                            func.srcEnd() - module_->srcStart);
+        MOZ_ASSERT(exportIndex <= asmJSMetadata_->exports.length());
+        return exportIndex < asmJSMetadata_->exports.length() ||
+               asmJSMetadata_->exports.emplaceBack(func.srcBegin() - asmJSMetadata_->srcStart,
+                                                   func.srcEnd() - asmJSMetadata_->srcStart);
     }
     bool addFunction(PropertyName* name, uint32_t firstUse, Sig&& sig, Func** func) {
         uint32_t sigIndex;
         if (!declareSig(Move(sig), &sigIndex))
             return false;
         uint32_t funcIndex = numFunctions();
         if (funcIndex >= MaxFuncs)
             return failCurrentOffset("too many functions");
@@ -2150,37 +2160,37 @@ class MOZ_STACK_CLASS ModuleValidator
         return true;
     }
     bool declareImport(PropertyName* name, Sig&& sig, unsigned ffiIndex, uint32_t* importIndex) {
         ImportMap::AddPtr p = importMap_.lookupForAdd(NamedSig::Lookup(name, sig));
         if (p) {
             *importIndex = p->value();
             return true;
         }
-        *importIndex = module_->imports.length();
+        *importIndex = asmJSMetadata_->imports.length();
         if (*importIndex >= MaxImports)
             return failCurrentOffset("too many imports");
-        if (!module_->imports.emplaceBack(ffiIndex))
+        if (!asmJSMetadata_->imports.emplaceBack(ffiIndex))
             return false;
         uint32_t sigIndex;
         if (!declareSig(Move(sig), &sigIndex))
             return false;
         if (!mg_.initImport(*importIndex, sigIndex))
             return false;
         return importMap_.add(p, NamedSig(name, mg_.sig(sigIndex)), *importIndex);
     }
 
     bool tryConstantAccess(uint64_t start, uint64_t width) {
         MOZ_ASSERT(UINT64_MAX - start > width);
         uint64_t len = start + width;
         if (len > uint64_t(INT32_MAX) + 1)
             return false;
         len = RoundUpToNextValidAsmJSHeapLength(len);
-        if (len > module_->minHeapLength) {
-            module_->minHeapLength = len;
+        if (len > asmJSMetadata_->minHeapLength) {
+            asmJSMetadata_->minHeapLength = len;
             mg_.bumpMinHeapLength(len);
         }
         return true;
     }
 
     // Error handling.
     bool hasAlreadyFailed() const {
         return !!errorString_;
@@ -2315,34 +2325,41 @@ class MOZ_STACK_CLASS ModuleValidator
         CacheableCharsVector funcNames;
         for (const Func* func : functions_) {
             CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func->name());
             if (!funcName || !funcNames.emplaceBack(Move(funcName)))
                 return false;
         }
 
         uint32_t endBeforeCurly = tokenStream().currentToken().pos.end;
-        module_->srcLength = endBeforeCurly - module_->srcStart;
+        asmJSMetadata_->srcLength = endBeforeCurly - asmJSMetadata_->srcStart;
 
         TokenPos pos;
         JS_ALWAYS_TRUE(tokenStream().peekTokenPos(&pos, TokenStream::Operand));
         uint32_t endAfterCurly = pos.end;
-        module_->srcLengthWithRightBrace = endAfterCurly - module_->srcStart;
-
-        UniqueModuleData base;
-        UniqueStaticLinkData link;
-        UniqueExportMap exportMap;
-        if (!mg_.finish(Move(funcNames), &base, &link, &exportMap, slowFuncs))
+        asmJSMetadata_->srcLengthWithRightBrace = endAfterCurly - asmJSMetadata_->srcStart;
+
+        UniqueCodeSegment code;
+        SharedMetadata metadata;
+        SharedStaticLinkData staticLinkData;
+        SharedExportMap exportMap;
+        if (!mg_.finish(Move(funcNames), &code, &metadata, &staticLinkData, &exportMap, slowFuncs))
             return false;
 
         moduleObj.set(WasmModuleObject::create(cx_));
         if (!moduleObj)
             return false;
 
-        return moduleObj->init(js_new<AsmJSModule>(Move(base), Move(link), Move(exportMap), Move(module_)));
+        auto* module = js_new<AsmJSModule>(Move(code), *metadata, *staticLinkData, *exportMap,
+                                           *asmJSMetadata_);
+        if (!module)
+            return false;
+
+        moduleObj->init(*module);
+        return true;
     }
 };
 
 /*****************************************************************************/
 // Numeric literal utilities
 
 static bool
 IsNumericNonFloatLiteral(ParseNode* pn)
@@ -8044,189 +8061,168 @@ AsmJSGlobal::serializedSize() const
 const uint8_t*
 AsmJSGlobal::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
 {
     (cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
     (cursor = DeserializeName(cx, cursor, &name_));
     return cursor;
 }
 
-bool
-AsmJSGlobal::clone(JSContext* cx, AsmJSGlobal* out) const
-{
-    *out = *this;
-    return true;
-}
-
 size_t
-AsmJSModuleData::serializedSize() const
+AsmJSMetadata::serializedSize() const
 {
     return sizeof(pod()) +
            SerializedVectorSize(globals) +
            SerializedPodVectorSize(imports) +
            SerializedPodVectorSize(exports) +
            SerializedNameSize(globalArgumentName) +
            SerializedNameSize(importArgumentName) +
            SerializedNameSize(bufferArgumentName);
 }
 
 uint8_t*
-AsmJSModuleData::serialize(uint8_t* cursor) const
+AsmJSMetadata::serialize(uint8_t* cursor) const
 {
     cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
     cursor = SerializeVector(cursor, globals);
     cursor = SerializePodVector(cursor, imports);
     cursor = SerializePodVector(cursor, exports);
     cursor = SerializeName(cursor, globalArgumentName);
     cursor = SerializeName(cursor, importArgumentName);
     cursor = SerializeName(cursor, bufferArgumentName);
     return cursor;
 }
 
 const uint8_t*
-AsmJSModuleData::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+AsmJSMetadata::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
 {
     (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
     (cursor = DeserializeVector(cx, cursor, &globals)) &&
     (cursor = DeserializePodVector(cx, cursor, &imports)) &&
     (cursor = DeserializePodVector(cx, cursor, &exports)) &&
     (cursor = DeserializeName(cx, cursor, &globalArgumentName)) &&
     (cursor = DeserializeName(cx, cursor, &importArgumentName)) &&
     (cursor = DeserializeName(cx, cursor, &bufferArgumentName));
+    cacheResult = CacheResult::Hit;
     return cursor;
 }
 
-bool
-AsmJSModuleData::clone(JSContext* cx, AsmJSModuleData* out) const
-{
-    out->pod() = pod();
-    out->globalArgumentName = globalArgumentName;
-    out->importArgumentName = importArgumentName;
-    out->bufferArgumentName = bufferArgumentName;
-    out->srcStart = srcStart;
-    out->srcBodyStart = srcBodyStart;
-    out->strict = strict;
-    out->scriptSource.reset(scriptSource.get());
-    return CloneVector(cx, globals, &out->globals) &&
-           ClonePodVector(cx, imports, &out->imports) &&
-           ClonePodVector(cx, exports, &out->exports);
-}
-
 size_t
-AsmJSModuleData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+AsmJSMetadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return globals.sizeOfExcludingThis(mallocSizeOf) +
            imports.sizeOfExcludingThis(mallocSizeOf) +
            exports.sizeOfExcludingThis(mallocSizeOf);
 }
 
 size_t
 AsmJSModule::serializedSize() const
 {
-    return base().serializedSize() +
-           link_->serializedSize() +
+    return codeSegment().serializedSize() +
+           metadata().serializedSize() +
+           staticLinkData_->serializedSize() +
            exportMap_->serializedSize() +
-           module_->serializedSize();
+           asmJSMetadata_->serializedSize();
 }
 
 uint8_t*
 AsmJSModule::serialize(uint8_t* cursor) const
 {
-    cursor = base().serialize(cursor);
-    cursor = link_->serialize(cursor);
+    cursor = codeSegment().serialize(cursor);
+    cursor = metadata().serialize(cursor);
+    cursor = staticLinkData_->serialize(cursor);
     cursor = exportMap_->serialize(cursor);
-    cursor = module_->serialize(cursor);
+    cursor = asmJSMetadata_->serialize(cursor);
     return cursor;
 }
 
 /* static */ const uint8_t*
 AsmJSModule::deserialize(ExclusiveContext* cx, const uint8_t* cursor, AsmJSParser& parser,
                          MutableHandle<WasmModuleObject*> moduleObj)
 {
     moduleObj.set(WasmModuleObject::create(cx));
     if (!moduleObj)
         return nullptr;
 
     // Deserialization GC-allocates a bunch of atoms and stores them in unrooted
     // Vectors so, for simplicity, inhibit GC of the atoms zone.
     AutoKeepAtoms aka(cx->perThreadData);
 
-    UniqueModuleData base = cx->make_unique<ModuleData>();
-    if (!base)
+    UniqueCodeSegment code = MakeUnique<CodeSegment>();
+    if (!code)
         return nullptr;
-    cursor = base->deserialize(cx, cursor);
+    cursor = code->deserialize(cx, cursor);
     if (!cursor)
         return nullptr;
 
-    MOZ_ASSERT(!base->loadedFromCache);
-    base->loadedFromCache = true;
-
-    UniqueStaticLinkData link = cx->make_unique<StaticLinkData>();
-    if (!link)
+    MutableMetadata metadata = js_new<Metadata>();
+    if (!metadata)
         return nullptr;
-    cursor = link->deserialize(cx, cursor);
+    cursor = metadata->deserialize(cx, cursor);
     if (!cursor)
         return nullptr;
 
-    UniqueExportMap exportMap = cx->make_unique<ExportMap>();
+    MutableStaticLinkData staticLinkData = cx->new_<StaticLinkData>();
+    if (!staticLinkData)
+        return nullptr;
+    cursor = staticLinkData->deserialize(cx, cursor);
+    if (!cursor)
+        return nullptr;
+
+    MutableExportMap exportMap = cx->new_<ExportMap>();
     if (!exportMap)
         return nullptr;
     cursor = exportMap->deserialize(cx, cursor);
     if (!cursor)
         return nullptr;
 
-    UniqueAsmJSModuleData module = cx->make_unique<AsmJSModuleData>();
-    if (!module)
+    MutableAsmJSMetadata asmJSMetadata = cx->new_<AsmJSMetadata>();
+    if (!asmJSMetadata)
         return nullptr;
-    cursor = module->deserialize(cx, cursor);
+    cursor = asmJSMetadata->deserialize(cx, cursor);
     if (!cursor)
         return nullptr;
 
-    // See AsmJSModuleData comment as well as ModuleValidator::init().
-    module->srcStart = parser.pc->maybeFunction->pn_body->pn_pos.begin;
-    module->srcBodyStart = parser.tokenStream.currentToken().pos.end;
-    module->strict = parser.pc->sc->strict() && !parser.pc->sc->hasExplicitUseStrict();
-    module->scriptSource.reset(parser.ss);
-
-    if (!moduleObj->init(js_new<AsmJSModule>(Move(base), Move(link), Move(exportMap), Move(module))))
+    // See AsmJSMetadata comment as well as ModuleValidator::init().
+    asmJSMetadata->srcStart = parser.pc->maybeFunction->pn_body->pn_pos.begin;
+    asmJSMetadata->srcBodyStart = parser.tokenStream.currentToken().pos.end;
+    asmJSMetadata->strict = parser.pc->sc->strict() && !parser.pc->sc->hasExplicitUseStrict();
+    asmJSMetadata->scriptSource.reset(parser.ss);
+
+    auto* module = js_new<AsmJSModule>(Move(code), *metadata, *staticLinkData, *exportMap,
+                                       *asmJSMetadata);
+    if (!module)
         return nullptr;
 
+    moduleObj->init(*module);
     return cursor;
 }
 
 bool
 AsmJSModule::clone(JSContext* cx, MutableHandle<WasmModuleObject*> moduleObj) const
 {
     moduleObj.set(WasmModuleObject::create(cx));
     if (!moduleObj)
         return false;
 
     // Prevent any GC that may move the temporarily-unrooted atoms being cloned.
     AutoKeepAtoms aka(cx->perThreadData);
 
-    UniqueModuleData base = cx->make_unique<ModuleData>();
-    if (!base || !this->base().clone(cx, base.get()))
-        return false;
-
-    UniqueStaticLinkData link = cx->make_unique<StaticLinkData>();
-    if (!link || !link_->clone(cx, link.get()))
-        return false;
-
-    UniqueExportMap exportMap = cx->make_unique<ExportMap>();
-    if (!exportMap || !exportMap_->clone(cx, exportMap.get()))
-        return false;
-
-    UniqueAsmJSModuleData module = cx->make_unique<AsmJSModuleData>();
-    if (!module || !module_->clone(cx, module.get()))
-        return false;
-
-    if (!moduleObj->init(js_new<AsmJSModule>(Move(base), Move(link), Move(exportMap), Move(module))))
-        return false;
-
-    return Module::clone(cx, *link_, &moduleObj->module());
+    UniqueCodeSegment code = CodeSegment::clone(cx, codeSegment());
+    if (!code)
+        return false;
+
+    auto* module = js_new<AsmJSModule>(Move(code), metadata(), *staticLinkData_, *exportMap_,
+                                       *asmJSMetadata_);
+    if (!module)
+        return false;
+
+    moduleObj->init(*module);
+
+    return Module::clone(cx, *staticLinkData_, &moduleObj->module());
 }
 
 namespace {
 
 struct PropertyNameWrapper
 {
     PropertyName* name;
 
@@ -8777,17 +8773,17 @@ js::IsAsmJSModuleLoadedFromCache(JSConte
     JSFunction* fun = MaybeWrappedNativeFunction(args.get(0));
     if (!fun || !IsAsmJSModule(fun)) {
         JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_USE_ASM_TYPE_FAIL,
                              "argument passed to isAsmJSModuleLoadedFromCache is not a "
                              "validated asm.js module");
         return false;
     }
 
-    bool loadedFromCache = AsmJSModuleToModuleObject(fun)->module().loadedFromCache();
+    bool loadedFromCache = AsmJSModuleToModuleObject(fun)->module().asAsmJS().loadedFromCache();
 
     args.rval().set(BooleanValue(loadedFromCache));
     return true;
 }
 
 /*****************************************************************************/
 // asm.js toString/toSource support
 
--- a/js/src/asmjs/Wasm.cpp
+++ b/js/src/asmjs/Wasm.cpp
@@ -1097,17 +1097,17 @@ DecodeNameSection(JSContext* cx, Decoder
         return Fail(cx, d, "names section byte size mismatch");
 
     return true;
 }
 
 
 static bool
 DecodeModule(JSContext* cx, UniqueChars file, const uint8_t* bytes, uint32_t length,
-             ImportNameVector* importNames, UniqueExportMap* exportMap,
+             ImportNameVector* importNames, SharedExportMap* exportMap,
              MutableHandle<ArrayBufferObject*> heap, MutableHandle<WasmModuleObject*> moduleObj)
 {
     Decoder d(bytes, bytes + length);
 
     uint32_t u32;
     if (!d.readFixedU32(&u32) || u32 != MagicNumber)
         return Fail(cx, d, "failed to match magic number");
 
@@ -1150,30 +1150,34 @@ DecodeModule(JSContext* cx, UniqueChars 
     if (!DecodeNameSection(cx, d, &funcNames))
         return false;
 
     while (!d.done()) {
         if (!d.skipSection())
             return Fail(cx, d, "failed to skip unknown section at end");
     }
 
-    UniqueModuleData module;
-    UniqueStaticLinkData staticLink;
+    UniqueCodeSegment code;
+    SharedMetadata metadata;
+    SharedStaticLinkData staticLinkData;
     SlowFunctionVector slowFuncs(cx);
-    if (!mg.finish(Move(funcNames), &module, &staticLink, exportMap, &slowFuncs))
+    if (!mg.finish(Move(funcNames), &code, &metadata, &staticLinkData, exportMap, &slowFuncs))
         return false;
 
     moduleObj.set(WasmModuleObject::create(cx));
     if (!moduleObj)
         return false;
 
-    if (!moduleObj->init(cx->new_<Module>(Move(module))))
+    auto module = cx->new_<Module>(Move(code), *metadata);
+    if (!module)
         return false;
 
-    return moduleObj->module().staticallyLink(cx, *staticLink);
+    moduleObj->init(*module);
+
+    return moduleObj->module().staticallyLink(cx, *staticLinkData);
 }
 
 /*****************************************************************************/
 // Top-level functions
 
 bool
 wasm::HasCompilerSupport(ExclusiveContext* cx)
 {
@@ -1290,17 +1294,17 @@ wasm::Eval(JSContext* cx, Handle<TypedAr
     if (!DescribeScriptedCaller(cx, &filename))
         return false;
 
     UniqueChars file = DuplicateString(filename.get());
     if (!file)
         return false;
 
     ImportNameVector importNames;
-    UniqueExportMap exportMap;
+    SharedExportMap exportMap;
     Rooted<ArrayBufferObject*> heap(cx);
     Rooted<WasmModuleObject*> moduleObj(cx);
 
     if (!DecodeModule(cx, Move(file), bytes, length, &importNames, &exportMap, &heap, &moduleObj)) {
         if (!cx->isExceptionPending())
             ReportOutOfMemory(cx);
         return false;
     }
new file mode 100644
--- /dev/null
+++ b/js/src/asmjs/WasmCode.cpp
@@ -0,0 +1,381 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asmjs/WasmCode.h"
+
+#include "mozilla/Atomics.h"
+
+#include "asmjs/WasmSerialize.h"
+#include "jit/ExecutableAllocator.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+using mozilla::Atomic;
+
+// Limit the number of concurrent wasm code allocations per process. Note that
+// on Linux, the real maximum is ~32k, as each module requires 2 maps (RW/RX),
+// and the kernel's default max_map_count is ~65k.
+//
+// Note: this can be removed once writable/non-executable global data stops
+// being stored in the code segment.
+static Atomic<uint32_t> wasmCodeAllocations(0);
+static const uint32_t MaxWasmCodeAllocations = 16384;
+
+static uint8_t*
+AllocateCodeSegment(ExclusiveContext* cx, uint32_t totalLength)
+{
+    if (wasmCodeAllocations >= MaxWasmCodeAllocations)
+        return nullptr;
+
+    // Allocate RW memory. DynamicallyLinkModule will reprotect the code as RX.
+    unsigned permissions =
+        ExecutableAllocator::initialProtectionFlags(ExecutableAllocator::Writable);
+
+    void* p = AllocateExecutableMemory(nullptr, totalLength, permissions,
+                                       "wasm-code-segment", gc::SystemPageSize());
+    if (!p) {
+        ReportOutOfMemory(cx);
+        return nullptr;
+    }
+
+    wasmCodeAllocations++;
+    return (uint8_t*)p;
+}
+
+/* static */ UniqueCodeSegment
+CodeSegment::allocate(ExclusiveContext* cx, uint32_t codeLength, uint32_t globalDataLength)
+{
+    UniqueCodeSegment code = cx->make_unique<CodeSegment>();
+    if (!code)
+        return nullptr;
+
+    uint8_t* bytes = AllocateCodeSegment(cx, codeLength + globalDataLength);
+    if (!bytes)
+        return nullptr;
+
+    code->bytes_ = bytes;
+    code->codeLength_ = codeLength;
+    code->globalDataLength_ = globalDataLength;
+    return code;
+}
+
+/* static */ UniqueCodeSegment
+CodeSegment::clone(ExclusiveContext* cx, const CodeSegment& src)
+{
+    UniqueCodeSegment dst = allocate(cx, src.codeLength_, src.globalDataLength_);
+    if (!dst)
+        return nullptr;
+
+    memcpy(dst->code(), src.code(), src.codeLength());
+    return dst;
+}
+
+CodeSegment::~CodeSegment()
+{
+    if (!bytes_) {
+        MOZ_ASSERT(!totalLength());
+        return;
+    }
+
+    MOZ_ASSERT(wasmCodeAllocations > 0);
+    wasmCodeAllocations--;
+
+    MOZ_ASSERT(totalLength() > 0);
+    DeallocateExecutableMemory(bytes_, totalLength(), gc::SystemPageSize());
+}
+
+size_t
+CodeSegment::serializedSize() const
+{
+    return sizeof(uint32_t) +
+           sizeof(uint32_t) +
+           codeLength_;
+}
+
+uint8_t*
+CodeSegment::serialize(uint8_t* cursor) const
+{
+    cursor = WriteScalar<uint32_t>(cursor, codeLength_);
+    cursor = WriteScalar<uint32_t>(cursor, globalDataLength_);
+    cursor = WriteBytes(cursor, bytes_, codeLength_);
+    return cursor;
+}
+
+const uint8_t*
+CodeSegment::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+    cursor = ReadScalar<uint32_t>(cursor, &codeLength_);
+    cursor = ReadScalar<uint32_t>(cursor, &globalDataLength_);
+
+    bytes_ = AllocateCodeSegment(cx, codeLength_ + globalDataLength_);
+    if (!bytes_)
+        return nullptr;
+
+    cursor = ReadBytes(cursor, bytes_, codeLength_);
+    return cursor;
+}
+
+static size_t
+SerializedSigSize(const Sig& sig)
+{
+    return sizeof(ExprType) +
+           SerializedPodVectorSize(sig.args());
+}
+
+static uint8_t*
+SerializeSig(uint8_t* cursor, const Sig& sig)
+{
+    cursor = WriteScalar<ExprType>(cursor, sig.ret());
+    cursor = SerializePodVector(cursor, sig.args());
+    return cursor;
+}
+
+static const uint8_t*
+DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, Sig* sig)
+{
+    ExprType ret;
+    cursor = ReadScalar<ExprType>(cursor, &ret);
+
+    ValTypeVector args;
+    cursor = DeserializePodVector(cx, cursor, &args);
+    if (!cursor)
+        return nullptr;
+
+    *sig = Sig(Move(args), ret);
+    return cursor;
+}
+
+static size_t
+SizeOfSigExcludingThis(const Sig& sig, MallocSizeOf mallocSizeOf)
+{
+    return sig.args().sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t
+Export::serializedSize() const
+{
+    return SerializedSigSize(sig_) +
+           sizeof(pod);
+}
+
+uint8_t*
+Export::serialize(uint8_t* cursor) const
+{
+    cursor = SerializeSig(cursor, sig_);
+    cursor = WriteBytes(cursor, &pod, sizeof(pod));
+    return cursor;
+}
+
+const uint8_t*
+Export::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
+    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+    return cursor;
+}
+
+size_t
+Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    return SizeOfSigExcludingThis(sig_, mallocSizeOf);
+}
+
+size_t
+Import::serializedSize() const
+{
+    return SerializedSigSize(sig_) +
+           sizeof(pod);
+}
+
+uint8_t*
+Import::serialize(uint8_t* cursor) const
+{
+    cursor = SerializeSig(cursor, sig_);
+    cursor = WriteBytes(cursor, &pod, sizeof(pod));
+    return cursor;
+}
+
+const uint8_t*
+Import::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
+    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+    return cursor;
+}
+
+size_t
+Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    return SizeOfSigExcludingThis(sig_, mallocSizeOf);
+}
+
+CodeRange::CodeRange(Kind kind, Offsets offsets)
+  : begin_(offsets.begin),
+    profilingReturn_(0),
+    end_(offsets.end),
+    funcIndex_(0),
+    funcLineOrBytecode_(0),
+    funcBeginToTableEntry_(0),
+    funcBeginToTableProfilingJump_(0),
+    funcBeginToNonProfilingEntry_(0),
+    funcProfilingJumpToProfilingReturn_(0),
+    funcProfilingEpilogueToProfilingReturn_(0),
+    kind_(kind)
+{
+    MOZ_ASSERT(begin_ <= end_);
+    MOZ_ASSERT(kind_ == Entry || kind_ == Inline || kind_ == CallThunk);
+}
+
+CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
+  : begin_(offsets.begin),
+    profilingReturn_(offsets.profilingReturn),
+    end_(offsets.end),
+    funcIndex_(0),
+    funcLineOrBytecode_(0),
+    funcBeginToTableEntry_(0),
+    funcBeginToTableProfilingJump_(0),
+    funcBeginToNonProfilingEntry_(0),
+    funcProfilingJumpToProfilingReturn_(0),
+    funcProfilingEpilogueToProfilingReturn_(0),
+    kind_(kind)
+{
+    MOZ_ASSERT(begin_ < profilingReturn_);
+    MOZ_ASSERT(profilingReturn_ < end_);
+    MOZ_ASSERT(kind_ == ImportJitExit || kind_ == ImportInterpExit);
+}
+
+CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
+  : begin_(offsets.begin),
+    profilingReturn_(offsets.profilingReturn),
+    end_(offsets.end),
+    funcIndex_(funcIndex),
+    funcLineOrBytecode_(funcLineOrBytecode),
+    funcBeginToTableEntry_(offsets.tableEntry - begin_),
+    funcBeginToTableProfilingJump_(offsets.tableProfilingJump - begin_),
+    funcBeginToNonProfilingEntry_(offsets.nonProfilingEntry - begin_),
+    funcProfilingJumpToProfilingReturn_(profilingReturn_ - offsets.profilingJump),
+    funcProfilingEpilogueToProfilingReturn_(profilingReturn_ - offsets.profilingEpilogue),
+    kind_(Function)
+{
+    MOZ_ASSERT(begin_ < profilingReturn_);
+    MOZ_ASSERT(profilingReturn_ < end_);
+    MOZ_ASSERT(funcBeginToTableEntry_ == offsets.tableEntry - begin_);
+    MOZ_ASSERT(funcBeginToTableProfilingJump_ == offsets.tableProfilingJump - begin_);
+    MOZ_ASSERT(funcBeginToNonProfilingEntry_ == offsets.nonProfilingEntry - begin_);
+    MOZ_ASSERT(funcProfilingJumpToProfilingReturn_ == profilingReturn_ - offsets.profilingJump);
+    MOZ_ASSERT(funcProfilingEpilogueToProfilingReturn_ == profilingReturn_ - offsets.profilingEpilogue);
+}
+
+static size_t
+NullableStringLength(const char* chars)
+{
+    return chars ? strlen(chars) : 0;
+}
+
+size_t
+CacheableChars::serializedSize() const
+{
+    return sizeof(uint32_t) + NullableStringLength(get());
+}
+
+uint8_t*
+CacheableChars::serialize(uint8_t* cursor) const
+{
+    uint32_t length = NullableStringLength(get());
+    cursor = WriteBytes(cursor, &length, sizeof(uint32_t));
+    cursor = WriteBytes(cursor, get(), length);
+    return cursor;
+}
+
+const uint8_t*
+CacheableChars::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+    uint32_t length;
+    cursor = ReadBytes(cursor, &length, sizeof(uint32_t));
+
+    reset(cx->pod_calloc<char>(length + 1));
+    if (!get())
+        return nullptr;
+
+    cursor = ReadBytes(cursor, get(), length);
+    return cursor;
+}
+
+size_t
+CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    return mallocSizeOf(get());
+}
+
+size_t
+Metadata::serializedSize() const
+{
+    return sizeof(pod()) +
+           SerializedVectorSize(imports) +
+           SerializedVectorSize(exports) +
+           SerializedPodVectorSize(heapAccesses) +
+           SerializedPodVectorSize(codeRanges) +
+           SerializedPodVectorSize(callSites) +
+           SerializedPodVectorSize(callThunks) +
+           SerializedVectorSize(prettyFuncNames) +
+           filename.serializedSize();
+}
+
+uint8_t*
+Metadata::serialize(uint8_t* cursor) const
+{
+    cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
+    cursor = SerializeVector(cursor, imports);
+    cursor = SerializeVector(cursor, exports);
+    cursor = SerializePodVector(cursor, heapAccesses);
+    cursor = SerializePodVector(cursor, codeRanges);
+    cursor = SerializePodVector(cursor, callSites);
+    cursor = SerializePodVector(cursor, callThunks);
+    cursor = SerializeVector(cursor, prettyFuncNames);
+    cursor = filename.serialize(cursor);
+    return cursor;
+}
+
+/* static */ const uint8_t*
+Metadata::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+    (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
+    (cursor = DeserializeVector(cx, cursor, &imports)) &&
+    (cursor = DeserializeVector(cx, cursor, &exports)) &&
+    (cursor = DeserializePodVector(cx, cursor, &heapAccesses)) &&
+    (cursor = DeserializePodVector(cx, cursor, &codeRanges)) &&
+    (cursor = DeserializePodVector(cx, cursor, &callSites)) &&
+    (cursor = DeserializePodVector(cx, cursor, &callThunks)) &&
+    (cursor = DeserializeVector(cx, cursor, &prettyFuncNames)) &&
+    (cursor = filename.deserialize(cx, cursor));
+    return cursor;
+}
+
+size_t
+Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    return SizeOfVectorExcludingThis(imports, mallocSizeOf) +
+           SizeOfVectorExcludingThis(exports, mallocSizeOf) +
+           heapAccesses.sizeOfExcludingThis(mallocSizeOf) +
+           codeRanges.sizeOfExcludingThis(mallocSizeOf) +
+           callSites.sizeOfExcludingThis(mallocSizeOf) +
+           callThunks.sizeOfExcludingThis(mallocSizeOf) +
+           SizeOfVectorExcludingThis(prettyFuncNames, mallocSizeOf) +
+           filename.sizeOfExcludingThis(mallocSizeOf);
+}
new file mode 100644
--- /dev/null
+++ b/js/src/asmjs/WasmCode.h
@@ -0,0 +1,342 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_code_h
+#define wasm_code_h
+
+#include "asmjs/WasmTypes.h"
+
+namespace js {
+namespace wasm {
+
+// A wasm CodeSegment owns the allocated executable code for a wasm module.
+// A CodeSegment passed to the Module constructor must be allocated via CodeSegment::allocate().
+
+class CodeSegment;
+typedef UniquePtr<CodeSegment> UniqueCodeSegment;
+
+class CodeSegment
+{
+    uint8_t* bytes_;
+    uint32_t codeLength_;
+    uint32_t globalDataLength_;
+
+    CodeSegment(const CodeSegment&) = delete;
+    void operator=(const CodeSegment&) = delete;
+
+  public:
+    static UniqueCodeSegment allocate(ExclusiveContext* cx, uint32_t codeLength, uint32_t dataLength);
+    static UniqueCodeSegment clone(ExclusiveContext* cx, const CodeSegment& code);
+    CodeSegment() : bytes_(nullptr), codeLength_(0), globalDataLength_(0) {}
+    ~CodeSegment();
+
+    uint8_t* code() const { return bytes_; }
+    uint8_t* globalData() const { return bytes_ + codeLength_; }
+    uint32_t codeLength() const { return codeLength_; }
+    uint32_t globalDataLength() const { return globalDataLength_; }
+    uint32_t totalLength() const { return codeLength_ + globalDataLength_; }
+
+    WASM_DECLARE_SERIALIZABLE(CodeSegment)
+};
+
+// An Export represents a single function inside a wasm Module that has been
+// exported one or more times.
+
+class Export
+{
+    Sig sig_;
+    struct CacheablePod {
+        uint32_t stubOffset_;
+    } pod;
+
+  public:
+    Export() = default;
+    explicit Export(Sig&& sig)
+      : sig_(Move(sig))
+    {
+        pod.stubOffset_ = UINT32_MAX;
+    }
+    void initStubOffset(uint32_t stubOffset) {
+        MOZ_ASSERT(pod.stubOffset_ == UINT32_MAX);
+        pod.stubOffset_ = stubOffset;
+    }
+
+    uint32_t stubOffset() const {
+        return pod.stubOffset_;
+    }
+    const Sig& sig() const {
+        return sig_;
+    }
+
+    WASM_DECLARE_SERIALIZABLE(Export)
+};
+
+typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
+
+// An Import describes a wasm module import. Currently, only functions can be
+// imported in wasm. A function import includes the signature used within the
+// module to call it.
+
+class Import
+{
+    Sig sig_;
+    struct CacheablePod {
+        uint32_t exitGlobalDataOffset_;
+        uint32_t interpExitCodeOffset_;
+        uint32_t jitExitCodeOffset_;
+    } pod;
+
+  public:
+    Import() = default;
+    Import(Sig&& sig, uint32_t exitGlobalDataOffset)
+      : sig_(Move(sig))
+    {
+        pod.exitGlobalDataOffset_ = exitGlobalDataOffset;
+        pod.interpExitCodeOffset_ = 0;
+        pod.jitExitCodeOffset_ = 0;
+    }
+
+    void initInterpExitOffset(uint32_t off) {
+        MOZ_ASSERT(!pod.interpExitCodeOffset_);
+        pod.interpExitCodeOffset_ = off;
+    }
+    void initJitExitOffset(uint32_t off) {
+        MOZ_ASSERT(!pod.jitExitCodeOffset_);
+        pod.jitExitCodeOffset_ = off;
+    }
+
+    const Sig& sig() const {
+        return sig_;
+    }
+    uint32_t exitGlobalDataOffset() const {
+        return pod.exitGlobalDataOffset_;
+    }
+    uint32_t interpExitCodeOffset() const {
+        return pod.interpExitCodeOffset_;
+    }
+    uint32_t jitExitCodeOffset() const {
+        return pod.jitExitCodeOffset_;
+    }
+
+    WASM_DECLARE_SERIALIZABLE(Import)
+};
+
+typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
+
+// A CodeRange describes a single contiguous range of code within a wasm
+// module's code segment. A CodeRange describes what the code does and, for
+// function bodies, the name and source coordinates of the function.
+
+class CodeRange
+{
+  public:
+    enum Kind { Function, Entry, ImportJitExit, ImportInterpExit, Inline, CallThunk };
+
+  private:
+    // All fields are treated as cacheable POD:
+    uint32_t begin_;
+    uint32_t profilingReturn_;
+    uint32_t end_;
+    uint32_t funcIndex_;
+    uint32_t funcLineOrBytecode_;
+    uint8_t funcBeginToTableEntry_;
+    uint8_t funcBeginToTableProfilingJump_;
+    uint8_t funcBeginToNonProfilingEntry_;
+    uint8_t funcProfilingJumpToProfilingReturn_;
+    uint8_t funcProfilingEpilogueToProfilingReturn_;
+    Kind kind_ : 8;
+
+  public:
+    CodeRange() = default;
+    CodeRange(Kind kind, Offsets offsets);
+    CodeRange(Kind kind, ProfilingOffsets offsets);
+    CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
+
+    // All CodeRanges have a begin and end.
+
+    uint32_t begin() const {
+        return begin_;
+    }
+    uint32_t end() const {
+        return end_;
+    }
+
+    // Other fields are only available for certain CodeRange::Kinds.
+
+    Kind kind() const {
+        return kind_;
+    }
+
+    bool isFunction() const {
+        return kind() == Function;
+    }
+    bool isImportExit() const {
+        return kind() == ImportJitExit || kind() == ImportInterpExit;
+    }
+    bool isInline() const {
+        return kind() == Inline;
+    }
+
+    // Every CodeRange except entry and inline stubs has a profiling return
+    // which is used for asynchronous profiling to determine the frame pointer.
+
+    uint32_t profilingReturn() const {
+        MOZ_ASSERT(isFunction() || isImportExit());
+        return profilingReturn_;
+    }
+
+    // Functions have offsets which allow patching to selectively execute
+    // profiling prologues/epilogues.
+
+    uint32_t funcProfilingEntry() const {
+        MOZ_ASSERT(isFunction());
+        return begin();
+    }
+    uint32_t funcTableEntry() const {
+        MOZ_ASSERT(isFunction());
+        return begin_ + funcBeginToTableEntry_;
+    }
+    uint32_t funcTableProfilingJump() const {
+        MOZ_ASSERT(isFunction());
+        return begin_ + funcBeginToTableProfilingJump_;
+    }
+    uint32_t funcNonProfilingEntry() const {
+        MOZ_ASSERT(isFunction());
+        return begin_ + funcBeginToNonProfilingEntry_;
+    }
+    uint32_t funcProfilingJump() const {
+        MOZ_ASSERT(isFunction());
+        return profilingReturn_ - funcProfilingJumpToProfilingReturn_;
+    }
+    uint32_t funcProfilingEpilogue() const {
+        MOZ_ASSERT(isFunction());
+        return profilingReturn_ - funcProfilingEpilogueToProfilingReturn_;
+    }
+    uint32_t funcIndex() const {
+        MOZ_ASSERT(isFunction());
+        return funcIndex_;
+    }
+    uint32_t funcLineOrBytecode() const {
+        MOZ_ASSERT(isFunction());
+        return funcLineOrBytecode_;
+    }
+
+    // A sorted array of CodeRanges can be looked up via BinarySearch and PC.
+
+    struct PC {
+        size_t offset;
+        explicit PC(size_t offset) : offset(offset) {}
+        bool operator==(const CodeRange& rhs) const {
+            return offset >= rhs.begin() && offset < rhs.end();
+        }
+        bool operator<(const CodeRange& rhs) const {
+            return offset < rhs.begin();
+        }
+    };
+};
+
+WASM_DECLARE_POD_VECTOR(CodeRange, CodeRangeVector)
+
+// A CallThunk describes the offset and target of thunks so that they may be
+// patched at runtime when profiling is toggled. Thunks are emitted to connect
+// callsites that are too far away from callees to fit in a single call
+// instruction's relative offset.
+
+struct CallThunk
+{
+    uint32_t offset;
+    union {
+        uint32_t funcIndex;
+        uint32_t codeRangeIndex;
+    } u;
+
+    CallThunk(uint32_t offset, uint32_t funcIndex) : offset(offset) { u.funcIndex = funcIndex; }
+    CallThunk() = default;
+};
+
+WASM_DECLARE_POD_VECTOR(CallThunk, CallThunkVector)
+
+// CacheableChars is used to cacheably store UniqueChars.
+
+struct CacheableChars : UniqueChars
+{
+    CacheableChars() = default;
+    explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
+    MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
+    WASM_DECLARE_SERIALIZABLE(CacheableChars)
+};
+
+typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
+
+// A wasm module can either use no heap, an unshared heap (ArrayBuffer) or a shared
+// heap (SharedArrayBuffer).
+
+enum class HeapUsage
+{
+    None = false,
+    Unshared = 1,
+    Shared = 2
+};
+
+static inline bool
+UsesHeap(HeapUsage heapUsage)
+{
+    return bool(heapUsage);
+}
+
+// Metadata holds all the data that is needed to describe compiled wasm code
+// at runtime (as opposed to data that is only used to statically link or
+// instantiate a module).
+//
+// Metadata is built incrementally by ModuleGenerator and then shared immutably
+// between modules.
+
+struct MetadataCacheablePod
+{
+    uint32_t              functionLength;
+    ModuleKind            kind;
+    HeapUsage             heapUsage;
+    CompileArgs           compileArgs;
+
+    MetadataCacheablePod() { mozilla::PodZero(this); }
+};
+
+struct Metadata : RefCounted<Metadata>, MetadataCacheablePod
+{
+    MetadataCacheablePod& pod() { return *this; }
+    const MetadataCacheablePod& pod() const { return *this; }
+
+    ImportVector          imports;
+    ExportVector          exports;
+    HeapAccessVector      heapAccesses;
+    CodeRangeVector       codeRanges;
+    CallSiteVector        callSites;
+    CallThunkVector       callThunks;
+    CacheableCharsVector  prettyFuncNames;
+    CacheableChars        filename;
+
+    WASM_DECLARE_SERIALIZABLE(Metadata);
+};
+
+typedef RefPtr<Metadata> MutableMetadata;
+typedef RefPtr<const Metadata> SharedMetadata;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_code_h
--- a/js/src/asmjs/WasmGenerator.cpp
+++ b/js/src/asmjs/WasmGenerator.cpp
@@ -35,16 +35,17 @@ using mozilla::MakeEnumeratedRange;
 // ModuleGenerator
 
 static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
 static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
 
 ModuleGenerator::ModuleGenerator(ExclusiveContext* cx)
   : cx_(cx),
     jcx_(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread())),
+    globalDataLength_(InitialGlobalDataBytes),
     slowFuncs_(cx),
     numSigs_(0),
     lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
     alloc_(&lifo_),
     masm_(MacroAssembler::AsmJSToken(), alloc_),
     funcIndexToExport_(cx),
     lastPatchedCallsite_(0),
     startOfUnpatchedBranches_(0),
@@ -110,55 +111,54 @@ ParallelCompilationEnabled(ExclusiveCont
 }
 
 bool
 ModuleGenerator::init(UniqueModuleGeneratorData shared, UniqueChars filename)
 {
     if (!funcIndexToExport_.init())
         return false;
 
-    module_ = MakeUnique<ModuleData>();
-    if (!module_)
+    metadata_ = js_new<Metadata>();
+    if (!metadata_)
         return false;
 
-    module_->globalBytes = InitialGlobalDataBytes;
-    module_->compileArgs = shared->args;
-    module_->kind = shared->kind;
-    module_->heapUsage = HeapUsage::None;
-    module_->filename = Move(filename);
+    metadata_->compileArgs = shared->args;
+    metadata_->kind = shared->kind;
+    metadata_->heapUsage = HeapUsage::None;
+    metadata_->filename = Move(filename);
 
-    exportMap_ = MakeUnique<ExportMap>();
+    exportMap_ = js_new<ExportMap>();
     if (!exportMap_)
         return false;
 
     shared_ = Move(shared);
 
     // For asm.js, the Vectors in ModuleGeneratorData are max-sized reservations
     // and will be initialized in a linear order via init* functions as the
     // module is generated. For wasm, the Vectors are correctly-sized and
     // already initialized.
 
-    if (module_->kind == ModuleKind::Wasm) {
+    if (metadata_->kind == ModuleKind::Wasm) {
         numSigs_ = shared_->sigs.length();
-        module_->globalBytes = AlignBytes(module_->globalBytes, sizeof(void*));
+        globalDataLength_ = AlignBytes(globalDataLength_, sizeof(void*));
 
         for (ImportModuleGeneratorData& import : shared_->imports) {
             MOZ_ASSERT(!import.globalDataOffset);
-            import.globalDataOffset = module_->globalBytes;
-            module_->globalBytes += Module::SizeOfImportExit;
+            import.globalDataOffset = globalDataLength_;
+            globalDataLength_ += Module::SizeOfImportExit;
             if (!addImport(*import.sig, import.globalDataOffset))
                 return false;
         }
 
-        MOZ_ASSERT(module_->globalBytes % sizeof(void*) == 0);
+        MOZ_ASSERT(globalDataLength_ % sizeof(void*) == 0);
         MOZ_ASSERT(shared_->asmJSSigToTable.empty());
         MOZ_ASSERT(shared_->wasmTable.numElems == shared_->wasmTable.elemFuncIndices.length());
         MOZ_ASSERT(!shared_->wasmTable.globalDataOffset);
-        shared_->wasmTable.globalDataOffset = module_->globalBytes;
-        module_->globalBytes += shared_->wasmTable.numElems * sizeof(void*);
+        shared_->wasmTable.globalDataOffset = globalDataLength_;
+        globalDataLength_ += shared_->wasmTable.numElems * sizeof(void*);
     }
 
     return true;
 }
 
 bool
 ModuleGenerator::finishOutstandingTask()
 {
@@ -194,17 +194,17 @@ ModuleGenerator::funcIsDefined(uint32_t 
     return funcIndex < funcIndexToCodeRange_.length() &&
            funcIndexToCodeRange_[funcIndex] != BadCodeRange;
 }
 
 const CodeRange&
 ModuleGenerator::funcCodeRange(uint32_t funcIndex) const
 {
     MOZ_ASSERT(funcIsDefined(funcIndex));
-    const CodeRange& cr = module_->codeRanges[funcIndexToCodeRange_[funcIndex]];
+    const CodeRange& cr = metadata_->codeRanges[funcIndexToCodeRange_[funcIndex]];
     MOZ_ASSERT(cr.isFunction());
     return cr;
 }
 
 static uint32_t
 JumpRange()
 {
     return Min(JitOptions.jumpThreshold, JumpImmediateRange);
@@ -246,19 +246,19 @@ ModuleGenerator::convertOutOfRangeBranch
         if (!p) {
             Offsets offsets;
             offsets.begin = masm_.currentOffset();
             uint32_t thunkOffset = masm_.thunkWithPatch().offset();
             if (masm_.oom())
                 return false;
             offsets.end = masm_.currentOffset();
 
-            if (!module_->codeRanges.emplaceBack(CodeRange::CallThunk, offsets))
+            if (!metadata_->codeRanges.emplaceBack(CodeRange::CallThunk, offsets))
                 return false;
-            if (!module_->callThunks.emplaceBack(thunkOffset, cs.targetIndex()))
+            if (!metadata_->callThunks.emplaceBack(thunkOffset, cs.targetIndex()))
                 return false;
             if (!alreadyThunked.add(p, cs.targetIndex(), offsets.begin))
                 return false;
         }
 
         masm_.patchCall(callerOffset, p->value());
     }
 
@@ -277,17 +277,17 @@ ModuleGenerator::convertOutOfRangeBranch
 
         Offsets offsets;
         offsets.begin = masm_.currentOffset();
         uint32_t thunkOffset = masm_.thunkWithPatch().offset();
         if (masm_.oom())
             return false;
         offsets.end = masm_.currentOffset();
 
-        if (!module_->codeRanges.emplaceBack(CodeRange::Inline, offsets))
+        if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, offsets))
             return false;
         if (!jumpThunks_[target].append(thunkOffset))
             return false;
     }
 
     // Unlike callsites, which need to be persisted in the Module, we can simply
     // flush jump sites after each patching pass.
     masm_.clearJumpSites();
@@ -311,18 +311,18 @@ ModuleGenerator::finishTask(IonCompileTa
     }
 
     // Offset the recorded FuncOffsets by the offset of the function in the
     // whole module's code segment.
     uint32_t offsetInWhole = masm_.size();
     results.offsets().offsetBy(offsetInWhole);
 
     // Add the CodeRange for this function.
-    uint32_t funcCodeRangeIndex = module_->codeRanges.length();
-    if (!module_->codeRanges.emplaceBack(func.index(), func.lineOrBytecode(), results.offsets()))
+    uint32_t funcCodeRangeIndex = metadata_->codeRanges.length();
+    if (!metadata_->codeRanges.emplaceBack(func.index(), func.lineOrBytecode(), results.offsets()))
         return false;
 
     // Maintain a mapping from function index to CodeRange index.
     if (func.index() >= funcIndexToCodeRange_.length()) {
         uint32_t n = func.index() - funcIndexToCodeRange_.length() + 1;
         if (!funcIndexToCodeRange_.appendN(BadCodeRange, n))
             return false;
     }
@@ -364,84 +364,84 @@ ModuleGenerator::finishCodegen(StaticLin
     {
         TempAllocator alloc(&lifo_);
         MacroAssembler masm(MacroAssembler::AsmJSToken(), alloc);
 
         if (!entries.resize(numExports()))
             return false;
         for (uint32_t i = 0; i < numExports(); i++) {
             uint32_t target = exportMap_->exportFuncIndices[i];
-            const Sig& sig = module_->exports[i].sig();
+            const Sig& sig = metadata_->exports[i].sig();
             entries[i] = GenerateEntry(masm, target, sig, usesHeap());
         }
 
         if (!interpExits.resize(numImports()))
             return false;
         if (!jitExits.resize(numImports()))
             return false;
         for (uint32_t i = 0; i < numImports(); i++) {
-            interpExits[i] = GenerateInterpExit(masm, module_->imports[i], i);
-            jitExits[i] = GenerateJitExit(masm, module_->imports[i], usesHeap());
+            interpExits[i] = GenerateInterpExit(masm, metadata_->imports[i], i);
+            jitExits[i] = GenerateJitExit(masm, metadata_->imports[i], usesHeap());
         }
 
         for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit))
             jumpTargets[target] = GenerateJumpTarget(masm, target);
 
         interruptExit = GenerateInterruptStub(masm);
 
         if (masm.oom() || !masm_.asmMergeWith(masm))
             return false;
     }
 
     // Adjust each of the resulting Offsets (to account for being merged into
     // masm_) and then create code ranges for all the stubs.
 
     for (uint32_t i = 0; i < numExports(); i++) {
         entries[i].offsetBy(offsetInWhole);
-        module_->exports[i].initStubOffset(entries[i].begin);
-        if (!module_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
+        metadata_->exports[i].initStubOffset(entries[i].begin);
+        if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
             return false;
     }
 
     for (uint32_t i = 0; i < numImports(); i++) {
         interpExits[i].offsetBy(offsetInWhole);
-        module_->imports[i].initInterpExitOffset(interpExits[i].begin);
-        if (!module_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
+        metadata_->imports[i].initInterpExitOffset(interpExits[i].begin);
+        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
             return false;
 
         jitExits[i].offsetBy(offsetInWhole);
-        module_->imports[i].initJitExitOffset(jitExits[i].begin);
-        if (!module_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
+        metadata_->imports[i].initJitExitOffset(jitExits[i].begin);
+        if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
             return false;
     }
 
     for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
         jumpTargets[target].offsetBy(offsetInWhole);
-        if (!module_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target]))
+        if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, jumpTargets[target]))
             return false;
     }
 
     interruptExit.offsetBy(offsetInWhole);
-    if (!module_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
+    if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
         return false;
 
     // Fill in StaticLinkData with the offsets of these stubs.
 
     link->pod.outOfBoundsOffset = jumpTargets[JumpTarget::OutOfBounds].begin;
     link->pod.interruptOffset = interruptExit.begin;
 
     // Only call convertOutOfRangeBranchesToThunks after all other codegen that may
     // emit new jumps to JumpTargets has finished.
 
     if (!convertOutOfRangeBranchesToThunks())
         return false;
 
     // Now that all thunks have been generated, patch all the thunks.
 
-    for (CallThunk& callThunk : module_->callThunks) {
+    for (CallThunk& callThunk : metadata_->callThunks) {
         uint32_t funcIndex = callThunk.u.funcIndex;
         callThunk.u.codeRangeIndex = funcIndexToCodeRange_[funcIndex];
         masm_.patchThunk(callThunk.offset, funcCodeRange(funcIndex).funcNonProfilingEntry());
     }
 
     for (JumpTarget target : MakeEnumeratedRange(JumpTarget::Limit)) {
         for (uint32_t thunkOffset : jumpThunks_[target])
             masm_.patchThunk(thunkOffset, jumpTargets[target].begin);
@@ -449,17 +449,17 @@ ModuleGenerator::finishCodegen(StaticLin
 
     // Code-generation is complete!
 
     masm_.finish();
     return !masm_.oom();
 }
 
 bool
-ModuleGenerator::finishStaticLinkData(uint8_t* code, uint32_t codeBytes, StaticLinkData* link)
+ModuleGenerator::finishStaticLinkData(uint8_t* code, uint32_t codeLength, StaticLinkData* link)
 {
     // Add links to absolute addresses identified symbolically.
     StaticLinkData::SymbolicLinkArray& symbolicLinks = link->symbolicLinks;
     for (size_t i = 0; i < masm_.numAsmJSAbsoluteAddresses(); i++) {
         AsmJSAbsoluteAddress src = masm_.asmJSAbsoluteAddress(i);
         if (!symbolicLinks[src.target].append(src.patchAt.offset()))
             return false;
     }
@@ -481,26 +481,26 @@ ModuleGenerator::finishStaticLinkData(ui
 #if defined(JS_CODEGEN_X86)
     // Global data accesses in x86 need to be patched with the absolute
     // address of the global. Globals are allocated sequentially after the
     // code section so we can just use an InternalLink.
     for (size_t i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
         AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
         StaticLinkData::InternalLink inLink(StaticLinkData::InternalLink::RawPointer);
         inLink.patchAtOffset = masm_.labelToPatchOffset(a.patchAt);
-        inLink.targetOffset = codeBytes + a.globalDataOffset;
+        inLink.targetOffset = codeLength + a.globalDataOffset;
         if (!link->internalLinks.append(inLink))
             return false;
     }
 #endif
 
 #if defined(JS_CODEGEN_X64)
     // Global data accesses on x64 use rip-relative addressing and thus do
     // not need patching after deserialization.
-    uint8_t* globalData = code + codeBytes;
+    uint8_t* globalData = code + codeLength;
     for (size_t i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
         AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
         masm_.patchAsmJSGlobalAccess(a.patchAt, code, globalData, a.globalDataOffset);
     }
 #endif
 
     // Function pointer table elements
 
@@ -536,33 +536,30 @@ ModuleGenerator::finishStaticLinkData(ui
 
 bool
 ModuleGenerator::addImport(const Sig& sig, uint32_t globalDataOffset)
 {
     Sig copy;
     if (!copy.clone(sig))
         return false;
 
-    return module_->imports.emplaceBack(Move(copy), globalDataOffset);
+    return metadata_->imports.emplaceBack(Move(copy), globalDataOffset);
 }
 
 bool
 ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset)
 {
-    uint32_t globalBytes = module_->globalBytes;
-
-    uint32_t pad = ComputeByteAlignment(globalBytes, align);
-    if (UINT32_MAX - globalBytes < pad + bytes)
+    uint32_t pad = ComputeByteAlignment(globalDataLength_, align);
+    if (UINT32_MAX - globalDataLength_ < pad + bytes)
         return false;
 
-    globalBytes += pad;
-    *globalDataOffset = globalBytes;
-    globalBytes += bytes;
+    globalDataLength_ += pad;
+    *globalDataOffset = globalDataLength_;
+    globalDataLength_ += bytes;
 
-    module_->globalBytes = globalBytes;
     return true;
 }
 
 bool
 ModuleGenerator::allocateGlobal(ValType type, bool isConst, uint32_t* index)
 {
     MOZ_ASSERT(!startedFuncDefs_);
     unsigned width = 0;
@@ -595,25 +592,25 @@ ModuleGenerator::allocateGlobal(ValType 
 
     *index = shared_->globals.length();
     return shared_->globals.append(GlobalDesc(type, offset, isConst));
 }
 
 void
 ModuleGenerator::initHeapUsage(HeapUsage heapUsage, uint32_t minHeapLength)
 {
-    MOZ_ASSERT(module_->heapUsage == HeapUsage::None);
-    module_->heapUsage = heapUsage;
+    MOZ_ASSERT(metadata_->heapUsage == HeapUsage::None);
+    metadata_->heapUsage = heapUsage;
     shared_->minHeapLength = minHeapLength;
 }
 
 bool
 ModuleGenerator::usesHeap() const
 {
-    return UsesHeap(module_->heapUsage);
+    return UsesHeap(metadata_->heapUsage);
 }
 
 void
 ModuleGenerator::initSig(uint32_t sigIndex, Sig&& sig)
 {
     MOZ_ASSERT(isAsmJS());
     MOZ_ASSERT(sigIndex == numSigs_);
     numSigs_++;
@@ -658,31 +655,31 @@ bool
 ModuleGenerator::initImport(uint32_t importIndex, uint32_t sigIndex)
 {
     MOZ_ASSERT(isAsmJS());
 
     uint32_t globalDataOffset;
     if (!allocateGlobalBytes(Module::SizeOfImportExit, sizeof(void*), &globalDataOffset))
         return false;
 
-    MOZ_ASSERT(importIndex == module_->imports.length());
+    MOZ_ASSERT(importIndex == metadata_->imports.length());
     if (!addImport(sig(sigIndex), globalDataOffset))
         return false;
 
     ImportModuleGeneratorData& import = shared_->imports[importIndex];
     MOZ_ASSERT(!import.sig);
     import.sig = &shared_->sigs[sigIndex];
     import.globalDataOffset = globalDataOffset;
     return true;
 }
 
 uint32_t
 ModuleGenerator::numImports() const
 {
-    return module_->imports.length();
+    return metadata_->imports.length();
 }
 
 const ImportModuleGeneratorData&
 ModuleGenerator::import(uint32_t index) const
 {
     MOZ_ASSERT(shared_->imports[index].sig);
     return shared_->imports[index];
 }
@@ -695,36 +692,36 @@ ModuleGenerator::declareExport(UniqueCha
 
     FuncIndexMap::AddPtr p = funcIndexToExport_.lookupForAdd(funcIndex);
     if (p) {
         if (exportIndex)
             *exportIndex = p->value();
         return exportMap_->fieldsToExports.append(p->value());
     }
 
-    uint32_t newExportIndex = module_->exports.length();
+    uint32_t newExportIndex = metadata_->exports.length();
     MOZ_ASSERT(newExportIndex < MaxExports);
 
     if (exportIndex)
         *exportIndex = newExportIndex;
 
     Sig copy;
     if (!copy.clone(funcSig(funcIndex)))
         return false;
 
-    return module_->exports.append(Move(copy)) &&
+    return metadata_->exports.append(Move(copy)) &&
            funcIndexToExport_.add(p, funcIndex, newExportIndex) &&
            exportMap_->fieldsToExports.append(newExportIndex) &&
            exportMap_->exportFuncIndices.append(funcIndex);
 }
 
 uint32_t
 ModuleGenerator::numExports() const
 {
-    return module_->exports.length();
+    return metadata_->exports.length();
 }
 
 bool
 ModuleGenerator::addMemoryExport(UniqueChars fieldName)
 {
     return exportMap_->fieldNames.append(Move(fieldName)) &&
            exportMap_->fieldsToExports.append(MemoryExport);
 }
@@ -841,17 +838,17 @@ ModuleGenerator::finishFuncDefs()
     while (outstanding_ > 0) {
         if (!finishOutstandingTask())
             return false;
     }
 
     for (uint32_t funcIndex = 0; funcIndex < funcIndexToCodeRange_.length(); funcIndex++)
         MOZ_ASSERT(funcIsDefined(funcIndex));
 
-    module_->functionBytes = masm_.size();
+    metadata_->functionLength = masm_.size();
     finishedFuncDefs_ = true;
     return true;
 }
 
 bool
 ModuleGenerator::initSigTableLength(uint32_t sigIndex, uint32_t numElems)
 {
     MOZ_ASSERT(isAsmJS());
@@ -879,74 +876,76 @@ ModuleGenerator::initSigTableElems(uint3
     MOZ_ASSERT(table.numElems == elemFuncIndices.length());
 
     MOZ_ASSERT(table.elemFuncIndices.empty());
     table.elemFuncIndices = Move(elemFuncIndices);
 }
 
 bool
 ModuleGenerator::finish(CacheableCharsVector&& prettyFuncNames,
-                        UniqueModuleData* module,
-                        UniqueStaticLinkData* linkData,
-                        UniqueExportMap* exportMap,
+                        UniqueCodeSegment* codeSegment,
+                        SharedMetadata* metadata,
+                        SharedStaticLinkData* staticLinkDataOut,
+                        SharedExportMap* exportMap,
                         SlowFunctionVector* slowFuncs)
 {
     MOZ_ASSERT(!activeFunc_);
     MOZ_ASSERT(finishedFuncDefs_);
 
-    UniqueStaticLinkData link = MakeUnique<StaticLinkData>();
-    if (!link)
+    MutableStaticLinkData staticLinkData = js_new<StaticLinkData>();
+    if (!staticLinkData)
         return false;
 
-    if (!finishCodegen(link.get()))
+    if (!finishCodegen(staticLinkData.get()))
         return false;
 
-    module_->prettyFuncNames = Move(prettyFuncNames);
+    metadata_->prettyFuncNames = Move(prettyFuncNames);
 
     // Start global data on a new page so JIT code may be given independent
     // protection flags. Note assumption that global data starts right after
     // code below.
-    module_->codeBytes = AlignBytes(masm_.bytesNeeded(), gc::SystemPageSize());
+    uint32_t codeLength = AlignBytes(masm_.bytesNeeded(), gc::SystemPageSize());
 
     // Inflate the global bytes up to page size so that the total bytes are a
     // page size (as required by the allocator functions).
-    module_->globalBytes = AlignBytes(module_->globalBytes, gc::SystemPageSize());
+    globalDataLength_ = AlignBytes(globalDataLength_, gc::SystemPageSize());
 
     // Allocate the code (guarded by a UniquePtr until it is given to the Module).
-    module_->code = AllocateCode(cx_, module_->totalBytes());
-    if (!module_->code)
+    UniqueCodeSegment cs = CodeSegment::allocate(cx_, codeLength, globalDataLength_);
+    if (!cs)
         return false;
 
     // Delay flushing until Module::dynamicallyLink. The flush-inhibited range
     // is set by executableCopy.
     AutoFlushICache afc("ModuleGenerator::finish", /* inhibit = */ true);
-    masm_.executableCopy(module_->code.get());
+    masm_.executableCopy(cs->code());
 
     // c.f. JitCode::copyFrom
     MOZ_ASSERT(masm_.jumpRelocationTableBytes() == 0);
     MOZ_ASSERT(masm_.dataRelocationTableBytes() == 0);
     MOZ_ASSERT(masm_.preBarrierTableBytes() == 0);
     MOZ_ASSERT(!masm_.hasSelfReference());
 
     // Convert the CallSiteAndTargetVector (needed during generation) to a
     // CallSiteVector (what is stored in the Module).
-    if (!module_->callSites.appendAll(masm_.callSites()))
+    if (!metadata_->callSites.appendAll(masm_.callSites()))
         return false;
 
     // The MacroAssembler has accumulated all the heap accesses during codegen.
-    module_->heapAccesses = masm_.extractHeapAccesses();
+    metadata_->heapAccesses = masm_.extractHeapAccesses();
 
-    if (!finishStaticLinkData(module_->code.get(), module_->codeBytes, link.get()))
+    if (!finishStaticLinkData(cs->code(), cs->codeLength(), staticLinkData.get()))
         return false;
 
     // These Vectors can get large and the excess capacity can be significant,
     // so realloc them down to size.
-    module_->heapAccesses.podResizeToFit();
-    module_->codeRanges.podResizeToFit();
-    module_->callSites.podResizeToFit();
-    module_->callThunks.podResizeToFit();
+    metadata_->heapAccesses.podResizeToFit();
+    metadata_->codeRanges.podResizeToFit();
+    metadata_->callSites.podResizeToFit();
+    metadata_->callThunks.podResizeToFit();
 
-    *module = Move(module_);
-    *linkData = Move(link);
-    *exportMap = Move(exportMap_);
+    *codeSegment = Move(cs);
+    *metadata = metadata_.forget();
+    *staticLinkDataOut = staticLinkData.forget();
+    *exportMap = exportMap_.forget();
     *slowFuncs = Move(slowFuncs_);
     return true;
 }
--- a/js/src/asmjs/WasmGenerator.h
+++ b/js/src/asmjs/WasmGenerator.h
@@ -114,18 +114,19 @@ typedef UniquePtr<ModuleGeneratorData> U
 class MOZ_STACK_CLASS ModuleGenerator
 {
     typedef HashMap<uint32_t, uint32_t> FuncIndexMap;
 
     ExclusiveContext*               cx_;
     jit::JitContext                 jcx_;
 
     // Data handed back to the caller in finish()
-    UniqueModuleData                module_;
-    UniqueExportMap                 exportMap_;
+    uint32_t                        globalDataLength_;
+    MutableMetadata                 metadata_;
+    MutableExportMap                exportMap_;
     SlowFunctionVector              slowFuncs_;
 
     // Data scoped to the ModuleGenerator's lifetime
     UniqueModuleGeneratorData       shared_;
     uint32_t                        numSigs_;
     LifoAlloc                       lifo_;
     jit::TempAllocator              alloc_;
     jit::MacroAssembler             masm_;
@@ -147,28 +148,28 @@ class MOZ_STACK_CLASS ModuleGenerator
     DebugOnly<bool>                 finishedFuncDefs_;
 
     MOZ_MUST_USE bool finishOutstandingTask();
     bool funcIsDefined(uint32_t funcIndex) const;
     const CodeRange& funcCodeRange(uint32_t funcIndex) const;
     MOZ_MUST_USE bool convertOutOfRangeBranchesToThunks();
     MOZ_MUST_USE bool finishTask(IonCompileTask* task);
     MOZ_MUST_USE bool finishCodegen(StaticLinkData* link);
-    MOZ_MUST_USE bool finishStaticLinkData(uint8_t* code, uint32_t codeBytes, StaticLinkData* link);
+    MOZ_MUST_USE bool finishStaticLinkData(uint8_t* code, uint32_t codeLength, StaticLinkData* link);
     MOZ_MUST_USE bool addImport(const Sig& sig, uint32_t globalDataOffset);
     MOZ_MUST_USE bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOff);
 
   public:
     explicit ModuleGenerator(ExclusiveContext* cx);
     ~ModuleGenerator();
 
     MOZ_MUST_USE bool init(UniqueModuleGeneratorData shared, UniqueChars filename);
 
-    bool isAsmJS() const { return module_->kind == ModuleKind::AsmJS; }
-    CompileArgs args() const { return module_->compileArgs; }
+    bool isAsmJS() const { return metadata_->kind == ModuleKind::AsmJS; }
+    CompileArgs args() const { return metadata_->compileArgs; }
     jit::MacroAssembler& masm() { return masm_; }
 
     // Heap usage:
     void initHeapUsage(HeapUsage heapUsage, uint32_t minHeapLength = 0);
     bool usesHeap() const;
 
     // Signatures:
     uint32_t numSigs() const { return numSigs_; }
@@ -202,23 +203,24 @@ class MOZ_STACK_CLASS ModuleGenerator
     // asm.js lazy initialization:
     void initSig(uint32_t sigIndex, Sig&& sig);
     void initFuncSig(uint32_t funcIndex, uint32_t sigIndex);
     MOZ_MUST_USE bool initImport(uint32_t importIndex, uint32_t sigIndex);
     MOZ_MUST_USE bool initSigTableLength(uint32_t sigIndex, uint32_t numElems);
     void initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices);
     void bumpMinHeapLength(uint32_t newMinHeapLength);
 
-    // Return a ModuleData object which may be used to construct a Module, the
+    // Return a Metadata object which may be used to construct a Module, the
     // StaticLinkData required to call Module::staticallyLink, and the list of
     // functions that took a long time to compile.
     MOZ_MUST_USE bool finish(CacheableCharsVector&& prettyFuncNames,
-                             UniqueModuleData* module,
-                             UniqueStaticLinkData* staticLinkData,
-                             UniqueExportMap* exportMap,
+                             UniqueCodeSegment* codeSegment,
+                             SharedMetadata* metadata,
+                             SharedStaticLinkData* staticLinkData,
+                             SharedExportMap* exportMap,
                              SlowFunctionVector* slowFuncs);
 };
 
 // A FunctionGenerator encapsulates the generation of a single function body.
 // ModuleGenerator::startFunc must be called after construction and before doing
 // anything else. After the body is complete, ModuleGenerator::finishFunc must
 // be called before the FunctionGenerator is destroyed and the next function is
 // started.
--- a/js/src/asmjs/WasmModule.cpp
+++ b/js/src/asmjs/WasmModule.cpp
@@ -29,75 +29,39 @@
 #include "asmjs/WasmBinaryToText.h"
 #include "asmjs/WasmSerialize.h"
 #include "builtin/AtomicsObject.h"
 #include "builtin/SIMD.h"
 #ifdef JS_ION_PERF
 # include "jit/PerfSpewer.h"
 #endif
 #include "jit/BaselineJIT.h"
-#include "jit/ExecutableAllocator.h"
 #include "jit/JitCommon.h"
 #include "js/MemoryMetrics.h"
 #include "vm/StringBuffer.h"
 #ifdef MOZ_VTUNE
 # include "vtune/VTuneWrapper.h"
 #endif
 
 #include "jsobjinlines.h"
 
 #include "jit/MacroAssembler-inl.h"
 #include "vm/ArrayBufferObject-inl.h"
 #include "vm/TypeInference-inl.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
-using mozilla::Atomic;
 using mozilla::BinarySearch;
 using mozilla::MakeEnumeratedRange;
 using mozilla::PodCopy;
 using mozilla::PodZero;
 using mozilla::Swap;
 using JS::GenericNaN;
 
-// Limit the number of concurrent wasm code allocations per process. Note that
-// on Linux, the real maximum is ~32k, as each module requires 2 maps (RW/RX),
-// and the kernel's default max_map_count is ~65k.
-static Atomic<uint32_t> wasmCodeAllocations(0);
-static const uint32_t MaxWasmCodeAllocations = 16384;
-
-UniqueCodePtr
-wasm::AllocateCode(ExclusiveContext* cx, size_t bytes)
-{
-    // Allocate RW memory. DynamicallyLinkModule will reprotect the code as RX.
-    unsigned permissions =
-        ExecutableAllocator::initialProtectionFlags(ExecutableAllocator::Writable);
-
-    void* p = nullptr;
-    if (wasmCodeAllocations++ < MaxWasmCodeAllocations)
-        p = AllocateExecutableMemory(nullptr, bytes, permissions, "asm-js-code", gc::SystemPageSize());
-    if (!p) {
-        wasmCodeAllocations--;
-        ReportOutOfMemory(cx);
-    }
-
-    return UniqueCodePtr((uint8_t*)p, CodeDeleter(bytes));
-}
-
-void
-CodeDeleter::operator()(uint8_t* p)
-{
-    MOZ_ASSERT(wasmCodeAllocations > 0);
-    wasmCodeAllocations--;
-
-    MOZ_ASSERT(bytes_ != 0);
-    DeallocateExecutableMemory(p, bytes_, gc::SystemPageSize());
-}
-
 #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
 // On MIPS, CodeLabels are instruction immediates so InternalLinks only
 // patch instruction immediates.
 StaticLinkData::InternalLink::InternalLink(Kind kind)
 {
     MOZ_ASSERT(kind == CodeLabel || kind == InstructionImmediate);
 }
 
@@ -144,26 +108,16 @@ StaticLinkData::SymbolicLinkArray::deser
     for (Uint32Vector& offsets : *this) {
         cursor = DeserializePodVector(cx, cursor, &offsets);
         if (!cursor)
             return nullptr;
     }
     return cursor;
 }
 
-bool
-StaticLinkData::SymbolicLinkArray::clone(JSContext* cx, SymbolicLinkArray* out) const
-{
-    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
-        if (!ClonePodVector(cx, (*this)[imm], &(*out)[imm]))
-            return false;
-    }
-    return true;
-}
-
 size_t
 StaticLinkData::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     size_t size = 0;
     for (const Uint32Vector& offsets : *this)
         size += offsets.sizeOfExcludingThis(mallocSizeOf);
     return size;
 }
@@ -186,23 +140,16 @@ StaticLinkData::FuncPtrTable::serialize(
 const uint8_t*
 StaticLinkData::FuncPtrTable::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
 {
     (cursor = ReadBytes(cursor, &globalDataOffset, sizeof(globalDataOffset))) &&
     (cursor = DeserializePodVector(cx, cursor, &elemOffsets));
     return cursor;
 }
 
-bool
-StaticLinkData::FuncPtrTable::clone(JSContext* cx, FuncPtrTable* out) const
-{
-    out->globalDataOffset = globalDataOffset;
-    return ClonePodVector(cx, elemOffsets, &out->elemOffsets);
-}
-
 size_t
 StaticLinkData::FuncPtrTable::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return elemOffsets.sizeOfExcludingThis(mallocSizeOf);
 }
 
 size_t
 StaticLinkData::serializedSize() const
@@ -228,254 +175,24 @@ StaticLinkData::deserialize(ExclusiveCon
 {
     (cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
     (cursor = DeserializePodVector(cx, cursor, &internalLinks)) &&
     (cursor = symbolicLinks.deserialize(cx, cursor)) &&
     (cursor = DeserializeVector(cx, cursor, &funcPtrTables));
     return cursor;
 }
 
-bool
-StaticLinkData::clone(JSContext* cx, StaticLinkData* out) const
-{
-    out->pod = pod;
-    return ClonePodVector(cx, internalLinks, &out->internalLinks) &&
-           symbolicLinks.clone(cx, &out->symbolicLinks) &&
-           CloneVector(cx, funcPtrTables, &out->funcPtrTables);
-}
-
 size_t
 StaticLinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return internalLinks.sizeOfExcludingThis(mallocSizeOf) +
            symbolicLinks.sizeOfExcludingThis(mallocSizeOf) +
            SizeOfVectorExcludingThis(funcPtrTables, mallocSizeOf);
 }
 
-static size_t
-SerializedSigSize(const Sig& sig)
-{
-    return sizeof(ExprType) +
-           SerializedPodVectorSize(sig.args());
-}
-
-static uint8_t*
-SerializeSig(uint8_t* cursor, const Sig& sig)
-{
-    cursor = WriteScalar<ExprType>(cursor, sig.ret());
-    cursor = SerializePodVector(cursor, sig.args());
-    return cursor;
-}
-
-static const uint8_t*
-DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, Sig* sig)
-{
-    ExprType ret;
-    cursor = ReadScalar<ExprType>(cursor, &ret);
-
-    ValTypeVector args;
-    cursor = DeserializePodVector(cx, cursor, &args);
-    if (!cursor)
-        return nullptr;
-
-    *sig = Sig(Move(args), ret);
-    return cursor;
-}
-
-static size_t
-SizeOfSigExcludingThis(const Sig& sig, MallocSizeOf mallocSizeOf)
-{
-    return sig.args().sizeOfExcludingThis(mallocSizeOf);
-}
-
-size_t
-Export::serializedSize() const
-{
-    return SerializedSigSize(sig_) +
-           sizeof(pod);
-}
-
-uint8_t*
-Export::serialize(uint8_t* cursor) const
-{
-    cursor = SerializeSig(cursor, sig_);
-    cursor = WriteBytes(cursor, &pod, sizeof(pod));
-    return cursor;
-}
-
-const uint8_t*
-Export::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
-    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
-    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
-    return cursor;
-}
-
-bool
-Export::clone(JSContext* cx, Export* out) const
-{
-    out->pod = pod;
-    return out->sig_.clone(sig_);
-}
-
-size_t
-Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
-{
-    return SizeOfSigExcludingThis(sig_, mallocSizeOf);
-}
-
-size_t
-Import::serializedSize() const
-{
-    return SerializedSigSize(sig_) +
-           sizeof(pod);
-}
-
-uint8_t*
-Import::serialize(uint8_t* cursor) const
-{
-    cursor = SerializeSig(cursor, sig_);
-    cursor = WriteBytes(cursor, &pod, sizeof(pod));
-    return cursor;
-}
-
-const uint8_t*
-Import::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
-    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
-    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
-    return cursor;
-}
-
-bool
-Import::clone(JSContext* cx, Import* out) const
-{
-    out->pod = pod;
-    return out->sig_.clone(sig_);
-}
-
-size_t
-Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
-{
-    return SizeOfSigExcludingThis(sig_, mallocSizeOf);
-}
-
-CodeRange::CodeRange(Kind kind, Offsets offsets)
-  : begin_(offsets.begin),
-    profilingReturn_(0),
-    end_(offsets.end),
-    funcIndex_(0),
-    funcLineOrBytecode_(0),
-    funcBeginToTableEntry_(0),
-    funcBeginToTableProfilingJump_(0),
-    funcBeginToNonProfilingEntry_(0),
-    funcProfilingJumpToProfilingReturn_(0),
-    funcProfilingEpilogueToProfilingReturn_(0),
-    kind_(kind)
-{
-    MOZ_ASSERT(begin_ <= end_);
-    MOZ_ASSERT(kind_ == Entry || kind_ == Inline || kind_ == CallThunk);
-}
-
-CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
-  : begin_(offsets.begin),
-    profilingReturn_(offsets.profilingReturn),
-    end_(offsets.end),
-    funcIndex_(0),
-    funcLineOrBytecode_(0),
-    funcBeginToTableEntry_(0),
-    funcBeginToTableProfilingJump_(0),
-    funcBeginToNonProfilingEntry_(0),
-    funcProfilingJumpToProfilingReturn_(0),
-    funcProfilingEpilogueToProfilingReturn_(0),
-    kind_(kind)
-{
-    MOZ_ASSERT(begin_ < profilingReturn_);
-    MOZ_ASSERT(profilingReturn_ < end_);
-    MOZ_ASSERT(kind_ == ImportJitExit || kind_ == ImportInterpExit);
-}
-
-CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
-  : begin_(offsets.begin),
-    profilingReturn_(offsets.profilingReturn),
-    end_(offsets.end),
-    funcIndex_(funcIndex),
-    funcLineOrBytecode_(funcLineOrBytecode),
-    funcBeginToTableEntry_(offsets.tableEntry - begin_),
-    funcBeginToTableProfilingJump_(offsets.tableProfilingJump - begin_),
-    funcBeginToNonProfilingEntry_(offsets.nonProfilingEntry - begin_),
-    funcProfilingJumpToProfilingReturn_(profilingReturn_ - offsets.profilingJump),
-    funcProfilingEpilogueToProfilingReturn_(profilingReturn_ - offsets.profilingEpilogue),
-    kind_(Function)
-{
-    MOZ_ASSERT(begin_ < profilingReturn_);
-    MOZ_ASSERT(profilingReturn_ < end_);
-    MOZ_ASSERT(funcBeginToTableEntry_ == offsets.tableEntry - begin_);
-    MOZ_ASSERT(funcBeginToTableProfilingJump_ == offsets.tableProfilingJump - begin_);
-    MOZ_ASSERT(funcBeginToNonProfilingEntry_ == offsets.nonProfilingEntry - begin_);
-    MOZ_ASSERT(funcProfilingJumpToProfilingReturn_ == profilingReturn_ - offsets.profilingJump);
-    MOZ_ASSERT(funcProfilingEpilogueToProfilingReturn_ == profilingReturn_ - offsets.profilingEpilogue);
-}
-
-static size_t
-NullableStringLength(const char* chars)
-{
-    return chars ? strlen(chars) : 0;
-}
-
-size_t
-CacheableChars::serializedSize() const
-{
-    return sizeof(uint32_t) + NullableStringLength(get());
-}
-
-uint8_t*
-CacheableChars::serialize(uint8_t* cursor) const
-{
-    uint32_t length = NullableStringLength(get());
-    cursor = WriteBytes(cursor, &length, sizeof(uint32_t));
-    cursor = WriteBytes(cursor, get(), length);
-    return cursor;
-}
-
-const uint8_t*
-CacheableChars::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
-    uint32_t length;
-    cursor = ReadBytes(cursor, &length, sizeof(uint32_t));
-
-    reset(cx->pod_calloc<char>(length + 1));
-    if (!get())
-        return nullptr;
-
-    cursor = ReadBytes(cursor, get(), length);
-    return cursor;
-}
-
-bool
-CacheableChars::clone(JSContext* cx, CacheableChars* out) const
-{
-    uint32_t length = NullableStringLength(get());
-
-    UniqueChars chars(cx->pod_calloc<char>(length + 1));
-    if (!chars)
-        return false;
-
-    PodCopy(chars.get(), get(), length);
-
-    *out = Move(chars);
-    return true;
-}
-
-size_t
-CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
-{
-    return mallocSizeOf(get());
-}
-
 size_t
 ExportMap::serializedSize() const
 {
     return SerializedVectorSize(fieldNames) +
            SerializedPodVectorSize(fieldsToExports) +
            SerializedPodVectorSize(exportFuncIndices);
 }
 
@@ -492,118 +209,24 @@ const uint8_t*
 ExportMap::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
 {
     (cursor = DeserializeVector(cx, cursor, &fieldNames)) &&
     (cursor = DeserializePodVector(cx, cursor, &fieldsToExports)) &&
     (cursor = DeserializePodVector(cx, cursor, &exportFuncIndices));
     return cursor;
 }
 
-bool
-ExportMap::clone(JSContext* cx, ExportMap* map) const
-{
-    return CloneVector(cx, fieldNames, &map->fieldNames) &&
-           ClonePodVector(cx, fieldsToExports, &map->fieldsToExports) &&
-           ClonePodVector(cx, exportFuncIndices, &map->exportFuncIndices);
-}
-
 size_t
 ExportMap::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     return SizeOfVectorExcludingThis(fieldNames, mallocSizeOf) &&
            fieldsToExports.sizeOfExcludingThis(mallocSizeOf) &&
            exportFuncIndices.sizeOfExcludingThis(mallocSizeOf);
 }
 
-size_t
-ModuleData::serializedSize() const
-{
-    return sizeof(pod()) +
-           codeBytes +
-           SerializedVectorSize(imports) +
-           SerializedVectorSize(exports) +
-           SerializedPodVectorSize(heapAccesses) +
-           SerializedPodVectorSize(codeRanges) +
-           SerializedPodVectorSize(callSites) +
-           SerializedPodVectorSize(callThunks) +
-           SerializedVectorSize(prettyFuncNames) +
-           filename.serializedSize();
-}
-
-uint8_t*
-ModuleData::serialize(uint8_t* cursor) const
-{
-    cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
-    cursor = WriteBytes(cursor, code.get(), codeBytes);
-    cursor = SerializeVector(cursor, imports);
-    cursor = SerializeVector(cursor, exports);
-    cursor = SerializePodVector(cursor, heapAccesses);
-    cursor = SerializePodVector(cursor, codeRanges);
-    cursor = SerializePodVector(cursor, callSites);
-    cursor = SerializePodVector(cursor, callThunks);
-    cursor = SerializeVector(cursor, prettyFuncNames);
-    cursor = filename.serialize(cursor);
-    return cursor;
-}
-
-/* static */ const uint8_t*
-ModuleData::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
-    cursor = ReadBytes(cursor, &pod(), sizeof(pod()));
-
-    code = AllocateCode(cx, totalBytes());
-    if (!code)
-        return nullptr;
-    cursor = ReadBytes(cursor, code.get(), codeBytes);
-
-    (cursor = DeserializeVector(cx, cursor, &imports)) &&
-    (cursor = DeserializeVector(cx, cursor, &exports)) &&
-    (cursor = DeserializePodVector(cx, cursor, &heapAccesses)) &&
-    (cursor = DeserializePodVector(cx, cursor, &codeRanges)) &&
-    (cursor = DeserializePodVector(cx, cursor, &callSites)) &&
-    (cursor = DeserializePodVector(cx, cursor, &callThunks)) &&
-    (cursor = DeserializeVector(cx, cursor, &prettyFuncNames)) &&
-    (cursor = filename.deserialize(cx, cursor));
-    return cursor;
-}
-
-bool
-ModuleData::clone(JSContext* cx, ModuleData* out) const
-{
-    out->pod() = pod();
-
-    out->code = AllocateCode(cx, totalBytes());
-    if (!out->code)
-        return false;
-    memcpy(out->code.get(), code.get(), codeBytes);
-
-    return CloneVector(cx, imports, &out->imports) &&
-           CloneVector(cx, exports, &out->exports) &&
-           ClonePodVector(cx, heapAccesses, &out->heapAccesses) &&
-           ClonePodVector(cx, codeRanges, &out->codeRanges) &&
-           ClonePodVector(cx, callSites, &out->callSites) &&
-           ClonePodVector(cx, callThunks, &out->callThunks) &&
-           CloneVector(cx, prettyFuncNames, &out->prettyFuncNames) &&
-           filename.clone(cx, &out->filename);
-}
-
-size_t
-ModuleData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
-{
-    // Module::addSizeOfMisc takes care of code and global memory.
-    return SizeOfVectorExcludingThis(imports, mallocSizeOf) +
-           SizeOfVectorExcludingThis(exports, mallocSizeOf) +
-           heapAccesses.sizeOfExcludingThis(mallocSizeOf) +
-           codeRanges.sizeOfExcludingThis(mallocSizeOf) +
-           callSites.sizeOfExcludingThis(mallocSizeOf) +
-           callThunks.sizeOfExcludingThis(mallocSizeOf) +
-           SizeOfVectorExcludingThis(prettyFuncNames, mallocSizeOf) +
-           filename.sizeOfExcludingThis(mallocSizeOf);
-}
-
 uint8_t*
 Module::rawHeapPtr() const
 {
     return const_cast<Module*>(this)->rawHeapPtr();
 }
 
 uint8_t*&
 Module::rawHeapPtr()
@@ -629,40 +252,40 @@ Module::specializeToHeap(ArrayBufferObje
     uint8_t* ptrBase = heap->dataPointerEither().unwrap(/*safe - protected by Module methods*/);
     uint32_t heapLength = heap->byteLength();
 #if defined(JS_CODEGEN_X86)
     // An access is out-of-bounds iff
     //      ptr + offset + data-type-byte-size > heapLength
     // i.e. ptr > heapLength - data-type-byte-size - offset. data-type-byte-size
     // and offset are already included in the addend so we
     // just have to add the heap length here.
-    for (const HeapAccess& access : module_->heapAccesses) {
+    for (const HeapAccess& access : metadata_->heapAccesses) {
         if (access.hasLengthCheck())
             X86Encoding::AddInt32(access.patchLengthAt(code()), heapLength);
         void* addr = access.patchHeapPtrImmAt(code());
         uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
         MOZ_ASSERT(disp <= INT32_MAX);
         X86Encoding::SetPointer(addr, (void*)(ptrBase + disp));
     }
 #elif defined(JS_CODEGEN_X64)
     // Even with signal handling being used for most bounds checks, there may be
     // atomic operations that depend on explicit checks.
     //
     // If we have any explicit bounds checks, we need to patch the heap length
     // checks at the right places. All accesses that have been recorded are the
     // only ones that need bound checks (see also
     // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
-    for (const HeapAccess& access : module_->heapAccesses) {
+    for (const HeapAccess& access : metadata_->heapAccesses) {
         // See comment above for x86 codegen.
         if (access.hasLengthCheck())
             X86Encoding::AddInt32(access.patchLengthAt(code()), heapLength);
     }
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
       defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-    for (const HeapAccess& access : module_->heapAccesses)
+    for (const HeapAccess& access : metadata_->heapAccesses)
         Assembler::UpdateBoundsCheck(heapLength, (Instruction*)(access.insnOffset() + code()));
 #endif
 
     heap_ = heap;
     rawHeapPtr() = ptrBase;
 }
 
 void
@@ -672,29 +295,29 @@ Module::despecializeFromHeap(ArrayBuffer
     // another dynamically-linked module which we are despecializing from that
     // module's heap.
     MOZ_ASSERT_IF(heap_, heap_ == heap);
     MOZ_ASSERT_IF(rawHeapPtr(), rawHeapPtr() == heap->dataPointerEither().unwrap());
 
 #if defined(JS_CODEGEN_X86)
     uint32_t heapLength = heap->byteLength();
     uint8_t* ptrBase = heap->dataPointerEither().unwrap(/*safe - used for value*/);
-    for (unsigned i = 0; i < module_->heapAccesses.length(); i++) {
-        const HeapAccess& access = module_->heapAccesses[i];
+    for (unsigned i = 0; i < metadata_->heapAccesses.length(); i++) {
+        const HeapAccess& access = metadata_->heapAccesses[i];
         if (access.hasLengthCheck())
             X86Encoding::AddInt32(access.patchLengthAt(code()), -heapLength);
         void* addr = access.patchHeapPtrImmAt(code());
         uint8_t* ptr = reinterpret_cast<uint8_t*>(X86Encoding::GetPointer(addr));
         MOZ_ASSERT(ptr >= ptrBase);
         X86Encoding::SetPointer(addr, reinterpret_cast<void*>(ptr - ptrBase));
     }
 #elif defined(JS_CODEGEN_X64)
     uint32_t heapLength = heap->byteLength();
-    for (unsigned i = 0; i < module_->heapAccesses.length(); i++) {
-        const HeapAccess& access = module_->heapAccesses[i];
+    for (unsigned i = 0; i < metadata_->heapAccesses.length(); i++) {
+        const HeapAccess& access = metadata_->heapAccesses[i];
         if (access.hasLengthCheck())
             X86Encoding::AddInt32(access.patchLengthAt(code()), -heapLength);
     }
 #endif
 
     heap_ = nullptr;
     rawHeapPtr() = nullptr;
 }
@@ -707,17 +330,17 @@ Module::sendCodeRangesToProfiler(JSConte
     enabled |= PerfFuncEnabled();
 #endif
 #ifdef MOZ_VTUNE
     enabled |= IsVTuneProfilingActive();
 #endif
     if (!enabled)
         return true;
 
-    for (const CodeRange& codeRange : module_->codeRanges) {
+    for (const CodeRange& codeRange : metadata_->codeRanges) {
         if (!codeRange.isFunction())
             continue;
 
         uintptr_t start = uintptr_t(code() + codeRange.begin());
         uintptr_t end = uintptr_t(code() + codeRange.end());
         uintptr_t size = end - start;
 
         UniqueChars owner;
@@ -727,17 +350,17 @@ Module::sendCodeRangesToProfiler(JSConte
 
         // Avoid "unused" warnings
         (void)start;
         (void)size;
         (void)name;
 
 #ifdef JS_ION_PERF
         if (PerfFuncEnabled()) {
-            const char* file = module_->filename.get();
+            const char* file = metadata_->filename.get();
             unsigned line = codeRange.funcLineOrBytecode();
             unsigned column = 0;
             writePerfSpewerAsmJSFunctionMap(start, size, file, line, column, name);
         }
 #endif
 #ifdef MOZ_VTUNE
         if (IsVTuneProfilingActive()) {
             unsigned method_id = iJIT_GetNewMethodID();
@@ -770,28 +393,28 @@ Module::setProfilingEnabled(JSContext* c
     if (profilingEnabled_ == enabled)
         return true;
 
     // When enabled, generate profiling labels for every name in funcNames_
     // that is the name of some Function CodeRange. This involves malloc() so
     // do it now since, once we start sampling, we'll be in a signal-handing
     // context where we cannot malloc.
     if (enabled) {
-        for (const CodeRange& codeRange : module_->codeRanges) {
+        for (const CodeRange& codeRange : metadata_->codeRanges) {
             if (!codeRange.isFunction())
                 continue;
 
             UniqueChars owner;
             const char* funcName = getFuncName(cx, codeRange.funcIndex(), &owner);
             if (!funcName)
                 return false;
 
             UniqueChars label(JS_smprintf("%s (%s:%u)",
                                           funcName,
-                                          module_->filename.get(),
+                                          metadata_->filename.get(),
                                           codeRange.funcLineOrBytecode()));
             if (!label) {
                 ReportOutOfMemory(cx);
                 return false;
             }
 
             if (codeRange.funcIndex() >= funcLabels_.length()) {
                 if (!funcLabels_.resize(codeRange.funcIndex() + 1))
@@ -800,27 +423,27 @@ Module::setProfilingEnabled(JSContext* c
             funcLabels_[codeRange.funcIndex()] = Move(label);
         }
     } else {
         funcLabels_.clear();
     }
 
     // Patch callsites and returns to execute profiling prologues/epilogues.
     {
-        AutoWritableJitCode awjc(cx->runtime(), code(), codeBytes());
+        AutoWritableJitCode awjc(cx->runtime(), code(), codeLength());
         AutoFlushICache afc("Module::setProfilingEnabled");
-        AutoFlushICache::setRange(uintptr_t(code()), codeBytes());
+        AutoFlushICache::setRange(uintptr_t(code()), codeLength());
 
-        for (const CallSite& callSite : module_->callSites)
+        for (const CallSite& callSite : metadata_->callSites)
             ToggleProfiling(*this, callSite, enabled);
 
-        for (const CallThunk& callThunk : module_->callThunks)
+        for (const CallThunk& callThunk : metadata_->callThunks)
             ToggleProfiling(*this, callThunk, enabled);
 
-        for (const CodeRange& codeRange : module_->codeRanges)
+        for (const CodeRange& codeRange : metadata_->codeRanges)
             ToggleProfiling(*this, codeRange, enabled);
     }
 
     // In asm.js, table elements point directly to the prologue and must be
     // updated to reflect the profiling mode. In wasm, table elements point to
     // the (one) table entry which checks signature before jumping to the
     // appropriate prologue (which is patched by ToggleProfiling).
     if (isAsmJS()) {
@@ -848,62 +471,65 @@ Module::importToExit(const Import& impor
     return *reinterpret_cast<ImportExit*>(globalData() + import.exitGlobalDataOffset());
 }
 
 bool
 Module::clone(JSContext* cx, const StaticLinkData& link, Module* out) const
 {
     MOZ_ASSERT(dynamicallyLinked_);
 
-    // The out->module_ field was already cloned and initialized when 'out' was
+    // The out->metadata_ field was already cloned and initialized when 'out' was
     // constructed. This function should clone the rest.
-    MOZ_ASSERT(out->module_);
+    MOZ_ASSERT(out->metadata_);
 
+    // Copy the profiling state over too since the cloned machine code
+    // implicitly brings the profiling mode.
     out->profilingEnabled_ = profilingEnabled_;
-
-    if (!CloneVector(cx, funcLabels_, &out->funcLabels_))
-        return false;
+    for (const CacheableChars& label : funcLabels_) {
+        if (!out->funcLabels_.emplaceBack(DuplicateString(label.get())))
+            return false;
+    }
 
 #ifdef DEBUG
     // Put the symbolic links back to -1 so PatchDataWithValueCheck assertions
     // in Module::staticallyLink are valid.
     for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
         void* callee = AddressOf(imm, cx);
         const Uint32Vector& offsets = link.symbolicLinks[imm];
         for (uint32_t offset : offsets) {
-            jit::Assembler::PatchDataWithValueCheck(jit::CodeLocationLabel(out->code() + offset),
-                                                    jit::PatchedImmPtr((void*)-1),
-                                                    jit::PatchedImmPtr(callee));
+            Assembler::PatchDataWithValueCheck(CodeLocationLabel(out->code() + offset),
+                                               PatchedImmPtr((void*)-1),
+                                               PatchedImmPtr(callee));
         }
     }
 #endif
 
     // If the copied machine code has been specialized to the heap, it must be
     // unspecialized in the copy.
     if (usesHeap())
         out->despecializeFromHeap(heap_);
 
     return true;
 }
 
-
-Module::Module(UniqueModuleData module)
-  : module_(Move(module)),
+Module::Module(UniqueCodeSegment codeSegment, const Metadata& metadata)
+  : codeSegment_(Move(codeSegment)),
+    metadata_(&metadata),
     staticallyLinked_(false),
     interrupt_(nullptr),
     outOfBounds_(nullptr),
     dynamicallyLinked_(false),
     profilingEnabled_(false)
 {
     *(double*)(globalData() + NaN64GlobalDataOffset) = GenericNaN();
     *(float*)(globalData() + NaN32GlobalDataOffset) = GenericNaN();
 
 #ifdef DEBUG
     uint32_t lastEnd = 0;
-    for (const CodeRange& cr : module_->codeRanges) {
+    for (const CodeRange& cr : metadata_->codeRanges) {
         MOZ_ASSERT(cr.begin() >= lastEnd);
         lastEnd = cr.end();
     }
 #endif
 }
 
 Module::~Module()
 {
@@ -930,21 +556,21 @@ Module::trace(JSTracer* trc)
 Module::readBarrier()
 {
     InternalBarrierMethods<JSObject*>::readBarrier(owner());
 }
 
 /* virtual */ void
 Module::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, size_t* data)
 {
-    *code += codeBytes();
+    *code += codeSegment_->codeLength();
     *data += mallocSizeOf(this) +
-             globalBytes() +
-             mallocSizeOf(module_.get()) +
-             module_->sizeOfExcludingThis(mallocSizeOf) +
+             codeSegment_->globalDataLength() +
+             mallocSizeOf(metadata_.get()) +
+             metadata_->sizeOfExcludingThis(mallocSizeOf) +
              source_.sizeOfExcludingThis(mallocSizeOf) +
              funcPtrTables_.sizeOfExcludingThis(mallocSizeOf) +
              SizeOfVectorExcludingThis(funcLabels_, mallocSizeOf);
 }
 
 /* virtual */ bool
 Module::mutedErrors() const
 {
@@ -959,60 +585,60 @@ Module::displayURL() const
 {
     // WebAssembly code does not have `//# sourceURL`.
     return nullptr;
 }
 
 bool
 Module::containsFunctionPC(void* pc) const
 {
-    return pc >= code() && pc < (code() + module_->functionBytes);
+    return pc >= code() && pc < (code() + metadata_->functionLength);
 }
 
 bool
 Module::containsCodePC(void* pc) const
 {
-    return pc >= code() && pc < (code() + codeBytes());
+    return pc >= code() && pc < (code() + codeLength());
 }
 
 struct CallSiteRetAddrOffset
 {
     const CallSiteVector& callSites;
     explicit CallSiteRetAddrOffset(const CallSiteVector& callSites) : callSites(callSites) {}
     uint32_t operator[](size_t index) const {
         return callSites[index].returnAddressOffset();
     }
 };
 
 const CallSite*
 Module::lookupCallSite(void* returnAddress) const
 {
     uint32_t target = ((uint8_t*)returnAddress) - code();
     size_t lowerBound = 0;
-    size_t upperBound = module_->callSites.length();
+    size_t upperBound = metadata_->callSites.length();
 
     size_t match;
-    if (!BinarySearch(CallSiteRetAddrOffset(module_->callSites), lowerBound, upperBound, target, &match))
+    if (!BinarySearch(CallSiteRetAddrOffset(metadata_->callSites), lowerBound, upperBound, target, &match))
         return nullptr;
 
-    return &module_->callSites[match];
+    return &metadata_->callSites[match];
 }
 
 const CodeRange*
 Module::lookupCodeRange(void* pc) const
 {
     CodeRange::PC target((uint8_t*)pc - code());
     size_t lowerBound = 0;
-    size_t upperBound = module_->codeRanges.length();
+    size_t upperBound = metadata_->codeRanges.length();
 
     size_t match;
-    if (!BinarySearch(module_->codeRanges, lowerBound, upperBound, target, &match))
+    if (!BinarySearch(metadata_->codeRanges, lowerBound, upperBound, target, &match))
         return nullptr;
 
-    return &module_->codeRanges[match];
+    return &metadata_->codeRanges[match];
 }
 
 struct HeapAccessOffset
 {
     const HeapAccessVector& accesses;
     explicit HeapAccessOffset(const HeapAccessVector& accesses) : accesses(accesses) {}
     uintptr_t operator[](size_t index) const {
         return accesses[index].insnOffset();
@@ -1021,38 +647,38 @@ struct HeapAccessOffset
 
 const HeapAccess*
 Module::lookupHeapAccess(void* pc) const
 {
     MOZ_ASSERT(containsFunctionPC(pc));
 
     uint32_t target = ((uint8_t*)pc) - code();
     size_t lowerBound = 0;
-    size_t upperBound = module_->heapAccesses.length();
+    size_t upperBound = metadata_->heapAccesses.length();
 
     size_t match;
-    if (!BinarySearch(HeapAccessOffset(module_->heapAccesses), lowerBound, upperBound, target, &match))
+    if (!BinarySearch(HeapAccessOffset(metadata_->heapAccesses), lowerBound, upperBound, target, &match))
         return nullptr;
 
-    return &module_->heapAccesses[match];
+    return &metadata_->heapAccesses[match];
 }
 
 bool
 Module::staticallyLink(ExclusiveContext* cx, const StaticLinkData& linkData)
 {
     MOZ_ASSERT(!dynamicallyLinked_);
     MOZ_ASSERT(!staticallyLinked_);
     staticallyLinked_ = true;
 
     // Push a JitContext for benefit of IsCompilingAsmJS and delay flushing
     // until Module::dynamicallyLink.
     JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
     MOZ_ASSERT(IsCompilingAsmJS());
     AutoFlushICache afc("Module::staticallyLink", /* inhibit = */ true);
-    AutoFlushICache::setRange(uintptr_t(code()), codeBytes());
+    AutoFlushICache::setRange(uintptr_t(code()), codeLength());
 
     interrupt_ = code() + linkData.pod.interruptOffset;
     outOfBounds_ = code() + linkData.pod.outOfBoundsOffset;
 
     for (StaticLinkData::InternalLink link : linkData.internalLinks) {
         uint8_t* patchAt = code() + link.patchAtOffset;
         void* target = code() + link.targetOffset;
 
@@ -1221,34 +847,34 @@ Module::dynamicallyLink(JSContext* cx,
     MOZ_ASSERT(!dynamicallyLinked_);
     dynamicallyLinked_ = true;
 
     // Push a JitContext for benefit of IsCompilingAsmJS and flush the ICache.
     // We've been inhibiting flushing up to this point so flush it all now.
     JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
     MOZ_ASSERT(IsCompilingAsmJS());
     AutoFlushICache afc("Module::dynamicallyLink");
-    AutoFlushICache::setRange(uintptr_t(code()), codeBytes());
+    AutoFlushICache::setRange(uintptr_t(code()), codeLength());
 
     // Initialize imports with actual imported values.
     MOZ_ASSERT(importArgs.length() == imports().length());
     for (size_t i = 0; i < imports().length(); i++) {
         const Import& import = imports()[i];
         ImportExit& exit = importToExit(import);
         exit.code = code() + import.interpExitCodeOffset();
         exit.fun = importArgs[i];
         exit.baselineScript = nullptr;
     }
 
     // Specialize code to the actual heap.
     if (usesHeap())
         specializeToHeap(heap);
 
-    // See AllocateCode comment above.
-    if (!ExecutableAllocator::makeExecutable(code(), codeBytes())) {
+    // See CodeSegment::allocate comment above.
+    if (!ExecutableAllocator::makeExecutable(code(), codeLength())) {
         ReportOutOfMemory(cx);
         return false;
     }
 
     if (!sendCodeRangesToProfiler(cx))
         return false;
 
     return CreateExportObject(cx, moduleObj, heap, exportMap, exports(), exportObj);
@@ -1696,19 +1322,19 @@ Module::callImport_f64(int32_t importInd
         return false;
 
     return ToNumber(cx, rval, (double*)argv);
 }
 
 const char*
 Module::maybePrettyFuncName(uint32_t funcIndex) const
 {
-    if (funcIndex >= module_->prettyFuncNames.length())
+    if (funcIndex >= metadata_->prettyFuncNames.length())
         return nullptr;
-    return module_->prettyFuncNames[funcIndex].get();
+    return metadata_->prettyFuncNames[funcIndex].get();
 }
 
 const char*
 Module::getFuncName(JSContext* cx, uint32_t funcIndex, UniqueChars* owner) const
 {
     if (const char* prettyName = maybePrettyFuncName(funcIndex))
         return prettyName;
 
@@ -1828,26 +1454,23 @@ WasmModuleObject::create(ExclusiveContex
     AutoSetNewObjectMetadata metadata(cx);
     JSObject* obj = NewObjectWithGivenProto(cx, &WasmModuleObject::class_, nullptr);
     if (!obj)
         return nullptr;
 
     return &obj->as<WasmModuleObject>();
 }
 
-bool
-WasmModuleObject::init(Module* module)
+void
+WasmModuleObject::init(Module& module)
 {
     MOZ_ASSERT(is<WasmModuleObject>());
     MOZ_ASSERT(!hasModule());
-    if (!module)
-        return false;
-    module->setOwner(this);
-    setReservedSlot(MODULE_SLOT, PrivateValue(module));
-    return true;
+    module.setOwner(this);
+    setReservedSlot(MODULE_SLOT, PrivateValue(&module));
 }
 
 Module&
 WasmModuleObject::module() const
 {
     MOZ_ASSERT(is<WasmModuleObject>());
     MOZ_ASSERT(hasModule());
     return *(Module*)getReservedSlot(MODULE_SLOT).toPrivate();
--- a/js/src/asmjs/WasmModule.h
+++ b/js/src/asmjs/WasmModule.h
@@ -16,34 +16,37 @@
  * limitations under the License.
  */
 
 #ifndef wasm_module_h
 #define wasm_module_h
 
 #include "mozilla/LinkedList.h"
 
-#include "asmjs/WasmTypes.h"
+#include "asmjs/WasmCode.h"
 #include "gc/Barrier.h"
 #include "vm/MallocProvider.h"
 #include "vm/NativeObject.h"
 
 namespace js {
 
 class AsmJSModule;
 class WasmActivation;
 class WasmModuleObject;
 namespace jit { struct BaselineScript; }
 
 namespace wasm {
 
 // The StaticLinkData contains all the metadata necessary to perform
 // Module::staticallyLink but is not necessary afterwards.
+//
+// StaticLinkData is built incrementally by ModuleGenerator and then shared
+// immutably between modules.
 
-struct StaticLinkData
+struct StaticLinkData : RefCounted<StaticLinkData>
 {
     struct InternalLink {
         enum Kind {
             RawPointer,
             CodeLabel,
             InstructionImmediate
         };
         uint32_t patchAtOffset;
@@ -79,350 +82,45 @@ struct StaticLinkData
     } pod;
     InternalLinkVector  internalLinks;
     SymbolicLinkArray   symbolicLinks;
     FuncPtrTableVector  funcPtrTables;
 
     WASM_DECLARE_SERIALIZABLE(StaticLinkData)
 };
 
-typedef UniquePtr<StaticLinkData> UniqueStaticLinkData;
-
-// An Export represents a single function inside a wasm Module that has been
-// exported one or more times.
-
-class Export
-{
-    Sig sig_;
-    struct CacheablePod {
-        uint32_t stubOffset_;
-    } pod;
-
-  public:
-    Export() = default;
-    explicit Export(Sig&& sig)
-      : sig_(Move(sig))
-    {
-        pod.stubOffset_ = UINT32_MAX;
-    }
-    Export(Export&& rhs)
-      : sig_(Move(rhs.sig_)),
-        pod(rhs.pod)
-    {}
-
-    void initStubOffset(uint32_t stubOffset) {
-        MOZ_ASSERT(pod.stubOffset_ == UINT32_MAX);
-        pod.stubOffset_ = stubOffset;
-    }
-
-    uint32_t stubOffset() const {
-        return pod.stubOffset_;
-    }
-    const Sig& sig() const {
-        return sig_;
-    }
-
-    WASM_DECLARE_SERIALIZABLE(Export)
-};
-
-typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
-
-// An Import describes a wasm module import. Currently, only functions can be
-// imported in wasm. A function import includes the signature used within the
-// module to call it.
-
-class Import
-{
-    Sig sig_;
-    struct CacheablePod {
-        uint32_t exitGlobalDataOffset_;
-        uint32_t interpExitCodeOffset_;
-        uint32_t jitExitCodeOffset_;
-    } pod;
-
-  public:
-    Import() {}
-    Import(Import&& rhs) : sig_(Move(rhs.sig_)), pod(rhs.pod) {}
-    Import(Sig&& sig, uint32_t exitGlobalDataOffset)
-      : sig_(Move(sig))
-    {
-        pod.exitGlobalDataOffset_ = exitGlobalDataOffset;
-        pod.interpExitCodeOffset_ = 0;
-        pod.jitExitCodeOffset_ = 0;
-    }
-
-    void initInterpExitOffset(uint32_t off) {
-        MOZ_ASSERT(!pod.interpExitCodeOffset_);
-        pod.interpExitCodeOffset_ = off;
-    }
-    void initJitExitOffset(uint32_t off) {
-        MOZ_ASSERT(!pod.jitExitCodeOffset_);
-        pod.jitExitCodeOffset_ = off;
-    }
-
-    const Sig& sig() const {
-        return sig_;
-    }
-    uint32_t exitGlobalDataOffset() const {
-        return pod.exitGlobalDataOffset_;
-    }
-    uint32_t interpExitCodeOffset() const {
-        return pod.interpExitCodeOffset_;
-    }
-    uint32_t jitExitCodeOffset() const {
-        return pod.jitExitCodeOffset_;
-    }
-
-    WASM_DECLARE_SERIALIZABLE(Import)
-};
-
-typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
-
-// A CodeRange describes a single contiguous range of code within a wasm
-// module's code segment. A CodeRange describes what the code does and, for
-// function bodies, the name and source coordinates of the function.
-
-class CodeRange
-{
-  public:
-    enum Kind { Function, Entry, ImportJitExit, ImportInterpExit, Inline, CallThunk };
-
-  private:
-    // All fields are treated as cacheable POD:
-    uint32_t begin_;
-    uint32_t profilingReturn_;
-    uint32_t end_;
-    uint32_t funcIndex_;
-    uint32_t funcLineOrBytecode_;
-    uint8_t funcBeginToTableEntry_;
-    uint8_t funcBeginToTableProfilingJump_;
-    uint8_t funcBeginToNonProfilingEntry_;
-    uint8_t funcProfilingJumpToProfilingReturn_;
-    uint8_t funcProfilingEpilogueToProfilingReturn_;
-    Kind kind_ : 8;
-
-  public:
-    CodeRange() = default;
-    CodeRange(Kind kind, Offsets offsets);
-    CodeRange(Kind kind, ProfilingOffsets offsets);
-    CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
-
-    // All CodeRanges have a begin and end.
-
-    uint32_t begin() const {
-        return begin_;
-    }
-    uint32_t end() const {
-        return end_;
-    }
-
-    // Other fields are only available for certain CodeRange::Kinds.
-
-    Kind kind() const {
-        return kind_;
-    }
-
-    bool isFunction() const {
-        return kind() == Function;
-    }
-    bool isImportExit() const {
-        return kind() == ImportJitExit || kind() == ImportInterpExit;
-    }
-    bool isInline() const {
-        return kind() == Inline;
-    }
-
-    // Every CodeRange except entry and inline stubs has a profiling return
-    // which is used for asynchronous profiling to determine the frame pointer.
-
-    uint32_t profilingReturn() const {
-        MOZ_ASSERT(isFunction() || isImportExit());
-        return profilingReturn_;
-    }
-
-    // Functions have offsets which allow patching to selectively execute
-    // profiling prologues/epilogues.
-
-    uint32_t funcProfilingEntry() const {
-        MOZ_ASSERT(isFunction());
-        return begin();
-    }
-    uint32_t funcTableEntry() const {
-        MOZ_ASSERT(isFunction());
-        return begin_ + funcBeginToTableEntry_;
-    }
-    uint32_t funcTableProfilingJump() const {
-        MOZ_ASSERT(isFunction());
-        return begin_ + funcBeginToTableProfilingJump_;
-    }
-    uint32_t funcNonProfilingEntry() const {
-        MOZ_ASSERT(isFunction());
-        return begin_ + funcBeginToNonProfilingEntry_;
-    }
-    uint32_t funcProfilingJump() const {
-        MOZ_ASSERT(isFunction());
-        return profilingReturn_ - funcProfilingJumpToProfilingReturn_;
-    }
-    uint32_t funcProfilingEpilogue() const {
-        MOZ_ASSERT(isFunction());
-        return profilingReturn_ - funcProfilingEpilogueToProfilingReturn_;
-    }
-    uint32_t funcIndex() const {
-        MOZ_ASSERT(isFunction());
-        return funcIndex_;
-    }
-    uint32_t funcLineOrBytecode() const {
-        MOZ_ASSERT(isFunction());
-        return funcLineOrBytecode_;
-    }
-
-    // A sorted array of CodeRanges can be looked up via BinarySearch and PC.
-
-    struct PC {
-        size_t offset;
-        explicit PC(size_t offset) : offset(offset) {}
-        bool operator==(const CodeRange& rhs) const {
-            return offset >= rhs.begin() && offset < rhs.end();
-        }
-        bool operator<(const CodeRange& rhs) const {
-            return offset < rhs.begin();
-        }
-    };
-};
-
-WASM_DECLARE_POD_VECTOR(CodeRange, CodeRangeVector)
-
-// A CallThunk describes the offset and target of thunks so that they may be
-// patched at runtime when profiling is toggled. Thunks are emitted to connect
-// callsites that are too far away from callees to fit in a single call
-// instruction's relative offset.
-
-struct CallThunk
-{
-    uint32_t offset;
-    union {
-        uint32_t funcIndex;
-        uint32_t codeRangeIndex;
-    } u;
-
-    CallThunk(uint32_t offset, uint32_t funcIndex) : offset(offset) { u.funcIndex = funcIndex; }
-    CallThunk() = default;
-};
-
-WASM_DECLARE_POD_VECTOR(CallThunk, CallThunkVector)
-
-// CacheableChars is used to cacheably store UniqueChars.
-
-struct CacheableChars : UniqueChars
-{
-    CacheableChars() = default;
-    explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
-    MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
-    CacheableChars(CacheableChars&& rhs) : UniqueChars(Move(rhs)) {}
-    void operator=(CacheableChars&& rhs) { UniqueChars::operator=(Move(rhs)); }
-    WASM_DECLARE_SERIALIZABLE(CacheableChars)
-};
-
-typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
+typedef RefPtr<StaticLinkData> MutableStaticLinkData;
+typedef RefPtr<const StaticLinkData> SharedStaticLinkData;
 
 // The ExportMap describes how Exports are mapped to the fields of the export
 // object. This allows a single Export to be used in multiple fields.
 // The 'fieldNames' vector provides the list of names of the module's exports.
 // For each field in fieldNames, 'fieldsToExports' provides either:
 //  - the sentinel value MemoryExport indicating an export of linear memory; or
 //  - the index of an export (both into the module's ExportVector and the
 //    ExportMap's exportFuncIndices vector).
 // Lastly, the 'exportFuncIndices' vector provides, for each exported function,
 // the internal index of the function.
+//
+// The ExportMap is built incrementally by ModuleGenerator and then shared
+// immutably between modules.
 
 static const uint32_t MemoryExport = UINT32_MAX;
 
-struct ExportMap
+struct ExportMap : RefCounted<ExportMap>
 {
     CacheableCharsVector fieldNames;
     Uint32Vector fieldsToExports;
     Uint32Vector exportFuncIndices;
 
     WASM_DECLARE_SERIALIZABLE(ExportMap)
 };
 
-typedef UniquePtr<ExportMap> UniqueExportMap;
-
-// A UniqueCodePtr owns allocated executable code. Code passed to the Module
-// constructor must be allocated via AllocateCode.
-
-class CodeDeleter
-{
-    uint32_t bytes_;
-  public:
-    CodeDeleter() : bytes_(0) {}
-    explicit CodeDeleter(uint32_t bytes) : bytes_(bytes) {}
-    void operator()(uint8_t* p);
-};
-typedef UniquePtr<uint8_t, CodeDeleter> UniqueCodePtr;
-
-UniqueCodePtr
-AllocateCode(ExclusiveContext* cx, size_t bytes);
-
-// A wasm module can either use no heap, a unshared heap (ArrayBuffer) or shared
-// heap (SharedArrayBuffer).
-
-enum class HeapUsage
-{
-    None = false,
-    Unshared = 1,
-    Shared = 2
-};
-
-static inline bool
-UsesHeap(HeapUsage heapUsage)
-{
-    return bool(heapUsage);
-}
-
-// ModuleCacheablePod holds the trivially-memcpy()able serializable portion of
-// ModuleData.
-
-struct ModuleCacheablePod
-{
-    uint32_t              functionBytes;
-    uint32_t              codeBytes;
-    uint32_t              globalBytes;
-    ModuleKind            kind;
-    HeapUsage             heapUsage;
-    CompileArgs           compileArgs;
-
-    uint32_t totalBytes() const { return codeBytes + globalBytes; }
-};
-
-// ModuleData holds the guts of a Module. ModuleData is mutably built up by
-// ModuleGenerator and then handed over to the Module constructor in finish(),
-// where it is stored immutably.
-
-struct ModuleData : ModuleCacheablePod
-{
-    ModuleData() : loadedFromCache(false) { mozilla::PodZero(&pod()); }
-    ModuleCacheablePod& pod() { return *this; }
-    const ModuleCacheablePod& pod() const { return *this; }
-
-    UniqueCodePtr         code;
-    ImportVector          imports;
-    ExportVector          exports;
-    HeapAccessVector      heapAccesses;
-    CodeRangeVector       codeRanges;
-    CallSiteVector        callSites;
-    CallThunkVector       callThunks;
-    CacheableCharsVector  prettyFuncNames;
-    CacheableChars        filename;
-    bool                  loadedFromCache;
-
-    WASM_DECLARE_SERIALIZABLE(ModuleData);
-};
-
-typedef UniquePtr<ModuleData> UniqueModuleData;
+typedef RefPtr<ExportMap> MutableExportMap;
+typedef RefPtr<const ExportMap> SharedExportMap;
 
 // Module represents a compiled WebAssembly module which lives until the last
 // reference to any exported functions is dropped. Modules must be wrapped by a
 // rooted JSObject immediately after creation so that Module::trace() is called
 // during GC. Modules are created after compilation completes and start in a
 // a fully unlinked state. After creation, a module must be first statically
 // linked and then dynamically linked:
 //
@@ -436,17 +134,16 @@ typedef UniquePtr<ModuleData> UniqueModu
 //    once. However, a dynamically-linked module may be cloned so that the clone
 //    can be independently dynamically linked.
 //
 // Once fully dynamically linked, a Module can have its exports invoked via
 // callExport().
 
 class Module : public mozilla::LinkedListElement<Module>
 {
-    typedef UniquePtr<const ModuleData> UniqueConstModuleData;
     struct ImportExit {
         void* code;
         jit::BaselineScript* baselineScript;
         GCPtrFunction fun;
         static_assert(sizeof(GCPtrFunction) == sizeof(void*), "for JIT access");
     };
     struct EntryArg {
         uint64_t lo;
@@ -462,17 +159,18 @@ class Module : public mozilla::LinkedLis
         {}
     };
     typedef Vector<FuncPtrTable, 0, SystemAllocPolicy> FuncPtrTableVector;
     typedef Vector<CacheableChars, 0, SystemAllocPolicy> FuncLabelVector;
     typedef HeapPtr<ArrayBufferObjectMaybeShared*> BufferPtr;
     typedef GCPtr<WasmModuleObject*> ModuleObjectPtr;
 
     // Initialized when constructed:
-    const UniqueConstModuleData  module_;
+    const UniqueCodeSegment      codeSegment_;
+    const SharedMetadata         metadata_;
 
     // Initialized during staticallyLink:
     bool                         staticallyLinked_;
     uint8_t*                     interrupt_;
     uint8_t*                     outOfBounds_;
     FuncPtrTableVector           funcPtrTables_;
 
     // Initialized during dynamicallyLink:
@@ -504,64 +202,64 @@ class Module : public mozilla::LinkedLis
     static int32_t callImport_i32(int32_t importIndex, int32_t argc, uint64_t* argv);
     static int32_t callImport_i64(int32_t importIndex, int32_t argc, uint64_t* argv);
     static int32_t callImport_f64(int32_t importIndex, int32_t argc, uint64_t* argv);
 
     friend class js::WasmActivation;
     friend void* wasm::AddressOf(SymbolicAddress, ExclusiveContext*);
 
   protected:
-    const ModuleData& base() const { return *module_; }
+    const CodeSegment& codeSegment() const { return *codeSegment_; }
+    const Metadata& metadata() const { return *metadata_; }
     MOZ_MUST_USE bool clone(JSContext* cx, const StaticLinkData& link, Module* clone) const;
 
   public:
     static const unsigned SizeOfImportExit = sizeof(ImportExit);
     static const unsigned OffsetOfImportExitFun = offsetof(ImportExit, fun);
     static const unsigned SizeOfEntryArg = sizeof(EntryArg);
 
-    explicit Module(UniqueModuleData module);
+    explicit Module(UniqueCodeSegment codeSegment, const Metadata& metadata);
     virtual ~Module();
     virtual void trace(JSTracer* trc);
     virtual void readBarrier();
     virtual void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code, size_t* data);
 
     void setOwner(WasmModuleObject* owner) { MOZ_ASSERT(!ownerObject_); ownerObject_ = owner; }
     inline const GCPtr<WasmModuleObject*>& owner() const;
 
     void setSource(Bytes&& source) { source_ = Move(source); }
 
-    uint8_t* code() const { return module_->code.get(); }
-    uint32_t codeBytes() const { return module_->codeBytes; }
-    uint8_t* globalData() const { return code() + module_->codeBytes; }
-    uint32_t globalBytes() const { return module_->globalBytes; }
-    HeapUsage heapUsage() const { return module_->heapUsage; }
-    bool usesHeap() const { return UsesHeap(module_->heapUsage); }
-    bool hasSharedHeap() const { return module_->heapUsage == HeapUsage::Shared; }
-    CompileArgs compileArgs() const { return module_->compileArgs; }
-    const ImportVector& imports() const { return module_->imports; }
-    const ExportVector& exports() const { return module_->exports; }
-    const CodeRangeVector& codeRanges() const { return module_->codeRanges; }
-    const char* filename() const { return module_->filename.get(); }
-    bool loadedFromCache() const { return module_->loadedFromCache; }
+    uint8_t* code() const { return codeSegment_->code(); }
+    uint32_t codeLength() const { return codeSegment_->codeLength(); }
+    uint8_t* globalData() const { return codeSegment_->globalData(); }
+    uint32_t globalDataLength() const { return codeSegment_->globalDataLength(); }
+    HeapUsage heapUsage() const { return metadata_->heapUsage; }
+    bool usesHeap() const { return UsesHeap(metadata_->heapUsage); }
+    bool hasSharedHeap() const { return metadata_->heapUsage == HeapUsage::Shared; }
+    CompileArgs compileArgs() const { return metadata_->compileArgs; }
+    const ImportVector& imports() const { return metadata_->imports; }
+    const ExportVector& exports() const { return metadata_->exports; }
+    const CodeRangeVector& codeRanges() const { return metadata_->codeRanges; }
+    const char* filename() const { return metadata_->filename.get(); }
     bool staticallyLinked() const { return staticallyLinked_; }
     bool dynamicallyLinked() const { return dynamicallyLinked_; }
 
     // Some wasm::Module's have the most-derived type AsmJSModule. The
     // AsmJSModule stores the extra metadata necessary to implement asm.js (JS)
     // semantics. The asAsmJS() member may be used as a checked downcast when
     // isAsmJS() is true.
 
-    bool isAsmJS() const { return module_->kind == ModuleKind::AsmJS; }
+    bool isAsmJS() const { return metadata_->kind == ModuleKind::AsmJS; }
     AsmJSModule& asAsmJS() { MOZ_ASSERT(isAsmJS()); return *(AsmJSModule*)this; }
     const AsmJSModule& asAsmJS() const { MOZ_ASSERT(isAsmJS()); return *(const AsmJSModule*)this; }
     virtual bool mutedErrors() const;
     virtual const char16_t* displayURL() const;
     virtual ScriptSource* maybeScriptSource() const { return nullptr; }
 
-    // The range [0, functionBytes) is a subrange of [0, codeBytes) that
+    // The range [0, functionLength) is a subrange of [0, codeLength) that
     // contains only function body code, not the stub code. This distinction is
     // used by the async interrupt handler to only interrupt when the pc is in
     // function code which, in turn, simplifies reasoning about how stubs
     // enter/exit.
 
     bool containsFunctionPC(void* pc) const;
     bool containsCodePC(void* pc) const;
     const CallSite* lookupCallSite(void* returnAddress) const;
@@ -661,17 +359,17 @@ class WasmModuleObject : public NativeOb
     static const ClassOps classOps_;
 
     bool hasModule() const;
     static void finalize(FreeOp* fop, JSObject* obj);
     static void trace(JSTracer* trc, JSObject* obj);
   public:
     static const unsigned RESERVED_SLOTS = 1;
     static WasmModuleObject* create(ExclusiveContext* cx);
-    MOZ_MUST_USE bool init(wasm::Module* module);
+    void init(wasm::Module& module);
     wasm::Module& module() const;
     void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code, size_t* data);
     static const Class class_;
 };
 
 inline const GCPtr<WasmModuleObject*>&
 wasm::Module::owner() const {
     MOZ_ASSERT(&ownerObject_->module() == this);
--- a/js/src/asmjs/WasmSerialize.h
+++ b/js/src/asmjs/WasmSerialize.h
@@ -160,30 +160,16 @@ DeserializeVector(ExclusiveContext* cx, 
     for (size_t i = 0; i < vec->length(); i++) {
         if (!(cursor = (*vec)[i].deserialize(cx, cursor)))
             return nullptr;
     }
     return cursor;
 }
 
 template <class T, size_t N>
-static inline MOZ_MUST_USE bool
-CloneVector(JSContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
-            mozilla::Vector<T, N, SystemAllocPolicy>* out)
-{
-    if (!out->resize(in.length()))
-        return false;
-    for (size_t i = 0; i < in.length(); i++) {
-        if (!in[i].clone(cx, &(*out)[i]))
-            return false;
-    }
-    return true;
-}
-
-template <class T, size_t N>
 static inline size_t
 SizeOfVectorExcludingThis(const mozilla::Vector<T, N, SystemAllocPolicy>& vec,
                           MallocSizeOf mallocSizeOf)
 {
     size_t size = vec.sizeOfExcludingThis(mallocSizeOf);
     for (const T& t : vec)
         size += t.sizeOfExcludingThis(mallocSizeOf);
     return size;
@@ -214,27 +200,16 @@ DeserializePodVector(ExclusiveContext* c
     uint32_t length;
     cursor = ReadScalar<uint32_t>(cursor, &length);
     if (!vec->resize(length))
         return nullptr;
     cursor = ReadBytes(cursor, vec->begin(), length * sizeof(T));
     return cursor;
 }
 
-template <class T, size_t N>
-static inline MOZ_MUST_USE bool
-ClonePodVector(JSContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
-               mozilla::Vector<T, N, SystemAllocPolicy>* out)
-{
-    if (!out->resize(in.length()))
-        return false;
-    mozilla::PodCopy(out->begin(), in.begin(), in.length());
-    return true;
-}
-
 static inline MOZ_MUST_USE bool
 GetCPUID(uint32_t* cpuId)
 {
     enum Arch {
         X86 = 0x1,
         X64 = 0x2,
         ARM = 0x3,
         MIPS = 0x4,
--- a/js/src/asmjs/WasmTypes.h
+++ b/js/src/asmjs/WasmTypes.h
@@ -18,16 +18,18 @@
 
 #ifndef wasm_types_h
 #define wasm_types_h
 
 #include "mozilla/EnumeratedArray.h"
 #include "mozilla/HashFunctions.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/Move.h"
+#include "mozilla/RefCounted.h"
+#include "mozilla/RefPtr.h"
 
 #include "NamespaceImports.h"
 
 #include "asmjs/WasmBinary.h"
 #include "ds/LifoAlloc.h"
 #include "jit/IonTypes.h"
 #include "js/UniquePtr.h"
 #include "js/Utility.h"
@@ -39,40 +41,40 @@ class PropertyName;
 
 namespace wasm {
 
 using mozilla::DebugOnly;
 using mozilla::EnumeratedArray;
 using mozilla::Maybe;
 using mozilla::Move;
 using mozilla::MallocSizeOf;
+using mozilla::RefCounted;
 
 typedef Vector<uint32_t, 0, SystemAllocPolicy> Uint32Vector;
 
 // To call Vector::podResizeToFit, a type must specialize mozilla::IsPod
 // which is pretty verbose to do within js::wasm, so factor that process out
 // into a macro.
 
 #define WASM_DECLARE_POD_VECTOR(Type, VectorName)                               \
 } } namespace mozilla {                                                         \
 template <> struct IsPod<js::wasm::Type> : TrueType {};                         \
 } namespace js { namespace wasm {                                               \
 typedef Vector<Type, 0, SystemAllocPolicy> VectorName;
 
-// A wasm Module and everything it contains must support serialization,
-// deserialization and cloning. Some data can be simply copied as raw bytes and,
+// A wasm Module and everything it contains must support serialization and
+// deserialization. Some data can be simply copied as raw bytes and,
 // as a convention, is stored in an inline CacheablePod struct. Everything else
 // should implement the below methods which are called recusively by the
-// containing Module. See comments for these methods in wasm::Module.
+// containing Module.
 
 #define WASM_DECLARE_SERIALIZABLE(Type)                                         \
     size_t serializedSize() const;                                              \
     uint8_t* serialize(uint8_t* cursor) const;                                  \
     const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);    \
-    MOZ_MUST_USE bool clone(JSContext* cx, Type* out) const;                    \
     size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
 
 // ValType/ExprType utilities
 
 // ExprType::Limit is an out-of-band value and has no wasm-semantic meaning. For
 // the purpose of recursive validation, we use this value to represent the type
 // of branch/return instructions that don't actually return to the parent
 // expression and can thus be used in any context.
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -155,16 +155,17 @@ EXPORTS.js += [
 UNIFIED_SOURCES += [
     'asmjs/AsmJS.cpp',
     'asmjs/Wasm.cpp',
     'asmjs/WasmBinary.cpp',
     'asmjs/WasmBinaryIterator.cpp',
     'asmjs/WasmBinaryToAST.cpp',
     'asmjs/WasmBinaryToExperimentalText.cpp',
     'asmjs/WasmBinaryToText.cpp',
+    'asmjs/WasmCode.cpp',
     'asmjs/WasmFrameIterator.cpp',
     'asmjs/WasmGenerator.cpp',
     'asmjs/WasmIonCompile.cpp',
     'asmjs/WasmModule.cpp',
     'asmjs/WasmSignalHandlers.cpp',
     'asmjs/WasmStubs.cpp',
     'asmjs/WasmTextToBinary.cpp',
     'asmjs/WasmTypes.cpp',