Bug 1229642 - Split wasm::Module out of AsmJSModule (r=bbouvier)
authorLuke Wagner <luke@mozilla.com>
Mon, 28 Dec 2015 17:39:21 -0600
changeset 277720 5f25108ed130a969db2a336d19fa62d94623778c
parent 277719 3e8f585fb99ea8f5e07f91731994996f511c845b
child 277721 5e0769303a5efc3c745cc9ffd0492fa3a75dcf74
push id69574
push userlwagner@mozilla.com
push dateTue, 29 Dec 2015 00:40:13 +0000
treeherdermozilla-inbound@2fe666bbf4ec [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersbbouvier
bugs1229642
milestone46.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1229642 - Split wasm::Module out of AsmJSModule (r=bbouvier)
js/public/ProfilingFrameIterator.h
js/src/asmjs/AsmJSFrameIterator.cpp
js/src/asmjs/AsmJSFrameIterator.h
js/src/asmjs/AsmJSLink.cpp
js/src/asmjs/AsmJSModule.cpp
js/src/asmjs/AsmJSModule.h
js/src/asmjs/AsmJSSignalHandlers.cpp
js/src/asmjs/AsmJSSignalHandlers.h
js/src/asmjs/AsmJSValidate.cpp
js/src/asmjs/AsmJSValidate.h
js/src/asmjs/Wasm.h
js/src/asmjs/WasmCompileArgs.h
js/src/asmjs/WasmFrameIterator.cpp
js/src/asmjs/WasmFrameIterator.h
js/src/asmjs/WasmGenerator.cpp
js/src/asmjs/WasmGenerator.h
js/src/asmjs/WasmIR.h
js/src/asmjs/WasmIonCompile.cpp
js/src/asmjs/WasmIonCompile.h
js/src/asmjs/WasmModule.cpp
js/src/asmjs/WasmModule.h
js/src/asmjs/WasmSerialize.h
js/src/asmjs/WasmSignalHandlers.cpp
js/src/asmjs/WasmSignalHandlers.h
js/src/asmjs/WasmStubs.cpp
js/src/asmjs/WasmStubs.h
js/src/asmjs/WasmTypes.cpp
js/src/asmjs/WasmTypes.h
js/src/builtin/AtomicsObject.cpp
js/src/builtin/WeakSetObject.cpp
js/src/frontend/ParseNode.h
js/src/jit-test/tests/asm.js/testProfiling.js
js/src/jit/BaselineJIT.cpp
js/src/jit/BaselineJIT.h
js/src/jit/CodeGenerator.cpp
js/src/jit/CodeGenerator.h
js/src/jit/Ion.cpp
js/src/jit/Linker.h
js/src/jit/MIR.h
js/src/jit/MIRGraph.cpp
js/src/jit/MacroAssembler.h
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm64/MacroAssembler-arm64.h
js/src/jit/mips32/MacroAssembler-mips32.cpp
js/src/jit/mips32/Simulator-mips32.cpp
js/src/jit/mips64/MacroAssembler-mips64.cpp
js/src/jit/mips64/Simulator-mips64.cpp
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x86/Assembler-x86.h
js/src/jsopcode.cpp
js/src/jsscript.h
js/src/moz.build
js/src/vm/HelperThreads.cpp
js/src/vm/HelperThreads.h
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
js/src/vm/SharedArrayObject.cpp
js/src/vm/Stack-inl.h
js/src/vm/Stack.cpp
js/src/vm/Stack.h
--- a/js/public/ProfilingFrameIterator.h
+++ b/js/public/ProfilingFrameIterator.h
@@ -14,22 +14,24 @@
 #include "js/TypeDecls.h"
 #include "js/Utility.h"
 
 struct JSRuntime;
 class JSScript;
 
 namespace js {
     class Activation;
-    class AsmJSProfilingFrameIterator;
     namespace jit {
         class JitActivation;
         class JitProfilingFrameIterator;
         class JitcodeGlobalEntry;
     } // namespace jit
+    namespace wasm {
+        class ProfilingFrameIterator;
+    } // namespace wasm
 } // namespace js
 
 namespace JS {
 
 struct ForEachTrackedOptimizationAttemptOp;
 struct ForEachTrackedOptimizationTypeInfoOp;
 
 // This iterator can be used to walk the stack of a thread suspended at an
@@ -44,25 +46,25 @@ class JS_PUBLIC_API(ProfilingFrameIterat
 
     // When moving past a JitActivation, we need to save the prevJitTop
     // from it to use as the exit-frame pointer when the next caller jit
     // activation (if any) comes around.
     void* savedPrevJitTop_;
 
     static const unsigned StorageSpace = 8 * sizeof(void*);
     mozilla::AlignedStorage<StorageSpace> storage_;
-    js::AsmJSProfilingFrameIterator& asmJSIter() {
+    js::wasm::ProfilingFrameIterator& asmJSIter() {
         MOZ_ASSERT(!done());
         MOZ_ASSERT(isAsmJS());
-        return *reinterpret_cast<js::AsmJSProfilingFrameIterator*>(storage_.addr());
+        return *reinterpret_cast<js::wasm::ProfilingFrameIterator*>(storage_.addr());
     }
-    const js::AsmJSProfilingFrameIterator& asmJSIter() const {
+    const js::wasm::ProfilingFrameIterator& asmJSIter() const {
         MOZ_ASSERT(!done());
         MOZ_ASSERT(isAsmJS());
-        return *reinterpret_cast<const js::AsmJSProfilingFrameIterator*>(storage_.addr());
+        return *reinterpret_cast<const js::wasm::ProfilingFrameIterator*>(storage_.addr());
     }
 
     js::jit::JitProfilingFrameIterator& jitIter() {
         MOZ_ASSERT(!done());
         MOZ_ASSERT(isJit());
         return *reinterpret_cast<js::jit::JitProfilingFrameIterator*>(storage_.addr());
     }
 
--- a/js/src/asmjs/AsmJSLink.cpp
+++ b/js/src/asmjs/AsmJSLink.cpp
@@ -15,34 +15,27 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #include "asmjs/AsmJSLink.h"
 
 #include "mozilla/PodOperations.h"
 
-#ifdef MOZ_VTUNE
-# include "vtune/VTuneWrapper.h"
-#endif
-
 #include "jscntxt.h"
 #include "jsmath.h"
 #include "jsprf.h"
 #include "jswrapper.h"
 
 #include "asmjs/AsmJSModule.h"
 #include "builtin/AtomicsObject.h"
 #include "builtin/SIMD.h"
 #include "frontend/BytecodeCompiler.h"
 #include "jit/Ion.h"
 #include "jit/JitCommon.h"
-#ifdef JS_ION_PERF
-# include "jit/PerfSpewer.h"
-#endif
 #include "vm/ArrayBufferObject.h"
 #include "vm/SharedArrayObject.h"
 #include "vm/StringBuffer.h"
 
 #include "jsobjinlines.h"
 
 #include "vm/ArrayBufferObject-inl.h"
 #include "vm/NativeObject-inl.h"
@@ -50,31 +43,16 @@
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
 using mozilla::IsNaN;
 using mozilla::PodZero;
 
 static bool
-CloneModule(JSContext* cx, MutableHandle<AsmJSModuleObject*> moduleObj)
-{
-    ScopedJSDeletePtr<AsmJSModule> module;
-    if (!moduleObj->module().clone(cx, &module))
-        return false;
-
-    AsmJSModuleObject* newModuleObj = AsmJSModuleObject::create(cx, &module);
-    if (!newModuleObj)
-        return false;
-
-    moduleObj.set(newModuleObj);
-    return true;
-}
-
-static bool
 LinkFail(JSContext* cx, const char* str)
 {
     JS_ReportErrorFlagsAndNumber(cx, JSREPORT_WARNING, GetErrorMessage,
                                  nullptr, JSMSG_USE_ASM_LINK_FAIL, str);
     return false;
 }
 
 static bool
@@ -122,20 +100,20 @@ HasPureCoercion(JSContext* cx, HandleVal
     {
         return true;
     }
 
     return false;
 }
 
 static bool
-ValidateGlobalVariable(JSContext* cx, const AsmJSModule& module, AsmJSModule::Global& global,
+ValidateGlobalVariable(JSContext* cx, const AsmJSModule::Global& global, uint8_t* globalData,
                        HandleValue importVal)
 {
-    void* datum = module.globalData() + global.varGlobalDataOffset();
+    void* datum = globalData + global.varGlobalDataOffset();
 
     switch (global.varInitKind()) {
       case AsmJSModule::Global::InitConstant: {
         Val v = global.varInitVal();
         switch (v.type()) {
           case ValType::I32:
             *(int32_t*)datum = v.i32();
             break;
@@ -209,33 +187,33 @@ ValidateGlobalVariable(JSContext* cx, co
         break;
       }
     }
 
     return true;
 }
 
 static bool
-ValidateFFI(JSContext* cx, AsmJSModule::Global& global, HandleValue importVal,
-            AutoObjectVector* ffis)
+ValidateFFI(JSContext* cx, const AsmJSModule::Global& global, HandleValue importVal,
+            AutoVectorRooter<JSFunction*>* ffis)
 {
     RootedPropertyName field(cx, global.ffiField());
     RootedValue v(cx);
     if (!GetDataProperty(cx, importVal, field, &v))
         return false;
 
     if (!v.isObject() || !v.toObject().is<JSFunction>())
         return LinkFail(cx, "FFI imports must be functions");
 
     (*ffis)[global.ffiIndex()].set(&v.toObject().as<JSFunction>());
     return true;
 }
 
 static bool
-ValidateArrayView(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateArrayView(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
 {
     RootedPropertyName field(cx, global.maybeViewName());
     if (!field)
         return true;
 
     RootedValue v(cx);
     if (!GetDataProperty(cx, globalVal, field, &v))
         return false;
@@ -267,17 +245,17 @@ ValidateByteLength(JSContext* cx, Handle
     RootedValue boundThis(cx, fun->getBoundFunctionThis());
     if (!IsNativeFunction(boundThis, ArrayBufferObject::byteLengthGetter))
         return LinkFail(cx, "bound this value must be ArrayBuffer.protototype.byteLength accessor");
 
     return true;
 }
 
 static bool
-ValidateMathBuiltinFunction(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateMathBuiltinFunction(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
 {
     RootedValue v(cx);
     if (!GetDataProperty(cx, globalVal, cx->names().Math, &v))
         return false;
 
     RootedPropertyName field(cx, global.mathName());
     if (!GetDataProperty(cx, v, field, &v))
         return false;
@@ -329,17 +307,17 @@ AsmJSSimdTypeToTypeDescrType(AsmJSSimdTy
       case AsmJSSimdType_int32x4: return Int32x4::type;
       case AsmJSSimdType_float32x4: return Float32x4::type;
       case AsmJSSimdType_bool32x4: return Bool32x4::type;
     }
     MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected AsmJSSimdType");
 }
 
 static bool
-ValidateSimdType(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal,
+ValidateSimdType(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal,
                  MutableHandleValue out)
 {
     RootedValue v(cx);
     if (!GetDataProperty(cx, globalVal, cx->names().SIMD, &v))
         return false;
 
     AsmJSSimdType type;
     if (global.which() == AsmJSModule::Global::SimdCtor)
@@ -361,24 +339,24 @@ ValidateSimdType(JSContext* cx, AsmJSMod
     if (AsmJSSimdTypeToTypeDescrType(type) != simdDesc->as<SimdTypeDescr>().type())
         return LinkFail(cx, "bad SIMD type");
 
     out.set(v);
     return true;
 }
 
 static bool
-ValidateSimdType(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateSimdType(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
 {
     RootedValue _(cx);
     return ValidateSimdType(cx, global, globalVal, &_);
 }
 
 static bool
-ValidateSimdOperation(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateSimdOperation(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
 {
     // SIMD operations are loaded from the SIMD type, so the type must have been
     // validated before the operation.
     RootedValue v(cx);
     JS_ALWAYS_TRUE(ValidateSimdType(cx, global, globalVal, &v));
 
     RootedPropertyName opName(cx, global.simdOperationName());
     if (!GetDataProperty(cx, v, opName, &v))
@@ -421,17 +399,17 @@ ValidateSimdOperation(JSContext* cx, Asm
 #undef SET_NATIVE
     }
     if (!native || !IsNativeFunction(v, native))
         return LinkFail(cx, "bad SIMD.type.* operation");
     return true;
 }
 
 static bool
-ValidateAtomicsBuiltinFunction(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateAtomicsBuiltinFunction(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
 {
     RootedValue v(cx);
     if (!GetDataProperty(cx, globalVal, cx->names().Atomics, &v))
         return false;
     RootedPropertyName field(cx, global.atomicsName());
     if (!GetDataProperty(cx, v, field, &v))
         return false;
 
@@ -452,17 +430,17 @@ ValidateAtomicsBuiltinFunction(JSContext
 
     if (!IsNativeFunction(v, native))
         return LinkFail(cx, "bad Atomics.* builtin function");
 
     return true;
 }
 
 static bool
-ValidateConstant(JSContext* cx, AsmJSModule::Global& global, HandleValue globalVal)
+ValidateConstant(JSContext* cx, const AsmJSModule::Global& global, HandleValue globalVal)
 {
     RootedPropertyName field(cx, global.constantName());
     RootedValue v(cx, globalVal);
 
     if (global.constantKind() == AsmJSModule::Global::MathConstant) {
         if (!GetDataProperty(cx, v, cx->names().Math, &v))
             return false;
     }
@@ -481,96 +459,92 @@ ValidateConstant(JSContext* cx, AsmJSMod
         if (v.toNumber() != global.constantValue())
             return LinkFail(cx, "global constant value mismatch");
     }
 
     return true;
 }
 
 static bool
-LinkModuleToHeap(JSContext* cx, AsmJSModule& module, Handle<ArrayBufferObjectMaybeShared*> heap)
+CheckBuffer(JSContext* cx, AsmJSModule& module, HandleValue bufferVal,
+            MutableHandle<ArrayBufferObjectMaybeShared*> buffer)
 {
-    uint32_t heapLength = heap->byteLength();
+    if (module.isSharedView() && !IsSharedArrayBuffer(bufferVal))
+        return LinkFail(cx, "shared views can only be constructed onto SharedArrayBuffer");
+
+    if (!module.isSharedView() && !IsArrayBuffer(bufferVal))
+        return LinkFail(cx, "unshared views can only be constructed onto ArrayBuffer");
+
+    buffer.set(&AsAnyArrayBuffer(bufferVal));
+    uint32_t heapLength = buffer->byteLength();
 
     if (!IsValidAsmJSHeapLength(heapLength)) {
-        ScopedJSFreePtr<char> msg(
+        UniqueChars msg(
             JS_smprintf("ArrayBuffer byteLength 0x%x is not a valid heap length. The next "
                         "valid length is 0x%x",
                         heapLength,
                         RoundUpToNextValidAsmJSHeapLength(heapLength)));
         return LinkFail(cx, msg.get());
     }
 
     // This check is sufficient without considering the size of the loaded datum because heap
     // loads and stores start on an aligned boundary and the heap byteLength has larger alignment.
     MOZ_ASSERT((module.minHeapLength() - 1) <= INT32_MAX);
     if (heapLength < module.minHeapLength()) {
-        ScopedJSFreePtr<char> msg(
+        UniqueChars msg(
             JS_smprintf("ArrayBuffer byteLength of 0x%x is less than 0x%x (the size implied "
                         "by const heap accesses and/or change-heap minimum-length requirements).",
                         heapLength,
                         module.minHeapLength()));
         return LinkFail(cx, msg.get());
     }
 
     if (heapLength > module.maxHeapLength()) {
-        ScopedJSFreePtr<char> msg(
+        UniqueChars msg(
             JS_smprintf("ArrayBuffer byteLength 0x%x is greater than maximum length of 0x%x",
                         heapLength,
                         module.maxHeapLength()));
         return LinkFail(cx, msg.get());
     }
 
-    // If we've generated the code with signal handlers in mind (for bounds
-    // checks on x64 and for interrupt callback requesting on all platforms),
-    // we need to be able to use signals at runtime. In particular, a module
-    // can have been created using signals and cached, and executed without
-    // signals activated.
-    if (module.usesSignalHandlersForInterrupt() && !cx->canUseSignalHandlers())
-        return LinkFail(cx, "Code generated with signal handlers but signals are deactivated");
+    // Shell builtins may have disabled signal handlers since the module we're
+    // cloning was compiled. LookupAsmJSModuleInCache checks for signal handlers
+    // as well for the caching case.
+    if (module.wasm().compileArgs() != CompileArgs(cx))
+        return LinkFail(cx, "Signals have been toggled since compilation");
 
-    if (heap->is<ArrayBufferObject>()) {
-        Rooted<ArrayBufferObject*> abheap(cx, &heap->as<ArrayBufferObject>());
-        if (!ArrayBufferObject::prepareForAsmJS(cx, abheap, module.usesSignalHandlersForOOB()))
+    if (buffer->is<ArrayBufferObject>()) {
+        Rooted<ArrayBufferObject*> abheap(cx, &buffer->as<ArrayBufferObject>());
+        bool useSignalHandlers = module.wasm().compileArgs().useSignalHandlersForOOB;
+        if (!ArrayBufferObject::prepareForAsmJS(cx, abheap, useSignalHandlers))
             return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
     }
 
-    module.initHeap(heap, cx);
     return true;
 }
 
 static bool
 DynamicallyLinkModule(JSContext* cx, const CallArgs& args, AsmJSModule& module)
 {
-    module.setIsDynamicallyLinked(cx->runtime());
-
     HandleValue globalVal = args.get(0);
     HandleValue importVal = args.get(1);
     HandleValue bufferVal = args.get(2);
 
-    Rooted<ArrayBufferObjectMaybeShared*> heap(cx);
-    if (module.hasArrayView()) {
-        if (module.isSharedView() && !IsSharedArrayBuffer(bufferVal))
-            return LinkFail(cx, "shared views can only be constructed onto SharedArrayBuffer");
-        if (!module.isSharedView() && !IsArrayBuffer(bufferVal))
-            return LinkFail(cx, "unshared views can only be constructed onto ArrayBuffer");
-        heap = &AsAnyArrayBuffer(bufferVal);
-        if (!LinkModuleToHeap(cx, module, heap))
-            return false;
-    }
+    Rooted<ArrayBufferObjectMaybeShared*> buffer(cx);
+    if (module.hasArrayView() && !CheckBuffer(cx, module, bufferVal, &buffer))
+        return false;
 
-    AutoObjectVector ffis(cx);
+    AutoVectorRooter<JSFunction*> ffis(cx);
     if (!ffis.resize(module.numFFIs()))
         return false;
 
-    for (unsigned i = 0; i < module.numGlobals(); i++) {
-        AsmJSModule::Global& global = module.global(i);
+    for (const AsmJSModule::Global& global : module.globals()) {
         switch (global.which()) {
           case AsmJSModule::Global::Variable:
-            if (!ValidateGlobalVariable(cx, module, global, importVal))
+            if (!ValidateGlobalVariable(cx, global, module.wasm().globalData(), importVal))
                 return false;
             break;
           case AsmJSModule::Global::FFI:
             if (!ValidateFFI(cx, global, importVal, &ffis))
                 return false;
             break;
           case AsmJSModule::Global::ArrayView:
           case AsmJSModule::Global::ArrayViewCtor:
@@ -599,25 +573,23 @@ DynamicallyLinkModule(JSContext* cx, con
             break;
           case AsmJSModule::Global::SimdOperation:
             if (!ValidateSimdOperation(cx, global, globalVal))
                 return false;
             break;
         }
     }
 
-    for (unsigned i = 0; i < module.numExits(); i++) {
-        const AsmJSModule::Exit& exit = module.exit(i);
-        exit.datum(module).fun = &ffis[exit.ffiIndex()]->as<JSFunction>();
+    AutoVectorRooter<JSFunction*> imports(cx);
+    for (const AsmJSModule::Import& import : module.imports()) {
+        if (!imports.append(ffis[import.ffiIndex()]))
+            return false;
     }
 
-    // See the comment in AllocateExecutableMemory.
-    ExecutableAllocator::makeExecutable(module.codeBase(), module.codeBytes());
-
-    return true;
+    return module.wasm().dynamicallyLink(cx, buffer, imports);
 }
 
 static bool
 ChangeHeap(JSContext* cx, AsmJSModule& module, const CallArgs& args)
 {
     HandleValue bufferArg = args.get(0);
     if (!IsArrayBuffer(bufferArg)) {
         ReportIncompatible(cx, args);
@@ -636,79 +608,76 @@ ChangeHeap(JSContext* cx, AsmJSModule& m
 
     if (!module.hasArrayView()) {
         args.rval().set(BooleanValue(true));
         return true;
     }
 
     MOZ_ASSERT(IsValidAsmJSHeapLength(heapLength));
 
-    if (!ArrayBufferObject::prepareForAsmJS(cx, newBuffer, module.usesSignalHandlersForOOB()))
+    bool useSignalHandlers = module.wasm().compileArgs().useSignalHandlersForOOB;
+    if (!ArrayBufferObject::prepareForAsmJS(cx, newBuffer, useSignalHandlers))
         return false;
 
-    args.rval().set(BooleanValue(module.changeHeap(newBuffer, cx)));
+    args.rval().set(BooleanValue(module.wasm().changeHeap(newBuffer, cx)));
     return true;
 }
 
 // An asm.js function stores, in its extended slots:
 //  - a pointer to the module from which it was returned
 //  - its index in the ordered list of exported functions
 static const unsigned ASM_MODULE_SLOT = 0;
 static const unsigned ASM_EXPORT_INDEX_SLOT = 1;
 
 static unsigned
-FunctionToExportedFunctionIndex(HandleFunction fun)
+FunctionToExportIndex(HandleFunction fun)
 {
     MOZ_ASSERT(IsAsmJSFunction(fun));
     Value v = fun->getExtendedSlot(ASM_EXPORT_INDEX_SLOT);
     return v.toInt32();
 }
 
-static const AsmJSModule::ExportedFunction&
-FunctionToExportedFunction(HandleFunction fun, AsmJSModule& module)
-{
-    unsigned funIndex = FunctionToExportedFunctionIndex(fun);
-    return module.exportedFunction(funIndex);
-}
-
 static AsmJSModule&
 FunctionToEnclosingModule(HandleFunction fun)
 {
     return fun->getExtendedSlot(ASM_MODULE_SLOT).toObject().as<AsmJSModuleObject>().module();
 }
 
 // This is the js::Native for functions exported by an asm.js module.
 static bool
 CallAsmJS(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs callArgs = CallArgsFromVp(argc, vp);
     RootedFunction callee(cx, &callArgs.callee().as<JSFunction>());
-    AsmJSModule& module = FunctionToEnclosingModule(callee);
-    const AsmJSModule::ExportedFunction& func = FunctionToExportedFunction(callee, module);
 
     // The heap-changing function is a special-case and is implemented by C++.
-    if (func.isChangeHeap())
-        return ChangeHeap(cx, module, callArgs);
+    AsmJSModule& asmJSModule = FunctionToEnclosingModule(callee);
+    const AsmJSModule::Export& asmJSFunc = asmJSModule.exports()[FunctionToExportIndex(callee)];
+    if (asmJSFunc.isChangeHeap())
+        return ChangeHeap(cx, asmJSModule, callArgs);
+
+    Module& module = asmJSModule.wasm();
+    const Export& func = module.exports()[asmJSFunc.wasmIndex()];
 
     // Enable/disable profiling in the asm.js module to match the current global
     // profiling state. Don't do this if the module is already active on the
     // stack since this would leave the module in a state where profiling is
     // enabled but the stack isn't unwindable.
     if (module.profilingEnabled() != cx->runtime()->spsProfiler.enabled() && !module.active())
         module.setProfilingEnabled(cx->runtime()->spsProfiler.enabled(), cx);
 
     // The calling convention for an external call into asm.js is to pass an
     // array of 16-byte values where each value contains either a coerced int32
     // (in the low word), a double value (in the low dword) or a SIMD vector
     // value, with the coercions specified by the asm.js signature. The
     // external entry point unpacks this array into the system-ABI-specified
     // registers and stack memory and then calls into the internal entry point.
     // The return value is stored in the first element of the array (which,
     // therefore, must have length >= 1).
-    js::Vector<AsmJSModule::EntryArg, 8> coercedArgs(cx);
+    Vector<Module::EntryArg, 8> coercedArgs(cx);
     if (!coercedArgs.resize(Max<size_t>(1, func.sig().args().length())))
         return false;
 
     RootedValue v(cx);
     for (unsigned i = 0; i < func.sig().args().length(); ++i) {
         v = i < callArgs.length() ? callArgs[i] : UndefinedValue();
         switch (func.sig().arg(i)) {
           case ValType::I32:
@@ -762,21 +731,21 @@ CallAsmJS(JSContext* cx, unsigned argc, 
     }
 
     {
         // Push an AsmJSActivation to describe the asm.js frames we're about to
         // push when running this module. Additionally, push a JitActivation so
         // that the optimized asm.js-to-Ion FFI call path (which we want to be
         // very fast) can avoid doing so. The JitActivation is marked as
         // inactive so stack iteration will skip over it.
-        AsmJSActivation activation(cx, module);
+        AsmJSActivation activation(cx, asmJSModule);
         JitActivation jitActivation(cx, /* active */ false);
 
         // Call the per-exported-function trampoline created by GenerateEntry.
-        AsmJSModule::CodePtr enter = module.entryTrampoline(func);
+        Module::EntryFuncPtr enter = module.entryTrampoline(func);
         if (!CALL_GENERATED_2(enter, coercedArgs.begin(), module.globalData()))
             return false;
     }
 
     if (callArgs.isConstructing()) {
         // By spec, when a function is called as a constructor and this function
         // returns a primary type, which is the case for all asm.js exported
         // functions, the returned value is discarded and an empty object is
@@ -821,21 +790,24 @@ CallAsmJS(JSContext* cx, unsigned argc, 
         callArgs.rval().set(ObjectValue(*simdObj));
         break;
     }
 
     return true;
 }
 
 static JSFunction*
-NewExportedFunction(JSContext* cx, const AsmJSModule::ExportedFunction& func,
+NewExportedFunction(JSContext* cx, const AsmJSModule& module, const AsmJSModule::Export& func,
                     HandleObject moduleObj, unsigned exportIndex)
 {
+    unsigned numArgs = func.isChangeHeap()
+                       ? 1
+                       : module.wasm().exports()[func.wasmIndex()].sig().args().length();
+
     RootedPropertyName name(cx, func.name());
-    unsigned numArgs = func.isChangeHeap() ? 1 : func.sig().args().length();
     JSFunction* fun =
         NewNativeConstructor(cx, CallAsmJS, numArgs, name,
                              gc::AllocKind::FUNCTION_EXTENDED, GenericObject,
                              JSFunction::ASMJS_CTOR);
     if (!fun)
         return nullptr;
 
     fun->setExtendedSlot(ASM_MODULE_SLOT, ObjectValue(*moduleObj));
@@ -845,29 +817,31 @@ NewExportedFunction(JSContext* cx, const
 
 static bool
 HandleDynamicLinkFailure(JSContext* cx, const CallArgs& args, AsmJSModule& module,
                          HandlePropertyName name)
 {
     if (cx->isExceptionPending())
         return false;
 
+    ScriptSource* source = module.scriptSource();
+
     // Source discarding is allowed to affect JS semantics because it is never
     // enabled for normal JS content.
-    bool haveSource = module.scriptSource()->hasSourceData();
-    if (!haveSource && !JSScript::loadSource(cx, module.scriptSource(), &haveSource))
+    bool haveSource = source->hasSourceData();
+    if (!haveSource && !JSScript::loadSource(cx, source, &haveSource))
         return false;
     if (!haveSource) {
         JS_ReportError(cx, "asm.js link failure with source discarding enabled");
         return false;
     }
 
     uint32_t begin = module.srcBodyStart();  // starts right after 'use asm'
     uint32_t end = module.srcEndBeforeCurly();
-    Rooted<JSFlatString*> src(cx, module.scriptSource()->substringDontDeflate(cx, begin, end));
+    Rooted<JSFlatString*> src(cx, source->substringDontDeflate(cx, begin, end));
     if (!src)
         return false;
 
     RootedFunction fun(cx, NewScriptedFunction(cx, 0, JSFunction::INTERPRETED_NORMAL,
                                                name, gc::AllocKind::FUNCTION,
                                                TenuredObject));
     if (!fun)
         return false;
@@ -879,18 +853,18 @@ HandleDynamicLinkFailure(JSContext* cx, 
     if (module.globalArgumentName())
         formals.infallibleAppend(module.globalArgumentName());
     if (module.importArgumentName())
         formals.infallibleAppend(module.importArgumentName());
     if (module.bufferArgumentName())
         formals.infallibleAppend(module.bufferArgumentName());
 
     CompileOptions options(cx);
-    options.setMutedErrors(module.scriptSource()->mutedErrors())
-           .setFile(module.scriptSource()->filename())
+    options.setMutedErrors(source->mutedErrors())
+           .setFile(source->filename())
            .setNoScriptRval(false);
 
     // The exported function inherits an implicit strict context if the module
     // also inherited it somehow.
     if (module.strict())
         options.strictOption = true;
 
     AutoStableStringChars stableChars(cx);
@@ -905,122 +879,37 @@ HandleDynamicLinkFailure(JSContext* cx, 
     if (!frontend::CompileFunctionBody(cx, &fun, options, formals, srcBuf))
         return false;
 
     // Call the function we just recompiled.
     args.setCallee(ObjectValue(*fun));
     return Invoke(cx, args, args.isConstructing() ? CONSTRUCT : NO_CONSTRUCT);
 }
 
-#ifdef MOZ_VTUNE
-static bool
-SendFunctionsToVTune(JSContext* cx, AsmJSModule& module)
+static JSObject*
+CreateExportObject(JSContext* cx, HandleAsmJSModule moduleObj)
 {
-    uint8_t* base = module.codeBase();
-
-    for (unsigned i = 0; i < module.numProfiledFunctions(); i++) {
-        const AsmJSModule::ProfiledFunction& func = module.profiledFunction(i);
-
-        uint8_t* start = base + func.pod.startCodeOffset;
-        uint8_t* end   = base + func.pod.endCodeOffset;
-        MOZ_ASSERT(end >= start);
-
-        unsigned method_id = iJIT_GetNewMethodID();
-        if (method_id == 0)
-            return false;
+    AsmJSModule& module = moduleObj->module();
+    const AsmJSModule::ExportVector& exports = module.exports();
 
-        JSAutoByteString bytes;
-        const char* method_name = AtomToPrintableString(cx, func.name, &bytes);
-        if (!method_name)
-            return false;
-
-        iJIT_Method_Load method;
-        method.method_id = method_id;
-        method.method_name = const_cast<char*>(method_name);
-        method.method_load_address = (void*)start;
-        method.method_size = unsigned(end - start);
-        method.line_number_size = 0;
-        method.line_number_table = nullptr;
-        method.class_id = 0;
-        method.class_file_name = nullptr;
-        method.source_file_name = nullptr;
-
-        iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&method);
+    if (exports.length() == 1) {
+        const AsmJSModule::Export& func = exports[0];
+        if (!func.maybeFieldName())
+            return NewExportedFunction(cx, module, func, moduleObj, 0);
     }
 
-    return true;
-}
-#endif
-
-#ifdef JS_ION_PERF
-static bool
-SendFunctionsToPerf(JSContext* cx, AsmJSModule& module)
-{
-    if (!PerfFuncEnabled())
-        return true;
-
-    uintptr_t base = (uintptr_t) module.codeBase();
-    const char* filename = module.scriptSource()->filename();
-
-    for (unsigned i = 0; i < module.numProfiledFunctions(); i++) {
-        const AsmJSModule::ProfiledFunction& func = module.profiledFunction(i);
-        uintptr_t start = base + (unsigned long) func.pod.startCodeOffset;
-        uintptr_t end   = base + (unsigned long) func.pod.endCodeOffset;
-        MOZ_ASSERT(end >= start);
-        size_t size = end - start;
-
-        JSAutoByteString bytes;
-        const char* name = AtomToPrintableString(cx, func.name, &bytes);
-        if (!name)
-            return false;
-
-        writePerfSpewerAsmJSFunctionMap(start, size, filename, func.pod.lineno,
-                                        func.pod.columnIndex, name);
-    }
-
-    return true;
-}
-#endif
-
-static bool
-SendModuleToAttachedProfiler(JSContext* cx, AsmJSModule& module)
-{
-#if defined(MOZ_VTUNE)
-    if (IsVTuneProfilingActive() && !SendFunctionsToVTune(cx, module))
-        return false;
-#endif
-#if defined(JS_ION_PERF)
-    if (!SendFunctionsToPerf(cx, module))
-        return false;
-#endif
-
-    return true;
-}
-
-
-static JSObject*
-CreateExportObject(JSContext* cx, Handle<AsmJSModuleObject*> moduleObj)
-{
-    AsmJSModule& module = moduleObj->module();
-
-    if (module.numExportedFunctions() == 1) {
-        const AsmJSModule::ExportedFunction& func = module.exportedFunction(0);
-        if (!func.maybeFieldName())
-            return NewExportedFunction(cx, func, moduleObj, 0);
-    }
-
-    gc::AllocKind allocKind = gc::GetGCObjectKind(module.numExportedFunctions());
+    gc::AllocKind allocKind = gc::GetGCObjectKind(exports.length());
     RootedPlainObject obj(cx, NewBuiltinClassInstance<PlainObject>(cx, allocKind));
     if (!obj)
         return nullptr;
 
-    for (unsigned i = 0; i < module.numExportedFunctions(); i++) {
-        const AsmJSModule::ExportedFunction& func = module.exportedFunction(i);
+    for (unsigned i = 0; i < exports.length(); i++) {
+        const AsmJSModule::Export& func = exports[i];
 
-        RootedFunction fun(cx, NewExportedFunction(cx, func, moduleObj, i));
+        RootedFunction fun(cx, NewExportedFunction(cx, module, func, moduleObj, i));
         if (!fun)
             return nullptr;
 
         MOZ_ASSERT(func.maybeFieldName() != nullptr);
         RootedId id(cx, NameToId(func.maybeFieldName()));
         RootedValue val(cx, ObjectValue(*fun));
         if (!NativeDefineProperty(cx, obj, id, val, nullptr, nullptr, JSPROP_ENUMERATE))
             return nullptr;
@@ -1043,45 +932,44 @@ LinkAsmJS(JSContext* cx, unsigned argc, 
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 
     // The LinkAsmJS builtin (created by NewAsmJSModuleFunction) is an extended
     // function and stores its module in an extended slot.
     RootedFunction fun(cx, &args.callee().as<JSFunction>());
     Rooted<AsmJSModuleObject*> moduleObj(cx, &ModuleFunctionToModuleObject(fun));
 
-
     // When a module is linked, it is dynamically specialized to the given
     // arguments (buffer, ffis). Thus, if the module is linked again (it is just
     // a function so it can be called multiple times), we need to clone a new
     // module.
-    if (moduleObj->module().isDynamicallyLinked() && !CloneModule(cx, &moduleObj))
-        return false;
+    if (moduleObj->module().wasm().dynamicallyLinked()) {
+        Rooted<AsmJSModuleObject*> clone(cx, AsmJSModuleObject::create(cx));
+        if (!clone)
+            return false;
+
+        if (!moduleObj->module().clone(cx, clone))
+            return false;
+
+        moduleObj = clone;
+    }
 
     AsmJSModule& module = moduleObj->module();
 
-    AutoFlushICache afc("LinkAsmJS");
-    module.setAutoFlushICacheRange();
-
     // Link the module by performing the link-time validation checks in the
     // asm.js spec and then patching the generated module to associate it with
     // the given heap (ArrayBuffer) and a new global data segment (the closure
     // state shared by the inner asm.js functions).
     if (!DynamicallyLinkModule(cx, args, module)) {
         // Linking failed, so reparse the entire asm.js module from scratch to
         // get normal interpreted bytecode which we can simply Invoke. Very slow.
         RootedPropertyName name(cx, fun->name());
         return HandleDynamicLinkFailure(cx, args, module, name);
     }
 
-    // Notify profilers so that asm.js generated code shows up with JS function
-    // names and lines in native (i.e., not SPS) profilers.
-    if (!SendModuleToAttachedProfiler(cx, module))
-        return false;
-
     // Link-time validation succeeded, so wrap all the exported functions with
     // CallAsmJS builtins that trampoline into the generated code.
     JSObject* obj = CreateExportObject(cx, moduleObj);
     if (!obj)
         return false;
 
     args.rval().set(ObjectValue(*obj));
     return true;
@@ -1247,17 +1135,17 @@ js::IsAsmJSModuleLoadedFromCache(JSConte
     JSFunction* fun;
     if (!args.hasDefined(0) || !IsMaybeWrappedNativeFunction(args[0], LinkAsmJS, &fun)) {
         JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_USE_ASM_TYPE_FAIL,
                              "argument passed to isAsmJSModuleLoadedFromCache is not a "
                              "validated asm.js module");
         return false;
     }
 
-    bool loadedFromCache = ModuleFunctionToModuleObject(fun).module().loadedFromCache();
+    bool loadedFromCache = ModuleFunctionToModuleObject(fun).module().wasm().loadedFromCache();
 
     args.rval().set(BooleanValue(loadedFromCache));
     return true;
 }
 
 bool
 js::IsAsmJSFunction(JSContext* cx, unsigned argc, Value* vp)
 {
@@ -1272,17 +1160,17 @@ js::IsAsmJSFunction(HandleFunction fun)
 {
     return fun->isNative() && fun->maybeNative() == CallAsmJS;
 }
 
 JSString*
 js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun)
 {
     AsmJSModule& module = FunctionToEnclosingModule(fun);
-    const AsmJSModule::ExportedFunction& f = FunctionToExportedFunction(fun, module);
+    const AsmJSModule::Export& f = module.exports()[FunctionToExportIndex(fun)];
     uint32_t begin = module.srcStart() + f.startOffsetInModule();
     uint32_t end = module.srcStart() + f.endOffsetInModule();
 
     ScriptSource* source = module.scriptSource();
     StringBuffer out(cx);
 
     if (!out.append("function "))
         return nullptr;
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -13,962 +13,132 @@
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #include "asmjs/AsmJSModule.h"
 
-#include "mozilla/BinarySearch.h"
 #include "mozilla/Compression.h"
 #include "mozilla/EnumeratedRange.h"
 #include "mozilla/PodOperations.h"
-#include "mozilla/TaggedAnonymousMemory.h"
-#include "mozilla/Vector.h"
 
-#include "jslibmath.h"
-#include "jsmath.h"
 #include "jsprf.h"
 
-#include "builtin/AtomicsObject.h"
+#include "asmjs/WasmSerialize.h"
 #include "frontend/Parser.h"
-#include "jit/IonCode.h"
-#ifdef JS_ION_PERF
-# include "jit/PerfSpewer.h"
-#endif
 #include "js/Class.h"
-#include "js/Conversions.h"
 #include "js/MemoryMetrics.h"
-#include "vm/Time.h"
 
 #include "jsobjinlines.h"
 
 #include "frontend/ParseNode-inl.h"
-#include "jit/MacroAssembler-inl.h"
-#include "vm/ArrayBufferObject-inl.h"
-#include "vm/Stack-inl.h"
 
 using namespace js;
+using namespace js::frontend;
 using namespace js::jit;
 using namespace js::wasm;
-using namespace js::frontend;
-using mozilla::BinarySearch;
-using mozilla::Compression::LZ4;
-using mozilla::MakeEnumeratedRange;
-using mozilla::MallocSizeOf;
-using mozilla::PodCopy;
+using mozilla::PodZero;
 using mozilla::PodEqual;
-using mozilla::PodZero;
-using mozilla::Swap;
-using JS::GenericNaN;
-
-static uint8_t*
-AllocateExecutableMemory(ExclusiveContext* cx, size_t bytes)
-{
-    // On most platforms, this will allocate RWX memory. On iOS, or when
-    // --non-writable-jitcode is used, this will allocate RW memory. In this
-    // case, DynamicallyLinkModule will reprotect the code as RX.
-    unsigned permissions =
-        ExecutableAllocator::initialProtectionFlags(ExecutableAllocator::Writable);
-    void* p = AllocateExecutableMemory(nullptr, bytes, permissions, "asm-js-code", AsmJSPageSize);
-    if (!p)
-        ReportOutOfMemory(cx);
-    return (uint8_t*)p;
-}
+using mozilla::Compression::LZ4;
 
 AsmJSModule::AsmJSModule(ScriptSource* scriptSource, uint32_t srcStart, uint32_t srcBodyStart,
-                         bool strict, bool canUseSignalHandlers)
-  : srcStart_(srcStart),
+                         bool strict)
+  : scriptSource_(scriptSource),
+    srcStart_(srcStart),
     srcBodyStart_(srcBodyStart),
-    scriptSource_(scriptSource),
     globalArgumentName_(nullptr),
     importArgumentName_(nullptr),
-    bufferArgumentName_(nullptr),
-    code_(nullptr),
-    interruptExit_(nullptr),
-    prevLinked_(nullptr),
-    nextLinked_(nullptr),
-    dynamicallyLinked_(false),
-    loadedFromCache_(false),
-    profilingEnabled_(false),
-    interrupted_(false)
+    bufferArgumentName_(nullptr)
 {
     mozilla::PodZero(&pod);
-    pod.globalBytes_ = sInitialGlobalDataBytes;
     pod.minHeapLength_ = RoundUpToNextValidAsmJSHeapLength(0);
     pod.maxHeapLength_ = 0x80000000;
     pod.strict_ = strict;
-    pod.canUseSignalHandlers_ = canUseSignalHandlers;
 
     // AsmJSCheckedImmediateRange should be defined to be at most the minimum
     // heap length so that offsets can be folded into bounds checks.
     MOZ_ASSERT(pod.minHeapLength_ - AsmJSCheckedImmediateRange <= pod.minHeapLength_);
-
-    scriptSource_->incref();
-}
-
-AsmJSModule::~AsmJSModule()
-{
-    MOZ_ASSERT(!interrupted_);
-
-    scriptSource_->decref();
-
-    if (code_) {
-        for (unsigned i = 0; i < numExits(); i++) {
-            AsmJSModule::ExitDatum& exitDatum = exit(i).datum(*this);
-            if (!exitDatum.baselineScript)
-                continue;
-
-            jit::DependentAsmJSModuleExit exit(this, i);
-            exitDatum.baselineScript->removeDependentAsmJSModule(exit);
-        }
-
-        DeallocateExecutableMemory(code_, pod.totalBytes_, AsmJSPageSize);
-    }
-
-    if (prevLinked_)
-        *prevLinked_ = nextLinked_;
-    if (nextLinked_)
-        nextLinked_->prevLinked_ = prevLinked_;
 }
 
 void
 AsmJSModule::trace(JSTracer* trc)
 {
+    if (wasm_)
+        wasm_->trace(trc);
     for (Global& global : globals_)
         global.trace(trc);
-    for (Exit& exit : exits_) {
-        if (exit.datum(*this).fun)
-            TraceEdge(trc, &exit.datum(*this).fun, "asm.js imported function");
-    }
-    for (ExportedFunction& exp : exports_)
+    for (Export& exp : exports_)
         exp.trace(trc);
-    for (Name& name : names_)
-        TraceManuallyBarrieredEdge(trc, &name.name(), "asm.js module function name");
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-    for (ProfiledFunction& profiledFunction : profiledFunctions_)
-        profiledFunction.trace(trc);
-#endif
     if (globalArgumentName_)
         TraceManuallyBarrieredEdge(trc, &globalArgumentName_, "asm.js global argument name");
     if (importArgumentName_)
         TraceManuallyBarrieredEdge(trc, &importArgumentName_, "asm.js import argument name");
     if (bufferArgumentName_)
         TraceManuallyBarrieredEdge(trc, &bufferArgumentName_, "asm.js buffer argument name");
-    if (maybeHeap_)
-        TraceEdge(trc, &maybeHeap_, "asm.js heap");
 }
 
 void
 AsmJSModule::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
                            size_t* asmJSModuleData)
 {
-    *asmJSModuleCode += pod.totalBytes_;
+    if (wasm_)
+        wasm_->addSizeOfMisc(mallocSizeOf, asmJSModuleCode, asmJSModuleData);
+
+    if (linkData_)
+        *asmJSModuleData += linkData_->sizeOfExcludingThis(mallocSizeOf);
+
     *asmJSModuleData += mallocSizeOf(this) +
                         globals_.sizeOfExcludingThis(mallocSizeOf) +
-                        exits_.sizeOfExcludingThis(mallocSizeOf) +
-                        exports_.sizeOfExcludingThis(mallocSizeOf) +
-                        callSites_.sizeOfExcludingThis(mallocSizeOf) +
-                        codeRanges_.sizeOfExcludingThis(mallocSizeOf) +
-                        names_.sizeOfExcludingThis(mallocSizeOf) +
-                        heapAccesses_.sizeOfExcludingThis(mallocSizeOf) +
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-                        profiledFunctions_.sizeOfExcludingThis(mallocSizeOf) +
-#endif
-                        staticLinkData_.sizeOfExcludingThis(mallocSizeOf);
-}
-
-struct CallSiteRetAddrOffset
-{
-    const CallSiteVector& callSites;
-    explicit CallSiteRetAddrOffset(const CallSiteVector& callSites) : callSites(callSites) {}
-    uint32_t operator[](size_t index) const {
-        return callSites[index].returnAddressOffset();
-    }
-};
-
-const CallSite*
-AsmJSModule::lookupCallSite(void* returnAddress) const
-{
-    MOZ_ASSERT(isFinished());
-
-    uint32_t target = ((uint8_t*)returnAddress) - code_;
-    size_t lowerBound = 0;
-    size_t upperBound = callSites_.length();
-
-    size_t match;
-    if (!BinarySearch(CallSiteRetAddrOffset(callSites_), lowerBound, upperBound, target, &match))
-        return nullptr;
-
-    return &callSites_[match];
+                        imports_.sizeOfExcludingThis(mallocSizeOf) +
+                        exports_.sizeOfExcludingThis(mallocSizeOf);
 }
 
-namespace js {
-
-// Create an ordering on CodeRange and pc offsets suitable for BinarySearch.
-// Stick these in the same namespace as AsmJSModule so that argument-dependent
-// lookup will find it.
-bool
-operator==(size_t pcOffset, const AsmJSModule::CodeRange& rhs)
-{
-    return pcOffset >= rhs.begin() && pcOffset < rhs.end();
-}
-bool
-operator<=(const AsmJSModule::CodeRange& lhs, const AsmJSModule::CodeRange& rhs)
-{
-    return lhs.begin() <= rhs.begin();
-}
-bool
-operator<(size_t pcOffset, const AsmJSModule::CodeRange& rhs)
-{
-    return pcOffset < rhs.begin();
-}
-
-} // namespace js
-
-const AsmJSModule::CodeRange*
-AsmJSModule::lookupCodeRange(void* pc) const
-{
-    MOZ_ASSERT(isFinished());
-
-    uint32_t target = ((uint8_t*)pc) - code_;
-    size_t lowerBound = 0;
-    size_t upperBound = codeRanges_.length();
-
-    size_t match;
-    if (!BinarySearch(codeRanges_, lowerBound, upperBound, target, &match))
-        return nullptr;
-
-    return &codeRanges_[match];
-}
-
-struct HeapAccessOffset
-{
-    const HeapAccessVector& accesses;
-    explicit HeapAccessOffset(const HeapAccessVector& accesses) : accesses(accesses) {}
-    uintptr_t operator[](size_t index) const {
-        return accesses[index].insnOffset();
-    }
-};
-
-const HeapAccess*
-AsmJSModule::lookupHeapAccess(void* pc) const
-{
-    MOZ_ASSERT(isFinished());
-    MOZ_ASSERT(containsFunctionPC(pc));
-
-    uint32_t target = ((uint8_t*)pc) - code_;
-    size_t lowerBound = 0;
-    size_t upperBound = heapAccesses_.length();
-
-    size_t match;
-    if (!BinarySearch(HeapAccessOffset(heapAccesses_), lowerBound, upperBound, target, &match))
-        return nullptr;
-
-    return &heapAccesses_[match];
-}
-
-bool
-AsmJSModule::finish(ExclusiveContext* cx, TokenStream& tokenStream, MacroAssembler& masm)
+void
+AsmJSModule::finish(Module* wasm, wasm::UniqueStaticLinkData linkData,
+                    uint32_t endBeforeCurly, uint32_t endAfterCurly)
 {
     MOZ_ASSERT(!isFinished());
 
-    uint32_t endBeforeCurly = tokenStream.currentToken().pos.end;
-    TokenPos pos;
-    if (!tokenStream.peekTokenPos(&pos, TokenStream::Operand))
-        return false;
-    uint32_t endAfterCurly = pos.end;
+    wasm_.reset(wasm);
+    linkData_ = Move(linkData);
+
     MOZ_ASSERT(endBeforeCurly >= srcBodyStart_);
     MOZ_ASSERT(endAfterCurly >= srcBodyStart_);
     pod.srcLength_ = endBeforeCurly - srcStart_;
     pod.srcLengthWithRightBrace_ = endAfterCurly - srcStart_;
 
-    // Start global data on a new page so JIT code may be given independent
-    // protection flags.
-    pod.codeBytes_ = AlignBytes(masm.bytesNeeded(), AsmJSPageSize);
-    MOZ_ASSERT(pod.functionBytes_ <= pod.codeBytes_);
-
-    // The entire region is allocated via mmap/VirtualAlloc which requires
-    // units of pages.
-    pod.totalBytes_ = AlignBytes(pod.codeBytes_ + pod.globalBytes_, AsmJSPageSize);
-
-    MOZ_ASSERT(!code_);
-    code_ = AllocateExecutableMemory(cx, pod.totalBytes_);
-    if (!code_)
-        return false;
-
-    // Delay flushing until dynamic linking. The flush-inhibited range is set within
-    // masm.executableCopy.
-    AutoFlushICache afc("CheckModule", /* inhibit = */ true);
-
-    // Copy the code from the MacroAssembler into its final resting place in the
-    // AsmJSModule.
-    MOZ_ASSERT(uintptr_t(code_) % AsmJSPageSize == 0);
-    masm.executableCopy(code_);
-
-    // c.f. JitCode::copyFrom
-    MOZ_ASSERT(masm.jumpRelocationTableBytes() == 0);
-    MOZ_ASSERT(masm.dataRelocationTableBytes() == 0);
-    MOZ_ASSERT(masm.preBarrierTableBytes() == 0);
-    MOZ_ASSERT(!masm.hasSelfReference());
-
-    // Heap-access metadata used for link-time patching and fault-handling.
-    heapAccesses_ = masm.extractHeapAccesses();
-
-    // Call-site metadata used for stack unwinding.
-    const CallSiteAndTargetVector& callSites = masm.callSites();
-    if (!callSites_.appendAll(callSites))
-        return false;
-
-    // Absolute link metadata: absolute addresses that refer to some fixed
-    // address in the address space.
-    AbsoluteLinkArray& absoluteLinks = staticLinkData_.absoluteLinks;
-    for (size_t i = 0; i < masm.numAsmJSAbsoluteLinks(); i++) {
-        AsmJSAbsoluteLink src = masm.asmJSAbsoluteLink(i);
-        if (!absoluteLinks[src.target].append(src.patchAt.offset()))
-            return false;
-    }
-
-    // Relative link metadata: absolute addresses that refer to another point within
-    // the asm.js module.
-
-    // CodeLabels are used for switch cases and loads from floating-point /
-    // SIMD values in the constant pool.
-    for (size_t i = 0; i < masm.numCodeLabels(); i++) {
-        CodeLabel cl = masm.codeLabel(i);
-        RelativeLink link(RelativeLink::CodeLabel);
-        link.patchAtOffset = masm.labelToPatchOffset(*cl.patchAt());
-        link.targetOffset = cl.target()->offset();
-        if (!staticLinkData_.relativeLinks.append(link))
-            return false;
-    }
-
-#if defined(JS_CODEGEN_X86)
-    // Global data accesses in x86 need to be patched with the absolute
-    // address of the global. Globals are allocated sequentially after the
-    // code section so we can just use an RelativeLink.
-    for (size_t i = 0; i < masm.numAsmJSGlobalAccesses(); i++) {
-        AsmJSGlobalAccess a = masm.asmJSGlobalAccess(i);
-        RelativeLink link(RelativeLink::RawPointer);
-        link.patchAtOffset = masm.labelToPatchOffset(a.patchAt);
-        link.targetOffset = offsetOfGlobalData() + a.globalDataOffset;
-        if (!staticLinkData_.relativeLinks.append(link))
-            return false;
-    }
-#endif
-
-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-    // On MIPS we need to update all the long jumps because they contain an
-    // absolute adress. The values are correctly patched for the current address
-    // space, but not after serialization or profiling-mode toggling.
-    for (size_t i = 0; i < masm.numLongJumps(); i++) {
-        size_t off = masm.longJump(i);
-        RelativeLink link(RelativeLink::InstructionImmediate);
-        link.patchAtOffset = off;
-        link.targetOffset = Assembler::ExtractInstructionImmediate(code_ + off) - uintptr_t(code_);
-        if (!staticLinkData_.relativeLinks.append(link))
-            return false;
-    }
-#endif
-
-#if defined(JS_CODEGEN_X64)
-    // Global data accesses on x64 use rip-relative addressing and thus do
-    // not need patching after deserialization.
-    for (size_t i = 0; i < masm.numAsmJSGlobalAccesses(); i++) {
-        AsmJSGlobalAccess a = masm.asmJSGlobalAccess(i);
-        masm.patchAsmJSGlobalAccess(a.patchAt, code_, globalData(), a.globalDataOffset);
-    }
-#endif
-
-    return true;
-}
-
-void
-AsmJSModule::setAutoFlushICacheRange()
-{
     MOZ_ASSERT(isFinished());
-    AutoFlushICache::setRange(uintptr_t(code_), pod.codeBytes_);
-}
-
-static void
-AsmJSReportOverRecursed()
-{
-    JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
-    ReportOverRecursed(cx);
-}
-
-static void
-OnDetached()
-{
-    // See hasDetachedHeap comment in LinkAsmJS.
-    JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
-    JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_OUT_OF_MEMORY);
-}
-
-static void
-OnOutOfBounds()
-{
-    JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
-    JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
-}
-
-static void
-OnImpreciseConversion()
-{
-    JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
-    JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_SIMD_FAILED_CONVERSION);
-}
-
-static bool
-AsmJSHandleExecutionInterrupt()
-{
-    AsmJSActivation* act = JSRuntime::innermostAsmJSActivation();
-    act->module().setInterrupted(true);
-    bool ret = CheckForInterrupt(act->cx());
-    act->module().setInterrupted(false);
-    return ret;
-}
-
-static int32_t
-CoerceInPlace_ToInt32(MutableHandleValue val)
-{
-    JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
-
-    int32_t i32;
-    if (!ToInt32(cx, val, &i32))
-        return false;
-    val.set(Int32Value(i32));
-
-    return true;
-}
-
-static int32_t
-CoerceInPlace_ToNumber(MutableHandleValue val)
-{
-    JSContext* cx = JSRuntime::innermostAsmJSActivation()->cx();
-
-    double dbl;
-    if (!ToNumber(cx, val, &dbl))
-        return false;
-    val.set(DoubleValue(dbl));
-
-    return true;
-}
-
-static bool
-TryEnablingJit(JSContext* cx, AsmJSModule& module, HandleFunction fun, uint32_t exitIndex,
-               int32_t argc, Value* argv)
-{
-    if (!fun->hasScript())
-        return true;
-
-    // Test if the function is JIT compiled.
-    JSScript* script = fun->nonLazyScript();
-    if (!script->hasBaselineScript()) {
-        MOZ_ASSERT(!script->hasIonScript());
-        return true;
-    }
-
-    // Don't enable jit entry when we have a pending ion builder.
-    // Take the interpreter path which will link it and enable
-    // the fast path on the next call.
-    if (script->baselineScript()->hasPendingIonBuilder())
-        return true;
-
-    // Currently we can't rectify arguments. Therefore disabling if argc is too low.
-    if (fun->nargs() > size_t(argc))
-        return true;
-
-    // Ensure the argument types are included in the argument TypeSets stored in
-    // the TypeScript. This is necessary for Ion, because the FFI exit will
-    // use the skip-arg-checks entry point.
-    //
-    // Note that the TypeScript is never discarded while the script has a
-    // BaselineScript, so if those checks hold now they must hold at least until
-    // the BaselineScript is discarded and when that happens the FFI exit is
-    // patched back.
-    if (!TypeScript::ThisTypes(script)->hasType(TypeSet::UndefinedType()))
-        return true;
-    for (uint32_t i = 0; i < fun->nargs(); i++) {
-        StackTypeSet* typeset = TypeScript::ArgTypes(script, i);
-        TypeSet::Type type = TypeSet::DoubleType();
-        if (!argv[i].isDouble())
-            type = TypeSet::PrimitiveType(argv[i].extractNonDoubleType());
-        if (!typeset->hasType(type))
-            return true;
-    }
-
-    // The exit may have become optimized while executing the FFI.
-    AsmJSModule::Exit& exit = module.exit(exitIndex);
-    if (exit.isOptimized(module))
-        return true;
-
-    BaselineScript* baselineScript = script->baselineScript();
-    if (!baselineScript->addDependentAsmJSModule(cx, DependentAsmJSModuleExit(&module, exitIndex)))
-        return false;
-
-    exit.optimize(module, baselineScript);
-    return true;
-}
-
-static bool
-InvokeFromAsmJS(AsmJSActivation* activation, int32_t exitIndex, int32_t argc, Value* argv,
-                MutableHandleValue rval)
-{
-    JSContext* cx = activation->cx();
-    AsmJSModule& module = activation->module();
-
-    RootedFunction fun(cx, module.exit(exitIndex).datum(module).fun);
-    RootedValue fval(cx, ObjectValue(*fun));
-    if (!Invoke(cx, UndefinedValue(), fval, argc, argv, rval))
-        return false;
-
-    return TryEnablingJit(cx, module, fun, exitIndex, argc, argv);
-}
-
-// Use an int32_t return type instead of bool since bool does not have a
-// specified width and the caller is assuming a word-sized return.
-static int32_t
-InvokeFromAsmJS_Ignore(int32_t exitIndex, int32_t argc, Value* argv)
-{
-    AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
-    JSContext* cx = activation->cx();
-
-    RootedValue rval(cx);
-    return InvokeFromAsmJS(activation, exitIndex, argc, argv, &rval);
-}
-
-// Use an int32_t return type instead of bool since bool does not have a
-// specified width and the caller is assuming a word-sized return.
-static int32_t
-InvokeFromAsmJS_ToInt32(int32_t exitIndex, int32_t argc, Value* argv)
-{
-    AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
-    JSContext* cx = activation->cx();
-
-    RootedValue rval(cx);
-    if (!InvokeFromAsmJS(activation, exitIndex, argc, argv, &rval))
-        return false;
-
-    int32_t i32;
-    if (!ToInt32(cx, rval, &i32))
-        return false;
-
-    argv[0] = Int32Value(i32);
-    return true;
-}
-
-// Use an int32_t return type instead of bool since bool does not have a
-// specified width and the caller is assuming a word-sized return.
-static int32_t
-InvokeFromAsmJS_ToNumber(int32_t exitIndex, int32_t argc, Value* argv)
-{
-    AsmJSActivation* activation = JSRuntime::innermostAsmJSActivation();
-    JSContext* cx = activation->cx();
-
-    RootedValue rval(cx);
-    if (!InvokeFromAsmJS(activation, exitIndex, argc, argv, &rval))
-        return false;
-
-    double dbl;
-    if (!ToNumber(cx, rval, &dbl))
-        return false;
-
-    argv[0] = DoubleValue(dbl);
-    return true;
-}
-
-#if defined(JS_CODEGEN_ARM)
-extern "C" {
-
-extern MOZ_EXPORT int64_t
-__aeabi_idivmod(int, int);
-
-extern MOZ_EXPORT int64_t
-__aeabi_uidivmod(int, int);
-
-}
-#endif
-
-template <class F>
-static inline void*
-FuncCast(F* pf)
-{
-    return JS_FUNC_TO_DATA_PTR(void*, pf);
-}
-
-static void*
-RedirectCall(void* fun, ABIFunctionType type)
-{
-#ifdef JS_SIMULATOR
-    fun = Simulator::RedirectNativeFunction(fun, type);
-#endif
-    return fun;
-}
-
-static void*
-AddressOf(SymbolicAddress imm, ExclusiveContext* cx)
-{
-    switch (imm) {
-      case SymbolicAddress::Runtime:
-        return cx->runtimeAddressForJit();
-      case SymbolicAddress::RuntimeInterruptUint32:
-        return cx->runtimeAddressOfInterruptUint32();
-      case SymbolicAddress::StackLimit:
-        return cx->stackLimitAddressForJitCode(StackForUntrustedScript);
-      case SymbolicAddress::ReportOverRecursed:
-        return RedirectCall(FuncCast(AsmJSReportOverRecursed), Args_General0);
-      case SymbolicAddress::OnDetached:
-        return RedirectCall(FuncCast(OnDetached), Args_General0);
-      case SymbolicAddress::OnOutOfBounds:
-        return RedirectCall(FuncCast(OnOutOfBounds), Args_General0);
-      case SymbolicAddress::OnImpreciseConversion:
-        return RedirectCall(FuncCast(OnImpreciseConversion), Args_General0);
-      case SymbolicAddress::HandleExecutionInterrupt:
-        return RedirectCall(FuncCast(AsmJSHandleExecutionInterrupt), Args_General0);
-      case SymbolicAddress::InvokeFromAsmJS_Ignore:
-        return RedirectCall(FuncCast(InvokeFromAsmJS_Ignore), Args_General3);
-      case SymbolicAddress::InvokeFromAsmJS_ToInt32:
-        return RedirectCall(FuncCast(InvokeFromAsmJS_ToInt32), Args_General3);
-      case SymbolicAddress::InvokeFromAsmJS_ToNumber:
-        return RedirectCall(FuncCast(InvokeFromAsmJS_ToNumber), Args_General3);
-      case SymbolicAddress::CoerceInPlace_ToInt32:
-        return RedirectCall(FuncCast(CoerceInPlace_ToInt32), Args_General1);
-      case SymbolicAddress::CoerceInPlace_ToNumber:
-        return RedirectCall(FuncCast(CoerceInPlace_ToNumber), Args_General1);
-      case SymbolicAddress::ToInt32:
-        return RedirectCall(FuncCast<int32_t (double)>(JS::ToInt32), Args_Int_Double);
-#if defined(JS_CODEGEN_ARM)
-      case SymbolicAddress::aeabi_idivmod:
-        return RedirectCall(FuncCast(__aeabi_idivmod), Args_General2);
-      case SymbolicAddress::aeabi_uidivmod:
-        return RedirectCall(FuncCast(__aeabi_uidivmod), Args_General2);
-      case SymbolicAddress::AtomicCmpXchg:
-        return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t, int32_t)>(js::atomics_cmpxchg_asm_callout), Args_General4);
-      case SymbolicAddress::AtomicXchg:
-        return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_xchg_asm_callout), Args_General3);
-      case SymbolicAddress::AtomicFetchAdd:
-        return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_add_asm_callout), Args_General3);
-      case SymbolicAddress::AtomicFetchSub:
-        return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_sub_asm_callout), Args_General3);
-      case SymbolicAddress::AtomicFetchAnd:
-        return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_and_asm_callout), Args_General3);
-      case SymbolicAddress::AtomicFetchOr:
-        return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_or_asm_callout), Args_General3);
-      case SymbolicAddress::AtomicFetchXor:
-        return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_xor_asm_callout), Args_General3);
-#endif
-      case SymbolicAddress::ModD:
-        return RedirectCall(FuncCast(NumberMod), Args_Double_DoubleDouble);
-      case SymbolicAddress::SinD:
-#ifdef _WIN64
-        // Workaround a VS 2013 sin issue, see math_sin_uncached.
-        return RedirectCall(FuncCast<double (double)>(js::math_sin_uncached), Args_Double_Double);
-#else
-        return RedirectCall(FuncCast<double (double)>(sin), Args_Double_Double);
-#endif
-      case SymbolicAddress::CosD:
-        return RedirectCall(FuncCast<double (double)>(cos), Args_Double_Double);
-      case SymbolicAddress::TanD:
-        return RedirectCall(FuncCast<double (double)>(tan), Args_Double_Double);
-      case SymbolicAddress::ASinD:
-        return RedirectCall(FuncCast<double (double)>(asin), Args_Double_Double);
-      case SymbolicAddress::ACosD:
-        return RedirectCall(FuncCast<double (double)>(acos), Args_Double_Double);
-      case SymbolicAddress::ATanD:
-        return RedirectCall(FuncCast<double (double)>(atan), Args_Double_Double);
-      case SymbolicAddress::CeilD:
-        return RedirectCall(FuncCast<double (double)>(ceil), Args_Double_Double);
-      case SymbolicAddress::CeilF:
-        return RedirectCall(FuncCast<float (float)>(ceilf), Args_Float32_Float32);
-      case SymbolicAddress::FloorD:
-        return RedirectCall(FuncCast<double (double)>(floor), Args_Double_Double);
-      case SymbolicAddress::FloorF:
-        return RedirectCall(FuncCast<float (float)>(floorf), Args_Float32_Float32);
-      case SymbolicAddress::ExpD:
-        return RedirectCall(FuncCast<double (double)>(exp), Args_Double_Double);
-      case SymbolicAddress::LogD:
-        return RedirectCall(FuncCast<double (double)>(log), Args_Double_Double);
-      case SymbolicAddress::PowD:
-        return RedirectCall(FuncCast(ecmaPow), Args_Double_DoubleDouble);
-      case SymbolicAddress::ATan2D:
-        return RedirectCall(FuncCast(ecmaAtan2), Args_Double_DoubleDouble);
-      case SymbolicAddress::Limit:
-        break;
-    }
-
-    MOZ_CRASH("Bad SymbolicAddress");
-}
-
-void
-AsmJSModule::staticallyLink(ExclusiveContext* cx)
-{
-    MOZ_ASSERT(isFinished());
-
-    // Process staticLinkData_
-
-    MOZ_ASSERT(staticLinkData_.pod.interruptExitOffset != 0);
-    interruptExit_ = code_ + staticLinkData_.pod.interruptExitOffset;
-
-    MOZ_ASSERT(staticLinkData_.pod.outOfBoundsExitOffset != 0);
-    outOfBoundsExit_ = code_ + staticLinkData_.pod.outOfBoundsExitOffset;
-
-    for (size_t i = 0; i < staticLinkData_.relativeLinks.length(); i++) {
-        RelativeLink link = staticLinkData_.relativeLinks[i];
-        uint8_t* patchAt = code_ + link.patchAtOffset;
-        uint8_t* target = code_ + link.targetOffset;
-
-        // In the case of long-jumps on MIPS and possibly future cases, a
-        // RelativeLink is used to patch a pointer to the function entry. If
-        // profiling is enabled (by cloning a module with profiling enabled),
-        // the target should be the profiling entry.
-        if (profilingEnabled_) {
-            const CodeRange* codeRange = lookupCodeRange(target);
-            if (codeRange && codeRange->isFunction() && link.targetOffset == codeRange->entry())
-                target = code_ + codeRange->profilingEntry();
-        }
-
-        if (link.isRawPointerPatch())
-            *(uint8_t**)(patchAt) = target;
-        else
-            Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
-    }
-
-    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
-        const OffsetVector& offsets = staticLinkData_.absoluteLinks[imm];
-        for (size_t i = 0; i < offsets.length(); i++) {
-            uint8_t* patchAt = code_ + offsets[i];
-            void* target = AddressOf(imm, cx);
-
-            // Builtin calls are another case where, when profiling is enabled,
-            // we must point to the profiling entry.
-            Builtin builtin;
-            if (profilingEnabled_ && ImmediateIsBuiltin(imm, &builtin)) {
-                const CodeRange* codeRange = lookupCodeRange(patchAt);
-                if (codeRange->isFunction())
-                    target = code_ + staticLinkData_.pod.builtinThunkOffsets[builtin];
-            }
-
-            Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
-                                               PatchedImmPtr(target),
-                                               PatchedImmPtr((void*)-1));
-        }
-    }
-
-    // Initialize global data segment
-
-    *(double*)(globalData() + NaN64GlobalDataOffset) = GenericNaN();
-    *(float*)(globalData() + NaN32GlobalDataOffset) = GenericNaN();
-
-    for (size_t tableIndex = 0; tableIndex < staticLinkData_.funcPtrTables.length(); tableIndex++) {
-        FuncPtrTable& funcPtrTable = staticLinkData_.funcPtrTables[tableIndex];
-        const OffsetVector& offsets = funcPtrTable.elemOffsets();
-        auto array = reinterpret_cast<void**>(globalData() + funcPtrTable.globalDataOffset());
-        for (size_t elemIndex = 0; elemIndex < offsets.length(); elemIndex++) {
-            uint8_t* target = code_ + offsets[elemIndex];
-            if (profilingEnabled_)
-                target = code_ + lookupCodeRange(target)->profilingEntry();
-            array[elemIndex] = target;
-        }
-    }
-
-    for (AsmJSModule::Exit& exit : exits_)
-        exit.initDatum(*this);
-}
-
-void
-AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared*> heap, JSContext* cx)
-{
-    MOZ_ASSERT_IF(heap->is<ArrayBufferObject>(), heap->as<ArrayBufferObject>().isAsmJS());
-    MOZ_ASSERT(IsValidAsmJSHeapLength(heap->byteLength()));
-    MOZ_ASSERT(dynamicallyLinked_);
-    MOZ_ASSERT(!maybeHeap_);
-
-    maybeHeap_ = heap;
-    // heapDatum() may point to shared memory but that memory is only
-    // accessed from maybeHeap(), which wraps it, and from
-    // hasDetachedHeap(), which checks it for null.
-    heapDatum() = heap->dataPointerEither().unwrap(/*safe - explained above*/);
-
-#if defined(JS_CODEGEN_X86)
-    uint8_t* heapOffset = heap->dataPointerEither().unwrap(/*safe - used for value*/);
-    uint32_t heapLength = heap->byteLength();
-    for (unsigned i = 0; i < heapAccesses_.length(); i++) {
-        const HeapAccess& access = heapAccesses_[i];
-        // An access is out-of-bounds iff
-        //      ptr + offset + data-type-byte-size > heapLength
-        // i.e. ptr > heapLength - data-type-byte-size - offset.
-        // data-type-byte-size and offset are already included in the addend
-        // so we just have to add the heap length here.
-        if (access.hasLengthCheck())
-            X86Encoding::AddInt32(access.patchLengthAt(code_), heapLength);
-        void* addr = access.patchHeapPtrImmAt(code_);
-        uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
-        MOZ_ASSERT(disp <= INT32_MAX);
-        X86Encoding::SetPointer(addr, (void*)(heapOffset + disp));
-    }
-#elif defined(JS_CODEGEN_X64)
-    // Even with signal handling being used for most bounds checks, there may be
-    // atomic operations that depend on explicit checks.
-    //
-    // If we have any explicit bounds checks, we need to patch the heap length
-    // checks at the right places. All accesses that have been recorded are the
-    // only ones that need bound checks (see also
-    // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
-    uint32_t heapLength = heap->byteLength();
-    for (size_t i = 0; i < heapAccesses_.length(); i++) {
-        const HeapAccess& access = heapAccesses_[i];
-        // See comment above for x86 codegen.
-        if (access.hasLengthCheck())
-            X86Encoding::AddInt32(access.patchLengthAt(code_), heapLength);
-    }
-#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-    uint32_t heapLength = heap->byteLength();
-    for (unsigned i = 0; i < heapAccesses_.length(); i++) {
-        jit::Assembler::UpdateBoundsCheck(heapLength,
-                                          (jit::Instruction*)(heapAccesses_[i].insnOffset() + code_));
-    }
-#endif
-}
-
-void
-AsmJSModule::restoreHeapToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer)
-{
-#if defined(JS_CODEGEN_X86)
-    if (maybePrevBuffer) {
-        // Subtract out the base-pointer added by AsmJSModule::initHeap.
-        uint8_t* ptrBase = maybePrevBuffer->dataPointerEither().unwrap(/*safe - used for value*/);
-        uint32_t heapLength = maybePrevBuffer->byteLength();
-        for (unsigned i = 0; i < heapAccesses_.length(); i++) {
-            const HeapAccess& access = heapAccesses_[i];
-            // Subtract the heap length back out, leaving the raw displacement in place.
-            if (access.hasLengthCheck())
-                X86Encoding::AddInt32(access.patchLengthAt(code_), -heapLength);
-            void* addr = access.patchHeapPtrImmAt(code_);
-            uint8_t* ptr = reinterpret_cast<uint8_t*>(X86Encoding::GetPointer(addr));
-            MOZ_ASSERT(ptr >= ptrBase);
-            X86Encoding::SetPointer(addr, (void*)(ptr - ptrBase));
-        }
-    }
-#elif defined(JS_CODEGEN_X64)
-    if (maybePrevBuffer) {
-        uint32_t heapLength = maybePrevBuffer->byteLength();
-        for (unsigned i = 0; i < heapAccesses_.length(); i++) {
-            const HeapAccess& access = heapAccesses_[i];
-            // See comment above for x86 codegen.
-            if (access.hasLengthCheck())
-                X86Encoding::AddInt32(access.patchLengthAt(code_), -heapLength);
-        }
-    }
-#endif
-
-    maybeHeap_ = nullptr;
-    heapDatum() = nullptr;
-}
-
-void
-AsmJSModule::restoreToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer,
-                                   uint8_t* prevCode,
-                                   ExclusiveContext* cx)
-{
-#ifdef DEBUG
-    // Put the absolute links back to -1 so PatchDataWithValueCheck assertions
-    // in staticallyLink are valid.
-    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
-        void* callee = AddressOf(imm, cx);
-
-        // If we are in profiling mode, calls to builtins will have been patched
-        // by setProfilingEnabled to be calls to thunks.
-        Builtin builtin;
-        void* profilingCallee = profilingEnabled_ && ImmediateIsBuiltin(imm, &builtin)
-                                ? prevCode + staticLinkData_.pod.builtinThunkOffsets[builtin]
-                                : nullptr;
-
-        const AsmJSModule::OffsetVector& offsets = staticLinkData_.absoluteLinks[imm];
-        for (size_t i = 0; i < offsets.length(); i++) {
-            uint8_t* caller = code_ + offsets[i];
-            void* originalValue = profilingCallee && !lookupCodeRange(caller)->isThunk()
-                                  ? profilingCallee
-                                  : callee;
-            Assembler::PatchDataWithValueCheck(CodeLocationLabel(caller),
-                                               PatchedImmPtr((void*)-1),
-                                               PatchedImmPtr(originalValue));
-        }
-    }
-#endif
-
-    restoreHeapToInitialState(maybePrevBuffer);
-}
-
-namespace {
-
-class MOZ_STACK_CLASS AutoMutateCode
-{
-    AutoWritableJitCode awjc_;
-    AutoFlushICache afc_;
-
-   public:
-    AutoMutateCode(JSContext* cx, AsmJSModule& module, const char* name)
-      : awjc_(cx->runtime(), module.codeBase(), module.codeBytes()),
-        afc_(name)
-    {
-        module.setAutoFlushICacheRange();
-    }
-};
-
-} // namespace
-
-bool
-AsmJSModule::detachHeap(JSContext* cx)
-{
-    MOZ_ASSERT(isDynamicallyLinked());
-    MOZ_ASSERT(maybeHeap_);
-
-    // Content JS should not be able to run (and detach heap) from within an
-    // interrupt callback, but in case it does, fail. Otherwise, the heap can
-    // change at an arbitrary instruction and break the assumption below.
-    if (interrupted_) {
-        JS_ReportError(cx, "attempt to detach from inside interrupt handler");
-        return false;
-    }
-
-    // Even if this->active(), to reach here, the activation must have called
-    // out via an FFI stub. FFI stubs check if heapDatum() is null on reentry
-    // and throw an exception if so.
-    MOZ_ASSERT_IF(active(), activation()->exitReason().kind() == ExitReason::Jit ||
-                            activation()->exitReason().kind() == ExitReason::Slow);
-
-    AutoMutateCode amc(cx, *this, "AsmJSModule::detachHeap");
-    restoreHeapToInitialState(maybeHeap_);
-
-    MOZ_ASSERT(hasDetachedHeap());
-    return true;
 }
 
 bool
 js::OnDetachAsmJSArrayBuffer(JSContext* cx, Handle<ArrayBufferObject*> buffer)
 {
-    for (AsmJSModule* m = cx->runtime()->linkedAsmJSModules; m; m = m->nextLinked()) {
-        if (buffer == m->maybeHeapBufferObject() && !m->detachHeap(cx))
+    for (Module* m = cx->runtime()->linkedWasmModules; m; m = m->nextLinked()) {
+        if (buffer == m->maybeBuffer() && !m->detachHeap(cx))
             return false;
     }
     return true;
 }
 
 static void
 AsmJSModuleObject_finalize(FreeOp* fop, JSObject* obj)
 {
-    fop->delete_(&obj->as<AsmJSModuleObject>().module());
+    AsmJSModuleObject& moduleObj = obj->as<AsmJSModuleObject>();
+    if (moduleObj.hasModule())
+        fop->delete_(&moduleObj.module());
 }
 
 static void
 AsmJSModuleObject_trace(JSTracer* trc, JSObject* obj)
 {
-    obj->as<AsmJSModuleObject>().module().trace(trc);
+    AsmJSModuleObject& moduleObj = obj->as<AsmJSModuleObject>();
+    if (moduleObj.hasModule())
+        moduleObj.module().trace(trc);
 }
 
 const Class AsmJSModuleObject::class_ = {
     "AsmJSModuleObject",
     JSCLASS_IS_ANONYMOUS | JSCLASS_DELAY_METADATA_CALLBACK |
     JSCLASS_HAS_RESERVED_SLOTS(AsmJSModuleObject::RESERVED_SLOTS),
     nullptr, /* addProperty */
     nullptr, /* delProperty */
@@ -980,294 +150,48 @@ const Class AsmJSModuleObject::class_ = 
     AsmJSModuleObject_finalize,
     nullptr, /* call */
     nullptr, /* hasInstance */
     nullptr, /* construct */
     AsmJSModuleObject_trace
 };
 
 AsmJSModuleObject*
-AsmJSModuleObject::create(ExclusiveContext* cx, ScopedJSDeletePtr<AsmJSModule>* module)
+AsmJSModuleObject::create(ExclusiveContext* cx)
 {
     AutoSetNewObjectMetadata metadata(cx);
     JSObject* obj = NewObjectWithGivenProto(cx, &AsmJSModuleObject::class_, nullptr);
     if (!obj)
         return nullptr;
-    AsmJSModuleObject* nobj = &obj->as<AsmJSModuleObject>();
+    return &obj->as<AsmJSModuleObject>();
+}
 
-    nobj->setReservedSlot(MODULE_SLOT, PrivateValue(module->forget()));
+bool
+AsmJSModuleObject::hasModule() const
+{
+    MOZ_ASSERT(is<AsmJSModuleObject>());
+    return !getReservedSlot(MODULE_SLOT).isUndefined();
+}
 
-    return nobj;
+void
+AsmJSModuleObject::setModule(AsmJSModule* newModule)
+{
+    MOZ_ASSERT(is<AsmJSModuleObject>());
+    if (hasModule())
+        js_delete(&module());
+    setReservedSlot(MODULE_SLOT, PrivateValue(newModule));
 }
 
 AsmJSModule&
 AsmJSModuleObject::module() const
 {
     MOZ_ASSERT(is<AsmJSModuleObject>());
     return *(AsmJSModule*)getReservedSlot(MODULE_SLOT).toPrivate();
 }
 
-static inline uint8_t*
-WriteBytes(uint8_t* dst, const void* src, size_t nbytes)
-{
-    memcpy(dst, src, nbytes);
-    return dst + nbytes;
-}
-
-static inline const uint8_t*
-ReadBytes(const uint8_t* src, void* dst, size_t nbytes)
-{
-    memcpy(dst, src, nbytes);
-    return src + nbytes;
-}
-
-template <class T>
-static inline uint8_t*
-WriteScalar(uint8_t* dst, T t)
-{
-    memcpy(dst, &t, sizeof(t));
-    return dst + sizeof(t);
-}
-
-template <class T>
-static inline const uint8_t*
-ReadScalar(const uint8_t* src, T* dst)
-{
-    memcpy(dst, src, sizeof(*dst));
-    return src + sizeof(*dst);
-}
-
-static size_t
-SerializedNameSize(PropertyName* name)
-{
-    size_t s = sizeof(uint32_t);
-    if (name)
-        s += name->length() * (name->hasLatin1Chars() ? sizeof(Latin1Char) : sizeof(char16_t));
-    return s;
-}
-
-size_t
-AsmJSModule::Name::serializedSize() const
-{
-    return SerializedNameSize(name_);
-}
-
-static uint8_t*
-SerializeName(uint8_t* cursor, PropertyName* name)
-{
-    MOZ_ASSERT_IF(name, !name->empty());
-    if (name) {
-        static_assert(JSString::MAX_LENGTH <= INT32_MAX, "String length must fit in 31 bits");
-        uint32_t length = name->length();
-        uint32_t lengthAndEncoding = (length << 1) | uint32_t(name->hasLatin1Chars());
-        cursor = WriteScalar<uint32_t>(cursor, lengthAndEncoding);
-        JS::AutoCheckCannotGC nogc;
-        if (name->hasLatin1Chars())
-            cursor = WriteBytes(cursor, name->latin1Chars(nogc), length * sizeof(Latin1Char));
-        else
-            cursor = WriteBytes(cursor, name->twoByteChars(nogc), length * sizeof(char16_t));
-    } else {
-        cursor = WriteScalar<uint32_t>(cursor, 0);
-    }
-    return cursor;
-}
-
-uint8_t*
-AsmJSModule::Name::serialize(uint8_t* cursor) const
-{
-    return SerializeName(cursor, name_);
-}
-
-template <typename CharT>
-static const uint8_t*
-DeserializeChars(ExclusiveContext* cx, const uint8_t* cursor, size_t length, PropertyName** name)
-{
-    Vector<CharT> tmp(cx);
-    CharT* src;
-    if ((size_t(cursor) & (sizeof(CharT) - 1)) != 0) {
-        // Align 'src' for AtomizeChars.
-        if (!tmp.resize(length))
-            return nullptr;
-        memcpy(tmp.begin(), cursor, length * sizeof(CharT));
-        src = tmp.begin();
-    } else {
-        src = (CharT*)cursor;
-    }
-
-    JSAtom* atom = AtomizeChars(cx, src, length);
-    if (!atom)
-        return nullptr;
-
-    *name = atom->asPropertyName();
-    return cursor + length * sizeof(CharT);
-}
-
-static const uint8_t*
-DeserializeName(ExclusiveContext* cx, const uint8_t* cursor, PropertyName** name)
-{
-    uint32_t lengthAndEncoding;
-    cursor = ReadScalar<uint32_t>(cursor, &lengthAndEncoding);
-
-    uint32_t length = lengthAndEncoding >> 1;
-    if (length == 0) {
-        *name = nullptr;
-        return cursor;
-    }
-
-    bool latin1 = lengthAndEncoding & 0x1;
-    return latin1
-           ? DeserializeChars<Latin1Char>(cx, cursor, length, name)
-           : DeserializeChars<char16_t>(cx, cursor, length, name);
-}
-
-const uint8_t*
-AsmJSModule::Name::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
-    return DeserializeName(cx, cursor, &name_);
-}
-
-bool
-AsmJSModule::Name::clone(ExclusiveContext* cx, Name* out) const
-{
-    out->name_ = name_;
-    return true;
-}
-
-template <class T, size_t N>
-size_t
-SerializedVectorSize(const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
-{
-    size_t size = sizeof(uint32_t);
-    for (size_t i = 0; i < vec.length(); i++)
-        size += vec[i].serializedSize();
-    return size;
-}
-
-template <class T, size_t N>
-uint8_t*
-SerializeVector(uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
-{
-    cursor = WriteScalar<uint32_t>(cursor, vec.length());
-    for (size_t i = 0; i < vec.length(); i++)
-        cursor = vec[i].serialize(cursor);
-    return cursor;
-}
-
-template <class T, size_t N>
-const uint8_t*
-DeserializeVector(ExclusiveContext* cx, const uint8_t* cursor,
-                  mozilla::Vector<T, N, SystemAllocPolicy>* vec)
-{
-    uint32_t length;
-    cursor = ReadScalar<uint32_t>(cursor, &length);
-    if (!vec->resize(length))
-        return nullptr;
-    for (size_t i = 0; i < vec->length(); i++) {
-        if (!(cursor = (*vec)[i].deserialize(cx, cursor)))
-            return nullptr;
-    }
-    return cursor;
-}
-
-template <class T, size_t N>
-bool
-CloneVector(ExclusiveContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
-            mozilla::Vector<T, N, SystemAllocPolicy>* out)
-{
-    if (!out->resize(in.length()))
-        return false;
-    for (size_t i = 0; i < in.length(); i++) {
-        if (!in[i].clone(cx, &(*out)[i]))
-            return false;
-    }
-    return true;
-}
-
-template <class T, size_t N, class AllocPolicy>
-size_t
-SerializedPodVectorSize(const mozilla::Vector<T, N, AllocPolicy>& vec)
-{
-    return sizeof(uint32_t) +
-           vec.length() * sizeof(T);
-}
-
-template <class T, size_t N, class AllocPolicy>
-uint8_t*
-SerializePodVector(uint8_t* cursor, const mozilla::Vector<T, N, AllocPolicy>& vec)
-{
-    cursor = WriteScalar<uint32_t>(cursor, vec.length());
-    cursor = WriteBytes(cursor, vec.begin(), vec.length() * sizeof(T));
-    return cursor;
-}
-
-template <class T, size_t N, class AllocPolicy>
-const uint8_t*
-DeserializePodVector(ExclusiveContext* cx, const uint8_t* cursor,
-                     mozilla::Vector<T, N, AllocPolicy>* vec)
-{
-    uint32_t length;
-    cursor = ReadScalar<uint32_t>(cursor, &length);
-    if (!vec->resize(length))
-        return nullptr;
-    cursor = ReadBytes(cursor, vec->begin(), length * sizeof(T));
-    return cursor;
-}
-
-template <class T, size_t N>
-bool
-ClonePodVector(ExclusiveContext* cx, const mozilla::Vector<T, N, SystemAllocPolicy>& in,
-               mozilla::Vector<T, N, SystemAllocPolicy>* out)
-{
-    if (!out->resize(in.length()))
-        return false;
-    PodCopy(out->begin(), in.begin(), in.length());
-    return true;
-}
-
-size_t
-SerializedSigSize(const MallocSig& sig)
-{
-    return sizeof(ExprType) +
-           SerializedPodVectorSize(sig.args());
-}
-
-uint8_t*
-SerializeSig(uint8_t* cursor, const MallocSig& sig)
-{
-    cursor = WriteScalar<ExprType>(cursor, sig.ret());
-    cursor = SerializePodVector(cursor, sig.args());
-    return cursor;
-}
-
-const uint8_t*
-DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, MallocSig* sig)
-{
-    ExprType ret;
-    cursor = ReadScalar<ExprType>(cursor, &ret);
-
-    MallocSig::ArgVector args;
-    cursor = DeserializePodVector(cx, cursor, &args);
-    if (!cursor)
-        return nullptr;
-
-    sig->init(Move(args), ret);
-    return cursor;
-}
-
-bool
-CloneSig(ExclusiveContext* cx, const MallocSig& sig, MallocSig* out)
-{
-    MallocSig::ArgVector args;
-    if (!ClonePodVector(cx, sig.args(), &args))
-        return false;
-
-    out->init(Move(args), sig.ret());
-    return true;
-}
-
 uint8_t*
 AsmJSModule::Global::serialize(uint8_t* cursor) const
 {
     cursor = WriteBytes(cursor, &pod, sizeof(pod));
     cursor = SerializeName(cursor, name_);
     return cursor;
 }
 
@@ -1282,756 +206,144 @@ const uint8_t*
 AsmJSModule::Global::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
 {
     (cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
     (cursor = DeserializeName(cx, cursor, &name_));
     return cursor;
 }
 
 bool
-AsmJSModule::Global::clone(ExclusiveContext* cx, Global* out) const
+AsmJSModule::Global::clone(JSContext* cx, Global* out) const
 {
     *out = *this;
     return true;
 }
 
 uint8_t*
-AsmJSModule::Exit::serialize(uint8_t* cursor) const
+AsmJSModule::Export::serialize(uint8_t* cursor) const
 {
-    cursor = SerializeSig(cursor, sig_);
+    cursor = SerializeName(cursor, name_);
+    cursor = SerializeName(cursor, maybeFieldName_);
     cursor = WriteBytes(cursor, &pod, sizeof(pod));
     return cursor;
 }
 
 size_t
-AsmJSModule::Exit::serializedSize() const
+AsmJSModule::Export::serializedSize() const
 {
-    return SerializedSigSize(sig_) +
+    return SerializedNameSize(name_) +
+           SerializedNameSize(maybeFieldName_) +
            sizeof(pod);
 }
 
 const uint8_t*
-AsmJSModule::Exit::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
-    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
-    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
-    return cursor;
-}
-
-bool
-AsmJSModule::Exit::clone(ExclusiveContext* cx, Exit* out) const
-{
-    out->pod = pod;
-    return CloneSig(cx, sig_, &out->sig_);
-}
-
-uint8_t*
-AsmJSModule::ExportedFunction::serialize(uint8_t* cursor) const
-{
-    cursor = SerializeName(cursor, name_);
-    cursor = SerializeName(cursor, maybeFieldName_);
-    cursor = SerializeSig(cursor, sig_);
-    cursor = WriteBytes(cursor, &pod, sizeof(pod));
-    return cursor;
-}
-
-size_t
-AsmJSModule::ExportedFunction::serializedSize() const
-{
-    return SerializedNameSize(name_) +
-           SerializedNameSize(maybeFieldName_) +
-           SerializedSigSize(sig_) +
-           sizeof(pod);
-}
-
-const uint8_t*
-AsmJSModule::ExportedFunction::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+AsmJSModule::Export::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
 {
     (cursor = DeserializeName(cx, cursor, &name_)) &&
     (cursor = DeserializeName(cx, cursor, &maybeFieldName_)) &&
-    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
     (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
     return cursor;
 }
 
 bool
-AsmJSModule::ExportedFunction::clone(ExclusiveContext* cx, ExportedFunction* out) const
+AsmJSModule::Export::clone(JSContext* cx, Export* out) const
 {
     out->name_ = name_;
     out->maybeFieldName_ = maybeFieldName_;
     out->pod = pod;
-    return CloneSig(cx, sig_, &out->sig_);
-}
-
-AsmJSModule::CodeRange::CodeRange(uint32_t lineNumber, AsmJSFunctionOffsets offsets)
-  : nameIndex_(UINT32_MAX),
-    lineNumber_(lineNumber)
-{
-    PodZero(&u);  // zero padding for Valgrind
-    u.kind_ = Function;
-
-    MOZ_ASSERT(offsets.nonProfilingEntry - offsets.begin <= UINT8_MAX);
-    begin_ = offsets.begin;
-    u.func.beginToEntry_ = offsets.nonProfilingEntry - begin_;
-
-    MOZ_ASSERT(offsets.nonProfilingEntry < offsets.profilingReturn);
-    MOZ_ASSERT(offsets.profilingReturn - offsets.profilingJump <= UINT8_MAX);
-    MOZ_ASSERT(offsets.profilingReturn - offsets.profilingEpilogue <= UINT8_MAX);
-    profilingReturn_ = offsets.profilingReturn;
-    u.func.profilingJumpToProfilingReturn_ = profilingReturn_ - offsets.profilingJump;
-    u.func.profilingEpilogueToProfilingReturn_ = profilingReturn_ - offsets.profilingEpilogue;
-
-    MOZ_ASSERT(offsets.nonProfilingEntry < offsets.end);
-    end_ = offsets.end;
-}
-
-AsmJSModule::CodeRange::CodeRange(Kind kind, AsmJSOffsets offsets)
-  : nameIndex_(0),
-    lineNumber_(0),
-    begin_(offsets.begin),
-    profilingReturn_(0),
-    end_(offsets.end)
-{
-    PodZero(&u);  // zero padding for Valgrind
-    u.kind_ = kind;
-
-    MOZ_ASSERT(begin_ <= end_);
-    MOZ_ASSERT(u.kind_ == Entry || u.kind_ == Inline);
-}
-
-AsmJSModule::CodeRange::CodeRange(Kind kind, AsmJSProfilingOffsets offsets)
-  : nameIndex_(0),
-    lineNumber_(0),
-    begin_(offsets.begin),
-    profilingReturn_(offsets.profilingReturn),
-    end_(offsets.end)
-{
-    PodZero(&u);  // zero padding for Valgrind
-    u.kind_ = kind;
-
-    MOZ_ASSERT(begin_ < profilingReturn_);
-    MOZ_ASSERT(profilingReturn_ < end_);
-    MOZ_ASSERT(u.kind_ == JitFFI || u.kind_ == SlowFFI || u.kind_ == Interrupt);
-}
-
-AsmJSModule::CodeRange::CodeRange(Builtin builtin, AsmJSProfilingOffsets offsets)
-  : nameIndex_(0),
-    lineNumber_(0),
-    begin_(offsets.begin),
-    profilingReturn_(offsets.profilingReturn),
-    end_(offsets.end)
-{
-    PodZero(&u);  // zero padding for Valgrind
-    u.kind_ = Thunk;
-    u.thunk.target_ = uint16_t(builtin);
-
-    MOZ_ASSERT(begin_ < profilingReturn_);
-    MOZ_ASSERT(profilingReturn_ < end_);
-}
-
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-size_t
-AsmJSModule::ProfiledFunction::serializedSize() const
-{
-    return SerializedNameSize(name) +
-           sizeof(pod);
-}
-
-uint8_t*
-AsmJSModule::ProfiledFunction::serialize(uint8_t* cursor) const
-{
-    cursor = SerializeName(cursor, name);
-    cursor = WriteBytes(cursor, &pod, sizeof(pod));
-    return cursor;
-}
-
-const uint8_t*
-AsmJSModule::ProfiledFunction::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
-    (cursor = DeserializeName(cx, cursor, &name)) &&
-    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
-    return cursor;
-}
-#endif
-
-size_t
-AsmJSModule::AbsoluteLinkArray::serializedSize() const
-{
-    size_t size = 0;
-    for (const OffsetVector& offsets : *this)
-        size += SerializedPodVectorSize(offsets);
-    return size;
-}
-
-uint8_t*
-AsmJSModule::AbsoluteLinkArray::serialize(uint8_t* cursor) const
-{
-    for (const OffsetVector& offsets : *this)
-        cursor = SerializePodVector(cursor, offsets);
-    return cursor;
-}
-
-const uint8_t*
-AsmJSModule::AbsoluteLinkArray::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
-    for (OffsetVector& offsets : *this) {
-        cursor = DeserializePodVector(cx, cursor, &offsets);
-        if (!cursor)
-            return nullptr;
-    }
-    return cursor;
-}
-
-bool
-AsmJSModule::AbsoluteLinkArray::clone(ExclusiveContext* cx, AbsoluteLinkArray* out) const
-{
-    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
-        if (!ClonePodVector(cx, (*this)[imm], &(*out)[imm]))
-            return false;
-    }
     return true;
 }
 
 size_t
-AsmJSModule::AbsoluteLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
-{
-    size_t size = 0;
-    for (const OffsetVector& offsets : *this)
-        size += offsets.sizeOfExcludingThis(mallocSizeOf);
-    return size;
-}
-
-size_t
-AsmJSModule::FuncPtrTable::serializedSize() const
-{
-    return sizeof(pod) +
-           SerializedPodVectorSize(elemOffsets_);
-}
-
-uint8_t*
-AsmJSModule::FuncPtrTable::serialize(uint8_t* cursor) const
-{
-    cursor = WriteBytes(cursor, &pod, sizeof(pod));
-    cursor = SerializePodVector(cursor, elemOffsets_);
-    return cursor;
-}
-
-const uint8_t*
-AsmJSModule::FuncPtrTable::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
-    (cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
-    (cursor = DeserializePodVector(cx, cursor, &elemOffsets_));
-    return cursor;
-}
-
-bool
-AsmJSModule::FuncPtrTable::clone(ExclusiveContext* cx, FuncPtrTable* out) const
-{
-    out->pod = pod;
-    return ClonePodVector(cx, elemOffsets_, &out->elemOffsets_);
-}
-
-size_t
-AsmJSModule::FuncPtrTable::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
-{
-    return elemOffsets_.sizeOfExcludingThis(mallocSizeOf);
-}
-
-size_t
-AsmJSModule::StaticLinkData::serializedSize() const
-{
-    return sizeof(pod) +
-           SerializedPodVectorSize(relativeLinks) +
-           absoluteLinks.serializedSize() +
-           SerializedVectorSize(funcPtrTables);
-}
-
-uint8_t*
-AsmJSModule::StaticLinkData::serialize(uint8_t* cursor) const
-{
-    cursor = WriteBytes(cursor, &pod, sizeof(pod));
-    cursor = SerializePodVector(cursor, relativeLinks);
-    cursor = absoluteLinks.serialize(cursor);
-    cursor = SerializeVector(cursor, funcPtrTables);
-    return cursor;
-}
-
-const uint8_t*
-AsmJSModule::StaticLinkData::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
-{
-    (cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
-    (cursor = DeserializePodVector(cx, cursor, &relativeLinks)) &&
-    (cursor = absoluteLinks.deserialize(cx, cursor)) &&
-    (cursor = DeserializeVector(cx, cursor, &funcPtrTables));
-    return cursor;
-}
-
-bool
-AsmJSModule::StaticLinkData::clone(ExclusiveContext* cx, StaticLinkData* out) const
-{
-    out->pod = pod;
-    return ClonePodVector(cx, relativeLinks, &out->relativeLinks) &&
-           absoluteLinks.clone(cx, &out->absoluteLinks) &&
-           CloneVector(cx, funcPtrTables, &out->funcPtrTables);
-}
-
-size_t
-AsmJSModule::StaticLinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
-{
-    size_t size = relativeLinks.sizeOfExcludingThis(mallocSizeOf) +
-                  absoluteLinks.sizeOfExcludingThis(mallocSizeOf) +
-                  funcPtrTables.sizeOfExcludingThis(mallocSizeOf);
-
-    for (const FuncPtrTable& table : funcPtrTables)
-        size += table.sizeOfExcludingThis(mallocSizeOf);
-
-    return size;
-}
-
-size_t
 AsmJSModule::serializedSize() const
 {
-    return sizeof(pod) +
-           pod.codeBytes_ +
+    MOZ_ASSERT(isFinished());
+    return wasm_->serializedSize() +
+           linkData_->serializedSize() +
+           sizeof(pod) +
+           SerializedVectorSize(globals_) +
+           SerializedPodVectorSize(imports_) +
+           SerializedVectorSize(exports_) +
            SerializedNameSize(globalArgumentName_) +
            SerializedNameSize(importArgumentName_) +
-           SerializedNameSize(bufferArgumentName_) +
-           SerializedVectorSize(globals_) +
-           SerializedVectorSize(exits_) +
-           SerializedVectorSize(exports_) +
-           SerializedPodVectorSize(callSites_) +
-           SerializedPodVectorSize(codeRanges_) +
-           SerializedVectorSize(names_) +
-           SerializedPodVectorSize(heapAccesses_) +
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-           SerializedVectorSize(profiledFunctions_) +
-#endif
-           staticLinkData_.serializedSize();
+           SerializedNameSize(bufferArgumentName_);
 }
 
 uint8_t*
 AsmJSModule::serialize(uint8_t* cursor) const
 {
-    MOZ_ASSERT(!dynamicallyLinked_);
-    MOZ_ASSERT(!loadedFromCache_);
-    MOZ_ASSERT(!profilingEnabled_);
-    MOZ_ASSERT(!interrupted_);
-
+    MOZ_ASSERT(isFinished());
+    cursor = wasm_->serialize(cursor);
+    cursor = linkData_->serialize(cursor);
     cursor = WriteBytes(cursor, &pod, sizeof(pod));
-    cursor = WriteBytes(cursor, code_, pod.codeBytes_);
+    cursor = SerializeVector(cursor, globals_);
+    cursor = SerializePodVector(cursor, imports_);
+    cursor = SerializeVector(cursor, exports_);
     cursor = SerializeName(cursor, globalArgumentName_);
     cursor = SerializeName(cursor, importArgumentName_);
     cursor = SerializeName(cursor, bufferArgumentName_);
-    cursor = SerializeVector(cursor, globals_);
-    cursor = SerializeVector(cursor, exits_);
-    cursor = SerializeVector(cursor, exports_);
-    cursor = SerializePodVector(cursor, callSites_);
-    cursor = SerializePodVector(cursor, codeRanges_);
-    cursor = SerializeVector(cursor, names_);
-    cursor = SerializePodVector(cursor, heapAccesses_);
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-    cursor = SerializeVector(cursor, profiledFunctions_);
-#endif
-    cursor = staticLinkData_.serialize(cursor);
     return cursor;
 }
 
 const uint8_t*
 AsmJSModule::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
 {
+    linkData_ = cx->make_unique<StaticLinkData>();
+    if (!linkData_)
+        return nullptr;
+
     // To avoid GC-during-deserialization corner cases, prevent atoms from
     // being collected.
     AutoKeepAtoms aka(cx->perThreadData);
 
+    (cursor = Module::deserialize(cx, cursor, &wasm_)) &&
+    (cursor = linkData_->deserialize(cx, cursor)) &&
     (cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
-    (code_ = AllocateExecutableMemory(cx, pod.totalBytes_)) &&
-    (cursor = ReadBytes(cursor, code_, pod.codeBytes_)) &&
+    (cursor = DeserializeVector(cx, cursor, &globals_)) &&
+    (cursor = DeserializePodVector(cx, cursor, &imports_)) &&
+    (cursor = DeserializeVector(cx, cursor, &exports_)) &&
     (cursor = DeserializeName(cx, cursor, &globalArgumentName_)) &&
     (cursor = DeserializeName(cx, cursor, &importArgumentName_)) &&
-    (cursor = DeserializeName(cx, cursor, &bufferArgumentName_)) &&
-    (cursor = DeserializeVector(cx, cursor, &globals_)) &&
-    (cursor = DeserializeVector(cx, cursor, &exits_)) &&
-    (cursor = DeserializeVector(cx, cursor, &exports_)) &&
-    (cursor = DeserializePodVector(cx, cursor, &callSites_)) &&
-    (cursor = DeserializePodVector(cx, cursor, &codeRanges_)) &&
-    (cursor = DeserializeVector(cx, cursor, &names_)) &&
-    (cursor = DeserializePodVector(cx, cursor, &heapAccesses_)) &&
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-    (cursor = DeserializeVector(cx, cursor, &profiledFunctions_)) &&
-#endif
-    (cursor = staticLinkData_.deserialize(cx, cursor));
-
-    loadedFromCache_ = true;
+    (cursor = DeserializeName(cx, cursor, &bufferArgumentName_));
 
     return cursor;
 }
 
 bool
-AsmJSModule::clone(JSContext* cx, ScopedJSDeletePtr<AsmJSModule>* moduleOut) const
+AsmJSModule::clone(JSContext* cx, HandleAsmJSModule obj) const
 {
-    *moduleOut = cx->new_<AsmJSModule>(scriptSource_, srcStart_, srcBodyStart_, pod.strict_,
-                                       pod.canUseSignalHandlers_);
-    if (!*moduleOut)
+    auto out = cx->new_<AsmJSModule>(scriptSource(), srcStart_, srcBodyStart_, pod.strict_);
+    if (!out)
         return false;
 
-    AsmJSModule& out = **moduleOut;
-
-    // Mirror the order of serialize/deserialize in cloning:
+    obj->setModule(out);
 
-    out.pod = pod;
-
-    out.code_ = AllocateExecutableMemory(cx, pod.totalBytes_);
-    if (!out.code_)
+    out->wasm_ = wasm_->clone(cx, *linkData_);
+    if (!out->wasm_)
         return false;
 
-    memcpy(out.code_, code_, pod.codeBytes_);
-
-    out.globalArgumentName_ = globalArgumentName_;
-    out.importArgumentName_ = importArgumentName_;
-    out.bufferArgumentName_ = bufferArgumentName_;
+    out->linkData_ = cx->make_unique<StaticLinkData>();
+    if (!out->linkData_ || !linkData_->clone(cx, out->linkData_.get()))
+        return false;
 
-    if (!CloneVector(cx, globals_, &out.globals_) ||
-        !CloneVector(cx, exits_, &out.exits_) ||
-        !CloneVector(cx, exports_, &out.exports_) ||
-        !ClonePodVector(cx, callSites_, &out.callSites_) ||
-        !ClonePodVector(cx, codeRanges_, &out.codeRanges_) ||
-        !CloneVector(cx, names_, &out.names_) ||
-        !ClonePodVector(cx, heapAccesses_, &out.heapAccesses_) ||
-        !staticLinkData_.clone(cx, &out.staticLinkData_))
+    out->pod = pod;
+
+    if (!CloneVector(cx, globals_, &out->globals_) ||
+        !ClonePodVector(cx, imports_, &out->imports_) ||
+        !CloneVector(cx, exports_, &out->exports_))
     {
         return false;
     }
 
-    out.loadedFromCache_ = loadedFromCache_;
-    out.profilingEnabled_ = profilingEnabled_;
-
-    if (profilingEnabled_) {
-        if (!out.profilingLabels_.resize(profilingLabels_.length()))
-            return false;
-        for (size_t i = 0; i < profilingLabels_.length(); i++) {
-            out.profilingLabels_[i] = DuplicateString(cx, profilingLabels_[i].get());
-            if (!out.profilingLabels_[i])
-                return false;
-        }
-    }
-
-
-    // Delay flushing until dynamic linking.
-    AutoFlushICache afc("AsmJSModule::clone", /* inhibit = */ true);
-    out.setAutoFlushICacheRange();
-
-    out.restoreToInitialState(maybeHeap_, code_, cx);
-    out.staticallyLink(cx);
-    return true;
-}
-
-bool
-AsmJSModule::changeHeap(Handle<ArrayBufferObject*> newHeap, JSContext* cx)
-{
-    MOZ_ASSERT(hasArrayView());
-
-    // Content JS should not be able to run (and change heap) from within an
-    // interrupt callback, but in case it does, fail to change heap. Otherwise,
-    // the heap can change at every single instruction which would prevent
-    // future optimizations like heap-base hoisting.
-    if (interrupted_)
-        return false;
-
-    AutoMutateCode amc(cx, *this, "AsmJSModule::changeHeap");
-    restoreHeapToInitialState(maybeHeap_);
-    initHeap(newHeap, cx);
+    out->globalArgumentName_ = globalArgumentName_;
+    out->importArgumentName_ = importArgumentName_;
+    out->bufferArgumentName_ = bufferArgumentName_;
     return true;
 }
 
-size_t
-AsmJSModule::heapLength() const
-{
-    MOZ_ASSERT(isDynamicallyLinked());
-    return maybeHeap_ ? maybeHeap_->byteLength() : 0;
-}
-
-void
-AsmJSModule::setProfilingEnabled(bool enabled, JSContext* cx)
-{
-    MOZ_ASSERT(isDynamicallyLinked());
-
-    if (profilingEnabled_ == enabled)
-        return;
-
-    // When enabled, generate profiling labels for every name in names_ that is
-    // the name of some Function CodeRange. This involves malloc() so do it now
-    // since, once we start sampling, we'll be in a signal-handing context where
-    // we cannot malloc.
-    if (enabled) {
-        profilingLabels_.resize(names_.length());
-        const char* filename = scriptSource_->filename();
-        JS::AutoCheckCannotGC nogc;
-        for (size_t i = 0; i < codeRanges_.length(); i++) {
-            CodeRange& cr = codeRanges_[i];
-            if (!cr.isFunction())
-                continue;
-            unsigned lineno = cr.functionLineNumber();
-            PropertyName* name = names_[cr.functionNameIndex()].name();
-            profilingLabels_[cr.functionNameIndex()].reset(
-                name->hasLatin1Chars()
-                ? JS_smprintf("%s (%s:%u)", name->latin1Chars(nogc), filename, lineno)
-                : JS_smprintf("%hs (%s:%u)", name->twoByteChars(nogc), filename, lineno));
-        }
-    } else {
-        profilingLabels_.clear();
-    }
-
-    AutoMutateCode amc(cx, *this, "AsmJSModule::setProfilingEnabled");
-
-    // Patch all internal (asm.js->asm.js) callsites to call the profiling
-    // prologues:
-    for (size_t i = 0; i < callSites_.length(); i++) {
-        CallSite& cs = callSites_[i];
-        if (cs.kind() != CallSite::Relative)
-            continue;
-
-        uint8_t* callerRetAddr = code_ + cs.returnAddressOffset();
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-        void* callee = X86Encoding::GetRel32Target(callerRetAddr);
-#elif defined(JS_CODEGEN_ARM)
-        uint8_t* caller = callerRetAddr - 4;
-        Instruction* callerInsn = reinterpret_cast<Instruction*>(caller);
-        BOffImm calleeOffset;
-        callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
-        void* callee = calleeOffset.getDest(callerInsn);
-#elif defined(JS_CODEGEN_ARM64)
-        MOZ_CRASH();
-        void* callee = nullptr;
-        (void)callerRetAddr;
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-        uint8_t* instr = callerRetAddr - Assembler::PatchWrite_NearCallSize();
-        void* callee = (void*)Assembler::ExtractInstructionImmediate(instr);
-#elif defined(JS_CODEGEN_NONE)
-        MOZ_CRASH();
-        void* callee = nullptr;
-#else
-# error "Missing architecture"
-#endif
-
-        const CodeRange* codeRange = lookupCodeRange(callee);
-        if (codeRange->kind() != CodeRange::Function)
-            continue;
-
-        uint8_t* profilingEntry = code_ + codeRange->profilingEntry();
-        uint8_t* entry = code_ + codeRange->entry();
-        MOZ_ASSERT_IF(profilingEnabled_, callee == profilingEntry);
-        MOZ_ASSERT_IF(!profilingEnabled_, callee == entry);
-        uint8_t* newCallee = enabled ? profilingEntry : entry;
-
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-        X86Encoding::SetRel32(callerRetAddr, newCallee);
-#elif defined(JS_CODEGEN_ARM)
-        new (caller) InstBLImm(BOffImm(newCallee - caller), Assembler::Always);
-#elif defined(JS_CODEGEN_ARM64)
-        (void)newCallee;
-        MOZ_CRASH();
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-        Assembler::PatchInstructionImmediate(instr, PatchedImmPtr(newCallee));
-#elif defined(JS_CODEGEN_NONE)
-        MOZ_CRASH();
-#else
-# error "Missing architecture"
-#endif
-    }
-
-    // Update all the addresses in the function-pointer tables to point to the
-    // profiling prologues:
-    for (FuncPtrTable& funcPtrTable : staticLinkData_.funcPtrTables) {
-        auto array = reinterpret_cast<void**>(globalData() + funcPtrTable.globalDataOffset());
-        for (size_t i = 0; i < funcPtrTable.elemOffsets().length(); i++) {
-            void* callee = array[i];
-            const CodeRange* codeRange = lookupCodeRange(callee);
-            void* profilingEntry = code_ + codeRange->profilingEntry();
-            void* entry = code_ + codeRange->entry();
-            MOZ_ASSERT_IF(profilingEnabled_, callee == profilingEntry);
-            MOZ_ASSERT_IF(!profilingEnabled_, callee == entry);
-            if (enabled)
-                array[i] = profilingEntry;
-            else
-                array[i] = entry;
-        }
-    }
-
-    // Replace all the nops in all the epilogues of asm.js functions with jumps
-    // to the profiling epilogues.
-    for (size_t i = 0; i < codeRanges_.length(); i++) {
-        CodeRange& cr = codeRanges_[i];
-        if (!cr.isFunction())
-            continue;
-        uint8_t* jump = code_ + cr.profilingJump();
-        uint8_t* profilingEpilogue = code_ + cr.profilingEpilogue();
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-        // An unconditional jump with a 1 byte offset immediate has the opcode
-        // 0x90. The offset is relative to the address of the instruction after
-        // the jump. 0x66 0x90 is the canonical two-byte nop.
-        ptrdiff_t jumpImmediate = profilingEpilogue - jump - 2;
-        MOZ_ASSERT(jumpImmediate > 0 && jumpImmediate <= 127);
-        if (enabled) {
-            MOZ_ASSERT(jump[0] == 0x66);
-            MOZ_ASSERT(jump[1] == 0x90);
-            jump[0] = 0xeb;
-            jump[1] = jumpImmediate;
-        } else {
-            MOZ_ASSERT(jump[0] == 0xeb);
-            MOZ_ASSERT(jump[1] == jumpImmediate);
-            jump[0] = 0x66;
-            jump[1] = 0x90;
-        }
-#elif defined(JS_CODEGEN_ARM)
-        if (enabled) {
-            MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstNOP>());
-            new (jump) InstBImm(BOffImm(profilingEpilogue - jump), Assembler::Always);
-        } else {
-            MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstBImm>());
-            new (jump) InstNOP();
-        }
-#elif defined(JS_CODEGEN_ARM64)
-        (void)jump;
-        (void)profilingEpilogue;
-        MOZ_CRASH();
-#elif defined(JS_CODEGEN_MIPS32)
-        Instruction* instr = (Instruction*)jump;
-        if (enabled) {
-            Assembler::WriteLuiOriInstructions(instr, instr->next(),
-                                               ScratchRegister, (uint32_t)profilingEpilogue);
-            instr[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
-        } else {
-            instr[0].makeNop();
-            instr[1].makeNop();
-            instr[2].makeNop();
-        }
-#elif defined(JS_CODEGEN_MIPS64)
-        Instruction* instr = (Instruction*)jump;
-        if (enabled) {
-            Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)profilingEpilogue);
-            instr[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
-        } else {
-            instr[0].makeNop();
-            instr[1].makeNop();
-            instr[2].makeNop();
-            instr[3].makeNop();
-            instr[4].makeNop();
-        }
-#elif defined(JS_CODEGEN_NONE)
-        MOZ_CRASH();
-#else
-# error "Missing architecture"
-#endif
-    }
-
-    // Replace all calls to builtins with calls to profiling thunks that push a
-    // frame pointer. Since exit unwinding always starts at the caller of fp,
-    // this avoids losing the innermost asm.js function.
-    for (auto builtin : MakeEnumeratedRange(Builtin::Limit)) {
-        auto imm = BuiltinToImmediate(builtin);
-        const OffsetVector& offsets = staticLinkData_.absoluteLinks[imm];
-        void* from = AddressOf(imm, nullptr);
-        void* to = code_ + staticLinkData_.pod.builtinThunkOffsets[builtin];
-        if (!enabled)
-            Swap(from, to);
-        for (size_t j = 0; j < offsets.length(); j++) {
-            uint8_t* caller = code_ + offsets[j];
-            const AsmJSModule::CodeRange* codeRange = lookupCodeRange(caller);
-            if (codeRange->isThunk())
-                continue;
-            MOZ_ASSERT(codeRange->isFunction());
-            Assembler::PatchDataWithValueCheck(CodeLocationLabel(caller),
-                                               PatchedImmPtr(to),
-                                               PatchedImmPtr(from));
-        }
-    }
-
-    profilingEnabled_ = enabled;
-}
-
-static bool
-GetCPUID(uint32_t* cpuId)
-{
-    enum Arch {
-        X86 = 0x1,
-        X64 = 0x2,
-        ARM = 0x3,
-        MIPS = 0x4,
-        MIPS64 = 0x5,
-        ARCH_BITS = 3
-    };
-
-#if defined(JS_CODEGEN_X86)
-    MOZ_ASSERT(uint32_t(CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
-    *cpuId = X86 | (uint32_t(CPUInfo::GetSSEVersion()) << ARCH_BITS);
-    return true;
-#elif defined(JS_CODEGEN_X64)
-    MOZ_ASSERT(uint32_t(CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
-    *cpuId = X64 | (uint32_t(CPUInfo::GetSSEVersion()) << ARCH_BITS);
-    return true;
-#elif defined(JS_CODEGEN_ARM)
-    MOZ_ASSERT(GetARMFlags() <= (UINT32_MAX >> ARCH_BITS));
-    *cpuId = ARM | (GetARMFlags() << ARCH_BITS);
-    return true;
-#elif defined(JS_CODEGEN_MIPS32)
-    MOZ_ASSERT(GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
-    *cpuId = MIPS | (GetMIPSFlags() << ARCH_BITS);
-    return true;
-#elif defined(JS_CODEGEN_MIPS64)
-    MOZ_ASSERT(GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
-    *cpuId = MIPS64 | (GetMIPSFlags() << ARCH_BITS);
-    return true;
-#else
-    return false;
-#endif
-}
-
-class MachineId
-{
-    uint32_t cpuId_;
-    JS::BuildIdCharVector buildId_;
-
-  public:
-    bool extractCurrentState(ExclusiveContext* cx) {
-        if (!cx->asmJSCacheOps().buildId)
-            return false;
-        if (!cx->asmJSCacheOps().buildId(&buildId_))
-            return false;
-        if (!GetCPUID(&cpuId_))
-            return false;
-        return true;
-    }
-
-    size_t serializedSize() const {
-        return sizeof(uint32_t) +
-               SerializedPodVectorSize(buildId_);
-    }
-
-    uint8_t* serialize(uint8_t* cursor) const {
-        cursor = WriteScalar<uint32_t>(cursor, cpuId_);
-        cursor = SerializePodVector(cursor, buildId_);
-        return cursor;
-    }
-
-    const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor) {
-        (cursor = ReadScalar<uint32_t>(cursor, &cpuId_)) &&
-        (cursor = DeserializePodVector(cx, cursor, &buildId_));
-        return cursor;
-    }
-
-    bool operator==(const MachineId& rhs) const {
-        return cpuId_ == rhs.cpuId_ &&
-               buildId_.length() == rhs.buildId_.length() &&
-               PodEqual(buildId_.begin(), rhs.buildId_.begin(), buildId_.length());
-    }
-    bool operator!=(const MachineId& rhs) const {
-        return !(*this == rhs);
-    }
-};
-
 struct PropertyNameWrapper
 {
     PropertyName* name;
 
     PropertyNameWrapper()
       : name(nullptr)
     {}
     explicit PropertyNameWrapper(PropertyName* name)
@@ -2193,37 +505,18 @@ class ModuleCharsForLookup : ModuleChars
                 if (funCtorArgs_[i].name != arg->name())
                     return false;
             }
         }
         return true;
     }
 };
 
-struct ScopedCacheEntryOpenedForWrite
-{
-    ExclusiveContext* cx;
-    const size_t serializedSize;
-    uint8_t* memory;
-    intptr_t handle;
-
-    ScopedCacheEntryOpenedForWrite(ExclusiveContext* cx, size_t serializedSize)
-      : cx(cx), serializedSize(serializedSize), memory(nullptr), handle(-1)
-    {}
-
-    ~ScopedCacheEntryOpenedForWrite() {
-        if (memory)
-            cx->asmJSCacheOps().closeEntryForWrite(serializedSize, memory, handle);
-    }
-};
-
 JS::AsmJSCacheResult
-js::StoreAsmJSModuleInCache(AsmJSParser& parser,
-                            const AsmJSModule& module,
-                            ExclusiveContext* cx)
+js::StoreAsmJSModuleInCache(AsmJSParser& parser, const AsmJSModule& module, ExclusiveContext* cx)
 {
     MachineId machineId;
     if (!machineId.extractCurrentState(cx))
         return JS::AsmJSCache_InternalError;
 
     ModuleCharsForStore moduleChars;
     if (!moduleChars.init(parser))
         return JS::AsmJSCache_InternalError;
@@ -2250,41 +543,24 @@ js::StoreAsmJSModuleInCache(AsmJSParser&
     cursor = machineId.serialize(cursor);
     cursor = moduleChars.serialize(cursor);
     cursor = module.serialize(cursor);
 
     MOZ_ASSERT(cursor == entry.memory + serializedSize);
     return JS::AsmJSCache_Success;
 }
 
-struct ScopedCacheEntryOpenedForRead
-{
-    ExclusiveContext* cx;
-    size_t serializedSize;
-    const uint8_t* memory;
-    intptr_t handle;
-
-    explicit ScopedCacheEntryOpenedForRead(ExclusiveContext* cx)
-      : cx(cx), serializedSize(0), memory(nullptr), handle(0)
-    {}
-
-    ~ScopedCacheEntryOpenedForRead() {
-        if (memory)
-            cx->asmJSCacheOps().closeEntryForRead(serializedSize, memory, handle);
-    }
-};
-
 bool
-js::LookupAsmJSModuleInCache(ExclusiveContext* cx,
-                             AsmJSParser& parser,
-                             ScopedJSDeletePtr<AsmJSModule>* moduleOut,
-                             ScopedJSFreePtr<char>* compilationTimeReport)
+js::LookupAsmJSModuleInCache(ExclusiveContext* cx, AsmJSParser& parser, HandleAsmJSModule moduleObj,
+                             bool* loadedFromCache, UniqueChars* compilationTimeReport)
 {
     int64_t usecBefore = PRMJ_Now();
 
+    *loadedFromCache = false;
+
     MachineId machineId;
     if (!machineId.extractCurrentState(cx))
         return true;
 
     JS::OpenAsmJSCacheEntryForReadOp open = cx->asmJSCacheOps().openEntryForRead;
     if (!open)
         return true;
 
@@ -2308,44 +584,39 @@ js::LookupAsmJSModuleInCache(ExclusiveCo
     cursor = moduleChars.deserialize(cx, cursor);
     if (!moduleChars.match(parser))
         return true;
 
     uint32_t srcStart = parser.pc->maybeFunction->pn_body->pn_pos.begin;
     uint32_t srcBodyStart = parser.tokenStream.currentToken().pos.end;
     bool strict = parser.pc->sc->strict() && !parser.pc->sc->hasExplicitUseStrict();
 
-    // canUseSignalHandlers will be clobbered when deserializing and checked below
-    ScopedJSDeletePtr<AsmJSModule> module(
-        cx->new_<AsmJSModule>(parser.ss, srcStart, srcBodyStart, strict,
-                              /* canUseSignalHandlers = */ false));
+    AsmJSModule* module = cx->new_<AsmJSModule>(parser.ss, srcStart, srcBodyStart, strict);
     if (!module)
         return false;
 
+    moduleObj->setModule(module);
+
     cursor = module->deserialize(cx, cursor);
     if (!cursor)
         return false;
 
     bool atEnd = cursor == entry.memory + entry.serializedSize;
     MOZ_ASSERT(atEnd, "Corrupt cache file");
     if (!atEnd)
         return true;
 
-    if (module->canUseSignalHandlers() != cx->canUseSignalHandlers())
+    if (module->wasm().compileArgs() != CompileArgs(cx))
         return true;
 
+    module->staticallyLink(cx);
+
     if (!parser.tokenStream.advance(module->srcEndBeforeCurly()))
         return false;
 
-    {
-        // Delay flushing until dynamic linking.
-        AutoFlushICache afc("LookupAsmJSModuleInCache", /* inhibit = */ true);
-        module->setAutoFlushICacheRange();
-
-        module->staticallyLink(cx);
-    }
+    *loadedFromCache = true;
 
     int64_t usecAfter = PRMJ_Now();
     int ms = (usecAfter - usecBefore) / PRMJ_USEC_PER_MSEC;
-    *compilationTimeReport = JS_smprintf("loaded from cache in %dms", ms);
-    *moduleOut = module.forget();
+    *compilationTimeReport = UniqueChars(JS_smprintf("loaded from cache in %dms", ms));
     return true;
 }
+
--- a/js/src/asmjs/AsmJSModule.h
+++ b/js/src/asmjs/AsmJSModule.h
@@ -19,32 +19,26 @@
 #ifndef asmjs_AsmJSModule_h
 #define asmjs_AsmJSModule_h
 
 #include "mozilla/EnumeratedArray.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/Move.h"
 #include "mozilla/PodOperations.h"
 
-#include "jsscript.h"
-
-#include "asmjs/AsmJSFrameIterator.h"
 #include "asmjs/AsmJSValidate.h"
-#include "asmjs/Wasm.h"
+#include "asmjs/WasmModule.h"
 #include "builtin/SIMD.h"
 #include "gc/Tracer.h"
-#ifdef JS_ION_PERF
-# include "jit/PerfSpewer.h"
-#endif
 #include "vm/TypedArrayObject.h"
 
 namespace js {
 
-namespace frontend { class TokenStream; }
-namespace jit { struct BaselineScript; class MacroAssembler; }
+class AsmJSModuleObject;
+typedef Handle<AsmJSModuleObject*> HandleAsmJSModule;
 
 // The asm.js spec recognizes this set of builtin Math functions.
 enum AsmJSMathBuiltinFunction
 {
     AsmJSMathBuiltin_sin, AsmJSMathBuiltin_cos, AsmJSMathBuiltin_tan,
     AsmJSMathBuiltin_asin, AsmJSMathBuiltin_acos, AsmJSMathBuiltin_atan,
     AsmJSMathBuiltin_ceil, AsmJSMathBuiltin_floor, AsmJSMathBuiltin_exp,
     AsmJSMathBuiltin_log, AsmJSMathBuiltin_pow, AsmJSMathBuiltin_sqrt,
@@ -93,39 +87,31 @@ IsSignedIntSimdType(AsmJSSimdType type)
 // Set of known operations, for a given SIMD type (int32x4, float32x4,...)
 enum AsmJSSimdOperation
 {
 #define ASMJSSIMDOPERATION(op) AsmJSSimdOperation_##op,
     FORALL_SIMD_ASMJS_OP(ASMJSSIMDOPERATION)
 #undef ASMJSSIMDOPERATION
 };
 
-// An asm.js module represents the collection of functions nested inside a
-// single outer "use asm" function. For example, this asm.js module:
-//   function() { "use asm"; function f() {} function g() {} return f }
-// contains the functions 'f' and 'g'.
-//
-// An asm.js module contains both the jit-code produced by compiling all the
-// functions in the module as well all the data required to perform the
-// link-time validation step in the asm.js spec.
-//
-// NB: this means that AsmJSModule must be GC-safe.
+// An AsmJSModule extends (via containment) a wasm::Module with the extra persistent state
+// necessary to represent a compiled asm.js module.
 class AsmJSModule
 {
   public:
     class Global
     {
       public:
         enum Which { Variable, FFI, ArrayView, ArrayViewCtor, MathBuiltinFunction,
                      AtomicsBuiltinFunction, Constant, SimdCtor, SimdOperation, ByteLength };
         enum VarInitKind { InitConstant, InitImport };
         enum ConstantKind { GlobalConstant, MathConstant };
 
       private:
-        struct Pod {
+        struct CacheablePod {
             Which which_;
             union {
                 struct {
                     uint32_t globalDataOffset_;
                     VarInitKind initKind_;
                     union {
                         wasm::ValType importType_;
                         wasm::Val val_;
@@ -254,562 +240,129 @@ class AsmJSModule
             MOZ_ASSERT(pod.which_ == Constant);
             return pod.u.constant.kind_;
         }
         double constantValue() const {
             MOZ_ASSERT(pod.which_ == Constant);
             return pod.u.constant.value_;
         }
 
-        size_t serializedSize() const;
-        uint8_t* serialize(uint8_t* cursor) const;
-        const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
-        bool clone(ExclusiveContext* cx, Global* out) const;
-    };
-
-    // An Exit holds bookkeeping information about an exit; the ExitDatum
-    // struct overlays the actual runtime data stored in the global data
-    // section.
-
-    struct ExitDatum
-    {
-        uint8_t* exit;
-        jit::BaselineScript* baselineScript;
-        HeapPtrFunction fun;
+        WASM_DECLARE_SERIALIZABLE(Global);
     };
 
-    class Exit
+    typedef Vector<Global, 0, SystemAllocPolicy> GlobalVector;
+
+    class Import
     {
-        wasm::MallocSig sig_;
-        struct Pod {
-            unsigned ffiIndex_;
-            unsigned globalDataOffset_;
-            unsigned interpCodeOffset_;
-            unsigned jitCodeOffset_;
-        } pod;
-
+        uint32_t ffiIndex_;
       public:
-        Exit() {}
-        Exit(Exit&& rhs) : sig_(Move(rhs.sig_)), pod(rhs.pod) {}
-        Exit(wasm::MallocSig&& sig, unsigned ffiIndex, unsigned globalDataOffset)
-          : sig_(Move(sig))
-        {
-            pod.ffiIndex_ = ffiIndex;
-            pod.globalDataOffset_ = globalDataOffset;
-            pod.interpCodeOffset_ = 0;
-            pod.jitCodeOffset_ = 0;
-        }
-        const wasm::MallocSig& sig() const {
-            return sig_;
-        }
-        unsigned ffiIndex() const {
-            return pod.ffiIndex_;
-        }
-        unsigned globalDataOffset() const {
-            return pod.globalDataOffset_;
-        }
-        void initInterpOffset(unsigned off) {
-            MOZ_ASSERT(!pod.interpCodeOffset_);
-            pod.interpCodeOffset_ = off;
-        }
-        void initJitOffset(unsigned off) {
-            MOZ_ASSERT(!pod.jitCodeOffset_);
-            pod.jitCodeOffset_ = off;
-        }
-        ExitDatum& datum(const AsmJSModule& module) const {
-            return *reinterpret_cast<ExitDatum*>(module.globalData() + pod.globalDataOffset_);
-        }
-        void initDatum(const AsmJSModule& module) const {
-            MOZ_ASSERT(pod.interpCodeOffset_);
-            ExitDatum& d = datum(module);
-            d.exit = module.codeBase() + pod.interpCodeOffset_;
-            d.baselineScript = nullptr;
-            d.fun = nullptr;
-        }
-        bool isOptimized(const AsmJSModule& module) const {
-            return datum(module).exit == module.codeBase() + pod.jitCodeOffset_;
-        }
-        void optimize(const AsmJSModule& module, jit::BaselineScript* baselineScript) const {
-            ExitDatum& d = datum(module);
-            d.exit = module.codeBase() + pod.jitCodeOffset_;
-            d.baselineScript = baselineScript;
-        }
-        void deoptimize(const AsmJSModule& module) const {
-            ExitDatum& d = datum(module);
-            d.exit = module.codeBase() + pod.interpCodeOffset_;
-            d.baselineScript = nullptr;
-        }
-
-        size_t serializedSize() const;
-        uint8_t* serialize(uint8_t* cursor) const;
-        const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
-        bool clone(ExclusiveContext* cx, Exit* out) const;
+        Import() = default;
+        explicit Import(uint32_t ffiIndex) : ffiIndex_(ffiIndex) {}
+        uint32_t ffiIndex() const { return ffiIndex_; }
     };
 
-    struct EntryArg {
-        uint64_t lo;
-        uint64_t hi;
-    };
+    typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
 
-    typedef int32_t (*CodePtr)(EntryArg* args, uint8_t* global);
-
-    class ExportedFunction
+    class Export
     {
         PropertyName* name_;
         PropertyName* maybeFieldName_;
-        wasm::MallocSig sig_;
-        struct Pod {
-            bool isChangeHeap_;
-            uint32_t funcIndex_;
-            uint32_t codeOffset_;
+        struct CacheablePod {
+            uint32_t wasmIndex_;
             uint32_t startOffsetInModule_;  // Store module-start-relative offsets
             uint32_t endOffsetInModule_;    // so preserved by serialization.
         } pod;
 
-        friend class AsmJSModule;
-
-        ExportedFunction(PropertyName* name, uint32_t funcIndex,
-                         uint32_t startOffsetInModule, uint32_t endOffsetInModule,
-                         PropertyName* maybeFieldName,
-                         wasm::MallocSig&& sig)
-         : name_(name),
-           maybeFieldName_(maybeFieldName),
-           sig_(Move(sig))
-        {
-            MOZ_ASSERT(name_->isTenured());
-            MOZ_ASSERT_IF(maybeFieldName_, maybeFieldName_->isTenured());
-            mozilla::PodZero(&pod);  // zero padding for Valgrind
-            pod.funcIndex_ = funcIndex;
-            pod.isChangeHeap_ = false;
-            pod.codeOffset_ = UINT32_MAX;
-            pod.startOffsetInModule_ = startOffsetInModule;
-            pod.endOffsetInModule_ = endOffsetInModule;
-        }
-
-        ExportedFunction(PropertyName* name,
-                         uint32_t startOffsetInModule, uint32_t endOffsetInModule,
-                         PropertyName* maybeFieldName)
+      public:
+        Export() {}
+        Export(PropertyName* name, PropertyName* maybeFieldName, uint32_t wasmIndex,
+               uint32_t startOffsetInModule, uint32_t endOffsetInModule)
           : name_(name),
             maybeFieldName_(maybeFieldName)
         {
             MOZ_ASSERT(name_->isTenured());
             MOZ_ASSERT_IF(maybeFieldName_, maybeFieldName_->isTenured());
-            mozilla::PodZero(&pod);  // zero padding for Valgrind
-            pod.isChangeHeap_ = true;
+            pod.wasmIndex_ = wasmIndex;
             pod.startOffsetInModule_ = startOffsetInModule;
             pod.endOffsetInModule_ = endOffsetInModule;
         }
 
         void trace(JSTracer* trc) {
             TraceManuallyBarrieredEdge(trc, &name_, "asm.js export name");
             if (maybeFieldName_)
                 TraceManuallyBarrieredEdge(trc, &maybeFieldName_, "asm.js export field");
         }
 
-      public:
-        ExportedFunction() {}
-        ExportedFunction(ExportedFunction&& rhs)
-          : name_(rhs.name_),
-            maybeFieldName_(rhs.maybeFieldName_),
-            sig_(mozilla::Move(rhs.sig_))
-        {
-            mozilla::PodZero(&pod);  // zero padding for Valgrind
-            pod = rhs.pod;
-        }
-
         PropertyName* name() const {
             return name_;
         }
         PropertyName* maybeFieldName() const {
             return maybeFieldName_;
         }
         uint32_t startOffsetInModule() const {
             return pod.startOffsetInModule_;
         }
         uint32_t endOffsetInModule() const {
             return pod.endOffsetInModule_;
         }
+        static const uint32_t ChangeHeap = UINT32_MAX;
         bool isChangeHeap() const {
-            return pod.isChangeHeap_;
-        }
-        uint32_t funcIndex() const {
-            MOZ_ASSERT(!isChangeHeap());
-            return pod.funcIndex_;
-        }
-        void initCodeOffset(unsigned off) {
-            MOZ_ASSERT(!isChangeHeap());
-            MOZ_ASSERT(pod.codeOffset_ == UINT32_MAX);
-            pod.codeOffset_ = off;
-        }
-        const wasm::MallocSig& sig() const {
-            MOZ_ASSERT(!isChangeHeap());
-            return sig_;
-        }
-
-        size_t serializedSize() const;
-        uint8_t* serialize(uint8_t* cursor) const;
-        const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
-        bool clone(ExclusiveContext* cx, ExportedFunction* out) const;
-    };
-
-    class CodeRange
-    {
-      protected:
-        uint32_t nameIndex_;
-
-      private:
-        uint32_t lineNumber_;
-        uint32_t begin_;
-        uint32_t profilingReturn_;
-        uint32_t end_;
-        union {
-            struct {
-                uint8_t kind_;
-                uint8_t beginToEntry_;
-                uint8_t profilingJumpToProfilingReturn_;
-                uint8_t profilingEpilogueToProfilingReturn_;
-            } func;
-            struct {
-                uint8_t kind_;
-                uint16_t target_;
-            } thunk;
-            uint8_t kind_;
-        } u;
-
-        void assertValid();
-
-      public:
-        enum Kind { Function, Entry, JitFFI, SlowFFI, Interrupt, Thunk, Inline };
-
-        CodeRange() {}
-        CodeRange(Kind kind, AsmJSOffsets offsets);
-        CodeRange(Kind kind, AsmJSProfilingOffsets offsets);
-        CodeRange(wasm::Builtin builtin, AsmJSProfilingOffsets offsets);
-        CodeRange(uint32_t lineNumber, AsmJSFunctionOffsets offsets);
-
-        Kind kind() const { return Kind(u.kind_); }
-        bool isFunction() const { return kind() == Function; }
-        bool isEntry() const { return kind() == Entry; }
-        bool isFFI() const { return kind() == JitFFI || kind() == SlowFFI; }
-        bool isInterrupt() const { return kind() == Interrupt; }
-        bool isThunk() const { return kind() == Thunk; }
-
-        uint32_t begin() const {
-            return begin_;
-        }
-        uint32_t profilingEntry() const {
-            return begin();
-        }
-        uint32_t entry() const {
-            MOZ_ASSERT(isFunction());
-            return begin_ + u.func.beginToEntry_;
-        }
-        uint32_t end() const {
-            return end_;
-        }
-        uint32_t profilingJump() const {
-            MOZ_ASSERT(isFunction());
-            return profilingReturn_ - u.func.profilingJumpToProfilingReturn_;
+            return pod.wasmIndex_ == ChangeHeap;
         }
-        uint32_t profilingEpilogue() const {
-            MOZ_ASSERT(isFunction());
-            return profilingReturn_ - u.func.profilingEpilogueToProfilingReturn_;
-        }
-        uint32_t profilingReturn() const {
-            MOZ_ASSERT(isFunction() || isFFI() || isInterrupt() || isThunk());
-            return profilingReturn_;
-        }
-        void initNameIndex(uint32_t nameIndex) {
-            MOZ_ASSERT(nameIndex_ == UINT32_MAX);
-            nameIndex_ = nameIndex;
-        }
-        uint32_t functionNameIndex() const {
-            MOZ_ASSERT(isFunction());
-            MOZ_ASSERT(nameIndex_ != UINT32_MAX);
-            return nameIndex_;
-        }
-        PropertyName* functionName(const AsmJSModule& module) const {
-            return module.names_[functionNameIndex()].name();
-        }
-        const char* functionProfilingLabel(const AsmJSModule& module) const {
-            MOZ_ASSERT(isFunction());
-            return module.profilingLabels_[nameIndex_].get();
-        }
-        uint32_t functionLineNumber() const {
-            MOZ_ASSERT(isFunction());
-            return lineNumber_;
-        }
-        void functionOffsetBy(uint32_t offset) {
-            MOZ_ASSERT(isFunction());
-            begin_ += offset;
-            profilingReturn_ += offset;
-            end_ += offset;
-        }
-        wasm::Builtin thunkTarget() const {
-            MOZ_ASSERT(isThunk());
-            return wasm::Builtin(u.thunk.target_);
-        }
-    };
-
-    class Name
-    {
-        PropertyName* name_;
-      public:
-        Name() : name_(nullptr) {}
-        MOZ_IMPLICIT Name(PropertyName* name) : name_(name) {}
-        PropertyName* name() const { return name_; }
-        PropertyName*& name() { return name_; }
-        size_t serializedSize() const;
-        uint8_t* serialize(uint8_t* cursor) const;
-        const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
-        bool clone(ExclusiveContext* cx, Name* out) const;
-    };
-
-    typedef mozilla::UniquePtr<char[], JS::FreePolicy> ProfilingLabel;
-
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-    // Function information to add to the VTune JIT profiler following linking.
-    struct ProfiledFunction
-    {
-        PropertyName* name;
-        struct Pod {
-            unsigned startCodeOffset;
-            unsigned endCodeOffset;
-            unsigned lineno;
-            unsigned columnIndex;
-        } pod;
-
-        explicit ProfiledFunction()
-          : name(nullptr)
-        { }
-
-        ProfiledFunction(PropertyName* name, unsigned start, unsigned end,
-                         unsigned line = 0, unsigned column = 0)
-          : name(name)
-        {
-            MOZ_ASSERT(name->isTenured());
-
-            pod.startCodeOffset = start;
-            pod.endCodeOffset = end;
-            pod.lineno = line;
-            pod.columnIndex = column;
-        }
-
-        void trace(JSTracer* trc) {
-            if (name)
-                TraceManuallyBarrieredEdge(trc, &name, "asm.js profiled function name");
+        uint32_t wasmIndex() const {
+            MOZ_ASSERT(!isChangeHeap());
+            return pod.wasmIndex_;
         }
 
-        size_t serializedSize() const;
-        uint8_t* serialize(uint8_t* cursor) const;
-        const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
-    };
-#endif
-
-    struct RelativeLink
-    {
-        enum Kind
-        {
-            RawPointer,
-            CodeLabel,
-            InstructionImmediate
-        };
-
-        RelativeLink()
-        { }
-
-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-        // On MIPS, CodeLabels are instruction immediates so RelativeLinks only
-        // patch instruction immediates.
-        explicit RelativeLink(Kind kind) {
-            MOZ_ASSERT(kind == CodeLabel || kind == InstructionImmediate);
-        }
-        bool isRawPointerPatch() {
-            return false;
-        }
-#else
-        // On the rest, CodeLabels are raw pointers so RelativeLinks only patch
-        // raw pointers.
-        explicit RelativeLink(Kind kind) {
-            MOZ_ASSERT(kind == CodeLabel || kind == RawPointer);
-        }
-        bool isRawPointerPatch() {
-            return true;
-        }
-#endif
-
-        uint32_t patchAtOffset;
-        uint32_t targetOffset;
-    };
-
-    typedef Vector<RelativeLink, 0, SystemAllocPolicy> RelativeLinkVector;
-
-    typedef mozilla::EnumeratedArray<wasm::Builtin,
-                                     wasm::Builtin::Limit,
-                                     uint32_t> BuiltinThunkOffsetArray;
-
-    typedef Vector<uint32_t, 0, SystemAllocPolicy> OffsetVector;
-    typedef mozilla::EnumeratedArray<wasm::SymbolicAddress,
-                                     wasm::SymbolicAddress::Limit,
-                                     OffsetVector> OffsetVectorArray;
-
-    struct AbsoluteLinkArray : public OffsetVectorArray
-    {
-        size_t serializedSize() const;
-        uint8_t* serialize(uint8_t* cursor) const;
-        const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
-        bool clone(ExclusiveContext* cx, AbsoluteLinkArray* out) const;
-
-        size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+        WASM_DECLARE_SERIALIZABLE(Export)
     };
 
-    class FuncPtrTable
-    {
-        struct Pod {
-            uint32_t globalDataOffset_;
-        } pod;
-        OffsetVector elemOffsets_;
-
-      public:
-        FuncPtrTable() {}
-        FuncPtrTable(FuncPtrTable&& rhs) : pod(rhs.pod), elemOffsets_(Move(rhs.elemOffsets_)) {}
-        explicit FuncPtrTable(uint32_t globalDataOffset) { pod.globalDataOffset_ = globalDataOffset; }
-        void define(OffsetVector&& elemOffsets) { elemOffsets_ = Move(elemOffsets); }
-        uint32_t globalDataOffset() const { return pod.globalDataOffset_; }
-        const OffsetVector& elemOffsets() const { return elemOffsets_; }
-
-        size_t serializedSize() const;
-        uint8_t* serialize(uint8_t* cursor) const;
-        const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
-        bool clone(ExclusiveContext* cx, FuncPtrTable* out) const;
-
-        size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
-    };
-
-    typedef Vector<FuncPtrTable, 0, SystemAllocPolicy> FuncPtrTableVector;
+    typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
 
-    // Static-link data is used to patch a module either after it has been
-    // compiled or deserialized with various absolute addresses (of code or
-    // data in the process) or relative addresses (of code or data in the same
-    // AsmJSModule).
-    struct StaticLinkData
-    {
-        StaticLinkData() { mozilla::PodZero(&pod); }
-
-        struct Pod {
-            uint32_t interruptExitOffset;
-            uint32_t outOfBoundsExitOffset;
-            BuiltinThunkOffsetArray builtinThunkOffsets;
-        } pod;
-
-        RelativeLinkVector relativeLinks;
-        AbsoluteLinkArray absoluteLinks;
-        FuncPtrTableVector funcPtrTables;
-
-        size_t serializedSize() const;
-        uint8_t* serialize(uint8_t* cursor) const;
-        const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
-        bool clone(ExclusiveContext* cx, StaticLinkData* out) const;
-
-        size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
-    };
+    typedef JS::UniquePtr<wasm::Module, JS::DeletePolicy<wasm::Module>> UniqueWasmModule;
 
   private:
-    struct Pod {
-        uint32_t                          functionBytes_;
-        uint32_t                          codeBytes_;
-        uint32_t                          globalBytes_;
-        uint32_t                          totalBytes_;
-        uint32_t                          minHeapLength_;
-        uint32_t                          maxHeapLength_;
-        uint32_t                          heapLengthMask_;
-        uint32_t                          numFFIs_;
-        uint32_t                          srcLength_;
-        uint32_t                          srcLengthWithRightBrace_;
-        bool                              strict_;
-        bool                              hasArrayView_;
-        bool                              isSharedView_;
-        bool                              hasFixedMinHeapLength_;
-        bool                              canUseSignalHandlers_;
+    UniqueWasmModule            wasm_;
+    wasm::UniqueStaticLinkData  linkData_;
+    struct CacheablePod {
+        uint32_t                minHeapLength_;
+        uint32_t                maxHeapLength_;
+        uint32_t                heapLengthMask_;
+        uint32_t                numFFIs_;
+        uint32_t                srcLength_;
+        uint32_t                srcLengthWithRightBrace_;
+        bool                    strict_;
+        bool                    hasArrayView_;
+        bool                    isSharedView_;
+        bool                    hasFixedMinHeapLength_;
     } pod;
-
-    // These two fields need to be kept out pod as they depend on the position
-    // of the module within the ScriptSource and thus aren't invariant with
-    // respect to caching.
-    const uint32_t                        srcStart_;
-    const uint32_t                        srcBodyStart_;
-
-    Vector<Global,                 0, SystemAllocPolicy> globals_;
-    Vector<Exit,                   0, SystemAllocPolicy> exits_;
-    Vector<ExportedFunction,       0, SystemAllocPolicy> exports_;
-    Vector<wasm::CallSite,         0, SystemAllocPolicy> callSites_;
-    Vector<CodeRange,              0, SystemAllocPolicy> codeRanges_;
-    Vector<Name,                   0, SystemAllocPolicy> names_;
-    Vector<ProfilingLabel,         0, SystemAllocPolicy> profilingLabels_;
-    Vector<wasm::HeapAccess,       0, SystemAllocPolicy> heapAccesses_;
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-    Vector<ProfiledFunction,       0, SystemAllocPolicy> profiledFunctions_;
-#endif
-
-    ScriptSource *                        scriptSource_;
-    PropertyName *                        globalArgumentName_;
-    PropertyName *                        importArgumentName_;
-    PropertyName *                        bufferArgumentName_;
-    uint8_t *                             code_;
-    uint8_t *                             interruptExit_;
-    uint8_t *                             outOfBoundsExit_;
-    StaticLinkData                        staticLinkData_;
-    RelocatablePtrArrayBufferObjectMaybeShared maybeHeap_;
-    AsmJSModule **                        prevLinked_;
-    AsmJSModule *                         nextLinked_;
-    bool                                  dynamicallyLinked_;
-    bool                                  loadedFromCache_;
-    bool                                  profilingEnabled_;
-    bool                                  interrupted_;
-
-    void restoreHeapToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer);
-    void restoreToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer, uint8_t* prevCode,
-                               ExclusiveContext* cx);
+    const ScriptSourceHolder    scriptSource_;
+    const uint32_t              srcStart_;
+    const uint32_t              srcBodyStart_;
+    GlobalVector                globals_;
+    ImportVector                imports_;
+    ExportVector                exports_;
+    PropertyName*               globalArgumentName_;
+    PropertyName*               importArgumentName_;
+    PropertyName*               bufferArgumentName_;
 
   public:
     explicit AsmJSModule(ScriptSource* scriptSource, uint32_t srcStart, uint32_t srcBodyStart,
-                         bool strict, bool canUseSignalHandlers);
+                         bool strict);
     void trace(JSTracer* trc);
-    ~AsmJSModule();
-
-    // An AsmJSModule transitions from !finished to finished to dynamically linked.
-    bool isFinished() const { return !!code_; }
-    bool isDynamicallyLinked() const { return dynamicallyLinked_; }
 
     /*************************************************************************/
     // These functions may be used as soon as the module is constructed:
 
     ScriptSource* scriptSource() const {
-        MOZ_ASSERT(scriptSource_);
-        return scriptSource_;
+        return scriptSource_.get();
     }
     bool strict() const {
         return pod.strict_;
     }
-    bool canUseSignalHandlers() const {
-        return pod.canUseSignalHandlers_;
-    }
-    bool usesSignalHandlersForInterrupt() const {
-        return pod.canUseSignalHandlers_;
-    }
-    bool usesSignalHandlersForOOB() const {
-#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
-        return pod.canUseSignalHandlers_;
-#else
-        return false;
-#endif
-    }
-    bool loadedFromCache() const {
-        return loadedFromCache_;
-    }
 
     // srcStart() refers to the offset in the ScriptSource to the beginning of
     // the asm.js module function. If the function has been created with the
     // Function constructor, this will be the first character in the function
     // source. Otherwise, it will be the opening parenthesis of the arguments
     // list.
     uint32_t srcStart() const {
         return srcStart_;
@@ -829,24 +382,16 @@ class AsmJSModule
     uint32_t maxHeapLength() const {
         return pod.maxHeapLength_;
     }
     uint32_t heapLengthMask() const {
         MOZ_ASSERT(pod.hasFixedMinHeapLength_);
         return pod.heapLengthMask_;
     }
 
-    // about:memory reporting
-    void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
-                       size_t* asmJSModuleData);
-
-    /*************************************************************************/
-    // These functions build the global scope of the module while parsing the
-    // module prologue (before the function bodies):
-
     void initGlobalArgumentName(PropertyName* n) {
         MOZ_ASSERT(!isFinished());
         MOZ_ASSERT_IF(n, n->isTenured());
         globalArgumentName_ = n;
     }
     void initImportArgumentName(PropertyName* n) {
         MOZ_ASSERT(!isFinished());
         MOZ_ASSERT_IF(n, n->isTenured());
@@ -862,69 +407,30 @@ class AsmJSModule
     }
     PropertyName* importArgumentName() const {
         return importArgumentName_;
     }
     PropertyName* bufferArgumentName() const {
         return bufferArgumentName_;
     }
 
-    /*************************************************************************/
-    // These functions may only be called before finish():
-
-  private:
-    bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset) {
-        MOZ_ASSERT(!isFinished());
-        uint32_t pad = ComputeByteAlignment(pod.globalBytes_, align);
-        if (UINT32_MAX - pod.globalBytes_ < pad + bytes)
-            return false;
-        pod.globalBytes_ += pad;
-        *globalDataOffset = pod.globalBytes_;
-        pod.globalBytes_ += bytes;
-        return true;
-    }
-    bool addGlobalVar(wasm::ValType type, uint32_t* globalDataOffset) {
+    bool addGlobalVarInit(const wasm::Val& v, uint32_t globalDataOffset) {
         MOZ_ASSERT(!isFinished());
-        unsigned width = 0;
-        switch (type) {
-          case wasm::ValType::I32:
-          case wasm::ValType::F32:
-            width = 4;
-            break;
-          case wasm::ValType::I64:
-          case wasm::ValType::F64:
-            width = 8;
-            break;
-          case wasm::ValType::I32x4:
-          case wasm::ValType::F32x4:
-          case wasm::ValType::B32x4:
-            width = 16;
-            break;
-        }
-        return allocateGlobalBytes(width, width, globalDataOffset);
-    }
-  public:
-    bool addGlobalVarInit(const wasm::Val& v, uint32_t* globalDataOffset) {
-        MOZ_ASSERT(!isFinished());
-        if (!addGlobalVar(v.type(), globalDataOffset))
-            return false;
         Global g(Global::Variable, nullptr);
         g.pod.u.var.initKind_ = Global::InitConstant;
         g.pod.u.var.u.val_ = v;
-        g.pod.u.var.globalDataOffset_ = *globalDataOffset;
+        g.pod.u.var.globalDataOffset_ = globalDataOffset;
         return globals_.append(g);
     }
-    bool addGlobalVarImport(PropertyName* name, wasm::ValType importType, uint32_t* globalDataOffset) {
+    bool addGlobalVarImport(PropertyName* name, wasm::ValType importType, uint32_t globalDataOffset) {
         MOZ_ASSERT(!isFinished());
-        if (!addGlobalVar(importType, globalDataOffset))
-            return false;
         Global g(Global::Variable, name);
         g.pod.u.var.initKind_ = Global::InitImport;
         g.pod.u.var.u.importType_ = importType;
-        g.pod.u.var.globalDataOffset_ = *globalDataOffset;
+        g.pod.u.var.globalDataOffset_ = globalDataOffset;
         return globals_.append(g);
     }
     bool addFFI(PropertyName* field, uint32_t* ffiIndex) {
         MOZ_ASSERT(!isFinished());
         if (pod.numFFIs_ == UINT32_MAX)
             return false;
         Global g(Global::FFI, field);
         g.pod.u.ffiIndex_ = *ffiIndex = pod.numFFIs_++;
@@ -985,416 +491,153 @@ class AsmJSModule
     }
     bool addGlobalConstant(double value, PropertyName* name) {
         MOZ_ASSERT(!isFinished());
         Global g(Global::Constant, name);
         g.pod.u.constant.value_ = value;
         g.pod.u.constant.kind_ = Global::GlobalConstant;
         return globals_.append(g);
     }
-    unsigned numGlobals() const {
-        return globals_.length();
-    }
-    Global& global(unsigned i) {
-        return globals_[i];
-    }
-    void setViewsAreShared() {
-        if (pod.hasArrayView_)
-            pod.isSharedView_ = true;
+    bool addImport(uint32_t ffiIndex, uint32_t importIndex) {
+        MOZ_ASSERT(imports_.length() == importIndex);
+        return imports_.emplaceBack(ffiIndex);
     }
-
-    /*************************************************************************/
-    // These functions are called while parsing/compiling function bodies:
-
-    bool hasArrayView() const {
-        return pod.hasArrayView_;
+    bool addExport(PropertyName* name, PropertyName* maybeFieldName, uint32_t wasmIndex,
+                   uint32_t funcSrcBegin, uint32_t funcSrcEnd)
+    {
+        // NB: funcSrcBegin/funcSrcEnd are given relative to the ScriptSource
+        // (the entire file) and ExportedFunctions store offsets relative to
+        // the beginning of the module (so that they are caching-invariant).
+        MOZ_ASSERT(!isFinished());
+        MOZ_ASSERT(srcStart_ < funcSrcBegin);
+        MOZ_ASSERT(funcSrcBegin < funcSrcEnd);
+        return exports_.emplaceBack(name, maybeFieldName, wasmIndex,
+                                    funcSrcBegin - srcStart_, funcSrcEnd - srcStart_);
     }
-    bool isSharedView() const {
-        MOZ_ASSERT(pod.hasArrayView_);
-        return pod.isSharedView_;
-    }
-    void addChangeHeap(uint32_t mask, uint32_t min, uint32_t max) {
+    bool addChangeHeap(uint32_t mask, uint32_t min, uint32_t max) {
         MOZ_ASSERT(!isFinished());
         MOZ_ASSERT(!pod.hasFixedMinHeapLength_);
         MOZ_ASSERT(IsValidAsmJSHeapLength(mask + 1));
         MOZ_ASSERT(min >= RoundUpToNextValidAsmJSHeapLength(0));
         MOZ_ASSERT(max <= pod.maxHeapLength_);
         MOZ_ASSERT(min <= max);
         pod.heapLengthMask_ = mask;
         pod.minHeapLength_ = min;
         pod.maxHeapLength_ = max;
         pod.hasFixedMinHeapLength_ = true;
+        return true;
+    }
+
+    const GlobalVector& globals() const {
+        return globals_;
+    }
+    const ImportVector& imports() const {
+        return imports_;
+    }
+    const ExportVector& exports() const {
+        return exports_;
+    }
+
+    void setViewsAreShared() {
+        if (pod.hasArrayView_)
+            pod.isSharedView_ = true;
+    }
+    bool hasArrayView() const {
+        return pod.hasArrayView_;
+    }
+    bool isSharedView() const {
+        return pod.isSharedView_;
     }
     bool tryRequireHeapLengthToBeAtLeast(uint32_t len) {
         MOZ_ASSERT(!isFinished());
         if (pod.hasFixedMinHeapLength_ && len > pod.minHeapLength_)
             return false;
         if (len > pod.maxHeapLength_)
             return false;
         len = RoundUpToNextValidAsmJSHeapLength(len);
         if (len > pod.minHeapLength_)
             pod.minHeapLength_ = len;
         return true;
     }
-    bool addCodeRange(CodeRange::Kind kind, AsmJSOffsets offsets) {
-        return codeRanges_.append(CodeRange(kind, offsets));
-    }
-    bool addCodeRange(CodeRange::Kind kind, AsmJSProfilingOffsets offsets) {
-        return codeRanges_.append(CodeRange(kind, offsets));
-    }
-    bool addFunctionCodeRange(PropertyName* name, CodeRange codeRange) {
-        MOZ_ASSERT(!isFinished());
-        MOZ_ASSERT(name->isTenured());
-        if (names_.length() >= UINT32_MAX)
-            return false;
-        codeRange.initNameIndex(names_.length());
-        return names_.append(name) && codeRanges_.append(codeRange);
-    }
-    bool addBuiltinThunkCodeRange(wasm::Builtin builtin, AsmJSProfilingOffsets offsets) {
-        MOZ_ASSERT(staticLinkData_.pod.builtinThunkOffsets[builtin] == 0);
-        staticLinkData_.pod.builtinThunkOffsets[builtin] = offsets.begin;
-        return codeRanges_.append(CodeRange(builtin, offsets));
-    }
-    bool addExit(wasm::MallocSig&& sig, unsigned ffiIndex, unsigned* exitIndex) {
-        MOZ_ASSERT(!isFinished());
-        static_assert(sizeof(ExitDatum) % sizeof(void*) == 0, "word aligned");
-        uint32_t globalDataOffset;
-        if (!allocateGlobalBytes(sizeof(ExitDatum), sizeof(void*), &globalDataOffset))
-            return false;
-        *exitIndex = unsigned(exits_.length());
-        return exits_.append(Exit(Move(sig), ffiIndex, globalDataOffset));
-    }
-    unsigned numExits() const {
-        return exits_.length();
-    }
-    Exit& exit(unsigned i) {
-        return exits_[i];
-    }
-    const Exit& exit(unsigned i) const {
-        return exits_[i];
-    }
-    bool declareFuncPtrTable(unsigned numElems, uint32_t* funcPtrTableIndex) {
-        MOZ_ASSERT(!isFinished());
-        MOZ_ASSERT(IsPowerOfTwo(numElems));
-        uint32_t globalDataOffset;
-        if (!allocateGlobalBytes(numElems * sizeof(void*), sizeof(void*), &globalDataOffset))
-            return false;
-        *funcPtrTableIndex = staticLinkData_.funcPtrTables.length();
-        return staticLinkData_.funcPtrTables.append(FuncPtrTable(globalDataOffset));
-    }
-    FuncPtrTable& funcPtrTable(uint32_t funcPtrTableIndex) {
-        return staticLinkData_.funcPtrTables[funcPtrTableIndex];
-    }
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-    bool addProfiledFunction(ProfiledFunction func) {
-        MOZ_ASSERT(!isFinished());
-        return profiledFunctions_.append(func);
-    }
-    unsigned numProfiledFunctions() const {
-        return profiledFunctions_.length();
-    }
-    ProfiledFunction& profiledFunction(unsigned i) {
-        return profiledFunctions_[i];
-    }
-#endif
-
-    bool addExportedFunction(PropertyName* name,
-                             uint32_t funcIndex,
-                             uint32_t funcSrcBegin,
-                             uint32_t funcSrcEnd,
-                             PropertyName* maybeFieldName,
-                             wasm::MallocSig&& sig)
-    {
-        // NB: funcSrcBegin/funcSrcEnd are given relative to the ScriptSource
-        // (the entire file) and ExportedFunctions store offsets relative to
-        // the beginning of the module (so that they are caching-invariant).
-        MOZ_ASSERT(!isFinished());
-        MOZ_ASSERT(srcStart_ < funcSrcBegin);
-        MOZ_ASSERT(funcSrcBegin < funcSrcEnd);
-        ExportedFunction func(name, funcIndex, funcSrcBegin - srcStart_, funcSrcEnd - srcStart_,
-                              maybeFieldName, mozilla::Move(sig));
-        return exports_.length() < UINT32_MAX && exports_.append(mozilla::Move(func));
-    }
-    bool addExportedChangeHeap(PropertyName* name,
-                               uint32_t funcSrcBegin,
-                               uint32_t funcSrcEnd,
-                               PropertyName* maybeFieldName)
-    {
-        // See addExportedFunction.
-        MOZ_ASSERT(!isFinished());
-        MOZ_ASSERT(srcStart_ < funcSrcBegin);
-        MOZ_ASSERT(funcSrcBegin < funcSrcEnd);
-        ExportedFunction func(name, funcSrcBegin - srcStart_, funcSrcEnd - srcStart_,
-                              maybeFieldName);
-        return exports_.length() < UINT32_MAX && exports_.append(mozilla::Move(func));
-    }
-    unsigned numExportedFunctions() const {
-        return exports_.length();
-    }
-    const ExportedFunction& exportedFunction(unsigned i) const {
-        return exports_[i];
-    }
-    ExportedFunction& exportedFunction(unsigned i) {
-        return exports_[i];
-    }
-    void setAsyncInterruptOffset(uint32_t o) {
-        staticLinkData_.pod.interruptExitOffset = o;
-    }
-    void setOnOutOfBoundsExitOffset(uint32_t o) {
-        staticLinkData_.pod.outOfBoundsExitOffset = o;
-    }
 
     /*************************************************************************/
+    // A module isFinished() when compilation completes. After being finished,
+    // a module must be statically and dynamically linked before execution.
 
-    // finish() is called once the entire module has been parsed (via
-    // tokenStream) and all function and entry/exit trampolines have been
-    // generated (via masm). After this function, the module must still be
-    // statically and dynamically linked before code can be run.
-    bool finish(ExclusiveContext* cx, frontend::TokenStream& ts, jit::MacroAssembler& masm);
+    bool isFinished() const {
+        return !!wasm_;
+    }
+    void finish(wasm::Module* wasm, wasm::UniqueStaticLinkData linkData,
+                uint32_t endBeforeCurly, uint32_t endAfterCurly);
 
     /*************************************************************************/
-    // These accessor functions can be used after finish():
-
-    uint8_t* codeBase() const {
-        MOZ_ASSERT(isFinished());
-        MOZ_ASSERT(uintptr_t(code_) % AsmJSPageSize == 0);
-        return code_;
-    }
-    uint32_t codeBytes() const {
-        MOZ_ASSERT(isFinished());
-        return pod.codeBytes_;
-    }
-    bool containsCodePC(void* pc) const {
-        MOZ_ASSERT(isFinished());
-        return pc >= code_ && pc < (code_ + codeBytes());
-    }
+    // These accessor functions can only be used after finish():
 
-    // The range [0, functionBytes) is a subrange of [0, codeBytes) that
-    // contains only function body code, not the stub code. This distinction is
-    // used by the async interrupt handler to only interrupt when the pc is in
-    // function code which, in turn, simplifies reasoning about how stubs
-    // enter/exit.
-    void setFunctionBytes(uint32_t functionBytes) {
-        MOZ_ASSERT(!isFinished());
-        MOZ_ASSERT(!pod.functionBytes_);
-        pod.functionBytes_ = functionBytes;
-    }
-    uint32_t functionBytes() const {
+    wasm::Module& wasm() const {
         MOZ_ASSERT(isFinished());
-        return pod.functionBytes_;
-    }
-    bool containsFunctionPC(void* pc) const {
-        MOZ_ASSERT(isFinished());
-        return pc >= code_ && pc < (code_ + functionBytes());
+        return *wasm_;
     }
-
-    uint32_t globalBytes() const {
-        MOZ_ASSERT(isFinished());
-        return pod.globalBytes_;
-    }
-
-    unsigned numFFIs() const {
+    uint32_t numFFIs() const {
         MOZ_ASSERT(isFinished());
         return pod.numFFIs_;
     }
     uint32_t srcEndBeforeCurly() const {
         MOZ_ASSERT(isFinished());
         return srcStart_ + pod.srcLength_;
     }
     uint32_t srcEndAfterCurly() const {
         MOZ_ASSERT(isFinished());
         return srcStart_ + pod.srcLengthWithRightBrace_;
     }
-
-    // Lookup a callsite by the return pc (from the callee to the caller).
-    // Return null if no callsite was found.
-    const wasm::CallSite* lookupCallSite(void* returnAddress) const;
-
-    // Lookup the name the code range containing the given pc. Return null if no
-    // code range was found.
-    const CodeRange* lookupCodeRange(void* pc) const;
-
-    // Lookup a heap access site by the pc which performs the access. Return
-    // null if no heap access was found.
-    const wasm::HeapAccess* lookupHeapAccess(void* pc) const;
-
-    // The global data section is placed after the executable code (i.e., at
-    // offset codeBytes_) in the module's linear allocation. The global data
-    // starts with some fixed allocations followed by interleaved global,
-    // function-pointer table and exit allocations.
-    uint32_t offsetOfGlobalData() const {
-        MOZ_ASSERT(isFinished());
-        return pod.codeBytes_;
-    }
-    uint8_t* globalData() const {
-        MOZ_ASSERT(isFinished());
-        return codeBase() + offsetOfGlobalData();
-    }
-    static void assertGlobalDataOffsets() {
-        static_assert(wasm::ActivationGlobalDataOffset == 0,
-                      "an AsmJSActivation* data goes first");
-        static_assert(wasm::HeapGlobalDataOffset == wasm::ActivationGlobalDataOffset + sizeof(void*),
-                      "then a pointer to the heap*");
-        static_assert(wasm::NaN64GlobalDataOffset == wasm::HeapGlobalDataOffset + sizeof(uint8_t*),
-                      "then a 64-bit NaN");
-        static_assert(wasm::NaN32GlobalDataOffset == wasm::NaN64GlobalDataOffset + sizeof(double),
-                      "then a 32-bit NaN");
-        static_assert(sInitialGlobalDataBytes == wasm::NaN32GlobalDataOffset + sizeof(float),
-                      "then all the normal global data (globals, exits, func-ptr-tables)");
-    }
-    static const uint32_t sInitialGlobalDataBytes = wasm::NaN32GlobalDataOffset + sizeof(float);
-
-    AsmJSActivation*& activation() const {
-        MOZ_ASSERT(isFinished());
-        return *(AsmJSActivation**)(globalData() + wasm::ActivationGlobalDataOffset);
-    }
-    bool active() const {
-        return activation() != nullptr;
-    }
-  private:
-    // The pointer may reference shared memory, use with care.
-    // Generally you want to use maybeHeap(), not heapDatum().
-    uint8_t*& heapDatum() const {
-        MOZ_ASSERT(isFinished());
-        return *(uint8_t**)(globalData() + wasm::HeapGlobalDataOffset);
-    }
-  public:
-
-    /*************************************************************************/
-    // These functions are called after finish() but before staticallyLink():
-
-    bool addRelativeLink(RelativeLink link) {
-        MOZ_ASSERT(isFinished());
-        return staticLinkData_.relativeLinks.append(link);
+    bool staticallyLink(ExclusiveContext* cx) {
+        return wasm_->staticallyLink(cx, *linkData_);
     }
 
-    // A module is serialized after it is finished but before it is statically
-    // linked. (Technically, it could be serialized after static linking, but it
-    // would still need to be statically linked on deserialization.)
+    // See WASM_DECLARE_SERIALIZABLE.
     size_t serializedSize() const;
     uint8_t* serialize(uint8_t* cursor) const;
     const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
-
-    // Additionally, this function is called to flush the i-cache after
-    // deserialization and cloning (but still before static linking, to prevent
-    // a bunch of expensive micro-flushes).
-    void setAutoFlushICacheRange();
-
-    /*************************************************************************/
-
-    // After a module is finished compiling or deserializing, it is "statically
-    // linked" which specializes the code to its current address (this allows
-    // code to be relocated between serialization and deserialization).
-    void staticallyLink(ExclusiveContext* cx);
-
-    // After a module is statically linked, it is "dynamically linked" which
-    // specializes it to a particular set of arguments. In particular, this
-    // binds the code to a particular heap (via initHeap) and set of global
-    // variables. A given asm.js module cannot be dynamically linked more than
-    // once so, if JS tries, the module is cloned. When linked, an asm.js module
-    // is kept in a list so that it can be updated if the linked buffer is
-    // detached.
-    void setIsDynamicallyLinked(JSRuntime* rt) {
-        MOZ_ASSERT(isFinished());
-        MOZ_ASSERT(!isDynamicallyLinked());
-        dynamicallyLinked_ = true;
-        nextLinked_ = rt->linkedAsmJSModules;
-        prevLinked_ = &rt->linkedAsmJSModules;
-        if (nextLinked_)
-            nextLinked_->prevLinked_ = &nextLinked_;
-        rt->linkedAsmJSModules = this;
-        MOZ_ASSERT(isDynamicallyLinked());
-    }
-
-    void initHeap(Handle<ArrayBufferObjectMaybeShared*> heap, JSContext* cx);
-    bool changeHeap(Handle<ArrayBufferObject*> newHeap, JSContext* cx);
-    bool detachHeap(JSContext* cx);
-
-    bool clone(JSContext* cx, ScopedJSDeletePtr<AsmJSModule>* moduleOut) const;
-
-    /*************************************************************************/
-    // Functions that can be called after dynamic linking succeeds:
-
-    AsmJSModule* nextLinked() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return nextLinked_;
-    }
-    bool hasDetachedHeap() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return hasArrayView() && !heapDatum();
-    }
-    CodePtr entryTrampoline(const ExportedFunction& func) const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        MOZ_ASSERT(!func.isChangeHeap());
-        return JS_DATA_TO_FUNC_PTR(CodePtr, code_ + func.pod.codeOffset_);
-    }
-    uint8_t* interruptExit() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return interruptExit_;
-    }
-    uint8_t* outOfBoundsExit() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return outOfBoundsExit_;
-    }
-    SharedMem<uint8_t*> maybeHeap() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return hasArrayView() && isSharedView() ? SharedMem<uint8_t*>::shared(heapDatum())
-            : SharedMem<uint8_t*>::unshared(heapDatum());
-    }
-    ArrayBufferObjectMaybeShared* maybeHeapBufferObject() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return maybeHeap_;
-    }
-    size_t heapLength() const;
-    bool profilingEnabled() const {
-        MOZ_ASSERT(isDynamicallyLinked());
-        return profilingEnabled_;
-    }
-    void setProfilingEnabled(bool enabled, JSContext* cx);
-    void setInterrupted(bool interrupted) {
-        MOZ_ASSERT(isDynamicallyLinked());
-        interrupted_ = interrupted;
-    }
+    bool clone(JSContext* cx, HandleAsmJSModule moduleObj) const;
+    void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
+                       size_t* asmJSModuleData);
 };
 
 // Store the just-parsed module in the cache using AsmJSCacheOps.
 extern JS::AsmJSCacheResult
 StoreAsmJSModuleInCache(AsmJSParser& parser,
                         const AsmJSModule& module,
                         ExclusiveContext* cx);
 
 // Attempt to load the asm.js module that is about to be parsed from the cache
-// using AsmJSCacheOps. On cache hit, *module will be non-null. Note: the
-// return value indicates whether or not an error was encountered, not whether
-// there was a cache hit.
+// using AsmJSCacheOps. The return value indicates whether an error was
+// reported. The loadedFromCache outparam indicates whether the module was
+// successfully loaded and stored in moduleObj.
 extern bool
-LookupAsmJSModuleInCache(ExclusiveContext* cx,
-                         AsmJSParser& parser,
-                         ScopedJSDeletePtr<AsmJSModule>* module,
-                         ScopedJSFreePtr<char>* compilationTimeReport);
+LookupAsmJSModuleInCache(ExclusiveContext* cx, AsmJSParser& parser, HandleAsmJSModule moduleObj,
+                         bool* loadedFromCache, UniqueChars* compilationTimeReport);
 
 // This function must be called for every detached ArrayBuffer.
 extern bool
 OnDetachAsmJSArrayBuffer(JSContext* cx, Handle<ArrayBufferObject*> buffer);
 
 // An AsmJSModuleObject is an internal implementation object (i.e., not exposed
 // directly to user script) which manages the lifetime of an AsmJSModule. A
 // JSObject is necessary since we want LinkAsmJS/CallAsmJS JSFunctions to be
 // able to point to their module via their extended slots.
 class AsmJSModuleObject : public NativeObject
 {
     static const unsigned MODULE_SLOT = 0;
 
   public:
     static const unsigned RESERVED_SLOTS = 1;
 
-    // On success, return an AsmJSModuleClass JSObject that has taken ownership
-    // (and release()ed) the given module.
-    static AsmJSModuleObject* create(ExclusiveContext* cx, ScopedJSDeletePtr<AsmJSModule>* module);
+    static AsmJSModuleObject* create(ExclusiveContext* cx);
 
+    bool hasModule() const;
+    void setModule(AsmJSModule* module);
     AsmJSModule& module() const;
 
     void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode,
                        size_t* asmJSModuleData) {
         module().addSizeOfMisc(mallocSizeOf, asmJSModuleCode, asmJSModuleData);
     }
 
     static const Class class_;
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -14,17 +14,16 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #include "asmjs/AsmJSValidate.h"
 
 #include "mozilla/Move.h"
-#include "mozilla/UniquePtr.h"
 
 #include "jsmath.h"
 #include "jsprf.h"
 #include "jsutil.h"
 
 #include "asmjs/AsmJSLink.h"
 #include "asmjs/AsmJSModule.h"
 #include "asmjs/WasmGenerator.h"
@@ -44,17 +43,16 @@ using namespace js::frontend;
 using namespace js::jit;
 using namespace js::wasm;
 
 using mozilla::HashGeneric;
 using mozilla::IsNaN;
 using mozilla::IsNegativeZero;
 using mozilla::Move;
 using mozilla::PositiveInfinity;
-using mozilla::UniquePtr;
 using JS::AsmJSOption;
 using JS::GenericNaN;
 
 /*****************************************************************************/
 // ParseNode utilities
 
 static inline ParseNode*
 NextNode(ParseNode* pn)
@@ -1159,23 +1157,23 @@ class MOZ_STACK_CLASS ModuleValidator
         ArrayView(PropertyName* name, Scalar::Type type)
           : name(name), type(type)
         {}
 
         PropertyName* name;
         Scalar::Type type;
     };
 
-    class ExitDescriptor
+    class ImportDescriptor
     {
         PropertyName* name_;
         const LifoSig* sig_;
 
       public:
-        ExitDescriptor(PropertyName* name, const LifoSig& sig)
+        ImportDescriptor(PropertyName* name, const LifoSig& sig)
           : name_(name), sig_(&sig)
         {}
 
         PropertyName* name() const {
             return name_;
         }
         const LifoSig& sig() const {
             return *sig_;
@@ -1184,71 +1182,72 @@ class MOZ_STACK_CLASS ModuleValidator
         struct Lookup {  // implements HashPolicy
             PropertyName* name_;
             const MallocSig& sig_;
             Lookup(PropertyName* name, const MallocSig& sig) : name_(name), sig_(sig) {}
         };
         static HashNumber hash(const Lookup& l) {
             return HashGeneric(l.name_, l.sig_.hash());
         }
-        static bool match(const ExitDescriptor& lhs, const Lookup& rhs) {
+        static bool match(const ImportDescriptor& lhs, const Lookup& rhs) {
             return lhs.name_ == rhs.name_ && *lhs.sig_ == rhs.sig_;
         }
     };
 
   private:
     typedef HashMap<PropertyName*, Global*> GlobalMap;
     typedef HashMap<PropertyName*, MathBuiltin> MathNameMap;
     typedef HashMap<PropertyName*, AsmJSAtomicsBuiltinFunction> AtomicsNameMap;
     typedef HashMap<PropertyName*, AsmJSSimdOperation> SimdOperationNameMap;
     typedef Vector<ArrayView> ArrayViewVector;
 
   public:
-    typedef HashMap<ExitDescriptor, unsigned, ExitDescriptor> ExitMap;
+    typedef HashMap<ImportDescriptor, unsigned, ImportDescriptor> ImportMap;
 
   private:
-    ExclusiveContext*                       cx_;
-    AsmJSParser&                            parser_;
-
-    ModuleGenerator                         mg_;
-
-    LifoAlloc                               validationLifo_;
-    FuncVector                              functions_;
-    FuncPtrTableVector                      funcPtrTables_;
-    GlobalMap                               globals_;
-    ArrayViewVector                         arrayViews_;
-    ExitMap                                 exits_;
-
-    MathNameMap                             standardLibraryMathNames_;
-    AtomicsNameMap                          standardLibraryAtomicsNames_;
-    SimdOperationNameMap                    standardLibrarySimdOpNames_;
-
-    ParseNode*                              moduleFunctionNode_;
-    PropertyName*                           moduleFunctionName_;
-
-    UniquePtr<char[], JS::FreePolicy>       errorString_;
-    uint32_t                                errorOffset_;
-    bool                                    errorOverRecursed_;
-
-    bool                                    canValidateChangeHeap_;
-    bool                                    hasChangeHeap_;
-    bool                                    supportsSimd_;
-    bool                                    atomicsPresent_;
+    ExclusiveContext*    cx_;
+    AsmJSParser&         parser_;
+
+    ModuleGenerator      mg_;
+    AsmJSModule*         module_;
+
+    LifoAlloc            validationLifo_;
+    FuncVector           functions_;
+    FuncPtrTableVector   funcPtrTables_;
+    GlobalMap            globals_;
+    ArrayViewVector      arrayViews_;
+    ImportMap            imports_;
+
+    MathNameMap          standardLibraryMathNames_;
+    AtomicsNameMap       standardLibraryAtomicsNames_;
+    SimdOperationNameMap standardLibrarySimdOpNames_;
+
+    ParseNode*           moduleFunctionNode_;
+    PropertyName*        moduleFunctionName_;
+
+    UniqueChars          errorString_;
+    uint32_t             errorOffset_;
+    bool                 errorOverRecursed_;
+
+    bool                 canValidateChangeHeap_;
+    bool                 hasChangeHeap_;
+    bool                 supportsSimd_;
+    bool                 atomicsPresent_;
 
   public:
     ModuleValidator(ExclusiveContext* cx, AsmJSParser& parser)
       : cx_(cx),
         parser_(parser),
         mg_(cx),
         validationLifo_(VALIDATION_LIFO_DEFAULT_CHUNK_SIZE),
         functions_(cx),
         funcPtrTables_(cx),
         globals_(cx),
         arrayViews_(cx),
-        exits_(cx),
+        imports_(cx),
         standardLibraryMathNames_(cx),
         standardLibraryAtomicsNames_(cx),
         standardLibrarySimdOpNames_(cx),
         moduleFunctionNode_(parser.pc->maybeFunction),
         moduleFunctionName_(nullptr),
         errorString_(nullptr),
         errorOffset_(UINT32_MAX),
         errorOverRecursed_(false),
@@ -1298,18 +1297,18 @@ class MOZ_STACK_CLASS ModuleValidator
         JSAtom* atom = Atomize(cx_, name, strlen(name));
         if (!atom)
             return false;
         return standardLibrarySimdOpNames_.putNew(atom->asPropertyName(), op);
     }
 
   public:
 
-    bool init() {
-        if (!globals_.init() || !exits_.init())
+    bool init(HandleAsmJSModule moduleObj) {
+        if (!globals_.init() || !imports_.init())
             return false;
 
         if (!standardLibraryMathNames_.init() ||
             !addStandardLibraryMathName("sin", AsmJSMathBuiltin_sin) ||
             !addStandardLibraryMathName("cos", AsmJSMathBuiltin_cos) ||
             !addStandardLibraryMathName("tan", AsmJSMathBuiltin_tan) ||
             !addStandardLibraryMathName("asin", AsmJSMathBuiltin_asin) ||
             !addStandardLibraryMathName("acos", AsmJSMathBuiltin_acos) ||
@@ -1366,183 +1365,196 @@ class MOZ_STACK_CLASS ModuleValidator
         uint32_t srcStart = parser_.pc->maybeFunction->pn_body->pn_pos.begin;
         uint32_t srcBodyStart = tokenStream().currentToken().pos.end;
 
         // "use strict" should be added to the source if we are in an implicit
         // strict context, see also comment above addUseStrict in
         // js::FunctionToString.
         bool strict = parser_.pc->sc->strict() && !parser_.pc->sc->hasExplicitUseStrict();
 
-        return mg_.init(parser_.ss, srcStart, srcBodyStart, strict);
-    }
-
-    bool finish(ScopedJSDeletePtr<AsmJSModule>* module, SlowFunctionVector* slowFuncs) {
-        return mg_.finish(parser_.tokenStream, module, slowFuncs);
+        module_ = cx_->new_<AsmJSModule>(parser_.ss, srcStart, srcBodyStart, strict);
+        if (!module_)
+            return false;
+
+        moduleObj->setModule(module_);
+
+        return mg_.init();
+    }
+
+    bool finish(SlowFunctionVector* slowFuncs) {
+        uint32_t endBeforeCurly = tokenStream().currentToken().pos.end;
+        TokenPos pos;
+        JS_ALWAYS_TRUE(tokenStream().peekTokenPos(&pos, TokenStream::Operand));
+        uint32_t endAfterCurly = pos.end;
+
+        auto usesHeap = Module::HeapBool(module_->hasArrayView());
+        auto sharedHeap = Module::SharedBool(module_->isSharedView());
+        UniqueChars filename = make_string_copy(parser_.ss->filename());
+        if (!filename)
+            return false;
+
+        UniqueStaticLinkData linkData;
+        Module* wasm = mg_.finish(usesHeap, sharedHeap, Move(filename), &linkData, slowFuncs);
+        if (!wasm)
+            return false;
+
+        module_->finish(wasm, Move(linkData), endBeforeCurly, endAfterCurly);
+        return true;
     }
 
     // Mutable interface.
     void initModuleFunctionName(PropertyName* name) { moduleFunctionName_ = name; }
     void initGlobalArgumentName(PropertyName* n)    { module().initGlobalArgumentName(n); }
     void initImportArgumentName(PropertyName* n)    { module().initImportArgumentName(n); }
     void initBufferArgumentName(PropertyName* n)    { module().initBufferArgumentName(n); }
 
-    bool addGlobalVarInit(PropertyName* varName, const NumLit& lit, bool isConst) {
-        // The type of a const is the exact type of the literal (since its value
-        // cannot change) which is more precise than the corresponding vartype.
-        Type type = isConst ? Type::lit(lit) : Type::var(lit.type());
+    bool addGlobalVarInit(PropertyName* var, const NumLit& lit, bool isConst) {
         uint32_t globalDataOffset;
-        if (!module().addGlobalVarInit(lit.value(), &globalDataOffset))
+        if (!mg_.allocateGlobalVar(lit.type(), &globalDataOffset))
             return false;
         Global::Which which = isConst ? Global::ConstantLiteral : Global::Variable;
         Global* global = validationLifo_.new_<Global>(which);
         if (!global)
             return false;
         global->u.varOrConst.globalDataOffset_ = globalDataOffset;
-        global->u.varOrConst.type_ = type.which();
+        global->u.varOrConst.type_ = (isConst ? Type::lit(lit) : Type::var(lit.type())).which();
         if (isConst)
             global->u.varOrConst.literalValue_ = lit;
-        return globals_.putNew(varName, global);
-    }
-    bool addGlobalVarImport(PropertyName* varName, PropertyName* fieldName, ValType importType,
-                            bool isConst)
-    {
+        return globals_.putNew(var, global) &&
+               module().addGlobalVarInit(lit.value(), globalDataOffset);
+    }
+    bool addGlobalVarImport(PropertyName* var, PropertyName* field, ValType type, bool isConst) {
         uint32_t globalDataOffset;
-        if (!module().addGlobalVarImport(fieldName, importType, &globalDataOffset))
+        if (!mg_.allocateGlobalVar(type, &globalDataOffset))
             return false;
         Global::Which which = isConst ? Global::ConstantImport : Global::Variable;
         Global* global = validationLifo_.new_<Global>(which);
         if (!global)
             return false;
         global->u.varOrConst.globalDataOffset_ = globalDataOffset;
-        global->u.varOrConst.type_ = Type::var(importType).which();
-        return globals_.putNew(varName, global);
-    }
-    bool addArrayView(PropertyName* varName, Scalar::Type vt, PropertyName* maybeField)
-    {
-        if (!arrayViews_.append(ArrayView(varName, vt)))
+        global->u.varOrConst.type_ = Type::var(type).which();
+        return globals_.putNew(var, global) &&
+               module().addGlobalVarImport(field, type, globalDataOffset);
+    }
+    bool addArrayView(PropertyName* var, Scalar::Type vt, PropertyName* maybeField) {
+        if (!arrayViews_.append(ArrayView(var, vt)))
             return false;
         Global* global = validationLifo_.new_<Global>(Global::ArrayView);
         if (!global)
             return false;
-        if (!module().addArrayView(vt, maybeField))
-            return false;
         global->u.viewInfo.viewType_ = vt;
-        return globals_.putNew(varName, global);
-    }
-    bool addMathBuiltinFunction(PropertyName* varName, AsmJSMathBuiltinFunction func,
-                                PropertyName* fieldName)
+        return globals_.putNew(var, global) &&
+               module().addArrayView(vt, maybeField);
+    }
+    bool addMathBuiltinFunction(PropertyName* var, AsmJSMathBuiltinFunction func,
+                                PropertyName* field)
     {
-        if (!module().addMathBuiltinFunction(func, fieldName))
-            return false;
         Global* global = validationLifo_.new_<Global>(Global::MathBuiltinFunction);
         if (!global)
             return false;
         global->u.mathBuiltinFunc_ = func;
-        return globals_.putNew(varName, global);
+        return globals_.putNew(var, global) &&
+               module().addMathBuiltinFunction(func, field);
     }
   private:
-    bool addGlobalDoubleConstant(PropertyName* varName, double constant) {
+    bool addGlobalDoubleConstant(PropertyName* var, double constant) {
         Global* global = validationLifo_.new_<Global>(Global::ConstantLiteral);
         if (!global)
             return false;
         global->u.varOrConst.type_ = Type::Double;
         global->u.varOrConst.literalValue_ = NumLit(NumLit::Double, DoubleValue(constant));
-        return globals_.putNew(varName, global);
+        return globals_.putNew(var, global);
     }
   public:
-    bool addMathBuiltinConstant(PropertyName* varName, double constant, PropertyName* fieldName) {
-        if (!module().addMathBuiltinConstant(constant, fieldName))
-            return false;
-        return addGlobalDoubleConstant(varName, constant);
-    }
-    bool addGlobalConstant(PropertyName* varName, double constant, PropertyName* fieldName) {
-        if (!module().addGlobalConstant(constant, fieldName))
-            return false;
-        return addGlobalDoubleConstant(varName, constant);
-    }
-    bool addAtomicsBuiltinFunction(PropertyName* varName, AsmJSAtomicsBuiltinFunction func,
-                                   PropertyName* fieldName)
+    bool addMathBuiltinConstant(PropertyName* var, double constant, PropertyName* field) {
+        return addGlobalDoubleConstant(var, constant) &&
+               module().addMathBuiltinConstant(constant, field);
+    }
+    bool addGlobalConstant(PropertyName* var, double constant, PropertyName* field) {
+        return addGlobalDoubleConstant(var, constant) &&
+               module().addGlobalConstant(constant, field);
+    }
+    bool addAtomicsBuiltinFunction(PropertyName* var, AsmJSAtomicsBuiltinFunction func,
+                                   PropertyName* field)
     {
-        if (!module().addAtomicsBuiltinFunction(func, fieldName))
-            return false;
         Global* global = validationLifo_.new_<Global>(Global::AtomicsBuiltinFunction);
         if (!global)
             return false;
         atomicsPresent_ = true;
         global->u.atomicsBuiltinFunc_ = func;
-        return globals_.putNew(varName, global);
-    }
-    bool addSimdCtor(PropertyName* varName, AsmJSSimdType type, PropertyName* fieldName) {
-        if (!module().addSimdCtor(type, fieldName))
-            return false;
+        return globals_.putNew(var, global) &&
+               module().addAtomicsBuiltinFunction(func, field);
+    }
+    bool addSimdCtor(PropertyName* var, AsmJSSimdType type, PropertyName* field) {
         Global* global = validationLifo_.new_<Global>(Global::SimdCtor);
         if (!global)
             return false;
         global->u.simdCtorType_ = type;
-        return globals_.putNew(varName, global);
-    }
-    bool addSimdOperation(PropertyName* varName, AsmJSSimdType type, AsmJSSimdOperation op,
-                          PropertyName* typeVarName, PropertyName* opName)
+        return globals_.putNew(var, global) &&
+               module().addSimdCtor(type, field);
+    }
+    bool addSimdOperation(PropertyName* var, AsmJSSimdType type, AsmJSSimdOperation op,
+                          PropertyName* opName)
     {
-        if (!module().addSimdOperation(type, op, opName))
-            return false;
         Global* global = validationLifo_.new_<Global>(Global::SimdOperation);
         if (!global)
             return false;
         global->u.simdOp.type_ = type;
         global->u.simdOp.which_ = op;
-        return globals_.putNew(varName, global);
+        return globals_.putNew(var, global) &&
+               module().addSimdOperation(type, op, opName);
     }
     bool addByteLength(PropertyName* name) {
         canValidateChangeHeap_ = true;
-        if (!module().addByteLength())
-            return false;
         Global* global = validationLifo_.new_<Global>(Global::ByteLength);
-        return global && globals_.putNew(name, global);
+        return global && globals_.putNew(name, global) &&
+               module().addByteLength();
     }
     bool addChangeHeap(PropertyName* name, ParseNode* fn, uint32_t mask, uint32_t min, uint32_t max) {
         hasChangeHeap_ = true;
-        module().addChangeHeap(mask, min, max);
         Global* global = validationLifo_.new_<Global>(Global::ChangeHeap);
         if (!global)
             return false;
         global->u.changeHeap.srcBegin_ = fn->pn_pos.begin;
         global->u.changeHeap.srcEnd_ = fn->pn_pos.end;
-        return globals_.putNew(name, global);
-    }
-    bool addArrayViewCtor(PropertyName* varName, Scalar::Type vt, PropertyName* fieldName) {
+        return globals_.putNew(name, global) &&
+               module().addChangeHeap(mask, min, max);
+    }
+    bool addArrayViewCtor(PropertyName* var, Scalar::Type vt, PropertyName* field) {
         Global* global = validationLifo_.new_<Global>(Global::ArrayViewCtor);
         if (!global)
             return false;
-        if (!module().addArrayViewCtor(vt, fieldName))
-            return false;
         global->u.viewInfo.viewType_ = vt;
-        return globals_.putNew(varName, global);
-    }
-    bool addFFI(PropertyName* varName, PropertyName* field) {
+        return globals_.putNew(var, global) &&
+               module().addArrayViewCtor(vt, field);
+    }
+    bool addFFI(PropertyName* var, PropertyName* field) {
         Global* global = validationLifo_.new_<Global>(Global::FFI);
         if (!global)
             return false;
         uint32_t index;
         if (!module().addFFI(field, &index))
             return false;
         global->u.ffiIndex_ = index;
-        return globals_.putNew(varName, global);
-    }
-    bool addExportedFunction(const Func& func, PropertyName* maybeFieldName) {
+        return globals_.putNew(var, global);
+    }
+    bool addExport(const Func& func, PropertyName* maybeFieldName) {
         MallocSig::ArgVector args;
         if (!args.appendAll(func.sig().args()))
             return false;
         MallocSig sig(Move(args), func.sig().ret());
-        return module().addExportedFunction(func.name(), func.index(), func.srcBegin(),
-                                            func.srcEnd(), maybeFieldName, Move(sig));
-    }
-    bool addExportedChangeHeap(PropertyName* name, const Global& g, PropertyName* maybeFieldName) {
-        return module().addExportedChangeHeap(name, g.changeHeapSrcBegin(), g.changeHeapSrcEnd(),
-                                              maybeFieldName);
+        uint32_t wasmIndex;
+        if (!mg_.declareExport(Move(sig), func.index(), &wasmIndex))
+            return false;
+        return module().addExport(func.name(), maybeFieldName, wasmIndex,
+                                  func.srcBegin(), func.srcEnd());
+    }
+    bool addChangeHeapExport(PropertyName* name, const Global& g, PropertyName* maybeFieldName) {
+        return module().addExport(name, maybeFieldName, AsmJSModule::Export::ChangeHeap,
+                                  g.changeHeapSrcBegin(), g.changeHeapSrcEnd());
     }
   private:
     const LifoSig* getLifoSig(const LifoSig& sig) {
         return &sig;
     }
     const LifoSig* getLifoSig(const MallocSig& sig) {
         return mg_.newLifoSig(sig);
     }
@@ -1577,39 +1589,41 @@ class MOZ_STACK_CLASS ModuleValidator
         if (!globals_.putNew(name, global))
             return false;
         const LifoSig* lifoSig = getLifoSig(sig);
         if (!lifoSig)
             return false;
         FuncPtrTable* t = validationLifo_.new_<FuncPtrTable>(cx_, name, firstUse, *lifoSig, mask);
         return t && funcPtrTables_.append(t);
     }
-    bool defineFuncPtrTable(uint32_t funcPtrTableIndex, ModuleGenerator::FuncIndexVector&& elems) {
+    bool defineFuncPtrTable(uint32_t funcPtrTableIndex, const Vector<uint32_t>& elems) {
         FuncPtrTable& table = *funcPtrTables_[funcPtrTableIndex];
         if (table.defined())
             return false;
         table.define();
-        return mg_.defineFuncPtrTable(funcPtrTableIndex, Move(elems));
-    }
-    bool addExit(PropertyName* name, MallocSig&& sig, unsigned ffiIndex, unsigned* exitIndex,
+        mg_.defineFuncPtrTable(funcPtrTableIndex, elems);
+        return true;
+    }
+    bool addImport(PropertyName* name, MallocSig&& sig, unsigned ffiIndex, unsigned* importIndex,
                  const LifoSig** lifoSig)
     {
-        ExitDescriptor::Lookup lookup(name, sig);
-        ExitMap::AddPtr p = exits_.lookupForAdd(lookup);
+        ImportDescriptor::Lookup lookup(name, sig);
+        ImportMap::AddPtr p = imports_.lookupForAdd(lookup);
         if (p) {
             *lifoSig = &p->key().sig();
-            *exitIndex = p->value();
+            *importIndex = p->value();
             return true;
         }
         *lifoSig = getLifoSig(sig);
         if (!*lifoSig)
             return false;
-        if (!module().addExit(Move(sig), ffiIndex, exitIndex))
+        if (!mg_.declareImport(Move(sig), importIndex))
             return false;
-        return exits_.add(p, ExitDescriptor(name, **lifoSig), *exitIndex);
+        return imports_.add(p, ImportDescriptor(name, **lifoSig), *importIndex) &&
+               module().addImport(ffiIndex, *importIndex);
     }
 
     bool tryOnceToValidateChangeHeap() {
         bool ret = canValidateChangeHeap_;
         canValidateChangeHeap_ = false;
         return ret;
     }
     bool hasChangeHeap() const {
@@ -1631,17 +1645,17 @@ class MOZ_STACK_CLASS ModuleValidator
         return !!errorString_;
     }
 
     bool failOffset(uint32_t offset, const char* str) {
         MOZ_ASSERT(!hasAlreadyFailed());
         MOZ_ASSERT(errorOffset_ == UINT32_MAX);
         MOZ_ASSERT(str);
         errorOffset_ = offset;
-        errorString_ = DuplicateString(cx_, str);
+        errorString_ = make_string_copy(str);
         return false;
     }
 
     bool fail(ParseNode* pn, const char* str) {
         return failOffset(pn->pn_pos.begin, str);
     }
 
     bool failfVAOffset(uint32_t offset, const char* fmt, va_list ap) {
@@ -1687,17 +1701,17 @@ class MOZ_STACK_CLASS ModuleValidator
         return false;
     }
 
     // Read-only interface
     ExclusiveContext* cx() const             { return cx_; }
     ParseNode* moduleFunctionNode() const    { return moduleFunctionNode_; }
     PropertyName* moduleFunctionName() const { return moduleFunctionName_; }
     ModuleGenerator& mg()                    { return mg_; }
-    AsmJSModule& module() const              { return mg_.module(); }
+    AsmJSModule& module() const              { return *module_; }
     AsmJSParser& parser() const              { return parser_; }
     TokenStream& tokenStream() const         { return parser_.tokenStream; }
     bool supportsSimd() const                { return supportsSimd_; }
 
     unsigned numArrayViews() const {
         return arrayViews_.length();
     }
     const ArrayView& arrayView(unsigned i) const {
@@ -1748,19 +1762,29 @@ class MOZ_STACK_CLASS ModuleValidator
     bool lookupStandardSimdOpName(PropertyName* name, AsmJSSimdOperation* op) const {
         if (SimdOperationNameMap::Ptr p = standardLibrarySimdOpNames_.lookup(name)) {
             *op = p->value();
             return true;
         }
         return false;
     }
 
-    void startFunctionBodies() {
-        if (atomicsPresent_)
+    bool startFunctionBodies() {
+        if (atomicsPresent_) {
+#if defined(ENABLE_SHARED_ARRAY_BUFFER)
             module().setViewsAreShared();
+#else
+            return failOffset(parser_.tokenStream.currentToken().pos.begin,
+                              "shared memory and atomics not supported by this build");
+#endif
+        }
+        return true;
+    }
+    bool finishFunctionBodies() {
+        return mg_.finishFuncs();
     }
 };
 
 } // namespace
 
 /*****************************************************************************/
 // Numeric literal utilities
 
@@ -2675,26 +2699,25 @@ CheckGlobalSimdImport(ModuleValidator& m
     AsmJSSimdType simdType;
     if (!IsSimdTypeName(m, field, &simdType))
         return m.failName(initNode, "'%s' is not a standard SIMD type", field);
     return m.addSimdCtor(varName, simdType, field);
 }
 
 static bool
 CheckGlobalSimdOperationImport(ModuleValidator& m, const ModuleValidator::Global* global,
-                               ParseNode* initNode, PropertyName* varName, PropertyName* ctorVarName,
-                               PropertyName* opName)
+                               ParseNode* initNode, PropertyName* varName, PropertyName* opName)
 {
     AsmJSSimdType simdType = global->simdCtorType();
     AsmJSSimdOperation simdOp;
     if (!m.lookupStandardSimdOpName(opName, &simdOp))
         return m.failName(initNode, "'%s' is not a standard SIMD operation", opName);
     if (!IsSimdValidOperationType(simdType, simdOp))
         return m.failName(initNode, "'%s' is not an operation supported by the SIMD type", opName);
-    return m.addSimdOperation(varName, simdType, simdOp, ctorVarName, opName);
+    return m.addSimdOperation(varName, simdType, simdOp, opName);
 }
 
 static bool
 CheckGlobalDotImport(ModuleValidator& m, PropertyName* varName, ParseNode* initNode)
 {
     ParseNode* base = DotBase(initNode);
     PropertyName* field = DotMember(initNode);
 
@@ -2746,17 +2769,17 @@ CheckGlobalDotImport(ModuleValidator& m,
 
     const ModuleValidator::Global* global = m.lookupGlobal(base->name());
     if (!global)
         return m.failName(initNode, "%s not found in module global scope", base->name());
 
     if (!global->isSimdCtor())
         return m.failName(base, "expecting SIMD constructor name, got %s", field);
 
-    return CheckGlobalSimdOperationImport(m, global, initNode, varName, base->name(), field);
+    return CheckGlobalSimdOperationImport(m, global, initNode, varName, field);
 }
 
 static bool
 CheckModuleGlobal(ModuleValidator& m, ParseNode* var, bool isConst)
 {
     if (!IsDefinition(var))
         return m.fail(var, "import variable names must be unique");
 
@@ -4049,18 +4072,17 @@ CheckFuncPtrCall(FunctionValidator& f, P
         return false;
 
     MallocSig sig(Move(args), ret);
 
     uint32_t funcPtrTableIndex;
     if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, sig, mask, &funcPtrTableIndex))
         return false;
 
-    uint32_t globalDataOffset = f.m().module().funcPtrTable(funcPtrTableIndex).globalDataOffset();
-    f.patch32(globalDataOffsetAt, globalDataOffset);
+    f.patch32(globalDataOffsetAt, f.m().mg().funcPtrTableGlobalDataOffset(funcPtrTableIndex));
     f.patchSig(sigAt, &f.m().funcPtrTable(funcPtrTableIndex).sig());
 
     *type = Type::ret(ret);
     return true;
 }
 
 static bool
 CheckIsExternType(FunctionValidator& f, ParseNode* argNode, Type type)
@@ -4094,34 +4116,33 @@ CheckFFICall(FunctionValidator& f, Parse
       case ExprType::F64:    f.writeOp(F64::CallImport);   break;
       case ExprType::I32x4:  f.writeOp(I32X4::CallImport); break;
       case ExprType::F32x4:  f.writeOp(F32X4::CallImport); break;
       case ExprType::B32x4:  f.writeOp(B32X4::CallImport); break;
     }
 
     // Global data offset
     size_t offsetAt = f.temp32();
-    // Pointer to the exit's signature in the module's lifo
+    // Pointer to the import's signature in the module's lifo
     size_t sigAt = f.tempPtr();
     // Call node position (asm.js specific)
     WriteCallLineCol(f, callNode);
 
     MallocSig::ArgVector args;
     if (!CheckCallArgs<CheckIsExternType>(f, callNode, &args))
         return false;
 
     MallocSig sig(Move(args), ret);
 
-    unsigned exitIndex = 0;
+    unsigned importIndex = 0;
     const LifoSig* lifoSig = nullptr;
-    if (!f.m().addExit(calleeName, Move(sig), ffiIndex, &exitIndex, &lifoSig))
-        return false;
-
-    JS_STATIC_ASSERT(offsetof(AsmJSModule::ExitDatum, exit) == 0);
-    f.patch32(offsetAt, f.module().exit(exitIndex).globalDataOffset());
+    if (!f.m().addImport(calleeName, Move(sig), ffiIndex, &importIndex, &lifoSig))
+        return false;
+
+    f.patch32(offsetAt, f.m().mg().importExitGlobalDataOffset(importIndex));
     f.patchSig(sigAt, lifoSig);
     *type = Type::ret(ret);
     return true;
 }
 
 static bool
 CheckFloatCoercionArg(FunctionValidator& f, ParseNode* inputNode, Type inputType,
                       size_t opcodeAt)
@@ -5851,17 +5872,17 @@ CheckExprStatement(FunctionValidator& f,
 enum class InterruptCheckPosition {
     Head,
     Loop
 };
 
 static void
 MaybeAddInterruptCheck(FunctionValidator& f, InterruptCheckPosition pos, ParseNode* pn)
 {
-    if (f.m().module().usesSignalHandlersForInterrupt())
+    if (f.m().mg().args().useSignalHandlersForInterrupt)
         return;
 
     switch (pos) {
       case InterruptCheckPosition::Head: f.writeOp(Stmt::InterruptCheckHead); break;
       case InterruptCheckPosition::Loop: f.writeOp(Stmt::InterruptCheckLoop); break;
     }
 
     unsigned lineno = 0, column = 0;
@@ -6680,17 +6701,17 @@ CheckFuncPtrTable(ModuleValidator& m, Pa
 
     unsigned length = ListLength(arrayLiteral);
 
     if (!IsPowerOfTwo(length))
         return m.failf(arrayLiteral, "function-pointer table length must be a power of 2 (is %u)", length);
 
     unsigned mask = length - 1;
 
-    ModuleGenerator::FuncIndexVector elems;
+    Vector<uint32_t> elemFuncIndices(m.cx());
     const LifoSig* sig = nullptr;
     for (ParseNode* elem = ListHead(arrayLiteral); elem; elem = NextNode(elem)) {
         if (!elem->isKind(PNK_NAME))
             return m.fail(elem, "function-pointer table's elements must be names of functions");
 
         PropertyName* funcName = elem->name();
         const ModuleValidator::Func* func = m.lookupFunction(funcName);
         if (!func)
@@ -6698,25 +6719,25 @@ CheckFuncPtrTable(ModuleValidator& m, Pa
 
         if (sig) {
             if (*sig != func->sig())
                 return m.fail(elem, "all functions in table must have same signature");
         } else {
             sig = &func->sig();
         }
 
-        if (!elems.append(func->index()))
+        if (!elemFuncIndices.append(func->index()))
             return false;
     }
 
     uint32_t funcPtrTableIndex;
     if (!CheckFuncPtrTableAgainstExisting(m, var, var->name(), *sig, mask, &funcPtrTableIndex))
         return false;
 
-    if (!m.defineFuncPtrTable(funcPtrTableIndex, Move(elems)))
+    if (!m.defineFuncPtrTable(funcPtrTableIndex, elemFuncIndices))
         return m.fail(var, "duplicate function-pointer definition");
 
     return true;
 }
 
 static bool
 CheckFuncPtrTables(ModuleValidator& m)
 {
@@ -6751,20 +6772,20 @@ CheckModuleExportFunction(ModuleValidato
         return m.fail(pn, "expected name of exported function");
 
     PropertyName* funcName = pn->name();
     const ModuleValidator::Global* global = m.lookupGlobal(funcName);
     if (!global)
         return m.failName(pn, "exported function name '%s' not found", funcName);
 
     if (global->which() == ModuleValidator::Global::Function)
-        return m.addExportedFunction(m.function(global->funcIndex()), maybeFieldName);
+        return m.addExport(m.function(global->funcIndex()), maybeFieldName);
 
     if (global->which() == ModuleValidator::Global::ChangeHeap)
-        return m.addExportedChangeHeap(funcName, *global, maybeFieldName);
+        return m.addChangeHeapExport(funcName, *global, maybeFieldName);
 
     return m.failName(pn, "'%s' is not a function", funcName);
 }
 
 static bool
 CheckModuleExportObject(ModuleValidator& m, ParseNode* object)
 {
     MOZ_ASSERT(object->isKind(PNK_OBJECT));
@@ -6837,24 +6858,23 @@ CheckModuleEnd(ModuleValidator &m)
                             "top-level export (return) must be the last statement");
     }
 
     m.parser().tokenStream.ungetToken();
     return true;
 }
 
 static bool
-CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList,
-            ScopedJSDeletePtr<AsmJSModule>* module, unsigned* time,
-            SlowFunctionVector* slowFuncs)
+CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, HandleAsmJSModule obj,
+            unsigned* time, SlowFunctionVector* slowFuncs)
 {
     int64_t before = PRMJ_Now();
 
     ModuleValidator m(cx, parser);
-    if (!m.init())
+    if (!m.init(obj))
         return false;
 
     if (PropertyName* moduleFunctionName = FunctionName(m.moduleFunctionNode())) {
         if (!CheckModuleLevelName(m, m.moduleFunctionNode(), moduleFunctionName))
             return false;
         m.initModuleFunctionName(moduleFunctionName);
     }
 
@@ -6868,66 +6888,63 @@ CheckModule(ExclusiveContext* cx, AsmJSP
         return false;
 
     if (!CheckModuleProcessingDirectives(m))
         return false;
 
     if (!CheckModuleGlobals(m))
         return false;
 
-    m.startFunctionBodies();
-
-#if !defined(ENABLE_SHARED_ARRAY_BUFFER)
-    if (m.usesSharedMemory())
-        return m.failOffset(m.parser().tokenStream.currentToken().pos.begin,
-                            "shared memory and atomics not supported by this build");
-#endif
+    if (!m.startFunctionBodies())
+        return false;
 
     if (!CheckFunctions(m))
         return false;
 
+    if (!m.finishFunctionBodies())
+        return false;
+
     if (!CheckFuncPtrTables(m))
         return false;
 
     if (!CheckModuleReturn(m))
         return false;
 
     if (!CheckModuleEnd(m))
         return false;
 
-    if (!m.finish(module, slowFuncs))
+    if (!m.finish(slowFuncs))
         return false;
 
     *time = (PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC;
     return true;
 }
 
-static bool
-BuildConsoleMessage(ExclusiveContext* cx, AsmJSModule& module,
-                    unsigned time, const SlowFunctionVector& slowFuncs,
-                    JS::AsmJSCacheResult cacheResult, ScopedJSFreePtr<char>* out)
+static UniqueChars
+BuildConsoleMessage(ExclusiveContext* cx, AsmJSModule& module, unsigned time,
+                    const SlowFunctionVector& slowFuncs, JS::AsmJSCacheResult cacheResult)
 {
 #ifndef JS_MORE_DETERMINISTIC
-    ScopedJSFreePtr<char> slowText;
+    UniqueChars slowText;
     if (!slowFuncs.empty()) {
         slowText.reset(JS_smprintf("; %d functions compiled slowly: ", slowFuncs.length()));
         if (!slowText)
-            return true;
+            return nullptr;
 
         for (unsigned i = 0; i < slowFuncs.length(); i++) {
             const SlowFunction& func = slowFuncs[i];
             JSAutoByteString name;
             if (!AtomToPrintableString(cx, func.name, &name))
-                return false;
+                return nullptr;
 
             slowText.reset(JS_smprintf("%s%s:%u:%u (%ums)%s", slowText.get(),
                                        name.ptr(), func.line, func.column, func.ms,
                                        i+1 < slowFuncs.length() ? ", " : ""));
             if (!slowText)
-                return true;
+                return nullptr;
         }
     }
 
     const char* cacheString = "";
     switch (cacheResult) {
       case JS::AsmJSCache_Success:
         cacheString = "stored in cache";
         break;
@@ -6956,21 +6973,21 @@ BuildConsoleMessage(ExclusiveContext* cx
       case JS::AsmJSCache_InternalError:
         cacheString = "unable to store in cache due to internal error (consider filing a bug)";
         break;
       case JS::AsmJSCache_LIMIT:
         MOZ_CRASH("bad AsmJSCacheResult");
         break;
     }
 
-    out->reset(JS_smprintf("total compilation time %dms; %s%s",
-                           time, cacheString, slowText ? slowText.get() : ""));
+    return UniqueChars(JS_smprintf("total compilation time %dms; %s%s",
+                                   time, cacheString, slowText ? slowText.get() : ""));
+#else
+    return make_string_copy("");
 #endif
-
-    return true;
 }
 
 static bool
 Warn(AsmJSParser& parser, int errorNumber, const char* str)
 {
     ParseReportKind reportKind = parser.options().throwOnAsmJSValidationFailureOption &&
                                  errorNumber == JSMSG_USE_ASM_TYPE_FAIL
                                  ? ParseError
@@ -7024,52 +7041,50 @@ bool
 js::ValidateAsmJS(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, bool* validated)
 {
     *validated = false;
 
     // Various conditions disable asm.js optimizations.
     if (!EstablishPreconditions(cx, parser))
         return NoExceptionPending(cx);
 
-    ScopedJSDeletePtr<AsmJSModule> module;
-    ScopedJSFreePtr<char> message;
+    Rooted<AsmJSModuleObject*> moduleObj(cx, AsmJSModuleObject::create(cx));
+    if (!moduleObj)
+        return false;
 
     // Before spending any time parsing the module, try to look it up in the
     // embedding's cache using the chars about to be parsed as the key.
-    if (!LookupAsmJSModuleInCache(cx, parser, &module, &message))
+    bool loadedFromCache;
+    UniqueChars message;
+    if (!LookupAsmJSModuleInCache(cx, parser, moduleObj, &loadedFromCache, &message))
         return false;
 
     // If not present in the cache, parse, validate and generate code in a
     // single linear pass over the chars of the asm.js module.
-    if (!module) {
+    if (!loadedFromCache) {
         // "Checking" parses, validates and compiles, producing a fully compiled
-        // AsmJSModule as result.
+        // AsmJSModuleObject as result.
         unsigned time;
         SlowFunctionVector slowFuncs(cx);
-        if (!CheckModule(cx, parser, stmtList, &module, &time, &slowFuncs))
+        if (!CheckModule(cx, parser, stmtList, moduleObj, &time, &slowFuncs))
             return NoExceptionPending(cx);
 
         // Try to store the AsmJSModule in the embedding's cache. The
         // AsmJSModule must be stored before static linking since static linking
         // specializes the AsmJSModule to the current process's address space
         // and therefore must be executed after a cache hit.
-        JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, *module, cx);
-        module->staticallyLink(cx);
-
-        if (!BuildConsoleMessage(cx, *module, time, slowFuncs, cacheResult, &message))
+        AsmJSModule& module = moduleObj->module();
+        JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, module, cx);
+        if (!module.staticallyLink(cx))
             return false;
-    }
-
-    // The AsmJSModuleObject isn't directly referenced by user code; it is only
-    // referenced (and kept alive by) an internal slot of the asm.js module
-    // function generated below and asm.js export functions generated when the
-    // asm.js module function is called.
-    RootedObject moduleObj(cx, AsmJSModuleObject::create(cx, &module));
-    if (!moduleObj)
-        return false;
+
+        message = BuildConsoleMessage(cx, module, time, slowFuncs, cacheResult);
+        if (!message)
+            return NoExceptionPending(cx);
+    }
 
     // The module function dynamically links the AsmJSModule when called and
     // generates a set of functions wrapping all the exports.
     FunctionBox* funbox = parser.pc->maybeFunction->pn_funbox;
     RootedFunction moduleFun(cx, NewAsmJSModuleFunction(cx, funbox->function(), moduleObj));
     if (!moduleFun)
         return false;
 
--- a/js/src/asmjs/AsmJSValidate.h
+++ b/js/src/asmjs/AsmJSValidate.h
@@ -22,17 +22,16 @@
 #include "mozilla/MathAlgorithms.h"
 
 #include <stddef.h>
 
 #include "jsutil.h"
 
 #include "jit/Registers.h"
 #include "js/TypeDecls.h"
-#include "vm/NativeObject.h"
 
 namespace js {
 
 class ExclusiveContext;
 namespace frontend {
     template <typename ParseHandler> class Parser;
     template <typename ParseHandler> struct ParseContext;
     class FullParseHandler;
deleted file mode 100644
--- a/js/src/asmjs/WasmCompileArgs.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- *
- * Copyright 2015 Mozilla Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef asmjs_wasm_compile_args_h
-#define asmjs_wasm_compile_args_h
-
-struct JSRuntime;
-
-namespace js {
-namespace wasm {
-
-struct CompileArgs
-{
-    JSRuntime* runtime;
-    bool usesSignalHandlersForOOB;
-
-    CompileArgs(JSRuntime* runtime,
-                bool usesSignalHandlersForOOB)
-      : runtime(runtime),
-        usesSignalHandlersForOOB(usesSignalHandlersForOOB)
-    {}
-};
-
-} // namespace wasm
-} // namespace js
-
-#endif // asmjs_wasm_compile_args_h
rename from js/src/asmjs/AsmJSFrameIterator.cpp
rename to js/src/asmjs/WasmFrameIterator.cpp
--- a/js/src/asmjs/AsmJSFrameIterator.cpp
+++ b/js/src/asmjs/WasmFrameIterator.cpp
@@ -11,97 +11,128 @@
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
-#include "asmjs/AsmJSFrameIterator.h"
+#include "asmjs/WasmFrameIterator.h"
+
+#include "jsatom.h"
 
 #include "asmjs/AsmJSModule.h"
 #include "jit/MacroAssembler-inl.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
 using mozilla::DebugOnly;
+using mozilla::Swap;
 
 /*****************************************************************************/
-// AsmJSFrameIterator implementation
+// FrameIterator implementation
 
 static void*
 ReturnAddressFromFP(void* fp)
 {
     return reinterpret_cast<AsmJSFrame*>(fp)->returnAddress;
 }
 
 static uint8_t*
 CallerFPFromFP(void* fp)
 {
     return reinterpret_cast<AsmJSFrame*>(fp)->callerFP;
 }
 
-AsmJSFrameIterator::AsmJSFrameIterator(const AsmJSActivation& activation)
-  : module_(&activation.module()),
+FrameIterator::FrameIterator()
+  : cx_(nullptr),
+    module_(nullptr),
+    callsite_(nullptr),
+    codeRange_(nullptr),
+    fp_(nullptr)
+{
+    MOZ_ASSERT(done());
+}
+
+FrameIterator::FrameIterator(const AsmJSActivation& activation)
+  : cx_(activation.cx()),
+    module_(&activation.module().wasm()),
+    callsite_(nullptr),
+    codeRange_(nullptr),
     fp_(activation.fp())
 {
-    if (!fp_)
-        return;
-    settle();
+    if (fp_)
+        settle();
 }
 
 void
-AsmJSFrameIterator::operator++()
+FrameIterator::operator++()
 {
     MOZ_ASSERT(!done());
     DebugOnly<uint8_t*> oldfp = fp_;
     fp_ += callsite_->stackDepth();
     MOZ_ASSERT_IF(module_->profilingEnabled(), fp_ == CallerFPFromFP(oldfp));
     settle();
 }
 
 void
-AsmJSFrameIterator::settle()
+FrameIterator::settle()
 {
     void* returnAddress = ReturnAddressFromFP(fp_);
 
-    const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(returnAddress);
+    const CodeRange* codeRange = module_->lookupCodeRange(returnAddress);
     MOZ_ASSERT(codeRange);
     codeRange_ = codeRange;
 
     switch (codeRange->kind()) {
-      case AsmJSModule::CodeRange::Function:
+        case CodeRange::Function:
         callsite_ = module_->lookupCallSite(returnAddress);
         MOZ_ASSERT(callsite_);
         break;
-      case AsmJSModule::CodeRange::Entry:
+      case CodeRange::Entry:
         fp_ = nullptr;
         MOZ_ASSERT(done());
         break;
-      case AsmJSModule::CodeRange::JitFFI:
-      case AsmJSModule::CodeRange::SlowFFI:
-      case AsmJSModule::CodeRange::Interrupt:
-      case AsmJSModule::CodeRange::Inline:
-      case AsmJSModule::CodeRange::Thunk:
+      case CodeRange::ImportJitExit:
+      case CodeRange::ImportInterpExit:
+      case CodeRange::Interrupt:
+      case CodeRange::Inline:
         MOZ_CRASH("Should not encounter an exit during iteration");
     }
 }
 
 JSAtom*
-AsmJSFrameIterator::functionDisplayAtom() const
+FrameIterator::functionDisplayAtom() const
 {
     MOZ_ASSERT(!done());
-    return reinterpret_cast<const AsmJSModule::CodeRange*>(codeRange_)->functionName(*module_);
+
+    const char* chars = module_->functionName(codeRange_->funcNameIndex());
+    UTF8Chars utf8(chars, strlen(chars));
+
+    size_t twoByteLength;
+    UniquePtr<char16_t> twoByte(JS::UTF8CharsToNewTwoByteCharsZ(cx_, utf8, &twoByteLength).get());
+    if (!twoByte) {
+        cx_->clearPendingException();
+        return cx_->names().empty;
+    }
+
+    JSAtom* atom = AtomizeChars(cx_, twoByte.get(), twoByteLength);
+    if (!atom) {
+        cx_->clearPendingException();
+        return cx_->names().empty;
+    }
+
+    return atom;
 }
 
 unsigned
-AsmJSFrameIterator::computeLine(uint32_t* column) const
+FrameIterator::computeLine(uint32_t* column) const
 {
     MOZ_ASSERT(!done());
     if (column)
         *column = callsite_->column();
     return callsite_->line();
 }
 
 /*****************************************************************************/
@@ -158,33 +189,33 @@ PushRetAddr(MacroAssembler& masm)
 #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
     masm.push(ra);
 #else
     // The x86/x64 call instruction pushes the return address.
 #endif
 }
 
 // Generate a prologue that maintains AsmJSActivation::fp as the virtual frame
-// pointer so that AsmJSProfilingFrameIterator can walk the stack at any pc in
+// pointer so that ProfilingFrameIterator can walk the stack at any pc in
 // generated code.
 static void
 GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
-                          AsmJSProfilingOffsets* offsets, Label* maybeEntry = nullptr)
+                          ProfilingOffsets* offsets, Label* maybeEntry = nullptr)
 {
 #if !defined (JS_CODEGEN_ARM)
     Register scratch = ABIArgGenerator::NonArg_VolatileReg;
 #else
     // Unfortunately, there are no unused non-arg volatile registers on ARM --
     // the MacroAssembler claims both lr and ip -- so we use the second scratch
     // register (lr) and be very careful not to call any methods that use it.
     Register scratch = lr;
     masm.setSecondScratchReg(InvalidReg);
 #endif
 
-    // AsmJSProfilingFrameIterator needs to know the offsets of several key
+    // ProfilingFrameIterator needs to know the offsets of several key
     // instructions from entry. To save space, we make these offsets static
     // constants and assert that they match the actual codegen below. On ARM,
     // this requires AutoForbidPools to prevent a constant pool from being
     // randomly inserted between two instructions.
     {
 #if defined(JS_CODEGEN_ARM)
         AutoForbidPools afp(&masm, /* number of instructions in scope = */ 5);
 #endif
@@ -199,51 +230,51 @@ GenerateProfilingPrologue(MacroAssembler
         masm.loadAsmJSActivation(scratch);
         masm.push(Address(scratch, AsmJSActivation::offsetOfFP()));
         MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - offsets->begin);
 
         masm.storePtr(masm.getStackPointer(), Address(scratch, AsmJSActivation::offsetOfFP()));
         MOZ_ASSERT_IF(!masm.oom(), StoredFP == masm.currentOffset() - offsets->begin);
     }
 
-    if (reason.kind() != ExitReason::None) {
-        masm.store32_NoSecondScratch(Imm32(reason.pack()),
-                                     Address(scratch, AsmJSActivation::offsetOfPackedExitReason()));
+    if (reason != ExitReason::None) {
+        masm.store32_NoSecondScratch(Imm32(int32_t(reason)),
+                                     Address(scratch, AsmJSActivation::offsetOfExitReason()));
     }
 
 #if defined(JS_CODEGEN_ARM)
     masm.setSecondScratchReg(lr);
 #endif
 
     if (framePushed)
         masm.subFromStackPtr(Imm32(framePushed));
 }
 
 // Generate the inverse of GenerateProfilingPrologue.
 static void
 GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
-                          AsmJSProfilingOffsets* offsets)
+                          ProfilingOffsets* offsets)
 {
     Register scratch = ABIArgGenerator::NonReturn_VolatileReg0;
 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
     defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
     Register scratch2 = ABIArgGenerator::NonReturn_VolatileReg1;
 #endif
 
     if (framePushed)
         masm.addToStackPtr(Imm32(framePushed));
 
     masm.loadAsmJSActivation(scratch);
 
-    if (reason.kind() != ExitReason::None) {
-        masm.store32(Imm32(ExitReason::None),
-                     Address(scratch, AsmJSActivation::offsetOfPackedExitReason()));
+    if (reason != ExitReason::None) {
+        masm.store32(Imm32(int32_t(ExitReason::None)),
+                     Address(scratch, AsmJSActivation::offsetOfExitReason()));
     }
 
-    // AsmJSProfilingFrameIterator assumes fixed offsets of the last few
+    // ProfilingFrameIterator assumes fixed offsets of the last few
     // instructions from profilingReturn, so AutoForbidPools to ensure that
     // unintended instructions are not automatically inserted.
     {
 #if defined(JS_CODEGEN_ARM)
         AutoForbidPools afp(&masm, /* number of instructions in scope = */ 4);
 #endif
 
         // sp protects the stack from clobber via asynchronous signal handlers
@@ -267,21 +298,20 @@ GenerateProfilingEpilogue(MacroAssembler
     }
 }
 
 // In profiling mode, we need to maintain fp so that we can unwind the stack at
 // any pc. In non-profiling mode, the only way to observe AsmJSActivation::fp is
 // to call out to C++ so, as an optimization, we don't update fp. To avoid
 // recompilation when the profiling mode is toggled, we generate both prologues
 // a priori and switch between prologues when the profiling mode is toggled.
-// Specifically, AsmJSModule::setProfilingEnabled patches all callsites to
+// Specifically, Module::setProfilingEnabled patches all callsites to
 // either call the profiling or non-profiling entry point.
 void
-js::GenerateAsmJSFunctionPrologue(MacroAssembler& masm, unsigned framePushed,
-                                  AsmJSFunctionOffsets* offsets)
+wasm::GenerateFunctionPrologue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets)
 {
 #if defined(JS_CODEGEN_ARM)
     // Flush pending pools so they do not get dumped between the 'begin' and
     // 'entry' offsets since the difference must be less than UINT8_MAX.
     masm.flushBuffer();
 #endif
 
     masm.haltingAlign(CodeAlignment);
@@ -296,24 +326,23 @@ js::GenerateAsmJSFunctionPrologue(MacroA
     PushRetAddr(masm);
     masm.subFromStackPtr(Imm32(framePushed + AsmJSFrameBytesAfterReturnAddress));
 
     // Prologue join point, body begin:
     masm.bind(&body);
     masm.setFramePushed(framePushed);
 }
 
-// Similar to GenerateAsmJSFunctionPrologue (see comment), we generate both a
+// Similar to GenerateFunctionPrologue (see comment), we generate both a
 // profiling and non-profiling epilogue a priori. When the profiling mode is
-// toggled, AsmJSModule::setProfilingEnabled patches the 'profiling jump' to
+// toggled, Module::setProfilingEnabled patches the 'profiling jump' to
 // either be a nop (falling through to the normal prologue) or a jump (jumping
 // to the profiling epilogue).
 void
-js::GenerateAsmJSFunctionEpilogue(MacroAssembler& masm, unsigned framePushed,
-                                  AsmJSFunctionOffsets* offsets)
+wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets)
 {
     MOZ_ASSERT(masm.framePushed() == framePushed);
 
 #if defined(JS_CODEGEN_ARM)
     // Flush pending pools so they do not get dumped between the profilingReturn
     // and profilingJump/profilingEpilogue offsets since the difference must be
     // less than UINT8_MAX.
     masm.flushBuffer();
@@ -324,17 +353,17 @@ js::GenerateAsmJSFunctionEpilogue(MacroA
     {
 #if defined(JS_CODEGEN_ARM)
         // Forbid pools from being inserted between the profilingJump label and
         // the nop since we need the location of the actual nop to patch it.
         AutoForbidPools afp(&masm, 1);
 #endif
 
         // The exact form of this instruction must be kept consistent with the
-        // patching in AsmJSModule::setProfilingEnabled.
+        // patching in Module::setProfilingEnabled.
         offsets->profilingJump = masm.currentOffset();
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
         masm.twoByteNop();
 #elif defined(JS_CODEGEN_ARM)
         masm.nop();
 #elif defined(JS_CODEGEN_MIPS32)
         masm.nop();
         masm.nop();
@@ -356,149 +385,152 @@ js::GenerateAsmJSFunctionEpilogue(MacroA
     masm.setFramePushed(0);
 
     // Profiling epilogue:
     offsets->profilingEpilogue = masm.currentOffset();
     GenerateProfilingEpilogue(masm, framePushed, ExitReason::None, offsets);
 }
 
 void
-js::GenerateAsmJSExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
-                              AsmJSProfilingOffsets* offsets, Label* maybeEntry)
+wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+                           ProfilingOffsets* offsets, Label* maybeEntry)
 {
     masm.haltingAlign(CodeAlignment);
     GenerateProfilingPrologue(masm, framePushed, reason, offsets, maybeEntry);
     masm.setFramePushed(framePushed);
 }
 
 void
-js::GenerateAsmJSExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
-                              AsmJSProfilingOffsets* offsets)
+wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+                           ProfilingOffsets* offsets)
 {
-    // Inverse of GenerateAsmJSExitPrologue:
+    // Inverse of GenerateExitPrologue:
     MOZ_ASSERT(masm.framePushed() == framePushed);
     GenerateProfilingEpilogue(masm, framePushed, reason, offsets);
     masm.setFramePushed(0);
 }
 
 /*****************************************************************************/
-// AsmJSProfilingFrameIterator
+// ProfilingFrameIterator
 
-AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation)
-  : module_(&activation.module()),
+ProfilingFrameIterator::ProfilingFrameIterator()
+  : module_(nullptr),
+    codeRange_(nullptr),
     callerFP_(nullptr),
     callerPC_(nullptr),
     stackAddress_(nullptr),
-    exitReason_(ExitReason::None),
-    codeRange_(nullptr)
+    exitReason_(ExitReason::None)
+{
+    MOZ_ASSERT(done());
+}
+
+ProfilingFrameIterator::ProfilingFrameIterator(const AsmJSActivation& activation)
+  : module_(&activation.module().wasm()),
+    codeRange_(nullptr),
+    callerFP_(nullptr),
+    callerPC_(nullptr),
+    stackAddress_(nullptr),
+    exitReason_(ExitReason::None)
 {
     // If profiling hasn't been enabled for this module, then CallerFPFromFP
     // will be trash, so ignore the entire activation. In practice, this only
     // happens if profiling is enabled while module->active() (in this case,
     // profiling will be enabled when the module becomes inactive and gets
     // called again).
     if (!module_->profilingEnabled()) {
         MOZ_ASSERT(done());
         return;
     }
 
     initFromFP(activation);
 }
 
 static inline void
-AssertMatchesCallSite(const AsmJSModule& module, const AsmJSModule::CodeRange* calleeCodeRange,
-                      void* callerPC, void* callerFP, void* fp)
+AssertMatchesCallSite(const Module& module, void* callerPC, void* callerFP, void* fp)
 {
 #ifdef DEBUG
-    const AsmJSModule::CodeRange* callerCodeRange = module.lookupCodeRange(callerPC);
+    const CodeRange* callerCodeRange = module.lookupCodeRange(callerPC);
     MOZ_ASSERT(callerCodeRange);
-    if (callerCodeRange->isEntry()) {
+    if (callerCodeRange->kind() == CodeRange::Entry) {
         MOZ_ASSERT(callerFP == nullptr);
         return;
     }
 
     const CallSite* callsite = module.lookupCallSite(callerPC);
-    if (calleeCodeRange->isThunk()) {
-        MOZ_ASSERT(!callsite);
-        MOZ_ASSERT(callerCodeRange->isFunction());
-    } else {
-        MOZ_ASSERT(callsite);
-        MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth());
-    }
+    MOZ_ASSERT(callsite);
+    MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth());
 #endif
 }
 
 void
-AsmJSProfilingFrameIterator::initFromFP(const AsmJSActivation& activation)
+ProfilingFrameIterator::initFromFP(const AsmJSActivation& activation)
 {
     uint8_t* fp = activation.fp();
 
     // If a signal was handled while entering an activation, the frame will
     // still be null.
     if (!fp) {
         MOZ_ASSERT(done());
         return;
     }
 
     // Since we don't have the pc for fp, start unwinding at the caller of fp
     // (ReturnAddressFromFP(fp)). This means that the innermost frame is
     // skipped. This is fine because:
-    //  - for FFI calls, the innermost frame is a thunk, so the first frame that
-    //    shows up is the function calling the FFI;
-    //  - for Math and other builtin calls, when profiling is activated, we
-    //    patch all call sites to instead call through a thunk; and
-    //  - for interrupts, we just accept that we'll lose the innermost frame.
+    //  - for import exit calls, the innermost frame is a thunk, so the first
+    //    frame that shows up is the function calling the import;
+    //  - for Math and other builtin calls as well as interrupts, we note the absence
+    //    of an exit reason and inject a fake "builtin" frame; and
+    //  - for async interrupts, we just accept that we'll lose the innermost frame.
     void* pc = ReturnAddressFromFP(fp);
-    const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(pc);
+    const CodeRange* codeRange = module_->lookupCodeRange(pc);
     MOZ_ASSERT(codeRange);
     codeRange_ = codeRange;
     stackAddress_ = fp;
 
     switch (codeRange->kind()) {
-      case AsmJSModule::CodeRange::Entry:
+      case CodeRange::Entry:
         callerPC_ = nullptr;
         callerFP_ = nullptr;
         break;
-      case AsmJSModule::CodeRange::Function:
+      case CodeRange::Function:
         fp = CallerFPFromFP(fp);
         callerPC_ = ReturnAddressFromFP(fp);
         callerFP_ = CallerFPFromFP(fp);
-        AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
+        AssertMatchesCallSite(*module_, callerPC_, callerFP_, fp);
         break;
-      case AsmJSModule::CodeRange::JitFFI:
-      case AsmJSModule::CodeRange::SlowFFI:
-      case AsmJSModule::CodeRange::Interrupt:
-      case AsmJSModule::CodeRange::Inline:
-      case AsmJSModule::CodeRange::Thunk:
+      case CodeRange::ImportJitExit:
+      case CodeRange::ImportInterpExit:
+      case CodeRange::Interrupt:
+      case CodeRange::Inline:
         MOZ_CRASH("Unexpected CodeRange kind");
     }
 
-    // Despite the above reasoning for skipping a frame, we do actually want FFI
-    // trampolines and interrupts to show up in the profile (so they can
-    // accumulate self time and explain performance faults). To do this, an
-    // "exit reason" is stored on all the paths leaving asm.js and this iterator
-    // treats this exit reason as its own frame. If we have exited asm.js code
-    // without setting an exit reason, the reason will be None and this means
-    // the code was asynchronously interrupted.
+    // The iterator inserts a pretend innermost frame for non-None ExitReasons.
+    // This allows the variety of exit reasons to show up in the callstack.
     exitReason_ = activation.exitReason();
-    if (exitReason_.kind() == ExitReason::None)
-        exitReason_ = ExitReason::Interrupt;
+
+    // In the case of calls to builtins or asynchronous interrupts, no exit path
+    // is taken so the exitReason is None. Coerce these to the Native exit
+    // reason so that self-time is accounted for.
+    if (exitReason_ == ExitReason::None)
+        exitReason_ = ExitReason::Native;
 
     MOZ_ASSERT(!done());
 }
 
 typedef JS::ProfilingFrameIterator::RegisterState RegisterState;
 
-AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation,
-                                                         const RegisterState& state)
-  : module_(&activation.module()),
+ProfilingFrameIterator::ProfilingFrameIterator(const AsmJSActivation& activation,
+                                               const RegisterState& state)
+  : module_(&activation.module().wasm()),
+    codeRange_(nullptr),
     callerFP_(nullptr),
     callerPC_(nullptr),
-    exitReason_(ExitReason::None),
-    codeRange_(nullptr)
+    exitReason_(ExitReason::None)
 {
     // If profiling hasn't been enabled for this module, then CallerFPFromFP
     // will be trash, so ignore the entire activation. In practice, this only
     // happens if profiling is enabled while module->active() (in this case,
     // profiling will be enabled when the module becomes inactive and gets
     // called again).
     if (!module_->profilingEnabled()) {
         MOZ_ASSERT(done());
@@ -510,219 +542,309 @@ AsmJSProfilingFrameIterator::AsmJSProfil
     if (!module_->containsCodePC(state.pc)) {
         initFromFP(activation);
         return;
     }
 
     // Note: fp may be null while entering and leaving the activation.
     uint8_t* fp = activation.fp();
 
-    const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(state.pc);
+    const CodeRange* codeRange = module_->lookupCodeRange(state.pc);
     switch (codeRange->kind()) {
-      case AsmJSModule::CodeRange::Function:
-      case AsmJSModule::CodeRange::JitFFI:
-      case AsmJSModule::CodeRange::SlowFFI:
-      case AsmJSModule::CodeRange::Interrupt:
-      case AsmJSModule::CodeRange::Thunk: {
+      case CodeRange::Function:
+      case CodeRange::ImportJitExit:
+      case CodeRange::ImportInterpExit:
+      case CodeRange::Interrupt: {
         // When the pc is inside the prologue/epilogue, the innermost
         // call's AsmJSFrame is not complete and thus fp points to the the
         // second-to-innermost call's AsmJSFrame. Since fp can only tell you
         // about its caller (via ReturnAddressFromFP(fp)), naively unwinding
         // while pc is in the prologue/epilogue would skip the second-to-
         // innermost call. To avoid this problem, we use the static structure of
         // the code in the prologue and epilogue to do the Right Thing.
-        uint32_t offsetInModule = (uint8_t*)state.pc - module_->codeBase();
-        MOZ_ASSERT(offsetInModule < module_->codeBytes());
+        MOZ_ASSERT(module_->containsCodePC(state.pc));
+        uint32_t offsetInModule = (uint8_t*)state.pc - module_->code();
         MOZ_ASSERT(offsetInModule >= codeRange->begin());
         MOZ_ASSERT(offsetInModule < codeRange->end());
         uint32_t offsetInCodeRange = offsetInModule - codeRange->begin();
         void** sp = (void**)state.sp;
 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
         if (offsetInCodeRange < PushedRetAddr) {
             // First instruction of the ARM/MIPS function; the return address is
             // still in lr and fp still holds the caller's fp.
             callerPC_ = state.lr;
             callerFP_ = fp;
-            AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp - 2);
+            AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp - 2);
         } else if (offsetInModule == codeRange->profilingReturn() - PostStorePrePopFP) {
             // Second-to-last instruction of the ARM/MIPS function; fp points to
             // the caller's fp; have not yet popped AsmJSFrame.
             callerPC_ = ReturnAddressFromFP(sp);
             callerFP_ = CallerFPFromFP(sp);
-            AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp);
+            AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp);
         } else
 #endif
         if (offsetInCodeRange < PushedFP || offsetInModule == codeRange->profilingReturn()) {
             // The return address has been pushed on the stack but not fp; fp
             // still points to the caller's fp.
             callerPC_ = *sp;
             callerFP_ = fp;
-            AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp - 1);
+            AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp - 1);
         } else if (offsetInCodeRange < StoredFP) {
             // The full AsmJSFrame has been pushed; fp still points to the
             // caller's frame.
             MOZ_ASSERT(fp == CallerFPFromFP(sp));
             callerPC_ = ReturnAddressFromFP(sp);
             callerFP_ = CallerFPFromFP(sp);
-            AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, sp);
+            AssertMatchesCallSite(*module_, callerPC_, callerFP_, sp);
         } else {
             // Not in the prologue/epilogue.
             callerPC_ = ReturnAddressFromFP(fp);
             callerFP_ = CallerFPFromFP(fp);
-            AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
+            AssertMatchesCallSite(*module_, callerPC_, callerFP_, fp);
         }
         break;
       }
-      case AsmJSModule::CodeRange::Entry: {
+      case CodeRange::Entry: {
         // The entry trampoline is the final frame in an AsmJSActivation. The entry
-        // trampoline also doesn't GenerateAsmJSPrologue/Epilogue so we can't use
+        // trampoline also doesn't GeneratePrologue/Epilogue so we can't use
         // the general unwinding logic above.
         MOZ_ASSERT(!fp);
         callerPC_ = nullptr;
         callerFP_ = nullptr;
         break;
       }
-      case AsmJSModule::CodeRange::Inline: {
+      case CodeRange::Inline: {
         // The throw stub clears AsmJSActivation::fp on it's way out.
         if (!fp) {
             MOZ_ASSERT(done());
             return;
         }
 
         // Most inline code stubs execute after the prologue/epilogue have
         // completed so we can simply unwind based on fp. The only exception is
         // the async interrupt stub, since it can be executed at any time.
         // However, the async interrupt is super rare, so we can tolerate
         // skipped frames. Thus, we use simply unwind based on fp.
         callerPC_ = ReturnAddressFromFP(fp);
         callerFP_ = CallerFPFromFP(fp);
-        AssertMatchesCallSite(*module_, codeRange, callerPC_, callerFP_, fp);
+        AssertMatchesCallSite(*module_, callerPC_, callerFP_, fp);
         break;
       }
     }
 
     codeRange_ = codeRange;
     stackAddress_ = state.sp;
     MOZ_ASSERT(!done());
 }
 
 void
-AsmJSProfilingFrameIterator::operator++()
+ProfilingFrameIterator::operator++()
 {
-    if (exitReason_.kind() != ExitReason::None) {
+    if (exitReason_ != ExitReason::None) {
         MOZ_ASSERT(codeRange_);
         exitReason_ = ExitReason::None;
         MOZ_ASSERT(!done());
         return;
     }
 
     if (!callerPC_) {
         MOZ_ASSERT(!callerFP_);
         codeRange_ = nullptr;
         MOZ_ASSERT(done());
         return;
     }
 
-    MOZ_ASSERT(callerPC_);
-    const AsmJSModule::CodeRange* codeRange = module_->lookupCodeRange(callerPC_);
+    const CodeRange* codeRange = module_->lookupCodeRange(callerPC_);
     MOZ_ASSERT(codeRange);
     codeRange_ = codeRange;
 
     switch (codeRange->kind()) {
-      case AsmJSModule::CodeRange::Entry:
+      case CodeRange::Entry:
         MOZ_ASSERT(callerFP_ == nullptr);
         callerPC_ = nullptr;
         break;
-      case AsmJSModule::CodeRange::Function:
-      case AsmJSModule::CodeRange::JitFFI:
-      case AsmJSModule::CodeRange::SlowFFI:
-      case AsmJSModule::CodeRange::Interrupt:
-      case AsmJSModule::CodeRange::Inline:
-      case AsmJSModule::CodeRange::Thunk:
+      case CodeRange::Function:
+      case CodeRange::ImportJitExit:
+      case CodeRange::ImportInterpExit:
+      case CodeRange::Interrupt:
+      case CodeRange::Inline:
         stackAddress_ = callerFP_;
         callerPC_ = ReturnAddressFromFP(callerFP_);
-        AssertMatchesCallSite(*module_, codeRange, callerPC_, CallerFPFromFP(callerFP_), callerFP_);
+        AssertMatchesCallSite(*module_, callerPC_, CallerFPFromFP(callerFP_), callerFP_);
         callerFP_ = CallerFPFromFP(callerFP_);
         break;
     }
 
     MOZ_ASSERT(!done());
 }
 
-static const char*
-BuiltinToName(Builtin builtin)
-{
-    // Note: this label is regexp-matched by
-    // devtools/client/profiler/cleopatra/js/parserWorker.js.
-
-    switch (builtin) {
-      case Builtin::ToInt32:         return "ToInt32 (in asm.js)";
-#if defined(JS_CODEGEN_ARM)
-      case Builtin::aeabi_idivmod:   return "software idivmod (in asm.js)";
-      case Builtin::aeabi_uidivmod:  return "software uidivmod (in asm.js)";
-      case Builtin::AtomicCmpXchg:   return "Atomics.compareExchange (in asm.js)";
-      case Builtin::AtomicXchg:      return "Atomics.exchange (in asm.js)";
-      case Builtin::AtomicFetchAdd:  return "Atomics.add (in asm.js)";
-      case Builtin::AtomicFetchSub:  return "Atomics.sub (in asm.js)";
-      case Builtin::AtomicFetchAnd:  return "Atomics.and (in asm.js)";
-      case Builtin::AtomicFetchOr:   return "Atomics.or (in asm.js)";
-      case Builtin::AtomicFetchXor:  return "Atomics.xor (in asm.js)";
-#endif
-      case Builtin::ModD:    return "fmod (in asm.js)";
-      case Builtin::SinD:    return "Math.sin (in asm.js)";
-      case Builtin::CosD:    return "Math.cos (in asm.js)";
-      case Builtin::TanD:    return "Math.tan (in asm.js)";
-      case Builtin::ASinD:   return "Math.asin (in asm.js)";
-      case Builtin::ACosD:   return "Math.acos (in asm.js)";
-      case Builtin::ATanD:   return "Math.atan (in asm.js)";
-      case Builtin::CeilD:
-      case Builtin::CeilF:   return "Math.ceil (in asm.js)";
-      case Builtin::FloorD:
-      case Builtin::FloorF:  return "Math.floor (in asm.js)";
-      case Builtin::ExpD:    return "Math.exp (in asm.js)";
-      case Builtin::LogD:    return "Math.log (in asm.js)";
-      case Builtin::PowD:    return "Math.pow (in asm.js)";
-      case Builtin::ATan2D:  return "Math.atan2 (in asm.js)";
-      case Builtin::Limit:   break;
-    }
-    MOZ_CRASH("symbolic immediate not a builtin");
-}
-
 const char*
-AsmJSProfilingFrameIterator::label() const
+ProfilingFrameIterator::label() const
 {
     MOZ_ASSERT(!done());
 
     // Use the same string for both time inside and under so that the two
     // entries will be coalesced by the profiler.
     //
-    // NB: these labels are regexp-matched by
-    //     devtools/client/profiler/cleopatra/js/parserWorker.js.
-    const char* jitFFIDescription = "fast FFI trampoline (in asm.js)";
-    const char* slowFFIDescription = "slow FFI trampoline (in asm.js)";
-    const char* interruptDescription = "interrupt due to out-of-bounds or long execution (in asm.js)";
+    // NB: these labels are parsed for location by
+    //     devtools/client/performance/modules/logic/frame-utils.js
+    const char* importJitDescription = "fast FFI trampoline (in asm.js)";
+    const char* importInterpDescription = "slow FFI trampoline (in asm.js)";
+    const char* nativeDescription = "native call (in asm.js)";
 
-    switch (exitReason_.kind()) {
+    switch (exitReason_) {
       case ExitReason::None:
         break;
-      case ExitReason::Jit:
-        return jitFFIDescription;
-      case ExitReason::Slow:
-        return slowFFIDescription;
-      case ExitReason::Interrupt:
-        return interruptDescription;
-      case ExitReason::Builtin:
-        return BuiltinToName(exitReason_.builtin());
+      case ExitReason::ImportJit:
+        return importJitDescription;
+      case ExitReason::ImportInterp:
+        return importInterpDescription;
+      case ExitReason::Native:
+        return nativeDescription;
     }
 
-    auto codeRange = reinterpret_cast<const AsmJSModule::CodeRange*>(codeRange_);
-    switch (codeRange->kind()) {
-      case AsmJSModule::CodeRange::Function:  return codeRange->functionProfilingLabel(*module_);
-      case AsmJSModule::CodeRange::Entry:     return "entry trampoline (in asm.js)";
-      case AsmJSModule::CodeRange::JitFFI:    return jitFFIDescription;
-      case AsmJSModule::CodeRange::SlowFFI:   return slowFFIDescription;
-      case AsmJSModule::CodeRange::Interrupt: return interruptDescription;
-      case AsmJSModule::CodeRange::Inline:    return "inline stub (in asm.js)";
-      case AsmJSModule::CodeRange::Thunk:     return BuiltinToName(codeRange->thunkTarget());
+    switch (codeRange_->kind()) {
+      case CodeRange::Function:         return module_->profilingLabel(codeRange_->funcNameIndex());
+      case CodeRange::Entry:            return "entry trampoline (in asm.js)";
+      case CodeRange::ImportJitExit:    return importJitDescription;
+      case CodeRange::ImportInterpExit: return importInterpDescription;
+      case CodeRange::Interrupt:        return nativeDescription;
+      case CodeRange::Inline:           return "inline stub (in asm.js)";
     }
 
     MOZ_CRASH("bad code range kind");
 }
+
+/*****************************************************************************/
+// Runtime patching to enable/disable profiling
+
+// Patch an internal (asm.js->asm.js) callsite to call the profiling
+// prologue:
+void
+wasm::EnableProfilingPrologue(const Module& module, const CallSite& callSite, bool enabled)
+{
+    if (callSite.kind() != CallSite::Relative)
+        return;
+
+    uint8_t* callerRetAddr = module.code() + callSite.returnAddressOffset();
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+    void* callee = X86Encoding::GetRel32Target(callerRetAddr);
+#elif defined(JS_CODEGEN_ARM)
+    uint8_t* caller = callerRetAddr - 4;
+    Instruction* callerInsn = reinterpret_cast<Instruction*>(caller);
+    BOffImm calleeOffset;
+    callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
+    void* callee = calleeOffset.getDest(callerInsn);
+#elif defined(JS_CODEGEN_ARM64)
+    MOZ_CRASH();
+    void* callee = nullptr;
+    (void)callerRetAddr;
+#elif defined(JS_CODEGEN_MIPS32)
+    Instruction* instr = (Instruction*)(callerRetAddr - 4 * sizeof(uint32_t));
+    void* callee = (void*)Assembler::ExtractLuiOriValue(instr, instr->next());
+#elif defined(JS_CODEGEN_MIPS64)
+    Instruction* instr = (Instruction*)(callerRetAddr - 6 * sizeof(uint32_t));
+    void* callee = (void*)Assembler::ExtractLoad64Value(instr);
+#elif defined(JS_CODEGEN_NONE)
+    MOZ_CRASH();
+    void* callee = nullptr;
+#else
+# error "Missing architecture"
+#endif
+
+    const CodeRange* codeRange = module.lookupCodeRange(callee);
+    if (!codeRange->isFunction())
+        return;
+
+    uint8_t* from = module.code() + codeRange->funcNonProfilingEntry();
+    uint8_t* to = module.code() + codeRange->funcProfilingEntry();
+    if (!enabled)
+        Swap(from, to);
+
+    MOZ_ASSERT(callee == from);
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+    X86Encoding::SetRel32(callerRetAddr, to);
+#elif defined(JS_CODEGEN_ARM)
+    new (caller) InstBLImm(BOffImm(to - caller), Assembler::Always);
+#elif defined(JS_CODEGEN_ARM64)
+    (void)to;
+    MOZ_CRASH();
+#elif defined(JS_CODEGEN_MIPS32)
+    Assembler::WriteLuiOriInstructions(instr, instr->next(),
+                                       ScratchRegister, (uint32_t)to);
+    instr[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+#elif defined(JS_CODEGEN_MIPS64)
+    Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)to);
+    instr[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+#elif defined(JS_CODEGEN_NONE)
+    MOZ_CRASH();
+#else
+# error "Missing architecture"
+#endif
+}
+
+// Replace the nop in the epilogue of an asm.js function with a jump to the
+// profiling epilogue (or restore the nop when disabling).
+void
+wasm::EnableProfilingEpilogue(const Module& module, const CodeRange& codeRange, bool enabled)
+{
+    if (!codeRange.isFunction())
+        return;
+
+    uint8_t* jump = module.code() + codeRange.functionProfilingJump();
+    uint8_t* profilingEpilogue = module.code() + codeRange.funcProfilingEpilogue();
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+    // An unconditional jump with a 1 byte offset immediate has the opcode
+    // 0xeb. The offset is relative to the address of the instruction after
+    // the jump. 0x66 0x90 is the canonical two-byte nop.
+    ptrdiff_t jumpImmediate = profilingEpilogue - jump - 2;
+    MOZ_ASSERT(jumpImmediate > 0 && jumpImmediate <= 127);
+    if (enabled) {
+        MOZ_ASSERT(jump[0] == 0x66);
+        MOZ_ASSERT(jump[1] == 0x90);
+        jump[0] = 0xeb;
+        jump[1] = jumpImmediate;
+    } else {
+        MOZ_ASSERT(jump[0] == 0xeb);
+        MOZ_ASSERT(jump[1] == jumpImmediate);
+        jump[0] = 0x66;
+        jump[1] = 0x90;
+    }
+#elif defined(JS_CODEGEN_ARM)
+    if (enabled) {
+        MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstNOP>());
+        new (jump) InstBImm(BOffImm(profilingEpilogue - jump), Assembler::Always);
+    } else {
+        MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstBImm>());
+        new (jump) InstNOP();
+    }
+#elif defined(JS_CODEGEN_ARM64)
+    (void)jump;
+    (void)profilingEpilogue;
+    MOZ_CRASH();
+#elif defined(JS_CODEGEN_MIPS32)
+    Instruction* instr = (Instruction*)jump;
+    if (enabled) {
+        Assembler::WriteLuiOriInstructions(instr, instr->next(),
+                                           ScratchRegister, (uint32_t)profilingEpilogue);
+        instr[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
+    } else {
+        for (unsigned i = 0; i < 3; i++)
+            instr[i].makeNop();
+    }
+#elif defined(JS_CODEGEN_MIPS64)
+    Instruction* instr = (Instruction*)jump;
+    if (enabled) {
+        Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)profilingEpilogue);
+        instr[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
+    } else {
+        for (unsigned i = 0; i < 5; i++)
+            instr[i].makeNop();
+    }
+#elif defined(JS_CODEGEN_NONE)
+    MOZ_CRASH();
+#else
+# error "Missing architecture"
+#endif
+}
rename from js/src/asmjs/AsmJSFrameIterator.h
rename to js/src/asmjs/WasmFrameIterator.h
--- a/js/src/asmjs/AsmJSFrameIterator.h
+++ b/js/src/asmjs/WasmFrameIterator.h
@@ -11,150 +11,111 @@
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
-#ifndef asmjs_AsmJSFrameIterator_h
-#define asmjs_AsmJSFrameIterator_h
+#ifndef wasm_frame_iterator_h
+#define wasm_frame_iterator_h
 
-#include <stdint.h>
-
-#include "asmjs/Wasm.h"
 #include "js/ProfilingFrameIterator.h"
 
 class JSAtom;
 
 namespace js {
 
 class AsmJSActivation;
-class AsmJSModule;
 namespace jit { class MacroAssembler; class Label; }
-namespace wasm { class CallSite; }
+
+namespace wasm {
+
+class CallSite;
+class CodeRange;
+class Module;
+struct FuncOffsets;
+struct ProfilingOffsets;
 
 // Iterates over the frames of a single AsmJSActivation, called synchronously
 // from C++ in the thread of the asm.js. The one exception is that this iterator
 // may be called from the interrupt callback which may be called asynchronously
 // from asm.js code; in this case, the backtrace may not be correct.
-class AsmJSFrameIterator
+class FrameIterator
 {
-    const AsmJSModule* module_;
-    const wasm::CallSite* callsite_;
+    JSContext* cx_;
+    const Module* module_;
+    const CallSite* callsite_;
+    const CodeRange* codeRange_;
     uint8_t* fp_;
 
-    // Really, a const AsmJSModule::CodeRange*, but no forward declarations of
-    // nested classes, so use void* to avoid pulling in all of AsmJSModule.h.
-    const void* codeRange_;
-
     void settle();
 
   public:
-    explicit AsmJSFrameIterator() : module_(nullptr) {}
-    explicit AsmJSFrameIterator(const AsmJSActivation& activation);
+    explicit FrameIterator();
+    explicit FrameIterator(const AsmJSActivation& activation);
     void operator++();
     bool done() const { return !fp_; }
     JSAtom* functionDisplayAtom() const;
     unsigned computeLine(uint32_t* column) const;
 };
 
+// An ExitReason describes the possible reasons for leaving compiled wasm code
+// or the state of not having left compiled wasm code (ExitReason::None).
+enum class ExitReason : uint32_t
+{
+    None,          // default state, the pc is in wasm code
+    ImportJit,     // fast-path call directly into JIT code
+    ImportInterp,  // slow-path call into C++ Invoke()
+    Native         // call to native C++ code (e.g., Math.sin, ToInt32(), interrupt)
+};
+
 // Iterates over the frames of a single AsmJSActivation, given an
 // asynchrously-interrupted thread's state. If the activation's
 // module is not in profiling mode, the activation is skipped.
-class AsmJSProfilingFrameIterator
+class ProfilingFrameIterator
 {
-    const AsmJSModule* module_;
+    const Module* module_;
+    const CodeRange* codeRange_;
     uint8_t* callerFP_;
     void* callerPC_;
     void* stackAddress_;
-    wasm::ExitReason exitReason_;
-
-    // Really, a const AsmJSModule::CodeRange*, but no forward declarations of
-    // nested classes, so use void* to avoid pulling in all of AsmJSModule.h.
-    const void* codeRange_;
+    ExitReason exitReason_;
 
     void initFromFP(const AsmJSActivation& activation);
 
   public:
-    AsmJSProfilingFrameIterator() : codeRange_(nullptr) {}
-    explicit AsmJSProfilingFrameIterator(const AsmJSActivation& activation);
-    AsmJSProfilingFrameIterator(const AsmJSActivation& activation,
-                                const JS::ProfilingFrameIterator::RegisterState& state);
+    ProfilingFrameIterator();
+    explicit ProfilingFrameIterator(const AsmJSActivation& activation);
+    ProfilingFrameIterator(const AsmJSActivation& activation,
+                           const JS::ProfilingFrameIterator::RegisterState& state);
     void operator++();
     bool done() const { return !codeRange_; }
 
     void* stackAddress() const { MOZ_ASSERT(!done()); return stackAddress_; }
     const char* label() const;
 };
 
-/******************************************************************************/
-// Prologue/epilogue code generation.
-
-struct AsmJSOffsets
-{
-    MOZ_IMPLICIT AsmJSOffsets(uint32_t begin = 0,
-                              uint32_t end = 0)
-      : begin(begin), end(end)
-    {}
-
-    // These define a [begin, end) contiguous range of instructions compiled
-    // into an AsmJSModule::CodeRange.
-    uint32_t begin;
-    uint32_t end;
-};
-
-struct AsmJSProfilingOffsets : AsmJSOffsets
-{
-    MOZ_IMPLICIT AsmJSProfilingOffsets(uint32_t profilingReturn = 0)
-      : AsmJSOffsets(), profilingReturn(profilingReturn)
-    {}
-
-    // For CodeRanges with AsmJSProfilingOffsets, 'begin' is the offset of the
-    // profiling entry.
-    uint32_t profilingEntry() const { return begin; }
+// Prologue/epilogue code generation
+void
+GenerateExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+                     ProfilingOffsets* offsets, jit::Label* maybeEntry = nullptr);
+void
+GenerateExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+                     ProfilingOffsets* offsets);
+void
+GenerateFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets);
+void
+GenerateFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets);
 
-    // The profiling return is the offset of the return instruction, which
-    // precedes the 'end' by a variable number of instructions due to
-    // out-of-line codegen.
-    uint32_t profilingReturn;
-};
-
-struct AsmJSFunctionOffsets : AsmJSProfilingOffsets
-{
-    MOZ_IMPLICIT AsmJSFunctionOffsets(uint32_t nonProfilingEntry = 0,
-                                      uint32_t profilingJump = 0,
-                                      uint32_t profilingEpilogue = 0)
-      : AsmJSProfilingOffsets(),
-        nonProfilingEntry(nonProfilingEntry),
-        profilingJump(profilingJump),
-        profilingEpilogue(profilingEpilogue)
-    {}
-
-    // Function CodeRanges have an additional non-profiling entry that comes
-    // after the profiling entry and a non-profiling epilogue that comes before
-    // the profiling epilogue.
-    uint32_t nonProfilingEntry;
-
-    // When profiling is enabled, the 'nop' at offset 'profilingJump' is
-    // overwritten to be a jump to 'profilingEpilogue'.
-    uint32_t profilingJump;
-    uint32_t profilingEpilogue;
-};
+// Runtime patching to enable/disable profiling
 
 void
-GenerateAsmJSExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, wasm::ExitReason reason,
-                          AsmJSProfilingOffsets* offsets, jit::Label* maybeEntry = nullptr);
-void
-GenerateAsmJSExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, wasm::ExitReason reason,
-                          AsmJSProfilingOffsets* offsets);
+EnableProfilingPrologue(const Module& module, const CallSite& callSite, bool enabled);
 
 void
-GenerateAsmJSFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed,
-                              AsmJSFunctionOffsets* offsets);
-void
-GenerateAsmJSFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed,
-                              AsmJSFunctionOffsets* offsets);
+EnableProfilingEpilogue(const Module& module, const CodeRange& codeRange, bool enabled);
 
+} // namespace wasm
 } // namespace js
 
-#endif // asmjs_AsmJSFrameIterator_h
+#endif // wasm_frame_iterator_h
--- a/js/src/asmjs/WasmGenerator.cpp
+++ b/js/src/asmjs/WasmGenerator.cpp
@@ -13,76 +13,66 @@
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #include "asmjs/WasmGenerator.h"
 
-#include "asmjs/AsmJSModule.h"
+#include "asmjs/AsmJSValidate.h"
 #include "asmjs/WasmStubs.h"
-#ifdef MOZ_VTUNE
-# include "vtune/VTuneWrapper.h"
-#endif
+
+#include "jit/MacroAssembler-inl.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
-static bool
-ParallelCompilationEnabled(ExclusiveContext* cx)
-{
-    // Since there are a fixed number of helper threads and one is already being
-    // consumed by this parsing task, ensure that there another free thread to
-    // avoid deadlock. (Note: there is at most one thread used for parsing so we
-    // don't have to worry about general dining philosophers.)
-    if (HelperThreadState().threadCount <= 1 || !CanUseExtraThreads())
-        return false;
-
-    // If 'cx' isn't a JSContext, then we are already off the main thread so
-    // off-thread compilation must be enabled.
-    return !cx->isJSContext() || cx->asJSContext()->runtime()->canUseOffthreadIonCompilation();
-}
-
 // ****************************************************************************
 // ModuleGenerator
 
 static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
 static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
 
 ModuleGenerator::ModuleGenerator(ExclusiveContext* cx)
   : cx_(cx),
+    args_(cx),
+    globalBytes_(InitialGlobalDataBytes),
+    slowFuncs_(cx),
     lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
+    jcx_(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread())),
     alloc_(&lifo_),
-    masm_(MacroAssembler::AsmJSToken(), &alloc_),
+    masm_(MacroAssembler::AsmJSToken(), alloc_),
     sigs_(cx),
     parallel_(false),
     outstanding_(0),
     tasks_(cx),
     freeTasks_(cx),
+    funcBytes_(0),
     funcEntryOffsets_(cx),
-    funcPtrTables_(cx),
-    slowFuncs_(cx),
-    active_(nullptr)
-{}
+    activeFunc_(nullptr),
+    finishedFuncs_(false)
+{
+    MOZ_ASSERT(IsCompilingAsmJS());
+}
 
 ModuleGenerator::~ModuleGenerator()
 {
     if (parallel_) {
         // Wait for any outstanding jobs to fail or complete.
         if (outstanding_) {
             AutoLockHelperThreadState lock;
             while (true) {
-                CompileTaskVector& worklist = HelperThreadState().wasmWorklist();
+                IonCompileTaskVector& worklist = HelperThreadState().wasmWorklist();
                 MOZ_ASSERT(outstanding_ >= worklist.length());
                 outstanding_ -= worklist.length();
                 worklist.clear();
 
-                CompileTaskVector& finished = HelperThreadState().wasmFinishedList();
+                IonCompileTaskVector& finished = HelperThreadState().wasmFinishedList();
                 MOZ_ASSERT(outstanding_ >= finished.length());
                 outstanding_ -= finished.length();
                 finished.clear();
 
                 uint32_t numFailed = HelperThreadState().harvestFailedWasmJobs();
                 MOZ_ASSERT(outstanding_ >= numFailed);
                 outstanding_ -= numFailed;
 
@@ -95,24 +85,39 @@ ModuleGenerator::~ModuleGenerator()
 
         MOZ_ASSERT(HelperThreadState().wasmCompilationInProgress);
         HelperThreadState().wasmCompilationInProgress = false;
     } else {
         MOZ_ASSERT(!outstanding_);
     }
 }
 
-bool
-ModuleGenerator::init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart, bool strict)
+static bool
+ParallelCompilationEnabled(ExclusiveContext* cx)
 {
-    if (!sigs_.init())
+    // Since there are a fixed number of helper threads and one is already being
+    // consumed by this parsing task, ensure that there another free thread to
+    // avoid deadlock. (Note: there is at most one thread used for parsing so we
+    // don't have to worry about general dining philosophers.)
+    if (HelperThreadState().threadCount <= 1 || !CanUseExtraThreads())
         return false;
 
-    module_ = cx_->new_<AsmJSModule>(ss, srcStart, srcBodyStart, strict, cx_->canUseSignalHandlers());
-    if (!module_)
+    // If 'cx' isn't a JSContext, then we are already off the main thread so
+    // off-thread compilation must be enabled.
+    return !cx->isJSContext() || cx->asJSContext()->runtime()->canUseOffthreadIonCompilation();
+}
+
+bool
+ModuleGenerator::init()
+{
+    staticLinkData_ = cx_->make_unique<StaticLinkData>();
+    if (!staticLinkData_)
+        return false;
+
+    if (!sigs_.init())
         return false;
 
     uint32_t numTasks;
     if (ParallelCompilationEnabled(cx_) &&
         HelperThreadState().wasmCompilationInProgress.compareExchange(false, true))
     {
 #ifdef DEBUG
         {
@@ -126,132 +131,47 @@ ModuleGenerator::init(ScriptSource* ss, 
         parallel_ = true;
         numTasks = HelperThreadState().maxWasmCompilationThreads();
     } else {
         numTasks = 1;
     }
 
     if (!tasks_.initCapacity(numTasks))
         return false;
+    JSRuntime* runtime = cx_->compartment()->runtimeFromAnyThread();
     for (size_t i = 0; i < numTasks; i++)
-        tasks_.infallibleEmplaceBack(COMPILATION_LIFO_DEFAULT_CHUNK_SIZE, args());
+        tasks_.infallibleEmplaceBack(runtime, args_, COMPILATION_LIFO_DEFAULT_CHUNK_SIZE);
 
     if (!freeTasks_.reserve(numTasks))
         return false;
     for (size_t i = 0; i < numTasks; i++)
         freeTasks_.infallibleAppend(&tasks_[i]);
 
     return true;
 }
 
 bool
-ModuleGenerator::startFunc(PropertyName* name, unsigned line, unsigned column,
-                           FunctionGenerator* fg)
+ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset)
 {
-    MOZ_ASSERT(!active_);
-
-    if (freeTasks_.empty() && !finishOutstandingTask())
-        return false;
-
-    CompileTask* task = freeTasks_.popCopy();
-    FuncIR* func = task->lifo().new_<FuncIR>(task->lifo(), name, line, column);
-    if (!func)
+    uint32_t pad = ComputeByteAlignment(globalBytes_, align);
+    if (UINT32_MAX - globalBytes_ < pad + bytes)
         return false;
 
-    task->init(*func);
-    fg->m_ = this;
-    fg->task_ = task;
-    fg->func_ = func;
-    active_ = fg;
-    return true;
-}
-
-bool
-ModuleGenerator::finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime,
-                            FunctionGenerator* fg)
-{
-    MOZ_ASSERT(active_ == fg);
-
-    fg->func_->finish(funcIndex, sig, generateTime);
-
-    if (parallel_) {
-        if (!StartOffThreadWasmCompile(cx_, fg->task_))
-            return false;
-        outstanding_++;
-    } else {
-        if (!CompileFunction(fg->task_))
-            return false;
-        if (!finishTask(fg->task_))
-            return false;
-    }
-
-    fg->m_ = nullptr;
-    fg->task_ = nullptr;
-    fg->func_ = nullptr;
-    active_ = nullptr;
-    return true;
-}
-
-bool
-ModuleGenerator::finish(frontend::TokenStream& ts, ScopedJSDeletePtr<AsmJSModule>* module,
-                        SlowFunctionVector* slowFuncs)
-{
-    MOZ_ASSERT(!active_);
-
-    while (outstanding_ > 0) {
-        if (!finishOutstandingTask())
-            return false;
-    }
-
-    module_->setFunctionBytes(masm_.size());
-
-    JitContext jitContext(CompileRuntime::get(args().runtime));
-
-    // Now that all function definitions have been compiled and their function-
-    // entry offsets are all known, patch inter-function calls and fill in the
-    // function-pointer table offsets.
-
-    if (!GenerateStubs(masm_, *module_, funcEntryOffsets_))
-        return false;
-
-    for (auto& cs : masm_.callSites()) {
-        if (!cs.isInternal())
-            continue;
-        MOZ_ASSERT(cs.kind() == CallSiteDesc::Relative);
-        uint32_t callerOffset = cs.returnAddressOffset();
-        uint32_t calleeOffset = funcEntryOffsets_[cs.targetIndex()];
-        masm_.patchCall(callerOffset, calleeOffset);
-    }
-
-    for (unsigned tableIndex = 0; tableIndex < funcPtrTables_.length(); tableIndex++) {
-        FuncPtrTable& table = funcPtrTables_[tableIndex];
-        AsmJSModule::OffsetVector entryOffsets;
-        for (uint32_t funcIndex : table.elems)
-            entryOffsets.append(funcEntryOffsets_[funcIndex]);
-        module_->funcPtrTable(tableIndex).define(Move(entryOffsets));
-    }
-
-    masm_.finish();
-    if (masm_.oom())
-        return false;
-
-    if (!module_->finish(cx_, ts, masm_))
-        return false;
-
-    *module = module_.forget();
-    *slowFuncs = Move(slowFuncs_);
+    globalBytes_ += pad;
+    *globalDataOffset = globalBytes_;
+    globalBytes_ += bytes;
     return true;
 }
 
 bool
 ModuleGenerator::finishOutstandingTask()
 {
     MOZ_ASSERT(parallel_);
 
-    CompileTask* task = nullptr;
+    IonCompileTask* task = nullptr;
     {
         AutoLockHelperThreadState lock;
         while (true) {
             MOZ_ASSERT(outstanding_ > 0);
 
             if (HelperThreadState().wasmFailed())
                 return false;
 
@@ -264,104 +184,444 @@ ModuleGenerator::finishOutstandingTask()
             HelperThreadState().wait(GlobalHelperThreadState::CONSUMER);
         }
     }
 
     return finishTask(task);
 }
 
 bool
-ModuleGenerator::finishTask(CompileTask* task)
+ModuleGenerator::finishTask(IonCompileTask* task)
 {
     const FuncIR& func = task->func();
-    FunctionCompileResults& results = task->results();
-
-    // Merge the compiled results into the whole-module masm.
-    size_t offset = masm_.size();
-    if (!masm_.asmMergeWith(results.masm()))
-        return false;
+    FuncCompileResults& results = task->results();
 
-    // Create the code range now that we know offset of results in whole masm.
-    AsmJSModule::CodeRange codeRange(func.line(), results.offsets());
-    codeRange.functionOffsetBy(offset);
-    if (!module_->addFunctionCodeRange(func.name(), codeRange))
-         return false;
+    // Offset the recorded FuncOffsets by the offset of the function in the
+    // whole module's code segment.
+    uint32_t offsetInWhole = masm_.size();
+    results.offsets().offsetBy(offsetInWhole);
 
-    // Compilation may complete out of order, so cannot simply append().
+    // Record the non-profiling entry for whole-module linking later.
     if (func.index() >= funcEntryOffsets_.length()) {
         if (!funcEntryOffsets_.resize(func.index() + 1))
             return false;
     }
-    funcEntryOffsets_[func.index()] = codeRange.entry();
+    funcEntryOffsets_[func.index()] = results.offsets().nonProfilingEntry;
+
+    // Merge the compiled results into the whole-module masm.
+    DebugOnly<size_t> sizeBefore = masm_.size();
+    if (!masm_.asmMergeWith(results.masm()))
+        return false;
+    MOZ_ASSERT(masm_.size() == offsetInWhole + results.masm().size());
+
+    // Add the CodeRange for this function.
+    CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func.name());
+    if (!funcName)
+        return false;
+    uint32_t nameIndex = funcNames_.length();
+    if (!funcNames_.emplaceBack(Move(funcName)))
+        return false;
+    if (!codeRanges_.emplaceBack(nameIndex, func.line(), results.offsets()))
+        return false;
 
     // Keep a record of slow functions for printing in the final console message.
     unsigned totalTime = func.generateTime() + results.compileTime();
     if (totalTime >= SlowFunction::msThreshold) {
-        if (!slowFuncs_.append(SlowFunction(func.name(), totalTime, func.line(), func.column())))
+        if (!slowFuncs_.emplaceBack(func.name(), totalTime, func.line(), func.column()))
             return false;
     }
 
-#if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
-    AsmJSModule::ProfiledFunction pf(func.name(), codeRange.entry(), codeRange.end(),
-                                     func.line(), func.column());
-    if (!module().addProfiledFunction(pf))
-        return false;
-#endif
-
     task->reset();
     freeTasks_.infallibleAppend(task);
     return true;
 }
 
-CompileArgs
-ModuleGenerator::args() const
-{
-    return CompileArgs(cx_->compartment()->runtimeFromAnyThread(),
-                       module().usesSignalHandlersForOOB());
-}
-
 const LifoSig*
 ModuleGenerator::newLifoSig(const MallocSig& sig)
 {
     SigSet::AddPtr p = sigs_.lookupForAdd(sig);
     if (p)
         return *p;
 
     LifoSig* lifoSig = LifoSig::new_(lifo_, sig);
     if (!lifoSig || !sigs_.add(p, lifoSig))
         return nullptr;
 
     return lifoSig;
 }
 
 bool
-ModuleGenerator::declareFuncPtrTable(uint32_t numElems, uint32_t* funcPtrTableIndex)
+ModuleGenerator::allocateGlobalVar(ValType type, uint32_t* globalDataOffset)
+{
+    unsigned width = 0;
+    switch (type) {
+      case wasm::ValType::I32:
+      case wasm::ValType::F32:
+        width = 4;
+        break;
+      case wasm::ValType::I64:
+      case wasm::ValType::F64:
+        width = 8;
+        break;
+      case wasm::ValType::I32x4:
+      case wasm::ValType::F32x4:
+      case wasm::ValType::B32x4:
+        width = 16;
+        break;
+    }
+    return allocateGlobalBytes(width, width, globalDataOffset);
+}
+
+bool
+ModuleGenerator::declareImport(MallocSig&& sig, unsigned* index)
+{
+    static_assert(Module::SizeOfImportExit % sizeof(void*) == 0, "word aligned");
+
+    uint32_t globalDataOffset;
+    if (!allocateGlobalBytes(Module::SizeOfImportExit, sizeof(void*), &globalDataOffset))
+        return false;
+
+    *index = unsigned(imports_.length());
+    return imports_.emplaceBack(Move(sig), globalDataOffset);
+}
+
+uint32_t
+ModuleGenerator::numDeclaredImports() const
+{
+    return imports_.length();
+}
+
+uint32_t
+ModuleGenerator::importExitGlobalDataOffset(uint32_t index) const
+{
+    return imports_[index].exitGlobalDataOffset();
+}
+
+const MallocSig&
+ModuleGenerator::importSig(uint32_t index) const
+{
+    return imports_[index].sig();
+}
+
+bool
+ModuleGenerator::defineImport(uint32_t index, ProfilingOffsets interpExit, ProfilingOffsets jitExit)
+{
+    Import& import = imports_[index];
+    import.initInterpExitOffset(interpExit.begin);
+    import.initJitExitOffset(jitExit.begin);
+    return codeRanges_.emplaceBack(CodeRange::ImportInterpExit, interpExit) &&
+           codeRanges_.emplaceBack(CodeRange::ImportJitExit, jitExit);
+}
+
+bool
+ModuleGenerator::declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* index)
+{
+    *index = exports_.length();
+    return exports_.emplaceBack(Move(sig), funcIndex);
+}
+
+uint32_t
+ModuleGenerator::exportFuncIndex(uint32_t index) const
+{
+    return exports_[index].funcIndex();
+}
+
+const MallocSig&
+ModuleGenerator::exportSig(uint32_t index) const
+{
+    return exports_[index].sig();
+}
+
+uint32_t
+ModuleGenerator::numDeclaredExports() const
+{
+    return exports_.length();
+}
+
+bool
+ModuleGenerator::defineExport(uint32_t index, Offsets offsets)
+{
+    exports_[index].initStubOffset(offsets.begin);
+    return codeRanges_.emplaceBack(CodeRange::Entry, offsets);
+}
+
+bool
+ModuleGenerator::startFunc(PropertyName* name, unsigned line, unsigned column,
+                           FunctionGenerator* fg)
+{
+    MOZ_ASSERT(!activeFunc_);
+    MOZ_ASSERT(!finishedFuncs_);
+
+    if (freeTasks_.empty() && !finishOutstandingTask())
+        return false;
+
+    IonCompileTask* task = freeTasks_.popCopy();
+    FuncIR* func = task->lifo().new_<FuncIR>(task->lifo(), name, line, column);
+    if (!func)
+        return false;
+
+    task->init(*func);
+    fg->m_ = this;
+    fg->task_ = task;
+    fg->func_ = func;
+    activeFunc_ = fg;
+    return true;
+}
+
+bool
+ModuleGenerator::finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime,
+                            FunctionGenerator* fg)
+{
+    MOZ_ASSERT(activeFunc_ == fg);
+
+    fg->func_->finish(funcIndex, sig, generateTime);
+
+    if (parallel_) {
+        if (!StartOffThreadWasmCompile(cx_, fg->task_))
+            return false;
+        outstanding_++;
+    } else {
+        if (!IonCompileFunction(fg->task_))
+            return false;
+        if (!finishTask(fg->task_))
+            return false;
+    }
+
+    fg->m_ = nullptr;
+    fg->task_ = nullptr;
+    fg->func_ = nullptr;
+    activeFunc_ = nullptr;
+    return true;
+}
+
+bool
+ModuleGenerator::finishFuncs()
+{
+    MOZ_ASSERT(!activeFunc_);
+    MOZ_ASSERT(!finishedFuncs_);
+
+    while (outstanding_ > 0) {
+        if (!finishOutstandingTask())
+            return false;
+    }
+
+    // During codegen, all wasm->wasm (internal) calls use AsmJSInternalCallee
+    // as the call target, which contains the function-index of the target.
+    // These get recorded in a CallSiteAndTargetVector in the MacroAssembler
+    // so that we can patch them now that all the function entry offsets are
+    // known.
+
+    for (CallSiteAndTarget& cs : masm_.callSites()) {
+        if (!cs.isInternal())
+            continue;
+        MOZ_ASSERT(cs.kind() == CallSiteDesc::Relative);
+        uint32_t callerOffset = cs.returnAddressOffset();
+        uint32_t calleeOffset = funcEntryOffsets_[cs.targetIndex()];
+        masm_.patchCall(callerOffset, calleeOffset);
+    }
+
+    funcBytes_ = masm_.size();
+    finishedFuncs_ = true;
+    return true;
+}
+
+bool
+ModuleGenerator::declareFuncPtrTable(uint32_t numElems, uint32_t* index)
 {
     // Here just add an uninitialized FuncPtrTable and claim space in the global
     // data section. Later, 'defineFuncPtrTable' will be called with function
     // indices for all the elements of the table.
 
     // Avoid easy way to OOM the process.
     if (numElems > 1024 * 1024)
         return false;
 
-    if (!module_->declareFuncPtrTable(numElems, funcPtrTableIndex))
+    uint32_t globalDataOffset;
+    if (!allocateGlobalBytes(numElems * sizeof(void*), sizeof(void*), &globalDataOffset))
+        return false;
+
+    StaticLinkData::FuncPtrTableVector& tables = staticLinkData_->funcPtrTables;
+
+    *index = tables.length();
+    if (!tables.emplaceBack(globalDataOffset))
+        return false;
+
+    if (!tables.back().elemOffsets.resize(numElems))
         return false;
 
-    MOZ_ASSERT(*funcPtrTableIndex == funcPtrTables_.length());
-    return funcPtrTables_.emplaceBack(numElems);
+    return true;
+}
+
+uint32_t
+ModuleGenerator::funcPtrTableGlobalDataOffset(uint32_t index) const
+{
+    return staticLinkData_->funcPtrTables[index].globalDataOffset;
+}
+
+void
+ModuleGenerator::defineFuncPtrTable(uint32_t index, const Vector<uint32_t>& elemFuncIndices)
+{
+    MOZ_ASSERT(finishedFuncs_);
+
+    StaticLinkData::FuncPtrTable& table = staticLinkData_->funcPtrTables[index];
+    MOZ_ASSERT(table.elemOffsets.length() == elemFuncIndices.length());
+
+    for (size_t i = 0; i < elemFuncIndices.length(); i++)
+        table.elemOffsets[i] = funcEntryOffsets_[elemFuncIndices[i]];
+}
+
+bool
+ModuleGenerator::defineInlineStub(Offsets offsets)
+{
+    MOZ_ASSERT(finishedFuncs_);
+    return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
+}
+
+bool
+ModuleGenerator::defineSyncInterruptStub(ProfilingOffsets offsets)
+{
+    MOZ_ASSERT(finishedFuncs_);
+    return codeRanges_.emplaceBack(CodeRange::Interrupt, offsets);
+}
+
+bool
+ModuleGenerator::defineAsyncInterruptStub(Offsets offsets)
+{
+    MOZ_ASSERT(finishedFuncs_);
+    staticLinkData_->pod.interruptOffset = offsets.begin;
+    return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
 }
 
 bool
-ModuleGenerator::defineFuncPtrTable(uint32_t funcPtrTableIndex, FuncIndexVector&& elems)
+ModuleGenerator::defineOutOfBoundsStub(Offsets offsets)
 {
-    // The AsmJSModule needs to know the offsets in the code section which won't
-    // be known until 'finish'. So just remember the function indices for now
-    // and wait until 'finish' to hand over the offsets to the AsmJSModule.
-
-    FuncPtrTable& table = funcPtrTables_[funcPtrTableIndex];
-    if (table.numDeclared != elems.length() || !table.elems.empty())
-        return false;
-
-    table.elems = Move(elems);
-    return true;
+    MOZ_ASSERT(finishedFuncs_);
+    staticLinkData_->pod.outOfBoundsOffset = offsets.begin;
+    return codeRanges_.emplaceBack(CodeRange::Inline, offsets);
 }
 
+Module*
+ModuleGenerator::finish(Module::HeapBool usesHeap,
+                        Module::SharedBool sharedHeap,
+                        UniqueChars filename,
+                        UniqueStaticLinkData* staticLinkData,
+                        SlowFunctionVector* slowFuncs)
+{
+    MOZ_ASSERT(!activeFunc_);
+    MOZ_ASSERT(finishedFuncs_);
+
+    if (!GenerateStubs(*this, usesHeap))
+        return nullptr;
+
+    masm_.finish();
+    if (masm_.oom())
+        return nullptr;
+
+    // Start global data on a new page so JIT code may be given independent
+    // protection flags. Note assumption that global data starts right after
+    // code below.
+    uint32_t codeBytes = AlignBytes(masm_.bytesNeeded(), AsmJSPageSize);
+
+    // Inflate the global bytes up to page size so that the total bytes are a
+    // page size (as required by the allocator functions).
+    globalBytes_ = AlignBytes(globalBytes_, AsmJSPageSize);
+    uint32_t totalBytes = codeBytes + globalBytes_;
+
+    // Allocate the code (guarded by a UniquePtr until it is given to the Module).
+    UniqueCodePtr code = AllocateCode(cx_, totalBytes);
+    if (!code)
+        return nullptr;
+
+    // Delay flushing until Module::dynamicallyLink. The flush-inhibited range
+    // is set by executableCopy.
+    AutoFlushICache afc("ModuleGenerator::finish", /* inhibit = */ true);
+    masm_.executableCopy(code.get());
+
+    // c.f. JitCode::copyFrom
+    MOZ_ASSERT(masm_.jumpRelocationTableBytes() == 0);
+    MOZ_ASSERT(masm_.dataRelocationTableBytes() == 0);
+    MOZ_ASSERT(masm_.preBarrierTableBytes() == 0);
+    MOZ_ASSERT(!masm_.hasSelfReference());
+
+    // Convert the CallSiteAndTargetVector (needed during generation) to a
+    // CallSiteVector (what is stored in the Module).
+    CallSiteVector callSites;
+    if (!callSites.appendAll(masm_.callSites()))
+        return nullptr;
+
+    // Add links to absolute addresses identified symbolically.
+    StaticLinkData::SymbolicLinkArray& symbolicLinks = staticLinkData_->symbolicLinks;
+    for (size_t i = 0; i < masm_.numAsmJSAbsoluteAddresses(); i++) {
+        AsmJSAbsoluteAddress src = masm_.asmJSAbsoluteAddress(i);
+        if (!symbolicLinks[src.target].append(src.patchAt.offset()))
+            return nullptr;
+    }
+
+    // Relative link metadata: absolute addresses that refer to another point within
+    // the asm.js module.
+
+    // CodeLabels are used for switch cases and loads from floating-point /
+    // SIMD values in the constant pool.
+    for (size_t i = 0; i < masm_.numCodeLabels(); i++) {
+        CodeLabel cl = masm_.codeLabel(i);
+        StaticLinkData::InternalLink link(StaticLinkData::InternalLink::CodeLabel);
+        link.patchAtOffset = masm_.labelToPatchOffset(*cl.patchAt());
+        link.targetOffset = cl.target()->offset();
+        if (!staticLinkData_->internalLinks.append(link))
+            return nullptr;
+    }
+
+#if defined(JS_CODEGEN_X86)
+    // Global data accesses in x86 need to be patched with the absolute
+    // address of the global. Globals are allocated sequentially after the
+    // code section so we can just use an InternalLink.
+    for (size_t i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
+        AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
+        StaticLinkData::InternalLink link(StaticLinkData::InternalLink::RawPointer);
+        link.patchAtOffset = masm_.labelToPatchOffset(a.patchAt);
+        link.targetOffset = codeBytes + a.globalDataOffset;
+        if (!staticLinkData_->internalLinks.append(link))
+            return nullptr;
+    }
+#endif
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+    // On MIPS we need to update all the long jumps because they contain an
+    // absolute address. The values are correctly patched for the current address
+    // space, but not after serialization or profiling-mode toggling.
+    for (size_t i = 0; i < masm_.numLongJumps(); i++) {
+        size_t off = masm_.longJump(i);
+        StaticLinkData::InternalLink link(StaticLinkData::InternalLink::InstructionImmediate);
+        link.patchAtOffset = off;
+        link.targetOffset = Assembler::ExtractInstructionImmediate(code.get() + off) -
+                            uintptr_t(code.get());
+        if (!staticLinkData_->internalLinks.append(link))
+            return nullptr;
+    }
+#endif
+
+#if defined(JS_CODEGEN_X64)
+    // Global data accesses on x64 use rip-relative addressing and thus do
+    // not need patching after deserialization.
+    uint8_t* globalData = code.get() + codeBytes;
+    for (size_t i = 0; i < masm_.numAsmJSGlobalAccesses(); i++) {
+        AsmJSGlobalAccess a = masm_.asmJSGlobalAccess(i);
+        masm_.patchAsmJSGlobalAccess(a.patchAt, code.get(), globalData, a.globalDataOffset);
+    }
+#endif
+
+    *staticLinkData = Move(staticLinkData_);
+    *slowFuncs = Move(slowFuncs_);
+    return cx_->new_<Module>(args_,
+                             funcBytes_,
+                             codeBytes,
+                             globalBytes_,
+                             usesHeap,
+                             sharedHeap,
+                             Move(code),
+                             Move(imports_),
+                             Move(exports_),
+                             masm_.extractHeapAccesses(),
+                             Move(codeRanges_),
+                             Move(callSites),
+                             Move(funcNames_),
+                             Move(filename));
+}
--- a/js/src/asmjs/WasmGenerator.h
+++ b/js/src/asmjs/WasmGenerator.h
@@ -11,132 +11,168 @@
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
-#ifndef asmjs_wasm_generator_h
-#define asmjs_wasm_generator_h
+#ifndef wasm_generator_h
+#define wasm_generator_h
 
 #include "asmjs/WasmIonCompile.h"
-#include "asmjs/WasmStubs.h"
+#include "asmjs/WasmIR.h"
+#include "asmjs/WasmModule.h"
 #include "jit/MacroAssembler.h"
 
 namespace js {
-
-class AsmJSModule;
-namespace fronted { class TokenStream; }
-
 namespace wasm {
 
 class FunctionGenerator;
 
+// A slow function describes a function that took longer than msThreshold to
+// validate and compile.
 struct SlowFunction
 {
     SlowFunction(PropertyName* name, unsigned ms, unsigned line, unsigned column)
      : name(name), ms(ms), line(line), column(column)
     {}
 
     static const unsigned msThreshold = 250;
 
     PropertyName* name;
     unsigned ms;
     unsigned line;
     unsigned column;
 };
-
 typedef Vector<SlowFunction> SlowFunctionVector;
 
 // A ModuleGenerator encapsulates the creation of a wasm module. During the
 // lifetime of a ModuleGenerator, a sequence of FunctionGenerators are created
 // and destroyed to compile the individual function bodies. After generating all
 // functions, ModuleGenerator::finish() must be called to complete the
 // compilation and extract the resulting wasm module.
 class MOZ_STACK_CLASS ModuleGenerator
 {
-  public:
-    typedef Vector<uint32_t, 0, SystemAllocPolicy> FuncIndexVector;
-
-  private:
-    struct FuncPtrTable
-    {
-        uint32_t numDeclared;
-        FuncIndexVector elems;
-
-        explicit FuncPtrTable(uint32_t numDeclared) : numDeclared(numDeclared) {}
-        FuncPtrTable(FuncPtrTable&& rhs) : numDeclared(rhs.numDeclared), elems(Move(rhs.elems)) {}
-    };
-    typedef Vector<FuncPtrTable> FuncPtrTableVector;
+    typedef Vector<uint32_t> FuncOffsetVector;
 
     struct SigHashPolicy
     {
         typedef const MallocSig& Lookup;
         static HashNumber hash(Lookup l) { return l.hash(); }
         static bool match(const LifoSig* lhs, Lookup rhs) { return *lhs == rhs; }
     };
     typedef HashSet<const LifoSig*, SigHashPolicy> SigSet;
 
-    ExclusiveContext*                      cx_;
-    ScopedJSDeletePtr<AsmJSModule>         module_;
+    ExclusiveContext*             cx_;
+    CompileArgs                   args_;
 
-    LifoAlloc                              lifo_;
-    jit::TempAllocator                     alloc_;
-    jit::MacroAssembler                    masm_;
-    SigSet                                 sigs_;
+    // Data handed over to the Module in finish()
+    uint32_t                      globalBytes_;
+    ImportVector                  imports_;
+    ExportVector                  exports_;
+    CodeRangeVector               codeRanges_;
+    CacheableCharsVector          funcNames_;
+
+    // Data handed back to the caller in finish()
+    UniqueStaticLinkData          staticLinkData_;
+    SlowFunctionVector            slowFuncs_;
 
-    bool                                   parallel_;
-    uint32_t                               outstanding_;
-    Vector<CompileTask>                    tasks_;
-    Vector<CompileTask*>                   freeTasks_;
+    // Data scoped to the ModuleGenerator's lifetime
+    LifoAlloc                     lifo_;
+    jit::JitContext               jcx_;
+    jit::TempAllocator            alloc_;
+    jit::MacroAssembler           masm_;
+    SigSet                        sigs_;
 
-    FuncOffsetVector                       funcEntryOffsets_;
-    FuncPtrTableVector                     funcPtrTables_;
+    // Parallel compilation
+    bool                          parallel_;
+    uint32_t                      outstanding_;
+    Vector<IonCompileTask>        tasks_;
+    Vector<IonCompileTask*>       freeTasks_;
 
-    SlowFunctionVector                     slowFuncs_;
-    mozilla::DebugOnly<FunctionGenerator*> active_;
+    // Function compilation
+    uint32_t                      funcBytes_;
+    FuncOffsetVector              funcEntryOffsets_;
+    DebugOnly<FunctionGenerator*> activeFunc_;
+    DebugOnly<bool>               finishedFuncs_;
 
+    bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset);
     bool finishOutstandingTask();
-    bool finishTask(CompileTask* task);
-    CompileArgs args() const;
+    bool finishTask(IonCompileTask* task);
 
   public:
     explicit ModuleGenerator(ExclusiveContext* cx);
     ~ModuleGenerator();
 
-    bool init(ScriptSource* ss, uint32_t srcStart, uint32_t srcBodyStart, bool strict);
-    AsmJSModule& module() const { return *module_; }
+    bool init();
+
+    CompileArgs args() const { return args_; }
+    jit::MacroAssembler& masm() { return masm_; }
+    const FuncOffsetVector& funcEntryOffsets() const { return funcEntryOffsets_; }
 
     const LifoSig* newLifoSig(const MallocSig& sig);
-    bool declareFuncPtrTable(uint32_t numElems, uint32_t* funcPtrTableIndex);
-    bool defineFuncPtrTable(uint32_t funcPtrTableIndex, FuncIndexVector&& elems);
+
+    // Global data:
+    bool allocateGlobalVar(ValType type, uint32_t* globalDataOffset);
 
+    // Imports:
+    bool declareImport(MallocSig&& sig, uint32_t* index);
+    uint32_t numDeclaredImports() const;
+    uint32_t importExitGlobalDataOffset(uint32_t index) const;
+    const MallocSig& importSig(uint32_t index) const;
+    bool defineImport(uint32_t index, ProfilingOffsets interpExit, ProfilingOffsets jitExit);
+
+    // Exports:
+    bool declareExport(MallocSig&& sig, uint32_t funcIndex, uint32_t* index);
+    uint32_t numDeclaredExports() const;
+    uint32_t exportFuncIndex(uint32_t index) const;
+    const MallocSig& exportSig(uint32_t index) const;
+    bool defineExport(uint32_t index, Offsets offsets);
+
+    // Functions:
     bool startFunc(PropertyName* name, unsigned line, unsigned column, FunctionGenerator* fg);
     bool finishFunc(uint32_t funcIndex, const LifoSig& sig, unsigned generateTime, FunctionGenerator* fg);
+    bool finishFuncs();
 
-    bool finish(frontend::TokenStream& ts, ScopedJSDeletePtr<AsmJSModule>* module,
-                SlowFunctionVector* slowFuncs);
+    // Function-pointer tables:
+    bool declareFuncPtrTable(uint32_t numElems, uint32_t* index);
+    uint32_t funcPtrTableGlobalDataOffset(uint32_t index) const;
+    void defineFuncPtrTable(uint32_t index, const Vector<uint32_t>& elemFuncIndices);
+
+    // Stubs:
+    bool defineInlineStub(Offsets offsets);
+    bool defineSyncInterruptStub(ProfilingOffsets offsets);
+    bool defineAsyncInterruptStub(Offsets offsets);
+    bool defineOutOfBoundsStub(Offsets offsets);
+
+    // Null return indicates failure. The caller must immediately root a
+    // non-null return value.
+    Module* finish(Module::HeapBool usesHeap,
+                   Module::SharedBool sharedHeap,
+                   UniqueChars filename,
+                   UniqueStaticLinkData* staticLinkData,
+                   SlowFunctionVector* slowFuncs);
 };
 
 // A FunctionGenerator encapsulates the generation of a single function body.
 // ModuleGenerator::startFunc must be called after construction and before doing
 // anything else. After the body is complete, ModuleGenerator::finishFunc must
 // be called before the FunctionGenerator is destroyed and the next function is
 // started.
 class MOZ_STACK_CLASS FunctionGenerator
 {
     friend class ModuleGenerator;
 
     ModuleGenerator* m_;
-    CompileTask*     task_;
+    IonCompileTask*  task_;
     FuncIR*          func_;
 
   public:
     FunctionGenerator() : m_(nullptr), task_(nullptr), func_(nullptr) {}
     FuncIR& func() const { MOZ_ASSERT(func_); return *func_; }
 };
 
 } // namespace wasm
 } // namespace js
 
-#endif // asmjs_wasm_generator_h
+#endif // wasm_generator_h
--- a/js/src/asmjs/WasmIR.h
+++ b/js/src/asmjs/WasmIR.h
@@ -11,20 +11,20 @@
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
-#ifndef asmjs_wasm_ir_h
-#define asmjs_wasm_ir_h
+#ifndef wasm_ir_h
+#define wasm_ir_h
 
-#include "asmjs/Wasm.h"
+#include "asmjs/WasmTypes.h"
 
 namespace js {
 
 class PropertyName;
 
 namespace wasm {
 
 enum class Stmt : uint8_t
@@ -429,18 +429,17 @@ enum NeedsBoundsCheck : uint8_t
 // associated with a LifoAlloc allocation which contains all the memory
 // referenced by the FuncIR.
 class FuncIR
 {
     typedef Vector<wasm::Val, 4, LifoAllocPolicy<Fallible>> VarInitVector;
     typedef Vector<uint8_t, 4096, LifoAllocPolicy<Fallible>> Bytecode;
 
     // Note: this unrooted field assumes AutoKeepAtoms via TokenStream via
-    // asm.js compilation. Wasm compilation will require an alternative way to
-    // name CodeRanges (index).
+    // asm.js compilation.
     PropertyName* name_;
     unsigned line_;
     unsigned column_;
 
     uint32_t index_;
     const wasm::LifoSig* sig_;
     VarInitVector varInits_;
     Bytecode bytecode_;
@@ -567,9 +566,9 @@ class FuncIR
     wasm::Val varInit(size_t i) const { return varInits_[i]; }
     size_t numLocals() const { return sig_->args().length() + varInits_.length(); }
     unsigned generateTime() const { MOZ_ASSERT(generateTime_ != UINT_MAX); return generateTime_; }
 };
 
 } // namespace wasm
 } // namespace js
 
-#endif // asmjs_wasm_ir_h
+#endif // wasm_ir_h
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -35,43 +35,40 @@ typedef Vector<MBasicBlock*, 8, SystemAl
 class FunctionCompiler
 {
   private:
     typedef HashMap<uint32_t, BlockVector, DefaultHasher<uint32_t>, SystemAllocPolicy> LabeledBlockMap;
     typedef HashMap<size_t, BlockVector, DefaultHasher<uint32_t>, SystemAllocPolicy> UnlabeledBlockMap;
     typedef Vector<size_t, 4, SystemAllocPolicy> PositionStack;
     typedef Vector<ValType, 4, SystemAllocPolicy> LocalTypes;
 
-    CompileArgs              args_;
-    const FuncIR&            func_;
-    size_t                   pc_;
-
-    TempAllocator&           alloc_;
-    MIRGraph&                graph_;
-    const CompileInfo&       info_;
-    MIRGenerator&            mirGen_;
-
-    MBasicBlock*             curBlock_;
-
-    PositionStack            loopStack_;
-    PositionStack            breakableStack_;
-    UnlabeledBlockMap        unlabeledBreaks_;
-    UnlabeledBlockMap        unlabeledContinues_;
-    LabeledBlockMap          labeledBreaks_;
-    LabeledBlockMap          labeledContinues_;
-
-    LocalTypes               localTypes_;
-
-    FunctionCompileResults&  compileResults_;
+    const FuncIR&       func_;
+    size_t              pc_;
+
+    TempAllocator&      alloc_;
+    MIRGraph&           graph_;
+    const CompileInfo&  info_;
+    MIRGenerator&       mirGen_;
+
+    MBasicBlock*        curBlock_;
+
+    PositionStack       loopStack_;
+    PositionStack       breakableStack_;
+    UnlabeledBlockMap   unlabeledBreaks_;
+    UnlabeledBlockMap   unlabeledContinues_;
+    LabeledBlockMap     labeledBreaks_;
+    LabeledBlockMap     labeledContinues_;
+
+    LocalTypes          localTypes_;
+
+    FuncCompileResults& compileResults_;
 
   public:
-    FunctionCompiler(CompileArgs args, const FuncIR& func, MIRGenerator& mirGen,
-                     FunctionCompileResults& compileResults)
-      : args_(args),
-        func_(func),
+    FunctionCompiler(const FuncIR& func, MIRGenerator& mirGen, FuncCompileResults& compileResults)
+      : func_(func),
         pc_(0),
         alloc_(mirGen.alloc()),
         graph_(mirGen.graph()),
         info_(mirGen.info()),
         mirGen_(mirGen),
         curBlock_(nullptr),
         compileResults_(compileResults)
     {}
@@ -765,17 +762,17 @@ class FunctionCompiler
         }
 
         MAsmJSLoadFFIFunc* ptrFun = MAsmJSLoadFFIFunc::New(alloc(), globalDataOffset);
         curBlock_->add(ptrFun);
 
         return callPrivate(MAsmJSCall::Callee(ptrFun), call, ret, def);
     }
 
-    bool builtinCall(Builtin builtin, const Call& call, ValType type, MDefinition** def)
+    bool builtinCall(SymbolicAddress builtin, const Call& call, ValType type, MDefinition** def)
     {
         return callPrivate(MAsmJSCall::Callee(builtin), call, ToExprType(type), def);
     }
 
     /*********************************************** Control flow generation */
 
     inline bool inDeadCode() const {
         return curBlock_ == nullptr;
@@ -1643,17 +1640,17 @@ EmitMathBuiltinCall(FunctionCompiler& f,
     f.startCallArgs(&call);
 
     MDefinition* firstArg;
     if (!EmitF32Expr(f, &firstArg) || !f.passArg(firstArg, ValType::F32, &call))
         return false;
 
     f.finishCallArgs(&call);
 
-    Builtin callee = f32 == F32::Ceil ? Builtin::CeilF : Builtin::FloorF;
+    SymbolicAddress callee = f32 == F32::Ceil ? SymbolicAddress::CeilF : SymbolicAddress::FloorF;
     return f.builtinCall(callee, call, ValType::F32, def);
 }
 
 static bool
 EmitMathBuiltinCall(FunctionCompiler& f, F64 f64, MDefinition** def)
 {
     uint32_t lineno, column;
     ReadCallLineCol(f, &lineno, &column);
@@ -1666,30 +1663,30 @@ EmitMathBuiltinCall(FunctionCompiler& f,
         return false;
 
     if (f64 == F64::Pow || f64 == F64::Atan2) {
         MDefinition* secondArg;
         if (!EmitF64Expr(f, &secondArg) || !f.passArg(secondArg, ValType::F64, &call))
             return false;
     }
 
-    Builtin callee;
+    SymbolicAddress callee;
     switch (f64) {
-      case F64::Ceil:  callee = Builtin::CeilD; break;
-      case F64::Floor: callee = Builtin::FloorD; break;
-      case F64::Sin:   callee = Builtin::SinD; break;
-      case F64::Cos:   callee = Builtin::CosD; break;
-      case F64::Tan:   callee = Builtin::TanD; break;
-      case F64::Asin:  callee = Builtin::ASinD; break;
-      case F64::Acos:  callee = Builtin::ACosD; break;
-      case F64::Atan:  callee = Builtin::ATanD; break;
-      case F64::Exp:   callee = Builtin::ExpD; break;
-      case F64::Log:   callee = Builtin::LogD; break;
-      case F64::Pow:   callee = Builtin::PowD; break;
-      case F64::Atan2: callee = Builtin::ATan2D; break;
+      case F64::Ceil:  callee = SymbolicAddress::CeilD; break;
+      case F64::Floor: callee = SymbolicAddress::FloorD; break;
+      case F64::Sin:   callee = SymbolicAddress::SinD; break;
+      case F64::Cos:   callee = SymbolicAddress::CosD; break;
+      case F64::Tan:   callee = SymbolicAddress::TanD; break;
+      case F64::Asin:  callee = SymbolicAddress::ASinD; break;
+      case F64::Acos:  callee = SymbolicAddress::ACosD; break;
+      case F64::Atan:  callee = SymbolicAddress::ATanD; break;
+      case F64::Exp:   callee = SymbolicAddress::ExpD; break;
+      case F64::Log:   callee = SymbolicAddress::LogD; break;
+      case F64::Pow:   callee = SymbolicAddress::PowD; break;
+      case F64::Atan2: callee = SymbolicAddress::ATan2D; break;
       default: MOZ_CRASH("unexpected double math builtin callee");
     }
 
     f.finishCallArgs(&call);
 
     return f.builtinCall(callee, call, ValType::F64, def);
 }
 
@@ -3041,36 +3038,35 @@ EmitB32X4Expr(FunctionCompiler& f, MDefi
         return EmitSimdBooleanSplat(f, def);
       case B32X4::Bad:
         break;
     }
     MOZ_CRASH("unexpected bool32x4 expression");
 }
 
 bool
-wasm::CompileFunction(CompileTask* task)
+wasm::IonCompileFunction(IonCompileTask* task)
 {
     int64_t before = PRMJ_Now();
 
-    CompileArgs args = task->args();
     const FuncIR& func = task->func();
-    FunctionCompileResults& results = task->results();
-
-    JitContext jitContext(CompileRuntime::get(args.runtime), &results.alloc());
+    FuncCompileResults& results = task->results();
+
+    JitContext jitContext(CompileRuntime::get(task->runtime()), &results.alloc());
 
     const JitCompileOptions options;
     MIRGraph graph(&results.alloc());
     CompileInfo compileInfo(func.numLocals());
     MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo,
                      IonOptimizations.get(OptimizationLevel::AsmJS),
-                     args.usesSignalHandlersForOOB);
+                     task->args().useSignalHandlersForOOB);
 
     // Build MIR graph
     {
-        FunctionCompiler f(args, func, mir, results);
+        FunctionCompiler f(func, mir, results);
         if (!f.init())
             return false;
 
         while (!f.done()) {
             if (!EmitStatement(f))
                 return false;
         }
 
--- a/js/src/asmjs/WasmIonCompile.h
+++ b/js/src/asmjs/WasmIonCompile.h
@@ -11,93 +11,102 @@
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
-#ifndef asmjs_wasm_ion_compile_h
-#define asmjs_wasm_ion_compile_h
+#ifndef wasm_ion_compile_h
+#define wasm_ion_compile_h
 
-#include "asmjs/AsmJSFrameIterator.h"
-#include "asmjs/WasmCompileArgs.h"
 #include "asmjs/WasmIR.h"
 #include "jit/MacroAssembler.h"
 
 namespace js {
 namespace wasm {
 
-class FunctionCompileResults
+// The FuncCompileResults contains the results of compiling a single function
+// body, ready to be merged into the whole-module MacroAssembler.
+class FuncCompileResults
 {
     jit::TempAllocator alloc_;
     jit::MacroAssembler masm_;
-    AsmJSFunctionOffsets offsets_;
+    FuncOffsets offsets_;
     unsigned compileTime_;
 
-    FunctionCompileResults(const FunctionCompileResults&) = delete;
-    FunctionCompileResults& operator=(const FunctionCompileResults&) = delete;
+    FuncCompileResults(const FuncCompileResults&) = delete;
+    FuncCompileResults& operator=(const FuncCompileResults&) = delete;
 
   public:
-    explicit FunctionCompileResults(LifoAlloc& lifo)
+    explicit FuncCompileResults(LifoAlloc& lifo)
       : alloc_(&lifo),
-        masm_(jit::MacroAssembler::AsmJSToken(), &alloc_),
+        masm_(jit::MacroAssembler::AsmJSToken(), alloc_),
         compileTime_(0)
     {}
 
     jit::TempAllocator& alloc() { return alloc_; }
     jit::MacroAssembler& masm() { return masm_; }
-
-    AsmJSFunctionOffsets& offsets() { return offsets_; }
-    const AsmJSFunctionOffsets& offsets() const { return offsets_; }
+    FuncOffsets& offsets() { return offsets_; }
 
     void setCompileTime(unsigned t) { MOZ_ASSERT(!compileTime_); compileTime_ = t; }
     unsigned compileTime() const { return compileTime_; }
 };
 
-class CompileTask
+// An IonCompileTask represents the task of compiling a single function body. An
+// IonCompileTask is filled with the wasm code to be compiled on the main
+// validation thread, sent off to an Ion compilation helper thread which creates
+// the FuncCompileResults, and finally sent back to the validation thread. To
+// save time allocating and freeing memory, IonCompileTasks are reset() and
+// reused.
+class IonCompileTask
 {
+    JSRuntime* const runtime_;
+    const CompileArgs args_;
     LifoAlloc lifo_;
-    const CompileArgs args_;
     const FuncIR* func_;
-    mozilla::Maybe<FunctionCompileResults> results_;
+    mozilla::Maybe<FuncCompileResults> results_;
 
-    CompileTask(const CompileTask&) = delete;
-    CompileTask& operator=(const CompileTask&) = delete;
+    IonCompileTask(const IonCompileTask&) = delete;
+    IonCompileTask& operator=(const IonCompileTask&) = delete;
 
   public:
-    CompileTask(size_t defaultChunkSize, CompileArgs args)
-      : lifo_(defaultChunkSize),
+    IonCompileTask(JSRuntime* runtime, CompileArgs args, size_t defaultChunkSize)
+      : runtime_(runtime),
         args_(args),
+        lifo_(defaultChunkSize),
         func_(nullptr)
     {}
+    JSRuntime* runtime() const {
+        return runtime_;
+    }
     LifoAlloc& lifo() {
         return lifo_;
     }
     CompileArgs args() const {
         return args_;
     }
     void init(const FuncIR& func) {
         func_ = &func;
         results_.emplace(lifo_);
     }
     const FuncIR& func() const {
         MOZ_ASSERT(func_);
         return *func_;
     }
-    FunctionCompileResults& results() {
+    FuncCompileResults& results() {
         return *results_;
     }
     void reset() {
         func_ = nullptr;
         results_.reset();
         lifo_.releaseAll();
     }
 };
 
 bool
-CompileFunction(CompileTask* task);
+IonCompileFunction(IonCompileTask* task);
 
 } // namespace wasm
 } // namespace js
 
-#endif // asmjs_wasm_ion_compile_h
+#endif // wasm_ion_compile_h
new file mode 100644
--- /dev/null
+++ b/js/src/asmjs/WasmModule.cpp
@@ -0,0 +1,1368 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asmjs/WasmModule.h"
+
+#include "mozilla/BinarySearch.h"
+#include "mozilla/EnumeratedRange.h"
+#include "mozilla/PodOperations.h"
+
+#include "jsprf.h"
+
+#include "asmjs/AsmJSValidate.h"
+#include "asmjs/WasmSerialize.h"
+#include "builtin/AtomicsObject.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/BaselineJIT.h"
+#include "jit/ExecutableAllocator.h"
+#include "js/MemoryMetrics.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/TypeInference-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+using mozilla::BinarySearch;
+using mozilla::MakeEnumeratedRange;
+using mozilla::PodZero;
+using mozilla::Swap;
+using JS::GenericNaN;
+
+UniqueCodePtr
+wasm::AllocateCode(ExclusiveContext* cx, size_t bytes)
+{
+    // On most platforms, this will allocate RWX memory. On iOS, or when
+    // --non-writable-jitcode is used, this will allocate RW memory. In this
+    // case, DynamicallyLinkModule will reprotect the code as RX.
+    unsigned permissions =
+        ExecutableAllocator::initialProtectionFlags(ExecutableAllocator::Writable);
+
+    void* p = AllocateExecutableMemory(nullptr, bytes, permissions, "asm-js-code", AsmJSPageSize);
+    if (!p)
+        ReportOutOfMemory(cx);
+
+    MOZ_ASSERT(uintptr_t(p) % AsmJSPageSize == 0);
+
+    return UniqueCodePtr((uint8_t*)p, CodeDeleter(bytes));
+}
+
+void
+CodeDeleter::operator()(uint8_t* p)
+{
+    DeallocateExecutableMemory(p, bytes_, AsmJSPageSize);
+}
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+// On MIPS, CodeLabels are instruction immediates so InternalLinks only
+// patch instruction immediates.
+StaticLinkData::InternalLink::InternalLink(Kind kind)
+{
+    MOZ_ASSERT(kind == CodeLabel || kind == InstructionImmediate);
+}
+
+bool
+StaticLinkData::InternalLink::isRawPointerPatch()
+{
+    return false;
+}
+#else
+// On the rest, CodeLabels are raw pointers so InternalLinks only patch
+// raw pointers.
+StaticLinkData::InternalLink::InternalLink(Kind kind)
+{
+    MOZ_ASSERT(kind == CodeLabel || kind == RawPointer);
+}
+
+bool
+StaticLinkData::InternalLink::isRawPointerPatch()
+{
+    return true;
+}
+#endif
+
+size_t
+StaticLinkData::SymbolicLinkArray::serializedSize() const
+{
+    size_t size = 0;
+    for (const OffsetVector& offsets : *this)
+        size += SerializedPodVectorSize(offsets);
+    return size;
+}
+
+uint8_t*
+StaticLinkData::SymbolicLinkArray::serialize(uint8_t* cursor) const
+{
+    for (const OffsetVector& offsets : *this)
+        cursor = SerializePodVector(cursor, offsets);
+    return cursor;
+}
+
+const uint8_t*
+StaticLinkData::SymbolicLinkArray::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+    for (OffsetVector& offsets : *this) {
+        cursor = DeserializePodVector(cx, cursor, &offsets);
+        if (!cursor)
+            return nullptr;
+    }
+    return cursor;
+}
+
+bool
+StaticLinkData::SymbolicLinkArray::clone(JSContext* cx, SymbolicLinkArray* out) const
+{
+    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+        if (!ClonePodVector(cx, (*this)[imm], &(*out)[imm]))
+            return false;
+    }
+    return true;
+}
+
+size_t
+StaticLinkData::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    size_t size = 0;
+    for (const OffsetVector& offsets : *this)
+        size += offsets.sizeOfExcludingThis(mallocSizeOf);
+    return size;
+}
+
+size_t
+StaticLinkData::FuncPtrTable::serializedSize() const
+{
+    return sizeof(globalDataOffset) +
+           SerializedPodVectorSize(elemOffsets);
+}
+
+uint8_t*
+StaticLinkData::FuncPtrTable::serialize(uint8_t* cursor) const
+{
+    cursor = WriteBytes(cursor, &globalDataOffset, sizeof(globalDataOffset));
+    cursor = SerializePodVector(cursor, elemOffsets);
+    return cursor;
+}
+
+const uint8_t*
+StaticLinkData::FuncPtrTable::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+    (cursor = ReadBytes(cursor, &globalDataOffset, sizeof(globalDataOffset))) &&
+    (cursor = DeserializePodVector(cx, cursor, &elemOffsets));
+    return cursor;
+}
+
+bool
+StaticLinkData::FuncPtrTable::clone(JSContext* cx, FuncPtrTable* out) const
+{
+    out->globalDataOffset = globalDataOffset;
+    return ClonePodVector(cx, elemOffsets, &out->elemOffsets);
+}
+
+size_t
+StaticLinkData::FuncPtrTable::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    return elemOffsets.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t
+StaticLinkData::serializedSize() const
+{
+    return sizeof(pod) +
+           SerializedPodVectorSize(internalLinks) +
+           symbolicLinks.serializedSize() +
+           SerializedVectorSize(funcPtrTables);
+}
+
+uint8_t*
+StaticLinkData::serialize(uint8_t* cursor) const
+{
+    cursor = WriteBytes(cursor, &pod, sizeof(pod));
+    cursor = SerializePodVector(cursor, internalLinks);
+    cursor = symbolicLinks.serialize(cursor);
+    cursor = SerializeVector(cursor, funcPtrTables);
+    return cursor;
+}
+
+const uint8_t*
+StaticLinkData::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+    (cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
+    (cursor = DeserializePodVector(cx, cursor, &internalLinks)) &&
+    (cursor = symbolicLinks.deserialize(cx, cursor)) &&
+    (cursor = DeserializeVector(cx, cursor, &funcPtrTables));
+    return cursor;
+}
+
+bool
+StaticLinkData::clone(JSContext* cx, StaticLinkData* out) const
+{
+    out->pod = pod;
+    return ClonePodVector(cx, internalLinks, &out->internalLinks) &&
+           symbolicLinks.clone(cx, &out->symbolicLinks) &&
+           CloneVector(cx, funcPtrTables, &out->funcPtrTables);
+}
+
+size_t
+StaticLinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    size_t size = internalLinks.sizeOfExcludingThis(mallocSizeOf) +
+                  symbolicLinks.sizeOfExcludingThis(mallocSizeOf) +
+                  SizeOfVectorExcludingThis(funcPtrTables, mallocSizeOf);
+
+    for (const OffsetVector& offsets : symbolicLinks)
+        size += offsets.sizeOfExcludingThis(mallocSizeOf);
+
+    return size;
+}
+
+static size_t
+SerializedSigSize(const MallocSig& sig)
+{
+    return sizeof(ExprType) +
+           SerializedPodVectorSize(sig.args());
+}
+
+static uint8_t*
+SerializeSig(uint8_t* cursor, const MallocSig& sig)
+{
+    cursor = WriteScalar<ExprType>(cursor, sig.ret());
+    cursor = SerializePodVector(cursor, sig.args());
+    return cursor;
+}
+
+static const uint8_t*
+DeserializeSig(ExclusiveContext* cx, const uint8_t* cursor, MallocSig* sig)
+{
+    ExprType ret;
+    cursor = ReadScalar<ExprType>(cursor, &ret);
+
+    MallocSig::ArgVector args;
+    cursor = DeserializePodVector(cx, cursor, &args);
+    if (!cursor)
+        return nullptr;
+
+    sig->init(Move(args), ret);
+    return cursor;
+}
+
+static bool
+CloneSig(JSContext* cx, const MallocSig& sig, MallocSig* out)
+{
+    MallocSig::ArgVector args;
+    if (!ClonePodVector(cx, sig.args(), &args))
+        return false;
+
+    out->init(Move(args), sig.ret());
+    return true;
+}
+
+static size_t
+SizeOfSigExcludingThis(const MallocSig& sig, MallocSizeOf mallocSizeOf)
+{
+    return sig.args().sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t
+Export::serializedSize() const
+{
+    return SerializedSigSize(sig_) +
+           sizeof(pod);
+}
+
+uint8_t*
+Export::serialize(uint8_t* cursor) const
+{
+    cursor = SerializeSig(cursor, sig_);
+    cursor = WriteBytes(cursor, &pod, sizeof(pod));
+    return cursor;
+}
+
+const uint8_t*
+Export::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
+    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+    return cursor;
+}
+
+bool
+Export::clone(JSContext* cx, Export* out) const
+{
+    out->pod = pod;
+    return CloneSig(cx, sig_, &out->sig_);
+}
+
+size_t
+Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    return SizeOfSigExcludingThis(sig_, mallocSizeOf);
+}
+
+size_t
+Import::serializedSize() const
+{
+    return SerializedSigSize(sig_) +
+           sizeof(pod);
+}
+
+uint8_t*
+Import::serialize(uint8_t* cursor) const
+{
+    cursor = SerializeSig(cursor, sig_);
+    cursor = WriteBytes(cursor, &pod, sizeof(pod));
+    return cursor;
+}
+
+const uint8_t*
+Import::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+    (cursor = DeserializeSig(cx, cursor, &sig_)) &&
+    (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+    return cursor;
+}
+
+bool
+Import::clone(JSContext* cx, Import* out) const
+{
+    out->pod = pod;
+    return CloneSig(cx, sig_, &out->sig_);
+}
+
+size_t
+Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+    return SizeOfSigExcludingThis(sig_, mallocSizeOf);
+}
+
+CodeRange::CodeRange(Kind kind, Offsets offsets)
+  : nameIndex_(0),
+    lineNumber_(0),
+    begin_(offsets.begin),
+    profilingReturn_(0),
+    end_(offsets.end)
+{
+    PodZero(&u);  // zero padding for Valgrind
+    u.kind_ = kind;
+
+    MOZ_ASSERT(begin_ <= end_);
+    MOZ_ASSERT(u.kind_ == Entry || u.kind_ == Inline);
+}
+
+CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
+  : nameIndex_(0),
+    lineNumber_(0),
+    begin_(offsets.begin),
+    profilingReturn_(offsets.profilingReturn),
+    end_(offsets.end)
+{
+    PodZero(&u);  // zero padding for Valgrind
+    u.kind_ = kind;
+
+    MOZ_ASSERT(begin_ < profilingReturn_);
+    MOZ_ASSERT(profilingReturn_ < end_);
+    MOZ_ASSERT(u.kind_ == ImportJitExit || u.kind_ == ImportInterpExit || u.kind_ == Interrupt);
+}
+
+CodeRange::CodeRange(uint32_t nameIndex, uint32_t lineNumber, FuncOffsets offsets)
+  : nameIndex_(nameIndex),
+    lineNumber_(lineNumber)
+{
+    PodZero(&u);  // zero padding for Valgrind
+    u.kind_ = Function;
+
+    MOZ_ASSERT(offsets.nonProfilingEntry - offsets.begin <= UINT8_MAX);
+    begin_ = offsets.begin;
+    u.func.beginToEntry_ = offsets.nonProfilingEntry - begin_;
+
+    MOZ_ASSERT(offsets.nonProfilingEntry < offsets.profilingReturn);
+    MOZ_ASSERT(offsets.profilingReturn - offsets.profilingJump <= UINT8_MAX);
+    MOZ_ASSERT(offsets.profilingReturn - offsets.profilingEpilogue <= UINT8_MAX);
+    profilingReturn_ = offsets.profilingReturn;
+    u.func.profilingJumpToProfilingReturn_ = profilingReturn_ - offsets.profilingJump;
+    u.func.profilingEpilogueToProfilingReturn_ = profilingReturn_ - offsets.profilingEpilogue;
+
+    MOZ_ASSERT(offsets.nonProfilingEntry < offsets.end);
+    end_ = offsets.end;
+}
+
+size_t
+CacheableChars::serializedSize() const
+{
+    return sizeof(uint32_t) + strlen(get());
+}
+
+uint8_t*
+CacheableChars::serialize(uint8_t* cursor) const
+{
+    uint32_t length = strlen(get());
+    cursor = WriteBytes(cursor, &length, sizeof(uint32_t));
+    cursor = WriteBytes(cursor, get(), length);
+    return cursor;
+}
+
+const uint8_t*
+CacheableChars::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
+{
+    uint32_t length;
+    cursor = ReadBytes(cursor, &length, sizeof(uint32_t));
+
+    reset(js_pod_calloc<char>(length + 1));
+    if (!get())
+        return nullptr;
+
+    cursor = ReadBytes(cursor, get(), length);
+    return cursor;
+}
+
+bool
+CacheableChars::clone(JSContext* cx, CacheableChars* out) const
+{
+    *out = make_string_copy(get());
+    return !!*out;
+}
+
+class Module::AutoMutateCode
+{
+    AutoWritableJitCode awjc_;
+    AutoFlushICache afc_;
+
+   public:
+    AutoMutateCode(JSContext* cx, Module& module, const char* name)
+      : awjc_(cx->runtime(), module.code(), module.pod.codeBytes_),
+        afc_(name)
+    {
+        AutoFlushICache::setRange(uintptr_t(module.code()), module.pod.codeBytes_);
+    }
+};
+
+uint32_t
+Module::totalBytes() const
+{
+    return pod.codeBytes_ + pod.globalBytes_;
+}
+
+uint8_t*
+Module::rawHeapPtr() const
+{
+    return const_cast<Module*>(this)->rawHeapPtr();
+}
+
+uint8_t*&
+Module::rawHeapPtr()
+{
+    return *(uint8_t**)(globalData() + HeapGlobalDataOffset);
+}
+
+void
+Module::specializeToHeap(ArrayBufferObjectMaybeShared* heap)
+{
+    MOZ_ASSERT_IF(heap->is<ArrayBufferObject>(), heap->as<ArrayBufferObject>().isAsmJS());
+    MOZ_ASSERT(!maybeHeap_);
+    MOZ_ASSERT(!rawHeapPtr());
+
+    uint8_t* ptrBase = heap->dataPointerEither().unwrap(/*safe - protected by Module methods*/);
+    uint32_t heapLength = heap->byteLength();
+#if defined(JS_CODEGEN_X86)
+    // An access is out-of-bounds iff
+    //      ptr + offset + data-type-byte-size > heapLength
+    // i.e. ptr > heapLength - data-type-byte-size - offset. data-type-byte-size
+    // and offset are already included in the addend so we
+    // just have to add the heap length here.
+    for (const HeapAccess& access : heapAccesses_) {
+        if (access.hasLengthCheck())
+            X86Encoding::AddInt32(access.patchLengthAt(code()), heapLength);
+        void* addr = access.patchHeapPtrImmAt(code());
+        uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
+        MOZ_ASSERT(disp <= INT32_MAX);
+        X86Encoding::SetPointer(addr, (void*)(ptrBase + disp));
+    }
+#elif defined(JS_CODEGEN_X64)
+    // Even with signal handling being used for most bounds checks, there may be
+    // atomic operations that depend on explicit checks.
+    //
+    // If we have any explicit bounds checks, we need to patch the heap length
+    // checks at the right places. All accesses that have been recorded are the
+    // only ones that need bound checks (see also
+    // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
+    for (const HeapAccess& access : heapAccesses_) {
+        // See comment above for x86 codegen.
+        if (access.hasLengthCheck())
+            X86Encoding::AddInt32(access.patchLengthAt(code()), heapLength);
+    }
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+      defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+    for (const HeapAccess& access : heapAccesses_)
+        Assembler::UpdateBoundsCheck(heapLength, (Instruction*)(access.insnOffset() + code()));
+#endif
+
+    maybeHeap_ = heap;
+    rawHeapPtr() = ptrBase;
+}
+
+void
+Module::despecializeFromHeap(ArrayBufferObjectMaybeShared* heap)
+{
+    MOZ_ASSERT_IF(maybeHeap_, maybeHeap_ == heap);
+    MOZ_ASSERT_IF(rawHeapPtr(), rawHeapPtr() == heap->dataPointerEither().unwrap());
+
+#if defined(JS_CODEGEN_X86)
+    uint32_t heapLength = heap->byteLength();
+    uint8_t* ptrBase = heap->dataPointerEither().unwrap(/*safe - used for value*/);
+    for (unsigned i = 0; i < heapAccesses_.length(); i++) {
+        const HeapAccess& access = heapAccesses_[i];
+        if (access.hasLengthCheck())
+            X86Encoding::AddInt32(access.patchLengthAt(code()), -heapLength);
+        void* addr = access.patchHeapPtrImmAt(code());
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(X86Encoding::GetPointer(addr));
+        MOZ_ASSERT(ptr >= ptrBase);
+        X86Encoding::SetPointer(addr, reinterpret_cast<void*>(ptr - ptrBase));
+    }
+#elif defined(JS_CODEGEN_X64)
+    uint32_t heapLength = heap->byteLength();
+    for (unsigned i = 0; i < heapAccesses_.length(); i++) {
+        const HeapAccess& access = heapAccesses_[i];
+        if (access.hasLengthCheck())
+            X86Encoding::AddInt32(access.patchLengthAt(code()), -heapLength);
+    }
+#endif
+
+    maybeHeap_ = nullptr;
+    rawHeapPtr() = nullptr;
+}
+
+void
+Module::sendCodeRangesToProfiler(JSContext* cx)
+{
+#ifdef JS_ION_PERF
+    if (PerfFuncEnabled()) {
+        for (const CodeRange& codeRange : codeRanges_) {
+            if (!codeRange.isFunction())
+                continue;
+
+            uintptr_t start = uintptr_t(code() + codeRange.begin());
+            uintptr_t end = uintptr_t(code() + codeRange.end());
+            uintptr_t size = end - start;
+            const char* file = filename_.get();
+            unsigned line = codeRange.funcLineNumber();
+            unsigned column = 0;
+            const char* name = funcNames_[codeRange.funcNameIndex()].get();
+
+            writePerfSpewerAsmJSFunctionMap(start, size, file, line, column, name);
+        }
+    }
+#endif
+#ifdef MOZ_VTUNE
+    if (IsVTuneProfilingActive()) {
+        for (const CodeRange& codeRange : codeRanges_) {
+            if (!codeRange.isFunction())
+                continue;
+
+            uintptr_t start = uintptr_t(code() + codeRange.begin());
+            uintptr_t end = uintptr_t(code() + codeRange.end());
+            uintptr_t size = end - start;
+            const char* name = funcNames_[codeRange.funcNameIndex()].get();
+
+            unsigned method_id = iJIT_GetNewMethodID();
+            if (method_id == 0)
+                return;
+            iJIT_Method_Load method;
+            method.method_id = method_id;
+            method.method_name = const_cast<char*>(name);
+            method.method_load_address = (void*)start;
+            method.method_size = size;
+            method.line_number_size = 0;
+            method.line_number_table = nullptr;
+            method.class_id = 0;
+            method.class_file_name = nullptr;
+            method.source_file_name = nullptr;
+            iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&method);
+        }
+    }
+#endif
+}
+
+Module::ImportExit&
+Module::importToExit(const Import& import)
+{
+    return *reinterpret_cast<ImportExit*>(globalData() + import.exitGlobalDataOffset());
+}
+
+/* static */ Module::CacheablePod
+Module::zeroPod()
+{
+    CacheablePod pod = {0, 0, 0, false, false, false, false};
+    return pod;
+}
+
+void
+Module::init()
+{
+    staticallyLinked_ = false;
+    interrupt_ = nullptr;
+    outOfBounds_ = nullptr;
+    dynamicallyLinked_ = false;
+    prev_ = nullptr;
+    next_ = nullptr;
+    interrupted_ = false;
+
+    *(double*)(globalData() + NaN64GlobalDataOffset) = GenericNaN();
+    *(float*)(globalData() + NaN32GlobalDataOffset) = GenericNaN();
+}
+
+// Private constructor used for deserialization and cloning.
+Module::Module(const CacheablePod& pod,
+               UniqueCodePtr code,
+               ImportVector&& imports,
+               ExportVector&& exports,
+               HeapAccessVector&& heapAccesses,
+               CodeRangeVector&& codeRanges,
+               CallSiteVector&& callSites,
+               CacheableCharsVector&& funcNames,
+               CacheableChars filename,
+               CacheBool loadedFromCache,
+               ProfilingBool profilingEnabled,
+               FuncLabelVector&& funcLabels)
+  : pod(pod),
+    code_(Move(code)),
+    imports_(Move(imports)),
+    exports_(Move(exports)),
+    heapAccesses_(Move(heapAccesses)),
+    codeRanges_(Move(codeRanges)),
+    callSites_(Move(callSites)),
+    funcNames_(Move(funcNames)),
+    filename_(Move(filename)),
+    loadedFromCache_(loadedFromCache),
+    profilingEnabled_(profilingEnabled),
+    funcLabels_(Move(funcLabels))
+{
+    MOZ_ASSERT_IF(!profilingEnabled, funcLabels_.empty());
+    MOZ_ASSERT_IF(profilingEnabled, funcNames_.length() == funcLabels_.length());
+    init();
+}
+
+// Public constructor for compilation.
+Module::Module(CompileArgs args,
+               uint32_t functionBytes,
+               uint32_t codeBytes,
+               uint32_t globalBytes,
+               HeapBool usesHeap,
+               SharedBool sharedHeap,
+               UniqueCodePtr code,
+               ImportVector&& imports,
+               ExportVector&& exports,
+               HeapAccessVector&& heapAccesses,
+               CodeRangeVector&& codeRanges,
+               CallSiteVector&& callSites,
+               CacheableCharsVector&& funcNames,
+               CacheableChars filename)
+  : pod(zeroPod()),
+    code_(Move(code)),
+    imports_(Move(imports)),
+    exports_(Move(exports)),
+    heapAccesses_(Move(heapAccesses)),
+    codeRanges_(Move(codeRanges)),
+    callSites_(Move(callSites)),
+    funcNames_(Move(funcNames)),
+    filename_(Move(filename)),
+    loadedFromCache_(false),
+    profilingEnabled_(false)
+{
+    // Work around MSVC 2013 bug around {} member initialization.
+    const_cast<uint32_t&>(pod.functionBytes_) = functionBytes;
+    const_cast<uint32_t&>(pod.codeBytes_) = codeBytes;
+    const_cast<uint32_t&>(pod.globalBytes_) = globalBytes;
+    const_cast<bool&>(pod.usesHeap_) = bool(usesHeap);
+    const_cast<bool&>(pod.sharedHeap_) = bool(sharedHeap);
+    const_cast<bool&>(pod.usesSignalHandlersForOOB_) = args.useSignalHandlersForOOB;
+    const_cast<bool&>(pod.usesSignalHandlersForInterrupt_) = args.useSignalHandlersForInterrupt;
+
+    MOZ_ASSERT_IF(sharedHeap, usesHeap);
+    init();
+}
+
+Module::~Module()
+{
+    MOZ_ASSERT(!interrupted_);
+
+    if (code_) {
+        for (unsigned i = 0; i < imports_.length(); i++) {
+            ImportExit& exit = importToExit(imports_[i]);
+            if (exit.baselineScript)
+                exit.baselineScript->removeDependentWasmModule(*this, i);
+        }
+    }
+
+    if (prev_)
+        *prev_ = next_;
+    if (next_)
+        next_->prev_ = prev_;
+}
+
+void
+Module::trace(JSTracer* trc)
+{
+    for (const Import& import : imports_) {
+        if (importToExit(import).fun)
+            TraceEdge(trc, &importToExit(import).fun, "wasm function import");
+    }
+
+    if (maybeHeap_)
+        TraceEdge(trc, &maybeHeap_, "wasm buffer");
+}
+
+CompileArgs
+Module::compileArgs() const
+{
+    CompileArgs args;
+    args.useSignalHandlersForOOB = pod.usesSignalHandlersForOOB_;
+    args.useSignalHandlersForInterrupt = pod.usesSignalHandlersForInterrupt_;
+    return args;
+}
+
+bool
+Module::containsFunctionPC(void* pc) const
+{
+    return pc >= code() && pc < (code() + pod.functionBytes_);
+}
+
+bool
+Module::containsCodePC(void* pc) const
+{
+    return pc >= code() && pc < (code() + pod.codeBytes_);
+}
+
+struct CallSiteRetAddrOffset
+{
+    const CallSiteVector& callSites;
+    explicit CallSiteRetAddrOffset(const CallSiteVector& callSites) : callSites(callSites) {}
+    uint32_t operator[](size_t index) const {
+        return callSites[index].returnAddressOffset();
+    }
+};
+
+const CallSite*
+Module::lookupCallSite(void* returnAddress) const
+{
+    uint32_t target = ((uint8_t*)returnAddress) - code();
+    size_t lowerBound = 0;
+    size_t upperBound = callSites_.length();
+
+    size_t match;
+    if (!BinarySearch(CallSiteRetAddrOffset(callSites_), lowerBound, upperBound, target, &match))
+        return nullptr;
+
+    return &callSites_[match];
+}
+
+const CodeRange*
+Module::lookupCodeRange(void* pc) const
+{
+    CodeRange::PC target((uint8_t*)pc - code());
+    size_t lowerBound = 0;
+    size_t upperBound = codeRanges_.length();
+
+    size_t match;
+    if (!BinarySearch(codeRanges_, lowerBound, upperBound, target, &match))
+        return nullptr;
+
+    return &codeRanges_[match];
+}
+
+struct HeapAccessOffset
+{
+    const HeapAccessVector& accesses;
+    explicit HeapAccessOffset(const HeapAccessVector& accesses) : accesses(accesses) {}
+    uintptr_t operator[](size_t index) const {
+        return accesses[index].insnOffset();
+    }
+};
+
+const HeapAccess*
+Module::lookupHeapAccess(void* pc) const
+{
+    MOZ_ASSERT(containsFunctionPC(pc));
+
+    uint32_t target = ((uint8_t*)pc) - code();
+    size_t lowerBound = 0;
+    size_t upperBound = heapAccesses_.length();
+
+    size_t match;
+    if (!BinarySearch(HeapAccessOffset(heapAccesses_), lowerBound, upperBound, target, &match))
+        return nullptr;
+
+    return &heapAccesses_[match];
+}
+
+bool
+Module::staticallyLink(ExclusiveContext* cx, const StaticLinkData& linkData)
+{
+    MOZ_ASSERT(!dynamicallyLinked_);
+    MOZ_ASSERT(!staticallyLinked_);
+    staticallyLinked_ = true;
+
+    // Push a JitContext for benefit of IsCompilingAsmJS and delay flushing
+    // until Module::dynamicallyLink.
+    JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
+    MOZ_ASSERT(IsCompilingAsmJS());
+    AutoFlushICache afc("Module::staticallyLink", /* inhibit = */ true);
+    AutoFlushICache::setRange(uintptr_t(code()), pod.codeBytes_);
+
+    interrupt_ = code() + linkData.pod.interruptOffset;
+    outOfBounds_ = code() + linkData.pod.outOfBoundsOffset;
+
+    for (StaticLinkData::InternalLink link : linkData.internalLinks) {
+        uint8_t* patchAt = code() + link.patchAtOffset;
+        void* target = code() + link.targetOffset;
+        if (profilingEnabled_) {
+            const CodeRange* codeRange = lookupCodeRange(target);
+            if (codeRange && codeRange->isFunction())
+                target = code() + codeRange->funcProfilingEntry();
+        }
+        if (link.isRawPointerPatch())
+            *(void**)(patchAt) = target;
+        else
+            Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
+    }
+
+    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+        const StaticLinkData::OffsetVector& offsets = linkData.symbolicLinks[imm];
+        for (size_t i = 0; i < offsets.length(); i++) {
+            uint8_t* patchAt = code() + offsets[i];
+            void* target = AddressOf(imm, cx);
+            Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
+                                               PatchedImmPtr(target),
+                                               PatchedImmPtr((void*)-1));
+        }
+    }
+
+    for (const StaticLinkData::FuncPtrTable& table : linkData.funcPtrTables) {
+        auto array = reinterpret_cast<void**>(globalData() + table.globalDataOffset);
+        for (size_t i = 0; i < table.elemOffsets.length(); i++) {
+            uint8_t* elem = code() + table.elemOffsets[i];
+            if (profilingEnabled_)
+                elem = code() + lookupCodeRange(elem)->funcProfilingEntry();
+            array[i] = elem;
+        }
+    }
+
+    // CodeRangeVector, CallSiteVector and the code technically have all the
+    // necessary info to do all the updates necessary in setProfilingEnabled.
+    // However, to simplify the finding of function-pointer table sizes and
+    // global-data offsets, save just that information here.
+
+    if (!funcPtrTables_.appendAll(linkData.funcPtrTables)) {
+        ReportOutOfMemory(cx);
+        return false;
+    }
+
+    return true;
+}
+
+bool
+Module::dynamicallyLink(JSContext* cx, Handle<ArrayBufferObjectMaybeShared*> heap,
+                        const AutoVectorRooter<JSFunction*>& imports)
+{
+    // Second (and final) linking phase: bind this module to its actual heap
+    // buffer and imported functions. May only be called once, after
+    // staticallyLink, and makes the code executable.
+    MOZ_ASSERT(staticallyLinked_);
+    MOZ_ASSERT(!dynamicallyLinked_);
+    dynamicallyLinked_ = true;
+
+    // Add this module to the JSRuntime-wide list of dynamically-linked modules.
+    // The list is doubly-linked (prev_ points at the incoming link) so a module
+    // can unlink itself in O(1).
+    next_ = cx->runtime()->linkedWasmModules;
+    prev_ = &cx->runtime()->linkedWasmModules;
+    cx->runtime()->linkedWasmModules = this;
+    if (next_)
+        next_->prev_ = &next_;
+
+    // Push a JitContext for benefit of IsCompilingAsmJS and flush the ICache.
+    // We've been inhibiting flushing up to this point so flush it all now.
+    JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
+    MOZ_ASSERT(IsCompilingAsmJS());
+    AutoFlushICache afc("Module::dynamicallyLink");
+    AutoFlushICache::setRange(uintptr_t(code()), pod.codeBytes_);
+
+    // Initialize imports with actual imported values. Each exit starts out on
+    // the (slow) interpreter path; callImport may later patch in the jit exit.
+    MOZ_ASSERT(imports.length() == imports_.length());
+    for (size_t i = 0; i < imports_.length(); i++) {
+        const Import& import = imports_[i];
+        ImportExit& exit = importToExit(import);
+        exit.code = code() + import.interpExitCodeOffset();
+        exit.fun = imports[i];
+        exit.baselineScript = nullptr;
+    }
+
+    // Specialize code to the actual heap.
+    if (heap)
+        specializeToHeap(heap);
+
+    // See AllocateCode comment above.
+    ExecutableAllocator::makeExecutable(code(), pod.codeBytes_);
+
+    sendCodeRangesToProfiler(cx);
+    return true;
+}
+
+Module*
+Module::nextLinked() const
+{
+    // Next module in the JSRuntime-wide list built up by dynamicallyLink;
+    // null at the end of the list.
+    MOZ_ASSERT(dynamicallyLinked_);
+    return next_;
+}
+
+ArrayBufferObjectMaybeShared*
+Module::maybeBuffer() const
+{
+    // The heap's buffer object, or null if the module has no heap (or the
+    // heap was detached).
+    MOZ_ASSERT(dynamicallyLinked_);
+    return maybeHeap_;
+}
+
+SharedMem<uint8_t*>
+Module::maybeHeap() const
+{
+    // Raw heap base pointer, tagged shared/unshared to match the heap buffer.
+    // The pointer is null when the module has no heap.
+    MOZ_ASSERT(dynamicallyLinked_);
+    MOZ_ASSERT_IF(!pod.usesHeap_, rawHeapPtr() == nullptr);
+    return pod.sharedHeap_
+           ? SharedMem<uint8_t*>::shared(rawHeapPtr())
+           : SharedMem<uint8_t*>::unshared(rawHeapPtr());
+}
+
+size_t
+Module::heapLength() const
+{
+    // Byte length of the current heap buffer; a module with no buffer (no
+    // heap, or a detached heap) reports a zero-length heap.
+    MOZ_ASSERT(dynamicallyLinked_);
+    if (!maybeHeap_)
+        return 0;
+    return maybeHeap_->byteLength();
+}
+
+void
+Module::deoptimizeImportExit(uint32_t importIndex)
+{
+    // Revert an import exit back to the (slow) interpreter stub, undoing the
+    // jit-exit optimization installed by callImport.
+    MOZ_ASSERT(dynamicallyLinked_);
+    const Import& import = imports_[importIndex];
+    ImportExit& exit = importToExit(import);
+    exit.code = code() + import.interpExitCodeOffset();
+    exit.baselineScript = nullptr;
+}
+
+bool
+Module::hasDetachedHeap() const
+{
+    // "Detached" means the module uses a heap but currently has no buffer.
+    MOZ_ASSERT(dynamicallyLinked_);
+    if (!pod.usesHeap_)
+        return false;
+    return !maybeHeap_;
+}
+
+bool
+Module::changeHeap(Handle<ArrayBufferObject*> newHeap, JSContext* cx)
+{
+    // Swap the module's heap for newHeap, re-patching all heap-dependent
+    // machine code. Returns false (without reporting) if the change is not
+    // currently allowed.
+    MOZ_ASSERT(dynamicallyLinked_);
+    MOZ_ASSERT(pod.usesHeap_);
+
+    // Content JS should not be able to run (and change heap) from within an
+    // interrupt callback, but in case it does, fail to change heap. Otherwise,
+    // the heap can change at every single instruction which would prevent
+    // future optimizations like heap-base hoisting.
+    if (interrupted_)
+        return false;
+
+    AutoMutateCode amc(cx, *this, "Module::changeHeap");
+    if (maybeHeap_)
+        despecializeFromHeap(maybeHeap_);
+    specializeToHeap(newHeap);
+    return true;
+}
+
+bool
+Module::detachHeap(JSContext* cx)
+{
+    // Disassociate the module from its heap (e.g. for ArrayBuffer neutering),
+    // un-patching all heap-dependent machine code. Reports on failure.
+    MOZ_ASSERT(dynamicallyLinked_);
+    MOZ_ASSERT(pod.usesHeap_);
+
+    // Content JS should not be able to run (and detach heap) from within an
+    // interrupt callback, but in case it does, fail. Otherwise, the heap can
+    // change at an arbitrary instruction and break the assumption below.
+    if (interrupted_) {
+        JS_ReportError(cx, "attempt to detach from inside interrupt handler");
+        return false;
+    }
+
+    // Even if this->active(), to reach here, the activation must have called
+    // out via an import exit stub. FFI stubs check if heapDatum() is null on
+    // reentry and throw an exception if so.
+    MOZ_ASSERT_IF(active(), activation()->exitReason() == ExitReason::ImportJit ||
+                            activation()->exitReason() == ExitReason::ImportInterp);
+
+    AutoMutateCode amc(cx, *this, "Module::detachHeap");
+    despecializeFromHeap(maybeHeap_);
+    return true;
+}
+
+void
+Module::setInterrupted(bool interrupted)
+{
+    // Records whether we are inside an interrupt callback; changeHeap and
+    // detachHeap refuse to run while this flag is set.
+    MOZ_ASSERT(dynamicallyLinked_);
+    interrupted_ = interrupted;
+}
+
+AsmJSActivation*&
+Module::activation()
+{
+    // Reference to the AsmJSActivation* slot stored in this module's global
+    // data (also read/written directly by jitted code).
+    MOZ_ASSERT(dynamicallyLinked_);
+    return *reinterpret_cast<AsmJSActivation**>(globalData() + ActivationGlobalDataOffset);
+}
+
+Module::EntryFuncPtr
+Module::entryTrampoline(const Export& func) const
+{
+    // Callable function pointer to the entry stub generated for this export.
+    MOZ_ASSERT(dynamicallyLinked_);
+    return JS_DATA_TO_FUNC_PTR(EntryFuncPtr, code() + func.stubOffset());
+}
+
+bool
+Module::callImport(JSContext* cx, uint32_t importIndex, unsigned argc, const Value* argv,
+                   MutableHandleValue rval)
+{
+    // Slow path reached from an import's interpreter exit stub: invoke the
+    // imported JS function and, when safe, patch the exit over to the faster
+    // jit-exit stub for subsequent calls.
+    MOZ_ASSERT(dynamicallyLinked_);
+
+    const Import& import = imports_[importIndex];
+
+    RootedValue fval(cx, ObjectValue(*importToExit(import).fun));
+    if (!Invoke(cx, UndefinedValue(), fval, argc, argv, rval))
+        return false;
+
+    // Invoke ran arbitrary JS, so (re)load the exit state only now.
+    ImportExit& exit = importToExit(import);
+
+    // The exit may already have become optimized.
+    void* jitExitCode = code() + import.jitExitCodeOffset();
+    if (exit.code == jitExitCode)
+        return true;
+
+    // Test if the function is JIT compiled.
+    if (!exit.fun->hasScript())
+        return true;
+    JSScript* script = exit.fun->nonLazyScript();
+    if (!script->hasBaselineScript()) {
+        MOZ_ASSERT(!script->hasIonScript());
+        return true;
+    }
+
+    // Don't enable jit entry when we have a pending ion builder.
+    // Take the interpreter path which will link it and enable
+    // the fast path on the next call.
+    if (script->baselineScript()->hasPendingIonBuilder())
+        return true;
+
+    // Currently we can't rectify arguments. Therefore disable if argc is too low.
+    if (exit.fun->nargs() > import.sig().args().length())
+        return true;
+
+    // Ensure the argument types are included in the argument TypeSets stored in
+    // the TypeScript. This is necessary for Ion, because the import exit will
+    // use the skip-arg-checks entry point.
+    //
+    // Note that the TypeScript is never discarded while the script has a
+    // BaselineScript, so if those checks hold now they must hold at least until
+    // the BaselineScript is discarded and when that happens the import exit is
+    // patched back.
+    if (!TypeScript::ThisTypes(script)->hasType(TypeSet::UndefinedType()))
+        return true;
+    for (uint32_t i = 0; i < exit.fun->nargs(); i++) {
+        TypeSet::Type type = TypeSet::UnknownType();
+        switch (import.sig().args()[i]) {
+          case ValType::I32:   type = TypeSet::Int32Type(); break;
+          case ValType::I64:   MOZ_CRASH("NYI");
+          case ValType::F32:   type = TypeSet::DoubleType(); break;
+          case ValType::F64:   type = TypeSet::DoubleType(); break;
+          case ValType::I32x4: MOZ_CRASH("NYI");
+          case ValType::F32x4: MOZ_CRASH("NYI");
+          case ValType::B32x4: MOZ_CRASH("NYI");
+        }
+        if (!TypeScript::ArgTypes(script, i)->hasType(type))
+            return true;
+    }
+
+    // Let's optimize it!  Register a dependency so the exit is deoptimized if
+    // the BaselineScript is discarded (see deoptimizeImportExit).
+    if (!script->baselineScript()->addDependentWasmModule(cx, *this, importIndex))
+        return false;
+
+    exit.code = jitExitCode;
+    exit.baselineScript = script->baselineScript();
+    return true;
+}
+
+void
+Module::setProfilingEnabled(bool enabled, JSContext* cx)
+{
+    // Toggle profiling mode for the whole module: precompute function labels,
+    // patch call sites and epilogues, and retarget function-pointer tables to
+    // the profiling (or non-profiling) entries. Must not run while the module
+    // is active on the stack.
+    MOZ_ASSERT(dynamicallyLinked_);
+    MOZ_ASSERT(!active());
+
+    if (profilingEnabled_ == enabled)
+        return;
+
+    // When enabled, generate profiling labels for every name in funcNames_
+    // that is the name of some Function CodeRange. This involves malloc() so
+    // do it now since, once we start sampling, we'll be in a signal-handling
+    // context where we cannot malloc.
+    if (enabled) {
+        // NOTE(review): the fallible resize() result is ignored; on OOM the
+        // indexing below would be out of bounds. Confirm callers guarantee
+        // this cannot OOM, or propagate the failure.
+        funcLabels_.resize(funcNames_.length());
+        for (const CodeRange& codeRange : codeRanges_) {
+            if (!codeRange.isFunction())
+                continue;
+            unsigned lineno = codeRange.funcLineNumber();
+            const char* name = funcNames_[codeRange.funcNameIndex()].get();
+            funcLabels_[codeRange.funcNameIndex()] =
+                UniqueChars(JS_smprintf("%s (%s:%u)", name, filename_.get(), lineno));
+        }
+    } else {
+        funcLabels_.clear();
+    }
+
+    // Patch callsites and returns to execute profiling prologues/epilogues.
+    {
+        AutoMutateCode amc(cx, *this, "Module::setProfilingEnabled");
+
+        for (const CallSite& callSite : callSites_)
+            EnableProfilingPrologue(*this, callSite, enabled);
+
+        for (const CodeRange& codeRange : codeRanges_)
+            EnableProfilingEpilogue(*this, codeRange, enabled);
+    }
+
+    // Update the function-pointer tables to point to profiling prologues.
+    for (FuncPtrTable& funcPtrTable : funcPtrTables_) {
+        auto array = reinterpret_cast<void**>(globalData() + funcPtrTable.globalDataOffset);
+        for (size_t i = 0; i < funcPtrTable.numElems; i++) {
+            const CodeRange* codeRange = lookupCodeRange(array[i]);
+            // When disabling, the patch direction reverses: entries currently
+            // point at the profiling entry and must go back to non-profiling.
+            void* from = code() + codeRange->funcNonProfilingEntry();
+            void* to = code() + codeRange->funcProfilingEntry();
+            if (!enabled)
+                Swap(from, to);
+            MOZ_ASSERT(array[i] == from);
+            array[i] = to;
+        }
+    }
+
+    profilingEnabled_ = enabled;
+}
+
+const char*
+Module::profilingLabel(uint32_t funcIndex) const
+{
+    // Label precomputed by setProfilingEnabled(true); only valid while
+    // profiling is enabled.
+    MOZ_ASSERT(dynamicallyLinked_);
+    MOZ_ASSERT(profilingEnabled_);
+    return funcLabels_[funcIndex].get();
+}
+
+size_t
+Module::serializedSize() const
+{
+    // Must account for exactly the fields written by Module::serialize, in
+    // the same order.
+    return sizeof(pod) +
+           pod.codeBytes_ +
+           SerializedVectorSize(imports_) +
+           SerializedVectorSize(exports_) +
+           SerializedPodVectorSize(heapAccesses_) +
+           SerializedPodVectorSize(codeRanges_) +
+           SerializedPodVectorSize(callSites_) +
+           SerializedVectorSize(funcNames_) +
+           filename_.serializedSize();
+}
+
+uint8_t*
+Module::serialize(uint8_t* cursor) const
+{
+    // Write the module to a caller-provided buffer of at least
+    // serializedSize() bytes; field order must stay in sync with
+    // serializedSize and deserialize.
+    MOZ_ASSERT(!profilingEnabled_, "assumed by Module::deserialize");
+
+    cursor = WriteBytes(cursor, &pod, sizeof(pod));
+    cursor = WriteBytes(cursor, code(), pod.codeBytes_);
+    cursor = SerializeVector(cursor, imports_);
+    cursor = SerializeVector(cursor, exports_);
+    cursor = SerializePodVector(cursor, heapAccesses_);
+    cursor = SerializePodVector(cursor, codeRanges_);
+    cursor = SerializePodVector(cursor, callSites_);
+    cursor = SerializeVector(cursor, funcNames_);
+    cursor = filename_.serialize(cursor);
+    return cursor;
+}
+
+/* static */ const uint8_t*
+Module::deserialize(ExclusiveContext* cx, const uint8_t* cursor, UniqueModule* out)
+{
+    // Inverse of Module::serialize: reconstruct a Module from the cache
+    // buffer. Returns the advanced cursor, or null on failure. Field order
+    // must stay in sync with serialize.
+    CacheablePod pod = zeroPod();
+    cursor = ReadBytes(cursor, &pod, sizeof(pod));
+    if (!cursor)
+        return nullptr;
+
+    UniqueCodePtr code = AllocateCode(cx, pod.codeBytes_ + pod.globalBytes_);
+    if (!code)
+        return nullptr;
+
+    // NOTE(review): unlike the pod read above, this cursor is not
+    // null-checked before use — confirm ReadBytes cannot fail here.
+    cursor = ReadBytes(cursor, code.get(), pod.codeBytes_);
+
+    ImportVector imports;
+    cursor = DeserializeVector(cx, cursor, &imports);
+    if (!cursor)
+        return nullptr;
+
+    ExportVector exports;
+    cursor = DeserializeVector(cx, cursor, &exports);
+    if (!cursor)
+        return nullptr;
+
+    HeapAccessVector heapAccesses;
+    cursor = DeserializePodVector(cx, cursor, &heapAccesses);
+    if (!cursor)
+        return nullptr;
+
+    CodeRangeVector codeRanges;
+    cursor = DeserializePodVector(cx, cursor, &codeRanges);
+    if (!cursor)
+        return nullptr;
+
+    CallSiteVector callSites;
+    cursor = DeserializePodVector(cx, cursor, &callSites);
+    if (!cursor)
+        return nullptr;
+
+    CacheableCharsVector funcNames;
+    cursor = DeserializeVector(cx, cursor, &funcNames);
+    if (!cursor)
+        return nullptr;
+
+    CacheableChars filename;
+    cursor = filename.deserialize(cx, cursor);
+    if (!cursor)
+        return nullptr;
+
+    // NOTE(review): on OOM, *out is null while a non-null cursor is still
+    // returned — confirm all callers check *out as well as the cursor.
+    *out = cx->make_unique<Module>(pod,
+                                   Move(code),
+                                   Move(imports),
+                                   Move(exports),
+                                   Move(heapAccesses),
+                                   Move(codeRanges),
+                                   Move(callSites),
+                                   Move(funcNames),
+                                   Move(filename),
+                                   Module::LoadedFromCache,
+                                   Module::ProfilingDisabled,
+                                   FuncLabelVector());
+
+    return cursor;
+}
+
+Module::UniqueModule
+Module::clone(JSContext* cx, const StaticLinkData& linkData) const
+{
+    // Copy this (dynamically-linked) module into a fresh Module that is
+    // statically linked but not yet dynamically linked, so the clone can be
+    // independently dynamically linked. Returns null on failure.
+    MOZ_ASSERT(dynamicallyLinked_);
+
+    UniqueCodePtr code = AllocateCode(cx, totalBytes());
+    if (!code)
+        return nullptr;
+
+    memcpy(code.get(), this->code(), pod.codeBytes_);
+
+#ifdef DEBUG
+    // Put the symbolic links back to -1 so PatchDataWithValueCheck assertions
+    // in Module::staticallyLink are valid.
+    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+        void* callee = AddressOf(imm, cx);
+        const StaticLinkData::OffsetVector& offsets = linkData.symbolicLinks[imm];
+        for (uint32_t offset : offsets) {
+            jit::Assembler::PatchDataWithValueCheck(jit::CodeLocationLabel(code.get() + offset),
+                                                    jit::PatchedImmPtr((void*)-1),
+                                                    jit::PatchedImmPtr(callee));
+        }
+    }
+#endif
+
+    // Deep-copy all the metadata vectors; each Clone* reports on failure.
+    ImportVector imports;
+    if (!CloneVector(cx, imports_, &imports))
+        return nullptr;
+
+    ExportVector exports;
+    if (!CloneVector(cx, exports_, &exports))
+        return nullptr;
+
+    HeapAccessVector heapAccesses;
+    if (!ClonePodVector(cx, heapAccesses_, &heapAccesses))
+        return nullptr;
+
+    CodeRangeVector codeRanges;
+    if (!ClonePodVector(cx, codeRanges_, &codeRanges))
+        return nullptr;
+
+    CallSiteVector callSites;
+    if (!ClonePodVector(cx, callSites_, &callSites))
+        return nullptr;
+
+    CacheableCharsVector funcNames;
+    if (!CloneVector(cx, funcNames_, &funcNames))
+        return nullptr;
+
+    CacheableChars filename;
+    if (!filename_.clone(cx, &filename))
+        return nullptr;
+
+    FuncLabelVector funcLabels;
+    if (!CloneVector(cx, funcLabels_, &funcLabels))
+        return nullptr;
+
+    // Must not GC between Module allocation and (successful) return.
+    auto out = cx->make_unique<Module>(pod,
+                                       Move(code),
+                                       Move(imports),
+                                       Move(exports),
+                                       Move(heapAccesses),
+                                       Move(codeRanges),
+                                       Move(callSites),
+                                       Move(funcNames),
+                                       Move(filename),
+                                       CacheBool::NotLoadedFromCache,
+                                       ProfilingBool(profilingEnabled_),
+                                       Move(funcLabels));
+    if (!out)
+        return nullptr;
+
+    // If the copied machine code has been specialized to the heap, it must be
+    // unspecialized in the copy.
+    if (maybeHeap_)
+        out->despecializeFromHeap(maybeHeap_);
+
+    if (!out->staticallyLink(cx, linkData))
+        return nullptr;
+
+    return Move(out);
+}
+
+void
+Module::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* asmJSModuleCode, size_t* asmJSModuleData)
+{
+    // Memory-reporting hook: accumulate this module's code and malloc'd data
+    // sizes into the given counters.
+    // NOTE(review): funcLabels_ and filename_ are not measured here —
+    // confirm that omission is intentional.
+    *asmJSModuleCode += pod.codeBytes_;
+    *asmJSModuleData += mallocSizeOf(this) +
+                        pod.globalBytes_ +
+                        SizeOfVectorExcludingThis(imports_, mallocSizeOf) +
+                        SizeOfVectorExcludingThis(exports_, mallocSizeOf) +
+                        heapAccesses_.sizeOfExcludingThis(mallocSizeOf) +
+                        codeRanges_.sizeOfExcludingThis(mallocSizeOf) +
+                        callSites_.sizeOfExcludingThis(mallocSizeOf) +
+                        funcNames_.sizeOfExcludingThis(mallocSizeOf) +
+                        funcPtrTables_.sizeOfExcludingThis(mallocSizeOf);
+}
+
new file mode 100644
--- /dev/null
+++ b/js/src/asmjs/WasmModule.h
@@ -0,0 +1,569 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_module_h
+#define wasm_module_h
+
+#include "asmjs/WasmTypes.h"
+#include "gc/Barrier.h"
+#include "vm/MallocProvider.h"
+
+namespace js {
+
+class AsmJSActivation;
+namespace jit { struct BaselineScript; }
+
+namespace wasm {
+
+// A wasm Module and everything it contains must support serialization,
+// deserialization and cloning. Some data can be simply copied as raw bytes and,
+// as a convention, is stored in an inline CacheablePod struct. Everything else
+// should implement the below methods which are called recursively by the
+// containing Module. The implementation of all these methods are grouped
+// together in WasmSerialize.cpp.
+
+// Declares the serialization/cloning/measurement member functions described
+// in the comment above; definitions live in WasmSerialize.cpp.
+#define WASM_DECLARE_SERIALIZABLE(Type)                                         \
+    size_t serializedSize() const;                                              \
+    uint8_t* serialize(uint8_t* cursor) const;                                  \
+    const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);    \
+    bool clone(JSContext* cx, Type* out) const;                                 \
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+// The StaticLinkData contains all the metadata necessary to perform
+// Module::staticallyLink but is not necessary afterwards.
+
+struct StaticLinkData
+{
+    // A patch site within the code segment whose target is another location
+    // in the same code segment.
+    struct InternalLink {
+        enum Kind {
+            RawPointer,
+            CodeLabel,
+            InstructionImmediate
+        };
+        uint32_t patchAtOffset;  // code offset of the patch site
+        uint32_t targetOffset;   // code offset the patch should point at
+
+        InternalLink() = default;
+        explicit InternalLink(Kind kind);
+        bool isRawPointerPatch();
+    };
+    typedef Vector<InternalLink, 0, SystemAllocPolicy> InternalLinkVector;
+
+    typedef Vector<uint32_t, 0, SystemAllocPolicy> OffsetVector;
+    // For each SymbolicAddress, the code offsets that must be patched with
+    // the symbol's runtime address (see Module::staticallyLink).
+    struct SymbolicLinkArray : mozilla::EnumeratedArray<SymbolicAddress,
+                                                        SymbolicAddress::Limit,
+                                                        OffsetVector> {
+        WASM_DECLARE_SERIALIZABLE(SymbolicLinkArray)
+    };
+
+    // A function-pointer table: where the table lives in global data plus the
+    // code offsets of the functions its elements point at.
+    struct FuncPtrTable {
+        uint32_t globalDataOffset;
+        OffsetVector elemOffsets;
+        explicit FuncPtrTable(uint32_t globalDataOffset) : globalDataOffset(globalDataOffset) {}
+        FuncPtrTable() = default;
+        FuncPtrTable(FuncPtrTable&& rhs)
+          : globalDataOffset(rhs.globalDataOffset), elemOffsets(Move(rhs.elemOffsets))
+        {}
+        WASM_DECLARE_SERIALIZABLE(FuncPtrTable)
+    };
+    typedef Vector<FuncPtrTable, 0, SystemAllocPolicy> FuncPtrTableVector;
+
+    struct CacheablePod {
+        // Code offsets resolved to absolute addresses by Module::staticallyLink.
+        uint32_t        interruptOffset;
+        uint32_t        outOfBoundsOffset;
+    } pod;
+    InternalLinkVector  internalLinks;
+    SymbolicLinkArray   symbolicLinks;
+    FuncPtrTableVector  funcPtrTables;
+
+    WASM_DECLARE_SERIALIZABLE(StaticLinkData)
+};
+
+typedef UniquePtr<StaticLinkData, JS::DeletePolicy<StaticLinkData>> UniqueStaticLinkData;
+
+// An Export describes an export from a wasm module. Currently only functions
+// can be exported.
+
+class Export
+{
+    MallocSig sig_;
+    struct CacheablePod {
+        uint32_t funcIndex_;   // index of the exported function
+        uint32_t stubOffset_;  // code offset of the entry stub; set once later
+    } pod;
+
+  public:
+    Export() = default;
+    Export(MallocSig&& sig, uint32_t funcIndex)
+      : sig_(Move(sig))
+    {
+        pod.funcIndex_ = funcIndex;
+        // Sentinel until initStubOffset is called.
+        pod.stubOffset_ = UINT32_MAX;
+    }
+    Export(Export&& rhs)
+      : sig_(Move(rhs.sig_)),
+        pod(rhs.pod)
+    {}
+
+    // May be called exactly once, after the entry stub has been generated.
+    void initStubOffset(uint32_t stubOffset) {
+        MOZ_ASSERT(pod.stubOffset_ == UINT32_MAX);
+        pod.stubOffset_ = stubOffset;
+    }
+
+    uint32_t funcIndex() const {
+        return pod.funcIndex_;
+    }
+    uint32_t stubOffset() const {
+        return pod.stubOffset_;
+    }
+    const MallocSig& sig() const {
+        return sig_;
+    }
+
+    WASM_DECLARE_SERIALIZABLE(Export)
+};
+
+typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
+
+// An Import describes a wasm module import. Currently, only functions can be
+// imported in wasm and a function import also includes the signature used
+// within the module to call that import. An import is slightly different than
+// an asm.js FFI function: a single asm.js FFI function can be called with many
+// different signatures. When compiled to wasm, each unique FFI function paired
+// with signature generates a wasm import.
+
+class Import
+{
+    MallocSig sig_;
+    struct CacheablePod {
+        uint32_t exitGlobalDataOffset_;  // where this import's exit lives in global data
+        uint32_t interpExitCodeOffset_;  // code offset of the (slow) interpreter exit stub
+        uint32_t jitExitCodeOffset_;     // code offset of the (fast) jit exit stub
+    } pod;
+
+  public:
+    // Default ctor leaves pod uninitialized — presumably only used as a
+    // deserialization target; confirm before other use.
+    Import() {}
+    Import(Import&& rhs) : sig_(Move(rhs.sig_)), pod(rhs.pod) {}
+    Import(MallocSig&& sig, uint32_t exitGlobalDataOffset)
+      : sig_(Move(sig))
+    {
+        pod.exitGlobalDataOffset_ = exitGlobalDataOffset;
+        // Zero means "not yet initialized"; see the init/accessor asserts.
+        pod.interpExitCodeOffset_ = 0;
+        pod.jitExitCodeOffset_ = 0;
+    }
+
+    void initInterpExitOffset(uint32_t off) {
+        MOZ_ASSERT(!pod.interpExitCodeOffset_);
+        pod.interpExitCodeOffset_ = off;
+    }
+    void initJitExitOffset(uint32_t off) {
+        MOZ_ASSERT(!pod.jitExitCodeOffset_);
+        pod.jitExitCodeOffset_ = off;
+    }
+
+    const MallocSig& sig() const {
+        return sig_;
+    }
+    uint32_t exitGlobalDataOffset() const {
+        return pod.exitGlobalDataOffset_;
+    }
+    uint32_t interpExitCodeOffset() const {
+        MOZ_ASSERT(pod.interpExitCodeOffset_);
+        return pod.interpExitCodeOffset_;
+    }
+    uint32_t jitExitCodeOffset() const {
+        MOZ_ASSERT(pod.jitExitCodeOffset_);
+        return pod.jitExitCodeOffset_;
+    }
+
+    WASM_DECLARE_SERIALIZABLE(Import)
+};
+
+typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
+
+// A CodeRange describes a single contiguous range of code within a wasm
+// module's code segment. A CodeRange describes what the code does and, for
+// function bodies, the name and source coordinates of the function.
+
+class CodeRange
+{
+    // Function-only metadata (see the func* accessors below).
+    uint32_t nameIndex_;
+    uint32_t lineNumber_;
+    // Code offsets: begin_ <= profilingReturn_ < end_ (asserted elsewhere,
+    // presumably in assertValid — confirm).
+    uint32_t begin_;
+    uint32_t profilingReturn_;
+    uint32_t end_;
+    union {
+        struct {
+            uint8_t kind_;
+            // Small deltas from begin_/profilingReturn_; see the accessors.
+            uint8_t beginToEntry_;
+            uint8_t profilingJumpToProfilingReturn_;
+            uint8_t profilingEpilogueToProfilingReturn_;
+        } func;
+        uint8_t kind_;
+    } u;
+
+    void assertValid();
+
+  public:
+    enum Kind { Function, Entry, ImportJitExit, ImportInterpExit, Interrupt, Inline };
+
+    CodeRange() = default;
+    CodeRange(Kind kind, Offsets offsets);
+    CodeRange(Kind kind, ProfilingOffsets offsets);
+    CodeRange(uint32_t nameIndex, uint32_t lineNumber, FuncOffsets offsets);
+
+    // All CodeRanges have a begin and end.
+
+    uint32_t begin() const {
+        return begin_;
+    }
+    uint32_t end() const {
+        return end_;
+    }
+
+    // Other fields are only available for certain CodeRange::Kinds.
+
+    Kind kind() const { return Kind(u.kind_); }
+
+    // Every CodeRange except entry and inline stubs has a profiling return
+    // which is used for asynchronous profiling to determine the frame pointer.
+
+    uint32_t profilingReturn() const {
+        MOZ_ASSERT(kind() != Entry && kind() != Inline);
+        return profilingReturn_;
+    }
+
+    // Functions have offsets which allow patching to selectively execute
+    // profiling prologues/epilogues.
+
+    bool isFunction() const {
+        return kind() == Function;
+    }
+    uint32_t funcProfilingEntry() const {
+        MOZ_ASSERT(isFunction());
+        return begin();
+    }
+    uint32_t funcNonProfilingEntry() const {
+        MOZ_ASSERT(isFunction());
+        return begin_ + u.func.beginToEntry_;
+    }
+    // (naming nit: breaks the "func" prefix used by the sibling accessors)
+    uint32_t functionProfilingJump() const {
+        MOZ_ASSERT(isFunction());
+        return profilingReturn_ - u.func.profilingJumpToProfilingReturn_;
+    }
+    uint32_t funcProfilingEpilogue() const {
+        MOZ_ASSERT(isFunction());
+        return profilingReturn_ - u.func.profilingEpilogueToProfilingReturn_;
+    }
+    uint32_t funcNameIndex() const {
+        MOZ_ASSERT(isFunction());
+        return nameIndex_;
+    }
+    uint32_t funcLineNumber() const {
+        MOZ_ASSERT(isFunction());
+        return lineNumber_;
+    }
+
+    // A sorted array of CodeRanges can be looked up via BinarySearch and PC.
+
+    struct PC {
+        size_t offset;
+        explicit PC(size_t offset) : offset(offset) {}
+        // A PC "equals" a CodeRange when it falls inside [begin, end).
+        bool operator==(const CodeRange& rhs) const {
+            return offset >= rhs.begin() && offset < rhs.end();
+        }
+        bool operator<(const CodeRange& rhs) const {
+            return offset < rhs.begin();
+        }
+    };
+};
+
+typedef Vector<CodeRange, 0, SystemAllocPolicy> CodeRangeVector;
+
+// A CacheableChars is used to cacheably store UniqueChars in Module.
+
+struct CacheableChars : public UniqueChars
+{
+    // Thin UniqueChars wrapper that adds the serialization interface so
+    // strings can be stored in cacheable Module members.
+    explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
+    MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
+    CacheableChars() = default;
+    CacheableChars(CacheableChars&& rhs) : UniqueChars(Move(rhs)) {}
+    void operator=(CacheableChars&& rhs) { UniqueChars& base = *this; base = Move(rhs); }
+    WASM_DECLARE_SERIALIZABLE(CacheableChars)
+};
+typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
+
+// A UniqueCodePtr owns allocated executable code. Code passed to the Module
+// constructor must be allocated via AllocateCode.
+
+class CodeDeleter
+{
+    // Size of the allocation, remembered at allocation time because the
+    // deallocation path needs it (operator() is defined out of line).
+    uint32_t bytes_;
+  public:
+    explicit CodeDeleter(uint32_t bytes) : bytes_(bytes) {}
+    void operator()(uint8_t* p);
+};
+typedef JS::UniquePtr<uint8_t, CodeDeleter> UniqueCodePtr;
+
+UniqueCodePtr
+AllocateCode(ExclusiveContext* cx, size_t bytes);
+
+// Module represents a compiled WebAssembly module which lives until the last
+// reference to any exported functions is dropped. Modules must be wrapped by a
+// rooted JSObject immediately after creation so that Module::trace() is called
+// during GC. Modules are created after compilation completes and start in a
+// fully unlinked state. After creation, a module must be first statically
+// linked and then dynamically linked:
+//
+//  - Static linking patches code or global data that relies on absolute
+//    addresses. Static linking should happen after a module is serialized into
+//    a cache file so that the cached code is stored unlinked and ready to be
+//    statically linked after deserialization.
+//
+//  - Dynamic linking patches code or global data that relies on the address of
+//    the heap and imports of a module. A module may only be dynamically linked
+//    once. However, a dynamically-linked module may be cloned so that the clone
+//    can be independently dynamically linked.
+//
+// Once fully dynamically linked, a module can have its exports invoked (via
+// entryTrampoline). While executing, profiling may be enabled/disabled (when
+// the Module is not active()) via setProfilingEnabled(). When profiling is
+// enabled, a module's frames will be visible to wasm::ProfilingFrameIterator.
+
+class Module
+{
+    struct ImportExit {
+        void* code;
+        jit::BaselineScript* baselineScript;
+        HeapPtrFunction fun;
+        static_assert(sizeof(HeapPtrFunction) == sizeof(void*), "for JIT access");
+    };
+    struct FuncPtrTable {
+        uint32_t globalDataOffset;
+        uint32_t numElems;
+        explicit FuncPtrTable(const StaticLinkData::FuncPtrTable& table)
+          : globalDataOffset(table.globalDataOffset),
+            numElems(table.elemOffsets.length())
+        {}
+    };
+    typedef Vector<FuncPtrTable, 0, SystemAllocPolicy> FuncPtrTableVector;
+    typedef Vector<CacheableChars, 0, SystemAllocPolicy> FuncLabelVector;
+    typedef RelocatablePtrArrayBufferObjectMaybeShared BufferPtr;
+
+    // Initialized when constructed:
+    struct CacheablePod {
+        const uint32_t         functionBytes_;
+        const uint32_t         codeBytes_;
+        const uint32_t         globalBytes_;
+        const bool             usesHeap_;
+        const bool             sharedHeap_;
+        const bool             usesSignalHandlersForOOB_;
+        const bool             usesSignalHandlersForInterrupt_;
+    } pod;
+    const UniqueCodePtr        code_;
+    const ImportVector         imports_;
+    const ExportVector         exports_;
+    const HeapAccessVector     heapAccesses_;
+    const CodeRangeVector      codeRanges_;
+    const CallSiteVector       callSites_;
+    const CacheableCharsVector funcNames_;
+    const CacheableChars       filename_;
+    const bool                 loadedFromCache_;
+
+    // Initialized during staticallyLink:
+    bool                       staticallyLinked_;
+    uint8_t*                   interrupt_;
+    uint8_t*                   outOfBounds_;
+    FuncPtrTableVector         funcPtrTables_;
+
+    // Initialized during dynamicallyLink:
+    bool                       dynamicallyLinked_;
+    BufferPtr                  maybeHeap_;
+    Module**                   prev_;
+    Module*                    next_;