Bug 1229642 - Odin: hoist some things into Wasm.h and simplify symbolic addresses (r=bbouvier)
author     Luke Wagner <luke@mozilla.com>
date       Wed, 02 Dec 2015 21:40:09 -0600
changeset  309521  a5840fb6456818b48518bce82028f2304baeb3f4
parent     309520  429cd1f9006d3327aa54e526c7d1ef1e7b0bee0f
child      309522  a23147905fb3e8138fc0395f812e0311e3c83743
push id    5513
push user  raliiev@mozilla.com
push date  Mon, 25 Jan 2016 13:55:34 +0000
reviewers  bbouvier
bugs       1229642
milestone  45.0a1
Bug 1229642 - Odin: hoist some things into Wasm.h and simplify symbolic addresses (r=bbouvier)
js/public/ProfilingFrameIterator.h
js/src/asmjs/AsmJSFrameIterator.cpp
js/src/asmjs/AsmJSFrameIterator.h
js/src/asmjs/AsmJSModule.cpp
js/src/asmjs/AsmJSModule.h
js/src/asmjs/AsmJSSignalHandlers.cpp
js/src/asmjs/Wasm.h
js/src/asmjs/WasmIonCompile.cpp
js/src/asmjs/WasmStubs.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MacroAssembler-inl.h
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/arm64/MacroAssembler-arm64.cpp
js/src/jit/arm64/MacroAssembler-arm64.h
js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
js/src/jit/mips32/MacroAssembler-mips32.cpp
js/src/jit/mips32/MacroAssembler-mips32.h
js/src/jit/mips64/MacroAssembler-mips64.cpp
js/src/jit/mips64/MacroAssembler-mips64.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/shared/LIR-shared.h
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/MacroAssembler-x64.h
js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/MacroAssembler-x86.h
js/src/vm/Stack.cpp
js/src/vm/Stack.h
--- a/js/public/ProfilingFrameIterator.h
+++ b/js/public/ProfilingFrameIterator.h
@@ -42,17 +42,17 @@ class JS_PUBLIC_API(ProfilingFrameIterat
     uint32_t sampleBufferGen_;
     js::Activation* activation_;
 
     // When moving past a JitActivation, we need to save the prevJitTop
     // from it to use as the exit-frame pointer when the next caller jit
     // activation (if any) comes around.
     void* savedPrevJitTop_;
 
-    static const unsigned StorageSpace = 6 * sizeof(void*);
+    static const unsigned StorageSpace = 8 * sizeof(void*);
     mozilla::AlignedStorage<StorageSpace> storage_;
     js::AsmJSProfilingFrameIterator& asmJSIter() {
         MOZ_ASSERT(!done());
         MOZ_ASSERT(isAsmJS());
         return *reinterpret_cast<js::AsmJSProfilingFrameIterator*>(storage_.addr());
     }
     const js::AsmJSProfilingFrameIterator& asmJSIter() const {
         MOZ_ASSERT(!done());
--- a/js/src/asmjs/AsmJSFrameIterator.cpp
+++ b/js/src/asmjs/AsmJSFrameIterator.cpp
@@ -161,17 +161,17 @@ PushRetAddr(MacroAssembler& masm)
     // The x86/x64 call instruction pushes the return address.
 #endif
 }
 
 // Generate a prologue that maintains AsmJSActivation::fp as the virtual frame
 // pointer so that AsmJSProfilingFrameIterator can walk the stack at any pc in
 // generated code.
 static void
-GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, AsmJSExit::Reason reason,
+GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
                           AsmJSProfilingOffsets* offsets, Label* maybeEntry = nullptr)
 {
 #if !defined (JS_CODEGEN_ARM)
     Register scratch = ABIArgGenerator::NonArg_VolatileReg;
 #else
     // Unfortunately, there are no unused non-arg volatile registers on ARM --
     // the MacroAssembler claims both lr and ip -- so we use the second scratch
     // register (lr) and be very careful not to call any methods that use it.
@@ -199,45 +199,49 @@ GenerateProfilingPrologue(MacroAssembler
         masm.loadAsmJSActivation(scratch);
         masm.push(Address(scratch, AsmJSActivation::offsetOfFP()));
         MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - offsets->begin);
 
         masm.storePtr(masm.getStackPointer(), Address(scratch, AsmJSActivation::offsetOfFP()));
         MOZ_ASSERT_IF(!masm.oom(), StoredFP == masm.currentOffset() - offsets->begin);
     }
 
-    if (reason != AsmJSExit::None)
-        masm.store32_NoSecondScratch(Imm32(reason), Address(scratch, AsmJSActivation::offsetOfExitReason()));
+    if (reason.kind() != ExitReason::None) {
+        masm.store32_NoSecondScratch(Imm32(reason.pack()),
+                                     Address(scratch, AsmJSActivation::offsetOfPackedExitReason()));
+    }
 
 #if defined(JS_CODEGEN_ARM)
     masm.setSecondScratchReg(lr);
 #endif
 
     if (framePushed)
         masm.subFromStackPtr(Imm32(framePushed));
 }
 
 // Generate the inverse of GenerateProfilingPrologue.
 static void
-GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, AsmJSExit::Reason reason,
+GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
                           AsmJSProfilingOffsets* offsets)
 {
     Register scratch = ABIArgGenerator::NonReturn_VolatileReg0;
 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
     defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
     Register scratch2 = ABIArgGenerator::NonReturn_VolatileReg1;
 #endif
 
     if (framePushed)
         masm.addToStackPtr(Imm32(framePushed));
 
     masm.loadAsmJSActivation(scratch);
 
-    if (reason != AsmJSExit::None)
-        masm.store32(Imm32(AsmJSExit::None), Address(scratch, AsmJSActivation::offsetOfExitReason()));
+    if (reason.kind() != ExitReason::None) {
+        masm.store32(Imm32(ExitReason::None),
+                     Address(scratch, AsmJSActivation::offsetOfPackedExitReason()));
+    }
 
     // AsmJSProfilingFrameIterator assumes fixed offsets of the last few
     // instructions from profilingReturn, so AutoForbidPools to ensure that
     // unintended instructions are not automatically inserted.
     {
 #if defined(JS_CODEGEN_ARM)
         AutoForbidPools afp(&masm, /* number of instructions in scope = */ 4);
 #endif
@@ -277,17 +281,17 @@ js::GenerateAsmJSFunctionPrologue(MacroA
 #if defined(JS_CODEGEN_ARM)
     // Flush pending pools so they do not get dumped between the 'begin' and
     // 'entry' offsets since the difference must be less than UINT8_MAX.
     masm.flushBuffer();
 #endif
 
     masm.haltingAlign(CodeAlignment);
 
-    GenerateProfilingPrologue(masm, framePushed, AsmJSExit::None, offsets);
+    GenerateProfilingPrologue(masm, framePushed, ExitReason::None, offsets);
     Label body;
     masm.jump(&body);
 
     // Generate normal prologue:
     masm.haltingAlign(CodeAlignment);
     offsets->nonProfilingEntry = masm.currentOffset();
     PushRetAddr(masm);
     masm.subFromStackPtr(Imm32(framePushed + AsmJSFrameBytesAfterReturnAddress));
@@ -348,47 +352,47 @@ js::GenerateAsmJSFunctionEpilogue(MacroA
 
     // Normal epilogue:
     masm.addToStackPtr(Imm32(framePushed + AsmJSFrameBytesAfterReturnAddress));
     masm.ret();
     masm.setFramePushed(0);
 
     // Profiling epilogue:
     offsets->profilingEpilogue = masm.currentOffset();
-    GenerateProfilingEpilogue(masm, framePushed, AsmJSExit::None, offsets);
+    GenerateProfilingEpilogue(masm, framePushed, ExitReason::None, offsets);
 }
 
 void
-js::GenerateAsmJSExitPrologue(MacroAssembler& masm, unsigned framePushed, AsmJSExit::Reason reason,
+js::GenerateAsmJSExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
                               AsmJSProfilingOffsets* offsets, Label* maybeEntry)
 {
     masm.haltingAlign(CodeAlignment);
     GenerateProfilingPrologue(masm, framePushed, reason, offsets, maybeEntry);
     masm.setFramePushed(framePushed);
 }
 
 void
-js::GenerateAsmJSExitEpilogue(MacroAssembler& masm, unsigned framePushed, AsmJSExit::Reason reason,
+js::GenerateAsmJSExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
                               AsmJSProfilingOffsets* offsets)
 {
     // Inverse of GenerateAsmJSExitPrologue:
     MOZ_ASSERT(masm.framePushed() == framePushed);
     GenerateProfilingEpilogue(masm, framePushed, reason, offsets);
     masm.setFramePushed(0);
 }
 
 /*****************************************************************************/
 // AsmJSProfilingFrameIterator
 
 AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation)
   : module_(&activation.module()),
     callerFP_(nullptr),
     callerPC_(nullptr),
     stackAddress_(nullptr),
-    exitReason_(AsmJSExit::None),
+    exitReason_(ExitReason::None),
     codeRange_(nullptr)
 {
     // If profiling hasn't been enabled for this module, then CallerFPFromFP
     // will be trash, so ignore the entire activation. In practice, this only
     // happens if profiling is enabled while module->active() (in this case,
     // profiling will be enabled when the module becomes inactive and gets
     // called again).
     if (!module_->profilingEnabled()) {
@@ -470,30 +474,30 @@ AsmJSProfilingFrameIterator::initFromFP(
     // Despite the above reasoning for skipping a frame, we do actually want FFI
     // trampolines and interrupts to show up in the profile (so they can
     // accumulate self time and explain performance faults). To do this, an
     // "exit reason" is stored on all the paths leaving asm.js and this iterator
     // treats this exit reason as its own frame. If we have exited asm.js code
     // without setting an exit reason, the reason will be None and this means
     // the code was asynchronously interrupted.
     exitReason_ = activation.exitReason();
-    if (exitReason_ == AsmJSExit::None)
-        exitReason_ = AsmJSExit::Interrupt;
+    if (exitReason_.kind() == ExitReason::None)
+        exitReason_ = ExitReason::Interrupt;
 
     MOZ_ASSERT(!done());
 }
 
 typedef JS::ProfilingFrameIterator::RegisterState RegisterState;
 
 AsmJSProfilingFrameIterator::AsmJSProfilingFrameIterator(const AsmJSActivation& activation,
                                                          const RegisterState& state)
   : module_(&activation.module()),
     callerFP_(nullptr),
     callerPC_(nullptr),
-    exitReason_(AsmJSExit::None),
+    exitReason_(ExitReason::None),
     codeRange_(nullptr)
 {
     // If profiling hasn't been enabled for this module, then CallerFPFromFP
     // will be trash, so ignore the entire activation. In practice, this only
     // happens if profiling is enabled while module->active() (in this case,
     // profiling will be enabled when the module becomes inactive and gets
     // called again).
     if (!module_->profilingEnabled()) {
@@ -598,19 +602,19 @@ AsmJSProfilingFrameIterator::AsmJSProfil
     codeRange_ = codeRange;
     stackAddress_ = state.sp;
     MOZ_ASSERT(!done());
 }
 
 void
 AsmJSProfilingFrameIterator::operator++()
 {
-    if (exitReason_ != AsmJSExit::None) {
+    if (exitReason_.kind() != ExitReason::None) {
         MOZ_ASSERT(codeRange_);
-        exitReason_ = AsmJSExit::None;
+        exitReason_ = ExitReason::None;
         MOZ_ASSERT(!done());
         return;
     }
 
     if (!callerPC_) {
         MOZ_ASSERT(!callerFP_);
         codeRange_ = nullptr;
         MOZ_ASSERT(done());
@@ -639,86 +643,86 @@ AsmJSProfilingFrameIterator::operator++(
         callerFP_ = CallerFPFromFP(callerFP_);
         break;
     }
 
     MOZ_ASSERT(!done());
 }
 
 static const char*
-BuiltinToName(AsmJSExit::BuiltinKind builtin)
+BuiltinToName(Builtin builtin)
 {
     // Note: this label is regexp-matched by
     // devtools/client/profiler/cleopatra/js/parserWorker.js.
 
     switch (builtin) {
-      case AsmJSExit::Builtin_ToInt32:   return "ToInt32 (in asm.js)";
+      case Builtin::ToInt32:         return "ToInt32 (in asm.js)";
 #if defined(JS_CODEGEN_ARM)
-      case AsmJSExit::Builtin_IDivMod:   return "software idivmod (in asm.js)";
-      case AsmJSExit::Builtin_UDivMod:   return "software uidivmod (in asm.js)";
-      case AsmJSExit::Builtin_AtomicCmpXchg:  return "Atomics.compareExchange (in asm.js)";
-      case AsmJSExit::Builtin_AtomicXchg:     return "Atomics.exchange (in asm.js)";
-      case AsmJSExit::Builtin_AtomicFetchAdd: return "Atomics.add (in asm.js)";
-      case AsmJSExit::Builtin_AtomicFetchSub: return "Atomics.sub (in asm.js)";
-      case AsmJSExit::Builtin_AtomicFetchAnd: return "Atomics.and (in asm.js)";
-      case AsmJSExit::Builtin_AtomicFetchOr:  return "Atomics.or (in asm.js)";
-      case AsmJSExit::Builtin_AtomicFetchXor: return "Atomics.xor (in asm.js)";
+      case Builtin::aeabi_idivmod:   return "software idivmod (in asm.js)";
+      case Builtin::aeabi_uidivmod:  return "software uidivmod (in asm.js)";
+      case Builtin::AtomicCmpXchg:   return "Atomics.compareExchange (in asm.js)";
+      case Builtin::AtomicXchg:      return "Atomics.exchange (in asm.js)";
+      case Builtin::AtomicFetchAdd:  return "Atomics.add (in asm.js)";
+      case Builtin::AtomicFetchSub:  return "Atomics.sub (in asm.js)";
+      case Builtin::AtomicFetchAnd:  return "Atomics.and (in asm.js)";
+      case Builtin::AtomicFetchOr:   return "Atomics.or (in asm.js)";
+      case Builtin::AtomicFetchXor:  return "Atomics.xor (in asm.js)";
 #endif
-      case AsmJSExit::Builtin_ModD:      return "fmod (in asm.js)";
-      case AsmJSExit::Builtin_SinD:      return "Math.sin (in asm.js)";
-      case AsmJSExit::Builtin_CosD:      return "Math.cos (in asm.js)";
-      case AsmJSExit::Builtin_TanD:      return "Math.tan (in asm.js)";
-      case AsmJSExit::Builtin_ASinD:     return "Math.asin (in asm.js)";
-      case AsmJSExit::Builtin_ACosD:     return "Math.acos (in asm.js)";
-      case AsmJSExit::Builtin_ATanD:     return "Math.atan (in asm.js)";
-      case AsmJSExit::Builtin_CeilD:
-      case AsmJSExit::Builtin_CeilF:     return "Math.ceil (in asm.js)";
-      case AsmJSExit::Builtin_FloorD:
-      case AsmJSExit::Builtin_FloorF:    return "Math.floor (in asm.js)";
-      case AsmJSExit::Builtin_ExpD:      return "Math.exp (in asm.js)";
-      case AsmJSExit::Builtin_LogD:      return "Math.log (in asm.js)";
-      case AsmJSExit::Builtin_PowD:      return "Math.pow (in asm.js)";
-      case AsmJSExit::Builtin_ATan2D:    return "Math.atan2 (in asm.js)";
-      case AsmJSExit::Builtin_Limit:     break;
+      case Builtin::ModD:    return "fmod (in asm.js)";
+      case Builtin::SinD:    return "Math.sin (in asm.js)";
+      case Builtin::CosD:    return "Math.cos (in asm.js)";
+      case Builtin::TanD:    return "Math.tan (in asm.js)";
+      case Builtin::ASinD:   return "Math.asin (in asm.js)";
+      case Builtin::ACosD:   return "Math.acos (in asm.js)";
+      case Builtin::ATanD:   return "Math.atan (in asm.js)";
+      case Builtin::CeilD:
+      case Builtin::CeilF:   return "Math.ceil (in asm.js)";
+      case Builtin::FloorD:
+      case Builtin::FloorF:  return "Math.floor (in asm.js)";
+      case Builtin::ExpD:    return "Math.exp (in asm.js)";
+      case Builtin::LogD:    return "Math.log (in asm.js)";
+      case Builtin::PowD:    return "Math.pow (in asm.js)";
+      case Builtin::ATan2D:  return "Math.atan2 (in asm.js)";
+      case Builtin::Limit:   break;
     }
-    MOZ_CRASH("Bad builtin kind");
+    MOZ_CRASH("symbolic immediate not a builtin");
 }
 
 const char*
 AsmJSProfilingFrameIterator::label() const
 {
     MOZ_ASSERT(!done());
 
     // Use the same string for both time inside and under so that the two
     // entries will be coalesced by the profiler.
     //
     // NB: these labels are regexp-matched by
     //     devtools/client/profiler/cleopatra/js/parserWorker.js.
     const char* jitFFIDescription = "fast FFI trampoline (in asm.js)";
     const char* slowFFIDescription = "slow FFI trampoline (in asm.js)";
     const char* interruptDescription = "interrupt due to out-of-bounds or long execution (in asm.js)";
 
-    switch (AsmJSExit::ExtractReasonKind(exitReason_)) {
-      case AsmJSExit::Reason_None:
+    switch (exitReason_.kind()) {
+      case ExitReason::None:
         break;
-      case AsmJSExit::Reason_JitFFI:
+      case ExitReason::Jit:
         return jitFFIDescription;
-      case AsmJSExit::Reason_SlowFFI:
+      case ExitReason::Slow:
         return slowFFIDescription;
-      case AsmJSExit::Reason_Interrupt:
+      case ExitReason::Interrupt:
         return interruptDescription;
-      case AsmJSExit::Reason_Builtin:
-        return BuiltinToName(AsmJSExit::ExtractBuiltinKind(exitReason_));
+      case ExitReason::Builtin:
+        return BuiltinToName(exitReason_.builtin());
     }
 
     auto codeRange = reinterpret_cast<const AsmJSModule::CodeRange*>(codeRange_);
     switch (codeRange->kind()) {
       case AsmJSModule::CodeRange::Function:  return codeRange->functionProfilingLabel(*module_);
       case AsmJSModule::CodeRange::Entry:     return "entry trampoline (in asm.js)";
       case AsmJSModule::CodeRange::JitFFI:    return jitFFIDescription;
       case AsmJSModule::CodeRange::SlowFFI:   return slowFFIDescription;
       case AsmJSModule::CodeRange::Interrupt: return interruptDescription;
       case AsmJSModule::CodeRange::Inline:    return "inline stub (in asm.js)";
       case AsmJSModule::CodeRange::Thunk:     return BuiltinToName(codeRange->thunkTarget());
     }
 
-    MOZ_CRASH("Bad exit kind");
+    MOZ_CRASH("bad code range kind");
 }
--- a/js/src/asmjs/AsmJSFrameIterator.h
+++ b/js/src/asmjs/AsmJSFrameIterator.h
@@ -16,34 +16,36 @@
  * limitations under the License.
  */
 
 #ifndef asmjs_AsmJSFrameIterator_h
 #define asmjs_AsmJSFrameIterator_h
 
 #include <stdint.h>
 
+#include "asmjs/Wasm.h"
 #include "js/ProfilingFrameIterator.h"
 
 class JSAtom;
 
 namespace js {
 
 class AsmJSActivation;
 class AsmJSModule;
-namespace jit { class CallSite; class MacroAssembler; class Label; }
+namespace jit { class MacroAssembler; class Label; }
+namespace wasm { class CallSite; }
 
 // Iterates over the frames of a single AsmJSActivation, called synchronously
 // from C++ in the thread of the asm.js. The one exception is that this iterator
 // may be called from the interrupt callback which may be called asynchronously
 // from asm.js code; in this case, the backtrace may not be correct.
 class AsmJSFrameIterator
 {
     const AsmJSModule* module_;
-    const jit::CallSite* callsite_;
+    const wasm::CallSite* callsite_;
     uint8_t* fp_;
 
     // Really, a const AsmJSModule::CodeRange*, but no forward declarations of
     // nested classes, so use void* to avoid pulling in all of AsmJSModule.h.
     const void* codeRange_;
 
     void settle();
 
@@ -51,94 +53,26 @@ class AsmJSFrameIterator
     explicit AsmJSFrameIterator() : module_(nullptr) {}
     explicit AsmJSFrameIterator(const AsmJSActivation& activation);
     void operator++();
     bool done() const { return !fp_; }
     JSAtom* functionDisplayAtom() const;
     unsigned computeLine(uint32_t* column) const;
 };
 
-namespace AsmJSExit
-{
-    // List of reasons for execution leaving asm.js-generated code, stored in
-    // AsmJSActivation. The initial and default state is AsmJSNoExit. If
-    // AsmJSNoExit is observed when the pc isn't in asm.js code, execution must
-    // have been interrupted asynchronously (viz., by a exception/signal
-    // handler).
-    enum ReasonKind {
-        Reason_None,
-        Reason_JitFFI,
-        Reason_SlowFFI,
-        Reason_Interrupt,
-        Reason_Builtin
-    };
-
-    // For Reason_Builtin, the list of builtins, so they can be displayed in the
-    // profile call stack.
-    enum BuiltinKind {
-        Builtin_ToInt32,
-#if defined(JS_CODEGEN_ARM)
-        Builtin_IDivMod,
-        Builtin_UDivMod,
-        Builtin_AtomicCmpXchg,
-        Builtin_AtomicXchg,
-        Builtin_AtomicFetchAdd,
-        Builtin_AtomicFetchSub,
-        Builtin_AtomicFetchAnd,
-        Builtin_AtomicFetchOr,
-        Builtin_AtomicFetchXor,
-#endif
-        Builtin_ModD,
-        Builtin_SinD,
-        Builtin_CosD,
-        Builtin_TanD,
-        Builtin_ASinD,
-        Builtin_ACosD,
-        Builtin_ATanD,
-        Builtin_CeilD,
-        Builtin_CeilF,
-        Builtin_FloorD,
-        Builtin_FloorF,
-        Builtin_ExpD,
-        Builtin_LogD,
-        Builtin_PowD,
-        Builtin_ATan2D,
-        Builtin_Limit
-    };
-
-    // A Reason contains both a ReasonKind and (if Reason_Builtin) a
-    // BuiltinKind.
-    typedef uint32_t Reason;
-
-    static const uint32_t None = Reason_None;
-    static const uint32_t JitFFI = Reason_JitFFI;
-    static const uint32_t SlowFFI = Reason_SlowFFI;
-    static const uint32_t Interrupt = Reason_Interrupt;
-    static inline Reason Builtin(BuiltinKind builtin) {
-        return uint16_t(Reason_Builtin) | (uint16_t(builtin) << 16);
-    }
-    static inline ReasonKind ExtractReasonKind(Reason reason) {
-        return ReasonKind(uint16_t(reason));
-    }
-    static inline BuiltinKind ExtractBuiltinKind(Reason reason) {
-        MOZ_ASSERT(ExtractReasonKind(reason) == Reason_Builtin);
-        return BuiltinKind(uint16_t(reason >> 16));
-    }
-} // namespace AsmJSExit
-
 // Iterates over the frames of a single AsmJSActivation, given an
 // asynchrously-interrupted thread's state. If the activation's
 // module is not in profiling mode, the activation is skipped.
 class AsmJSProfilingFrameIterator
 {
     const AsmJSModule* module_;
     uint8_t* callerFP_;
     void* callerPC_;
     void* stackAddress_;
-    AsmJSExit::Reason exitReason_;
+    wasm::ExitReason exitReason_;
 
     // Really, a const AsmJSModule::CodeRange*, but no forward declarations of
     // nested classes, so use void* to avoid pulling in all of AsmJSModule.h.
     const void* codeRange_;
 
     void initFromFP(const AsmJSActivation& activation);
 
   public:
@@ -203,20 +137,20 @@ struct AsmJSFunctionOffsets : AsmJSProfi
 
     // When profiling is enabled, the 'nop' at offset 'profilingJump' is
     // overwritten to be a jump to 'profilingEpilogue'.
     uint32_t profilingJump;
     uint32_t profilingEpilogue;
 };
 
 void
-GenerateAsmJSExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, AsmJSExit::Reason reason,
+GenerateAsmJSExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, wasm::ExitReason reason,
                           AsmJSProfilingOffsets* offsets, jit::Label* maybeEntry = nullptr);
 void
-GenerateAsmJSExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, AsmJSExit::Reason reason,
+GenerateAsmJSExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, wasm::ExitReason reason,
                           AsmJSProfilingOffsets* offsets);
 
 void
 GenerateAsmJSFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed,
                               AsmJSFunctionOffsets* offsets);
 void
 GenerateAsmJSFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed,
                               AsmJSFunctionOffsets* offsets);
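
The AsmJSExit namespace removed above packed a ReasonKind and a BuiltinKind into a raw uint32_t. Its replacement, wasm::ExitReason, now lives in the new asmjs/Wasm.h, which is not shown in this hunk; the sketch below is only an assumed illustration reconstructed from the pack()/kind()/builtin() calls and the offsetOfPackedExitReason() store visible in AsmJSFrameIterator.cpp, not the actual header contents.

    // Hypothetical sketch of wasm::ExitReason; names follow the call sites in
    // this patch, but the real definition is in asmjs/Wasm.h.
    #include <cassert>
    #include <cstdint>

    namespace js { namespace wasm {

    // Stand-in for the real Builtin enum declared in Wasm.h.
    enum class Builtin : uint16_t { ToInt32, ModD, SinD, /* ... */ Limit };

    class ExitReason
    {
      public:
        enum Kind : uint16_t { None, Jit, Slow, Interrupt, Builtin };

      private:
        Kind kind_;
        wasm::Builtin builtin_;

      public:
        ExitReason(Kind kind = None)   // implicit, so "exitReason_ = ExitReason::None" compiles
          : kind_(kind), builtin_(wasm::Builtin::Limit) {}
        explicit ExitReason(wasm::Builtin builtin)
          : kind_(Builtin), builtin_(builtin) {}

        Kind kind() const { return kind_; }
        wasm::Builtin builtin() const { assert(kind_ == Builtin); return builtin_; }

        // 32-bit value stored at AsmJSActivation::offsetOfPackedExitReason(),
        // same layout as the old AsmJSExit::Reason: kind in the low 16 bits,
        // builtin in the high 16 bits.
        uint32_t pack() const { return uint32_t(kind_) | (uint32_t(builtin_) << 16); }
        static ExitReason unpack(uint32_t bits) {
            ExitReason r(Kind(bits & 0xffff));
            r.builtin_ = wasm::Builtin(bits >> 16);
            return r;
        }
    };

    } } // namespace js::wasm
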
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -15,16 +15,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #include "asmjs/AsmJSModule.h"
 
 #include "mozilla/BinarySearch.h"
 #include "mozilla/Compression.h"
+#include "mozilla/EnumeratedRange.h"
 #include "mozilla/PodOperations.h"
 #include "mozilla/TaggedAnonymousMemory.h"
 
 #include "jslibmath.h"
 #include "jsmath.h"
 #include "jsprf.h"
 
 #include "builtin/AtomicsObject.h"
@@ -46,21 +47,23 @@
 #include "vm/Stack-inl.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 using namespace js::frontend;
 using mozilla::BinarySearch;
 using mozilla::Compression::LZ4;
+using mozilla::MakeEnumeratedRange;
 using mozilla::MallocSizeOf;
 using mozilla::PodCopy;
 using mozilla::PodEqual;
 using mozilla::PodZero;
 using mozilla::Swap;
+using JS::GenericNaN;
 
 static uint8_t*
 AllocateExecutableMemory(ExclusiveContext* cx, size_t bytes)
 {
     // On most platforms, this will allocate RWX memory. On iOS, or when
     // --non-writable-jitcode is used, this will allocate RW memory. In this
     // case, DynamicallyLinkModule will reprotect the code as RX.
     unsigned permissions =
@@ -160,17 +163,16 @@ AsmJSModule::addSizeOfMisc(MallocSizeOf 
 {
     *asmJSModuleCode += pod.totalBytes_;
     *asmJSModuleData += mallocSizeOf(this) +
                         globals_.sizeOfExcludingThis(mallocSizeOf) +
                         exits_.sizeOfExcludingThis(mallocSizeOf) +
                         exports_.sizeOfExcludingThis(mallocSizeOf) +
                         callSites_.sizeOfExcludingThis(mallocSizeOf) +
                         codeRanges_.sizeOfExcludingThis(mallocSizeOf) +
-                        builtinThunkOffsets_.sizeOfExcludingThis(mallocSizeOf) +
                         names_.sizeOfExcludingThis(mallocSizeOf) +
                         heapAccesses_.sizeOfExcludingThis(mallocSizeOf) +
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
                         profiledFunctions_.sizeOfExcludingThis(mallocSizeOf) +
 #endif
                         staticLinkData_.sizeOfExcludingThis(mallocSizeOf);
 }
 
@@ -235,24 +237,24 @@ AsmJSModule::lookupCodeRange(void* pc) c
     if (!BinarySearch(codeRanges_, lowerBound, upperBound, target, &match))
         return nullptr;
 
     return &codeRanges_[match];
 }
 
 struct HeapAccessOffset
 {
-    const AsmJSHeapAccessVector& accesses;
-    explicit HeapAccessOffset(const AsmJSHeapAccessVector& accesses) : accesses(accesses) {}
+    const HeapAccessVector& accesses;
+    explicit HeapAccessOffset(const HeapAccessVector& accesses) : accesses(accesses) {}
     uintptr_t operator[](size_t index) const {
         return accesses[index].insnOffset();
     }
 };
 
-const AsmJSHeapAccess*
+const HeapAccess*
 AsmJSModule::lookupHeapAccess(void* pc) const
 {
     MOZ_ASSERT(isFinished());
     MOZ_ASSERT(containsFunctionPC(pc));
 
     uint32_t target = ((uint8_t*)pc) - code_;
     size_t lowerBound = 0;
     size_t upperBound = heapAccesses_.length();
@@ -304,17 +306,17 @@ AsmJSModule::finish(ExclusiveContext* cx
 
     // c.f. JitCode::copyFrom
     MOZ_ASSERT(masm.jumpRelocationTableBytes() == 0);
     MOZ_ASSERT(masm.dataRelocationTableBytes() == 0);
     MOZ_ASSERT(masm.preBarrierTableBytes() == 0);
     MOZ_ASSERT(!masm.hasSelfReference());
 
     // Heap-access metadata used for link-time patching and fault-handling.
-    heapAccesses_ = masm.extractAsmJSHeapAccesses();
+    heapAccesses_ = masm.extractHeapAccesses();
 
     // Call-site metadata used for stack unwinding.
     const CallSiteAndTargetVector& callSites = masm.callSites();
     if (!callSites_.appendAll(callSites))
         return false;
 
     // Absolute link metadata: absolute addresses that refer to some fixed
     // address in the address space.
@@ -598,107 +600,107 @@ RedirectCall(void* fun, ABIFunctionType 
 {
 #ifdef JS_SIMULATOR
     fun = Simulator::RedirectNativeFunction(fun, type);
 #endif
     return fun;
 }
 
 static void*
-AddressOf(AsmJSImmKind kind, ExclusiveContext* cx)
+AddressOf(SymbolicAddress imm, ExclusiveContext* cx)
 {
-    switch (kind) {
-      case AsmJSImm_Runtime:
+    switch (imm) {
+      case SymbolicAddress::Runtime:
         return cx->runtimeAddressForJit();
-      case AsmJSImm_RuntimeInterruptUint32:
+      case SymbolicAddress::RuntimeInterruptUint32:
         return cx->runtimeAddressOfInterruptUint32();
-      case AsmJSImm_StackLimit:
+      case SymbolicAddress::StackLimit:
         return cx->stackLimitAddressForJitCode(StackForUntrustedScript);
-      case AsmJSImm_ReportOverRecursed:
+      case SymbolicAddress::ReportOverRecursed:
         return RedirectCall(FuncCast(AsmJSReportOverRecursed), Args_General0);
-      case AsmJSImm_OnDetached:
+      case SymbolicAddress::OnDetached:
         return RedirectCall(FuncCast(OnDetached), Args_General0);
-      case AsmJSImm_OnOutOfBounds:
+      case SymbolicAddress::OnOutOfBounds:
         return RedirectCall(FuncCast(OnOutOfBounds), Args_General0);
-      case AsmJSImm_OnImpreciseConversion:
+      case SymbolicAddress::OnImpreciseConversion:
         return RedirectCall(FuncCast(OnImpreciseConversion), Args_General0);
-      case AsmJSImm_HandleExecutionInterrupt:
+      case SymbolicAddress::HandleExecutionInterrupt:
         return RedirectCall(FuncCast(AsmJSHandleExecutionInterrupt), Args_General0);
-      case AsmJSImm_InvokeFromAsmJS_Ignore:
+      case SymbolicAddress::InvokeFromAsmJS_Ignore:
         return RedirectCall(FuncCast(InvokeFromAsmJS_Ignore), Args_General3);
-      case AsmJSImm_InvokeFromAsmJS_ToInt32:
+      case SymbolicAddress::InvokeFromAsmJS_ToInt32:
         return RedirectCall(FuncCast(InvokeFromAsmJS_ToInt32), Args_General3);
-      case AsmJSImm_InvokeFromAsmJS_ToNumber:
+      case SymbolicAddress::InvokeFromAsmJS_ToNumber:
         return RedirectCall(FuncCast(InvokeFromAsmJS_ToNumber), Args_General3);
-      case AsmJSImm_CoerceInPlace_ToInt32:
+      case SymbolicAddress::CoerceInPlace_ToInt32:
         return RedirectCall(FuncCast(CoerceInPlace_ToInt32), Args_General1);
-      case AsmJSImm_CoerceInPlace_ToNumber:
+      case SymbolicAddress::CoerceInPlace_ToNumber:
         return RedirectCall(FuncCast(CoerceInPlace_ToNumber), Args_General1);
-      case AsmJSImm_ToInt32:
+      case SymbolicAddress::ToInt32:
         return RedirectCall(FuncCast<int32_t (double)>(JS::ToInt32), Args_Int_Double);
 #if defined(JS_CODEGEN_ARM)
-      case AsmJSImm_aeabi_idivmod:
+      case SymbolicAddress::aeabi_idivmod:
         return RedirectCall(FuncCast(__aeabi_idivmod), Args_General2);
-      case AsmJSImm_aeabi_uidivmod:
+      case SymbolicAddress::aeabi_uidivmod:
         return RedirectCall(FuncCast(__aeabi_uidivmod), Args_General2);
-      case AsmJSImm_AtomicCmpXchg:
+      case SymbolicAddress::AtomicCmpXchg:
         return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t, int32_t)>(js::atomics_cmpxchg_asm_callout), Args_General4);
-      case AsmJSImm_AtomicXchg:
+      case SymbolicAddress::AtomicXchg:
         return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_xchg_asm_callout), Args_General3);
-      case AsmJSImm_AtomicFetchAdd:
+      case SymbolicAddress::AtomicFetchAdd:
         return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_add_asm_callout), Args_General3);
-      case AsmJSImm_AtomicFetchSub:
+      case SymbolicAddress::AtomicFetchSub:
         return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_sub_asm_callout), Args_General3);
-      case AsmJSImm_AtomicFetchAnd:
+      case SymbolicAddress::AtomicFetchAnd:
         return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_and_asm_callout), Args_General3);
-      case AsmJSImm_AtomicFetchOr:
+      case SymbolicAddress::AtomicFetchOr:
         return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_or_asm_callout), Args_General3);
-      case AsmJSImm_AtomicFetchXor:
+      case SymbolicAddress::AtomicFetchXor:
         return RedirectCall(FuncCast<int32_t (int32_t, int32_t, int32_t)>(js::atomics_xor_asm_callout), Args_General3);
 #endif
-      case AsmJSImm_ModD:
+      case SymbolicAddress::ModD:
         return RedirectCall(FuncCast(NumberMod), Args_Double_DoubleDouble);
-      case AsmJSImm_SinD:
+      case SymbolicAddress::SinD:
 #ifdef _WIN64
         // Workaround a VS 2013 sin issue, see math_sin_uncached.
         return RedirectCall(FuncCast<double (double)>(js::math_sin_uncached), Args_Double_Double);
 #else
         return RedirectCall(FuncCast<double (double)>(sin), Args_Double_Double);
 #endif
-      case AsmJSImm_CosD:
+      case SymbolicAddress::CosD:
         return RedirectCall(FuncCast<double (double)>(cos), Args_Double_Double);
-      case AsmJSImm_TanD:
+      case SymbolicAddress::TanD:
         return RedirectCall(FuncCast<double (double)>(tan), Args_Double_Double);
-      case AsmJSImm_ASinD:
+      case SymbolicAddress::ASinD:
         return RedirectCall(FuncCast<double (double)>(asin), Args_Double_Double);
-      case AsmJSImm_ACosD:
+      case SymbolicAddress::ACosD:
         return RedirectCall(FuncCast<double (double)>(acos), Args_Double_Double);
-      case AsmJSImm_ATanD:
+      case SymbolicAddress::ATanD:
         return RedirectCall(FuncCast<double (double)>(atan), Args_Double_Double);
-      case AsmJSImm_CeilD:
+      case SymbolicAddress::CeilD:
         return RedirectCall(FuncCast<double (double)>(ceil), Args_Double_Double);
-      case AsmJSImm_CeilF:
+      case SymbolicAddress::CeilF:
         return RedirectCall(FuncCast<float (float)>(ceilf), Args_Float32_Float32);
-      case AsmJSImm_FloorD:
+      case SymbolicAddress::FloorD:
         return RedirectCall(FuncCast<double (double)>(floor), Args_Double_Double);
-      case AsmJSImm_FloorF:
+      case SymbolicAddress::FloorF:
         return RedirectCall(FuncCast<float (float)>(floorf), Args_Float32_Float32);
-      case AsmJSImm_ExpD:
+      case SymbolicAddress::ExpD:
         return RedirectCall(FuncCast<double (double)>(exp), Args_Double_Double);
-      case AsmJSImm_LogD:
+      case SymbolicAddress::LogD:
         return RedirectCall(FuncCast<double (double)>(log), Args_Double_Double);
-      case AsmJSImm_PowD:
+      case SymbolicAddress::PowD:
         return RedirectCall(FuncCast(ecmaPow), Args_Double_DoubleDouble);
-      case AsmJSImm_ATan2D:
+      case SymbolicAddress::ATan2D:
         return RedirectCall(FuncCast(ecmaAtan2), Args_Double_DoubleDouble);
-      case AsmJSImm_Limit:
+      case SymbolicAddress::Limit:
         break;
     }
 
-    MOZ_CRASH("Bad AsmJSImmKind");
+    MOZ_CRASH("Bad SymbolicAddress");
 }
 
 void
 AsmJSModule::staticallyLink(ExclusiveContext* cx)
 {
     MOZ_ASSERT(isFinished());
 
     // Process staticLinkData_
@@ -725,42 +727,41 @@ AsmJSModule::staticallyLink(ExclusiveCon
         }
 
         if (link.isRawPointerPatch())
             *(uint8_t**)(patchAt) = target;
         else
             Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
     }
 
-    for (size_t immIndex = 0; immIndex < AsmJSImm_Limit; immIndex++) {
-        AsmJSImmKind imm = AsmJSImmKind(immIndex);
+    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
         const OffsetVector& offsets = staticLinkData_.absoluteLinks[imm];
         for (size_t i = 0; i < offsets.length(); i++) {
             uint8_t* patchAt = code_ + offsets[i];
             void* target = AddressOf(imm, cx);
 
             // Builtin calls are another case where, when profiling is enabled,
             // we must point to the profiling entry.
-            AsmJSExit::BuiltinKind builtin;
-            if (profilingEnabled_ && ImmKindIsBuiltin(imm, &builtin)) {
+            Builtin builtin;
+            if (profilingEnabled_ && ImmediateIsBuiltin(imm, &builtin)) {
                 const CodeRange* codeRange = lookupCodeRange(patchAt);
                 if (codeRange->isFunction())
-                    target = code_ + builtinThunkOffsets_[builtin];
+                    target = code_ + staticLinkData_.pod.builtinThunkOffsets[builtin];
             }
 
             Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
                                                PatchedImmPtr(target),
                                                PatchedImmPtr((void*)-1));
         }
     }
 
     // Initialize global data segment
 
-    *(double*)(globalData() + AsmJSNaN64GlobalDataOffset) = GenericNaN();
-    *(float*)(globalData() + AsmJSNaN32GlobalDataOffset) = GenericNaN();
+    *(double*)(globalData() + NaN64GlobalDataOffset) = GenericNaN();
+    *(float*)(globalData() + NaN32GlobalDataOffset) = GenericNaN();
 
     for (size_t tableIndex = 0; tableIndex < staticLinkData_.funcPtrTables.length(); tableIndex++) {
         FuncPtrTable& funcPtrTable = staticLinkData_.funcPtrTables[tableIndex];
         const OffsetVector& offsets = funcPtrTable.elemOffsets();
         auto array = reinterpret_cast<void**>(globalData() + funcPtrTable.globalDataOffset());
         for (size_t elemIndex = 0; elemIndex < offsets.length(); elemIndex++) {
             uint8_t* target = code_ + offsets[elemIndex];
             if (profilingEnabled_)
@@ -786,17 +787,17 @@ AsmJSModule::initHeap(Handle<ArrayBuffer
     // accessed from maybeHeap(), which wraps it, and from
     // hasDetachedHeap(), which checks it for null.
     heapDatum() = heap->dataPointerEither().unwrap(/*safe - explained above*/);
 
 #if defined(JS_CODEGEN_X86)
     uint8_t* heapOffset = heap->dataPointerEither().unwrap(/*safe - used for value*/);
     uint32_t heapLength = heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
-        const jit::AsmJSHeapAccess& access = heapAccesses_[i];
+        const HeapAccess& access = heapAccesses_[i];
         // An access is out-of-bounds iff
         //      ptr + offset + data-type-byte-size > heapLength
         // i.e. ptr > heapLength - data-type-byte-size - offset.
         // data-type-byte-size and offset are already included in the addend
         // so we just have to add the heap length here.
         if (access.hasLengthCheck())
             X86Encoding::AddInt32(access.patchLengthAt(code_), heapLength);
         void* addr = access.patchHeapPtrImmAt(code_);
@@ -809,17 +810,17 @@ AsmJSModule::initHeap(Handle<ArrayBuffer
     // atomic operations that depend on explicit checks.
     //
     // If we have any explicit bounds checks, we need to patch the heap length
     // checks at the right places. All accesses that have been recorded are the
     // only ones that need bound checks (see also
     // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,Exchange,AtomicBinop}Heap)
     uint32_t heapLength = heap->byteLength();
     for (size_t i = 0; i < heapAccesses_.length(); i++) {
-        const jit::AsmJSHeapAccess& access = heapAccesses_[i];
+        const HeapAccess& access = heapAccesses_[i];
         // See comment above for x86 codegen.
         if (access.hasLengthCheck())
             X86Encoding::AddInt32(access.patchLengthAt(code_), heapLength);
     }
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
     uint32_t heapLength = heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
         jit::Assembler::UpdateBoundsCheck(heapLength,
@@ -832,31 +833,31 @@ void
 AsmJSModule::restoreHeapToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer)
 {
 #if defined(JS_CODEGEN_X86)
     if (maybePrevBuffer) {
         // Subtract out the base-pointer added by AsmJSModule::initHeap.
         uint8_t* ptrBase = maybePrevBuffer->dataPointerEither().unwrap(/*safe - used for value*/);
         uint32_t heapLength = maybePrevBuffer->byteLength();
         for (unsigned i = 0; i < heapAccesses_.length(); i++) {
-            const jit::AsmJSHeapAccess& access = heapAccesses_[i];
+            const HeapAccess& access = heapAccesses_[i];
             // Subtract the heap length back out, leaving the raw displacement in place.
             if (access.hasLengthCheck())
                 X86Encoding::AddInt32(access.patchLengthAt(code_), -heapLength);
             void* addr = access.patchHeapPtrImmAt(code_);
             uint8_t* ptr = reinterpret_cast<uint8_t*>(X86Encoding::GetPointer(addr));
             MOZ_ASSERT(ptr >= ptrBase);
             X86Encoding::SetPointer(addr, (void*)(ptr - ptrBase));
         }
     }
 #elif defined(JS_CODEGEN_X64)
     if (maybePrevBuffer) {
         uint32_t heapLength = maybePrevBuffer->byteLength();
         for (unsigned i = 0; i < heapAccesses_.length(); i++) {
-            const jit::AsmJSHeapAccess& access = heapAccesses_[i];
+            const HeapAccess& access = heapAccesses_[i];
             // See comment above for x86 codegen.
             if (access.hasLengthCheck())
                 X86Encoding::AddInt32(access.patchLengthAt(code_), -heapLength);
         }
     }
 #endif
 
     maybeHeap_ = nullptr;
@@ -866,24 +867,24 @@ AsmJSModule::restoreHeapToInitialState(A
 void
 AsmJSModule::restoreToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer,
                                    uint8_t* prevCode,
                                    ExclusiveContext* cx)
 {
 #ifdef DEBUG
     // Put the absolute links back to -1 so PatchDataWithValueCheck assertions
     // in staticallyLink are valid.
-    for (size_t imm = 0; imm < AsmJSImm_Limit; imm++) {
-        void* callee = AddressOf(AsmJSImmKind(imm), cx);
+    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+        void* callee = AddressOf(imm, cx);
 
         // If we are in profiling mode, calls to builtins will have been patched
         // by setProfilingEnabled to be calls to thunks.
-        AsmJSExit::BuiltinKind builtin;
-        void* profilingCallee = profilingEnabled_ && ImmKindIsBuiltin(AsmJSImmKind(imm), &builtin)
-                                ? prevCode + builtinThunkOffsets_[builtin]
+        Builtin builtin;
+        void* profilingCallee = profilingEnabled_ && ImmediateIsBuiltin(imm, &builtin)
+                                ? prevCode + staticLinkData_.pod.builtinThunkOffsets[builtin]
                                 : nullptr;
 
         const AsmJSModule::OffsetVector& offsets = staticLinkData_.absoluteLinks[imm];
         for (size_t i = 0; i < offsets.length(); i++) {
             uint8_t* caller = code_ + offsets[i];
             void* originalValue = profilingCallee && !lookupCodeRange(caller)->isThunk()
                                   ? profilingCallee
                                   : callee;
@@ -927,18 +928,18 @@ AsmJSModule::detachHeap(JSContext* cx)
     if (interrupted_) {
         JS_ReportError(cx, "attempt to detach from inside interrupt handler");
         return false;
     }
 
     // Even if this->active(), to reach here, the activation must have called
     // out via an FFI stub. FFI stubs check if heapDatum() is null on reentry
     // and throw an exception if so.
-    MOZ_ASSERT_IF(active(), activation()->exitReason() == AsmJSExit::Reason_JitFFI ||
-                            activation()->exitReason() == AsmJSExit::Reason_SlowFFI);
+    MOZ_ASSERT_IF(active(), activation()->exitReason().kind() == ExitReason::Jit ||
+                            activation()->exitReason().kind() == ExitReason::Slow);
 
     AutoMutateCode amc(cx, *this, "AsmJSModule::detachHeap");
     restoreHeapToInitialState(maybeHeap_);
 
     MOZ_ASSERT(hasDetachedHeap());
     return true;
 }
 
@@ -1404,26 +1405,26 @@ AsmJSModule::CodeRange::CodeRange(Kind k
     PodZero(&u);  // zero padding for Valgrind
     u.kind_ = kind;
 
     MOZ_ASSERT(begin_ < profilingReturn_);
     MOZ_ASSERT(profilingReturn_ < end_);
     MOZ_ASSERT(u.kind_ == JitFFI || u.kind_ == SlowFFI || u.kind_ == Interrupt);
 }
 
-AsmJSModule::CodeRange::CodeRange(AsmJSExit::BuiltinKind builtin, AsmJSProfilingOffsets offsets)
+AsmJSModule::CodeRange::CodeRange(Builtin builtin, AsmJSProfilingOffsets offsets)
   : nameIndex_(0),
     lineNumber_(0),
     begin_(offsets.begin),
     profilingReturn_(offsets.profilingReturn),
     end_(offsets.end)
 {
     PodZero(&u);  // zero padding for Valgrind
     u.kind_ = Thunk;
-    u.thunk.target_ = builtin;
+    u.thunk.target_ = uint16_t(builtin);
 
     MOZ_ASSERT(begin_ < profilingReturn_);
     MOZ_ASSERT(profilingReturn_ < end_);
 }
 
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
 size_t
 AsmJSModule::ProfiledFunction::serializedSize() const
@@ -1448,56 +1449,56 @@ AsmJSModule::ProfiledFunction::deseriali
     return cursor;
 }
 #endif
 
 size_t
 AsmJSModule::AbsoluteLinkArray::serializedSize() const
 {
     size_t size = 0;
-    for (size_t i = 0; i < AsmJSImm_Limit; i++)
-        size += SerializedPodVectorSize(array_[i]);
+    for (const OffsetVector& offsets : *this)
+        size += SerializedPodVectorSize(offsets);
     return size;
 }
 
 uint8_t*
 AsmJSModule::AbsoluteLinkArray::serialize(uint8_t* cursor) const
 {
-    for (size_t i = 0; i < AsmJSImm_Limit; i++)
-        cursor = SerializePodVector(cursor, array_[i]);
+    for (const OffsetVector& offsets : *this)
+        cursor = SerializePodVector(cursor, offsets);
     return cursor;
 }
 
 const uint8_t*
 AsmJSModule::AbsoluteLinkArray::deserialize(ExclusiveContext* cx, const uint8_t* cursor)
 {
-    for (size_t i = 0; i < AsmJSImm_Limit; i++) {
-        cursor = DeserializePodVector(cx, cursor, &array_[i]);
+    for (OffsetVector& offsets : *this) {
+        cursor = DeserializePodVector(cx, cursor, &offsets);
         if (!cursor)
             return nullptr;
     }
     return cursor;
 }
 
 bool
 AsmJSModule::AbsoluteLinkArray::clone(ExclusiveContext* cx, AbsoluteLinkArray* out) const
 {
-    for (size_t i = 0; i < AsmJSImm_Limit; i++) {
-        if (!ClonePodVector(cx, array_[i], &out->array_[i]))
+    for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+        if (!ClonePodVector(cx, (*this)[imm], &(*out)[imm]))
             return false;
     }
     return true;
 }
 
 size_t
 AsmJSModule::AbsoluteLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
 {
     size_t size = 0;
-    for (size_t i = 0; i < AsmJSImm_Limit; i++)
-        size += array_[i].sizeOfExcludingThis(mallocSizeOf);
+    for (const OffsetVector& offsets : *this)
+        size += offsets.sizeOfExcludingThis(mallocSizeOf);
     return size;
 }
 
 size_t
 AsmJSModule::FuncPtrTable::serializedSize() const
 {
     return sizeof(pod) +
            SerializedPodVectorSize(elemOffsets_);
@@ -1591,17 +1592,16 @@ AsmJSModule::serializedSize() const
            SerializedNameSize(globalArgumentName_) +
            SerializedNameSize(importArgumentName_) +
            SerializedNameSize(bufferArgumentName_) +
            SerializedVectorSize(globals_) +
            SerializedVectorSize(exits_) +
            SerializedVectorSize(exports_) +
            SerializedPodVectorSize(callSites_) +
            SerializedPodVectorSize(codeRanges_) +
-           SerializedPodVectorSize(builtinThunkOffsets_) +
            SerializedVectorSize(names_) +
            SerializedPodVectorSize(heapAccesses_) +
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
            SerializedVectorSize(profiledFunctions_) +
 #endif
            staticLinkData_.serializedSize();
 }
 
@@ -1618,17 +1618,16 @@ AsmJSModule::serialize(uint8_t* cursor) 
     cursor = SerializeName(cursor, globalArgumentName_);
     cursor = SerializeName(cursor, importArgumentName_);
     cursor = SerializeName(cursor, bufferArgumentName_);
     cursor = SerializeVector(cursor, globals_);
     cursor = SerializeVector(cursor, exits_);
     cursor = SerializeVector(cursor, exports_);
     cursor = SerializePodVector(cursor, callSites_);
     cursor = SerializePodVector(cursor, codeRanges_);
-    cursor = SerializePodVector(cursor, builtinThunkOffsets_);
     cursor = SerializeVector(cursor, names_);
     cursor = SerializePodVector(cursor, heapAccesses_);
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     cursor = SerializeVector(cursor, profiledFunctions_);
 #endif
     cursor = staticLinkData_.serialize(cursor);
     return cursor;
 }
@@ -1646,17 +1645,16 @@ AsmJSModule::deserialize(ExclusiveContex
     (cursor = DeserializeName(cx, cursor, &globalArgumentName_)) &&
     (cursor = DeserializeName(cx, cursor, &importArgumentName_)) &&
     (cursor = DeserializeName(cx, cursor, &bufferArgumentName_)) &&
     (cursor = DeserializeVector(cx, cursor, &globals_)) &&
     (cursor = DeserializeVector(cx, cursor, &exits_)) &&
     (cursor = DeserializeVector(cx, cursor, &exports_)) &&
     (cursor = DeserializePodVector(cx, cursor, &callSites_)) &&
     (cursor = DeserializePodVector(cx, cursor, &codeRanges_)) &&
-    (cursor = DeserializePodVector(cx, cursor, &builtinThunkOffsets_)) &&
     (cursor = DeserializeVector(cx, cursor, &names_)) &&
     (cursor = DeserializePodVector(cx, cursor, &heapAccesses_)) &&
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     (cursor = DeserializeVector(cx, cursor, &profiledFunctions_)) &&
 #endif
     (cursor = staticLinkData_.deserialize(cx, cursor));
 
     loadedFromCache_ = true;
@@ -1688,17 +1686,16 @@ AsmJSModule::clone(JSContext* cx, Scoped
     out.importArgumentName_ = importArgumentName_;
     out.bufferArgumentName_ = bufferArgumentName_;
 
     if (!CloneVector(cx, globals_, &out.globals_) ||
         !CloneVector(cx, exits_, &out.exits_) ||
         !CloneVector(cx, exports_, &out.exports_) ||
         !ClonePodVector(cx, callSites_, &out.callSites_) ||
         !ClonePodVector(cx, codeRanges_, &out.codeRanges_) ||
-        !ClonePodVector(cx, builtinThunkOffsets_, &out.builtinThunkOffsets_) ||
         !CloneVector(cx, names_, &out.names_) ||
         !ClonePodVector(cx, heapAccesses_, &out.heapAccesses_) ||
         !staticLinkData_.clone(cx, &out.staticLinkData_))
     {
         return false;
     }
 
     out.loadedFromCache_ = loadedFromCache_;
@@ -1929,21 +1926,21 @@ AsmJSModule::setProfilingEnabled(bool en
 #else
 # error "Missing architecture"
 #endif
     }
 
     // Replace all calls to builtins with calls to profiling thunks that push a
     // frame pointer. Since exit unwinding always starts at the caller of fp,
     // this avoids losing the innermost asm.js function.
-    for (unsigned builtin = 0; builtin < AsmJSExit::Builtin_Limit; builtin++) {
-        AsmJSImmKind imm = BuiltinToImmKind(AsmJSExit::BuiltinKind(builtin));
-        const AsmJSModule::OffsetVector& offsets = staticLinkData_.absoluteLinks[imm];
-        void* from = AddressOf(AsmJSImmKind(imm), nullptr);
-        void* to = code_ + builtinThunkOffsets_[builtin];
+    for (auto builtin : MakeEnumeratedRange(Builtin::Limit)) {
+        auto imm = BuiltinToImmediate(builtin);
+        const OffsetVector& offsets = staticLinkData_.absoluteLinks[imm];
+        void* from = AddressOf(imm, nullptr);
+        void* to = code_ + staticLinkData_.pod.builtinThunkOffsets[builtin];
         if (!enabled)
             Swap(from, to);
         for (size_t j = 0; j < offsets.length(); j++) {
             uint8_t* caller = code_ + offsets[j];
             const AsmJSModule::CodeRange* codeRange = lookupCodeRange(caller);
             if (codeRange->isThunk())
                 continue;
             MOZ_ASSERT(codeRange->isFunction());
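
The loops rewritten above trade raw AsmJSImm_Limit indices for mozilla::EnumeratedArray storage and MakeEnumeratedRange iteration. As a quick orientation, here is a hedged standalone sketch of that pattern; the enumerator names are illustrative, and the one-argument MakeEnumeratedRange form used in staticallyLink is assumed to iterate from the enum's zero value up to Limit.

    // Hypothetical sketch of enum-indexed arrays and ranges, mirroring the usage above.
    #include "mozilla/EnumeratedArray.h"
    #include "mozilla/EnumeratedRange.h"
    #include <cstdint>

    enum class SymbolicAddress : uint32_t { Runtime, StackLimit, ToInt32, /* ... */ Limit };

    // One uint32_t slot per SymbolicAddress; indexing takes the enum directly, no casts.
    typedef mozilla::EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, uint32_t> OffsetArray;

    static uint32_t
    SumOffsets(const OffsetArray& offsets)
    {
        uint32_t sum = 0;
        // Replaces the old "for (size_t i = 0; i < AsmJSImm_Limit; i++)" loops.
        for (auto imm : mozilla::MakeEnumeratedRange(SymbolicAddress::Limit))
            sum += offsets[imm];
        return sum;
    }
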
--- a/js/src/asmjs/AsmJSModule.h
+++ b/js/src/asmjs/AsmJSModule.h
@@ -14,41 +14,37 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #ifndef asmjs_AsmJSModule_h
 #define asmjs_AsmJSModule_h
 
+#include "mozilla/EnumeratedArray.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/Move.h"
 #include "mozilla/PodOperations.h"
 
 #include "jsscript.h"
 
 #include "asmjs/AsmJSFrameIterator.h"
 #include "asmjs/AsmJSValidate.h"
 #include "asmjs/Wasm.h"
 #include "builtin/SIMD.h"
-#include "gc/Marking.h"
-#include "jit/IonTypes.h"
-#include "jit/MacroAssembler.h"
+#include "gc/Tracer.h"
 #ifdef JS_ION_PERF
 # include "jit/PerfSpewer.h"
 #endif
-#include "jit/RegisterSets.h"
-#include "jit/shared/Assembler-shared.h"
 #include "vm/TypedArrayObject.h"
 
 namespace js {
 
 namespace frontend { class TokenStream; }
-
-using JS::GenericNaN;
+namespace jit { struct BaselineScript; class MacroAssembler; }
 
 // The asm.js spec recognizes this set of builtin Math functions.
 enum AsmJSMathBuiltinFunction
 {
     AsmJSMathBuiltin_sin, AsmJSMathBuiltin_cos, AsmJSMathBuiltin_tan,
     AsmJSMathBuiltin_asin, AsmJSMathBuiltin_acos, AsmJSMathBuiltin_atan,
     AsmJSMathBuiltin_ceil, AsmJSMathBuiltin_floor, AsmJSMathBuiltin_exp,
     AsmJSMathBuiltin_log, AsmJSMathBuiltin_pow, AsmJSMathBuiltin_sqrt,
@@ -471,17 +467,17 @@ class AsmJSModule
         void assertValid();
 
       public:
         enum Kind { Function, Entry, JitFFI, SlowFFI, Interrupt, Thunk, Inline };
 
         CodeRange() {}
         CodeRange(Kind kind, AsmJSOffsets offsets);
         CodeRange(Kind kind, AsmJSProfilingOffsets offsets);
-        CodeRange(AsmJSExit::BuiltinKind builtin, AsmJSProfilingOffsets offsets);
+        CodeRange(wasm::Builtin builtin, AsmJSProfilingOffsets offsets);
         CodeRange(uint32_t lineNumber, AsmJSFunctionOffsets offsets);
 
         Kind kind() const { return Kind(u.kind_); }
         bool isFunction() const { return kind() == Function; }
         bool isEntry() const { return kind() == Entry; }
         bool isFFI() const { return kind() == JitFFI || kind() == SlowFFI; }
         bool isInterrupt() const { return kind() == Interrupt; }
         bool isThunk() const { return kind() == Thunk; }
@@ -532,19 +528,19 @@ class AsmJSModule
             return lineNumber_;
         }
         void functionOffsetBy(uint32_t offset) {
             MOZ_ASSERT(isFunction());
             begin_ += offset;
             profilingReturn_ += offset;
             end_ += offset;
         }
-        AsmJSExit::BuiltinKind thunkTarget() const {
+        wasm::Builtin thunkTarget() const {
             MOZ_ASSERT(isThunk());
-            return AsmJSExit::BuiltinKind(u.thunk.target_);
+            return wasm::Builtin(u.thunk.target_);
         }
     };
 
     class Name
     {
         PropertyName* name_;
       public:
         Name() : name_(nullptr) {}
@@ -593,37 +589,16 @@ class AsmJSModule
         }
 
         size_t serializedSize() const;
         uint8_t* serialize(uint8_t* cursor) const;
         const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
     };
 #endif
 
-#if defined(JS_ION_PERF)
-    struct ProfiledBlocksFunction : public ProfiledFunction
-    {
-        unsigned endInlineCodeOffset;
-        jit::BasicBlocksVector blocks;
-
-        ProfiledBlocksFunction(PropertyName* name, unsigned start, unsigned endInline, unsigned end,
-                               jit::BasicBlocksVector& blocksVector)
-          : ProfiledFunction(name, start, end), endInlineCodeOffset(endInline),
-            blocks(mozilla::Move(blocksVector))
-        {
-            MOZ_ASSERT(name->isTenured());
-        }
-
-        ProfiledBlocksFunction(ProfiledBlocksFunction&& copy)
-          : ProfiledFunction(copy.name, copy.pod.startCodeOffset, copy.pod.endCodeOffset),
-            endInlineCodeOffset(copy.endInlineCodeOffset), blocks(mozilla::Move(copy.blocks))
-        { }
-    };
-#endif
-
     struct RelativeLink
     {
         enum Kind
         {
             RawPointer,
             CodeLabel,
             InstructionImmediate
         };
@@ -652,32 +627,27 @@ class AsmJSModule
 #endif
 
         uint32_t patchAtOffset;
         uint32_t targetOffset;
     };
 
     typedef Vector<RelativeLink, 0, SystemAllocPolicy> RelativeLinkVector;
 
-    typedef Vector<uint32_t, 0, SystemAllocPolicy> OffsetVector;
-
-    class AbsoluteLinkArray
-    {
-        OffsetVector array_[jit::AsmJSImm_Limit];
+    typedef mozilla::EnumeratedArray<wasm::Builtin,
+                                     wasm::Builtin::Limit,
+                                     uint32_t> BuiltinThunkOffsetArray;
 
-      public:
-        OffsetVector& operator[](size_t i) {
-            MOZ_ASSERT(i < jit::AsmJSImm_Limit);
-            return array_[i];
-        }
-        const OffsetVector& operator[](size_t i) const {
-            MOZ_ASSERT(i < jit::AsmJSImm_Limit);
-            return array_[i];
-        }
+    typedef Vector<uint32_t, 0, SystemAllocPolicy> OffsetVector;
+    typedef mozilla::EnumeratedArray<wasm::SymbolicAddress,
+                                     wasm::SymbolicAddress::Limit,
+                                     OffsetVector> OffsetVectorArray;
 
+    struct AbsoluteLinkArray : public OffsetVectorArray
+    {
         size_t serializedSize() const;
         uint8_t* serialize(uint8_t* cursor) const;
         const uint8_t* deserialize(ExclusiveContext* cx, const uint8_t* cursor);
         bool clone(ExclusiveContext* cx, AbsoluteLinkArray* out) const;
 
         size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
     };
 
@@ -712,16 +682,17 @@ class AsmJSModule
     // AsmJSModule).
     struct StaticLinkData
     {
         StaticLinkData() { mozilla::PodZero(&pod); }
 
         struct Pod {
             uint32_t interruptExitOffset;
             uint32_t outOfBoundsExitOffset;
+            BuiltinThunkOffsetArray builtinThunkOffsets;
         } pod;
 
         RelativeLinkVector relativeLinks;
         AbsoluteLinkArray absoluteLinks;
         FuncPtrTableVector funcPtrTables;
 
         size_t serializedSize() const;
         uint8_t* serialize(uint8_t* cursor) const;
@@ -754,22 +725,21 @@ class AsmJSModule
     // of the module within the ScriptSource and thus aren't invariant with
     // respect to caching.
     const uint32_t                        srcStart_;
     const uint32_t                        srcBodyStart_;
 
     Vector<Global,                 0, SystemAllocPolicy> globals_;
     Vector<Exit,                   0, SystemAllocPolicy> exits_;
     Vector<ExportedFunction,       0, SystemAllocPolicy> exports_;
-    Vector<jit::CallSite,          0, SystemAllocPolicy> callSites_;
+    Vector<wasm::CallSite,         0, SystemAllocPolicy> callSites_;
     Vector<CodeRange,              0, SystemAllocPolicy> codeRanges_;
-    Vector<uint32_t,               0, SystemAllocPolicy> builtinThunkOffsets_;
     Vector<Name,                   0, SystemAllocPolicy> names_;
     Vector<ProfilingLabel,         0, SystemAllocPolicy> profilingLabels_;
-    Vector<jit::AsmJSHeapAccess,   0, SystemAllocPolicy> heapAccesses_;
+    Vector<wasm::HeapAccess,       0, SystemAllocPolicy> heapAccesses_;
 #if defined(MOZ_VTUNE) || defined(JS_ION_PERF)
     Vector<ProfiledFunction,       0, SystemAllocPolicy> profiledFunctions_;
 #endif
 
     ScriptSource *                        scriptSource_;
     PropertyName *                        globalArgumentName_;
     PropertyName *                        importArgumentName_;
     PropertyName *                        bufferArgumentName_;
@@ -1065,19 +1035,20 @@ class AsmJSModule
     bool addFunctionCodeRange(PropertyName* name, CodeRange codeRange) {
         MOZ_ASSERT(!isFinished());
         MOZ_ASSERT(name->isTenured());
         if (names_.length() >= UINT32_MAX)
             return false;
         codeRange.initNameIndex(names_.length());
         return names_.append(name) && codeRanges_.append(codeRange);
     }
-    bool addBuiltinThunkCodeRange(AsmJSExit::BuiltinKind builtin, AsmJSProfilingOffsets offsets) {
-        return builtinThunkOffsets_.append(offsets.begin) &&
-               codeRanges_.append(CodeRange(builtin, offsets));
+    bool addBuiltinThunkCodeRange(wasm::Builtin builtin, AsmJSProfilingOffsets offsets) {
+        MOZ_ASSERT(staticLinkData_.pod.builtinThunkOffsets[builtin] == 0);
+        staticLinkData_.pod.builtinThunkOffsets[builtin] = offsets.begin;
+        return codeRanges_.append(CodeRange(builtin, offsets));
     }
     bool addExit(wasm::MallocSig&& sig, unsigned ffiIndex, unsigned* exitIndex) {
         MOZ_ASSERT(!isFinished());
         static_assert(sizeof(ExitDatum) % sizeof(void*) == 0, "word aligned");
         uint32_t globalDataOffset;
         if (!allocateGlobalBytes(sizeof(ExitDatum), sizeof(void*), &globalDataOffset))
             return false;
         *exitIndex = unsigned(exits_.length());
@@ -1222,65 +1193,65 @@ class AsmJSModule
     }
     uint32_t srcEndAfterCurly() const {
         MOZ_ASSERT(isFinished());
         return srcStart_ + pod.srcLengthWithRightBrace_;
     }
 
     // Lookup a callsite by the return pc (from the callee to the caller).
     // Return null if no callsite was found.
-    const jit::CallSite* lookupCallSite(void* returnAddress) const;
+    const wasm::CallSite* lookupCallSite(void* returnAddress) const;
 
     // Lookup the code range containing the given pc. Return null if no
     // code range was found.
     const CodeRange* lookupCodeRange(void* pc) const;
 
     // Lookup a heap access site by the pc which performs the access. Return
     // null if no heap access was found.
-    const jit::AsmJSHeapAccess* lookupHeapAccess(void* pc) const;
+    const wasm::HeapAccess* lookupHeapAccess(void* pc) const;
 
     // The global data section is placed after the executable code (i.e., at
     // offset codeBytes_) in the module's linear allocation. The global data
     // starts with some fixed allocations followed by interleaved global,
     // function-pointer table and exit allocations.
     uint32_t offsetOfGlobalData() const {
         MOZ_ASSERT(isFinished());
         return pod.codeBytes_;
     }
     uint8_t* globalData() const {
         MOZ_ASSERT(isFinished());
         return codeBase() + offsetOfGlobalData();
     }
     static void assertGlobalDataOffsets() {
-        static_assert(jit::AsmJSActivationGlobalDataOffset == 0,
-                     "global data goes first");
-        static_assert(jit::AsmJSHeapGlobalDataOffset == jit::AsmJSActivationGlobalDataOffset + sizeof(void*),
-                      "then an AsmJSActivation*");
-        static_assert(jit::AsmJSNaN64GlobalDataOffset == jit::AsmJSHeapGlobalDataOffset + sizeof(uint8_t*),
-                      "then a pointer to the heap");
-        static_assert(jit::AsmJSNaN32GlobalDataOffset == jit::AsmJSNaN64GlobalDataOffset + sizeof(double),
+        static_assert(wasm::ActivationGlobalDataOffset == 0,
+                      "an AsmJSActivation* goes first");
+        static_assert(wasm::HeapGlobalDataOffset == wasm::ActivationGlobalDataOffset + sizeof(void*),
+                      "then a pointer to the heap");
+        static_assert(wasm::NaN64GlobalDataOffset == wasm::HeapGlobalDataOffset + sizeof(uint8_t*),
+                      "then a 64-bit NaN");
+        static_assert(wasm::NaN32GlobalDataOffset == wasm::NaN64GlobalDataOffset + sizeof(double),
                       "then a 32-bit NaN");
-        static_assert(sInitialGlobalDataBytes == jit::AsmJSNaN32GlobalDataOffset + sizeof(float),
-                      "then a 64-bit NaN");
+        static_assert(sInitialGlobalDataBytes == wasm::NaN32GlobalDataOffset + sizeof(float),
+                      "then all the normal global data (globals, exits, func-ptr-tables)");
     }
-    static const uint32_t sInitialGlobalDataBytes = jit::AsmJSNaN32GlobalDataOffset + sizeof(float);
+    static const uint32_t sInitialGlobalDataBytes = wasm::NaN32GlobalDataOffset + sizeof(float);
 
     AsmJSActivation*& activation() const {
         MOZ_ASSERT(isFinished());
-        return *(AsmJSActivation**)(globalData() + jit::AsmJSActivationGlobalDataOffset);
+        return *(AsmJSActivation**)(globalData() + wasm::ActivationGlobalDataOffset);
     }
     bool active() const {
         return activation() != nullptr;
     }
   private:
     // The pointer may reference shared memory, use with care.
     // Generally you want to use maybeHeap(), not heapDatum().
     uint8_t*& heapDatum() const {
         MOZ_ASSERT(isFinished());
-        return *(uint8_t**)(globalData() + jit::AsmJSHeapGlobalDataOffset);
+        return *(uint8_t**)(globalData() + wasm::HeapGlobalDataOffset);
     }
   public:
 
     /*************************************************************************/
     // These functions are called after finish() but before staticallyLink():
 
     bool addRelativeLink(RelativeLink link) {
         MOZ_ASSERT(isFinished());
--- a/js/src/asmjs/AsmJSSignalHandlers.cpp
+++ b/js/src/asmjs/AsmJSSignalHandlers.cpp
@@ -594,17 +594,17 @@ ComputeAccessAddress(EMULATOR_CONTEXT* c
         result += index * (1 << address.scale());
     }
 
     return reinterpret_cast<uint8_t*>(result);
 }
 
 MOZ_COLD static uint8_t*
 EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
-                  const AsmJSHeapAccess* heapAccess, const AsmJSModule& module)
+                  const HeapAccess* heapAccess, const AsmJSModule& module)
 {
     MOZ_RELEASE_ASSERT(module.containsFunctionPC(pc));
     MOZ_RELEASE_ASSERT(module.usesSignalHandlersForOOB());
     MOZ_RELEASE_ASSERT(!heapAccess->hasLengthCheck());
     MOZ_RELEASE_ASSERT(heapAccess->insnOffset() == (pc - module.codeBase()));
 
     // Disassemble the instruction which caused the trap so that we can extract
     // information about it and decide what to do.
@@ -780,17 +780,17 @@ HandleFault(PEXCEPTION_POINTERS exceptio
             module.containsFunctionPC(activation->resumePC()) &&
             module.lookupHeapAccess(activation->resumePC()))
         {
             return true;
         }
         return false;
     }
 
-    const AsmJSHeapAccess* heapAccess = module.lookupHeapAccess(pc);
+    const HeapAccess* heapAccess = module.lookupHeapAccess(pc);
     if (!heapAccess)
         return false;
 
     *ppc = EmulateHeapAccess(context, pc, faultingAddress, heapAccess, module);
     return true;
 }
 
 static LONG WINAPI
@@ -910,17 +910,17 @@ HandleMachException(JSRuntime* rt, const
     uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(request.body.code[1]);
     if (!module.maybeHeap() ||
         faultingAddress < module.maybeHeap() ||
         faultingAddress >= module.maybeHeap() + AsmJSMappedSize)
     {
         return false;
     }
 
-    const AsmJSHeapAccess* heapAccess = module.lookupHeapAccess(pc);
+    const HeapAccess* heapAccess = module.lookupHeapAccess(pc);
     if (!heapAccess)
         return false;
 
     *ppc = EmulateHeapAccess(&context, pc, faultingAddress, heapAccess, module);
 
     // Update the thread state with the new pc and register values.
     kret = thread_set_state(rtThread, float_state, (thread_state_t)&context.float_, float_state_count);
     if (kret != KERN_SUCCESS)
@@ -1120,17 +1120,17 @@ HandleFault(int signum, siginfo_t* info,
     uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(info->si_addr);
     if (!module.maybeHeap() ||
         faultingAddress < module.maybeHeap() ||
         faultingAddress >= module.maybeHeap() + AsmJSMappedSize)
     {
         return false;
     }
 
-    const AsmJSHeapAccess* heapAccess = module.lookupHeapAccess(pc);
+    const HeapAccess* heapAccess = module.lookupHeapAccess(pc);
     if (!heapAccess)
         return false;
 
     *ppc = EmulateHeapAccess(context, pc, faultingAddress, heapAccess, module);
 
     return true;
 }
 
--- a/js/src/asmjs/Wasm.h
+++ b/js/src/asmjs/Wasm.h
@@ -160,17 +160,17 @@ ToMIRType(ExprType et)
 // The Sig class represents a WebAssembly function signature which takes a list
 // of value types and returns an expression type. The engine uses two in-memory
 // representations of the argument Vector's memory (when elements do not fit
 // inline): normal malloc allocation (via SystemAllocPolicy) and allocation in
 // a LifoAlloc (via LifoAllocPolicy). The former Sig objects can have any
 // lifetime since they own the memory. The latter Sig objects must not outlive
 // the associated LifoAlloc mark/release interval (which is currently the
 // duration of module validation+compilation). Thus, long-lived objects like
-// AsmJSModule must use malloced allocation.
+// WasmModule must use malloced allocation.
 
 template <class AllocPolicy>
 class Sig
 {
   public:
     typedef Vector<ValType, 4, AllocPolicy> ArgVector;
 
   private:
@@ -241,12 +241,334 @@ class LifoSig : public Sig<LifoAllocPoli
             return nullptr;
         ArgVector args(lifo);
         if (!args.appendAll(src.args()))
             return nullptr;
         return new (mem) LifoSig(Move(args), src.ret());
     }
 };
 
+// While the frame-pointer chain allows the stack to be unwound without
+// metadata, Error.stack still needs to know the line/column of every call in
+// the chain. A CallSiteDesc describes a single callsite to which CallSite adds
+// the metadata necessary to walk up to the next frame. Lastly CallSiteAndTarget
+// adds the function index of the callee.
+
+class CallSiteDesc
+{
+    uint32_t line_;
+    uint32_t column_ : 31;
+    uint32_t kind_ : 1;
+  public:
+    enum Kind {
+        Relative,  // pc-relative call
+        Register   // call *register
+    };
+    CallSiteDesc() {}
+    explicit CallSiteDesc(Kind kind)
+      : line_(0), column_(0), kind_(kind)
+    {}
+    CallSiteDesc(uint32_t line, uint32_t column, Kind kind)
+      : line_(line), column_(column), kind_(kind)
+    {
+        MOZ_ASSERT(column_ == column, "column must fit in 31 bits");
+    }
+    uint32_t line() const { return line_; }
+    uint32_t column() const { return column_; }
+    Kind kind() const { return Kind(kind_); }
+};
+
+class CallSite : public CallSiteDesc
+{
+    uint32_t returnAddressOffset_;
+    uint32_t stackDepth_;
+
+  public:
+    CallSite() {}
+
+    CallSite(CallSiteDesc desc, uint32_t returnAddressOffset, uint32_t stackDepth)
+      : CallSiteDesc(desc),
+        returnAddressOffset_(returnAddressOffset),
+        stackDepth_(stackDepth)
+    { }
+
+    void setReturnAddressOffset(uint32_t r) { returnAddressOffset_ = r; }
+    void offsetReturnAddressBy(int32_t o) { returnAddressOffset_ += o; }
+    uint32_t returnAddressOffset() const { return returnAddressOffset_; }
+
+    // The stackDepth measures the amount of stack space pushed since the
+    // function was called. In particular, this includes the pushed return
+    // address on all archs (whether or not the call instruction pushes the
+    // return address (x86/x64) or the prologue does (ARM/MIPS)).
+    uint32_t stackDepth() const { return stackDepth_; }
+};
+
+class CallSiteAndTarget : public CallSite
+{
+    uint32_t targetIndex_;
+
+  public:
+    CallSiteAndTarget(CallSite cs, uint32_t targetIndex)
+      : CallSite(cs), targetIndex_(targetIndex)
+    { }
+
+    static const uint32_t NOT_INTERNAL = UINT32_MAX;
+
+    bool isInternal() const { return targetIndex_ != NOT_INTERNAL; }
+    uint32_t targetIndex() const { MOZ_ASSERT(isInternal()); return targetIndex_; }
+};
+
+typedef Vector<CallSite, 0, SystemAllocPolicy> CallSiteVector;
+typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
+
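As an illustration of how the three classes above compose, here is a minimal sketch (the line, column, offsets and target index are hypothetical):

    // A pc-relative call recorded at source line 10, column 4, whose return
    // address is 0x40 bytes into the module code with 32 bytes of stack
    // pushed since function entry, targeting internal function index 3.
    wasm::CallSiteDesc desc(10, 4, wasm::CallSiteDesc::Relative);
    wasm::CallSite site(desc, /* returnAddressOffset = */ 0x40, /* stackDepth = */ 32);
    wasm::CallSiteAndTarget target(site, /* targetIndex = */ 3);
    MOZ_ASSERT(target.isInternal() && target.kind() == wasm::CallSiteDesc::Relative);
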
+// Summarizes a heap access made by wasm code that needs to be patched later
+// and/or looked up by the wasm signal handlers. Different architectures need
+// to know different things (x64: offset and length, ARM: where to patch in
+// heap length, x86: where to patch in heap length and base).
+
+#if defined(JS_CODEGEN_X86)
+class HeapAccess
+{
+    uint32_t insnOffset_;
+    uint8_t opLength_;  // the length of the load/store instruction
+    uint8_t cmpDelta_;  // the number of bytes from the cmp to the load/store instruction
+
+  public:
+    HeapAccess() = default;
+    static const uint32_t NoLengthCheck = UINT32_MAX;
+
+    // If 'cmp' equals 'insnOffset' or if it is not supplied then the
+    // cmpDelta_ is zero indicating that there is no length to patch.
+    HeapAccess(uint32_t insnOffset, uint32_t after, uint32_t cmp = NoLengthCheck) {
+        mozilla::PodZero(this);  // zero padding for Valgrind
+        insnOffset_ = insnOffset;
+        opLength_ = after - insnOffset;
+        cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
+    }
+
+    uint32_t insnOffset() const { return insnOffset_; }
+    void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
+    void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
+    void* patchHeapPtrImmAt(uint8_t* code) const { return code + (insnOffset_ + opLength_); }
+    bool hasLengthCheck() const { return cmpDelta_ > 0; }
+    void* patchLengthAt(uint8_t* code) const {
+        MOZ_ASSERT(hasLengthCheck());
+        return code + (insnOffset_ - cmpDelta_);
+    }
+};
+#elif defined(JS_CODEGEN_X64)
+class HeapAccess
+{
+  public:
+    enum WhatToDoOnOOB {
+        CarryOn, // loads return undefined, stores do nothing.
+        Throw    // throw a RangeError
+    };
+
+  private:
+    uint32_t insnOffset_;
+    uint8_t offsetWithinWholeSimdVector_; // if this is e.g. the Z of an XYZ
+    bool throwOnOOB_;                     // should we throw on OOB?
+    uint8_t cmpDelta_;                    // the number of bytes from the cmp to the load/store instruction
+
+  public:
+    HeapAccess() = default;
+    static const uint32_t NoLengthCheck = UINT32_MAX;
+
+    // If 'cmp' equals 'insnOffset' or if it is not supplied then the
+    // cmpDelta_ is zero indicating that there is no length to patch.
+    HeapAccess(uint32_t insnOffset, WhatToDoOnOOB oob,
+               uint32_t cmp = NoLengthCheck,
+               uint32_t offsetWithinWholeSimdVector = 0)
+    {
+        mozilla::PodZero(this);  // zero padding for Valgrind
+        insnOffset_ = insnOffset;
+        offsetWithinWholeSimdVector_ = offsetWithinWholeSimdVector;
+        throwOnOOB_ = oob == Throw;
+        cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
+        MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector);
+    }
+
+    uint32_t insnOffset() const { return insnOffset_; }
+    void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
+    void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
+    bool throwOnOOB() const { return throwOnOOB_; }
+    uint32_t offsetWithinWholeSimdVector() const { return offsetWithinWholeSimdVector_; }
+    bool hasLengthCheck() const { return cmpDelta_ > 0; }
+    void* patchLengthAt(uint8_t* code) const {
+        MOZ_ASSERT(hasLengthCheck());
+        return code + (insnOffset_ - cmpDelta_);
+    }
+};
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+      defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+class HeapAccess
+{
+    uint32_t insnOffset_;
+  public:
+    HeapAccess() = default;
+    explicit HeapAccess(uint32_t insnOffset) : insnOffset_(insnOffset) {}
+    uint32_t insnOffset() const { return insnOffset_; }
+    void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
+    void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
+};
+#elif defined(JS_CODEGEN_NONE)
+class HeapAccess { };
+#endif
+
+typedef Vector<HeapAccess, 0, SystemAllocPolicy> HeapAccessVector;
+
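As a sketch of how the x64 variant above is used (hypothetical code offsets; codeBase stands in for the module's code base pointer):

    // A length-checked load at code offset 0x120 that throws on OOB; the cmp
    // against the heap length was emitted 7 bytes earlier, at offset 0x119.
    wasm::HeapAccess access(/* insnOffset = */ 0x120, wasm::HeapAccess::Throw, /* cmp = */ 0x119);
    MOZ_ASSERT(access.hasLengthCheck());
    void* lengthImm = access.patchLengthAt(codeBase);  // where to patch the heap length
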
+// A wasm::Builtin represents a function implemented by the engine that is
+// called directly from wasm code and should show up in the callstack.
+
+enum class Builtin : uint16_t
+{
+    ToInt32,
+#if defined(JS_CODEGEN_ARM)
+    aeabi_idivmod,
+    aeabi_uidivmod,
+    AtomicCmpXchg,
+    AtomicXchg,
+    AtomicFetchAdd,
+    AtomicFetchSub,
+    AtomicFetchAnd,
+    AtomicFetchOr,
+    AtomicFetchXor,
+#endif
+    ModD,
+    SinD,
+    CosD,
+    TanD,
+    ASinD,
+    ACosD,
+    ATanD,
+    CeilD,
+    CeilF,
+    FloorD,
+    FloorF,
+    ExpD,
+    LogD,
+    PowD,
+    ATan2D,
+    Limit
+};
+
+// A wasm::SymbolicAddress represents a pointer to a well-known function or
+// object that is embedded in wasm code. Since wasm code is serialized and
+// later deserialized into a different address space, symbolic addresses must be
+// used for *all* pointers into the address space. The MacroAssembler records a
+// list of all SymbolicAddresses and the offsets of their use in the code for
+// later patching during static linking.
+
+enum class SymbolicAddress
+{
+    ToInt32         = unsigned(Builtin::ToInt32),
+#if defined(JS_CODEGEN_ARM)
+    aeabi_idivmod   = unsigned(Builtin::aeabi_idivmod),
+    aeabi_uidivmod  = unsigned(Builtin::aeabi_uidivmod),
+    AtomicCmpXchg   = unsigned(Builtin::AtomicCmpXchg),
+    AtomicXchg      = unsigned(Builtin::AtomicXchg),
+    AtomicFetchAdd  = unsigned(Builtin::AtomicFetchAdd),
+    AtomicFetchSub  = unsigned(Builtin::AtomicFetchSub),
+    AtomicFetchAnd  = unsigned(Builtin::AtomicFetchAnd),
+    AtomicFetchOr   = unsigned(Builtin::AtomicFetchOr),
+    AtomicFetchXor  = unsigned(Builtin::AtomicFetchXor),
+#endif
+    ModD            = unsigned(Builtin::ModD),
+    SinD            = unsigned(Builtin::SinD),
+    CosD            = unsigned(Builtin::CosD),
+    TanD            = unsigned(Builtin::TanD),
+    ASinD           = unsigned(Builtin::ASinD),
+    ACosD           = unsigned(Builtin::ACosD),
+    ATanD           = unsigned(Builtin::ATanD),
+    CeilD           = unsigned(Builtin::CeilD),
+    CeilF           = unsigned(Builtin::CeilF),
+    FloorD          = unsigned(Builtin::FloorD),
+    FloorF          = unsigned(Builtin::FloorF),
+    ExpD            = unsigned(Builtin::ExpD),
+    LogD            = unsigned(Builtin::LogD),
+    PowD            = unsigned(Builtin::PowD),
+    ATan2D          = unsigned(Builtin::ATan2D),
+    Runtime,
+    RuntimeInterruptUint32,
+    StackLimit,
+    ReportOverRecursed,
+    OnDetached,
+    OnOutOfBounds,
+    OnImpreciseConversion,
+    HandleExecutionInterrupt,
+    InvokeFromAsmJS_Ignore,
+    InvokeFromAsmJS_ToInt32,
+    InvokeFromAsmJS_ToNumber,
+    CoerceInPlace_ToInt32,
+    CoerceInPlace_ToNumber,
+    Limit
+};
+
+static inline SymbolicAddress
+BuiltinToImmediate(Builtin b)
+{
+    return SymbolicAddress(b);
+}
+
+static inline bool
+ImmediateIsBuiltin(SymbolicAddress imm, Builtin* builtin)
+{
+    if (uint32_t(imm) < uint32_t(Builtin::Limit)) {
+        *builtin = Builtin(imm);
+        return true;
+    }
+    return false;
+}
+
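Because the Builtin values occupy the low range of SymbolicAddress, the two helpers above form a round trip; a small usage sketch:

    wasm::SymbolicAddress imm = wasm::BuiltinToImmediate(wasm::Builtin::SinD);
    wasm::Builtin builtin;
    if (wasm::ImmediateIsBuiltin(imm, &builtin))
        MOZ_ASSERT(builtin == wasm::Builtin::SinD);  // low range maps back to the builtin
    MOZ_ASSERT(!wasm::ImmediateIsBuiltin(wasm::SymbolicAddress::StackLimit, &builtin));
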
+// An ExitReason describes the possible reasons for leaving compiled wasm code
+// or the state of not having left compiled wasm code (ExitReason::None).
+
+class ExitReason
+{
+  public:
+    // List of reasons for execution leaving compiled wasm code (or None, if
+    // control hasn't exited).
+    enum Kind
+    {
+        None,       // default state, the pc is in wasm code
+        Jit,        // fast-path exit to JIT code
+        Slow,       // general case exit to C++ Invoke
+        Interrupt,  // executing an interrupt callback
+        Builtin     // calling into a builtin (native) function
+    };
+
+  private:
+    Kind kind_;
+    wasm::Builtin builtin_;
+
+  public:
+    ExitReason() = default;
+    MOZ_IMPLICIT ExitReason(Kind kind) : kind_(kind) { MOZ_ASSERT(kind != Builtin); }
+    MOZ_IMPLICIT ExitReason(wasm::Builtin builtin) : kind_(Builtin), builtin_(builtin) {}
+    Kind kind() const { return kind_; }
+    wasm::Builtin builtin() const { MOZ_ASSERT(kind_ == Builtin); return builtin_; }
+
+    uint32_t pack() const {
+        static_assert(sizeof(wasm::Builtin) == 2, "fits");
+        return uint16_t(kind_) | (uint16_t(builtin_) << 16);
+    }
+    static ExitReason unpack(uint32_t u32) {
+        static_assert(sizeof(wasm::Builtin) == 2, "fits");
+        ExitReason r;
+        r.kind_ = Kind(uint16_t(u32));
+        r.builtin_ = wasm::Builtin(uint16_t(u32 >> 16));
+        return r;
+    }
+};
+
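The pack/unpack pair lets an ExitReason travel through a single uint32_t (e.g. in profiling metadata); a quick round-trip sketch:

    wasm::ExitReason reason(wasm::Builtin::PowD);
    uint32_t packed = reason.pack();
    wasm::ExitReason restored = wasm::ExitReason::unpack(packed);
    MOZ_ASSERT(restored.kind() == wasm::ExitReason::Builtin);
    MOZ_ASSERT(restored.builtin() == wasm::Builtin::PowD);
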
+// A hoisting of constants that would otherwise require #including AsmJSModule.h
+// everywhere. Values are asserted in AsmJSModule.h.
+
+static const unsigned ActivationGlobalDataOffset = 0;
+static const unsigned HeapGlobalDataOffset = sizeof(void*);
+static const unsigned NaN64GlobalDataOffset = 2 * sizeof(void*);
+static const unsigned NaN32GlobalDataOffset = 2 * sizeof(void*) + sizeof(double);
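For concreteness, on a 64-bit target these constants work out as follows (illustrative arithmetic only):

    // sizeof(void*) == 8, so:
    //   ActivationGlobalDataOffset == 0   (AsmJSActivation*)
    //   HeapGlobalDataOffset       == 8   (uint8_t* heap)
    //   NaN64GlobalDataOffset      == 16  (double NaN)
    //   NaN32GlobalDataOffset      == 24  (float NaN)
    // giving sInitialGlobalDataBytes == 28 before the interleaved global,
    // func-ptr-table and exit allocations.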
+
 } // namespace wasm
 } // namespace js
 
 #endif // asmjs_wasm_h
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -752,17 +752,17 @@ class FunctionCompiler
         }
 
         MAsmJSLoadFFIFunc* ptrFun = MAsmJSLoadFFIFunc::New(alloc(), globalDataOffset);
         curBlock_->add(ptrFun);
 
         return callPrivate(MAsmJSCall::Callee(ptrFun), call, ret, def);
     }
 
-    bool builtinCall(AsmJSImmKind builtin, const Call& call, ValType type, MDefinition** def)
+    bool builtinCall(Builtin builtin, const Call& call, ValType type, MDefinition** def)
     {
         return callPrivate(MAsmJSCall::Callee(builtin), call, ToExprType(type), def);
     }
 
     /*********************************************** Control flow generation */
 
     inline bool inDeadCode() const {
         return curBlock_ == nullptr;
@@ -1623,17 +1623,17 @@ EmitMathBuiltinCall(FunctionCompiler& f,
     f.startCallArgs(&call);
 
     MDefinition* firstArg;
     if (!EmitF32Expr(f, &firstArg) || !f.passArg(firstArg, ValType::F32, &call))
         return false;
 
     f.finishCallArgs(&call);
 
-    AsmJSImmKind callee = f32 == F32::Ceil ? AsmJSImm_CeilF : AsmJSImm_FloorF;
+    Builtin callee = f32 == F32::Ceil ? Builtin::CeilF : Builtin::FloorF;
     return f.builtinCall(callee, call, ValType::F32, def);
 }
 
 static bool
 EmitMathBuiltinCall(FunctionCompiler& f, F64 f64, MDefinition** def)
 {
     uint32_t lineno, column;
     ReadCallLineCol(f, &lineno, &column);
@@ -1646,30 +1646,30 @@ EmitMathBuiltinCall(FunctionCompiler& f,
         return false;
 
     if (f64 == F64::Pow || f64 == F64::Atan2) {
         MDefinition* secondArg;
         if (!EmitF64Expr(f, &secondArg) || !f.passArg(secondArg, ValType::F64, &call))
             return false;
     }
 
-    AsmJSImmKind callee;
+    Builtin callee;
     switch (f64) {
-      case F64::Ceil:  callee = AsmJSImm_CeilD; break;
-      case F64::Floor: callee = AsmJSImm_FloorD; break;
-      case F64::Sin:   callee = AsmJSImm_SinD; break;
-      case F64::Cos:   callee = AsmJSImm_CosD; break;
-      case F64::Tan:   callee = AsmJSImm_TanD; break;
-      case F64::Asin:  callee = AsmJSImm_ASinD; break;
-      case F64::Acos:  callee = AsmJSImm_ACosD; break;
-      case F64::Atan:  callee = AsmJSImm_ATanD; break;
-      case F64::Exp:   callee = AsmJSImm_ExpD; break;
-      case F64::Log:   callee = AsmJSImm_LogD; break;
-      case F64::Pow:   callee = AsmJSImm_PowD; break;
-      case F64::Atan2: callee = AsmJSImm_ATan2D; break;
+      case F64::Ceil:  callee = Builtin::CeilD; break;
+      case F64::Floor: callee = Builtin::FloorD; break;
+      case F64::Sin:   callee = Builtin::SinD; break;
+      case F64::Cos:   callee = Builtin::CosD; break;
+      case F64::Tan:   callee = Builtin::TanD; break;
+      case F64::Asin:  callee = Builtin::ASinD; break;
+      case F64::Acos:  callee = Builtin::ACosD; break;
+      case F64::Atan:  callee = Builtin::ATanD; break;
+      case F64::Exp:   callee = Builtin::ExpD; break;
+      case F64::Log:   callee = Builtin::LogD; break;
+      case F64::Pow:   callee = Builtin::PowD; break;
+      case F64::Atan2: callee = Builtin::ATan2D; break;
       default: MOZ_CRASH("unexpected double math builtin callee");
     }
 
     f.finishCallArgs(&call);
 
     return f.builtinCall(callee, call, ValType::F64, def);
 }
 
--- a/js/src/asmjs/WasmStubs.cpp
+++ b/js/src/asmjs/WasmStubs.cpp
@@ -14,26 +14,28 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #include "asmjs/WasmStubs.h"
 
 #include "mozilla/ArrayUtils.h"
+#include "mozilla/EnumeratedRange.h"
 
 #include "asmjs/AsmJSModule.h"
 
 #include "jit/MacroAssembler-inl.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
 
 using mozilla::ArrayLength;
+using mozilla::MakeEnumeratedRange;
 
 typedef Vector<MIRType, 8, SystemAllocPolicy> MIRTypeVector;
 typedef ABIArgIter<MIRTypeVector> ABIArgMIRTypeIter;
 typedef ABIArgIter<MallocSig::ArgVector> ABIArgValTypeIter;
 
 static void
 AssertStackAlignment(MacroAssembler& masm, uint32_t alignment, uint32_t addBeforeAssert = 0)
 {
@@ -297,78 +299,80 @@ GenerateEntry(MacroAssembler& masm, AsmJ
 // pushes an AsmJSFrame on the stack, that means we must rebuild the stack
 // frame. Fortunately, these are low arity functions and everything is passed in
 // regs on everything but x86 anyhow.
 //
 // NB: Since this thunk is being injected at system ABI callsites, it must
 //     preserve the argument registers (going in) and the return register
 //     (coming out) and preserve non-volatile registers.
 static bool
-GenerateBuiltinThunk(MacroAssembler& masm, AsmJSModule& module, AsmJSExit::BuiltinKind builtin)
+GenerateBuiltinThunk(MacroAssembler& masm, AsmJSModule& module, Builtin builtin)
 {
     MIRTypeVector args;
     switch (builtin) {
-      case AsmJSExit::Builtin_ToInt32:
+      case Builtin::ToInt32:
         MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
         break;
 #if defined(JS_CODEGEN_ARM)
-      case AsmJSExit::Builtin_IDivMod:
-      case AsmJSExit::Builtin_UDivMod:
+      case Builtin::aeabi_idivmod:
+      case Builtin::aeabi_uidivmod:
         MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
         MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
         break;
-      case AsmJSExit::Builtin_AtomicCmpXchg:
+      case Builtin::AtomicCmpXchg:
         MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
         MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
         MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
         MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
         break;
-      case AsmJSExit::Builtin_AtomicXchg:
-      case AsmJSExit::Builtin_AtomicFetchAdd:
-      case AsmJSExit::Builtin_AtomicFetchSub:
-      case AsmJSExit::Builtin_AtomicFetchAnd:
-      case AsmJSExit::Builtin_AtomicFetchOr:
-      case AsmJSExit::Builtin_AtomicFetchXor:
+      case Builtin::AtomicXchg:
+      case Builtin::AtomicFetchAdd:
+      case Builtin::AtomicFetchSub:
+      case Builtin::AtomicFetchAnd:
+      case Builtin::AtomicFetchOr:
+      case Builtin::AtomicFetchXor:
         MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
         MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
         MOZ_ALWAYS_TRUE(args.append(MIRType_Int32));
         break;
 #endif
-      case AsmJSExit::Builtin_SinD:
-      case AsmJSExit::Builtin_CosD:
-      case AsmJSExit::Builtin_TanD:
-      case AsmJSExit::Builtin_ASinD:
-      case AsmJSExit::Builtin_ACosD:
-      case AsmJSExit::Builtin_ATanD:
-      case AsmJSExit::Builtin_CeilD:
-      case AsmJSExit::Builtin_FloorD:
-      case AsmJSExit::Builtin_ExpD:
-      case AsmJSExit::Builtin_LogD:
+      case Builtin::SinD:
+      case Builtin::CosD:
+      case Builtin::TanD:
+      case Builtin::ASinD:
+      case Builtin::ACosD:
+      case Builtin::ATanD:
+      case Builtin::CeilD:
+      case Builtin::FloorD:
+      case Builtin::ExpD:
+      case Builtin::LogD:
         MOZ_ALWAYS_TRUE(args.append(MIRType_Double));
         break;
-      case AsmJSExit::Builtin_ModD:
-      case AsmJSExit::Builtin_PowD:
-      case AsmJSExit::Builtin_ATan2D:
+      case Builtin::ModD:
+      case Builtin::PowD:
+      case Builtin::ATan2D:
         MOZ_ALWAYS_TRUE(args.append(MIRType_Double));
         MOZ_ALWAYS_TRUE(args.append(MIRType_Double));
         break;
-      case AsmJSExit::Builtin_CeilF:
-      case AsmJSExit::Builtin_FloorF:
+      case Builtin::CeilF:
+      case Builtin::FloorF:
         MOZ_ALWAYS_TRUE(args.append(MIRType_Float32));
         break;
-      case AsmJSExit::Builtin_Limit:
+      case Builtin::Limit:
         MOZ_CRASH("Bad builtin");
     }
 
+    MOZ_ASSERT(args.length() <= 4);
+    static_assert(MIRTypeVector::InlineLength >= 4, "infallibility of append");
+
     MOZ_ASSERT(masm.framePushed() == 0);
-
     uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args);
 
     AsmJSProfilingOffsets offsets;
-    GenerateAsmJSExitPrologue(masm, framePushed, AsmJSExit::Builtin(builtin), &offsets);
+    GenerateAsmJSExitPrologue(masm, framePushed, ExitReason(builtin), &offsets);
 
     for (ABIArgMIRTypeIter i(args); !i.done(); i++) {
         if (i->kind() != ABIArg::Stack)
             continue;
 #if !defined(JS_CODEGEN_ARM)
         unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed();
         Address srcAddr(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
         Address dstAddr(masm.getStackPointer(), i->offsetFromArgBase());
@@ -381,19 +385,19 @@ GenerateBuiltinThunk(MacroAssembler& mas
             masm.storeDouble(ScratchDoubleReg, dstAddr);
         }
 #else
         MOZ_CRASH("Architecture should have enough registers for all builtin calls");
 #endif
     }
 
     AssertStackAlignment(masm, ABIStackAlignment);
-    masm.call(BuiltinToImmKind(builtin));
+    masm.call(BuiltinToImmediate(builtin));
 
-    GenerateAsmJSExitEpilogue(masm, framePushed, AsmJSExit::Builtin(builtin), &offsets);
+    GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason(builtin), &offsets);
 
     if (masm.oom())
         return false;
 
     offsets.end = masm.currentOffset();
     return module.addBuiltinThunkCodeRange(builtin, offsets);
 }
 
@@ -447,17 +451,17 @@ CheckForHeapDetachment(MacroAssembler& m
 {
     if (!module.hasArrayView())
         return;
 
     MOZ_ASSERT(int(masm.framePushed()) >= int(ShadowStackSpace));
     AssertStackAlignment(masm, ABIStackAlignment);
 #if defined(JS_CODEGEN_X86)
     CodeOffset offset = masm.movlWithPatch(PatchedAbsoluteAddress(), scratch);
-    masm.append(AsmJSGlobalAccess(offset, AsmJSHeapGlobalDataOffset));
+    masm.append(AsmJSGlobalAccess(offset, HeapGlobalDataOffset));
     masm.branchTestPtr(Assembler::Zero, scratch, scratch, onDetached);
 #else
     masm.branchTestPtr(Assembler::Zero, HeapReg, HeapReg, onDetached);
 #endif
 }
 
 // Generate a stub that is called via the internal ABI derived from the
 // signature of the exit and calls into an appropriate InvokeFromAsmJS_* C++
@@ -481,17 +485,17 @@ GenerateInterpExit(MacroAssembler& masm,
     //   | stack args | padding | Value argv[] | padding | retaddr | caller stack args |
     // The padding between stack args and argv ensures that argv is aligned. The
     // padding between argv and retaddr ensures that sp is aligned.
     unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
     unsigned argBytes = Max<size_t>(1, exit.sig().args().length()) * sizeof(Value);
     unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
 
     AsmJSProfilingOffsets offsets;
-    GenerateAsmJSExitPrologue(masm, framePushed, AsmJSExit::SlowFFI, &offsets);
+    GenerateAsmJSExitPrologue(masm, framePushed, ExitReason::Slow, &offsets);
 
     // Fill the argument array.
     unsigned offsetToCallerStackArgs = sizeof(AsmJSFrame) + masm.framePushed();
     Register scratch = ABIArgGenerator::NonArgReturnReg0;
     FillArgumentArray(masm, exit.sig().args(), argOffset, offsetToCallerStackArgs, scratch);
 
     // Prepare the arguments for the call to InvokeFromAsmJS_*.
     ABIArgMIRTypeIter i(invokeArgTypes);
@@ -521,44 +525,44 @@ GenerateInterpExit(MacroAssembler& masm,
     }
     i++;
     MOZ_ASSERT(i.done());
 
     // Make the call, test whether it succeeded, and extract the return value.
     AssertStackAlignment(masm, ABIStackAlignment);
     switch (exit.sig().ret()) {
       case ExprType::Void:
-        masm.call(AsmJSImmPtr(AsmJSImm_InvokeFromAsmJS_Ignore));
+        masm.call(SymbolicAddress::InvokeFromAsmJS_Ignore);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         break;
       case ExprType::I32:
-        masm.call(AsmJSImmPtr(AsmJSImm_InvokeFromAsmJS_ToInt32));
+        masm.call(SymbolicAddress::InvokeFromAsmJS_ToInt32);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         masm.unboxInt32(argv, ReturnReg);
         break;
       case ExprType::I64:
         MOZ_CRASH("no int64 in asm.js");
       case ExprType::F32:
         MOZ_CRASH("Float32 shouldn't be returned from a FFI");
       case ExprType::F64:
-        masm.call(AsmJSImmPtr(AsmJSImm_InvokeFromAsmJS_ToNumber));
+        masm.call(SymbolicAddress::InvokeFromAsmJS_ToNumber);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         masm.loadDouble(argv, ReturnDoubleReg);
         break;
       case ExprType::I32x4:
       case ExprType::F32x4:
         MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
     }
 
     // The heap pointer may have changed during the FFI, so reload it and test
     // for detachment.
     masm.loadAsmJSHeapRegisterFromGlobalData();
     CheckForHeapDetachment(masm, module, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
 
-    GenerateAsmJSExitEpilogue(masm, framePushed, AsmJSExit::SlowFFI, &offsets);
+    GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason::Slow, &offsets);
 
     if (masm.oom())
         return false;
 
     offsets.end = masm.currentOffset();
     exit.initInterpOffset(offsets.begin);
     return module.addCodeRange(AsmJSModule::CodeRange::SlowFFI, offsets);
 }
@@ -588,17 +592,17 @@ GenerateIonExit(MacroAssembler& masm, As
     static_assert(AsmJSStackAlignment >= JitStackAlignment, "subsumes");
     unsigned sizeOfRetAddr = sizeof(void*);
     unsigned ionFrameBytes = 3 * sizeof(void*) + (1 + exit.sig().args().length()) * sizeof(Value);
     unsigned totalIonBytes = sizeOfRetAddr + ionFrameBytes + MaybeSavedGlobalReg;
     unsigned ionFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalIonBytes) -
                               sizeOfRetAddr;
 
     AsmJSProfilingOffsets offsets;
-    GenerateAsmJSExitPrologue(masm, ionFramePushed, AsmJSExit::JitFFI, &offsets);
+    GenerateAsmJSExitPrologue(masm, ionFramePushed, ExitReason::Jit, &offsets);
 
     // 1. Descriptor
     size_t argOffset = 0;
     uint32_t descriptor = MakeFrameDescriptor(ionFramePushed, JitFrame_Entry);
     masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(masm.getStackPointer(), argOffset));
     argOffset += sizeof(size_t);
 
     // 2. Callee
@@ -734,17 +738,17 @@ GenerateIonExit(MacroAssembler& masm, As
         //   rt->jitActivation = prevJitActivation_;
         // On the ARM store8() uses the secondScratchReg (lr) as a temp.
         size_t offsetOfActivation = JSRuntime::offsetOfActivation();
         size_t offsetOfJitTop = offsetof(JSRuntime, jitTop);
         size_t offsetOfJitJSContext = offsetof(JSRuntime, jitJSContext);
         size_t offsetOfJitActivation = offsetof(JSRuntime, jitActivation);
         size_t offsetOfProfilingActivation = JSRuntime::offsetOfProfilingActivation();
 
-        masm.movePtr(AsmJSImmPtr(AsmJSImm_Runtime), reg0);
+        masm.movePtr(SymbolicAddress::Runtime, reg0);
         masm.loadPtr(Address(reg0, offsetOfActivation), reg1);
 
         //   rt->jitTop = prevJitTop_;
         masm.loadPtr(Address(reg1, JitActivation::offsetOfPrevJitTop()), reg2);
         masm.storePtr(reg2, Address(reg0, offsetOfJitTop));
 
         //   rt->profilingActivation = rt->activation()->prevProfiling_;
         masm.loadPtr(Address(reg1, Activation::offsetOfPrevProfiling()), reg2);
@@ -802,17 +806,17 @@ GenerateIonExit(MacroAssembler& masm, As
     Label done;
     masm.bind(&done);
 
     // The heap pointer has to be reloaded anyway since Ion could have clobbered
     // it. Additionally, the FFI may have detached the heap buffer.
     masm.loadAsmJSHeapRegisterFromGlobalData();
     CheckForHeapDetachment(masm, module, ABIArgGenerator::NonReturn_VolatileReg0, onDetached);
 
-    GenerateAsmJSExitEpilogue(masm, masm.framePushed(), AsmJSExit::JitFFI, &offsets);
+    GenerateAsmJSExitEpilogue(masm, masm.framePushed(), ExitReason::Jit, &offsets);
 
     if (oolConvert.used()) {
         masm.bind(&oolConvert);
         masm.setFramePushed(nativeFramePushed);
 
         // Coercion calls use the following stack layout (sp grows to the left):
         //   | args | padding | Value argv[1] | padding | exit AsmJSFrame |
         MIRTypeVector coerceArgTypes;
@@ -835,22 +839,22 @@ GenerateIonExit(MacroAssembler& masm, As
         }
         i++;
         MOZ_ASSERT(i.done());
 
         // Call coercion function
         AssertStackAlignment(masm, ABIStackAlignment);
         switch (exit.sig().ret()) {
           case ExprType::I32:
-            masm.call(AsmJSImmPtr(AsmJSImm_CoerceInPlace_ToInt32));
+            masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
             masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
             masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnReg);
             break;
           case ExprType::F64:
-            masm.call(AsmJSImmPtr(AsmJSImm_CoerceInPlace_ToNumber));
+            masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
             masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
             masm.loadDouble(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnDoubleReg);
             break;
           default:
             MOZ_CRASH("Unsupported convert type");
         }
 
         masm.jump(&done);
@@ -877,17 +881,17 @@ GenerateOnDetachedExit(MacroAssembler& m
 {
     masm.haltingAlign(CodeAlignment);
     AsmJSOffsets offsets;
     offsets.begin = masm.currentOffset();
     masm.bind(onDetached);
 
     // For now, OnDetached always throws (see OnDetached comment).
     masm.assertStackAlignment(ABIStackAlignment);
-    masm.call(AsmJSImmPtr(AsmJSImm_OnDetached));
+    masm.call(SymbolicAddress::OnDetached);
     masm.jump(throwLabel);
 
     if (masm.oom())
         return false;
 
     offsets.end = masm.currentOffset();
     return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
 }
@@ -914,17 +918,17 @@ GenerateStackOverflowExit(MacroAssembler
     masm.storePtr(masm.getStackPointer(), Address(activation, AsmJSActivation::offsetOfFP()));
 
     // Prepare the stack for calling C++.
     if (uint32_t d = StackDecrementForCall(ABIStackAlignment, sizeof(AsmJSFrame), ShadowStackSpace))
         masm.subFromStackPtr(Imm32(d));
 
     // No need to restore the stack; the throw stub pops everything.
     masm.assertStackAlignment(ABIStackAlignment);
-    masm.call(AsmJSImmPtr(AsmJSImm_ReportOverRecursed));
+    masm.call(SymbolicAddress::ReportOverRecursed);
     masm.jump(throwLabel);
 
     if (masm.oom())
         return false;
 
     offsets.end = masm.currentOffset();
     return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
 }
@@ -934,24 +938,24 @@ GenerateStackOverflowExit(MacroAssembler
 // the interrupt which returns whether execution has been interrupted.
 static bool
 GenerateSyncInterruptExit(MacroAssembler& masm, AsmJSModule& module, Label* throwLabel)
 {
     masm.setFramePushed(0);
     unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, ShadowStackSpace);
 
     AsmJSProfilingOffsets offsets;
-    GenerateAsmJSExitPrologue(masm, framePushed, AsmJSExit::Interrupt, &offsets,
+    GenerateAsmJSExitPrologue(masm, framePushed, ExitReason::Interrupt, &offsets,
                               masm.asmSyncInterruptLabel());
 
     AssertStackAlignment(masm, ABIStackAlignment);
-    masm.call(AsmJSImmPtr(AsmJSImm_HandleExecutionInterrupt));
+    masm.call(SymbolicAddress::HandleExecutionInterrupt);
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
-    GenerateAsmJSExitEpilogue(masm, framePushed, AsmJSExit::Interrupt, &offsets);
+    GenerateAsmJSExitEpilogue(masm, framePushed, ExitReason::Interrupt, &offsets);
 
     if (masm.oom())
         return false;
 
     offsets.end = masm.currentOffset();
     return module.addCodeRange(AsmJSModule::CodeRange::Interrupt, offsets);
 }
 
@@ -967,17 +971,17 @@ GenerateConversionErrorExit(MacroAssembl
     masm.bind(masm.asmOnConversionErrorLabel());
 
     // sp can be anything at this point, so ensure it is aligned when calling
     // into C++.  We unconditionally jump to throw so don't worry about restoring sp.
     masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
 
     // OnImpreciseConversion always throws.
     masm.assertStackAlignment(ABIStackAlignment);
-    masm.call(AsmJSImm_OnImpreciseConversion);
+    masm.call(SymbolicAddress::OnImpreciseConversion);
     masm.jump(throwLabel);
 
     if (masm.oom())
         return false;
 
     offsets.end = masm.currentOffset();
     module.setOnOutOfBoundsExitOffset(offsets.begin);
     return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
@@ -995,17 +999,17 @@ GenerateOutOfBoundsExit(MacroAssembler& 
     masm.bind(masm.asmOnOutOfBoundsLabel());
 
     // sp can be anything at this point, so ensure it is aligned when calling
     // into C++.  We unconditionally jump to throw so don't worry about restoring sp.
     masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
 
     // OnOutOfBounds always throws.
     masm.assertStackAlignment(ABIStackAlignment);
-    masm.call(AsmJSImm_OnOutOfBounds);
+    masm.call(SymbolicAddress::OnOutOfBounds);
     masm.jump(throwLabel);
 
     if (masm.oom())
         return false;
 
     offsets.end = masm.currentOffset();
     module.setOnOutOfBoundsExitOffset(offsets.begin);
     return module.addCodeRange(AsmJSModule::CodeRange::Inline, offsets);
@@ -1050,17 +1054,17 @@ GenerateAsyncInterruptExit(MacroAssemble
     // We know that StackPointer is word-aligned, but not necessarily
     // stack-aligned, so we need to align it dynamically.
     masm.moveStackPtrTo(ABIArgGenerator::NonVolatileReg);
     masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
     if (ShadowStackSpace)
         masm.subFromStackPtr(Imm32(ShadowStackSpace));
 
     masm.assertStackAlignment(ABIStackAlignment);
-    masm.call(AsmJSImmPtr(AsmJSImm_HandleExecutionInterrupt));
+    masm.call(SymbolicAddress::HandleExecutionInterrupt);
 
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
     // Restore the StackPointer to its position before the call.
     masm.moveToStackPtr(ABIArgGenerator::NonVolatileReg);
 
     // Restore the machine state to before the interrupt.
     masm.PopRegsInMask(AllRegsExceptSP); // restore all GP/FP registers (except SP)
@@ -1086,17 +1090,17 @@ GenerateAsyncInterruptExit(MacroAssemble
     masm.loadAsmJSActivation(IntArgReg0);
     masm.loadPtr(Address(IntArgReg0, AsmJSActivation::offsetOfResumePC()), IntArgReg1);
     masm.storePtr(IntArgReg1, Address(s0, masm.framePushed()));
 
     // MIPS ABI requires reserving stack for registers $a0 to $a3.
     masm.subFromStackPtr(Imm32(4 * sizeof(intptr_t)));
 
     masm.assertStackAlignment(ABIStackAlignment);
-    masm.call(AsmJSImm_HandleExecutionInterrupt);
+    masm.call(SymbolicAddress::HandleExecutionInterrupt);
 
     masm.addToStackPtr(Imm32(4 * sizeof(intptr_t)));
 
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
     // This will restore stack to the address before the call.
     masm.moveToStackPtr(s0);
     masm.PopRegsInMask(AllRegsExceptSP);
@@ -1131,17 +1135,17 @@ GenerateAsyncInterruptExit(MacroAssemble
     // high lanes of SIMD registers as well.
 
     // Save all FP registers
     JS_STATIC_ASSERT(!SupportsSimd);
     masm.PushRegsInMask(LiveRegisterSet(GeneralRegisterSet(0),
                                         FloatRegisterSet(FloatRegisters::AllDoubleMask)));
 
     masm.assertStackAlignment(ABIStackAlignment);
-    masm.call(AsmJSImm_HandleExecutionInterrupt);
+    masm.call(SymbolicAddress::HandleExecutionInterrupt);
 
     masm.branchIfFalseBool(ReturnReg, throwLabel);
 
     // Restore the machine state to before the interrupt. This will set the pc!
 
     // Restore all FP registers
     masm.PopRegsInMask(LiveRegisterSet(GeneralRegisterSet(0),
                                        FloatRegisterSet(FloatRegisters::AllDoubleMask)));
@@ -1221,18 +1225,18 @@ GenerateThrowStub(MacroAssembler& masm, 
 bool
 wasm::GenerateStubs(MacroAssembler& masm, AsmJSModule& module, const FuncOffsetVector& funcOffsets)
 {
     for (unsigned i = 0; i < module.numExportedFunctions(); i++) {
         if (!GenerateEntry(masm, module, i, funcOffsets))
             return false;
     }
 
-    for (unsigned i = 0; i < AsmJSExit::Builtin_Limit; i++) {
-        if (!GenerateBuiltinThunk(masm, module, AsmJSExit::BuiltinKind(i)))
+    for (auto builtin : MakeEnumeratedRange(Builtin::Limit)) {
+        if (!GenerateBuiltinThunk(masm, module, builtin))
             return false;
     }
 
     Label onThrow;
 
     {
         Label onDetached;
 
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -5548,17 +5548,17 @@ CodeGenerator::visitModD(LModD* ins)
 
     MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);
 
     masm.setupUnalignedABICall(temp);
     masm.passABIArg(lhs, MoveOp::DOUBLE);
     masm.passABIArg(rhs, MoveOp::DOUBLE);
 
     if (gen->compilingAsmJS())
-        masm.callWithABI(AsmJSImm_ModD, MoveOp::DOUBLE);
+        masm.callWithABI(wasm::SymbolicAddress::ModD, MoveOp::DOUBLE);
     else
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NumberMod), MoveOp::DOUBLE);
 }
 
 typedef bool (*BinaryFn)(JSContext*, MutableHandleValue, MutableHandleValue, MutableHandleValue);
 
 static const VMFunction AddInfo = FunctionInfo<BinaryFn>(js::AddValues);
 static const VMFunction SubInfo = FunctionInfo<BinaryFn>(js::SubValues);
@@ -7887,17 +7887,17 @@ CodeGenerator::generateAsmJS(AsmJSFuncti
     // Overflow checks are omitted by CodeGenerator in some cases (leaf
     // functions with small framePushed). Perform overflow-checking after
     // pushing framePushed to catch cases with really large frames.
     Label onOverflow;
     if (!omitOverRecursedCheck()) {
         // See comment below.
         Label* target = frameSize() > 0 ? &onOverflow : masm.asmStackOverflowLabel();
         masm.branchPtr(Assembler::AboveOrEqual,
-                       AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
+                       wasm::SymbolicAddress::StackLimit,
                        masm.getStackPointer(),
                        target);
     }
 
 
     if (!generateBody())
         return false;
 
@@ -10205,18 +10205,20 @@ CodeGenerator::visitInterruptCheck(LInte
     masm.branch32(Assembler::NotEqual, interruptAddr, Imm32(0), ool->entry());
     masm.bind(ool->rejoin());
 }
 
 void
 CodeGenerator::visitAsmJSInterruptCheck(LAsmJSInterruptCheck* lir)
 {
     Label rejoin;
-    masm.branch32(Assembler::Equal, AsmJSAbsoluteAddress(AsmJSImm_RuntimeInterruptUint32),
-                  Imm32(0), &rejoin);
+    masm.branch32(Assembler::Equal,
+                  wasm::SymbolicAddress::RuntimeInterruptUint32,
+                  Imm32(0),
+                  &rejoin);
     {
         uint32_t stackFixup = ComputeByteAlignment(masm.framePushed() + sizeof(AsmJSFrame),
                                                    ABIStackAlignment);
         masm.reserveStack(stackFixup);
         masm.call(lir->funcDesc(), lir->interruptExit());
         masm.freeStack(stackFixup);
     }
     masm.bind(&rejoin);
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -4747,17 +4747,17 @@ MAsmJSUnsignedToFloat32::foldsTo(TempAll
                 return MConstant::NewAsmJS(alloc, JS::Float32Value(float(dval)), MIRType_Float32);
         }
     }
 
     return this;
 }
 
 MAsmJSCall*
-MAsmJSCall::New(TempAllocator& alloc, const CallSiteDesc& desc, Callee callee,
+MAsmJSCall::New(TempAllocator& alloc, const wasm::CallSiteDesc& desc, Callee callee,
                 const Args& args, MIRType resultType, size_t spIncrement)
 {
     MAsmJSCall* call = new(alloc) MAsmJSCall(desc, callee, spIncrement);
     call->setResultType(resultType);
 
     if (!call->argRegs_.init(alloc, args.length()))
         return nullptr;
     for (size_t i = 0; i < call->argRegs_.length(); i++)
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -7213,34 +7213,34 @@ class MInterruptCheck : public MNullaryI
 
 // Check whether we need to fire the interrupt handler at loop headers and
 // function prologues in asm.js. Generated only if we can't use implicit
 // interrupt checks with signal handlers.
 class MAsmJSInterruptCheck
   : public MNullaryInstruction
 {
     Label* interruptExit_;
-    CallSiteDesc funcDesc_;
-
-    MAsmJSInterruptCheck(Label* interruptExit, const CallSiteDesc& funcDesc)
+    wasm::CallSiteDesc funcDesc_;
+
+    MAsmJSInterruptCheck(Label* interruptExit, const wasm::CallSiteDesc& funcDesc)
       : interruptExit_(interruptExit), funcDesc_(funcDesc)
     {}
 
   public:
     INSTRUCTION_HEADER(AsmJSInterruptCheck)
 
     static MAsmJSInterruptCheck* New(TempAllocator& alloc, Label* interruptExit,
-                                     const CallSiteDesc& funcDesc)
+                                     const wasm::CallSiteDesc& funcDesc)
     {
         return new(alloc) MAsmJSInterruptCheck(interruptExit, funcDesc);
     }
     Label* interruptExit() const {
         return interruptExit_;
     }
-    const CallSiteDesc& funcDesc() const {
+    const wasm::CallSiteDesc& funcDesc() const {
         return funcDesc_;
     }
 };
 
 // Checks if a value is JS_UNINITIALIZED_LEXICAL, bailout out if so, leaving
 // it to baseline to throw at the correct pc.
 class MLexicalCheck
   : public MUnaryInstruction,
@@ -13794,60 +13794,60 @@ class MAsmJSCall final
     class Callee {
       public:
         enum Which { Internal, Dynamic, Builtin };
       private:
         Which which_;
         union {
             AsmJSInternalCallee internal_;
             MDefinition* dynamic_;
-            AsmJSImmKind builtin_;
+            wasm::Builtin builtin_;
         } u;
       public:
         Callee() {}
         explicit Callee(AsmJSInternalCallee callee) : which_(Internal) { u.internal_ = callee; }
         explicit Callee(MDefinition* callee) : which_(Dynamic) { u.dynamic_ = callee; }
-        explicit Callee(AsmJSImmKind callee) : which_(Builtin) { u.builtin_ = callee; }
+        explicit Callee(wasm::Builtin callee) : which_(Builtin) { u.builtin_ = callee; }
         Which which() const { return which_; }
         AsmJSInternalCallee internal() const { MOZ_ASSERT(which_ == Internal); return u.internal_; }
         MDefinition* dynamic() const { MOZ_ASSERT(which_ == Dynamic); return u.dynamic_; }
-        AsmJSImmKind builtin() const { MOZ_ASSERT(which_ == Builtin); return u.builtin_; }
+        wasm::Builtin builtin() const { MOZ_ASSERT(which_ == Builtin); return u.builtin_; }
     };
 
   private:
-    CallSiteDesc desc_;
+    wasm::CallSiteDesc desc_;
     Callee callee_;
     FixedList<AnyRegister> argRegs_;
     size_t spIncrement_;
 
-    MAsmJSCall(const CallSiteDesc& desc, Callee callee, size_t spIncrement)
+    MAsmJSCall(const wasm::CallSiteDesc& desc, Callee callee, size_t spIncrement)
      : desc_(desc), callee_(callee), spIncrement_(spIncrement)
     { }
 
   public:
     INSTRUCTION_HEADER(AsmJSCall)
 
     struct Arg {
         AnyRegister reg;
         MDefinition* def;
         Arg(AnyRegister reg, MDefinition* def) : reg(reg), def(def) {}
     };
     typedef Vector<Arg, 8, SystemAllocPolicy> Args;
 
-    static MAsmJSCall* New(TempAllocator& alloc, const CallSiteDesc& desc, Callee callee,
+    static MAsmJSCall* New(TempAllocator& alloc, const wasm::CallSiteDesc& desc, Callee callee,
                            const Args& args, MIRType resultType, size_t spIncrement);
 
     size_t numArgs() const {
         return argRegs_.length();
     }
     AnyRegister registerForArg(size_t index) const {
         MOZ_ASSERT(index < numArgs());
         return argRegs_[index];
     }
-    const CallSiteDesc& desc() const {
+    const wasm::CallSiteDesc& desc() const {
         return desc_;
     }
     Callee callee() const {
         return callee_;
     }
     size_t dynamicCalleeOperandIndex() const {
         MOZ_ASSERT(callee_.which() == Callee::Dynamic);
         MOZ_ASSERT(numArgs() == numOperands() - 1);
--- a/js/src/jit/MacroAssembler-inl.h
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -74,31 +74,31 @@ MacroAssembler::PushWithPatch(ImmPtr imm
 {
     return PushWithPatch(ImmWord(uintptr_t(imm.value)));
 }
 
 // ===============================================================
 // Simple call functions.
 
 void
-MacroAssembler::call(const CallSiteDesc& desc, const Register reg)
+MacroAssembler::call(const wasm::CallSiteDesc& desc, const Register reg)
 {
     CodeOffset l = call(reg);
     append(desc, l, framePushed());
 }
 
 void
-MacroAssembler::call(const CallSiteDesc& desc, Label* label)
+MacroAssembler::call(const wasm::CallSiteDesc& desc, Label* label)
 {
     CodeOffset l = call(label);
     append(desc, l, framePushed());
 }
 
 void
-MacroAssembler::call(const CallSiteDesc& desc, AsmJSInternalCallee callee)
+MacroAssembler::call(const wasm::CallSiteDesc& desc, AsmJSInternalCallee callee)
 {
     CodeOffset l = callWithPatch();
     append(desc, l, framePushed(), callee.index);
 }
 
 // ===============================================================
 // ABI function calls.
 
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -2516,17 +2516,17 @@ MacroAssembler::callWithABINoProfiler(vo
 
     uint32_t stackAdjust;
     callWithABIPre(&stackAdjust);
     call(ImmPtr(fun));
     callWithABIPost(stackAdjust, result);
 }
 
 void
-MacroAssembler::callWithABINoProfiler(AsmJSImmPtr imm, MoveOp::Type result)
+MacroAssembler::callWithABINoProfiler(wasm::SymbolicAddress imm, MoveOp::Type result)
 {
     uint32_t stackAdjust;
     callWithABIPre(&stackAdjust, /* callFromAsmJS = */ true);
     call(imm);
     callWithABIPost(stackAdjust, result);
 }
 
 // ===============================================================
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -495,23 +495,23 @@ class MacroAssembler : public MacroAssem
     // Simple call functions.
 
     CodeOffset call(Register reg) PER_SHARED_ARCH;
     CodeOffset call(Label* label) PER_SHARED_ARCH;
     void call(const Address& addr) DEFINED_ON(x86_shared);
     void call(ImmWord imm) PER_SHARED_ARCH;
     // Call a target native function, which is neither traceable nor movable.
     void call(ImmPtr imm) PER_SHARED_ARCH;
-    void call(AsmJSImmPtr imm) PER_SHARED_ARCH;
+    void call(wasm::SymbolicAddress imm) PER_SHARED_ARCH;
     // Call a target JitCode, which must be traceable, and may be movable.
     void call(JitCode* c) PER_SHARED_ARCH;
 
-    inline void call(const CallSiteDesc& desc, const Register reg);
-    inline void call(const CallSiteDesc& desc, Label* label);
-    inline void call(const CallSiteDesc& desc, AsmJSInternalCallee callee);
+    inline void call(const wasm::CallSiteDesc& desc, const Register reg);
+    inline void call(const wasm::CallSiteDesc& desc, Label* label);
+    inline void call(const wasm::CallSiteDesc& desc, AsmJSInternalCallee callee);
 
     CodeOffset callWithPatch() PER_SHARED_ARCH;
     void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;
 
     // Push the return address and make a call. On platforms where this function
     // is not defined, push the link register (pushReturnAddress) at the entry
     // point of the callee.
     void callAndPushReturnAddress(Register reg) DEFINED_ON(mips_shared, x86_shared);
@@ -550,17 +550,17 @@ class MacroAssembler : public MacroAssem
     // with callWithABI.
     void setupABICall();
 
     // Reserve the stack and resolve the arguments move.
     void callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS = false) PER_ARCH;
 
     // Emits a call to a C/C++ function, resolving all argument moves.
     void callWithABINoProfiler(void* fun, MoveOp::Type result);
-    void callWithABINoProfiler(AsmJSImmPtr imm, MoveOp::Type result);
+    void callWithABINoProfiler(wasm::SymbolicAddress imm, MoveOp::Type result);
     void callWithABINoProfiler(Register fun, MoveOp::Type result) PER_ARCH;
     void callWithABINoProfiler(const Address& fun, MoveOp::Type result) PER_ARCH;
 
     // Restore the stack to its state before the setup function call.
     void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result) PER_ARCH;
 
     // Create the signature to be able to decode the arguments of a native
     // function, when calling a function within the simulator.
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -576,17 +576,17 @@ CodeGeneratorARM::visitSoftDivI(LSoftDiv
 
     Label done;
     divICommon(mir, lhs, rhs, output, ins->snapshot(), done);
 
     masm.setupAlignedABICall();
     masm.passABIArg(lhs);
     masm.passABIArg(rhs);
     if (gen->compilingAsmJS())
-        masm.callWithABI(AsmJSImm_aeabi_idivmod);
+        masm.callWithABI(wasm::SymbolicAddress::aeabi_idivmod);
     else
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_idivmod));
 
     // idivmod returns the quotient in r0, and the remainder in r1.
     if (!mir->canTruncateRemainder()) {
         MOZ_ASSERT(mir->fallible());
         masm.ma_cmp(r1, Imm32(0));
         bailoutIf(Assembler::NonZero, ins->snapshot());
@@ -746,17 +746,17 @@ CodeGeneratorARM::visitSoftModI(LSoftMod
     }
 
     modICommon(mir, lhs, rhs, output, ins->snapshot(), done);
 
     masm.setupAlignedABICall();
     masm.passABIArg(lhs);
     masm.passABIArg(rhs);
     if (gen->compilingAsmJS())
-        masm.callWithABI(AsmJSImm_aeabi_idivmod);
+        masm.callWithABI(wasm::SymbolicAddress::aeabi_idivmod);
     else
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_idivmod));
 
     // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
     if (mir->canBeNegativeDividend()) {
         if (mir->isTruncated()) {
             // -0.0|0 == 0
         } else {
@@ -2167,34 +2167,34 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAs
         return;
     }
 
     BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
     if (isFloat) {
         FloatRegister dst = ToFloatRegister(ins->output());
         VFPRegister vd(dst);
         if (size == 32) {
-            masm.ma_vldr(Address(GlobalReg, AsmJSNaN32GlobalDataOffset - AsmJSGlobalRegBias),
+            masm.ma_vldr(Address(GlobalReg, wasm::NaN32GlobalDataOffset - AsmJSGlobalRegBias),
                          vd.singleOverlay(), Assembler::AboveOrEqual);
             masm.ma_vldr(vd.singleOverlay(), HeapReg, ptrReg, 0, Assembler::Below);
         } else {
-            masm.ma_vldr(Address(GlobalReg, AsmJSNaN64GlobalDataOffset - AsmJSGlobalRegBias),
+            masm.ma_vldr(Address(GlobalReg, wasm::NaN64GlobalDataOffset - AsmJSGlobalRegBias),
                          vd, Assembler::AboveOrEqual);
             masm.ma_vldr(vd, HeapReg, ptrReg, 0, Assembler::Below);
         }
     } else {
         Register d = ToRegister(ins->output());
         if (mir->isAtomicAccess())
             masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
         else
             masm.ma_mov(Imm32(0), d, LeaveCC, Assembler::AboveOrEqual);
         masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, d, Offset, Assembler::Below);
     }
     memoryBarrier(mir->barrierAfter());
-    masm.append(AsmJSHeapAccess(bo.getOffset()));
+    masm.append(wasm::HeapAccess(bo.getOffset()));
 }
 
 void
 CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     bool isSigned;
     int size;
@@ -2257,17 +2257,17 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LA
             masm.ma_vstr(vd, HeapReg, ptrReg, 0, 0, Assembler::Below);
     } else {
         if (mir->isAtomicAccess())
             masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
         masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg,
                               ToRegister(ins->value()), Offset, Assembler::Below);
     }
     memoryBarrier(mir->barrierAfter());
-    masm.append(AsmJSHeapAccess(bo.getOffset()));
+    masm.append(wasm::HeapAccess(bo.getOffset()));
 }
 
 void
 CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->accessType();
     const LAllocation* ptr = ins->ptr();
@@ -2283,17 +2283,17 @@ CodeGeneratorARM::visitAsmJSCompareExcha
         BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
         maybeCmpOffset = bo.getOffset();
         masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
     }
     masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                         srcAddr, oldval, newval, InvalidReg,
                                         ToAnyRegister(ins->output()));
     if (mir->needsBoundsCheck())
-        masm.append(AsmJSHeapAccess(maybeCmpOffset));
+        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorARM::visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout* ins)
 {
     const MAsmJSCompareExchangeHeap* mir = ins->mir();
     Scalar::Type viewType = mir->accessType();
     Register ptr = ToRegister(ins->ptr());
@@ -2306,17 +2306,17 @@ CodeGeneratorARM::visitAsmJSCompareExcha
     {
         ScratchRegisterScope scratch(masm);
         masm.ma_mov(Imm32(viewType), scratch);
         masm.passABIArg(scratch);
         masm.passABIArg(ptr);
         masm.passABIArg(oldval);
         masm.passABIArg(newval);
     }
-    masm.callWithABI(AsmJSImm_AtomicCmpXchg);
+    masm.callWithABI(wasm::SymbolicAddress::AtomicCmpXchg);
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
@@ -2330,17 +2330,17 @@ CodeGeneratorARM::visitAsmJSAtomicExchan
         maybeCmpOffset = bo.getOffset();
         masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
     }
 
     masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
 
     if (mir->needsBoundsCheck())
-        masm.append(AsmJSHeapAccess(maybeCmpOffset));
+        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* ins)
 {
     const MAsmJSAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type viewType = mir->accessType();
     Register ptr = ToRegister(ins->ptr());
@@ -2352,17 +2352,17 @@ CodeGeneratorARM::visitAsmJSAtomicExchan
     {
         ScratchRegisterScope scratch(masm);
         masm.ma_mov(Imm32(viewType), scratch);
         masm.passABIArg(scratch);
     }
     masm.passABIArg(ptr);
     masm.passABIArg(value);
 
-    masm.callWithABI(AsmJSImm_AtomicXchg);
+    masm.callWithABI(wasm::SymbolicAddress::AtomicXchg);
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
     MOZ_ASSERT(ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
@@ -2387,17 +2387,17 @@ CodeGeneratorARM::visitAsmJSAtomicBinopH
                                    Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
                                    ToAnyRegister(ins->output()));
     else
         atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                    ToRegister(value), srcAddr, flagTemp, InvalidReg,
                                    ToAnyRegister(ins->output()));
 
     if (mir->needsBoundsCheck())
-        masm.append(AsmJSHeapAccess(maybeCmpOffset));
+        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MOZ_ASSERT(!ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
@@ -2418,17 +2418,17 @@ CodeGeneratorARM::visitAsmJSAtomicBinopH
     }
 
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp);
     else
         atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp);
 
     if (mir->needsBoundsCheck())
-        masm.append(AsmJSHeapAccess(maybeCmpOffset));
+        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicBinopCallout(LAsmJSAtomicBinopCallout* ins)
 {
     const MAsmJSAtomicBinopHeap* mir = ins->mir();
     Scalar::Type viewType = mir->accessType();
     Register ptr = ToRegister(ins->ptr());
@@ -2440,29 +2440,29 @@ CodeGeneratorARM::visitAsmJSAtomicBinopC
         masm.move32(Imm32(viewType), scratch);
         masm.passABIArg(scratch);
     }
     masm.passABIArg(ptr);
     masm.passABIArg(value);
 
     switch (mir->operation()) {
       case AtomicFetchAddOp:
-        masm.callWithABI(AsmJSImm_AtomicFetchAdd);
+        masm.callWithABI(wasm::SymbolicAddress::AtomicFetchAdd);
         break;
       case AtomicFetchSubOp:
-        masm.callWithABI(AsmJSImm_AtomicFetchSub);
+        masm.callWithABI(wasm::SymbolicAddress::AtomicFetchSub);
         break;
       case AtomicFetchAndOp:
-        masm.callWithABI(AsmJSImm_AtomicFetchAnd);
+        masm.callWithABI(wasm::SymbolicAddress::AtomicFetchAnd);
         break;
       case AtomicFetchOrOp:
-        masm.callWithABI(AsmJSImm_AtomicFetchOr);
+        masm.callWithABI(wasm::SymbolicAddress::AtomicFetchOr);
         break;
       case AtomicFetchXorOp:
-        masm.callWithABI(AsmJSImm_AtomicFetchXor);
+        masm.callWithABI(wasm::SymbolicAddress::AtomicFetchXor);
         break;
       default:
         MOZ_CRASH("Unknown op");
     }
 }
 
 void
 CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg* ins)
@@ -2580,17 +2580,17 @@ CodeGeneratorARM::visitSoftUDivOrMod(LSo
 
     generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), div);
     generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), mod);
 
     masm.setupAlignedABICall();
     masm.passABIArg(lhs);
     masm.passABIArg(rhs);
     if (gen->compilingAsmJS())
-        masm.callWithABI(AsmJSImm_aeabi_uidivmod);
+        masm.callWithABI(wasm::SymbolicAddress::aeabi_uidivmod);
     else
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, __aeabi_uidivmod));
 
     // uidivmod returns the quotient in r0, and the remainder in r1.
     if (div && !div->canTruncateRemainder()) {
         MOZ_ASSERT(div->fallible());
         masm.ma_cmp(r1, Imm32(0));
         bailoutIf(Assembler::NonZero, ins->snapshot());
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -2014,25 +2014,25 @@ MacroAssemblerARMCompat::movePtr(ImmGCPt
 
 void
 MacroAssemblerARMCompat::movePtr(ImmPtr imm, Register dest)
 {
     movePtr(ImmWord(uintptr_t(imm.value)), dest);
 }
 
 void
-MacroAssemblerARMCompat::movePtr(AsmJSImmPtr imm, Register dest)
+MacroAssemblerARMCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
 {
     RelocStyle rs;
     if (HasMOVWT())
         rs = L_MOVWT;
     else
         rs = L_LDR;
 
-    append(AsmJSAbsoluteLink(CodeOffset(currentOffset()), imm.kind()));
+    append(AsmJSAbsoluteLink(CodeOffset(currentOffset()), imm));
     ma_movPatchable(Imm32(-1), dest, Always, rs);
 }
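Both movePtr paths now hand the wasm::SymbolicAddress itself to AsmJSAbsoluteLink instead of unwrapping it via imm.kind(). A sketch of the link record this presumably implies; the field and constructor names are assumptions, since the real declaration is outside this hunk:

    struct AsmJSAbsoluteLink
    {
        AsmJSAbsoluteLink(CodeOffset patchAt, wasm::SymbolicAddress target)
          : patchAt(patchAt), target(target)
        {}
        CodeOffset patchAt;            // code offset to patch at link time
        wasm::SymbolicAddress target;  // which builtin/global the immediate refers to
    };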
 
 void
 MacroAssemblerARMCompat::load8ZeroExtend(const Address& address, Register dest)
 {
     ma_dataTransferN(IsLoad, 8, false, address.base, Imm32(address.offset), dest);
 }
@@ -2184,20 +2184,20 @@ void
 MacroAssemblerARMCompat::loadPtr(AbsoluteAddress address, Register dest)
 {
     MOZ_ASSERT(dest != pc); // Use dest as a scratch register.
     movePtr(ImmWord(uintptr_t(address.addr)), dest);
     loadPtr(Address(dest, 0), dest);
 }
 
 void
-MacroAssemblerARMCompat::loadPtr(AsmJSAbsoluteAddress address, Register dest)
+MacroAssemblerARMCompat::loadPtr(wasm::SymbolicAddress address, Register dest)
 {
     MOZ_ASSERT(dest != pc); // Use dest as a scratch register.
-    movePtr(AsmJSImmPtr(address.kind()), dest);
+    movePtr(address, dest);
     loadPtr(Address(dest, 0), dest);
 }
 
 void
 MacroAssemblerARMCompat::loadPrivate(const Address& address, Register dest)
 {
     ma_ldr(ToPayload(address), dest);
 }
@@ -5111,17 +5111,17 @@ void
 MacroAssembler::call(ImmPtr imm)
 {
     BufferOffset bo = m_buffer.nextOffset();
     addPendingJump(bo, imm, Relocation::HARDCODED);
     ma_call(imm);
 }
 
 void
-MacroAssembler::call(AsmJSImmPtr imm)
+MacroAssembler::call(wasm::SymbolicAddress imm)
 {
     movePtr(imm, CallReg);
     call(CallReg);
 }
 
 void
 MacroAssembler::call(JitCode* c)
 {
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -969,17 +969,17 @@ class MacroAssemblerARMCompat : public M
         branchPtr(cond, lhs, scratch, label);
     }
     void branchPtr(Condition cond, Register lhs, ImmWord imm, Label* label) {
         branch32(cond, lhs, Imm32(imm.value), label);
     }
     void branchPtr(Condition cond, Register lhs, ImmPtr imm, Label* label) {
         branchPtr(cond, lhs, ImmWord(uintptr_t(imm.value)), label);
     }
-    void branchPtr(Condition cond, Register lhs, AsmJSImmPtr imm, Label* label) {
+    void branchPtr(Condition cond, Register lhs, wasm::SymbolicAddress imm, Label* label) {
         ScratchRegisterScope scratch(asMasm());
         movePtr(imm, scratch);
         branchPtr(cond, lhs, scratch, label);
     }
     void branchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
         branch32(cond, lhs, imm, label);
     }
     void decBranchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
@@ -1028,17 +1028,17 @@ class MacroAssemblerARMCompat : public M
         ma_b(label, cond);
     }
     void branchPtr(Condition cond, AbsoluteAddress addr, ImmWord ptr, Label* label) {
         ScratchRegisterScope scratch(asMasm());
         loadPtr(addr, scratch);
         ma_cmp(scratch, ptr);
         ma_b(label, cond);
     }
-    void branchPtr(Condition cond, AsmJSAbsoluteAddress addr, Register ptr, Label* label) {
+    void branchPtr(Condition cond, wasm::SymbolicAddress addr, Register ptr, Label* label) {
         ScratchRegisterScope scratch(asMasm());
         loadPtr(addr, scratch);
         ma_cmp(scratch, ptr);
         ma_b(label, cond);
     }
     void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label* label) {
         AutoRegisterScope scratch2(asMasm(), secondScratchReg_);
         loadPtr(lhs, scratch2); // ma_cmp will use the scratch register.
@@ -1046,17 +1046,17 @@ class MacroAssemblerARMCompat : public M
         ma_b(label, cond);
     }
     void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) {
         AutoRegisterScope scratch2(asMasm(), secondScratchReg_);
         loadPtr(lhs, scratch2); // ma_cmp will use the scratch register.
         ma_cmp(scratch2, rhs);
         ma_b(label, cond);
     }
-    void branch32(Condition cond, AsmJSAbsoluteAddress addr, Imm32 imm, Label* label) {
+    void branch32(Condition cond, wasm::SymbolicAddress addr, Imm32 imm, Label* label) {
         ScratchRegisterScope scratch(asMasm());
         loadPtr(addr, scratch);
         ma_cmp(scratch, imm);
         ma_b(label, cond);
     }
 
     void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
         if (dest.isFloat())
@@ -1208,17 +1208,17 @@ class MacroAssemblerARMCompat : public M
     void not32(Register reg);
 
     void move32(Imm32 imm, Register dest);
     void move32(Register src, Register dest);
 
     void movePtr(Register src, Register dest);
     void movePtr(ImmWord imm, Register dest);
     void movePtr(ImmPtr imm, Register dest);
-    void movePtr(AsmJSImmPtr imm, Register dest);
+    void movePtr(wasm::SymbolicAddress imm, Register dest);
     void movePtr(ImmGCPtr imm, Register dest);
     void move64(Register64 src, Register64 dest) {
         move32(src.low, dest.low);
         move32(src.high, dest.high);
     }
 
     void load8SignExtend(const Address& address, Register dest);
     void load8SignExtend(const BaseIndex& src, Register dest);
@@ -1238,17 +1238,17 @@ class MacroAssemblerARMCompat : public M
     void load64(const Address& address, Register64 dest) {
         load32(address, dest.low);
         load32(Address(address.base, address.offset + 4), dest.high);
     }
 
     void loadPtr(const Address& address, Register dest);
     void loadPtr(const BaseIndex& src, Register dest);
     void loadPtr(AbsoluteAddress address, Register dest);
-    void loadPtr(AsmJSAbsoluteAddress address, Register dest);
+    void loadPtr(wasm::SymbolicAddress address, Register dest);
 
     void loadPrivate(const Address& address, Register dest);
 
     void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
@@ -1817,20 +1817,20 @@ class MacroAssemblerARMCompat : public M
     void moveFloat32(FloatRegister src, FloatRegister dest) {
         as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay());
     }
 
     void branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, Label* label);
     void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, Label* label);
 
     void loadAsmJSActivation(Register dest) {
-        loadPtr(Address(GlobalReg, AsmJSActivationGlobalDataOffset - AsmJSGlobalRegBias), dest);
+        loadPtr(Address(GlobalReg, wasm::ActivationGlobalDataOffset - AsmJSGlobalRegBias), dest);
     }
     void loadAsmJSHeapRegisterFromGlobalData() {
-        loadPtr(Address(GlobalReg, AsmJSHeapGlobalDataOffset - AsmJSGlobalRegBias), HeapReg);
+        loadPtr(Address(GlobalReg, wasm::HeapGlobalDataOffset - AsmJSGlobalRegBias), HeapReg);
     }
     // Instrumentation for entering and leaving the profiler.
     void profilerEnterFrame(Register framePtr, Register scratch);
     void profilerExitFrame();
 };
 
 typedef MacroAssemblerARMCompat MacroAssemblerSpecific;
 
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -535,17 +535,17 @@ void
 MacroAssembler::call(ImmPtr imm)
 {
     syncStackPtr();
     movePtr(imm, ip0);
     Blr(vixl::ip0);
 }
 
 void
-MacroAssembler::call(AsmJSImmPtr imm)
+MacroAssembler::call(wasm::SymbolicAddress imm)
 {
     vixl::UseScratchRegisterScope temps(this);
     const Register scratch = temps.AcquireX().asUnsized();
     syncStackPtr();
     movePtr(imm, scratch);
     call(scratch);
 }
 
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -771,35 +771,35 @@ class MacroAssemblerCompat : public vixl
         Mov(ARMRegister(dest, 64), ARMRegister(src, 64));
     }
     void movePtr(ImmWord imm, Register dest) {
         Mov(ARMRegister(dest, 64), int64_t(imm.value));
     }
     void movePtr(ImmPtr imm, Register dest) {
         Mov(ARMRegister(dest, 64), int64_t(imm.value));
     }
-    void movePtr(AsmJSImmPtr imm, Register dest) {
+    void movePtr(wasm::SymbolicAddress imm, Register dest) {
         BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
-        append(AsmJSAbsoluteLink(CodeOffset(off.getOffset()), imm.kind()));
+        append(AsmJSAbsoluteLink(CodeOffset(off.getOffset()), imm));
     }
     void movePtr(ImmGCPtr imm, Register dest) {
         BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
         writeDataRelocation(imm, load);
     }
     void move64(Register64 src, Register64 dest) {
         movePtr(src.reg, dest.reg);
     }
 
     void mov(ImmWord imm, Register dest) {
         movePtr(imm, dest);
     }
     void mov(ImmPtr imm, Register dest) {
         movePtr(imm, dest);
     }
-    void mov(AsmJSImmPtr imm, Register dest) {
+    void mov(wasm::SymbolicAddress imm, Register dest) {
         movePtr(imm, dest);
     }
     void mov(Register src, Register dest) {
         movePtr(src, dest);
     }
 
     void move32(Imm32 imm, Register dest) {
         Mov(ARMRegister(dest, 32), (int64_t)imm.value);
@@ -813,20 +813,20 @@ class MacroAssemblerCompat : public vixl
     // Returns the BufferOffset of the load instruction emitted.
     BufferOffset movePatchablePtr(ImmWord ptr, Register dest);
     BufferOffset movePatchablePtr(ImmPtr ptr, Register dest);
 
     void neg32(Register reg) {
         Negs(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32)));
     }
 
-    void loadPtr(AsmJSAbsoluteAddress address, Register dest) {
+    void loadPtr(wasm::SymbolicAddress address, Register dest) {
         vixl::UseScratchRegisterScope temps(this);
         const ARMRegister scratch = temps.AcquireX();
-        movePtr(AsmJSImmPtr(address.kind()), scratch.asUnsized());
+        movePtr(address, scratch.asUnsized());
         Ldr(ARMRegister(dest, 64), MemOperand(scratch));
     }
     void loadPtr(AbsoluteAddress address, Register dest) {
         vixl::UseScratchRegisterScope temps(this);
         const ARMRegister scratch = temps.AcquireX();
         movePtr(ImmWord((uintptr_t)address.addr), scratch.asUnsized());
         Ldr(ARMRegister(dest, 64), MemOperand(scratch));
     }
@@ -1508,20 +1508,20 @@ class MacroAssemblerCompat : public vixl
         branch32(cond, Address(scratch, 0), rhs, label);
     }
     void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label* label) {
         vixl::UseScratchRegisterScope temps(this);
         const Register scratch = temps.AcquireX().asUnsized();
         movePtr(ImmPtr(lhs.addr), scratch);
         branch32(cond, Address(scratch, 0), rhs, label);
     }
-    void branch32(Condition cond, AsmJSAbsoluteAddress lhs, Imm32 rhs, Label* label) {
+    void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label) {
         vixl::UseScratchRegisterScope temps(this);
         const Register scratch = temps.AcquireX().asUnsized();
-        movePtr(AsmJSImmPtr(lhs.kind()), scratch);
+        movePtr(lhs, scratch);
         branch32(cond, Address(scratch, 0), rhs, label);
     }
     void branch32(Condition cond, BaseIndex lhs, Imm32 rhs, Label* label) {
         vixl::UseScratchRegisterScope temps(this);
         const ARMRegister scratch32 = temps.AcquireW();
         MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
         MOZ_ASSERT(scratch32.asUnsized() != lhs.index);
         doBaseIndex(scratch32, lhs, vixl::LDR_w);
@@ -1599,17 +1599,17 @@ class MacroAssemblerCompat : public vixl
             const Register scratch = temps.AcquireX().asUnsized();
             MOZ_ASSERT(scratch != addr.base);
             loadPtr(addr, scratch);
             cmpPtr(scratch, ptr);
         }
         return jumpWithPatch(label, cond);
     }
 
-    void branchPtr(Condition cond, AsmJSAbsoluteAddress lhs, Register rhs, Label* label) {
+    void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label) {
         vixl::UseScratchRegisterScope temps(this);
         const Register scratch = temps.AcquireX().asUnsized();
         MOZ_ASSERT(scratch != rhs);
         loadPtr(lhs, scratch);
         branchPtr(cond, scratch, rhs, label);
     }
     void branchPtr(Condition cond, Address lhs, ImmWord ptr, Label* label) {
         vixl::UseScratchRegisterScope temps(this);
@@ -2574,21 +2574,21 @@ class MacroAssemblerCompat : public vixl
         MOZ_ASSERT(scratch != dest.base);
         load32(src, scratch);
         storeValue(JSVAL_TYPE_INT32, scratch, dest);
     }
 
     void branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, Label* label);
     void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, Label* label);
 
-    void appendCallSite(const CallSiteDesc& desc) {
+    void appendCallSite(const wasm::CallSiteDesc& desc) {
         MOZ_CRASH("appendCallSite");
     }
 
-    void callExit(AsmJSImmPtr imm, uint32_t stackArgBytes) {
+    void callExit(wasm::SymbolicAddress imm, uint32_t stackArgBytes) {
         MOZ_CRASH("callExit");
     }
 
     void profilerEnterFrame(Register framePtr, Register scratch) {
         AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
         loadPtr(activation, scratch);
         storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
         storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
@@ -3058,21 +3058,21 @@ class MacroAssemblerCompat : public vixl
     // and assert that the value is equal to the current sp.
     void simulatorCheckSP() {
 #ifdef JS_SIMULATOR_ARM64
         svc(vixl::kCheckStackPointer);
 #endif
     }
 
     void loadAsmJSActivation(Register dest) {
-        loadPtr(Address(GlobalReg, AsmJSActivationGlobalDataOffset - AsmJSGlobalRegBias), dest);
+        loadPtr(Address(GlobalReg, wasm::ActivationGlobalDataOffset - AsmJSGlobalRegBias), dest);
     }
     void loadAsmJSHeapRegisterFromGlobalData() {
-        loadPtr(Address(GlobalReg, AsmJSHeapGlobalDataOffset - AsmJSGlobalRegBias), HeapReg);
-        loadPtr(Address(GlobalReg, AsmJSHeapGlobalDataOffset - AsmJSGlobalRegBias + 8), HeapLenReg);
+        loadPtr(Address(GlobalReg, wasm::HeapGlobalDataOffset - AsmJSGlobalRegBias), HeapReg);
+        loadPtr(Address(GlobalReg, wasm::HeapGlobalDataOffset - AsmJSGlobalRegBias + 8), HeapLenReg);
     }
 
     // Overwrites the payload bits of a dest register containing a Value.
     void movePayload(Register src, Register dest) {
         // Bfxil cannot be used with the zero register as a source.
         if (src == rzr)
             And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(~int64_t(JSVAL_PAYLOAD_MASK)));
         else
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1650,31 +1650,31 @@ CodeGeneratorMIPSShared::visitAsmJSLoadH
         masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                      static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
     }
     masm.ma_b(&done, ShortJump);
     masm.bind(&outOfRange);
     // Offset is out of range. Load default values.
     if (isFloat) {
         if (size == 32)
-            masm.loadFloat32(Address(GlobalReg, AsmJSNaN32GlobalDataOffset - AsmJSGlobalRegBias),
+            masm.loadFloat32(Address(GlobalReg, wasm::NaN32GlobalDataOffset - AsmJSGlobalRegBias),
                              ToFloatRegister(out));
         else
-            masm.loadDouble(Address(GlobalReg, AsmJSNaN64GlobalDataOffset - AsmJSGlobalRegBias),
+            masm.loadDouble(Address(GlobalReg, wasm::NaN64GlobalDataOffset - AsmJSGlobalRegBias),
                             ToFloatRegister(out));
     } else {
         if (mir->isAtomicAccess())
             masm.ma_b(masm.asmOnOutOfBoundsLabel());
         else
             masm.move32(Imm32(0), ToRegister(out));
     }
     masm.bind(&done);
 
     memoryBarrier(mir->barrierAfter());
-    masm.append(AsmJSHeapAccess(bo.getOffset()));
+    masm.append(wasm::HeapAccess(bo.getOffset()));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     const LAllocation* value = ins->value();
     const LAllocation* ptr = ins->ptr();
@@ -1749,17 +1749,17 @@ CodeGeneratorMIPSShared::visitAsmJSStore
     masm.ma_b(&done, ShortJump);
     masm.bind(&outOfRange);
     // Offset is out of range.
     if (mir->isAtomicAccess())
         masm.ma_b(masm.asmOnOutOfBoundsLabel());
     masm.bind(&done);
 
     memoryBarrier(mir->barrierAfter());
-    masm.append(AsmJSHeapAccess(bo.getOffset()));
+    masm.append(wasm::HeapAccess(bo.getOffset()));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->accessType();
     const LAllocation* ptr = ins->ptr();
@@ -1779,17 +1779,17 @@ CodeGeneratorMIPSShared::visitAsmJSCompa
         maybeCmpOffset = bo.getOffset();
         masm.ma_b(ptrReg, ScratchRegister, masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
     }
     masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                         srcAddr, oldval, newval, InvalidReg,
                                         valueTemp, offsetTemp, maskTemp,
                                         ToAnyRegister(ins->output()));
     if (mir->needsBoundsCheck())
-        masm.append(AsmJSHeapAccess(maybeCmpOffset));
+        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
@@ -1806,17 +1806,17 @@ CodeGeneratorMIPSShared::visitAsmJSAtomi
         BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
         maybeCmpOffset = bo.getOffset();
         masm.ma_b(ptrReg, ScratchRegister, masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
     }
     masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr, value, InvalidReg, valueTemp,
                                        offsetTemp, maskTemp, ToAnyRegister(ins->output()));
     if (mir->needsBoundsCheck())
-        masm.append(AsmJSHeapAccess(maybeCmpOffset));
+        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
     MOZ_ASSERT(ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
@@ -1844,17 +1844,17 @@ CodeGeneratorMIPSShared::visitAsmJSAtomi
                                    valueTemp, offsetTemp, maskTemp,
                                    ToAnyRegister(ins->output()));
     else
         atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                    ToRegister(value), srcAddr, flagTemp, InvalidReg,
                                    valueTemp, offsetTemp, maskTemp,
                                    ToAnyRegister(ins->output()));
     if (mir->needsBoundsCheck())
-        masm.append(AsmJSHeapAccess(maybeCmpOffset));
+        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MOZ_ASSERT(!ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
@@ -1880,17 +1880,17 @@ CodeGeneratorMIPSShared::visitAsmJSAtomi
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp,
                                    valueTemp, offsetTemp, maskTemp);
     else
         atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp,
                                    valueTemp, offsetTemp, maskTemp);
 
     if (mir->needsBoundsCheck())
-        masm.append(AsmJSHeapAccess(maybeCmpOffset));
+        masm.append(wasm::HeapAccess(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSPassStackArg(LAsmJSPassStackArg* ins)
 {
     const MAsmJSPassStackArg* mir = ins->mir();
     if (ins->arg()->isConstant()) {
         masm.storePtr(ImmWord(ToInt32(ins->arg())), Address(StackPointer, mir->spOffset()));
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -1151,17 +1151,17 @@ MacroAssembler::callWithPatch()
 void
 MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
 {
     BufferOffset li(callerOffset - 6 * sizeof(uint32_t));
     Assembler::UpdateLoad64Value(editSrc(li), calleeOffset);
 }
 
 void
-MacroAssembler::call(AsmJSImmPtr target)
+MacroAssembler::call(wasm::SymbolicAddress target)
 {
     movePtr(target, CallReg);
     call(CallReg);
 }
 
 void
 MacroAssembler::call(ImmWord target)
 {
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -885,19 +885,19 @@ MacroAssemblerMIPSCompat::movePtr(ImmGCP
 }
 
 void
 MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest)
 {
     movePtr(ImmWord(uintptr_t(imm.value)), dest);
 }
 void
-MacroAssemblerMIPSCompat::movePtr(AsmJSImmPtr imm, Register dest)
+MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
 {
-    append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm.kind()));
+    append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm));
     ma_liPatchable(dest, ImmWord(-1));
 }
 
 void
 MacroAssemblerMIPSCompat::load8ZeroExtend(const Address& address, Register dest)
 {
     ma_load(dest, address, SizeByte, ZeroExtend);
 }
@@ -959,20 +959,20 @@ MacroAssemblerMIPSCompat::load32(const B
 void
 MacroAssemblerMIPSCompat::load32(AbsoluteAddress address, Register dest)
 {
     movePtr(ImmPtr(address.addr), ScratchRegister);
     load32(Address(ScratchRegister, 0), dest);
 }
 
 void
-MacroAssemblerMIPSCompat::load32(AsmJSAbsoluteAddress address, Register dest)
+MacroAssemblerMIPSCompat::load32(wasm::SymbolicAddress address, Register dest)
 {
-    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
-    load32(Address(ScratchRegister, 0), dest);
+    movePtr(address, ScratchRegister);
+    load32(Address(ScratchRegister, 0), dest);
 }
 
 void
 MacroAssemblerMIPSCompat::loadPtr(const Address& address, Register dest)
 {
     ma_load(dest, address, SizeWord);
 }
 
@@ -985,19 +985,19 @@ MacroAssemblerMIPSCompat::loadPtr(const 
 void
 MacroAssemblerMIPSCompat::loadPtr(AbsoluteAddress address, Register dest)
 {
     movePtr(ImmPtr(address.addr), ScratchRegister);
     loadPtr(Address(ScratchRegister, 0), dest);
 }
 
 void
-MacroAssemblerMIPSCompat::loadPtr(AsmJSAbsoluteAddress address, Register dest)
+MacroAssemblerMIPSCompat::loadPtr(wasm::SymbolicAddress address, Register dest)
 {
-    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
+    movePtr(address, ScratchRegister);
     loadPtr(Address(ScratchRegister, 0), dest);
 }
 
 void
 MacroAssemblerMIPSCompat::loadPrivate(const Address& address, Register dest)
 {
     ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
 }
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -554,17 +554,17 @@ class MacroAssemblerMIPSCompat : public 
         ma_b(lhs, ptr, label, cond);
     }
     void branchPtr(Condition cond, Register lhs, ImmWord imm, Label* label) {
         ma_b(lhs, imm, label, cond);
     }
     void branchPtr(Condition cond, Register lhs, ImmPtr imm, Label* label) {
         ma_b(lhs, imm, label, cond);
     }
-    void branchPtr(Condition cond, Register lhs, AsmJSImmPtr imm, Label* label) {
+    void branchPtr(Condition cond, Register lhs, wasm::SymbolicAddress imm, Label* label) {
         movePtr(imm, SecondScratchReg);
         ma_b(lhs, SecondScratchReg, label, cond);
     }
     void branchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
         ma_b(lhs, imm, label, cond);
     }
     void decBranchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
         subPtr(imm, lhs);
@@ -627,29 +627,29 @@ class MacroAssemblerMIPSCompat : public 
     void branchPtr(Condition cond, AbsoluteAddress addr, Register ptr, Label* label) {
         loadPtr(addr, SecondScratchReg);
         ma_b(SecondScratchReg, ptr, label, cond);
     }
     void branchPtr(Condition cond, AbsoluteAddress addr, ImmWord ptr, Label* label) {
         loadPtr(addr, SecondScratchReg);
         ma_b(SecondScratchReg, ptr, label, cond);
     }
-    void branchPtr(Condition cond, AsmJSAbsoluteAddress addr, Register ptr, Label* label) {
+    void branchPtr(Condition cond, wasm::SymbolicAddress addr, Register ptr, Label* label) {
         loadPtr(addr, SecondScratchReg);
         ma_b(SecondScratchReg, ptr, label, cond);
     }
     void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label* label) {
         load32(lhs, SecondScratchReg);
         ma_b(SecondScratchReg, rhs, label, cond);
     }
     void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) {
         load32(lhs, SecondScratchReg);
         ma_b(SecondScratchReg, rhs, label, cond);
     }
-    void branch32(Condition cond, AsmJSAbsoluteAddress addr, Imm32 imm, Label* label) {
+    void branch32(Condition cond, wasm::SymbolicAddress addr, Imm32 imm, Label* label) {
         load32(addr, SecondScratchReg);
         ma_b(SecondScratchReg, imm, label, cond);
     }
 
     void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
         if (dest.isFloat())
             loadInt32OrDouble(address, dest.fpu());
         else
@@ -1120,17 +1120,17 @@ class MacroAssemblerMIPSCompat : public 
     void move64(Register64 src, Register64 dest) {
         move32(src.low, dest.low);
         move32(src.high, dest.high);
     }
 
     void movePtr(Register src, Register dest);
     void movePtr(ImmWord imm, Register dest);
     void movePtr(ImmPtr imm, Register dest);
-    void movePtr(AsmJSImmPtr imm, Register dest);
+    void movePtr(wasm::SymbolicAddress imm, Register dest);
     void movePtr(ImmGCPtr imm, Register dest);
 
     void load8SignExtend(const Address& address, Register dest);
     void load8SignExtend(const BaseIndex& src, Register dest);
 
     void load8ZeroExtend(const Address& address, Register dest);
     void load8ZeroExtend(const BaseIndex& src, Register dest);
 
@@ -1138,26 +1138,26 @@ class MacroAssemblerMIPSCompat : public 
     void load16SignExtend(const BaseIndex& src, Register dest);
 
     void load16ZeroExtend(const Address& address, Register dest);
     void load16ZeroExtend(const BaseIndex& src, Register dest);
 
     void load32(const Address& address, Register dest);
     void load32(const BaseIndex& address, Register dest);
     void load32(AbsoluteAddress address, Register dest);
-    void load32(AsmJSAbsoluteAddress address, Register dest);
+    void load32(wasm::SymbolicAddress address, Register dest);
     void load64(const Address& address, Register64 dest) {
         load32(Address(address.base, address.offset + LOW_32_OFFSET), dest.low);
         load32(Address(address.base, address.offset + HIGH_32_OFFSET), dest.high);
     }
 
     void loadPtr(const Address& address, Register dest);
     void loadPtr(const BaseIndex& src, Register dest);
     void loadPtr(AbsoluteAddress address, Register dest);
-    void loadPtr(AsmJSAbsoluteAddress address, Register dest);
+    void loadPtr(wasm::SymbolicAddress address, Register dest);
 
     void loadPrivate(const Address& address, Register dest);
 
     void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
@@ -1346,21 +1346,21 @@ class MacroAssemblerMIPSCompat : public 
         as_movs(dest, src);
     }
 
     void branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, Label* label);
     void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
                                     Label* label);
 
     void loadAsmJSActivation(Register dest) {
-        loadPtr(Address(GlobalReg, AsmJSActivationGlobalDataOffset - AsmJSGlobalRegBias), dest);
+        loadPtr(Address(GlobalReg, wasm::ActivationGlobalDataOffset - AsmJSGlobalRegBias), dest);
     }
     void loadAsmJSHeapRegisterFromGlobalData() {
-        MOZ_ASSERT(Imm16::IsInSignedRange(AsmJSHeapGlobalDataOffset - AsmJSGlobalRegBias));
-        loadPtr(Address(GlobalReg, AsmJSHeapGlobalDataOffset - AsmJSGlobalRegBias), HeapReg);
+        MOZ_ASSERT(Imm16::IsInSignedRange(wasm::HeapGlobalDataOffset - AsmJSGlobalRegBias));
+        loadPtr(Address(GlobalReg, wasm::HeapGlobalDataOffset - AsmJSGlobalRegBias), HeapReg);
     }
 
     // Instrumentation for entering and leaving the profiler.
     void profilerEnterFrame(Register framePtr, Register scratch);
     void profilerExitFrame();
 };
 
 typedef MacroAssemblerMIPSCompat MacroAssemblerSpecific;
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -970,19 +970,19 @@ MacroAssemblerMIPS64Compat::movePtr(ImmG
 }
 
 void
 MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest)
 {
     movePtr(ImmWord(uintptr_t(imm.value)), dest);
 }
 void
-MacroAssemblerMIPS64Compat::movePtr(AsmJSImmPtr imm, Register dest)
+MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm, Register dest)
 {
-    append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm.kind()));
+    append(AsmJSAbsoluteLink(CodeOffset(nextOffset().getOffset()), imm));
     ma_liPatchable(dest, ImmWord(-1));
 }
 
 void
 MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address, Register dest)
 {
     ma_load(dest, address, SizeByte, ZeroExtend);
 }
@@ -1044,19 +1044,19 @@ MacroAssemblerMIPS64Compat::load32(const
 void
 MacroAssemblerMIPS64Compat::load32(AbsoluteAddress address, Register dest)
 {
     movePtr(ImmPtr(address.addr), ScratchRegister);
     load32(Address(ScratchRegister, 0), dest);
 }
 
 void
-MacroAssemblerMIPS64Compat::load32(AsmJSAbsoluteAddress address, Register dest)
+MacroAssemblerMIPS64Compat::load32(wasm::SymbolicAddress address, Register dest)
 {
-    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
+    movePtr(address, ScratchRegister);
     load32(Address(ScratchRegister, 0), dest);
 }
 
 void
 MacroAssemblerMIPS64Compat::loadPtr(const Address& address, Register dest)
 {
     ma_load(dest, address, SizeDouble);
 }
@@ -1070,19 +1070,19 @@ MacroAssemblerMIPS64Compat::loadPtr(cons
 void
 MacroAssemblerMIPS64Compat::loadPtr(AbsoluteAddress address, Register dest)
 {
     movePtr(ImmPtr(address.addr), ScratchRegister);
     loadPtr(Address(ScratchRegister, 0), dest);
 }
 
 void
-MacroAssemblerMIPS64Compat::loadPtr(AsmJSAbsoluteAddress address, Register dest)
+MacroAssemblerMIPS64Compat::loadPtr(wasm::SymbolicAddress address, Register dest)
 {
-    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
+    movePtr(address, ScratchRegister);
     loadPtr(Address(ScratchRegister, 0), dest);
 }
 
 void
 MacroAssemblerMIPS64Compat::loadPrivate(const Address& address, Register dest)
 {
     loadPtr(address, dest);
     ma_dsll(dest, dest, Imm32(1));
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -598,17 +598,17 @@ class MacroAssemblerMIPS64Compat : publi
         ma_b(lhs, ptr, label, cond);
     }
     void branchPtr(Condition cond, Register lhs, ImmWord imm, Label* label) {
         ma_b(lhs, imm, label, cond);
     }
     void branchPtr(Condition cond, Register lhs, ImmPtr imm, Label* label) {
         ma_b(lhs, imm, label, cond);
     }
-    void branchPtr(Condition cond, Register lhs, AsmJSImmPtr imm, Label* label) {
+    void branchPtr(Condition cond, Register lhs, wasm::SymbolicAddress imm, Label* label) {
         movePtr(imm, SecondScratchReg);
         ma_b(lhs, SecondScratchReg, label, cond);
     }
     void branchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
         ma_b(lhs, imm, label, cond);
     }
     void decBranchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
         subPtr(imm, lhs);
@@ -661,29 +661,29 @@ class MacroAssemblerMIPS64Compat : publi
     void branchPtr(Condition cond, AbsoluteAddress addr, Register ptr, Label* label) {
         loadPtr(addr, SecondScratchReg);
         ma_b(SecondScratchReg, ptr, label, cond);
     }
     void branchPtr(Condition cond, AbsoluteAddress addr, ImmWord ptr, Label* label) {
         loadPtr(addr, SecondScratchReg);
         ma_b(SecondScratchReg, ptr, label, cond);
     }
-    void branchPtr(Condition cond, AsmJSAbsoluteAddress addr, Register ptr, Label* label) {
+    void branchPtr(Condition cond, wasm::SymbolicAddress addr, Register ptr, Label* label) {
         loadPtr(addr, SecondScratchReg);
         ma_b(SecondScratchReg, ptr, label, cond);
     }
     void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label* label) {
         load32(lhs, SecondScratchReg);
         ma_b(SecondScratchReg, rhs, label, cond);
     }
     void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) {
         load32(lhs, SecondScratchReg);
         ma_b(SecondScratchReg, rhs, label, cond);
     }
-    void branch32(Condition cond, AsmJSAbsoluteAddress addr, Imm32 imm, Label* label) {
+    void branch32(Condition cond, wasm::SymbolicAddress addr, Imm32 imm, Label* label) {
         load32(addr, SecondScratchReg);
         ma_b(SecondScratchReg, imm, label, cond);
     }
 
     template <typename T>
     void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
         if (dest.isFloat())
             loadInt32OrDouble(address, dest.fpu());
@@ -1134,17 +1134,17 @@ class MacroAssemblerMIPS64Compat : publi
     void move32(Register src, Register dest);
     void move64(Register64 src, Register64 dest) {
         movePtr(src.reg, dest.reg);
     }
 
     void movePtr(Register src, Register dest);
     void movePtr(ImmWord imm, Register dest);
     void movePtr(ImmPtr imm, Register dest);
-    void movePtr(AsmJSImmPtr imm, Register dest);
+    void movePtr(wasm::SymbolicAddress imm, Register dest);
     void movePtr(ImmGCPtr imm, Register dest);
 
     void load8SignExtend(const Address& address, Register dest);
     void load8SignExtend(const BaseIndex& src, Register dest);
 
     void load8ZeroExtend(const Address& address, Register dest);
     void load8ZeroExtend(const BaseIndex& src, Register dest);
 
@@ -1152,25 +1152,25 @@ class MacroAssemblerMIPS64Compat : publi
     void load16SignExtend(const BaseIndex& src, Register dest);
 
     void load16ZeroExtend(const Address& address, Register dest);
     void load16ZeroExtend(const BaseIndex& src, Register dest);
 
     void load32(const Address& address, Register dest);
     void load32(const BaseIndex& address, Register dest);
     void load32(AbsoluteAddress address, Register dest);
-    void load32(AsmJSAbsoluteAddress address, Register dest);
+    void load32(wasm::SymbolicAddress address, Register dest);
     void load64(const Address& address, Register64 dest) {
         loadPtr(address, dest.reg);
     }
 
     void loadPtr(const Address& address, Register dest);
     void loadPtr(const BaseIndex& src, Register dest);
     void loadPtr(AbsoluteAddress address, Register dest);
-    void loadPtr(AsmJSAbsoluteAddress address, Register dest);
+    void loadPtr(wasm::SymbolicAddress address, Register dest);
 
     void loadPrivate(const Address& address, Register dest);
 
     void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
@@ -1360,21 +1360,21 @@ class MacroAssemblerMIPS64Compat : publi
         as_movs(dest, src);
     }
 
     void branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, Label* label);
     void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
                                     Label* label);
 
     void loadAsmJSActivation(Register dest) {
-        loadPtr(Address(GlobalReg, AsmJSActivationGlobalDataOffset - AsmJSGlobalRegBias), dest);
+        loadPtr(Address(GlobalReg, wasm::ActivationGlobalDataOffset - AsmJSGlobalRegBias), dest);
     }
     void loadAsmJSHeapRegisterFromGlobalData() {
-        MOZ_ASSERT(Imm16::IsInSignedRange(AsmJSHeapGlobalDataOffset - AsmJSGlobalRegBias));
-        loadPtr(Address(GlobalReg, AsmJSHeapGlobalDataOffset - AsmJSGlobalRegBias), HeapReg);
+        MOZ_ASSERT(Imm16::IsInSignedRange(wasm::HeapGlobalDataOffset - AsmJSGlobalRegBias));
+        loadPtr(Address(GlobalReg, wasm::HeapGlobalDataOffset - AsmJSGlobalRegBias), HeapReg);
     }
 
     // Instrumentation for entering and leaving the profiler.
     void profilerEnterFrame(Register framePtr, Register scratch);
     void profilerExitFrame();
 };
 
 typedef MacroAssemblerMIPS64Compat MacroAssemblerSpecific;
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -146,18 +146,18 @@ IsCompilingAsmJS()
 
 // Pointer to be embedded as an immediate in an instruction.
 struct ImmPtr
 {
     void* value;
 
     explicit ImmPtr(const void* value) : value(const_cast<void*>(value))
     {
-        // To make code serialization-safe, asm.js compilation should only
-        // compile pointer immediates using AsmJSImmPtr.
+        // To make code serialization-safe, wasm compilation should only
+        // compile pointer immediates using a SymbolicAddress.
         MOZ_ASSERT(!IsCompilingAsmJS());
     }
 
     template <class R>
     explicit ImmPtr(R (*pf)())
       : value(JS_FUNC_TO_DATA_PTR(void*, pf))
     {
         MOZ_ASSERT(!IsCompilingAsmJS());
@@ -644,90 +644,16 @@ class CodeLocationLabel
         return raw_;
     }
     uint8_t* offset() const {
         MOZ_ASSERT(state_ == Relative);
         return raw_;
     }
 };
 
-// While the frame-pointer chain allows the stack to be unwound without
-// metadata, Error.stack still needs to know the line/column of every call in
-// the chain. A CallSiteDesc describes the line/column of a single callsite.
-// A CallSiteDesc is created by callers of MacroAssembler.
-class CallSiteDesc
-{
-    uint32_t line_;
-    uint32_t column_ : 31;
-    uint32_t kind_ : 1;
-  public:
-    enum Kind {
-        Relative,  // pc-relative call
-        Register   // call *register
-    };
-    CallSiteDesc() {}
-    explicit CallSiteDesc(Kind kind)
-      : line_(0), column_(0), kind_(kind)
-    {}
-    CallSiteDesc(uint32_t line, uint32_t column, Kind kind)
-      : line_(line), column_(column), kind_(kind)
-    {
-        MOZ_ASSERT(column_ == column, "column must fit in 31 bits");
-    }
-    uint32_t line() const { return line_; }
-    uint32_t column() const { return column_; }
-    Kind kind() const { return Kind(kind_); }
-};
-
-// Adds to CallSiteDesc the metadata necessary to walk the stack given an
-// initial stack-pointer.
-class CallSite : public CallSiteDesc
-{
-    uint32_t returnAddressOffset_;
-    uint32_t stackDepth_;
-
-  public:
-    CallSite() {}
-
-    CallSite(CallSiteDesc desc, uint32_t returnAddressOffset, uint32_t stackDepth)
-      : CallSiteDesc(desc),
-        returnAddressOffset_(returnAddressOffset),
-        stackDepth_(stackDepth)
-    { }
-
-    void setReturnAddressOffset(uint32_t r) { returnAddressOffset_ = r; }
-    void offsetReturnAddressBy(int32_t o) { returnAddressOffset_ += o; }
-    uint32_t returnAddressOffset() const { return returnAddressOffset_; }
-
-    // The stackDepth measures the amount of stack space pushed since the
-    // function was called. In particular, this includes the pushed return
-    // address on all archs (whether or not the call instruction pushes the
-    // return address (x86/x64) or the prologue does (ARM/MIPS)).
-    uint32_t stackDepth() const { return stackDepth_; }
-};
-
-typedef Vector<CallSite, 0, SystemAllocPolicy> CallSiteVector;
-
-class CallSiteAndTarget : public CallSite
-{
-    uint32_t targetIndex_;
-
-  public:
-    explicit CallSiteAndTarget(CallSite cs, uint32_t targetIndex)
-      : CallSite(cs), targetIndex_(targetIndex)
-    { }
-
-    static const uint32_t NOT_INTERNAL = UINT32_MAX;
-
-    bool isInternal() const { return targetIndex_ != NOT_INTERNAL; }
-    uint32_t targetIndex() const { MOZ_ASSERT(isInternal()); return targetIndex_; }
-};
-
-typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
-
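Callers earlier in this patch already refer to wasm::CallSiteDesc, so the call-site classes removed here are being hoisted into the wasm namespace rather than deleted. A sketch of the destination (assumed to be js/src/asmjs/Wasm.h), reusing the removed CallSiteDesc definition essentially verbatim:

    namespace js {
    namespace wasm {

    // Assumed destination of the hoist; body copied from the removed block above.
    class CallSiteDesc
    {
        uint32_t line_;
        uint32_t column_ : 31;
        uint32_t kind_ : 1;
      public:
        enum Kind { Relative, Register };
        CallSiteDesc() {}
        explicit CallSiteDesc(Kind kind) : line_(0), column_(0), kind_(kind) {}
        CallSiteDesc(uint32_t line, uint32_t column, Kind kind)
          : line_(line), column_(column), kind_(kind)
        {
            MOZ_ASSERT(column_ == column, "column must fit in 31 bits");
        }
        uint32_t line() const { return line_; }
        uint32_t column() const { return column_; }
        Kind kind() const { return Kind(kind_); }
    };

    // CallSite, CallSiteAndTarget and their Vector typedefs are assumed to move
    // here alongside it, unchanged.

    } // namespace wasm
    } // namespace js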
 // As an invariant across architectures, within asm.js code:
 //   $sp % AsmJSStackAlignment = (sizeof(AsmJSFrame) + masm.framePushed) % AsmJSStackAlignment
 // Thus, AsmJSFrame represents the bytes pushed after the call (which occurred
 // with a AsmJSStackAlignment-aligned StackPointer) that are not included in
 // masm.framePushed.
 struct AsmJSFrame
 {
     // The caller's saved frame pointer. In non-profiling mode, internal
@@ -738,217 +664,36 @@ struct AsmJSFrame
 
     // The return address pushed by the call (in the case of ARM/MIPS the return
     // address is pushed by the first instruction of the prologue).
     void* returnAddress;
 };
 static_assert(sizeof(AsmJSFrame) == 2 * sizeof(void*), "?!");
 static const uint32_t AsmJSFrameBytesAfterReturnAddress = sizeof(void*);
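A quick worked instance of the invariant stated above, assuming AsmJSStackAlignment is 16 and a 64-bit target (so the static_assert gives sizeof(AsmJSFrame) == 16):

    // Right after the prologue, masm.framePushed == 0 and $sp has dropped by
    // exactly sizeof(AsmJSFrame) from an aligned value:
    //     $sp % 16 == 0 == (16 + 0) % 16
    // After the body reserves another 16 bytes, masm.framePushed == 16 and:
    //     $sp % 16 == 0 == (16 + 16) % 16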
 
-// A hoisting of constants that would otherwise require #including AsmJSModule.h
-// everywhere. Values are asserted in AsmJSModule.h.
-static const unsigned AsmJSActivationGlobalDataOffset = 0;
-static const unsigned AsmJSHeapGlobalDataOffset = sizeof(void*);
-static const unsigned AsmJSNaN64GlobalDataOffset = 2 * sizeof(void*);
-static const unsigned AsmJSNaN32GlobalDataOffset = 2 * sizeof(void*) + sizeof(double);
-
-// Summarizes a heap access made by asm.js code that needs to be patched later
-// and/or looked up by the asm.js signal handlers. Different architectures need
-// to know different things (x64: offset and length, ARM: where to patch in
-// heap length, x86: where to patch in heap length and base) hence the massive
-// #ifdefery.
-class AsmJSHeapAccess
-{
-#if defined(JS_CODEGEN_X64)
-  public:
-    enum WhatToDoOnOOB {
-        CarryOn, // loads return undefined, stores do nothing.
-        Throw    // throw a RangeError
-    };
-#endif
-
-  private:
-    uint32_t insnOffset_;
-#if defined(JS_CODEGEN_X86)
-    uint8_t opLength_;  // the length of the load/store instruction
-#endif
-#if defined(JS_CODEGEN_X64)
-    uint8_t offsetWithinWholeSimdVector_; // if is this e.g. the Z of an XYZ
-    bool throwOnOOB_;   // should we throw on OOB?
-#endif
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-    uint8_t cmpDelta_;  // the number of bytes from the cmp to the load/store instruction
-#endif
-
-    JS_STATIC_ASSERT(AnyRegister::Total < UINT8_MAX);
-
-  public:
-    AsmJSHeapAccess() {}
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-    static const uint32_t NoLengthCheck = UINT32_MAX;
-#endif
-
-#if defined(JS_CODEGEN_X86)
-    // If 'cmp' equals 'insnOffset' or if it is not supplied then the
-    // cmpDelta_ is zero indicating that there is no length to patch.
-    AsmJSHeapAccess(uint32_t insnOffset, uint32_t after, uint32_t cmp = NoLengthCheck)
-    {
-        mozilla::PodZero(this);  // zero padding for Valgrind
-        insnOffset_ = insnOffset;
-        opLength_ = after - insnOffset;
-        cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
-    }
-#elif defined(JS_CODEGEN_X64)
-    // If 'cmp' equals 'insnOffset' or if it is not supplied then the
-    // cmpDelta_ is zero indicating that there is no length to patch.
-    AsmJSHeapAccess(uint32_t insnOffset, WhatToDoOnOOB oob,
-                    uint32_t cmp = NoLengthCheck,
-                    uint32_t offsetWithinWholeSimdVector = 0)
-    {
-        mozilla::PodZero(this);  // zero padding for Valgrind
-        insnOffset_ = insnOffset;
-        offsetWithinWholeSimdVector_ = offsetWithinWholeSimdVector;
-        throwOnOOB_ = oob == Throw;
-        cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
-        MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector);
-    }
-#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
-      defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-    explicit AsmJSHeapAccess(uint32_t insnOffset)
-    {
-        mozilla::PodZero(this);  // zero padding for Valgrind
-        insnOffset_ = insnOffset;
-    }
-#endif
-
-    uint32_t insnOffset() const { return insnOffset_; }
-    void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
-    void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
-#if defined(JS_CODEGEN_X86)
-    void* patchHeapPtrImmAt(uint8_t* code) const { return code + (insnOffset_ + opLength_); }
-#endif
-#if defined(JS_CODEGEN_X64)
-    bool throwOnOOB() const { return throwOnOOB_; }
-    uint32_t offsetWithinWholeSimdVector() const { return offsetWithinWholeSimdVector_; }
-#endif
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-    bool hasLengthCheck() const { return cmpDelta_ > 0; }
-    void* patchLengthAt(uint8_t* code) const {
-        MOZ_ASSERT(hasLengthCheck());
-        return code + (insnOffset_ - cmpDelta_);
-    }
-#endif
-};
-
-typedef Vector<AsmJSHeapAccess, 0, SystemAllocPolicy> AsmJSHeapAccessVector;
-
 struct AsmJSGlobalAccess
 {
     CodeOffset patchAt;
     unsigned globalDataOffset;
 
     AsmJSGlobalAccess(CodeOffset patchAt, unsigned globalDataOffset)
       : patchAt(patchAt), globalDataOffset(globalDataOffset)
     {}
 };
 
-// Describes the intended pointee of an immediate to be embedded in asm.js
-// code. By representing the pointee as a symbolic enum, the pointee can be
-// patched after deserialization when the address of global things has changed.
-enum AsmJSImmKind
-{
-    AsmJSImm_ToInt32         = AsmJSExit::Builtin_ToInt32,
-#if defined(JS_CODEGEN_ARM)
-    AsmJSImm_aeabi_idivmod   = AsmJSExit::Builtin_IDivMod,
-    AsmJSImm_aeabi_uidivmod  = AsmJSExit::Builtin_UDivMod,
-    AsmJSImm_AtomicCmpXchg   = AsmJSExit::Builtin_AtomicCmpXchg,
-    AsmJSImm_AtomicXchg      = AsmJSExit::Builtin_AtomicXchg,
-    AsmJSImm_AtomicFetchAdd  = AsmJSExit::Builtin_AtomicFetchAdd,
-    AsmJSImm_AtomicFetchSub  = AsmJSExit::Builtin_AtomicFetchSub,
-    AsmJSImm_AtomicFetchAnd  = AsmJSExit::Builtin_AtomicFetchAnd,
-    AsmJSImm_AtomicFetchOr   = AsmJSExit::Builtin_AtomicFetchOr,
-    AsmJSImm_AtomicFetchXor  = AsmJSExit::Builtin_AtomicFetchXor,
-#endif
-    AsmJSImm_ModD            = AsmJSExit::Builtin_ModD,
-    AsmJSImm_SinD            = AsmJSExit::Builtin_SinD,
-    AsmJSImm_CosD            = AsmJSExit::Builtin_CosD,
-    AsmJSImm_TanD            = AsmJSExit::Builtin_TanD,
-    AsmJSImm_ASinD           = AsmJSExit::Builtin_ASinD,
-    AsmJSImm_ACosD           = AsmJSExit::Builtin_ACosD,
-    AsmJSImm_ATanD           = AsmJSExit::Builtin_ATanD,
-    AsmJSImm_CeilD           = AsmJSExit::Builtin_CeilD,
-    AsmJSImm_CeilF           = AsmJSExit::Builtin_CeilF,
-    AsmJSImm_FloorD          = AsmJSExit::Builtin_FloorD,
-    AsmJSImm_FloorF          = AsmJSExit::Builtin_FloorF,
-    AsmJSImm_ExpD            = AsmJSExit::Builtin_ExpD,
-    AsmJSImm_LogD            = AsmJSExit::Builtin_LogD,
-    AsmJSImm_PowD            = AsmJSExit::Builtin_PowD,
-    AsmJSImm_ATan2D          = AsmJSExit::Builtin_ATan2D,
-    AsmJSImm_Runtime,
-    AsmJSImm_RuntimeInterruptUint32,
-    AsmJSImm_StackLimit,
-    AsmJSImm_ReportOverRecursed,
-    AsmJSImm_OnDetached,
-    AsmJSImm_OnOutOfBounds,
-    AsmJSImm_OnImpreciseConversion,
-    AsmJSImm_HandleExecutionInterrupt,
-    AsmJSImm_InvokeFromAsmJS_Ignore,
-    AsmJSImm_InvokeFromAsmJS_ToInt32,
-    AsmJSImm_InvokeFromAsmJS_ToNumber,
-    AsmJSImm_CoerceInPlace_ToInt32,
-    AsmJSImm_CoerceInPlace_ToNumber,
-    AsmJSImm_Limit
-};
-
-static inline AsmJSImmKind
-BuiltinToImmKind(AsmJSExit::BuiltinKind builtin)
-{
-    return AsmJSImmKind(builtin);
-}
-
-static inline bool
-ImmKindIsBuiltin(AsmJSImmKind imm, AsmJSExit::BuiltinKind* builtin)
-{
-    if (unsigned(imm) >= unsigned(AsmJSExit::Builtin_Limit))
-        return false;
-    *builtin = AsmJSExit::BuiltinKind(imm);
-    return true;
-}
-
-// Pointer to be embedded as an immediate in asm.js code.
-class AsmJSImmPtr
-{
-    AsmJSImmKind kind_;
-  public:
-    AsmJSImmKind kind() const { return kind_; }
-    // This needs to be MOZ_IMPLICIT in order to make MacroAssembler::CallWithABINoProfiling compile.
-    MOZ_IMPLICIT AsmJSImmPtr(AsmJSImmKind kind) : kind_(kind) { MOZ_ASSERT(IsCompilingAsmJS()); }
-    AsmJSImmPtr() {}
-};
-
-// Pointer to be embedded as an immediate that is loaded/stored from by an
-// instruction in asm.js code.
-class AsmJSAbsoluteAddress
-{
-    AsmJSImmKind kind_;
-  public:
-    AsmJSImmKind kind() const { return kind_; }
-    explicit AsmJSAbsoluteAddress(AsmJSImmKind kind) : kind_(kind) { MOZ_ASSERT(IsCompilingAsmJS()); }
-    AsmJSAbsoluteAddress() {}
-};
-
 // Represents an instruction to be patched and the intended pointee. These
 // links are accumulated in the MacroAssembler, but patching is done outside
 // the MacroAssembler (in AsmJSModule::staticallyLink).
 struct AsmJSAbsoluteLink
 {
-    AsmJSAbsoluteLink(CodeOffset patchAt, AsmJSImmKind target)
+    AsmJSAbsoluteLink(CodeOffset patchAt, wasm::SymbolicAddress target)
       : patchAt(patchAt), target(target) {}
+
     CodeOffset patchAt;
-    AsmJSImmKind target;
+    wasm::SymbolicAddress target;
 };
 
 // Represents a call from an asm.js function to another asm.js function,
 // represented by the index of the callee in the Module Validator
 struct AsmJSInternalCallee
 {
     uint32_t index;
 
@@ -958,18 +703,18 @@ struct AsmJSInternalCallee
     explicit AsmJSInternalCallee(uint32_t calleeIndex)
       : index(calleeIndex)
     {}
 };
 
 // The base class of all Assemblers for all archs.
 class AssemblerShared
 {
-    CallSiteAndTargetVector callsites_;
-    Vector<AsmJSHeapAccess, 0, SystemAllocPolicy> asmJSHeapAccesses_;
+    wasm::CallSiteAndTargetVector callsites_;
+    wasm::HeapAccessVector heapAccesses_;
     Vector<AsmJSGlobalAccess, 0, SystemAllocPolicy> asmJSGlobalAccesses_;
     Vector<AsmJSAbsoluteLink, 0, SystemAllocPolicy> asmJSAbsoluteLinks_;
 
   protected:
     Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
 
     bool enoughMemory_;
     bool embedsNurseryPointers_;
@@ -991,28 +736,28 @@ class AssemblerShared
     bool oom() const {
         return !enoughMemory_;
     }
 
     bool embedsNurseryPointers() const {
         return embedsNurseryPointers_;
     }
 
-    void append(const CallSiteDesc& desc, CodeOffset label, size_t framePushed,
-                uint32_t targetIndex = CallSiteAndTarget::NOT_INTERNAL)
+    void append(const wasm::CallSiteDesc& desc, CodeOffset label, size_t framePushed,
+                uint32_t targetIndex = wasm::CallSiteAndTarget::NOT_INTERNAL)
     {
         // framePushed does not include sizeof(AsmJSFrame), so add it in here (see
         // CallSite::stackDepth).
-        CallSite callsite(desc, label.offset(), framePushed + sizeof(AsmJSFrame));
-        enoughMemory_ &= callsites_.append(CallSiteAndTarget(callsite, targetIndex));
+        wasm::CallSite callsite(desc, label.offset(), framePushed + sizeof(AsmJSFrame));
+        enoughMemory_ &= callsites_.append(wasm::CallSiteAndTarget(callsite, targetIndex));
     }
-    CallSiteAndTargetVector& callSites() { return callsites_; }
+    wasm::CallSiteAndTargetVector& callSites() { return callsites_; }
 
-    void append(AsmJSHeapAccess access) { enoughMemory_ &= asmJSHeapAccesses_.append(access); }
-    AsmJSHeapAccessVector&& extractAsmJSHeapAccesses() { return Move(asmJSHeapAccesses_); }
+    void append(wasm::HeapAccess access) { enoughMemory_ &= heapAccesses_.append(access); }
+    wasm::HeapAccessVector&& extractHeapAccesses() { return Move(heapAccesses_); }
 
     void append(AsmJSGlobalAccess access) { enoughMemory_ &= asmJSGlobalAccesses_.append(access); }
     size_t numAsmJSGlobalAccesses() const { return asmJSGlobalAccesses_.length(); }
     AsmJSGlobalAccess asmJSGlobalAccess(size_t i) const { return asmJSGlobalAccesses_[i]; }
 
     void append(AsmJSAbsoluteLink link) { enoughMemory_ &= asmJSAbsoluteLinks_.append(link); }
     size_t numAsmJSAbsoluteLinks() const { return asmJSAbsoluteLinks_.length(); }
     AsmJSAbsoluteLink asmJSAbsoluteLink(size_t i) const { return asmJSAbsoluteLinks_[i]; }
@@ -1032,20 +777,20 @@ class AssemblerShared
     // Merge this assembler with the other one, invalidating it, by shifting all
     // offsets by a delta.
     bool asmMergeWith(size_t delta, const AssemblerShared& other) {
         size_t i = callsites_.length();
         enoughMemory_ &= callsites_.appendAll(other.callsites_);
         for (; i < callsites_.length(); i++)
             callsites_[i].offsetReturnAddressBy(delta);
 
-        i = asmJSHeapAccesses_.length();
-        enoughMemory_ &= asmJSHeapAccesses_.appendAll(other.asmJSHeapAccesses_);
-        for (; i < asmJSHeapAccesses_.length(); i++)
-            asmJSHeapAccesses_[i].offsetInsnOffsetBy(delta);
+        i = heapAccesses_.length();
+        enoughMemory_ &= heapAccesses_.appendAll(other.heapAccesses_);
+        for (; i < heapAccesses_.length(); i++)
+            heapAccesses_[i].offsetInsnOffsetBy(delta);
 
         i = asmJSGlobalAccesses_.length();
         enoughMemory_ &= asmJSGlobalAccesses_.appendAll(other.asmJSGlobalAccesses_);
         for (; i < asmJSGlobalAccesses_.length(); i++)
             asmJSGlobalAccesses_[i].patchAt.offsetBy(delta);
 
         i = asmJSAbsoluteLinks_.length();
         enoughMemory_ &= asmJSAbsoluteLinks_.appendAll(other.asmJSAbsoluteLinks_);
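
The AsmJSHeapAccess class removed above (hoisted into Wasm.h as wasm::HeapAccess) records, for each heap load/store, where the instruction sits and how far back the length-check cmp is, so both can be patched once the heap base and length are known. A minimal sketch of that bookkeeping, assuming the hoisted class keeps the same fields as the removed one:

    #include <stdint.h>

    // Sketch only: mirrors the fields of the removed AsmJSHeapAccess; the
    // hoisted wasm::HeapAccess is assumed to keep equivalent bookkeeping.
    struct HeapAccessSketch
    {
        uint32_t insnOffset;  // offset of the load/store within module code
        uint8_t  opLength;    // x86: byte length of the load/store instruction
        uint8_t  cmpDelta;    // bytes from the length-check cmp to the load/store; 0 = no check

        bool hasLengthCheck() const { return cmpDelta > 0; }

        // x86/x64: the heap-length immediate to patch sits cmpDelta bytes
        // before the load/store.
        void* patchLengthAt(uint8_t* code) const { return code + (insnOffset - cmpDelta); }

        // x86 only: the heap base-pointer immediate follows the instruction.
        void* patchHeapPtrImmAt(uint8_t* code) const { return code + (insnOffset + opLength); }
    };

On x64 the record instead carries throwOnOOB and offsetWithinWholeSimdVector for the signal handler, as in the removed JS_CODEGEN_X64 branch.
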
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1461,17 +1461,17 @@ CodeGeneratorShared::visitOutOfLineTrunc
         masm.convertFloat32ToDouble(src, src);
         src = src.asDouble();
     }
 #endif
 
     masm.setupUnalignedABICall(dest);
     masm.passABIArg(src, MoveOp::DOUBLE);
     if (gen->compilingAsmJS())
-        masm.callWithABI(AsmJSImm_ToInt32);
+        masm.callWithABI(wasm::SymbolicAddress::ToInt32);
     else
         masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
     masm.storeCallResult(dest);
 
 #if !defined(JS_CODEGEN_ARM) && !defined(JS_CODEGEN_ARM64)
     if (ool->needFloat32Conversion())
         masm.pop(srcSingle);
 #endif
@@ -1515,17 +1515,17 @@ CodeGeneratorShared::emitAsmJSCall(LAsmJ
     switch (callee.which()) {
       case MAsmJSCall::Callee::Internal:
         masm.call(mir->desc(), callee.internal());
         break;
       case MAsmJSCall::Callee::Dynamic:
         masm.call(mir->desc(), ToRegister(ins->getOperand(mir->dynamicCalleeOperandIndex())));
         break;
       case MAsmJSCall::Callee::Builtin:
-        masm.call(AsmJSImmPtr(callee.builtin()));
+        masm.call(BuiltinToImmediate(callee.builtin()));
         break;
     }
 
     if (mir->spIncrement())
         masm.reserveStack(mir->spIncrement());
 }
 
 void
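
In emitAsmJSCall above, the Callee::Builtin case now goes through BuiltinToImmediate rather than wrapping the builtin in an AsmJSImmPtr. Since the removed BuiltinToImmKind was a plain cast (builtin exits occupy the leading enumerator values), BuiltinToImmediate is presumably the equivalent one-liner over the hoisted types; a hedged sketch, with wasm::Builtin as the assumed name of the hoisted builtin-exit enum:

    // Assumed shape only, mirroring the removed BuiltinToImmKind: builtin
    // exits are the first wasm::SymbolicAddress values, so the mapping is a
    // numeric cast.
    static inline wasm::SymbolicAddress
    BuiltinToImmediate(wasm::Builtin builtin)
    {
        return wasm::SymbolicAddress(uint32_t(builtin));
    }
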
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -1151,34 +1151,34 @@ class LCheckOverRecursed : public LInstr
     MCheckOverRecursed* mir() const {
         return mir_->toCheckOverRecursed();
     }
 };
 
 class LAsmJSInterruptCheck : public LInstructionHelper<0, 0, 0>
 {
     Label* interruptExit_;
-    const CallSiteDesc& funcDesc_;
+    const wasm::CallSiteDesc& funcDesc_;
 
   public:
     LIR_HEADER(AsmJSInterruptCheck);
 
-    LAsmJSInterruptCheck(Label* interruptExit, const CallSiteDesc& funcDesc)
+    LAsmJSInterruptCheck(Label* interruptExit, const wasm::CallSiteDesc& funcDesc)
       : interruptExit_(interruptExit), funcDesc_(funcDesc)
     {
     }
 
     bool isCall() const {
         return true;
     }
 
     Label* interruptExit() const {
         return interruptExit_;
     }
-    const CallSiteDesc& funcDesc() const {
+    const wasm::CallSiteDesc& funcDesc() const {
         return funcDesc_;
     }
 };
 
 class LInterruptCheck : public LInstructionHelper<0, 0, 0>
 {
   public:
     LIR_HEADER(InterruptCheck)
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -590,19 +590,19 @@ class Assembler : public AssemblerX86Sha
         if (word.value == 0)
             xorl(dest, dest);
         else
             movq(word, dest);
     }
     void mov(ImmPtr imm, Register dest) {
         movq(imm, dest);
     }
-    void mov(AsmJSImmPtr imm, Register dest) {
+    void mov(wasm::SymbolicAddress imm, Register dest) {
         masm.movq_i64r(-1, dest.encoding());
-        append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm.kind()));
+        append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm));
     }
     void mov(const Operand& src, Register dest) {
         movq(src, dest);
     }
     void mov(Register src, const Operand& dest) {
         movq(src, dest);
     }
     void mov(Imm32 imm32, const Operand& dest) {
@@ -665,21 +665,21 @@ class Assembler : public AssemblerX86Sha
         return CodeOffset(masm.vmovaps_rrip(dest.encoding()).offset());
     }
     CodeOffset leaRipRelative(Register dest) {
         return CodeOffset(masm.leaq_rip(dest.encoding()).offset());
     }
 
     void loadAsmJSActivation(Register dest) {
         CodeOffset label = loadRipRelativeInt64(dest);
-        append(AsmJSGlobalAccess(label, AsmJSActivationGlobalDataOffset));
+        append(AsmJSGlobalAccess(label, wasm::ActivationGlobalDataOffset));
     }
     void loadAsmJSHeapRegisterFromGlobalData() {
         CodeOffset label = loadRipRelativeInt64(HeapReg);
-        append(AsmJSGlobalAccess(label, AsmJSHeapGlobalDataOffset));
+        append(AsmJSGlobalAccess(label, wasm::HeapGlobalDataOffset));
     }
 
     void cmpq(Register rhs, Register lhs) {
         masm.cmpq_rr(rhs.encoding(), lhs.encoding());
     }
     void cmpq(Register rhs, const Operand& lhs) {
         switch (lhs.kind()) {
           case Operand::REG:
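
The mov(wasm::SymbolicAddress, Register) overload above emits a movq with a -1 placeholder immediate and records an AsmJSAbsoluteLink, so the real pointer can be written in later, outside the MacroAssembler (per the AsmJSAbsoluteLink comment, during static linking of the module). A rough sketch of that patch loop; AddressOf is an assumed name for whatever resolves a SymbolicAddress to its runtime pointer, and the write is simplified (the real code goes through the assembler's patching helpers, which also verify the -1 placeholder):

    // Illustrative only: iterate the recorded links and replace each
    // placeholder with the resolved address of its symbolic target.
    static void
    StaticallyLinkSketch(uint8_t* code, const AssemblerShared& masm)
    {
        for (size_t i = 0; i < masm.numAsmJSAbsoluteLinks(); i++) {
            AsmJSAbsoluteLink link = masm.asmJSAbsoluteLink(i);
            void* target = AddressOf(link.target);  // e.g. the ToInt32 C++ entry point
            memcpy(code + link.patchAt.offset(), &target, sizeof(void*));
        }
    }
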
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -305,17 +305,17 @@ CodeGeneratorX64::emitSimdLoad(LAsmJSLoa
     const MAsmJSLoadHeap* mir = ins->mir();
     Scalar::Type type = mir->accessType();
     FloatRegister out = ToFloatRegister(ins->output());
     const LAllocation* ptr = ins->ptr();
     Operand srcAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (gen->needsAsmJSBoundsCheckBranch(mir))
         maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr),
                                                     masm.asmOnOutOfBoundsLabel());
 
     unsigned numElems = mir->numSimdElems();
     if (numElems == 3) {
         MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
 
@@ -325,40 +325,40 @@ CodeGeneratorX64::emitSimdLoad(LAsmJSLoa
             : Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float) + mir->offset());
 
         // Load XY
         uint32_t before = masm.size();
         loadSimd(type, 2, srcAddr, out);
         uint32_t after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, 2, srcAddr,
                                     *ins->output()->output());
-        masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
+        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
 
         // Load Z (W is zeroed)
         // This is still in bounds, as we've checked with a manual bounds check
         // or we had enough space for sure when removing the bounds check.
         before = after;
         loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg);
         after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, 1, srcAddrZ,
                                     LFloatReg(ScratchSimd128Reg));
-        masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw,
-                                    AsmJSHeapAccess::NoLengthCheck, 8));
+        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw,
+                                     wasm::HeapAccess::NoLengthCheck, 8));
 
         // Move ZW atop XY
         masm.vmovlhps(ScratchSimd128Reg, out, out);
     } else {
         uint32_t before = masm.size();
         loadSimd(type, numElems, srcAddr, out);
         uint32_t after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, numElems, srcAddr, *ins->output()->output());
-        masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
+        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
     }
 
-    if (maybeCmpOffset != AsmJSHeapAccess::NoLengthCheck)
+    if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
 }
 
 void
 CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
 {
     const MAsmJSLoadHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
@@ -369,17 +369,17 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAs
     const LAllocation* ptr = ins->ptr();
     const LDefinition* out = ins->output();
     Operand srcAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
     memoryBarrier(mir->barrierBefore());
     OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (gen->needsAsmJSBoundsCheckBranch(mir)) {
         Label* jumpTo = nullptr;
         if (mir->isAtomicAccess()) {
             jumpTo = masm.asmOnOutOfBoundsLabel();
         } else {
             ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
             addOutOfLineCode(ool, mir);
             jumpTo = ool->entry();
@@ -405,17 +405,17 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAs
     }
     uint32_t after = masm.size();
     verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, accessType, 0, srcAddr, *out->output());
     if (ool) {
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
         masm.bind(ool->rejoin());
     }
     memoryBarrier(mir->barrierAfter());
-    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::CarryOn, maybeCmpOffset));
+    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::CarryOn, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX64::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in,
                             const Operand& dstAddr)
 {
     switch (type) {
       case Scalar::Float32x4: {
@@ -460,58 +460,58 @@ CodeGeneratorX64::emitSimdStore(LAsmJSSt
     const MAsmJSStoreHeap* mir = ins->mir();
     Scalar::Type type = mir->accessType();
     FloatRegister in = ToFloatRegister(ins->value());
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (gen->needsAsmJSBoundsCheckBranch(mir))
         maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr),
                                                     masm.asmOnOutOfBoundsLabel());
 
     unsigned numElems = mir->numSimdElems();
     if (numElems == 3) {
         MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
 
         Operand dstAddrZ =
             ptr->isBogus()
             ? Operand(HeapReg, 2 * sizeof(float) + mir->offset())
             : Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float) + mir->offset());
 
         // It's possible that the Z could be out of bounds when the XY is in
         // bounds. To avoid storing the XY before the exception is thrown, we
-        // store the Z first, and record its offset in the AsmJSHeapAccess so
+        // store the Z first, and record its offset in the HeapAccess so
         // that the signal handler knows to check the bounds of the full
         // access, rather than just the Z.
         masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg);
         uint32_t before = masm.size();
         storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ);
         uint32_t after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, 1, dstAddrZ,
                                     LFloatReg(ScratchSimd128Reg));
-        masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset, 8));
+        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset, 8));
 
         // Store XY
         before = after;
         storeSimd(type, 2, in, dstAddr);
         after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, 2, dstAddr, *ins->value());
-        masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw));
+        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw));
     } else {
         uint32_t before = masm.size();
         storeSimd(type, numElems, in, dstAddr);
         uint32_t after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, numElems, dstAddr, *ins->value());
-        masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
+        masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
     }
 
-    if (maybeCmpOffset != AsmJSHeapAccess::NoLengthCheck)
+    if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
 }
 
 void
 CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
@@ -522,17 +522,17 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LA
     const LAllocation* value = ins->value();
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
     memoryBarrier(mir->barrierBefore());
     Label* rejoin = nullptr;
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (gen->needsAsmJSBoundsCheckBranch(mir)) {
         Label* jumpTo = nullptr;
         if (mir->isAtomicAccess())
             jumpTo = masm.asmOnOutOfBoundsLabel();
         else
             rejoin = jumpTo = alloc().lifoAlloc()->newInfallible<Label>();
         maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr), jumpTo);
     }
@@ -573,17 +573,17 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LA
     }
     uint32_t after = masm.size();
     verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, accessType, 0, dstAddr, *value);
     if (rejoin) {
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
         masm.bind(rejoin);
     }
     memoryBarrier(mir->barrierAfter());
-    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::CarryOn, maybeCmpOffset));
+    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::CarryOn, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
     const LAllocation* ptr = ins->ptr();
@@ -593,32 +593,32 @@ CodeGeneratorX64::visitAsmJSCompareExcha
     BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
 
     // Note that we can't use
     // needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
     // since signal-handler bounds checking is not yet implemented for atomic accesses.
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (mir->needsBoundsCheck()) {
         maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
         masm.j(Assembler::Above, masm.asmOnOutOfBoundsLabel());
     }
     uint32_t before = masm.size();
     masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                         srcAddr,
                                         oldval,
                                         newval,
                                         InvalidReg,
                                         ToAnyRegister(ins->output()));
     MOZ_ASSERT(mir->offset() == 0,
                "The AsmJS signal handler doesn't yet support emulating "
                "atomic accesses in the case of a fault from an unwrapped offset");
-    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
+    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
     const LAllocation* ptr = ins->ptr();
@@ -628,31 +628,31 @@ CodeGeneratorX64::visitAsmJSAtomicExchan
     MOZ_ASSERT(accessType <= Scalar::Uint32);
 
     BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
     Register value = ToRegister(ins->value());
 
     // Note that we can't use
     // needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
     // since signal-handler bounds checking is not yet implemented for atomic accesses.
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (mir->needsBoundsCheck()) {
         maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
         masm.j(Assembler::Above, masm.asmOnOutOfBoundsLabel());
     }
     uint32_t before = masm.size();
     masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        srcAddr,
                                        value,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
     MOZ_ASSERT(mir->offset() == 0,
                "The AsmJS signal handler doesn't yet support emulating "
                "atomic accesses in the case of a fault from an unwrapped offset");
-    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
+    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
     MOZ_ASSERT(ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
@@ -663,17 +663,17 @@ CodeGeneratorX64::visitAsmJSAtomicBinopH
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->offset());
 
     // Note that we can't use
     // needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
     // since signal-handler bounds checking is not yet implemented for atomic accesses.
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (mir->needsBoundsCheck()) {
         maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
         masm.j(Assembler::Above, masm.asmOnOutOfBoundsLabel());
     }
     uint32_t before = masm.size();
     if (value->isConstant()) {
         atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                    Imm32(ToInt32(value)),
@@ -687,17 +687,17 @@ CodeGeneratorX64::visitAsmJSAtomicBinopH
                                    srcAddr,
                                    temp,
                                    InvalidReg,
                                    ToAnyRegister(ins->output()));
     }
     MOZ_ASSERT(mir->offset() == 0,
                "The AsmJS signal handler doesn't yet support emulating "
                "atomic accesses in the case of a fault from an unwrapped offset");
-    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
+    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MOZ_ASSERT(!ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
@@ -707,31 +707,31 @@ CodeGeneratorX64::visitAsmJSAtomicBinopH
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->offset());
 
     // Note that we can't use
     // needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
     // since signal-handler bounds checking is not yet implemented for atomic accesses.
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (mir->needsBoundsCheck()) {
         maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
         masm.j(Assembler::Above, masm.asmOnOutOfBoundsLabel());
     }
 
     uint32_t before = masm.size();
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
     else
         atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
     MOZ_ASSERT(mir->offset() == 0,
                "The AsmJS signal handler doesn't yet support emulating "
                "atomic accesses in the case of a fault from an unwrapped offset");
-    masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
+    masm.append(wasm::HeapAccess(before, wasm::HeapAccess::Throw, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX64::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar* ins)
 {
     MAsmJSLoadGlobalVar* mir = ins->mir();
 
     MIRType type = mir->type();
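
For the 3-element SIMD accesses above, the Z lane is stored first and its HeapAccess records offsetWithinWholeSimdVector = 8, so a faulting Z access can be re-checked against the bounds of the whole XYZ access rather than just the final lane. A hedged sketch of that re-check; the real logic lives with the asm.js signal handlers, and the parameter names here are illustrative:

    // Illustrative only: rewind from the faulting lane to the start of the
    // full SIMD value, then bounds-check the entire access against the heap.
    static bool
    WholeAccessIsInBoundsSketch(const wasm::HeapAccess& access, uint8_t* faultingAddress,
                                uint8_t* heapBase, size_t heapLength, size_t wholeAccessSize)
    {
        uint8_t* accessBase = faultingAddress - access.offsetWithinWholeSimdVector();
        return accessBase >= heapBase &&
               wholeAccessSize <= heapLength &&
               accessBase <= heapBase + heapLength - wholeAccessSize;
    }
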
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -598,19 +598,19 @@ class MacroAssemblerX64 : public MacroAs
         if (X86Encoding::IsAddressImmediate(lhs.addr)) {
             branch32(cond, Operand(lhs), rhs, label);
         } else {
             ScratchRegisterScope scratch(asMasm());
             mov(ImmPtr(lhs.addr), scratch);
             branch32(cond, Address(scratch, 0), rhs, label);
         }
     }
-    void branch32(Condition cond, AsmJSAbsoluteAddress lhs, Imm32 rhs, Label* label) {
+    void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label) {
         ScratchRegisterScope scratch(asMasm());
-        mov(AsmJSImmPtr(lhs.kind()), scratch);
+        mov(lhs, scratch);
         branch32(cond, Address(scratch, 0), rhs, label);
     }
     void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) {
         if (X86Encoding::IsAddressImmediate(lhs.addr)) {
             branch32(cond, Operand(lhs), rhs, label);
         } else {
             ScratchRegisterScope scratch(asMasm());
             mov(ImmPtr(lhs.addr), scratch);
@@ -643,20 +643,20 @@ class MacroAssemblerX64 : public MacroAs
         if (X86Encoding::IsAddressImmediate(addr.addr)) {
             branchPtr(cond, Operand(addr), ptr, label);
         } else {
             ScratchRegisterScope scratch(asMasm());
             mov(ImmPtr(addr.addr), scratch);
             branchPtr(cond, Operand(scratch, 0x0), ptr, label);
         }
     }
-    void branchPtr(Condition cond, AsmJSAbsoluteAddress addr, Register ptr, Label* label) {
+    void branchPtr(Condition cond, wasm::SymbolicAddress addr, Register ptr, Label* label) {
         ScratchRegisterScope scratch(asMasm());
         MOZ_ASSERT(ptr != scratch);
-        mov(AsmJSImmPtr(addr.kind()), scratch);
+        mov(addr, scratch);
         branchPtr(cond, Operand(scratch, 0x0), ptr, label);
     }
 
     void branchPrivatePtr(Condition cond, Address lhs, ImmPtr ptr, Label* label) {
         branchPtr(cond, lhs, ImmWord(uintptr_t(ptr.value) >> 1), label);
     }
 
     void branchPrivatePtr(Condition cond, Address lhs, Register ptr, Label* label);
@@ -719,17 +719,17 @@ class MacroAssemblerX64 : public MacroAs
         movq(src, dest);
     }
     void movePtr(ImmWord imm, Register dest) {
         mov(imm, dest);
     }
     void movePtr(ImmPtr imm, Register dest) {
         mov(imm, dest);
     }
-    void movePtr(AsmJSImmPtr imm, Register dest) {
+    void movePtr(wasm::SymbolicAddress imm, Register dest) {
         mov(imm, dest);
     }
     void movePtr(ImmGCPtr imm, Register dest) {
         movq(imm, dest);
     }
     void move64(Register64 src, Register64 dest) {
         movq(src.reg, dest.reg);
     }
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -558,17 +558,17 @@ MacroAssembler::call(Label* label)
 
 void
 MacroAssembler::call(const Address& addr)
 {
     Assembler::call(Operand(addr.base, addr.offset));
 }
 
 void
-MacroAssembler::call(AsmJSImmPtr target)
+MacroAssembler::call(wasm::SymbolicAddress target)
 {
     mov(target, eax);
     Assembler::call(eax);
 }
 
 void
 MacroAssembler::call(ImmWord target)
 {
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -279,19 +279,19 @@ class Assembler : public AssemblerX86Sha
         if (imm.value == 0)
             xorl(dest, dest);
         else
             movl(imm, dest);
     }
     void mov(ImmPtr imm, Register dest) {
         mov(ImmWord(uintptr_t(imm.value)), dest);
     }
-    void mov(AsmJSImmPtr imm, Register dest) {
+    void mov(wasm::SymbolicAddress imm, Register dest) {
         masm.movl_i32r(-1, dest.encoding());
-        append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm.kind()));
+        append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), imm));
     }
     void mov(const Operand& src, Register dest) {
         movl(src, dest);
     }
     void mov(Register src, const Operand& dest) {
         movl(src, dest);
     }
     void mov(Imm32 imm, const Operand& dest) {
@@ -358,23 +358,23 @@ class Assembler : public AssemblerX86Sha
           case Operand::MEM_ADDRESS32:
             masm.cmpl_i32m(uintptr_t(rhs.value), lhs.address());
             writeDataRelocation(rhs);
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
-    void cmpl(Register rhs, AsmJSAbsoluteAddress lhs) {
+    void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
         masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
-        append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), lhs.kind()));
+        append(AsmJSAbsoluteLink(CodeOffset(masm.currentOffset()), lhs));
     }
-    void cmpl(Imm32 rhs, AsmJSAbsoluteAddress lhs) {
+    void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
         JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
-        append(AsmJSAbsoluteLink(CodeOffset(src.offset()), lhs.kind()));
+        append(AsmJSAbsoluteLink(CodeOffset(src.offset()), lhs));
     }
 
     void adcl(Imm32 imm, Register dest) {
         masm.adcl_ir(imm.value, dest.encoding());
     }
     void adcl(Register src, Register dest) {
         masm.adcl_rr(src.encoding(), dest.encoding());
     }
@@ -898,17 +898,17 @@ class Assembler : public AssemblerX86Sha
     CodeOffset vmovupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
         MOZ_ASSERT(HasSSE2());
         masm.vmovups_rm(src.encoding(), dest.addr);
         return CodeOffset(masm.currentOffset());
     }
 
     void loadAsmJSActivation(Register dest) {
         CodeOffset label = movlWithPatch(PatchedAbsoluteAddress(), dest);
-        append(AsmJSGlobalAccess(label, AsmJSActivationGlobalDataOffset));
+        append(AsmJSGlobalAccess(label, wasm::ActivationGlobalDataOffset));
     }
     void loadAsmJSHeapRegisterFromGlobalData() {
         // x86 doesn't have a pinned heap register.
     }
 
     static bool canUseInSingleByteInstruction(Register reg) {
         return X86Encoding::HasSubregL(reg.encoding());
     }
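
loadAsmJSActivation above now uses the hoisted wasm::ActivationGlobalDataOffset. Together with the other constants dropped from Assembler-shared.h, this implies a global-data prefix laid out roughly as below (a sketch assuming the hoisted offsets keep the removed values: activation at 0, heap at sizeof(void*), NaN constants after that):

    // Sketch of the global-data prefix implied by the hoisted offsets.
    struct GlobalDataPrefixSketch
    {
        void*  activation;  // wasm::ActivationGlobalDataOffset
        void*  heapBase;    // wasm::HeapGlobalDataOffset (HeapReg reload on x64)
        double NaN64;       // canonical double NaN constant
        float  NaN32;       // canonical float NaN constant
    };
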
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -390,17 +390,17 @@ CodeGeneratorX86::emitSimdLoad(LAsmJSLoa
     const MAsmJSLoadHeap* mir = ins->mir();
     Scalar::Type type = mir->accessType();
     FloatRegister out = ToFloatRegister(ins->output());
     const LAllocation* ptr = ins->ptr();
     Operand srcAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress(mir->offset()))
                       : Operand(ToRegister(ptr), mir->offset());
 
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (gen->needsAsmJSBoundsCheckBranch(mir))
         maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr),
                                                     masm.asmOnOutOfBoundsLabel());
 
     unsigned numElems = mir->numSimdElems();
     if (numElems == 3) {
         MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
 
@@ -408,36 +408,36 @@ CodeGeneratorX86::emitSimdLoad(LAsmJSLoa
             ptr->isBogus()
             ? Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset()))
             : Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());
 
         // Load XY
         uint32_t before = masm.size();
         loadSimd(type, 2, srcAddr, out);
         uint32_t after = masm.size();
-        masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
+        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
 
         // Load Z (W is zeroed)
         // This is still in bounds, as we've checked with a manual bounds check
         // or we had enough space for sure when removing the bounds check.
         before = after;
         loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg);
         after = masm.size();
-        masm.append(AsmJSHeapAccess(before, after));
+        masm.append(wasm::HeapAccess(before, after));
 
         // Move ZW atop XY
         masm.vmovlhps(ScratchSimd128Reg, out, out);
     } else {
         uint32_t before = masm.size();
         loadSimd(type, numElems, srcAddr, out);
         uint32_t after = masm.size();
-        masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
+        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
     }
 
-    if (maybeCmpOffset != AsmJSHeapAccess::NoLengthCheck)
+    if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
 }
 
 void
 CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
 {
     const MAsmJSLoadHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
@@ -448,17 +448,17 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAs
     const LAllocation* ptr = ins->ptr();
     const LDefinition* out = ins->output();
     Operand srcAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress(mir->offset()))
                       : Operand(ToRegister(ptr), mir->offset());
 
     memoryBarrier(mir->barrierBefore());
     OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (gen->needsAsmJSBoundsCheckBranch(mir)) {
         Label* jumpTo = nullptr;
         if (mir->isAtomicAccess()) {
             jumpTo = masm.asmOnOutOfBoundsLabel();
         } else {
             ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
             addOutOfLineCode(ool, mir);
             jumpTo = ool->entry();
@@ -469,17 +469,17 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAs
     uint32_t before = masm.size();
     load(accessType, srcAddr, out);
     uint32_t after = masm.size();
     if (ool) {
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
         masm.bind(ool->rejoin());
     }
     memoryBarrier(mir->barrierAfter());
-    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
+    masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX86::store(Scalar::Type accessType, const LAllocation* value, const Operand& dstAddr)
 {
     switch (accessType) {
       case Scalar::Int8:
       case Scalar::Uint8Clamped:
@@ -568,17 +568,17 @@ CodeGeneratorX86::emitSimdStore(LAsmJSSt
     const MAsmJSStoreHeap* mir = ins->mir();
     Scalar::Type type = mir->accessType();
     FloatRegister in = ToFloatRegister(ins->value());
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress(mir->offset()))
                       : Operand(ToRegister(ptr), mir->offset());
 
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (gen->needsAsmJSBoundsCheckBranch(mir))
         maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr),
                                                     masm.asmOnOutOfBoundsLabel());
 
     unsigned numElems = mir->numSimdElems();
     if (numElems == 3) {
         MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
 
@@ -586,35 +586,35 @@ CodeGeneratorX86::emitSimdStore(LAsmJSSt
             ptr->isBogus()
             ? Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset()))
             : Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());
 
         // Store XY
         uint32_t before = masm.size();
         storeSimd(type, 2, in, dstAddr);
         uint32_t after = masm.size();
-        masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
+        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
 
         masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg);
 
         // Store Z (W is zeroed)
         // This is still in bounds, as we've checked with a manual bounds check
         // or we had enough space for sure when removing the bounds check.
         before = masm.size();
         storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ);
         after = masm.size();
-        masm.append(AsmJSHeapAccess(before, after));
+        masm.append(wasm::HeapAccess(before, after));
     } else {
         uint32_t before = masm.size();
         storeSimd(type, numElems, in, dstAddr);
         uint32_t after = masm.size();
-        masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
+        masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
     }
 
-    if (maybeCmpOffset != AsmJSHeapAccess::NoLengthCheck)
+    if (maybeCmpOffset != wasm::HeapAccess::NoLengthCheck)
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
 }
 
 void
 CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
@@ -625,17 +625,17 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LA
     const LAllocation* value = ins->value();
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress(mir->offset()))
                       : Operand(ToRegister(ptr), mir->offset());
 
     memoryBarrier(mir->barrierBefore());
     Label* rejoin = nullptr;
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
     if (gen->needsAsmJSBoundsCheckBranch(mir)) {
         Label* jumpTo = nullptr;
         if (mir->isAtomicAccess())
             jumpTo = masm.asmOnOutOfBoundsLabel();
         else
             rejoin = jumpTo = alloc().lifoAlloc()->newInfallible<Label>();
         maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr), jumpTo);
     }
@@ -643,17 +643,17 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LA
     uint32_t before = masm.size();
     store(accessType, value, dstAddr);
     uint32_t after = masm.size();
     if (rejoin) {
         cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
         masm.bind(rejoin);
     }
     memoryBarrier(mir->barrierAfter());
-    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
+    masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
@@ -676,30 +676,30 @@ CodeGeneratorX86::visitAsmJSCompareExcha
 // Perform bounds checking on the access if necessary; if it fails,
 // jump to out-of-line code that throws.  If the bounds check passes,
 // set up the heap address in addrTemp.
 
 void
 CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg, bool boundsCheck,
                                             int32_t offset, int32_t endOffset)
 {
-    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
 
     if (boundsCheck) {
         maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-endOffset)).offset();
         masm.j(Assembler::Above, masm.asmOnOutOfBoundsLabel());
     }
 
     // Add in the actual heap pointer explicitly, to avoid opening up
     // the abstraction that is atomicBinopToTypedIntArray at this time.
     masm.movl(ptrReg, addrTemp);
     uint32_t before = masm.size();
     masm.addlWithPatch(Imm32(offset), addrTemp);
     uint32_t after = masm.size();
-    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
+    masm.append(wasm::HeapAccess(before, after, maybeCmpOffset));
 }
 
 void
 CodeGeneratorX86::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
@@ -995,17 +995,17 @@ CodeGeneratorX86::visitOutOfLineTruncate
 
     masm.bind(&fail);
     {
         saveVolatile(output);
 
         masm.setupUnalignedABICall(output);
         masm.passABIArg(input, MoveOp::DOUBLE);
         if (gen->compilingAsmJS())
-            masm.callWithABI(AsmJSImm_ToInt32);
+            masm.callWithABI(wasm::SymbolicAddress::ToInt32);
         else
             masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
         masm.storeCallResult(output);
 
         restoreVolatile(output);
     }
 
     masm.jump(ool->rejoin());
@@ -1087,17 +1087,17 @@ CodeGeneratorX86::visitOutOfLineTruncate
         saveVolatile(output);
 
         masm.push(input);
         masm.setupUnalignedABICall(output);
         masm.vcvtss2sd(input, input, input);
         masm.passABIArg(input.asDouble(), MoveOp::DOUBLE);
 
         if (gen->compilingAsmJS())
-            masm.callWithABI(AsmJSImm_ToInt32);
+            masm.callWithABI(wasm::SymbolicAddress::ToInt32);
         else
             masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
 
         masm.storeCallResult(output);
         masm.pop(input);
 
         restoreVolatile(output);
     }
--- a/js/src/jit/x86/MacroAssembler-x86.h
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -638,31 +638,30 @@ class MacroAssemblerX86 : public MacroAs
         // LOW(dest) = eax;
         movl(eax, dest.low);
     }
 
     void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label* label) {
         cmp32(Operand(lhs), rhs);
         j(cond, label);
     }
-    void branch32(Condition cond, AsmJSAbsoluteAddress lhs, Imm32 rhs, Label* label) {
+    void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label) {
         cmpl(rhs, lhs);
         j(cond, label);
     }
     void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) {
         cmp32(Operand(lhs), rhs);
         j(cond, label);
     }
     void branchTest32(Condition cond, AbsoluteAddress address, Imm32 imm, Label* label) {
         test32(Operand(address), imm);
         j(cond, label);
     }
 
-    // Specialization for AsmJSAbsoluteAddress.
-    void branchPtr(Condition cond, AsmJSAbsoluteAddress lhs, Register ptr, Label* label) {
+    void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register ptr, Label* label) {
         cmpl(ptr, lhs);
         j(cond, label);
     }
 
     template <typename T, typename S>
     void branchPtr(Condition cond, T lhs, S ptr, Label* label) {
         cmpPtr(Operand(lhs), ptr);
         j(cond, label);
@@ -741,17 +740,17 @@ class MacroAssemblerX86 : public MacroAs
     }
 
     void movePtr(ImmWord imm, Register dest) {
         movl(Imm32(imm.value), dest);
     }
     void movePtr(ImmPtr imm, Register dest) {
         movl(imm, dest);
     }
-    void movePtr(AsmJSImmPtr imm, Register dest) {
+    void movePtr(wasm::SymbolicAddress imm, Register dest) {
         mov(imm, dest);
     }
     void movePtr(ImmGCPtr imm, Register dest) {
         movl(imm, dest);
     }
     void move64(Register64 src, Register64 dest) {
         movl(src.low, dest.low);
         movl(src.high, dest.high);
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -1739,17 +1739,17 @@ jit::JitActivation::markIonRecovery(JSTr
 }
 
 AsmJSActivation::AsmJSActivation(JSContext* cx, AsmJSModule& module)
   : Activation(cx, AsmJS),
     module_(module),
     entrySP_(nullptr),
     resumePC_(nullptr),
     fp_(nullptr),
-    exitReason_(AsmJSExit::None)
+    packedExitReason_(wasm::ExitReason(wasm::ExitReason::None).pack())
 {
     (void) entrySP_;  // squelch GCC warning
 
     prevAsmJSForModule_ = module.activation();
     module.activation() = this;
 
     prevAsmJS_ = cx->runtime()->asmJSActivationStack_;
     cx->runtime()->asmJSActivationStack_ = this;
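
The constructor above now stores a packed wasm::ExitReason instead of an AsmJSExit::Reason; the Stack.h hunk below unpacks it in exitReason() and exposes offsetOfPackedExitReason so JIT code can keep writing the field with a single 32-bit store. A minimal sketch of the pack/unpack round trip; the real type lives in Wasm.h, and every tag other than None is an assumption made for illustration:

    #include <stdint.h>

    // Illustrative only: shows the uint32_t round trip that
    // packedExitReason_ relies on, not the real wasm::ExitReason layout.
    class ExitReasonSketch
    {
      public:
        enum Kind : uint32_t { None, ImportJit, ImportInterp, Native };  // tags other than None are assumed

        explicit ExitReasonSketch(Kind kind) : kind_(kind) {}

        uint32_t pack() const { return uint32_t(kind_); }
        static ExitReasonSketch unpack(uint32_t bits) { return ExitReasonSketch(Kind(bits)); }

        Kind kind() const { return kind_; }

      private:
        Kind kind_;
    };
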
--- a/js/src/vm/Stack.h
+++ b/js/src/vm/Stack.h
@@ -1799,17 +1799,17 @@ class InterpreterFrameIterator
 class AsmJSActivation : public Activation
 {
     AsmJSModule& module_;
     AsmJSActivation* prevAsmJS_;
     AsmJSActivation* prevAsmJSForModule_;
     void* entrySP_;
     void* resumePC_;
     uint8_t* fp_;
-    AsmJSExit::Reason exitReason_;
+    uint32_t packedExitReason_;
 
   public:
     AsmJSActivation(JSContext* cx, AsmJSModule& module);
     ~AsmJSActivation();
 
     inline JSContext* cx();
     AsmJSModule& module() const { return module_; }
     AsmJSActivation* prevAsmJS() const { return prevAsmJS_; }
@@ -1818,26 +1818,26 @@ class AsmJSActivation : public Activatio
         return true;
     }
 
     // Returns a pointer to the base of the innermost stack frame of asm.js code
     // in this activation.
     uint8_t* fp() const { return fp_; }
 
     // Returns the reason why asm.js code called out of asm.js code.
-    AsmJSExit::Reason exitReason() const { return exitReason_; }
+    wasm::ExitReason exitReason() const { return wasm::ExitReason::unpack(packedExitReason_); }
 
     // Read by JIT code:
     static unsigned offsetOfContext() { return offsetof(AsmJSActivation, cx_); }
     static unsigned offsetOfResumePC() { return offsetof(AsmJSActivation, resumePC_); }
 
     // Written by JIT code:
     static unsigned offsetOfEntrySP() { return offsetof(AsmJSActivation, entrySP_); }
     static unsigned offsetOfFP() { return offsetof(AsmJSActivation, fp_); }
-    static unsigned offsetOfExitReason() { return offsetof(AsmJSActivation, exitReason_); }
+    static unsigned offsetOfPackedExitReason() { return offsetof(AsmJSActivation, packedExitReason_); }
 
     // Read/written from SIGSEGV handler:
     void setResumePC(void* pc) { resumePC_ = pc; }
     void* resumePC() const { return resumePC_; }
 };
 
 // A FrameIter walks over the runtime's stack of JS script activations,
 // abstracting over whether the JS scripts were running in the interpreter or