Bug 1175556 - ARM64: Land miscellaneous changes. r=evilpie
☠☠ backed out by 5ddf0a252b08 ☠☠
author:     Sean Stangl <sstangl@mozilla.com>
date:       Thu, 25 Jun 2015 11:37:22 -0700
changeset:  250258:efe7247291788d38440ca378ff148131e15b0314
parent:     250257:cbe9a2aea9541146ae0a11051ed32d31eaf4e428
child:      250259:a5cd9f18f1b6173844f26da8780635c1683f80c4
push id:    28951
push user:  cbook@mozilla.com
push date:  Fri, 26 Jun 2015 11:19:38 +0000
treeherder: mozilla-central@56e207dbb3bd
reviewers:  evilpie
bugs:       1175556
milestone:  41.0a1
Bug 1175556 - ARM64: Land miscellaneous changes. r=evilpie
js/src/asmjs/AsmJSFrameIterator.cpp
js/src/asmjs/AsmJSModule.cpp
js/src/asmjs/AsmJSSignalHandlers.cpp
js/src/asmjs/AsmJSValidate.cpp
js/src/builtin/TestingFunctions.cpp
js/src/irregexp/NativeRegExpMacroAssembler.cpp
js/src/jit/AtomicOperations-inl.h
js/src/jit/CodeGenerator.h
js/src/jit/IonCaches.h
js/src/jit/JitCommon.h
js/src/jit/JitFrames.cpp
js/src/jit/LIR.h
js/src/jit/LOpcodes.h
js/src/jit/Lowering.h
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/MoveEmitter.h
js/src/jit/RegisterAllocator.h
js/src/jit/Registers.h
js/src/jit/arm/Simulator-arm.h
js/src/jit/arm64/Assembler-arm64.cpp
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/arm64/MacroAssembler-arm64.cpp
js/src/jit/arm64/MacroAssembler-arm64.h
js/src/jit/arm64/vixl/Assembler-vixl.h
js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
js/src/jit/arm64/vixl/Simulator-vixl.h
js/src/jit/mips/Simulator-mips.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/CodeGenerator-shared-inl.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
js/src/jit/shared/Lowering-shared-inl.h
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
--- a/js/src/asmjs/AsmJSFrameIterator.cpp
+++ b/js/src/asmjs/AsmJSFrameIterator.cpp
@@ -126,16 +126,21 @@ static const unsigned PostStorePrePopFP 
 # endif
 static const unsigned PushedFP = 8;
 static const unsigned StoredFP = 11;
 #elif defined(JS_CODEGEN_ARM)
 static const unsigned PushedRetAddr = 4;
 static const unsigned PushedFP = 16;
 static const unsigned StoredFP = 20;
 static const unsigned PostStorePrePopFP = 4;
+#elif defined(JS_CODEGEN_ARM64)
+static const unsigned PushedRetAddr = 0;
+static const unsigned PushedFP = 0;
+static const unsigned StoredFP = 0;
+static const unsigned PostStorePrePopFP = 0;
 #elif defined(JS_CODEGEN_MIPS)
 static const unsigned PushedRetAddr = 8;
 static const unsigned PushedFP = 24;
 static const unsigned StoredFP = 28;
 static const unsigned PostStorePrePopFP = 4;
 #elif defined(JS_CODEGEN_NONE)
 # if defined(DEBUG)
 static const unsigned PushedRetAddr = 0;
@@ -211,17 +216,17 @@ GenerateProfilingPrologue(MacroAssembler
 }
 
 // Generate the inverse of GenerateProfilingPrologue.
 static void
 GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, AsmJSExit::Reason reason,
                           Label* profilingReturn)
 {
     Register scratch = ABIArgGenerator::NonReturn_VolatileReg0;
-#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS)
     Register scratch2 = ABIArgGenerator::NonReturn_VolatileReg1;
 #endif
 
     if (framePushed)
         masm.addToStackPtr(Imm32(framePushed));
 
     masm.loadAsmJSActivation(scratch);
 
@@ -235,21 +240,21 @@ GenerateProfilingEpilogue(MacroAssembler
 #if defined(JS_CODEGEN_ARM)
         AutoForbidPools afp(&masm, /* number of instructions in scope = */ 4);
 #endif
 
         // sp protects the stack from clobber via asynchronous signal handlers
         // and the async interrupt exit. Since activation.fp can be read at any
         // time and still points to the current frame, be careful to only update
         // sp after activation.fp has been repointed to the caller's frame.
-#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS)
         masm.loadPtr(Address(masm.getStackPointer(), 0), scratch2);
         masm.storePtr(scratch2, Address(scratch, AsmJSActivation::offsetOfFP()));
         DebugOnly<uint32_t> prePop = masm.currentOffset();
-        masm.add32(Imm32(4), masm.getStackPointer());
+        masm.addToStackPtr(Imm32(sizeof(void *)));
         MOZ_ASSERT(PostStorePrePopFP == masm.currentOffset() - prePop);
 #else
         masm.pop(Address(scratch, AsmJSActivation::offsetOfFP()));
         MOZ_ASSERT(PostStorePrePopFP == 0);
 #endif
 
         masm.bind(profilingReturn);
         masm.ret();
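
A note on the epilogue hunk: the replaced `masm.add32(Imm32(4), masm.getStackPointer())` hard-coded a 4-byte pop, which is wrong for a pointer-sized slot on a 64-bit target, and `add32` cannot address the 64-bit pseudo-sp anyway. The ordering invariant the comment describes can be sketched as follows, using only helpers that appear in this patch:

    // Pop the saved caller fp into activation->fp without letting
    // activation->fp dangle: repoint fp first, release the slot second.
    masm.loadPtr(Address(masm.getStackPointer(), 0), scratch2);
    masm.storePtr(scratch2, Address(scratch, AsmJSActivation::offsetOfFP()));
    masm.addToStackPtr(Imm32(sizeof(void*)));  // 8 on ARM64; 4 on ARM/MIPS

(The all-zero ARM64 prologue/epilogue offsets above are placeholders until real code is generated, consistent with the MOZ_CRASH stubs in AsmJSModule.cpp below.)
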
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -1782,16 +1782,19 @@ AsmJSModule::setProfilingEnabled(bool en
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
         void* callee = X86Encoding::GetRel32Target(callerRetAddr);
 #elif defined(JS_CODEGEN_ARM)
         uint8_t* caller = callerRetAddr - 4;
         Instruction* callerInsn = reinterpret_cast<Instruction*>(caller);
         BOffImm calleeOffset;
         callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
         void* callee = calleeOffset.getDest(callerInsn);
+#elif defined(JS_CODEGEN_ARM64)
+        MOZ_CRASH();
+        void* callee = nullptr;
 #elif defined(JS_CODEGEN_MIPS)
         Instruction* instr = (Instruction*)(callerRetAddr - 4 * sizeof(uint32_t));
         void* callee = (void*)Assembler::ExtractLuiOriValue(instr, instr->next());
 #elif defined(JS_CODEGEN_NONE)
         MOZ_CRASH();
         void* callee = nullptr;
 #else
 # error "Missing architecture"
@@ -1806,16 +1809,18 @@ AsmJSModule::setProfilingEnabled(bool en
         MOZ_ASSERT_IF(profilingEnabled_, callee == profilingEntry);
         MOZ_ASSERT_IF(!profilingEnabled_, callee == entry);
         uint8_t* newCallee = enabled ? profilingEntry : entry;
 
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
         X86Encoding::SetRel32(callerRetAddr, newCallee);
 #elif defined(JS_CODEGEN_ARM)
         new (caller) InstBLImm(BOffImm(newCallee - caller), Assembler::Always);
+#elif defined(JS_CODEGEN_ARM64)
+        MOZ_CRASH();
 #elif defined(JS_CODEGEN_MIPS)
         Assembler::WriteLuiOriInstructions(instr, instr->next(),
                                            ScratchRegister, (uint32_t)newCallee);
         instr[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
 #elif defined(JS_CODEGEN_NONE)
         MOZ_CRASH();
 #else
 # error "Missing architecture"
@@ -1869,16 +1874,18 @@ AsmJSModule::setProfilingEnabled(bool en
 #elif defined(JS_CODEGEN_ARM)
         if (enabled) {
             MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstNOP>());
             new (jump) InstBImm(BOffImm(profilingEpilogue - jump), Assembler::Always);
         } else {
             MOZ_ASSERT(reinterpret_cast<Instruction*>(jump)->is<InstBImm>());
             new (jump) InstNOP();
         }
+#elif defined(JS_CODEGEN_ARM64)
+        MOZ_CRASH();
 #elif defined(JS_CODEGEN_MIPS)
         Instruction* instr = (Instruction*)jump;
         if (enabled) {
             Assembler::WriteLuiOriInstructions(instr, instr->next(),
                                                ScratchRegister, (uint32_t)profilingEpilogue);
             instr[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr);
         } else {
             instr[0].makeNop();
--- a/js/src/asmjs/AsmJSSignalHandlers.cpp
+++ b/js/src/asmjs/AsmJSSignalHandlers.cpp
@@ -1159,18 +1159,18 @@ static bool
 RedirectJitCodeToInterruptCheck(JSRuntime* rt, CONTEXT* context)
 {
     RedirectIonBackedgesToInterruptCheck(rt);
 
     if (AsmJSActivation* activation = rt->asmJSActivationStack()) {
         const AsmJSModule& module = activation->module();
 
 #ifdef JS_SIMULATOR
-        if (module.containsFunctionPC((void*)rt->simulator()->get_pc()))
-            rt->simulator()->set_resume_pc(int32_t(module.interruptExit()));
+        if (module.containsFunctionPC(rt->simulator()->get_pc_as<void*>()))
+            rt->simulator()->set_resume_pc(module.interruptExit());
 #endif
 
         uint8_t** ppc = ContextToPC(context);
         uint8_t* pc = *ppc;
         if (module.containsFunctionPC(pc)) {
             activation->setResumePC(pc);
             *ppc = module.interruptExit();
             return true;
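
This hunk is the consumer that motivates the simulator API changes further down (jit/arm/Simulator-arm.h, jit/mips/Simulator-mips.h, and MozSimulator-vixl.cpp): the old `int32_t` casts cannot represent a 64-bit ARM64 pc, so the accessors move to `get_pc_as<T>()` and `set_resume_pc(void*)`. The redirect then compiles unchanged on every simulated target; roughly, for any simulator `sim`:

    // Same shape on the ARM, MIPS, and vixl (ARM64) simulators:
    if (module.containsFunctionPC(sim->get_pc_as<void*>()))
        sim->set_resume_pc(module.interruptExit());
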
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -9075,17 +9075,18 @@ GenerateAsyncInterruptExit(ModuleCompile
     masm.transferReg(r8);
     masm.transferReg(r9);
     masm.transferReg(r10);
     masm.transferReg(r11);
     masm.transferReg(r12);
     masm.transferReg(lr);
     masm.finishDataTransfer();
     masm.ret();
-
+#elif defined(JS_CODEGEN_ARM64)
+    MOZ_CRASH();
 #elif defined (JS_CODEGEN_NONE)
     MOZ_CRASH();
 #else
 # error "Unknown architecture!"
 #endif
 
     return m.finishGeneratingInlineStub(&m.asyncInterruptLabel()) && !masm.oom();
 }
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -107,16 +107,24 @@ GetBuildConfiguration(JSContext* cx, uns
 #ifdef JS_SIMULATOR_ARM
     value = BooleanValue(true);
 #else
     value = BooleanValue(false);
 #endif
     if (!JS_SetProperty(cx, info, "arm-simulator", value))
         return false;
 
+#ifdef JS_SIMULATOR_ARM64
+    value = BooleanValue(true);
+#else
+    value = BooleanValue(false);
+#endif
+    if (!JS_SetProperty(cx, info, "arm64-simulator", value))
+        return false;
+
 #ifdef MOZ_ASAN
     value = BooleanValue(true);
 #else
     value = BooleanValue(false);
 #endif
     if (!JS_SetProperty(cx, info, "asan", value))
         return false;
 
--- a/js/src/irregexp/NativeRegExpMacroAssembler.cpp
+++ b/js/src/irregexp/NativeRegExpMacroAssembler.cpp
@@ -115,16 +115,22 @@ NativeRegExpMacroAssembler::GenerateCode
         num_registers_++;
 
     Label return_temp0;
 
     // Finalize code - write the entry point code now we know how many
     // registers we need.
     masm.bind(&entry_label_);
 
+#ifdef JS_CODEGEN_ARM64
+    // ARM64 communicates stack address via sp, but uses a pseudo-sp for addressing.
+    MOZ_ASSERT(!masm.GetStackPointer64().Is(sp));
+    masm.moveStackPtrTo(masm.getStackPointer());
+#endif
+
     // Push non-volatile registers which might be modified by jitcode.
     size_t pushedNonVolatileRegisters = 0;
     for (GeneralRegisterForwardIterator iter(savedNonVolatileRegisters); iter.more(); ++iter) {
         masm.Push(*iter);
         pushedNonVolatileRegisters++;
     }
 
 #ifndef JS_CODEGEN_X86
@@ -382,17 +388,17 @@ NativeRegExpMacroAssembler::GenerateCode
         masm.bind(&stack_overflow_label_);
 
         Label grow_failed;
 
         masm.movePtr(ImmPtr(runtime), temp1);
 
         // Save registers before calling C function
         LiveGeneralRegisterSet volatileRegs(GeneralRegisterSet::Volatile());
-#if defined(JS_CODEGEN_ARM)
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
         volatileRegs.add(Register::FromCode(Registers::lr));
 #elif defined(JS_CODEGEN_MIPS)
         volatileRegs.add(Register::FromCode(Registers::ra));
 #endif
         volatileRegs.takeUnchecked(temp0);
         volatileRegs.takeUnchecked(temp1);
         masm.PushRegsInMask(volatileRegs);
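
Two independent fixes in this file. First, on entry from C++ the machine sp carries the stack address, but ARM64 jitcode addresses the stack through a pseudo-sp, so the two must be synchronized before anything is pushed (the assert guards against running with sp itself). Second, like ARM, ARM64 keeps the return address in lr, so lr must be in the volatile set saved around the stack-grow call into C++; a sketch of that pattern, built from the calls in this hunk:

    LiveGeneralRegisterSet volatileRegs(GeneralRegisterSet::Volatile());
    #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
        volatileRegs.add(Register::FromCode(Registers::lr));  // lr is clobbered by calls
    #endif
    masm.PushRegsInMask(volatileRegs);
    // ... call into the runtime to grow the backtrack stack ...
    masm.PopRegsInMask(volatileRegs);  // restores lr
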
 
--- a/js/src/jit/AtomicOperations-inl.h
+++ b/js/src/jit/AtomicOperations-inl.h
@@ -4,19 +4,21 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_AtomicOperations_inl_h
 #define jit_AtomicOperations_inl_h
 
 #if defined(JS_CODEGEN_ARM)
 # include "jit/arm/AtomicOperations-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/AtomicOperations-arm64.h"
 #elif defined(JS_CODEGEN_MIPS)
 # include "jit/mips/AtomicOperations-mips.h"
 #elif defined(JS_CODEGEN_NONE)
 # include "jit/none/AtomicOperations-none.h"
 #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
 # include "jit/x86-shared/AtomicOperations-x86-shared.h"
 #else
 # error "Atomic operations must be defined for this platform"
 #endif
 
-#endif //  jit_AtomicOperations_inl_h
+#endif // jit_AtomicOperations_inl_h
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -13,16 +13,18 @@
 #endif
 
 #if defined(JS_CODEGEN_X86)
 # include "jit/x86/CodeGenerator-x86.h"
 #elif defined(JS_CODEGEN_X64)
 # include "jit/x64/CodeGenerator-x64.h"
 #elif defined(JS_CODEGEN_ARM)
 # include "jit/arm/CodeGenerator-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/CodeGenerator-arm64.h"
 #elif defined(JS_CODEGEN_MIPS)
 # include "jit/mips/CodeGenerator-mips.h"
 #elif defined(JS_CODEGEN_NONE)
 # include "jit/none/CodeGenerator-none.h"
 #else
 #error "Unknown architecture!"
 #endif
 
--- a/js/src/jit/IonCaches.h
+++ b/js/src/jit/IonCaches.h
@@ -4,16 +4,18 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_IonCaches_h
 #define jit_IonCaches_h
 
 #if defined(JS_CODEGEN_ARM)
 # include "jit/arm/Assembler-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/Assembler-arm64.h"
 #elif defined(JS_CODEGEN_MIPS)
 # include "jit/mips/Assembler-mips.h"
 #endif
 #include "jit/Registers.h"
 #include "jit/shared/Assembler-shared.h"
 #include "vm/TypedArrayCommon.h"
 
 namespace js {
--- a/js/src/jit/JitCommon.h
+++ b/js/src/jit/JitCommon.h
@@ -6,21 +6,23 @@
 
 #ifndef jit_JitCommon_h
 #define jit_JitCommon_h
 
 // Various macros used by all JITs.
 
 #if defined(JS_SIMULATOR_ARM)
 #include "jit/arm/Simulator-arm.h"
+#elif defined(JS_SIMULATOR_ARM64)
+# include "jit/arm64/vixl/Simulator-vixl.h"
 #elif defined(JS_SIMULATOR_MIPS)
 #include "jit/mips/Simulator-mips.h"
 #endif
 
-#if defined(JS_SIMULATOR_ARM) || defined(JS_SIMULATOR_MIPS)
+#ifdef JS_SIMULATOR
 // Call into cross-jitted code by following the ABI of the simulated architecture.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7)     \
     (js::jit::Simulator::Current()->call(                              \
         JS_FUNC_TO_DATA_PTR(uint8_t*, entry), 8, p0, p1, p2, p3, p4, p5, p6, p7) & 0xffffffff)
 
 #define CALL_GENERATED_1(entry, p0)                     \
     (js::jit::Simulator::Current()->call(               \
         JS_FUNC_TO_DATA_PTR(uint8_t*, entry), 1, p0) & 0xffffffff)
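
Keying the macros on `JS_SIMULATOR` instead of an explicit architecture list means any simulated target, ARM64 included, routes calls into generated code through `Simulator::Current()->call()`. A hypothetical call site, assuming `entry` is a jitted entry point taking one argument:

    // Trampolines through the simulator in a simulator build; the
    // & 0xffffffff mask in the macro truncates the simulated return register.
    int32_t result = CALL_GENERATED_1(entry, /* p0 = */ 42);
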
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -2588,16 +2588,22 @@ MachineState::FromBailout(RegisterDump::
     }
 #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
     for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
         machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Single), &fpregs[i]);
         machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Double), &fpregs[i]);
         machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Int32x4), &fpregs[i]);
         machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Float32x4), &fpregs[i]);
     }
+#elif defined(JS_CODEGEN_ARM64)
+    for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
+        machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Single), &fpregs[i]);
+        machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Double), &fpregs[i]);
+    }
+
 #elif defined(JS_CODEGEN_NONE)
     MOZ_CRASH();
 #else
 # error "Unknown architecture!"
 #endif
     return machine;
 }
 
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -1820,16 +1820,18 @@ LAllocation::toRegister() const
 # if defined(JS_CODEGEN_X86)
 #  include "jit/x86/LIR-x86.h"
 # elif defined(JS_CODEGEN_X64)
 #  include "jit/x64/LIR-x64.h"
 # endif
 # include "jit/x86-shared/LIR-x86-shared.h"
 #elif defined(JS_CODEGEN_ARM)
 # include "jit/arm/LIR-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/LIR-arm64.h"
 #elif defined(JS_CODEGEN_MIPS)
 # include "jit/mips/LIR-mips.h"
 #elif defined(JS_CODEGEN_NONE)
 # include "jit/none/LIR-none.h"
 #else
 # error "Unknown architecture!"
 #endif
 
--- a/js/src/jit/LOpcodes.h
+++ b/js/src/jit/LOpcodes.h
@@ -354,16 +354,18 @@
     _(ArrowNewTarget)
 
 #if defined(JS_CODEGEN_X86)
 # include "jit/x86/LOpcodes-x86.h"
 #elif defined(JS_CODEGEN_X64)
 # include "jit/x64/LOpcodes-x64.h"
 #elif defined(JS_CODEGEN_ARM)
 # include "jit/arm/LOpcodes-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/LOpcodes-arm64.h"
 #elif defined(JS_CODEGEN_MIPS)
 # include "jit/mips/LOpcodes-mips.h"
 #elif defined(JS_CODEGEN_NONE)
 # include "jit/none/LOpcodes-none.h"
 #else
 # error "Unknown architecture!"
 #endif
 
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -12,16 +12,18 @@
 
 #include "jit/LIR.h"
 #if defined(JS_CODEGEN_X86)
 # include "jit/x86/Lowering-x86.h"
 #elif defined(JS_CODEGEN_X64)
 # include "jit/x64/Lowering-x64.h"
 #elif defined(JS_CODEGEN_ARM)
 # include "jit/arm/Lowering-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/Lowering-arm64.h"
 #elif defined(JS_CODEGEN_MIPS)
 # include "jit/mips/Lowering-mips.h"
 #elif defined(JS_CODEGEN_NONE)
 # include "jit/none/Lowering-none.h"
 #else
 # error "Unknown architecture!"
 #endif
 
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -1589,33 +1589,32 @@ MacroAssembler::generateBailoutTail(Regi
     bind(&baseline);
     {
         // Prepare a register set for use in this case.
         AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
         MOZ_ASSERT(!regs.has(BaselineStackReg));
         regs.take(bailoutInfo);
 
         // Reset SP to the point where clobbering starts.
-        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)),
-                BaselineStackReg);
+        loadStackPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)));
 
         Register copyCur = regs.takeAny();
         Register copyEnd = regs.takeAny();
         Register temp = regs.takeAny();
 
         // Copy data onto stack.
         loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)), copyCur);
         loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)), copyEnd);
         {
             Label copyLoop;
             Label endOfCopy;
             bind(&copyLoop);
             branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
             subPtr(Imm32(4), copyCur);
-            subPtr(Imm32(4), BaselineStackReg);
+            subFromStackPtr(Imm32(4));
             load32(Address(copyCur, 0), temp);
             store32(temp, Address(BaselineStackReg, 0));
             jump(&copyLoop);
             bind(&endOfCopy);
         }
 
         // Enter exit frame for the FinishBailoutToBaseline call.
         loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)), temp);
@@ -2505,19 +2504,22 @@ MacroAssembler::MacroAssembler(JSContext
                                JSScript* script, jsbytecode* pc)
   : emitProfilingInstrumentation_(false),
     framePushed_(0)
 {
     constructRoot(cx);
     jitContext_.emplace(cx, (js::jit::TempAllocator*)nullptr);
     alloc_.emplace(cx);
     moveResolver_.setAllocator(*jitContext_->temp);
-#ifdef JS_CODEGEN_ARM
+#if defined(JS_CODEGEN_ARM)
     initWithAllocator();
     m_buffer.id = GetJitContext()->getNextAssemblerId();
+#elif defined(JS_CODEGEN_ARM64)
+    initWithAllocator();
+    armbuffer_.id = GetJitContext()->getNextAssemblerId();
 #endif
     if (ion) {
         setFramePushed(ion->frameSize());
         if (pc && cx->runtime()->spsProfiler.enabled())
             emitProfilingInstrumentation_ = true;
     }
 }
 
@@ -2702,17 +2704,17 @@ MacroAssembler::adjustStack(int amount)
         reserveStack(-amount);
 }
 
 void
 MacroAssembler::freeStack(uint32_t amount)
 {
     MOZ_ASSERT(amount <= framePushed_);
     if (amount)
-        addPtr(Imm32(amount), StackPointer);
+        addToStackPtr(Imm32(amount));
     framePushed_ -= amount;
 }
 
 void
 MacroAssembler::freeStack(Register amount)
 {
-    addPtr(amount, StackPointer);
+    addToStackPtr(amount);
 }
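
The common thread in these MacroAssembler hunks is replacing direct arithmetic on `StackPointer` with the stack-pointer helpers. On every backend except ARM64 the helpers are thin wrappers, but ARM64 jitcode addresses the stack through a pseudo-sp, so shared code must not name the register. Roughly, under that assumption:

    masm.addToStackPtr(Imm32(8));    // ~ addPtr(Imm32(8), StackPointer) elsewhere
    masm.subFromStackPtr(Imm32(8));  // ~ subPtr(Imm32(8), StackPointer)
    masm.loadStackPtr(address);      // ~ loadPtr(address, StackPointer)
    // On ARM64, each of these targets the pseudo stack pointer instead.
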
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -12,16 +12,18 @@
 #include "jscompartment.h"
 
 #if defined(JS_CODEGEN_X86)
 # include "jit/x86/MacroAssembler-x86.h"
 #elif defined(JS_CODEGEN_X64)
 # include "jit/x64/MacroAssembler-x64.h"
 #elif defined(JS_CODEGEN_ARM)
 # include "jit/arm/MacroAssembler-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/MacroAssembler-arm64.h"
 #elif defined(JS_CODEGEN_MIPS)
 # include "jit/mips/MacroAssembler-mips.h"
 #elif defined(JS_CODEGEN_NONE)
 # include "jit/none/MacroAssembler-none.h"
 #else
 # error "Unknown architecture!"
 #endif
 #include "jit/AtomicOp.h"
@@ -39,16 +41,18 @@
 # define PER_ARCH
 
 #if defined(JS_CODEGEN_X86)
 # define ONLY_X86_X64
 #elif defined(JS_CODEGEN_X64)
 # define ONLY_X86_X64
 #elif defined(JS_CODEGEN_ARM)
 # define ONLY_X86_X64 = delete
+#elif defined(JS_CODEGEN_ARM64)
+# define ONLY_X86_X64 = delete
 #elif defined(JS_CODEGEN_MIPS)
 # define ONLY_X86_X64 = delete
 #elif defined(JS_CODEGEN_NONE)
 # define ONLY_X86_X64 = delete
 #else
 # error "Unknown architecture!"
 #endif
 
@@ -228,36 +232,43 @@ class MacroAssembler : public MacroAssem
             constructRoot(cx);
 
         if (!jcx->temp) {
             MOZ_ASSERT(cx);
             alloc_.emplace(cx);
         }
 
         moveResolver_.setAllocator(*jcx->temp);
-#ifdef JS_CODEGEN_ARM
+
+#if defined(JS_CODEGEN_ARM)
         initWithAllocator();
         m_buffer.id = jcx->getNextAssemblerId();
+#elif defined(JS_CODEGEN_ARM64)
+        initWithAllocator();
+        armbuffer_.id = jcx->getNextAssemblerId();
 #endif
     }
 
     // This constructor should only be used when there is no JitContext active
     // (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp).
     explicit MacroAssembler(JSContext* cx, IonScript* ion = nullptr,
                             JSScript* script = nullptr, jsbytecode* pc = nullptr);
 
     // asm.js compilation handles its own JitContext-pushing
     struct AsmJSToken {};
     explicit MacroAssembler(AsmJSToken)
       : emitProfilingInstrumentation_(false),
         framePushed_(0)
     {
-#ifdef JS_CODEGEN_ARM
+#if defined(JS_CODEGEN_ARM)
         initWithAllocator();
         m_buffer.id = 0;
+#elif defined(JS_CODEGEN_ARM64)
+        initWithAllocator();
+        armbuffer_.id = 0;
 #endif
     }
 
     void enableProfilingInstrumentation() {
         emitProfilingInstrumentation_ = true;
     }
 
     void resetForNewCodeGenerator(TempAllocator& alloc);
@@ -567,17 +578,17 @@ class MacroAssembler : public MacroAssem
                 mov(JSReturnReg_Type, dest.typeReg());
             }
         } else {
             mov(JSReturnReg_Type, dest.typeReg());
             mov(JSReturnReg_Data, dest.payloadReg());
         }
 #elif defined(JS_PUNBOX64)
         if (dest.valueReg() != JSReturnReg)
-            movq(JSReturnReg, dest.valueReg());
+            mov(JSReturnReg, dest.valueReg());
 #else
 #error "Bad architecture"
 #endif
     }
 
     void storeCallResultValue(TypedOrValueRegister dest) {
         if (dest.hasValue())
             storeCallResultValue(dest.valueReg());
--- a/js/src/jit/MoveEmitter.h
+++ b/js/src/jit/MoveEmitter.h
@@ -6,16 +6,18 @@
 
 #ifndef jit_MoveEmitter_h
 #define jit_MoveEmitter_h
 
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
 # include "jit/x86-shared/MoveEmitter-x86-shared.h"
 #elif defined(JS_CODEGEN_ARM)
 # include "jit/arm/MoveEmitter-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/MoveEmitter-arm64.h"
 #elif defined(JS_CODEGEN_MIPS)
 # include "jit/mips/MoveEmitter-mips.h"
 #elif defined(JS_CODEGEN_NONE)
 # include "jit/none/MoveEmitter-none.h"
 #else
 # error "Unknown architecture!"
 #endif
 
--- a/js/src/jit/RegisterAllocator.h
+++ b/js/src/jit/RegisterAllocator.h
@@ -273,16 +273,20 @@ class RegisterAllocator
         allRegisters_(RegisterSet::All())
     {
         if (mir->compilingAsmJS()) {
 #if defined(JS_CODEGEN_X64)
             allRegisters_.take(AnyRegister(HeapReg));
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
             allRegisters_.take(AnyRegister(HeapReg));
             allRegisters_.take(AnyRegister(GlobalReg));
+#elif defined(JS_CODEGEN_ARM64)
+            allRegisters_.take(AnyRegister(HeapReg));
+            allRegisters_.take(AnyRegister(HeapLenReg));
+            allRegisters_.take(AnyRegister(GlobalReg));
 #endif
         } else {
             if (FramePointer != InvalidReg && mir->instrumentedProfiling())
                 allRegisters_.take(AnyRegister(FramePointer));
         }
     }
 
     bool init();
--- a/js/src/jit/Registers.h
+++ b/js/src/jit/Registers.h
@@ -9,16 +9,18 @@
 
 #include "mozilla/Array.h"
 
 #include "jit/IonTypes.h"
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
 # include "jit/x86-shared/Architecture-x86-shared.h"
 #elif defined(JS_CODEGEN_ARM)
 # include "jit/arm/Architecture-arm.h"
+#elif defined(JS_CODEGEN_ARM64)
+# include "jit/arm64/Architecture-arm64.h"
 #elif defined(JS_CODEGEN_MIPS)
 # include "jit/mips/Architecture-mips.h"
 #elif defined(JS_CODEGEN_NONE)
 # include "jit/none/Architecture-none.h"
 #else
 # error "Unknown architecture!"
 #endif
 
@@ -37,18 +39,17 @@ struct Register {
         Register r = { Encoding(i) };
         return r;
     }
     static Register FromName(const char* name) {
         Code code = Registers::FromName(name);
         Register r = { Encoding(code) };
         return r;
     }
-    Code code() const {
-        MOZ_ASSERT(Code(reg_) < Registers::Total);
+    MOZ_CONSTEXPR Code code() const {
         return Code(reg_);
     }
     Encoding encoding() const {
         MOZ_ASSERT(Code(reg_) < Registers::Total);
         return reg_;
     }
     const char* name() const {
         return Registers::GetName(code());
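
Making `code()` MOZ_CONSTEXPR costs the MOZ_ASSERT: a C++11 constexpr function body is limited to a single return statement, so the range check survives only in `encoding()`. The payoff is that register codes become usable in constant expressions; a hypothetical example, assuming MOZ_CONSTEXPR expands to `constexpr` on the host compiler:

    constexpr js::jit::Register r{js::jit::Register::Encoding(0)};
    static_assert(r.code() == 0, "code() is now usable at compile time");
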
--- a/js/src/jit/arm/Simulator-arm.h
+++ b/js/src/jit/arm/Simulator-arm.h
@@ -156,18 +156,21 @@ class Simulator
     int get_sinteger_from_s_register(int sreg) {
         return getFromVFPRegister<int, 1>(sreg);
     }
 
     // Special case of set_register and get_register to access the raw PC value.
     void set_pc(int32_t value);
     int32_t get_pc() const;
 
-    void set_resume_pc(int32_t value) {
-        resume_pc_ = value;
+    template <typename T>
+    T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
+
+    void set_resume_pc(void* value) {
+        resume_pc_ = int32_t(value);
     }
 
     void enable_single_stepping(SingleStepCallback cb, void* arg);
     void disable_single_stepping();
 
     uintptr_t stackLimit() const;
     bool overRecursed(uintptr_t newsp = 0) const;
     bool overRecursedWithExtra(uint32_t extra) const;
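
`get_pc_as<T>()` lets each simulator keep its natural pc representation (`int32_t` here and on MIPS, `const Instruction*` in the vixl simulator) while callers pick the pointer type they need. A usage sketch, for a `Simulator* sim`:

    void* raw = sim->get_pc_as<void*>();         // e.g. for containsFunctionPC()
    uint8_t* code = sim->get_pc_as<uint8_t*>();  // e.g. for byte arithmetic
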
--- a/js/src/jit/arm64/Assembler-arm64.cpp
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -318,17 +318,17 @@ Assembler::addPatchableJump(BufferOffset
         addJumpRelocation(src, reloc);
 
     size_t extendedTableIndex = pendingJumps_.length();
     enoughMemory_ &= pendingJumps_.append(RelativePatch(src, nullptr, reloc));
     return extendedTableIndex;
 }
 
 void
-PatchJump(CodeLocationJump& jump_, CodeLocationLabel label)
+PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
 {
     MOZ_CRASH("PatchJump");
 }
 
 void
 Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                    PatchedImmPtr expected)
 {
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -548,20 +548,20 @@ GetTempRegForIntArg(uint32_t usedIntArgs
     // Unfortunately, we have to assume things about the point at which
     // GetIntArgReg returns false, because we need to know how many registers it
     // can allocate.
     usedIntArgs -= NumIntArgRegs;
     if (usedIntArgs >= NumCallTempNonArgRegs)
         return false;
     *out = CallTempNonArgRegs[usedIntArgs];
     return true;
-
 }
 
-void PatchJump(CodeLocationJump& jump_, CodeLocationLabel label);
+void PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
+               ReprotectCode reprotect = DontReprotect);
 
 static inline void
 PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
 {
     PatchJump(jump_, label);
 }
 
 // Forbids pool generation during a specified interval. Not nestable.
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -1,17 +1,17 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/arm64/MacroAssembler-arm64.h"
 
-// TODO #include "jit/arm64/MoveEmitter-arm64.h"
+#include "jit/arm64/MoveEmitter-arm64.h"
 #include "jit/arm64/SharedICRegisters-arm64.h"
 #include "jit/Bailouts.h"
 #include "jit/BaselineFrame.h"
 #include "jit/MacroAssembler.h"
 
 namespace js {
 namespace jit {
 
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -174,19 +174,16 @@ class MacroAssemblerCompat : public vixl
             vixl::MacroAssembler::Push(vixl::xzr);
         } else {
             vixl::UseScratchRegisterScope temps(this);
             const ARMRegister scratch64 = temps.AcquireX();
             movePtr(imm, scratch64.asUnsized());
             vixl::MacroAssembler::Push(scratch64);
         }
     }
-    void push(ImmMaybeNurseryPtr imm) {
-        push(noteMaybeNurseryPtr(imm));
-    }
     void push(ARMRegister reg) {
         vixl::MacroAssembler::Push(reg);
     }
     void push(Address a) {
         vixl::UseScratchRegisterScope temps(this);
         const ARMRegister scratch64 = temps.AcquireX();
         MOZ_ASSERT(a.base != scratch64.asUnsized());
         loadPtr(a, scratch64.asUnsized());
@@ -804,23 +801,29 @@ class MacroAssemblerCompat : public vixl
     void movePtr(AsmJSImmPtr imm, Register dest) {
         BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
         append(AsmJSAbsoluteLink(CodeOffsetLabel(off.getOffset()), imm.kind()));
     }
     void movePtr(ImmGCPtr imm, Register dest) {
         BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
         writeDataRelocation(imm, load);
     }
-    void movePtr(ImmMaybeNurseryPtr imm, Register dest) {
-        movePtr(noteMaybeNurseryPtr(imm), dest);
-    }
 
     void mov(ImmWord imm, Register dest) {
         movePtr(imm, dest);
     }
+    void mov(ImmPtr imm, Register dest) {
+        movePtr(imm, dest);
+    }
+    void mov(AsmJSImmPtr imm, Register dest) {
+        movePtr(imm, dest);
+    }
+    void mov(Register src, Register dest) {
+        movePtr(src, dest);
+    }
 
     void move32(Imm32 imm, Register dest) {
         Mov(ARMRegister(dest, 32), (int64_t)imm.value);
     }
     void move32(Register src, Register dest) {
         Mov(ARMRegister(dest, 32), ARMRegister(src, 32));
     }
 
@@ -1213,19 +1216,16 @@ class MacroAssemblerCompat : public vixl
     }
     void cmpPtr(Register lhs, ImmGCPtr rhs) {
         vixl::UseScratchRegisterScope temps(this);
         const Register scratch = temps.AcquireX().asUnsized();
         MOZ_ASSERT(scratch != lhs);
         movePtr(rhs, scratch);
         cmpPtr(lhs, scratch);
     }
-    void cmpPtr(Register lhs, ImmMaybeNurseryPtr rhs) {
-        cmpPtr(lhs, noteMaybeNurseryPtr(rhs));
-    }
 
     void cmpPtr(const Address& lhs, Register rhs) {
         vixl::UseScratchRegisterScope temps(this);
         const ARMRegister scratch64 = temps.AcquireX();
         MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
         MOZ_ASSERT(scratch64.asUnsized() != rhs);
         Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset));
         Cmp(scratch64, Operand(ARMRegister(rhs, 64)));
@@ -1789,19 +1789,16 @@ class MacroAssemblerCompat : public vixl
         MOZ_ASSERT(scratch2_64.asUnsized() != lhs.base);
 
         movePtr(ptr, scratch1_64.asUnsized());
         loadPtr(lhs, scratch2_64.asUnsized());
         cmp(scratch2_64, scratch1_64);
         B(cond, label);
 
     }
-    void branchPtr(Condition cond, Address lhs, ImmMaybeNurseryPtr ptr, Label* label) {
-        branchPtr(cond, lhs, noteMaybeNurseryPtr(ptr), label);
-    }
     void branchPtr(Condition cond, Register lhs, Register rhs, Label* label) {
         Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
         B(label, cond);
     }
     void branchPtr(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) {
         vixl::UseScratchRegisterScope temps(this);
         const Register scratch = temps.AcquireX().asUnsized();
         MOZ_ASSERT(scratch != rhs);
--- a/js/src/jit/arm64/vixl/Assembler-vixl.h
+++ b/js/src/jit/arm64/vixl/Assembler-vixl.h
@@ -210,16 +210,22 @@ class Register : public CPURegister {
     : CPURegister(r.code(), size, kRegister) {
   }
 
   bool IsValid() const {
     VIXL_ASSERT(IsRegister() || IsNone());
     return IsValidRegister();
   }
 
+  js::jit::Register asUnsized() const {
+    if (code_ == kSPRegInternalCode)
+      return js::jit::Register::FromCode((js::jit::Register::Code)kZeroRegCode);
+    return js::jit::Register::FromCode((js::jit::Register::Code)code_);
+  }
+
   static const Register& WRegFromCode(unsigned code);
   static const Register& XRegFromCode(unsigned code);
 
   // V8 compatibility.
   static const int kNumRegisters = kNumberOfRegisters;
   static const int kNumAllocatableRegisters = kNumberOfRegisters - 1;
 
  private:
@@ -492,16 +498,29 @@ class Operand {
           Shift shift = LSL,
           unsigned shift_amount = 0);   // NOLINT(runtime/explicit)
 
   // rm, {<extend> {#<shift_amount>}}
   // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
   //       <shift_amount> is uint2_t.
   explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
 
+  // FIXME: Temporary constructors for compilation.
+  // FIXME: These should be removed -- Operand should not leak into shared code.
+  // FIXME: Something like an LAllocationUnion for {gpreg, fpreg, Address} is wanted.
+  explicit Operand(js::jit::Register) {
+    MOZ_CRASH("Operand with Register");
+  }
+  explicit Operand(js::jit::FloatRegister) {
+    MOZ_CRASH("Operand with FloatRegister");
+  }
+  explicit Operand(js::jit::Register, int32_t) {
+    MOZ_CRASH("Operand with implicit Address");
+  }
+
   bool IsImmediate() const;
   bool IsShiftedRegister() const;
   bool IsExtendedRegister() const;
   bool IsZero() const;
 
   // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
   // which helps in the encoding of instructions that use the stack pointer.
   Operand ToExtendedRegister() const;
@@ -560,18 +579,20 @@ class MemOperand {
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount = 0);
   explicit MemOperand(Register base,
                       const Operand& offset,
                       AddrMode addrmode = Offset);
 
   // Adapter constructors using C++11 delegating.
+  // TODO: If sp == kSPRegInternalCode, the xzr check isn't necessary.
   explicit MemOperand(js::jit::Address addr)
-    : MemOperand(Register(addr.base, 64), (ptrdiff_t)addr.offset) {
+    : MemOperand(addr.base.code() == 31 ? sp : Register(addr.base, 64),
+		 (ptrdiff_t)addr.offset) {
   }
 
   const Register& base() const {
     return base_;
   }
   const Register& regoffset() const {
     return regoffset_;
   }
@@ -668,16 +689,39 @@ class Assembler : public MozBaseAssemble
   // In order to avoid any accidental transfer of state, Reset ASSERTs that the
   // constant pool is not blocked.
   void Reset();
 
   // Finalize a code buffer of generated instructions. This function must be
   // called before executing or copying code from the buffer.
   void FinalizeCode();
 
+#define COPYENUM(v) static const Condition v = vixl::v
+#define COPYENUM_(v) static const Condition v = vixl::v##_
+  COPYENUM(Equal);
+  COPYENUM(Zero);
+  COPYENUM(NotEqual);
+  COPYENUM(NonZero);
+  COPYENUM(AboveOrEqual);
+  COPYENUM(Below);
+  COPYENUM(Signed);
+  COPYENUM(NotSigned);
+  COPYENUM(Overflow);
+  COPYENUM(NoOverflow);
+  COPYENUM(Above);
+  COPYENUM(BelowOrEqual);
+  COPYENUM_(GreaterThanOrEqual);
+  COPYENUM_(LessThan);
+  COPYENUM_(GreaterThan);
+  COPYENUM_(LessThanOrEqual);
+  COPYENUM(Always);
+  COPYENUM(Never);
+#undef COPYENUM
+#undef COPYENUM_
+
   // Bit set when a DoubleCondition does not map to a single ARM condition.
   // The MacroAssembler must special-case these conditions, or else
   // ConditionFromDoubleCondition will complain.
   static const int DoubleConditionBitSpecial = 0x100;
 
   enum DoubleCondition {
     DoubleOrdered                        = Condition::vc,
     DoubleEqual                          = Condition::eq,
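
Two of these hunks manage the sp/xzr ambiguity at the js::jit/vixl boundary: both are "register 31", so vixl keeps sp distinct with an out-of-band internal code, while js::jit has only codes 0 through 31. `asUnsized()` folds vixl's internal sp code back onto the shared code, and the `MemOperand(js::jit::Address)` adapter maps a base of code 31 to `vixl::sp` rather than xzr. A sketch of the adapter's effect, assuming those encodings:

    // Address based off the jit-side stack pointer (code 31):
    js::jit::Address addr(js::jit::Register::FromCode(31), 16);
    vixl::MemOperand mem(addr);  // mem.base() is vixl::sp, not xzr
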
--- a/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
@@ -1199,17 +1199,17 @@ void MacroAssembler::PrintfNoPreserve(co
   // directly in the instruction stream. It might be cleaner to encode it in a
   // literal pool, but since Printf is usually used for debugging, it is
   // beneficial for it to be minimally dependent on other features.
   temps.Exclude(x0);
   Label format_address;
   Adr(x0, &format_address);
 
   // Emit the format string directly in the instruction stream.
-  { 
+  {
     flushBuffer();
     Label after_data;
     B(&after_data);
     Bind(&format_address);
     EmitStringData(format);
     Unreachable();
     Bind(&after_data);
   }
--- a/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
@@ -56,26 +56,26 @@ void Assembler::FinalizeCode() {
 // The offset is calculated by aligning the PC and label addresses down to a
 // multiple of element_size, then calculating the (scaled) offset between them.
 // This matches the semantics of adrp, for example.
 template <int element_size>
 ptrdiff_t Assembler::LinkAndGetOffsetTo(BufferOffset branch, Label* label) {
   if (armbuffer_.oom())
     return js::jit::LabelBase::INVALID_OFFSET;
 
-  // The label is bound: all uses are already linked.
   if (label->bound()) {
+    // The label is bound: all uses are already linked.
     ptrdiff_t branch_offset = ptrdiff_t(branch.getOffset() / element_size);
     ptrdiff_t label_offset = ptrdiff_t(label->offset() / element_size);
     return label_offset - branch_offset;
   }
 
-  // The label is unbound and unused: store the offset in the label itself
-  // for patching by bind().
   if (!label->used()) {
+    // The label is unbound and unused: store the offset in the label itself
+    // for patching by bind().
     label->use(branch.getOffset());
     return js::jit::LabelBase::INVALID_OFFSET;
   }
 
   // The label is unbound but used. Create an implicit linked list between
   // the branches, and update the linked list head in the label struct.
   ptrdiff_t prevHeadOffset = static_cast<ptrdiff_t>(label->offset());
   label->use(branch.getOffset());
@@ -115,29 +115,29 @@ BufferOffset Assembler::b(int imm19, Con
 
 
 void Assembler::b(Instruction* at, int imm19, Condition cond) {
   EmitBranch(at, B_cond | ImmCondBranch(imm19) | cond);
 }
 
 
 BufferOffset Assembler::b(Label* label) {
-  // Flush the instruction buffer before calculating relative offset.
+  // Flush the instruction buffer if necessary before getting an offset.
   BufferOffset branch = b(0);
   Instruction* ins = getInstructionAt(branch);
   VIXL_ASSERT(ins->IsUncondBranchImm());
 
   // Encode the relative offset.
   b(ins, LinkAndGetInstructionOffsetTo(branch, label));
   return branch;
 }
 
 
 BufferOffset Assembler::b(Label* label, Condition cond) {
-  // Flush the instruction buffer before calculating relative offset.
+  // Flush the instruction buffer if necessary before getting an offset.
   BufferOffset branch = b(0, Always);
   Instruction* ins = getInstructionAt(branch);
   VIXL_ASSERT(ins->IsCondBranchImm());
 
   // Encode the relative offset.
   b(ins, LinkAndGetInstructionOffsetTo(branch, label), cond);
   return branch;
 }
@@ -149,17 +149,17 @@ void Assembler::bl(int imm26) {
 
 
 void Assembler::bl(Instruction* at, int imm26) {
   EmitBranch(at, BL | ImmUncondBranch(imm26));
 }
 
 
 void Assembler::bl(Label* label) {
-  // Flush the instruction buffer before calculating relative offset.
+  // Flush the instruction buffer if necessary before getting an offset.
   BufferOffset branch = b(0);
   Instruction* ins = getInstructionAt(branch);
 
   // Encode the relative offset.
   bl(ins, LinkAndGetInstructionOffsetTo(branch, label));
 }
 
 
@@ -169,17 +169,17 @@ void Assembler::cbz(const Register& rt, 
 
 
 void Assembler::cbz(Instruction* at, const Register& rt, int imm19) {
   EmitBranch(at, SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
 }
 
 
 void Assembler::cbz(const Register& rt, Label* label) {
-  // Flush the instruction buffer before calculating relative offset.
+  // Flush the instruction buffer if necessary before getting an offset.
   BufferOffset branch = b(0);
   Instruction* ins = getInstructionAt(branch);
 
   // Encode the relative offset.
   cbz(ins, rt, LinkAndGetInstructionOffsetTo(branch, label));
 }
 
 
@@ -189,17 +189,17 @@ void Assembler::cbnz(const Register& rt,
 
 
 void Assembler::cbnz(Instruction* at, const Register& rt, int imm19) {
   EmitBranch(at, SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
 }
 
 
 void Assembler::cbnz(const Register& rt, Label* label) {
-  // Flush the instruction buffer before calculating relative offset.
+  // Flush the instruction buffer if necessary before getting an offset.
   BufferOffset branch = b(0);
   Instruction* ins = getInstructionAt(branch);
 
   // Encode the relative offset.
   cbnz(ins, rt, LinkAndGetInstructionOffsetTo(branch, label));
 }
 
 
@@ -211,17 +211,17 @@ void Assembler::tbz(const Register& rt, 
 
 void Assembler::tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
   EmitBranch(at, TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
 }
 
 
 void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
-  // Flush the instruction buffer before calculating relative offset.
+  // Flush the instruction buffer if necessary before getting an offset.
   BufferOffset branch = b(0);
   Instruction* ins = getInstructionAt(branch);
 
   // Encode the relative offset.
   tbz(ins, rt, bit_pos, LinkAndGetInstructionOffsetTo(branch, label));
 }
 
 
@@ -233,17 +233,17 @@ void Assembler::tbnz(const Register& rt,
 
 void Assembler::tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
   VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
   EmitBranch(at, TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
 }
 
 
 void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
-  // Flush the instruction buffer before calculating relative offset.
+  // Flush the instruction buffer if necessary before getting an offset.
   BufferOffset branch = b(0);
   Instruction* ins = getInstructionAt(branch);
 
   // Encode the relative offset.
   tbnz(ins, rt, bit_pos, LinkAndGetInstructionOffsetTo(branch, label));
 }
 
 
@@ -255,18 +255,18 @@ void Assembler::adr(const Register& rd, 
 
 void Assembler::adr(Instruction* at, const Register& rd, int imm21) {
   VIXL_ASSERT(rd.Is64Bits());
   EmitBranch(at, ADR | ImmPCRelAddress(imm21) | Rd(rd));
 }
 
 
 void Assembler::adr(const Register& rd, Label* label) {
-  // Flush the instruction buffer before calculating relative offset.
-  // ADR is not a branch.
+  // Flush the instruction buffer if necessary before getting an offset.
+  // Note that ADR is not a branch, but it encodes an offset like a branch.
   BufferOffset offset = Emit(0);
   Instruction* ins = getInstructionAt(offset);
 
   // Encode the relative offset.
   adr(ins, rd, LinkAndGetByteOffsetTo(offset, label));
 }
 
 
@@ -280,16 +280,17 @@ void Assembler::adrp(Instruction* at, co
   VIXL_ASSERT(rd.Is64Bits());
   EmitBranch(at, ADRP | ImmPCRelAddress(imm21) | Rd(rd));
 }
 
 
 void Assembler::adrp(const Register& rd, Label* label) {
   VIXL_ASSERT(AllowPageOffsetDependentCode());
 
+  // Flush the instruction buffer if necessary before getting an offset.
   BufferOffset offset = Emit(0);
   Instruction* ins = getInstructionAt(offset);
 
   // Encode the relative offset.
   adrp(ins, rd, LinkAndGetPageOffsetTo(offset, label));
 }
 
 
@@ -396,17 +397,18 @@ void MozBaseAssembler::InsertIndexIntoTa
 
 bool MozBaseAssembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
   Instruction* load = reinterpret_cast<Instruction*>(loadAddr);
 
   // The load currently contains the js::jit::PoolEntry's index,
   // as written by InsertIndexIntoTag().
   uint32_t index = load->ImmLLiteral();
 
-  // Each entry in the literal pool is uint32_t-sized.
+  // Each entry in the literal pool is uint32_t-sized,
+  // but literals may use multiple entries.
   uint32_t* constPool = reinterpret_cast<uint32_t*>(constPoolAddr);
   Instruction* source = reinterpret_cast<Instruction*>(&constPool[index]);
 
   load->SetImmLLiteral(source);
   return false; // Nothing uses the return value.
 }
 
 
@@ -419,16 +421,20 @@ struct PoolHeader {
   uint32_t data;
 
   struct Header {
     // The size should take into account the pool header.
     // The size is in units of Instruction (4bytes), not byte.
     union {
       struct {
         uint32_t size : 15;
+
+	// "Natural" guards are part of the normal instruction stream,
+	// while "non-natural" guards are inserted for the sole purpose
+	// of skipping around a pool.
         bool isNatural : 1;
         uint32_t ONES : 16;
       };
       uint32_t data;
     };
 
     Header(int size_, bool isNatural_)
       : size(size_),
@@ -464,24 +470,23 @@ struct PoolHeader {
   }
 };
 
 
 void MozBaseAssembler::WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural) {
   JS_STATIC_ASSERT(sizeof(PoolHeader) == 4);
 
   // Get the total size of the pool.
-  uint8_t* pool = start + sizeof(PoolHeader) + p->getPoolSize();
+  const uintptr_t totalPoolSize = sizeof(PoolHeader) + p->getPoolSize();
+  const uintptr_t totalPoolInstructions = totalPoolSize / sizeof(Instruction);
 
-  uintptr_t size = pool - start;
-  VIXL_ASSERT((size & 3) == 0);
-  size = size >> 2;
-  VIXL_ASSERT(size < (1 << 15));
+  VIXL_ASSERT((totalPoolSize & 0x3) == 0);
+  VIXL_ASSERT(totalPoolInstructions < (1 << 15));
 
-  PoolHeader header(size, isNatural);
+  PoolHeader header(totalPoolInstructions, isNatural);
   *(PoolHeader*)start = header;
 }
 
 
 void MozBaseAssembler::WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural) {
   return;
 }
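
The rewritten WritePoolHeader computes the same header as before but makes the units explicit: the size is measured in 4-byte Instruction slots (pool data plus the header itself) and must fit the 15-bit field, with the top 16 bits all ones as a marker. A decoding sketch, assuming the bitfield is laid out low-bits-first as the supported compilers do:

    uint32_t header = *reinterpret_cast<const uint32_t*>(start);
    uint32_t sizeInInstructions = header & 0x7fff;  // includes the header word
    bool isNatural = (header >> 15) & 1;            // guard came from real code
    bool looksValid = (header >> 16) == 0xffff;     // the ONES marker field
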
 
--- a/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
+++ b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
@@ -152,17 +152,17 @@ Simulator* Simulator::Create() {
   Simulator* sim = js_new<Simulator>();
   if (!sim) {
     MOZ_CRASH("NEED SIMULATOR");
     return nullptr;
   }
   sim->init(decoder, stdout);
 
   return sim;
-} 
+}
 
 
 void Simulator::Destroy(Simulator* sim) {
   js_delete(sim);
 }
 
 
 void Simulator::ExecuteInstruction() {
@@ -202,18 +202,18 @@ bool Simulator::overRecursed(uintptr_t n
 
 
 bool Simulator::overRecursedWithExtra(uint32_t extra) const {
   uintptr_t newsp = xreg(31, Reg31IsStackPointer) - extra;
   return newsp <= stackLimit();
 }
 
 
-void Simulator::set_resume_pc(const Instruction* new_resume_pc) {
-  resume_pc_ = AddressUntag(new_resume_pc);
+void Simulator::set_resume_pc(void* new_resume_pc) {
+  resume_pc_ = AddressUntag(reinterpret_cast<Instruction*>(new_resume_pc));
 }
 
 
 int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
   va_list parameters;
   va_start(parameters, argument_count);
 
   // First eight arguments passed in registers.
--- a/js/src/jit/arm64/vixl/Simulator-vixl.h
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.h
@@ -24,27 +24,30 @@
 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #ifndef VIXL_A64_SIMULATOR_A64_H_
 #define VIXL_A64_SIMULATOR_A64_H_
 
 #include "mozilla/Vector.h"
 
+#include "js-config.h"
 #include "jsalloc.h"
 
 #include "jit/arm64/vixl/Assembler-vixl.h"
 #include "jit/arm64/vixl/Disasm-vixl.h"
 #include "jit/arm64/vixl/Globals-vixl.h"
 #include "jit/arm64/vixl/Instructions-vixl.h"
 #include "jit/arm64/vixl/Instrument-vixl.h"
 #include "jit/arm64/vixl/Utils-vixl.h"
 #include "jit/IonTypes.h"
 #include "vm/PosixNSPR.h"
 
+#ifdef JS_SIMULATOR_ARM64
+
 #define JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, extra, onerror)             \
     JS_BEGIN_MACRO                                                              \
         if (cx->mainThread().simulator()->overRecursedWithExtra(extra)) {       \
             js::ReportOverRecursed(cx);                                         \
             onerror;                                                            \
         }                                                                       \
     JS_END_MACRO
 
@@ -333,27 +336,36 @@ class Simulator : public DecoderVisitor 
   void setGPR32Result(int32_t result);
   void setGPR64Result(int64_t result);
   void setFP32Result(float result);
   void setFP64Result(double result);
   void VisitCallRedirection(const Instruction* instr);
 
   void ResetState();
 
+  static inline uintptr_t StackLimit() {
+    return Simulator::Current()->stackLimit();
+  }
+
   // Run the simulator.
   virtual void Run();
   void RunFrom(const Instruction* first);
 
   // Simulation helpers.
   const Instruction* pc() const { return pc_; }
+  const Instruction* get_pc() const { return pc_; }
+
+  template <typename T>
+  T get_pc_as() const { return reinterpret_cast<T>(const_cast<Instruction*>(pc())); }
+
   void set_pc(const Instruction* new_pc) {
     pc_ = AddressUntag(new_pc);
     pc_modified_ = true;
   }
-  void set_resume_pc(const Instruction* new_resume_pc);
+  void set_resume_pc(void* new_resume_pc);
 
   void increment_pc() {
     if (!pc_modified_) {
       pc_ = pc_->NextInstruction();
     }
 
     pc_modified_ = false;
   }
@@ -925,19 +937,18 @@ class Simulator : public DecoderVisitor 
   static int CalcZFlag(uint64_t result) {
     return result == 0;
   }
 
   static const uint32_t kConditionFlagsMask = 0xf0000000;
 
   // Stack
   byte* stack_;
-  static const int stack_protection_size_ = 256;
-  // 2 KB stack.
-  static const int stack_size_ = 2 * 1024 + 2 * stack_protection_size_;
+  static const int stack_protection_size_ = 128 * KBytes;
+  static const int stack_size_ = (2 * MBytes) + (2 * stack_protection_size_);
   byte* stack_limit_;
 
   Decoder* decoder_;
   // Indicates if the pc has been modified by the instruction and should not be
   // automatically incremented.
   bool pc_modified_;
   const Instruction* pc_;
   const Instruction* resume_pc_;
@@ -969,9 +980,10 @@ class Simulator : public DecoderVisitor 
 #ifdef DEBUG
   PRThread* lockOwner_;
 #endif
   Redirection* redirection_;
   mozilla::Vector<int64_t, 0, js::SystemAllocPolicy> spStack_;
 };
 }  // namespace vixl
 
+#endif  // JS_SIMULATOR_ARM64
 #endif  // VIXL_A64_SIMULATOR_A64_H_
--- a/js/src/jit/mips/Simulator-mips.h
+++ b/js/src/jit/mips/Simulator-mips.h
@@ -179,18 +179,21 @@ class Simulator {
     void setFCSRBit(uint32_t cc, bool value);
     bool testFCSRBit(uint32_t cc);
     bool setFCSRRoundError(double original, double rounded);
 
     // Special case of set_register and get_register to access the raw PC value.
     void set_pc(int32_t value);
     int32_t get_pc() const;
 
-    void set_resume_pc(int32_t value) {
-        resume_pc_ = value;
+    template <typename T>
+    T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
+
+    void set_resume_pc(void* value) {
+        resume_pc_ = int32_t(value);
     }
 
     // Accessor to the internal simulator stack area.
     uintptr_t stackLimit() const;
     bool overRecursed(uintptr_t newsp = 0) const;
     bool overRecursedWithExtra(uint32_t extra) const;
 
     // Executes MIPS instructions until the PC reaches end_sim_pc.
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -13,25 +13,27 @@
 
 #include "asmjs/AsmJSFrameIterator.h"
 #include "jit/JitAllocPolicy.h"
 #include "jit/Label.h"
 #include "jit/Registers.h"
 #include "jit/RegisterSets.h"
 #include "vm/HelperThreads.h"
 
-#if defined(JS_CODEGEN_ARM)
-#define JS_USE_LINK_REGISTER
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+// Push return addresses callee-side.
+# define JS_USE_LINK_REGISTER
 #endif
 
-#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
 // JS_SMALL_BRANCH means the range on a branch instruction
 // is smaller than the whole address space
-#    define JS_SMALL_BRANCH
+# define JS_SMALL_BRANCH
 #endif
+
 namespace js {
 namespace jit {
 
 namespace Disassembler {
 class HeapAccess;
 };
 
 static const uint32_t Simd128DataSize = 4 * sizeof(int32_t);
@@ -701,19 +703,23 @@ struct AsmJSFrame
 };
 static_assert(sizeof(AsmJSFrame) == 2 * sizeof(void*), "?!");
 static const uint32_t AsmJSFrameBytesAfterReturnAddress = sizeof(void*);
 
 // A hoisting of constants that would otherwise require #including AsmJSModule.h
 // everywhere. Values are asserted in AsmJSModule.h.
 static const unsigned AsmJSActivationGlobalDataOffset = 0;
 static const unsigned AsmJSHeapGlobalDataOffset = sizeof(void*);
-static const unsigned AsmJSNaN64GlobalDataOffset = 2 * sizeof(void*);
-static const unsigned AsmJSNaN32GlobalDataOffset = 2 * sizeof(void*) + sizeof(double);
-
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
+static const unsigned AsmJSNaN64GlobalDataOffset = 3 * sizeof(void*);
+static const unsigned AsmJSNaN32GlobalDataOffset = 3 * sizeof(void*) + sizeof(double);
+#else
+static const unsigned AsmJSNaN64GlobalDataOffset = 4 * sizeof(void*);
+static const unsigned AsmJSNaN32GlobalDataOffset = 4 * sizeof(void*) + sizeof(double);
+#endif
 // Summarizes a heap access made by asm.js code that needs to be patched later
 // and/or looked up by the asm.js signal handlers. Different architectures need
 // to know different things (x64: offset and length, ARM: where to patch in
 // heap length, x86: where to patch in heap length and base) hence the massive
 // #ifdefery.
 class AsmJSHeapAccess
 {
 #if defined(JS_CODEGEN_X64)
@@ -764,17 +770,17 @@ class AsmJSHeapAccess
     {
         mozilla::PodZero(this);  // zero padding for Valgrind
         insnOffset_ = insnOffset;
         offsetWithinWholeSimdVector_ = offsetWithinWholeSimdVector;
         throwOnOOB_ = oob == Throw;
         cmpDelta_ = cmp == NoLengthCheck ? 0 : insnOffset - cmp;
         MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector);
     }
-#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS)
     explicit AsmJSHeapAccess(uint32_t insnOffset)
     {
         mozilla::PodZero(this);  // zero padding for Valgrind
         insnOffset_ = insnOffset;
     }
 #endif
 
     uint32_t insnOffset() const { return insnOffset_; }
--- a/js/src/jit/shared/CodeGenerator-shared-inl.h
+++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
@@ -216,17 +216,17 @@ CodeGeneratorShared::ToStackOffset(const
 
 Operand
 CodeGeneratorShared::ToOperand(const LAllocation& a)
 {
     if (a.isGeneralReg())
         return Operand(a.toGeneralReg()->reg());
     if (a.isFloatReg())
         return Operand(a.toFloatReg()->reg());
-    return Operand(StackPointer, ToStackOffset(&a));
+    return Operand(masm.getStackPointer(), ToStackOffset(&a));
 }
 
 Operand
 CodeGeneratorShared::ToOperand(const LAllocation* a)
 {
     return ToOperand(*a);
 }
 
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1380,45 +1380,46 @@ CodeGeneratorShared::emitTruncateFloat32
 
 void
 CodeGeneratorShared::visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool)
 {
     FloatRegister src = ool->src();
     Register dest = ool->dest();
 
     saveVolatile(dest);
-#if defined(JS_CODEGEN_ARM)
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
     if (ool->needFloat32Conversion()) {
         masm.convertFloat32ToDouble(src, ScratchDoubleReg);
         src = ScratchDoubleReg;
     }
-
 #else
     FloatRegister srcSingle = src.asSingle();
     if (ool->needFloat32Conversion()) {
         MOZ_ASSERT(src.isSingle());
         masm.push(src);
         masm.convertFloat32ToDouble(src, src);
         src = src.asDouble();
     }
 #endif
+
     masm.setupUnalignedABICall(1, dest);
     masm.passABIArg(src, MoveOp::DOUBLE);
     if (gen->compilingAsmJS())
         masm.callWithABI(AsmJSImm_ToInt32);
     else
         masm.callWithABI(BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
     masm.storeCallResult(dest);
 
-#if !defined(JS_CODEGEN_ARM)
+#if !defined(JS_CODEGEN_ARM) && !defined(JS_CODEGEN_ARM64)
     if (ool->needFloat32Conversion())
         masm.pop(srcSingle);
 #endif
+
     restoreVolatile(dest);
-
     masm.jump(ool->rejoin());
 }
 
 bool
 CodeGeneratorShared::omitOverRecursedCheck() const
 {
     // If the current function makes no calls (which means it isn't recursive)
     // and it uses only a small amount of stack space, it doesn't need a
--- a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
+++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
@@ -818,17 +818,17 @@ struct AssemblerBufferWithConstantPools 
     void patchBranch(Inst* i, unsigned curpool, BufferOffset branch) {
         const Inst* ci = i;
         ptrdiff_t offset = Asm::GetBranchOffset(ci);
         // If the offset is 0, then there is nothing to do.
         if (offset == 0)
             return;
         unsigned destOffset = branch.getOffset() + offset;
         if (offset > 0) {
-            while (curpool < numDumps_ && poolInfo_[curpool].offset <= destOffset) {
+            while (curpool < numDumps_ && poolInfo_[curpool].offset <= (size_t)destOffset) {
                 offset += poolInfo_[curpool].size;
                 curpool++;
             }
         } else {
             // Ignore the pool that comes next, since this is a backwards
             // branch.
             for (int p = curpool - 1; p >= 0 && poolInfo_[p].offset > destOffset; p--)
                 offset -= poolInfo_[p].size;
--- a/js/src/jit/shared/Lowering-shared-inl.h
+++ b/js/src/jit/shared/Lowering-shared-inl.h
@@ -306,17 +306,17 @@ LIRGeneratorShared::useRegisterOrZeroAtS
 LAllocation
 LIRGeneratorShared::useRegisterOrNonDoubleConstant(MDefinition* mir)
 {
     if (mir->isConstant() && mir->type() != MIRType_Double && mir->type() != MIRType_Float32)
         return LAllocation(mir->toConstant()->vp());
     return useRegister(mir);
 }
 
-#if defined(JS_CODEGEN_ARM)
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
 LAllocation
 LIRGeneratorShared::useAnyOrConstant(MDefinition* mir)
 {
     return useRegisterOrConstant(mir);
 }
 LAllocation
 LIRGeneratorShared::useStorable(MDefinition* mir)
 {
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -27,16 +27,17 @@
 #include "jsobj.h"
 #include "jsscript.h"
 #include "jswatchpoint.h"
 #include "jswin.h"
 #include "jswrapper.h"
 
 #include "asmjs/AsmJSSignalHandlers.h"
 #include "jit/arm/Simulator-arm.h"
+#include "jit/arm64/vixl/Simulator-vixl.h"
 #include "jit/JitCompartment.h"
 #include "jit/mips/Simulator-mips.h"
 #include "jit/PcScriptCache.h"
 #include "js/MemoryMetrics.h"
 #include "js/SliceBudget.h"
 #include "vm/Debugger.h"
 
 #include "jscntxtinlines.h"
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -60,16 +60,22 @@ class TraceLoggerThread;
 
 /* Thread Local Storage slot for storing the runtime for a thread. */
 extern mozilla::ThreadLocal<PerThreadData*> TlsPerThreadData;
 
 } // namespace js
 
 struct DtoaState;
 
+#ifdef JS_SIMULATOR_ARM64
+namespace vixl {
+class Simulator;
+}
+#endif
+
 namespace js {
 
 extern MOZ_COLD void
 ReportOutOfMemory(ExclusiveContext* cx);
 
 extern MOZ_COLD void
 ReportAllocationOverflow(ExclusiveContext* maybecx);
 
@@ -81,19 +87,24 @@ class ActivationIterator;
 class AsmJSActivation;
 class AsmJSModule;
 class MathCache;
 
 namespace jit {
 class JitRuntime;
 class JitActivation;
 struct PcScriptCache;
-class Simulator;
 struct AutoFlushICache;
 class CompileRuntime;
+
+#ifdef JS_SIMULATOR_ARM64
+typedef vixl::Simulator Simulator;
+#elif defined(JS_SIMULATOR)
+class Simulator;
+#endif
 }
 
 /*
  * GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
  * given pc in a script. We use the script->code pointer to tag the cache,
  * instead of the script address itself, so that source notes are always found
  * by offset from the bytecode with which they were generated.
  */
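
The typedef is the load-bearing piece: everything else in the engine refers to `js::jit::Simulator`, and under JS_SIMULATOR_ARM64 that name now resolves to `vixl::Simulator`, so call sites such as the JitCommon.h macros need no per-backend spelling. A sketch of a portable call site, assuming a simulator build:

    // vixl::Simulator on ARM64; the jit-owned Simulator elsewhere.
    js::jit::Simulator* sim = js::jit::Simulator::Current();
    uintptr_t limit = sim->stackLimit();
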