Bug 1213746 - IonMonkey: MIPS64: Import MacroAssembler-mips64. r=lth f=rankov
author Heiher <r@hev.cc>
Sat, 07 Nov 2015 05:51:06 +0800
changeset 307559 a1b585ab78b5a0eed5bfcdb4b2718613176bbc5d
parent 307558 5140720f961bddbb58c3f441c62403be4e6e7a9e
child 307560 59bc78b37a726bb8715d04cc046745aed3763a1f
push id 1040
push user raliiev@mozilla.com
push date Mon, 29 Feb 2016 17:11:22 +0000
reviewers lth
bugs 1213746
milestone 45.0a1
Bug 1213746 - IonMonkey: MIPS64: Import MacroAssembler-mips64. r=lth f=rankov
---
 config/check_macroassembler_style.py          |    2 +-
 js/src/jit/MacroAssembler.h                   |   14 +-
 js/src/jit/mips64/MacroAssembler-mips64-inl.h |  109 +
 js/src/jit/mips64/MacroAssembler-mips64.cpp   | 2754 +++++++++++++++++++++++++
 js/src/jit/mips64/MacroAssembler-mips64.h     | 1283 ++++++++++++
 js/src/moz.build                              |    1 +
 6 files changed, 4159 insertions(+), 4 deletions(-)
 create mode 100644 js/src/jit/mips64/MacroAssembler-mips64-inl.h
 create mode 100644 js/src/jit/mips64/MacroAssembler-mips64.cpp
 create mode 100644 js/src/jit/mips64/MacroAssembler-mips64.h
--- a/config/check_macroassembler_style.py
+++ b/config/check_macroassembler_style.py
@@ -25,17 +25,17 @@ from __future__ import print_function
 import difflib
 import os
 import re
 import subprocess
 import sys
 from check_utils import get_all_toplevel_filenames
 
 architecture_independent = set([ 'generic' ])
-all_architecture_names = set([ 'x86', 'x64', 'arm', 'arm64', 'mips32' ])
+all_architecture_names = set([ 'x86', 'x64', 'arm', 'arm64', 'mips32', 'mips64' ])
 all_shared_architecture_names = set([ 'x86_shared', 'mips_shared', 'arm', 'arm64' ])
 
 reBeforeArg = "(?<=[(,\s])"
 reArgType = "(?P<type>[\w\s:*&]+)"
 reArgName = "(?P<name>\s\w+)"
 reArgDefault = "(?P<default>(?:\s=[^,)]+)?)"
 reAfterArg = "(?=[,)])"
 reMatchArg = re.compile(reBeforeArg + reArgType + reArgName + reArgDefault + reAfterArg)
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -17,16 +17,18 @@
 #elif defined(JS_CODEGEN_X64)
 # include "jit/x64/MacroAssembler-x64.h"
 #elif defined(JS_CODEGEN_ARM)
 # include "jit/arm/MacroAssembler-arm.h"
 #elif defined(JS_CODEGEN_ARM64)
 # include "jit/arm64/MacroAssembler-arm64.h"
 #elif defined(JS_CODEGEN_MIPS32)
 # include "jit/mips32/MacroAssembler-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/MacroAssembler-mips64.h"
 #elif defined(JS_CODEGEN_NONE)
 # include "jit/none/MacroAssembler-none.h"
 #else
 # error "Unknown architecture!"
 #endif
 #include "jit/AtomicOp.h"
 #include "jit/IonInstrumentation.h"
 #include "jit/JitCompartment.h"
@@ -57,17 +59,17 @@
 // to the method declarations.  If there is any difference, then you either
 // forgot to define the method in one of the macro assemblers, or you forgot to
 // update the annotation of the macro assembler declaration.
 //
 // Some convenient short-cuts are used to avoid repeating the same list of
 // architectures on each method declaration, such as PER_ARCH and
 // PER_SHARED_ARCH.
 
-# define ALL_ARCH mips32, arm, arm64, x86, x64
+# define ALL_ARCH mips32, mips64, arm, arm64, x86, x64
 # define ALL_SHARED_ARCH arm, arm64, x86_shared, mips_shared
 
 // * How this macro works:
 //
 // DEFINED_ON is a macro which checks if, for the current architecture, the
 // method is defined on the macro assembler or not.
 //
 // For each architecture, we have a macro named DEFINED_ON_arch.  This macro is
@@ -102,16 +104,17 @@
 // annotation on the method declaration.
 
 # define DEFINED_ON_x86
 # define DEFINED_ON_x64
 # define DEFINED_ON_x86_shared
 # define DEFINED_ON_arm
 # define DEFINED_ON_arm64
 # define DEFINED_ON_mips32
+# define DEFINED_ON_mips64
 # define DEFINED_ON_mips_shared
 # define DEFINED_ON_none
 
 // Specialize for each architecture.
 #if defined(JS_CODEGEN_X86)
 # undef DEFINED_ON_x86
 # define DEFINED_ON_x86 define
 # undef DEFINED_ON_x86_shared
@@ -127,16 +130,21 @@
 #elif defined(JS_CODEGEN_ARM64)
 # undef DEFINED_ON_arm64
 # define DEFINED_ON_arm64 define
 #elif defined(JS_CODEGEN_MIPS32)
 # undef DEFINED_ON_mips32
 # define DEFINED_ON_mips32 define
 # undef DEFINED_ON_mips_shared
 # define DEFINED_ON_mips_shared define
+#elif defined(JS_CODEGEN_MIPS64)
+# undef DEFINED_ON_mips64
+# define DEFINED_ON_mips64 define
+# undef DEFINED_ON_mips_shared
+# define DEFINED_ON_mips_shared define
 #elif defined(JS_CODEGEN_NONE)
 # undef DEFINED_ON_none
 # define DEFINED_ON_none crash
 #else
 # error "Unknown architecture!"
 #endif
 
 # define DEFINED_ON_RESULT_crash   { MOZ_CRASH(); }
@@ -416,23 +424,23 @@ class MacroAssembler : public MacroAssem
     // It is maintained by all stack manipulation functions below.
     uint32_t framePushed_;
 
   public:
     // ===============================================================
     // Stack manipulation functions.
 
     void PushRegsInMask(LiveRegisterSet set)
-                            DEFINED_ON(arm, arm64, mips32, x86_shared);
+                            DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
     void PushRegsInMask(LiveGeneralRegisterSet set);
 
     void PopRegsInMask(LiveRegisterSet set);
     void PopRegsInMask(LiveGeneralRegisterSet set);
     void PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
-                                 DEFINED_ON(arm, arm64, mips32, x86_shared);
+                                 DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
 
     void Push(const Operand op) DEFINED_ON(x86_shared);
     void Push(Register reg) PER_SHARED_ARCH;
     void Push(const Imm32 imm) PER_SHARED_ARCH;
     void Push(const ImmWord imm) PER_SHARED_ARCH;
     void Push(const ImmPtr imm) PER_SHARED_ARCH;
     void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
     void Push(FloatRegister reg) PER_SHARED_ARCH;
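
The DEFINED_ON bookkeeping above is easiest to see with a concrete expansion. The following minimal, self-contained C++ sketch shows the token-pasting idea the comments describe; the names (SELECT, RESULT_*, MY_CRASH) are hypothetical and not the actual macros in jit/MacroAssembler.h:

#include <cstdio>
#include <cstdlib>

#define MY_CRASH() do { std::fprintf(stderr, "unreachable\n"); std::abort(); } while (0)

// One flag macro per architecture: the active one expands to `define`,
// inactive ones expand to nothing, and JS_CODEGEN_NONE would use `crash`.
#define DEFINED_ON_mips64 define
#define DEFINED_ON_x86

// Map the flag token to what follows the method declaration.
#define RESULT_crash   { MY_CRASH(); }
#define RESULT_define
#define RESULT_        = delete

#define SELECT2(tok)   RESULT_##tok
#define SELECT(flag)   SELECT2(flag)   // extra hop so `flag` expands first

struct MacroAssembler {
    // `define` selects RESULT_define (empty): a plain declaration.
    void lshiftPtr(int imm) SELECT(DEFINED_ON_mips64);
    // DEFINED_ON_x86 is empty here, selecting RESULT_: `= delete`.
    void onlyOnX86(int imm) SELECT(DEFINED_ON_x86);
};

void MacroAssembler::lshiftPtr(int imm) { std::printf("lshiftPtr(%d)\n", imm); }

int main() {
    MacroAssembler masm;
    masm.lshiftPtr(3);     // OK: declared for mips64 and defined above
    // masm.onlyOnX86(1);  // would not compile: deleted on this "architecture"
    return 0;
}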
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64-inl.h
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_MacroAssembler_mips64_inl_h
+#define jit_mips64_MacroAssembler_mips64_inl_h
+
+#include "jit/mips64/MacroAssembler-mips64.h"
+
+#include "jit/mips-shared/MacroAssembler-mips-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Logical instructions
+
+void
+MacroAssembler::andPtr(Register src, Register dest)
+{
+    ma_and(dest, src);
+}
+
+void
+MacroAssembler::andPtr(Imm32 imm, Register dest)
+{
+    ma_and(dest, imm);
+}
+
+void
+MacroAssembler::and64(Imm64 imm, Register64 dest)
+{
+    ma_li(ScratchRegister, ImmWord(imm.value));
+    ma_and(dest.reg, ScratchRegister);
+}
+
+void
+MacroAssembler::orPtr(Register src, Register dest)
+{
+    ma_or(dest, src);
+}
+
+void
+MacroAssembler::orPtr(Imm32 imm, Register dest)
+{
+    ma_or(dest, imm);
+}
+
+void
+MacroAssembler::or64(Register64 src, Register64 dest)
+{
+    ma_or(dest.reg, src.reg);
+}
+
+void
+MacroAssembler::xorPtr(Register src, Register dest)
+{
+    ma_xor(dest, src);
+}
+
+void
+MacroAssembler::xorPtr(Imm32 imm, Register dest)
+{
+    ma_xor(dest, imm);
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshiftPtr(Imm32 imm, Register dest)
+{
+    ma_dsll(dest, dest, imm);
+}
+
+void
+MacroAssembler::lshift64(Imm32 imm, Register64 dest)
+{
+    ma_dsll(dest.reg, dest.reg, imm);
+}
+
+void
+MacroAssembler::rshiftPtr(Imm32 imm, Register dest)
+{
+    ma_dsrl(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest)
+{
+    ma_dsra(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshift64(Imm32 imm, Register64 dest)
+{
+    ma_dsrl(dest.reg, dest.reg, imm);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_MacroAssembler_mips64_inl_h */
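
Two things are worth keeping in mind when reading the helpers above. On a 64-bit target, Register64 wraps a single GPR, so the 64-bit helpers (and64, or64, lshift64, rshift64) reuse the same ma_* primitives as the pointer-sized ones. And the ma_dsll/ma_dsrl/ma_dsra helpers they call (defined in MacroAssembler-mips64.cpp below) split shift amounts at 32 because MIPS64 immediate shifts encode only a 5-bit amount. A sketch with hypothetical, simplified stand-ins for the real jit types:

#include <cstdint>

// Hypothetical, simplified stand-ins for the real jit register types.
struct Register   { int code_; };
struct Register64 { Register reg; };  // one GPR here; a {high, low} pair on mips32

// Scalar model of what the emitted shifts compute. Amounts of 32..63 are
// encoded with dsll32/dsrl32/dsra32 (which add 32 to the 5-bit field), but
// the computed result is an ordinary 64-bit shift either way.
static inline uint64_t ModelDsll(uint64_t v, unsigned n) { return v << n; }  // n in [0, 63]
static inline uint64_t ModelDsrl(uint64_t v, unsigned n) { return v >> n; }  // logical
static inline int64_t  ModelDsra(int64_t v, unsigned n)  { return v >> n; }  // arithmetic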
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -0,0 +1,2754 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/MacroAssembler-mips64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/mips64/Simulator-mips64.h"
+#include "jit/MoveEmitter.h"
+#include "jit/SharedICRegisters.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+
+static_assert(sizeof(intptr_t) == 8, "Not 64-bit clean.");
+
+void
+MacroAssemblerMIPS64Compat::convertBoolToInt32(Register src, Register dest)
+{
+    // Note that C++ bool is only 1 byte, so zero extend it to clear the
+    // higher-order bits.
+    ma_and(dest, src, Imm32(0xff));
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt32ToDouble(Register src, FloatRegister dest)
+{
+    as_mtc1(src, dest);
+    as_cvtdw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt32ToDouble(const Address& src, FloatRegister dest)
+{
+    ma_ls(dest, src);
+    as_cvtdw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt32ToDouble(const BaseIndex& src, FloatRegister dest)
+{
+    computeScaledAddress(src, ScratchRegister);
+    convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertUInt32ToDouble(Register src, FloatRegister dest)
+{
+    // We use SecondScratchDoubleReg because MacroAssembler::loadFromTypedArray
+    // calls with ScratchDoubleReg as dest.
+    MOZ_ASSERT(dest != SecondScratchDoubleReg);
+
+    // Subtract INT32_MIN to get a positive number
+    ma_subu(ScratchRegister, src, Imm32(INT32_MIN));
+
+    // Convert value
+    as_mtc1(ScratchRegister, dest);
+    as_cvtdw(dest, dest);
+
+    // Add unsigned value of INT32_MIN
+    ma_lid(SecondScratchDoubleReg, 2147483648.0);
+    as_addd(dest, dest, SecondScratchDoubleReg);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register64 src, Register temp, FloatRegister dest)
+{
+    Label positive, done;
+    ma_b(src.reg, src.reg, &positive, NotSigned, ShortJump);
+
+    MOZ_ASSERT(src.reg != ScratchRegister);
+    MOZ_ASSERT(src.reg != SecondScratchReg);
+
+    ma_and(ScratchRegister, src.reg, Imm32(1));
+    ma_dsrl(SecondScratchReg, src.reg, Imm32(1));
+    ma_or(ScratchRegister, SecondScratchReg);
+    as_dmtc1(ScratchRegister, dest);
+    as_cvtdl(dest, dest);
+    addDouble(dest, dest);
+    ma_b(&done, ShortJump);
+
+    bind(&positive);
+    as_dmtc1(src.reg, dest);
+    as_cvtdl(dest, dest);
+
+    bind(&done);
+}
+
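
convertUInt64ToDouble above works around cvt.d.l treating its operand as signed: when the sign bit is set, the value is halved before conversion and the result doubled afterwards, and OR-ing the shifted-out low bit back in preserves correct round-to-nearest behaviour. A scalar sketch of the same computation (illustrative only, not part of the patch):

#include <cstdint>

static inline double UInt64ToDouble(uint64_t x) {
    if ((int64_t)x >= 0)                   // sign bit clear: the signed convert is exact
        return (double)(int64_t)x;
    // Halve, convert, double. Keeping the low bit set (if it was set)
    // preserves the sticky information round-to-nearest needs.
    uint64_t halved = (x >> 1) | (x & 1);
    double d = (double)(int64_t)halved;    // models dmtc1 + cvt.d.l
    return d + d;                          // models addDouble(dest, dest)
}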
+void
+MacroAssemblerMIPS64Compat::convertUInt32ToFloat32(Register src, FloatRegister dest)
+{
+    Label positive, done;
+    ma_b(src, src, &positive, NotSigned, ShortJump);
+
+    // We cannot do the same as convertUInt32ToDouble because float32 doesn't
+    // have enough precision.
+    convertUInt32ToDouble(src, dest);
+    convertDoubleToFloat32(dest, dest);
+    ma_b(&done, ShortJump);
+
+    bind(&positive);
+    convertInt32ToFloat32(src, dest);
+
+    bind(&done);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertDoubleToFloat32(FloatRegister src, FloatRegister dest)
+{
+    as_cvtsd(dest, src);
+}
+
+// Convert the floating point value to an integer; if it did not fit, it was
+// clamped to INT32_MIN/INT32_MAX, which we can then test for.
+// NOTE: if the value really was INT32_MAX or INT32_MIN then the check will
+// report a spurious failure.
+void
+MacroAssemblerMIPS64Compat::branchTruncateDouble(FloatRegister src, Register dest,
+                                                 Label* fail)
+{
+    Label test, success;
+    as_truncwd(ScratchDoubleReg, src);
+    as_mfc1(dest, ScratchDoubleReg);
+
+    ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+    ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+}
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPS64Compat::convertDoubleToInt32(FloatRegister src, Register dest,
+                                                 Label* fail, bool negativeZeroCheck)
+{
+    // Convert double to int, then convert back and check if we have the
+    // same number.
+    as_cvtwd(ScratchDoubleReg, src);
+    as_mfc1(dest, ScratchDoubleReg);
+    as_cvtdw(ScratchDoubleReg, ScratchDoubleReg);
+    ma_bc1d(src, ScratchDoubleReg, fail, Assembler::DoubleNotEqualOrUnordered);
+
+    if (negativeZeroCheck) {
+        Label notZero;
+        ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
+        // Test and bail for -0.0 when the integer result is 0.
+        // Move the top word of the double into the output reg; if it
+        // equals 0x80000000 (INT32_MIN), the original value was -0.0.
+        moveFromDoubleHi(src, dest);
+        ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+        bind(&notZero);
+    }
+}
+
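
The round trip above (convert to int32, convert back, compare) succeeds exactly for integral values in int32 range, with a separate top-word test for -0.0. A scalar sketch of the net effect (illustrative only, not part of the patch):

#include <cmath>
#include <cstdint>

static inline bool DoubleIsInt32(double d, int32_t* out, bool negativeZeroCheck) {
    // Models cvt.w.d + cvt.d.w + the DoubleNotEqualOrUnordered branch:
    // fail for NaN, out-of-range, or fractional inputs.
    if (!(d >= INT32_MIN && d <= INT32_MAX) || d != std::trunc(d))
        return false;
    // Models the top-word check: the high word of -0.0 is 0x80000000.
    if (negativeZeroCheck && d == 0 && std::signbit(d))
        return false;
    *out = (int32_t)d;
    return true;
}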
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPS64Compat::convertFloat32ToInt32(FloatRegister src, Register dest,
+                                                  Label* fail, bool negativeZeroCheck)
+{
+    // Converting the floating point value to an integer and then converting it
+    // back to a float32 would not work, as float to int32 conversions are
+    // clamping (e.g. float(INT32_MAX + 1) would get converted into INT32_MAX
+    // and then back to float(INT32_MAX + 1)).  If this ever happens, we just
+    // bail out.
+    as_cvtws(ScratchFloat32Reg, src);
+    as_mfc1(dest, ScratchFloat32Reg);
+    as_cvtsw(ScratchFloat32Reg, ScratchFloat32Reg);
+    ma_bc1s(src, ScratchFloat32Reg, fail, Assembler::DoubleNotEqualOrUnordered);
+
+    // Bail out in the clamped cases.
+    ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+
+    if (negativeZeroCheck) {
+        Label notZero;
+        ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
+        // Test and bail for -0.0 when the integer result is 0.
+        // Move the top word of the double into the output reg; if it
+        // equals 0x80000000 (INT32_MIN), the original value was -0.0.
+        moveFromDoubleHi(src, dest);
+        ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+        bind(&notZero);
+    }
+}
+
+void
+MacroAssemblerMIPS64Compat::convertFloat32ToDouble(FloatRegister src, FloatRegister dest)
+{
+    as_cvtds(dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTruncateFloat32(FloatRegister src, Register dest,
+                                                  Label* fail)
+{
+    Label test, success;
+    as_truncws(ScratchFloat32Reg, src);
+    as_mfc1(dest, ScratchFloat32Reg);
+
+    ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt32ToFloat32(Register src, FloatRegister dest)
+{
+    as_mtc1(src, dest);
+    as_cvtsw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::convertInt32ToFloat32(const Address& src, FloatRegister dest)
+{
+    ma_ls(dest, src);
+    as_cvtsw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::addDouble(FloatRegister src, FloatRegister dest)
+{
+    as_addd(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::subDouble(FloatRegister src, FloatRegister dest)
+{
+    as_subd(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::mulDouble(FloatRegister src, FloatRegister dest)
+{
+    as_muld(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::divDouble(FloatRegister src, FloatRegister dest)
+{
+    as_divd(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::negateDouble(FloatRegister reg)
+{
+    as_negd(reg, reg);
+}
+
+void
+MacroAssemblerMIPS64Compat::inc64(AbsoluteAddress dest)
+{
+    ma_li(ScratchRegister, ImmWord(uintptr_t(dest.addr)));
+    as_ld(SecondScratchReg, ScratchRegister, 0);
+    as_daddiu(SecondScratchReg, SecondScratchReg, 1);
+    as_sd(SecondScratchReg, ScratchRegister, 0);
+}
+
+void
+MacroAssemblerMIPS64Compat::movq(Register rs, Register rd)
+{
+    ma_move(rd, rs);
+}
+
+void
+MacroAssemblerMIPS64::ma_li(Register dest, AbsoluteLabel* label)
+{
+    MOZ_ASSERT(!label->bound());
+    // Thread the patch list through the unpatched address word in the
+    // instruction stream.
+    BufferOffset bo = m_buffer.nextOffset();
+    ma_liPatchable(dest, ImmWord(label->prev()));
+    label->setPrev(bo.getOffset());
+}
+
+void
+MacroAssemblerMIPS64::ma_li(Register dest, ImmWord imm)
+{
+    if ((int64_t)imm.value >= INT16_MIN && (int64_t)imm.value <= INT16_MAX) {
+        as_addiu(dest, zero, imm.value);
+    } else if (imm.value <= UINT16_MAX) {
+        as_ori(dest, zero, Imm16::Lower(Imm32(imm.value)).encode());
+    } else if (0 == (imm.value & 0xffff) && 0 == (imm.value >> 32)) {
+        as_lui(dest, Imm16::Upper(Imm32(imm.value)).encode());
+    } else if (imm.value <= UINT32_MAX) {
+        as_lui(dest, Imm16::Upper(Imm32(imm.value)).encode());
+        as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
+    } else {
+        if (imm.value >> 48) {
+            as_lui(dest, Imm16::Upper(Imm32(imm.value >> 32)).encode());
+            if ((imm.value >> 32) & 0xffff)
+              as_ori(dest, dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
+        } else {
+            as_ori(dest, zero, Imm16::Lower(Imm32(imm.value >> 32)).encode());
+        }
+        as_dsll(dest, dest, 16);
+        if ((imm.value >> 16) & 0xffff)
+          as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
+        as_dsll(dest, dest, 16);
+        if (imm.value & 0xffff)
+          as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
+    }
+}
+
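
The general 64-bit path of ma_li materializes the immediate in 16-bit chunks: lui/ori for the upper half, then two dsll-by-16/ori steps, skipping ori for all-zero chunks. A scalar model of the worst-case sequence (illustrative only; lui also sign-extends into the upper word on MIPS64, but those bits are shifted out by the two dsll steps):

#include <cstdint>

static inline uint64_t BuildImm64(uint64_t imm) {
    // Chunks, high to low: bits 63..48, 47..32, 31..16, 15..0.
    uint64_t dest;
    dest  = ((imm >> 48) & 0xffff) << 16;  // lui: bits 63..48 land in 31..16
    dest |= (imm >> 32) & 0xffff;          // ori: bits 47..32
    dest <<= 16;                           // dsll 16
    dest |= (imm >> 16) & 0xffff;          // ori: bits 31..16
    dest <<= 16;                           // dsll 16
    dest |= imm & 0xffff;                  // ori: bits 15..0
    return dest;                           // == imm
}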
+// This method generates a lui, dsll and ori instruction block that can be
+// modified by UpdateLoad64Value, either during compilation (e.g.
+// Assembler::bind) or during execution (e.g. jit::PatchJump).
+void
+MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmPtr imm)
+{
+    return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void
+MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmWord imm, LiFlags flags)
+{
+    if (Li64 == flags) {
+        m_buffer.ensureSpace(6 * sizeof(uint32_t));
+        as_lui(dest, Imm16::Upper(Imm32(imm.value >> 32)).encode());
+        as_ori(dest, dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
+        as_dsll(dest, dest, 16);
+        as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
+        as_dsll(dest, dest, 16);
+        as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
+    } else {
+        m_buffer.ensureSpace(4 * sizeof(uint32_t));
+        as_lui(dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
+        as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
+        as_drotr32(dest, dest, 48);
+        as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
+    }
+}
+
+// Shifts
+void
+MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Imm32 shift)
+{
+    if (31 < shift.value)
+      as_dsll32(rd, rt, shift.value);
+    else
+      as_dsll(rd, rt, shift.value);
+}
+
+void
+MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Imm32 shift)
+{
+    if (31 < shift.value)
+      as_dsrl32(rd, rt, shift.value);
+    else
+      as_dsrl(rd, rt, shift.value);
+}
+
+void
+MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Imm32 shift)
+{
+    if (31 < shift.value)
+      as_dsra32(rd, rt, shift.value);
+    else
+      as_dsra(rd, rt, shift.value);
+}
+
+void
+MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Imm32 shift)
+{
+    if (31 < shift.value)
+      as_drotr32(rd, rt, shift.value);
+    else
+      as_drotr(rd, rt, shift.value);
+}
+
+void
+MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Imm32 shift)
+{
+    uint32_t s = 64 - shift.value;
+
+    if (31 < s)
+      as_drotr32(rd, rt, s);
+    else
+      as_drotr(rd, rt, s);
+}
+
+void
+MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Register shift)
+{
+    as_dsllv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Register shift)
+{
+    as_dsrlv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Register shift)
+{
+    as_dsrav(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Register shift)
+{
+    as_drotrv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Register shift)
+{
+    ma_negu(ScratchRegister, shift);
+    as_drotrv(rd, rt, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPS64::ma_dins(Register rt, Register rs, Imm32 pos, Imm32 size)
+{
+    if (pos.value >= 0 && pos.value < 32) {
+        if (size.value >= 2)
+          as_dinsm(rt, rs, pos.value, size.value);
+        else
+          as_dins(rt, rs, pos.value, size.value);
+    } else {
+        as_dinsu(rt, rs, pos.value, size.value);
+    }
+}
+
+void
+MacroAssemblerMIPS64::ma_dext(Register rt, Register rs, Imm32 pos, Imm32 size)
+{
+    if (pos.value >= 0 && pos.value < 32) {
+        if (size.value > 32)
+          as_dextm(rt, rs, pos.value, size.value);
+        else
+          as_dext(rt, rs, pos.value, size.value);
+    } else {
+        as_dextu(rt, rs, pos.value, size.value);
+    }
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void
+MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs, Imm32 imm)
+{
+    if (Imm16::IsInSignedRange(imm.value)) {
+        as_daddiu(rd, rs, imm.value);
+    } else {
+        ma_li(ScratchRegister, imm);
+        as_daddu(rd, rs, ScratchRegister);
+    }
+}
+
+void
+MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs)
+{
+    as_daddu(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPS64::ma_daddu(Register rd, Imm32 imm)
+{
+    ma_daddu(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
+{
+    as_addu(rd, rs, rt);
+    as_daddu(ScratchRegister, rs, rt);
+    ma_b(rd, ScratchRegister, overflow, Assembler::NotEqual);
+}
+
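
ma_addTestOverflow relies on a MIPS64 idiom: addu produces the sign-extended 32-bit sum while daddu produces the exact 64-bit sum, and the two agree exactly when the 32-bit addition did not overflow. A scalar sketch (illustrative only, not part of the patch; the int32_t cast models hardware wrapping):

#include <cstdint>

static inline bool Add32Overflows(int32_t a, int32_t b, int32_t* out) {
    int64_t narrow = (int32_t)((uint32_t)a + (uint32_t)b);  // addu: wrapped, sign-extended
    int64_t wide   = (int64_t)a + (int64_t)b;               // daddu: exact
    *out = (int32_t)narrow;
    return narrow != wide;                                  // the branch to `overflow`
}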
+void
+MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow)
+{
+    // Check for signed range because of as_daddiu
+    if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
+        as_addiu(rd, rs, imm.value);
+        as_daddiu(ScratchRegister, rs, imm.value);
+        ma_b(rd, ScratchRegister, overflow, Assembler::NotEqual);
+    } else {
+        ma_li(ScratchRegister, imm);
+        ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
+    }
+}
+
+// Subtract.
+void
+MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs, Imm32 imm)
+{
+    if (Imm16::IsInSignedRange(-imm.value)) {
+        as_daddiu(rd, rs, -imm.value);
+    } else {
+        ma_li(ScratchRegister, imm);
+        as_dsubu(rd, rs, ScratchRegister);
+    }
+}
+
+void
+MacroAssemblerMIPS64::ma_dsubu(Register rd, Imm32 imm)
+{
+    ma_dsubu(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPS64::ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
+{
+    as_subu(rd, rs, rt);
+    as_dsubu(ScratchRegister, rs, rt);
+    ma_b(rd, ScratchRegister, overflow, Assembler::NotEqual);
+}
+
+void
+MacroAssemblerMIPS64::ma_dmult(Register rs, Imm32 imm)
+{
+    ma_li(ScratchRegister, imm);
+    as_dmult(rs, ScratchRegister);
+}
+
+// Memory.
+
+void
+MacroAssemblerMIPS64::ma_load(Register dest, Address address,
+                              LoadStoreSize size, LoadStoreExtension extension)
+{
+    int16_t encodedOffset;
+    Register base;
+    if (!Imm16::IsInSignedRange(address.offset)) {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_daddu(ScratchRegister, address.base, ScratchRegister);
+        base = ScratchRegister;
+        encodedOffset = Imm16(0).encode();
+    } else {
+        encodedOffset = Imm16(address.offset).encode();
+        base = address.base;
+    }
+
+    switch (size) {
+      case SizeByte:
+        if (ZeroExtend == extension)
+            as_lbu(dest, base, encodedOffset);
+        else
+            as_lb(dest, base, encodedOffset);
+        break;
+      case SizeHalfWord:
+        if (ZeroExtend == extension)
+            as_lhu(dest, base, encodedOffset);
+        else
+            as_lh(dest, base, encodedOffset);
+        break;
+      case SizeWord:
+        if (ZeroExtend == extension)
+            as_lwu(dest, base, encodedOffset);
+        else
+            as_lw(dest, base, encodedOffset);
+        break;
+      case SizeDouble:
+        as_ld(dest, base, encodedOffset);
+        break;
+      default:
+        MOZ_CRASH("Invalid argument for ma_load");
+    }
+}
+
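
ma_load and ma_store fold the offset into the instruction's signed 16-bit immediate field when it fits, and otherwise materialize it and add it to the base in ScratchRegister. A sketch of the address split (illustrative only, not part of the patch):

#include <cstdint>

struct SplitAddress { uint64_t base; int16_t offset; };

static inline SplitAddress SplitForLoadStore(uint64_t base, int32_t offset) {
    if (offset >= INT16_MIN && offset <= INT16_MAX)
        return { base, (int16_t)offset };  // e.g. ld dest, offset(base)
    // ma_li scratch, offset; daddu scratch, base, scratch; ld dest, 0(scratch)
    return { base + (int64_t)offset, 0 };
}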
+void
+MacroAssemblerMIPS64::ma_store(Register data, Address address, LoadStoreSize size,
+                               LoadStoreExtension extension)
+{
+    int16_t encodedOffset;
+    Register base;
+    if (!Imm16::IsInSignedRange(address.offset)) {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_daddu(ScratchRegister, address.base, ScratchRegister);
+        base = ScratchRegister;
+        encodedOffset = Imm16(0).encode();
+    } else {
+        encodedOffset = Imm16(address.offset).encode();
+        base = address.base;
+    }
+
+    switch (size) {
+      case SizeByte:
+        as_sb(data, base, encodedOffset);
+        break;
+      case SizeHalfWord:
+        as_sh(data, base, encodedOffset);
+        break;
+      case SizeWord:
+        as_sw(data, base, encodedOffset);
+        break;
+      case SizeDouble:
+        as_sd(data, base, encodedOffset);
+        break;
+      default:
+        MOZ_CRASH("Invalid argument for ma_store");
+    }
+}
+
+void
+MacroAssemblerMIPS64Compat::computeScaledAddress(const BaseIndex& address, Register dest)
+{
+    int32_t shift = Imm32::ShiftOf(address.scale).value;
+    if (shift) {
+        ma_dsll(ScratchRegister, address.index, Imm32(shift));
+        as_daddu(dest, address.base, ScratchRegister);
+    } else {
+        as_daddu(dest, address.base, address.index);
+    }
+}
+
+// Shortcut for when we know we're transferring word-sized (64-bit) data.
+void
+MacroAssemblerMIPS64::ma_pop(Register r)
+{
+    as_ld(r, StackPointer, 0);
+    as_daddiu(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+void
+MacroAssemblerMIPS64::ma_push(Register r)
+{
+    if (r == sp) {
+        // Pushing sp requires one more instruction.
+        ma_move(ScratchRegister, sp);
+        r = ScratchRegister;
+    }
+
+    as_daddiu(StackPointer, StackPointer, (int32_t)-sizeof(intptr_t));
+    as_sd(r, StackPointer, 0);
+}
+
+// Branches when done from within mips-specific code.
+void
+MacroAssemblerMIPS64::ma_b(Register lhs, ImmWord imm, Label* label, Condition c, JumpKind jumpKind)
+{
+    MOZ_ASSERT(c != Overflow);
+    if (imm.value == 0) {
+        if (c == Always || c == AboveOrEqual)
+            ma_b(label, jumpKind);
+        else if (c == Below)
+            ; // This condition is always false. No branch required.
+        else
+            branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+    } else {
+        MOZ_ASSERT(lhs != ScratchRegister);
+        ma_li(ScratchRegister, imm);
+        ma_b(lhs, ScratchRegister, label, c, jumpKind);
+    }
+}
+
+void
+MacroAssemblerMIPS64::ma_b(Register lhs, Address addr, Label* label, Condition c, JumpKind jumpKind)
+{
+    MOZ_ASSERT(lhs != ScratchRegister);
+    ma_load(ScratchRegister, addr, SizeDouble);
+    ma_b(lhs, ScratchRegister, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS64::ma_b(Address addr, Imm32 imm, Label* label, Condition c, JumpKind jumpKind)
+{
+    ma_load(SecondScratchReg, addr, SizeDouble);
+    ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c, JumpKind jumpKind)
+{
+    ma_load(SecondScratchReg, addr, SizeDouble);
+    ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill)
+{
+    if (label->bound()) {
+        // Generate the long jump for calls because return address has to be
+        // the address after the reserved block.
+        addLongJump(nextOffset());
+        ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
+        as_jalr(ScratchRegister);
+        if (delaySlotFill == FillDelaySlot)
+            as_nop();
+        return;
+    }
+
+    // Second word holds a pointer to the next branch in label's chain.
+    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    // Make the whole branch continuous in the buffer. The six
+    // instructions written below include the delay slot.
+    m_buffer.ensureSpace(6 * sizeof(uint32_t));
+
+    BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+    writeInst(nextInChain);
+    label->use(bo.getOffset());
+    // Leave space for long jump.
+    as_nop();
+    as_nop();
+    as_nop();
+    if (delaySlotFill == FillDelaySlot)
+        as_nop();
+}
+
+void
+MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
+{
+    MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
+    InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+    if (label->bound()) {
+        int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+        if (BOffImm16::IsInRange(offset))
+            jumpKind = ShortJump;
+
+        if (jumpKind == ShortJump) {
+            MOZ_ASSERT(BOffImm16::IsInRange(offset));
+            code.setBOffImm16(BOffImm16(offset));
+            writeInst(code.encode());
+            as_nop();
+            return;
+        }
+
+        if (code.encode() == inst_beq.encode()) {
+            // Handle long jump
+            addLongJump(nextOffset());
+            ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
+            as_jr(ScratchRegister);
+            as_nop();
+            return;
+        }
+
+        // Handle a long conditional branch: invert the condition and branch
+        // over the long jump sequence emitted below.
+        writeInst(invertBranch(code, BOffImm16(7 * sizeof(uint32_t))).encode());
+        // No need for a "nop" here because we can clobber scratch.
+        addLongJump(nextOffset());
+        ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
+        as_jr(ScratchRegister);
+        as_nop();
+        return;
+    }
+
+    // Generate open jump and link it to a label.
+
+    // Second word holds a pointer to the next branch in label's chain.
+    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    if (jumpKind == ShortJump) {
+        // Make the whole branch continuous in the buffer.
+        m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+        // Indicate that this is a short jump with offset 4.
+        code.setBOffImm16(BOffImm16(4));
+        BufferOffset bo = writeInst(code.encode());
+        writeInst(nextInChain);
+        label->use(bo.getOffset());
+        return;
+    }
+
+    bool conditional = code.encode() != inst_beq.encode();
+
+    // Make the whole branch continuous in the buffer. The seven
+    // instructions written below include the conditional nop.
+    m_buffer.ensureSpace(7 * sizeof(uint32_t));
+
+    BufferOffset bo = writeInst(code.encode());
+    writeInst(nextInChain);
+    label->use(bo.getOffset());
+    // Leave space for potential long jump.
+    as_nop();
+    as_nop();
+    as_nop();
+    as_nop();
+    if (conditional)
+        as_nop();
+}
+
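
ma_bal and branchWithCode thread unbound forward branches through a linked list kept in the instruction stream itself: each pending branch is a branch word followed by a second word holding the buffer offset of the previous branch to the same label, with INVALID_OFFSET terminating the chain; binding the label later walks and patches the list. A sketch of that bookkeeping with hypothetical simplified types (illustrative only, not part of the patch):

#include <cstdint>
#include <vector>

static const uint32_t INVALID_OFFSET = UINT32_MAX;

struct Label {
    bool used = false;
    uint32_t offset = INVALID_OFFSET;  // head of the chain while unbound
};

// Link one more forward branch into `label`'s pending chain.
static void LinkBranch(std::vector<uint32_t>& buffer, Label& label, uint32_t branchInst) {
    uint32_t nextInChain = label.used ? label.offset : INVALID_OFFSET;
    uint32_t bo = (uint32_t)(buffer.size() * sizeof(uint32_t));
    buffer.push_back(branchInst);   // the branch instruction word
    buffer.push_back(nextInChain);  // second word: previous branch, or end marker
    label.offset = bo;              // this branch is the new chain head
    label.used = true;
}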
+void
+MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm, Condition c)
+{
+    ma_li(ScratchRegister, imm);
+    ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+void
+MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmPtr imm, Condition c)
+{
+    ma_li(ScratchRegister, ImmWord(uintptr_t(imm.value)));
+    ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+// fp instructions
+void
+MacroAssemblerMIPS64::ma_lid(FloatRegister dest, double value)
+{
+    ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
+
+    ma_li(ScratchRegister, imm);
+    moveToDouble(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPS64::ma_mv(FloatRegister src, ValueOperand dest)
+{
+    as_dmfc1(dest.valueReg(), src);
+}
+
+void
+MacroAssemblerMIPS64::ma_mv(ValueOperand src, FloatRegister dest)
+{
+    as_dmtc1(src.valueReg(), dest);
+}
+
+void
+MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address)
+{
+    if (Imm16::IsInSignedRange(address.offset)) {
+        as_ls(ft, address.base, address.offset);
+    } else {
+        MOZ_ASSERT(address.base != ScratchRegister);
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_daddu(ScratchRegister, address.base, ScratchRegister);
+        as_ls(ft, ScratchRegister, 0);
+    }
+}
+
+void
+MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address)
+{
+    if (Imm16::IsInSignedRange(address.offset)) {
+        as_ld(ft, address.base, address.offset);
+    } else {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_daddu(ScratchRegister, address.base, ScratchRegister);
+        as_ld(ft, ScratchRegister, 0);
+    }
+}
+
+void
+MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address)
+{
+    if (Imm16::IsInSignedRange(address.offset)) {
+        as_sd(ft, address.base, address.offset);
+    } else {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_daddu(ScratchRegister, address.base, ScratchRegister);
+        as_sd(ft, ScratchRegister, 0);
+    }
+}
+
+void
+MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address)
+{
+    if (Imm16::IsInSignedRange(address.offset)) {
+        as_ss(ft, address.base, address.offset);
+    } else {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_daddu(ScratchRegister, address.base, ScratchRegister);
+        as_ss(ft, ScratchRegister, 0);
+    }
+}
+
+void
+MacroAssemblerMIPS64::ma_pop(FloatRegister fs)
+{
+    ma_ld(fs, Address(StackPointer, 0));
+    as_daddiu(StackPointer, StackPointer, sizeof(double));
+}
+
+void
+MacroAssemblerMIPS64::ma_push(FloatRegister fs)
+{
+    as_daddiu(StackPointer, StackPointer, (int32_t)-sizeof(double));
+    ma_sd(fs, Address(StackPointer, 0));
+}
+
+bool
+MacroAssemblerMIPS64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr)
+{
+    uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), JitFrame_IonJS);
+
+    asMasm().Push(Imm32(descriptor)); // descriptor_
+    asMasm().Push(ImmPtr(fakeReturnAddr));
+
+    return true;
+}
+
+void
+MacroAssemblerMIPS64Compat::add32(Register src, Register dest)
+{
+    as_addu(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::add32(Imm32 imm, Register dest)
+{
+    ma_addu(dest, dest, imm);
+}
+
+void
+MacroAssemblerMIPS64Compat::add32(Imm32 imm, const Address& dest)
+{
+    load32(dest, SecondScratchReg);
+    ma_addu(SecondScratchReg, imm);
+    store32(SecondScratchReg, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::sub32(Imm32 imm, Register dest)
+{
+    ma_subu(dest, dest, imm);
+}
+
+void
+MacroAssemblerMIPS64Compat::sub32(Register src, Register dest)
+{
+    as_subu(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::addPtr(Register src, Register dest)
+{
+    ma_daddu(dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::addPtr(const Address& src, Register dest)
+{
+    loadPtr(src, ScratchRegister);
+    ma_daddu(dest, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPS64Compat::subPtr(Register src, Register dest)
+{
+    as_dsubu(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::move32(Imm32 imm, Register dest)
+{
+    ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPS64Compat::move32(Register src, Register dest)
+{
+    ma_move(dest, src);
+}
+
+void
+MacroAssemblerMIPS64Compat::movePtr(Register src, Register dest)
+{
+    ma_move(dest, src);
+}
+void
+MacroAssemblerMIPS64Compat::movePtr(ImmWord imm, Register dest)
+{
+    ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPS64Compat::movePtr(ImmGCPtr imm, Register dest)
+{
+    ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest)
+{
+    movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+void
+MacroAssemblerMIPS64Compat::movePtr(AsmJSImmPtr imm, Register dest)
+{
+    append(AsmJSAbsoluteLink(CodeOffsetLabel(nextOffset().getOffset()), imm.kind()));
+    ma_liPatchable(dest, ImmWord(-1));
+}
+
+void
+MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address, Register dest)
+{
+    ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load8ZeroExtend(const BaseIndex& src, Register dest)
+{
+    ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load8SignExtend(const Address& address, Register dest)
+{
+    ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load8SignExtend(const BaseIndex& src, Register dest)
+{
+    ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load16ZeroExtend(const Address& address, Register dest)
+{
+    ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load16ZeroExtend(const BaseIndex& src, Register dest)
+{
+    ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load16SignExtend(const Address& address, Register dest)
+{
+    ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load16SignExtend(const BaseIndex& src, Register dest)
+{
+    ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::load32(const Address& address, Register dest)
+{
+    ma_load(dest, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::load32(const BaseIndex& address, Register dest)
+{
+    ma_load(dest, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::load32(AbsoluteAddress address, Register dest)
+{
+    movePtr(ImmPtr(address.addr), ScratchRegister);
+    load32(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::load32(AsmJSAbsoluteAddress address, Register dest)
+{
+    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
+    load32(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadPtr(const Address& address, Register dest)
+{
+    ma_load(dest, address, SizeDouble);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadPtr(const BaseIndex& src, Register dest)
+{
+    ma_load(dest, src, SizeDouble);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadPtr(AbsoluteAddress address, Register dest)
+{
+    movePtr(ImmPtr(address.addr), ScratchRegister);
+    loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadPtr(AsmJSAbsoluteAddress address, Register dest)
+{
+    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
+    loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadPrivate(const Address& address, Register dest)
+{
+    loadPtr(address, dest);
+    ma_dsll(dest, dest, Imm32(1));
+}
+
+void
+MacroAssemblerMIPS64Compat::loadDouble(const Address& address, FloatRegister dest)
+{
+    ma_ld(dest, address);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadDouble(const BaseIndex& src, FloatRegister dest)
+{
+    computeScaledAddress(src, SecondScratchReg);
+    ma_ld(dest, Address(SecondScratchReg, src.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::loadFloatAsDouble(const Address& address, FloatRegister dest)
+{
+    ma_ls(dest, address);
+    as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
+{
+    loadFloat32(src, dest);
+    as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadFloat32(const Address& address, FloatRegister dest)
+{
+    ma_ls(dest, address);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadFloat32(const BaseIndex& src, FloatRegister dest)
+{
+    computeScaledAddress(src, SecondScratchReg);
+    ma_ls(dest, Address(SecondScratchReg, src.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::store8(Imm32 imm, const Address& address)
+{
+    ma_li(SecondScratchReg, imm);
+    ma_store(SecondScratchReg, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPS64Compat::store8(Register src, const Address& address)
+{
+    ma_store(src, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPS64Compat::store8(Imm32 imm, const BaseIndex& dest)
+{
+    ma_store(imm, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPS64Compat::store8(Register src, const BaseIndex& dest)
+{
+    ma_store(src, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPS64Compat::store16(Imm32 imm, const Address& address)
+{
+    ma_li(SecondScratchReg, imm);
+    ma_store(SecondScratchReg, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store16(Register src, const Address& address)
+{
+    ma_store(src, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store16(Imm32 imm, const BaseIndex& dest)
+{
+    ma_store(imm, dest, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store16(Register src, const BaseIndex& address)
+{
+    ma_store(src, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store32(Register src, AbsoluteAddress address)
+{
+    movePtr(ImmPtr(address.addr), ScratchRegister);
+    store32(src, Address(ScratchRegister, 0));
+}
+
+void
+MacroAssemblerMIPS64Compat::store32(Register src, const Address& address)
+{
+    ma_store(src, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store32(Imm32 src, const Address& address)
+{
+    move32(src, SecondScratchReg);
+    ma_store(SecondScratchReg, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store32(Imm32 imm, const BaseIndex& dest)
+{
+    ma_store(imm, dest, SizeWord);
+}
+
+void
+MacroAssemblerMIPS64Compat::store32(Register src, const BaseIndex& dest)
+{
+    ma_store(src, dest, SizeWord);
+}
+
+template <typename T>
+void
+MacroAssemblerMIPS64Compat::storePtr(ImmWord imm, T address)
+{
+    ma_li(SecondScratchReg, imm);
+    ma_store(SecondScratchReg, address, SizeDouble);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmWord imm, Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmWord imm, BaseIndex address);
+
+template <typename T>
+void
+MacroAssemblerMIPS64Compat::storePtr(ImmPtr imm, T address)
+{
+    storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmPtr imm, Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmPtr imm, BaseIndex address);
+
+template <typename T>
+void
+MacroAssemblerMIPS64Compat::storePtr(ImmGCPtr imm, T address)
+{
+    storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmGCPtr imm, Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmGCPtr imm, BaseIndex address);
+
+void
+MacroAssemblerMIPS64Compat::storePtr(Register src, const Address& address)
+{
+    ma_store(src, address, SizeDouble);
+}
+
+void
+MacroAssemblerMIPS64Compat::storePtr(Register src, const BaseIndex& address)
+{
+    ma_store(src, address, SizeDouble);
+}
+
+void
+MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest)
+{
+    movePtr(ImmPtr(dest.addr), ScratchRegister);
+    storePtr(src, Address(ScratchRegister, 0));
+}
+
+void
+MacroAssemblerMIPS64Compat::clampIntToUint8(Register reg)
+{
+    // Look at (reg >> 8): if it is 0, then reg doesn't need clamping;
+    // if it is < 0, then we want to clamp to 0;
+    // otherwise, we want to clamp to 255.
+    Label done;
+    ma_move(ScratchRegister, reg);
+    asMasm().rshiftPtrArithmetic(Imm32(8), ScratchRegister);
+    ma_b(ScratchRegister, ScratchRegister, &done, Assembler::Zero, ShortJump);
+    {
+        Label negative;
+        ma_b(ScratchRegister, ScratchRegister, &negative, Assembler::Signed, ShortJump);
+        {
+            ma_li(reg, Imm32(255));
+            ma_b(&done, ShortJump);
+        }
+        bind(&negative);
+        {
+            ma_move(reg, zero);
+        }
+    }
+    bind(&done);
+}
+
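
A scalar sketch of the three-way test described by the comment above (illustrative only, not part of the patch; assumes arithmetic right shift, as on MIPS):

#include <cstdint>

static inline int32_t ClampIntToUint8(int32_t v) {
    if ((v >> 8) == 0)            // 0 only when v is already in [0, 255]
        return v;
    return (v < 0) ? 0 : 255;     // negative clamps to 0, large clamps to 255
}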
+// Note: this function clobbers the input register.
+void
+MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
+{
+    MOZ_ASSERT(input != ScratchDoubleReg);
+    Label positive, done;
+
+    // <= 0 or NaN --> 0
+    zeroDouble(ScratchDoubleReg);
+    branchDouble(DoubleGreaterThan, input, ScratchDoubleReg, &positive);
+    {
+        move32(Imm32(0), output);
+        jump(&done);
+    }
+
+    bind(&positive);
+
+    // Add 0.5 and truncate.
+    loadConstantDouble(0.5, ScratchDoubleReg);
+    addDouble(ScratchDoubleReg, input);
+
+    Label outOfRange;
+
+    branchTruncateDouble(input, output, &outOfRange);
+    branch32(Assembler::Above, output, Imm32(255), &outOfRange);
+    {
+        // Check if we had a tie.
+        convertInt32ToDouble(output, ScratchDoubleReg);
+        branchDouble(DoubleNotEqual, input, ScratchDoubleReg, &done);
+
+        // It was a tie. Mask out the ones bit to get an even value.
+        // See also js_TypedArray_uint8_clamp_double.
+        and32(Imm32(~1), output);
+        jump(&done);
+    }
+
+    // > 255 --> 255
+    bind(&outOfRange);
+    {
+        move32(Imm32(255), output);
+    }
+
+    bind(&done);
+}
+
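
clampDoubleToUint8 rounds half to even by biasing with 0.5, truncating, and clearing the low bit when the biased value was an exact integer (i.e. the fraction was exactly 0.5). A scalar sketch of the same sequence (illustrative only, not part of the patch):

#include <cmath>
#include <cstdint>

static inline uint8_t ClampDoubleToUint8(double d) {
    if (!(d > 0))                // NaN and anything <= 0 clamp to 0
        return 0;
    d += 0.5;                    // bias, then truncate
    double t = std::trunc(d);    // models branchTruncateDouble's result
    if (t > 255)
        return 255;              // too large clamps to 255
    int32_t i = (int32_t)t;
    if (t == d)                  // exact tie: the fraction was exactly 0.5
        i &= ~1;                 // round half to even (see the comment above)
    return (uint8_t)i;
}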
+void
+MacroAssemblerMIPS64Compat::subPtr(Imm32 imm, const Register dest)
+{
+    ma_dsubu(dest, dest, imm);
+}
+
+void
+MacroAssemblerMIPS64Compat::subPtr(const Address& addr, const Register dest)
+{
+    loadPtr(addr, SecondScratchReg);
+    subPtr(SecondScratchReg, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::subPtr(Register src, const Address& dest)
+{
+    loadPtr(dest, SecondScratchReg);
+    subPtr(src, SecondScratchReg);
+    storePtr(SecondScratchReg, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::addPtr(Imm32 imm, const Register dest)
+{
+    ma_daddu(dest, imm);
+}
+
+void
+MacroAssemblerMIPS64Compat::addPtr(Imm32 imm, const Address& dest)
+{
+    loadPtr(dest, ScratchRegister);
+    addPtr(imm, ScratchRegister);
+    storePtr(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchDouble(DoubleCondition cond, FloatRegister lhs,
+                                         FloatRegister rhs, Label* label)
+{
+    ma_bc1d(lhs, rhs, label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchFloat(DoubleCondition cond, FloatRegister lhs,
+                                        FloatRegister rhs, Label* label)
+{
+    ma_bc1s(lhs, rhs, label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestGCThing(Condition cond, const Address& address, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+         (cond == Equal) ? AboveOrEqual : Below);
+}
+void
+MacroAssemblerMIPS64Compat::branchTestGCThing(Condition cond, const BaseIndex& src, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+         (cond == Equal) ? AboveOrEqual : Below);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestPrimitive(Condition cond, const ValueOperand& value,
+                                                Label* label)
+{
+    splitTag(value, SecondScratchReg);
+    branchTestPrimitive(cond, SecondScratchReg, label);
+}
+void
+MacroAssemblerMIPS64Compat::branchTestPrimitive(Condition cond, Register tag, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET), label,
+         (cond == Equal) ? Below : AboveOrEqual);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    splitTag(value, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestInt32(Condition cond, Register tag, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestInt32(Condition cond, const Address& address, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestInt32(Condition cond, const BaseIndex& src, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestBoolean(Condition cond, const ValueOperand& value,
+                                               Label* label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    splitTag(value, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestBoolean(Condition cond, Register tag, Label* label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestBoolean(Condition cond, const BaseIndex& src, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == NotEqual);
+    splitTag(value, SecondScratchReg);
+    branchTestDouble(cond, SecondScratchReg, label);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestDouble(Condition cond, Register tag, Label* label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == NotEqual);
+    Condition actual = (cond == Equal) ? BelowOrEqual : Above;
+    ma_b(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE), label, actual);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestDouble(Condition cond, const Address& address, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(address, SecondScratchReg);
+    branchTestDouble(cond, SecondScratchReg, label);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestDouble(Condition cond, const BaseIndex& src, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, SecondScratchReg);
+    branchTestDouble(cond, SecondScratchReg, label);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestNull(Condition cond, const ValueOperand& value, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    splitTag(value, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestNull(Condition cond, Register tag, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestNull(Condition cond, const BaseIndex& src, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestNull(Condition cond, const Address& address, Label* label) {
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::testNullSet(Condition cond, const ValueOperand& value, Register dest)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    splitTag(value, SecondScratchReg);
+    ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestObject(Condition cond, const ValueOperand& value, Label* label)
+{
+    splitTag(value, SecondScratchReg);
+    branchTestObject(cond, SecondScratchReg, label);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestObject(Condition cond, Register tag, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestObject(Condition cond, const BaseIndex& src, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestObject(Condition cond, const Address& address, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::testObjectSet(Condition cond, const ValueOperand& value, Register dest)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    splitTag(value, SecondScratchReg);
+    ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestString(Condition cond, const ValueOperand& value, Label* label)
+{
+    splitTag(value, SecondScratchReg);
+    branchTestString(cond, SecondScratchReg, label);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestString(Condition cond, Register tag, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestString(Condition cond, const BaseIndex& src, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
+{
+    splitTag(value, SecondScratchReg);
+    branchTestSymbol(cond, SecondScratchReg, label);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestSymbol(Condition cond, const Register& tag, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestSymbol(Condition cond, const BaseIndex& src, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestUndefined(Condition cond, const ValueOperand& value,
+                                                Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    splitTag(value, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestUndefined(Condition cond, Register tag, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestUndefined(Condition cond, const BaseIndex& src, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestUndefined(Condition cond, const Address& address, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::testUndefinedSet(Condition cond, const ValueOperand& value, Register dest)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    splitTag(value, SecondScratchReg);
+    ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
+{
+    splitTag(value, SecondScratchReg);
+    branchTestNumber(cond, SecondScratchReg, label);
+}
+
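+// Number tags (double, then int32) sort below all other tags, so a single
+// unsigned comparison against the upper-inclusive bound covers both. Per
+// Value.h, this encodes roughly:
+//
+//   isNumber = (tag <= JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET)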
+void
+MacroAssemblerMIPS64Compat::branchTestNumber(Condition cond, Register tag, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET), label,
+         cond == Equal ? BelowOrEqual : Above);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestMagic(Condition cond, const ValueOperand& value, Label* label)
+{
+    splitTag(value, SecondScratchReg);
+    branchTestMagic(cond, SecondScratchReg, label);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestMagic(Condition cond, Register tag, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestMagic(Condition cond, const Address& address, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(address, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestMagic(Condition cond, const BaseIndex& src, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, SecondScratchReg);
+    ma_b(SecondScratchReg, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestValue(Condition cond, const ValueOperand& value,
+                                            const Value& v, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    moveValue(v, ScratchRegister);
+    ma_b(value.valueReg(), ScratchRegister, label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestValue(Condition cond, const Address& valaddr,
+                                            const ValueOperand& value, Label* label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    loadPtr(Address(valaddr.base, valaddr.offset), ScratchRegister);
+    ma_b(value.valueReg(), ScratchRegister, label, cond);
+}
+
+// unboxing code
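+//
+// On MIPS64, a Value is a single 64-bit word: the tag occupies the bits at
+// and above JSVAL_TAG_SHIFT (47) and the payload the bits below it (see
+// Value.h). A rough sketch of what the dext/dsrl sequences below compute:
+//
+//   payload = bits & ((uint64_t(1) << JSVAL_TAG_SHIFT) - 1)   // dext 0, 47
+//   tag     = bits >> JSVAL_TAG_SHIFT                         // dsrl 47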
+void
+MacroAssemblerMIPS64Compat::unboxNonDouble(const ValueOperand& operand, Register dest)
+{
+    ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxNonDouble(const Address& src, Register dest)
+{
+    loadPtr(Address(src.base, src.offset), dest);
+    ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxNonDouble(const BaseIndex& src, Register dest)
+{
+    computeScaledAddress(src, SecondScratchReg);
+    loadPtr(Address(SecondScratchReg, src.offset), dest);
+    ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxInt32(const ValueOperand& operand, Register dest)
+{
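+    // Sign-extend the low 32 bits: shift them to the top, then arithmetic
+    // shift back down.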
+    ma_dsll(dest, operand.valueReg(), Imm32(32));
+    ma_dsra(dest, dest, Imm32(32));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxInt32(const Operand& operand, Register dest)
+{
+    switch (operand.getTag()) {
+    case Operand::REG:
+        ma_dsll(dest, operand.toReg(), Imm32(32));
+        ma_dsra(dest, dest, Imm32(32));
+        break;
+    case Operand::MEM:
+        unboxInt32(operand.toAddress(), dest);
+        break;
+    case Operand::FREG:
+    default:
+        MOZ_CRASH("unexpected operand kind");
+        break;
+    }
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxInt32(const Address& src, Register dest)
+{
+    load32(Address(src.base, src.offset), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxInt32(const BaseIndex& src, Register dest)
+{
+    computeScaledAddress(src, SecondScratchReg);
+    load32(Address(SecondScratchReg, src.offset), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxBoolean(const ValueOperand& operand, Register dest)
+{
+    ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(32));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxBoolean(const Operand& operand, Register dest)
+{
+    switch (operand.getTag()) {
+    case Operand::REG:
+        ma_dext(dest, operand.toReg(), Imm32(0), Imm32(32));
+        break;
+    case Operand::MEM:
+        unboxBoolean(operand.toAddress(), dest);
+        break;
+    case Operand::FREG:
+    default:
+        MOZ_CRASH("unexpected operand kind");
+        break;
+    }
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxBoolean(const Address& src, Register dest)
+{
+    ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxBoolean(const BaseIndex& src, Register dest)
+{
+    computeScaledAddress(src, SecondScratchReg);
+    ma_load(dest, Address(SecondScratchReg, src.offset), SizeWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxDouble(const ValueOperand& operand, FloatRegister dest)
+{
+    as_dmtc1(operand.valueReg(), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxDouble(const Address& src, FloatRegister dest)
+{
+    ma_ld(dest, Address(src.base, src.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxString(const ValueOperand& operand, Register dest)
+{
+    unboxNonDouble(operand, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxString(const Operand& operand, Register dest)
+{
+    switch (operand.getTag()) {
+    case Operand::REG:
+        ma_dext(dest, operand.toReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+        break;
+    case Operand::MEM:
+        unboxNonDouble(operand.toAddress(), dest);
+        break;
+    case Operand::FREG:
+    default:
+        MOZ_CRASH("unexpected operand kind");
+        break;
+    }
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxString(const Address& src, Register dest)
+{
+    unboxNonDouble(src, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxSymbol(const Operand& operand, Register dest)
+{
+    switch (operand.getTag()) {
+    case Operand::REG:
+        ma_dext(dest, operand.toReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+        break;
+    case Operand::MEM:
+        unboxNonDouble(operand.toAddress(), dest);
+        break;
+    case Operand::FREG:
+    default:
+        MOZ_CRASH("unexpected operand kind");
+        break;
+    }
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxSymbol(const Address& src, Register dest)
+{
+    unboxNonDouble(src, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxObject(const ValueOperand& src, Register dest)
+{
+    unboxNonDouble(src, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxObject(const Operand& src, Register dest)
+{
+    switch (src.getTag()) {
+    case Operand::REG:
+        ma_dext(dest, src.toReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+        break;
+    case Operand::MEM:
+        unboxNonDouble(src.toAddress(), dest);
+        break;
+    case Operand::FREG:
+    default:
+        MOZ_CRASH("unexpected operand kind");
+        break;
+    }
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxObject(const Address& src, Register dest)
+{
+    unboxNonDouble(src, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxValue(const ValueOperand& src, AnyRegister dest)
+{
+    if (dest.isFloat()) {
+        Label notInt32, end;
+        branchTestInt32(Assembler::NotEqual, src, &notInt32);
+        convertInt32ToDouble(src.valueReg(), dest.fpu());
+        ma_b(&end, ShortJump);
+        bind(&notInt32);
+        unboxDouble(src, dest.fpu());
+        bind(&end);
+    } else {
+        unboxNonDouble(src, dest.gpr());
+    }
+}
+
+void
+MacroAssemblerMIPS64Compat::unboxPrivate(const ValueOperand& src, Register dest)
+{
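+    // Private pointers are boxed shifted right by one (see Value.h's
+    // PRIVATE_PTR_TO_JSVAL_IMPL), so shift left to recover the pointer.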
+    ma_dsll(dest, src.valueReg(), Imm32(1));
+}
+
+void
+MacroAssemblerMIPS64Compat::boxDouble(FloatRegister src, const ValueOperand& dest)
+{
+    as_dmfc1(dest.valueReg(), src);
+}
+
+void
+MacroAssemblerMIPS64Compat::boxNonDouble(JSValueType type, Register src,
+                                         const ValueOperand& dest)
+{
+    MOZ_ASSERT(src != dest.valueReg());
+    boxValue(type, src, dest.valueReg());
+}
+
+void
+MacroAssemblerMIPS64Compat::boolValueToDouble(const ValueOperand& operand, FloatRegister dest)
+{
+    convertBoolToInt32(operand.valueReg(), ScratchRegister);
+    convertInt32ToDouble(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::int32ValueToDouble(const ValueOperand& operand,
+                                               FloatRegister dest)
+{
+    convertInt32ToDouble(operand.valueReg(), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::boolValueToFloat32(const ValueOperand& operand,
+                                               FloatRegister dest)
+{
+    convertBoolToInt32(operand.valueReg(), ScratchRegister);
+    convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::int32ValueToFloat32(const ValueOperand& operand,
+                                                FloatRegister dest)
+{
+    convertInt32ToFloat32(operand.valueReg(), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadConstantFloat32(float f, FloatRegister dest)
+{
+    ma_lis(dest, f);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadInt32OrDouble(const Address& src, FloatRegister dest)
+{
+    Label notInt32, end;
+    // If it's an int, convert it to double.
+    loadPtr(Address(src.base, src.offset), ScratchRegister);
+    ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
+    branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+    loadPtr(Address(src.base, src.offset), SecondScratchReg);
+    convertInt32ToDouble(SecondScratchReg, dest);
+    ma_b(&end, ShortJump);
+
+    // Not an int, just load as double.
+    bind(&notInt32);
+    ma_ld(dest, src);
+    bind(&end);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest)
+{
+    Label notInt32, end;
+
+    // If it's an int, convert it to double.
+    computeScaledAddress(addr, SecondScratchReg);
+    // Since we only have one scratch, we need to stomp over it with the tag.
+    loadPtr(Address(SecondScratchReg, 0), ScratchRegister);
+    ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
+    branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+    computeScaledAddress(addr, SecondScratchReg);
+    loadPtr(Address(SecondScratchReg, 0), SecondScratchReg);
+    convertInt32ToDouble(SecondScratchReg, dest);
+    ma_b(&end, ShortJump);
+
+    // Not an int, just load as double.
+    bind(&notInt32);
+    // Recompute the address, since the scratch register holding it was
+    // overwritten when loading the tag.
+    computeScaledAddress(addr, SecondScratchReg);
+    loadDouble(Address(SecondScratchReg, 0), dest);
+    bind(&end);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadConstantDouble(double dp, FloatRegister dest)
+{
+    ma_lid(dest, dp);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestInt32Truthy(bool b, const ValueOperand& value, Label* label)
+{
+    ma_dext(ScratchRegister, value.valueReg(), Imm32(0), Imm32(32));
+    ma_b(ScratchRegister, ScratchRegister, label, b ? NonZero : Zero);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestStringTruthy(bool b, const ValueOperand& value, Label* label)
+{
+    unboxString(value, SecondScratchReg);
+    load32(Address(SecondScratchReg, JSString::offsetOfLength()), SecondScratchReg);
+    ma_b(SecondScratchReg, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestDoubleTruthy(bool b, FloatRegister value, Label* label)
+{
+    ma_lid(ScratchDoubleReg, 0.0);
+    DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
+    ma_bc1d(value, ScratchDoubleReg, label, cond);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchTestBooleanTruthy(bool b, const ValueOperand& operand,
+                                                    Label* label)
+{
+    unboxBoolean(operand, SecondScratchReg);
+    ma_b(SecondScratchReg, SecondScratchReg, label, b ? NonZero : Zero);
+}
+
+Register
+MacroAssemblerMIPS64Compat::extractObject(const Address& address, Register scratch)
+{
+    loadPtr(Address(address.base, address.offset), scratch);
+    ma_dext(scratch, scratch, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+    return scratch;
+}
+
+Register
+MacroAssemblerMIPS64Compat::extractTag(const Address& address, Register scratch)
+{
+    loadPtr(Address(address.base, address.offset), scratch);
+    ma_dext(scratch, scratch, Imm32(JSVAL_TAG_SHIFT), Imm32(64 - JSVAL_TAG_SHIFT));
+    return scratch;
+}
+
+Register
+MacroAssemblerMIPS64Compat::extractTag(const BaseIndex& address, Register scratch)
+{
+    computeScaledAddress(address, scratch);
+    return extractTag(Address(scratch, address.offset), scratch);
+}
+
+template <typename T>
+void
+MacroAssemblerMIPS64Compat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
+                                              MIRType slotType)
+{
+    if (valueType == MIRType_Double) {
+        storeDouble(value.reg().typedReg().fpu(), dest);
+        return;
+    }
+
+    // For known integers and booleans, we can just store the unboxed value if
+    // the slot has the same type.
+    if ((valueType == MIRType_Int32 || valueType == MIRType_Boolean) && slotType == valueType) {
+        if (value.constant()) {
+            Value val = value.value();
+            if (valueType == MIRType_Int32)
+                store32(Imm32(val.toInt32()), dest);
+            else
+                store32(Imm32(val.toBoolean() ? 1 : 0), dest);
+        } else {
+            store32(value.reg().typedReg().gpr(), dest);
+        }
+        return;
+    }
+
+    if (value.constant())
+        storeValue(value.value(), dest);
+    else
+        storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);
+}
+
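+// Explicit instantiations for the two destination types callers use, so the
+// template definition can stay in this translation unit.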
+template void
+MacroAssemblerMIPS64Compat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const Address& dest,
+                                              MIRType slotType);
+
+template void
+MacroAssemblerMIPS64Compat::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const BaseIndex& dest,
+                                              MIRType slotType);
+
+void
+MacroAssemblerMIPS64Compat::moveValue(const Value& val, Register dest)
+{
+    jsval_layout jv = JSVAL_TO_IMPL(val);
+    writeDataRelocation(val);
+    movWithPatch(ImmWord(jv.asBits), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::moveValue(const Value& val, const ValueOperand& dest)
+{
+    moveValue(val, dest.valueReg());
+}
+
+/* There are three paths through a backedge jump. They are listed here in the
+ * order in which instructions are executed.
+ *  - The short jump is simple:
+ *     b offset            # Jumps directly to target.
+ *     lui at, addr1_hl    # In delay slot. Don't care about 'at' here.
+ *
+ *  - The long jump to loop header:
+ *      b label1
+ *      lui at, addr1_hl   # In delay slot. We use the value in 'at' later.
+ *    label1:
+ *      ori at, addr1_lh
+ *      drotr32 at, at, 48
+ *      ori at, addr1_ll
+ *      jr at
+ *      lui at, addr2_hl   # In delay slot. Don't care about 'at' here.
+ *
+ *  - The long jump to interrupt loop:
+ *      b label2
+ *      ...
+ *      jr at
+ *    label2:
+ *      lui at, addr2_hl   # In delay slot. Don't care about 'at' here.
+ *      ori at, addr2_lh
+ *      drotr32 at, at, 48
+ *      ori at, addr2_ll
+ *      jr at
+ *      nop                # In delay slot.
+ *
+ * The backedge is done this way to avoid patching the lui+ori pair while it
+ * is being executed. See also jit::PatchBackedge().
+ */
+CodeOffsetJump
+MacroAssemblerMIPS64Compat::backedgeJump(RepatchLabel* label, Label* documentation)
+{
+    // Only one branch per label.
+    MOZ_ASSERT(!label->used());
+    uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
+    BufferOffset bo = nextOffset();
+    label->use(bo.getOffset());
+
+    // Backedges are short jumps when bound, but can become long when patched.
+    m_buffer.ensureSpace(16 * sizeof(uint32_t));
+    if (label->bound()) {
+        int32_t offset = label->offset() - bo.getOffset();
+        MOZ_ASSERT(BOffImm16::IsInRange(offset));
+        as_b(BOffImm16(offset));
+    } else {
+        // By default, jump to "label1", i.e. to the loop header.
+        as_b(BOffImm16(2 * sizeof(uint32_t)));
+    }
+    // No need for a nop here; the next instruction safely fills the delay slot.
+    ma_liPatchable(ScratchRegister, ImmWord(dest));
+    MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 5 * sizeof(uint32_t));
+    as_jr(ScratchRegister);
+    // No need for a nop here; the next instruction safely fills the delay slot.
+    ma_liPatchable(ScratchRegister, ImmWord(dest));
+    as_jr(ScratchRegister);
+    as_nop();
+    MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 12 * sizeof(uint32_t));
+    return CodeOffsetJump(bo.getOffset());
+}
+
+CodeOffsetJump
+MacroAssemblerMIPS64Compat::jumpWithPatch(RepatchLabel* label, Label* documentation)
+{
+    // Only one branch per label.
+    MOZ_ASSERT(!label->used());
+    uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    BufferOffset bo = nextOffset();
+    label->use(bo.getOffset());
+    addLongJump(bo);
+    ma_liPatchable(ScratchRegister, ImmWord(dest));
+    as_jr(ScratchRegister);
+    as_nop();
+    return CodeOffsetJump(bo.getOffset());
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/MIPS interface.
+/////////////////////////////////////////////////////////////////
+void
+MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, Operand dst)
+{
+    storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, const BaseIndex& dest)
+{
+    computeScaledAddress(dest, SecondScratchReg);
+    storeValue(val, Address(SecondScratchReg, dest.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg, BaseIndex dest)
+{
+    computeScaledAddress(dest, ScratchRegister);
+
+    int32_t offset = dest.offset;
+    if (!Imm16::IsInSignedRange(offset)) {
+        ma_li(SecondScratchReg, Imm32(offset));
+        as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
+        offset = 0;
+    }
+
+    storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, const Address& dest)
+{
+    storePtr(val.valueReg(), Address(dest.base, dest.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg, Address dest)
+{
+    MOZ_ASSERT(dest.base != SecondScratchReg);
+
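+    // Materialize the boxed form in SecondScratchReg: move the tag up to bit
+    // JSVAL_TAG_SHIFT, then insert the payload into the low bits. Roughly:
+    // boxed = (uint64_t(tag) << JSVAL_TAG_SHIFT) | payload.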
+    ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+    ma_dsll(SecondScratchReg, SecondScratchReg, Imm32(JSVAL_TAG_SHIFT));
+    ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+    storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(const Value& val, Address dest)
+{
+    jsval_layout jv = JSVAL_TO_IMPL(val);
+    if (val.isMarkable()) {
+        writeDataRelocation(val);
+        movWithPatch(ImmWord(jv.asBits), SecondScratchReg);
+    } else {
+        ma_li(SecondScratchReg, ImmWord(jv.asBits));
+    }
+    storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::storeValue(const Value& val, BaseIndex dest)
+{
+    computeScaledAddress(dest, ScratchRegister);
+
+    int32_t offset = dest.offset;
+    if (!Imm16::IsInSignedRange(offset)) {
+        ma_li(SecondScratchReg, Imm32(offset));
+        as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
+        offset = 0;
+    }
+    storeValue(val, Address(ScratchRegister, offset));
+}
+
+void
+MacroAssemblerMIPS64Compat::loadValue(const BaseIndex& addr, ValueOperand val)
+{
+    computeScaledAddress(addr, SecondScratchReg);
+    loadValue(Address(SecondScratchReg, addr.offset), val);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadValue(Address src, ValueOperand val)
+{
+    loadPtr(Address(src.base, src.offset), val.valueReg());
+}
+
+void
+MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload, ValueOperand dest)
+{
+    MOZ_ASSERT(dest.valueReg() != ScratchRegister);
+    if (payload != dest.valueReg())
+        ma_move(dest.valueReg(), payload);
+    ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+    ma_dins(dest.valueReg(), ScratchRegister, Imm32(JSVAL_TAG_SHIFT), Imm32(64 - JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::pushValue(ValueOperand val)
+{
+    // Allocate a stack slot for the Value.
+    subPtr(Imm32(sizeof(Value)), StackPointer);
+    // Store the Value.
+    storeValue(val, Address(StackPointer, 0));
+}
+
+void
+MacroAssemblerMIPS64Compat::pushValue(const Address& addr)
+{
+    // Load the value before allocating the stack slot, since addr.base may be sp.
+    loadPtr(Address(addr.base, addr.offset), ScratchRegister);
+    ma_dsubu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+    storePtr(ScratchRegister, Address(StackPointer, 0));
+}
+
+void
+MacroAssemblerMIPS64Compat::popValue(ValueOperand val)
+{
+    as_ld(val.valueReg(), StackPointer, 0);
+    as_daddiu(StackPointer, StackPointer, sizeof(Value));
+}
+
+void
+MacroAssemblerMIPS64Compat::breakpoint()
+{
+    as_break(0);
+}
+
+void
+MacroAssemblerMIPS64Compat::ensureDouble(const ValueOperand& source, FloatRegister dest,
+                                         Label* failure)
+{
+    Label isDouble, done;
+    Register tag = splitTagForTest(source);
+    branchTestDouble(Assembler::Equal, tag, &isDouble);
+    branchTestInt32(Assembler::NotEqual, tag, failure);
+
+    unboxInt32(source, ScratchRegister);
+    convertInt32ToDouble(ScratchRegister, dest);
+    jump(&done);
+
+    bind(&isDouble);
+    unboxDouble(source, dest);
+
+    bind(&done);
+}
+
+void
+MacroAssemblerMIPS64Compat::cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
+                                      Register dest)
+{
+    loadPtr(lhs, ScratchRegister);
+    movePtr(rhs, SecondScratchReg);
+    cmpPtrSet(cond, ScratchRegister, SecondScratchReg, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs,
+                                      Register dest)
+{
+    loadPtr(rhs, ScratchRegister);
+    cmpPtrSet(cond, lhs, ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::cmp32Set(Assembler::Condition cond, Register lhs, Address rhs,
+                                     Register dest)
+{
+    load32(rhs, ScratchRegister);
+    cmp32Set(cond, lhs, ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::checkStackAlignment()
+{
+#ifdef DEBUG
+    Label aligned;
+    as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
+    ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
+    as_break(BREAK_STACK_UNALIGNED);
+    bind(&aligned);
+#endif
+}
+
+void
+MacroAssembler::alignFrameForICArguments(AfterICSaveLive& aic)
+{
+    if (framePushed() % ABIStackAlignment != 0) {
+        aic.alignmentPadding = ABIStackAlignment - (framePushed() % ABIStackAlignment);
+        reserveStack(aic.alignmentPadding);
+    } else {
+        aic.alignmentPadding = 0;
+    }
+    MOZ_ASSERT(framePushed() % ABIStackAlignment == 0);
+    checkStackAlignment();
+}
+
+void
+MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic)
+{
+    if (aic.alignmentPadding != 0)
+        freeStack(aic.alignmentPadding);
+}
+
+void
+MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(void* handler)
+{
+    // Reserve space for exception information.
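+    // Round up to a multiple of ABIStackAlignment. Adding the full alignment
+    // (rather than alignment - 1) over-allocates by one alignment unit when
+    // the size is already aligned, which is harmless.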
+    int size = (sizeof(ResumeFromException) + ABIStackAlignment) & ~(ABIStackAlignment - 1);
+    subPtr(Imm32(size), StackPointer);
+    ma_move(a0, StackPointer); // Use a0 since it is the first function argument.
+
+    // Call the handler.
+    asMasm().setupUnalignedABICall(a1);
+    asMasm().passABIArg(a0);
+    asMasm().callWithABI(handler);
+
+    Label entryFrame;
+    Label catch_;
+    Label finally;
+    Label return_;
+    Label bailout;
+
+    // Already clobbered a0, so use it...
+    load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
+    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
+    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
+    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+    breakpoint(); // Invalid kind.
+
+    // No exception handler. Load the error value, load the new stack pointer
+    // and return from the entry frame.
+    bind(&entryFrame);
+    moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+
+    // We're going to be returning by the Ion calling convention.
+    ma_pop(ra);
+    as_jr(ra);
+    as_nop();
+
+    // If we found a catch handler, this must be a baseline frame. Restore
+    // state and jump to the catch block.
+    bind(&catch_);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+    jump(a0);
+
+    // If we found a finally block, this must be a baseline frame. Push
+    // two values expected by JSOP_RETSUB: BooleanValue(true) and the
+    // exception.
+    bind(&finally);
+    ValueOperand exception = ValueOperand(a1);
+    loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);
+
+    loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
+    loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+    loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
+
+    pushValue(BooleanValue(true));
+    pushValue(exception);
+    jump(a0);
+
+    // Only used in debug mode. Return BaselineFrame->returnValue() to the
+    // caller.
+    bind(&return_);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+    loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
+              JSReturnOperand);
+    ma_move(StackPointer, BaselineFrameReg);
+    pop(BaselineFrameReg);
+
+    // If profiling is enabled, then update the lastProfilingFrame to refer to
+    // the caller frame before returning.
+    {
+        Label skipProfilingInstrumentation;
+        // Test if profiler enabled.
+        AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
+        branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
+        profilerExitFrame();
+        bind(&skipProfilingInstrumentation);
+    }
+
+    ret();
+
+    // If we are bailing out to baseline to handle an exception, jump to
+    // the bailout tail stub.
+    bind(&bailout);
+    loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
+    ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK));
+    loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
+    jump(a1);
+}
+
+CodeOffsetLabel
+MacroAssemblerMIPS64Compat::toggledJump(Label* label)
+{
+    CodeOffsetLabel ret(nextOffset().getOffset());
+    ma_b(label);
+    return ret;
+}
+
+CodeOffsetLabel
+MacroAssemblerMIPS64Compat::toggledCall(JitCode* target, bool enabled)
+{
+    BufferOffset bo = nextOffset();
+    CodeOffsetLabel offset(bo.getOffset());
+    addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
+    ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+    if (enabled) {
+        as_jalr(ScratchRegister);
+        as_nop();
+    } else {
+        as_nop();
+        as_nop();
+    }
+    MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() == ToggledCallSize(nullptr));
+    return offset;
+}
+
+void
+MacroAssemblerMIPS64Compat::branchPtrInNurseryRange(Condition cond, Register ptr, Register temp,
+                                                    Label* label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    MOZ_ASSERT(ptr != temp);
+    MOZ_ASSERT(ptr != SecondScratchReg);
+
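+    // ptr is in the nursery iff unsigned(ptr - start) < nurserySize, so add
+    // -start and compare Below (or AboveOrEqual for NotEqual) against the
+    // size.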
+    const Nursery& nursery = GetJitContext()->runtime->gcNursery();
+    movePtr(ImmWord(-ptrdiff_t(nursery.start())), SecondScratchReg);
+    addPtr(ptr, SecondScratchReg);
+    branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual,
+              SecondScratchReg, Imm32(nursery.nurserySize()), label);
+}
+
+void
+MacroAssemblerMIPS64Compat::branchValueIsNurseryObject(Condition cond, ValueOperand value,
+                                                       Register temp, Label* label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+    // A Value representing the start of the nursery, tagged as a JSObject.
+    const Nursery& nursery = GetJitContext()->runtime->gcNursery();
+    Value start = ObjectValue(*reinterpret_cast<JSObject *>(nursery.start()));
+
+    movePtr(ImmWord(-ptrdiff_t(start.asRawBits())), SecondScratchReg);
+    addPtr(value.valueReg(), SecondScratchReg);
+    branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual,
+              SecondScratchReg, Imm32(nursery.nurserySize()), label);
+}
+
+void
+MacroAssemblerMIPS64Compat::profilerEnterFrame(Register framePtr, Register scratch)
+{
+    AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
+    loadPtr(activation, scratch);
+    storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+    storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void
+MacroAssemblerMIPS64Compat::profilerExitFrame()
+{
+    branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Stack manipulation functions.
+
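+// Registers are stored top-down within the reserved area: GPRs at the
+// highest offsets, then FPRs below them. PopRegsInMaskIgnore walks the same
+// offsets in the same order.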
+void
+MacroAssembler::PushRegsInMask(LiveRegisterSet set)
+{
+    int32_t diff = set.gprs().size() * sizeof(intptr_t) +
+        set.fpus().getPushSizeInBytes();
+    const int32_t reserved = diff;
+
+    reserveStack(reserved);
+    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
+        diff -= sizeof(intptr_t);
+        storePtr(*iter, Address(StackPointer, diff));
+    }
+    for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
+        diff -= sizeof(double);
+        storeDouble(*iter, Address(StackPointer, diff));
+    }
+    MOZ_ASSERT(diff == 0);
+}
+
+void
+MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
+{
+    int32_t diff = set.gprs().size() * sizeof(intptr_t) +
+        set.fpus().getPushSizeInBytes();
+    const int32_t reserved = diff;
+
+    for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
+        diff -= sizeof(intptr_t);
+        if (!ignore.has(*iter))
+            loadPtr(Address(StackPointer, diff), *iter);
+    }
+    for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); iter++) {
+        diff -= sizeof(double);
+        if (!ignore.has(*iter))
+            loadDouble(Address(StackPointer, diff), *iter);
+    }
+    MOZ_ASSERT(diff == 0);
+    freeStack(reserved);
+}
+
+void
+MacroAssembler::reserveStack(uint32_t amount)
+{
+    if (amount)
+        subPtr(Imm32(amount), StackPointer);
+    adjustFrame(amount);
+}
+
+// ===============================================================
+// ABI function calls.
+
+void
+MacroAssembler::setupUnalignedABICall(Register scratch)
+{
+    setupABICall();
+    dynamicAlignment_ = true;
+
+    ma_move(scratch, StackPointer);
+
+    // Force sp to be aligned
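+    // Make room for the saved sp, round sp down to ABIStackAlignment (a
+    // power of two), then store the old sp at the new sp so that
+    // callWithABIPost() can restore it.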
+    subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+    ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+    storePtr(scratch, Address(StackPointer, 0));
+}
+
+void
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromAsmJS)
+{
+    MOZ_ASSERT(inCall_);
+    uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+    // Reserve space for $ra.
+    stackForCall += sizeof(intptr_t);
+
+    if (dynamicAlignment_) {
+        stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+    } else {
+        uint32_t alignmentAtPrologue = callFromAsmJS ? sizeof(AsmJSFrame) : 0;
+        stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
+                                             ABIStackAlignment);
+    }
+
+    *stackAdjust = stackForCall;
+    reserveStack(stackForCall);
+
+    // Save $ra because the call is going to clobber it. Restore it in
+    // callWithABIPost(). NOTE: This is needed for calls from SharedIC.
+    // Maybe we can do this differently.
+    storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+    // Position all arguments.
+    {
+        enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
+        if (!enoughMemory_)
+            return;
+
+        MoveEmitter emitter(*this);
+        emitter.emit(moveResolver_);
+        emitter.finish();
+    }
+
+    assertStackAlignment(ABIStackAlignment);
+}
+
+void
+MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+    // Restore ra value (as stored in callWithABIPre()).
+    loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+    if (dynamicAlignment_) {
+        // Restore sp value from stack (as stored in setupUnalignedABICall()).
+        loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+        // Use adjustFrame instead of freeStack because we already restored sp.
+        adjustFrame(-stackAdjust);
+    } else {
+        freeStack(stackAdjust);
+    }
+
+#ifdef DEBUG
+    MOZ_ASSERT(inCall_);
+    inCall_ = false;
+#endif
+}
+
+void
+MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
+{
+    // Load the callee into t9; no instruction between this move and the call
+    // may clobber it. Note that we can't call through fun directly, since it
+    // may be one of the IntArg registers clobbered before the call.
+    ma_move(t9, fun);
+    uint32_t stackAdjust;
+    callWithABIPre(&stackAdjust);
+    call(t9);
+    callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
+{
+    // Load the callee in t9, as above.
+    loadPtr(Address(fun.base, fun.offset), t9);
+    uint32_t stackAdjust;
+    callWithABIPre(&stackAdjust);
+    call(t9);
+    callWithABIPost(stackAdjust, result);
+}
+
+//}}} check_macroassembler_style
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -0,0 +1,1283 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_MacroAssembler_mips64_h
+#define jit_mips64_MacroAssembler_mips64_h
+
+#include "jsopcode.h"
+
+#include "jit/AtomicOp.h"
+#include "jit/IonCaches.h"
+#include "jit/JitFrames.h"
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+enum LiFlags
+{
+    Li64 = 0,
+    Li48 = 1,
+};
+
+struct ImmShiftedTag : public ImmWord
+{
+    explicit ImmShiftedTag(JSValueShiftedTag shtag)
+      : ImmWord((uintptr_t)shtag)
+    { }
+
+    explicit ImmShiftedTag(JSValueType type)
+      : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type))))
+    { }
+};
+
+struct ImmTag : public Imm32
+{
+    ImmTag(JSValueTag mask)
+      : Imm32(int32_t(mask))
+    { }
+};
+
+static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value), "The defaultShift is wrong");
+
+class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
+{
+  public:
+    using MacroAssemblerMIPSShared::ma_b;
+    using MacroAssemblerMIPSShared::ma_li;
+    using MacroAssemblerMIPSShared::ma_ss;
+    using MacroAssemblerMIPSShared::ma_sd;
+    using MacroAssemblerMIPSShared::ma_load;
+    using MacroAssemblerMIPSShared::ma_store;
+    using MacroAssemblerMIPSShared::ma_cmp_set;
+    using MacroAssemblerMIPSShared::ma_subTestOverflow;
+
+    void ma_li(Register dest, AbsoluteLabel* label);
+    void ma_li(Register dest, ImmWord imm);
+    void ma_liPatchable(Register dest, ImmPtr imm);
+    void ma_liPatchable(Register dest, ImmWord imm, LiFlags flags = Li48);
+
+    // Shift operations
+    void ma_dsll(Register rd, Register rt, Imm32 shift);
+    void ma_dsrl(Register rd, Register rt, Imm32 shift);
+    void ma_dsra(Register rd, Register rt, Imm32 shift);
+    void ma_dror(Register rd, Register rt, Imm32 shift);
+    void ma_drol(Register rd, Register rt, Imm32 shift);
+
+    void ma_dsll(Register rd, Register rt, Register shift);
+    void ma_dsrl(Register rd, Register rt, Register shift);
+    void ma_dsra(Register rd, Register rt, Register shift);
+    void ma_dror(Register rd, Register rt, Register shift);
+    void ma_drol(Register rd, Register rt, Register shift);
+
+    void ma_dins(Register rt, Register rs, Imm32 pos, Imm32 size);
+    void ma_dext(Register rt, Register rs, Imm32 pos, Imm32 size);
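+    // (dext extracts `size` bits of rs starting at bit `pos`, zero-extended,
+    // into rt; dins inserts the low `size` bits of rs into rt at `pos`.)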
+
+    // load
+    void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+                 LoadStoreExtension extension = SignExtend);
+
+    // store
+    void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+                  LoadStoreExtension extension = SignExtend);
+
+    // arithmetic based ops
+    // add
+    void ma_daddu(Register rd, Register rs, Imm32 imm);
+    void ma_daddu(Register rd, Register rs);
+    void ma_daddu(Register rd, Imm32 imm);
+    void ma_addTestOverflow(Register rd, Register rs, Register rt, Label* overflow);
+    void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow);
+
+    // subtract
+    void ma_dsubu(Register rd, Register rs, Imm32 imm);
+    void ma_dsubu(Register rd, Imm32 imm);
+    void ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow);
+
+    // multiplies. For now, there are only a few that we care about.
+    void ma_dmult(Register rs, Imm32 imm);
+
+    // stack
+    void ma_pop(Register r);
+    void ma_push(Register r);
+
+    void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
+    // Branches used from within MIPS-specific code.
+    void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+    void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump);
+    void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+    void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+    void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+        MOZ_ASSERT(rhs != ScratchRegister);
+        ma_load(ScratchRegister, addr, SizeDouble);
+        ma_b(ScratchRegister, rhs, l, c, jumpKind);
+    }
+
+    void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
+
+    // fp instructions
+    void ma_lid(FloatRegister dest, double value);
+
+    void ma_mv(FloatRegister src, ValueOperand dest);
+    void ma_mv(ValueOperand src, FloatRegister dest);
+
+    void ma_ls(FloatRegister fd, Address address);
+    void ma_ld(FloatRegister fd, Address address);
+    void ma_sd(FloatRegister fd, Address address);
+    void ma_ss(FloatRegister fd, Address address);
+
+    void ma_pop(FloatRegister fs);
+    void ma_push(FloatRegister fs);
+
+    void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c);
+    void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c);
+
+    // These functions abstract access to the high part of a double-precision
+    // float register. They are intended to work with both 32-bit and 64-bit
+    // floating point coprocessors.
+    void moveToDoubleHi(Register src, FloatRegister dest) {
+        as_mthc1(src, dest);
+    }
+    void moveFromDoubleHi(FloatRegister src, Register dest) {
+        as_mfhc1(dest, src);
+    }
+
+    void moveToDouble(Register src, FloatRegister dest) {
+        as_dmtc1(src, dest);
+    }
+    void moveFromDouble(FloatRegister src, Register dest) {
+        as_dmfc1(dest, src);
+    }
+};
+
+class MacroAssembler;
+
+class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
+{
+  public:
+    using MacroAssemblerMIPS64::call;
+
+    MacroAssemblerMIPS64Compat()
+    { }
+
+    void convertBoolToInt32(Register source, Register dest);
+    void convertInt32ToDouble(Register src, FloatRegister dest);
+    void convertInt32ToDouble(const Address& src, FloatRegister dest);
+    void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest);
+    void convertUInt32ToDouble(Register src, FloatRegister dest);
+    void convertUInt32ToFloat32(Register src, FloatRegister dest);
+    void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+    void branchTruncateDouble(FloatRegister src, Register dest, Label* fail);
+    void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+                              bool negativeZeroCheck = true);
+    void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+                               bool negativeZeroCheck = true);
+
+    void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+    void branchTruncateFloat32(FloatRegister src, Register dest, Label* fail);
+    void convertInt32ToFloat32(Register src, FloatRegister dest);
+    void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+    void addDouble(FloatRegister src, FloatRegister dest);
+    void subDouble(FloatRegister src, FloatRegister dest);
+    void mulDouble(FloatRegister src, FloatRegister dest);
+    void divDouble(FloatRegister src, FloatRegister dest);
+
+    void negateDouble(FloatRegister reg);
+    void inc64(AbsoluteAddress dest);
+
+    void movq(Register rs, Register rd);
+
+    void computeScaledAddress(const BaseIndex& address, Register dest);
+
+    void computeEffectiveAddress(const Address& address, Register dest) {
+        ma_daddu(dest, address.base, Imm32(address.offset));
+    }
+
+    void computeEffectiveAddress(const BaseIndex& address, Register dest) {
+        computeScaledAddress(address, dest);
+        if (address.offset) {
+            addPtr(Imm32(address.offset), dest);
+        }
+    }
+
+    void j(Label* dest) {
+        ma_b(dest);
+    }
+
+    void mov(Register src, Register dest) {
+        as_ori(dest, src, 0);
+    }
+    void mov(ImmWord imm, Register dest) {
+        ma_li(dest, imm);
+    }
+    void mov(ImmPtr imm, Register dest) {
+        mov(ImmWord(uintptr_t(imm.value)), dest);
+    }
+    void mov(Register src, Address dest) {
+        MOZ_CRASH("NYI-IC");
+    }
+    void mov(Address src, Register dest) {
+        MOZ_CRASH("NYI-IC");
+    }
+
+    void writeDataRelocation(const Value& val) {
+        if (val.isMarkable()) {
+            gc::Cell* cell = reinterpret_cast<gc::Cell *>(val.toGCThing());
+            if (cell && gc::IsInsideNursery(cell))
+                embedsNurseryPointers_ = true;
+            dataRelocations_.writeUnsigned(currentOffset());
+        }
+    }
+
+    void branch(JitCode* c) {
+        BufferOffset bo = m_buffer.nextOffset();
+        addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+        ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
+        as_jr(ScratchRegister);
+        as_nop();
+    }
+    void branch(const Register reg) {
+        as_jr(reg);
+        as_nop();
+    }
+    void nop() {
+        as_nop();
+    }
+    void ret() {
+        ma_pop(ra);
+        as_jr(ra);
+        as_nop();
+    }
+    void retn(Imm32 n) {
+        // pc <- [sp]; sp += n
+        loadPtr(Address(StackPointer, 0), ra);
+        addPtr(n, StackPointer);
+        as_jr(ra);
+        as_nop();
+    }
+    void push(Imm32 imm) {
+        ma_li(ScratchRegister, imm);
+        ma_push(ScratchRegister);
+    }
+    void push(ImmWord imm) {
+        ma_li(ScratchRegister, imm);
+        ma_push(ScratchRegister);
+    }
+    void push(ImmGCPtr imm) {
+        ma_li(ScratchRegister, imm);
+        ma_push(ScratchRegister);
+    }
+    void push(const Address& address) {
+        loadPtr(address, ScratchRegister);
+        ma_push(ScratchRegister);
+    }
+    void push(Register reg) {
+        ma_push(reg);
+    }
+    void push(FloatRegister reg) {
+        ma_push(reg);
+    }
+    void pop(Register reg) {
+        ma_pop(reg);
+    }
+    void pop(FloatRegister reg) {
+        ma_pop(reg);
+    }
+
+    // Emit a branch that can be toggled to a non-operation. On MIPS64 we use
+    // an "andi" instruction to toggle the branch.
+    // See ToggleToJmp(), ToggleToCmp().
+    CodeOffsetLabel toggledJump(Label* label);
+
+    // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+    // this instruction.
+    CodeOffsetLabel toggledCall(JitCode* target, bool enabled);
+
+    static size_t ToggledCallSize(uint8_t* code) {
+        // Six instructions used in: MacroAssemblerMIPS64Compat::toggledCall
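+        // (four for the patchable Li48 load plus the two-instruction
+        // jalr/nop or nop/nop pair)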
+        return 6 * sizeof(uint32_t);
+    }
+
+    CodeOffsetLabel pushWithPatch(ImmWord imm) {
+        CodeOffsetLabel label = movWithPatch(imm, ScratchRegister);
+        ma_push(ScratchRegister);
+        return label;
+    }
+
+    CodeOffsetLabel movWithPatch(ImmWord imm, Register dest) {
+        CodeOffsetLabel label = CodeOffsetLabel(currentOffset());
+        ma_liPatchable(dest, imm, Li64);
+        return label;
+    }
+    CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
+        CodeOffsetLabel label = CodeOffsetLabel(currentOffset());
+        ma_liPatchable(dest, imm);
+        return label;
+    }
+
+    void jump(Label* label) {
+        ma_b(label);
+    }
+    void jump(Register reg) {
+        as_jr(reg);
+        as_nop();
+    }
+    void jump(const Address& address) {
+        loadPtr(address, ScratchRegister);
+        as_jr(ScratchRegister);
+        as_nop();
+    }
+
+    void jump(JitCode* code) {
+        branch(code);
+    }
+
+    void neg32(Register reg) {
+        ma_negu(reg, reg);
+    }
+
+    void splitTag(Register src, Register dest) {
+        ma_dsrl(dest, src, Imm32(JSVAL_TAG_SHIFT));
+    }
+
+    void splitTag(const ValueOperand& operand, Register dest) {
+        splitTag(operand.valueReg(), dest);
+    }
+
+    // Returns the register containing the type tag.
+    Register splitTagForTest(const ValueOperand& value) {
+        splitTag(value, SecondScratchReg);
+        return SecondScratchReg;
+    }
+
+    void branchTestGCThing(Condition cond, const Address& address, Label* label);
+    void branchTestGCThing(Condition cond, const BaseIndex& src, Label* label);
+
+    void branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label);
+    void branchTestPrimitive(Condition cond, Register tag, Label* label);
+
+    void branchTestValue(Condition cond, const ValueOperand& value, const Value& v, Label* label);
+    void branchTestValue(Condition cond, const Address& valaddr, const ValueOperand& value,
+                         Label* label);
+
+    // unboxing code
+    void unboxNonDouble(const ValueOperand& operand, Register dest);
+    void unboxNonDouble(const Address& src, Register dest);
+    void unboxNonDouble(const BaseIndex& src, Register dest);
+    void unboxInt32(const ValueOperand& operand, Register dest);
+    void unboxInt32(const Operand& operand, Register dest);
+    void unboxInt32(const Address& src, Register dest);
+    void unboxInt32(const BaseIndex& src, Register dest);
+    void unboxBoolean(const ValueOperand& operand, Register dest);
+    void unboxBoolean(const Operand& operand, Register dest);
+    void unboxBoolean(const Address& src, Register dest);
+    void unboxBoolean(const BaseIndex& src, Register dest);
+    void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+    void unboxDouble(const Address& src, FloatRegister dest);
+    void unboxString(const ValueOperand& operand, Register dest);
+    void unboxString(const Operand& operand, Register dest);
+    void unboxString(const Address& src, Register dest);
+    void unboxSymbol(const ValueOperand& src, Register dest);
+    void unboxSymbol(const Operand& src, Register dest);
+    void unboxSymbol(const Address& src, Register dest);
+    void unboxObject(const ValueOperand& src, Register dest);
+    void unboxObject(const Operand& src, Register dest);
+    void unboxObject(const Address& src, Register dest);
+    void unboxObject(const BaseIndex& src, Register dest) { unboxNonDouble(src, dest); }
+    void unboxValue(const ValueOperand& src, AnyRegister dest);
+    void unboxPrivate(const ValueOperand& src, Register dest);
+
+    void notBoolean(const ValueOperand& val) {
+        as_xori(val.valueReg(), val.valueReg(), 1);
+    }
+
+    // boxing code
+    void boxDouble(FloatRegister src, const ValueOperand& dest);
+    void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+    // Extended unboxing API. If the payload is already in a register, returns
+    // that register. Otherwise, provides a move to the given scratch register,
+    // and returns that.
+    Register extractObject(const Address& address, Register scratch);
+    Register extractObject(const ValueOperand& value, Register scratch) {
+        unboxObject(value, scratch);
+        return scratch;
+    }
+    Register extractInt32(const ValueOperand& value, Register scratch) {
+        unboxInt32(value, scratch);
+        return scratch;
+    }
+    Register extractBoolean(const ValueOperand& value, Register scratch) {
+        unboxBoolean(value, scratch);
+        return scratch;
+    }
+    Register extractTag(const Address& address, Register scratch);
+    Register extractTag(const BaseIndex& address, Register scratch);
+    Register extractTag(const ValueOperand& value, Register scratch) {
+        MOZ_ASSERT(scratch != ScratchRegister);
+        splitTag(value, scratch);
+        return scratch;
+    }
+
+    void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+    void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+    void loadInt32OrDouble(const Address& src, FloatRegister dest);
+    void loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest);
+    void loadConstantDouble(double dp, FloatRegister dest);
+
+    void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+    void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+    void loadConstantFloat32(float f, FloatRegister dest);
+
+    void branchTestInt32(Condition cond, const ValueOperand& value, Label* label);
+    void branchTestInt32(Condition cond, Register tag, Label* label);
+    void branchTestInt32(Condition cond, const Address& address, Label* label);
+    void branchTestInt32(Condition cond, const BaseIndex& src, Label* label);
+
+    void branchTestBoolean(Condition cond, const ValueOperand& value, Label* label);
+    void branchTestBoolean(Condition cond, Register tag, Label* label);
+    void branchTestBoolean(Condition cond, const BaseIndex& src, Label* label);
+
+    void branch32(Condition cond, Register lhs, Register rhs, Label* label) {
+        ma_b(lhs, rhs, label, cond);
+    }
+    void branch32(Condition cond, Register lhs, Imm32 imm, Label* label) {
+        ma_b(lhs, imm, label, cond);
+    }
+    void branch32(Condition cond, const Operand& lhs, Register rhs, Label* label) {
+        if (lhs.getTag() == Operand::REG) {
+            ma_b(lhs.toReg(), rhs, label, cond);
+        } else {
+            branch32(cond, lhs.toAddress(), rhs, label);
+        }
+    }
+    void branch32(Condition cond, const Operand& lhs, Imm32 rhs, Label* label) {
+        if (lhs.getTag() == Operand::REG) {
+            ma_b(lhs.toReg(), rhs, label, cond);
+        } else {
+            branch32(cond, lhs.toAddress(), rhs, label);
+        }
+    }
+    void branch32(Condition cond, const Address& lhs, Register rhs, Label* label) {
+        load32(lhs, SecondScratchReg);
+        ma_b(SecondScratchReg, rhs, label, cond);
+    }
+    void branch32(Condition cond, const Address& lhs, Imm32 rhs, Label* label) {
+        load32(lhs, SecondScratchReg);
+        ma_b(SecondScratchReg, rhs, label, cond);
+    }
+    void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label) {
+        load32(lhs, SecondScratchReg);
+        ma_b(SecondScratchReg, rhs, label, cond);
+    }
+    void branchPtr(Condition cond, const Address& lhs, Register rhs, Label* label) {
+        loadPtr(lhs, SecondScratchReg);
+        ma_b(SecondScratchReg, rhs, label, cond);
+    }
+
+    void branchPrivatePtr(Condition cond, const Address& lhs, ImmPtr ptr, Label* label) {
+        branchPtr(cond, lhs, ptr, label);
+    }
+
+    void branchPrivatePtr(Condition cond, const Address& lhs, Register ptr, Label* label) {
+        branchPtr(cond, lhs, ptr, label);
+    }
+
+    void branchPrivatePtr(Condition cond, Register lhs, ImmWord ptr, Label* label) {
+        branchPtr(cond, lhs, ptr, label);
+    }
+
+    void branchTestDouble(Condition cond, const ValueOperand& value, Label* label);
+    void branchTestDouble(Condition cond, Register tag, Label* label);
+    void branchTestDouble(Condition cond, const Address& address, Label* label);
+    void branchTestDouble(Condition cond, const BaseIndex& src, Label* label);
+
+    void branchTestNull(Condition cond, const ValueOperand& value, Label* label);
+    void branchTestNull(Condition cond, Register tag, Label* label);
+    void branchTestNull(Condition cond, const BaseIndex& src, Label* label);
+    void branchTestNull(Condition cond, const Address& address, Label* label);
+    void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+    void branchTestObject(Condition cond, const ValueOperand& value, Label* label);
+    void branchTestObject(Condition cond, Register tag, Label* label);
+    void branchTestObject(Condition cond, const BaseIndex& src, Label* label);
+    void branchTestObject(Condition cond, const Address& src, Label* label);
+    void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+    void branchTestString(Condition cond, const ValueOperand& value, Label* label);
+    void branchTestString(Condition cond, Register tag, Label* label);
+    void branchTestString(Condition cond, const BaseIndex& src, Label* label);
+
+    void branchTestSymbol(Condition cond, const ValueOperand& value, Label* label);
+    void branchTestSymbol(Condition cond, const Register& tag, Label* label);
+    void branchTestSymbol(Condition cond, const BaseIndex& src, Label* label);
+
+    void branchTestUndefined(Condition cond, const ValueOperand& value, Label* label);
+    void branchTestUndefined(Condition cond, Register tag, Label* label);
+    void branchTestUndefined(Condition cond, const BaseIndex& src, Label* label);
+    void branchTestUndefined(Condition cond, const Address& address, Label* label);
+    void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest);
+
+    void branchTestNumber(Condition cond, const ValueOperand& value, Label* label);
+    void branchTestNumber(Condition cond, Register tag, Label* label);
+
+    void branchTestMagic(Condition cond, const ValueOperand& value, Label* label);
+    void branchTestMagic(Condition cond, Register tag, Label* label);
+    void branchTestMagic(Condition cond, const Address& address, Label* label);
+    void branchTestMagic(Condition cond, const BaseIndex& src, Label* label);
+
+    void branchTestMagicValue(Condition cond, const ValueOperand& val, JSWhyMagic why,
+                              Label* label) {
+        MOZ_ASSERT(cond == Equal || cond == NotEqual);
+        branchTestValue(cond, val, MagicValue(why), label);
+    }
+
+    void branchTestInt32Truthy(bool b, const ValueOperand& value, Label* label);
+
+    void branchTestStringTruthy(bool b, const ValueOperand& value, Label* label);
+
+    void branchTestDoubleTruthy(bool b, FloatRegister value, Label* label);
+
+    void branchTestBooleanTruthy(bool b, const ValueOperand& operand, Label* label);
+
+    void branchTest32(Condition cond, Register lhs, Register rhs, Label* label) {
+        MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+        if (lhs == rhs) {
+            ma_b(lhs, rhs, label, cond);
+        } else {
+            as_and(ScratchRegister, lhs, rhs);
+            ma_b(ScratchRegister, ScratchRegister, label, cond);
+        }
+    }
+    void branchTest32(Condition cond, Register lhs, Imm32 imm, Label* label) {
+        ma_li(ScratchRegister, imm);
+        branchTest32(cond, lhs, ScratchRegister, label);
+    }
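+    // The Imm32 overload above materializes imm in ScratchRegister and
+    // then ANDs into ScratchRegister as well; this is safe because as_and
+    // reads both sources before writing its destination.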
+    void branchTest32(Condition cond, const Address& address, Imm32 imm, Label* label) {
+        load32(address, SecondScratchReg);
+        branchTest32(cond, SecondScratchReg, imm, label);
+    }
+    void branchTest32(Condition cond, AbsoluteAddress address, Imm32 imm, Label* label) {
+        // Load into the second scratch register: the Imm32 overload above
+        // materializes imm in ScratchRegister and would clobber the load.
+        load32(address, SecondScratchReg);
+        branchTest32(cond, SecondScratchReg, imm, label);
+    }
+    void branchTestPtr(Condition cond, Register lhs, Register rhs, Label* label) {
+        MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+        if (lhs == rhs) {
+            ma_b(lhs, rhs, label, cond);
+        } else {
+            as_and(ScratchRegister, lhs, rhs);
+            ma_b(ScratchRegister, ScratchRegister, label, cond);
+        }
+    }
+    void branchTestPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) {
+        ma_li(ScratchRegister, rhs);
+        branchTestPtr(cond, lhs, ScratchRegister, label);
+    }
+    void branchTestPtr(Condition cond, const Address& lhs, Imm32 imm, Label* label) {
+        loadPtr(lhs, SecondScratchReg);
+        branchTestPtr(cond, SecondScratchReg, imm, label);
+    }
+    void branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
+                      Label* label) {
+        branchTestPtr(cond, lhs.reg, rhs.reg, label);
+    }
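+    // branchTest64 above needs no temp: on 64-bit targets a Register64 is
+    // a single register, so the temp register that 32-bit platforms
+    // require here goes unused.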
+    void branchPtr(Condition cond, Register lhs, Register rhs, Label* label) {
+        ma_b(lhs, rhs, label, cond);
+    }
+    void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label* label) {
+        ma_b(lhs, ptr, label, cond);
+    }
+    void branchPtr(Condition cond, Register lhs, ImmWord imm, Label* label) {
+        ma_b(lhs, imm, label, cond);
+    }
+    void branchPtr(Condition cond, Register lhs, ImmPtr imm, Label* label) {
+        ma_b(lhs, imm, label, cond);
+    }
+    void branchPtr(Condition cond, Register lhs, AsmJSImmPtr imm, Label* label) {
+        movePtr(imm, SecondScratchReg);
+        ma_b(lhs, SecondScratchReg, label, cond);
+    }
+    void branchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
+        ma_b(lhs, imm, label, cond);
+    }
+    void decBranchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
+        subPtr(imm, lhs);
+        branchPtr(cond, lhs, Imm32(0), label);
+    }
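+    // decBranchPtr subtracts imm in place and branches on the new value
+    // compared against zero; the usual shape is a count-down loop.
+    // Illustrative sketch (hypothetical counter register and label):
+    //
+    //   bind(&top);
+    //   // ... loop body ...
+    //   decBranchPtr(Assembler::NonZero, counter, Imm32(1), &top);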
+
+    // Higher-level tag-testing code.
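+    // On 64-bit platforms a boxed value and its payload share one word,
+    // so there is no separate payload address and ToPayload is the
+    // identity.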
+    Address ToPayload(Address value) {
+        return value;
+    }
+
+    void moveValue(const Value& val, Register dest);
+
+    CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr);
+    CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr);
+
+    template <typename T>
+    CodeOffsetJump branchPtrWithPatch(Condition cond, Register reg, T ptr, RepatchLabel* label) {
+        movePtr(ptr, ScratchRegister);
+        Label skipJump;
+        ma_b(reg, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
+        CodeOffsetJump off = jumpWithPatch(label);
+        bind(&skipJump);
+        return off;
+    }
+
+    template <typename T>
+    CodeOffsetJump branchPtrWithPatch(Condition cond, Address addr, T ptr, RepatchLabel* label) {
+        loadPtr(addr, SecondScratchReg);
+        movePtr(ptr, ScratchRegister);
+        Label skipJump;
+        ma_b(SecondScratchReg, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
+        CodeOffsetJump off = jumpWithPatch(label);
+        bind(&skipJump);
+        return off;
+    }
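+    // The branchPtrWithPatch overloads above invert the condition to skip
+    // over a patchable full-range jump, so the returned CodeOffsetJump can
+    // later be repatched to any target regardless of branch distance.
+    // Illustrative sketch (hypothetical call site):
+    //
+    //   CodeOffsetJump off =
+    //       branchPtrWithPatch(Assembler::Equal, obj, ImmGCPtr(expected), &rl);
+    //   // After linking, 'off' can be converted to a CodeLocationJump and
+    //   // redirected with PatchJump().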
+    void branchPtr(Condition cond, Address addr, ImmGCPtr ptr, Label* label) {
+        loadPtr(addr, SecondScratchReg);
+        ma_b(SecondScratchReg, ptr, label, cond);
+    }
+
+    void branchPtr(Condition cond, Address addr, ImmWord ptr, Label* label) {
+        loadPtr(addr, SecondScratchReg);
+        ma_b(SecondScratchReg, ptr, label, cond);
+    }
+    void branchPtr(Condition cond, Address addr, ImmPtr ptr, Label* label) {
+        loadPtr(addr, SecondScratchReg);
+        ma_b(SecondScratchReg, ptr, label, cond);
+    }
+    void branchPtr(Condition cond, AbsoluteAddress addr, Register ptr, Label* label) {
+        loadPtr(addr, SecondScratchReg);
+        ma_b(SecondScratchReg, ptr, label, cond);
+    }
+    void branchPtr(Condition cond, AbsoluteAddress addr, ImmWord ptr, Label* label) {
+        loadPtr(addr, SecondScratchReg);
+        ma_b(SecondScratchReg, ptr, label, cond);
+    }
+    void branchPtr(Condition cond, AsmJSAbsoluteAddress addr, Register ptr, Label* label) {
+        loadPtr(addr, SecondScratchReg);
+        ma_b(SecondScratchReg, ptr, label, cond);
+    }
+    void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label* label) {
+        load32(lhs, SecondScratchReg);
+        ma_b(SecondScratchReg, rhs, label, cond);
+    }
+    void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) {
+        load32(lhs, SecondScratchReg);
+        ma_b(SecondScratchReg, rhs, label, cond);
+    }
+    void branch32(Condition cond, AsmJSAbsoluteAddress addr, Imm32 imm, Label* label) {
+        load32(addr, SecondScratchReg);
+        ma_b(SecondScratchReg, imm, label, cond);
+    }
+
+    template <typename T>
+    void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
+        if (dest.isFloat())
+            loadInt32OrDouble(address, dest.fpu());
+        else if (type == MIRType_Int32)
+            unboxInt32(address, dest.gpr());
+        else if (type == MIRType_Boolean)
+            unboxBoolean(address, dest.gpr());
+        else
+            unboxNonDouble(address, dest.gpr());
+    }
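+    // In loadUnboxedValue above, float destinations take the
+    // int32-or-double path; int32 and boolean payloads occupy the low 32
+    // bits of the boxed word, while the remaining pointer-like types are
+    // unboxed from the full 64-bit word by stripping the tag.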
+
+    template <typename T>
+    void storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
+                           MIRType slotType);
+
+    template <typename T>
+    void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
+        switch (nbytes) {
+          case 8:
+            unboxNonDouble(value, ScratchRegister);
+            storePtr(ScratchRegister, address);
+            return;
+          case 4:
+            store32(value.valueReg(), address);
+            return;
+          case 1:
+            store8(value.valueReg(), address);
+            return;
+          default: MOZ_CRASH("Bad payload width");
+        }
+    }
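+    // In storeUnboxedPayload, an 8-byte payload fills the whole word and
+    // must be unboxed before the store; 4- and 1-byte payloads can be
+    // stored directly because the low bits of a boxed value are the
+    // payload itself.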
+
+    void moveValue(const Value& val, const ValueOperand& dest);
+
+    void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+        if (src.valueReg() != dest.valueReg())
+            ma_move(dest.valueReg(), src.valueReg());
+    }
+    void boxValue(JSValueType type, Register src, Register dest) {
+        MOZ_ASSERT(src != dest);
+
+        JSValueTag tag = (JSValueTag)JSVAL_TYPE_TO_TAG(type);
+        ma_li(dest, Imm32(tag));
+        ma_dsll(dest, dest, Imm32(JSVAL_TAG_SHIFT));
+        ma_dins(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+    }
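+    // boxValue materializes the tag, shifts it into the top bits
+    // (JSVAL_TAG_SHIFT is 47), then inserts the low 47 payload bits of
+    // src underneath it with dins. Illustrative sketch for an int32
+    // payload:
+    //
+    //   boxValue(JSVAL_TYPE_INT32, src, dest);
+    //   // dest == (uint64_t(JSVAL_TAG_INT32) << JSVAL_TAG_SHIFT)
+    //   //         | (src & ((uint64_t(1) << JSVAL_TAG_SHIFT) - 1))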
+
+    void storeValue(ValueOperand val, Operand dst);
+    void storeValue(ValueOperand val, const BaseIndex& dest);
+    void storeValue(JSValueType type, Register reg, BaseIndex dest);
+    void storeValue(ValueOperand val, const Address& dest);
+    void storeValue(JSValueType type, Register reg, Address dest);
+    void storeValue(const Value& val, Address dest);
+    void storeValue(const Value& val, BaseIndex dest);
+
+    void loadValue(Address src, ValueOperand val);
+    void loadValue(Operand src, ValueOperand val) {
+        loadValue(src.toAddress(), val);
+    }
+    void loadValue(const BaseIndex& addr, ValueOperand val);
+    void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+    void pushValue(ValueOperand val);
+    void popValue(ValueOperand val);
+    void pushValue(const Value& val) {
+        jsval_layout jv = JSVAL_TO_IMPL(val);
+        if (val.isMarkable()) {
+            writeDataRelocation(val);
+            movWithPatch(ImmWord(jv.asBits), ScratchRegister);
+            push(ScratchRegister);
+        } else {
+            push(ImmWord(jv.asBits));
+        }
+    }
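+    // Markable values embed a GC-thing pointer that the collector may
+    // move, so they are pushed through a patchable move with a data
+    // relocation entry; non-markable values are pushed as plain
+    // immediates.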
+    void pushValue(JSValueType type, Register reg) {
+        boxValue(type, reg, ScratchRegister);
+        push(ScratchRegister);
+    }
+    void pushValue(const Address& addr);
+
+    void handleFailureWithHandlerTail(void* handler);
+
+    /////////////////////////////////////////////////////////////////
+    // Common interface.
+    /////////////////////////////////////////////////////////////////
+  public:
+    // The following functions are exposed for use in platform-shared code.
+
+    template<typename T>
+    void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register output)
+    {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T>
+    void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
+    {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T>
+    void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register output)
+    {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T>
+    void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register output)
+    {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T>
+    void compareExchange32(const T& mem, Register oldval, Register newval, Register output)
+    {
+        MOZ_CRASH("NYI");
+    }
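+    // None of these atomics are implemented yet. On MIPS they would be
+    // built from an LL/SC retry loop; a minimal sketch of the 32-bit
+    // compare-exchange in MIPS assembly, with delay slots omitted for
+    // clarity (hypothetical register assignments):
+    //
+    //   retry: ll   output, 0(base)        # load-linked current value
+    //          bne  output, oldval, done   # mismatch: leave memory alone
+    //          move temp, newval
+    //          sc   temp, 0(base)          # store-conditional
+    //          beq  temp, zero, retry      # reservation lost: try again
+    //   done: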
+
+    template<typename T>
+    void atomicExchange8SignExtend(const T& mem, Register value, Register output)
+    {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T>
+    void atomicExchange8ZeroExtend(const T& mem, Register value, Register output)
+    {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T>
+    void atomicExchange16SignExtend(const T& mem, Register value, Register output)
+    {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T>
+    void atomicExchange16ZeroExtend(const T& mem, Register value, Register output)
+    {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T>
+    void atomicExchange32(const T& mem, Register value, Register output)
+    {
+        MOZ_CRASH("NYI");
+    }
+
+    template<typename T, typename S>
+    void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchAdd32(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicAdd8(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicAdd16(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicAdd32(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+
+    template<typename T, typename S>
+    void atomicFetchSub8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchSub16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchSub32(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicSub8(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicSub16(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicSub32(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+
+    template<typename T, typename S>
+    void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchAnd32(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicAnd8(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicAnd16(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicAnd32(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+
+    template<typename T, typename S>
+    void atomicFetchOr8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchOr16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchOr32(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicOr8(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicOr16(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicOr32(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+
+    template<typename T, typename S>
+    void atomicFetchXor8SignExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchXor16SignExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template<typename T, typename S>
+    void atomicFetchXor32(const S& value, const T& mem, Register temp, Register output) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicXor8(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicXor16(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+    template <typename T, typename S>
+    void atomicXor32(const T& value, const S& mem) {
+        MOZ_CRASH("NYI");
+    }
+
+    void add32(Register src, Register dest);
+    void add32(Imm32 imm, Register dest);
+    void add32(Imm32 imm, const Address& dest);
+    void add64(Imm32 imm, Register64 dest) {
+        ma_daddu(dest.reg, imm);
+    }
+    void sub32(Imm32 imm, Register dest);
+    void sub32(Register src, Register dest);
+
+    void incrementInt32Value(const Address& addr) {
+        add32(Imm32(1), addr);
+    }
+
+    template <typename T>
+    void branchAdd32(Condition cond, T src, Register dest, Label* overflow) {
+        switch (cond) {
+          case Overflow:
+            ma_addTestOverflow(dest, dest, src, overflow);
+            break;
+          default:
+            MOZ_CRASH("NYI");
+        }
+    }
+    template <typename T>
+    void branchSub32(Condition cond, T src, Register dest, Label* label) {
+        switch (cond) {
+          case Overflow:
+            ma_subTestOverflow(dest, dest, src, label);
+            break;
+          case NonZero:
+          case Zero:
+            sub32(src, dest);
+            ma_b(dest, dest, label, cond);
+            break;
+          default:
+            MOZ_CRASH("NYI");
+        }
+    }
+
+    void addPtr(Register src, Register dest);
+    void subPtr(Register src, Register dest);
+    void addPtr(const Address& src, Register dest);
+
+    void move32(Imm32 imm, Register dest);
+    void move32(Register src, Register dest);
+    void move64(Register64 src, Register64 dest) {
+        movePtr(src.reg, dest.reg);
+    }
+
+    void movePtr(Register src, Register dest);
+    void movePtr(ImmWord imm, Register dest);
+    void movePtr(ImmPtr imm, Register dest);
+    void movePtr(AsmJSImmPtr imm, Register dest);
+    void movePtr(ImmGCPtr imm, Register dest);
+
+    void load8SignExtend(const Address& address, Register dest);
+    void load8SignExtend(const BaseIndex& src, Register dest);
+
+    void load8ZeroExtend(const Address& address, Register dest);
+    void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+    void load16SignExtend(const Address& address, Register dest);
+    void load16SignExtend(const BaseIndex& src, Register dest);
+
+    void load16ZeroExtend(const Address& address, Register dest);
+    void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+    void load32(const Address& address, Register dest);
+    void load32(const BaseIndex& address, Register dest);
+    void load32(AbsoluteAddress address, Register dest);
+    void load32(AsmJSAbsoluteAddress address, Register dest);
+    void load64(const Address& address, Register64 dest) {
+        loadPtr(address, dest.reg);
+    }
+
+    void loadPtr(const Address& address, Register dest);
+    void loadPtr(const BaseIndex& src, Register dest);
+    void loadPtr(AbsoluteAddress address, Register dest);
+    void loadPtr(AsmJSAbsoluteAddress address, Register dest);
+
+    void loadPrivate(const Address& address, Register dest);
+
+    void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+    void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+    void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+    void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+    void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+    void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+    void loadAlignedInt32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void storeAlignedInt32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+    void loadUnalignedInt32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void loadUnalignedInt32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void storeUnalignedInt32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+    void storeUnalignedInt32x4(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+
+    void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void storeFloat32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+    void storeFloat32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+    void loadAlignedFloat32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void storeAlignedFloat32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+    void loadUnalignedFloat32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void loadUnalignedFloat32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+    void storeUnalignedFloat32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+    void storeUnalignedFloat32x4(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+
+    void loadDouble(const Address& addr, FloatRegister dest);
+    void loadDouble(const BaseIndex& src, FloatRegister dest);
+
+    // Load a float value into a register, then expand it to a double.
+    void loadFloatAsDouble(const Address& addr, FloatRegister dest);
+    void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
+
+    void loadFloat32(const Address& addr, FloatRegister dest);
+    void loadFloat32(const BaseIndex& src, FloatRegister dest);
+
+    void store8(Register src, const Address& address);
+    void store8(Imm32 imm, const Address& address);
+    void store8(Register src, const BaseIndex& address);
+    void store8(Imm32 imm, const BaseIndex& address);
+
+    void store16(Register src, const Address& address);
+    void store16(Imm32 imm, const Address& address);
+    void store16(Register src, const BaseIndex& address);
+    void store16(Imm32 imm, const BaseIndex& address);
+
+    void store32(Register src, AbsoluteAddress address);
+    void store32(Register src, const Address& address);
+    void store32(Register src, const BaseIndex& address);
+    void store32(Imm32 src, const Address& address);
+    void store32(Imm32 src, const BaseIndex& address);
+
+    // NOTE: This will use the second scratch register on MIPS64. Only ARM
+    // needs an implementation that avoids the second scratch register.
+    void store32_NoSecondScratch(Imm32 src, const Address& address) {
+        store32(src, address);
+    }
+
+    void store64(Register64 src, Address address) {
+        storePtr(src.reg, address);
+    }
+
+    template <typename T> void storePtr(ImmWord imm, T address);
+    template <typename T> void storePtr(ImmPtr imm, T address);
+    template <typename T> void storePtr(ImmGCPtr imm, T address);
+    void storePtr(Register src, const Address& address);
+    void storePtr(Register src, const BaseIndex& address);
+    void storePtr(Register src, AbsoluteAddress dest);
+    void storeDouble(FloatRegister src, Address addr) {
+        ma_sd(src, addr);
+    }
+    void storeDouble(FloatRegister src, BaseIndex addr) {
+        MOZ_ASSERT(addr.offset == 0);
+        ma_sd(src, addr);
+    }
+    void moveDouble(FloatRegister src, FloatRegister dest) {
+        as_movd(dest, src);
+    }
+
+    void storeFloat32(FloatRegister src, Address addr) {
+        ma_ss(src, addr);
+    }
+    void storeFloat32(FloatRegister src, BaseIndex addr) {
+        MOZ_ASSERT(addr.offset == 0);
+        ma_ss(src, addr);
+    }
+
+    void zeroDouble(FloatRegister reg) {
+        moveToDouble(zero, reg);
+    }
+
+    void clampIntToUint8(Register reg);
+
+    void subPtr(Imm32 imm, const Register dest);
+    void subPtr(const Address& addr, const Register dest);
+    void subPtr(Register src, const Address& dest);
+    void addPtr(Imm32 imm, const Register dest);
+    void addPtr(Imm32 imm, const Address& dest);
+    void addPtr(ImmWord imm, const Register dest) {
+        movePtr(imm, ScratchRegister);
+        addPtr(ScratchRegister, dest);
+    }
+    void addPtr(ImmPtr imm, const Register dest) {
+        addPtr(ImmWord(uintptr_t(imm.value)), dest);
+    }
+    void mulBy3(const Register& src, const Register& dest) {
+        as_daddu(dest, src, src);
+        as_daddu(dest, dest, src);
+    }
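+    // mulBy3 is a strength-reduced multiply: dest = src + src, then
+    // dest = dest + src. dest must not alias src, or the second addition
+    // would read the already-doubled value and yield 4 * src.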
+
+    void mul64(Imm64 imm, const Register64& dest) {
+        MOZ_ASSERT(dest.reg != ScratchRegister);
+        mov(ImmWord(imm.value), ScratchRegister);
+        as_dmultu(dest.reg, ScratchRegister);
+        as_mflo(dest.reg);
+    }
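+    // dmultu leaves the 128-bit product in the HI/LO register pair;
+    // reading only LO yields the low 64 bits, i.e. wrapping 64-bit
+    // multiplication.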
+
+    void convertUInt64ToDouble(Register64 src, Register temp, FloatRegister dest);
+    void mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest) {
+        movePtr(imm, ScratchRegister);
+        loadDouble(Address(ScratchRegister, 0), ScratchDoubleReg);
+        mulDouble(ScratchDoubleReg, dest);
+    }
+
+    void breakpoint();
+
+    void branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+                      Label* label);
+
+    void branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+                     Label* label);
+
+    void checkStackAlignment();
+
+    static void calculateAlignedStackPointer(void** stackPointer);
+
+    // If source is a double, load it into dest. If source is int32,
+    // convert it to double. Otherwise, branch to failure.
+    void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
+
+    template <typename T1, typename T2>
+    void cmpPtrSet(Assembler::Condition cond, T1 lhs, T2 rhs, Register dest)
+    {
+        ma_cmp_set(dest, lhs, rhs, cond);
+    }
+    void cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs, Register dest);
+    void cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs, Register dest);
+
+    template <typename T1, typename T2>
+    void cmp32Set(Assembler::Condition cond, T1 lhs, T2 rhs, Register dest)
+    {
+        ma_cmp_set(dest, lhs, rhs, cond);
+    }
+    void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs, Register dest);
+
+  protected:
+    bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+  public:
+    CodeOffsetLabel labelForPatch() {
+        return CodeOffsetLabel(nextOffset().getOffset());
+    }
+
+    void memIntToValue(Address source, Address dest) {
+        load32(source, ScratchRegister);
+        storeValue(JSVAL_TYPE_INT32, ScratchRegister, dest);
+    }
+
+    void lea(Operand addr, Register dest) {
+        ma_daddu(dest, addr.baseReg(), Imm32(addr.disp()));
+    }
+
+    void abiret() {
+        as_jr(ra);
+        as_nop();
+    }
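+    // The jr through ra returns to the caller; the trailing nop fills the
+    // MIPS branch delay slot, which executes regardless of the jump.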
+
+    BufferOffset ma_BoundsCheck(Register bounded) {
+        BufferOffset bo = m_buffer.nextOffset();
+        ma_liPatchable(bounded, ImmWord(0));
+        return bo;
+    }
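+    // ma_BoundsCheck emits a patchable load of a placeholder bound
+    // (ImmWord(0)) and returns its buffer offset so the real heap length
+    // can be patched in once it is known.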
+
+    void moveFloat32(FloatRegister src, FloatRegister dest) {
+        as_movs(dest, src);
+    }
+
+    void branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, Label* label);
+    void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
+                                    Label* label);
+
+    void loadAsmJSActivation(Register dest) {
+        loadPtr(Address(GlobalReg, AsmJSActivationGlobalDataOffset - AsmJSGlobalRegBias), dest);
+    }
+    void loadAsmJSHeapRegisterFromGlobalData() {
+        MOZ_ASSERT(Imm16::IsInSignedRange(AsmJSHeapGlobalDataOffset - AsmJSGlobalRegBias));
+        loadPtr(Address(GlobalReg, AsmJSHeapGlobalDataOffset - AsmJSGlobalRegBias), HeapReg);
+    }
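+    // GlobalReg holds the global-data pointer biased by AsmJSGlobalRegBias
+    // so the signed 16-bit displacement of native MIPS loads can reach
+    // more of the global area; the assertion checks the adjusted offset
+    // still fits that range.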
+
+    // Instrumentation for entering and leaving the profiler.
+    void profilerEnterFrame(Register framePtr, Register scratch);
+    void profilerExitFrame();
+};
+
+typedef MacroAssemblerMIPS64Compat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_MacroAssembler_mips64_h */
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -514,16 +514,17 @@ elif CONFIG['JS_CODEGEN_MIPS32'] or CONF
         UNIFIED_SOURCES += [
             'jit/mips64/Architecture-mips64.cpp',
             'jit/mips64/Assembler-mips64.cpp',
             'jit/mips64/Bailouts-mips64.cpp',
             'jit/mips64/BaselineCompiler-mips64.cpp',
             'jit/mips64/BaselineIC-mips64.cpp',
             'jit/mips64/CodeGenerator-mips64.cpp',
             'jit/mips64/Lowering-mips64.cpp',
+            'jit/mips64/MacroAssembler-mips64.cpp',
             'jit/mips64/MoveEmitter-mips64.cpp',
             'jit/mips64/SharedIC-mips64.cpp',
             'jit/mips64/Trampoline-mips64.cpp',
         ]
         if CONFIG['JS_SIMULATOR_MIPS64']:
             UNIFIED_SOURCES += [
                 'jit/mips64/Simulator-mips64.cpp'
             ]