Bug 1303688 - Baldr: MIPS: Take advantage of guard page to simplify asm.js/wasm memory access. r=luke
author Heiher <r@hev.cc>
Wed, 21 Sep 2016 08:58:05 +0800
changeset 314554 3e7561636a36d48241f73d4d826305f8a0cc1a0c
parent 314553 8c51ff54a0ea1e6d527a80b157d67112235db01f
child 314555 9502a353f4dba3ff8f9be3496a7c46f21097093a
push id 81929
push user r@hev.cc
push date Wed, 21 Sep 2016 00:58:32 +0000
treeherder mozilla-inbound@9502a353f4db
reviewers luke
bugs 1303688
milestone 52.0a1
Bug 1303688 - Baldr: MIPS: Take advantage of guard page to simplify asm.js/wasm memory access. r=luke
---
 js/src/jit/mips-shared/Assembler-mips-shared.h       |  2 +
 .../jit/mips-shared/CodeGenerator-mips-shared.cpp    | 66 +++++----------------
 js/src/jit/mips-shared/CodeGenerator-mips-shared.h   |  1 +
 .../mips-shared/MacroAssembler-mips-shared-inl.h     |  3 +
 .../jit/mips-shared/MacroAssembler-mips-shared.cpp   | 31 ++++++++++
 .../jit/mips-shared/MacroAssembler-mips-shared.h     |  4 ++
 js/src/jit/mips32/MacroAssembler-mips32-inl.h        |  5 +-
 js/src/jit/mips32/MacroAssembler-mips32.cpp          | 20 ++++++-
 js/src/jit/mips32/MacroAssembler-mips32.h            |  6 +-
 js/src/jit/mips64/MacroAssembler-mips64-inl.h        |  5 +-
 js/src/jit/mips64/MacroAssembler-mips64.cpp          | 20 ++++++-
 js/src/jit/mips64/MacroAssembler-mips64.h            |  6 +-
 12 files changed, 106 insertions(+), 63 deletions(-)
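
Note on the approach: the patch relies on the wasm guard page. Instead of emitting an explicit bounds-check branch before every heap access (and the isAtomicAccess() jumps to the out-of-bounds trap), the generated code simply performs the access; an access past the end of the heap faults on the protection-trapped guard page and the fault handler redirects execution to the trap. The following is a minimal, stand-alone sketch of that mechanism, not SpiderMonkey code; it assumes POSIX mmap/mprotect/sigaction and uses an illustrative siglongjmp recovery path where the real runtime resumes at a trap stub.

    // Sketch: a small "heap" followed by an inaccessible guard page. An access
    // past the end of the heap faults on the guard page and is caught by a
    // SIGSEGV handler, so the fast path needs no bounds-check branch.
    #include <csetjmp>
    #include <csignal>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <sys/mman.h>

    static sigjmp_buf gRecover;

    static void onSegv(int) { siglongjmp(gRecover, 1); }

    int main()
    {
        const size_t heapSize = 64 * 1024;
        const size_t guardSize = 4096;

        // Reserve heap + guard page; only the heap part becomes accessible.
        uint8_t* heap = static_cast<uint8_t*>(
            mmap(nullptr, heapSize + guardSize, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
        mprotect(heap, heapSize, PROT_READ | PROT_WRITE);

        struct sigaction sa = {};
        sa.sa_handler = onSegv;
        sigaction(SIGSEGV, &sa, nullptr);

        if (sigsetjmp(gRecover, 1) == 0) {
            volatile uint8_t v = heap[heapSize + 8];  // lands on the guard page
            (void)v;
            puts("access succeeded (unexpected)");
        } else {
            // The real runtime redirects to wasm::JumpTarget::OutOfBounds here.
            puts("guard page caught the out-of-bounds access");
        }
        return 0;
    }

This is why the explicit ma_BoundsCheck comparisons in the atomic heap operations below can be dropped outright, and why the plain load/store paths no longer need a separate out-of-bounds branch for atomic accesses.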
js/src/jit/mips-shared/Assembler-mips-shared.h
js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
js/src/jit/mips-shared/CodeGenerator-mips-shared.h
js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
js/src/jit/mips-shared/MacroAssembler-mips-shared.h
js/src/jit/mips32/MacroAssembler-mips32-inl.h
js/src/jit/mips32/MacroAssembler-mips32.cpp
js/src/jit/mips32/MacroAssembler-mips32.h
js/src/jit/mips64/MacroAssembler-mips64-inl.h
js/src/jit/mips64/MacroAssembler-mips64.cpp
js/src/jit/mips64/MacroAssembler-mips64.h
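
The other half of the patch covers constant offsets folded into an access: the new visitWasmAddOffset lowers MWasmAddOffset through ma_addTestCarry, which adds base and offset with addu and detects 32-bit wrap-around with sltu, branching to wasm::JumpTarget::OutOfBounds when the carry is set. Below is a hedged C++ sketch of the check that the generated addu/sltu sequence performs; addWithCarryCheck is an illustrative name, not a SpiderMonkey helper.

    #include <cstdint>
    #include <cstdio>

    // Mirrors the generated sequence:
    //   addu  rd, rs, rt        -> result = base + offset (mod 2^32)
    //   sltu  scratch, rd, rs   -> scratch = (result < base), i.e. the carry
    //   bnez  scratch, OutOfBounds
    static bool addWithCarryCheck(uint32_t base, uint32_t offset, uint32_t* out)
    {
        uint32_t result = base + offset;
        bool carry = result < base;  // true iff the 32-bit addition wrapped
        *out = result;
        return carry;
    }

    int main()
    {
        uint32_t ptr;
        printf("carry=%d\n", addWithCarryCheck(0x1000u, 16u, &ptr));        // 0: access proceeds
        printf("carry=%d\n", addWithCarryCheck(0xFFFFFFF0u, 0x20u, &ptr));  // 1: would trap OutOfBounds
        return 0;
    }
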
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -771,16 +771,18 @@ class AssemblerMIPSShared : public Assem
         AboveOrEqual,
         Below,
         BelowOrEqual,
         GreaterThan,
         GreaterThanOrEqual,
         LessThan,
         LessThanOrEqual,
         Overflow,
+        CarrySet,
+        CarryClear,
         Signed,
         NotSigned,
         Zero,
         NonZero,
         Always,
     };
 
     enum DoubleCondition {
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1774,49 +1774,46 @@ CodeGeneratorMIPSShared::visitAsmJSLoadH
       case Scalar::Uint16:  isSigned = false; size = 16; break;
       case Scalar::Int32:   isSigned = true;  size = 32; break;
       case Scalar::Uint32:  isSigned = false; size = 32; break;
       case Scalar::Float64: isFloat  = true;  size = 64; break;
       case Scalar::Float32: isFloat  = true;  size = 32; break;
       default: MOZ_CRASH("unexpected array type");
     }
 
-    memoryBarrier(mir->barrierBefore());
     if (ptr->isConstant()) {
         MOZ_ASSERT(!mir->needsBoundsCheck());
         int32_t ptrImm = ptr->toConstant()->toInt32();
         MOZ_ASSERT(ptrImm >= 0);
         if (isFloat) {
             if (size == 32) {
                 masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
             } else {
                 masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
             }
         }  else {
             masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
                          static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
         }
-        memoryBarrier(mir->barrierAfter());
         return;
     }
 
     Register ptrReg = ToRegister(ptr);
 
     if (!mir->needsBoundsCheck()) {
         if (isFloat) {
             if (size == 32) {
                 masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
             } else {
                 masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
             }
         } else {
             masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                          static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
         }
-        memoryBarrier(mir->barrierAfter());
         return;
     }
 
     BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
 
     Label done, outOfRange;
     masm.ma_b(ptrReg, ScratchRegister, &outOfRange, Assembler::AboveOrEqual, ShortJump);
     // Offset is ok, let's load value.
@@ -1835,24 +1832,20 @@ CodeGeneratorMIPSShared::visitAsmJSLoadH
     if (isFloat) {
         if (size == 32)
             masm.loadFloat32(Address(GlobalReg, wasm::NaN32GlobalDataOffset - AsmJSGlobalRegBias),
                              ToFloatRegister(out));
         else
             masm.loadDouble(Address(GlobalReg, wasm::NaN64GlobalDataOffset - AsmJSGlobalRegBias),
                             ToFloatRegister(out));
     } else {
-        if (mir->isAtomicAccess())
-            masm.ma_b(wasm::JumpTarget::OutOfBounds);
-        else
-            masm.move32(Imm32(0), ToRegister(out));
+        masm.move32(Imm32(0), ToRegister(out));
     }
     masm.bind(&done);
 
-    memoryBarrier(mir->barrierAfter());
     masm.append(wasm::BoundsCheck(bo.getOffset()));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     const LAllocation* value = ins->value();
@@ -1868,34 +1861,32 @@ CodeGeneratorMIPSShared::visitAsmJSStore
       case Scalar::Uint16:  isSigned = false; size = 16; break;
       case Scalar::Int32:   isSigned = true;  size = 32; break;
       case Scalar::Uint32:  isSigned = false; size = 32; break;
       case Scalar::Float64: isFloat  = true;  size = 64; break;
       case Scalar::Float32: isFloat  = true;  size = 32; break;
       default: MOZ_CRASH("unexpected array type");
     }
 
-    memoryBarrier(mir->barrierBefore());
     if (ptr->isConstant()) {
         MOZ_ASSERT(!mir->needsBoundsCheck());
         int32_t ptrImm = ptr->toConstant()->toInt32();
         MOZ_ASSERT(ptrImm >= 0);
 
         if (isFloat) {
             FloatRegister freg = ToFloatRegister(value);
             Address addr(HeapReg, ptrImm);
             if (size == 32)
                 masm.storeFloat32(freg, addr);
             else
                 masm.storeDouble(freg, addr);
         }  else {
             masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
                           static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
         }
-        memoryBarrier(mir->barrierAfter());
         return;
     }
 
     Register ptrReg = ToRegister(ptr);
     Address dstAddr(ptrReg, 0);
 
     if (!mir->needsBoundsCheck()) {
         if (isFloat) {
@@ -1904,43 +1895,36 @@ CodeGeneratorMIPSShared::visitAsmJSStore
             if (size == 32)
                 masm.storeFloat32(freg, bi);
             else
                 masm.storeDouble(freg, bi);
         } else {
             masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
                           static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
         }
-        memoryBarrier(mir->barrierAfter());
         return;
     }
 
     BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
 
-    Label done, outOfRange;
+    Label outOfRange;
     masm.ma_b(ptrReg, ScratchRegister, &outOfRange, Assembler::AboveOrEqual, ShortJump);
 
     // Offset is ok, let's store value.
     if (isFloat) {
         if (size == 32) {
             masm.storeFloat32(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
         } else
             masm.storeDouble(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
     } else {
         masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
                       static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
     }
-    masm.ma_b(&done, ShortJump);
+
     masm.bind(&outOfRange);
-    // Offset is out of range.
-    if (mir->isAtomicAccess())
-        masm.ma_b(wasm::JumpTarget::OutOfBounds);
-    masm.bind(&done);
-
-    memoryBarrier(mir->barrierAfter());
     masm.append(wasm::BoundsCheck(bo.getOffset()));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->accessType();
@@ -1950,55 +1934,39 @@ CodeGeneratorMIPSShared::visitAsmJSCompa
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
     Register valueTemp = ToRegister(ins->valueTemp());
     Register offsetTemp = ToRegister(ins->offsetTemp());
     Register maskTemp = ToRegister(ins->maskTemp());
 
-    uint32_t maybeCmpOffset = 0;
-    if (mir->needsBoundsCheck()) {
-        BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
-        maybeCmpOffset = bo.getOffset();
-        masm.ma_b(ptrReg, ScratchRegister, wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-    }
     masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                         srcAddr, oldval, newval, InvalidReg,
                                         valueTemp, offsetTemp, maskTemp,
                                         ToAnyRegister(ins->output()));
-    if (mir->needsBoundsCheck())
-        masm.append(wasm::BoundsCheck(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     Register valueTemp = ToRegister(ins->valueTemp());
     Register offsetTemp = ToRegister(ins->offsetTemp());
     Register maskTemp = ToRegister(ins->maskTemp());
 
-    uint32_t maybeCmpOffset = 0;
-    if (mir->needsBoundsCheck()) {
-        BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
-        maybeCmpOffset = bo.getOffset();
-        masm.ma_b(ptrReg, ScratchRegister, wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-    }
     masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr, value, InvalidReg, valueTemp,
                                        offsetTemp, maskTemp, ToAnyRegister(ins->output()));
-    if (mir->needsBoundsCheck())
-        masm.append(wasm::BoundsCheck(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
     MOZ_ASSERT(ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
@@ -2009,34 +1977,26 @@ CodeGeneratorMIPSShared::visitAsmJSAtomi
     Register valueTemp = ToRegister(ins->valueTemp());
     Register offsetTemp = ToRegister(ins->offsetTemp());
     Register maskTemp = ToRegister(ins->maskTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
 
-    uint32_t maybeCmpOffset = 0;
-    if (mir->needsBoundsCheck()) {
-        BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
-        maybeCmpOffset = bo.getOffset();
-        masm.ma_b(ptrReg, ScratchRegister, wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-    }
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                    Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
                                    valueTemp, offsetTemp, maskTemp,
                                    ToAnyRegister(ins->output()));
     else
         atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                    ToRegister(value), srcAddr, flagTemp, InvalidReg,
                                    valueTemp, offsetTemp, maskTemp,
                                    ToAnyRegister(ins->output()));
-    if (mir->needsBoundsCheck())
-        masm.append(wasm::BoundsCheck(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MOZ_ASSERT(!ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
@@ -2047,32 +2007,22 @@ CodeGeneratorMIPSShared::visitAsmJSAtomi
     Register valueTemp = ToRegister(ins->valueTemp());
     Register offsetTemp = ToRegister(ins->offsetTemp());
     Register maskTemp = ToRegister(ins->maskTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
 
-    uint32_t maybeCmpOffset = 0;
-    if (mir->needsBoundsCheck()) {
-        BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
-        maybeCmpOffset = bo.getOffset();
-        masm.ma_b(ptrReg, ScratchRegister, wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-    }
-
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp,
                                    valueTemp, offsetTemp, maskTemp);
     else
         atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp,
                                    valueTemp, offsetTemp, maskTemp);
-
-    if (mir->needsBoundsCheck())
-        masm.append(wasm::BoundsCheck(maybeCmpOffset));
 }
 
 void
 CodeGeneratorMIPSShared::visitAsmJSPassStackArg(LAsmJSPassStackArg* ins)
 {
     const MAsmJSPassStackArg* mir = ins->mir();
     if (ins->arg()->isConstant()) {
         masm.storePtr(ImmWord(ToInt32(ins->arg())), Address(StackPointer, mir->spOffset()));
@@ -2532,16 +2482,26 @@ CodeGeneratorMIPSShared::atomicBinopToTy
                                                     Register offsetTemp, Register maskTemp);
 template void
 CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
                                                     const Register& value, const BaseIndex& mem,
                                                     Register flagTemp, Register valueTemp,
                                                     Register offsetTemp, Register maskTemp);
 
 
+void
+CodeGeneratorMIPSShared::visitWasmAddOffset(LWasmAddOffset* lir)
+{
+    MWasmAddOffset* mir = lir->mir();
+    Register base = ToRegister(lir->base());
+    Register out = ToRegister(lir->output());
+
+    masm.ma_addTestCarry(out, base, Imm32(mir->offset()), wasm::JumpTarget::OutOfBounds);
+}
+
 template <typename T>
 static inline void
 AtomicBinopToTypedArray(CodeGeneratorMIPSShared* cg, AtomicOp op,
                         Scalar::Type arrayType, const LAllocation* value, const T& mem,
                         Register flagTemp, Register outTemp, Register valueTemp,
                         Register offsetTemp, Register maskTemp, AnyRegister output)
 {
     if (value->isConstant())
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
@@ -193,16 +193,17 @@ class CodeGeneratorMIPSShared : public C
     void visitNegD(LNegD* lir);
     void visitNegF(LNegF* lir);
     void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
     void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
     void visitWasmCall(LWasmCall* ins);
     void visitWasmCallI64(LWasmCallI64* ins);
     void visitWasmLoad(LWasmLoad* ins);
     void visitWasmStore(LWasmStore* ins);
+    void visitWasmAddOffset(LWasmAddOffset* ins);
     void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
     void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
     void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
     void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
     void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
     void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
 
     void visitAsmJSPassStackArg(LAsmJSPassStackArg* ins);
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -626,16 +626,19 @@ MacroAssembler::branchTruncateDoubleToIn
 template <typename T, typename L>
 void
 MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L overflow)
 {
     switch (cond) {
       case Overflow:
         ma_addTestOverflow(dest, dest, src, overflow);
         break;
+      case CarrySet:
+        ma_addTestCarry(dest, dest, src, overflow);
+        break;
       default:
         MOZ_CRASH("NYI");
     }
 }
 
 template <typename T>
 void
 MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* overflow)
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -216,16 +216,47 @@ MacroAssemblerMIPSShared::ma_addu(Regist
 }
 
 void
 MacroAssemblerMIPSShared::ma_addu(Register rd, Imm32 imm)
 {
     ma_addu(rd, rd, imm);
 }
 
+template <typename L>
+void
+MacroAssemblerMIPSShared::ma_addTestCarry(Register rd, Register rs, Register rt, L overflow)
+{
+    as_addu(rd, rs, rt);
+    as_sltu(SecondScratchReg, rd, rs);
+    ma_b(SecondScratchReg, SecondScratchReg, overflow, Assembler::NonZero);
+}
+
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<Label*>(Register rd, Register rs,
+                                                  Register rt, Label* overflow);
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<wasm::JumpTarget>(Register rd, Register rs, Register rt,
+                                                            wasm::JumpTarget overflow);
+
+template <typename L>
+void
+MacroAssemblerMIPSShared::ma_addTestCarry(Register rd, Register rs, Imm32 imm, L overflow)
+{
+    ma_li(ScratchRegister, imm);
+    ma_addTestCarry(rd, rs, ScratchRegister, overflow);
+}
+
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<Label*>(Register rd, Register rs,
+                                                  Imm32 imm, Label* overflow);
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<wasm::JumpTarget>(Register rd, Register rs, Imm32 imm,
+                                                            wasm::JumpTarget overflow);
+
 // Subtract.
 void
 MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs, Imm32 imm)
 {
     if (Imm16::IsInSignedRange(-imm.value)) {
         as_addiu(rd, rs, -imm.value);
     } else {
         ma_li(ScratchRegister, imm);
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
@@ -111,16 +111,20 @@ class MacroAssemblerMIPSShared : public 
     void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
                   LoadStoreExtension extension = SignExtend);
 
     // arithmetic based ops
     // add
     void ma_addu(Register rd, Register rs, Imm32 imm);
     void ma_addu(Register rd, Register rs);
     void ma_addu(Register rd, Imm32 imm);
+    template <typename L>
+    void ma_addTestCarry(Register rd, Register rs, Register rt, L overflow);
+    template <typename L>
+    void ma_addTestCarry(Register rd, Register rs, Imm32 imm, L overflow);
 
     // subtract
     void ma_subu(Register rd, Register rs, Imm32 imm);
     void ma_subu(Register rd, Register rs);
     void ma_subu(Register rd, Imm32 imm);
     void ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow);
 
     // multiplies.  For now, there are only few that we care about.
--- a/js/src/jit/mips32/MacroAssembler-mips32-inl.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
@@ -454,17 +454,20 @@ MacroAssembler::storeUncanonicalizedFloa
 
 // ========================================================================
 // wasm support
 
 template <class L>
 void
 MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
 {
-    MOZ_CRASH("NYI");
+    BufferOffset bo = ma_BoundsCheck(ScratchRegister);
+    append(wasm::BoundsCheck(bo.getOffset()));
+
+    ma_b(index, ScratchRegister, label, cond);
 }
 
 void
 MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
 {
     Instruction* inst = (Instruction*) patchAt;
     InstImm* i0 = (InstImm*) inst;
     InstImm* i1 = (InstImm*) i0->next();
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -216,34 +216,43 @@ void
 MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmWord imm)
 {
     ma_liPatchable(dest, Imm32(int32_t(imm.value)));
 }
 
 // Arithmetic-based ops.
 
 // Add.
+template <typename L>
 void
-MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
+MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow)
 {
     Label goodAddition;
     as_addu(rd, rs, rt);
 
     as_xor(ScratchRegister, rs, rt); // If different sign, no overflow
     ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
 
     // If different sign, then overflow
     as_xor(ScratchRegister, rs, rd);
     ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
 
     bind(&goodAddition);
 }
 
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<Label*>(Register rd, Register rs,
+                                               Register rt, Label* overflow);
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<wasm::JumpTarget>(Register rd, Register rs, Register rt,
+                                                         wasm::JumpTarget overflow);
+
+template <typename L>
 void
-MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow)
+MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow)
 {
     // Check for signed range because of as_addiu
     // Check for unsigned range because of as_xori
     if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
         Label goodAddition;
         as_addiu(rd, rs, imm.value);
 
         // If different sign, no overflow
@@ -256,16 +265,23 @@ MacroAssemblerMIPS::ma_addTestOverflow(R
 
         bind(&goodAddition);
     } else {
         ma_li(ScratchRegister, imm);
         ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
     }
 }
 
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<Label*>(Register rd, Register rs,
+                                               Imm32 imm, Label* overflow);
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<wasm::JumpTarget>(Register rd, Register rs, Imm32 imm,
+                                                         wasm::JumpTarget overflow);
+
 // Subtract.
 void
 MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
 {
     Label goodSubtraction;
     // Use second scratch. The instructions generated by ma_b don't use the
     // second scratch register.
     as_subu(rd, rs, rt);
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -65,18 +65,20 @@ class MacroAssemblerMIPS : public MacroA
                  LoadStoreExtension extension = SignExtend);
 
     // store
     void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
                   LoadStoreExtension extension = SignExtend);
 
     // arithmetic based ops
     // add
-    void ma_addTestOverflow(Register rd, Register rs, Register rt, Label* overflow);
-    void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow);
+    template <typename L>
+    void ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow);
+    template <typename L>
+    void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow);
 
     // subtract
     void ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow);
 
     // memory
     // shortcut for when we know we're transferring 32 bits of data
     void ma_lw(Register data, Address address);
 
--- a/js/src/jit/mips64/MacroAssembler-mips64-inl.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64-inl.h
@@ -411,17 +411,20 @@ MacroAssembler::storeUncanonicalizedFloa
 
 // ========================================================================
 // wasm support
 
 template <class L>
 void
 MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
 {
-    MOZ_CRASH("NYI");
+    BufferOffset bo = ma_BoundsCheck(ScratchRegister);
+    append(wasm::BoundsCheck(bo.getOffset()));
+
+    ma_b(index, ScratchRegister, label, cond);
 }
 
 void
 MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
 {
     // Replace with new value
     Assembler::UpdateLoad64Value((Instruction*) patchAt, limit);
 }
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -395,38 +395,54 @@ MacroAssemblerMIPS64::ma_daddu(Register 
 }
 
 void
 MacroAssemblerMIPS64::ma_daddu(Register rd, Imm32 imm)
 {
     ma_daddu(rd, rd, imm);
 }
 
+template <typename L>
 void
-MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
+MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow)
 {
     as_daddu(SecondScratchReg, rs, rt);
     as_addu(rd, rs, rt);
     ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
 }
 
+template void
+MacroAssemblerMIPS64::ma_addTestOverflow<Label*>(Register rd, Register rs,
+                                                 Register rt, Label* overflow);
+template void
+MacroAssemblerMIPS64::ma_addTestOverflow<wasm::JumpTarget>(Register rd, Register rs, Register rt,
+                                                           wasm::JumpTarget overflow);
+
+template <typename L>
 void
-MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow)
+MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow)
 {
     // Check for signed range because of as_daddiu
     if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
         as_daddiu(SecondScratchReg, rs, imm.value);
         as_addiu(rd, rs, imm.value);
         ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
     } else {
         ma_li(ScratchRegister, imm);
         ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
     }
 }
 
+template void
+MacroAssemblerMIPS64::ma_addTestOverflow<Label*>(Register rd, Register rs,
+                                                 Imm32 imm, Label* overflow);
+template void
+MacroAssemblerMIPS64::ma_addTestOverflow<wasm::JumpTarget>(Register rd, Register rs, Imm32 imm,
+                                                           wasm::JumpTarget overflow);
+
 // Subtract.
 void
 MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs, Imm32 imm)
 {
     if (Imm16::IsInSignedRange(-imm.value)) {
         as_daddiu(rd, rs, -imm.value);
     } else {
         ma_li(ScratchRegister, imm);
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -87,18 +87,20 @@ class MacroAssemblerMIPS64 : public Macr
     void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
                   LoadStoreExtension extension = SignExtend);
 
     // arithmetic based ops
     // add
     void ma_daddu(Register rd, Register rs, Imm32 imm);
     void ma_daddu(Register rd, Register rs);
     void ma_daddu(Register rd, Imm32 imm);
-    void ma_addTestOverflow(Register rd, Register rs, Register rt, Label* overflow);
-    void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow);
+    template <typename L>
+    void ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow);
+    template <typename L>
+    void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow);
 
     // subtract
     void ma_dsubu(Register rd, Register rs, Imm32 imm);
     void ma_dsubu(Register rd, Imm32 imm);
     void ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow);
 
     // multiplies.  For now, there are only few that we care about.
     void ma_dmult(Register rs, Imm32 imm);