Bug 1303690 - Baldr: MIPS: Take alignment hints into account when compiling load/store. r=bbouvier
authorHeiher <r@hev.cc>
Mon, 10 Oct 2016 17:08:45 +0800
changeset 317318 25bc09ae4be007dfe33f133c6a4d7ff1d0e51a42
parent 317317 241c41e008ca2a8ec9459d81f319e97d4c9892ae
child 317319 1215be5477392bdc60bd4da85e174d5c6417a096
push id: 30800
push user: philringnalda@gmail.com
push date: Tue, 11 Oct 2016 02:08:53 +0000
treeherder: mozilla-central@ece56e142a1e [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: bbouvier
bugs: 1303690
milestone: 52.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1303690 - Baldr: MIPS: Take alignment hints into account when compiling load/store. r=bbouvier --- .../jit/mips-shared/CodeGenerator-mips-shared.cpp | 72 +++++++++++++- js/src/jit/mips-shared/CodeGenerator-mips-shared.h | 7 ++ js/src/jit/mips-shared/LIR-mips-shared.h | 104 +++++++++++++++++++++ js/src/jit/mips-shared/Lowering-mips-shared.cpp | 46 ++++++++- .../jit/mips-shared/MacroAssembler-mips-shared.cpp | 89 ++++++++++++++++++ .../jit/mips-shared/MacroAssembler-mips-shared.h | 6 ++ js/src/jit/mips32/CodeGenerator-mips32.cpp | 68 +++++++++++++- js/src/jit/mips32/CodeGenerator-mips32.h | 7 ++ js/src/jit/mips32/LOpcodes-mips32.h | 4 + js/src/jit/mips32/MacroAssembler-mips32.cpp | 87 +++++++++++++++++ js/src/jit/mips32/MacroAssembler-mips32.h | 6 ++ js/src/jit/mips64/CodeGenerator-mips64.cpp | 47 +++++++++- js/src/jit/mips64/CodeGenerator-mips64.h | 7 ++ js/src/jit/mips64/LOpcodes-mips64.h | 4 + js/src/jit/mips64/MacroAssembler-mips64.cpp | 74 +++++++++++++++ js/src/jit/mips64/MacroAssembler-mips64.h | 6 ++ 16 files changed, 625 insertions(+), 9 deletions(-)
js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
js/src/jit/mips-shared/CodeGenerator-mips-shared.h
js/src/jit/mips-shared/LIR-mips-shared.h
js/src/jit/mips-shared/Lowering-mips-shared.cpp
js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
js/src/jit/mips-shared/MacroAssembler-mips-shared.h
js/src/jit/mips32/CodeGenerator-mips32.cpp
js/src/jit/mips32/CodeGenerator-mips32.h
js/src/jit/mips32/LOpcodes-mips32.h
js/src/jit/mips32/MacroAssembler-mips32.cpp
js/src/jit/mips32/MacroAssembler-mips32.h
js/src/jit/mips64/CodeGenerator-mips64.cpp
js/src/jit/mips64/CodeGenerator-mips64.h
js/src/jit/mips64/LOpcodes-mips64.h
js/src/jit/mips64/MacroAssembler-mips64.cpp
js/src/jit/mips64/MacroAssembler-mips64.h
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1883,18 +1883,19 @@ CodeGeneratorMIPSShared::visitWasmCall(L
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmCallI64(LWasmCallI64* ins)
 {
     emitWasmCallBase(ins);
 }
 
+template <typename T>
 void
-CodeGeneratorMIPSShared::visitWasmLoad(LWasmLoad* lir)
+CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
 
     uint32_t offset = mir->offset();
     MOZ_ASSERT(offset <= INT32_MAX);
 
     Register ptr = ToRegister(lir->ptr());
 
@@ -1920,31 +1921,64 @@ CodeGeneratorMIPSShared::visitWasmLoad(L
       case Scalar::Uint32:  isSigned = false; break;
       case Scalar::Float64: isFloat  = true;  break;
       case Scalar::Float32: isFloat  = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
     memoryBarrier(mir->barrierBefore());
 
+    if (mir->isUnaligned()) {
+        Register temp = ToRegister(lir->getTemp(1));
+
+        if (isFloat) {
+            if (byteSize == 4) {
+                masm.loadUnalignedFloat32(BaseIndex(HeapReg, ptr, TimesOne), temp,
+                                          ToFloatRegister(lir->output()));
+            } else
+                masm.loadUnalignedDouble(BaseIndex(HeapReg, ptr, TimesOne), temp,
+                                         ToFloatRegister(lir->output()));
+        } else {
+            masm.ma_load_unaligned(ToRegister(lir->output()), BaseIndex(HeapReg, ptr, TimesOne),
+                                   temp, static_cast<LoadStoreSize>(8 * byteSize),
+                                   isSigned ? SignExtend : ZeroExtend);
+        }
+
+        memoryBarrier(mir->barrierAfter());
+        return;
+    }
+
     if (isFloat) {
         if (byteSize == 4) {
             masm.loadFloat32(BaseIndex(HeapReg, ptr, TimesOne), ToFloatRegister(lir->output()));
         } else
             masm.loadDouble(BaseIndex(HeapReg, ptr, TimesOne), ToFloatRegister(lir->output()));
     } else {
         masm.ma_load(ToRegister(lir->output()), BaseIndex(HeapReg, ptr, TimesOne),
-                      static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+                     static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
     }
 
     memoryBarrier(mir->barrierAfter());
 }
 
 void
-CodeGeneratorMIPSShared::visitWasmStore(LWasmStore* lir)
+CodeGeneratorMIPSShared::visitWasmLoad(LWasmLoad* lir)
+{
+    emitWasmLoad(lir);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir)
+{
+    emitWasmLoad(lir);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPSShared::emitWasmStore(T* lir)
 {
     const MWasmStore* mir = lir->mir();
 
     uint32_t offset = mir->offset();
     MOZ_ASSERT(offset <= INT32_MAX);
 
     Register ptr = ToRegister(lir->ptr());
 
@@ -1971,30 +2005,62 @@ CodeGeneratorMIPSShared::visitWasmStore(
       case Scalar::Int64:   isSigned = true;  break;
       case Scalar::Float64: isFloat  = true;  break;
       case Scalar::Float32: isFloat  = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
     memoryBarrier(mir->barrierBefore());
 
+    if (mir->isUnaligned()) {
+        Register temp = ToRegister(lir->getTemp(1));
+
+        if (isFloat) {
+            if (byteSize == 4) {
+                masm.storeUnalignedFloat32(ToFloatRegister(lir->value()), temp,
+                                           BaseIndex(HeapReg, ptr, TimesOne));
+            } else
+                masm.storeUnalignedDouble(ToFloatRegister(lir->value()), temp,
+                                          BaseIndex(HeapReg, ptr, TimesOne));
+        } else {
+            masm.ma_store_unaligned(ToRegister(lir->value()), BaseIndex(HeapReg, ptr, TimesOne),
+                                    temp, static_cast<LoadStoreSize>(8 * byteSize),
+                                    isSigned ? SignExtend : ZeroExtend);
+        }
+
+        memoryBarrier(mir->barrierAfter());
+        return;
+    }
+
     if (isFloat) {
         if (byteSize == 4) {
             masm.storeFloat32(ToFloatRegister(lir->value()), BaseIndex(HeapReg, ptr, TimesOne));
         } else
             masm.storeDouble(ToFloatRegister(lir->value()), BaseIndex(HeapReg, ptr, TimesOne));
     } else {
         masm.ma_store(ToRegister(lir->value()), BaseIndex(HeapReg, ptr, TimesOne),
                       static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
     }
 
     memoryBarrier(mir->barrierAfter());
 }
 
 void
+CodeGeneratorMIPSShared::visitWasmStore(LWasmStore* lir)
+{
+    emitWasmStore(lir);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmUnalignedStore(LWasmUnalignedStore* lir)
+{
+    emitWasmStore(lir);
+}
+
+void
 CodeGeneratorMIPSShared::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
 {
     const MAsmJSLoadHeap* mir = ins->mir();
     const LAllocation* ptr = ins->ptr();
     const LDefinition* out = ins->output();
 
     bool isSigned;
     int size;
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
@@ -114,16 +114,21 @@ class CodeGeneratorMIPSShared : public C
         }
     }
     void testZeroEmitBranch(Assembler::Condition cond, Register reg,
                             MBasicBlock* ifTrue, MBasicBlock* ifFalse)
     {
         emitBranch(reg, Imm32(0), cond, ifTrue, ifFalse);
     }
 
+    template <typename T>
+    void emitWasmLoad(T* ins);
+    template <typename T>
+    void emitWasmStore(T* ins);
+
   public:
     // Instruction visitors.
     virtual void visitMinMaxD(LMinMaxD* ins);
     virtual void visitMinMaxF(LMinMaxF* ins);
     virtual void visitAbsD(LAbsD* ins);
     virtual void visitAbsF(LAbsF* ins);
     virtual void visitSqrtD(LSqrtD* ins);
     virtual void visitSqrtF(LSqrtF* ins);
@@ -209,17 +214,19 @@ class CodeGeneratorMIPSShared : public C
     void visitNegI(LNegI* lir);
     void visitNegD(LNegD* lir);
     void visitNegF(LNegF* lir);
     void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
     void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
     void visitWasmCall(LWasmCall* ins);
     void visitWasmCallI64(LWasmCallI64* ins);
     void visitWasmLoad(LWasmLoad* ins);
+    void visitWasmUnalignedLoad(LWasmUnalignedLoad* ins);
     void visitWasmStore(LWasmStore* ins);
+    void visitWasmUnalignedStore(LWasmUnalignedStore* ins);
     void visitWasmAddOffset(LWasmAddOffset* ins);
     void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
     void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
     void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
     void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
     void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
     void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
 
--- a/js/src/jit/mips-shared/LIR-mips-shared.h
+++ b/js/src/jit/mips-shared/LIR-mips-shared.h
@@ -286,12 +286,116 @@ class LInt64ToFloatingPoint : public LIn
         setInt64Operand(0, in);
     }
 
     MInt64ToFloatingPoint* mir() const {
         return mir_->toInt64ToFloatingPoint();
     }
 };
 
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+template<size_t NumDefs>
+class LWasmUnalignedLoadBase : public details::LWasmLoadBase<NumDefs, 2>
+{
+  public:
+    typedef LWasmLoadBase<NumDefs, 2> Base;
+
+    explicit LWasmUnalignedLoadBase(const LAllocation& ptr, const LDefinition& valueHelper)
+      : Base(ptr)
+    {
+        Base::setTemp(0, LDefinition::BogusTemp());
+        Base::setTemp(1, valueHelper);
+    }
+    const LAllocation* ptr() {
+        return Base::getOperand(0);
+    }
+    const LDefinition* ptrCopy() {
+        return Base::getTemp(0);
+    }
+};
+
+} // namespace details
+
+class LWasmUnalignedLoad : public details::LWasmUnalignedLoadBase<1>
+{
+  public:
+    explicit LWasmUnalignedLoad(const LAllocation& ptr, const LDefinition& valueHelper)
+      : LWasmUnalignedLoadBase(ptr, valueHelper)
+    {}
+    LIR_HEADER(WasmUnalignedLoad);
+};
+
+class LWasmUnalignedLoadI64 : public details::LWasmUnalignedLoadBase<INT64_PIECES>
+{
+  public:
+    explicit LWasmUnalignedLoadI64(const LAllocation& ptr, const LDefinition& valueHelper)
+      : LWasmUnalignedLoadBase(ptr, valueHelper)
+    {}
+    LIR_HEADER(WasmUnalignedLoadI64);
+};
+
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+template<size_t NumOps>
+class LWasmUnalignedStoreBase : public LInstructionHelper<0, NumOps, 2>
+{
+  public:
+    typedef LInstructionHelper<0, NumOps, 2> Base;
+
+    static const size_t PtrIndex = 0;
+    static const size_t ValueIndex = 1;
+
+    LWasmUnalignedStoreBase(const LAllocation& ptr, const LDefinition& valueHelper)
+    {
+        Base::setOperand(0, ptr);
+        Base::setTemp(0, LDefinition::BogusTemp());
+        Base::setTemp(1, valueHelper);
+    }
+    MWasmStore* mir() const {
+        return Base::mir_->toWasmStore();
+    }
+    const LAllocation* ptr() {
+        return Base::getOperand(PtrIndex);
+    }
+    const LDefinition* ptrCopy() {
+        return Base::getTemp(0);
+    }
+};
+
+} // namespace details
+
+class LWasmUnalignedStore : public details::LWasmUnalignedStoreBase<2>
+{
+  public:
+    LIR_HEADER(WasmUnalignedStore);
+    LWasmUnalignedStore(const LAllocation& ptr, const LAllocation& value,
+                        const LDefinition& valueHelper)
+      : LWasmUnalignedStoreBase(ptr, valueHelper)
+    {
+        setOperand(1, value);
+    }
+    const LAllocation* value() {
+        return Base::getOperand(ValueIndex);
+    }
+};
+
+class LWasmUnalignedStoreI64 : public details::LWasmUnalignedStoreBase<1 + INT64_PIECES>
+{
+  public:
+    LIR_HEADER(WasmUnalignedStoreI64);
+    LWasmUnalignedStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
+                           const LDefinition& valueHelper)
+      : LWasmUnalignedStoreBase(ptr, valueHelper)
+    {
+        setInt64Operand(1, value);
+    }
+    const LInt64Allocation value() {
+        return getInt64Operand(ValueIndex);
+    }
+};
+
 } // namespace jit
 } // namespace js
 
 #endif /* jit_mips_shared_LIR_mips_shared_h */
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -317,41 +317,83 @@ LIRGeneratorMIPSShared::visitAsmJSNeg(MA
 }
 
 void
 LIRGeneratorMIPSShared::visitWasmLoad(MWasmLoad* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
+    LAllocation ptr = useRegisterAtStart(base);
+
+    if (ins->isUnaligned()) {
+        if (ins->type() == MIRType::Int64) {
+            auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, temp());
+            if (ins->offset())
+                lir->setTemp(0, tempCopy(base, 0));
+
+            defineInt64(lir, ins);
+            return;
+        }
+
+        auto* lir = new(alloc()) LWasmUnalignedLoad(ptr, temp());
+        if (ins->offset())
+            lir->setTemp(0, tempCopy(base, 0));
+
+        define(lir, ins);
+
+        return;
+    }
+
     if (ins->type() == MIRType::Int64) {
-        auto* lir = new(alloc()) LWasmLoadI64(useRegisterAtStart(base));
+        auto* lir = new(alloc()) LWasmLoadI64(ptr);
         if (ins->offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         defineInt64(lir, ins);
         return;
     }
 
-    auto* lir = new(alloc()) LWasmLoad(useRegisterAtStart(base));
+    auto* lir = new(alloc()) LWasmLoad(ptr);
     if (ins->offset())
         lir->setTemp(0, tempCopy(base, 0));
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     MDefinition* value = ins->value();
     LAllocation baseAlloc = useRegisterAtStart(base);
 
+    if (ins->isUnaligned()) {
+        if (ins->type() == MIRType::Int64) {
+            LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+            auto* lir = new(alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
+            if (ins->offset())
+                lir->setTemp(0, tempCopy(base, 0));
+
+            add(lir, ins);
+            return;
+        }
+
+        LAllocation valueAlloc = useRegisterAtStart(value);
+        auto* lir = new(alloc()) LWasmUnalignedStore(baseAlloc, valueAlloc, temp());
+        if (ins->offset())
+            lir->setTemp(0, tempCopy(base, 0));
+
+        add(lir, ins);
+
+        return;
+    }
+
     if (ins->type() == MIRType::Int64) {
         LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
         auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
         if (ins->offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         add(lir, ins);
         return;
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -441,16 +441,64 @@ MacroAssemblerMIPSShared::ma_load(Regist
         return;
     }
 
     asMasm().computeScaledAddress(src, SecondScratchReg);
     asMasm().ma_load(dest, Address(SecondScratchReg, src.offset), size, extension);
 }
 
 void
+MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src,
+                                  Register temp, LoadStoreSize size, LoadStoreExtension extension)
+{
+    int16_t encodedOffset;
+    Register base;
+
+    asMasm().computeScaledAddress(src, SecondScratchReg);
+
+    if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
+        encodedOffset = Imm16(src.offset).encode();
+        base = SecondScratchReg;
+    } else {
+        ma_li(ScratchRegister, Imm32(src.offset));
+        as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+        base = ScratchRegister;
+        encodedOffset = Imm16(0).encode();
+    }
+
+    switch (size) {
+      case SizeByte:
+        if (ZeroExtend == extension)
+            as_lbu(dest, base, encodedOffset);
+        else
+            as_lb(dest, base, encodedOffset);
+        break;
+      case SizeHalfWord:
+        as_lbu(dest, base, encodedOffset);
+        as_lbu(temp, base, encodedOffset + 1);
+        as_ins(dest, temp, 8, 8);
+        if (ZeroExtend != extension)
+            as_seh(dest, dest);
+        break;
+      case SizeWord:
+        as_lwl(dest, base, encodedOffset + 3);
+        as_lwr(dest, base, encodedOffset);
+        if (ZeroExtend == extension)
+            as_ext(dest, dest, 0, 32);
+        break;
+      case SizeDouble:
+        as_ldl(dest, base, encodedOffset + 7);
+        as_ldr(dest, base, encodedOffset);
+        break;
+      default:
+        MOZ_CRASH("Invalid argument for ma_load");
+    }
+}
+
+void
 MacroAssemblerMIPSShared::ma_store(Register data, const BaseIndex& dest,
                                    LoadStoreSize size, LoadStoreExtension extension)
 {
     if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
         Register index = dest.index;
 
         if (dest.scale != TimesOne) {
             int32_t shift = Imm32::ShiftOf(dest.scale).value;
@@ -540,16 +588,57 @@ MacroAssemblerMIPSShared::ma_store(Imm32
     // Scrach register is free now, use it for loading imm value
     ma_li(ScratchRegister, imm);
 
     // with offset=0 ScratchRegister will not be used in ma_store()
     // so we can use it as a parameter here
     asMasm().ma_store(ScratchRegister, Address(SecondScratchReg, 0), size, extension);
 }
 
+void
+MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& dest,
+                                   Register temp, LoadStoreSize size, LoadStoreExtension extension)
+{
+    int16_t encodedOffset;
+    Register base;
+
+    asMasm().computeEffectiveAddress(dest, SecondScratchReg);
+
+    if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
+        encodedOffset = Imm16(dest.offset).encode();
+        base = SecondScratchReg;
+    } else {
+        ma_li(ScratchRegister, Imm32(dest.offset));
+        as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+        base = ScratchRegister;
+        encodedOffset = Imm16(0).encode();
+    }
+
+    switch (size) {
+      case SizeByte:
+        as_sb(data, base, encodedOffset);
+        break;
+      case SizeHalfWord:
+        as_sb(data, base, encodedOffset);
+        as_ext(temp, data, 8, 8);
+        as_sb(temp, base, encodedOffset + 1);
+        break;
+      case SizeWord:
+        as_swl(data, base, encodedOffset + 3);
+        as_swr(data, base, encodedOffset);
+        break;
+      case SizeDouble:
+        as_sdl(data, base, encodedOffset + 7);
+        as_sdr(data, base, encodedOffset);
+        break;
+      default:
+        MOZ_CRASH("Invalid argument for ma_store");
+    }
+}
+
 // Branches when done from within mips-specific code.
 void
 MacroAssemblerMIPSShared::ma_b(Register lhs, Register rhs, Label* label, Condition c, JumpKind jumpKind)
 {
     switch (c) {
       case Equal :
       case NotEqual:
         asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
@@ -99,22 +99,28 @@ class MacroAssemblerMIPSShared : public 
     void ma_xor(Register rd, Imm32 imm);
     void ma_xor(Register rd, Register rs, Imm32 imm);
 
     void ma_ctz(Register rd, Register rs);
 
     // load
     void ma_load(Register dest, const BaseIndex& src, LoadStoreSize size = SizeWord,
                  LoadStoreExtension extension = SignExtend);
+    void ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
+                           LoadStoreSize size = SizeWord,
+                           LoadStoreExtension extension = SignExtend);
 
     // store
     void ma_store(Register data, const BaseIndex& dest, LoadStoreSize size = SizeWord,
                   LoadStoreExtension extension = SignExtend);
     void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
                   LoadStoreExtension extension = SignExtend);
+    void ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
+                            LoadStoreSize size = SizeWord,
+                            LoadStoreExtension extension = SignExtend);
 
     // arithmetic based ops
     // add
     void ma_addu(Register rd, Register rs, Imm32 imm);
     void ma_addu(Register rd, Register rs);
     void ma_addu(Register rd, Imm32 imm);
     template <typename L>
     void ma_addTestCarry(Register rd, Register rs, Register rt, L overflow);
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -448,18 +448,19 @@ CodeGeneratorMIPS::visitUDivOrModI64(LUD
 
     MOZ_ASSERT(gen->compilingAsmJS());
     if (lir->mir()->isMod())
         masm.callWithABI(wasm::SymbolicAddress::UModI64);
     else
         masm.callWithABI(wasm::SymbolicAddress::UDivI64);
 }
 
+template <typename T>
 void
-CodeGeneratorMIPS::visitWasmLoadI64(LWasmLoadI64* lir)
+CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
     Register64 output = ToOutRegister64(lir);
 
     uint32_t offset = mir->offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptr());
@@ -483,16 +484,38 @@ CodeGeneratorMIPS::visitWasmLoadI64(LWas
         case Scalar::Uint32: isSigned = false; break;
         case Scalar::Int64:  isSigned = true; break;
         default: MOZ_CRASH("unexpected array type");
     }
 
     memoryBarrier(mir->barrierBefore());
 
     MOZ_ASSERT(INT64LOW_OFFSET == 0);
+    if (mir->isUnaligned()) {
+        Register temp = ToRegister(lir->getTemp(1));
+
+        if (byteSize <= 4) {
+            masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
+                                   temp, static_cast<LoadStoreSize>(8 * byteSize),
+                                   isSigned ? SignExtend : ZeroExtend);
+            if (!isSigned)
+                masm.move32(Imm32(0), output.high);
+            else
+                masm.ma_sra(output.high, output.low, Imm32(31));
+
+        } else {
+            ScratchRegisterScope scratch(masm);
+            masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne), temp, SizeWord);
+            masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
+            masm.ma_load_unaligned(output.high, BaseIndex(HeapReg, scratch, TimesOne),
+                                   temp, SizeWord);
+        }
+        return;
+    }
+
     if (byteSize <= 4) {
         masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne),
                      static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
         if (!isSigned)
             masm.move32(Imm32(0), output.high);
         else
             masm.ma_sra(output.high, output.low, Imm32(31));
     } else {
@@ -501,17 +524,30 @@ CodeGeneratorMIPS::visitWasmLoadI64(LWas
         masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
         masm.ma_load(output.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
     }
 
     memoryBarrier(mir->barrierAfter());
 }
 
 void
-CodeGeneratorMIPS::visitWasmStoreI64(LWasmStoreI64* lir)
+CodeGeneratorMIPS::visitWasmLoadI64(LWasmLoadI64* lir)
+{
+    emitWasmLoadI64(lir);
+}
+
+void
+CodeGeneratorMIPS::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir)
+{
+    emitWasmLoadI64(lir);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
 {
     const MWasmStore* mir = lir->mir();
     Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
 
     uint32_t offset = mir->offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptr());
@@ -524,30 +560,58 @@ CodeGeneratorMIPS::visitWasmStoreI64(LWa
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
     unsigned byteSize = mir->byteSize();
 
     memoryBarrier(mir->barrierBefore());
 
     MOZ_ASSERT(INT64LOW_OFFSET == 0);
+    if (mir->isUnaligned()) {
+        Register temp = ToRegister(lir->getTemp(1));
+
+        if (byteSize <= 4) {
+            masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
+                                    temp, static_cast<LoadStoreSize>(8 * byteSize));
+        } else {
+            ScratchRegisterScope scratch(masm);
+            masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne), temp, SizeWord);
+            masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
+            masm.ma_store_unaligned(value.high, BaseIndex(HeapReg, scratch, TimesOne),
+                                    temp, SizeWord);
+        }
+        return;
+    }
+
     if (byteSize <= 4) {
         masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne),
                       static_cast<LoadStoreSize>(8 * byteSize));
     } else {
         ScratchRegisterScope scratch(masm);
         masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
         masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
         masm.ma_store(value.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
     }
 
     memoryBarrier(mir->barrierAfter());
 }
 
 void
+CodeGeneratorMIPS::visitWasmStoreI64(LWasmStoreI64* lir)
+{
+    emitWasmStoreI64(lir);
+}
+
+void
+CodeGeneratorMIPS::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir)
+{
+    emitWasmStoreI64(lir);
+}
+
+void
 CodeGeneratorMIPS::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
 {
     const MWasmLoadGlobalVar* mir = ins->mir();
     unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
     MOZ_ASSERT(mir->type() == MIRType::Int64);
     Register64 output = ToOutRegister64(ins);
 
     masm.load32(Address(GlobalReg, addr + INT64LOW_OFFSET), output.low);
--- a/js/src/jit/mips32/CodeGenerator-mips32.h
+++ b/js/src/jit/mips32/CodeGenerator-mips32.h
@@ -28,27 +28,34 @@ class CodeGeneratorMIPS : public CodeGen
     void testObjectEmitBranch(Assembler::Condition cond, const ValueOperand& value,
                               MBasicBlock* ifTrue, MBasicBlock* ifFalse)
     {
         emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_OBJECT), cond, ifTrue, ifFalse);
     }
 
     void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base);
 
+    template <typename T>
+    void emitWasmLoadI64(T* ins);
+    template <typename T>
+    void emitWasmStoreI64(T* ins);
+
   public:
     void visitCompareB(LCompareB* lir);
     void visitCompareBAndBranch(LCompareBAndBranch* lir);
     void visitCompareBitwise(LCompareBitwise* lir);
     void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
     void visitCompareI64(LCompareI64* lir);
     void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
     void visitDivOrModI64(LDivOrModI64* lir);
     void visitUDivOrModI64(LUDivOrModI64* lir);
     void visitWasmLoadI64(LWasmLoadI64* ins);
+    void visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir);
     void visitWasmStoreI64(LWasmStoreI64* ins);
+    void visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* ins);
     void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
     void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
     void visitAsmSelectI64(LAsmSelectI64* lir);
     void visitAsmReinterpretFromI64(LAsmReinterpretFromI64* lir);
     void visitAsmReinterpretToI64(LAsmReinterpretToI64* lir);
     void visitExtendInt32ToInt64(LExtendInt32ToInt64* lir);
     void visitWrapInt64ToInt32(LWrapInt64ToInt32* lir);
     void visitClzI64(LClzI64* ins);
--- a/js/src/jit/mips32/LOpcodes-mips32.h
+++ b/js/src/jit/mips32/LOpcodes-mips32.h
@@ -10,12 +10,16 @@
 #include "jit/shared/LOpcodes-shared.h"
 
 #define LIR_CPU_OPCODE_LIST(_)  \
     _(BoxFloatingPoint)         \
     _(ModMaskI)                 \
     _(UDivOrMod)                \
     _(DivOrModI64)              \
     _(UDivOrModI64)             \
+    _(WasmUnalignedLoad)        \
+    _(WasmUnalignedStore)       \
+    _(WasmUnalignedLoadI64)     \
+    _(WasmUnalignedStoreI64)    \
     _(WasmTruncateToInt64)      \
     _(Int64ToFloatingPoint)
 
 #endif // jit_mips32_LOpcodes_mips32_h__
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -923,16 +923,41 @@ MacroAssemblerMIPSCompat::loadDouble(con
 void
 MacroAssemblerMIPSCompat::loadDouble(const BaseIndex& src, FloatRegister dest)
 {
     computeScaledAddress(src, SecondScratchReg);
     ma_ld(dest, Address(SecondScratchReg, src.offset));
 }
 
 void
+MacroAssemblerMIPSCompat::loadUnalignedDouble(const BaseIndex& src, Register temp,
+                                              FloatRegister dest)
+{
+    computeScaledAddress(src, SecondScratchReg);
+
+    if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) {
+        as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
+        as_lwr(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET);
+        moveToDoubleLo(temp, dest);
+        as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
+        as_lwr(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET);
+        moveToDoubleHi(temp, dest);
+    } else {
+        ma_li(ScratchRegister, Imm32(src.offset));
+        as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+        as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+        as_lwr(temp, ScratchRegister, INT64LOW_OFFSET);
+        moveToDoubleLo(temp, dest);
+        as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+        as_lwr(temp, ScratchRegister, INT64HIGH_OFFSET);
+        moveToDoubleHi(temp, dest);
+    }
+}
+
+void
 MacroAssemblerMIPSCompat::loadFloatAsDouble(const Address& address, FloatRegister dest)
 {
     ma_ls(dest, address);
     as_cvtds(dest, dest);
 }
 
 void
 MacroAssemblerMIPSCompat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
@@ -950,16 +975,35 @@ MacroAssemblerMIPSCompat::loadFloat32(co
 void
 MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
 {
     computeScaledAddress(src, SecondScratchReg);
     ma_ls(dest, Address(SecondScratchReg, src.offset));
 }
 
 void
+MacroAssemblerMIPSCompat::loadUnalignedFloat32(const BaseIndex& src, Register temp,
+                                               FloatRegister dest)
+{
+    computeScaledAddress(src, SecondScratchReg);
+
+    if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) {
+        as_lwl(temp, SecondScratchReg, src.offset + 3);
+        as_lwr(temp, SecondScratchReg, src.offset);
+    } else {
+        ma_li(ScratchRegister, Imm32(src.offset));
+        as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+        as_lwl(temp, ScratchRegister, 3);
+        as_lwr(temp, ScratchRegister, 0);
+    }
+
+    moveToFloat32(temp, dest);
+}
+
+void
 MacroAssemblerMIPSCompat::store8(Imm32 imm, const Address& address)
 {
     ma_li(SecondScratchReg, imm);
     ma_store(SecondScratchReg, address, SizeByte);
 }
 
 void
 MacroAssemblerMIPSCompat::store8(Register src, const Address& address)
@@ -1082,16 +1126,59 @@ MacroAssemblerMIPSCompat::storePtr(Regis
 
 void
 MacroAssemblerMIPSCompat::storePtr(Register src, AbsoluteAddress dest)
 {
     movePtr(ImmPtr(dest.addr), ScratchRegister);
     storePtr(src, Address(ScratchRegister, 0));
 }
 
+void
+MacroAssemblerMIPSCompat::storeUnalignedFloat32(FloatRegister src, Register temp,
+                                                const BaseIndex& dest)
+{
+    computeScaledAddress(dest, SecondScratchReg);
+    moveFromFloat32(src, temp);
+
+    if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) {
+        as_swl(temp, SecondScratchReg, dest.offset + 3);
+        as_swr(temp, SecondScratchReg, dest.offset);
+    } else {
+        ma_li(ScratchRegister, Imm32(dest.offset));
+        as_addu(ScratchRegister, SecondScratchReg, ScratchRegister);
+        as_swl(temp, ScratchRegister, 3);
+        as_swr(temp, ScratchRegister, 0);
+    }
+}
+
+void
+MacroAssemblerMIPSCompat::storeUnalignedDouble(FloatRegister src, Register temp,
+                                               const BaseIndex& dest)
+{
+    computeScaledAddress(dest, SecondScratchReg);
+
+    if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) {
+        moveFromDoubleLo(src, temp);
+        as_swl(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET + 3);
+        as_swr(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET);
+        moveFromDoubleHi(src, temp);
+        as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
+        as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
+    } else {
+        ma_li(ScratchRegister, Imm32(dest.offset));
+        as_addu(ScratchRegister, SecondScratchReg, ScratchRegister);
+        moveFromDoubleLo(src, temp);
+        as_swl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+        as_swr(temp, ScratchRegister, INT64LOW_OFFSET);
+        moveFromDoubleHi(src, temp);
+        as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+        as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
+    }
+}
+
 // Note: this function clobbers the input register.
 void
 MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
 {
     MOZ_ASSERT(input != ScratchDoubleReg);
     Label positive, done;
 
     // <= 0 or NaN --> 0
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -887,23 +887,25 @@ class MacroAssemblerMIPSCompat : public 
     void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
     void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
     void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
 
     void loadDouble(const Address& addr, FloatRegister dest);
     void loadDouble(const BaseIndex& src, FloatRegister dest);
+    void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest);
 
     // Load a float value into a register, then expand it to a double.
     void loadFloatAsDouble(const Address& addr, FloatRegister dest);
     void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
 
     void loadFloat32(const Address& addr, FloatRegister dest);
     void loadFloat32(const BaseIndex& src, FloatRegister dest);
+    void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest);
 
     void store8(Register src, const Address& address);
     void store8(Imm32 imm, const Address& address);
     void store8(Register src, const BaseIndex& address);
     void store8(Imm32 imm, const BaseIndex& address);
 
     void store16(Register src, const Address& address);
     void store16(Imm32 imm, const Address& address);
@@ -933,16 +935,20 @@ class MacroAssemblerMIPSCompat : public 
     }
 
     template <typename T> void storePtr(ImmWord imm, T address);
     template <typename T> void storePtr(ImmPtr imm, T address);
     template <typename T> void storePtr(ImmGCPtr imm, T address);
     void storePtr(Register src, const Address& address);
     void storePtr(Register src, const BaseIndex& address);
     void storePtr(Register src, AbsoluteAddress dest);
+
+    void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest);
+    void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest);
+
     void moveDouble(FloatRegister src, FloatRegister dest) {
         as_movd(dest, src);
     }
 
     void zeroDouble(FloatRegister reg) {
         moveToDoubleLo(zero, reg);
         moveToDoubleHi(zero, reg);
     }
--- a/js/src/jit/mips64/CodeGenerator-mips64.cpp
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -405,18 +405,19 @@ CodeGeneratorMIPS64::visitUDivOrModI64(L
     if (lir->mir()->isMod())
         masm.as_mfhi(output);
     else
         masm.as_mflo(output);
 
     masm.bind(&done);
 }
 
+template <typename T>
 void
-CodeGeneratorMIPS64::visitWasmLoadI64(LWasmLoadI64* lir)
+CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
 
     MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
 
     uint32_t offset = mir->offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
@@ -442,24 +443,46 @@ CodeGeneratorMIPS64::visitWasmLoadI64(LW
       case Scalar::Int32:   isSigned = true;  break;
       case Scalar::Uint32:  isSigned = false; break;
       case Scalar::Int64:   isSigned = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
     memoryBarrier(mir->barrierBefore());
 
+    if (mir->isUnaligned()) {
+        Register temp = ToRegister(lir->getTemp(1));
+
+        masm.ma_load_unaligned(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
+                               temp, static_cast<LoadStoreSize>(8 * byteSize),
+                               isSigned ? SignExtend : ZeroExtend);
+        return;
+    }
+
     masm.ma_load(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
                  static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
 
     memoryBarrier(mir->barrierAfter());
 }
 
 void
-CodeGeneratorMIPS64::visitWasmStoreI64(LWasmStoreI64* lir)
+CodeGeneratorMIPS64::visitWasmLoadI64(LWasmLoadI64* lir)
+{
+    emitWasmLoadI64(lir);
+}
+
+void
+CodeGeneratorMIPS64::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir)
+{
+    emitWasmLoadI64(lir);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
 {
     const MWasmStore* mir = lir->mir();
 
     MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
 
     uint32_t offset = mir->offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
@@ -485,23 +508,43 @@ CodeGeneratorMIPS64::visitWasmStoreI64(L
       case Scalar::Int32:   isSigned = true;  break;
       case Scalar::Uint32:  isSigned = false; break;
       case Scalar::Int64:   isSigned = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
     memoryBarrier(mir->barrierBefore());
 
+    if (mir->isUnaligned()) {
+        Register temp = ToRegister(lir->getTemp(1));
+
+        masm.ma_store_unaligned(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
+                                temp, static_cast<LoadStoreSize>(8 * byteSize),
+                                isSigned ? SignExtend : ZeroExtend);
+        return;
+    }
     masm.ma_store(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
                   static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
 
     memoryBarrier(mir->barrierAfter());
 }
 
 void
+CodeGeneratorMIPS64::visitWasmStoreI64(LWasmStoreI64* lir)
+{
+    emitWasmStoreI64(lir);
+}
+
+void
+CodeGeneratorMIPS64::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir)
+{
+    emitWasmStoreI64(lir);
+}
+
+void
 CodeGeneratorMIPS64::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
 {
     const MWasmLoadGlobalVar* mir = ins->mir();
     unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
     MOZ_ASSERT(mir->type() == MIRType::Int64);
     masm.load64(Address(GlobalReg, addr), ToOutRegister64(ins));
 }
 
--- a/js/src/jit/mips64/CodeGenerator-mips64.h
+++ b/js/src/jit/mips64/CodeGenerator-mips64.h
@@ -34,27 +34,34 @@ class CodeGeneratorMIPS64 : public CodeG
     {
         MOZ_ASSERT(value.valueReg() != SecondScratchReg);
         masm.splitTag(value.valueReg(), SecondScratchReg);
         emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond, ifTrue, ifFalse);
     }
 
     void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base);
 
+    template <typename T>
+    void emitWasmLoadI64(T* ins);
+    template <typename T>
+    void emitWasmStoreI64(T* ins);
+
   public:
     void visitCompareB(LCompareB* lir);
     void visitCompareBAndBranch(LCompareBAndBranch* lir);
     void visitCompareBitwise(LCompareBitwise* lir);
     void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
     void visitCompareI64(LCompareI64* lir);
     void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
     void visitDivOrModI64(LDivOrModI64* lir);
     void visitUDivOrModI64(LUDivOrModI64* lir);
     void visitWasmLoadI64(LWasmLoadI64* lir);
+    void visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir);
     void visitWasmStoreI64(LWasmStoreI64* ins);
+    void visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* ins);
     void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
     void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
     void visitAsmSelectI64(LAsmSelectI64* ins);
     void visitAsmReinterpretFromI64(LAsmReinterpretFromI64* lir);
     void visitAsmReinterpretToI64(LAsmReinterpretToI64* lir);
     void visitExtendInt32ToInt64(LExtendInt32ToInt64* lir);
     void visitWrapInt64ToInt32(LWrapInt64ToInt32* lir);
     void visitClzI64(LClzI64* lir);
--- a/js/src/jit/mips64/LOpcodes-mips64.h
+++ b/js/src/jit/mips64/LOpcodes-mips64.h
@@ -9,12 +9,16 @@
 
 #include "jit/shared/LOpcodes-shared.h"
 
 #define LIR_CPU_OPCODE_LIST(_)  \
     _(ModMaskI)                 \
     _(DivOrModI64)              \
     _(UDivOrMod)                \
     _(UDivOrModI64)             \
+    _(WasmUnalignedLoad)        \
+    _(WasmUnalignedStore)       \
+    _(WasmUnalignedLoadI64)     \
+    _(WasmUnalignedStoreI64)    \
     _(WasmTruncateToInt64)      \
     _(Int64ToFloatingPoint)
 
 #endif // jit_mips64_LOpcodes_mips64_h__
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -1146,16 +1146,35 @@ MacroAssemblerMIPS64Compat::loadDouble(c
 void
 MacroAssemblerMIPS64Compat::loadDouble(const BaseIndex& src, FloatRegister dest)
 {
     computeScaledAddress(src, SecondScratchReg);
     ma_ld(dest, Address(SecondScratchReg, src.offset));
 }
 
 void
+MacroAssemblerMIPS64Compat::loadUnalignedDouble(const BaseIndex& src, Register temp,
+                                                FloatRegister dest)
+{
+    computeScaledAddress(src, SecondScratchReg);
+
+    if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) {
+        as_ldl(temp, SecondScratchReg, src.offset + 7);
+        as_ldr(temp, SecondScratchReg, src.offset);
+    } else {
+        ma_li(ScratchRegister, Imm32(src.offset));
+        as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+        as_ldl(temp, ScratchRegister, 7);
+        as_ldr(temp, ScratchRegister, 0);
+    }
+
+    moveToDouble(temp, dest);
+}
+
+void
 MacroAssemblerMIPS64Compat::loadFloatAsDouble(const Address& address, FloatRegister dest)
 {
     ma_ls(dest, address);
     as_cvtds(dest, dest);
 }
 
 void
 MacroAssemblerMIPS64Compat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
@@ -1173,16 +1192,35 @@ MacroAssemblerMIPS64Compat::loadFloat32(
 void
 MacroAssemblerMIPS64Compat::loadFloat32(const BaseIndex& src, FloatRegister dest)
 {
     computeScaledAddress(src, SecondScratchReg);
     ma_ls(dest, Address(SecondScratchReg, src.offset));
 }
 
 void
+MacroAssemblerMIPS64Compat::loadUnalignedFloat32(const BaseIndex& src, Register temp,
+                                                 FloatRegister dest)
+{
+    computeScaledAddress(src, SecondScratchReg);
+
+    if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) {
+        as_lwl(temp, SecondScratchReg, src.offset + 3);
+        as_lwr(temp, SecondScratchReg, src.offset);
+    } else {
+        ma_li(ScratchRegister, Imm32(src.offset));
+        as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+        as_lwl(temp, ScratchRegister, 3);
+        as_lwr(temp, ScratchRegister, 0);
+    }
+
+    moveToFloat32(temp, dest);
+}
+
+void
 MacroAssemblerMIPS64Compat::store8(Imm32 imm, const Address& address)
 {
     ma_li(SecondScratchReg, imm);
     ma_store(SecondScratchReg, address, SizeByte);
 }
 
 void
 MacroAssemblerMIPS64Compat::store8(Register src, const Address& address)
@@ -1305,16 +1343,52 @@ MacroAssemblerMIPS64Compat::storePtr(Reg
 
 void
 MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest)
 {
     movePtr(ImmPtr(dest.addr), ScratchRegister);
     storePtr(src, Address(ScratchRegister, 0));
 }
 
+void
+MacroAssemblerMIPS64Compat::storeUnalignedFloat32(FloatRegister src, Register temp,
+                                                  const BaseIndex& dest)
+{
+    computeScaledAddress(dest, SecondScratchReg);
+    moveFromFloat32(src, temp);
+
+    if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) {
+        as_swl(temp, SecondScratchReg, dest.offset + 3);
+        as_swr(temp, SecondScratchReg, dest.offset);
+    } else {
+        ma_li(ScratchRegister, Imm32(dest.offset));
+        as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+        as_swl(temp, ScratchRegister, 3);
+        as_swr(temp, ScratchRegister, 0);
+    }
+}
+
+void
+MacroAssemblerMIPS64Compat::storeUnalignedDouble(FloatRegister src, Register temp,
+                                                 const BaseIndex& dest)
+{
+    computeScaledAddress(dest, SecondScratchReg);
+    moveFromDouble(src, temp);
+
+    if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) {
+        as_sdl(temp, SecondScratchReg, dest.offset + 7);
+        as_sdr(temp, SecondScratchReg, dest.offset);
+    } else {
+        ma_li(ScratchRegister, Imm32(dest.offset));
+        as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+        as_sdl(temp, ScratchRegister, 7);
+        as_sdr(temp, ScratchRegister, 0);
+    }
+}
+
 // Note: this function clobbers the input register.
 void
 MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
 {
     MOZ_ASSERT(input != ScratchDoubleReg);
     Label positive, done;
 
     // <= 0 or NaN --> 0
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -899,23 +899,25 @@ class MacroAssemblerMIPS64Compat : publi
     void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
     void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
     void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
 
     void loadDouble(const Address& addr, FloatRegister dest);
     void loadDouble(const BaseIndex& src, FloatRegister dest);
+    void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest);
 
     // Load a float value into a register, then expand it to a double.
     void loadFloatAsDouble(const Address& addr, FloatRegister dest);
     void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
 
     void loadFloat32(const Address& addr, FloatRegister dest);
     void loadFloat32(const BaseIndex& src, FloatRegister dest);
+    void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest);
 
     void store8(Register src, const Address& address);
     void store8(Imm32 imm, const Address& address);
     void store8(Register src, const BaseIndex& address);
     void store8(Imm32 imm, const BaseIndex& address);
 
     void store16(Register src, const Address& address);
     void store16(Imm32 imm, const Address& address);
@@ -943,16 +945,20 @@ class MacroAssemblerMIPS64Compat : publi
     }
 
     template <typename T> void storePtr(ImmWord imm, T address);
     template <typename T> void storePtr(ImmPtr imm, T address);
     template <typename T> void storePtr(ImmGCPtr imm, T address);
     void storePtr(Register src, const Address& address);
     void storePtr(Register src, const BaseIndex& address);
     void storePtr(Register src, AbsoluteAddress dest);
+
+    void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest);
+    void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest);
+
     void moveDouble(FloatRegister src, FloatRegister dest) {
         as_movd(dest, src);
     }
 
     void zeroDouble(FloatRegister reg) {
         moveToDouble(zero, reg);
     }