Bug 1303690 - Baldr: MIPS: Fix alignment hints after review. r=bbouvier
authorHeiher <r@hev.cc>
Mon, 10 Oct 2016 23:34:54 +0800
changeset 317329 85239a8b72170eeddc04910be21cb6b4651ac8df
parent 317328 fb11c2f6c9c54a876de56eb74472b9d40bf20c25
child 317330 3cd0102f89e953c1bc0598bffec956221e4207a3
push id: 30800
push user: philringnalda@gmail.com
push date: Tue, 11 Oct 2016 02:08:53 +0000
treeherder: mozilla-central@ece56e142a1e [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: bbouvier
bugs: 1303690
milestone: 52.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1303690 - Baldr: MIPS: Fix alignment hints after review. r=bbouvier --- .../jit/mips-shared/CodeGenerator-mips-shared.cpp | 54 +++++++++-------- js/src/jit/mips-shared/Lowering-mips-shared.cpp | 2 - .../jit/mips-shared/MacroAssembler-mips-shared.cpp | 70 +++++++++++----------- .../jit/mips-shared/MacroAssembler-mips-shared.h | 6 +- js/src/jit/mips32/CodeGenerator-mips32.cpp | 25 ++++++-- 5 files changed, 83 insertions(+), 74 deletions(-)
js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
js/src/jit/mips-shared/Lowering-mips-shared.cpp
js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
js/src/jit/mips-shared/MacroAssembler-mips-shared.h
js/src/jit/mips32/CodeGenerator-mips32.cpp
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1921,44 +1921,45 @@ CodeGeneratorMIPSShared::emitWasmLoad(T*
       case Scalar::Uint32:  isSigned = false; break;
       case Scalar::Float64: isFloat  = true;  break;
       case Scalar::Float32: isFloat  = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
     memoryBarrier(mir->barrierBefore());
 
+    BaseIndex address(HeapReg, ptr, TimesOne);
+
     if (mir->isUnaligned()) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (isFloat) {
-            if (byteSize == 4) {
-                masm.loadUnalignedFloat32(BaseIndex(HeapReg, ptr, TimesOne), temp,
-                                          ToFloatRegister(lir->output()));
-            } else
-                masm.loadUnalignedDouble(BaseIndex(HeapReg, ptr, TimesOne), temp,
-                                         ToFloatRegister(lir->output()));
+            if (byteSize == 4)
+                masm.loadUnalignedFloat32(address, temp, ToFloatRegister(lir->output()));
+            else
+                masm.loadUnalignedDouble(address, temp, ToFloatRegister(lir->output()));
         } else {
-            masm.ma_load_unaligned(ToRegister(lir->output()), BaseIndex(HeapReg, ptr, TimesOne),
-                                   temp, static_cast<LoadStoreSize>(8 * byteSize),
+            masm.ma_load_unaligned(ToRegister(lir->output()), address, temp,
+                                   static_cast<LoadStoreSize>(8 * byteSize),
                                    isSigned ? SignExtend : ZeroExtend);
         }
 
         memoryBarrier(mir->barrierAfter());
         return;
     }
 
     if (isFloat) {
-        if (byteSize == 4) {
-            masm.loadFloat32(BaseIndex(HeapReg, ptr, TimesOne), ToFloatRegister(lir->output()));
-        } else
-            masm.loadDouble(BaseIndex(HeapReg, ptr, TimesOne), ToFloatRegister(lir->output()));
+        if (byteSize == 4)
+            masm.loadFloat32(address, ToFloatRegister(lir->output()));
+        else
+            masm.loadDouble(address, ToFloatRegister(lir->output()));
     } else {
-        masm.ma_load(ToRegister(lir->output()), BaseIndex(HeapReg, ptr, TimesOne),
-                     static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+        masm.ma_load(ToRegister(lir->output()), address,
+                     static_cast<LoadStoreSize>(8 * byteSize),
+                     isSigned ? SignExtend : ZeroExtend);
     }
 
     memoryBarrier(mir->barrierAfter());
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmLoad(LWasmLoad* lir)
 {
@@ -2005,44 +2006,45 @@ CodeGeneratorMIPSShared::emitWasmStore(T
       case Scalar::Int64:   isSigned = true;  break;
       case Scalar::Float64: isFloat  = true;  break;
       case Scalar::Float32: isFloat  = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
     memoryBarrier(mir->barrierBefore());
 
+    BaseIndex address(HeapReg, ptr, TimesOne);
+
     if (mir->isUnaligned()) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (isFloat) {
-            if (byteSize == 4) {
-                masm.storeUnalignedFloat32(ToFloatRegister(lir->value()), temp,
-                                           BaseIndex(HeapReg, ptr, TimesOne));
-            } else
-                masm.storeUnalignedDouble(ToFloatRegister(lir->value()), temp,
-                                          BaseIndex(HeapReg, ptr, TimesOne));
+            if (byteSize == 4)
+                masm.storeUnalignedFloat32(ToFloatRegister(lir->value()), temp, address);
+            else
+                masm.storeUnalignedDouble(ToFloatRegister(lir->value()), temp, address);
         } else {
-            masm.ma_store_unaligned(ToRegister(lir->value()), BaseIndex(HeapReg, ptr, TimesOne),
-                                    temp, static_cast<LoadStoreSize>(8 * byteSize),
+            masm.ma_store_unaligned(ToRegister(lir->value()), address, temp,
+                                    static_cast<LoadStoreSize>(8 * byteSize),
                                     isSigned ? SignExtend : ZeroExtend);
         }
 
         memoryBarrier(mir->barrierAfter());
         return;
     }
 
     if (isFloat) {
         if (byteSize == 4) {
-            masm.storeFloat32(ToFloatRegister(lir->value()), BaseIndex(HeapReg, ptr, TimesOne));
+            masm.storeFloat32(ToFloatRegister(lir->value()), address);
         } else
-            masm.storeDouble(ToFloatRegister(lir->value()), BaseIndex(HeapReg, ptr, TimesOne));
+            masm.storeDouble(ToFloatRegister(lir->value()), address);
     } else {
-        masm.ma_store(ToRegister(lir->value()), BaseIndex(HeapReg, ptr, TimesOne),
-                      static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+        masm.ma_store(ToRegister(lir->value()), address,
+                      static_cast<LoadStoreSize>(8 * byteSize),
+                      isSigned ? SignExtend : ZeroExtend);
     }
 
     memoryBarrier(mir->barrierAfter());
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmStore(LWasmStore* lir)
 {
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -334,17 +334,16 @@ LIRGeneratorMIPSShared::visitWasmLoad(MW
             return;
         }
 
         auto* lir = new(alloc()) LWasmUnalignedLoad(ptr, temp());
         if (ins->offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         define(lir, ins);
-
         return;
     }
 
     if (ins->type() == MIRType::Int64) {
         auto* lir = new(alloc()) LWasmLoadI64(ptr);
         if (ins->offset())
             lir->setTemp(0, tempCopy(base, 0));
 
@@ -380,17 +379,16 @@ LIRGeneratorMIPSShared::visitWasmStore(M
         }
 
         LAllocation valueAlloc = useRegisterAtStart(value);
         auto* lir = new(alloc()) LWasmUnalignedStore(baseAlloc, valueAlloc, temp());
         if (ins->offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         add(lir, ins);
-
         return;
     }
 
     if (ins->type() == MIRType::Int64) {
         LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
         auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
         if (ins->offset())
             lir->setTemp(0, tempCopy(base, 0));
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -441,57 +441,56 @@ MacroAssemblerMIPSShared::ma_load(Regist
         return;
     }
 
     asMasm().computeScaledAddress(src, SecondScratchReg);
     asMasm().ma_load(dest, Address(SecondScratchReg, src.offset), size, extension);
 }
 
 void
-MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src,
-                                  Register temp, LoadStoreSize size, LoadStoreExtension extension)
+MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
+                                            LoadStoreSize size, LoadStoreExtension extension)
 {
-    int16_t encodedOffset;
+    int16_t lowOffset, hiOffset;
     Register base;
 
     asMasm().computeScaledAddress(src, SecondScratchReg);
 
     if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
-        encodedOffset = Imm16(src.offset).encode();
         base = SecondScratchReg;
+        lowOffset = Imm16(src.offset).encode();
+        hiOffset = Imm16(src.offset + size / 8 - 1).encode();
     } else {
         ma_li(ScratchRegister, Imm32(src.offset));
         as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
         base = ScratchRegister;
-        encodedOffset = Imm16(0).encode();
+        lowOffset = Imm16(0).encode();
+        hiOffset = Imm16(size / 8 - 1).encode();
     }
 
     switch (size) {
-      case SizeByte:
-        if (ZeroExtend == extension)
-            as_lbu(dest, base, encodedOffset);
+      case SizeHalfWord:
+        as_lbu(dest, base, lowOffset);
+        if (extension != ZeroExtend)
+            as_lbu(temp, base, hiOffset);
         else
-            as_lb(dest, base, encodedOffset);
-        break;
-      case SizeHalfWord:
-        as_lbu(dest, base, encodedOffset);
-        as_lbu(temp, base, encodedOffset + 1);
-        as_ins(dest, temp, 8, 8);
-        if (ZeroExtend != extension)
-            as_seh(dest, dest);
+            as_lb(temp, base, hiOffset);
+        as_ins(dest, temp, 8, 24);
         break;
       case SizeWord:
-        as_lwl(dest, base, encodedOffset + 3);
-        as_lwr(dest, base, encodedOffset);
-        if (ZeroExtend == extension)
-            as_ext(dest, dest, 0, 32);
+        as_lwl(dest, base, hiOffset);
+        as_lwr(dest, base, lowOffset);
+#ifdef JS_CODEGEN_MIPS64
+        if (extension != ZeroExtend)
+            as_dext(dest, dest, 0, 32);
+#endif
         break;
       case SizeDouble:
-        as_ldl(dest, base, encodedOffset + 7);
-        as_ldr(dest, base, encodedOffset);
+        as_ldl(dest, base, hiOffset);
+        as_ldr(dest, base, lowOffset);
         break;
       default:
         MOZ_CRASH("Invalid argument for ma_load");
     }
 }
 
 void
 MacroAssemblerMIPSShared::ma_store(Register data, const BaseIndex& dest,
@@ -589,50 +588,49 @@ MacroAssemblerMIPSShared::ma_store(Imm32
     ma_li(ScratchRegister, imm);
 
     // with offset=0 ScratchRegister will not be used in ma_store()
     // so we can use it as a parameter here
     asMasm().ma_store(ScratchRegister, Address(SecondScratchReg, 0), size, extension);
 }
 
 void
-MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& dest,
-                                   Register temp, LoadStoreSize size, LoadStoreExtension extension)
+MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
+                                             LoadStoreSize size, LoadStoreExtension extension)
 {
-    int16_t encodedOffset;
+    int16_t lowOffset, hiOffset;
     Register base;
 
     asMasm().computeEffectiveAddress(dest, SecondScratchReg);
 
     if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
-        encodedOffset = Imm16(dest.offset).encode();
         base = SecondScratchReg;
+        lowOffset = Imm16(dest.offset).encode();
+        hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
     } else {
         ma_li(ScratchRegister, Imm32(dest.offset));
         as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
         base = ScratchRegister;
-        encodedOffset = Imm16(0).encode();
+        lowOffset = Imm16(0).encode();
+        hiOffset = Imm16(size / 8 - 1).encode();
     }
 
     switch (size) {
-      case SizeByte:
-        as_sb(data, base, encodedOffset);
-        break;
       case SizeHalfWord:
-        as_sb(data, base, encodedOffset);
+        as_sb(data, base, lowOffset);
         as_ext(temp, data, 8, 8);
-        as_sb(temp, base, encodedOffset + 1);
+        as_sb(temp, base, hiOffset);
         break;
       case SizeWord:
-        as_swl(data, base, encodedOffset + 3);
-        as_swr(data, base, encodedOffset);
+        as_swl(data, base, hiOffset);
+        as_swr(data, base, lowOffset);
         break;
       case SizeDouble:
-        as_sdl(data, base, encodedOffset + 7);
-        as_sdr(data, base, encodedOffset);
+        as_sdl(data, base, hiOffset);
+        as_sdr(data, base, lowOffset);
         break;
       default:
         MOZ_CRASH("Invalid argument for ma_store");
     }
 }
 
 // Branches when done from within mips-specific code.
 void
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
@@ -100,27 +100,25 @@ class MacroAssemblerMIPSShared : public 
     void ma_xor(Register rd, Register rs, Imm32 imm);
 
     void ma_ctz(Register rd, Register rs);
 
     // load
     void ma_load(Register dest, const BaseIndex& src, LoadStoreSize size = SizeWord,
                  LoadStoreExtension extension = SignExtend);
     void ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
-                           LoadStoreSize size = SizeWord,
-                           LoadStoreExtension extension = SignExtend);
+                           LoadStoreSize size, LoadStoreExtension extension);
 
     // store
     void ma_store(Register data, const BaseIndex& dest, LoadStoreSize size = SizeWord,
                   LoadStoreExtension extension = SignExtend);
     void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
                   LoadStoreExtension extension = SignExtend);
     void ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
-                            LoadStoreSize size = SizeWord,
-                            LoadStoreExtension extension = SignExtend);
+                            LoadStoreSize size, LoadStoreExtension extension);
 
     // arithmetic based ops
     // add
     void ma_addu(Register rd, Register rs, Imm32 imm);
     void ma_addu(Register rd, Register rs);
     void ma_addu(Register rd, Imm32 imm);
     template <typename L>
     void ma_addTestCarry(Register rd, Register rs, Register rt, L overflow);
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -495,23 +495,23 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* li
         if (byteSize <= 4) {
             masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
                                    temp, static_cast<LoadStoreSize>(8 * byteSize),
                                    isSigned ? SignExtend : ZeroExtend);
             if (!isSigned)
                 masm.move32(Imm32(0), output.high);
             else
                 masm.ma_sra(output.high, output.low, Imm32(31));
-
         } else {
             ScratchRegisterScope scratch(masm);
-            masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne), temp, SizeWord);
+            masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
+                                   temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
             masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
             masm.ma_load_unaligned(output.high, BaseIndex(HeapReg, scratch, TimesOne),
-                                   temp, SizeWord);
+                                   temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
         }
         return;
     }
 
     if (byteSize <= 4) {
         masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne),
                      static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
         if (!isSigned)
@@ -556,32 +556,45 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* l
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
         masm.addPtr(Imm32(offset), ptrPlusOffset);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
     unsigned byteSize = mir->byteSize();
+    bool isSigned;
+    switch (mir->accessType()) {
+        case Scalar::Int8:   isSigned = true; break;
+        case Scalar::Uint8:  isSigned = false; break;
+        case Scalar::Int16:  isSigned = true; break;
+        case Scalar::Uint16: isSigned = false; break;
+        case Scalar::Int32:  isSigned = true; break;
+        case Scalar::Uint32: isSigned = false; break;
+        case Scalar::Int64:  isSigned = true; break;
+        default: MOZ_CRASH("unexpected array type");
+    }
 
     memoryBarrier(mir->barrierBefore());
 
     MOZ_ASSERT(INT64LOW_OFFSET == 0);
     if (mir->isUnaligned()) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (byteSize <= 4) {
             masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
-                                    temp, static_cast<LoadStoreSize>(8 * byteSize));
+                                    temp, static_cast<LoadStoreSize>(8 * byteSize),
+                                    isSigned ? SignExtend : ZeroExtend);
         } else {
             ScratchRegisterScope scratch(masm);
-            masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne), temp, SizeWord);
+            masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
+                                    temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
             masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
             masm.ma_store_unaligned(value.high, BaseIndex(HeapReg, scratch, TimesOne),
-                                    temp, SizeWord);
+                                    temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
         }
         return;
     }
 
     if (byteSize <= 4) {
         masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne),
                       static_cast<LoadStoreSize>(8 * byteSize));
     } else {