Bug 1330942 - move MemoryAccessDesc::isUnaligned to the ARM/MIPS platform layer. r=luke
author Lars T Hansen <lhansen@mozilla.com>
date Thu, 19 Jan 2017 12:45:44 +0100
changeset 330159 7f1a68a78656619e068a0252155083c762ccd9a4
parent 330158 657cf05df28dc12563e8f43709de761335628cc6
child 330160 552e15caf7384fbf025dc940267c137f77692b47
push id 31231
push user kwierso@gmail.com
push date Fri, 20 Jan 2017 00:31:55 +0000
reviewers luke
bugs 1330942
milestone 53.0a1
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/Lowering-arm.cpp
js/src/jit/mips-shared/Assembler-mips-shared.h
js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
js/src/jit/mips-shared/Lowering-mips-shared.cpp
js/src/jit/mips32/CodeGenerator-mips32.cpp
js/src/jit/mips64/CodeGenerator-mips64.cpp
js/src/jit/shared/Assembler-shared.h
js/src/wasm/WasmBaselineCompile.cpp
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -166,16 +166,28 @@ ABIArgGenerator::hardNext(MIRType type)
 ABIArg
 ABIArgGenerator::next(MIRType type)
 {
     if (useHardFp_)
         return hardNext(type);
     return softNext(type);
 }
 
+bool
+js::jit::IsUnaligned(const wasm::MemoryAccessDesc& access)
+{
+    if (!access.align())
+        return false;
+
+    if (access.type() == Scalar::Float64 && access.align() >= 4)
+        return false;
+
+    return access.align() < access.byteSize();
+}
+
 // Encode a standard register when it is being used as src1, the dest, and an
 // extra register. These should never be called with an InvalidReg.
 uint32_t
 js::jit::RT(Register r)
 {
     MOZ_ASSERT((r.code() & ~0xf) == 0);
     return r.code() << 12;
 }
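
Annotation (not part of the patch): the ARM predicate above treats an access with no alignment claim (align() == 0) as aligned, and exempts Float64 accesses with at least 4-byte alignment, since ARM's VLDR/VSTR require only word alignment. A minimal standalone sketch of the resulting truth table, using a hypothetical stand-in for the real MemoryAccessDesc:

    #include <cassert>
    #include <cstdint>

    enum class ScalarType { Int32, Float32, Float64 };

    struct AccessSketch {
        ScalarType type;
        uint32_t align;    // power of two; 0 = no alignment claim
        uint32_t byteSize;
    };

    static bool IsUnalignedArm(const AccessSketch& a)
    {
        if (!a.align)
            return false;  // no claim: treated as aligned
        if (a.type == ScalarType::Float64 && a.align >= 4)
            return false;  // word alignment suffices for f64 on ARM
        return a.align < a.byteSize;
    }

    int main()
    {
        assert(!IsUnalignedArm({ScalarType::Int32, 4, 4}));    // naturally aligned
        assert( IsUnalignedArm({ScalarType::Int32, 1, 4}));    // byte-aligned i32
        assert(!IsUnalignedArm({ScalarType::Float64, 4, 8}));  // f64 at align 4
        assert( IsUnalignedArm({ScalarType::Float64, 2, 8}));
        return 0;
    }
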
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -103,16 +103,18 @@ class ABIArgGenerator
         MOZ_ASSERT(intRegIndex_ == 0 && floatRegIndex_ == 0);
         useHardFp_ = useHardFp;
     }
     ABIArg next(MIRType argType);
     ABIArg& current() { return current_; }
     uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
 };
 
+bool IsUnaligned(const wasm::MemoryAccessDesc& access);
+
 static constexpr Register ABINonArgReg0 = r4;
 static constexpr Register ABINonArgReg1 = r5;
 static constexpr Register ABINonArgReg2 = r6;
 static constexpr Register ABINonArgReturnReg0 = r4;
 static constexpr Register ABINonArgReturnReg1 = r5;
 
 // TLS pointer argument register for WebAssembly functions. This must not alias
 // any other register used for passing function arguments or return values.
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -608,17 +608,17 @@ LIRGeneratorARM::visitWasmUnsignedToFloa
 void
 LIRGeneratorARM::visitWasmLoad(MWasmLoad* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation ptr = useRegisterAtStart(base);
 
-    if (ins->access().isUnaligned()) {
+    if (IsUnaligned(ins->access())) {
         // Unaligned access expected! Revert to a byte load.
         LDefinition ptrCopy = tempCopy(base, 0);
 
         LDefinition noTemp = LDefinition::BogusTemp();
         if (ins->type() == MIRType::Int64) {
             auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, ptrCopy, temp(), noTemp, noTemp);
             defineInt64(lir, ins);
             return;
@@ -657,17 +657,17 @@ LIRGeneratorARM::visitWasmLoad(MWasmLoad
 void
 LIRGeneratorARM::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation ptr = useRegisterAtStart(base);
 
-    if (ins->access().isUnaligned()) {
+    if (IsUnaligned(ins->access())) {
         // Unaligned access expected! Revert to a byte store.
         LDefinition ptrCopy = tempCopy(base, 0);
 
         MIRType valueType = ins->value()->type();
         if (valueType == MIRType::Int64) {
             LInt64Allocation value = useInt64RegisterAtStart(ins->value());
             auto* lir = new(alloc()) LWasmUnalignedStoreI64(ptr, value, ptrCopy, temp());
             add(lir, ins);
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -1519,12 +1519,18 @@ class InstGS : public Instruction
       : Instruction(raw)
     { }
     // For floating-point unaligned loads and stores.
     InstGS(Opcode op, Register rs, FloatRegister rt, Imm8 off, FunctionField ff)
       : Instruction(op | RS(rs) | RT(rt) | off.encode(6) | ff)
     { }
 };
 
+inline bool
+IsUnaligned(const wasm::MemoryAccessDesc& access)
+{
+    return access.align() && access.align() < access.byteSize();
+}
+
 } // namespace jit
 } // namespace js
 
 #endif /* jit_mips_shared_Assembler_mips_shared_h */
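
Annotation (not part of the patch): the MIPS predicate is the old shared test verbatim, with no Float64 exemption; the MIPS code generators below route any access whose claimed alignment is below its size through the unaligned helpers (loadUnalignedDouble and friends, via an integer temp). Reusing the stand-ins from the ARM sketch above, the one behavioral difference is:

    static bool IsUnalignedMips(const AccessSketch& a)
    {
        return a.align && a.align < a.byteSize;
    }

    // A Float64 access claiming 4-byte alignment stays on the aligned path
    // on ARM but takes the byte-wise path on MIPS:
    //   IsUnalignedArm({ScalarType::Float64, 4, 8})  == false
    //   IsUnalignedMips({ScalarType::Float64, 4, 8}) == true
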
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1910,17 +1910,17 @@ CodeGeneratorMIPSShared::emitWasmLoad(T*
       case Scalar::Float32: isFloat  = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
     masm.memoryBarrier(mir->access().barrierBefore());
 
     BaseIndex address(HeapReg, ptr, TimesOne);
 
-    if (mir->access().isUnaligned()) {
+    if (IsUnaligned(mir->access())) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (isFloat) {
             if (byteSize == 4)
                 masm.loadUnalignedFloat32(address, temp, ToFloatRegister(lir->output()));
             else
                 masm.loadUnalignedDouble(address, temp, ToFloatRegister(lir->output()));
         } else {
@@ -1995,17 +1995,17 @@ CodeGeneratorMIPSShared::emitWasmStore(T
       case Scalar::Float32: isFloat  = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
     masm.memoryBarrier(mir->access().barrierBefore());
 
     BaseIndex address(HeapReg, ptr, TimesOne);
 
-    if (mir->access().isUnaligned()) {
+    if (IsUnaligned(mir->access())) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (isFloat) {
             if (byteSize == 4)
                 masm.storeUnalignedFloat32(ToFloatRegister(lir->value()), temp, address);
             else
                 masm.storeUnalignedDouble(ToFloatRegister(lir->value()), temp, address);
         } else {
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -319,17 +319,17 @@ LIRGeneratorMIPSShared::visitAsmJSNeg(MA
 void
 LIRGeneratorMIPSShared::visitWasmLoad(MWasmLoad* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation ptr = useRegisterAtStart(base);
 
-    if (ins->access().isUnaligned()) {
+    if (IsUnaligned(ins->access())) {
         if (ins->type() == MIRType::Int64) {
             auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, temp());
             if (ins->access().offset())
                 lir->setTemp(0, tempCopy(base, 0));
 
             defineInt64(lir, ins);
             return;
         }
@@ -362,17 +362,17 @@ void
 LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     MDefinition* value = ins->value();
     LAllocation baseAlloc = useRegisterAtStart(base);
 
-    if (ins->access().isUnaligned()) {
+    if (IsUnaligned(ins->access())) {
         if (ins->type() == MIRType::Int64) {
             LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
             auto* lir = new(alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
             if (ins->access().offset())
                 lir->setTemp(0, tempCopy(base, 0));
 
             add(lir, ins);
             return;
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -485,17 +485,17 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* li
         case Scalar::Uint32: isSigned = false; break;
         case Scalar::Int64:  isSigned = true; break;
         default: MOZ_CRASH("unexpected array type");
     }
 
     masm.memoryBarrier(mir->access().barrierBefore());
 
     MOZ_ASSERT(INT64LOW_OFFSET == 0);
-    if (mir->access().isUnaligned()) {
+    if (IsUnaligned(mir->access())) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (byteSize <= 4) {
             masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
                                    temp, static_cast<LoadStoreSize>(8 * byteSize),
                                    isSigned ? SignExtend : ZeroExtend);
             if (!isSigned)
                 masm.move32(Imm32(0), output.high);
@@ -572,17 +572,17 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* l
         case Scalar::Uint32: isSigned = false; break;
         case Scalar::Int64:  isSigned = true; break;
         default: MOZ_CRASH("unexpected array type");
     }
 
     masm.memoryBarrier(mir->access().barrierBefore());
 
     MOZ_ASSERT(INT64LOW_OFFSET == 0);
-    if (mir->access().isUnaligned()) {
+    if (IsUnaligned(mir->access())) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (byteSize <= 4) {
             masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
                                     temp, static_cast<LoadStoreSize>(8 * byteSize),
                                     isSigned ? SignExtend : ZeroExtend);
         } else {
             ScratchRegisterScope scratch(masm);
--- a/js/src/jit/mips64/CodeGenerator-mips64.cpp
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -444,17 +444,17 @@ CodeGeneratorMIPS64::emitWasmLoadI64(T* 
       case Scalar::Int32:   isSigned = true;  break;
       case Scalar::Uint32:  isSigned = false; break;
       case Scalar::Int64:   isSigned = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
     masm.memoryBarrier(mir->access().barrierBefore());
 
-    if (mir->access().isUnaligned()) {
+    if (IsUnaligned(mir->access())) {
         Register temp = ToRegister(lir->getTemp(1));
 
         masm.ma_load_unaligned(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
                                temp, static_cast<LoadStoreSize>(8 * byteSize),
                                isSigned ? SignExtend : ZeroExtend);
         return;
     }
 
@@ -509,17 +509,17 @@ CodeGeneratorMIPS64::emitWasmStoreI64(T*
       case Scalar::Int32:   isSigned = true;  break;
       case Scalar::Uint32:  isSigned = false; break;
       case Scalar::Int64:   isSigned = true;  break;
       default: MOZ_CRASH("unexpected array type");
     }
 
     masm.memoryBarrier(mir->access().barrierBefore());
 
-    if (mir->access().isUnaligned()) {
+    if (IsUnaligned(mir->access())) {
         Register temp = ToRegister(lir->getTemp(1));
 
         masm.ma_store_unaligned(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
                                 temp, static_cast<LoadStoreSize>(8 * byteSize),
                                 isSigned ? SignExtend : ZeroExtend);
         return;
     }
     masm.ma_store(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -724,17 +724,16 @@ class MemoryAccessDesc
     }
     unsigned numSimdElems() const { MOZ_ASSERT(isSimd()); return numSimdElems_; }
     jit::MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
     jit::MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
     bool hasTrap() const { return !!trapOffset_; }
     TrapOffset trapOffset() const { return *trapOffset_; }
     bool isAtomic() const { return (barrierBefore_ | barrierAfter_) != jit::MembarNobits; }
     bool isSimd() const { return Scalar::isSimdType(type_); }
-    bool isUnaligned() const { return align() && align() < byteSize(); }
     bool isPlainAsmJS() const { return !hasTrap(); }
 
     void clearOffset() { offset_ = 0; }
 };
 
 // Summarizes a global access for a mutable (in asm.js) or immutable value (in
 // asm.js or the wasm MVP) that needs to get patched later.
 
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -3394,17 +3394,17 @@ class BaseCompiler
                              trap(Trap::OutOfBounds));
             access->clearOffset();
         }
     }
 
     // This is the temp register passed as the last argument to load()
     MOZ_MUST_USE size_t loadTemps(MemoryAccessDesc& access) {
 #if defined(JS_CODEGEN_ARM)
-        if (access.isUnaligned()) {
+        if (IsUnaligned(access)) {
             switch (access.type()) {
               case Scalar::Float32:
                 return 2;
               case Scalar::Float64:
                 return 3;
               default:
                 return 1;
             }
@@ -3455,17 +3455,17 @@ class BaseCompiler
             AnyRegister out = byteRegConflict ? AnyRegister(ScratchRegX86) : dest.any();
 
             masm.wasmLoad(access, srcAddr, out);
 
             if (byteRegConflict)
                 masm.mov(ScratchRegX86, dest.i32());
         }
 #elif defined(JS_CODEGEN_ARM)
-        if (access.isUnaligned()) {
+        if (IsUnaligned(access)) {
             switch (dest.tag) {
               case AnyReg::I64:
                 masm.wasmUnalignedLoadI64(access, ptr, ptr, dest.i64(), tmp1);
                 break;
               case AnyReg::F32:
                 masm.wasmUnalignedLoadFP(access, ptr, ptr, dest.f32(), tmp1, tmp2, Register::Invalid());
                 break;
               case AnyReg::F64:
@@ -3487,17 +3487,17 @@ class BaseCompiler
 
         if (ool)
             masm.bind(ool->rejoin());
         return true;
     }
 
     MOZ_MUST_USE size_t storeTemps(MemoryAccessDesc& access) {
 #if defined(JS_CODEGEN_ARM)
-        if (access.isUnaligned()) {
+        if (IsUnaligned(access)) {
             // See comment in store() about how this temp could be avoided for
             // unaligned i8/i16/i32 stores with some restructuring elsewhere.
             return 1;
         }
 #endif
         return 0;
     }
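
Annotation (not part of the patch): the counts returned by loadTemps() and storeTemps() above line up with the masm calls in load() and store(): wasmUnalignedLoadI64 takes one temp, wasmUnalignedLoadFP takes two for Float32 (its third temp slot is Register::Invalid()) and, per loadTemps(), three for Float64, and every unaligned store needs a single temp. A sketch of that scratch-register budget, with hypothetical names:

    #include <cstddef>

    enum class ResultKind { I32, I64, F32, F64 };

    static size_t unalignedLoadTemps(ResultKind k)
    {
        switch (k) {
          case ResultKind::F32: return 2;  // tmp1, tmp2
          case ResultKind::F64: return 3;  // tmp1, tmp2, tmp3
          default:              return 1;  // tmp1 only
        }
    }

    static size_t unalignedStoreTemps()
    {
        return 1;  // see the comment in store() about avoiding this temp
    }
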
 
@@ -3542,17 +3542,17 @@ class BaseCompiler
                 value = AnyRegister(ScratchRegX86);
             } else {
                 value = src.any();
             }
 
             masm.wasmStore(access, value, dstAddr);
         }
 #elif defined(JS_CODEGEN_ARM)
-        if (access.isUnaligned()) {
+        if (IsUnaligned(access)) {
             // TODO / OPTIMIZE (bug 1331264): We perform the copy on the i32
             // path (and allocate the temp for the copy) because we will destroy
             // the value in the temp.  We could avoid the copy and the temp if
             // the caller would instead preserve src when it needs to return its
             // value as a result (for teeStore).  If unaligned accesses are
             // common it will be worthwhile to make that change, but there's no
             // evidence yet that they will be common.
             switch (src.tag) {
@@ -6510,17 +6510,17 @@ BaseCompiler::emitLoad(ValType type, Sca
     RegI32 tmp1 = temps >= 1 ? needI32() : invalidI32();
     RegI32 tmp2 = temps >= 2 ? needI32() : invalidI32();
     RegI32 tmp3 = temps >= 3 ? needI32() : invalidI32();
 
     switch (type) {
       case ValType::I32: {
         RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
 #ifdef JS_CODEGEN_ARM
-        RegI32 rv = access.isUnaligned() ? needI32() : rp;
+        RegI32 rv = IsUnaligned(access) ? needI32() : rp;
 #else
         RegI32 rv = rp;
 #endif
         if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2, tmp3))
             return false;
         pushI32(rv);
         if (rp != rv)
             freeI32(rp);