Bug 1289054 - Part 23: Implement the 64bit variant of WasmLoad and WasmStore on arm, r=bbouvier
author: Hannes Verschore <hv1989@gmail.com>
date: Fri, 29 Jul 2016 16:53:50 +0200
changeset: 349442:1449099f1906015dd12a8b488a7f426f5b6b4c52
parent: 349441:1f97f6942a8458561aa29b6214da8e29f00eaed6
child: 349443:0f0fe678a40ae291bf1ee4baec03da61e63635cf
push id: 1230
push user: jlund@mozilla.com
push date: Mon, 31 Oct 2016 18:13:35 +0000
treeherder: mozilla-release@5e06e3766db2
reviewers: bbouvier
bugs: 1289054
milestone: 50.0a1
Bug 1289054 - Part 23: Implement the 64bit variant of WasmLoad and WasmStore on arm, r=bbouvier
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/CodeGenerator-arm.h
js/src/jit/arm/Lowering-arm.cpp
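
On 32-bit ARM an Int64 value does not fit in one register, so the patch keeps it in a Register64 low/high pair and turns a single 64-bit memory access into two 32-bit transfers: the low word at INT64LOW_OFFSET (0) and the high word at INT64HIGH_OFFSET (4, little-endian). A narrower signed access loaded into an i64 result instead fills the high word by arithmetically shifting the sign bit down. The following standalone C++ sketch illustrates that splitting under those assumptions; the helper names are hypothetical and plain memcpy stands in for the real ma_dataTransferN MacroAssembler calls:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    struct Register64 { uint32_t low; uint32_t high; };

    // Two 32-bit loads stand in for the two ma_dataTransferN(IsLoad, 32, ...)
    // calls in the patch: low word at offset 0, high word at offset 4.
    Register64 loadInt64Split(const uint8_t* heap, uint32_t ptr) {
        Register64 out;
        std::memcpy(&out.low,  heap + ptr + 0, sizeof(uint32_t)); // INT64LOW_OFFSET
        std::memcpy(&out.high, heap + ptr + 4, sizeof(uint32_t)); // INT64HIGH_OFFSET
        return out;
    }

    // An Int32 access loaded into an i64 result fills the high word with
    // either the sign bit (cf. ma_asr(Imm32(31), low, high)) or zero.
    Register64 loadInt32IntoInt64(const uint8_t* heap, uint32_t ptr, bool isSigned) {
        Register64 out;
        std::memcpy(&out.low, heap + ptr, sizeof(uint32_t));
        out.high = isSigned ? uint32_t(int32_t(out.low) >> 31) : 0;
        return out;
    }

    int main() {
        uint8_t heap[16] = {};
        uint64_t v = 0xfffffffedcba9876ULL;
        std::memcpy(heap + 8, &v, sizeof(v));
        Register64 r = loadInt64Split(heap, 8);
        // Expect low=dcba9876 high=fffffffe on a little-endian host.
        std::printf("low=%08x high=%08x\n", (unsigned)r.low, (unsigned)r.high);
        return 0;
    }
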
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2334,101 +2334,151 @@ CodeGeneratorARM::visitWasmBoundsCheck(L
         masm.append(wasm::BoundsCheck(cmpOffset));
         masm.as_b(&ok2, Assembler::BelowOrEqual);
         masm.assumeUnreachable("Redundant bounds check failed!");
         masm.bind(&ok2);
 #endif
     }
 }
 
+template <typename T>
 void
-CodeGeneratorARM::visitWasmLoad(LWasmLoad* lir)
+CodeGeneratorARM::emitWasmLoad(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
 
     MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");
 
     uint32_t offset = mir->offset();
     if (offset > INT32_MAX) {
         // This is unreachable because of bounds checks.
         masm.breakpoint();
         return;
     }
 
     Register ptr = ToRegister(lir->ptr());
-    AnyRegister output = ToAnyRegister(lir->output());
+    Scalar::Type type = mir->accessType();
 
     // Maybe add the offset.
-    if (offset) {
+    if (offset || type == Scalar::Int64) {
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
         masm.ma_add(Imm32(offset), ptrPlusOffset);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
-    Scalar::Type type = mir->accessType();
-    bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32;
-    bool isFloat = output.isFloat();
-
+    bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
+                    type == Scalar::Int64;
     unsigned byteSize = mir->byteSize();
 
-    if (isFloat) {
-        MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle());
-        ScratchRegisterScope scratch(masm);
-        masm.ma_add(HeapReg, ptr, scratch);
-        masm.ma_vldr(Address(scratch, 0), output.fpu());
+    if (mir->type() == MIRType::Int64) {
+        Register64 output = ToOutRegister64(lir);
+        if (type == Scalar::Int64) {
+            MOZ_ASSERT(INT64LOW_OFFSET == 0);
+            masm.ma_dataTransferN(IsLoad, 32, /* signed = */ false, HeapReg, ptr, output.low);
+            masm.ma_add(Imm32(INT64HIGH_OFFSET), ptr);
+            masm.ma_dataTransferN(IsLoad, 32, isSigned, HeapReg, ptr, output.high);
+        } else {
+            masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.low);
+            if (isSigned)
+                masm.ma_asr(Imm32(31), output.low, output.high);
+            else
+                masm.ma_mov(Imm32(0), output.high);
+        }
     } else {
-        masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
+        AnyRegister output = ToAnyRegister(lir->output());
+        bool isFloat = output.isFloat();
+        if (isFloat) {
+            MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle());
+            ScratchRegisterScope scratch(masm);
+            masm.ma_add(HeapReg, ptr, scratch);
+            masm.ma_vldr(Address(scratch, 0), output.fpu());
+        } else {
+            masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
+        }
     }
 }
 
 void
-CodeGeneratorARM::visitWasmStore(LWasmStore* lir)
+CodeGeneratorARM::visitWasmLoad(LWasmLoad* lir)
+{
+    emitWasmLoad(lir);
+}
+
+void
+CodeGeneratorARM::visitWasmLoadI64(LWasmLoadI64* lir)
+{
+    emitWasmLoad(lir);
+}
+
+template <typename T>
+void
+CodeGeneratorARM::emitWasmStore(T* lir)
 {
     const MWasmStore* mir = lir->mir();
 
     MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");
 
     uint32_t offset = mir->offset();
     if (offset > INT32_MAX) {
         // This is unreachable because of bounds checks.
         masm.breakpoint();
         return;
     }
 
     Register ptr = ToRegister(lir->ptr());
+    unsigned byteSize = mir->byteSize();
+    Scalar::Type type = mir->accessType();
 
     // Maybe add the offset.
-    if (offset) {
+    if (offset || type == Scalar::Int64) {
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
         masm.ma_add(Imm32(offset), ptrPlusOffset);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
-    AnyRegister value = ToAnyRegister(lir->value());
-    unsigned byteSize = mir->byteSize();
-    Scalar::Type type = mir->accessType();
-
-    if (value.isFloat()) {
-        FloatRegister val = value.fpu();
-        MOZ_ASSERT((byteSize == 4) == val.isSingle());
-        ScratchRegisterScope scratch(masm);
-        masm.ma_add(HeapReg, ptr, scratch);
-        masm.ma_vstr(val, Address(scratch, 0));
+    if (type == Scalar::Int64) {
+        MOZ_ASSERT(INT64LOW_OFFSET == 0);
+
+        Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
+        masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ false, HeapReg, ptr, value.low);
+        masm.ma_add(Imm32(INT64HIGH_OFFSET), ptr);
+        masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ true, HeapReg, ptr, value.high);
     } else {
-        bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // see AsmJSStoreHeap;
-        Register val = value.gpr();
-        masm.ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val);
+        AnyRegister value = ToAnyRegister(lir->getOperand(lir->ValueIndex));
+        if (value.isFloat()) {
+            FloatRegister val = value.fpu();
+            MOZ_ASSERT((byteSize == 4) == val.isSingle());
+            ScratchRegisterScope scratch(masm);
+            masm.ma_add(HeapReg, ptr, scratch);
+            masm.ma_vstr(val, Address(scratch, 0));
+        } else {
+            bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // see AsmJSStoreHeap;
+            Register val = value.gpr();
+            masm.ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val);
+        }
     }
 }
 
 void
+CodeGeneratorARM::visitWasmStore(LWasmStore* lir)
+{
+    emitWasmStore(lir);
+}
+
+void
+CodeGeneratorARM::visitWasmStoreI64(LWasmStoreI64* lir)
+{
+    emitWasmStore(lir);
+}
+
+void
 CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
     bool isSigned;
     int size;
     bool isFloat = false;
     switch (mir->accessType()) {
       case Scalar::Int8:
@@ -2860,19 +2910,18 @@ CodeGeneratorARM::visitWasmStoreGlobalVa
         masm.ma_vstr(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
     }
 }
 
 void
 CodeGeneratorARM::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
 {
     const MWasmStoreGlobalVar* mir = ins->mir();
-    MIRType type = mir->value()->type();
     unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
-    MOZ_ASSERT (type == MIRType::Int64);
+    MOZ_ASSERT (mir->value()->type() == MIRType::Int64);
     Register64 input = ToRegister64(ins->value());
 
     masm.ma_dtr(IsStore, GlobalReg, Imm32(addr + INT64LOW_OFFSET), input.low);
     masm.ma_dtr(IsStore, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), input.high);
 }
 
 void
 CodeGeneratorARM::visitNegI(LNegI* ins)
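
The store path in emitWasmStore above is symmetric, with one wrinkle: after storing the low word, the code bumps the pointer register itself (ma_add(Imm32(INT64HIGH_OFFSET), ptr)) before storing the high word, which is why the lowering change below always allocates a ptrCopy temp for Scalar::Int64 accesses even when the offset is zero. A standalone sketch of that sequence, again with hypothetical helpers rather than the real API, little-endian assumed:

    #include <cstdint>
    #include <cstring>

    struct Register64 { uint32_t low; uint32_t high; };

    void storeInt64Split(uint8_t* heap, uint32_t ptr, Register64 value) {
        std::memcpy(heap + ptr, &value.low, sizeof(uint32_t));  // low word at +0
        ptr += 4;                     // cf. ma_add(Imm32(INT64HIGH_OFFSET), ptr)
        std::memcpy(heap + ptr, &value.high, sizeof(uint32_t)); // high word at +4
    }

    int main() {
        uint8_t heap[8] = {};
        storeInt64Split(heap, 0, Register64{0xdcba9876u, 0xfffffffeu});
        uint64_t v;
        std::memcpy(&v, heap, sizeof(v));
        return v == 0xfffffffedcba9876ULL ? 0 : 1; // round-trips on little-endian
    }

Note that signedness is irrelevant for a full 32-bit store (the whole register is written either way), which is presumably why the high-word store passing /* signed */ true is harmless.
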
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -91,16 +91,21 @@ class CodeGeneratorARM : public CodeGene
     {
         MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
         masm.cmpPtr(reg, ImmWord(0));
         emitBranch(cond, ifTrue, ifFalse);
     }
 
     void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base);
 
+    template <typename T>
+    void emitWasmLoad(T* ins);
+    template <typename T>
+    void emitWasmStore(T* ins);
+
   public:
     // Instruction visitors.
     virtual void visitMinMaxD(LMinMaxD* ins);
     virtual void visitMinMaxF(LMinMaxF* ins);
     virtual void visitAbsD(LAbsD* ins);
     virtual void visitAbsF(LAbsF* ins);
     virtual void visitSqrtD(LSqrtD* ins);
     virtual void visitSqrtF(LSqrtF* ins);
@@ -228,17 +233,19 @@ class CodeGeneratorARM : public CodeGene
     void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir);
     void visitAsmSelect(LAsmSelect* ins);
     void visitAsmReinterpret(LAsmReinterpret* ins);
     void emitAsmJSCall(LAsmJSCallBase* ins);
     void visitAsmJSCall(LAsmJSCall* ins);
     void visitAsmJSCallI64(LAsmJSCallI64* ins);
     void visitWasmBoundsCheck(LWasmBoundsCheck* ins);
     void visitWasmLoad(LWasmLoad* ins);
+    void visitWasmLoadI64(LWasmLoadI64* ins);
     void visitWasmStore(LWasmStore* ins);
+    void visitWasmStoreI64(LWasmStoreI64* ins);
     void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins);
     void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
     void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins);
     void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
     void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
     void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
     void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
     void visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout* ins);
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -614,31 +614,49 @@ LIRGeneratorARM::visitWasmBoundsCheck(MW
 
 void
 LIRGeneratorARM::visitWasmLoad(MWasmLoad* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation baseAlloc = useRegisterAtStart(base);
+
+    if (ins->type() == MIRType::Int64) {
+        auto* lir = new(alloc()) LWasmLoadI64(baseAlloc);
+        if (ins->offset() || ins->accessType() == Scalar::Int64)
+            lir->setTemp(0, tempCopy(base, 0));
+        defineInt64(lir, ins);
+        return;
+    }
+
     auto* lir = new(alloc()) LWasmLoad(baseAlloc);
-
     if (ins->offset())
         lir->setTemp(0, tempCopy(base, 0));
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorARM::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation baseAlloc = useRegisterAtStart(base);
+
+    if (ins->value()->type() == MIRType::Int64) {
+        LInt64Allocation valueAlloc = useInt64RegisterAtStart(ins->value());
+        auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
+        if (ins->offset() || ins->accessType() == Scalar::Int64)
+            lir->setTemp(0, tempCopy(base, 0));
+        add(lir, ins);
+        return;
+    }
+
     LAllocation valueAlloc = useRegisterAtStart(ins->value());
     auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
 
     if (ins->offset())
         lir->setTemp(0, tempCopy(base, 0));
 
     add(lir, ins);
 }
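
The lowering change in both visitWasmLoad and visitWasmStore reduces to one shared predicate: the base pointer needs a mutable copy exactly when codegen will write to it, that is, when a nonzero constant offset must be folded in or when an Int64 access will bump the pointer between the two word transfers. Without the tempCopy, ma_add would clobber the register allocated at-start for the base. A compact, simplified restatement (standalone types, hypothetical helper name):

    #include <cstdint>

    enum class ScalarType { Int8, Int16, Int32, Uint32, Int64, Float32, Float64 };

    // Mirrors the condition added to LIRGeneratorARM::visitWasmLoad/visitWasmStore:
    //     if (ins->offset() || ins->accessType() == Scalar::Int64)
    //         lir->setTemp(0, tempCopy(base, 0));
    constexpr bool needsPtrCopy(uint32_t offset, ScalarType accessType) {
        return offset != 0 || accessType == ScalarType::Int64;
    }

    static_assert(!needsPtrCopy(0, ScalarType::Int32), "plain access: no temp");
    static_assert(needsPtrCopy(8, ScalarType::Int32), "offset folding needs a temp");
    static_assert(needsPtrCopy(0, ScalarType::Int64), "i64 split always needs a temp");

    int main() { return 0; }
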