Bug 1507572 - correctly implement unaligned stores of i64 sub-fields. r=bbouvier
authorLars T Hansen <lhansen@mozilla.com>
Fri, 16 Nov 2018 09:39:26 +0100
changeset 446734 fc0bc3b27660a2dcf0be70ad2e2dabd400f307aa
parent 446733 f970d9b9b00c57bef40a51639390a0b2d4c1502e
child 446735 7069cf49cb20869b3705a94047b8f763001a4223
push id109916
push user: lhansen@mozilla.com
push date: Fri, 16 Nov 2018 10:56:57 +0000
treeherder: mozilla-inbound@fc0bc3b27660 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: bbouvier
bugs: 1507572
milestone: 65.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1507572 - correctly implement unaligned stores of i64 sub-fields. r=bbouvier Fixes Rabaldr by adding code to the ARM assembler to deal with unaligned 16- and 32-bit stores from a 64-bit datum. Fixes Baldr by dispatching on the value type (as we do everywhere else), not the access type, and then relying on the just-fixed assembler to do the right thing.
js/src/jit-test/tests/wasm/regress/unaligned-store64.js
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/MacroAssembler-arm.cpp
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/regress/unaligned-store64.js
@@ -0,0 +1,35 @@
+var ins = new WebAssembly.Instance(new WebAssembly.Module(wasmTextToBinary(
+    `(module
+       (memory (export "mem") 1 1)
+       (func (export "store_32_1") (param $ptr i32)
+         (i64.store32 align=1 (get_local $ptr) (i64.const 0xabba1337)))
+       (func (export "store_32_2") (param $ptr i32)
+         (i64.store32 align=2 (get_local $ptr) (i64.const 0xabba1337)))
+       (func (export "store_16") (param $ptr i32)
+         (i64.store16 align=1 (get_local $ptr) (i64.const 0x1337))))`))).exports;
+
+var mem = new Uint8Array(ins.mem.buffer);
+
+ins.store_16(1);
+assertEq(mem[1], 0x37);
+assertEq(mem[2], 0x13);
+
+ins.store_32_1(11);
+assertEq(mem[11], 0x37);
+assertEq(mem[12], 0x13);
+assertEq(mem[13], 0xba);
+assertEq(mem[14], 0xab);
+
+ins.store_32_2(18);
+assertEq(mem[18], 0x37);
+assertEq(mem[19], 0x13);
+assertEq(mem[20], 0xba);
+assertEq(mem[21], 0xab);
+
+// This must also work on all platforms even though we're lying about the
+// alignment.
+ins.store_32_2(29);
+assertEq(mem[29], 0x37);
+assertEq(mem[30], 0x13);
+assertEq(mem[31], 0xba);
+assertEq(mem[32], 0xab);
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2170,25 +2170,25 @@ CodeGenerator::visitWasmStoreI64(LWasmSt
     emitWasmStore(lir);
 }
 
 template<typename T>
 void
 CodeGeneratorARM::emitWasmUnalignedStore(T* lir)
 {
     const MWasmStore* mir = lir->mir();
-    Scalar::Type accessType = mir->access().type();
-
+    MIRType valueType = mir->value()->type();
     Register ptr = ToRegister(lir->ptrCopy());
     Register valOrTmp = ToRegister(lir->valueHelper());
-    if (accessType == Scalar::Int64) {
+
+    if (valueType == MIRType::Int64) {
         masm.wasmUnalignedStoreI64(mir->access(),
                                    ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex)),
                                    HeapReg, ptr, ptr, valOrTmp);
-    } else if (accessType == Scalar::Float32 || accessType == Scalar::Float64) {
+    } else if (valueType == MIRType::Float32 || valueType == MIRType::Double) {
         FloatRegister value = ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex));
         masm.wasmUnalignedStoreFP(mir->access(), value, HeapReg, ptr, ptr, valOrTmp);
     } else {
         masm.wasmUnalignedStore(mir->access(), valOrTmp, HeapReg, ptr, ptr, Register::Invalid());
     }
 }
 
 void
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -6570,19 +6570,22 @@ MacroAssemblerARM::wasmUnalignedStoreImp
     ma_add(memoryBase, ptr);
 
     asMasm().memoryBarrierAfter(access.sync());
 
     // If it's a two-word store we must store the high word first to get signal
     // handling right.
 
     if (val64 != Register64::Invalid()) {
-        MOZ_ASSERT(byteSize == 8);
-        emitUnalignedStore(&access, /*size=*/4, ptr, val64.high, /*offset=*/4);
-        emitUnalignedStore(nullptr, /*size=*/4, ptr, val64.low);
+        if (byteSize == 8) {
+            emitUnalignedStore(&access, /*size=*/4, ptr, val64.high, /*offset=*/4);
+            emitUnalignedStore(nullptr, /*size=*/4, ptr, val64.low);
+        } else {
+            emitUnalignedStore(&access, byteSize, ptr, val64.low);
+        }
     } else if (!floatValue.isInvalid()) {
         if (floatValue.isDouble()) {
             MOZ_ASSERT(byteSize == 8);
             ScratchRegisterScope scratch(asMasm());
             ma_vxfer(floatValue, scratch, valOrTmp);
             emitUnalignedStore(&access, /*size=*/4, ptr, valOrTmp, /*offset=*/4);
             emitUnalignedStore(nullptr, /*size=*/4, ptr, scratch);
         } else {