Bug 1254167 - Don't allow folding to full range for atomic accesses; r=sunfish, a=lizzard
author Benjamin Bouvier <benj@benj.me>
Tue, 08 Mar 2016 20:18:46 +0100
changeset 323473 5a5f9c53b6aa4c4ce0da90b713f6936ad1915636
parent 323472 c1af4f3c15704b3a59ee373c05bdd561beaaa177
child 323474 c4a154a8765550da45b4a477673fcd35754f80bf
push id 5913
push user jlund@mozilla.com
push date Mon, 25 Apr 2016 16:57:49 +0000
treeherder mozilla-beta@dcaf0a6fa115
reviewers sunfish, lizzard
bugs 1254167
milestone 47.0a2
Bug 1254167 - Don't allow folding to full range for atomic accesses; r=sunfish, a=lizzard MozReview-Commit-ID: 23Gj5nV3oqq
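Context for the change: with signal-handler OOB handling (64-bit platforms), SpiderMonkey maps a large guard region past the asm.js heap, so a constant offset up to WasmImmediateRange can be folded straight into the access's addressing mode and any out-of-bounds fault is caught by the handler. Atomic accesses can't use that mechanism yet (bug 1254935), so their folded offsets must stay within what the explicit compare-and-branch bounds check covers. A minimal sketch of the two addressing forms, with illustrative names rather than SpiderMonkey's:

    #include <cstdint>

    // Folded form: the constant offset rides along in the addressing mode.
    // Safe under signal-handler OOB handling because base+index+offset still
    // lands inside the mapped guard region, where the fault is caught.
    uint32_t loadFolded(const uint8_t* heapBase, uint32_t index, uint32_t offset) {
        return *reinterpret_cast<const uint32_t*>(heapBase + index + offset);
    }

    // Unfolded form: the offset is first added to the index with 32-bit
    // wraparound arithmetic (the MAdd the first hunk emits), the access itself
    // carries offset 0, and an explicit bounds check guards the result.
    uint32_t loadUnfolded(const uint8_t* heapBase, uint32_t index, uint32_t offset,
                          uint32_t heapLength) {
        uint32_t effective = index + offset;  // wraps mod 2^32
        if (effective > heapLength - sizeof(uint32_t))
            return 0;  // plain asm.js loads yield 0 when out of bounds
        return *reinterpret_cast<const uint32_t*>(heapBase + effective);
    }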
js/src/asmjs/WasmIonCompile.cpp
js/src/jit-test/tests/asm.js/testAtomics.js
js/src/jit/EffectiveAddressAnalysis.cpp
js/src/jit/MIRGenerator.h
js/src/jit/MIRGraph.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -1419,17 +1419,19 @@ EmitHeapAddress(FunctionCompiler& f, MDe
         return true;
     }
 
     // TODO Remove this after implementing non-wraparound offset semantics.
     uint32_t endOffset = access->endOffset();
     if (endOffset < offset)
         return false;
     bool accessNeedsBoundsCheck = true;
-    if (endOffset > f.mirGen().foldableOffsetRange(accessNeedsBoundsCheck)) {
+    // Assume worst case.
+    bool atomicAccess = true;
+    if (endOffset > f.mirGen().foldableOffsetRange(accessNeedsBoundsCheck, atomicAccess)) {
         MDefinition* rhs = f.constant(Int32Value(offset), MIRType_Int32);
         *base = f.binary<MAdd>(*base, rhs, MIRType_Int32);
         offset = 0;
         access->setOffset(offset);
     }
 
     // TODO Remove this after implementing unaligned loads/stores.
     int32_t maskVal = ~(Scalar::byteSize(access->accessType()) - 1);
--- a/js/src/jit-test/tests/asm.js/testAtomics.js
+++ b/js/src/jit-test/tests/asm.js/testAtomics.js
@@ -1837,24 +1837,40 @@ var heap = new SharedArrayBuffer(65536);
 test_int8(heap);
 test_uint8(heap);
 test_int16(heap);
 test_uint16(heap);
 test_int32(heap);
 test_uint32(heap);
 test_misc(heap);
 
+// Bug 1254167: Effective Address Analysis should have no effect on atomic accesses.
+var code = `
+    "use asm";
+    var HEAP32 = new stdlib.Int32Array(heap);
+    var load = stdlib.Atomics.load;
+    function f() {
+        var i2 = 0;
+        i2 = 305002 | 0;
+        return load(HEAP32, i2 >> 2) | 0;
+    }
+    return f;
+`;
+var f = asmLink(asmCompile('stdlib', 'ffi', 'heap', code), this, {}, new SharedArrayBuffer(0x10000));
+assertErrorMessage(f, RangeError, /out-of-range index/);
+
 // Test that ARM callouts compile.
 setARMHwCapFlags('vfp');
 
 asmCompile('stdlib', 'ffi', 'heap',
     USE_ASM + `
     var atomic_exchange = stdlib.Atomics.exchange;
     var i8a = new stdlib.Int8Array(heap);
 
     function do_xchg() {
         var v = 0;
         v = atomic_exchange(i8a, 200, 37);
         return v|0;
     }
 
     return { xchg: do_xchg }
 `);
+
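The regression test's constants, worked through: the heap is 0x10000 = 65536 bytes; f() computes Int32Array index 305002 >> 2 = 76250, i.e. byte address 76250 * 4 = 305000, which lies past the end of the heap, so the atomic load must throw a RangeError instead of having its address folded out from under the bounds check. A standalone check of that arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t heapLength = 0x10000;        // 65536-byte SharedArrayBuffer
        const uint32_t elemIndex  = 305002 >> 2;    // 76250, as f() computes it
        const uint32_t byteAddr   = elemIndex * 4;  // 305000 for an Int32Array
        // 305000 + 4 > 65536: the access is out of bounds, so the asm.js
        // atomic must throw rather than touch memory past the heap.
        assert(byteAddr + sizeof(int32_t) > heapLength);
        return 0;
    }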
--- a/js/src/jit/EffectiveAddressAnalysis.cpp
+++ b/js/src/jit/EffectiveAddressAnalysis.cpp
@@ -185,17 +185,18 @@ EffectiveAddressAnalysis::analyzeAsmHeap
 //   truncate((y << {0,1,2,3}) + imm32)
 bool
 EffectiveAddressAnalysis::analyze()
 {
     for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
         for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
             // Note that we don't check for MAsmJSCompareExchangeHeap
             // or MAsmJSAtomicBinopHeap, because the backend and the OOB
-            // mechanism don't support non-zero offsets for them yet.
+            // mechanism don't support non-zero offsets for them yet
+            // (TODO bug 1254935).
             if (i->isLsh())
                 AnalyzeLsh(graph_.alloc(), i->toLsh());
             else if (i->isAsmJSLoadHeap())
                 analyzeAsmHeapAccess(i->toAsmJSLoadHeap());
             else if (i->isAsmJSStoreHeap())
                 analyzeAsmHeapAccess(i->toAsmJSStoreHeap());
         }
     }
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -224,17 +224,17 @@ class MIRGenerator
     AsmJSPerfSpewer& perfSpewer() { return asmJSPerfSpewer_; }
 #endif
 
   public:
     const JitCompileOptions options;
 
     bool needsAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access) const;
     size_t foldableOffsetRange(const MAsmJSHeapAccess* access) const;
-    size_t foldableOffsetRange(bool accessNeedsBoundsCheck) const;
+    size_t foldableOffsetRange(bool accessNeedsBoundsCheck, bool atomic) const;
 
   private:
     GraphSpewer gs_;
 
   public:
     GraphSpewer& graphSpewer() {
         return gs_;
     }
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -121,38 +121,39 @@ MIRGenerator::needsAsmJSBoundsCheckBranc
         return false;
 #endif
     return access->needsBoundsCheck();
 }
 
 size_t
 MIRGenerator::foldableOffsetRange(const MAsmJSHeapAccess* access) const
 {
-    return foldableOffsetRange(access->needsBoundsCheck());
+    return foldableOffsetRange(access->needsBoundsCheck(), access->isAtomicAccess());
 }
 
 size_t
-MIRGenerator::foldableOffsetRange(bool accessNeedsBoundsCheck) const
+MIRGenerator::foldableOffsetRange(bool accessNeedsBoundsCheck, bool atomic) const
 {
     // This determines whether it's ok to fold up to WasmImmediateRange
     // offsets, instead of just WasmCheckedImmediateRange.
 
     static_assert(WasmCheckedImmediateRange <= WasmImmediateRange,
                   "WasmImmediateRange should be the size of an unconstrained "
                   "address immediate");
 
 #ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
     static_assert(wasm::Uint32Range + WasmImmediateRange + sizeof(wasm::Val) < wasm::MappedSize,
                   "When using signal handlers for bounds checking, a uint32 is added to the base "
                   "address followed by an immediate in the range [0, WasmImmediateRange). An "
                   "unaligned access (whose size is conservatively approximated by wasm::Val) may "
                   "spill over, so ensure a space at the end.");
 
     // Signal-handling can be dynamically disabled by OS bugs or flags.
-    if (usesSignalHandlersForAsmJSOOB_)
+    // Bug 1254935: Atomic accesses can't be handled with signal handlers yet.
+    if (usesSignalHandlersForAsmJSOOB_ && !atomic)
         return WasmImmediateRange;
 #endif
 
     // On 32-bit platforms, if we've proven the access is in bounds after
     // 32-bit wrapping, we can fold full offsets because they're added with
     // 32-bit arithmetic.
     if (sizeof(intptr_t) == sizeof(int32_t) && !accessNeedsBoundsCheck)
         return WasmImmediateRange;
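The decision the patched function encodes, condensed into a standalone sketch. The constant values and the platform/signal-handler flags are illustrative stand-ins, and the diff context cuts off before the function's tail, which presumably falls through to the checked range:

    #include <cstddef>

    // Illustrative stand-ins for the real wasm constants.
    static const size_t WasmCheckedImmediateRange = 4096;
    static const size_t WasmImmediateRange        = size_t(1) << 31;

    size_t foldableOffsetRange(bool usesSignalHandlers, bool needsBoundsCheck,
                               bool atomic, bool is32BitPlatform) {
        // Signal handlers make any immediate in [0, WasmImmediateRange) safe
        // to fold, since the fault lands in the guard region -- but not for
        // atomic accesses yet (bug 1254935), which is what this patch adds.
        if (usesSignalHandlers && !atomic)
            return WasmImmediateRange;

        // On 32-bit platforms a proven-in-bounds access can fold the full
        // range, because offsets are added with wraparound 32-bit arithmetic.
        if (is32BitPlatform && !needsBoundsCheck)
            return WasmImmediateRange;

        // Otherwise the offset must stay inside what the explicit
        // compare-and-branch bounds check accounts for.
        return WasmCheckedImmediateRange;
    }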
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -409,17 +409,17 @@ CodeGeneratorX86Shared::maybeEmitAsmJSLo
 {
     MOZ_ASSERT(!Scalar::isSimdType(mir->accessType()));
     *ool = nullptr;
 
     if (!gen->needsAsmJSBoundsCheckBranch(mir))
         return wasm::HeapAccess::NoLengthCheck;
 
     if (mir->isAtomicAccess())
-        return maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ins->ptr());
+        return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), nullptr);
 
     *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(ins->output()),
                                                            mir->accessType());
 
     addOutOfLineCode(*ool, mir);
     return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), (*ool)->entry());
 }
 
@@ -429,17 +429,17 @@ CodeGeneratorX86Shared::maybeEmitAsmJSSt
 {
     MOZ_ASSERT(!Scalar::isSimdType(mir->accessType()));
     *rejoin = nullptr;
 
     if (!gen->needsAsmJSBoundsCheckBranch(mir))
         return wasm::HeapAccess::NoLengthCheck;
 
     if (mir->isAtomicAccess())
-        return maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ins->ptr());
+        return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), nullptr);
 
     *rejoin = alloc().lifoAlloc()->newInfallible<Label>();
     return emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), *rejoin);
 }
 
 void
 CodeGeneratorX86Shared::cleanupAfterAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access,
                                                            Register ptr)
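In the two codegen hunks above, atomic accesses stop using the out-of-line path (which, for plain accesses, turns an out-of-bounds load into 0 and drops an out-of-bounds store) and instead emit the bounds-check branch with a null out-of-line label, which appears to route failures to the throwing OOB exit; that is what the new test observes as a RangeError. The resulting control flow, as a sketch with hypothetical names:

    // Hypothetical condensation of maybeEmitAsmJSLoadBoundsCheck /
    // maybeEmitAsmJSStoreBoundsCheck after this patch.
    enum class OobBehavior {
        NoCheck,        // wasm::HeapAccess::NoLengthCheck: access proven safe
        Throw,          // atomics: branch straight to the OOB throw path
        OutOfLineFixup  // plain accesses: OOB load yields 0, OOB store ignored
    };

    OobBehavior pickOobBehavior(bool needsBoundsCheck, bool atomic) {
        if (!needsBoundsCheck)
            return OobBehavior::NoCheck;
        if (atomic)
            return OobBehavior::Throw;
        return OobBehavior::OutOfLineFixup;
    }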