Bug 1326302 - Part 4: Guard against reentrancy when patching jumps as well.
author: Emanuel Hoogeveen <emanuel.hoogeveen@gmail.com>
Mon, 02 Jan 2017 14:50:00 +0100
changeset 455484 108cd94d295d25387c63ef2f6c475b885c7cf180
parent 455483 3d875cf8e44d3b63c6cd8632b1570b77734e0e1b
child 455485 91575284650d1e4fcb9e8bbac27ee379825475b7
push id: 40256
push user: bmo:ntim.bugs@gmail.com
push date: Tue, 03 Jan 2017 22:40:23 +0000
bugs: 1326302
milestone: 53.0a1
Bug 1326302 - Part 4: Guard against reentrancy when patching jumps as well.
js/src/ds/PageProtectingVector.h
js/src/jit/x64/Assembler-x64.h
js/src/jit/x86-shared/Assembler-x86-shared.cpp
js/src/jit/x86-shared/Assembler-x86-shared.h
js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
js/src/jit/x86-shared/BaseAssembler-x86-shared.h
--- a/js/src/ds/PageProtectingVector.h
+++ b/js/src/ds/PageProtectingVector.h
@@ -92,17 +92,17 @@ class PageProtectingVector final
 #endif
 
     bool usable;
     bool enabled;
     bool protectUsedEnabled;
     bool protectUnusedEnabled;
 
     bool reentrancyGuardEnabled;
-    mozilla::Atomic<bool, mozilla::ReleaseAcquire> reentrancyGuard;
+    mutable mozilla::Atomic<bool, mozilla::ReleaseAcquire> reentrancyGuard;
 
     MOZ_ALWAYS_INLINE void resetTest() {
         MOZ_ASSERT(protectUsedEnabled || protectUnusedEnabled);
         size_t nextPage = (pageSize - (uintptr_t(begin() + length()) & pageMask)) >> elemShift;
         size_t nextResize = capacity() - length();
         if (MOZ_LIKELY(nextPage <= nextResize))
             elemsUntilTest = intptr_t(nextPage);
         else
@@ -277,30 +277,30 @@ class PageProtectingVector final
     MOZ_NEVER_INLINE MOZ_MUST_USE bool reserveSlow(size_t size);
 
     template<typename U>
     MOZ_NEVER_INLINE void infallibleAppendSlow(const U* values, size_t size);
 
     template<typename U>
     MOZ_NEVER_INLINE MOZ_MUST_USE bool appendSlow(const U* values, size_t size);
 
-    MOZ_ALWAYS_INLINE void lock() {
+    MOZ_ALWAYS_INLINE void lock() const {
         if (MOZ_LIKELY(!GuardAgainstReentrancy || !reentrancyGuardEnabled))
             return;
         if (MOZ_UNLIKELY(!reentrancyGuard.compareExchange(false, true)))
             lockSlow();
     }
 
-    MOZ_ALWAYS_INLINE void unlock() {
+    MOZ_ALWAYS_INLINE void unlock() const {
         if (!GuardAgainstReentrancy)
             return;
         reentrancyGuard = false;
     }
 
-    MOZ_NEVER_INLINE void lockSlow();
+    MOZ_NEVER_INLINE void lockSlow() const;
 
     /* A helper class to guard against concurrent access. */
     class AutoGuardAgainstReentrancy
     {
         PageProtectingVector& vector;
 
       public:
         MOZ_ALWAYS_INLINE explicit AutoGuardAgainstReentrancy(PageProtectingVector& holder)
@@ -309,16 +309,19 @@ class PageProtectingVector final
             vector.lock();
         }
 
         MOZ_ALWAYS_INLINE ~AutoGuardAgainstReentrancy() {
             vector.unlock();
         }
     };
 
+    MOZ_ALWAYS_INLINE T* begin() { return vector.begin(); }
+    MOZ_ALWAYS_INLINE const T* begin() const { return vector.begin(); }
+
   public:
     explicit PageProtectingVector(AllocPolicy policy = AllocPolicy())
       : vector(policy),
         elemsUntilTest(0),
         currPage(0),
         initPage(0),
         lastPage(0),
         lowerBound(InitialLowerBound),
@@ -388,18 +391,29 @@ class PageProtectingVector final
             if (r >= initPage && l < currPage)
                 reprotectRegionSlow(l, r);
         }
     }
 
     MOZ_ALWAYS_INLINE size_t capacity() const { return vector.capacity(); }
     MOZ_ALWAYS_INLINE size_t length() const { return vector.length(); }
 
-    MOZ_ALWAYS_INLINE T* begin() { return vector.begin(); }
-    MOZ_ALWAYS_INLINE const T* begin() const { return vector.begin(); }
+    MOZ_ALWAYS_INLINE T* acquire() {
+        lock();
+        return begin();
+    }
+
+    MOZ_ALWAYS_INLINE const T* acquire() const {
+        lock();
+        return begin();
+    }
+
+    MOZ_ALWAYS_INLINE void release() const {
+        unlock();
+    }
 
     void clear() {
         AutoGuardAgainstReentrancy guard(*this);
         unprotectOldBuffer();
         vector.clear();
         protectNewBuffer();
     }
 
@@ -481,16 +495,16 @@ PageProtectingVector<T, N, AP, P, Q, G, 
 {
     if (MOZ_LIKELY(length() + size <= capacity()))
         return appendNewPage(values, size);
     return appendNewBuffer(values, size);
 }
 
 template<typename T, size_t N, class AP, bool P, bool Q, bool G, size_t I>
 MOZ_NEVER_INLINE void
-PageProtectingVector<T, N, AP, P, Q, G, I>::lockSlow()
+PageProtectingVector<T, N, AP, P, Q, G, I>::lockSlow() const
 {
     MOZ_CRASH("Cannot access PageProtectingVector from more than one thread at a time!");
 }
 
 } /* namespace js */
 
 #endif /* ds_PageProtectingVector_h */
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -357,17 +357,19 @@ class Assembler : public AssemblerX86Sha
         return CodeOffset(masm.currentOffset());
     }
     CodeOffset movWithPatch(ImmPtr imm, Register dest) {
         return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
     }
 
     // This is for patching during code generation, not after.
     void patchAddq(CodeOffset offset, int32_t n) {
-        X86Encoding::SetInt32(masm.data() + offset.offset(), n);
+        unsigned char* code = masm.acquireData();
+        X86Encoding::SetInt32(code + offset.offset(), n);
+        masm.releaseData();
     }
 
     // Load an ImmWord value into a register. Note that this instruction will
     // attempt to optimize its immediate field size. When a full 64-bit
     // immediate is needed for a relocation, use movWithPatch.
     void movq(ImmWord word, Register dest) {
         // Load a 64-bit immediate into a register. If the value falls into
         // certain ranges, we can use specialized instructions which have
--- a/js/src/jit/x86-shared/Assembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.cpp
@@ -92,17 +92,19 @@ AssemblerX86Shared::trace(JSTracer* trc)
         if (rp.kind == Relocation::JITCODE) {
             JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target);
             TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
             MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
         }
     }
     if (dataRelocations_.length()) {
         CompactBufferReader reader(dataRelocations_);
-        ::TraceDataRelocations(trc, masm.data(), reader);
+        unsigned char* code = masm.acquireData();
+        ::TraceDataRelocations(trc, code, reader);
+        masm.releaseData();
     }
 }
 
 void
 AssemblerX86Shared::executableCopy(void* buffer)
 {
     masm.executableCopy(buffer);
 
@@ -256,17 +258,19 @@ AssemblerX86Shared::InvertCondition(Doub
 
 void
 AssemblerX86Shared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
                                                 const Disassembler::HeapAccess& heapAccess)
 {
 #ifdef DEBUG
     if (masm.oom())
         return;
-    Disassembler::VerifyHeapAccess(masm.data() + begin, masm.data() + end, heapAccess);
+    unsigned char* code = masm.acquireData();
+    Disassembler::VerifyHeapAccess(code + begin, code + end, heapAccess);
+    masm.releaseData();
 #endif
 }
 
 CPUInfo::SSEVersion CPUInfo::maxSSEVersion = UnknownSSE;
 CPUInfo::SSEVersion CPUInfo::maxEnabledSSEVersion = UnknownSSE;
 bool CPUInfo::avxPresent = false;
 bool CPUInfo::avxEnabled = false;
 bool CPUInfo::popcntPresent = false;
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -1068,33 +1068,37 @@ class AssemblerX86Shared : public Assemb
         }
     }
 
     CodeOffset callWithPatch() {
         return CodeOffset(masm.call().offset());
     }
 
     void patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
-        unsigned char* code = masm.data();
+        unsigned char* code = masm.acquireData();
         X86Encoding::SetRel32(code + callerOffset, code + calleeOffset);
+        masm.releaseData();
     }
     CodeOffset farJumpWithPatch() {
         return CodeOffset(masm.jmp().offset());
     }
     void patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
-        unsigned char* code = masm.data();
+        unsigned char* code = masm.acquireData();
         X86Encoding::SetRel32(code + farJump.offset(), code + targetOffset);
+        masm.releaseData();
     }
     static void repatchFarJump(uint8_t* code, uint32_t farJumpOffset, uint32_t targetOffset) {
         X86Encoding::SetRel32(code + farJumpOffset, code + targetOffset);
     }
 
     // This is for patching during code generation, not after.
     void patchAddl(CodeOffset offset, int32_t n) {
-        X86Encoding::SetInt32(masm.data() + offset.offset(), n);
+        unsigned char* code = masm.acquireData();
+        X86Encoding::SetInt32(code + offset.offset(), n);
+        masm.releaseData();
     }
 
     CodeOffset twoByteNop() {
         return CodeOffset(masm.twoByteNop().offset());
     }
     static void patchTwoByteNopToJump(uint8_t* jump, uint8_t* target) {
         X86Encoding::BaseAssembler::patchTwoByteNopToJump(jump, target);
     }
--- a/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
+++ b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
@@ -111,52 +111,58 @@ namespace jit {
         {
             if (MOZ_UNLIKELY(!m_buffer.append(values, size))) {
                 oomDetected();
                 return false;
             }
             return true;
         }
 
-        unsigned char* data()
-        {
-            return m_buffer.begin();
-        }
-
         size_t size() const
         {
             return m_buffer.length();
         }
 
         bool oom() const
         {
             return m_oom;
         }
 
-        const unsigned char* buffer() const {
-            MOZ_ASSERT(!m_oom);
-            return m_buffer.begin();
+#ifndef RELEASE_OR_BETA
+        const unsigned char* acquireBuffer() const
+        {
+            MOZ_RELEASE_ASSERT(!m_oom);
+            return m_buffer.acquire();
         }
-
-#ifndef RELEASE_OR_BETA
+        void releaseBuffer() const { m_buffer.release(); }
+        unsigned char* acquireData() { return m_buffer.acquire(); }
+        void releaseData() const { m_buffer.release(); }
         void disableProtection() { m_buffer.disableProtection(); }
         void enableProtection() { m_buffer.enableProtection(); }
         void setLowerBoundForProtection(size_t size)
         {
             m_buffer.setLowerBoundForProtection(size);
         }
         void unprotectRegion(unsigned char* first, size_t size)
         {
             m_buffer.unprotectRegion(first, size);
         }
         void reprotectRegion(unsigned char* first, size_t size)
         {
             m_buffer.reprotectRegion(first, size);
         }
 #else
+        const unsigned char* acquireBuffer() const
+        {
+            MOZ_RELEASE_ASSERT(!m_oom);
+            return m_buffer.begin();
+        }
+        void releaseBuffer() const {}
+        unsigned char* acquireData() { return m_buffer.begin(); }
+        void releaseData() const {}
         void disableProtection() {}
         void enableProtection() {}
         void setLowerBoundForProtection(size_t) {}
         void unprotectRegion(unsigned char*, size_t) {}
         void reprotectRegion(unsigned char*, size_t) {}
 #endif
 
     protected:
--- a/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
@@ -49,18 +49,20 @@ class BaseAssembler : public GenericAsse
 public:
     BaseAssembler()
       : useVEX_(true)
     { }
 
     void disableVEX() { useVEX_ = false; }
 
     size_t size() const { return m_formatter.size(); }
-    const unsigned char* buffer() const { return m_formatter.buffer(); }
-    unsigned char* data() { return m_formatter.data(); }
+    const unsigned char* acquireBuffer() const { return m_formatter.acquireBuffer(); }
+    void releaseBuffer() const { m_formatter.releaseBuffer(); }
+    unsigned char* acquireData() { return m_formatter.acquireData(); }
+    void releaseData() const { m_formatter.releaseData(); }
     bool oom() const { return m_formatter.oom(); }
 
     void disableProtection() { m_formatter.disableProtection(); }
     void enableProtection() { m_formatter.enableProtection(); }
     void setLowerBoundForProtection(size_t size)
     {
         m_formatter.setLowerBoundForProtection(size);
     }
@@ -3773,18 +3775,19 @@ threeByteOpImmSimd("vblendps", VEX_PD, O
     {
         // Sanity check - if the assembler has OOM'd, it will start overwriting
         // its internal buffer and thus our links could be garbage.
         if (oom())
             return false;
 
         assertValidJmpSrc(from);
 
-        const unsigned char* code = m_formatter.data();
+        const unsigned char* code = m_formatter.acquireData();
         int32_t offset = GetInt32(code + from.offset());
+        m_formatter.releaseData();
         if (offset == -1)
             return false;
 
         if (MOZ_UNLIKELY(size_t(offset) >= size())) {
 #ifdef NIGHTLY_BUILD
             // Stash some data on the stack so we can retrieve it from minidumps,
             // see bug 1124397.
             int32_t startOffset = from.offset() - 1;
@@ -3817,45 +3820,52 @@ threeByteOpImmSimd("vblendps", VEX_PD, O
         // Sanity check - if the assembler has OOM'd, it will start overwriting
         // its internal buffer and thus our links could be garbage.
         if (oom())
             return;
 
         assertValidJmpSrc(from);
         MOZ_RELEASE_ASSERT(to.offset() == -1 || size_t(to.offset()) <= size());
 
-        unsigned char* code = m_formatter.data();
+        unsigned char* code = m_formatter.acquireData();
         SetInt32(code + from.offset(), to.offset());
+        m_formatter.releaseData();
     }
 
     void linkJump(JmpSrc from, JmpDst to)
     {
         MOZ_ASSERT(from.offset() != -1);
         MOZ_ASSERT(to.offset() != -1);
 
         // Sanity check - if the assembler has OOM'd, it will start overwriting
         // its internal buffer and thus our links could be garbage.
         if (oom())
             return;
 
         assertValidJmpSrc(from);
         MOZ_RELEASE_ASSERT(size_t(to.offset()) <= size());
 
         spew(".set .Lfrom%d, .Llabel%d", from.offset(), to.offset());
-        unsigned char* code = m_formatter.data();
+        unsigned char* code = m_formatter.acquireData();
         SetRel32(code + from.offset(), code + to.offset());
-    }
-
-    void executableCopy(void* buffer)
-    {
-        memcpy(buffer, m_formatter.buffer(), size());
+        m_formatter.releaseData();
+    }
+
+    void executableCopy(void* dst)
+    {
+        const unsigned char* src = m_formatter.acquireBuffer();
+        memcpy(dst, src, size());
+        m_formatter.releaseBuffer();
     }
     MOZ_MUST_USE bool appendBuffer(const BaseAssembler& other)
     {
-        return m_formatter.append(other.m_formatter.buffer(), other.size());
+        const unsigned char* buf = other.m_formatter.acquireBuffer();
+        bool ret = m_formatter.append(buf, other.size());
+        other.m_formatter.releaseBuffer();
+        return ret;
     }
 
   protected:
     static bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(int8_t)value; }
     static bool CAN_SIGN_EXTEND_16_32(int32_t value) { return value == (int32_t)(int16_t)value; }
     static bool CAN_ZERO_EXTEND_8_32(int32_t value) { return value == (int32_t)(uint8_t)value; }
     static bool CAN_ZERO_EXTEND_8H_32(int32_t value) { return value == (value & 0xff00); }
     static bool CAN_ZERO_EXTEND_16_32(int32_t value) { return value == (int32_t)(uint16_t)value; }
@@ -5109,20 +5119,22 @@ threeByteOpImmSimd("vblendps", VEX_PD, O
         {
             m_buffer.ensureSpace(sizeof(int32_t));
             m_buffer.putIntUnchecked(i);
         }
 
         // Administrative methods:
 
         size_t size() const { return m_buffer.size(); }
-        const unsigned char* buffer() const { return m_buffer.buffer(); }
+        const unsigned char* acquireBuffer() const { return m_buffer.acquireBuffer(); }
+        void releaseBuffer() const { m_buffer.releaseBuffer(); }
+        unsigned char* acquireData() { return m_buffer.acquireData(); }
+        void releaseData() const { m_buffer.releaseData(); }
         bool oom() const { return m_buffer.oom(); }
         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
-        unsigned char* data() { return m_buffer.data(); }
 
         void disableProtection() { m_buffer.disableProtection(); }
         void enableProtection() { m_buffer.enableProtection(); }
         void setLowerBoundForProtection(size_t size)
         {
             m_buffer.setLowerBoundForProtection(size);
         }
         void unprotectRegion(unsigned char* first, size_t size)