Bug 1502207 Part 2 - Use new atomic access API in Gecko, r=froydnj.
authorBrian Hackett <bhackett1024@gmail.com>
Thu, 25 Oct 2018 11:42:18 -1000
changeset 499714 c7ed95dcc001cb73dd7f0fceb2335929909e771f
parent 499713 9a3d3ccfdd50276320d819e5be9174ba7859a13a
child 499715 e911b99c9c083417d46dd2b24e7933430f4d392a
push id: 10290
push user: ffxbld-merge
push date: Mon, 03 Dec 2018 16:23:23 +0000
treeherder: mozilla-beta@700bed2445e6 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: froydnj
bugs: 1502207
milestone: 65.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1502207 Part 2 - Use new atomic access API in Gecko, r=froydnj.
mfbt/Atomics.h
mfbt/RefCounted.h
xpcom/base/nsISupportsImpl.h
--- a/mfbt/Atomics.h
+++ b/mfbt/Atomics.h
@@ -153,23 +153,23 @@ namespace detail {
  * runs non-deterministically when recording/replaying, such as during GC, the
  * JS interrupt callback, or code that is affected by JIT compilation or
  * debugger activity.
  */
 template<recordreplay::Behavior Recording> struct AutoRecordAtomicAccess;
 
 template<>
 struct AutoRecordAtomicAccess<recordreplay::Behavior::DontPreserve> {
-  AutoRecordAtomicAccess() {}
+  explicit AutoRecordAtomicAccess(const void* aValue) {}
   ~AutoRecordAtomicAccess() {}
 };
 
 template<>
 struct AutoRecordAtomicAccess<recordreplay::Behavior::Preserve> {
-  AutoRecordAtomicAccess() { recordreplay::BeginOrderedAtomicAccess(); }
+  explicit AutoRecordAtomicAccess(const void* aValue) { recordreplay::BeginOrderedAtomicAccess(aValue); }
   ~AutoRecordAtomicAccess() { recordreplay::EndOrderedAtomicAccess(); }
 };
 
 /*
  * We provide CompareExchangeFailureOrder to work around a bug in some
  * versions of GCC's <atomic> header.  See bug 898491.
  */
 template<MemoryOrdering Order> struct AtomicOrderConstraints;
@@ -213,74 +213,74 @@ struct IntrinsicBase
 
 template<typename T, MemoryOrdering Order, recordreplay::Behavior Recording>
 struct IntrinsicMemoryOps : public IntrinsicBase<T, Order>
 {
   typedef IntrinsicBase<T, Order> Base;
 
   static T load(const typename Base::ValueType& aPtr)
   {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(&aPtr);
     return aPtr.load(Base::OrderedOp::LoadOrder);
   }
 
   static void store(typename Base::ValueType& aPtr, T aVal)
   {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(&aPtr);
     aPtr.store(aVal, Base::OrderedOp::StoreOrder);
   }
 
   static T exchange(typename Base::ValueType& aPtr, T aVal)
   {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(&aPtr);
     return aPtr.exchange(aVal, Base::OrderedOp::AtomicRMWOrder);
   }
 
   static bool compareExchange(typename Base::ValueType& aPtr,
                               T aOldVal, T aNewVal)
   {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(&aPtr);
     return aPtr.compare_exchange_strong(aOldVal, aNewVal,
                                         Base::OrderedOp::AtomicRMWOrder,
                                         Base::OrderedOp::CompareExchangeFailureOrder);
   }
 };
 
 template<typename T, MemoryOrdering Order, recordreplay::Behavior Recording>
 struct IntrinsicAddSub : public IntrinsicBase<T, Order>
 {
   typedef IntrinsicBase<T, Order> Base;
 
   static T add(typename Base::ValueType& aPtr, T aVal)
   {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(&aPtr);
     return aPtr.fetch_add(aVal, Base::OrderedOp::AtomicRMWOrder);
   }
 
   static T sub(typename Base::ValueType& aPtr, T aVal)
   {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(&aPtr);
     return aPtr.fetch_sub(aVal, Base::OrderedOp::AtomicRMWOrder);
   }
 };
 
 template<typename T, MemoryOrdering Order, recordreplay::Behavior Recording>
 struct IntrinsicAddSub<T*, Order, Recording> : public IntrinsicBase<T*, Order>
 {
   typedef IntrinsicBase<T*, Order> Base;
 
   static T* add(typename Base::ValueType& aPtr, ptrdiff_t aVal)
   {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(&aPtr);
     return aPtr.fetch_add(aVal, Base::OrderedOp::AtomicRMWOrder);
   }
 
   static T* sub(typename Base::ValueType& aPtr, ptrdiff_t aVal)
   {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(&aPtr);
     return aPtr.fetch_sub(aVal, Base::OrderedOp::AtomicRMWOrder);
   }
 };
 
 template<typename T, MemoryOrdering Order, recordreplay::Behavior Recording>
 struct IntrinsicIncDec : public IntrinsicAddSub<T, Order, Recording>
 {
   typedef IntrinsicBase<T, Order> Base;
@@ -299,29 +299,29 @@ struct IntrinsicIncDec : public Intrinsi
 template<typename T, MemoryOrdering Order, recordreplay::Behavior Recording>
 struct AtomicIntrinsics : public IntrinsicMemoryOps<T, Order, Recording>,
                           public IntrinsicIncDec<T, Order, Recording>
 {
   typedef IntrinsicBase<T, Order> Base;
 
   static T or_(typename Base::ValueType& aPtr, T aVal)
   {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(&aPtr);
     return aPtr.fetch_or(aVal, Base::OrderedOp::AtomicRMWOrder);
   }
 
   static T xor_(typename Base::ValueType& aPtr, T aVal)
   {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(&aPtr);
     return aPtr.fetch_xor(aVal, Base::OrderedOp::AtomicRMWOrder);
   }
 
   static T and_(typename Base::ValueType& aPtr, T aVal)
   {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(&aPtr);
     return aPtr.fetch_and(aVal, Base::OrderedOp::AtomicRMWOrder);
   }
 };
 
 template<typename T, MemoryOrdering Order, recordreplay::Behavior Recording>
 struct AtomicIntrinsics<T*, Order, Recording>
   : public IntrinsicMemoryOps<T*, Order, Recording>,
     public IntrinsicIncDec<T*, Order, Recording>
--- a/mfbt/RefCounted.h
+++ b/mfbt/RefCounted.h
@@ -119,50 +119,50 @@ public:
     // Memory synchronization is not required when incrementing a
     // reference count.  The first increment of a reference count on a
     // thread is not important, since the first use of the object on a
     // thread can happen before it.  What is important is the transfer
     // of the pointer to that thread, which may happen prior to the
     // first increment on that thread.  The necessary memory
     // synchronization is done by the mechanism that transfers the
     // pointer between threads.
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(this);
     return mValue.fetch_add(1, std::memory_order_relaxed) + 1;
   }
 
   T operator--()
   {
     // Since this may be the last release on this thread, we need
     // release semantics so that prior writes on this thread are visible
     // to the thread that destroys the object when it reads mValue with
     // acquire semantics.
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(this);
     T result = mValue.fetch_sub(1, std::memory_order_release) - 1;
     if (result == 0) {
       // We're going to destroy the object on this thread, so we need
       // acquire semantics to synchronize with the memory released by
       // the last release on other threads, that is, to ensure that
       // writes prior to that release are now visible on this thread.
       std::atomic_thread_fence(std::memory_order_acquire);
     }
     return result;
   }
 
   // This method is only called in debug builds, so we're not too concerned
   // about its performance.
   void operator=(const T& aValue) {
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(this);
     mValue.store(aValue, std::memory_order_seq_cst);
   }
 
   operator T() const
   {
     // Use acquire semantics since we're not sure what the caller is
     // doing.
-    AutoRecordAtomicAccess<Recording> record;
+    AutoRecordAtomicAccess<Recording> record(this);
     return mValue.load(std::memory_order_acquire);
   }
 
 private:
   std::atomic<T> mValue;
 };
 
 template<typename T, RefCountAtomicity Atomicity,
--- a/xpcom/base/nsISupportsImpl.h
+++ b/xpcom/base/nsISupportsImpl.h
@@ -334,51 +334,51 @@ public:
     // Memory synchronization is not required when incrementing a
     // reference count.  The first increment of a reference count on a
     // thread is not important, since the first use of the object on a
     // thread can happen before it.  What is important is the transfer
     // of the pointer to that thread, which may happen prior to the
     // first increment on that thread.  The necessary memory
     // synchronization is done by the mechanism that transfers the
     // pointer between threads.
-    detail::AutoRecordAtomicAccess<Recording> record;
+    detail::AutoRecordAtomicAccess<Recording> record(this);
     return mValue.fetch_add(1, std::memory_order_relaxed) + 1;
   }
   MOZ_ALWAYS_INLINE nsrefcnt operator--()
   {
     // Since this may be the last release on this thread, we need
     // release semantics so that prior writes on this thread are visible
     // to the thread that destroys the object when it reads mValue with
     // acquire semantics.
-    detail::AutoRecordAtomicAccess<Recording> record;
+    detail::AutoRecordAtomicAccess<Recording> record(this);
     nsrefcnt result = mValue.fetch_sub(1, std::memory_order_release) - 1;
     if (result == 0) {
       // We're going to destroy the object on this thread, so we need
       // acquire semantics to synchronize with the memory released by
       // the last release on other threads, that is, to ensure that
       // writes prior to that release are now visible on this thread.
       std::atomic_thread_fence(std::memory_order_acquire);
     }
     return result;
   }
 
   MOZ_ALWAYS_INLINE nsrefcnt operator=(nsrefcnt aValue)
   {
     // Use release semantics since we're not sure what the caller is
     // doing.
-    detail::AutoRecordAtomicAccess<Recording> record;
+    detail::AutoRecordAtomicAccess<Recording> record(this);
     mValue.store(aValue, std::memory_order_release);
     return aValue;
   }
   MOZ_ALWAYS_INLINE operator nsrefcnt() const { return get(); }
   MOZ_ALWAYS_INLINE nsrefcnt get() const
   {
     // Use acquire semantics since we're not sure what the caller is
     // doing.
-    detail::AutoRecordAtomicAccess<Recording> record;
+    detail::AutoRecordAtomicAccess<Recording> record(this);
     return mValue.load(std::memory_order_acquire);
   }
 
   static const bool isThreadSafe = true;
 private:
   nsrefcnt operator++(int) = delete;
   nsrefcnt operator--(int) = delete;
   std::atomic<nsrefcnt> mValue;