Bug 1411786 - Use mozilla::Atomic for the recycled size count. r?njn draft
author Mike Hommey <mh+mozilla@glandium.org>
Thu, 26 Oct 2017 09:51:00 +0900
changeset 686648 6a3e1d1984081fbda11e9225ab74ec798537a63e
parent 686647 edebf42d73d0e7367d6cc56a03e0ff31bcc913c9
child 737417 be35224c21b24fd8c2151ee889ede29222bd7689
push id 86234
push user bmo:mh+mozilla@glandium.org
push date Thu, 26 Oct 2017 05:06:52 +0000
reviewers njn
bugs 1411786
milestone 58.0a1
Bug 1411786 - Use mozilla::Atomic for the recycled size count. r?njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -104,20 +104,21 @@
  *   Huge : Each allocation is backed by a dedicated contiguous set of chunks.
  *          Metadata are stored in a separate red-black tree.
  *
  *******************************************************************************
  */
 
 #include "mozmemory_wrap.h"
 #include "mozjemalloc.h"
-#include "mozilla/Sprintf.h"
-#include "mozilla/Likely.h"
+#include "mozilla/Atomics.h"
 #include "mozilla/DoublyLinkedList.h"
 #include "mozilla/GuardObjects.h"
+#include "mozilla/Likely.h"
+#include "mozilla/Sprintf.h"
 #include "mozilla/UniquePtr.h"
 
 /*
  * On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
  * operating system.  If we release 1MB of live pages with MADV_DONTNEED, our
  * RSS will decrease by 1MB (almost) immediately.
  *
  * On Mac, we use madvise(MADV_FREE).  Unlike MADV_DONTNEED on Linux, MADV_FREE
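
For reference, a minimal standalone sketch of the two release strategies the comment above describes (not part of this patch; the helper name and the __APPLE__ check are illustrative stand-ins for the file's own platform macros). MADV_DONTNEED hands the pages back eagerly so RSS drops right away, while MADV_FREE only marks them reclaimable under memory pressure.

#include <sys/mman.h>
#include <cstddef>

// Illustrative helper, not mozjemalloc's own: release a range of live pages
// back to the operating system using the platform's advice flag.
static void
ReleasePagesSketch(void* aAddr, size_t aLength)
{
#ifdef __APPLE__
  // Lazy: pages stay mapped and are reclaimed only under memory pressure.
  madvise(aAddr, aLength, MADV_FREE);
#else
  // Eager: RSS decreases (almost) immediately.
  madvise(aAddr, aLength, MADV_DONTNEED);
#endif
}
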
@@ -478,17 +479,17 @@ static size_t arena_maxclass; /* Max siz
  * Recycle at most 128 chunks. With 1 MiB chunks, this means we retain at most
  * 6.25% of the process address space on a 32-bit OS for later use.
  */
 #define CHUNK_RECYCLE_LIMIT 128
 
 static const size_t gRecycleLimit = CHUNK_RECYCLE_LIMIT * CHUNKSIZE_DEFAULT;
 
 /* The current amount of recycled bytes, updated atomically. */
-static size_t gRecycledSize;
+static mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gRecycledSize;
 
 /******************************************************************************/
 
 /* MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive. */
 #if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
 #error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #endif
 
@@ -1196,34 +1197,16 @@ FORK_HOOK void _malloc_prefork(void);
 FORK_HOOK void _malloc_postfork_parent(void);
 FORK_HOOK void _malloc_postfork_child(void);
 
 /*
  * End forward declarations.
  */
 /******************************************************************************/
 
-static inline size_t
-load_acquire_z(size_t *p)
-{
-	volatile size_t result = *p;
-#  ifdef XP_WIN
-	/*
-	 * We use InterlockedExchange with a dummy value to insert a memory
-	 * barrier. This has been confirmed to generate the right instruction
-	 * and is also used by MinGW.
-	 */
-	volatile long dummy = 0;
-	InterlockedExchange(&dummy, 1);
-#  else
-	__sync_synchronize();
-#  endif
-	return result;
-}
-
 static void
 _malloc_message(const char *p)
 {
 #if !defined(XP_WIN)
 #define	_write	write
 #endif
   // Pretend to check _write() errors to suppress gcc warnings about
   // warn_unused_result annotations in some versions of glibc headers.
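
The deleted load_acquire_z helper hand-rolled an acquire load: a volatile read followed by a full barrier (a dummy InterlockedExchange on Windows, __sync_synchronize elsewhere). For comparison, a minimal sketch of the same read expressed with standard C++11 atomics (illustrative only; the patch itself relies on mozilla::Atomic instead):

#include <atomic>
#include <cstddef>

static std::atomic<size_t> sCounterSketch{0};

static inline size_t
LoadAcquireSketch()
{
  // A single load with memory_order_acquire provides the ordering that the
  // old volatile-read-plus-fence sequence was emulating.
  return sCounterSketch.load(std::memory_order_acquire);
}
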
@@ -2173,17 +2156,17 @@ chunk_dealloc(void* aChunk, size_t aSize
   MOZ_ASSERT(aChunk);
   MOZ_ASSERT(CHUNK_ADDR2BASE(aChunk) == aChunk);
   MOZ_ASSERT(aSize != 0);
   MOZ_ASSERT((aSize & chunksize_mask) == 0);
 
   gChunkRTree.Unset(aChunk);
 
   if (CAN_RECYCLE(aSize)) {
-    size_t recycled_so_far = load_acquire_z(&gRecycledSize);
+    size_t recycled_so_far = gRecycledSize;
     // In case some race condition put us above the limit.
     if (recycled_so_far < gRecycleLimit) {
       size_t recycle_remaining = gRecycleLimit - recycled_so_far;
       size_t to_recycle;
       if (aSize > recycle_remaining) {
         to_recycle = recycle_remaining;
         // Drop pages that would overflow the recycle limit
         pages_trim(aChunk, aSize, 0, to_recycle);
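
The hunk above reads the counter once and trims whatever portion of the chunk would push the total past the limit. A standalone sketch of that clamping step (the limit constant and all names are illustrative, not the file's own):

#include "mozilla/Atomics.h"

static mozilla::Atomic<size_t, mozilla::ReleaseAcquire> sRecycledBytesSketch;
static const size_t kRecycleLimitSketch = 128 * 1024 * 1024;  // 128 chunks of 1 MiB

// Returns how many of aSize bytes may still be recycled without exceeding
// the limit; the caller would trim (unmap) the rest.
static size_t
RecyclablePortionSketch(size_t aSize)
{
  size_t recycledSoFar = sRecycledBytesSketch;  // one acquire read
  if (recycledSoFar >= kRecycleLimitSketch) {
    // A racing thread may already have filled the budget.
    return 0;
  }
  size_t remaining = kRecycleLimitSketch - recycledSoFar;
  return aSize > remaining ? remaining : aSize;
}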