Bug 1414155 - Move a few things around. r=njn
author Mike Hommey <mh+mozilla@glandium.org>
Wed, 01 Nov 2017 19:29:36 +0900
changeset 443486 4a93fcad5d8e158d5525f2345a55669e536756a3
parent 443485 cc1151e297c567f9fa2d931f682af1a37cbdf381
child 443487 0b3250499ed0cb8046159f5f2e8caf18e3a445c3
push id 1618
push user Callek@gmail.com
push date Thu, 11 Jan 2018 17:45:48 +0000
treeherder mozilla-release@882ca853e05a
reviewers njn
bugs 1414155
milestone 58.0a1
first release with: nightly linux32 / linux64 / mac / win32 / win64
last release without: nightly linux32 / linux64 / mac / win32 / win64
Bug 1414155 - Move a few things around. r=njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -249,20 +249,16 @@ static inline void*
 #endif
 
 // Minimum alignment of non-tiny allocations is 2^QUANTUM_2POW_MIN bytes.
 #define QUANTUM_2POW_MIN 4
 
 // Size and alignment of memory chunks that are allocated by the OS's virtual
 // memory system.
 #define CHUNK_2POW_DEFAULT 20
-// Maximum number of dirty pages per arena.
-#define DIRTY_MAX_DEFAULT (1U << 8)
-
-static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
 
 // Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
 // so over-estimates are okay (up to a point), but under-estimates will
 // negatively affect performance.
 #define CACHELINE_2POW 6
 #define CACHELINE ((size_t)(1U << CACHELINE_2POW))
 
 // Smallest size class to support.  On Windows the smallest allocation size
@@ -275,35 +271,16 @@ static size_t opt_dirty_max = DIRTY_MAX_
 #endif
 
 // Maximum size class that is a multiple of the quantum, but not (necessarily)
 // a power of 2.  Above this size, allocations are rounded up to the nearest
 // power of 2.
 #define SMALL_MAX_2POW_DEFAULT 9
 #define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
 
-// RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
-// as small as possible such that this setting is still honored, without
-// violating other constraints.  The goal is to make runs as small as possible
-// without exceeding a per run external fragmentation threshold.
-//
-// We use binary fixed point math for overhead computations, where the binary
-// point is implicitly RUN_BFP bits to the left.
-//
-// Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
-// honored for some/all object sizes, since there is one bit of header overhead
-// per object (plus a constant).  This constraint is relaxed (ignored) for runs
-// that are so small that the per-region overhead is greater than:
-//
-//   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
-#define RUN_BFP 12
-//                                    \/   Implicit binary fixed point.
-#define RUN_MAX_OVRHD 0x0000003dU
-#define RUN_MAX_OVRHD_RELAX 0x00001800U
-
 // When MALLOC_STATIC_PAGESIZE is defined, the page size is fixed at
 // compile-time for better performance, as opposed to determined at
 // runtime. Some platforms can have different page sizes at runtime
 // depending on kernel configuration, so they are opted out by default.
 // Debug builds are opted out too, for test coverage.
 #ifndef MOZ_DEBUG
 #if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) &&         \
   !defined(__aarch64__)
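
For illustration only (not part of the patch): per the SMALL_MAX_2POW_DEFAULT comment above, requests larger than SMALL_MAX_DEFAULT (1 << 9 = 512 bytes) are rounded up to the nearest power of two, so a 600-byte request would land in a 1024-byte class. A minimal standalone sketch of that rounding, with a hypothetical pow2_ceiling helper written as a simple loop for clarity:

// Illustrative sketch only -- pow2_ceiling is a hypothetical helper, not
// mozjemalloc's own code. It rounds a size up to the next power of two,
// as the SMALL_MAX_2POW_DEFAULT comment above describes.
#include <cassert>
#include <cstddef>

static size_t
pow2_ceiling(size_t aSize)
{
  size_t result = 1;
  while (result < aSize) {
    result <<= 1;
  }
  return result;
}

int main()
{
  assert(pow2_ceiling(600) == 1024);  // above SMALL_MAX_DEFAULT (512)
  assert(pow2_ceiling(1024) == 1024); // already a power of two
  return 0;
}
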
@@ -396,16 +373,52 @@ static size_t arena_maxclass; // Max siz
 // 6.25% of the process address space on a 32-bit OS for later use.
 #define CHUNK_RECYCLE_LIMIT 128
 
 static const size_t gRecycleLimit = CHUNK_RECYCLE_LIMIT * CHUNKSIZE_DEFAULT;
 
 // The current amount of recycled bytes, updated atomically.
 static Atomic<size_t, ReleaseAcquire> gRecycledSize;
 
+// Maximum number of dirty pages per arena.
+#define DIRTY_MAX_DEFAULT (1U << 8)
+
+static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
+
+// RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
+// as small as possible such that this setting is still honored, without
+// violating other constraints.  The goal is to make runs as small as possible
+// without exceeding a per run external fragmentation threshold.
+//
+// We use binary fixed point math for overhead computations, where the binary
+// point is implicitly RUN_BFP bits to the left.
+//
+// Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
+// honored for some/all object sizes, since there is one bit of header overhead
+// per object (plus a constant).  This constraint is relaxed (ignored) for runs
+// that are so small that the per-region overhead is greater than:
+//
+//   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
+#define RUN_BFP 12
+//                                    \/   Implicit binary fixed point.
+#define RUN_MAX_OVRHD 0x0000003dU
+#define RUN_MAX_OVRHD_RELAX 0x00001800U
+
+// Return the smallest chunk multiple that is >= s.
+#define CHUNK_CEILING(s) (((s) + chunksize_mask) & ~chunksize_mask)
+
+// Return the smallest cacheline multiple that is >= s.
+#define CACHELINE_CEILING(s) (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
+
+// Return the smallest quantum multiple that is >= a.
+#define QUANTUM_CEILING(a) (((a) + quantum_mask) & ~quantum_mask)
+
+// Return the smallest pagesize multiple that is >= s.
+#define PAGE_CEILING(s) (((s) + pagesize_mask) & ~pagesize_mask)
+
 // ***************************************************************************
 // MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
 #error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
 #endif
 
 static void*
 base_alloc(size_t aSize);
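
For illustration only (not part of the patch): the binary fixed-point convention in the RUN_MAX_OVRHD block above places the binary point RUN_BFP = 12 bits to the left, so RUN_MAX_OVRHD = 0x3d means 61/4096, i.e. roughly 1.5% desired maximum run header overhead. A tiny standalone sketch that spells out the conversion:

// Illustrative sketch only -- converts the fixed-point constants defined
// above into a human-readable percentage; not part of mozjemalloc.
#include <cstdio>

int main()
{
  const unsigned run_bfp = 12;          // binary point 12 bits to the left
  const unsigned run_max_ovrhd = 0x3dU; // 61 in fixed point, i.e. 61/4096

  double max_overhead = double(run_max_ovrhd) / double(1U << run_bfp);
  printf("maximum desired run header overhead: ~%.2f%%\n", max_overhead * 100.0);
  // Prints: maximum desired run header overhead: ~1.49%
  return 0;
}
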
@@ -1312,28 +1325,16 @@ GetChunkForPtr(const void* aPtr)
 
 // Return the chunk offset of address a.
 static inline size_t
 GetChunkOffsetForPtr(const void* aPtr)
 {
   return (size_t)(uintptr_t(aPtr) & chunksize_mask);
 }
 
-// Return the smallest chunk multiple that is >= s.
-#define CHUNK_CEILING(s) (((s) + chunksize_mask) & ~chunksize_mask)
-
-// Return the smallest cacheline multiple that is >= s.
-#define CACHELINE_CEILING(s) (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
-
-// Return the smallest quantum multiple that is >= a.
-#define QUANTUM_CEILING(a) (((a) + quantum_mask) & ~quantum_mask)
-
-// Return the smallest pagesize multiple that is >= s.
-#define PAGE_CEILING(s) (((s) + pagesize_mask) & ~pagesize_mask)
-
 static inline const char*
 _getprogname(void)
 {
 
   return "<jemalloc>";
 }
 
 // ***************************************************************************
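
For illustration only (not part of the patch): the *_CEILING macros moved above, and the chunksize_mask arithmetic in GetChunkOffsetForPtr, all rely on the same power-of-two rounding idiom: add (alignment - 1), then clear the low bits with the mask. A minimal standalone sketch, assuming a 4 KiB page size purely for the example:

// Illustrative sketch only -- demonstrates the "add (align - 1), then mask"
// idiom behind CHUNK_CEILING / CACHELINE_CEILING / QUANTUM_CEILING /
// PAGE_CEILING. The 4096-byte page size here is an assumption for the demo.
#include <cassert>
#include <cstddef>

int main()
{
  const size_t pagesize = 4096;
  const size_t pagesize_mask = pagesize - 1; // low bits to clear

  // Equivalent of PAGE_CEILING(s): smallest page multiple that is >= s.
  auto page_ceiling = [&](size_t s) { return (s + pagesize_mask) & ~pagesize_mask; };

  assert(page_ceiling(1) == 4096);
  assert(page_ceiling(4096) == 4096);
  assert(page_ceiling(4097) == 8192);

  // The same mask also yields the offset within a page, mirroring how
  // GetChunkOffsetForPtr() uses chunksize_mask.
  assert((size_t(5000) & pagesize_mask) == 904);
  return 0;
}

Note that this idiom only works when the alignment is a power of two, which holds for the chunk, cache line, quantum and page sizes involved here.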