Bug 1413096 - Remove pow2_ceil in favor of RoundUpPow2. r?njn
author Mike Hommey <mh+mozilla@glandium.org>
date Mon, 30 Oct 2017 17:22:36 +0900
changeset 689815 ab5b0c9e997593406359acb42b2de9d4b27c6867
parent 689814 f6787a4f09218879132d80d6ee721f268bb77e4d
child 689816 9660a148afc288c819cc25bbe89995947c80c55d
push id 87110
push user bmo:mh+mozilla@glandium.org
push date Wed, 01 Nov 2017 00:18:09 +0000
reviewers njn
bugs 1413096
milestone 58.0a1
Bug 1413096 - Remove pow2_ceil in favor of RoundUpPow2. r?njn
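
mozilla::RoundUpPow2, from the newly included mozilla/MathAlgorithms.h, computes the same smallest-power-of-two-that-is->=-x that the hand-rolled pow2_ceil below did with a bit-smearing cascade: propagate the highest set bit of x-1 into every lower bit, then add one. As a sanity check, here is a minimal standalone sketch, not part of the patch, comparing the removed implementation against a naive repeated-doubling reference for x >= 1; pow2_reference and the test bound are illustrative choices, and in-tree callers simply use mozilla::RoundUpPow2 instead:

#include <cassert>
#include <cstddef>
#include <cstdint>

// The removed mozjemalloc helper: smear the highest set bit of x-1 into all
// lower bit positions, then increment to land on the next power of two.
static size_t
pow2_ceil(size_t x)
{
  x--;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
#if SIZE_MAX > 0xffffffff
  x |= x >> 32;
#endif
  x++;
  return x;
}

// Naive reference: double until we reach or pass x.
static size_t
pow2_reference(size_t x)
{
  size_t p = 1;
  while (p < x) {
    p <<= 1;
  }
  return p;
}

int
main()
{
  for (size_t x = 1; x <= (size_t(1) << 20); x++) {
    assert(pow2_ceil(x) == pow2_reference(x));
  }
  return 0;
}
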
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -107,16 +107,17 @@
 
 #include "mozmemory_wrap.h"
 #include "mozjemalloc.h"
 #include "mozilla/Atomics.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/DoublyLinkedList.h"
 #include "mozilla/GuardObjects.h"
 #include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
 #include "mozilla/Sprintf.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/Unused.h"
 
 // On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
 // operating system.  If we release 1MB of live pages with MADV_DONTNEED, our
 // RSS will decrease by 1MB (almost) immediately.
 //
@@ -1316,34 +1317,16 @@ GetChunkOffsetForPtr(const void* aPtr)
 #define CACHELINE_CEILING(s) (((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
 
 // Return the smallest quantum multiple that is >= a.
 #define QUANTUM_CEILING(a) (((a) + quantum_mask) & ~quantum_mask)
 
 // Return the smallest pagesize multiple that is >= s.
 #define PAGE_CEILING(s) (((s) + pagesize_mask) & ~pagesize_mask)
 
-// Compute the smallest power of 2 that is >= x.
-static inline size_t
-pow2_ceil(size_t x)
-{
-
-  x--;
-  x |= x >> 1;
-  x |= x >> 2;
-  x |= x >> 4;
-  x |= x >> 8;
-  x |= x >> 16;
-#if (SIZEOF_PTR == 8)
-  x |= x >> 32;
-#endif
-  x++;
-  return x;
-}
-
 static inline const char*
 _getprogname(void)
 {
 
   return "<jemalloc>";
 }
 
 // ***************************************************************************
@@ -2990,32 +2973,32 @@ void*
 arena_t::MallocSmall(size_t aSize, bool aZero)
 {
   void* ret;
   arena_bin_t* bin;
   arena_run_t* run;
 
   if (aSize < small_min) {
     // Tiny.
-    aSize = pow2_ceil(aSize);
+    aSize = RoundUpPow2(aSize);
     bin = &mBins[ffs((int)(aSize >> (TINY_MIN_2POW + 1)))];
 
     // Bin calculation is always correct, but we may need
     // to fix size for the purposes of assertions and/or
     // stats accuracy.
     if (aSize < (1U << TINY_MIN_2POW)) {
       aSize = 1U << TINY_MIN_2POW;
     }
   } else if (aSize <= small_max) {
     // Quantum-spaced.
     aSize = QUANTUM_CEILING(aSize);
     bin = &mBins[ntbins + (aSize >> QUANTUM_2POW_MIN) - 1];
   } else {
     // Sub-page.
-    aSize = pow2_ceil(aSize);
+    aSize = RoundUpPow2(aSize);
     bin = &mBins[ntbins + nqbins +
                  (ffs((int)(aSize >> SMALL_MAX_2POW_DEFAULT)) - 2)];
   }
   MOZ_DIAGNOSTIC_ASSERT(aSize == bin->reg_size);
 
   {
     MutexAutoLock lock(mLock);
     if ((run = bin->runcur) && run->nfree > 0) {
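
For a concrete view of the tiny-size bin selection above: once aSize has been rounded up to a power of two, aSize >> (TINY_MIN_2POW + 1) is either zero (ffs(0) == 0 selects bin 0) or has exactly one bit set, whose position ffs turns directly into a bin index. A small standalone demo, with TINY_MIN_2POW fixed at an assumed value of 1 purely for illustration (the in-tree constant depends on the allocator's configuration):

#include <cstdio>
#include <strings.h> // ffs() is POSIX, not ISO C

// Assumed value, purely for illustration; the real constant is
// configuration-dependent.
static const int TINY_MIN_2POW = 1;

int
main()
{
  // After rounding up to a power of two, the shifted size is either 0
  // (bin 0) or a single set bit whose position ffs() maps to a bin index.
  for (unsigned size = 1; size <= 16; size <<= 1) {
    int bin = ffs((int)(size >> (TINY_MIN_2POW + 1)));
    printf("size %2u -> tiny bin %d\n", size, bin);
  }
  return 0;
}

With these assumed constants, sizes 1 and 2 both land in bin 0, which is why MallocSmall then clamps aSize up to 1 << TINY_MIN_2POW for the assertion and stats accuracy mentioned in the comment above.
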
@@ -3692,29 +3675,29 @@ static void*
 arena_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
 {
   void* ret;
   size_t copysize;
 
   // Try to avoid moving the allocation.
   if (aSize < small_min) {
     if (aOldSize < small_min &&
-        ffs((int)(pow2_ceil(aSize) >> (TINY_MIN_2POW + 1))) ==
-          ffs((int)(pow2_ceil(aOldSize) >> (TINY_MIN_2POW + 1)))) {
+        ffs((int)(RoundUpPow2(aSize) >> (TINY_MIN_2POW + 1))) ==
+          ffs((int)(RoundUpPow2(aOldSize) >> (TINY_MIN_2POW + 1)))) {
       goto IN_PLACE; // Same size class.
     }
   } else if (aSize <= small_max) {
     if (aOldSize >= small_min && aOldSize <= small_max &&
         (QUANTUM_CEILING(aSize) >> QUANTUM_2POW_MIN) ==
           (QUANTUM_CEILING(aOldSize) >> QUANTUM_2POW_MIN)) {
       goto IN_PLACE; // Same size class.
     }
   } else if (aSize <= bin_maxclass) {
     if (aOldSize > small_max && aOldSize <= bin_maxclass &&
-        pow2_ceil(aSize) == pow2_ceil(aOldSize)) {
+        RoundUpPow2(aSize) == RoundUpPow2(aOldSize)) {
       goto IN_PLACE; // Same size class.
     }
   } else if (aOldSize > bin_maxclass && aOldSize <= arena_maxclass) {
     MOZ_ASSERT(aSize > bin_maxclass);
     if (arena_ralloc_large(aPtr, aSize, aOldSize)) {
       return aPtr;
     }
   }
@@ -4507,30 +4490,30 @@ MozJemalloc::valloc(size_t aSize)
 template<>
 inline size_t
 MozJemalloc::malloc_good_size(size_t aSize)
 {
   // This duplicates the logic in imalloc(), arena_malloc() and
   // arena_t::MallocSmall().
   if (aSize < small_min) {
     // Small (tiny).
-    aSize = pow2_ceil(aSize);
+    aSize = RoundUpPow2(aSize);
 
     // We omit the #ifdefs from arena_t::MallocSmall() --
     // it can be inaccurate with its size in some cases, but this
     // function must be accurate.
     if (aSize < (1U << TINY_MIN_2POW)) {
       aSize = (1U << TINY_MIN_2POW);
     }
   } else if (aSize <= small_max) {
     // Small (quantum-spaced).
     aSize = QUANTUM_CEILING(aSize);
   } else if (aSize <= bin_maxclass) {
     // Small (sub-page).
-    aSize = pow2_ceil(aSize);
+    aSize = RoundUpPow2(aSize);
   } else if (aSize <= arena_maxclass) {
     // Large.
     aSize = PAGE_CEILING(aSize);
   } else {
     // Huge.  We use PAGE_CEILING to get psize, instead of using
     // CHUNK_CEILING to get csize.  This ensures that this
     // malloc_usable_size(malloc(n)) always matches
     // malloc_good_size(n).
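
The invariant promised in that comment can be exercised directly. A hedged standalone check, assuming a build linked against an allocator that exports both malloc_good_size and malloc_usable_size (mozjemalloc provides both):

#include <cstdio>
#include <cstdlib>

// Allocator extensions, not part of standard C; linking against a
// jemalloc-derived allocator such as mozjemalloc is assumed here.
extern "C" size_t malloc_good_size(size_t aSize);
extern "C" size_t malloc_usable_size(void* aPtr);

int
main()
{
  // Spot-check the documented invariant that malloc_usable_size(malloc(n))
  // always matches malloc_good_size(n), across a spread of sizes.
  for (size_t n = 1; n <= (size_t(1) << 20); n = n * 2 + 1) {
    void* p = malloc(n);
    if (!p) {
      return 1;
    }
    if (malloc_usable_size(p) != malloc_good_size(n)) {
      printf("mismatch at n=%zu\n", n);
    }
    free(p);
  }
  return 0;
}
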