Bug 1229384 - Invert the meaning of the arena_ralloc_large and arena_t::RallocGrowLarge return type. r=njn
author: Mike Hommey <mh+mozilla@glandium.org>
Fri, 27 Oct 2017 10:14:04 +0900
changeset 439632 470e491047b75a9e1fba5792018dd44a6164789c
parent 439631 ebf19b9491feb2c705b1e2cfb9f61f0289645e62
child 439633 fa4a07b91fc232855a0991e1d36d61c026bf9549
push id: 8114
push user: jlorenzo@mozilla.com
push date: Thu, 02 Nov 2017 16:33:21 +0000
treeherder: mozilla-beta@73e0d89a540f [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: njn
bugs: 1229384
milestone: 58.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1229384 - Invert the meaning of the arena_ralloc_large and arena_t::RallocGrowLarge return type. r=njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -3686,16 +3686,17 @@ arena_t::RallocShrinkLarge(arena_chunk_t
    * Shrink the run, and make trailing pages available for other
    * allocations.
    */
   MutexAutoLock lock(mLock);
   TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
   mStats.allocated_large -= aOldSize - aSize;
 }
 
+/* Returns whether reallocation was successful. */
 bool
 arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
                          size_t aOldSize)
 {
   size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
   size_t npages = aOldSize >> pagesize_2pow;
 
   MutexAutoLock lock(mLock);
@@ -3716,54 +3717,55 @@ arena_t::RallocGrowLarge(arena_chunk_t* 
         false);
 
     aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE |
         CHUNK_MAP_ALLOCATED;
     aChunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
         CHUNK_MAP_ALLOCATED;
 
     mStats.allocated_large += aSize - aOldSize;
-    return false;
+    return true;
   }
 
-  return true;
+  return false;
 }
 
 /*
  * Try to resize a large allocation, in order to avoid copying.  This will
  * always fail if growing an object, and the following run is already in use.
+ * Returns whether reallocation was successful.
  */
 static bool
 arena_ralloc_large(void* aPtr, size_t aSize, size_t aOldSize)
 {
   size_t psize;
 
   psize = PAGE_CEILING(aSize);
   if (psize == aOldSize) {
     /* Same size class. */
     if (aSize < aOldSize) {
       memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
     }
-    return false;
+    return true;
   } else {
     arena_chunk_t* chunk;
     arena_t* arena;
 
     chunk = GetChunkForPtr(aPtr);
     arena = chunk->arena;
     MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
 
     if (psize < aOldSize) {
       /* Fill before shrinking in order avoid a race. */
       memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
       arena->RallocShrinkLarge(chunk, aPtr, psize, aOldSize);
-      return false;
+      return true;
     } else {
       bool ret = arena->RallocGrowLarge(chunk, aPtr, psize, aOldSize);
-      if (ret == false && opt_zero) {
+      if (ret && opt_zero) {
         memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
       }
       return ret;
     }
   }
 }
 
 static void*
@@ -3787,17 +3789,17 @@ arena_ralloc(void* aPtr, size_t aSize, s
     }
   } else if (aSize <= bin_maxclass) {
     if (aOldSize > small_max && aOldSize <= bin_maxclass &&
         pow2_ceil(aSize) == pow2_ceil(aOldSize)) {
       goto IN_PLACE; /* Same size class. */
     }
   } else if (aOldSize > bin_maxclass && aOldSize <= arena_maxclass) {
     MOZ_ASSERT(aSize > bin_maxclass);
-    if (arena_ralloc_large(aPtr, aSize, aOldSize) == false) {
+    if (arena_ralloc_large(aPtr, aSize, aOldSize)) {
       return aPtr;
     }
   }
 
   /*
    * If we get here, then aSize and aOldSize are different enough that we
    * need to move the object.  In that case, fall back to allocating new
    * space and copying.