Bug 1412221 - Fix clang-tidy warnings in mozjemalloc.cpp. r=njn
author Mike Hommey <mh+mozilla@glandium.org>
Fri, 27 Oct 2017 14:57:09 +0900
changeset 439645 58070c2511c64195c525783ff165dc7b2e55c487
parent 439644 00596bac1073b90e4bb129e7fc777c3582be1c76
child 439646 e2b391ae46887cf544387dff2671607cb6eb90bc
push id 8114
push user jlorenzo@mozilla.com
push date Thu, 02 Nov 2017 16:33:21 +0000
treeherder mozilla-beta@73e0d89a540f
reviewers njn
bugs 1412221, 1412214
milestone 58.0a1
Bug 1412221 - Fix clang-tidy warnings in mozjemalloc.cpp. r=njn

Also rearrange some code accordingly, but don't fix indentation issues just yet.

Also apply changes from the google-readability-braces-around-statements check. But don't apply the modernize-use-nullptr recommendation about strerror_r, because it's wrong (bug 1412214).
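For context, the bulk of the patch below is mechanical: the google-readability-braces-around-statements check requires braces around every single-statement if/else body, without changing behavior. The following standalone sketch (not an excerpt from mozjemalloc.cpp; the function name is made up for illustration) shows the shape of the transformation:

    // Minimal illustration of google-readability-braces-around-statements.
    // Hypothetical helper, written only to show the before/after shape.
    #include <cstring>

    // Before the check: single-statement branch without braces.
    static const char* env_or_default_unbraced(const char* value) {
      if (value && std::strlen(value) > 0)
        return value;
      return "default";
    }

    // After the check: identical logic, with the branch body braced.
    static const char* env_or_default_braced(const char* value) {
      if (value && std::strlen(value) > 0) {
        return value;
      }
      return "default";
    }

The diff hunks below apply exactly this kind of change throughout mozjemalloc.cpp, along with some code rearrangement.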
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -165,33 +165,34 @@
 
 /* use MSVC intrinsics */
 #pragma intrinsic(_BitScanForward)
 static __forceinline int
 ffs(int x)
 {
 	unsigned long i;
 
-	if (_BitScanForward(&i, x) != 0)
+	if (_BitScanForward(&i, x) != 0) {
 		return (i + 1);
-
+	}
 	return (0);
 }
 
 /* Implement getenv without using malloc */
 static char mozillaMallocOptionsBuf[64];
 
 #define	getenv xgetenv
 static char *
 getenv(const char *name)
 {
 
-	if (GetEnvironmentVariableA(name, (LPSTR)&mozillaMallocOptionsBuf,
-		    sizeof(mozillaMallocOptionsBuf)) > 0)
+	if (GetEnvironmentVariableA(name, mozillaMallocOptionsBuf,
+		    sizeof(mozillaMallocOptionsBuf)) > 0) {
 		return (mozillaMallocOptionsBuf);
+	}
 
 	return nullptr;
 }
 
 #if defined(_WIN64)
 typedef long long ssize_t;
 #else
 typedef long ssize_t;
@@ -1206,18 +1207,19 @@ FORK_HOOK void _malloc_postfork_child(vo
 static void
 _malloc_message(const char *p)
 {
 #if !defined(XP_WIN)
 #define	_write	write
 #endif
   // Pretend to check _write() errors to suppress gcc warnings about
   // warn_unused_result annotations in some versions of glibc headers.
-  if (_write(STDERR_FILENO, p, (unsigned int) strlen(p)) < 0)
+  if (_write(STDERR_FILENO, p, (unsigned int) strlen(p)) < 0) {
     return;
+  }
 }
 
 template <typename... Args>
 static void
 _malloc_message(const char *p, Args... args)
 {
   _malloc_message(p);
   _malloc_message(args...);
@@ -1427,29 +1429,31 @@ static bool
 base_pages_alloc(size_t minsize)
 {
 	size_t csize;
 	size_t pminsize;
 
 	MOZ_ASSERT(minsize != 0);
 	csize = CHUNK_CEILING(minsize);
 	base_pages = chunk_alloc(csize, chunksize, true);
-	if (!base_pages)
+	if (!base_pages) {
 		return (true);
+	}
 	base_next_addr = base_pages;
 	base_past_addr = (void *)((uintptr_t)base_pages + csize);
 	/*
 	 * Leave enough pages for minsize committed, since otherwise they would
 	 * have to be immediately recommitted.
 	 */
 	pminsize = PAGE_CEILING(minsize);
 	base_next_decommitted = (void *)((uintptr_t)base_pages + pminsize);
 #  if defined(MALLOC_DECOMMIT)
-	if (pminsize < csize)
+	if (pminsize < csize) {
 		pages_decommit(base_next_decommitted, csize - pminsize);
+	}
 #  endif
 	base_mapped += csize;
 	base_committed += pminsize;
 
 	return (false);
 }
 
 static void*
@@ -1803,49 +1807,55 @@ pages_trim(void *addr, size_t alloc_size
 
         MOZ_ASSERT(alloc_size >= leadsize + size);
 #ifdef XP_WIN
         {
                 void *new_addr;
 
                 pages_unmap(addr, alloc_size);
                 new_addr = pages_map(ret, size);
-                if (new_addr == ret)
+                if (new_addr == ret) {
                         return (ret);
-                if (new_addr)
+                }
+                if (new_addr) {
                         pages_unmap(new_addr, size);
+                }
                 return nullptr;
         }
 #else
         {
                 size_t trailsize = alloc_size - leadsize - size;
 
-                if (leadsize != 0)
+                if (leadsize != 0) {
                         pages_unmap(addr, leadsize);
-                if (trailsize != 0)
+                }
+                if (trailsize != 0) {
                         pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+                }
                 return (ret);
         }
 #endif
 }
 
 static void *
 chunk_alloc_mmap_slow(size_t size, size_t alignment)
 {
         void *ret, *pages;
         size_t alloc_size, leadsize;
 
         alloc_size = size + alignment - pagesize;
         /* Beware size_t wrap-around. */
-        if (alloc_size < size)
+        if (alloc_size < size) {
                 return nullptr;
+	}
         do {
                 pages = pages_map(nullptr, alloc_size);
-                if (!pages)
+                if (!pages) {
                         return nullptr;
+		}
                 leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
                         (uintptr_t)pages;
                 ret = pages_trim(pages, alloc_size, leadsize, size);
         } while (!ret);
 
         MOZ_ASSERT(ret);
         return (ret);
 }
@@ -1865,18 +1875,19 @@ chunk_alloc_mmap(size_t size, size_t ali
          * pages_unmap().
          *
          * Optimistically try mapping precisely the right amount before falling
          * back to the slow method, with the expectation that the optimistic
          * approach works most of the time.
          */
 
         ret = pages_map(nullptr, size);
-        if (!ret)
+        if (!ret) {
                 return nullptr;
+        }
         offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
         if (offset != 0) {
                 pages_unmap(ret, size);
                 return (chunk_alloc_mmap_slow(size, alignment));
         }
 
         MOZ_ASSERT(ret);
         return (ret);
@@ -1892,18 +1903,19 @@ chunk_alloc_mmap(size_t size, size_t ali
 static bool
 pages_purge(void *addr, size_t length, bool force_zero)
 {
 #ifdef MALLOC_DECOMMIT
 	pages_decommit(addr, length);
 	return true;
 #else
 #  ifndef XP_LINUX
-	if (force_zero)
+	if (force_zero) {
 		memset(addr, 0, length);
+	}
 #  endif
 #  ifdef XP_WIN
 	/*
 	* The region starting at addr may have been allocated in multiple calls
 	* to VirtualAlloc and recycled, so resetting the entire region in one
 	* go may not be valid. However, since we allocate at least a chunk at a
 	* time, we may touch any region in chunksized increments.
 	*/
@@ -2378,21 +2390,21 @@ arena_run_reg_dalloc(arena_run_t *run, a
 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
 		};
 
-		if (size <= 128)
+		if (size <= 128) {
 			regind = (diff >> log2_table[size - 1]);
-		else if (size <= 32768)
+		} else if (size <= 32768) {
 			regind = diff >> (8 + log2_table[(size >> 8) - 1]);
-		else {
+		} else {
 			/*
 			 * The run size is too large for us to use the lookup
 			 * table.  Use real division.
 			 */
 			regind = diff / size;
 		}
 	} else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
 	    << QUANTUM_2POW_MIN) + 2) {
@@ -2406,18 +2418,19 @@ arena_run_reg_dalloc(arena_run_t *run, a
 		 * configuration option.
 		 */
 		regind = diff / size;
 	};
 	MOZ_DIAGNOSTIC_ASSERT(diff == regind * size);
 	MOZ_DIAGNOSTIC_ASSERT(regind < bin->nregs);
 
 	elm = regind >> (SIZEOF_INT_2POW + 3);
-	if (elm < run->regs_minelm)
+	if (elm < run->regs_minelm) {
 		run->regs_minelm = elm;
+	}
 	bit = regind - (elm << (SIZEOF_INT_2POW + 3));
 	MOZ_DIAGNOSTIC_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
 	run->regs_mask[elm] |= (1U << bit);
 #undef SIZE_INV
 #undef SIZE_INV_SHIFT
 }
 
 bool
@@ -2761,20 +2774,21 @@ arena_t::DallocRun(arena_run_t* aRun, bo
 {
   arena_chunk_t* chunk;
   size_t size, run_ind, run_pages;
 
   chunk = GetChunkForPtr(aRun);
   run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
   MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
   MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
-  if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
+  if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0) {
     size = chunk->map[run_ind].bits & ~pagesize_mask;
-  else
+  } else {
     size = aRun->bin->run_size;
+  }
   run_pages = (size >> pagesize_2pow);
 
   /* Mark pages as unallocated in the chunk map. */
   if (aDirty) {
     size_t i;
 
     for (i = 0; i < run_pages; i++) {
       MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
@@ -2918,18 +2932,19 @@ arena_t::GetNonFullBinRun(arena_bin_t* a
     aBin->runs.Remove(mapelm);
     run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
     return run;
   }
   /* No existing runs have any space available. */
 
   /* Allocate a new run. */
   run = AllocRun(aBin, aBin->run_size, false, false);
-  if (!run)
+  if (!run) {
     return nullptr;
+  }
   /*
    * Don't initialize if a race in arena_t::RunAlloc() allowed an existing
    * run to become usable.
    */
   if (run == aBin->runcur) {
     return run;
   }
 
@@ -3118,18 +3133,19 @@ arena_t::MallocSmall(size_t aSize, bool 
   }
 
   if (aZero == false) {
     if (opt_junk) {
       memset(ret, kAllocJunk, aSize);
     } else if (opt_zero) {
       memset(ret, 0, aSize);
     }
-  } else
+  } else {
     memset(ret, 0, aSize);
+  }
 
   return ret;
 }
 
 void*
 arena_t::MallocLarge(size_t aSize, bool aZero)
 {
   void* ret;
@@ -3314,20 +3330,21 @@ ipalloc(size_t aAlignment, size_t aSize,
        * anything important.
        */
       run_size = (aAlignment << 1) - pagesize;
     }
 
     if (run_size <= arena_maxclass) {
       aArena = aArena ? aArena : choose_arena(aSize);
       ret = aArena->Palloc(aAlignment, ceil_size, run_size);
-    } else if (aAlignment <= chunksize)
+    } else if (aAlignment <= chunksize) {
       ret = huge_malloc(ceil_size, false);
-    else
+    } else {
       ret = huge_palloc(ceil_size, aAlignment, false);
+    }
   }
 
   MOZ_ASSERT((uintptr_t(ret) & (aAlignment - 1)) == 0);
   return ret;
 }
 
 /* Return the size of the allocation pointed to by ptr. */
 static size_t
@@ -3467,26 +3484,27 @@ MozJemalloc::jemalloc_ptr_info(const voi
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
 
   size_t mapbits = chunk->map[pageind].bits;
 
   if (!(mapbits & CHUNK_MAP_ALLOCATED)) {
     PtrInfoTag tag = TagFreedPageDirty;
-    if (mapbits & CHUNK_MAP_DIRTY)
+    if (mapbits & CHUNK_MAP_DIRTY) {
       tag = TagFreedPageDirty;
-    else if (mapbits & CHUNK_MAP_DECOMMITTED)
+    } else if (mapbits & CHUNK_MAP_DECOMMITTED) {
       tag = TagFreedPageDecommitted;
-    else if (mapbits & CHUNK_MAP_MADVISED)
+    } else if (mapbits & CHUNK_MAP_MADVISED) {
       tag = TagFreedPageMadvised;
-    else if (mapbits & CHUNK_MAP_ZEROED)
+    } else if (mapbits & CHUNK_MAP_ZEROED) {
       tag = TagFreedPageZeroed;
-    else
+    } else {
       MOZ_CRASH();
+    }
 
     void* pageaddr = (void*)(uintptr_t(aPtr) & ~pagesize_mask);
     *aInfo = { tag, pageaddr, pagesize };
     return;
   }
 
   if (mapbits & CHUNK_MAP_LARGE) {
     // It's a large allocation. Only the first page of a large
@@ -3675,20 +3693,21 @@ arena_dalloc(void* aPtr, size_t aOffset)
 static inline void
 idalloc(void *ptr)
 {
 	size_t offset;
 
 	MOZ_ASSERT(ptr);
 
 	offset = GetChunkOffsetForPtr(ptr);
-	if (offset != 0)
+	if (offset != 0) {
 		arena_dalloc(ptr, offset);
-	else
+	} else {
 		huge_dalloc(ptr);
+	}
 }
 
 void
 arena_t::RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
                            size_t aOldSize)
 {
   MOZ_ASSERT(aSize < aOldSize);
 
@@ -3754,37 +3773,34 @@ arena_ralloc_large(void* aPtr, size_t aS
 
   psize = PAGE_CEILING(aSize);
   if (psize == aOldSize) {
     /* Same size class. */
     if (aSize < aOldSize) {
       memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
     }
     return true;
-  } else {
-    arena_chunk_t* chunk;
-    arena_t* arena;
-
-    chunk = GetChunkForPtr(aPtr);
-    arena = chunk->arena;
-    MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
-
-    if (psize < aOldSize) {
-      /* Fill before shrinking in order avoid a race. */
-      memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
-      arena->RallocShrinkLarge(chunk, aPtr, psize, aOldSize);
-      return true;
-    } else {
-      bool ret = arena->RallocGrowLarge(chunk, aPtr, psize, aOldSize);
-      if (ret && opt_zero) {
-        memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
-      }
-      return ret;
-    }
   }
+
+  arena_chunk_t* chunk = GetChunkForPtr(aPtr);
+  arena_t* arena = chunk->arena;
+  MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
+
+  if (psize < aOldSize) {
+    /* Fill before shrinking in order avoid a race. */
+    memset((void*)((uintptr_t)aPtr + aSize), kAllocPoison, aOldSize - aSize);
+    arena->RallocShrinkLarge(chunk, aPtr, psize, aOldSize);
+    return true;
+  }
+
+  bool ret = arena->RallocGrowLarge(chunk, aPtr, psize, aOldSize);
+  if (ret && opt_zero) {
+    memset((void*)((uintptr_t)aPtr + aOldSize), 0, aSize - aOldSize);
+  }
+  return ret;
 }
 
 static void*
 arena_ralloc(void* aPtr, size_t aSize, size_t aOldSize, arena_t* aArena)
 {
   void* ret;
   size_t copysize;
 
@@ -4049,18 +4065,19 @@ huge_palloc(size_t aSize, size_t aAlignm
      * the program will (hopefully) never touch bytes in excess of psize.
      * Thus those bytes won't take up space in physical memory, and we can
      * reasonably claim we never "allocated" them in the first place. */
     huge_allocated += psize;
     huge_mapped += csize;
   }
 
 #ifdef MALLOC_DECOMMIT
-  if (csize - psize > 0)
+  if (csize - psize > 0) {
     pages_decommit((void*)((uintptr_t)ret + psize), csize - psize);
+  }
 #endif
 
   if (aZero == false) {
     if (opt_junk) {
 #  ifdef MALLOC_DECOMMIT
       memset(ret, kAllocJunk, psize);
 #  else
       memset(ret, kAllocJunk, csize);
@@ -4194,18 +4211,19 @@ huge_dalloc(void* aPtr)
  */
 #if defined(XP_WIN)
 #define	malloc_init() false
 #else
 static inline bool
 malloc_init(void)
 {
 
-	if (malloc_initialized == false)
+	if (malloc_initialized == false) {
 		return (malloc_init_hard());
+	}
 
 	return (false);
 }
 #endif
 
 static size_t
 GetKernelPageSize()
 {
@@ -4280,29 +4298,31 @@ malloc_init_hard(void)
             nreps *= 10;
             nreps += opts[i] - '0';
             break;
           default:
             goto MALLOC_OUT;
         }
       }
 MALLOC_OUT:
-      if (nseen == false)
+      if (nseen == false) {
         nreps = 1;
+      }
 
       for (j = 0; j < nreps; j++) {
         switch (opts[i]) {
         case 'f':
           opt_dirty_max >>= 1;
           break;
         case 'F':
-          if (opt_dirty_max == 0)
+          if (opt_dirty_max == 0) {
             opt_dirty_max = 1;
-          else if ((opt_dirty_max << 1) != 0)
+          } else if ((opt_dirty_max << 1) != 0) {
             opt_dirty_max <<= 1;
+          }
           break;
 #ifdef MOZ_DEBUG
         case 'j':
           opt_junk = false;
           break;
         case 'J':
           opt_junk = true;
           break;
@@ -4481,25 +4501,23 @@ BaseAllocator::memalign(size_t aAlignmen
 
   return ret;
 }
 
 inline void*
 BaseAllocator::calloc(size_t aNum, size_t aSize)
 {
   void *ret;
-  size_t num_size;
 
   if (malloc_init()) {
-    num_size = 0;
     ret = nullptr;
     goto RETURN;
   }
 
-  num_size = aNum * aSize;
+  size_t num_size = aNum * aSize;
   if (num_size == 0) {
     num_size = 1;
   /*
    * Try to avoid division here.  We know that it isn't possible to
    * overflow during multiplication if neither operand uses any of the
    * most significant half of the bits in a size_t.
    */
   } else if (((aNum | aSize) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
@@ -4643,18 +4661,19 @@ MozJemalloc::malloc_good_size(size_t aSi
   if (aSize < small_min) {
     /* Small (tiny). */
     aSize = pow2_ceil(aSize);
     /*
      * We omit the #ifdefs from arena_t::MallocSmall() --
      * it can be inaccurate with its size in some cases, but this
      * function must be accurate.
      */
-    if (aSize < (1U << TINY_MIN_2POW))
+    if (aSize < (1U << TINY_MIN_2POW)) {
       aSize = (1U << TINY_MIN_2POW);
+    }
   } else if (aSize <= small_max) {
     /* Small (quantum-spaced). */
     aSize = QUANTUM_CEILING(aSize);
   } else if (aSize <= bin_maxclass) {
     /* Small (sub-page). */
     aSize = pow2_ceil(aSize);
   } else if (aSize <= arena_maxclass) {
     /* Large. */
@@ -5037,17 +5056,17 @@ static malloc_table_t replace_malloc_tab
 
 #ifdef XP_WIN
 typedef HMODULE replace_malloc_handle_t;
 
 static replace_malloc_handle_t
 replace_malloc_handle()
 {
   char replace_malloc_lib[1024];
-  if (GetEnvironmentVariableA("MOZ_REPLACE_MALLOC_LIB", (LPSTR)&replace_malloc_lib,
+  if (GetEnvironmentVariableA("MOZ_REPLACE_MALLOC_LIB", replace_malloc_lib,
                               sizeof(replace_malloc_lib)) > 0) {
     return LoadLibraryA(replace_malloc_lib);
   }
   return nullptr;
 }
 
 #    define REPLACE_MALLOC_GET_FUNC(handle, name) \
       (name##_impl_t*) GetProcAddress(handle, "replace_" # name)
@@ -5114,20 +5133,22 @@ init()
     } \
     return replace_malloc_table.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
   }
 #include "malloc_decls.h"
 
 MOZ_JEMALLOC_API struct ReplaceMallocBridge*
 get_bridge(void)
 {
-  if (MOZ_UNLIKELY(!replace_malloc_initialized))
+  if (MOZ_UNLIKELY(!replace_malloc_initialized)) {
     init();
-  if (MOZ_LIKELY(!replace_get_bridge))
+  }
+  if (MOZ_LIKELY(!replace_get_bridge)) {
     return nullptr;
+  }
   return replace_get_bridge();
 }
 
 /*
  * posix_memalign, aligned_alloc, memalign and valloc all implement some kind
  * of aligned memory allocation. For convenience, a replace-malloc library can
  * skip defining replace_posix_memalign, replace_aligned_alloc and
  * replace_valloc, and default implementations will be automatically derived