Bug 1414168 - Rename arena_run_t fields. r=njn
authorMike Hommey <mh+mozilla@glandium.org>
Fri, 03 Nov 2017 15:23:44 +0900
changeset 444167 a4392df95024d106fb3472b746c96e31224cdee4
parent 444166 182b840c89560a0d4a53bcd5bc337749e92fdd23
child 444168 78acf48d63c3dacd8885ba93cdf5f1b2efccafe5
push id: 1618
push user: Callek@gmail.com
push date: Thu, 11 Jan 2018 17:45:48 +0000
treeherder: mozilla-release@882ca853e05a
reviewers: njn
bugs: 1414168
milestone: 58.0a1
Bug 1414168 - Rename arena_run_t fields. r=njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -832,45 +832,45 @@ struct GetDoublyLinkedListElement<arena_
   }
 };
 }
 #endif
 
 struct arena_run_t
 {
 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
-  uint32_t magic;
+  uint32_t mMagic;
 #define ARENA_RUN_MAGIC 0x384adf93
 
   // On 64-bit platforms, having the arena_bin_t pointer following
-  // the magic field means there's padding between both fields, making
+  // the mMagic field means there's padding between both fields, making
   // the run header larger than necessary.
   // But when MOZ_DIAGNOSTIC_ASSERT_ENABLED is not set, starting the
   // header with this field followed by the arena_bin_t pointer yields
-  // the same padding. We do want the magic field to appear first, so
+  // the same padding. We do want the mMagic field to appear first, so
   // depending whether MOZ_DIAGNOSTIC_ASSERT_ENABLED is set or not, we
   // move some field to avoid padding.
 
   // Number of free regions in run.
-  unsigned nfree;
+  unsigned mNumFree;
 #endif
 
   // Bin this run is associated with.
-  arena_bin_t* bin;
+  arena_bin_t* mBin;
 
   // Index of first element that might have a free region.
-  unsigned regs_minelm;
+  unsigned mRegionsMinElement;
 
 #if !defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
   // Number of free regions in run.
-  unsigned nfree;
+  unsigned mNumFree;
 #endif
 
   // Bitmask of in-use regions (0: in use, 1: free).
-  unsigned regs_mask[1]; // Dynamically sized.
+  unsigned mRegionsMask[1]; // Dynamically sized.
 };
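
An aside for readers of this patch (illustration only, not part of the change): the comment above is about struct layout. On a typical 64-bit ABI, a lone 32-bit field followed by an 8-byte-aligned pointer forces 4 bytes of padding, which keeping the two 32-bit fields adjacent avoids. A minimal sketch, assuming LP64 with 4-byte unsigned and 8-byte pointers:

#include <cstdint>
#include <cstdio>

// Pointer right after a lone 32-bit field: 4 bytes of padding are inserted
// before the pointer, and 4 more after the trailing 32-bit field.
struct PaddedLayout
{
  uint32_t mMagic;
  void* mBin;
  unsigned mNumFree;
};

// Keeping the two 32-bit fields adjacent fills the would-be padding.
struct PackedLayout
{
  uint32_t mMagic;
  unsigned mNumFree;
  void* mBin;
};

int main()
{
  // On common 64-bit ABIs this prints "24 16".
  printf("%zu %zu\n", sizeof(PaddedLayout), sizeof(PackedLayout));
  return 0;
}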
 
 struct arena_bin_t
 {
   // Current run being used to service allocations of this bin's size
   // class.
   arena_run_t* mCurrentRun;
 
@@ -885,17 +885,17 @@ struct arena_bin_t
   size_t mSizeClass;
 
   // Total size of a run for this bin's size class.
   size_t mRunSize;
 
   // Total number of regions in a run for this bin's size class.
   uint32_t mRunNumRegions;
 
-  // Number of elements in a run's regs_mask for this bin's size class.
+  // Number of elements in a run's mRegionsMask for this bin's size class.
   uint32_t mRunNumRegionsMask;
 
   // Offset of first region in a run for this bin's size class.
   uint32_t mRunFirstRegionOffset;
 
   // Current number of runs in this bin, full or otherwise.
   unsigned long mNumRuns;
 
@@ -2273,58 +2273,58 @@ choose_arena(size_t size)
 }
 
 static inline void*
 arena_run_reg_alloc(arena_run_t* run, arena_bin_t* bin)
 {
   void* ret;
   unsigned i, mask, bit, regind;
 
-  MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
-  MOZ_ASSERT(run->regs_minelm < bin->mRunNumRegionsMask);
-
-  // Move the first check outside the loop, so that run->regs_minelm can
+  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
+  MOZ_ASSERT(run->mRegionsMinElement < bin->mRunNumRegionsMask);
+
+  // Move the first check outside the loop, so that run->mRegionsMinElement can
   // be updated unconditionally, without the possibility of updating it
   // multiple times.
-  i = run->regs_minelm;
-  mask = run->regs_mask[i];
+  i = run->mRegionsMinElement;
+  mask = run->mRegionsMask[i];
   if (mask != 0) {
     // Usable allocation found.
     bit = CountTrailingZeroes32(mask);
 
     regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
     MOZ_ASSERT(regind < bin->mRunNumRegions);
     ret = (void*)(((uintptr_t)run) + bin->mRunFirstRegionOffset +
                   (bin->mSizeClass * regind));
 
     // Clear bit.
     mask ^= (1U << bit);
-    run->regs_mask[i] = mask;
+    run->mRegionsMask[i] = mask;
 
     return ret;
   }
 
   for (i++; i < bin->mRunNumRegionsMask; i++) {
-    mask = run->regs_mask[i];
+    mask = run->mRegionsMask[i];
     if (mask != 0) {
       // Usable allocation found.
       bit = CountTrailingZeroes32(mask);
 
       regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
       MOZ_ASSERT(regind < bin->mRunNumRegions);
       ret = (void*)(((uintptr_t)run) + bin->mRunFirstRegionOffset +
                     (bin->mSizeClass * regind));
 
       // Clear bit.
       mask ^= (1U << bit);
-      run->regs_mask[i] = mask;
+      run->mRegionsMask[i] = mask;
 
       // Make a note that nothing before this element
       // contains a free region.
-      run->regs_minelm = i; // Low payoff: + (mask == 0);
+      run->mRegionsMinElement = i; // Low payoff: + (mask == 0);
 
       return ret;
     }
   }
   // Not reached.
   MOZ_DIAGNOSTIC_ASSERT(0);
   return nullptr;
 }
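
Annotation (a self-contained sketch with hypothetical names, not the patch's code): arena_run_reg_alloc above scans mRegionsMask word by word, takes the lowest set bit of the first non-zero word as the free region, and clears it; mRegionsMinElement just remembers where the next scan can start. The same technique in isolation, using the GCC/Clang builtin in place of CountTrailingZeroes32:

#include <cstdint>

// One bit per region, 1 meaning free. Hands out the first free region and
// marks it in use; returns aNumRegions if every bit is already clear.
static unsigned
TakeFirstFreeRegion(uint32_t* aMask, unsigned aNumWords, unsigned aNumRegions)
{
  for (unsigned i = 0; i < aNumWords; i++) {
    if (aMask[i] != 0) {
      unsigned bit = __builtin_ctz(aMask[i]); // index of the lowest set bit
      unsigned regind = i * 32 + bit;
      aMask[i] ^= (1u << bit); // clear the bit: region is now in use
      return regind;
    }
  }
  return aNumRegions; // callers check the free count first, as above
}
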
@@ -2352,17 +2352,17 @@ arena_run_reg_dalloc(arena_run_t* run, a
     SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
     SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
     SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
     SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
   };
   // clang-format on
   unsigned diff, regind, elm, bit;
 
-  MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
+  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
   static_assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3 >=
                   kNumQuantumClasses,
                 "size_invs doesn't have enough values");
 
   // Avoid doing division with a variable divisor if possible.  Using
   // actual division here can reduce allocator throughput by over 20%!
   diff =
     (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->mRunFirstRegionOffset);
@@ -2402,22 +2402,22 @@ arena_run_reg_dalloc(arena_run_t* run, a
     // if the user increases small_max via the 'S' runtime
     // configuration option.
     regind = diff / size;
   };
   MOZ_DIAGNOSTIC_ASSERT(diff == regind * size);
   MOZ_DIAGNOSTIC_ASSERT(regind < bin->mRunNumRegions);
 
   elm = regind >> (LOG2(sizeof(int)) + 3);
-  if (elm < run->regs_minelm) {
-    run->regs_minelm = elm;
+  if (elm < run->mRegionsMinElement) {
+    run->mRegionsMinElement = elm;
   }
   bit = regind - (elm << (LOG2(sizeof(int)) + 3));
-  MOZ_DIAGNOSTIC_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
-  run->regs_mask[elm] |= (1U << bit);
+  MOZ_DIAGNOSTIC_ASSERT((run->mRegionsMask[elm] & (1U << bit)) == 0);
+  run->mRegionsMask[elm] |= (1U << bit);
 #undef SIZE_INV
 #undef SIZE_INV_SHIFT
 }
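
Annotation (illustrative constants, not the patch's exact SIZE_INV table): the size_invs table lets arena_run_reg_dalloc compute diff / size as a multiply by a precomputed fixed-point reciprocal followed by a shift, which is what the "over 20%" comment above is about. A sketch of the trick, valid when diff is a multiple of the size and smaller than 2^21:

#include <cstdint>

constexpr unsigned kInvShift = 21;

// Fixed-point reciprocal of a small size class. In real code these are
// precomputed into a table such as size_invs above.
constexpr uint32_t
Inv(uint32_t aSize)
{
  return (1u << kInvShift) / aSize + 1;
}

// Same result as aDiff / aSize for the bounded, size-aligned offsets that
// occur inside a run, without emitting a division instruction.
inline unsigned
RegionIndex(uint32_t aDiff, uint32_t aSize)
{
  return static_cast<unsigned>((uint64_t(aDiff) * Inv(aSize)) >> kInvShift);
}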
 
 bool
 arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero)
 {
   arena_chunk_t* chunk;
@@ -2748,17 +2748,17 @@ arena_t::DallocRun(arena_run_t* aRun, bo
 
   chunk = GetChunkForPtr(aRun);
   run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
   MOZ_DIAGNOSTIC_ASSERT(run_ind >= gChunkHeaderNumPages);
   MOZ_DIAGNOSTIC_ASSERT(run_ind < gChunkNumPages);
   if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0) {
     size = chunk->map[run_ind].bits & ~gPageSizeMask;
   } else {
-    size = aRun->bin->mRunSize;
+    size = aRun->mBin->mRunSize;
   }
   run_pages = (size >> gPageSize2Pow);
 
   // Mark pages as unallocated in the chunk map.
   if (aDirty) {
     size_t i;
 
     for (i = 0; i < run_pages; i++) {
@@ -2906,78 +2906,78 @@ arena_t::GetNonFullBinRun(arena_bin_t* a
   }
   // Don't initialize if a race in arena_t::RunAlloc() allowed an existing
   // run to become usable.
   if (run == aBin->mCurrentRun) {
     return run;
   }
 
   // Initialize run internals.
-  run->bin = aBin;
+  run->mBin = aBin;
 
   for (i = 0; i < aBin->mRunNumRegionsMask - 1; i++) {
-    run->regs_mask[i] = UINT_MAX;
+    run->mRegionsMask[i] = UINT_MAX;
   }
   remainder = aBin->mRunNumRegions & ((1U << (LOG2(sizeof(int)) + 3)) - 1);
   if (remainder == 0) {
-    run->regs_mask[i] = UINT_MAX;
+    run->mRegionsMask[i] = UINT_MAX;
   } else {
     // The last element has spare bits that need to be unset.
-    run->regs_mask[i] =
+    run->mRegionsMask[i] =
       (UINT_MAX >> ((1U << (LOG2(sizeof(int)) + 3)) - remainder));
   }
 
-  run->regs_minelm = 0;
-
-  run->nfree = aBin->mRunNumRegions;
+  run->mRegionsMinElement = 0;
+
+  run->mNumFree = aBin->mRunNumRegions;
 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
-  run->magic = ARENA_RUN_MAGIC;
+  run->mMagic = ARENA_RUN_MAGIC;
 #endif
 
   aBin->mNumRuns++;
   return run;
 }
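
Annotation (hypothetical helper mirroring the initialization above): each word of mRegionsMask starts out all-free, and the spare bits past the last real region in the final word are cleared so they can never be handed out.

#include <cstdint>

// aNumWords 32-bit words cover aNumRegions regions, one bit each (1 = free).
static void
InitRegionMask(uint32_t* aMask, unsigned aNumWords, unsigned aNumRegions)
{
  for (unsigned i = 0; i + 1 < aNumWords; i++) {
    aMask[i] = UINT32_MAX; // every region in this word is free
  }
  unsigned remainder = aNumRegions & 31; // regions covered by the last word
  aMask[aNumWords - 1] =
    remainder == 0 ? UINT32_MAX
                   : (UINT32_MAX >> (32 - remainder)); // spare bits stay 0
}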
 
 // bin->mCurrentRun must have space available before this function is called.
 void*
 arena_t::MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun)
 {
   void* ret;
 
-  MOZ_DIAGNOSTIC_ASSERT(aRun->magic == ARENA_RUN_MAGIC);
-  MOZ_DIAGNOSTIC_ASSERT(aRun->nfree > 0);
+  MOZ_DIAGNOSTIC_ASSERT(aRun->mMagic == ARENA_RUN_MAGIC);
+  MOZ_DIAGNOSTIC_ASSERT(aRun->mNumFree > 0);
 
   ret = arena_run_reg_alloc(aRun, aBin);
   MOZ_DIAGNOSTIC_ASSERT(ret);
-  aRun->nfree--;
+  aRun->mNumFree--;
 
   return ret;
 }
 
 // Re-fill aBin->mCurrentRun, then call arena_t::MallocBinEasy().
 void*
 arena_t::MallocBinHard(arena_bin_t* aBin)
 {
   aBin->mCurrentRun = GetNonFullBinRun(aBin);
   if (!aBin->mCurrentRun) {
     return nullptr;
   }
-  MOZ_DIAGNOSTIC_ASSERT(aBin->mCurrentRun->magic == ARENA_RUN_MAGIC);
-  MOZ_DIAGNOSTIC_ASSERT(aBin->mCurrentRun->nfree > 0);
+  MOZ_DIAGNOSTIC_ASSERT(aBin->mCurrentRun->mMagic == ARENA_RUN_MAGIC);
+  MOZ_DIAGNOSTIC_ASSERT(aBin->mCurrentRun->mNumFree > 0);
 
   return MallocBinEasy(aBin, aBin->mCurrentRun);
 }
 
 void
 arena_bin_t::Init(SizeClass aSizeClass)
 {
   size_t try_run_size;
   unsigned try_nregs, try_mask_nelms, try_reg0_offset;
-  // Size of the run header, excluding regs_mask.
-  static const size_t kFixedHeaderSize = offsetof(arena_run_t, regs_mask);
+  // Size of the run header, excluding mRegionsMask.
+  static const size_t kFixedHeaderSize = offsetof(arena_run_t, mRegionsMask);
 
   MOZ_ASSERT(aSizeClass.Size() <= gMaxBinClass);
 
   try_run_size = gPageSize;
 
   mCurrentRun = nullptr;
   mNonFullRuns.Init();
   mSizeClass = aSizeClass.Size();
@@ -3021,17 +3021,17 @@ arena_bin_t::Init(SizeClass aSizeClass)
     }
 
     // The run header includes one bit per region of the given size. For sizes
     // small enough, the number of regions is large enough that growing the run
     // size barely moves the needle for the overhead because of all those bits.
     // For example, for a size of 8 bytes, adding 4KiB to the run size adds
     // close to 512 bits to the header, which is 64 bytes.
     // With such overhead, there is no way to get to the wanted overhead above,
-    // so we give up if the required size for regs_mask more than doubles the
+    // so we give up if the required size for mRegionsMask more than doubles the
     // size of the run header.
     if (try_mask_nelms * sizeof(unsigned) >= kFixedHeaderSize) {
       break;
     }
 
     // Try more aggressive settings.
     try_run_size += gPageSize;
   }
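
Annotation (a worked instance of the comment above, illustration only): for the 8-byte size class, each additional 4 KiB page of run space adds 512 regions, hence 512 mask bits, i.e. 64 more header bytes; the header grows nearly in lockstep with the run, so the loop gives up once mRegionsMask alone would at least double kFixedHeaderSize.

#include <cstddef>

constexpr size_t kExtraPage = 4096;
constexpr size_t kSizeClass = 8;
constexpr size_t kExtraRegions = kExtraPage / kSizeClass; // 512 regions
constexpr size_t kExtraMaskBytes = kExtraRegions / 8;     // 512 bits = 64 bytes
static_assert(kExtraMaskBytes == 64,
              "each extra page costs 64 header bytes for 8-byte regions");
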
@@ -3069,17 +3069,17 @@ arena_t::MallocSmall(size_t aSize, bool 
       break;
     default:
       MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected size class type");
   }
   MOZ_DIAGNOSTIC_ASSERT(aSize == bin->mSizeClass);
 
   {
     MutexAutoLock lock(mLock);
-    if ((run = bin->mCurrentRun) && run->nfree > 0) {
+    if ((run = bin->mCurrentRun) && run->mNumFree > 0) {
       ret = MallocBinEasy(bin, run);
     } else {
       ret = MallocBinHard(bin);
     }
 
     if (!ret) {
       return nullptr;
     }
@@ -3304,18 +3304,18 @@ arena_salloc(const void* ptr)
   MOZ_ASSERT(GetChunkOffsetForPtr(ptr) != 0);
 
   chunk = GetChunkForPtr(ptr);
   pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> gPageSize2Pow);
   mapbits = chunk->map[pageind].bits;
   MOZ_DIAGNOSTIC_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
   if ((mapbits & CHUNK_MAP_LARGE) == 0) {
     arena_run_t* run = (arena_run_t*)(mapbits & ~gPageSizeMask);
-    MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
-    ret = run->bin->mSizeClass;
+    MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
+    ret = run->mBin->mSizeClass;
   } else {
     ret = mapbits & ~gPageSizeMask;
     MOZ_DIAGNOSTIC_ASSERT(ret != 0);
   }
 
   return ret;
 }
 
@@ -3484,40 +3484,40 @@ MozJemalloc::jemalloc_ptr_info(const voi
 
     void* addr = ((char*)chunk) + (pageind << gPageSize2Pow);
     *aInfo = { TagLiveLarge, addr, size };
     return;
   }
 
   // It must be a small allocation.
   auto run = (arena_run_t*)(mapbits & ~gPageSizeMask);
-  MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
+  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
 
   // The allocation size is stored in the run metadata.
-  size_t size = run->bin->mSizeClass;
+  size_t size = run->mBin->mSizeClass;
 
   // Address of the first possible pointer in the run after its headers.
-  uintptr_t reg0_addr = (uintptr_t)run + run->bin->mRunFirstRegionOffset;
+  uintptr_t reg0_addr = (uintptr_t)run + run->mBin->mRunFirstRegionOffset;
   if (aPtr < (void*)reg0_addr) {
     // In the run header.
     *aInfo = { TagUnknown, nullptr, 0 };
     return;
   }
 
   // Position in the run.
   unsigned regind = ((uintptr_t)aPtr - reg0_addr) / size;
 
   // Pointer to the allocation's base address.
   void* addr = (void*)(reg0_addr + regind * size);
 
   // Check if the allocation has been freed.
   unsigned elm = regind >> (LOG2(sizeof(int)) + 3);
   unsigned bit = regind - (elm << (LOG2(sizeof(int)) + 3));
   PtrInfoTag tag =
-    ((run->regs_mask[elm] & (1U << bit))) ? TagFreedSmall : TagLiveSmall;
+    ((run->mRegionsMask[elm] & (1U << bit))) ? TagFreedSmall : TagLiveSmall;
 
   *aInfo = { tag, addr, size };
 }
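
Annotation (standalone sketch, hypothetical names): the jemalloc_ptr_info path above recovers a region from a raw pointer with the same arithmetic used elsewhere in this patch: offset from the first region, divide by the size class, then test the corresponding mRegionsMask bit to see whether the region is currently free.

#include <cstdint>

struct RegionLookup
{
  unsigned mIndex;  // region index within the run
  uintptr_t mBase;  // base address of that region
  bool mFreed;      // true if the mask bit marks the region free
};

static RegionLookup
LookupRegion(uintptr_t aRun, uintptr_t aPtr, uint32_t aFirstRegionOffset,
             uint32_t aSizeClass, const uint32_t* aRegionsMask)
{
  uintptr_t reg0 = aRun + aFirstRegionOffset;
  unsigned regind = (aPtr - reg0) / aSizeClass; // which region aPtr is in
  unsigned elm = regind / 32;                   // mask word for that region
  unsigned bit = regind % 32;                   // bit within the word
  bool freed = (aRegionsMask[elm] & (1u << bit)) != 0;
  return { regind, reg0 + regind * aSizeClass, freed };
}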
 
 namespace Debug {
 // Helper for debuggers. We don't want it to be inlined and optimized out.
 MOZ_NEVER_INLINE jemalloc_ptr_info_t*
 jemalloc_ptr_info(const void* aPtr)
@@ -3533,58 +3533,58 @@ arena_t::DallocSmall(arena_chunk_t* aChu
                      void* aPtr,
                      arena_chunk_map_t* aMapElm)
 {
   arena_run_t* run;
   arena_bin_t* bin;
   size_t size;
 
   run = (arena_run_t*)(aMapElm->bits & ~gPageSizeMask);
-  MOZ_DIAGNOSTIC_ASSERT(run->magic == ARENA_RUN_MAGIC);
-  bin = run->bin;
+  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
+  bin = run->mBin;
   size = bin->mSizeClass;
   MOZ_DIAGNOSTIC_ASSERT(uintptr_t(aPtr) >=
                         uintptr_t(run) + bin->mRunFirstRegionOffset);
   MOZ_DIAGNOSTIC_ASSERT(
     (uintptr_t(aPtr) - (uintptr_t(run) + bin->mRunFirstRegionOffset)) % size ==
     0);
 
   memset(aPtr, kAllocPoison, size);
 
   arena_run_reg_dalloc(run, bin, aPtr, size);
-  run->nfree++;
-
-  if (run->nfree == bin->mRunNumRegions) {
+  run->mNumFree++;
+
+  if (run->mNumFree == bin->mRunNumRegions) {
     // Deallocate run.
     if (run == bin->mCurrentRun) {
       bin->mCurrentRun = nullptr;
     } else if (bin->mRunNumRegions != 1) {
       size_t run_pageind =
         (uintptr_t(run) - uintptr_t(aChunk)) >> gPageSize2Pow;
       arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];
 
       // This block's conditional is necessary because if the
       // run only contains one region, then it never gets
       // inserted into the non-full runs tree.
       MOZ_DIAGNOSTIC_ASSERT(bin->mNonFullRuns.Search(run_mapelm) == run_mapelm);
       bin->mNonFullRuns.Remove(run_mapelm);
     }
 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
-    run->magic = 0;
+    run->mMagic = 0;
 #endif
     DallocRun(run, true);
     bin->mNumRuns--;
-  } else if (run->nfree == 1 && run != bin->mCurrentRun) {
+  } else if (run->mNumFree == 1 && run != bin->mCurrentRun) {
     // Make sure that bin->mCurrentRun always refers to the lowest
     // non-full run, if one exists.
     if (!bin->mCurrentRun) {
       bin->mCurrentRun = run;
     } else if (uintptr_t(run) < uintptr_t(bin->mCurrentRun)) {
       // Switch mCurrentRun.
-      if (bin->mCurrentRun->nfree > 0) {
+      if (bin->mCurrentRun->mNumFree > 0) {
         arena_chunk_t* runcur_chunk = GetChunkForPtr(bin->mCurrentRun);
         size_t runcur_pageind =
           (uintptr_t(bin->mCurrentRun) - uintptr_t(runcur_chunk)) >>
           gPageSize2Pow;
         arena_chunk_map_t* runcur_mapelm = &runcur_chunk->map[runcur_pageind];
 
         // Insert runcur.
         MOZ_DIAGNOSTIC_ASSERT(!bin->mNonFullRuns.Search(runcur_mapelm));
@@ -4581,21 +4581,21 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
 
       for (j = 0; j < kNumTinyClasses + kNumQuantumClasses + gNumSubPageClasses;
            j++) {
         arena_bin_t* bin = &arena->mBins[j];
         size_t bin_unused = 0;
 
         for (auto mapelm : bin->mNonFullRuns.iter()) {
           run = (arena_run_t*)(mapelm->bits & ~gPageSizeMask);
-          bin_unused += run->nfree * bin->mSizeClass;
+          bin_unused += run->mNumFree * bin->mSizeClass;
         }
 
         if (bin->mCurrentRun) {
-          bin_unused += bin->mCurrentRun->nfree * bin->mSizeClass;
+          bin_unused += bin->mCurrentRun->mNumFree * bin->mSizeClass;
         }
 
         arena_unused += bin_unused;
         arena_headers += bin->mNumRuns * bin->mRunFirstRegionOffset;
       }
     }
 
     MOZ_ASSERT(arena_mapped >= arena_committed);