Bug 1415454 - Remove the unused arena_bin_t* argument to arena_t::AllocRun. r=njn
author Mike Hommey <mh+mozilla@glandium.org>
Fri, 03 Nov 2017 15:54:20 +0900
changeset 444170 b8acdda178182e9dec084f84af572294d6d58ac6
parent 444169 f64defe0d11edb2a76bb6cca295a7fd864542501
child 444171 bfa36a25eaf674064cf16bdcd8ef4eb049359591
push id 1618
push user Callek@gmail.com
push date Thu, 11 Jan 2018 17:45:48 +0000
treeherder mozilla-release@882ca853e05a
reviewers njn
bugs 1415454
milestone 58.0a1
first release with: nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
last release without: nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64
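In short, the patch shrinks AllocRun's signature from four parameters to three: the arena_bin_t* was accepted but never read inside the function. A minimal before/after sketch of the declaration, distilled from the hunks below:

  // Before: aBin is threaded through from GetNonFullBinRun but unused,
  // and the large/aligned allocation paths must pass nullptr for it.
  arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);

  // After: callers pass only the run size and flags; GetNonFullBinRun
  // derives the size from aBin->mRunSize at the call site.
  arena_run_t* AllocRun(size_t aSize, bool aLarge, bool aZero);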
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -996,20 +996,17 @@ public:
 
   arena_t();
 
 private:
   void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
 
   void DeallocChunk(arena_chunk_t* aChunk);
 
-  arena_run_t* AllocRun(arena_bin_t* aBin,
-                        size_t aSize,
-                        bool aLarge,
-                        bool aZero);
+  arena_run_t* AllocRun(size_t aSize, bool aLarge, bool aZero);
 
   void DallocRun(arena_run_t* aRun, bool aDirty);
 
   MOZ_MUST_USE bool SplitRun(arena_run_t* aRun,
                              size_t aSize,
                              bool aLarge,
                              bool aZero);
 
@@ -2600,17 +2597,17 @@ arena_t::DeallocChunk(arena_chunk_t* aCh
   // Dirty page flushing only uses the tree of dirty chunks, so leaving this
   // chunk in the chunks_* trees is sufficient for that purpose.
   mRunsAvail.Remove(&aChunk->map[gChunkHeaderNumPages]);
 
   mSpare = aChunk;
 }
 
 arena_run_t*
-arena_t::AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero)
+arena_t::AllocRun(size_t aSize, bool aLarge, bool aZero)
 {
   arena_run_t* run;
   arena_chunk_map_t* mapelm;
   arena_chunk_map_t key;
 
   MOZ_ASSERT(aSize <= gMaxLargeClass);
   MOZ_ASSERT((aSize & gPageSizeMask) == 0);
 
@@ -2891,17 +2888,17 @@ arena_t::GetNonFullBinRun(arena_bin_t* a
     // run is guaranteed to have available space.
     aBin->mNonFullRuns.Remove(mapelm);
     run = (arena_run_t*)(mapelm->bits & ~gPageSizeMask);
     return run;
   }
   // No existing runs have any space available.
 
   // Allocate a new run.
-  run = AllocRun(aBin, aBin->mRunSize, false, false);
+  run = AllocRun(aBin->mRunSize, false, false);
   if (!run) {
     return nullptr;
   }
   // Don't initialize if a race in arena_t::RunAlloc() allowed an existing
   // run to become usable.
   if (run == aBin->mCurrentRun) {
     return run;
   }
@@ -3077,17 +3074,17 @@ arena_t::MallocLarge(size_t aSize, bool 
 {
   void* ret;
 
   // Large allocation.
   aSize = PAGE_CEILING(aSize);
 
   {
     MutexAutoLock lock(mLock);
-    ret = AllocRun(nullptr, aSize, true, aZero);
+    ret = AllocRun(aSize, true, aZero);
     if (!ret) {
       return nullptr;
     }
     mStats.allocated_large += aSize;
   }
 
   if (aZero == false) {
     if (opt_junk) {
@@ -3131,17 +3128,17 @@ arena_t::Palloc(size_t aAlignment, size_
   size_t offset;
   arena_chunk_t* chunk;
 
   MOZ_ASSERT((aSize & gPageSizeMask) == 0);
   MOZ_ASSERT((aAlignment & gPageSizeMask) == 0);
 
   {
     MutexAutoLock lock(mLock);
-    ret = AllocRun(nullptr, aAllocSize, true, false);
+    ret = AllocRun(aAllocSize, true, false);
     if (!ret) {
       return nullptr;
     }
 
     chunk = GetChunkForPtr(ret);
 
     offset = uintptr_t(ret) & (aAlignment - 1);
     MOZ_ASSERT((offset & gPageSizeMask) == 0);
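For reference, the three call sites after this patch, exactly as updated in the hunks above:

  // GetNonFullBinRun: small runs are sized from the bin itself.
  run = AllocRun(aBin->mRunSize, false, false);

  // MallocLarge: large allocations no longer pass a nullptr bin.
  ret = AllocRun(aSize, true, aZero);

  // Palloc: likewise for aligned (over-)allocations.
  ret = AllocRun(aAllocSize, true, false);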