Bug 1401099 - Move arena_run_dalloc to a method of arena_t. r=njn
author: Mike Hommey <mh+mozilla@glandium.org>
Fri, 15 Sep 2017 18:01:27 +0900
changeset 382256 85d69337743746760c7bb7c45abbf24ced7a119e
parent 382255 297976241b7ceb43eea7d0105931817fc369a9e3
child 382257 164be858d88c36d992bf458cb8713fb8c8368c49
push id: 32551
push user: kwierso@gmail.com
push date: Thu, 21 Sep 2017 23:29:53 +0000
treeherder: mozilla-central@d6d6fd889f7b
reviewers: njn
bugs: 1401099
milestone: 58.0a1
Bug 1401099 - Move arena_run_dalloc to a method of arena_t. r=njn
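For context, the pattern applied in this patch is the usual one of turning a free function whose first parameter is an `arena_t*` into an `arena_t` member function: explicit `arena->member` accesses become plain member accesses on `this`, and call sites change from `arena_run_dalloc(arena, run, dirty)` to `arena->DallocRun(run, dirty)`. The following is a minimal, self-contained sketch of that same transformation; the `Counter` names are hypothetical and are not part of the patch.

```cpp
#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for arena_t; the names are illustrative only.
class Counter
{
public:
  // After the refactoring, the logic lives in a method, analogous to
  // arena_t::DallocRun(aRun, aDirty): the object is now `this`.
  void Increment(size_t aBy) { mCount += aBy; } // was: counter->mCount += by;

  size_t Count() const { return mCount; }

private:
  size_t mCount = 0;
};

// Before the refactoring, the same logic would be a static free function
// taking the object as an explicit first argument, mirroring
// arena_run_dalloc(arena_t* arena, arena_run_t* run, bool dirty):
//
//   static void
//   counter_increment(Counter* counter, size_t by)
//   {
//     counter->mCount += by;
//   }

int
main()
{
  Counter c;
  c.Increment(3);              // was: counter_increment(&c, 3);
  printf("%zu\n", c.Count());  // prints 3
  return 0;
}
```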
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -771,21 +771,23 @@ public:
    */
   arena_bin_t mBins[1]; /* Dynamically sized. */
 
   bool Init();
 
 private:
   void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
 
+  void DeallocChunk(arena_chunk_t* aChunk);
+
 public:
-  void DeallocChunk(arena_chunk_t* aChunk);
-
   arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
 
+  void DallocRun(arena_run_t* aRun, bool aDirty);
+
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
@@ -2891,167 +2893,164 @@ arena_t::Purge(bool aAll)
         mChunksMAdvised.remove(chunk);
       }
       mChunksMAdvised.pushFront(chunk);
     }
 #endif
   }
 }
 
-static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
+void
+arena_t::DallocRun(arena_run_t* aRun, bool aDirty)
 {
-	arena_chunk_t *chunk;
-	size_t size, run_ind, run_pages;
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
-	    >> pagesize_2pow);
-	MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
-	MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
-	if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
-		size = chunk->map[run_ind].bits & ~pagesize_mask;
-	else
-		size = run->bin->run_size;
-	run_pages = (size >> pagesize_2pow);
-
-	/* Mark pages as unallocated in the chunk map. */
-	if (dirty) {
-		size_t i;
-
-		for (i = 0; i < run_pages; i++) {
-			MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
-			    == 0);
-			chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
-		}
-
-		if (chunk->ndirty == 0) {
-			arena_chunk_tree_dirty_insert(&arena->mChunksDirty,
-			    chunk);
-		}
-		chunk->ndirty += run_pages;
-		arena->mNumDirty += run_pages;
-	} else {
-		size_t i;
-
-		for (i = 0; i < run_pages; i++) {
-			chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
-			    CHUNK_MAP_ALLOCATED);
-		}
-	}
-	chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
-	    pagesize_mask);
-	chunk->map[run_ind+run_pages-1].bits = size |
-	    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
-
-	/* Try to coalesce forward. */
-	if (run_ind + run_pages < chunk_npages &&
-	    (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
-		size_t nrun_size = chunk->map[run_ind+run_pages].bits &
-		    ~pagesize_mask;
-
-		/*
-		 * Remove successor from tree of available runs; the coalesced run is
-		 * inserted later.
-		 */
-		arena_avail_tree_remove(&arena->mRunsAvail,
-		    &chunk->map[run_ind+run_pages]);
-
-		size += nrun_size;
-		run_pages = size >> pagesize_2pow;
-
-		MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
-		    == nrun_size);
-		chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
-		    pagesize_mask);
-		chunk->map[run_ind+run_pages-1].bits = size |
-		    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
-	}
-
-	/* Try to coalesce backward. */
-	if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
-	    CHUNK_MAP_ALLOCATED) == 0) {
-		size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
-
-		run_ind -= prun_size >> pagesize_2pow;
-
-		/*
-		 * Remove predecessor from tree of available runs; the coalesced run is
-		 * inserted later.
-		 */
-		arena_avail_tree_remove(&arena->mRunsAvail,
-		    &chunk->map[run_ind]);
-
-		size += prun_size;
-		run_pages = size >> pagesize_2pow;
-
-		MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
-		    prun_size);
-		chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
-		    pagesize_mask);
-		chunk->map[run_ind+run_pages-1].bits = size |
-		    (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
-	}
-
-	/* Insert into tree of available runs, now that coalescing is complete. */
-	arena_avail_tree_insert(&arena->mRunsAvail, &chunk->map[run_ind]);
-
-	/* Deallocate chunk if it is now completely unused. */
-	if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
-	    CHUNK_MAP_ALLOCATED)) == arena_maxclass) {
-		arena->DeallocChunk(chunk);
-	}
-
-	/* Enforce mMaxDirty. */
-	if (arena->mNumDirty > arena->mMaxDirty) {
-		arena->Purge(false);
-	}
+  arena_chunk_t* chunk;
+  size_t size, run_ind, run_pages;
+
+  chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(aRun);
+  run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
+  MOZ_DIAGNOSTIC_ASSERT(run_ind >= arena_chunk_header_npages);
+  MOZ_DIAGNOSTIC_ASSERT(run_ind < chunk_npages);
+  if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0)
+    size = chunk->map[run_ind].bits & ~pagesize_mask;
+  else
+    size = aRun->bin->run_size;
+  run_pages = (size >> pagesize_2pow);
+
+  /* Mark pages as unallocated in the chunk map. */
+  if (aDirty) {
+    size_t i;
+
+    for (i = 0; i < run_pages; i++) {
+      MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY)
+          == 0);
+      chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
+    }
+
+    if (chunk->ndirty == 0) {
+      arena_chunk_tree_dirty_insert(&mChunksDirty,
+          chunk);
+    }
+    chunk->ndirty += run_pages;
+    mNumDirty += run_pages;
+  } else {
+    size_t i;
+
+    for (i = 0; i < run_pages; i++) {
+      chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE |
+          CHUNK_MAP_ALLOCATED);
+    }
+  }
+  chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+      pagesize_mask);
+  chunk->map[run_ind+run_pages-1].bits = size |
+      (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+
+  /* Try to coalesce forward. */
+  if (run_ind + run_pages < chunk_npages &&
+      (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
+    size_t nrun_size = chunk->map[run_ind+run_pages].bits &
+        ~pagesize_mask;
+
+    /*
+     * Remove successor from tree of available runs; the coalesced run is
+     * inserted later.
+     */
+    arena_avail_tree_remove(&mRunsAvail,
+        &chunk->map[run_ind+run_pages]);
+
+    size += nrun_size;
+    run_pages = size >> pagesize_2pow;
+
+    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind+run_pages-1].bits & ~pagesize_mask)
+        == nrun_size);
+    chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+        pagesize_mask);
+    chunk->map[run_ind+run_pages-1].bits = size |
+        (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+  }
+
+  /* Try to coalesce backward. */
+  if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits &
+      CHUNK_MAP_ALLOCATED) == 0) {
+    size_t prun_size = chunk->map[run_ind-1].bits & ~pagesize_mask;
+
+    run_ind -= prun_size >> pagesize_2pow;
+
+    /*
+     * Remove predecessor from tree of available runs; the coalesced run is
+     * inserted later.
+     */
+    arena_avail_tree_remove(&mRunsAvail, &chunk->map[run_ind]);
+
+    size += prun_size;
+    run_pages = size >> pagesize_2pow;
+
+    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~pagesize_mask) ==
+        prun_size);
+    chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits &
+        pagesize_mask);
+    chunk->map[run_ind+run_pages-1].bits = size |
+        (chunk->map[run_ind+run_pages-1].bits & pagesize_mask);
+  }
+
+  /* Insert into tree of available runs, now that coalescing is complete. */
+  arena_avail_tree_insert(&mRunsAvail, &chunk->map[run_ind]);
+
+  /* Deallocate chunk if it is now completely unused. */
+  if ((chunk->map[arena_chunk_header_npages].bits & (~pagesize_mask |
+      CHUNK_MAP_ALLOCATED)) == arena_maxclass) {
+    DeallocChunk(chunk);
+  }
+
+  /* Enforce mMaxDirty. */
+  if (mNumDirty > mMaxDirty) {
+    Purge(false);
+  }
 }
 
 static void
 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t oldsize, size_t newsize)
 {
 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t head_npages = (oldsize - newsize) >> pagesize_2pow;
 
 	MOZ_ASSERT(oldsize > newsize);
 
 	/*
-	 * Update the chunk map so that arena_run_dalloc() can treat the
+	 * Update the chunk map so that arena_t::DallocRun() can treat the
 	 * leading run as separately allocated.
 	 */
 	chunk->map[pageind].bits = (oldsize - newsize) | CHUNK_MAP_LARGE |
 	    CHUNK_MAP_ALLOCATED;
 	chunk->map[pageind+head_npages].bits = newsize | CHUNK_MAP_LARGE |
 	    CHUNK_MAP_ALLOCATED;
 
-	arena_run_dalloc(arena, run, false);
+	arena->DallocRun(run, false);
 }
 
 static void
 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t oldsize, size_t newsize, bool dirty)
 {
 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t npages = newsize >> pagesize_2pow;
 
 	MOZ_ASSERT(oldsize > newsize);
 
 	/*
-	 * Update the chunk map so that arena_run_dalloc() can treat the
+	 * Update the chunk map so that arena_t::DallocRun() can treat the
 	 * trailing run as separately allocated.
 	 */
 	chunk->map[pageind].bits = newsize | CHUNK_MAP_LARGE |
 	    CHUNK_MAP_ALLOCATED;
 	chunk->map[pageind+npages].bits = (oldsize - newsize) | CHUNK_MAP_LARGE
 	    | CHUNK_MAP_ALLOCATED;
 
-	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
-	    dirty);
+	arena->DallocRun((arena_run_t*)(uintptr_t(run) + newsize), dirty);
 }
 
 static arena_run_t *
 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 {
 	arena_chunk_map_t *mapelm;
 	arena_run_t *run;
 	unsigned i, remainder;
@@ -3752,17 +3751,17 @@ arena_dalloc_small(arena_t *arena, arena
 			 */
 			MOZ_DIAGNOSTIC_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
 				run_mapelm);
 			arena_run_tree_remove(&bin->runs, run_mapelm);
 		}
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
 		run->magic = 0;
 #endif
-		arena_run_dalloc(arena, run, true);
+		arena->DallocRun(run, true);
 		bin->stats.curruns--;
 	} else if (run->nfree == 1 && run != bin->runcur) {
 		/*
 		 * Make sure that bin->runcur always refers to the lowest
 		 * non-full run, if one exists.
 		 */
 		if (!bin->runcur)
 			bin->runcur = run;
@@ -3803,17 +3802,17 @@ arena_dalloc_large(arena_t *arena, arena
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
 	    pagesize_2pow;
 	size_t size = chunk->map[pageind].bits & ~pagesize_mask;
 
 	memset(ptr, kAllocPoison, size);
 	arena->mStats.allocated_large -= size;
 
-	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
+	arena->DallocRun((arena_run_t*)ptr, true);
 }
 
 static inline void
 arena_dalloc(void *ptr, size_t offset)
 {
 	arena_chunk_t *chunk;
 	arena_t *arena;
 	size_t pageind;