Bug 1401099 - Move arena_bin_nonfull_run_get to a method of arena_t. r=njn
author: Mike Hommey <mh+mozilla@glandium.org>
date: Fri, 15 Sep 2017 18:23:33 +0900
changeset: 382262 ded028a534520fa050edc76a8e7c02f35bf8c043
parent: 382261 05baed504ac57514ff3d835c1604bcde0b59fd9f
child: 382263 9aac426a0bb6a60aba6f2907bdf31bf8a988a9dc
push id: 32551
push user: kwierso@gmail.com
push date: Thu, 21 Sep 2017 23:29:53 +0000
treeherder: mozilla-central@d6d6fd889f7b
reviewers: njn
bugs: 1401099
milestone: 58.0a1
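The change follows the cleanup pattern used throughout this file: a file-static, C-style function that took an arena_t* as its first argument becomes a (private) member function, so the explicit arena pointer turns into the implicit this. A minimal sketch of that pattern, using invented Arena/Bin/Run stand-ins rather than the real mozjemalloc types:

// Sketch only: Arena, Bin, and Run are hypothetical stand-ins illustrating the
// free-function-to-member-function conversion, not the real jemalloc types.
struct Bin;
struct Run;

class Arena {
public:
  Run* MallocBinHard(Bin* aBin);

private:
  // Previously a file-static helper:
  //   static Run* arena_bin_nonfull_run_get(Arena* arena, Bin* bin);
  // The explicit Arena* parameter becomes the implicit `this`.
  Run* GetNonFullBinRun(Bin* aBin);
};

Run*
Arena::MallocBinHard(Bin* aBin)
{
  // The call site changes from arena_bin_nonfull_run_get(this, aBin)
  // to a plain member call.
  return GetNonFullBinRun(aBin);
}

Run*
Arena::GetNonFullBinRun(Bin* aBin)
{
  return nullptr; // body elided in this sketch
}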
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -790,16 +790,20 @@ public:
   void TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize);
 
   void TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize, bool dirty);
 
   inline void* MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun);
 
   void* MallocBinHard(arena_bin_t* aBin);
 
+private:
+  arena_run_t* GetNonFullBinRun(arena_bin_t* aBin);
+
+public:
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
@@ -3049,67 +3053,69 @@ arena_t::TrimRunTail(arena_chunk_t* aChu
   aChunk->map[pageind].bits = aNewSize | CHUNK_MAP_LARGE |
       CHUNK_MAP_ALLOCATED;
   aChunk->map[pageind+npages].bits = (aOldSize - aNewSize) | CHUNK_MAP_LARGE
       | CHUNK_MAP_ALLOCATED;
 
   DallocRun((arena_run_t*)(uintptr_t(aRun) + aNewSize), aDirty);
 }
 
-static arena_run_t *
-arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
+arena_run_t*
+arena_t::GetNonFullBinRun(arena_bin_t* aBin)
 {
-	arena_chunk_map_t *mapelm;
-	arena_run_t *run;
-	unsigned i, remainder;
-
-	/* Look for a usable run. */
-	mapelm = arena_run_tree_first(&bin->runs);
-	if (mapelm) {
-		/* run is guaranteed to have available space. */
-		arena_run_tree_remove(&bin->runs, mapelm);
-		run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
-		return (run);
-	}
-	/* No existing runs have any space available. */
-
-	/* Allocate a new run. */
-	run = arena->AllocRun(bin, bin->run_size, false, false);
-	if (!run)
-		return nullptr;
-	/*
-	 * Don't initialize if a race in arena_t::AllocRun() allowed an existing
-	 * run to become usable.
-	 */
-	if (run == bin->runcur)
-		return (run);
-
-	/* Initialize run internals. */
-	run->bin = bin;
-
-	for (i = 0; i < bin->regs_mask_nelms - 1; i++)
-		run->regs_mask[i] = UINT_MAX;
-	remainder = bin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
-	if (remainder == 0)
-		run->regs_mask[i] = UINT_MAX;
-	else {
-		/* The last element has spare bits that need to be unset. */
-		run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
-		    - remainder));
-	}
-
-	run->regs_minelm = 0;
-
-	run->nfree = bin->nregs;
+  arena_chunk_map_t* mapelm;
+  arena_run_t* run;
+  unsigned i, remainder;
+
+  /* Look for a usable run. */
+  mapelm = arena_run_tree_first(&aBin->runs);
+  if (mapelm) {
+    /* run is guaranteed to have available space. */
+    arena_run_tree_remove(&aBin->runs, mapelm);
+    run = (arena_run_t*)(mapelm->bits & ~pagesize_mask);
+    return run;
+  }
+  /* No existing runs have any space available. */
+
+  /* Allocate a new run. */
+  run = AllocRun(aBin, aBin->run_size, false, false);
+  if (!run)
+    return nullptr;
+  /*
+   * Don't initialize if a race in arena_t::AllocRun() allowed an existing
+   * run to become usable.
+   */
+  if (run == aBin->runcur) {
+    return run;
+  }
+
+  /* Initialize run internals. */
+  run->bin = aBin;
+
+  for (i = 0; i < aBin->regs_mask_nelms - 1; i++) {
+    run->regs_mask[i] = UINT_MAX;
+  }
+  remainder = aBin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
+  if (remainder == 0) {
+    run->regs_mask[i] = UINT_MAX;
+  } else {
+    /* The last element has spare bits that need to be unset. */
+    run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
+        - remainder));
+  }
+
+  run->regs_minelm = 0;
+
+  run->nfree = aBin->nregs;
 #if defined(MOZ_DEBUG) || defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
-	run->magic = ARENA_RUN_MAGIC;
+  run->magic = ARENA_RUN_MAGIC;
 #endif
 
-	bin->stats.curruns++;
-	return (run);
+  aBin->stats.curruns++;
+  return run;
 }
 
 /* bin->runcur must have space available before this function is called. */
 void*
 arena_t::MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun)
 {
   void* ret;
 
@@ -3122,17 +3128,17 @@ arena_t::MallocBinEasy(arena_bin_t* aBin
 
   return ret;
 }
 
 /* Re-fill aBin->runcur, then call arena_t::MallocBinEasy(). */
 void*
 arena_t::MallocBinHard(arena_bin_t* aBin)
 {
-  aBin->runcur = arena_bin_nonfull_run_get(this, aBin);
+  aBin->runcur = GetNonFullBinRun(aBin);
   if (!aBin->runcur) {
     return nullptr;
   }
   MOZ_DIAGNOSTIC_ASSERT(aBin->runcur->magic == ARENA_RUN_MAGIC);
   MOZ_DIAGNOSTIC_ASSERT(aBin->runcur->nfree > 0);
 
   return MallocBinEasy(aBin, aBin->runcur);
 }
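As a side note, the least obvious part of the moved code is the tail of the regs_mask initialization: the last mask word only covers nregs modulo the number of bits in an unsigned int, and its spare high bits have to stay clear so they are never handed out as free regions. A standalone sketch of that computation, assuming 32-bit unsigned ints (SIZEOF_INT_2POW == 2) and made-up nregs values that are not taken from the patch:

// Standalone sketch of the regs_mask tail computation in GetNonFullBinRun.
// The nregs values below are invented examples.
#include <cassert>
#include <climits>

constexpr unsigned SIZEOF_INT_2POW = 2; // log2(sizeof(unsigned int)), i.e. 32 bits per mask word

unsigned
LastMaskWord(unsigned nregs)
{
  // Number of regions that spill into the last mask word (nregs % 32).
  unsigned remainder = nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
  if (remainder == 0) {
    // nregs is an exact multiple of 32: every bit of the last word is a real region.
    return UINT_MAX;
  }
  // Only the low `remainder` bits correspond to regions; the spare high bits
  // stay clear.
  return UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3)) - remainder);
}

int
main()
{
  assert(LastMaskWord(64) == UINT_MAX); // exact multiple of 32
  assert(LastMaskWord(70) == 0x3Fu);    // 70 % 32 == 6 -> six low bits set
  return 0;
}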