Bug 1401099 - Move arena_run_split to a method of arena_t. r=njn
author Mike Hommey <mh+mozilla@glandium.org>
Fri, 15 Sep 2017 18:08:23 +0900
changeset 382257 164be858d88c36d992bf458cb8713fb8c8368c49
parent 382256 85d69337743746760c7bb7c45abbf24ced7a119e
child 382258 b73ccc81201f5782935bea97d105786b675ac1d8
push id 32551
push user kwierso@gmail.com
push date Thu, 21 Sep 2017 23:29:53 +0000
treeherder mozilla-central@d6d6fd889f7b
reviewers njn
bugs 1401099
milestone 58.0a1
Bug 1401099 - Move arena_run_split to a method of arena_t. r=njn
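
This changeset converts the static free function arena_run_split() into the arena_t member SplitRun(): the explicit arena_t* parameter is dropped, callers change from arena_run_split(arena, run, size, large, zero) to arena->SplitRun(run, size, large, zero) (or plain SplitRun(run, ...) from inside other arena_t methods), and the body reaches mRunsAvail, mStats, mNumDirty and mChunksDirty through the implicit this. Below is a minimal, self-contained C++ sketch of that refactoring shape; the toy types, the mSplitCount member and the placeholder body are hypothetical illustrations, not the real mozjemalloc declarations.

// Sketch only: toy stand-ins for arena_t/arena_run_t illustrating the
// free-function-to-method conversion; not the actual mozjemalloc code.
#include <cstddef>

struct arena_run_t { size_t size = 0; };

struct arena_t {
  size_t mSplitCount = 0;  // hypothetical member, only to show `this` access

  void SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero);
};

// Before the change, the same logic lived in a static free function and every
// caller passed the arena explicitly:
//   static void arena_run_split(arena_t* arena, arena_run_t* run,
//                               size_t size, bool large, bool zero);
//   arena_run_split(arena, run, size, large, zero);

void
arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool /*aLarge*/, bool /*aZero*/)
{
  aRun->size = aSize;  // placeholder body; the real method updates the chunk page map
  mSplitCount++;       // members are reached through the implicit `this`,
                       // where the free function wrote arena->...
}

int main()
{
  arena_t arena;
  arena_run_t run;
  // Call sites become method calls on the arena:
  arena.SplitRun(&run, 4096, /*aLarge=*/true, /*aZero=*/false);
  return (arena.mSplitCount == 1 && run.size == 4096) ? 0 : 1;
}
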
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -700,20 +700,20 @@ struct arena_t {
 #  define ARENA_MAGIC 0x947d3d24
 #endif
 
   /* All operations on this arena require that lock be locked. */
   malloc_spinlock_t mLock;
 
   arena_stats_t mStats;
 
+private:
   /* Tree of dirty-page-containing chunks this arena manages. */
   arena_chunk_tree_t mChunksDirty;
 
-private:
 #ifdef MALLOC_DOUBLE_PURGE
   /* Head of a linked list of MADV_FREE'd-page-containing chunks this
    * arena manages. */
   mozilla::DoublyLinkedList<arena_chunk_t> mChunksMAdvised;
 #endif
 
   /*
    * In order to avoid rapid chunk allocation/deallocation when an arena
@@ -735,22 +735,24 @@ public:
    * memory is mapped for each arena.
    */
   size_t mNumDirty;
   /*
    * Maximum value allowed for mNumDirty.
    */
   size_t mMaxDirty;
 
+private:
   /*
    * Size/address-ordered tree of this arena's available runs.  This tree
    * is used for first-best-fit run allocation.
    */
   arena_avail_tree_t mRunsAvail;
 
+public:
   /*
    * mBins is used to store rings of free regions of the following sizes,
    * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
    *
    *   mBins[i] | size |
    *   --------+------+
    *        0  |    2 |
    *        1  |    4 |
@@ -778,16 +780,18 @@ private:
 
   void DeallocChunk(arena_chunk_t* aChunk);
 
 public:
   arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
 
   void DallocRun(arena_run_t* aRun, bool aDirty);
 
+  void SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero);
+
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
@@ -2549,124 +2553,118 @@ arena_run_reg_dalloc(arena_run_t *run, a
 		run->regs_minelm = elm;
 	bit = regind - (elm << (SIZEOF_INT_2POW + 3));
 	MOZ_DIAGNOSTIC_ASSERT((run->regs_mask[elm] & (1U << bit)) == 0);
 	run->regs_mask[elm] |= (1U << bit);
 #undef SIZE_INV
 #undef SIZE_INV_SHIFT
 }
 
-static void
-arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
-    bool zero)
+void
+arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero)
 {
-	arena_chunk_t *chunk;
-	size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-	old_ndirty = chunk->ndirty;
-	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
-	    >> pagesize_2pow);
-	total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >>
-	    pagesize_2pow;
-	need_pages = (size >> pagesize_2pow);
-	MOZ_ASSERT(need_pages > 0);
-	MOZ_ASSERT(need_pages <= total_pages);
-	rem_pages = total_pages - need_pages;
-
-	arena_avail_tree_remove(&arena->mRunsAvail, &chunk->map[run_ind]);
-
-	/* Keep track of trailing unused pages for later use. */
-	if (rem_pages > 0) {
-		chunk->map[run_ind+need_pages].bits = (rem_pages <<
-		    pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
-		    pagesize_mask);
-		chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
-		    pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
-		    pagesize_mask);
-		arena_avail_tree_insert(&arena->mRunsAvail,
-		    &chunk->map[run_ind+need_pages]);
-	}
-
-	for (i = 0; i < need_pages; i++) {
-		/*
-		 * Commit decommitted pages if necessary.  If a decommitted
-		 * page is encountered, commit all needed adjacent decommitted
-		 * pages in one operation, in order to reduce system call
-		 * overhead.
-		 */
-		if (chunk->map[run_ind + i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
-			size_t j;
-
-			/*
-			 * Advance i+j to just past the index of the last page
-			 * to commit.  Clear CHUNK_MAP_DECOMMITTED and
-			 * CHUNK_MAP_MADVISED along the way.
-			 */
-			for (j = 0; i + j < need_pages && (chunk->map[run_ind +
-			    i + j].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED); j++) {
-				/* DECOMMITTED and MADVISED are mutually exclusive. */
-				MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
-					     chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
-
-				chunk->map[run_ind + i + j].bits &=
-				    ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
-			}
+  arena_chunk_t* chunk;
+  size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
+
+  chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(aRun);
+  old_ndirty = chunk->ndirty;
+  run_ind = (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> pagesize_2pow);
+  total_pages = (chunk->map[run_ind].bits & ~pagesize_mask) >> pagesize_2pow;
+  need_pages = (aSize >> pagesize_2pow);
+  MOZ_ASSERT(need_pages > 0);
+  MOZ_ASSERT(need_pages <= total_pages);
+  rem_pages = total_pages - need_pages;
+
+  arena_avail_tree_remove(&mRunsAvail, &chunk->map[run_ind]);
+
+  /* Keep track of trailing unused pages for later use. */
+  if (rem_pages > 0) {
+    chunk->map[run_ind+need_pages].bits = (rem_pages <<
+        pagesize_2pow) | (chunk->map[run_ind+need_pages].bits &
+        pagesize_mask);
+    chunk->map[run_ind+total_pages-1].bits = (rem_pages <<
+        pagesize_2pow) | (chunk->map[run_ind+total_pages-1].bits &
+        pagesize_mask);
+    arena_avail_tree_insert(&mRunsAvail, &chunk->map[run_ind+need_pages]);
+  }
+
+  for (i = 0; i < need_pages; i++) {
+    /*
+     * Commit decommitted pages if necessary.  If a decommitted
+     * page is encountered, commit all needed adjacent decommitted
+     * pages in one operation, in order to reduce system call
+     * overhead.
+     */
+    if (chunk->map[run_ind + i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
+      size_t j;
+
+      /*
+       * Advance i+j to just past the index of the last page
+       * to commit.  Clear CHUNK_MAP_DECOMMITTED and
+       * CHUNK_MAP_MADVISED along the way.
+       */
+      for (j = 0; i + j < need_pages && (chunk->map[run_ind +
+          i + j].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED); j++) {
+        /* DECOMMITTED and MADVISED are mutually exclusive. */
+        MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
+               chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));
+
+        chunk->map[run_ind + i + j].bits &=
+            ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
+      }
 
 #  ifdef MALLOC_DECOMMIT
-			pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
-			    << pagesize_2pow)), (j << pagesize_2pow));
+      pages_commit((void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
+                   j << pagesize_2pow);
 #  endif
 
-			arena->mStats.committed += j;
-
-#  ifndef MALLOC_DECOMMIT
-                }
-#  else
-		} else /* No need to zero since commit zeros. */
+      mStats.committed += j;
+
+    }
+#  ifdef MALLOC_DECOMMIT
+    else /* No need to zero since commit zeroes. */
 #  endif
 
-		/* Zero if necessary. */
-		if (zero) {
-			if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED)
-			    == 0) {
-				memset((void *)((uintptr_t)chunk + ((run_ind
-				    + i) << pagesize_2pow)), 0, pagesize);
-				/* CHUNK_MAP_ZEROED is cleared below. */
-			}
-		}
-
-		/* Update dirty page accounting. */
-		if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
-			chunk->ndirty--;
-			arena->mNumDirty--;
-			/* CHUNK_MAP_DIRTY is cleared below. */
-		}
-
-		/* Initialize the chunk map. */
-		if (large) {
-			chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE
-			    | CHUNK_MAP_ALLOCATED;
-		} else {
-			chunk->map[run_ind + i].bits = (size_t)run
-			    | CHUNK_MAP_ALLOCATED;
-		}
-	}
-
-	/*
-	 * Set the run size only in the first element for large runs.  This is
-	 * primarily a debugging aid, since the lack of size info for trailing
-	 * pages only matters if the application tries to operate on an
-	 * interior pointer.
-	 */
-	if (large)
-		chunk->map[run_ind].bits |= size;
-
-	if (chunk->ndirty == 0 && old_ndirty > 0)
-		arena_chunk_tree_dirty_remove(&arena->mChunksDirty, chunk);
+    /* Zero if necessary. */
+    if (aZero) {
+      if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED) == 0) {
+        memset((void*)(uintptr_t(chunk) + ((run_ind + i) << pagesize_2pow)),
+               0, pagesize);
+        /* CHUNK_MAP_ZEROED is cleared below. */
+      }
+    }
+
+    /* Update dirty page accounting. */
+    if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
+      chunk->ndirty--;
+      mNumDirty--;
+      /* CHUNK_MAP_DIRTY is cleared below. */
+    }
+
+    /* Initialize the chunk map. */
+    if (aLarge) {
+      chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+    } else {
+      chunk->map[run_ind + i].bits = size_t(aRun) | CHUNK_MAP_ALLOCATED;
+    }
+  }
+
+  /*
+   * Set the run size only in the first element for large runs.  This is
+   * primarily a debugging aid, since the lack of size info for trailing
+   * pages only matters if the application tries to operate on an
+   * interior pointer.
+   */
+  if (aLarge) {
+    chunk->map[run_ind].bits |= aSize;
+  }
+
+  if (chunk->ndirty == 0 && old_ndirty > 0) {
+    arena_chunk_tree_dirty_remove(&mChunksDirty, chunk);
+  }
 }
 
 void
 arena_t::InitChunk(arena_chunk_t* aChunk, bool aZeroed)
 {
   size_t i;
   /* WARNING: The following relies on !aZeroed meaning "used to be an arena
          * chunk".
@@ -2769,28 +2767,28 @@ arena_t::AllocRun(arena_bin_t* aBin, siz
   mapelm = arena_avail_tree_nsearch(&mRunsAvail, &key);
   if (mapelm) {
     arena_chunk_t* chunk =
         (arena_chunk_t*)CHUNK_ADDR2BASE(mapelm);
     size_t pageind = (uintptr_t(mapelm) - uintptr_t(chunk->map)) /
         sizeof(arena_chunk_map_t);
 
     run = (arena_run_t*)(uintptr_t(chunk) + (pageind << pagesize_2pow));
-    arena_run_split(this, run, aSize, aLarge, aZero);
+    SplitRun(run, aSize, aLarge, aZero);
     return run;
   }
 
   if (mSpare) {
     /* Use the spare. */
     arena_chunk_t* chunk = mSpare;
     mSpare = nullptr;
     run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
     /* Insert the run into the tree of available runs. */
     arena_avail_tree_insert(&mRunsAvail, &chunk->map[arena_chunk_header_npages]);
-    arena_run_split(this, run, aSize, aLarge, aZero);
+    SplitRun(run, aSize, aLarge, aZero);
     return run;
   }
 
   /*
    * No usable runs.  Create a new chunk from which to allocate
    * the run.
    */
   {
@@ -2800,17 +2798,17 @@ arena_t::AllocRun(arena_bin_t* aBin, siz
     if (!chunk) {
       return nullptr;
     }
 
     InitChunk(chunk, zeroed);
     run = (arena_run_t*)(uintptr_t(chunk) + (arena_chunk_header_npages << pagesize_2pow));
   }
   /* Update page map. */
-  arena_run_split(this, run, aSize, aLarge, aZero);
+  SplitRun(run, aSize, aLarge, aZero);
   return run;
 }
 
 void
 arena_t::Purge(bool aAll)
 {
   arena_chunk_t* chunk;
   size_t i, npages;
@@ -3888,17 +3886,17 @@ arena_ralloc_large_grow(arena_t *arena, 
 	if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
 	    & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
 	    ~pagesize_mask) >= size - oldsize) {
 		/*
 		 * The next run is available and sufficiently large.  Split the
 		 * following run, then merge the first part with the existing
 		 * allocation.
 		 */
-		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
+		arena->SplitRun((arena_run_t *)(uintptr_t(chunk) +
 		    ((pageind+npages) << pagesize_2pow)), size - oldsize, true,
 		    false);
 
 		chunk->map[pageind].bits = size | CHUNK_MAP_LARGE |
 		    CHUNK_MAP_ALLOCATED;
 		chunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
 		    CHUNK_MAP_ALLOCATED;