Bug 1401099 - Move arena_malloc_small to a method of arena_t. r=njn
author: Mike Hommey <mh+mozilla@glandium.org>
date: Fri, 15 Sep 2017 19:11:52 +0900
changeset 382263 9aac426a0bb6a60aba6f2907bdf31bf8a988a9dc
parent 382262 ded028a534520fa050edc76a8e7c02f35bf8c043
child 382264 5e4cc07e9af3dfe71643a0638df846cc4b18a8ac
push id: 32551
push user: kwierso@gmail.com
push date: Thu, 21 Sep 2017 23:29:53 +0000
reviewers: njn
bugs: 1401099
milestone: 58.0a1
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -786,24 +786,26 @@ public:
   void DallocRun(arena_run_t* aRun, bool aDirty);
 
   void SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero);
 
   void TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize);
 
   void TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize, bool dirty);
 
+private:
   inline void* MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun);
 
   void* MallocBinHard(arena_bin_t* aBin);
 
-private:
   arena_run_t* GetNonFullBinRun(arena_bin_t* aBin);
 
 public:
+  inline void* MallocSmall(size_t aSize, bool aZero);
+
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
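The visibility shuffle above follows the pattern of this refactoring series: a static free function that took an arena_t* as its first argument becomes a member function, and the bin helpers it calls (MallocBinEasy, MallocBinHard) drop from public to private because outside callers now enter through MallocSmall. A condensed sketch of the before/after shape (simplified, not the full class):

// Before: free function, arena passed explicitly.
static inline void *
arena_malloc_small(arena_t *arena, size_t size, bool zero);

// After: public method; `this` replaces the explicit arena pointer,
// and the bin helpers become private implementation details.
class arena_t {
private:
  inline void* MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun);
  void* MallocBinHard(arena_bin_t* aBin);
public:
  inline void* MallocSmall(size_t aSize, bool aZero);
};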
@@ -3220,71 +3222,73 @@ arena_bin_run_size_calc(arena_bin_t *bin
 	bin->run_size = good_run_size;
 	bin->nregs = good_nregs;
 	bin->regs_mask_nelms = good_mask_nelms;
 	bin->reg0_offset = good_reg0_offset;
 
 	return (good_run_size);
 }
 
-static inline void *
-arena_malloc_small(arena_t *arena, size_t size, bool zero)
+void*
+arena_t::MallocSmall(size_t aSize, bool aZero)
 {
-	void *ret;
-	arena_bin_t *bin;
-	arena_run_t *run;
-
-	if (size < small_min) {
-		/* Tiny. */
-		size = pow2_ceil(size);
-		bin = &arena->mBins[ffs((int)(size >> (TINY_MIN_2POW +
-		    1)))];
-		/*
-		 * Bin calculation is always correct, but we may need
-		 * to fix size for the purposes of assertions and/or
-		 * stats accuracy.
-		 */
-		if (size < (1U << TINY_MIN_2POW))
-			size = (1U << TINY_MIN_2POW);
-	} else if (size <= small_max) {
-		/* Quantum-spaced. */
-		size = QUANTUM_CEILING(size);
-		bin = &arena->mBins[ntbins + (size >> opt_quantum_2pow)
-		    - 1];
-	} else {
-		/* Sub-page. */
-		size = pow2_ceil(size);
-		bin = &arena->mBins[ntbins + nqbins
-		    + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
-	}
-	MOZ_DIAGNOSTIC_ASSERT(size == bin->reg_size);
-
-	malloc_spin_lock(&arena->mLock);
-	if ((run = bin->runcur) && run->nfree > 0)
-		ret = arena->MallocBinEasy(bin, run);
-	else
-		ret = arena->MallocBinHard(bin);
-
-	if (!ret) {
-		malloc_spin_unlock(&arena->mLock);
-		return nullptr;
-	}
-
-	arena->mStats.allocated_small += size;
-	malloc_spin_unlock(&arena->mLock);
-
-	if (zero == false) {
-		if (opt_junk)
-			memset(ret, kAllocJunk, size);
-		else if (opt_zero)
-			memset(ret, 0, size);
-	} else
-		memset(ret, 0, size);
-
-	return (ret);
+  void* ret;
+  arena_bin_t* bin;
+  arena_run_t* run;
+
+  if (aSize < small_min) {
+    /* Tiny. */
+    aSize = pow2_ceil(aSize);
+    bin = &mBins[ffs((int)(aSize >> (TINY_MIN_2POW + 1)))];
+    /*
+     * Bin calculation is always correct, but we may need
+     * to fix size for the purposes of assertions and/or
+     * stats accuracy.
+     */
+    if (aSize < (1U << TINY_MIN_2POW)) {
+      aSize = 1U << TINY_MIN_2POW;
+    }
+  } else if (aSize <= small_max) {
+    /* Quantum-spaced. */
+    aSize = QUANTUM_CEILING(aSize);
+    bin = &mBins[ntbins + (aSize >> opt_quantum_2pow) - 1];
+  } else {
+    /* Sub-page. */
+    aSize = pow2_ceil(aSize);
+    bin = &mBins[ntbins + nqbins
+        + (ffs((int)(aSize >> opt_small_max_2pow)) - 2)];
+  }
+  MOZ_DIAGNOSTIC_ASSERT(aSize == bin->reg_size);
+
+  malloc_spin_lock(&mLock);
+  if ((run = bin->runcur) && run->nfree > 0) {
+    ret = MallocBinEasy(bin, run);
+  } else {
+    ret = MallocBinHard(bin);
+  }
+
+  if (!ret) {
+    malloc_spin_unlock(&mLock);
+    return nullptr;
+  }
+
+  mStats.allocated_small += aSize;
+  malloc_spin_unlock(&mLock);
+
+  if (!aZero) {
+    if (opt_junk) {
+      memset(ret, kAllocJunk, aSize);
+    } else if (opt_zero) {
+      memset(ret, 0, aSize);
+    }
+  } else {
+    memset(ret, 0, aSize);
+  }
+
+  return ret;
 }
 
 static void *
 arena_malloc_large(arena_t *arena, size_t size, bool zero)
 {
 	void *ret;
 
 	/* Large allocation. */
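The bin-index arithmetic in the three branches of MallocSmall() survives the rewrite unchanged and is worth tracing once. Below is a standalone sketch of the same calculation; TINY_MIN_2POW = 1 and a 16-byte quantum (three tiny bins of sizes 2, 4, 8) are illustrative assumptions, not the build's real configuration:

#include <cstdint>
#include <cstdio>
#include <strings.h> // ffs()

// pow2_ceil for 32-bit inputs, same contract as the allocator's helper.
static uint32_t pow2_ceil(uint32_t x) {
  x--;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;
}

int main() {
  const uint32_t kTinyMin2Pow = 1; // assumed TINY_MIN_2POW
  const uint32_t kQuantum2Pow = 4; // assumed opt_quantum_2pow (16-byte quantum)
  const uint32_t kNTBins = kQuantum2Pow - kTinyMin2Pow; // 3 tiny bins: 2, 4, 8

  // Tiny: bin = ffs(pow2_ceil(size) >> (TINY_MIN_2POW + 1)).
  // ffs(0) == 0, so a size that rounds below 1 << TINY_MIN_2POW still
  // indexes bin 0 -- which is why MallocSmall only fixes aSize up
  // afterwards, for assertions and stats accuracy.
  for (uint32_t size = 1; size <= 8; size++) {
    int bin = ffs((int)(pow2_ceil(size) >> (kTinyMin2Pow + 1)));
    printf("tiny %u -> bin %d\n", size, bin); // 1,2 -> 0; 3,4 -> 1; 5..8 -> 2
  }

  // Quantum-spaced: bin = ntbins + (QUANTUM_CEILING(size) >> quantum_2pow) - 1.
  uint32_t size = 17;
  uint32_t rounded = (size + 15) & ~15u; // QUANTUM_CEILING with quantum = 16
  printf("quantum %u -> bin %u\n", size,
         kNTBins + (rounded >> kQuantum2Pow) - 1); // 17 rounds to 32 -> bin 4
  return 0;
}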
@@ -3313,17 +3317,17 @@ arena_malloc(arena_t *arena, size_t size
 {
 
 	MOZ_ASSERT(arena);
 	MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
 	MOZ_ASSERT(size != 0);
 	MOZ_ASSERT(QUANTUM_CEILING(size) <= arena_maxclass);
 
 	if (size <= bin_maxclass) {
-		return (arena_malloc_small(arena, size, zero));
+		return arena->MallocSmall(size, zero);
 	} else
 		return (arena_malloc_large(arena, size, zero));
 }
 
 static inline void *
 imalloc(size_t size)
 {
 
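For orientation, the call chain after this patch: imalloc() picks an arena and calls arena_malloc(), which dispatches on size class. The bodies below are schematic restatements only (the real imalloc() also routes huge allocations down a separate path, omitted here):

static inline void* imalloc(size_t size) {
  // Schematic: pick an arena, then dispatch on size class.
  return arena_malloc(choose_arena(), size, /* zero */ false);
}

static inline void* arena_malloc(arena_t* arena, size_t size, bool zero) {
  if (size <= bin_maxclass) {
    return arena->MallocSmall(size, zero);        // bin-based small classes
  }
  return arena_malloc_large(arena, size, zero);   // page-run large classes
}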
@@ -4885,23 +4889,23 @@ MozJemalloc::free(void* aPtr)
  */
 
 /* This was added by Mozilla for use by SQLite. */
 template<> inline size_t
 MozJemalloc::malloc_good_size(size_t aSize)
 {
   /*
    * This duplicates the logic in imalloc(), arena_malloc() and
-   * arena_malloc_small().
+   * arena_t::MallocSmall().
    */
   if (aSize < small_min) {
     /* Small (tiny). */
     aSize = pow2_ceil(aSize);
     /*
-     * We omit the #ifdefs from arena_malloc_small() --
+     * We omit the #ifdefs from arena_t::MallocSmall() --
      * it can be inaccurate with its size in some cases, but this
      * function must be accurate.
      */
     if (aSize < (1U << TINY_MIN_2POW))
       aSize = (1U << TINY_MIN_2POW);
   } else if (aSize <= small_max) {
     /* Small (quantum-spaced). */
     aSize = QUANTUM_CEILING(aSize);
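Since malloc_good_size() must reproduce MallocSmall()'s rounding exactly, the first two classes reduce to a few lines. A sanity sketch using the same illustrative constants as above (small_min assumed equal to the quantum; the real thresholds are configuration-dependent):

#include <cstdint>
#include <cstdio>

static uint32_t good_size(uint32_t size) {
  const uint32_t kTinyMin = 1u << 1; // assumed 1U << TINY_MIN_2POW
  const uint32_t kQuantum = 16;      // assumed quantum
  const uint32_t kSmallMax = 512;    // assumed small_max

  if (size < kQuantum) {             // tiny: assumed small_min == quantum
    uint32_t p = 1;
    while (p < size) {
      p <<= 1;                       // pow2_ceil for small inputs
    }
    return p < kTinyMin ? kTinyMin : p;
  }
  if (size <= kSmallMax) {
    // Quantum-spaced: round up to a multiple of the quantum.
    return (size + kQuantum - 1) & ~(kQuantum - 1);
  }
  return size; // sub-page and larger classes continue in the original
}

int main() {
  const uint32_t sizes[] = {1, 3, 17, 100};
  for (uint32_t s : sizes) {
    printf("good_size(%u) = %u\n", s, good_size(s)); // 2, 4, 32, 112
  }
  return 0;
}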