Bug 1401099 - Move arena_palloc to a method of arena_t. r?njn draft
authorMike Hommey <mh+mozilla@glandium.org>
Fri, 15 Sep 2017 20:28:23 +0900
changeset 667417 2b376f821475ce4d0bd6070327a8ee06cc361c20
parent 667416 ce6aabbf7259c35c98f837efc55645dfd9ebbcfc
child 667418 cf797a006c8114db087e1d2dc8535acc8f46bec7
push id: 80694
push user: bmo:mh+mozilla@glandium.org
push date: Wed, 20 Sep 2017 02:07:21 +0000
reviewers: njn
bugs: 1401099
milestone: 57.0a1
Bug 1401099 - Move arena_palloc to a method of arena_t. r?njn
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -775,41 +775,45 @@ public:
 
   bool Init();
 
 private:
   void InitChunk(arena_chunk_t* aChunk, bool aZeroed);
 
   void DeallocChunk(arena_chunk_t* aChunk);
 
+  arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
+
 public:
-  arena_run_t* AllocRun(arena_bin_t* aBin, size_t aSize, bool aLarge, bool aZero);
-
   void DallocRun(arena_run_t* aRun, bool aDirty);
 
   void SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge, bool aZero);
 
+private:
   void TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize);
 
+public:
   void TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun, size_t aOldSize, size_t aNewSize, bool dirty);
 
 private:
   inline void* MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun);
 
   void* MallocBinHard(arena_bin_t* aBin);
 
   arena_run_t* GetNonFullBinRun(arena_bin_t* aBin);
 
   inline void* MallocSmall(size_t aSize, bool aZero);
 
   void* MallocLarge(size_t aSize, bool aZero);
 
 public:
   inline void* Malloc(size_t aSize, bool aZero);
 
+  void* Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize);
+
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
@@ -3345,67 +3349,66 @@ icalloc(size_t size)
 
 	if (size <= arena_maxclass)
 		return choose_arena(size)->Malloc(size, true);
 	else
 		return (huge_malloc(size, true));
 }
 
 /* Only handles large allocations that require more than page alignment. */
-static void *
-arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
+void*
+arena_t::Palloc(size_t aAlignment, size_t aSize, size_t aAllocSize)
 {
-	void *ret;
-	size_t offset;
-	arena_chunk_t *chunk;
-
-	MOZ_ASSERT((size & pagesize_mask) == 0);
-	MOZ_ASSERT((alignment & pagesize_mask) == 0);
-
-	malloc_spin_lock(&arena->mLock);
-	ret = arena->AllocRun(nullptr, alloc_size, true, false);
-	if (!ret) {
-		malloc_spin_unlock(&arena->mLock);
-		return nullptr;
-	}
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
-
-	offset = (uintptr_t)ret & (alignment - 1);
-	MOZ_ASSERT((offset & pagesize_mask) == 0);
-	MOZ_ASSERT(offset < alloc_size);
-	if (offset == 0)
-		arena->TrimRunTail(chunk, (arena_run_t*)ret, alloc_size, size, false);
-	else {
-		size_t leadsize, trailsize;
-
-		leadsize = alignment - offset;
-		if (leadsize > 0) {
-			arena->TrimRunHead(chunk, (arena_run_t*)ret, alloc_size,
-			    alloc_size - leadsize);
-			ret = (void *)((uintptr_t)ret + leadsize);
-		}
-
-		trailsize = alloc_size - leadsize - size;
-		if (trailsize != 0) {
-			/* Trim trailing space. */
-			MOZ_ASSERT(trailsize < alloc_size);
-			arena->TrimRunTail(chunk, (arena_run_t*)ret, size + trailsize,
-			    size, false);
-		}
-	}
-
-	arena->mStats.allocated_large += size;
-	malloc_spin_unlock(&arena->mLock);
-
-	if (opt_junk)
-		memset(ret, kAllocJunk, size);
-	else if (opt_zero)
-		memset(ret, 0, size);
-	return (ret);
+  void* ret;
+  size_t offset;
+  arena_chunk_t* chunk;
+
+  MOZ_ASSERT((aSize & pagesize_mask) == 0);
+  MOZ_ASSERT((aAlignment & pagesize_mask) == 0);
+
+  malloc_spin_lock(&mLock);
+  ret = AllocRun(nullptr, aAllocSize, true, false);
+  if (!ret) {
+    malloc_spin_unlock(&mLock);
+    return nullptr;
+  }
+
+  chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(ret);
+
+  offset = uintptr_t(ret) & (aAlignment - 1);
+  MOZ_ASSERT((offset & pagesize_mask) == 0);
+  MOZ_ASSERT(offset < aAllocSize);
+  if (offset == 0) {
+    TrimRunTail(chunk, (arena_run_t*)ret, aAllocSize, aSize, false);
+  } else {
+    size_t leadsize, trailsize;
+
+    leadsize = aAlignment - offset;
+    if (leadsize > 0) {
+      TrimRunHead(chunk, (arena_run_t*)ret, aAllocSize, aAllocSize - leadsize);
+      ret = (void*)(uintptr_t(ret) + leadsize);
+    }
+
+    trailsize = aAllocSize - leadsize - aSize;
+    if (trailsize != 0) {
+      /* Trim trailing space. */
+      MOZ_ASSERT(trailsize < aAllocSize);
+      TrimRunTail(chunk, (arena_run_t*)ret, aSize + trailsize, aSize, false);
+    }
+  }
+
+  mStats.allocated_large += aSize;
+  malloc_spin_unlock(&mLock);
+
+  if (opt_junk) {
+    memset(ret, kAllocJunk, aSize);
+  } else if (opt_zero) {
+    memset(ret, 0, aSize);
+  }
+  return ret;
 }
 
 static inline void *
 ipalloc(size_t alignment, size_t size)
 {
 	void *ret;
 	size_t ceil_size;
 
@@ -3481,17 +3484,17 @@ ipalloc(size_t alignment, size_t size)
 			 * the first conditional below to fail, which means
 			 * that the bogus run_size value never gets used for
 			 * anything important.
 			 */
 			run_size = (alignment << 1) - pagesize;
 		}
 
 		if (run_size <= arena_maxclass) {
-			ret = arena_palloc(choose_arena(size), alignment, ceil_size,
+			ret = choose_arena(size)->Palloc(alignment, ceil_size,
 			    run_size);
 		} else if (alignment <= chunksize)
 			ret = huge_malloc(ceil_size, false);
 		else
 			ret = huge_palloc(ceil_size, alignment, false);
 	}
 
 	MOZ_ASSERT(((uintptr_t)ret & (alignment - 1)) == 0);