Bug 1401099 - Move arena_malloc to a method of arena_t. r?njn
author Mike Hommey <mh+mozilla@glandium.org>
Fri, 15 Sep 2017 19:20:09 +0900
changeset 667416 ce6aabbf7259c35c98f837efc55645dfd9ebbcfc
parent 667415 d1559626fda1d84931329a05eb24abcb297b2ed2
child 667417 2b376f821475ce4d0bd6070327a8ee06cc361c20
push id 80694
push user bmo:mh+mozilla@glandium.org
push date Wed, 20 Sep 2017 02:07:21 +0000
reviewers njn
bugs 1401099
milestone 57.0a1
Bug 1401099 - Move arena_malloc to a method of arena_t. r?njn
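
This converts the free function arena_malloc() into arena_t::Malloc(),
continuing the pattern already applied to MallocSmall() and MallocLarge().
The explicit MOZ_ASSERT(arena) null check goes away because invoking a
method already presupposes a valid object. In outline (an illustrative
sketch of the calling convention change, not the full patch):

    // Before: free function taking the arena as an explicit parameter.
    static inline void* arena_malloc(arena_t* arena, size_t size, bool zero);
    ret = arena_malloc(choose_arena(size), size, false);

    // After: method on arena_t; the arena pointer becomes |this|.
    void* arena_t::Malloc(size_t aSize, bool aZero);
    ret = choose_arena(size)->Malloc(size, false);

MallocSmall() and MallocLarge() become private helpers, with Malloc() as
the public entry point dispatching on bin_maxclass. Callers in imalloc(),
icalloc(), ipalloc() and arena_ralloc() are updated accordingly.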
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -793,21 +793,23 @@ public:
 
 private:
   inline void* MallocBinEasy(arena_bin_t* aBin, arena_run_t* aRun);
 
   void* MallocBinHard(arena_bin_t* aBin);
 
   arena_run_t* GetNonFullBinRun(arena_bin_t* aBin);
 
-public:
   inline void* MallocSmall(size_t aSize, bool aZero);
 
   void* MallocLarge(size_t aSize, bool aZero);
 
+public:
+  inline void* Malloc(size_t aSize, bool aZero);
+
   void Purge(bool aAll);
 
   void HardPurge();
 };
 
 /******************************************************************************/
 /*
  * Data.
@@ -3309,49 +3311,45 @@ arena_t::MallocLarge(size_t aSize, bool 
     } else if (opt_zero) {
       memset(ret, 0, aSize);
     }
   }
 
   return (ret);
 }
 
-static inline void *
-arena_malloc(arena_t *arena, size_t size, bool zero)
+void*
+arena_t::Malloc(size_t aSize, bool aZero)
 {
-
-	MOZ_ASSERT(arena);
-	MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
-	MOZ_ASSERT(size != 0);
-	MOZ_ASSERT(QUANTUM_CEILING(size) <= arena_maxclass);
-
-	if (size <= bin_maxclass) {
-		return arena->MallocSmall(size, zero);
-	} else
-		return arena->MallocLarge(size, zero);
+  MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
+  MOZ_ASSERT(aSize != 0);
+  MOZ_ASSERT(QUANTUM_CEILING(aSize) <= arena_maxclass);
+
+  return (aSize <= bin_maxclass) ? MallocSmall(aSize, aZero)
+                                 : MallocLarge(aSize, aZero);
 }
 
 static inline void *
 imalloc(size_t size)
 {
 
 	MOZ_ASSERT(size != 0);
 
 	if (size <= arena_maxclass)
-		return (arena_malloc(choose_arena(size), size, false));
+		return choose_arena(size)->Malloc(size, false);
 	else
 		return (huge_malloc(size, false));
 }
 
 static inline void *
 icalloc(size_t size)
 {
 
 	if (size <= arena_maxclass)
-		return (arena_malloc(choose_arena(size), size, true));
+		return choose_arena(size)->Malloc(size, true);
 	else
 		return (huge_malloc(size, true));
 }
 
 /* Only handles large allocations that require more than page alignment. */
 static void *
 arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
 {
@@ -3436,17 +3434,17 @@ ipalloc(size_t alignment, size_t size)
 	 */
 	if (ceil_size < size) {
 		/* size_t overflow. */
 		return nullptr;
 	}
 
 	if (ceil_size <= pagesize || (alignment <= pagesize
 	    && ceil_size <= arena_maxclass))
-		ret = arena_malloc(choose_arena(size), ceil_size, false);
+		ret = choose_arena(size)->Malloc(ceil_size, false);
 	else {
 		size_t run_size;
 
 		/*
 		 * We can't achieve sub-page alignment, so round up alignment
 		 * permanently; it makes later calculations simpler.
 		 */
 		alignment = PAGE_CEILING(alignment);
@@ -3999,21 +3997,21 @@ arena_ralloc(void *ptr, size_t size, siz
 			return (ptr);
 	}
 
 	/*
 	 * If we get here, then size and oldsize are different enough that we
 	 * need to move the object.  In that case, fall back to allocating new
 	 * space and copying.
 	 */
-	ret = arena_malloc(choose_arena(size), size, false);
+	ret = choose_arena(size)->Malloc(size, false);
 	if (!ret)
 		return nullptr;
 
-	/* Junk/zero-filling were already done by arena_malloc(). */
+	/* Junk/zero-filling were already done by arena_t::Malloc(). */
 	copysize = (size < oldsize) ? size : oldsize;
 #ifdef VM_COPY_MIN
 	if (copysize >= VM_COPY_MIN)
 		pages_copy(ret, ptr, copysize);
 	else
 #endif
 		memcpy(ret, ptr, copysize);
 	idalloc(ptr);