Bug 1339441 - Take the arena lock earlier in arena_dalloc(). r=glandium
author Emanuel Hoogeveen <emanuel.hoogeveen@gmail.com>
Wed, 15 Feb 2017 09:48:00 -0500
changeset 343117 1f80ab6a90a07605e8cf27f3ba9f608e8d095105
parent 343116 7f7725dd2c08698040a8dc01ad963bceaa990b79
child 343118 e776663ecbb4df20c2ed612cbaf5d5e2808ab6bb
push id 31369
push user kwierso@gmail.com
push date Thu, 16 Feb 2017 00:18:40 +0000
treeherder mozilla-central@e9b926463f9e
reviewers glandium
bugs 1339441
milestone 54.0a1
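This changeset hoists the arena lock out of arena_dalloc_small() and arena_dalloc_large() into their caller, arena_dalloc(), so the chunk map entry is read under the lock and both the small and large deallocation paths share a single lock/unlock pair. The same reordering is applied to arena_ralloc_large_grow(), where the lock is now taken before the RELEASE_ASSERT that reads chunk->map. As a rough illustration of the resulting shape, here is a minimal, self-contained sketch (not mozjemalloc code: pthread_mutex_t stands in for malloc_spin_lock/malloc_spin_unlock, and toy_arena_t, toy_dalloc_*, MAP_ALLOCATED and MAP_LARGE are hypothetical stand-ins for the arena structures and chunk map bits):

/*
 * Toy sketch of the locking pattern after this patch: the caller takes
 * the lock once, before inspecting the map entry, and the size-class
 * specific callees assume the lock is already held.
 */
#include <pthread.h>
#include <stdio.h>

#define MAP_ALLOCATED	0x1u
#define MAP_LARGE	0x2u

typedef struct {
	pthread_mutex_t lock;
	unsigned map_bits;	/* stand-in for chunk->map[pageind].bits */
} toy_arena_t;

/* Assumes the arena lock is held by the caller, as in the patched code. */
static void
toy_dalloc_small(toy_arena_t *arena)
{
	arena->map_bits &= ~MAP_ALLOCATED;
	puts("freed small run (lock held by caller)");
}

static void
toy_dalloc_large(toy_arena_t *arena)
{
	arena->map_bits &= ~(MAP_ALLOCATED | MAP_LARGE);
	puts("freed large run (lock held by caller)");
}

static void
toy_dalloc(toy_arena_t *arena)
{
	/* Lock before reading the map entry, mirroring the patched flow. */
	pthread_mutex_lock(&arena->lock);
	if ((arena->map_bits & MAP_LARGE) == 0)
		toy_dalloc_small(arena);
	else
		toy_dalloc_large(arena);
	pthread_mutex_unlock(&arena->lock);
}

int
main(void)
{
	toy_arena_t arena = { PTHREAD_MUTEX_INITIALIZER,
	    MAP_ALLOCATED | MAP_LARGE };

	toy_dalloc(&arena);	/* build with: cc -pthread sketch.c */
	return 0;
}

Taking the lock before reading mapelm->bits makes the map lookup and the actual deallocation one critical section, so the entry cannot change between the size-class check and the free; the callees no longer lock or unlock themselves.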
memory/mozjemalloc/jemalloc.c
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -4657,19 +4657,16 @@ arena_dalloc_small(arena_t *arena, arena
 	arena->stats.allocated_small -= size;
 	arena->stats.ndalloc_small++;
 #endif
 }
 
 static void
 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 {
-	/* Large allocation. */
-	malloc_spin_lock(&arena->lock);
-
 #ifdef MALLOC_FILL
 #ifndef MALLOC_STATS
 	if (opt_poison)
 #endif
 #endif
 	{
 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
 		    pagesize_2pow;
@@ -4685,17 +4682,16 @@ arena_dalloc_large(arena_t *arena, arena
 		arena->stats.allocated_large -= size;
 #endif
 	}
 #ifdef MALLOC_STATS
 	arena->stats.ndalloc_large++;
 #endif
 
 	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
-	malloc_spin_unlock(&arena->lock);
 }
 
 static inline void
 arena_dalloc(void *ptr, size_t offset)
 {
 	arena_chunk_t *chunk;
 	arena_t *arena;
 	size_t pageind;
@@ -4705,26 +4701,28 @@ arena_dalloc(void *ptr, size_t offset)
 	assert(offset != 0);
 	assert(CHUNK_ADDR2OFFSET(ptr) == offset);
 
 	chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
 	arena = chunk->arena;
 	assert(arena != NULL);
 	RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
 
+	malloc_spin_lock(&arena->lock);
 	pageind = offset >> pagesize_2pow;
 	mapelm = &chunk->map[pageind];
 	RELEASE_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
-		malloc_spin_lock(&arena->lock);
 		arena_dalloc_small(arena, chunk, ptr, mapelm);
-		malloc_spin_unlock(&arena->lock);
-	} else
+	} else {
+		/* Large allocation. */
 		arena_dalloc_large(arena, chunk, ptr);
+	}
+	malloc_spin_unlock(&arena->lock);
 }
 
 static inline void
 idalloc(void *ptr)
 {
 	size_t offset;
 
 	assert(ptr != NULL);
@@ -4762,25 +4760,25 @@ arena_ralloc_large_shrink(arena_t *arena
 
 static bool
 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t size, size_t oldsize)
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t npages = oldsize >> pagesize_2pow;
 
+#ifdef MALLOC_BALANCE
+	arena_lock_balance(arena);
+#else
+	malloc_spin_lock(&arena->lock);
+#endif
 	RELEASE_ASSERT(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
 
 	/* Try to extend the run. */
 	assert(size > oldsize);
-#ifdef MALLOC_BALANCE
-	arena_lock_balance(arena);
-#else
-	malloc_spin_lock(&arena->lock);
-#endif
 	if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits
 	    & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits &
 	    ~pagesize_mask) >= size - oldsize) {
 		/*
 		 * The next run is available and sufficiently large.  Split the
 		 * following run, then merge the first part with the existing
 		 * allocation.
 		 */