Bug 1044077 - Tweak some jemalloc constants. r=glandium a=ritu
author     Nicholas Nethercote <nnethercote@mozilla.com>
date       Tue, 14 Jul 2015 20:35:37 -0700
changeset  289172 a8459b40a444ae016bbf8b3729b9acca3c89861e
parent     289171 22c0d857e4c4ec624a4a1a90828952362e903d77
child      289173 2bf9fc66f663e4659a4af83a157fdf28090bba88
push id    5067
push user  raliiev@mozilla.com
push date  Mon, 21 Sep 2015 14:04:52 +0000
treeherder mozilla-beta@14221ffe5b2f
reviewers  glandium, ritu
bugs       1044077
milestone  42.0a2
memory/mozjemalloc/jemalloc.c
memory/mozjemalloc/jemalloc_types.h
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -4222,17 +4222,17 @@ arena_malloc_small(arena_t *arena, size_
 	arena->stats.nmalloc_small++;
 	arena->stats.allocated_small += size;
 #endif
 	malloc_spin_unlock(&arena->lock);
 
 	if (zero == false) {
 #ifdef MALLOC_FILL
 		if (opt_junk)
-			memset(ret, 0xa5, size);
+			memset(ret, 0xe4, size);
 		else if (opt_zero)
 			memset(ret, 0, size);
 #endif
 	} else
 		memset(ret, 0, size);
 
 	return (ret);
 }
@@ -4258,17 +4258,17 @@ arena_malloc_large(arena_t *arena, size_
 	arena->stats.nmalloc_large++;
 	arena->stats.allocated_large += size;
 #endif
 	malloc_spin_unlock(&arena->lock);
 
 	if (zero == false) {
 #ifdef MALLOC_FILL
 		if (opt_junk)
-			memset(ret, 0xa5, size);
+			memset(ret, 0xe4, size);
 		else if (opt_zero)
 			memset(ret, 0, size);
 #endif
 	}
 
 	return (ret);
 }
 
@@ -4360,17 +4360,17 @@ arena_palloc(arena_t *arena, size_t alig
 #ifdef MALLOC_STATS
 	arena->stats.nmalloc_large++;
 	arena->stats.allocated_large += size;
 #endif
 	malloc_spin_unlock(&arena->lock);
 
 #ifdef MALLOC_FILL
 	if (opt_junk)
-		memset(ret, 0xa5, size);
+		memset(ret, 0xe4, size);
 	else if (opt_zero)
 		memset(ret, 0, size);
 #endif
 	return (ret);
 }
 
 static inline void *
 ipalloc(size_t alignment, size_t size)
@@ -4581,17 +4581,17 @@ arena_dalloc_small(arena_t *arena, arena
 
 	run = (arena_run_t *)(mapelm->bits & ~pagesize_mask);
 	RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
 	bin = run->bin;
 	size = bin->reg_size;
 
 #ifdef MALLOC_FILL
 	if (opt_poison)
-		memset(ptr, 0x5a, size);
+		memset(ptr, 0xe5, size);
 #endif
 
 	arena_run_reg_dalloc(run, bin, ptr, size);
 	run->nfree++;
 
 	if (run->nfree == bin->nregs) {
 		/* Deallocate run. */
 		if (run == bin->runcur)
@@ -4674,17 +4674,17 @@ arena_dalloc_large(arena_t *arena, arena
 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
 		    pagesize_2pow;
 		size_t size = chunk->map[pageind].bits & ~pagesize_mask;
 
 #ifdef MALLOC_FILL
 #ifdef MALLOC_STATS
 		if (opt_poison)
 #endif
-			memset(ptr, 0x5a, size);
+			memset(ptr, 0xe5, size);
 #endif
 #ifdef MALLOC_STATS
 		arena->stats.allocated_large -= size;
 #endif
 	}
 #ifdef MALLOC_STATS
 	arena->stats.ndalloc_large++;
 #endif
@@ -4813,34 +4813,34 @@ arena_ralloc_large(void *ptr, size_t siz
 {
 	size_t psize;
 
 	psize = PAGE_CEILING(size);
 	if (psize == oldsize) {
 		/* Same size class. */
 #ifdef MALLOC_FILL
 		if (opt_poison && size < oldsize) {
-			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
+			memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize -
 			    size);
 		}
 #endif
 		return (false);
 	} else {
 		arena_chunk_t *chunk;
 		arena_t *arena;
 
 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 		arena = chunk->arena;
 		RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
 
 		if (psize < oldsize) {
 #ifdef MALLOC_FILL
 			/* Fill before shrinking in order to avoid a race. */
 			if (opt_poison) {
-				memset((void *)((uintptr_t)ptr + size), 0x5a,
+				memset((void *)((uintptr_t)ptr + size), 0xe5,
 				    oldsize - size);
 			}
 #endif
 			arena_ralloc_large_shrink(arena, chunk, ptr, psize,
 			    oldsize);
 			return (false);
 		} else {
 			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
@@ -4900,17 +4900,17 @@ arena_ralloc(void *ptr, size_t size, siz
 	else
 #endif
 		memcpy(ret, ptr, copysize);
 	idalloc(ptr);
 	return (ret);
 IN_PLACE:
 #ifdef MALLOC_FILL
 	if (opt_poison && size < oldsize)
-		memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
+		memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize - size);
 	else if (opt_zero && size > oldsize)
 		memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
 #endif
 	return (ptr);
 }
 
 static inline void *
 iralloc(void *ptr, size_t size)
@@ -5120,19 +5120,19 @@ huge_palloc(size_t size, size_t alignmen
 	if (csize - psize > 0)
 		pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
 #endif
 
 #ifdef MALLOC_FILL
 	if (zero == false) {
 		if (opt_junk)
 #  ifdef MALLOC_DECOMMIT
-			memset(ret, 0xa5, psize);
+			memset(ret, 0xe4, psize);
 #  else
-			memset(ret, 0xa5, csize);
+			memset(ret, 0xe4, csize);
 #  endif
 		else if (opt_zero)
 #  ifdef MALLOC_DECOMMIT
 			memset(ret, 0, psize);
 #  else
 			memset(ret, 0, csize);
 #  endif
 	}
@@ -5149,17 +5149,17 @@ huge_ralloc(void *ptr, size_t size, size
 
 	/* Avoid moving the allocation if the size class would not change. */
 
 	if (oldsize > arena_maxclass &&
 	    CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
 		size_t psize = PAGE_CEILING(size);
 #ifdef MALLOC_FILL
 		if (opt_poison && size < oldsize) {
-			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
+			memset((void *)((uintptr_t)ptr + size), 0xe5, oldsize
 			    - size);
 		}
 #endif
 #ifdef MALLOC_DECOMMIT
 		if (psize < oldsize) {
 			extent_node_t *node, key;
 
 			pages_decommit((void *)((uintptr_t)ptr + psize),
--- a/memory/mozjemalloc/jemalloc_types.h
+++ b/memory/mozjemalloc/jemalloc_types.h
@@ -50,18 +50,18 @@ typedef unsigned char jemalloc_bool;
  * sure that the compiled results of jemalloc.c are in sync with this header
  * file.
  */
 typedef struct {
 	/*
 	 * Run-time configuration settings.
 	 */
 	jemalloc_bool	opt_abort;	/* abort(3) on error? */
-	jemalloc_bool	opt_junk;	/* Fill allocated memory with 0xa5/0x5a? */
-	jemalloc_bool	opt_poison;	/* Fill free memory with 0xa5/0x5a? */
+	jemalloc_bool	opt_junk;	/* Fill allocated memory with 0xe4? */
+	jemalloc_bool	opt_poison;	/* Fill free memory with 0xe5? */
 	jemalloc_bool	opt_utrace;	/* Trace all allocation events? */
 	jemalloc_bool	opt_sysv;	/* SysV semantics? */
 	jemalloc_bool	opt_xmalloc;	/* abort(3) on OOM? */
 	jemalloc_bool	opt_zero;	/* Fill allocated memory with 0x0? */
 	size_t	narenas;	/* Number of arenas. */
 	size_t	balance_threshold; /* Arena contention rebalance threshold. */
 	size_t	quantum;	/* Allocation quantum. */
 	size_t	small_max;	/* Max quantum-spaced allocation size. */
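
For readers unfamiliar with the two flags being retuned here: opt_junk fills freshly allocated memory (now with 0xe4) so that reads of uninitialized data show a recognizable pattern, while opt_poison fills memory on free (now with 0xe5) so that use-after-free bugs show a distinct pattern. A minimal standalone sketch of the technique follows, assuming a caller-supplied size on free; the wrapper names are hypothetical and not part of mozjemalloc:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static bool opt_junk   = true;  /* fill new allocations with 0xe4 */
static bool opt_poison = true;  /* fill freed memory with 0xe5 */

/* Allocate and junk-fill: a read of uninitialized memory sees 0xe4. */
static void *
sketch_malloc(size_t size)
{
	void *ret = malloc(size);
	if (ret != NULL && opt_junk)
		memset(ret, 0xe4, size);
	return ret;
}

/* Poison before freeing: a use-after-free sees 0xe5. A real allocator
 * derives the size from its own metadata, as jemalloc.c does above. */
static void
sketch_free(void *ptr, size_t size)
{
	if (ptr != NULL && opt_poison)
		memset(ptr, 0xe5, size);
	free(ptr);
}

Either way, the point is that a byte pattern observed in a crash dump tells you whether the memory was never initialized (junk) or already freed (poison).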