Bug 1057754 - Remove the bogus Valgrind annotations from mozjemalloc. r=glandium.
author: Nicholas Nethercote <nnethercote@mozilla.com>
date: Mon, 25 Aug 2014 16:59:43 -0700
changeset: 201616 775a14a8e1903322c425e051448925ccc13125a0
parent: 201615 2e67ec1837324d864454d874f871b007fd96d9cc
child: 201617 8d937c3db92bb26524c7817b6937eb7152d98b44
push id: 27375
push user: ryanvm@gmail.com
push date: Tue, 26 Aug 2014 19:56:59 +0000
treeherder: mozilla-central@f9bfe115fee5
reviewers: glandium
bugs: 1057754
milestone: 34.0a1
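
For context, the calls this patch deletes are Memcheck's client requests for describing a custom allocator to Valgrind. The sketch below shows how VALGRIND_MALLOCLIKE_BLOCK and VALGRIND_FREELIKE_BLOCK are meant to be paired; it is illustrative only, the pool names are hypothetical, and it is not mozjemalloc code. The requests are intended for memory that Memcheck does not already track as heap, and every registered block must be released exactly once.

/*
 * Minimal sketch of Memcheck's custom-allocator client requests.
 * `pool`, `pool_alloc` and `pool_free` are hypothetical names.
 */
#include <stddef.h>
#include <valgrind/valgrind.h>	/* requests compile to cheap no-ops natively */

static char pool[4096];		/* backing store Memcheck knows nothing about */
static size_t pool_used;

static void *
pool_alloc(size_t size)
{
	if (size > sizeof(pool) - pool_used)
		return NULL;
	void *p = pool + pool_used;
	pool_used += size;
	/* Register a heap-like block: no redzone (0), contents undefined (0). */
	VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, 0);
	return p;
}

static void
pool_free(void *p)
{
	/* Must balance the MALLOCLIKE_BLOCK above, on the same address. */
	VALGRIND_FREELIKE_BLOCK(p, 0);
	/* (A real pool would also recycle the space.) */
}

The annotations removed below do not follow that discipline consistently: the base_calloc hunk, for example, re-registers the block with size rather than number * size, and base_node_dealloc registers only sizeof(extent_node_t *) bytes.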
memory/mozjemalloc/jemalloc.c
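
The replacement strategy appears in the arena_chunk_init hunk below: instead of trying to keep annotations accurate, mozjemalloc now detects Valgrind at run time and bails out, telling the user either to build with --disable-jemalloc or to let Valgrind's own allocator take over via --soname-synonyms=somalloc=NONE. A condensed, standalone version of that guard is sketched here; check_not_under_valgrind is a hypothetical helper and abort() stands in for the patch's jemalloc_crash():

#include <stdio.h>
#include <stdlib.h>
#ifdef MOZ_VALGRIND
#  include <valgrind/valgrind.h>
#endif

static void
check_not_under_valgrind(void)
{
#ifdef MOZ_VALGRIND
	/*
	 * RUNNING_ON_VALGRIND is non-zero only when running under Valgrind,
	 * so the check costs a handful of instructions in a normal run.
	 */
	if (RUNNING_ON_VALGRIND) {
		fprintf(stderr, "Cannot run Valgrind with jemalloc enabled; "
		    "rebuild with --disable-jemalloc or pass "
		    "--soname-synonyms=somalloc=NONE to Valgrind.\n");
		abort();	/* the patch calls jemalloc_crash() here */
	}
#endif
}
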
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -188,25 +188,18 @@
 
 /*
  * MALLOC_VALIDATE causes malloc_usable_size() to perform some pointer
  * validation.  There are many possible errors that validation does not even
  * attempt to detect.
  */
 #define MALLOC_VALIDATE
 
-/* Embed no-op macros that support memory allocation tracking via valgrind. */
 #ifdef MOZ_VALGRIND
-#  define MALLOC_VALGRIND
-#endif
-#ifdef MALLOC_VALGRIND
 #  include <valgrind/valgrind.h>
-#else
-#  define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
-#  define VALGRIND_FREELIKE_BLOCK(addr, rzB)
 #endif
 
 /*
  * MALLOC_BALANCE enables monitoring of arena lock contention and dynamically
  * re-balances arena load if exponentially averaged contention exceeds a
  * certain threshold.
  */
 /* #define	MALLOC_BALANCE */
@@ -2095,65 +2088,54 @@ base_alloc(size_t size)
 		base_next_decommitted = pbase_next_addr;
 #  ifdef MALLOC_STATS
 		base_committed += (uintptr_t)pbase_next_addr -
 		    (uintptr_t)base_next_decommitted;
 #  endif
 	}
 #endif
 	malloc_mutex_unlock(&base_mtx);
-	VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, false);
 
 	return (ret);
 }
 
 static void *
 base_calloc(size_t number, size_t size)
 {
 	void *ret;
 
 	ret = base_alloc(number * size);
-#ifdef MALLOC_VALGRIND
-	if (ret != NULL) {
-		VALGRIND_FREELIKE_BLOCK(ret, 0);
-		VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, true);
-	}
-#endif
 	memset(ret, 0, number * size);
 
 	return (ret);
 }
 
 static extent_node_t *
 base_node_alloc(void)
 {
 	extent_node_t *ret;
 
 	malloc_mutex_lock(&base_mtx);
 	if (base_nodes != NULL) {
 		ret = base_nodes;
 		base_nodes = *(extent_node_t **)ret;
-		VALGRIND_FREELIKE_BLOCK(ret, 0);
-		VALGRIND_MALLOCLIKE_BLOCK(ret, sizeof(extent_node_t), 0, false);
 		malloc_mutex_unlock(&base_mtx);
 	} else {
 		malloc_mutex_unlock(&base_mtx);
 		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
 	}
 
 	return (ret);
 }
 
 static void
 base_node_dealloc(extent_node_t *node)
 {
 
 	malloc_mutex_lock(&base_mtx);
-	VALGRIND_FREELIKE_BLOCK(node, 0);
-	VALGRIND_MALLOCLIKE_BLOCK(node, sizeof(extent_node_t *), 0, false);
 	*(extent_node_t **)node = base_nodes;
 	base_nodes = node;
 	malloc_mutex_unlock(&base_mtx);
 }
 
 /******************************************************************************/
 
 #ifdef MALLOC_STATS
@@ -3350,24 +3332,18 @@ arena_run_split(arena_t *arena, arena_ru
 #  endif
 
 #endif
 
 		/* Zero if necessary. */
 		if (zero) {
 			if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED)
 			    == 0) {
-				VALGRIND_MALLOCLIKE_BLOCK((void *)((uintptr_t)
-				    chunk + ((run_ind + i) << pagesize_2pow)),
-				    pagesize, 0, false);
 				memset((void *)((uintptr_t)chunk + ((run_ind
 				    + i) << pagesize_2pow)), 0, pagesize);
-				VALGRIND_FREELIKE_BLOCK((void *)((uintptr_t)
-				    chunk + ((run_ind + i) << pagesize_2pow)),
-				    0);
 				/* CHUNK_MAP_ZEROED is cleared below. */
 			}
 		}
 
 		/* Update dirty page accounting. */
 		if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
 			chunk->ndirty--;
 			arena->ndirty--;
@@ -3398,18 +3374,25 @@ arena_run_split(arena_t *arena, arena_ru
 }
 
 static void
 arena_chunk_init(arena_t *arena, arena_chunk_t *chunk)
 {
 	arena_run_t *run;
 	size_t i;
 
-	VALGRIND_MALLOCLIKE_BLOCK(chunk, (arena_chunk_header_npages <<
-	    pagesize_2pow), 0, false);
+#ifdef MOZ_VALGRIND
+	if (RUNNING_ON_VALGRIND) {
+		fprintf(stderr, "ERROR: Cannot run Valgrind with jemalloc enabled.\n");
+		fprintf(stderr, "Please build with --disable-jemalloc, or run Valgrind with\n");
+		fprintf(stderr, "--soname-synonyms=somalloc=NONE.\n");
+		jemalloc_crash();
+	}
+#endif
+
 #ifdef MALLOC_STATS
 	arena->stats.mapped += chunksize;
 #endif
 
 	chunk->arena = arena;
 
 	/*
 	 * Claim that no pages are in use, since the header is merely overhead.
@@ -3465,17 +3448,16 @@ arena_chunk_dealloc(arena_t *arena, aren
 #endif
 		}
 
 #ifdef MALLOC_DOUBLE_PURGE
 		/* This is safe to do even if arena->spare is not in the list. */
 		LinkedList_Remove(&arena->spare->chunks_madvised_elem);
 #endif
 
-		VALGRIND_FREELIKE_BLOCK(arena->spare, 0);
 		chunk_dealloc((void *)arena->spare, chunksize);
 #ifdef MALLOC_STATS
 		arena->stats.mapped -= chunksize;
 		arena->stats.committed -= arena_chunk_header_npages;
 #endif
 	}
 
 	/*
@@ -3828,19 +3810,16 @@ arena_bin_nonfull_run_get(arena_t *arena
 		return (NULL);
 	/*
 	 * Don't initialize if a race in arena_run_alloc() allowed an existing
 	 * run to become usable.
 	 */
 	if (run == bin->runcur)
 		return (run);
 
-	VALGRIND_MALLOCLIKE_BLOCK(run, sizeof(arena_run_t) + (sizeof(unsigned) *
-	    (bin->regs_mask_nelms - 1)), 0, false);
-
 	/* Initialize run internals. */
 	run->bin = bin;
 
 	for (i = 0; i < bin->regs_mask_nelms - 1; i++)
 		run->regs_mask[i] = UINT_MAX;
 	remainder = bin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
 	if (remainder == 0)
 		run->regs_mask[i] = UINT_MAX;
@@ -4086,17 +4065,16 @@ arena_malloc_small(arena_t *arena, size_
 
 #ifdef MALLOC_STATS
 	bin->stats.nrequests++;
 	arena->stats.nmalloc_small++;
 	arena->stats.allocated_small += size;
 #endif
 	malloc_spin_unlock(&arena->lock);
 
-	VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, zero);
 	if (zero == false) {
 #ifdef MALLOC_FILL
 		if (opt_junk)
 			memset(ret, 0xa5, size);
 		else if (opt_zero)
 			memset(ret, 0, size);
 #endif
 	} else
@@ -4123,17 +4101,16 @@ arena_malloc_large(arena_t *arena, size_
 		return (NULL);
 	}
 #ifdef MALLOC_STATS
 	arena->stats.nmalloc_large++;
 	arena->stats.allocated_large += size;
 #endif
 	malloc_spin_unlock(&arena->lock);
 
-	VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, zero);
 	if (zero == false) {
 #ifdef MALLOC_FILL
 		if (opt_junk)
 			memset(ret, 0xa5, size);
 		else if (opt_zero)
 			memset(ret, 0, size);
 #endif
 	}
@@ -4227,17 +4204,16 @@ arena_palloc(arena_t *arena, size_t alig
 	}
 
 #ifdef MALLOC_STATS
 	arena->stats.nmalloc_large++;
 	arena->stats.allocated_large += size;
 #endif
 	malloc_spin_unlock(&arena->lock);
 
-	VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, false);
 #ifdef MALLOC_FILL
 	if (opt_junk)
 		memset(ret, 0xa5, size);
 	else if (opt_zero)
 		memset(ret, 0, size);
 #endif
 	return (ret);
 }
@@ -4478,17 +4454,16 @@ arena_dalloc_small(arena_t *arena, arena
 			 */
 			RELEASE_ASSERT(arena_run_tree_search(&bin->runs, run_mapelm) ==
 				run_mapelm);
 			arena_run_tree_remove(&bin->runs, run_mapelm);
 		}
 #if defined(MALLOC_DEBUG) || defined(MOZ_JEMALLOC_HARD_ASSERTS)
 		run->magic = 0;
 #endif
-		VALGRIND_FREELIKE_BLOCK(run, 0);
 		arena_run_dalloc(arena, run, true);
 #ifdef MALLOC_STATS
 		bin->stats.curruns--;
 #endif
 	} else if (run->nfree == 1 && run != bin->runcur) {
 		/*
 		 * Make sure that bin->runcur always refers to the lowest
 		 * non-full run, if one exists.
@@ -4586,17 +4561,16 @@ arena_dalloc(void *ptr, size_t offset)
 	RELEASE_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
 		malloc_spin_lock(&arena->lock);
 		arena_dalloc_small(arena, chunk, ptr, mapelm);
 		malloc_spin_unlock(&arena->lock);
 	} else
 		arena_dalloc_large(arena, chunk, ptr);
-	VALGRIND_FREELIKE_BLOCK(ptr, 0);
 }
 
 static inline void
 idalloc(void *ptr)
 {
 	size_t offset;
 
 	assert(ptr != NULL);
@@ -4789,38 +4763,20 @@ iralloc(void *ptr, size_t size)
 {
 	size_t oldsize;
 
 	assert(ptr != NULL);
 	assert(size != 0);
 
 	oldsize = isalloc(ptr);
 
-#ifndef MALLOC_VALGRIND
 	if (size <= arena_maxclass)
 		return (arena_ralloc(ptr, size, oldsize));
 	else
 		return (huge_ralloc(ptr, size, oldsize));
-#else
-	/*
-	 * Valgrind does not provide a public interface for modifying an
-	 * existing allocation, so use malloc/memcpy/free instead.
-	 */
-	{
-		void *ret = imalloc(size);
-		if (ret != NULL) {
-			if (oldsize < size)
-			    memcpy(ret, ptr, oldsize);
-			else
-			    memcpy(ret, ptr, size);
-			idalloc(ptr);
-		}
-		return (ret);
-	}
-#endif
 }
 
 static bool
 arena_new(arena_t *arena)
 {
 	unsigned i;
 	arena_bin_t *bin;
 	size_t pow2_size, prev_run_size;
@@ -5000,22 +4956,16 @@ huge_malloc(size_t size, bool zero)
 #endif
 	malloc_mutex_unlock(&huge_mtx);
 
 #ifdef MALLOC_DECOMMIT
 	if (csize - psize > 0)
 		pages_decommit((void *)((uintptr_t)ret + psize), csize - psize);
 #endif
 
-#ifdef MALLOC_DECOMMIT
-	VALGRIND_MALLOCLIKE_BLOCK(ret, psize, 0, zero);
-#else
-	VALGRIND_MALLOCLIKE_BLOCK(ret, csize, 0, zero);
-#endif
-
 #ifdef MALLOC_FILL
 	if (zero == false) {
 		if (opt_junk)
 #  ifdef MALLOC_DECOMMIT
 			memset(ret, 0xa5, psize);
 #  else
 			memset(ret, 0xa5, csize);
 #  endif
@@ -5122,22 +5072,16 @@ huge_palloc(size_t alignment, size_t siz
 
 #ifdef MALLOC_DECOMMIT
 	if (chunk_size - psize > 0) {
 		pages_decommit((void *)((uintptr_t)ret + psize),
 		    chunk_size - psize);
 	}
 #endif
 
-#ifdef MALLOC_DECOMMIT
-	VALGRIND_MALLOCLIKE_BLOCK(ret, psize, 0, false);
-#else
-	VALGRIND_MALLOCLIKE_BLOCK(ret, chunk_size, 0, false);
-#endif
-
 #ifdef MALLOC_FILL
 	if (opt_junk)
 #  ifdef MALLOC_DECOMMIT
 		memset(ret, 0xa5, psize);
 #  else
 		memset(ret, 0xa5, chunk_size);
 #  endif
 	else if (opt_zero)
@@ -5270,17 +5214,16 @@ huge_dalloc(void *ptr)
 	huge_allocated -= node->size;
 	huge_mapped -= CHUNK_CEILING(node->size);
 #endif
 
 	malloc_mutex_unlock(&huge_mtx);
 
 	/* Unmap chunk. */
 	chunk_dealloc(node->addr, CHUNK_CEILING(node->size));
-	VALGRIND_FREELIKE_BLOCK(node->addr, 0);
 
 	base_node_dealloc(node);
 }
 
 #ifndef MOZ_MEMORY_NARENAS_DEFAULT_ONE
 #ifdef MOZ_MEMORY_BSD
 static inline unsigned
 malloc_ncpus(void)