Bug 766250 - Part 1: Enable more release-time jemalloc assertions. r=glandium
author Justin Lebar <justin.lebar@gmail.com>
Wed, 20 Jun 2012 01:22:40 -0400
changeset 101898 cc36f28dad83f9b3e3a81bf90142a89d320af050
parent 101897 453c74176a355451263c827ad160f1369236e1db
child 101899 4a44ebe3e8ffd8e1e4d618669d324f486ebaee17
push id unknown
push user unknown
push date unknown
reviewers glandium
bugs 766250
milestone 16.0a1
Bug 766250 - Part 1: Enable more release-time jemalloc assertions. r=glandium
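This patch converts a set of debug-only assert() calls in mozjemalloc into RELEASE_ASSERT() calls so the checks also fire in release builds. The definition of RELEASE_ASSERT is not shown in this diff; as a rough sketch only (the fprintf/abort body below is illustrative and is not the actual mozjemalloc macro), a release-time assertion can be written as an unconditional check that aborts on failure instead of compiling away when assertions are disabled:

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative sketch of a release-time assertion macro.  Unlike
 * assert(), which is compiled out when debug checks are disabled,
 * this check is always evaluated and aborts the process on failure.
 */
#define RELEASE_ASSERT(assertion)					\
	do {								\
		if (!(assertion)) {					\
			fprintf(stderr, "%s:%d: failed assertion: %s\n",\
			    __FILE__, __LINE__, #assertion);		\
			abort();					\
		}							\
	} while (0)

The cost of keeping these checks in release builds is one branch per call site; in exchange, heap corruption that violates an allocator invariant (for example, a clobbered run or arena magic number) turns into an immediate, diagnosable crash rather than silent memory corruption.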
memory/mozjemalloc/jemalloc.c
--- a/memory/mozjemalloc/jemalloc.c
+++ b/memory/mozjemalloc/jemalloc.c
@@ -2473,17 +2473,17 @@ malloc_rtree_new(unsigned bits)
 	malloc_rtree_t *ret;
 	unsigned bits_per_level, height, i;
 
 	bits_per_level = ffs(pow2_ceil((MALLOC_RTREE_NODESIZE /
 	    sizeof(void *)))) - 1;
 	height = bits / bits_per_level;
 	if (height * bits_per_level != bits)
 		height++;
-	assert(height * bits_per_level >= bits);
+	RELEASE_ASSERT(height * bits_per_level >= bits);
 
 	ret = (malloc_rtree_t*)base_calloc(1, sizeof(malloc_rtree_t) +
 	    (sizeof(unsigned) * (height - 1)));
 	if (ret == NULL)
 		return (NULL);
 
 	malloc_spin_init(&ret->lock);
 	ret->height = height;
@@ -2983,17 +2983,17 @@ choose_arena(void)
 #  ifdef MOZ_MEMORY_WINDOWS
 	ret = (arena_t*)TlsGetValue(tlsIndex);
 #  else
 	ret = arenas_map;
 #  endif
 
 	if (ret == NULL) {
 		ret = choose_arena_hard();
-		assert(ret != NULL);
+		RELEASE_ASSERT(ret != NULL);
 	}
 #else
 	if (isthreaded && narenas > 1) {
 		unsigned long ind;
 
 		/*
 		 * Hash _pthread_self() to one of the arenas.  There is a prime
 		 * number of arenas, so this has a reasonable chance of
@@ -3029,17 +3029,17 @@ choose_arena(void)
 			else
 				ret = arenas[ind];
 			malloc_spin_unlock(&arenas_lock);
 		}
 	} else
 		ret = arenas[0];
 #endif
 
-	assert(ret != NULL);
+	RELEASE_ASSERT(ret != NULL);
 	return (ret);
 }
 
 #ifndef NO_TLS
 /*
  * Choose an arena based on a per-thread value (slow-path code only, called
  * only by choose_arena()).
  */
@@ -3595,37 +3595,37 @@ arena_purge(arena_t *arena)
 #ifdef MALLOC_DEBUG
 	size_t ndirty = 0;
 	rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
 	    chunk) {
 		ndirty += chunk->ndirty;
 	} rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
 	assert(ndirty == arena->ndirty);
 #endif
-	assert(arena->ndirty > opt_dirty_max);
+	RELEASE_ASSERT(arena->ndirty > opt_dirty_max);
 
 #ifdef MALLOC_STATS
 	arena->stats.npurge++;
 #endif
 
 	/*
 	 * Iterate downward through chunks until enough dirty memory has been
 	 * purged.  Terminate as soon as possible in order to minimize the
 	 * number of system calls, even if a chunk has only been partially
 	 * purged.
 	 */
 	while (arena->ndirty > (opt_dirty_max >> 1)) {
 #ifdef MALLOC_DOUBLE_PURGE
 		bool madvised = false;
 #endif
 		chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty);
-		assert(chunk != NULL);
+		RELEASE_ASSERT(chunk != NULL);
 
 		for (i = chunk_npages - 1; chunk->ndirty > 0; i--) {
-			assert(i >= arena_chunk_header_npages);
+			RELEASE_ASSERT(i >= arena_chunk_header_npages);
 
 			if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
 #ifdef MALLOC_DECOMMIT
 				const size_t free_operation = CHUNK_MAP_DECOMMITTED;
 #else
 				const size_t free_operation = CHUNK_MAP_MADVISED;
 #endif
 				assert((chunk->map[i].bits &
@@ -4391,24 +4391,24 @@ arena_salloc(const void *ptr)
 	size_t pageind, mapbits;
 
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
 	mapbits = chunk->map[pageind].bits;
-	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+	RELEASE_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
 		arena_run_t *run = (arena_run_t *)(mapbits & ~pagesize_mask);
-		assert(run->magic == ARENA_RUN_MAGIC);
+		RELEASE_ASSERT(run->magic == ARENA_RUN_MAGIC);
 		ret = run->bin->reg_size;
 	} else {
 		ret = mapbits & ~pagesize_mask;
-		assert(ret != 0);
+		RELEASE_ASSERT(ret != 0);
 	}
 
 	return (ret);
 }
 
 #if (defined(MALLOC_VALIDATE) || defined(MOZ_MEMORY_DARWIN))
 /*
  * Validate ptr before assuming that it points to an allocation.  Currently,
@@ -4426,17 +4426,17 @@ isalloc_validate(const void *ptr)
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (chunk == NULL)
 		return (0);
 
 	if (malloc_rtree_get(chunk_rtree, (uintptr_t)chunk) == NULL)
 		return (0);
 
 	if (chunk != ptr) {
-		assert(chunk->arena->magic == ARENA_MAGIC);
+		RELEASE_ASSERT(chunk->arena->magic == ARENA_MAGIC);
 		return (arena_salloc(ptr));
 	} else {
 		size_t ret;
 		extent_node_t *node;
 		extent_node_t key;
 
 		/* Chunk. */
 		key.addr = (void *)chunk;
@@ -4471,17 +4471,17 @@ isalloc(const void *ptr)
 
 		/* Chunk (huge allocation). */
 
 		malloc_mutex_lock(&huge_mtx);
 
 		/* Extract from tree of huge allocations. */
 		key.addr = __DECONST(void *, ptr);
 		node = extent_tree_ad_search(&huge, &key);
-		assert(node != NULL);
+		RELEASE_ASSERT(node != NULL);
 
 		ret = node->size;
 
 		malloc_mutex_unlock(&huge_mtx);
 	}
 
 	return (ret);
 }
@@ -4684,17 +4684,17 @@ arena_ralloc_large_shrink(arena_t *arena
 
 static bool
 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t size, size_t oldsize)
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow;
 	size_t npages = oldsize >> pagesize_2pow;
 
-	assert(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
+	RELEASE_ASSERT(oldsize == (chunk->map[pageind].bits & ~pagesize_mask));
 
 	/* Try to extend the run. */
 	assert(size > oldsize);
 #ifdef MALLOC_BALANCE
 	arena_lock_balance(arena);
 #else
 	malloc_spin_lock(&arena->lock);
 #endif
@@ -4746,17 +4746,17 @@ arena_ralloc_large(void *ptr, size_t siz
 #endif
 		return (false);
 	} else {
 		arena_chunk_t *chunk;
 		arena_t *arena;
 
 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 		arena = chunk->arena;
-		assert(arena->magic == ARENA_MAGIC);
+		RELEASE_ASSERT(arena->magic == ARENA_MAGIC);
 
 		if (psize < oldsize) {
 #ifdef MALLOC_FILL
 			/* Fill before shrinking in order avoid a race. */
 			if (opt_junk) {
 				memset((void *)((uintptr_t)ptr + size), 0x5a,
 				    oldsize - size);
 			}
@@ -6741,17 +6741,17 @@ hard_purge_chunk(arena_chunk_t *chunk)
 	for (i = arena_chunk_header_npages; i < chunk_npages; i++) {
 		/* Find all adjacent pages with CHUNK_MAP_MADVISED set. */
 		size_t npages;
 		for (npages = 0;
 		     chunk->map[i + npages].bits & CHUNK_MAP_MADVISED && i + npages < chunk_npages;
 		     npages++) {
 			/* Turn off the chunk's MADV_FREED bit and turn on its
 			 * DECOMMITTED bit. */
-			assert(!(chunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
+			RELEASE_ASSERT(!(chunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
 			chunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
 		}
 
 		/* We could use mincore to find out which pages are actually
 		 * present, but it's not clear that's better. */
 		if (npages > 0) {
 			pages_decommit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);
 			pages_commit(((char*)chunk) + (i << pagesize_2pow), npages << pagesize_2pow);