bug 571332 - jemalloc - avoiding the null check in the free method for non-huge allocations. r=jasone
author Igor Bukanov <igor@mir2.org>
Fri, 11 Jun 2010 16:22:14 +0200
changeset 47438 2e14a43ef3db669efe6d0ddee6417c6bc0a348e3
parent 47437 019cb92eb9b4e6df03787abc0e99a2a5997d61cb
child 47439 7ee77bc4bc8a39f4eec847a39ade34bbc70f100f
push id 1
push user root
push date Tue, 26 Apr 2011 22:38:44 +0000
treeherder mozilla-beta@bfdb6e623a36
reviewers jasone
bugs 571332
milestone 1.9.3a5pre
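The optimization relies on chunk alignment: every non-huge allocation lives inside a chunksize-aligned chunk, so CHUNK_ADDR2OFFSET(ptr) (the pointer's offset within its chunk) is non-zero for it, while huge allocations start on a chunk boundary and NULL masks to zero as well. free() can therefore branch on the offset alone and only test for NULL on the rare huge/NULL path, and arena_dalloc() can recover the chunk header directly from ptr - offset instead of being handed the chunk and arena. Below is a minimal sketch of the idea; CHUNK_2POW, the stub deallocation routines, and the fabricated addresses in main() are illustrative stand-ins, not code taken from jemalloc.c.

/*
 * Sketch of the offset trick used by the patch below.  Addresses in main()
 * are fabricated and only printed, never dereferenced.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define CHUNK_2POW	20	/* chunks assumed 1 MiB-aligned in this sketch */
#define CHUNKSIZE_MASK	(((size_t)1 << CHUNK_2POW) - 1)

/* Offset of an address within its chunk: 0 for NULL and for chunk-aligned
 * (huge) allocations, non-zero for anything an arena hands out. */
#define CHUNK_ADDR2OFFSET(a)	((size_t)((uintptr_t)(a) & CHUNKSIZE_MASK))

static void
huge_dalloc_sketch(void *ptr)
{
	printf("huge free %p\n", ptr);
}

static void
arena_dalloc_sketch(void *ptr, size_t offset)
{
	/* The chunk header sits at the chunk base, recovered from the offset. */
	void *chunk = (void *)((uintptr_t)ptr - offset);

	printf("arena free %p (chunk %p, offset %zu)\n", ptr, chunk, offset);
}

static void
free_sketch(void *ptr)
{
	size_t offset = CHUNK_ADDR2OFFSET(ptr);

	if (offset != 0)
		arena_dalloc_sketch(ptr, offset);	/* hot path: no NULL test */
	else if (ptr != NULL)
		huge_dalloc_sketch(ptr);		/* chunk-aligned: huge */
	/* else: free(NULL) is a no-op */
}

int
main(void)
{
	free_sketch(NULL);				/* no deallocation path taken */
	free_sketch((void *)(uintptr_t)0x100010);	/* offset 0x10: arena path */
	free_sketch((void *)(uintptr_t)0x200000);	/* offset 0: huge path */
	return 0;
}

The patch itself applies exactly this shape to free() and pushes the chunk/arena recovery down into arena_dalloc(), so the NULL check survives only on the cold huge-allocation branch.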
memory/jemalloc/jemalloc.c
--- a/memory/jemalloc/jemalloc.c
+++ b/memory/jemalloc/jemalloc.c
@@ -1,9 +1,9 @@
-/* -*- Mode: C; tab-width: 8; c-basic-offset: 8 -*- */
+/* -*- Mode: C; tab-width: 8; c-basic-offset: 8; indent-tabs-mode: t -*- */
 /* vim:set softtabstop=8 shiftwidth=8: */
 /*-
  * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -4202,50 +4202,55 @@ arena_dalloc_large(arena_t *arena, arena
 	arena->stats.ndalloc_large++;
 #endif
 
 	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
 	malloc_spin_unlock(&arena->lock);
 }
 
 static inline void
-arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+arena_dalloc(void *ptr, size_t offset)
 {
+	arena_chunk_t *chunk;
+	arena_t *arena;
 	size_t pageind;
 	arena_chunk_map_t *mapelm;
 
+	assert(ptr != NULL);
+	assert(offset != 0);
+	assert(CHUNK_ADDR2OFFSET(ptr) == offset);
+
+	chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
+	arena = chunk->arena;
 	assert(arena != NULL);
 	assert(arena->magic == ARENA_MAGIC);
-	assert(chunk->arena == arena);
-	assert(ptr != NULL);
-	assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
-	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+
+	pageind = offset >> pagesize_2pow;
 	mapelm = &chunk->map[pageind];
 	assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
 		malloc_spin_lock(&arena->lock);
 		arena_dalloc_small(arena, chunk, ptr, mapelm);
 		malloc_spin_unlock(&arena->lock);
 	} else
 		arena_dalloc_large(arena, chunk, ptr);
 	VALGRIND_FREELIKE_BLOCK(ptr, 0);
 }
 
 static inline void
 idalloc(void *ptr)
 {
-	arena_chunk_t *chunk;
+	size_t offset;
 
 	assert(ptr != NULL);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr)
-		arena_dalloc(chunk->arena, chunk, ptr);
+	offset = CHUNK_ADDR2OFFSET(ptr);
+	if (offset != 0)
+		arena_dalloc(ptr, offset);
 	else
 		huge_dalloc(ptr);
 }
 
 static void
 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t size, size_t oldsize)
 {
@@ -6040,23 +6045,30 @@ RETURN:
 	UTRACE(ptr, size, ret);
 	return (ret);
 }
 
 ZONE_INLINE
 void
 free(void *ptr)
 {
-
+	size_t offset;
+	
 	UTRACE(ptr, 0, 0);
-	if (ptr != NULL) {
-		assert(malloc_initialized);
-
-		idalloc(ptr);
-	}
+
+	/*
+	 * A version of idalloc that checks for NULL pointer but only for
+	 * huge allocations assuming that CHUNK_ADDR2OFFSET(NULL) == 0.
+	 */
+	assert(CHUNK_ADDR2OFFSET(NULL) == 0);
+	offset = CHUNK_ADDR2OFFSET(ptr);
+	if (offset != 0)
+		arena_dalloc(ptr, offset);
+	else if (ptr != NULL)
+		huge_dalloc(ptr);
 }
 
 /*
  * End malloc(3)-compatible functions.
  */
 /******************************************************************************/
 /*
  * Begin non-standard functions.