bug 420678. make malloc_usable_size check for valid allocations. patch from Jason Evans <jasone@canonware.com> r=me a=beltzner
author      pavlov@pavlov.net
date        Sat, 12 Apr 2008 12:39:11 -0700
changeset   14259 3d22add30ecd69a536c2bcddd4bc94e4a2441c72
parent      14258 dd9998cb5b98da191fdcefd9c48340d40c23bb2b
child       14260 0f55f2dfbb5f21100443bd65c519e704fc44fe92
push id     11
push user   bsmedberg@mozilla.com
push date   Tue, 15 Apr 2008 18:11:53 +0000
reviewers   me, beltzner
bugs        420678
milestone   1.9pre
memory/jemalloc/jemalloc.c
--- a/memory/jemalloc/jemalloc.c
+++ b/memory/jemalloc/jemalloc.c
@@ -121,16 +121,23 @@
 
    /* Support optional abort() on OOM. */
 #  define MALLOC_XMALLOC
 
    /* Support SYSV semantics. */
 #  define MALLOC_SYSV
 #endif
 
+/*
+ * MALLOC_VALIDATE causes malloc_usable_size() to perform some pointer
+ * validation.  There are many possible errors that validation does not even
+ * attempt to detect.
+ */
+#define MALLOC_VALIDATE
+
 /* Embed no-op macros that support memory allocation tracking via valgrind. */
 #ifdef MOZ_VALGRIND
 #  define MALLOC_VALGRIND
 #endif
 #ifdef MALLOC_VALGRIND
 #  include <valgrind/valgrind.h>
 #else
 #  define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
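The comment added above describes the new caller-visible contract: with MALLOC_VALIDATE defined, malloc_usable_size() (rewired later in this patch to call isalloc_validate()) reports 0 for NULL and for pointers that do not fall inside any chunk this allocator has mapped, instead of asserting or walking arena metadata for memory it does not own. A minimal sketch of that contract, assuming jemalloc is the process allocator and that the stack address used as a probe does not land inside a jemalloc chunk:

#include <assert.h>
#include <stdlib.h>

/* Prototype as used in this file; illustrative test, not part of the patch. */
size_t	malloc_usable_size(const void *ptr);

int
main(void)
{
	int on_stack;
	void *p = malloc(100);

	if (p != NULL)
		assert(malloc_usable_size(p) >= 100);	/* size class >= request */
	assert(malloc_usable_size(NULL) == 0);		/* NULL is checked, not assumed */
	assert(malloc_usable_size(&on_stack) == 0);	/* foreign pointer: no mapped chunk */
	free(p);
	return (0);
}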
@@ -185,16 +192,17 @@
 #define	bool BOOL
 #define	false FALSE
 #define	true TRUE
 #define	inline __inline
 #define	SIZE_T_MAX SIZE_MAX
 #define	STDERR_FILENO 2
 #define	PATH_MAX MAX_PATH
 #define	vsnprintf _vsnprintf
+#define	alloca _alloca
 #define	assert(f) /* we can't assert in the CRT */
 
 static unsigned long tlsIndex = 0xffffffff;
 
 #define	__thread
 #define	_pthread_self() __threadid()
 #define	issetugid() 0
 
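The alloca shim added above supports the pointer validation introduced later in this patch: isalloc_validate() snapshots the arenas vector into a buffer whose length (narenas) is only known at run time, and the Darwin-only code it replaces did that with a C99 variable-length array, which MSVC cannot compile. A portability sketch of the substitution; the include choices are assumptions about the usual platform headers (<malloc.h> for MSVC's _alloca(), <alloca.h> elsewhere), not lines from the patch:

#ifdef _MSC_VER
#  include <malloc.h>		/* MSVC declares _alloca() here */
#  define alloca _alloca
#else
#  include <alloca.h>
#endif

static void
snapshot_pointers(void **src, unsigned n)
{
	/* Run-time-sized stack buffer: valid until this function returns,
	 * needs no free(), and must never be handed back to the caller. */
	void **snapshot = alloca(n * sizeof(void *));
	unsigned i;

	for (i = 0; i < n; i++)
		snapshot[i] = src[i];
	/* ... use snapshot ... */
}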
@@ -3924,16 +3932,105 @@ arena_salloc(const void *ptr)
 		assert(node != NULL);
 		ret = node->size;
 		malloc_spin_unlock(&arena->lock);
 	}
 
 	return (ret);
 }
 
+#if (defined(MALLOC_VALIDATE) || defined(MOZ_MEMORY_DARWIN))
+/*
+ * Validate ptr before assuming that it points to an allocation.  Currently,
+ * the following validation is performed:
+ *
+ * + Check that ptr is not NULL.
+ *
+ * + Check that ptr lies within a mapped chunk.
+ */
+static inline size_t
+isalloc_validate(const void *ptr)
+{
+	arena_chunk_t *chunk;
+
+	if (ptr == NULL)
+		return (0);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr) {
+		arena_t *arena;
+		unsigned i;
+		arena_t **arenas_snapshot = alloca(narenas * sizeof(arena_t*));
+
+		if (narenas == 1) {
+			/*
+			 * Don't bother with the more expensive snapshotting
+			 * algorithm here, since there is only one arena, and
+			 * there are no race conditions that allow arenas[0] to
+			 * be stale on this processor under any conditions that
+			 * even remotely resemble normal program behavior.
+			 */
+			arenas_snapshot[0] = arenas[0];
+		} else {
+			/*
+			 * Make a copy of the arenas vector while holding
+			 * arenas_lock in order to assure that all elements are
+			 * up to date in this processor's cache.  Do this
+			 * outside the following loop in order to reduce lock
+			 * acquisitions.
+			 */
+			malloc_spin_lock(&arenas_lock);
+			memcpy(arenas_snapshot, arenas, sizeof(arena_t *) *
+			    narenas);
+			malloc_spin_unlock(&arenas_lock);
+		}
+
+		/* Region. */
+		for (i = 0; i < narenas; i++) {
+			arena = arenas_snapshot[i];
+
+			if (arena != NULL) {
+				/* Make sure ptr is within a chunk. */
+				malloc_spin_lock(&arena->lock);
+				if (RB_FIND(arena_chunk_tree_s, &arena->chunks,
+				    chunk) == chunk) {
+					malloc_spin_unlock(&arena->lock);
+					/*
+					 * We only lock in arena_salloc() for
+					 * large objects, so don't worry about
+					 * the overhead of possibly locking
+					 * twice.
+					 */
+					assert(chunk->arena->magic ==
+					    ARENA_MAGIC);
+					return (arena_salloc(ptr));
+				}
+				malloc_spin_unlock(&arena->lock);
+			}
+		}
+		return (0);
+	} else {
+		size_t ret;
+		extent_node_t *node;
+		extent_node_t key;
+
+		/* Chunk. */
+		key.addr = (void *)chunk;
+		malloc_mutex_lock(&huge_mtx);
+		node = RB_FIND(extent_tree_ad_s, &huge, &key);
+		if (node != NULL)
+			ret = node->size;
+		else
+			ret = 0;
+		malloc_mutex_unlock(&huge_mtx);
+		return (ret);
+	}
+}
+#endif
+
 static inline size_t
 isalloc(const void *ptr)
 {
 	size_t ret;
 	arena_chunk_t *chunk;
 
 	assert(ptr != NULL);
 
@@ -5952,19 +6049,23 @@ VISIBLE
 inline size_t
 moz_malloc_usable_size(const void *ptr)
 #else
 size_t
 malloc_usable_size(const void *ptr)
 #endif
 {
 
+#ifdef MALLOC_VALIDATE
+	return (isalloc_validate(ptr));
+#else
 	assert(ptr != NULL);
 
 	return (isalloc(ptr));
+#endif
 }
 
 #ifdef MOZ_MEMORY_WINDOWS
 void*
 _recalloc(void *ptr, size_t count, size_t size)
 {
 	size_t oldsize = (ptr != NULL) ? isalloc(ptr) : 0;
 	size_t newsize = count * size;
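With the branch above in place, a nonzero return from malloc_usable_size() also acts as an ownership claim, which is exactly the property the Darwin zone_size() in the next hunk depends on. A hypothetical helper built on that contract (illustrative; allocator_owns() is not a function in this patch):

#include <stdbool.h>
#include <stddef.h>

size_t	malloc_usable_size(const void *ptr);	/* as defined above */

/* True when ptr falls inside memory this allocator has mapped (per the
 * validation above); false for NULL and for foreign pointers.  Interior
 * pointers and other misuse are, as the MALLOC_VALIDATE comment warns, not
 * necessarily caught. */
static bool
allocator_owns(const void *ptr)
{
	return (malloc_usable_size(ptr) != 0);
}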
@@ -6073,84 +6174,27 @@ void
 
 #ifdef MOZ_MEMORY_DARWIN
 static malloc_zone_t zone;
 static struct malloc_introspection_t zone_introspect;
 
 static size_t
 zone_size(malloc_zone_t *zone, void *ptr)
 {
-	size_t ret = 0;
-	arena_chunk_t *chunk;
 
 	/*
 	 * There appear to be places within Darwin (such as setenv(3)) that
 	 * cause calls to this function with pointers that *no* zone owns.  If
 	 * we knew that all pointers were owned by *some* zone, we could split
 	 * our zone into two parts, and use one as the default allocator and
 	 * the other as the default deallocator/reallocator.  Since that will
 	 * not work in practice, we must check all pointers to assure that they
 	 * reside within a mapped chunk before determining size.
 	 */
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr) {
-		arena_t *arena;
-		unsigned i;
-		arena_t *arenas_snapshot[narenas];
-
-		/*
-		 * Make a copy of the arenas vector while holding arenas_lock in
-		 * order to assure that all elements are up to date in this
-		 * processor's cache.  Do this outside the following loop in
-		 * order to reduce lock acquisitions.
-		 */
-		malloc_spin_lock(&arenas_lock);
-		memcpy(&arenas_snapshot, arenas, sizeof(arena_t *) * narenas);
-		malloc_spin_unlock(&arenas_lock);
-
-		/* Region. */
-		for (i = 0; i < narenas; i++) {
-			arena = arenas_snapshot[i];
-
-			if (arena != NULL) {
-				bool own;
-
-				/* Make sure ptr is within a chunk. */
-				malloc_spin_lock(&arena->lock);
-				if (RB_FIND(arena_chunk_tree_s, &arena->chunks,
-				    chunk) == chunk)
-					own = true;
-				else
-					own = false;
-				malloc_spin_unlock(&arena->lock);
-
-				if (own) {
-					ret = arena_salloc(ptr);
-					goto RETURN;
-				}
-			}
-		}
-	} else {
-		extent_node_t *node;
-		extent_node_t key;
-
-		/* Chunk. */
-		key.addr = (void *)chunk;
-		malloc_mutex_lock(&huge_mtx);
-		node = RB_FIND(extent_tree_ad_s, &huge, &key);
-		if (node != NULL)
-			ret = node->size;
-		else
-			ret = 0;
-		malloc_mutex_unlock(&huge_mtx);
-	}
-
-RETURN:
-	return (ret);
+	return (isalloc_validate(ptr));
 }
 
 static void *
 zone_malloc(malloc_zone_t *zone, size_t size)
 {
 
 	return (moz_malloc(size));
 }
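For context on why zone_size() above must tolerate pointers it has never seen: Darwin's malloc layer walks every registered zone and treats a nonzero answer from a zone's size callback as a claim of ownership, so returning 0 is how the shared isalloc_validate() path lets foreign pointers fall through to the zone that actually owns them. A conceptual sketch of that dispatch using the public <malloc/malloc.h> API (find_owning_zone() is a hypothetical name; this illustrates the mechanism, not Darwin's actual implementation):

#include <mach/mach.h>
#include <malloc/malloc.h>

/* Ask each registered zone whether it owns ptr, the way the system malloc
 * layer does when it must route free()/realloc() for an arbitrary pointer. */
static malloc_zone_t *
find_owning_zone(void *ptr)
{
	vm_address_t *zone_addresses;
	unsigned count, i;

	if (malloc_get_all_zones(mach_task_self(), NULL, &zone_addresses,
	    &count) != KERN_SUCCESS)
		return (NULL);
	for (i = 0; i < count; i++) {
		malloc_zone_t *z = (malloc_zone_t *)zone_addresses[i];

		/* zone_size()-style callbacks return 0 for "not mine". */
		if (z->size(z, ptr) != 0)
			return (z);
	}
	return (NULL);
}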