Bug 1400146 - Gracefully handle the allocator not being initialized in isalloc_validate. r=njn
author Mike Hommey <mh+mozilla@glandium.org>
date Fri, 15 Sep 2017 15:13:52 +0900
changeset 665564 013516394a9cd0ccfed501b78f796cfdf877654d
parent 665563 7f7d9a165b2fd988d4c8153352e0301dc9f364c5
child 665565 50770364778192f030136c3ac91528fbe994df28
push id 80115
push user bmo:eoger@fastmail.com
push date Fri, 15 Sep 2017 18:29:01 +0000
reviewers njn
bugs 1400146, 1399921
milestone 57.0a1
Bug 1400146 - Gracefully handle the allocator not being initialized in isalloc_validate. r=njn isalloc_validate is the function behind malloc_usable_size. If for some reason malloc_usable_size is called before mozjemalloc is initialized, this can lead to an unexpected crash. The chance of this actually happening is rather slim on Linux and Windows (although still possible), and impossible on Mac, due to the fact the earlier something can end up calling it is after the mozjemalloc zone is registered, which happens after initialization. ... except with bug 1399921, which reorders that initialization, and puts the zone registration first. There's then a slim chance for the zone allocator to call into zone_size, which calls malloc_usable_size, to determine whether a pointer allocated by some other zone belongs to mozjemalloc's. And it turns out that does happen, during the startup of the plugin-container process on OSX 10.10 (but not more recent versions).
memory/build/mozjemalloc.cpp
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -3505,46 +3505,51 @@ arena_salloc(const void *ptr)
  * Validate ptr before assuming that it points to an allocation.  Currently,
  * the following validation is performed:
  *
  * + Check that ptr is not nullptr.
  *
  * + Check that ptr lies within a mapped chunk.
  */
 static inline size_t
-isalloc_validate(const void *ptr)
+isalloc_validate(const void* ptr)
 {
-	arena_chunk_t *chunk;
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (!chunk)
-		return (0);
-
-	if (!malloc_rtree_get(chunk_rtree, (uintptr_t)chunk))
-		return (0);
-
-	if (chunk != ptr) {
-		MOZ_DIAGNOSTIC_ASSERT(chunk->arena->magic == ARENA_MAGIC);
-		return (arena_salloc(ptr));
-	} else {
-		size_t ret;
-		extent_node_t *node;
-		extent_node_t key;
-
-		/* Chunk. */
-		key.addr = (void *)chunk;
-		malloc_mutex_lock(&huge_mtx);
-		node = extent_tree_ad_search(&huge, &key);
-		if (node)
-			ret = node->size;
-		else
-			ret = 0;
-		malloc_mutex_unlock(&huge_mtx);
-		return (ret);
-	}
+  /* If the allocator is not initialized, the pointer can't belong to it. */
+  if (malloc_initialized == false) {
+    return 0;
+  }
+
+  arena_chunk_t* chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(ptr);
+  if (!chunk) {
+    return 0;
+  }
+
+  if (!malloc_rtree_get(chunk_rtree, (uintptr_t)chunk)) {
+    return 0;
+  }
+
+  if (chunk != ptr) {
+    MOZ_DIAGNOSTIC_ASSERT(chunk->arena->magic == ARENA_MAGIC);
+    return arena_salloc(ptr);
+  } else {
+    size_t ret;
+    extent_node_t* node;
+    extent_node_t key;
+
+    /* Chunk. */
+    key.addr = (void*)chunk;
+    malloc_mutex_lock(&huge_mtx);
+    node = extent_tree_ad_search(&huge, &key);
+    if (node)
+      ret = node->size;
+    else
+      ret = 0;
+    malloc_mutex_unlock(&huge_mtx);
+    return ret;
+  }
 }
 
 static inline size_t
 isalloc(const void *ptr)
 {
 	size_t ret;
 	arena_chunk_t *chunk;