Bug 431743: Upstream jemalloc fixes, r=benjamin
Incorporate upstream jemalloc fixes from FreeBSD:
* Detect overflow for huge allocations via sbrk(2); an illustrative sketch follows this list.
* Fix deadlock for base (internal) allocations in the OOM case.
* Fix bitmap vector initialization for small allocation runs.
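Of the three, the sbrk(2) overflow check is the least obvious: sbrk takes a signed increment, so a sufficiently large size_t request could be reinterpreted as a negative value and move the break backwards instead of extending the data segment. Below is a minimal standalone sketch of the guard; sketch_chunk_alloc_dss is a hypothetical stand-in and the real sbrk(2) call is stubbed out, so this illustrates the check rather than the patched function.

/*
 * Illustrative only -- not part of the patch.  sbrk(2)'s increment is a
 * signed intptr_t, so a size_t request with the high bit set would be
 * reinterpreted as a negative increment.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void *
sketch_chunk_alloc_dss(size_t size)
{
    /* Same guard the patch adds before any sbrk(2) call. */
    if ((intptr_t)size < 0)
        return (NULL);

    /* A real implementation would extend the DSS with sbrk(size) here. */
    return ((void *)1);        /* Placeholder "success" for the demo. */
}

int
main(void)
{
    size_t huge = SIZE_MAX - 4095;    /* request with the high bit set */
    size_t sane = 1UL << 20;          /* ordinary 1 MiB request */

    printf("huge request -> %p\n", sketch_chunk_alloc_dss(huge));    /* NULL */
    printf("sane request -> %p\n", sketch_chunk_alloc_dss(sane));    /* non-NULL */
    return (0);
}

Without the guard, the increment computed from such a request could come out negative, which is exactly the case the new comment in chunk_alloc_dss() warns about.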
--- a/memory/jemalloc/jemalloc.c
+++ b/memory/jemalloc/jemalloc.c
@@ -258,17 +258,17 @@ typedef unsigned long long uintmax_t;
#ifndef MOZ_MEMORY_WINDOWS
#ifndef MOZ_MEMORY_SOLARIS
#include <sys/cdefs.h>
#endif
#ifndef __DECONST
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifndef MOZ_MEMORY
-__FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.162 2008/02/06 02:59:54 jasone Exp $");
+__FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 179704 2008-06-10 15:46:18Z jasone $");
#include "libc_private.h"
#ifdef MALLOC_DEBUG
# define _LOCK_DEBUG
#endif
#include "spinlock.h"
#include "namespace.h"
#endif
#include <sys/mman.h>
@@ -1753,18 +1753,20 @@ base_alloc(size_t size)
size_t csize;
/* Round size up to nearest multiple of the cacheline size. */
csize = CACHELINE_CEILING(size);
malloc_mutex_lock(&base_mtx);
/* Make sure there's enough space for the allocation. */
if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
- if (base_pages_alloc(csize))
+ if (base_pages_alloc(csize)) {
+ malloc_mutex_unlock(&base_mtx);
return (NULL);
+ }
}
/* Allocate. */
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
#ifdef MALLOC_DECOMMIT
/* Make sure enough pages are committed for the new allocation. */
if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
void *pbase_next_addr =
@@ -2123,16 +2125,23 @@ pages_unmap(void *addr, size_t size)
}
#endif
#ifdef MALLOC_DSS
static void *
chunk_alloc_dss(size_t size)
{
+ /*
+ * sbrk() uses a signed increment argument, so take care not to
+ * interpret a huge allocation request as a negative increment.
+ */
+ if ((intptr_t)size < 0)
+ return (NULL);
+
malloc_mutex_lock(&dss_mtx);
if (dss_prev != (void *)-1) {
intptr_t incr;
/*
* The loop is necessary to recover from races with other
* threads that are using the DSS for something other than
* malloc.
@@ -3394,20 +3403,22 @@ arena_bin_nonfull_run_get(arena_t *arena
return (NULL);
VALGRIND_MALLOCLIKE_BLOCK(run, sizeof(arena_run_t) + (sizeof(unsigned) *
(bin->regs_mask_nelms - 1)), 0, false);
/* Initialize run internals. */
run->bin = bin;
- for (i = 0; i < bin->regs_mask_nelms; i++)
+ for (i = 0; i < bin->regs_mask_nelms - 1; i++)
run->regs_mask[i] = UINT_MAX;
remainder = bin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
- if (remainder != 0) {
+ if (remainder == 0)
+ run->regs_mask[i] = UINT_MAX;
+ else {
/* The last element has spare bits that need to be unset. */
run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
- remainder));
}
run->regs_minelm = 0;
run->nfree = bin->nregs;
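For reference, here is a standalone sketch of the corrected regs_mask initialization, with the bin fields replaced by example constants (the names and values are illustrative, not taken from a real bin). In the pre-fix code the loop left i equal to regs_mask_nelms, so the spare-bits store targeted regs_mask[nelms], one element past the end of the array.

/*
 * Illustrative only -- bin fields replaced by local constants.
 */
#include <limits.h>
#include <stdio.h>

#define SKETCH_INT_2POW 2    /* stand-in for SIZEOF_INT_2POW; assumes 32-bit unsigned int */

int
main(void)
{
    unsigned nregs = 70;                              /* regions in the run (example) */
    unsigned bits = 1U << (SKETCH_INT_2POW + 3);      /* bits per mask word: 32 */
    unsigned nelms = (nregs + bits - 1) / bits;       /* mask words needed: 3 */
    unsigned regs_mask[3];                            /* sized for the example above */
    unsigned i, remainder;

    /* Fixed loop: stop one word short and fill the last word explicitly. */
    for (i = 0; i < nelms - 1; i++)
        regs_mask[i] = UINT_MAX;
    remainder = nregs & (bits - 1);
    if (remainder == 0)
        regs_mask[i] = UINT_MAX;
    else {
        /* Unset the spare bits so non-existent regions read as in use. */
        regs_mask[i] = UINT_MAX >> (bits - remainder);
    }

    printf("last mask word: 0x%08x\n", regs_mask[nelms - 1]);    /* 0x0000003f */
    return (0);
}

With nregs = 70 the last word comes out as 0x0000003f: the six real regions in that word are marked free and the 26 spare bits stay clear, and no store ever goes past regs_mask[nelms - 1].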