--- a/memory/jemalloc/jemalloc.c
+++ b/memory/jemalloc/jemalloc.c
@@ -364,34 +364,34 @@ typedef long ssize_t;
#include "un-namespace.h"
#endif
#endif
#include "jemalloc.h"
#undef bool
#define bool jemalloc_bool
#ifdef MOZ_MEMORY_DARWIN
static const bool __isthreaded = true;
#endif
#if defined(MOZ_MEMORY_SOLARIS) && defined(MAP_ALIGN) && !defined(JEMALLOC_NEVER_USES_MAP_ALIGN)
#define JEMALLOC_USES_MAP_ALIGN /* Required on Solaris 10. Might improve performance elsewhere. */
#endif
#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
#define JEMALLOC_USES_MAP_ALIGN /* Required for Windows CE < 6 */
#endif
#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
-#include "qr.h"
-#include "ql.h"
#ifdef MOZ_MEMORY_WINDOWS
/* MSVC++ does not support C99 variable-length arrays. */
# define RB_NO_C99_VARARRAYS
#endif
#include "rb.h"
#ifdef MALLOC_DEBUG
/* Disable inlining to make debugging easier. */
@@ -485,28 +485,16 @@ static const bool __isthreaded = true;
#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
#define CHUNK_2POW_DEFAULT 21
#else
#define CHUNK_2POW_DEFAULT 20
#endif
/* Maximum number of dirty pages per arena. */
#define DIRTY_MAX_DEFAULT (1U << 10)
-/* Default reserve chunks. */
-#define RESERVE_MIN_2POW_DEFAULT 1
-/*
- * Default range (in chunks) between reserve_min and reserve_max, in addition
- * to the mandatory one chunk per arena.
- */
-#ifdef MALLOC_PAGEFILE
-# define RESERVE_RANGE_2POW_DEFAULT 5
-#else
-# define RESERVE_RANGE_2POW_DEFAULT 0
-#endif
-
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing,
* so over-estimates are okay (up to a point), but under-estimates will
* negatively affect performance.
*/
#define CACHELINE_2POW 6
#define CACHELINE ((size_t)(1U << CACHELINE_2POW))
@@ -761,40 +749,16 @@ struct malloc_rtree_s {
void **root;
unsigned height;
unsigned level2bits[1]; /* Dynamically sized. */
};
#endif
/******************************************************************************/
/*
- * Reserve data structures.
- */
-
-/* Callback registration. */
-typedef struct reserve_reg_s reserve_reg_t;
-struct reserve_reg_s {
- /* Linkage for list of all registered callbacks. */
- ql_elm(reserve_reg_t) link;
-
- /* Callback function pointer. */
- reserve_cb_t *cb;
-
- /* Opaque application data pointer. */
- void *ctx;
-
- /*
- * Sequence number of condition notification most recently sent to this
- * callback.
- */
- uint64_t seq;
-};
-
-/******************************************************************************/
-/*
* Arena data structures.
*/
typedef struct arena_s arena_t;
typedef struct arena_bin_s arena_bin_t;
/* Each element of the chunk map corresponds to one page within the chunk. */
typedef struct arena_chunk_map_s arena_chunk_map_t;
@@ -948,22 +912,16 @@ struct arena_s {
#else
pthread_mutex_t lock;
#endif
#ifdef MALLOC_STATS
arena_stats_t stats;
#endif
- /*
- * Chunk allocation sequence number, used to detect races with other
- * threads during chunk allocation, and then discard unnecessary chunks.
- */
- uint64_t chunk_seq;
-
/* Tree of dirty-page-containing chunks this arena manages. */
arena_chunk_tree_t chunks_dirty;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
* recently freed chunk. The spare is left in the arena's chunk trees
* until it is deleted.
@@ -1071,53 +1029,20 @@ static extent_tree_t huge;
#ifdef MALLOC_STATS
/* Huge allocation statistics. */
static uint64_t huge_nmalloc;
static uint64_t huge_ndalloc;
static size_t huge_allocated;
#endif
-/****************/
-/*
- * Memory reserve.
- */
-
#ifdef MALLOC_PAGEFILE
static char pagefile_templ[PATH_MAX];
#endif
-/* Protects reserve-related data structures. */
-static malloc_mutex_t reserve_mtx;
-
-/*
- * Bounds on acceptable reserve size, and current reserve size. Reserve
- * depletion may cause (reserve_cur < reserve_min).
- */
-static size_t reserve_min;
-static size_t reserve_cur;
-static size_t reserve_max;
-
-/* List of registered callbacks. */
-static ql_head(reserve_reg_t) reserve_regs;
-
-/*
- * Condition notification sequence number, used to determine whether all
- * registered callbacks have been notified of the most current condition.
- */
-static uint64_t reserve_seq;
-
-/*
- * Trees of chunks currently in the memory reserve. Depending on function,
- * different tree orderings are needed, which is why there are two trees with
- * the same contents.
- */
-static extent_tree_t reserve_chunks_szad;
-static extent_tree_t reserve_chunks_ad;
-
/****************************/
/*
* base (internal allocation).
*/
/*
* Current pages that are being used for internal memory allocations. These
* pages are carved up in cacheline-size quanta, so that there is no chance of
@@ -1125,34 +1050,32 @@ static extent_tree_t reserve_chunks_ad;
*/
static void *base_pages;
static void *base_next_addr;
#ifdef MALLOC_DECOMMIT
static void *base_next_decommitted;
#endif
static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes;
-static reserve_reg_t *base_reserve_regs;
static malloc_mutex_t base_mtx;
#ifdef MALLOC_STATS
static size_t base_mapped;
#endif
/********/
/*
* Arenas.
*/
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
*/
static arena_t **arenas;
static unsigned narenas;
-static unsigned narenas_2pow;
#ifndef NO_TLS
# ifdef MALLOC_BALANCE
static unsigned narenas_2pow;
# else
static unsigned next_arena;
# endif
#endif
#ifdef MOZ_MEMORY
@@ -1196,18 +1119,16 @@ static bool opt_junk = false;
static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
#ifdef MALLOC_BALANCE
static uint64_t opt_balance_threshold = BALANCE_THRESHOLD_DEFAULT;
#endif
static bool opt_print_stats = false;
static size_t opt_quantum_2pow = QUANTUM_2POW_MIN;
static size_t opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
static size_t opt_chunk_2pow = CHUNK_2POW_DEFAULT;
-static int opt_reserve_min_lshift = 0;
-static int opt_reserve_range_lshift = 0;
#ifdef MALLOC_PAGEFILE
static bool opt_pagefile = false;
#endif
#ifdef MALLOC_UTRACE
static bool opt_utrace = false;
#endif
#ifdef MALLOC_SYSV
static bool opt_sysv = false;
@@ -1257,31 +1178,27 @@ static void wrtmessage(const char *p1, c
static void malloc_printf(const char *format, ...);
#endif
static bool base_pages_alloc_mmap(size_t minsize);
static bool base_pages_alloc(size_t minsize);
static void *base_alloc(size_t size);
static void *base_calloc(size_t number, size_t size);
static extent_node_t *base_node_alloc(void);
static void base_node_dealloc(extent_node_t *node);
-static reserve_reg_t *base_reserve_reg_alloc(void);
-static void base_reserve_reg_dealloc(reserve_reg_t *reg);
#ifdef MALLOC_STATS
static void stats_print(arena_t *arena);
#endif
static void *pages_map(void *addr, size_t size, int pfd);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap(size_t size, bool pagefile);
#ifdef MALLOC_PAGEFILE
static int pagefile_init(size_t size);
static void pagefile_close(int pfd);
#endif
-static void *chunk_recycle_reserve(size_t size, bool zero);
static void *chunk_alloc(size_t size, bool zero, bool pagefile);
-static extent_node_t *chunk_dealloc_reserve(void *chunk, size_t size);
static void chunk_dealloc_mmap(void *chunk, size_t size);
static void chunk_dealloc(void *chunk, size_t size);
#ifndef NO_TLS
static arena_t *choose_arena_hard(void);
#endif
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
bool large, bool zero);
static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
@@ -1318,20 +1235,16 @@ static void *huge_malloc(size_t size, bo
static void *huge_palloc(size_t alignment, size_t size);
static void *huge_ralloc(void *ptr, size_t size, size_t oldsize);
static void huge_dalloc(void *ptr);
static void malloc_print_stats(void);
#ifndef MOZ_MEMORY_WINDOWS
static
#endif
bool malloc_init_hard(void);
-static void reserve_shrink(void);
-static uint64_t reserve_notify(reserve_cnd_t cnd, size_t size, uint64_t seq);
-static uint64_t reserve_crit(size_t size, const char *fname, uint64_t seq);
-static void reserve_fail(size_t size, const char *fname);
void _malloc_prefork(void);
void _malloc_postfork(void);
/*
* End function prototypes.
*/
/******************************************************************************/
@@ -1942,48 +1855,16 @@ base_node_dealloc(extent_node_t *node)
malloc_mutex_lock(&base_mtx);
VALGRIND_FREELIKE_BLOCK(node, 0);
VALGRIND_MALLOCLIKE_BLOCK(node, sizeof(extent_node_t *), 0, false);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
malloc_mutex_unlock(&base_mtx);
}
-static reserve_reg_t *
-base_reserve_reg_alloc(void)
-{
- reserve_reg_t *ret;
-
- malloc_mutex_lock(&base_mtx);
- if (base_reserve_regs != NULL) {
- ret = base_reserve_regs;
- base_reserve_regs = *(reserve_reg_t **)ret;
- VALGRIND_FREELIKE_BLOCK(ret, 0);
- VALGRIND_MALLOCLIKE_BLOCK(ret, sizeof(reserve_reg_t), 0, false);
- malloc_mutex_unlock(&base_mtx);
- } else {
- malloc_mutex_unlock(&base_mtx);
- ret = (reserve_reg_t *)base_alloc(sizeof(reserve_reg_t));
- }
-
- return (ret);
-}
-
-static void
-base_reserve_reg_dealloc(reserve_reg_t *reg)
-{
-
- malloc_mutex_lock(&base_mtx);
- VALGRIND_FREELIKE_BLOCK(reg, 0);
- VALGRIND_MALLOCLIKE_BLOCK(reg, sizeof(reserve_reg_t *), 0, false);
- *(reserve_reg_t **)reg = base_reserve_regs;
- base_reserve_regs = reg;
- malloc_mutex_unlock(&base_mtx);
-}
-
/******************************************************************************/
#ifdef MALLOC_STATS
static void
stats_print(arena_t *arena)
{
unsigned i, gap_start;
@@ -2618,125 +2499,23 @@ pagefile_close(int pfd)
": (malloc) Error in close(): ", buf, "\n");
if (opt_abort)
abort();
}
}
#endif
static void *
-chunk_recycle_reserve(size_t size, bool zero)
-{
- extent_node_t *node, key;
-
-#ifdef MALLOC_DECOMMIT
- if (size != chunksize)
- return (NULL);
-#endif
-
- key.addr = NULL;
- key.size = size;
- malloc_mutex_lock(&reserve_mtx);
- node = extent_tree_szad_nsearch(&reserve_chunks_szad, &key);
- if (node != NULL) {
- void *ret = node->addr;
-
- /* Remove node from the tree. */
- extent_tree_szad_remove(&reserve_chunks_szad, node);
-#ifndef MALLOC_DECOMMIT
- if (node->size == size) {
-#else
- assert(node->size == size);
-#endif
- extent_tree_ad_remove(&reserve_chunks_ad, node);
- base_node_dealloc(node);
-#ifndef MALLOC_DECOMMIT
- } else {
- /*
- * Insert the remainder of node's address range as a
- * smaller chunk. Its position within reserve_chunks_ad
- * does not change.
- */
- assert(node->size > size);
- node->addr = (void *)((uintptr_t)node->addr + size);
- node->size -= size;
- extent_tree_szad_insert(&reserve_chunks_szad, node);
- }
-#endif
- reserve_cur -= size;
- /*
- * Try to replenish the reserve if this allocation depleted it.
- */
-#ifndef MALLOC_DECOMMIT
- if (reserve_cur < reserve_min) {
- size_t diff = reserve_min - reserve_cur;
-#else
- while (reserve_cur < reserve_min) {
-# define diff chunksize
-#endif
- void *chunk;
-
- malloc_mutex_unlock(&reserve_mtx);
- chunk = chunk_alloc_mmap(diff, true);
- malloc_mutex_lock(&reserve_mtx);
- if (chunk == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_notify(RESERVE_CND_LOW,
- size, seq);
- if (seq == 0)
- goto MALLOC_OUT;
- } while (reserve_cur < reserve_min);
- } else {
- extent_node_t *node;
-
- node = chunk_dealloc_reserve(chunk, diff);
- if (node == NULL) {
- uint64_t seq = 0;
-
- pages_unmap(chunk, diff);
- do {
- seq = reserve_notify(
- RESERVE_CND_LOW, size, seq);
- if (seq == 0)
- goto MALLOC_OUT;
- } while (reserve_cur < reserve_min);
- }
- }
- }
-MALLOC_OUT:
- malloc_mutex_unlock(&reserve_mtx);
-
-#ifdef MALLOC_DECOMMIT
- pages_commit(ret, size);
-# undef diff
-#else
- if (zero)
- memset(ret, 0, size);
-#endif
- return (ret);
- }
- malloc_mutex_unlock(&reserve_mtx);
-
- return (NULL);
-}
-
-static void *
chunk_alloc(size_t size, bool zero, bool pagefile)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
- ret = chunk_recycle_reserve(size, zero);
- if (ret != NULL)
- goto RETURN;
-
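+	/* With the reserve gone, chunks are always allocated directly via mmap. */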
ret = chunk_alloc_mmap(size, pagefile);
if (ret != NULL) {
goto RETURN;
}
/* All strategies for allocation failed. */
ret = NULL;
RETURN:
@@ -2755,117 +2534,40 @@ RETURN:
}
}
#endif
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
-static extent_node_t *
-chunk_dealloc_reserve(void *chunk, size_t size)
-{
- extent_node_t *node;
-
-#ifdef MALLOC_DECOMMIT
- if (size != chunksize)
- return (NULL);
-#else
- extent_node_t *prev, key;
-
- key.addr = (void *)((uintptr_t)chunk + size);
- node = extent_tree_ad_nsearch(&reserve_chunks_ad, &key);
- /* Try to coalesce forward. */
- if (node != NULL && node->addr == key.addr) {
- /*
- * Coalesce chunk with the following address range. This does
- * not change the position within reserve_chunks_ad, so only
- * remove/insert from/into reserve_chunks_szad.
- */
- extent_tree_szad_remove(&reserve_chunks_szad, node);
- node->addr = chunk;
- node->size += size;
- extent_tree_szad_insert(&reserve_chunks_szad, node);
- } else {
-#endif
- /* Coalescing forward failed, so insert a new node. */
- node = base_node_alloc();
- if (node == NULL)
- return (NULL);
- node->addr = chunk;
- node->size = size;
- extent_tree_ad_insert(&reserve_chunks_ad, node);
- extent_tree_szad_insert(&reserve_chunks_szad, node);
-#ifndef MALLOC_DECOMMIT
- }
-
- /* Try to coalesce backward. */
- prev = extent_tree_ad_prev(&reserve_chunks_ad, node);
- if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
- chunk) {
- /*
- * Coalesce chunk with the previous address range. This does
- * not change the position within reserve_chunks_ad, so only
- * remove/insert node from/into reserve_chunks_szad.
- */
- extent_tree_szad_remove(&reserve_chunks_szad, prev);
- extent_tree_ad_remove(&reserve_chunks_ad, prev);
-
- extent_tree_szad_remove(&reserve_chunks_szad, node);
- node->addr = prev->addr;
- node->size += prev->size;
- extent_tree_szad_insert(&reserve_chunks_szad, node);
-
- base_node_dealloc(prev);
- }
-#endif
-
-#ifdef MALLOC_DECOMMIT
- pages_decommit(chunk, size);
-#else
- madvise(chunk, size, MADV_FREE);
-#endif
-
- reserve_cur += size;
- if (reserve_cur > reserve_max)
- reserve_shrink();
-
- return (node);
-}
-
static void
chunk_dealloc_mmap(void *chunk, size_t size)
{
pages_unmap(chunk, size);
}
static void
chunk_dealloc(void *chunk, size_t size)
{
- extent_node_t *node;
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
#ifdef MALLOC_STATS
stats_chunks.curchunks -= (size / chunksize);
#endif
#ifdef MALLOC_VALIDATE
malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, NULL);
#endif
- /* Try to merge chunk into the reserve. */
- malloc_mutex_lock(&reserve_mtx);
- node = chunk_dealloc_reserve(chunk, size);
- malloc_mutex_unlock(&reserve_mtx);
- if (node == NULL)
- chunk_dealloc_mmap(chunk, size);
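+	/* There is no reserve to return the chunk to; unmap it immediately. */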
+ chunk_dealloc_mmap(chunk, size);
}
/*
* End chunk management functions.
*/
/******************************************************************************/
/*
* Begin arena.
@@ -3463,59 +3165,18 @@ arena_run_alloc(arena_t *arena, arena_bi
return (run);
}
/*
* No usable runs. Create a new chunk from which to allocate
* the run.
*/
if (chunk == NULL) {
- uint64_t chunk_seq;
-
- /*
- * Record the chunk allocation sequence number in order
- * to detect races.
- */
- arena->chunk_seq++;
- chunk_seq = arena->chunk_seq;
-
- /*
- * Drop the arena lock while allocating a chunk, since
- * reserve notifications may cause recursive
- * allocation. Dropping the lock here opens an
- * allocataion race, but we recover.
- * allocation race, but we recover.
- malloc_mutex_unlock(&arena->lock);
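+ /*
+ * Without reserve callbacks, chunk allocation cannot recurse,
+ * so it is now safe to hold the arena lock across chunk_alloc().
+ */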
chunk = (arena_chunk_t *)chunk_alloc(chunksize, true,
true);
- malloc_mutex_lock(&arena->lock);
-
- /*
- * Check whether a race allowed a usable run to appear.
- */
- if (bin != NULL && (run = bin->runcur) != NULL &&
- run->nfree > 0) {
- if (chunk != NULL)
- chunk_dealloc(chunk, chunksize);
- return (run);
- }
-
- /*
- * If this thread raced with another such that multiple
- * chunks were allocated, make sure that there is still
- * inadequate space before using this chunk.
- */
- if (chunk_seq != arena->chunk_seq)
- continue;
-
- /*
- * Check for an error *after* checking for a race,
- * since a race could also cause a transient OOM
- * condition.
- */
if (chunk == NULL)
return (NULL);
}
arena_chunk_init(arena, chunk);
run = (arena_run_t *)((uintptr_t)chunk +
(arena_chunk_header_npages << pagesize_2pow));
/* Update page map. */
@@ -4785,18 +4446,16 @@ arena_new(arena_t *arena)
if (malloc_spin_init(&arena->lock))
return (true);
#ifdef MALLOC_STATS
memset(&arena->stats, 0, sizeof(arena_stats_t));
#endif
- arena->chunk_seq = 0;
-
/* Initialize chunks. */
arena_chunk_tree_dirty_new(&arena->chunks_dirty);
arena->spare = NULL;
arena->ndirty = 0;
arena_avail_tree_new(&arena->runs_avail);
@@ -5435,32 +5094,16 @@ malloc_print_stats(void)
#ifdef MOZ_MEMORY_WINDOWS
malloc_printf("Allocated: %lu, mapped: %lu\n",
allocated, mapped);
#else
malloc_printf("Allocated: %zu, mapped: %zu\n",
allocated, mapped);
#endif
- malloc_mutex_lock(&reserve_mtx);
- malloc_printf("Reserve: min "
- "cur max\n");
-#ifdef MOZ_MEMORY_WINDOWS
- malloc_printf(" %12lu %12lu %12lu\n",
- CHUNK_CEILING(reserve_min) >> opt_chunk_2pow,
- reserve_cur >> opt_chunk_2pow,
- reserve_max >> opt_chunk_2pow);
-#else
- malloc_printf(" %12zu %12zu %12zu\n",
- CHUNK_CEILING(reserve_min) >> opt_chunk_2pow,
- reserve_cur >> opt_chunk_2pow,
- reserve_max >> opt_chunk_2pow);
-#endif
- malloc_mutex_unlock(&reserve_mtx);
-
#ifdef MALLOC_BALANCE
malloc_printf("Arena balance reassignments: %llu\n",
nbalance);
#endif
/* Print chunk stats. */
{
chunk_stats_t chunks_stats;
@@ -5721,22 +5364,16 @@ MALLOC_OUT:
opt_dirty_max >>= 1;
break;
case 'F':
if (opt_dirty_max == 0)
opt_dirty_max = 1;
else if ((opt_dirty_max << 1) != 0)
opt_dirty_max <<= 1;
break;
- case 'g':
- opt_reserve_range_lshift--;
- break;
- case 'G':
- opt_reserve_range_lshift++;
- break;
#ifdef MALLOC_FILL
case 'j':
opt_junk = false;
break;
case 'J':
opt_junk = true;
break;
#endif
@@ -5780,22 +5417,16 @@ MALLOC_OUT:
if (opt_quantum_2pow > QUANTUM_2POW_MIN)
opt_quantum_2pow--;
break;
case 'Q':
if (opt_quantum_2pow < pagesize_2pow -
1)
opt_quantum_2pow++;
break;
- case 'r':
- opt_reserve_min_lshift--;
- break;
- case 'R':
- opt_reserve_min_lshift++;
- break;
case 's':
if (opt_small_max_2pow >
QUANTUM_2POW_MIN)
opt_small_max_2pow--;
break;
case 'S':
if (opt_small_max_2pow < pagesize_2pow
- 1)
@@ -5935,17 +5566,16 @@ MALLOC_OUT:
huge_allocated = 0;
#endif
/* Initialize base allocation data structures. */
#ifdef MALLOC_STATS
base_mapped = 0;
#endif
base_nodes = NULL;
- base_reserve_regs = NULL;
malloc_mutex_init(&base_mtx);
#ifdef MOZ_MEMORY_NARENAS_DEFAULT_ONE
narenas = 1;
#else
if (ncpus > 1) {
/*
* For SMP systems, create four times as many arenas as there
@@ -6062,37 +5692,16 @@ MALLOC_OUT:
malloc_spin_init(&arenas_lock);
#ifdef MALLOC_VALIDATE
chunk_rtree = malloc_rtree_new((SIZEOF_PTR << 3) - opt_chunk_2pow);
if (chunk_rtree == NULL)
return (true);
#endif
- /*
- * Configure and initialize the memory reserve. This needs to happen
- * late during initialization, since chunks are allocated.
- */
- malloc_mutex_init(&reserve_mtx);
- reserve_min = 0;
- reserve_cur = 0;
- reserve_max = 0;
- if (RESERVE_RANGE_2POW_DEFAULT + opt_reserve_range_lshift >= 0) {
- reserve_max += chunksize << (RESERVE_RANGE_2POW_DEFAULT +
- opt_reserve_range_lshift);
- }
- ql_new(&reserve_regs);
- reserve_seq = 0;
- extent_tree_szad_new(&reserve_chunks_szad);
- extent_tree_ad_new(&reserve_chunks_ad);
- if (RESERVE_MIN_2POW_DEFAULT + opt_reserve_min_lshift >= 0) {
- reserve_min_set(chunksize << (RESERVE_MIN_2POW_DEFAULT +
- opt_reserve_min_lshift));
- }
-
malloc_initialized = true;
#ifndef MOZ_MEMORY_WINDOWS
malloc_mutex_unlock(&init_lock);
#endif
return (false);
}
/* XXX Why not just expose malloc_print_stats()? */
@@ -6454,22 +6063,16 @@ jemalloc_stats(jemalloc_stats_t *stats)
#endif
;
stats->quantum = quantum;
stats->small_max = small_max;
stats->large_max = arena_maxclass;
stats->chunksize = chunksize;
stats->dirty_max = opt_dirty_max;
- malloc_mutex_lock(&reserve_mtx);
- stats->reserve_min = reserve_min;
- stats->reserve_max = reserve_max;
- stats->reserve_cur = reserve_cur;
- malloc_mutex_unlock(&reserve_mtx);
-
/*
* Gather current memory usage statistics.
*/
stats->mapped = 0;
stats->committed = 0;
stats->allocated = 0;
stats->dirty = 0;
@@ -6517,464 +6120,16 @@ jemalloc_stats(jemalloc_stats_t *stats)
}
}
#ifndef MALLOC_DECOMMIT
stats->committed = stats->mapped;
#endif
}
-void *
-xmalloc(size_t size)
-{
- void *ret;
-
- if (malloc_init())
- reserve_fail(size, "xmalloc");
-
- if (size == 0) {
-#ifdef MALLOC_SYSV
- if (opt_sysv == false)
-#endif
- size = 1;
-#ifdef MALLOC_SYSV
- else {
- _malloc_message(_getprogname(),
- ": (malloc) Error in xmalloc(): ",
- "invalid size 0", "\n");
- abort();
- }
-#endif
- }
-
- ret = imalloc(size);
- if (ret == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_crit(size, "xmalloc", seq);
- ret = imalloc(size);
- } while (ret == NULL);
- }
-
- UTRACE(0, size, ret);
- return (ret);
-}
-
-void *
-xcalloc(size_t num, size_t size)
-{
- void *ret;
- size_t num_size;
-
- num_size = num * size;
- if (malloc_init())
- reserve_fail(num_size, "xcalloc");
-
- if (num_size == 0) {
-#ifdef MALLOC_SYSV
- if ((opt_sysv == false) && ((num == 0) || (size == 0)))
-#endif
- num_size = 1;
-#ifdef MALLOC_SYSV
- else {
- _malloc_message(_getprogname(),
- ": (malloc) Error in xcalloc(): ",
- "invalid size 0", "\n");
- abort();
- }
-#endif
- /*
- * Try to avoid division here. We know that it isn't possible to
- * overflow during multiplication if neither operand uses any of the
- * most significant half of the bits in a size_t.
- */
- } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
- && (num_size / size != num)) {
- /* size_t overflow. */
- _malloc_message(_getprogname(),
- ": (malloc) Error in xcalloc(): ",
- "size overflow", "\n");
- abort();
- }
-
- ret = icalloc(num_size);
- if (ret == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_crit(num_size, "xcalloc", seq);
- ret = icalloc(num_size);
- } while (ret == NULL);
- }
-
- UTRACE(0, num_size, ret);
- return (ret);
-}
-
-void *
-xrealloc(void *ptr, size_t size)
-{
- void *ret;
-
- if (size == 0) {
-#ifdef MALLOC_SYSV
- if (opt_sysv == false)
-#endif
- size = 1;
-#ifdef MALLOC_SYSV
- else {
- if (ptr != NULL)
- idalloc(ptr);
- _malloc_message(_getprogname(),
- ": (malloc) Error in xrealloc(): ",
- "invalid size 0", "\n");
- abort();
- }
-#endif
- }
-
- if (ptr != NULL) {
- assert(malloc_initialized);
-
- ret = iralloc(ptr, size);
- if (ret == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_crit(size, "xrealloc", seq);
- ret = iralloc(ptr, size);
- } while (ret == NULL);
- }
- } else {
- if (malloc_init())
- reserve_fail(size, "xrealloc");
-
- ret = imalloc(size);
- if (ret == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_crit(size, "xrealloc", seq);
- ret = imalloc(size);
- } while (ret == NULL);
- }
- }
-
- UTRACE(ptr, size, ret);
- return (ret);
-}
-
-void *
-xmemalign(size_t alignment, size_t size)
-{
- void *ret;
-
- assert(((alignment - 1) & alignment) == 0 && alignment >=
- sizeof(void *));
-
- if (malloc_init())
- reserve_fail(size, "xmemalign");
-
- ret = ipalloc(alignment, size);
- if (ret == NULL) {
- uint64_t seq = 0;
-
- do {
- seq = reserve_crit(size, "xmemalign", seq);
- ret = ipalloc(alignment, size);
- } while (ret == NULL);
- }
-
- UTRACE(0, size, ret);
- return (ret);
-}
-
-static void
-reserve_shrink(void)
-{
- extent_node_t *node;
-
- assert(reserve_cur > reserve_max);
-#ifdef MALLOC_DEBUG
- {
- extent_node_t *node;
- size_t reserve_size;
-
- reserve_size = 0;
- rb_foreach_begin(extent_node_t, link_szad, &reserve_chunks_szad,
- node) {
- reserve_size += node->size;
- } rb_foreach_end(extent_node_t, link_szad, &reserve_chunks_szad,
- node)
- assert(reserve_size == reserve_cur);
-
- reserve_size = 0;
- rb_foreach_begin(extent_node_t, link_ad, &reserve_chunks_ad,
- node) {
- reserve_size += node->size;
- } rb_foreach_end(extent_node_t, link_ad, &reserve_chunks_ad,
- node)
- assert(reserve_size == reserve_cur);
- }
-#endif
-
- /* Discard chunks until the reserve is below the size limit. */
- rb_foreach_reverse_begin(extent_node_t, link_ad, &reserve_chunks_ad,
- node) {
-#ifndef MALLOC_DECOMMIT
- if (node->size <= reserve_cur - reserve_max) {
-#endif
- extent_node_t *tnode = extent_tree_ad_prev(
- &reserve_chunks_ad, node);
-
-#ifdef MALLOC_DECOMMIT
- assert(node->size <= reserve_cur - reserve_max);
-#endif
-
- /* Discard the entire [multi-]chunk. */
- extent_tree_szad_remove(&reserve_chunks_szad, node);
- extent_tree_ad_remove(&reserve_chunks_ad, node);
- reserve_cur -= node->size;
- pages_unmap(node->addr, node->size);
-#ifdef MALLOC_STATS
- stats_chunks.curchunks -= (node->size / chunksize);
-#endif
- base_node_dealloc(node);
- if (reserve_cur == reserve_max)
- break;
-
- rb_foreach_reverse_prev(extent_node_t, link_ad,
- extent_ad_comp, &reserve_chunks_ad, tnode);
-#ifndef MALLOC_DECOMMIT
- } else {
- /* Discard the end of the multi-chunk. */
- extent_tree_szad_remove(&reserve_chunks_szad, node);
- node->size -= reserve_cur - reserve_max;
- extent_tree_szad_insert(&reserve_chunks_szad, node);
- pages_unmap((void *)((uintptr_t)node->addr +
- node->size), reserve_cur - reserve_max);
-#ifdef MALLOC_STATS
- stats_chunks.curchunks -= ((reserve_cur - reserve_max) /
- chunksize);
-#endif
- reserve_cur = reserve_max;
- break;
- }
-#endif
- assert(reserve_cur > reserve_max);
- } rb_foreach_reverse_end(extent_node_t, link_ad, &reserve_chunks_ad,
- node)
-}
-
-/* Send a condition notification. */
-static uint64_t
-reserve_notify(reserve_cnd_t cnd, size_t size, uint64_t seq)
-{
- reserve_reg_t *reg;
-
- /* seq is used to keep track of distinct condition-causing events. */
- if (seq == 0) {
- /* Allocate new sequence number. */
- reserve_seq++;
- seq = reserve_seq;
- }
-
- /*
- * Advance to the next callback registration and send a notification,
- * unless one has already been sent for this condition-causing event.
- */
- reg = ql_first(&reserve_regs);
- if (reg == NULL)
- return (0);
- ql_first(&reserve_regs) = ql_next(&reserve_regs, reg, link);
- if (reg->seq == seq)
- return (0);
- reg->seq = seq;
- malloc_mutex_unlock(&reserve_mtx);
- reg->cb(reg->ctx, cnd, size);
- malloc_mutex_lock(&reserve_mtx);
-
- return (seq);
-}
-
-/* Allocation failure due to OOM. Try to free some memory via callbacks. */
-static uint64_t
-reserve_crit(size_t size, const char *fname, uint64_t seq)
-{
-
- /*
- * Send one condition notification. Iteration is handled by the
- * caller of this function.
- */
- malloc_mutex_lock(&reserve_mtx);
- seq = reserve_notify(RESERVE_CND_CRIT, size, seq);
- malloc_mutex_unlock(&reserve_mtx);
-
- /* If no notification could be sent, then no further recourse exists. */
- if (seq == 0)
- reserve_fail(size, fname);
-
- return (seq);
-}
-
-/* Permanent allocation failure due to OOM. */
-static void
-reserve_fail(size_t size, const char *fname)
-{
- uint64_t seq = 0;
-
- /* Send fail notifications. */
- malloc_mutex_lock(&reserve_mtx);
- do {
- seq = reserve_notify(RESERVE_CND_FAIL, size, seq);
- } while (seq != 0);
- malloc_mutex_unlock(&reserve_mtx);
-
- /* Terminate the application. */
- _malloc_message(_getprogname(),
- ": (malloc) Error in ", fname, "(): out of memory\n");
- abort();
-}
-
-bool
-reserve_cb_register(reserve_cb_t *cb, void *ctx)
-{
- reserve_reg_t *reg = base_reserve_reg_alloc();
- if (reg == NULL)
- return (true);
-
- ql_elm_new(reg, link);
- reg->cb = cb;
- reg->ctx = ctx;
- reg->seq = 0;
-
- malloc_mutex_lock(&reserve_mtx);
- ql_head_insert(&reserve_regs, reg, link);
- malloc_mutex_unlock(&reserve_mtx);
-
- return (false);
-}
-
-bool
-reserve_cb_unregister(reserve_cb_t *cb, void *ctx)
-{
- reserve_reg_t *reg = NULL;
-
- malloc_mutex_lock(&reserve_mtx);
- ql_foreach(reg, &reserve_regs, link) {
- if (reg->cb == cb && reg->ctx == ctx) {
- ql_remove(&reserve_regs, reg, link);
- break;
- }
- }
- malloc_mutex_unlock(&reserve_mtx);
-
- if (reg != NULL)
- base_reserve_reg_dealloc(reg);
- return (false);
- return (true);
-}
-
-size_t
-reserve_cur_get(void)
-{
- size_t ret;
-
- malloc_mutex_lock(&reserve_mtx);
- ret = reserve_cur;
- malloc_mutex_unlock(&reserve_mtx);
-
- return (ret);
-}
-
-size_t
-reserve_min_get(void)
-{
- size_t ret;
-
- malloc_mutex_lock(&reserve_mtx);
- ret = reserve_min;
- malloc_mutex_unlock(&reserve_mtx);
-
- return (ret);
-}
-
-bool
-reserve_min_set(size_t min)
-{
-
- min = CHUNK_CEILING(min);
-
- malloc_mutex_lock(&reserve_mtx);
- /* Keep |reserve_max - reserve_min| the same. */
- if (min < reserve_min) {
- reserve_max -= reserve_min - min;
- reserve_min = min;
- } else {
- /* Protect against wrap-around. */
- if (reserve_max + min - reserve_min < reserve_max) {
- reserve_min = SIZE_T_MAX - (reserve_max - reserve_min)
- - chunksize + 1;
- reserve_max = SIZE_T_MAX - chunksize + 1;
- } else {
- reserve_max += min - reserve_min;
- reserve_min = min;
- }
- }
-
- /* Resize the reserve if necessary. */
- if (reserve_cur < reserve_min) {
- size_t size = reserve_min - reserve_cur;
-
- /* Force the reserve to grow by allocating/deallocating. */
- malloc_mutex_unlock(&reserve_mtx);
-#ifdef MALLOC_DECOMMIT
- {
- void **chunks;
- size_t i, n;
-
- n = size >> opt_chunk_2pow;
- chunks = (void**)imalloc(n * sizeof(void *));
- if (chunks == NULL)
- return (true);
- for (i = 0; i < n; i++) {
- chunks[i] = huge_malloc(chunksize, false);
- if (chunks[i] == NULL) {
- size_t j;
-
- for (j = 0; j < i; j++) {
- huge_dalloc(chunks[j]);
- }
- idalloc(chunks);
- return (true);
- }
- }
- for (i = 0; i < n; i++)
- huge_dalloc(chunks[i]);
- idalloc(chunks);
- }
-#else
- {
- void *x = huge_malloc(size, false);
- if (x == NULL) {
- return (true);
- }
- huge_dalloc(x);
- }
-#endif
- } else if (reserve_cur > reserve_max) {
- reserve_shrink();
- malloc_mutex_unlock(&reserve_mtx);
- } else
- malloc_mutex_unlock(&reserve_mtx);
-
- return (false);
-}
-
#ifdef MOZ_MEMORY_WINDOWS
void*
_recalloc(void *ptr, size_t count, size_t size)
{
size_t oldsize = (ptr != NULL) ? isalloc(ptr) : 0;
size_t newsize = count * size;
/*