--- a/memory/jemalloc/jemalloc.c
+++ b/memory/jemalloc/jemalloc.c
@@ -1,9 +1,10 @@
-/* -*- Mode: C; tab-width: 4; c-basic-offset: 4 -*- */
+/* -*- Mode: C; tab-width: 8; c-basic-offset: 8 -*- */
+/* vim:set softtabstop=8 shiftwidth=8: */
/*-
* Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
@@ -157,41 +158,32 @@
/*
* MALLOC_BALANCE enables monitoring of arena lock contention and dynamically
* re-balances arena load if exponentially averaged contention exceeds a
* certain threshold.
*/
/* #define MALLOC_BALANCE */
-/*
- * MALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
- * segment (DSS). In an ideal world, this functionality would be completely
- * unnecessary, but we are burdened by history and the lack of resource limits
- * for anonymous mapped memory.
- */
-/*
- * Uniformly disable sbrk(2) use in Mozilla, since it has various problems
- * across platforms:
- *
- * Linux: sbrk() fails to detect error conditions when using large amounts of
- * memory, resulting in memory corruption.
- *
- * Darwin: sbrk() is severely limited in how much memory it can allocate, and
- * its use is strongly discouraged.
- *
- * Solaris: sbrk() does not necessarily discard pages when the DSS is shrunk,
- * which makes it possible to get non-zeroed pages when re-expanding
- * the DSS. This is incompatible with jemalloc's assumptions, and a
- * fix would require chunk_alloc_dss() to optionally zero memory as
- * chunk_recycle_dss() does (though the cost could be reduced by
- * keeping track of the DSS high water mark and zeroing only when
- * below that mark).
- */
-/* #define MALLOC_DSS */
+#if (!defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN))
+ /*
+ * MALLOC_PAGEFILE causes all mmap()ed memory to be backed by temporary
+ * files, so that if a chunk is mapped, it is guaranteed to be swappable.
+ * This avoids asynchronous OOM failures that are due to VM over-commit.
+ *
+ * XXX OS X over-commits, so we should probably use mmap() instead of
+ * vm_allocate(), so that MALLOC_PAGEFILE works.
+ */
+# define MALLOC_PAGEFILE
+#endif
+
+#ifdef MALLOC_PAGEFILE
+/* Write size when initializing a page file. */
+# define MALLOC_PAGEFILE_WRITE_SIZE 512
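+/*
+ * pagefile_init() fills each temporary file with zeros,
+ * MALLOC_PAGEFILE_WRITE_SIZE bytes per write(), so that the backing disk
+ * space is committed before the file is mapped.
+ */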
+#endif
#ifdef MOZ_MEMORY_LINUX
#define _GNU_SOURCE /* For mremap(2). */
#define issetugid() 0
#if 0 /* Enable in order to test decommit code on Linux. */
# define MALLOC_DECOMMIT
#endif
#endif
@@ -280,16 +272,19 @@ typedef long ssize_t;
#endif
#include "spinlock.h"
#include "namespace.h"
#endif
#include <sys/mman.h>
#ifndef MADV_FREE
# define MADV_FREE MADV_DONTNEED
#endif
+#ifndef MAP_NOSYNC
+# define MAP_NOSYNC 0
+#endif
#include <sys/param.h>
#ifndef MOZ_MEMORY
#include <sys/stddef.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#ifndef MOZ_MEMORY_SOLARIS
#include <sys/sysctl.h>
@@ -358,16 +353,18 @@ static const bool __isthreaded = true;
# ifndef NDEBUG
# define NDEBUG
# endif
#endif
#ifndef MOZ_MEMORY_WINDOWS
#include <assert.h>
#endif
+#include "qr.h"
+#include "ql.h"
#ifdef MOZ_MEMORY_WINDOWS
/* MSVC++ does not support C99 variable-length arrays. */
# define RB_NO_C99_VARARRAYS
#endif
#include "rb.h"
#ifdef MALLOC_DEBUG
/* Disable inlining to make debugging easier. */
@@ -453,16 +450,28 @@ static const bool __isthreaded = true;
* Size and alignment of memory chunks that are allocated by the OS's virtual
* memory system.
*/
#define CHUNK_2POW_DEFAULT 20
/* Maximum number of dirty pages per arena. */
#define DIRTY_MAX_DEFAULT (1U << 10)
+/* Default minimum reserve size (log base 2), in chunks. */
+#define RESERVE_MIN_2POW_DEFAULT 1
+/*
+ * Default range (in chunks) between reserve_min and reserve_max, in addition
+ * to the mandatory one chunk per arena.
+ */
+#ifdef MALLOC_PAGEFILE
+# define RESERVE_RANGE_2POW_DEFAULT 5
+#else
+# define RESERVE_RANGE_2POW_DEFAULT 0
+#endif
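+
+/*
+ * At run time the reserve defaults can be adjusted via the MALLOC_OPTIONS
+ * flags 'r'/'R' (opt_reserve_min_lshift) and 'g'/'G'
+ * (opt_reserve_range_lshift); each occurrence shifts the corresponding size
+ * down/up by one power of two.
+ */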
+
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing,
* so over-estimates are okay (up to a point), but under-estimates will
* negatively affect performance.
*/
#define CACHELINE_2POW 6
#define CACHELINE ((size_t)(1U << CACHELINE_2POW))
@@ -696,16 +705,40 @@ struct extent_node_s {
/* Total region size. */
size_t size;
};
typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
/*
+ * Reserve data structures.
+ */
+
+/* Callback registration. */
+typedef struct reserve_reg_s reserve_reg_t;
+struct reserve_reg_s {
+ /* Linkage for list of all registered callbacks. */
+ ql_elm(reserve_reg_t) link;
+
+ /* Callback function pointer. */
+ reserve_cb_t *cb;
+
+ /* Opaque application data pointer. */
+ void *ctx;
+
+ /*
+	 * Sequence number of the condition notification most recently sent to
+ * this callback.
+ */
+ uint64_t seq;
+};
+
+/******************************************************************************/
+/*
* Arena data structures.
*/
typedef struct arena_s arena_t;
typedef struct arena_bin_s arena_bin_t;
/*
* Each map element contains several flags, plus page position for runs that
@@ -832,44 +865,38 @@ struct arena_s {
#else
pthread_mutex_t lock;
#endif
#ifdef MALLOC_STATS
arena_stats_t stats;
#endif
+ /*
+ * Chunk allocation sequence number, used to detect races with other
+ * threads during chunk allocation, and then discard unnecessary chunks.
+ */
+ uint64_t chunk_seq;
+
/* Tree of all chunks this arena manages. */
arena_chunk_tree_t chunks_all;
/*
* Tree of dirty-page-containing chunks this arena manages. This tree
* is maintained in addition to chunks_all in order to make
* deallocation O(lg d), where 'd' is the size of chunks_dirty.
*
* Without this tree, deallocation would be O(a), where 'a' is the size
* of chunks_all. Since dirty pages are purged in descending memory
* order, it would not be difficult to trigger something approaching
* worst case behavior with a series of large deallocations.
*/
arena_chunk_tree_t chunks_dirty;
/*
- * In order to avoid rapid chunk allocation/deallocation when an arena
- * oscillates right on the cusp of needing a new chunk, cache the most
- * recently freed chunk. The spare is left in the arena's chunk trees
- * until it is deleted.
- *
- * There is one spare chunk per arena, rather than one spare total, in
- * order to avoid interactions between multiple threads that could make
- * a single spare inadequate.
- */
- arena_chunk_t *spare;
-
- /*
* Current count of pages within unused runs that are potentially
* dirty, and for which madvise(... MADV_FREE) has not been called. By
* tracking this, we can institute a limit on how much dirty unused
* memory is mapped for each arena.
*/
size_t ndirty;
/*
@@ -955,46 +982,60 @@ static size_t arena_maxclass; /* Max si
*/
/* Protects chunk-related data structures. */
static malloc_mutex_t huge_mtx;
/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t huge;
-#ifdef MALLOC_DSS
-/*
- * Protects sbrk() calls. This avoids malloc races among threads, though it
- * does not protect against races with threads that call sbrk() directly.
- */
-static malloc_mutex_t dss_mtx;
-/* Base address of the DSS. */
-static void *dss_base;
-/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
-static void *dss_prev;
-/* Current upper limit on DSS addresses. */
-static void *dss_max;
-
-/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering). These are used when allocating chunks, in an attempt to re-use
- * address space. Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
- */
-static extent_tree_t dss_chunks_szad;
-static extent_tree_t dss_chunks_ad;
-#endif
-
#ifdef MALLOC_STATS
/* Huge allocation statistics. */
static uint64_t huge_nmalloc;
static uint64_t huge_ndalloc;
static size_t huge_allocated;
#endif
+/****************/
+/*
+ * Memory reserve.
+ */
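+/*
+ * The reserve is a pool of chunks that remain mapped but unused:
+ * chunk_dealloc() first offers chunks to chunk_dealloc_reserve(), and
+ * chunk_alloc() tries chunk_recycle_reserve() before mapping new memory.
+ * reserve_cur tracks the total reserve size; when an allocation leaves it
+ * below reserve_min, chunk_recycle_reserve() maps replacement chunks and
+ * falls back to notifying registered callbacks (RESERVE_CND_LOW), and when
+ * it exceeds reserve_max, reserve_shrink() releases the excess.
+ */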
+
+#ifdef MALLOC_PAGEFILE
+static char pagefile_templ[PATH_MAX];
+#endif
+
+/* Protects reserve-related data structures. */
+static malloc_mutex_t reserve_mtx;
+
+/*
+ * Bounds on acceptable reserve size, and current reserve size. Reserve
+ * depletion may cause (reserve_cur < reserve_min).
+ */
+static size_t reserve_min;
+static size_t reserve_cur;
+static size_t reserve_max;
+
+/* List of registered callbacks. */
+static ql_head(reserve_reg_t) reserve_regs;
+
+/*
+ * Condition notification sequence number, used to determine whether all
+ * registered callbacks have been notified of the most current condition.
+ */
+static uint64_t reserve_seq;
+
+/*
+ * Trees of chunks currently in the memory reserve. Depending on function,
+ * different tree orderings are needed, which is why there are two trees with
+ * the same contents.
+ */
+static extent_tree_t reserve_chunks_szad;
+static extent_tree_t reserve_chunks_ad;
+
/****************************/
/*
* base (internal allocation).
*/
/*
* Current pages that are being used for internal memory allocations. These
* pages are carved up in cacheline-size quanta, so that there is no chance of
@@ -1002,32 +1043,34 @@ static size_t huge_allocated;
*/
static void *base_pages;
static void *base_next_addr;
#ifdef MALLOC_DECOMMIT
static void *base_next_decommitted;
#endif
static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes;
+static reserve_reg_t *base_reserve_regs;
static malloc_mutex_t base_mtx;
#ifdef MALLOC_STATS
static size_t base_mapped;
#endif
/********/
/*
* Arenas.
*/
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
*/
static arena_t **arenas;
static unsigned narenas;
+static unsigned narenas_2pow;
#ifndef NO_TLS
# ifdef MALLOC_BALANCE
static unsigned narenas_2pow;
# else
static unsigned next_arena;
# endif
#endif
#ifdef MOZ_MEMORY
@@ -1063,28 +1106,29 @@ static bool opt_abort = true;
static bool opt_junk = true;
#endif
#else
static bool opt_abort = false;
#ifdef MALLOC_FILL
static bool opt_junk = false;
#endif
#endif
-#ifdef MALLOC_DSS
-static bool opt_dss = true;
-static bool opt_mmap = true;
-#endif
static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
#ifdef MALLOC_BALANCE
static uint64_t opt_balance_threshold = BALANCE_THRESHOLD_DEFAULT;
#endif
static bool opt_print_stats = false;
static size_t opt_quantum_2pow = QUANTUM_2POW_MIN;
static size_t opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
static size_t opt_chunk_2pow = CHUNK_2POW_DEFAULT;
+static int opt_reserve_min_lshift = 0;
+static int opt_reserve_range_lshift = 0;
+#ifdef MALLOC_PAGEFILE
+static bool opt_pagefile = true;
+#endif
#ifdef MALLOC_UTRACE
static bool opt_utrace = false;
#endif
#ifdef MALLOC_SYSV
static bool opt_sysv = false;
#endif
#ifdef MALLOC_XMALLOC
static bool opt_xmalloc = false;
@@ -1125,51 +1169,48 @@ static void wrtmessage(const char *p1, c
#ifdef MALLOC_STATS
#ifdef MOZ_MEMORY_DARWIN
/* Avoid namespace collision with OS X's malloc APIs. */
#define malloc_printf moz_malloc_printf
#endif
static void malloc_printf(const char *format, ...);
#endif
static char *umax2s(uintmax_t x, char *s);
-#ifdef MALLOC_DSS
-static bool base_pages_alloc_dss(size_t minsize);
-#endif
static bool base_pages_alloc_mmap(size_t minsize);
static bool base_pages_alloc(size_t minsize);
static void *base_alloc(size_t size);
static void *base_calloc(size_t number, size_t size);
static extent_node_t *base_node_alloc(void);
static void base_node_dealloc(extent_node_t *node);
+static reserve_reg_t *base_reserve_reg_alloc(void);
+static void base_reserve_reg_dealloc(reserve_reg_t *reg);
#ifdef MALLOC_STATS
static void stats_print(arena_t *arena);
#endif
-static void *pages_map(void *addr, size_t size);
+static void *pages_map(void *addr, size_t size, int pfd);
static void pages_unmap(void *addr, size_t size);
-#ifdef MALLOC_DSS
-static void *chunk_alloc_dss(size_t size);
-static void *chunk_recycle_dss(size_t size, bool zero);
-#endif
-static void *chunk_alloc_mmap(size_t size);
-static void *chunk_alloc(size_t size, bool zero);
-#ifdef MALLOC_DSS
-static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
-static bool chunk_dealloc_dss(void *chunk, size_t size);
-#endif
+static void *chunk_alloc_mmap(size_t size, bool pagefile);
+#ifdef MALLOC_PAGEFILE
+static int pagefile_init(size_t size);
+static void pagefile_close(int pfd);
+#endif
+static void *chunk_recycle_reserve(size_t size, bool zero);
+static void *chunk_alloc(size_t size, bool zero, bool pagefile);
+static extent_node_t *chunk_dealloc_reserve(void *chunk, size_t size);
static void chunk_dealloc_mmap(void *chunk, size_t size);
static void chunk_dealloc(void *chunk, size_t size);
#ifndef NO_TLS
static arena_t *choose_arena_hard(void);
#endif
static extent_node_t *arena_chunk_node_alloc(arena_chunk_t *chunk);
static void arena_chunk_node_dealloc(arena_chunk_t *chunk,
extent_node_t *node);
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
bool small, bool zero);
-static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
+static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool small,
bool zero);
static void arena_purge(arena_t *arena);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
extent_node_t *nodeB, arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
@@ -1199,16 +1240,20 @@ static void *huge_malloc(size_t size, bo
static void *huge_palloc(size_t alignment, size_t size);
static void *huge_ralloc(void *ptr, size_t size, size_t oldsize);
static void huge_dalloc(void *ptr);
static void malloc_print_stats(void);
#ifndef MOZ_MEMORY_WINDOWS
static
#endif
bool malloc_init_hard(void);
+static void reserve_shrink(void);
+static uint64_t reserve_notify(reserve_cnd_t cnd, size_t size, uint64_t seq);
+static uint64_t reserve_crit(size_t size, const char *fname, uint64_t seq);
+static void reserve_fail(size_t size, const char *fname);
/*
* End function prototypes.
*/
/******************************************************************************/
/*
* Begin mutex. We can't use normal pthread mutexes in all places, because
* they require malloc()ed memory, which causes bootstrapping issues in some
@@ -1645,114 +1690,72 @@ pages_commit(void *addr, size_t size)
# else
if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
MAP_ANON, -1, 0) == MAP_FAILED)
abort();
# endif
}
#endif
-#ifdef MALLOC_DSS
-static bool
-base_pages_alloc_dss(size_t minsize)
-{
-
- /*
- * Do special DSS allocation here, since base allocations don't need to
- * be chunk-aligned.
- */
- malloc_mutex_lock(&dss_mtx);
- if (dss_prev != (void *)-1) {
- intptr_t incr;
- size_t csize = CHUNK_CEILING(minsize);
-
- do {
- /* Get the current end of the DSS. */
- dss_max = sbrk(0);
-
- /*
- * Calculate how much padding is necessary to
- * chunk-align the end of the DSS. Don't worry about
- * dss_max not being chunk-aligned though.
- */
- incr = (intptr_t)chunksize
- - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
- assert(incr >= 0);
- if ((size_t)incr < minsize)
- incr += csize;
-
- dss_prev = sbrk(incr);
- if (dss_prev == dss_max) {
- /* Success. */
- dss_max = (void *)((intptr_t)dss_prev + incr);
- base_pages = dss_prev;
- base_next_addr = base_pages;
- base_past_addr = dss_max;
-#ifdef MALLOC_STATS
- base_mapped += incr;
-#endif
- malloc_mutex_unlock(&dss_mtx);
- return (false);
- }
- } while (dss_prev != (void *)-1);
- }
- malloc_mutex_unlock(&dss_mtx);
-
- return (true);
-}
-#endif
-
static bool
base_pages_alloc_mmap(size_t minsize)
{
+ bool ret;
size_t csize;
#ifdef MALLOC_DECOMMIT
size_t pminsize;
#endif
+ int pfd;
assert(minsize != 0);
- csize = PAGE_CEILING(minsize);
- base_pages = pages_map(NULL, csize);
- if (base_pages == NULL)
- return (true);
+ csize = CHUNK_CEILING(minsize);
+#ifdef MALLOC_PAGEFILE
+ if (opt_pagefile) {
+ pfd = pagefile_init(csize);
+ if (pfd == -1)
+ return (true);
+ } else
+#endif
+ pfd = -1;
+ base_pages = pages_map(NULL, csize, pfd);
+ if (base_pages == NULL) {
+ ret = true;
+ goto RETURN;
+ }
base_next_addr = base_pages;
base_past_addr = (void *)((uintptr_t)base_pages + csize);
#ifdef MALLOC_DECOMMIT
/*
* Leave enough pages for minsize committed, since otherwise they would
* have to be immediately recommitted.
*/
pminsize = PAGE_CEILING(minsize);
base_next_decommitted = (void *)((uintptr_t)base_pages + pminsize);
if (pminsize < csize)
pages_decommit(base_next_decommitted, csize - pminsize);
#endif
#ifdef MALLOC_STATS
base_mapped += csize;
#endif
+ ret = false;
+RETURN:
+#ifdef MALLOC_PAGEFILE
+ if (pfd != -1)
+ pagefile_close(pfd);
+#endif
-	return (false);
+	return (ret);
}
static bool
base_pages_alloc(size_t minsize)
{
-#ifdef MALLOC_DSS
- if (opt_dss) {
- if (base_pages_alloc_dss(minsize) == false)
- return (false);
- }
-
- if (opt_mmap && minsize != 0)
-#endif
- {
- if (base_pages_alloc_mmap(minsize) == false)
- return (false);
- }
+ if (base_pages_alloc_mmap(minsize) == false)
+ return (false);
return (true);
}
static void *
base_alloc(size_t size)
{
void *ret;
@@ -1833,16 +1836,48 @@ base_node_dealloc(extent_node_t *node)
malloc_mutex_lock(&base_mtx);
VALGRIND_FREELIKE_BLOCK(node, 0);
VALGRIND_MALLOCLIKE_BLOCK(node, sizeof(extent_node_t *), 0, false);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
malloc_mutex_unlock(&base_mtx);
}
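+
+/*
+ * reserve_reg_t registrations are carved from base allocations and recycled
+ * via a free list, following the same pattern as base_node_alloc() and
+ * base_node_dealloc() above.
+ */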
+static reserve_reg_t *
+base_reserve_reg_alloc(void)
+{
+ reserve_reg_t *ret;
+
+ malloc_mutex_lock(&base_mtx);
+ if (base_reserve_regs != NULL) {
+ ret = base_reserve_regs;
+ base_reserve_regs = *(reserve_reg_t **)ret;
+ VALGRIND_FREELIKE_BLOCK(ret, 0);
+ VALGRIND_MALLOCLIKE_BLOCK(ret, sizeof(reserve_reg_t), 0, false);
+ malloc_mutex_unlock(&base_mtx);
+ } else {
+ malloc_mutex_unlock(&base_mtx);
+ ret = (reserve_reg_t *)base_alloc(sizeof(reserve_reg_t));
+ }
+
+ return (ret);
+}
+
+static void
+base_reserve_reg_dealloc(reserve_reg_t *reg)
+{
+
+ malloc_mutex_lock(&base_mtx);
+ VALGRIND_FREELIKE_BLOCK(reg, 0);
+ VALGRIND_MALLOCLIKE_BLOCK(reg, sizeof(reserve_reg_t *), 0, false);
+ *(reserve_reg_t **)reg = base_reserve_regs;
+ base_reserve_regs = reg;
+ malloc_mutex_unlock(&base_mtx);
+}
+
/******************************************************************************/
#ifdef MALLOC_STATS
static void
stats_print(arena_t *arena)
{
unsigned i, gap_start;
@@ -2001,17 +2036,17 @@ rb_wrap(static, extent_tree_ad_, extent_
*/
/******************************************************************************/
/*
* Begin chunk management functions.
*/
#ifdef MOZ_MEMORY_WINDOWS
static void *
-pages_map(void *addr, size_t size)
+pages_map(void *addr, size_t size, int pfd)
{
void *ret;
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
PAGE_READWRITE);
return (ret);
}
@@ -2024,17 +2059,17 @@ pages_unmap(void *addr, size_t size)
_malloc_message(_getprogname(),
": (malloc) Error in VirtualFree()\n", "", "");
if (opt_abort)
abort();
}
}
#elif (defined(MOZ_MEMORY_DARWIN))
static void *
-pages_map(void *addr, size_t size)
+pages_map(void *addr, size_t size, int pfd)
{
void *ret;
kern_return_t err;
int flags;
if (addr != NULL) {
ret = addr;
flags = 0;
@@ -2076,26 +2111,34 @@ pages_copy(void *dest, const void *src,
assert(n >= VM_COPY_MIN);
assert((void *)((uintptr_t)src & ~pagesize_mask) == src);
vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
(vm_address_t)dest);
}
#else /* MOZ_MEMORY_DARWIN */
static void *
-pages_map(void *addr, size_t size)
+pages_map(void *addr, size_t size, int pfd)
{
void *ret;
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
- ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
- -1, 0);
+#ifdef MALLOC_PAGEFILE
+ if (pfd != -1) {
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
+ MAP_NOSYNC, pfd, 0);
+ } else
+#endif
+ {
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
+ MAP_ANON, -1, 0);
+ }
assert(ret != NULL);
if (ret == MAP_FAILED)
ret = NULL;
else if (addr != NULL && ret != addr) {
/*
* We succeeded in mapping memory, but not in the right place.
*/
@@ -2127,427 +2170,380 @@ pages_unmap(void *addr, size_t size)
_malloc_message(_getprogname(),
": (malloc) Error in munmap(): ", buf, "\n");
if (opt_abort)
abort();
}
}
#endif
-#ifdef MALLOC_DSS
-static void *
-chunk_alloc_dss(size_t size)
-{
-
- /*
- * sbrk() uses a signed increment argument, so take care not to
- * interpret a huge allocation request as a negative increment.
- */
- if ((intptr_t)size < 0)
- return (NULL);
-
- malloc_mutex_lock(&dss_mtx);
- if (dss_prev != (void *)-1) {
- intptr_t incr;
-
- /*
- * The loop is necessary to recover from races with other
- * threads that are using the DSS for something other than
- * malloc.
- */
- do {
- void *ret;
-
- /* Get the current end of the DSS. */
- dss_max = sbrk(0);
-
- /*
- * Calculate how much padding is necessary to
- * chunk-align the end of the DSS.
- */
- incr = (intptr_t)size
- - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
- if (incr == (intptr_t)size)
- ret = dss_max;
- else {
- ret = (void *)((intptr_t)dss_max + incr);
- incr += size;
- }
-
- dss_prev = sbrk(incr);
- if (dss_prev == dss_max) {
- /* Success. */
- dss_max = (void *)((intptr_t)dss_prev + incr);
- malloc_mutex_unlock(&dss_mtx);
- return (ret);
- }
- } while (dss_prev != (void *)-1);
- }
- malloc_mutex_unlock(&dss_mtx);
-
- return (NULL);
-}
-
-static void *
-chunk_recycle_dss(size_t size, bool zero)
-{
- extent_node_t *node, key;
-
- key.addr = NULL;
- key.size = size;
- malloc_mutex_lock(&dss_mtx);
- node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);
- if (node != NULL) {
- void *ret = node->addr;
-
- /* Remove node from the tree. */
- extent_tree_szad_remove(&dss_chunks_szad, node);
- if (node->size == size) {
- extent_tree_ad_remove(&dss_chunks_ad, node);
- base_node_dealloc(node);
- } else {
- /*
- * Insert the remainder of node's address range as a
- * smaller chunk. Its position within dss_chunks_ad
- * does not change.
- */
- assert(node->size > size);
- node->addr = (void *)((uintptr_t)node->addr + size);
- node->size -= size;
- extent_tree_szad_insert(&dss_chunks_szad, node);
- }
- malloc_mutex_unlock(&dss_mtx);
-
- if (zero)
- memset(ret, 0, size);
- return (ret);
- }
- malloc_mutex_unlock(&dss_mtx);
-
- return (NULL);
-}
-#endif
-
-#ifdef MOZ_MEMORY_WINDOWS
static inline void *
-chunk_alloc_mmap(size_t size)
+chunk_alloc_mmap(size_t size, bool pagefile)
{
void *ret;
size_t offset;
+ int pfd;
+
+#ifdef MALLOC_PAGEFILE
+ if (opt_pagefile && pagefile) {
+ pfd = pagefile_init(size);
+ if (pfd == -1)
+ return (NULL);
+ } else
+#endif
+ pfd = -1;
/*
* Windows requires that there be a 1:1 mapping between VM
* allocation/deallocation operations. Therefore, take care here to
* acquire the final result via one mapping operation. This means
* unmapping any preliminary result that is not correctly aligned.
+ *
+ * The MALLOC_PAGEFILE code also benefits from this mapping algorithm,
+ * since it reduces the number of page files.
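+	 *
+	 * For example, with the default 1MB chunk size, a mapping that comes
+	 * back 0x40000 bytes past a chunk boundary is unmapped and retried at
+	 * (ret + size - 0x40000), which is chunk-aligned provided no other
+	 * thread claims that range in the meantime.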
*/
- ret = pages_map(NULL, size);
+ ret = pages_map(NULL, size, pfd);
if (ret == NULL)
- return (NULL);
+ goto RETURN;
offset = CHUNK_ADDR2OFFSET(ret);
if (offset != 0) {
/* Deallocate, then try to allocate at (ret + size - offset). */
pages_unmap(ret, size);
- ret = pages_map((void *)((uintptr_t)ret + size - offset), size);
+ ret = pages_map((void *)((uintptr_t)ret + size - offset), size,
+ pfd);
while (ret == NULL) {
/*
* Over-allocate in order to map a memory region that
* is definitely large enough.
*/
- ret = pages_map(NULL, size + chunksize);
+ ret = pages_map(NULL, size + chunksize, -1);
if (ret == NULL)
- return (NULL);
+ goto RETURN;
/*
* Deallocate, then allocate the correct size, within
* the over-sized mapping.
*/
offset = CHUNK_ADDR2OFFSET(ret);
pages_unmap(ret, size + chunksize);
if (offset == 0)
- ret = pages_map(ret, size);
+ ret = pages_map(ret, size, pfd);
else {
ret = pages_map((void *)((uintptr_t)ret +
- chunksize - offset), size);
+ chunksize - offset), size, pfd);
}
/*
* Failure here indicates a race with another thread, so
* try again.
*/
}
}
+RETURN:
+#ifdef MALLOC_PAGEFILE
+ if (pfd != -1)
+ pagefile_close(pfd);
+#endif
return (ret);
}
-#else
-static inline void *
-chunk_alloc_mmap(size_t size)
+
+#ifdef MALLOC_PAGEFILE
+static int
+pagefile_init(size_t size)
{
- void *ret;
- size_t offset;
+ int ret;
+ size_t i;
+ char pagefile_path[PATH_MAX];
+ char zbuf[MALLOC_PAGEFILE_WRITE_SIZE];
/*
- * Ideally, there would be a way to specify alignment to mmap() (like
- * NetBSD has), but in the absence of such a feature, we have to work
- * hard to efficiently create aligned mappings. The reliable, but
- * expensive method is to create a mapping that is over-sized, then
- * trim the excess. However, that always results in at least one call
- * to pages_unmap().
- *
- * A more optimistic approach is to try mapping precisely the right
- * amount, then try to append another mapping if alignment is off. In
- * practice, this works out well as long as the application is not
- * interleaving mappings via direct mmap() calls. If we do run into a
- * situation where there is an interleaved mapping and we are unable to
- * extend an unaligned mapping, our best option is to momentarily
- * revert to the reliable-but-expensive method. This will tend to
- * leave a gap in the memory map that is too small to cause later
- * problems for the optimistic method.
+ * Create a temporary file, then immediately unlink it so that it will
+ * not persist.
*/
-
- ret = pages_map(NULL, size);
- if (ret == NULL)
- return (NULL);
-
- offset = CHUNK_ADDR2OFFSET(ret);
- if (offset != 0) {
- /* Try to extend chunk boundary. */
- if (pages_map((void *)((uintptr_t)ret + size),
- chunksize - offset) == NULL) {
- /*
- * Extension failed. Clean up, then revert to the
- * reliable-but-expensive method.
- */
- pages_unmap(ret, size);
-
- /* Beware size_t wrap-around. */
- if (size + chunksize <= size)
- return NULL;
-
- ret = pages_map(NULL, size + chunksize);
- if (ret == NULL)
- return (NULL);
-
- /* Clean up unneeded leading/trailing space. */
- offset = CHUNK_ADDR2OFFSET(ret);
- if (offset != 0) {
- /* Leading space. */
- pages_unmap(ret, chunksize - offset);
-
- ret = (void *)((uintptr_t)ret +
- (chunksize - offset));
-
- /* Trailing space. */
- pages_unmap((void *)((uintptr_t)ret + size),
- offset);
- } else {
- /* Trailing space only. */
- pages_unmap((void *)((uintptr_t)ret + size),
- chunksize);
+ strcpy(pagefile_path, pagefile_templ);
+ ret = mkstemp(pagefile_path);
+ if (ret == -1)
+ return (ret);
+ if (unlink(pagefile_path)) {
+ char buf[STRERROR_BUF];
+
+ strerror_r(errno, buf, sizeof(buf));
+ _malloc_message(_getprogname(), ": (malloc) Error in unlink(\"",
+ pagefile_path, "\"):");
+ _malloc_message(buf, "\n", "", "");
+ if (opt_abort)
+ abort();
+ }
+
+ /*
+ * Write sequential zeroes to the file in order to assure that disk
+ * space is committed, with minimal fragmentation. It would be
+ * sufficient to write one zero per disk block, but that potentially
+ * results in more system calls, for no real gain.
+ */
+ memset(zbuf, 0, sizeof(zbuf));
+ for (i = 0; i < size; i += sizeof(zbuf)) {
+ if (write(ret, zbuf, sizeof(zbuf)) != sizeof(zbuf)) {
+ if (errno != ENOSPC) {
+ char buf[STRERROR_BUF];
+
+ strerror_r(errno, buf, sizeof(buf));
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in write(): ", buf, "\n");
+ if (opt_abort)
+ abort();
}
- } else {
- /* Clean up unneeded leading space. */
- pages_unmap(ret, chunksize - offset);
- ret = (void *)((uintptr_t)ret + (chunksize - offset));
+ pagefile_close(ret);
+ return (-1);
}
}
return (ret);
}
+
+static void
+pagefile_close(int pfd)
+{
+
+ if (close(pfd)) {
+ char buf[STRERROR_BUF];
+
+ strerror_r(errno, buf, sizeof(buf));
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in close(): ", buf, "\n");
+ if (opt_abort)
+ abort();
+ }
+}
#endif
static void *
-chunk_alloc(size_t size, bool zero)
+chunk_recycle_reserve(size_t size, bool zero)
+{
+ extent_node_t *node, key;
+
+#ifdef MALLOC_DECOMMIT
+ if (size != chunksize)
+ return (NULL);
+#endif
+
+ key.addr = NULL;
+ key.size = size;
+ malloc_mutex_lock(&reserve_mtx);
+ node = extent_tree_szad_nsearch(&reserve_chunks_szad, &key);
+ if (node != NULL) {
+ void *ret = node->addr;
+
+ /* Remove node from the tree. */
+ extent_tree_szad_remove(&reserve_chunks_szad, node);
+#ifndef MALLOC_DECOMMIT
+ if (node->size == size) {
+#else
+ assert(node->size == size);
+#endif
+ extent_tree_ad_remove(&reserve_chunks_ad, node);
+ base_node_dealloc(node);
+#ifndef MALLOC_DECOMMIT
+ } else {
+ /*
+ * Insert the remainder of node's address range as a
+ * smaller chunk. Its position within reserve_chunks_ad
+ * does not change.
+ */
+ assert(node->size > size);
+ node->addr = (void *)((uintptr_t)node->addr + size);
+ node->size -= size;
+ extent_tree_szad_insert(&reserve_chunks_szad, node);
+ }
+#endif
+ reserve_cur -= size;
+ /*
+ * Try to replenish the reserve if this allocation depleted it.
+ */
+#ifndef MALLOC_DECOMMIT
+ if (reserve_cur < reserve_min) {
+ size_t diff = reserve_min - reserve_cur;
+#else
+ while (reserve_cur < reserve_min) {
+# define diff chunksize
+#endif
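+			/*
+			 * Under MALLOC_DECOMMIT the reserve holds only
+			 * single, decommitted chunks, so replenish one chunk
+			 * per iteration; otherwise map the entire deficit in
+			 * one allocation.
+			 */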
+ void *chunk;
+
+ malloc_mutex_unlock(&reserve_mtx);
+ chunk = chunk_alloc_mmap(diff, true);
+ malloc_mutex_lock(&reserve_mtx);
+ if (chunk == NULL) {
+ uint64_t seq = 0;
+
+ do {
+ seq = reserve_notify(RESERVE_CND_LOW,
+ size, seq);
+ } while (reserve_cur < reserve_min && seq != 0);
+ } else {
+ extent_node_t *node;
+
+ node = chunk_dealloc_reserve(chunk, diff);
+ if (node == NULL) {
+ uint64_t seq = 0;
+
+ pages_unmap(chunk, diff);
+ do {
+ seq = reserve_notify(
+ RESERVE_CND_LOW, size, seq);
+ } while (reserve_cur < reserve_min &&
+ seq != 0);
+ }
+ }
+ }
+ malloc_mutex_unlock(&reserve_mtx);
+
+#ifdef MALLOC_DECOMMIT
+ pages_commit(ret, size);
+# undef diff
+#else
+ if (zero)
+ memset(ret, 0, size);
+#endif
+ return (ret);
+ }
+ malloc_mutex_unlock(&reserve_mtx);
+
+ return (NULL);
+}
+
+static void *
+chunk_alloc(size_t size, bool zero, bool pagefile)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
-#ifdef MALLOC_DSS
- if (opt_dss) {
- ret = chunk_recycle_dss(size, zero);
- if (ret != NULL) {
- goto RETURN;
- }
-
- ret = chunk_alloc_dss(size);
- if (ret != NULL)
- goto RETURN;
- }
-
- if (opt_mmap)
-#endif
- {
- ret = chunk_alloc_mmap(size);
- if (ret != NULL)
- goto RETURN;
+ ret = chunk_recycle_reserve(size, zero);
+ if (ret != NULL)
+ goto RETURN;
+
+ ret = chunk_alloc_mmap(size, pagefile);
+ if (ret != NULL) {
+#ifdef MALLOC_STATS
+ stats_chunks.nchunks += (size / chunksize);
+#endif
+ goto RETURN;
}
/* All strategies for allocation failed. */
ret = NULL;
RETURN:
#ifdef MALLOC_STATS
- if (ret != NULL) {
- stats_chunks.nchunks += (size / chunksize);
+ if (ret != NULL)
stats_chunks.curchunks += (size / chunksize);
- }
if (stats_chunks.curchunks > stats_chunks.highchunks)
stats_chunks.highchunks = stats_chunks.curchunks;
#endif
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
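+
+/*
+ * chunk_dealloc_reserve() returns the extent node that now describes the
+ * chunk within the reserve, or NULL if the chunk could not be absorbed, in
+ * which case the caller unmaps it (see chunk_dealloc() below).
+ */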
-#ifdef MALLOC_DSS
static extent_node_t *
-chunk_dealloc_dss_record(void *chunk, size_t size)
+chunk_dealloc_reserve(void *chunk, size_t size)
{
- extent_node_t *node, *prev, key;
+ extent_node_t *node;
+
+#ifdef MALLOC_DECOMMIT
+ if (size != chunksize)
+ return (NULL);
+#else
+ extent_node_t *prev, key;
key.addr = (void *)((uintptr_t)chunk + size);
- node = extent_tree_ad_nsearch(&dss_chunks_ad, &key);
+ node = extent_tree_ad_nsearch(&reserve_chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range. This does
- * not change the position within dss_chunks_ad, so only
- * remove/insert from/into dss_chunks_szad.
+ * not change the position within reserve_chunks_ad, so only
+ * remove/insert from/into reserve_chunks_szad.
*/
- extent_tree_szad_remove(&dss_chunks_szad, node);
+ extent_tree_szad_remove(&reserve_chunks_szad, node);
node->addr = chunk;
node->size += size;
- extent_tree_szad_insert(&dss_chunks_szad, node);
+ extent_tree_szad_insert(&reserve_chunks_szad, node);
} else {
- /*
- * Coalescing forward failed, so insert a new node. Drop
- * dss_mtx during node allocation, since it is possible that a
- * new base chunk will be allocated.
- */
- malloc_mutex_unlock(&dss_mtx);
+#endif
+ /* Coalescing forward failed, so insert a new node. */
node = base_node_alloc();
- malloc_mutex_lock(&dss_mtx);
if (node == NULL)
return (NULL);
node->addr = chunk;
node->size = size;
- extent_tree_ad_insert(&dss_chunks_ad, node);
- extent_tree_szad_insert(&dss_chunks_szad, node);
+ extent_tree_ad_insert(&reserve_chunks_ad, node);
+ extent_tree_szad_insert(&reserve_chunks_szad, node);
+#ifndef MALLOC_DECOMMIT
}
/* Try to coalesce backward. */
- prev = extent_tree_ad_prev(&dss_chunks_ad, node);
+ prev = extent_tree_ad_prev(&reserve_chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
- * not change the position within dss_chunks_ad, so only
- * remove/insert node from/into dss_chunks_szad.
+ * not change the position within reserve_chunks_ad, so only
+ * remove/insert node from/into reserve_chunks_szad.
*/
- extent_tree_szad_remove(&dss_chunks_szad, prev);
- extent_tree_ad_remove(&dss_chunks_ad, prev);
-
- extent_tree_szad_remove(&dss_chunks_szad, node);
+ extent_tree_szad_remove(&reserve_chunks_szad, prev);
+ extent_tree_ad_remove(&reserve_chunks_ad, prev);
+
+ extent_tree_szad_remove(&reserve_chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
- extent_tree_szad_insert(&dss_chunks_szad, node);
+ extent_tree_szad_insert(&reserve_chunks_szad, node);
base_node_dealloc(prev);
}
+#endif
+
+#ifdef MALLOC_DECOMMIT
+ pages_decommit(chunk, size);
+#else
+ madvise(chunk, size, MADV_FREE);
+#endif
+
+ reserve_cur += size;
+ if (reserve_cur > reserve_max)
+ reserve_shrink();
return (node);
}
-static bool
-chunk_dealloc_dss(void *chunk, size_t size)
-{
-
- malloc_mutex_lock(&dss_mtx);
- if ((uintptr_t)chunk >= (uintptr_t)dss_base
- && (uintptr_t)chunk < (uintptr_t)dss_max) {
- extent_node_t *node;
-
- /* Try to coalesce with other unused chunks. */
- node = chunk_dealloc_dss_record(chunk, size);
- if (node != NULL) {
- chunk = node->addr;
- size = node->size;
- }
-
- /* Get the current end of the DSS. */
- dss_max = sbrk(0);
-
- /*
- * Try to shrink the DSS if this chunk is at the end of the
- * DSS. The sbrk() call here is subject to a race condition
- * with threads that use brk(2) or sbrk(2) directly, but the
- * alternative would be to leak memory for the sake of poorly
- * designed multi-threaded programs.
- */
- if ((void *)((uintptr_t)chunk + size) == dss_max
- && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
- /* Success. */
- dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);
-
- if (node != NULL) {
- extent_tree_szad_remove(&dss_chunks_szad, node);
- extent_tree_ad_remove(&dss_chunks_ad, node);
- base_node_dealloc(node);
- }
- malloc_mutex_unlock(&dss_mtx);
- } else {
- malloc_mutex_unlock(&dss_mtx);
-#ifdef MOZ_MEMORY_WINDOWS
- VirtualAlloc(chunk, size, MEM_RESET, PAGE_READWRITE);
-#elif (defined(MOZ_MEMORY_DARWIN))
- mmap(chunk, size, PROT_READ | PROT_WRITE, MAP_PRIVATE
- | MAP_ANON | MAP_FIXED, -1, 0);
-#else
- madvise(chunk, size, MADV_FREE);
-#endif
- }
-
- return (false);
- }
- malloc_mutex_unlock(&dss_mtx);
-
- return (true);
-}
-#endif
-
static void
chunk_dealloc_mmap(void *chunk, size_t size)
{
pages_unmap(chunk, size);
}
static void
chunk_dealloc(void *chunk, size_t size)
{
+ extent_node_t *node;
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
#ifdef MALLOC_STATS
stats_chunks.curchunks -= (size / chunksize);
#endif
-#ifdef MALLOC_DSS
- if (opt_dss) {
- if (chunk_dealloc_dss(chunk, size) == false)
- return;
- }
-
- if (opt_mmap)
-#endif
+ /* Try to merge chunk into the reserve. */
+ node = chunk_dealloc_reserve(chunk, size);
+ if (node == NULL)
chunk_dealloc_mmap(chunk, size);
}
/*
* End chunk management functions.
*/
/******************************************************************************/
/*
@@ -2956,17 +2952,17 @@ arena_run_split(arena_t *arena, arena_ru
CHUNK_MAP_DECOMMITTED;
}
pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
<< pagesize_2pow)), (j << pagesize_2pow));
# ifdef MALLOC_STATS
arena->stats.ncommit++;
# endif
- }
+ } else /* No need to zero since commit zeros. */
#endif
/* Zero if necessary. */
if (zero) {
if ((chunk->map[run_ind + i] & CHUNK_MAP_UNTOUCHED)
== 0) {
VALGRIND_MALLOCLIKE_BLOCK((void *)((uintptr_t)
chunk + ((run_ind + i) << pagesize_2pow)),
@@ -3009,163 +3005,183 @@ arena_run_split(arena_t *arena, arena_ru
arena_chunk_node_dealloc(chunk, nodeB);
}
chunk->pages_used += need_pages;
if (chunk->ndirty == 0 && old_ndirty > 0)
arena_chunk_tree_dirty_remove(&arena->chunks_dirty, chunk);
}
-static arena_chunk_t *
-arena_chunk_alloc(arena_t *arena)
+static void
+arena_chunk_init(arena_t *arena, arena_chunk_t *chunk)
{
- arena_chunk_t *chunk;
extent_node_t *node;
- if (arena->spare != NULL) {
- chunk = arena->spare;
- arena->spare = NULL;
- } else {
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, true);
- if (chunk == NULL)
- return (NULL);
- VALGRIND_MALLOCLIKE_BLOCK(chunk, (arena_chunk_header_npages <<
- pagesize_2pow), 0, false);
+ VALGRIND_MALLOCLIKE_BLOCK(chunk, (arena_chunk_header_npages <<
+ pagesize_2pow), 0, false);
#ifdef MALLOC_STATS
- arena->stats.mapped += chunksize;
-#endif
-
- chunk->arena = arena;
-
- arena_chunk_tree_all_insert(&arena->chunks_all, chunk);
-
- /*
- * Claim that no pages are in use, since the header is merely
- * overhead.
- */
- chunk->pages_used = 0;
- chunk->ndirty = 0;
-
- /*
- * Initialize the map to contain one maximal free untouched
- * run.
- */
- memset(chunk->map, (CHUNK_MAP_LARGE | CHUNK_MAP_POS_MASK),
- arena_chunk_header_npages);
- memset(&chunk->map[arena_chunk_header_npages],
- (CHUNK_MAP_UNTOUCHED
+ arena->stats.mapped += chunksize;
+#endif
+
+ chunk->arena = arena;
+
+ arena_chunk_tree_all_insert(&arena->chunks_all, chunk);
+
+ /*
+ * Claim that no pages are in use, since the header is merely overhead.
+ */
+ chunk->pages_used = 0;
+ chunk->ndirty = 0;
+
+ /* Initialize the map to contain one maximal free untouched run. */
+ memset(chunk->map, (CHUNK_MAP_LARGE | CHUNK_MAP_POS_MASK),
+ arena_chunk_header_npages);
+ memset(&chunk->map[arena_chunk_header_npages], (CHUNK_MAP_UNTOUCHED
#ifdef MALLOC_DECOMMIT
- | CHUNK_MAP_DECOMMITTED
-#endif
- ), (chunk_npages -
- arena_chunk_header_npages));
-
- /* Initialize the tree of unused extent nodes. */
- extent_tree_ad_new(&chunk->nodes);
- chunk->nodes_past = (extent_node_t *)QUANTUM_CEILING(
- (uintptr_t)&chunk->map[chunk_npages]);
+ | CHUNK_MAP_DECOMMITTED
+#endif
+ ), (chunk_npages -
+ arena_chunk_header_npages));
+
+ /* Initialize the tree of unused extent nodes. */
+ extent_tree_ad_new(&chunk->nodes);
+ chunk->nodes_past = (extent_node_t *)QUANTUM_CEILING(
+ (uintptr_t)&chunk->map[chunk_npages]);
#ifdef MALLOC_DECOMMIT
- /*
- * Start out decommitted, in order to force a closer
- * correspondence between dirty pages and committed untouched
- * pages.
- */
- pages_decommit((void *)((uintptr_t)chunk +
- (arena_chunk_header_npages << pagesize_2pow)),
- ((chunk_npages - arena_chunk_header_npages) <<
- pagesize_2pow));
+ /*
+ * Start out decommitted, in order to force a closer correspondence
+ * between dirty pages and committed untouched pages.
+ */
+ pages_decommit((void *)((uintptr_t)chunk +
+ (arena_chunk_header_npages << pagesize_2pow)),
+ ((chunk_npages - arena_chunk_header_npages) <<
+ pagesize_2pow));
# ifdef MALLOC_STATS
- arena->stats.ndecommit++;
- arena->stats.decommitted += (chunk_npages -
- arena_chunk_header_npages);
+ arena->stats.ndecommit++;
+ arena->stats.decommitted += (chunk_npages - arena_chunk_header_npages);
# endif
#endif
- }
/* Insert the run into the runs_avail_* red-black trees. */
node = arena_chunk_node_alloc(chunk);
node->addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
pagesize_2pow));
node->size = chunksize - (arena_chunk_header_npages << pagesize_2pow);
extent_tree_szad_insert(&arena->runs_avail_szad, node);
extent_tree_ad_insert(&arena->runs_avail_ad, node);
-
- return (chunk);
}
static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
extent_node_t *node, key;
- if (arena->spare != NULL) {
- arena_chunk_tree_all_remove(&chunk->arena->chunks_all,
- arena->spare);
- if (arena->spare->ndirty > 0) {
- arena_chunk_tree_dirty_remove(
- &chunk->arena->chunks_dirty, arena->spare);
- arena->ndirty -= arena->spare->ndirty;
- }
- VALGRIND_FREELIKE_BLOCK(arena->spare, 0);
- chunk_dealloc((void *)arena->spare, chunksize);
-#ifdef MALLOC_STATS
- arena->stats.mapped -= chunksize;
-#endif
- }
-
/*
* Remove run from the runs trees, regardless of whether this chunk
* will be cached, so that the arena does not use it. Dirty page
* flushing only uses the chunks_dirty tree, so leaving this chunk in
* the chunks_* trees is sufficient for that purpose.
*/
key.addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
pagesize_2pow));
node = extent_tree_ad_search(&arena->runs_avail_ad, &key);
assert(node != NULL);
extent_tree_szad_remove(&arena->runs_avail_szad, node);
extent_tree_ad_remove(&arena->runs_avail_ad, node);
arena_chunk_node_dealloc(chunk, node);
- arena->spare = chunk;
+ arena_chunk_tree_all_remove(&chunk->arena->chunks_all,
+ chunk);
+ if (chunk->ndirty > 0) {
+ arena_chunk_tree_dirty_remove(
+ &chunk->arena->chunks_dirty, chunk);
+ arena->ndirty -= chunk->ndirty;
+ }
+ VALGRIND_FREELIKE_BLOCK(chunk, 0);
+ chunk_dealloc((void *)chunk, chunksize);
+#ifdef MALLOC_STATS
+ arena->stats.mapped -= chunksize;
+#endif
}
static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool small, bool zero)
{
arena_chunk_t *chunk;
arena_run_t *run;
extent_node_t *node, key;
assert(size <= (chunksize - (arena_chunk_header_npages <<
pagesize_2pow)));
assert((size & pagesize_mask) == 0);
- /* Search the arena's chunks for the lowest best fit. */
- key.addr = NULL;
- key.size = size;
- node = extent_tree_szad_nsearch(&arena->runs_avail_szad, &key);
- if (node != NULL) {
- run = (arena_run_t *)node->addr;
+ chunk = NULL;
+ while (true) {
+ /* Search the arena's chunks for the lowest best fit. */
+ key.addr = NULL;
+ key.size = size;
+ node = extent_tree_szad_nsearch(&arena->runs_avail_szad, &key);
+ if (node != NULL) {
+ if (chunk != NULL)
+ chunk_dealloc(chunk, chunksize);
+ run = (arena_run_t *)node->addr;
+ arena_run_split(arena, run, size, small, zero);
+ return (run);
+ }
+
+ /*
+ * No usable runs. Create a new chunk from which to allocate
+ * the run.
+ */
+ if (chunk == NULL) {
+ uint64_t chunk_seq;
+
+ /*
+ * Record the chunk allocation sequence number in order
+ * to detect races.
+ */
+ arena->chunk_seq++;
+ chunk_seq = arena->chunk_seq;
+
+ /*
+ * Drop the arena lock while allocating a chunk, since
+ * reserve notifications may cause recursive
+ * allocation. Dropping the lock here opens an
+			 * allocation race, but we recover.
+ */
+ malloc_mutex_unlock(&arena->lock);
+ chunk = (arena_chunk_t *)chunk_alloc(chunksize, true,
+ true);
+ malloc_mutex_lock(&arena->lock);
+
+ /*
+ * If this thread raced with another such that multiple
+ * chunks were allocated, make sure that there is still
+ * inadequate space before using this chunk.
+ */
+ if (chunk_seq != arena->chunk_seq)
+ continue;
+
+ /*
+ * Check for an error *after* checking for a race,
+ * since a race could also cause a transient OOM
+ * condition.
+ */
+ if (chunk == NULL)
+ return (NULL);
+ }
+
+ arena_chunk_init(arena, chunk);
+ run = (arena_run_t *)((uintptr_t)chunk +
+ (arena_chunk_header_npages << pagesize_2pow));
+ /* Update page map. */
arena_run_split(arena, run, size, small, zero);
return (run);
}
-
- /*
- * No usable runs. Create a new chunk from which to allocate the run.
- */
- chunk = arena_chunk_alloc(arena);
- if (chunk == NULL)
- return (NULL);
- run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
- pagesize_2pow));
- /* Update page map. */
- arena_run_split(arena, run, size, small, zero);
- return (run);
}
static void
arena_purge(arena_t *arena)
{
arena_chunk_t *chunk;
size_t i, npages;
#ifdef MALLOC_DEBUG
@@ -4491,20 +4507,21 @@ arena_new(arena_t *arena)
if (malloc_spin_init(&arena->lock))
return (true);
#ifdef MALLOC_STATS
memset(&arena->stats, 0, sizeof(arena_stats_t));
#endif
+ arena->chunk_seq = 0;
+
/* Initialize chunks. */
arena_chunk_tree_all_new(&arena->chunks_all);
arena_chunk_tree_dirty_new(&arena->chunks_dirty);
- arena->spare = NULL;
arena->ndirty = 0;
extent_tree_szad_new(&arena->runs_avail_szad);
extent_tree_ad_new(&arena->runs_avail_ad);
extent_tree_ad_new(&arena->runs_alloced_ad);
#ifdef MALLOC_BALANCE
@@ -4622,17 +4639,17 @@ huge_malloc(size_t size, bool zero)
return (NULL);
}
/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc();
if (node == NULL)
return (NULL);
- ret = chunk_alloc(csize, zero);
+ ret = chunk_alloc(csize, zero, true);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
}
/* Insert node into huge. */
node->addr = ret;
#ifdef MALLOC_DECOMMIT
@@ -4690,16 +4707,17 @@ static void *
huge_palloc(size_t alignment, size_t size)
{
void *ret;
size_t alloc_size, chunk_size, offset;
#ifdef MALLOC_DECOMMIT
size_t psize;
#endif
extent_node_t *node;
+ int pfd;
/*
* This allocation requires alignment that is even larger than chunk
* alignment. This means that huge_malloc() isn't good enough.
*
* Allocate almost twice as many chunks as are demanded by the size or
* alignment, in order to assure the alignment can be achieved, then
* unmap leading and trailing chunks.
@@ -4713,73 +4731,53 @@ huge_palloc(size_t alignment, size_t siz
else
alloc_size = (alignment << 1) - chunksize;
/* Allocate an extent node with which to track the chunk. */
node = base_node_alloc();
if (node == NULL)
return (NULL);
-#ifdef MOZ_MEMORY_WINDOWS
/*
* Windows requires that there be a 1:1 mapping between VM
* allocation/deallocation operations. Therefore, take care here to
* acquire the final result via one mapping operation.
+ *
+ * The MALLOC_PAGEFILE code also benefits from this mapping algorithm,
+ * since it reduces the number of page files.
*/
+#ifdef MALLOC_PAGEFILE
+ if (opt_pagefile) {
+ pfd = pagefile_init(size);
+		if (pfd == -1) {
+			base_node_dealloc(node);
+			return (NULL);
+		}
+ } else
+#endif
+ pfd = -1;
do {
void *over;
- over = chunk_alloc(alloc_size, false);
+ over = chunk_alloc(alloc_size, false, false);
if (over == NULL) {
base_node_dealloc(node);
- return (NULL);
+ ret = NULL;
+ goto RETURN;
}
offset = (uintptr_t)over & (alignment - 1);
assert((offset & chunksize_mask) == 0);
assert(offset < alloc_size);
ret = (void *)((uintptr_t)over + offset);
chunk_dealloc(over, alloc_size);
- ret = pages_map(ret, chunk_size);
+ ret = pages_map(ret, chunk_size, pfd);
/*
* Failure here indicates a race with another thread, so try
* again.
*/
} while (ret == NULL);
-#else
- ret = chunk_alloc(alloc_size, false);
- if (ret == NULL) {
- base_node_dealloc(node);
- return (NULL);
- }
-
- offset = (uintptr_t)ret & (alignment - 1);
- assert((offset & chunksize_mask) == 0);
- assert(offset < alloc_size);
- if (offset == 0) {
- /* Trim trailing space. */
- chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
- - chunk_size);
- } else {
- size_t trailsize;
-
- /* Trim leading space. */
- chunk_dealloc(ret, alignment - offset);
-
- ret = (void *)((uintptr_t)ret + (alignment - offset));
-
- trailsize = alloc_size - (alignment - offset) - chunk_size;
- if (trailsize != 0) {
- /* Trim trailing space. */
- assert(trailsize < alloc_size);
- chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
- trailsize);
- }
- }
-#endif
/* Insert node into huge. */
node->addr = ret;
#ifdef MALLOC_DECOMMIT
psize = PAGE_CEILING(size);
node->size = psize;
#else
node->size = chunk_size;
@@ -4820,16 +4818,21 @@ huge_palloc(size_t alignment, size_t siz
else if (opt_zero)
# ifdef MALLOC_DECOMMIT
memset(ret, 0, psize);
# else
memset(ret, 0, chunk_size);
# endif
#endif
+RETURN:
+#ifdef MALLOC_PAGEFILE
+ if (pfd != -1)
+ pagefile_close(pfd);
+#endif
return (ret);
}
static void *
huge_ralloc(void *ptr, size_t size, size_t oldsize)
{
void *ret;
size_t copysize;
@@ -4930,22 +4933,20 @@ huge_dalloc(void *ptr)
#ifdef MALLOC_STATS
huge_ndalloc++;
huge_allocated -= node->size;
#endif
malloc_mutex_unlock(&huge_mtx);
/* Unmap chunk. */
-#ifdef MALLOC_DSS
#ifdef MALLOC_FILL
- if (opt_dss && opt_junk)
+ if (opt_junk)
memset(node->addr, 0x5a, node->size);
#endif
-#endif
#ifdef MALLOC_DECOMMIT
chunk_dealloc(node->addr, CHUNK_CEILING(node->size));
#else
chunk_dealloc(node->addr, node->size);
#endif
VALGRIND_FREELIKE_BLOCK(node->addr, 0);
base_node_dealloc(node);
@@ -5069,24 +5070,21 @@ malloc_print_stats(void)
#ifdef NDEBUG
"disabled",
#else
"enabled",
#endif
"\n", "");
_malloc_message("Boolean MALLOC_OPTIONS: ",
opt_abort ? "A" : "a", "", "");
-#ifdef MALLOC_DSS
- _malloc_message(opt_dss ? "D" : "d", "", "", "");
-#endif
#ifdef MALLOC_FILL
_malloc_message(opt_junk ? "J" : "j", "", "", "");
#endif
-#ifdef MALLOC_DSS
- _malloc_message(opt_mmap ? "M" : "m", "", "", "");
+#ifdef MALLOC_PAGEFILE
+ _malloc_message(opt_pagefile ? "o" : "O", "", "", "");
#endif
_malloc_message("P", "", "", "");
#ifdef MALLOC_UTRACE
_malloc_message(opt_utrace ? "U" : "u", "", "", "");
#endif
#ifdef MALLOC_SYSV
_malloc_message(opt_sysv ? "V" : "v", "", "", "");
#endif
@@ -5154,16 +5152,32 @@ malloc_print_stats(void)
#ifdef MOZ_MEMORY_WINDOWS
malloc_printf("Allocated: %lu, mapped: %lu\n",
allocated, mapped);
#else
malloc_printf("Allocated: %zu, mapped: %zu\n",
allocated, mapped);
#endif
+ malloc_mutex_lock(&reserve_mtx);
+ malloc_printf("Reserve: min "
+ "cur max\n");
+#ifdef MOZ_MEMORY_WINDOWS
+ malloc_printf(" %12lu %12lu %12lu\n",
+ CHUNK_CEILING(reserve_min) >> opt_chunk_2pow,
+ reserve_cur >> opt_chunk_2pow,
+ reserve_max >> opt_chunk_2pow);
+#else
+ malloc_printf(" %12zu %12zu %12zu\n",
+ CHUNK_CEILING(reserve_min) >> opt_chunk_2pow,
+ reserve_cur >> opt_chunk_2pow,
+ reserve_max >> opt_chunk_2pow);
+#endif
+ malloc_mutex_unlock(&reserve_mtx);
+
#ifdef MALLOC_BALANCE
malloc_printf("Arena balance reassignments: %llu\n",
nbalance);
#endif
/* Print chunk stats. */
{
chunk_stats_t chunks_stats;
@@ -5284,16 +5298,47 @@ malloc_init_hard(void)
/*
* We assume that pagesize is a power of 2 when calculating
* pagesize_mask and pagesize_2pow.
*/
assert(((result - 1) & result) == 0);
pagesize_mask = result - 1;
pagesize_2pow = ffs((int)result) - 1;
+#ifdef MALLOC_PAGEFILE
+ /*
+ * Determine where to create page files. It is insufficient to
+ * unconditionally use P_tmpdir (typically "/tmp"), since for some
+ * operating systems /tmp is a separate filesystem that is rather small.
+ * Therefore prefer, in order, the following locations:
+ *
+ * 1) MALLOC_TMPDIR
+ * 2) TMPDIR
+ * 3) P_tmpdir
+ */
+ {
+ char *s;
+ size_t slen;
+ static const char suffix[] = "/jemalloc.XXXXXX";
+
+ if ((s = getenv("MALLOC_TMPDIR")) == NULL && (s =
+ getenv("TMPDIR")) == NULL)
+ s = P_tmpdir;
+ slen = strlen(s);
+ if (slen + sizeof(suffix) > sizeof(pagefile_templ)) {
+ _malloc_message(_getprogname(),
+ ": (malloc) Page file path too long\n",
+ "", "");
+ abort();
+ }
+ memcpy(pagefile_templ, s, slen);
+ memcpy(&pagefile_templ[slen], suffix, sizeof(suffix));
+ }
+#endif
+
for (i = 0; i < 3; i++) {
unsigned j;
/* Get runtime configuration. */
switch (i) {
case 0:
#ifndef MOZ_MEMORY_WINDOWS
if ((linklen = readlink("/etc/malloc.conf", buf,
@@ -5384,35 +5429,31 @@ MALLOC_OUT:
#ifdef MALLOC_BALANCE
if (opt_balance_threshold == 0)
opt_balance_threshold = 1;
else if ((opt_balance_threshold << 1)
> opt_balance_threshold)
opt_balance_threshold <<= 1;
#endif
break;
- case 'd':
-#ifdef MALLOC_DSS
- opt_dss = false;
-#endif
- break;
- case 'D':
-#ifdef MALLOC_DSS
- opt_dss = true;
-#endif
- break;
case 'f':
opt_dirty_max >>= 1;
break;
case 'F':
if (opt_dirty_max == 0)
opt_dirty_max = 1;
else if ((opt_dirty_max << 1) != 0)
opt_dirty_max <<= 1;
break;
+ case 'g':
+ opt_reserve_range_lshift--;
+ break;
+ case 'G':
+ opt_reserve_range_lshift++;
+ break;
#ifdef MALLOC_FILL
case 'j':
opt_junk = false;
break;
case 'J':
opt_junk = true;
break;
#endif
@@ -5425,47 +5466,53 @@ MALLOC_OUT:
if (opt_chunk_2pow > pagesize_2pow + 1)
opt_chunk_2pow--;
break;
case 'K':
if (opt_chunk_2pow + 1 <
(sizeof(size_t) << 3))
opt_chunk_2pow++;
break;
- case 'm':
-#ifdef MALLOC_DSS
- opt_mmap = false;
-#endif
- break;
- case 'M':
-#ifdef MALLOC_DSS
- opt_mmap = true;
-#endif
- break;
case 'n':
opt_narenas_lshift--;
break;
case 'N':
opt_narenas_lshift++;
break;
+#ifdef MALLOC_PAGEFILE
+ case 'o':
+ /* Do not over-commit. */
+ opt_pagefile = true;
+ break;
+ case 'O':
+ /* Allow over-commit. */
+ opt_pagefile = false;
+ break;
+#endif
case 'p':
opt_print_stats = false;
break;
case 'P':
opt_print_stats = true;
break;
case 'q':
if (opt_quantum_2pow > QUANTUM_2POW_MIN)
opt_quantum_2pow--;
break;
case 'Q':
if (opt_quantum_2pow < pagesize_2pow -
1)
opt_quantum_2pow++;
break;
+ case 'r':
+ opt_reserve_min_lshift--;
+ break;
+ case 'R':
+ opt_reserve_min_lshift++;
+ break;
case 's':
if (opt_small_max_2pow >
QUANTUM_2POW_MIN)
opt_small_max_2pow--;
break;
case 'S':
if (opt_small_max_2pow < pagesize_2pow
- 1)
@@ -5513,22 +5560,16 @@ MALLOC_OUT:
"in malloc options: '", cbuf,
"'\n");
}
}
}
}
}
-#ifdef MALLOC_DSS
- /* Make sure that there is some method for acquiring memory. */
- if (opt_dss == false && opt_mmap == false)
- opt_mmap = true;
-#endif
-
/* Take care to call atexit() only once. */
if (opt_print_stats) {
#ifndef MOZ_MEMORY_WINDOWS
/* Print statistics at exit. */
atexit(malloc_print_stats);
#endif
}
@@ -5587,44 +5628,28 @@ MALLOC_OUT:
assert(quantum >= sizeof(void *));
assert(quantum <= pagesize);
assert(chunksize >= pagesize);
assert(quantum * 4 <= chunksize);
/* Initialize chunks data. */
malloc_mutex_init(&huge_mtx);
extent_tree_ad_new(&huge);
-#ifdef MALLOC_DSS
- malloc_mutex_init(&dss_mtx);
- dss_base = sbrk(0);
- dss_prev = dss_base;
- dss_max = dss_base;
- extent_tree_szad_new(&dss_chunks_szad);
- extent_tree_ad_new(&dss_chunks_ad);
-#endif
#ifdef MALLOC_STATS
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
#endif
/* Initialize base allocation data structures. */
#ifdef MALLOC_STATS
base_mapped = 0;
#endif
-#ifdef MALLOC_DSS
- /*
- * Allocate a base chunk here, since it doesn't actually have to be
- * chunk-aligned. Doing this before allocating any other chunks allows
- * the use of space that would otherwise be wasted.
- */
- if (opt_dss)
- base_pages_alloc(0);
-#endif
base_nodes = NULL;
+ base_reserve_regs = NULL;
malloc_mutex_init(&base_mtx);
#ifdef MOZ_MEMORY_NARENAS_DEFAULT_ONE
narenas = 1;
#else
if (ncpus > 1) {
/*
* For SMP systems, create four times as many arenas as there
@@ -5735,16 +5760,37 @@ MALLOC_OUT:
* called for other threads. The seed value doesn't really matter.
*/
#ifdef MALLOC_BALANCE
SPRN(balance, 42);
#endif
malloc_spin_init(&arenas_lock);
+ /*
+ * Configure and initialize the memory reserve. This needs to happen
+ * late during initialization, since chunks are allocated.
+ */
+ malloc_mutex_init(&reserve_mtx);
+ reserve_min = 0;
+ reserve_cur = 0;
+ reserve_max = chunksize * narenas;
+ if (RESERVE_RANGE_2POW_DEFAULT + opt_reserve_range_lshift >= 0) {
+ reserve_max += chunksize << (RESERVE_RANGE_2POW_DEFAULT +
+ opt_reserve_range_lshift);
+ }
+ ql_new(&reserve_regs);
+ reserve_seq = 0;
+ extent_tree_szad_new(&reserve_chunks_szad);
+ extent_tree_ad_new(&reserve_chunks_ad);
+ if (RESERVE_MIN_2POW_DEFAULT + opt_reserve_min_lshift >= 0) {
+ reserve_min_set(chunksize << (RESERVE_MIN_2POW_DEFAULT +
+ opt_reserve_min_lshift));
+ }
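+	/*
+	 * With default options (1MB chunks, RESERVE_MIN_2POW_DEFAULT == 1, and
+	 * RESERVE_RANGE_2POW_DEFAULT == 5 when MALLOC_PAGEFILE is defined),
+	 * this yields reserve_min == 2MB and reserve_max == (narenas + 32) MB.
+	 */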
+
malloc_initialized = true;
#ifndef MOZ_MEMORY_WINDOWS
malloc_mutex_unlock(&init_lock);
#endif
return (false);
}
/* XXX Why not just expose malloc_print_stats()? */
@@ -6066,31 +6112,21 @@ jemalloc_stats(jemalloc_stats_t *stats)
size_t i;
assert(stats != NULL);
/*
* Gather runtime settings.
*/
stats->opt_abort = opt_abort;
- stats->opt_dss =
-#ifdef MALLOC_DSS
- opt_dss ? true :
-#endif
- false;
stats->opt_junk =
#ifdef MALLOC_FILL
opt_junk ? true :
#endif
false;
- stats->opt_mmap =
-#ifdef MALLOC_DSS
- opt_mmap == false ? false :
-#endif
- true;
stats->opt_utrace =
#ifdef MALLOC_UTRACE
opt_utrace ? true :
#endif
false;
stats->opt_sysv =
#ifdef MALLOC_SYSV
opt_sysv ? true :
@@ -6115,16 +6151,22 @@ jemalloc_stats(jemalloc_stats_t *stats)
#endif
;
stats->quantum = quantum;
stats->small_max = small_max;
stats->large_max = arena_maxclass;
stats->chunksize = chunksize;
stats->dirty_max = opt_dirty_max;
+ malloc_mutex_lock(&reserve_mtx);
+ stats->reserve_min = reserve_min;
+ stats->reserve_max = reserve_max;
+ stats->reserve_cur = reserve_cur;
+ malloc_mutex_unlock(&reserve_mtx);
+
/*
* Gather current memory usage statistics.
*/
stats->mapped = 0;
stats->committed = 0;
stats->allocated = 0;
stats->dirty = 0;
@@ -6172,16 +6214,457 @@ jemalloc_stats(jemalloc_stats_t *stats)
}
}
#ifndef MALLOC_DECOMMIT
stats->committed = stats->mapped;
#endif
}
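+/*
+ * Infallible allocation wrappers. These never return NULL: on OOM they
+ * repeatedly notify registered reserve callbacks via reserve_crit() and
+ * retry, and they abort through reserve_fail() once no callback can be
+ * notified.
+ */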
+void *
+xmalloc(size_t size)
+{
+ void *ret;
+
+ if (malloc_init())
+ reserve_fail(size, "xmalloc");
+
+ if (size == 0) {
+#ifdef MALLOC_SYSV
+ if (opt_sysv == false)
+#endif
+ size = 1;
+#ifdef MALLOC_SYSV
+ else {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in xmalloc(): ",
+ "invalid size 0", "\n");
+ abort();
+ }
+#endif
+ }
+
+ ret = imalloc(size);
+ if (ret == NULL) {
+ uint64_t seq = 0;
+
+ do {
+ seq = reserve_crit(size, "xmalloc", seq);
+ ret = imalloc(size);
+ } while (ret == NULL);
+ }
+
+ UTRACE(0, size, ret);
+ return (ret);
+}
+
+void *
+xcalloc(size_t num, size_t size)
+{
+ void *ret;
+ size_t num_size;
+
+ num_size = num * size;
+ if (malloc_init())
+ reserve_fail(num_size, "xcalloc");
+
+ if (num_size == 0) {
+#ifdef MALLOC_SYSV
+ if ((opt_sysv == false) && ((num == 0) || (size == 0)))
+#endif
+ num_size = 1;
+#ifdef MALLOC_SYSV
+ else {
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in xcalloc(): ",
+ "invalid size 0", "\n");
+ abort();
+ }
+#endif
+ /*
+ * Try to avoid division here. We know that it isn't possible to
+ * overflow during multiplication if neither operand uses any of the
+ * most significant half of the bits in a size_t.
+ */
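+ /*
+ * For example, with a 32-bit size_t the mask above is 0xffff0000:
+ * num = size = 1000 leaves it clear, so the division is skipped,
+ * while num = size = 70000 (whose product wraps to 605032704) sets
+ * it, and 605032704 / 70000 != 70000 catches the overflow.
+ */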
+ } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
+ && (num_size / size != num)) {
+ /* size_t overflow. */
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in xcalloc(): ",
+ "size overflow", "\n");
+ abort();
+ }
+
+ ret = icalloc(num_size);
+ if (ret == NULL) {
+ uint64_t seq = 0;
+
+ do {
+ seq = reserve_crit(num_size, "xcalloc", seq);
+ ret = icalloc(num_size);
+ } while (ret == NULL);
+ }
+
+ UTRACE(0, num_size, ret);
+ return (ret);
+}
+
+void *
+xrealloc(void *ptr, size_t size)
+{
+ void *ret;
+
+ if (size == 0) {
+#ifdef MALLOC_SYSV
+ if (opt_sysv == false)
+#endif
+ size = 1;
+#ifdef MALLOC_SYSV
+ else {
+ if (ptr != NULL)
+ idalloc(ptr);
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in xrealloc(): ",
+ "invalid size 0", "\n");
+ abort();
+ }
+#endif
+ }
+
+ if (ptr != NULL) {
+ assert(malloc_initialized);
+
+ ret = iralloc(ptr, size);
+ if (ret == NULL) {
+ uint64_t seq = 0;
+
+ do {
+ seq = reserve_crit(size, "xrealloc", seq);
+ ret = iralloc(ptr, size);
+ } while (ret == NULL);
+ }
+ } else {
+ if (malloc_init())
+ reserve_fail(size, "xrealloc");
+
+ ret = imalloc(size);
+ if (ret == NULL) {
+ uint64_t seq = 0;
+
+ do {
+ seq = reserve_crit(size, "xrealloc", seq);
+ ret = imalloc(size);
+ } while (ret == NULL);
+ }
+ }
+
+ UTRACE(ptr, size, ret);
+ return (ret);
+}
+
+void *
+xmemalign(size_t alignment, size_t size)
+{
+ void *ret;
+
+ assert(((alignment - 1) & alignment) == 0 && alignment >=
+ sizeof(void *));
+
+ if (malloc_init())
+ reserve_fail(size, "xmemalign");
+
+ ret = ipalloc(alignment, size);
+ if (ret == NULL) {
+ uint64_t seq = 0;
+
+ do {
+ seq = reserve_crit(size, "xmemalign", seq);
+ ret = ipalloc(alignment, size);
+ } while (ret == NULL);
+ }
+
+ UTRACE(0, size, ret);
+ return (ret);
+}
+
+static void
+reserve_shrink(void)
+{
+ extent_node_t *node;
+
+ assert(reserve_cur > reserve_max);
+#ifdef MALLOC_DEBUG
+ {
+ extent_node_t *node;
+ size_t reserve_size;
+
+ reserve_size = 0;
+ rb_foreach_begin(extent_node_t, link_szad, &reserve_chunks_szad,
+ node) {
+ reserve_size += node->size;
+ } rb_foreach_end(extent_node_t, link_szad, &reserve_chunks_szad,
+ node)
+ assert(reserve_size == reserve_cur);
+
+ reserve_size = 0;
+ rb_foreach_begin(extent_node_t, link_ad, &reserve_chunks_ad,
+ node) {
+ reserve_size += node->size;
+ } rb_foreach_end(extent_node_t, link_ad, &reserve_chunks_ad,
+ node)
+ assert(reserve_size == reserve_cur);
+ }
+#endif
+
+ /* Discard chunks until the reserve no longer exceeds the size limit. */
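+ /*
+ * Regions are visited in reverse address order. Without
+ * MALLOC_DECOMMIT the final region may be split so that only its
+ * tail is unmapped; with MALLOC_DECOMMIT only whole regions are
+ * discarded.
+ */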
+ rb_foreach_reverse_begin(extent_node_t, link_ad, &reserve_chunks_ad,
+ node) {
+#ifndef MALLOC_DECOMMIT
+ if (node->size <= reserve_cur - reserve_max) {
+#endif
+ extent_node_t *tnode = extent_tree_ad_prev(
+ &reserve_chunks_ad, node);
+
+#ifdef MALLOC_DECOMMIT
+ assert(node->size <= reserve_cur - reserve_max);
+#endif
+
+ /* Discard the entire [multi-]chunk. */
+ extent_tree_szad_remove(&reserve_chunks_szad, node);
+ extent_tree_ad_remove(&reserve_chunks_ad, node);
+ reserve_cur -= node->size;
+ pages_unmap(node->addr, node->size);
+ base_node_dealloc(node);
+ if (reserve_cur == reserve_max)
+ break;
+
+ rb_foreach_reverse_prev(extent_node_t, link_ad,
+ extent_ad_comp, &reserve_chunks_ad, tnode);
+#ifndef MALLOC_DECOMMIT
+ } else {
+ /* Discard the end of the multi-chunk. */
+ extent_tree_szad_remove(&reserve_chunks_szad, node);
+ node->size -= reserve_cur - reserve_max;
+ extent_tree_szad_insert(&reserve_chunks_szad, node);
+ pages_unmap((void *)((uintptr_t)node->addr +
+ node->size), reserve_cur - reserve_max);
+ reserve_cur = reserve_max;
+ break;
+ }
+#endif
+ assert(reserve_cur > reserve_max);
+ } rb_foreach_reverse_end(extent_node_t, link_ad, &reserve_chunks_ad,
+ node)
+}
+
+/* Send a condition notification. */
+static uint64_t
+reserve_notify(reserve_cnd_t cnd, size_t size, uint64_t seq)
+{
+ reserve_reg_t *reg;
+
+ /* seq is used to keep track of distinct condition-causing events. */
+ if (seq == 0) {
+ /* Allocate new sequence number. */
+ reserve_seq++;
+ seq = reserve_seq;
+ }
+
+ /*
+ * Advance to the next callback registration and send a notification,
+ * unless one has already been sent for this condition-causing event.
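+ * Advancing the head of reserve_regs before invoking the callback
+ * rotates the list, so successive notifications are spread
+ * round-robin across all registrations rather than always hitting
+ * the same one.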
+ */
+ reg = ql_first(&reserve_regs);
+ if (reg == NULL)
+ return (0);
+ ql_first(&reserve_regs) = ql_next(&reserve_regs, reg, link);
+ if (reg->seq == seq)
+ return (0);
+ reg->seq = seq;
+ malloc_mutex_unlock(&reserve_mtx);
+ reg->cb(reg->ctx, cnd, size);
+ malloc_mutex_lock(&reserve_mtx);
+
+ return (seq);
+}
+
+/* Allocation failure due to OOM. Try to free some memory via callbacks. */
+static uint64_t
+reserve_crit(size_t size, const char *fname, uint64_t seq)
+{
+
+ /*
+ * Send one condition notification. Iteration is handled by the
+ * caller of this function.
+ */
+ malloc_mutex_lock(&reserve_mtx);
+ seq = reserve_notify(RESERVE_CND_CRIT, size, seq);
+ malloc_mutex_unlock(&reserve_mtx);
+
+ /* If no notification could be sent, then no further recourse exists. */
+ if (seq == 0)
+ reserve_fail(size, fname);
+
+ return (seq);
+}
+
+/* Permanent allocation failure due to OOM. */
+static void
+reserve_fail(size_t size, const char *fname)
+{
+ uint64_t seq = 0;
+
+ /* Send fail notifications. */
+ malloc_mutex_lock(&reserve_mtx);
+ do {
+ seq = reserve_notify(RESERVE_CND_FAIL, size, seq);
+ } while (seq != 0);
+ malloc_mutex_unlock(&reserve_mtx);
+
+ /* Terminate the application. */
+ _malloc_message(_getprogname(),
+ ": (malloc) Error in ", fname, "(): out of memory\n");
+ abort();
+}
+
+bool
+reserve_cb_register(reserve_cb_t *cb, void *ctx)
+{
+ reserve_reg_t *reg = base_reserve_reg_alloc();
+ if (reg == NULL)
+ return (true);
+
+ ql_elm_new(reg, link);
+ reg->cb = cb;
+ reg->ctx = ctx;
+ reg->seq = 0;
+
+ malloc_mutex_lock(&reserve_mtx);
+ ql_head_insert(&reserve_regs, reg, link);
+ malloc_mutex_unlock(&reserve_mtx);
+
+ return (false);
+}
+
+bool
+reserve_cb_unregister(reserve_cb_t *cb, void *ctx)
+{
+ reserve_reg_t *reg = NULL;
+
+ malloc_mutex_lock(&reserve_mtx);
+ ql_foreach(reg, &reserve_regs, link) {
+ if (reg->cb == cb && reg->ctx == ctx) {
+ ql_remove(&reserve_regs, reg, link);
+ break;
+ }
+ }
+ malloc_mutex_unlock(&reserve_mtx);
+
+ if (reg != NULL) {
+ base_reserve_reg_dealloc(reg);
+ return (false);
+ }
+ return (true);
+}
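+
+/*
+ * Illustrative usage sketch for the reserve callback API; the names
+ * my_reserve_cb, my_ctx and flush_my_caches are hypothetical, and the
+ * reserve_cb_t prototype is inferred from the call site in
+ * reserve_notify():
+ *
+ *	static void
+ *	my_reserve_cb(void *ctx, reserve_cnd_t cnd, size_t size)
+ *	{
+ *		if (cnd == RESERVE_CND_CRIT)
+ *			flush_my_caches(ctx);
+ *	}
+ *
+ *	...
+ *	reserve_cb_register(my_reserve_cb, my_ctx);
+ *	...
+ *	reserve_cb_unregister(my_reserve_cb, my_ctx);
+ */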
+
+size_t
+reserve_cur_get(void)
+{
+ size_t ret;
+
+ malloc_mutex_lock(&reserve_mtx);
+ ret = reserve_cur;
+ malloc_mutex_unlock(&reserve_mtx);
+
+ return (ret);
+}
+
+size_t
+reserve_min_get(void)
+{
+ size_t ret;
+
+ malloc_mutex_lock(&reserve_mtx);
+ ret = reserve_min;
+ malloc_mutex_unlock(&reserve_mtx);
+
+ return (ret);
+}
+
+bool
+reserve_min_set(size_t min)
+{
+
+ min = CHUNK_CEILING(min);
+
+ malloc_mutex_lock(&reserve_mtx);
+ /* Keep |reserve_max - reserve_min| the same. */
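+ /*
+ * For example, raising the minimum by two chunks also raises the
+ * maximum by two chunks (and lowering it shrinks both), so the
+ * allowed slack between the two limits is preserved.
+ */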
+ if (min < reserve_min) {
+ reserve_max -= reserve_min - min;
+ reserve_min = min;
+ } else {
+ /* Protect against wrap-around. */
+ if (reserve_max + min - reserve_min < reserve_max) {
+ reserve_min = SIZE_T_MAX - (reserve_max - reserve_min)
+ - chunksize + 1;
+ reserve_max = SIZE_T_MAX - chunksize + 1;
+ } else {
+ reserve_max += min - reserve_min;
+ reserve_min = min;
+ }
+ }
+
+ /* Resize the reserve if necessary. */
+ if (reserve_cur < reserve_min) {
+ size_t size = reserve_min - reserve_cur;
+
+ /* Force the reserve to grow by allocating/deallocating. */
+ malloc_mutex_unlock(&reserve_mtx);
+#ifdef MALLOC_DECOMMIT
+ {
+ void **chunks;
+ size_t i, n;
+
+ n = size >> opt_chunk_2pow;
+ chunks = imalloc(n * sizeof(void *));
+ if (chunks == NULL)
+ return (true);
+ for (i = 0; i < n; i++) {
+ chunks[i] = huge_malloc(chunksize, false);
+ if (chunks[i] == NULL) {
+ size_t j;
+
+ for (j = 0; j < i; j++) {
+ huge_dalloc(chunks[j]);
+ }
+ idalloc(chunks);
+ return (true);
+ }
+ }
+ for (i = 0; i < n; i++)
+ huge_dalloc(chunks[i]);
+ idalloc(chunks);
+ }
+#else
+ {
+ void *x = huge_malloc(size, false);
+ if (x == NULL) {
+ return (true);
+ }
+ huge_dalloc(x);
+ }
+#endif
+ } else if (reserve_cur > reserve_max) {
+ reserve_shrink();
+ malloc_mutex_unlock(&reserve_mtx);
+ } else
+ malloc_mutex_unlock(&reserve_mtx);
+
+ return (false);
+}
+
#ifdef MOZ_MEMORY_WINDOWS
void*
_recalloc(void *ptr, size_t count, size_t size)
{
size_t oldsize = (ptr != NULL) ? isalloc(ptr) : 0;
size_t newsize = count * size;
/*
@@ -6217,17 +6700,16 @@ void*
size_t
_msize(const void *ptr)
{
return malloc_usable_size(ptr);
}
#endif
-
/*
* End non-standard functions.
*/
/******************************************************************************/
/*
* Begin library-private functions, used by threading libraries for protection
* of malloc during fork(). These functions are only called if the program is
* running in threaded mode, so there is no need to check whether the program
@@ -6246,33 +6728,25 @@ void
if (arenas[i] != NULL)
malloc_spin_lock(&arenas[i]->lock);
}
malloc_spin_unlock(&arenas_lock);
malloc_mutex_lock(&base_mtx);
malloc_mutex_lock(&huge_mtx);
-
-#ifdef MALLOC_DSS
- malloc_mutex_lock(&dss_mtx);
-#endif
}
void
_malloc_postfork(void)
{
unsigned i;
/* Release all mutexes, now that fork() has completed. */
-#ifdef MALLOC_DSS
- malloc_mutex_unlock(&dss_mtx);
-#endif
-
malloc_mutex_unlock(&huge_mtx);
malloc_mutex_unlock(&base_mtx);
malloc_spin_lock(&arenas_lock);
for (i = 0; i < narenas; i++) {
if (arenas[i] != NULL)
malloc_spin_unlock(&arenas[i]->lock);