Bug 515211: remove memory reserve feature from jemalloc, r=jasone
☠☠ backed out by fdc781e5774d ☠☠
author      David Mandelin <dmandelin@mozilla.com>
date        Fri, 30 Oct 2009 11:02:38 -0700
changeset   34411 109b74e8e90201e3061449731e10fd502fb32a27
parent      34410 d3dc1d0d635959d5271ca2c5e7a5a1280a9949b9
child       34412 b64cff03aa0a0790aaeb200de6e6e568360877e8
child       34413 fdc781e5774d92e5cd542b878ef2d33eceb00f4f
push id     1
push user   root
push date   Tue, 26 Apr 2011 22:38:44 +0000
reviewers   jasone
bugs        515211
milestone   1.9.3a1pre
Bug 515211: remove memory reserve feature from jemalloc, r=jasone
build/wince/shunt/mozce_shunt.def.in
memory/jemalloc/Makefile.in
memory/jemalloc/jemalloc.c
memory/jemalloc/jemalloc.h
--- a/build/wince/shunt/mozce_shunt.def.in
+++ b/build/wince/shunt/mozce_shunt.def.in
@@ -42,22 +42,13 @@ malloc
 valloc
 calloc
 realloc
 free
 posix_memalign
 memalign
 malloc_usable_size
 jemalloc_stats
-xmalloc
-xcalloc
-xrealloc
-xmemalign
-reserve_cb_register
-reserve_cb_unregister
-reserve_cur_get
-reserve_min_get
-reserve_min_set
 _strdup
 _wcsdup
 _strndup
 _wcsndup
 #endif
--- a/memory/jemalloc/Makefile.in
+++ b/memory/jemalloc/Makefile.in
@@ -87,19 +87,19 @@ endif
 	$(foreach i,dll mt xdll xmt,$(EXTRACT_CMD))
 # truly awful
 #XXX: get ed into mozillabuild, bug 415123
 	$(PERL) $(srcdir)/apply-ed-patches.pl $(srcdir)/$(CRTDIFF) \
 	$(CRT_OBJ_DIR) $(srcdir)/ed.exe
 
 $(MOZ_CRT_DLL): \
   $(CRT_OBJ_DIR)/jemalloc.c $(srcdir)/jemalloc.c $(srcdir)/jemalloc.h \
-  $(srcdir)/ql.h $(srcdir)/qr.h $(srcdir)/rb.h
-	cp $(srcdir)/jemalloc.c $(srcdir)/jemalloc.h $(srcdir)/ql.h \
-	$(srcdir)/qr.h $(srcdir)/rb.h $(CRT_OBJ_DIR)
+  $(srcdir)/rb.h
+	cp $(srcdir)/jemalloc.c $(srcdir)/jemalloc.h $(srcdir)/rb.h \
+	$(CRT_OBJ_DIR)
 # this pretty much sucks, but nmake and make don't play well together
 	$(PYTHON) $(srcdir)/build-crt.py $(CRT_OBJ_DIR)
 # XXX: these don't link right for some reason; the problem is likely
 # that  not all the standard symbols are exported; looks like MSFT
 # never updated the sample.def files; could probably fix if someone
 # were ever bored enough. :-)
 	rm -f $(addsuffix .lib, $(addprefix $(CRT_OBJ_DIR)/build/intel/, $(MOZ_CRT_STATIC_LIBS)))
 	rm -f $(addsuffix .pdb, $(addprefix $(CRT_OBJ_DIR)/build/intel/, $(MOZ_CRT_STATIC_LIBS)))
--- a/memory/jemalloc/jemalloc.c
+++ b/memory/jemalloc/jemalloc.c
@@ -364,34 +364,34 @@ typedef long ssize_t;
 #include "un-namespace.h"
 #endif
 
 #endif
 
 #include "jemalloc.h"
 
 #undef bool
-#define bool jemalloc_bool
+#ifdef _MSC_VER
+#define bool BOOL
+#endif
 
 #ifdef MOZ_MEMORY_DARWIN
 static const bool __isthreaded = true;
 #endif
 
 #if defined(MOZ_MEMORY_SOLARIS) && defined(MAP_ALIGN) && !defined(JEMALLOC_NEVER_USES_MAP_ALIGN)
 #define JEMALLOC_USES_MAP_ALIGN	 /* Required on Solaris 10. Might improve performance elsewhere. */
 #endif
 
 #if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
 #define JEMALLOC_USES_MAP_ALIGN	 /* Required for Windows CE < 6 */
 #endif
 
 #define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
 
-#include "qr.h"
-#include "ql.h"
 #ifdef MOZ_MEMORY_WINDOWS
    /* MSVC++ does not support C99 variable-length arrays. */
 #  define RB_NO_C99_VARARRAYS
 #endif
 #include "rb.h"
 
 #ifdef MALLOC_DEBUG
    /* Disable inlining to make debugging easier. */
@@ -485,28 +485,16 @@ static const bool __isthreaded = true;
 #if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
 #define	CHUNK_2POW_DEFAULT	21
 #else
 #define	CHUNK_2POW_DEFAULT	20
 #endif
 /* Maximum number of dirty pages per arena. */
 #define	DIRTY_MAX_DEFAULT	(1U << 10)
 
-/* Default reserve chunks. */
-#define	RESERVE_MIN_2POW_DEFAULT	1
-/*
- * Default range (in chunks) between reserve_min and reserve_max, in addition
- * to the mandatory one chunk per arena.
- */
-#ifdef MALLOC_PAGEFILE
-#  define RESERVE_RANGE_2POW_DEFAULT	5
-#else
-#  define RESERVE_RANGE_2POW_DEFAULT	0
-#endif
-
 /*
  * Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
  * so over-estimates are okay (up to a point), but under-estimates will
  * negatively affect performance.
  */
 #define	CACHELINE_2POW		6
 #define	CACHELINE		((size_t)(1U << CACHELINE_2POW))
 
@@ -761,40 +749,16 @@ struct malloc_rtree_s {
 	void			**root;
 	unsigned		height;
 	unsigned		level2bits[1]; /* Dynamically sized. */
 };
 #endif
 
 /******************************************************************************/
 /*
- * Reserve data structures.
- */
-
-/* Callback registration. */
-typedef struct reserve_reg_s reserve_reg_t;
-struct reserve_reg_s {
-	/* Linkage for list of all registered callbacks. */
-	ql_elm(reserve_reg_t)	link;
-
-	/* Callback function pointer. */
-	reserve_cb_t		*cb;
-
-	/* Opaque application data pointer. */
-	void			*ctx;
-
-	/*
-	 * Sequence number of condition notification most recently sent to this
-	 * callback.
-	 */
-	uint64_t		seq;
-};
-
-/******************************************************************************/
-/*
  * Arena data structures.
  */
 
 typedef struct arena_s arena_t;
 typedef struct arena_bin_s arena_bin_t;
 
 /* Each element of the chunk map corresponds to one page within the chunk. */
 typedef struct arena_chunk_map_s arena_chunk_map_t;
@@ -948,22 +912,16 @@ struct arena_s {
 #else
 	pthread_mutex_t		lock;
 #endif
 
 #ifdef MALLOC_STATS
 	arena_stats_t		stats;
 #endif
 
-	/*
-	 * Chunk allocation sequence number, used to detect races with other
-	 * threads during chunk allocation, and then discard unnecessary chunks.
-	 */
-	uint64_t		chunk_seq;
-
 	/* Tree of dirty-page-containing chunks this arena manages. */
 	arena_chunk_tree_t	chunks_dirty;
 
 	/*
 	 * In order to avoid rapid chunk allocation/deallocation when an arena
 	 * oscillates right on the cusp of needing a new chunk, cache the most
 	 * recently freed chunk.  The spare is left in the arena's chunk trees
 	 * until it is deleted.
@@ -1071,53 +1029,20 @@ static extent_tree_t	huge;
 
 #ifdef MALLOC_STATS
 /* Huge allocation statistics. */
 static uint64_t		huge_nmalloc;
 static uint64_t		huge_ndalloc;
 static size_t		huge_allocated;
 #endif
 
-/****************/
-/*
- * Memory reserve.
- */
-
 #ifdef MALLOC_PAGEFILE
 static char		pagefile_templ[PATH_MAX];
 #endif
 
-/* Protects reserve-related data structures. */
-static malloc_mutex_t	reserve_mtx;
-
-/*
- * Bounds on acceptable reserve size, and current reserve size.  Reserve
- * depletion may cause (reserve_cur < reserve_min).
- */
-static size_t		reserve_min;
-static size_t		reserve_cur;
-static size_t		reserve_max;
-
-/* List of registered callbacks. */
-static ql_head(reserve_reg_t) reserve_regs;
-
-/*
- * Condition notification sequence number, used to determine whether all
- * registered callbacks have been notified of the most current condition.
- */
-static uint64_t		reserve_seq;
-
-/*
- * Trees of chunks currently in the memory reserve.  Depending on function,
- * different tree orderings are needed, which is why there are two trees with
- * the same contents.
- */
-static extent_tree_t	reserve_chunks_szad;
-static extent_tree_t	reserve_chunks_ad;
-
 /****************************/
 /*
  * base (internal allocation).
  */
 
 /*
  * Current pages that are being used for internal memory allocations.  These
  * pages are carved up in cacheline-size quanta, so that there is no chance of
@@ -1125,34 +1050,32 @@ static extent_tree_t	reserve_chunks_ad;
  */
 static void		*base_pages;
 static void		*base_next_addr;
 #ifdef MALLOC_DECOMMIT
 static void		*base_next_decommitted;
 #endif
 static void		*base_past_addr; /* Addr immediately past base_pages. */
 static extent_node_t	*base_nodes;
-static reserve_reg_t	*base_reserve_regs;
 static malloc_mutex_t	base_mtx;
 #ifdef MALLOC_STATS
 static size_t		base_mapped;
 #endif
 
 /********/
 /*
  * Arenas.
  */
 
 /*
  * Arenas that are used to service external requests.  Not all elements of the
  * arenas array are necessarily used; arenas are created lazily as needed.
  */
 static arena_t		**arenas;
 static unsigned		narenas;
-static unsigned		narenas_2pow;
 #ifndef NO_TLS
 #  ifdef MALLOC_BALANCE
 static unsigned		narenas_2pow;
 #  else
 static unsigned		next_arena;
 #  endif
 #endif
 #ifdef MOZ_MEMORY
@@ -1196,18 +1119,16 @@ static bool	opt_junk = false;
 static size_t	opt_dirty_max = DIRTY_MAX_DEFAULT;
 #ifdef MALLOC_BALANCE
 static uint64_t	opt_balance_threshold = BALANCE_THRESHOLD_DEFAULT;
 #endif
 static bool	opt_print_stats = false;
 static size_t	opt_quantum_2pow = QUANTUM_2POW_MIN;
 static size_t	opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
 static size_t	opt_chunk_2pow = CHUNK_2POW_DEFAULT;
-static int	opt_reserve_min_lshift = 0;
-static int	opt_reserve_range_lshift = 0;
 #ifdef MALLOC_PAGEFILE
 static bool	opt_pagefile = false;
 #endif
 #ifdef MALLOC_UTRACE
 static bool	opt_utrace = false;
 #endif
 #ifdef MALLOC_SYSV
 static bool	opt_sysv = false;
@@ -1257,31 +1178,27 @@ static void	wrtmessage(const char *p1, c
 static void	malloc_printf(const char *format, ...);
 #endif
 static bool	base_pages_alloc_mmap(size_t minsize);
 static bool	base_pages_alloc(size_t minsize);
 static void	*base_alloc(size_t size);
 static void	*base_calloc(size_t number, size_t size);
 static extent_node_t *base_node_alloc(void);
 static void	base_node_dealloc(extent_node_t *node);
-static reserve_reg_t *base_reserve_reg_alloc(void);
-static void	base_reserve_reg_dealloc(reserve_reg_t *reg);
 #ifdef MALLOC_STATS
 static void	stats_print(arena_t *arena);
 #endif
 static void	*pages_map(void *addr, size_t size, int pfd);
 static void	pages_unmap(void *addr, size_t size);
 static void	*chunk_alloc_mmap(size_t size, bool pagefile);
 #ifdef MALLOC_PAGEFILE
 static int	pagefile_init(size_t size);
 static void	pagefile_close(int pfd);
 #endif
-static void	*chunk_recycle_reserve(size_t size, bool zero);
 static void	*chunk_alloc(size_t size, bool zero, bool pagefile);
-static extent_node_t *chunk_dealloc_reserve(void *chunk, size_t size);
 static void	chunk_dealloc_mmap(void *chunk, size_t size);
 static void	chunk_dealloc(void *chunk, size_t size);
 #ifndef NO_TLS
 static arena_t	*choose_arena_hard(void);
 #endif
 static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
     bool large, bool zero);
 static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
@@ -1318,20 +1235,16 @@ static void	*huge_malloc(size_t size, bo
 static void	*huge_palloc(size_t alignment, size_t size);
 static void	*huge_ralloc(void *ptr, size_t size, size_t oldsize);
 static void	huge_dalloc(void *ptr);
 static void	malloc_print_stats(void);
 #ifndef MOZ_MEMORY_WINDOWS
 static
 #endif
 bool		malloc_init_hard(void);
-static void	reserve_shrink(void);
-static uint64_t	reserve_notify(reserve_cnd_t cnd, size_t size, uint64_t seq);
-static uint64_t	reserve_crit(size_t size, const char *fname, uint64_t seq);
-static void	reserve_fail(size_t size, const char *fname);
 
 void		_malloc_prefork(void);
 void		_malloc_postfork(void);
 
 /*
  * End function prototypes.
  */
 /******************************************************************************/
@@ -1942,48 +1855,16 @@ base_node_dealloc(extent_node_t *node)
 	malloc_mutex_lock(&base_mtx);
 	VALGRIND_FREELIKE_BLOCK(node, 0);
 	VALGRIND_MALLOCLIKE_BLOCK(node, sizeof(extent_node_t *), 0, false);
 	*(extent_node_t **)node = base_nodes;
 	base_nodes = node;
 	malloc_mutex_unlock(&base_mtx);
 }
 
-static reserve_reg_t *
-base_reserve_reg_alloc(void)
-{
-	reserve_reg_t *ret;
-
-	malloc_mutex_lock(&base_mtx);
-	if (base_reserve_regs != NULL) {
-		ret = base_reserve_regs;
-		base_reserve_regs = *(reserve_reg_t **)ret;
-		VALGRIND_FREELIKE_BLOCK(ret, 0);
-		VALGRIND_MALLOCLIKE_BLOCK(ret, sizeof(reserve_reg_t), 0, false);
-		malloc_mutex_unlock(&base_mtx);
-	} else {
-		malloc_mutex_unlock(&base_mtx);
-		ret = (reserve_reg_t *)base_alloc(sizeof(reserve_reg_t));
-	}
-
-	return (ret);
-}
-
-static void
-base_reserve_reg_dealloc(reserve_reg_t *reg)
-{
-
-	malloc_mutex_lock(&base_mtx);
-	VALGRIND_FREELIKE_BLOCK(reg, 0);
-	VALGRIND_MALLOCLIKE_BLOCK(reg, sizeof(reserve_reg_t *), 0, false);
-	*(reserve_reg_t **)reg = base_reserve_regs;
-	base_reserve_regs = reg;
-	malloc_mutex_unlock(&base_mtx);
-}
-
 /******************************************************************************/
 
 #ifdef MALLOC_STATS
 static void
 stats_print(arena_t *arena)
 {
 	unsigned i, gap_start;
 
@@ -2618,125 +2499,23 @@ pagefile_close(int pfd)
 		    ": (malloc) Error in close(): ", buf, "\n");
 		if (opt_abort)
 			abort();
 	}
 }
 #endif
 
 static void *
-chunk_recycle_reserve(size_t size, bool zero)
-{
-	extent_node_t *node, key;
-
-#ifdef MALLOC_DECOMMIT
-	if (size != chunksize)
-		return (NULL);
-#endif
-
-	key.addr = NULL;
-	key.size = size;
-	malloc_mutex_lock(&reserve_mtx);
-	node = extent_tree_szad_nsearch(&reserve_chunks_szad, &key);
-	if (node != NULL) {
-		void *ret = node->addr;
-
-		/* Remove node from the tree. */
-		extent_tree_szad_remove(&reserve_chunks_szad, node);
-#ifndef MALLOC_DECOMMIT
-		if (node->size == size) {
-#else
-			assert(node->size == size);
-#endif
-			extent_tree_ad_remove(&reserve_chunks_ad, node);
-			base_node_dealloc(node);
-#ifndef MALLOC_DECOMMIT
-		} else {
-			/*
-			 * Insert the remainder of node's address range as a
-			 * smaller chunk.  Its position within reserve_chunks_ad
-			 * does not change.
-			 */
-			assert(node->size > size);
-			node->addr = (void *)((uintptr_t)node->addr + size);
-			node->size -= size;
-			extent_tree_szad_insert(&reserve_chunks_szad, node);
-		}
-#endif
-		reserve_cur -= size;
-		/*
-		 * Try to replenish the reserve if this allocation depleted it.
-		 */
-#ifndef MALLOC_DECOMMIT
-		if (reserve_cur < reserve_min) {
-			size_t diff = reserve_min - reserve_cur;
-#else
-		while (reserve_cur < reserve_min) {
-#  define diff chunksize
-#endif
-			void *chunk;
-
-			malloc_mutex_unlock(&reserve_mtx);
-			chunk = chunk_alloc_mmap(diff, true);
-			malloc_mutex_lock(&reserve_mtx);
-			if (chunk == NULL) {
-				uint64_t seq = 0;
-
-				do {
-					seq = reserve_notify(RESERVE_CND_LOW,
-					    size, seq);
-					if (seq == 0)
-						goto MALLOC_OUT;
-				} while (reserve_cur < reserve_min);
-			} else {
-				extent_node_t *node;
-
-				node = chunk_dealloc_reserve(chunk, diff);
-				if (node == NULL) {
-					uint64_t seq = 0;
-
-					pages_unmap(chunk, diff);
-					do {
-						seq = reserve_notify(
-						    RESERVE_CND_LOW, size, seq);
-						if (seq == 0)
-							goto MALLOC_OUT;
-					} while (reserve_cur < reserve_min);
-				}
-			}
-		}
-MALLOC_OUT:
-		malloc_mutex_unlock(&reserve_mtx);
-
-#ifdef MALLOC_DECOMMIT
-		pages_commit(ret, size);
-#  undef diff
-#else
-		if (zero)
-			memset(ret, 0, size);
-#endif
-		return (ret);
-	}
-	malloc_mutex_unlock(&reserve_mtx);
-
-	return (NULL);
-}
-
-static void *
 chunk_alloc(size_t size, bool zero, bool pagefile)
 {
 	void *ret;
 
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-	ret = chunk_recycle_reserve(size, zero);
-	if (ret != NULL)
-		goto RETURN;
-
 	ret = chunk_alloc_mmap(size, pagefile);
 	if (ret != NULL) {
 		goto RETURN;
 	}
 
 	/* All strategies for allocation failed. */
 	ret = NULL;
 RETURN:
@@ -2755,117 +2534,40 @@ RETURN:
 		}
 	}
 #endif
 
 	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
 }
 
-static extent_node_t *
-chunk_dealloc_reserve(void *chunk, size_t size)
-{
-	extent_node_t *node;
-
-#ifdef MALLOC_DECOMMIT
-	if (size != chunksize)
-		return (NULL);
-#else
-	extent_node_t *prev, key;
-
-	key.addr = (void *)((uintptr_t)chunk + size);
-	node = extent_tree_ad_nsearch(&reserve_chunks_ad, &key);
-	/* Try to coalesce forward. */
-	if (node != NULL && node->addr == key.addr) {
-		/*
-		 * Coalesce chunk with the following address range.  This does
-		 * not change the position within reserve_chunks_ad, so only
-		 * remove/insert from/into reserve_chunks_szad.
-		 */
-		extent_tree_szad_remove(&reserve_chunks_szad, node);
-		node->addr = chunk;
-		node->size += size;
-		extent_tree_szad_insert(&reserve_chunks_szad, node);
-	} else {
-#endif
-		/* Coalescing forward failed, so insert a new node. */
-		node = base_node_alloc();
-		if (node == NULL)
-			return (NULL);
-		node->addr = chunk;
-		node->size = size;
-		extent_tree_ad_insert(&reserve_chunks_ad, node);
-		extent_tree_szad_insert(&reserve_chunks_szad, node);
-#ifndef MALLOC_DECOMMIT
-	}
-
-	/* Try to coalesce backward. */
-	prev = extent_tree_ad_prev(&reserve_chunks_ad, node);
-	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
-	    chunk) {
-		/*
-		 * Coalesce chunk with the previous address range.  This does
-		 * not change the position within reserve_chunks_ad, so only
-		 * remove/insert node from/into reserve_chunks_szad.
-		 */
-		extent_tree_szad_remove(&reserve_chunks_szad, prev);
-		extent_tree_ad_remove(&reserve_chunks_ad, prev);
-
-		extent_tree_szad_remove(&reserve_chunks_szad, node);
-		node->addr = prev->addr;
-		node->size += prev->size;
-		extent_tree_szad_insert(&reserve_chunks_szad, node);
-
-		base_node_dealloc(prev);
-	}
-#endif
-
-#ifdef MALLOC_DECOMMIT
-	pages_decommit(chunk, size);
-#else
-	madvise(chunk, size, MADV_FREE);
-#endif
-
-	reserve_cur += size;
-	if (reserve_cur > reserve_max)
-		reserve_shrink();
-
-	return (node);
-}
-
 static void
 chunk_dealloc_mmap(void *chunk, size_t size)
 {
 
 	pages_unmap(chunk, size);
 }
 
 static void
 chunk_dealloc(void *chunk, size_t size)
 {
-	extent_node_t *node;
 
 	assert(chunk != NULL);
 	assert(CHUNK_ADDR2BASE(chunk) == chunk);
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
 #ifdef MALLOC_STATS
 	stats_chunks.curchunks -= (size / chunksize);
 #endif
 #ifdef MALLOC_VALIDATE
 	malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, NULL);
 #endif
 
-	/* Try to merge chunk into the reserve. */
-	malloc_mutex_lock(&reserve_mtx);
-	node = chunk_dealloc_reserve(chunk, size);
-	malloc_mutex_unlock(&reserve_mtx);
-	if (node == NULL)
-		chunk_dealloc_mmap(chunk, size);
+	chunk_dealloc_mmap(chunk, size);
 }
 
 /*
  * End chunk management functions.
  */
 /******************************************************************************/
 /*
  * Begin arena.
@@ -3463,59 +3165,18 @@ arena_run_alloc(arena_t *arena, arena_bi
 			return (run);
 		}
 
 		/*
 		 * No usable runs.  Create a new chunk from which to allocate
 		 * the run.
 		 */
 		if (chunk == NULL) {
-			uint64_t chunk_seq;
-
-			/*
-			 * Record the chunk allocation sequence number in order
-			 * to detect races.
-			 */
-			arena->chunk_seq++;
-			chunk_seq = arena->chunk_seq;
-
-			/*
-			 * Drop the arena lock while allocating a chunk, since
-			 * reserve notifications may cause recursive
-			 * allocation.  Dropping the lock here opens an
-			 * allocataion race, but we recover.
-			 */
-			malloc_mutex_unlock(&arena->lock);
 			chunk = (arena_chunk_t *)chunk_alloc(chunksize, true,
 			    true);
-			malloc_mutex_lock(&arena->lock);
-
-			/*
-			 * Check whether a race allowed a usable run to appear.
-			 */
-			if (bin != NULL && (run = bin->runcur) != NULL &&
-			    run->nfree > 0) {
-				if (chunk != NULL)
-					chunk_dealloc(chunk, chunksize);
-				return (run);
-			}
-
-			/*
-			 * If this thread raced with another such that multiple
-			 * chunks were allocated, make sure that there is still
-			 * inadequate space before using this chunk.
-			 */
-			if (chunk_seq != arena->chunk_seq)
-				continue;
-
-			/*
-			 * Check for an error *after* checking for a race,
-			 * since a race could also cause a transient OOM
-			 * condition.
-			 */
 			if (chunk == NULL)
 				return (NULL);
 		}
 
 		arena_chunk_init(arena, chunk);
 		run = (arena_run_t *)((uintptr_t)chunk +
 		    (arena_chunk_header_npages << pagesize_2pow));
 		/* Update page map. */
@@ -4785,18 +4446,16 @@ arena_new(arena_t *arena)
 
 	if (malloc_spin_init(&arena->lock))
 		return (true);
 
 #ifdef MALLOC_STATS
 	memset(&arena->stats, 0, sizeof(arena_stats_t));
 #endif
 
-	arena->chunk_seq = 0;
-
 	/* Initialize chunks. */
 	arena_chunk_tree_dirty_new(&arena->chunks_dirty);
 	arena->spare = NULL;
 
 	arena->ndirty = 0;
 
 	arena_avail_tree_new(&arena->runs_avail);
 
@@ -5435,32 +5094,16 @@ malloc_print_stats(void)
 #ifdef MOZ_MEMORY_WINDOWS
 			malloc_printf("Allocated: %lu, mapped: %lu\n",
 			    allocated, mapped);
 #else
 			malloc_printf("Allocated: %zu, mapped: %zu\n",
 			    allocated, mapped);
 #endif
 
-			malloc_mutex_lock(&reserve_mtx);
-			malloc_printf("Reserve:    min          "
-			    "cur          max\n");
-#ifdef MOZ_MEMORY_WINDOWS
-			malloc_printf("   %12lu %12lu %12lu\n",
-			    CHUNK_CEILING(reserve_min) >> opt_chunk_2pow,
-			    reserve_cur >> opt_chunk_2pow,
-			    reserve_max >> opt_chunk_2pow);
-#else
-			malloc_printf("   %12zu %12zu %12zu\n",
-			    CHUNK_CEILING(reserve_min) >> opt_chunk_2pow,
-			    reserve_cur >> opt_chunk_2pow,
-			    reserve_max >> opt_chunk_2pow);
-#endif
-			malloc_mutex_unlock(&reserve_mtx);
-
 #ifdef MALLOC_BALANCE
 			malloc_printf("Arena balance reassignments: %llu\n",
 			    nbalance);
 #endif
 
 			/* Print chunk stats. */
 			{
 				chunk_stats_t chunks_stats;
@@ -5721,22 +5364,16 @@ MALLOC_OUT:
 					opt_dirty_max >>= 1;
 					break;
 				case 'F':
 					if (opt_dirty_max == 0)
 						opt_dirty_max = 1;
 					else if ((opt_dirty_max << 1) != 0)
 						opt_dirty_max <<= 1;
 					break;
-				case 'g':
-					opt_reserve_range_lshift--;
-					break;
-				case 'G':
-					opt_reserve_range_lshift++;
-					break;
 #ifdef MALLOC_FILL
 				case 'j':
 					opt_junk = false;
 					break;
 				case 'J':
 					opt_junk = true;
 					break;
 #endif
@@ -5780,22 +5417,16 @@ MALLOC_OUT:
 					if (opt_quantum_2pow > QUANTUM_2POW_MIN)
 						opt_quantum_2pow--;
 					break;
 				case 'Q':
 					if (opt_quantum_2pow < pagesize_2pow -
 					    1)
 						opt_quantum_2pow++;
 					break;
-				case 'r':
-					opt_reserve_min_lshift--;
-					break;
-				case 'R':
-					opt_reserve_min_lshift++;
-					break;
 				case 's':
 					if (opt_small_max_2pow >
 					    QUANTUM_2POW_MIN)
 						opt_small_max_2pow--;
 					break;
 				case 'S':
 					if (opt_small_max_2pow < pagesize_2pow
 					    - 1)
@@ -5935,17 +5566,16 @@ MALLOC_OUT:
 	huge_allocated = 0;
 #endif
 
 	/* Initialize base allocation data structures. */
 #ifdef MALLOC_STATS
 	base_mapped = 0;
 #endif
 	base_nodes = NULL;
-	base_reserve_regs = NULL;
 	malloc_mutex_init(&base_mtx);
 
 #ifdef MOZ_MEMORY_NARENAS_DEFAULT_ONE
 	narenas = 1;
 #else
 	if (ncpus > 1) {
 		/*
 		 * For SMP systems, create four times as many arenas as there
@@ -6062,37 +5692,16 @@ MALLOC_OUT:
 	malloc_spin_init(&arenas_lock);
 
 #ifdef MALLOC_VALIDATE
 	chunk_rtree = malloc_rtree_new((SIZEOF_PTR << 3) - opt_chunk_2pow);
 	if (chunk_rtree == NULL)
 		return (true);
 #endif
 
-	/*
-	 * Configure and initialize the memory reserve.  This needs to happen
-	 * late during initialization, since chunks are allocated.
-	 */
-	malloc_mutex_init(&reserve_mtx);
-	reserve_min = 0;
-	reserve_cur = 0;
-	reserve_max = 0;
-	if (RESERVE_RANGE_2POW_DEFAULT + opt_reserve_range_lshift >= 0) {
-		reserve_max += chunksize << (RESERVE_RANGE_2POW_DEFAULT +
-		    opt_reserve_range_lshift);
-	}
-	ql_new(&reserve_regs);
-	reserve_seq = 0;
-	extent_tree_szad_new(&reserve_chunks_szad);
-	extent_tree_ad_new(&reserve_chunks_ad);
-	if (RESERVE_MIN_2POW_DEFAULT + opt_reserve_min_lshift >= 0) {
-		reserve_min_set(chunksize << (RESERVE_MIN_2POW_DEFAULT +
-		    opt_reserve_min_lshift));
-	}
-
 	malloc_initialized = true;
 #ifndef MOZ_MEMORY_WINDOWS
 	malloc_mutex_unlock(&init_lock);
 #endif
 	return (false);
 }
 
 /* XXX Why not just expose malloc_print_stats()? */
@@ -6454,22 +6063,16 @@ jemalloc_stats(jemalloc_stats_t *stats)
 #endif
 	    ;
 	stats->quantum = quantum;
 	stats->small_max = small_max;
 	stats->large_max = arena_maxclass;
 	stats->chunksize = chunksize;
 	stats->dirty_max = opt_dirty_max;
 
-	malloc_mutex_lock(&reserve_mtx);
-	stats->reserve_min = reserve_min;
-	stats->reserve_max = reserve_max;
-	stats->reserve_cur = reserve_cur;
-	malloc_mutex_unlock(&reserve_mtx);
-
 	/*
 	 * Gather current memory usage statistics.
 	 */
 	stats->mapped = 0;
 	stats->committed = 0;
 	stats->allocated = 0;
 	stats->dirty = 0;
 
@@ -6517,464 +6120,16 @@ jemalloc_stats(jemalloc_stats_t *stats)
 		}
 	}
 
 #ifndef MALLOC_DECOMMIT
 	stats->committed = stats->mapped;
 #endif
 }
 
-void *
-xmalloc(size_t size)
-{
-	void *ret;
-
-	if (malloc_init())
-		reserve_fail(size, "xmalloc");
-
-	if (size == 0) {
-#ifdef MALLOC_SYSV
-		if (opt_sysv == false)
-#endif
-			size = 1;
-#ifdef MALLOC_SYSV
-		else {
-			_malloc_message(_getprogname(),
-			    ": (malloc) Error in xmalloc(): ",
-			    "invalid size 0", "\n");
-			abort();
-		}
-#endif
-	}
-
-	ret = imalloc(size);
-	if (ret == NULL) {
-		uint64_t seq = 0;
-
-		do {
-			seq = reserve_crit(size, "xmalloc", seq);
-			ret = imalloc(size);
-		} while (ret == NULL);
-	}
-
-	UTRACE(0, size, ret);
-	return (ret);
-}
-
-void *
-xcalloc(size_t num, size_t size)
-{
-	void *ret;
-	size_t num_size;
-
-	num_size = num * size;
-	if (malloc_init())
-		reserve_fail(num_size, "xcalloc");
-
-	if (num_size == 0) {
-#ifdef MALLOC_SYSV
-		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
-#endif
-			num_size = 1;
-#ifdef MALLOC_SYSV
-		else {
-			_malloc_message(_getprogname(),
-			    ": (malloc) Error in xcalloc(): ",
-			    "invalid size 0", "\n");
-			abort();
-		}
-#endif
-	/*
-	 * Try to avoid division here.  We know that it isn't possible to
-	 * overflow during multiplication if neither operand uses any of the
-	 * most significant half of the bits in a size_t.
-	 */
-	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
-	    && (num_size / size != num)) {
-		/* size_t overflow. */
-		_malloc_message(_getprogname(),
-		    ": (malloc) Error in xcalloc(): ",
-		    "size overflow", "\n");
-		abort();
-	}
-
-	ret = icalloc(num_size);
-	if (ret == NULL) {
-		uint64_t seq = 0;
-
-		do {
-			seq = reserve_crit(num_size, "xcalloc", seq);
-			ret = icalloc(num_size);
-		} while (ret == NULL);
-	}
-
-	UTRACE(0, num_size, ret);
-	return (ret);
-}
-
-void *
-xrealloc(void *ptr, size_t size)
-{
-	void *ret;
-
-	if (size == 0) {
-#ifdef MALLOC_SYSV
-		if (opt_sysv == false)
-#endif
-			size = 1;
-#ifdef MALLOC_SYSV
-		else {
-			if (ptr != NULL)
-				idalloc(ptr);
-			_malloc_message(_getprogname(),
-			    ": (malloc) Error in xrealloc(): ",
-			    "invalid size 0", "\n");
-			abort();
-		}
-#endif
-	}
-
-	if (ptr != NULL) {
-		assert(malloc_initialized);
-
-		ret = iralloc(ptr, size);
-		if (ret == NULL) {
-			uint64_t seq = 0;
-
-			do {
-				seq = reserve_crit(size, "xrealloc", seq);
-				ret = iralloc(ptr, size);
-			} while (ret == NULL);
-		}
-	} else {
-		if (malloc_init())
-			reserve_fail(size, "xrealloc");
-
-		ret = imalloc(size);
-		if (ret == NULL) {
-			uint64_t seq = 0;
-
-			do {
-				seq = reserve_crit(size, "xrealloc", seq);
-				ret = imalloc(size);
-			} while (ret == NULL);
-		}
-	}
-
-	UTRACE(ptr, size, ret);
-	return (ret);
-}
-
-void *
-xmemalign(size_t alignment, size_t size)
-{
-	void *ret;
-
-	assert(((alignment - 1) & alignment) == 0 && alignment >=
-	    sizeof(void *));
-
-	if (malloc_init())
-		reserve_fail(size, "xmemalign");
-
-	ret = ipalloc(alignment, size);
-	if (ret == NULL) {
-		uint64_t seq = 0;
-
-		do {
-			seq = reserve_crit(size, "xmemalign", seq);
-			ret = ipalloc(alignment, size);
-		} while (ret == NULL);
-	}
-
-	UTRACE(0, size, ret);
-	return (ret);
-}
-
-static void
-reserve_shrink(void)
-{
-	extent_node_t *node;
-
-	assert(reserve_cur > reserve_max);
-#ifdef MALLOC_DEBUG
-	{
-		extent_node_t *node;
-		size_t reserve_size;
-
-		reserve_size = 0;
-		rb_foreach_begin(extent_node_t, link_szad, &reserve_chunks_szad,
-		    node) {
-			reserve_size += node->size;
-		} rb_foreach_end(extent_node_t, link_szad, &reserve_chunks_szad,
-		    node)
-		assert(reserve_size == reserve_cur);
-
-		reserve_size = 0;
-		rb_foreach_begin(extent_node_t, link_ad, &reserve_chunks_ad,
-		    node) {
-			reserve_size += node->size;
-		} rb_foreach_end(extent_node_t, link_ad, &reserve_chunks_ad,
-		    node)
-		assert(reserve_size == reserve_cur);
-	}
-#endif
-
-	/* Discard chunks until the the reserve is below the size limit. */
-	rb_foreach_reverse_begin(extent_node_t, link_ad, &reserve_chunks_ad,
-	    node) {
-#ifndef MALLOC_DECOMMIT
-		if (node->size <= reserve_cur - reserve_max) {
-#endif
-			extent_node_t *tnode = extent_tree_ad_prev(
-			    &reserve_chunks_ad, node);
-
-#ifdef MALLOC_DECOMMIT
-			assert(node->size <= reserve_cur - reserve_max);
-#endif
-
-			/* Discard the entire [multi-]chunk. */
-			extent_tree_szad_remove(&reserve_chunks_szad, node);
-			extent_tree_ad_remove(&reserve_chunks_ad, node);
-			reserve_cur -= node->size;
-			pages_unmap(node->addr, node->size);
-#ifdef MALLOC_STATS
-			stats_chunks.curchunks -= (node->size / chunksize);
-#endif
-			base_node_dealloc(node);
-			if (reserve_cur == reserve_max)
-				break;
-
-			rb_foreach_reverse_prev(extent_node_t, link_ad,
-			    extent_ad_comp, &reserve_chunks_ad, tnode);
-#ifndef MALLOC_DECOMMIT
-		} else {
-			/* Discard the end of the multi-chunk. */
-			extent_tree_szad_remove(&reserve_chunks_szad, node);
-			node->size -= reserve_cur - reserve_max;
-			extent_tree_szad_insert(&reserve_chunks_szad, node);
-			pages_unmap((void *)((uintptr_t)node->addr +
-			    node->size), reserve_cur - reserve_max);
-#ifdef MALLOC_STATS
-			stats_chunks.curchunks -= ((reserve_cur - reserve_max) /
-			    chunksize);
-#endif
-			reserve_cur = reserve_max;
-			break;
-		}
-#endif
-		assert(reserve_cur > reserve_max);
-	} rb_foreach_reverse_end(extent_node_t, link_ad, &reserve_chunks_ad,
-	    node)
-}
-
-/* Send a condition notification. */
-static uint64_t
-reserve_notify(reserve_cnd_t cnd, size_t size, uint64_t seq)
-{
-	reserve_reg_t *reg;
-
-	/* seq is used to keep track of distinct condition-causing events. */
-	if (seq == 0) {
-		/* Allocate new sequence number. */
-		reserve_seq++;
-		seq = reserve_seq;
-	}
-
-	/*
-	 * Advance to the next callback registration and send a notification,
-	 * unless one has already been sent for this condition-causing event.
-	 */
-	reg = ql_first(&reserve_regs);
-	if (reg == NULL)
-		return (0);
-	ql_first(&reserve_regs) = ql_next(&reserve_regs, reg, link);
-	if (reg->seq == seq)
-		return (0);
-	reg->seq = seq;
-	malloc_mutex_unlock(&reserve_mtx);
-	reg->cb(reg->ctx, cnd, size);
-	malloc_mutex_lock(&reserve_mtx);
-
-	return (seq);
-}
-
-/* Allocation failure due to OOM.  Try to free some memory via callbacks. */
-static uint64_t
-reserve_crit(size_t size, const char *fname, uint64_t seq)
-{
-
-	/*
-	 * Send one condition notification.  Iteration is handled by the
-	 * caller of this function.
-	 */
-	malloc_mutex_lock(&reserve_mtx);
-	seq = reserve_notify(RESERVE_CND_CRIT, size, seq);
-	malloc_mutex_unlock(&reserve_mtx);
-
-	/* If no notification could be sent, then no further recourse exists. */
-	if (seq == 0)
-		reserve_fail(size, fname);
-
-	return (seq);
-}
-
-/* Permanent allocation failure due to OOM. */
-static void
-reserve_fail(size_t size, const char *fname)
-{
-	uint64_t seq = 0;
-
-	/* Send fail notifications. */
-	malloc_mutex_lock(&reserve_mtx);
-	do {
-		seq = reserve_notify(RESERVE_CND_FAIL, size, seq);
-	} while (seq != 0);
-	malloc_mutex_unlock(&reserve_mtx);
-
-	/* Terminate the application. */
-	_malloc_message(_getprogname(),
-	    ": (malloc) Error in ", fname, "(): out of memory\n");
-	abort();
-}
-
-bool
-reserve_cb_register(reserve_cb_t *cb, void *ctx)
-{
-	reserve_reg_t *reg = base_reserve_reg_alloc();
-	if (reg == NULL)
-		return (true);
-
-	ql_elm_new(reg, link);
-	reg->cb = cb;
-	reg->ctx = ctx;
-	reg->seq = 0;
-
-	malloc_mutex_lock(&reserve_mtx);
-	ql_head_insert(&reserve_regs, reg, link);
-	malloc_mutex_unlock(&reserve_mtx);
-
-	return (false);
-}
-
-bool
-reserve_cb_unregister(reserve_cb_t *cb, void *ctx)
-{
-	reserve_reg_t *reg = NULL;
-
-	malloc_mutex_lock(&reserve_mtx);
-	ql_foreach(reg, &reserve_regs, link) {
-		if (reg->cb == cb && reg->ctx == ctx) {
-			ql_remove(&reserve_regs, reg, link);
-			break;
-		}
-	}
-	malloc_mutex_unlock(&reserve_mtx);
-
-	if (reg != NULL)
-		base_reserve_reg_dealloc(reg);
-		return (false);
-	return (true);
-}
-
-size_t
-reserve_cur_get(void)
-{
-	size_t ret;
-
-	malloc_mutex_lock(&reserve_mtx);
-	ret = reserve_cur;
-	malloc_mutex_unlock(&reserve_mtx);
-
-	return (ret);
-}
-
-size_t
-reserve_min_get(void)
-{
-	size_t ret;
-
-	malloc_mutex_lock(&reserve_mtx);
-	ret = reserve_min;
-	malloc_mutex_unlock(&reserve_mtx);
-
-	return (ret);
-}
-
-bool
-reserve_min_set(size_t min)
-{
-
-	min = CHUNK_CEILING(min);
-
-	malloc_mutex_lock(&reserve_mtx);
-	/* Keep |reserve_max - reserve_min| the same. */
-	if (min < reserve_min) {
-		reserve_max -= reserve_min - min;
-		reserve_min = min;
-	} else {
-		/* Protect against wrap-around. */
-		if (reserve_max + min - reserve_min < reserve_max) {
-			reserve_min = SIZE_T_MAX - (reserve_max - reserve_min)
-			    - chunksize + 1;
-			reserve_max = SIZE_T_MAX - chunksize + 1;
-		} else {
-			reserve_max += min - reserve_min;
-			reserve_min = min;
-		}
-	}
-
-	/* Resize the reserve if necessary. */
-	if (reserve_cur < reserve_min) {
-		size_t size = reserve_min - reserve_cur;
-
-		/* Force the reserve to grow by allocating/deallocating. */
-		malloc_mutex_unlock(&reserve_mtx);
-#ifdef MALLOC_DECOMMIT
-		{
-			void **chunks;
-			size_t i, n;
-
-			n = size >> opt_chunk_2pow;
-			chunks = (void**)imalloc(n * sizeof(void *));
-			if (chunks == NULL)
-				return (true);
-			for (i = 0; i < n; i++) {
-				chunks[i] = huge_malloc(chunksize, false);
-				if (chunks[i] == NULL) {
-					size_t j;
-
-					for (j = 0; j < i; j++) {
-						huge_dalloc(chunks[j]);
-					}
-					idalloc(chunks);
-					return (true);
-				}
-			}
-			for (i = 0; i < n; i++)
-				huge_dalloc(chunks[i]);
-			idalloc(chunks);
-		}
-#else
-		{
-			void *x = huge_malloc(size, false);
-			if (x == NULL) {
-				return (true);
-			}
-			huge_dalloc(x);
-		}
-#endif
-	} else if (reserve_cur > reserve_max) {
-		reserve_shrink();
-		malloc_mutex_unlock(&reserve_mtx);
-	} else
-		malloc_mutex_unlock(&reserve_mtx);
-
-	return (false);
-}
-
 #ifdef MOZ_MEMORY_WINDOWS
 void*
 _recalloc(void *ptr, size_t count, size_t size)
 {
 	size_t oldsize = (ptr != NULL) ? isalloc(ptr) : 0;
 	size_t newsize = count * size;
 
 	/*
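
With the reserve gone, the infallible x*() entry points (xmalloc, xcalloc, xrealloc, xmemalign), which looped on reserve_crit() until an allocation succeeded, are deleted outright, and chunk_alloc()/chunk_dealloc() fall straight through to chunk_alloc_mmap()/chunk_dealloc_mmap(). A minimal, purely illustrative sketch of what a former xmalloc() caller is left to do once this patch lands; checked_malloc() is a hypothetical helper, not something this patch adds:

/*
 * Illustrative sketch only; checked_malloc() is hypothetical and not part
 * of this patch.  Callers that relied on xmalloc() never returning NULL
 * must now handle malloc() failure themselves.
 */
#include <stdio.h>
#include <stdlib.h>

static void *
checked_malloc(size_t size)
{
	void *ret = malloc(size);

	if (ret == NULL) {
		/* xmalloc() used to notify reserve callbacks and retry here. */
		fprintf(stderr, "out of memory (%zu bytes)\n", size);
		abort();
	}
	return (ret);
}
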
--- a/memory/jemalloc/jemalloc.h
+++ b/memory/jemalloc/jemalloc.h
@@ -64,158 +64,36 @@ typedef struct {
 	jemalloc_bool	opt_zero;	/* Fill allocated memory with 0x0? */
 	size_t	narenas;	/* Number of arenas. */
 	size_t	balance_threshold; /* Arena contention rebalance threshold. */
 	size_t	quantum;	/* Allocation quantum. */
 	size_t	small_max;	/* Max quantum-spaced allocation size. */
 	size_t	large_max;	/* Max sub-chunksize allocation size. */
 	size_t	chunksize;	/* Size of each virtual memory mapping. */
 	size_t	dirty_max;	/* Max dirty pages per arena. */
-	size_t	reserve_min;	/* reserve_low callback threshold. */
-	size_t	reserve_max;	/* Maximum reserve size before unmapping. */
 
 	/*
 	 * Current memory usage statistics.
 	 */
 	size_t	mapped;		/* Bytes mapped (not necessarily committed). */
 	size_t	committed;	/* Bytes committed (readable/writable). */
 	size_t	allocated;	/* Bytes allocted (in use by application). */
 	size_t	dirty;		/* Bytes dirty (committed unused pages). */
-	size_t	reserve_cur;	/* Current memory reserve. */
 } jemalloc_stats_t;
 
 #ifndef MOZ_MEMORY_DARWIN
 void	*malloc(size_t size);
 void	*valloc(size_t size);
 void	*calloc(size_t num, size_t size);
 void	*realloc(void *ptr, size_t size);
 void	free(void *ptr);
 #endif
 
 int	posix_memalign(void **memptr, size_t alignment, size_t size);
 void	*memalign(size_t alignment, size_t size);
 size_t	malloc_usable_size(const void *ptr);
 void	jemalloc_stats(jemalloc_stats_t *stats);
 
-/* The x*() functions never return NULL. */
-void	*xmalloc(size_t size);
-void	*xcalloc(size_t num, size_t size);
-void	*xrealloc(void *ptr, size_t size);
-void	*xmemalign(size_t alignment, size_t size);
-
-/*
- * The allocator maintains a memory reserve that is used to satisfy allocation
- * requests when no additional memory can be acquired from the operating
- * system.  Under normal operating conditions, the reserve size is at least
- * reserve_min bytes.  If the reserve is depleted or insufficient to satisfy an
- * allocation request, then condition notifications are sent to one or more of
- * the registered callback functions:
- *
- *   RESERVE_CND_LOW: The reserve had to be used to satisfy an allocation
- *                    request, which dropped the reserve size below the
- *                    minimum.  The callee should try to free memory in order
- *                    to restore the reserve.
- *
- *   RESERVE_CND_CRIT: The reserve was not large enough to satisfy a pending
- *                     allocation request.  Some callee must free adequate
- *                     memory in order to prevent application failure (unless
- *                     the condition spontaneously desists due to concurrent
- *                     deallocation).
- *
- *   RESERVE_CND_FAIL: An allocation request could not be satisfied, despite all
- *                     attempts.  The allocator is about to terminate the
- *                     application.
- *
- * The order in which the callback functions are called is only loosely
- * specified: in the absence of interposing callback
- * registrations/unregistrations, enabled callbacks will be called in an
- * arbitrary round-robin order.
- *
- * Condition notifications are sent to callbacks only while conditions exist.
- * For example, just before the allocator sends a RESERVE_CND_LOW condition
- * notification to a callback, the reserve is in fact depleted.  However, due
- * to allocator concurrency, the reserve may have been restored by the time the
- * callback function executes.  Furthermore, if the reserve is restored at some
- * point during the delivery of condition notifications to callbacks, no
- * further deliveries will occur, since the condition no longer exists.
- *
- * Callback functions can freely call back into the allocator (i.e. the
- * allocator releases all internal resources before calling each callback
- * function), though allocation is discouraged, since recursive callbacks are
- * likely to result, which places extra burden on the application to avoid
- * deadlock.
- *
- * Callback functions must be thread-safe, since it is possible that multiple
- * threads will call into the same callback function concurrently.
- */
-
-/* Memory reserve condition types. */
-typedef enum {
-	RESERVE_CND_LOW,
-	RESERVE_CND_CRIT,
-	RESERVE_CND_FAIL
-} reserve_cnd_t;
-
-/*
- * Reserve condition notification callback function type definition.
- *
- * Inputs:
- *   ctx: Opaque application data, as passed to reserve_cb_register().
- *   cnd: Condition type being delivered.
- *   size: Allocation request size for the allocation that caused the condition.
- */
-typedef void reserve_cb_t(void *ctx, reserve_cnd_t cnd, size_t size);
-
-/*
- * Register a callback function.
- *
- * Inputs:
- *   cb: Callback function pointer.
- *   ctx: Opaque application data, passed to cb().
- *
- * Output:
- *   ret: If true, failure due to OOM; success otherwise.
- */
-jemalloc_bool	reserve_cb_register(reserve_cb_t *cb, void *ctx);
-
-/*
- * Unregister a callback function.
- *
- * Inputs:
- *   cb: Callback function pointer.
- *   ctx: Opaque application data, same as that passed to reserve_cb_register().
- *
- * Output:
- *   ret: False upon success, true if the {cb,ctx} registration could not be
- *        found.
- */
-jemalloc_bool	reserve_cb_unregister(reserve_cb_t *cb, void *ctx);
-
-/*
- * Get the current reserve size.
- *
- * ret: Current reserve size.
- */
-size_t	reserve_cur_get(void);
-
-/*
- * Get the minimum acceptable reserve size.  If the reserve drops below this
- * value, the RESERVE_CND_LOW condition notification is sent to the callbacks.
- *
- * ret: Minimum acceptable reserve size.
- */
-size_t	reserve_min_get(void);
-
-/*
- * Set the minimum acceptable reserve size.
- *
- * min: Reserve threshold.  This value may be internally rounded up.
- * ret: False if the reserve was successfully resized; true otherwise.  Note
- *      that failure to resize the reserve also results in a RESERVE_CND_LOW
- *      condition.
- */
-jemalloc_bool	reserve_min_set(size_t min);
-
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
 
 #endif /* _JEMALLOC_H_ */
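
The header comments removed above documented the reserve callback interface (reserve_cb_register(), reserve_cb_t, and the RESERVE_CND_* conditions) that an embedder could use to react to low-memory conditions. For reference, a hedged sketch of what a registration against that now-deleted interface looked like; the callback body is illustrative only, and this code no longer compiles against jemalloc.h after this patch:

/*
 * Illustrative only: uses the reserve API removed by this patch, so it
 * will not build against the new jemalloc.h.  The callback body is a
 * hypothetical example of reacting to memory pressure.
 */
#include <stdio.h>
#include "jemalloc.h"

static void
low_memory_cb(void *ctx, reserve_cnd_t cnd, size_t size)
{
	(void)ctx;
	if (cnd == RESERVE_CND_CRIT || cnd == RESERVE_CND_FAIL) {
		/* An application would try to release caches here. */
		fprintf(stderr, "jemalloc reserve pressure: %zu bytes\n", size);
	}
}

static jemalloc_bool
install_low_memory_hook(void)
{
	/* Per the removed docs: returns true on OOM, false on success. */
	return (reserve_cb_register(low_memory_cb, NULL));
}
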