bug 416261. updating jemalloc to latest version. r=me
author		pavlov@pavlov.net
date		Fri, 08 Feb 2008 21:46:59 -0800
changeset 11449	87f5bb93303cd327dc54f8a5ac75a5dc8520b14d
parent 11448	4553d8f15cfe9034a4e72a4180397bd92402b857
child 11450	da1880c11e2baa195bb653e82805504f821d0185
push id		1
push user	bsmedberg@mozilla.com
push date	Thu, 20 Mar 2008 16:49:24 +0000
reviewers	me
bugs		416261
milestone	1.9b4pre
memory/jemalloc/crtsp1.diff
memory/jemalloc/jemalloc.c
--- a/memory/jemalloc/crtsp1.diff
+++ b/memory/jemalloc/crtsp1.diff
@@ -7,20 +7,20 @@
 # http://www.microsoft.com/downloads/details.aspx?familyid=200b2fd9-ae1a-4a14-984d-389c36f85647&displaylang=en
 diff -re crt/src/crt0.c crt-sp1/src/crt0.c
 273c
 	/*
 	 * this used to happen in _mtinit, but we need it before malloc
 	 */
 	_init_pointers();       /* initialize global function pointers */
 
-        if ( malloc_init() )                /* initialize heap */
+        if ( malloc_init_hard() )                /* initialize heap */
 .
 101a
-extern BOOL malloc_init(void);
+extern BOOL malloc_init_hard(void);
 .
 diff -re crt/src/crt0dat.c crt-sp1/src/crt0dat.c
 789d
 778d
 diff -re crt/src/crtexe.c crt-sp1/src/crtexe.c
 333,335d
 diff -re crt/src/crtheap.c crt-sp1/src/crtheap.c
 61c
@@ -39,20 +39,20 @@ 340d
 310,311d
 300d
 287c
             /*
              * this used to happen in _mtinit, but we need it before malloc
              */
             _init_pointers();       /* initialize global function pointers */
 
-            if ( malloc_init() )   /* initialize heap */
+            if ( malloc_init_hard() )   /* initialize heap */
 .
 43a
-extern BOOL malloc_init(void);
+extern BOOL malloc_init_hard(void);
 extern void malloc_shutdown(void);
 
 .
 diff -re crt/src/dllcrt0.c crt-sp1/src/dllcrt0.c
 236,237d
 183d
 173d
 158d
--- a/memory/jemalloc/jemalloc.c
+++ b/memory/jemalloc/jemalloc.c
@@ -107,16 +107,28 @@
    /*
     * MALLOC_DEBUG enables assertions and other sanity checks, and disables
     * inline functions.
     */
 #  define MALLOC_DEBUG
 
    /* MALLOC_STATS enables statistics calculation. */
 #  define MALLOC_STATS
+
+   /* Memory filling (junk/zero). */
+#  define MALLOC_FILL
+
+   /* Allocation tracing. */
+#  define MALLOC_UTRACE
+
+   /* Support optional abort() on OOM. */
+#  define MALLOC_XMALLOC
+
+   /* Support SYSV semantics. */
+#  define MALLOC_SYSV
 #endif
 
 /*
  * MALLOC_LAZY_FREE enables the use of a per-thread vector of slots that free()
  * can atomically stuff object pointers into.  This can reduce arena lock
  * contention.
  */
 /* #define	MALLOC_LAZY_FREE */
@@ -133,50 +145,62 @@
  * segment (DSS).  In an ideal world, this functionality would be completely
  * unnecessary, but we are burdened by history and the lack of resource limits
  * for anonymous mapped memory.
  */
 #if (!defined(MOZ_MEMORY_DARWIN) && !defined(MOZ_MEMORY_WINDOWS))
 #define	MALLOC_DSS
 #endif
 
+#ifdef MOZ_MEMORY_LINUX
+#define	_GNU_SOURCE /* For mremap(2). */
+#define	issetugid() 0
+#if 0 /* Enable in order to test decommit code on Linux. */
+#  define MALLOC_DECOMMIT
+/*
+ * The decommit code for Unix doesn't bother to make sure deallocated DSS
+ * chunks are writable.
+ */
+#  undef MALLOC_DSS
+#endif
+#endif
+
 #include <sys/types.h>
 
 #include <errno.h>
 #include <limits.h>
 #include <stdarg.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 
-
 #ifdef MOZ_MEMORY_WINDOWS
 #include <cruntime.h>
 #include <internal.h>
 #include <windows.h>
 #include <io.h>
 #include "tree.h"
 
 #pragma warning( disable: 4267 4996 4146 )
 
-#define bool BOOL
-#define false FALSE
-#define true TRUE
-#define inline __inline
-#define SIZE_T_MAX ULONG_MAX
-#define STDERR_FILENO 2
-#define PATH_MAX MAX_PATH
-#define vsnprintf _vsnprintf
-#define assert(f) /* we can't assert in the CRT */
+#define	bool BOOL
+#define	false FALSE
+#define	true TRUE
+#define	inline __inline
+#define	SIZE_T_MAX SIZE_MAX
+#define	STDERR_FILENO 2
+#define	PATH_MAX MAX_PATH
+#define	vsnprintf _vsnprintf
+#define	assert(f) /* we can't assert in the CRT */
 
 static unsigned long tlsIndex = 0xffffffff;
 
-#define __thread
-#define _pthread_self() __threadid()
-#define issetugid() 0
+#define	__thread
+#define	_pthread_self() __threadid()
+#define	issetugid() 0
 
 /* use MSVC intrinsics */
 #pragma intrinsic(_BitScanForward)
 static __forceinline int
 ffs(int x)
 {
 	unsigned long i;
 
@@ -184,52 +208,43 @@ ffs(int x)
 		return (i + 1);
 
 	return (0);
 }
 
 /* Implement getenv without using malloc */
 static char mozillaMallocOptionsBuf[64];
 
-#define getenv xgetenv
+#define	getenv xgetenv
 static char *
 getenv(const char *name)
 {
 
 	if (GetEnvironmentVariableA(name, (LPSTR)&mozillaMallocOptionsBuf,
 		    sizeof(mozillaMallocOptionsBuf)) > 0)
 		return (mozillaMallocOptionsBuf);
 
 	return (NULL);
 }
 
+typedef unsigned char uint8_t;
 typedef unsigned uint32_t;
 typedef unsigned long long uint64_t;
 typedef unsigned long long uintmax_t;
 
-#define MALLOC_DECOMMIT
-#endif
-
-/* XXX Temporary, for testing on Linux. */
-#if 0
-#  define MALLOC_DECOMMIT
-/*
- * The decommit code for Unix doesn't bother to make sure deallocated DSS
- * chunks are writable.
- */
-#  undef MALLOC_DSS
+#define	MALLOC_DECOMMIT
 #endif
 
 #ifndef MOZ_MEMORY_WINDOWS
 #include <sys/cdefs.h>
 #ifndef __DECONST
 #  define __DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
 #endif
 #ifndef MOZ_MEMORY
-__FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.161 2008/01/03 23:22:13 jasone Exp $");
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.162 2008/02/06 02:59:54 jasone Exp $");
 #include "libc_private.h"
 #ifdef MALLOC_DEBUG
 #  define _LOCK_DEBUG
 #endif
 #include "spinlock.h"
 #include "namespace.h"
 #endif
 #include <sys/mman.h>
@@ -254,17 +269,17 @@ typedef unsigned long long uintmax_t;
 #include <machine/atomic.h>
 #include <machine/cpufunc.h>
 #include <machine/vmparam.h>
 #endif
 
 #include <errno.h>
 #include <limits.h>
 #ifndef SIZE_T_MAX
-#  define SIZE_T_MAX	ULONG_MAX
+#  define SIZE_T_MAX	SIZE_MAX
 #endif
 #include <pthread.h>
 #ifdef MOZ_MEMORY_DARWIN
 #define _pthread_self pthread_self
 #define _pthread_mutex_init pthread_mutex_init
 #define _pthread_mutex_trylock pthread_mutex_trylock
 #define _pthread_mutex_lock pthread_mutex_lock
 #define _pthread_mutex_unlock pthread_mutex_unlock
@@ -318,16 +333,22 @@ static const bool __isthreaded = true;
    /* Disable inlining to make debugging easier. */
 #ifdef inline
 #undef inline
 #endif
 
 #  define inline
 #endif
 
+#ifndef MOZ_MEMORY_WINDOWS
+#define	VISIBLE __attribute__((visibility("default")))
+#else
+#define	VISIBLE
+#endif
+
 /* Size of stack-allocated buffer passed to strerror_r(). */
 #define	STRERROR_BUF		64
 
 /* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
 #  define QUANTUM_2POW_MIN      4
 #ifdef MOZ_MEMORY_SIZEOF_PTR_2POW
 #  define SIZEOF_PTR_2POW		MOZ_MEMORY_SIZEOF_PTR_2POW
 #else
@@ -399,16 +420,19 @@ static const bool __isthreaded = true;
 #endif
 
 /*
  * Size and alignment of memory chunks that are allocated by the OS's virtual
  * memory system.
  */
 #define	CHUNK_2POW_DEFAULT	20
 
+/* Maximum number of dirty pages per arena. */
+#define	DIRTY_MAX_DEFAULT	(1U << 9)
+
 /*
  * Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
  * so over-estimates are okay (up to a point), but under-estimates will
  * negatively affect performance.
  */
 #define	CACHELINE_2POW		6
 #define	CACHELINE		((size_t)(1U << CACHELINE_2POW))
 
@@ -439,17 +463,21 @@ static const bool __isthreaded = true;
  *
  *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP))
  */
 #define	RUN_BFP			12
 /*                                    \/   Implicit binary fixed point. */
 #define	RUN_MAX_OVRHD		0x0000003dU
 #define	RUN_MAX_OVRHD_RELAX	0x00001800U
 
-/* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
+/*
+ * Put a cap on small object run size.  This overrides RUN_MAX_OVRHD.  Note
+ * that small runs must be small enough that page offsets can fit within the
+ * CHUNK_MAP_POS_MASK bits.
+ */
 #define	RUN_MAX_SMALL_2POW	15
 #define	RUN_MAX_SMALL		(1U << RUN_MAX_SMALL_2POW)
 
 #ifdef MALLOC_LAZY_FREE
    /* Default size of each arena's lazy free cache. */
 #  define LAZY_FREE_2POW_DEFAULT 8
    /*
     * Number of pseudo-random probes to conduct before considering the cache to
@@ -532,16 +560,18 @@ typedef malloc_spinlock_t malloc_mutex_t
 
 /* Set to true once the allocator has been initialized. */
 static bool malloc_initialized = false;
 
 #if defined(MOZ_MEMORY_WINDOWS)
 /* No init lock for Windows. */
 #elif defined(MOZ_MEMORY_DARWIN)
 static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
+#elif defined(MOZ_MEMORY_LINUX)
+static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
 #elif defined(MOZ_MEMORY)
 static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
 #else
 static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
 #endif
 
 /******************************************************************************/
 /*
@@ -575,21 +605,32 @@ struct malloc_bin_stats_s {
 };
 
 typedef struct arena_stats_s arena_stats_t;
 struct arena_stats_s {
 	/* Number of bytes currently mapped. */
 	size_t		mapped;
 
 	/*
-	 * Total number of bytes purged in order to keep dirty unused memory
-	 * under control, and the number of madvise calls made while purging.
+	 * Total number of purge sweeps, total number of madvise calls made,
+	 * and total pages purged in order to keep dirty unused memory under
+	 * control.
 	 */
-	uint64_t	npurged;
+	uint64_t	npurge;
 	uint64_t	nmadvise;
+	uint64_t	purged;
+#ifdef MALLOC_DECOMMIT
+	/*
+	 * Total number of decommit/commit operations, and total number of
+	 * pages decommitted.
+	 */
+	uint64_t	ndecommit;
+	uint64_t	ncommit;
+	uint64_t	decommitted;
+#endif
 
 	/* Per-size-category statistics. */
 	size_t		allocated_small;
 	uint64_t	nmalloc_small;
 	uint64_t	ndalloc_small;
 
 	size_t		allocated_large;
 	uint64_t	nmalloc_large;
@@ -622,104 +663,80 @@ struct chunk_stats_s {
 /******************************************************************************/
 /*
  * Extent data structures.
  */
 
 /* Tree of extents. */
 typedef struct extent_node_s extent_node_t;
 struct extent_node_s {
+	/* Linkage for the size/address-ordered tree. */
+	RB_ENTRY(extent_node_s) link_szad;
+
 	/* Linkage for the address-ordered tree. */
 	RB_ENTRY(extent_node_s) link_ad;
 
-	/* Linkage for the size/address-ordered tree. */
-	RB_ENTRY(extent_node_s) link_szad;
-
 	/* Pointer to the extent that this tree node is responsible for. */
 	void	*addr;
 
 	/* Total region size. */
 	size_t	size;
-
-	/*
-	 * Number of dirty bytes in unused run.  This field is only used by the
-	 * runs_avail_* tree nodes.
-	 */
-	size_t	ndirty;
 };
-typedef struct extent_tree_ad_s extent_tree_ad_t;
-RB_HEAD(extent_tree_ad_s, extent_node_s);
 typedef struct extent_tree_szad_s extent_tree_szad_t;
 RB_HEAD(extent_tree_szad_s, extent_node_s);
-
-/*
- * Magazine of extent nodes.  Arenas each use an unpredictable number of extent
- * nodes, and they need to be allocated somehow via base_alloc().  We use
- * magazines in order to amortize the locking cost of acquiring the nodes from
- * a single source for all threads.
- */
-typedef struct node_mag_s node_mag_t;
-struct node_mag_s {
-	/* Used to link a stack of magazines. */
-	node_mag_t	*next;
-
-	/* Slots nodes[0..nnodes) contain pointers to available nodes. */
-#define	NODE_MAG_NNODES	254 /* Strange value plays nicely with base_alloc(). */
-	unsigned	nnodes;
-	extent_node_t	*nodes[NODE_MAG_NNODES];
-};
+typedef struct extent_tree_ad_s extent_tree_ad_t;
+RB_HEAD(extent_tree_ad_s, extent_node_s);
 
 /******************************************************************************/
 /*
  * Arena data structures.
  */
 
 typedef struct arena_s arena_t;
 typedef struct arena_bin_s arena_bin_t;
 
-typedef struct arena_chunk_map_s arena_chunk_map_t;
-struct arena_chunk_map_s {
-	/*
-	 * Number of pages in run.  For a free run that has never been touched,
-	 * this is NPAGES_EMPTY for the central pages, which allows us to avoid
-	 * zero-filling untouched pages for calloc().
-	 */
-#define	NPAGES_EMPTY ((uint32_t)0x0U)
-	uint32_t	npages;
-	/*
-	 * Position within run.  For a free run, this is POS_EMPTY/POS_FREE for
-	 * the first and last pages.  The special values make it possible to
-	 * quickly coalesce free runs.  POS_EMPTY indicates that the run has
-	 * never been touched, which allows us to avoid zero-filling untouched
-	 * pages for calloc().
-	 *
-	 * This is the limiting factor for chunksize; there can be at most 2^31
-	 * pages in a run.
-	 *
-	 * POS_EMPTY is assumed by arena_run_dalloc() to be less than POS_FREE.
-	 */
-#define	POS_EMPTY ((uint32_t)0xfffffffeU)
-#define	POS_FREE ((uint32_t)0xffffffffU)
-	uint32_t	pos;
-};
+/*
+ * Each map element contains several flags, plus page position for runs that
+ * service small allocations.
+ */
+typedef uint8_t arena_chunk_map_t;
+#define	CHUNK_MAP_UNTOUCHED	0x80U
+#define	CHUNK_MAP_DIRTY		0x40U
+#define	CHUNK_MAP_LARGE		0x20U
+#ifdef MALLOC_DECOMMIT
+#define	CHUNK_MAP_DECOMMITTED	0x10U
+#define	CHUNK_MAP_POS_MASK	0x0fU
+#else
+#define	CHUNK_MAP_POS_MASK	0x1fU
+#endif
 
 /* Arena chunk header. */
 typedef struct arena_chunk_s arena_chunk_t;
 struct arena_chunk_s {
 	/* Arena that owns the chunk. */
-	arena_t *arena;
+	arena_t		*arena;
 
 	/* Linkage for the arena's chunk tree. */
 	RB_ENTRY(arena_chunk_s) link;
 
 	/*
 	 * Number of pages in use.  This is maintained in order to make
 	 * detection of empty chunks fast.
 	 */
-	uint32_t pages_used;
+	size_t		pages_used;
+
+	/* Number of dirty pages. */
+	size_t		ndirty;
+
+	/*
+	 * Tree of extent nodes that are embedded in the arena chunk header
+	 * page(s).  These nodes are used by arena_chunk_node_alloc().
+	 */
+	extent_tree_ad_t nodes;
+	extent_node_t	*nodes_past;
 
 	/*
 	 * Map of pages within chunk that keeps track of free/large/small.  For
 	 * free runs, only the map entries for the first and last pages are
 	 * kept up to date, so that free runs can be quickly coalesced.
 	 */
 	arena_chunk_map_t map[1]; /* Dynamically sized. */
 };
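
The new 8-bit chunk map above packs a handful of state flags and a small-run page position into one byte per page. A minimal standalone sketch of how such a byte is read follows; the helper names and example values are illustrative only (not from the patch), and POS_MASK is shown for the non-decommit layout.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint8_t arena_chunk_map_t;
#define	CHUNK_MAP_UNTOUCHED	0x80U	/* Page never touched since it was mapped. */
#define	CHUNK_MAP_DIRTY		0x40U	/* Freed page, not yet purged/madvised. */
#define	CHUNK_MAP_LARGE		0x20U	/* Page belongs to a large run. */
#define	CHUNK_MAP_POS_MASK	0x1fU	/* 0x0f when MALLOC_DECOMMIT is defined. */

/* Does this page belong to a run servicing a large allocation? */
static bool
map_is_large(arena_chunk_map_t m)
{
	return ((m & CHUNK_MAP_LARGE) != 0);
}

/* For a page of a small run, its position within that run. */
static unsigned
map_small_pos(arena_chunk_map_t m)
{
	assert(!map_is_large(m));
	return (m & CHUNK_MAP_POS_MASK);
}

int
main(void)
{
	arena_chunk_map_t small_pg = 3;			/* Page 3 of a small run. */
	arena_chunk_map_t free_pg = CHUNK_MAP_UNTOUCHED;	/* Free, never-touched page. */

	assert(!map_is_large(small_pg));
	assert(map_small_pos(small_pg) == 3);
	assert((free_pg & CHUNK_MAP_UNTOUCHED) != 0);
	return (0);
}
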
@@ -801,66 +818,49 @@ struct arena_s {
 	pthread_mutex_t		lock;
 #endif
 
 #ifdef MALLOC_STATS
 	arena_stats_t		stats;
 #endif
 
 	/*
-	 * Node magazines, used by arena_node_[de]alloc().  In order to reduce
-	 * lock contention for base_node_mag_[de]alloc(), we keep up to two
-	 * full magazines on hand.
-	 */
-	node_mag_t		*node_mag_cur;
-	node_mag_t		*node_mag_full;
-
-	/*
 	 * Tree of chunks this arena manages.
 	 */
 	arena_chunk_tree_t	chunks;
 
 	/*
 	 * In order to avoid rapid chunk allocation/deallocation when an arena
 	 * oscillates right on the cusp of needing a new chunk, cache the most
-	 * recently freed chunk.
+	 * recently freed chunk.  The spare is left in the arena's chunk tree
+	 * until it is deleted.
 	 *
 	 * There is one spare chunk per arena, rather than one spare total, in
 	 * order to avoid interactions between multiple threads that could make
 	 * a single spare inadequate.
 	 */
 	arena_chunk_t		*spare;
 
 	/*
-	 * Current count of bytes within unused runs that are potentially
+	 * Current count of pages within unused runs that are potentially
 	 * dirty, and for which madvise(... MADV_FREE) has not been called.  By
 	 * tracking this, we can institute a limit on how much dirty unused
 	 * memory is mapped for each arena.
 	 */
 	size_t			ndirty;
-	/*
-	 * Number of dirty bytes for spare.  All other dirty bytes are tracked
-	 * in the runs_avail_* tree nodes.
-	 */
-	size_t			spare_ndirty;
 
 	/*
 	 * Trees of this arena's available runs.  Two trees are maintained
 	 * using one set of nodes, since one is needed for first-best-fit run
 	 * allocation, and the other is needed for coalescing.
 	 */
 	extent_tree_szad_t	runs_avail_szad;
 	extent_tree_ad_t	runs_avail_ad;
 
-	/*
-	 * Tree of this arena's allocated (in-use) runs.  This tree is
-	 * maintained solely to guarantee that a node is available in
-	 * arena_run_dalloc(), since there is no way to recover from OOM during
-	 * deallocation.
-	 */
+	/* Tree of this arena's allocated (in-use) runs. */
 	extent_tree_ad_t	runs_alloced_ad;
 
 #ifdef MALLOC_BALANCE
 	/*
 	 * The arena load balancing machinery needs to keep track of how much
 	 * lock contention there is.  This value is exponentially averaged.
 	 */
 	uint32_t		contention;
@@ -925,18 +925,18 @@ static size_t		small_max;
 
 /* Various quantum-related settings. */
 static size_t		quantum;
 static size_t		quantum_mask; /* (quantum - 1). */
 
 /* Various chunk-related settings. */
 static size_t		chunksize;
 static size_t		chunksize_mask; /* (chunksize - 1). */
-static unsigned		chunk_npages;
-static unsigned		arena_chunk_header_npages;
+static size_t		chunk_npages;
+static size_t		arena_chunk_header_npages;
 static size_t		arena_maxclass; /* Max size class for arenas. */
 
 /********/
 /*
  * Chunks.
  */
 
 /* Protects chunk-related data structures. */
@@ -956,21 +956,21 @@ static void		*dss_base;
 /* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
 static void		*dss_prev;
 /* Current upper limit on DSS addresses. */
 static void		*dss_max;
 
 /*
  * Trees of chunks that were previously allocated (trees differ only in node
  * ordering).  These are used when allocating chunks, in an attempt to re-use
- * address space.  Depending on funcition, different tree orderings are needed,
+ * address space.  Depending on function, different tree orderings are needed,
  * which is why there are two trees with the same contents.
  */
+static extent_tree_szad_t dss_chunks_szad;
 static extent_tree_ad_t	dss_chunks_ad;
-static extent_tree_szad_t dss_chunks_szad;
 #endif
 
 #ifdef MALLOC_STATS
 /* Huge allocation statistics. */
 static uint64_t		huge_nmalloc;
 static uint64_t		huge_ndalloc;
 static size_t		huge_allocated;
 #endif
@@ -983,19 +983,17 @@ static size_t		huge_allocated;
 /*
  * Current pages that are being used for internal memory allocations.  These
  * pages are carved up in cacheline-size quanta, so that there is no chance of
  * false cache line sharing.
  */
 static void		*base_pages;
 static void		*base_next_addr;
 static void		*base_past_addr; /* Addr immediately past base_pages. */
-static node_mag_t	*base_node_mags_avail; /* LIFO cache of full mags. */
-static node_mag_t	*base_node_mag; /* For base_node_[de]alloc(). */
-static node_mag_t	*base_node_mag_partial; /* For OOM leak prevention. */
+static extent_node_t	*base_nodes;
 static malloc_mutex_t	base_mtx;
 #ifdef MALLOC_STATS
 static size_t		base_mapped;
 #endif
 
 /********/
 /*
  * Arenas.
@@ -1036,81 +1034,79 @@ static chunk_stats_t	stats_chunks;
 #endif
 
 /*******************************/
 /*
  * Runtime configuration options.
  */
 const char	*_malloc_options
 #ifdef MOZ_MEMORY_WINDOWS
-= "A10n3F"
+= "A10n2F"
 #elif (defined(MOZ_MEMORY_DARWIN))
 = "AP10n"
 #endif
 ;
 
-#ifdef MALLOC_DECOMMIT
-static bool opt_decommit = true;
-#endif
-
 #ifndef MALLOC_PRODUCTION
 static bool	opt_abort = true;
+#ifdef MALLOC_FILL
 static bool	opt_junk = true;
+#endif
 #else
 static bool	opt_abort = false;
+#ifdef MALLOC_FILL
 static bool	opt_junk = false;
 #endif
+#endif
 #ifdef MALLOC_DSS
 static bool	opt_dss = true;
 static bool	opt_mmap = true;
 #endif
-static size_t	opt_free_max = (1U << CHUNK_2POW_DEFAULT);
+static size_t	opt_dirty_max = DIRTY_MAX_DEFAULT;
 #ifdef MALLOC_LAZY_FREE
 static int	opt_lazy_free_2pow = LAZY_FREE_2POW_DEFAULT;
 #endif
 #ifdef MALLOC_BALANCE
 static uint64_t	opt_balance_threshold = BALANCE_THRESHOLD_DEFAULT;
 #endif
 static bool	opt_print_stats = false;
 static size_t	opt_quantum_2pow = QUANTUM_2POW_MIN;
 static size_t	opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
 static size_t	opt_chunk_2pow = CHUNK_2POW_DEFAULT;
+#ifdef MALLOC_UTRACE
 static bool	opt_utrace = false;
+#endif
+#ifdef MALLOC_SYSV
 static bool	opt_sysv = false;
+#endif
+#ifdef MALLOC_XMALLOC
 static bool	opt_xmalloc = false;
+#endif
+#ifdef MALLOC_FILL
 static bool	opt_zero = false;
+#endif
 static int	opt_narenas_lshift = 0;
 
+#ifdef MALLOC_UTRACE
 typedef struct {
 	void	*p;
 	size_t	s;
 	void	*r;
 } malloc_utrace_t;
 
-#if 0
-#define UTRACE(a, b, c) do {						\
-	if (a == NULL && b == 0 && c == NULL)				\
-		malloc_printf("%d x USER malloc_init()\n", getpid());	\
-	else if (a == NULL && c != 0) {					\
-		malloc_printf("%d x USER %p = malloc(%zu)\n", getpid(),	\
-		    c, b);						\
-	} else if (a != NULL && c != NULL) {				\
-		malloc_printf("%d x USER %p = realloc(%p, %zu)\n",	\
-		    getpid(), c, a, b);					\
-	} else								\
-		malloc_printf("%d x USER free(%p)\n", getpid(), a);	\
-} while (0)
-#elif (defined(MOZ_MEMORY))
-#define	UTRACE(a, b, c)
-#else
 #define	UTRACE(a, b, c)							\
 	if (opt_utrace) {						\
-		malloc_utrace_t ut = {a, b, c};				\
+		malloc_utrace_t ut;					\
+		ut.p = (a);						\
+		ut.s = (b);						\
+		ut.r = (c);						\
 		utrace(&ut, sizeof(ut));				\
 	}
+#else
+#define	UTRACE(a, b, c)
 #endif
 
 /******************************************************************************/
 /*
  * Begin function prototypes for non-inline static functions.
  */
 
 static bool	malloc_mutex_init(malloc_mutex_t *mutex);
@@ -1120,63 +1116,94 @@ static void	wrtmessage(const char *p1, c
 #ifdef MALLOC_STATS
 #ifdef MOZ_MEMORY_DARWIN
 /* Avoid namespace collision with OS X's malloc APIs. */
 #define malloc_printf xmalloc_printf
 #endif
 static void	malloc_printf(const char *format, ...);
 #endif
 static char	*umax2s(uintmax_t x, char *s);
+#ifdef MALLOC_DSS
+static bool	base_pages_alloc_dss(size_t minsize);
+#endif
+static bool	base_pages_alloc_mmap(size_t minsize);
 static bool	base_pages_alloc(size_t minsize);
 static void	*base_alloc(size_t size);
 static void	*base_calloc(size_t number, size_t size);
 static extent_node_t *base_node_alloc(void);
 static void	base_node_dealloc(extent_node_t *node);
 #ifdef MALLOC_STATS
 static void	stats_print(arena_t *arena);
 #endif
 static void	*pages_map(void *addr, size_t size);
 static void	pages_unmap(void *addr, size_t size);
+#ifdef MALLOC_DSS
+static void	*chunk_alloc_dss(size_t size);
+static void	*chunk_recycle_dss(size_t size, bool zero);
+#endif
+static void	*chunk_alloc_mmap(size_t size);
 static void	*chunk_alloc(size_t size, bool zero);
+#ifdef MALLOC_DSS
+static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
+static bool	chunk_dealloc_dss(void *chunk, size_t size);
+#endif
+static void	chunk_dealloc_mmap(void *chunk, size_t size);
 static void	chunk_dealloc(void *chunk, size_t size);
 #ifndef NO_TLS
 static arena_t	*choose_arena_hard(void);
 #endif
-static bool	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
-    bool zero);
+static extent_node_t *arena_chunk_node_alloc(arena_chunk_t *chunk);
+static void	arena_chunk_node_dealloc(arena_chunk_t *chunk,
+    extent_node_t *node);
+static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
+    bool small, bool zero);
 static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
 static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
-static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool zero);
+static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool small,
+    bool zero);
 static void	arena_purge(arena_t *arena);
-static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size,
-    size_t ndirty);
+static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
+static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
+    extent_node_t *nodeB, arena_run_t *run, size_t oldsize, size_t newsize);
+static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
+    extent_node_t *nodeA, arena_run_t *run, size_t oldsize, size_t newsize,
+    bool dirty);
 static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
 static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
 static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
-static void	*arena_malloc(arena_t *arena, size_t size, bool zero);
+#ifdef MALLOC_BALANCE
+static void	arena_lock_balance_hard(arena_t *arena);
+#endif
+static void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
 static void	*arena_palloc(arena_t *arena, size_t alignment, size_t size,
     size_t alloc_size);
 static size_t	arena_salloc(const void *ptr);
+#ifdef MALLOC_LAZY_FREE
+static void	arena_dalloc_lazy_hard(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, size_t pageind, arena_chunk_map_t *mapelm);
+#endif
+static void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr);
+static void	arena_ralloc_resize_shrink(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, size_t size, size_t oldsize);
+static bool	arena_ralloc_resize_grow(arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, size_t size, size_t oldsize);
 static bool	arena_ralloc_resize(void *ptr, size_t size, size_t oldsize);
 static void	*arena_ralloc(void *ptr, size_t size, size_t oldsize);
-static void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
 static bool	arena_new(arena_t *arena);
 static arena_t	*arenas_extend(unsigned ind);
 static void	*huge_malloc(size_t size, bool zero);
 static void	*huge_palloc(size_t alignment, size_t size);
 static void	*huge_ralloc(void *ptr, size_t size, size_t oldsize);
 static void	huge_dalloc(void *ptr);
-static void	*imalloc(size_t size);
-static void	*ipalloc(size_t alignment, size_t size);
-static void	*icalloc(size_t size);
-static size_t	isalloc(const void *ptr);
-static void	*iralloc(void *ptr, size_t size);
-static void	idalloc(void *ptr);
 static void	malloc_print_stats(void);
-static bool	malloc_init_hard(void);
+#ifndef MOZ_MEMORY_WINDOWS
+static
+#endif
+bool		malloc_init_hard(void);
 
 /*
  * End function prototypes.
  */
 /******************************************************************************/
 /*
  * Begin mutex.  We can't use normal pthread mutexes in all places, because
  * they require malloc()ed memory, which causes bootstrapping issues in some
@@ -1187,16 +1214,26 @@ static bool
 malloc_mutex_init(malloc_mutex_t *mutex)
 {
 #if defined(MOZ_MEMORY_WINDOWS)
 	if (__isthreaded)
 		if (! __crtInitCritSecAndSpinCount(mutex, _CRT_SPINCOUNT))
 			return (true);
 #elif defined(MOZ_MEMORY_DARWIN)
 	mutex->lock = OS_SPINLOCK_INIT;
+#elif defined(MOZ_MEMORY_LINUX)
+	pthread_mutexattr_t attr;
+	if (pthread_mutexattr_init(&attr) != 0)
+		return (true);
+	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+	if (pthread_mutex_init(mutex, &attr) != 0) {
+		pthread_mutexattr_destroy(&attr);
+		return (true);
+	}
+	pthread_mutexattr_destroy(&attr);
 #elif defined(MOZ_MEMORY)
 	if (pthread_mutex_init(mutex, NULL) != 0)
 		return (true);
 #else
 	static const spinlock_t lock = _SPINLOCK_INITIALIZER;
 
 	mutex->lock = lock;
 #endif
@@ -1239,16 +1276,26 @@ static bool
 malloc_spin_init(malloc_spinlock_t *lock)
 {
 #if defined(MOZ_MEMORY_WINDOWS)
 	if (__isthreaded)
 		if (! __crtInitCritSecAndSpinCount(lock, _CRT_SPINCOUNT))
 			return (true);
 #elif defined(MOZ_MEMORY_DARWIN)
 	lock->lock = OS_SPINLOCK_INIT;
+#elif defined(MOZ_MEMORY_LINUX)
+	pthread_mutexattr_t attr;
+	if (pthread_mutexattr_init(&attr) != 0)
+		return (true);
+	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+	if (pthread_mutex_init(lock, &attr) != 0) {
+		pthread_mutexattr_destroy(&attr);
+		return (true);
+	}
+	pthread_mutexattr_destroy(&attr);
 #elif defined(MOZ_MEMORY)
 	if (pthread_mutex_init(lock, NULL) != 0)
 		return (true);
 #else
 	lock->lock = _SPINLOCK_INITIALIZER;
 #endif
 	return (false);
 }
@@ -1289,17 +1336,16 @@ malloc_spin_unlock(malloc_spinlock_t *lo
  */
 /******************************************************************************/
 /*
  * Begin spin lock.  Spin locks here are actually adaptive mutexes that block
  * after a period of spinning, because unbounded spinning would allow for
  * priority inversion.
  */
 
-
 #if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_DARWIN)
 #  define	malloc_spin_init	malloc_mutex_init
 #  define	malloc_spin_lock	malloc_mutex_lock
 #  define	malloc_spin_unlock	malloc_mutex_unlock
 #endif
 
 #ifndef MOZ_MEMORY
 /*
@@ -1480,16 +1526,39 @@ PRN_DEFINE(lazy_free, lazy_free_x, 12345
 #endif
 
 #ifdef MALLOC_BALANCE
 /* Define the PRNG used for arena assignment. */
 static __thread uint32_t balance_x;
 PRN_DEFINE(balance, balance_x, 1297, 1301)
 #endif
 
+#ifdef MALLOC_UTRACE
+static int
+utrace(const void *addr, size_t len)
+{
+	malloc_utrace_t *ut = (malloc_utrace_t *)addr;
+
+	assert(len == sizeof(malloc_utrace_t));
+
+	if (ut->p == NULL && ut->s == 0 && ut->r == NULL)
+		malloc_printf("%d x USER malloc_init()\n", getpid());
+	else if (ut->p == NULL && ut->r != NULL) {
+		malloc_printf("%d x USER %p = malloc(%zu)\n", getpid(), ut->r,
+		    ut->s);
+	} else if (ut->p != NULL && ut->r != NULL) {
+		malloc_printf("%d x USER %p = realloc(%p, %zu)\n", getpid(),
+		    ut->r, ut->p, ut->s);
+	} else
+		malloc_printf("%d x USER free(%p)\n", getpid(), ut->p);
+
+	return (0);
+}
+#endif
+
 static inline const char *
 _getprogname(void)
 {
 
 	return ("<jemalloc>");
 }
 
 static void
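
Taken together, the UTRACE() macro and the utrace() consumer above imply a simple record convention: p is the old pointer, s the requested size, r the result, so malloc is traced as (NULL, size, result), realloc as (old, size, result), free as (old, 0, NULL), and malloc_init() as (NULL, 0, NULL). A self-contained sketch of that decoding follows; it is illustrative only and reuses the struct layout from the patch but none of its other machinery.

#include <stdio.h>
#include <stddef.h>
#include <unistd.h>

typedef struct {
	void	*p;	/* Old pointer (NULL for malloc). */
	size_t	 s;	/* Requested size (0 for free). */
	void	*r;	/* Resulting pointer (NULL for free). */
} malloc_utrace_t;

/* Decode one trace record, mirroring utrace() above. */
static void
trace(const malloc_utrace_t *ut)
{
	if (ut->p == NULL && ut->s == 0 && ut->r == NULL)
		printf("%d x USER malloc_init()\n", getpid());
	else if (ut->p == NULL && ut->r != NULL)
		printf("%d x USER %p = malloc(%zu)\n", getpid(), ut->r, ut->s);
	else if (ut->p != NULL && ut->r != NULL)
		printf("%d x USER %p = realloc(%p, %zu)\n", getpid(), ut->r,
		    ut->p, ut->s);
	else
		printf("%d x USER free(%p)\n", getpid(), ut->p);
}

int
main(void)
{
	int x;
	malloc_utrace_t ut = {NULL, 16, &x};	/* As if: &x = malloc(16). */

	trace(&ut);
	return (0);
}
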
@@ -1550,17 +1619,17 @@ umax2s(uintmax_t x, char *s)
 	} while (x > 0);
 
 	return (&s[i]);
 }
 
 /******************************************************************************/
 
 #ifdef MALLOC_DSS
-static inline bool
+static bool
 base_pages_alloc_dss(size_t minsize)
 {
 
 	/*
 	 * Do special DSS allocation here, since base allocations don't need to
 	 * be chunk-aligned.
 	 */
 	malloc_mutex_lock(&dss_mtx);
@@ -1599,17 +1668,17 @@ base_pages_alloc_dss(size_t minsize)
 		} while (dss_prev != (void *)-1);
 	}
 	malloc_mutex_unlock(&dss_mtx);
 
 	return (true);
 }
 #endif
 
-static inline bool
+static bool
 base_pages_alloc_mmap(size_t minsize)
 {
 	size_t csize;
 
 	assert(minsize != 0);
 	csize = PAGE_CEILING(minsize);
 	base_pages = pages_map(NULL, csize);
 	if (base_pages == NULL)
@@ -1638,216 +1707,142 @@ base_pages_alloc(size_t minsize)
 	{
 		if (base_pages_alloc_mmap(minsize) == false)
 			return (false);
 	}
 
 	return (true);
 }
 
-static inline void *
-base_alloc_locked(size_t size)
+static void *
+base_alloc(size_t size)
 {
 	void *ret;
 	size_t csize;
 
 	/* Round size up to nearest multiple of the cacheline size. */
 	csize = CACHELINE_CEILING(size);
 
+	malloc_mutex_lock(&base_mtx);
 	/* Make sure there's enough space for the allocation. */
 	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
 		if (base_pages_alloc(csize))
 			return (NULL);
 	}
-
 	/* Allocate. */
 	ret = base_next_addr;
 	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
-
-	return (ret);
-}
-
-static void *
-base_alloc(size_t size)
-{
-	void *ret;
-
-	malloc_mutex_lock(&base_mtx);
-	ret = base_alloc_locked(size);
 	malloc_mutex_unlock(&base_mtx);
 
 	return (ret);
 }
 
 static void *
 base_calloc(size_t number, size_t size)
 {
 	void *ret;
 
 	ret = base_alloc(number * size);
 	memset(ret, 0, number * size);
 
 	return (ret);
 }
 
-static inline node_mag_t *
-base_node_mag_alloc_locked(void)
-{
-	node_mag_t *ret;
-
-	if (base_node_mags_avail != NULL) {
-		ret = base_node_mags_avail;
-		base_node_mags_avail = base_node_mags_avail->next;
-	} else {
-		extent_node_t *node;
-		unsigned i;
-
-		if (base_node_mag_partial == NULL) {
-			ret = base_alloc_locked(sizeof(node_mag_t));
-			if (ret == NULL)
-				return (NULL);
-		} else {
-			/*
-			 * Try to complete partial initalization that was
-			 * impeded by OOM.
-			 */
-			ret = base_node_mag_partial;
-			base_node_mag_partial = NULL;
-		}
-		ret->next = NULL;
-		ret->nnodes = NODE_MAG_NNODES;
-		for (i = 0; i < NODE_MAG_NNODES; i++) {
-			if (ret->nodes[i] == NULL) {
-				node = (extent_node_t *)base_alloc_locked(
-				    sizeof(extent_node_t));
-				if (node == NULL) {
-					/*
-					 * Stash the magazine for later
-					 * completion of initialization.
-					 */
-					base_node_mag_partial = ret;
-					return (NULL);
-				}
-				ret->nodes[i] = node;
-			}
-		}
-	}
-
-	return (ret);
-}
-
-static inline node_mag_t *
-base_node_mag_alloc(void)
-{
-	node_mag_t *ret;
-
-	malloc_mutex_lock(&base_mtx);
-	ret = base_node_mag_alloc_locked();
-	malloc_mutex_unlock(&base_mtx);
-
-	return (ret);
-}
-
-static inline void
-base_node_mag_dealloc_locked(node_mag_t *mag)
-{
-
-	mag->next = base_node_mags_avail;
-	base_node_mags_avail = mag;
-}
-
-static inline void
-base_node_mag_dealloc(node_mag_t *mag)
-{
-
-	malloc_mutex_lock(&base_mtx);
-	base_node_mag_dealloc_locked(mag);
-	malloc_mutex_unlock(&base_mtx);
-}
-
 static extent_node_t *
 base_node_alloc(void)
 {
 	extent_node_t *ret;
 
 	malloc_mutex_lock(&base_mtx);
-	if (base_node_mag == NULL || base_node_mag->nnodes == 0) {
-		node_mag_t *node_mag = base_node_mag_alloc_locked();
-		if (node_mag == NULL) {
-			malloc_mutex_unlock(&base_mtx);
-			return (NULL);
-		}
-		node_mag->next = base_node_mag;
-		base_node_mag = node_mag;
+	if (base_nodes != NULL) {
+		ret = base_nodes;
+		base_nodes = *(extent_node_t **)ret;
+		malloc_mutex_unlock(&base_mtx);
+	} else {
+		malloc_mutex_unlock(&base_mtx);
+		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
 	}
-	base_node_mag->nnodes--;
-	ret = base_node_mag->nodes[base_node_mag->nnodes];
-	malloc_mutex_unlock(&base_mtx);
 
 	return (ret);
 }
 
 static void
 base_node_dealloc(extent_node_t *node)
 {
 
 	malloc_mutex_lock(&base_mtx);
-	if (base_node_mag->nnodes == NODE_MAG_NNODES) {
-		/*
-		 * Move full magazine to base_node_mags_avail.  This will leave
-		 * an empty magazine in base_node_mag.
-		 */
-		node_mag_t *node_mag = base_node_mag;
-		base_node_mag = base_node_mag->next;
-		base_node_mag_dealloc_locked(node_mag);
-	}
-	base_node_mag->nodes[base_node_mag->nnodes] = node;
-	base_node_mag->nnodes++;
+	*(extent_node_t **)node = base_nodes;
+	base_nodes = node;
 	malloc_mutex_unlock(&base_mtx);
 }
 
 /******************************************************************************/
 
 #ifdef MALLOC_STATS
 static void
 stats_print(arena_t *arena)
 {
 	unsigned i, gap_start;
 
+#ifdef MOZ_MEMORY_WINDOWS
+	malloc_printf("dirty: %Iu page%s dirty, %I64u sweep%s,"
+	    " %I64u madvise%s, %I64u page%s purged\n",
+	    arena->ndirty, arena->ndirty == 1 ? "" : "s",
+	    arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
+	    arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
+	    arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
+#  ifdef MALLOC_DECOMMIT
+	malloc_printf("decommit: %I64u decommit%s, %I64u commit%s,"
+	    " %I64u page%s decommitted\n",
+	    arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
+	    arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
+	    arena->stats.decommitted,
+	    (arena->stats.decommitted == 1) ? "" : "s");
+#  endif
+
 	malloc_printf("            allocated      nmalloc      ndalloc\n");
-#ifdef MOZ_MEMORY_WINDOWS
 	malloc_printf("small:   %12Iu %12I64u %12I64u\n",
 	    arena->stats.allocated_small, arena->stats.nmalloc_small,
 	    arena->stats.ndalloc_small);
 	malloc_printf("large:   %12Iu %12I64u %12I64u\n",
 	    arena->stats.allocated_large, arena->stats.nmalloc_large,
 	    arena->stats.ndalloc_large);
 	malloc_printf("total:   %12Iu %12I64u %12I64u\n",
 	    arena->stats.allocated_small + arena->stats.allocated_large,
 	    arena->stats.nmalloc_small + arena->stats.nmalloc_large,
 	    arena->stats.ndalloc_small + arena->stats.ndalloc_large);
 	malloc_printf("mapped:  %12Iu\n", arena->stats.mapped);
-	malloc_printf("dirty:   %12Iu\n", arena->ndirty);
-	malloc_printf("purged:  %12I64u\n", arena->stats.npurged);
-	malloc_printf("nmadvise:%12I64u\n", arena->stats.nmadvise);
 #else
+	malloc_printf("dirty: %zu page%s dirty, %llu sweep%s,"
+	    " %llu madvise%s, %llu page%s purged\n",
+	    arena->ndirty, arena->ndirty == 1 ? "" : "s",
+	    arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
+	    arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
+	    arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
+#  ifdef MALLOC_DECOMMIT
+	malloc_printf("decommit: %llu decommit%s, %llu commit%s,"
+	    " %llu page%s decommitted\n",
+	    arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
+	    arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
+	    arena->stats.decommitted,
+	    (arena->stats.decommitted == 1) ? "" : "s");
+#  endif
+
+	malloc_printf("            allocated      nmalloc      ndalloc\n");
 	malloc_printf("small:   %12zu %12llu %12llu\n",
 	    arena->stats.allocated_small, arena->stats.nmalloc_small,
 	    arena->stats.ndalloc_small);
 	malloc_printf("large:   %12zu %12llu %12llu\n",
 	    arena->stats.allocated_large, arena->stats.nmalloc_large,
 	    arena->stats.ndalloc_large);
 	malloc_printf("total:   %12zu %12llu %12llu\n",
 	    arena->stats.allocated_small + arena->stats.allocated_large,
 	    arena->stats.nmalloc_small + arena->stats.nmalloc_large,
 	    arena->stats.ndalloc_small + arena->stats.ndalloc_large);
 	malloc_printf("mapped:  %12zu\n", arena->stats.mapped);
-	malloc_printf("dirty:   %12zu\n", arena->ndirty);
-	malloc_printf("purged:  %12llu\n", arena->stats.npurged);
-	malloc_printf("nmadvise:%12llu\n", arena->stats.nmadvise);
 #endif
 	malloc_printf("bins:     bin   size regs pgs  requests   newruns"
 	    "    reruns maxruns curruns\n");
 	for (i = 0, gap_start = UINT_MAX; i < ntbins + nqbins + nsbins; i++) {
 		if (arena->bins[i].stats.nrequests == 0) {
 			if (gap_start == UINT_MAX)
 				gap_start = i;
 		} else {
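
base_node_alloc() and base_node_dealloc() above replace the magazine scheme with a simple intrusive free list: a freed extent_node_t's first word is overwritten with the next pointer, so the list needs no storage of its own, and an empty list falls back to base_alloc(). A minimal standalone sketch of the pattern follows; the simplified node type and helper names are illustrative, not from the patch.

#include <assert.h>
#include <stddef.h>

typedef struct extent_node_s {
	void	*addr;	/* First word doubles as the free-list link. */
	size_t	 size;
} extent_node_t;

static extent_node_t	*free_nodes = NULL;	/* Plays the role of base_nodes. */

static void
node_dealloc(extent_node_t *node)
{
	*(extent_node_t **)node = free_nodes;	/* Store "next" in the node itself. */
	free_nodes = node;
}

static extent_node_t *
node_alloc(void)
{
	extent_node_t *ret = free_nodes;

	if (ret != NULL)
		free_nodes = *(extent_node_t **)ret;
	return (ret);	/* NULL means the caller must allocate a fresh node. */
}

int
main(void)
{
	extent_node_t a, b;

	node_dealloc(&a);
	node_dealloc(&b);
	assert(node_alloc() == &b);	/* LIFO order. */
	assert(node_alloc() == &a);
	assert(node_alloc() == NULL);
	return (0);
}
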
@@ -1898,28 +1893,16 @@ stats_print(arena_t *arena)
  * End Utility functions/macros.
  */
 /******************************************************************************/
 /*
  * Begin extent tree code.
  */
 
 static inline int
-extent_ad_comp(extent_node_t *a, extent_node_t *b)
-{
-	uintptr_t a_addr = (uintptr_t)a->addr;
-	uintptr_t b_addr = (uintptr_t)b->addr;
-
-	return ((a_addr > b_addr) - (a_addr < b_addr));
-}
-
-/* Generate red-black tree code for address-ordered extents. */
-RB_GENERATE_STATIC(extent_tree_ad_s, extent_node_s, link_ad, extent_ad_comp)
-
-static inline int
 extent_szad_comp(extent_node_t *a, extent_node_t *b)
 {
 	int ret;
 	size_t a_size = a->size;
 	size_t b_size = b->size;
 
 	ret = (a_size > b_size) - (a_size < b_size);
 	if (ret == 0) {
@@ -1931,16 +1914,28 @@ extent_szad_comp(extent_node_t *a, exten
 
 	return (ret);
 }
 
 /* Generate red-black tree code for size/address-ordered extents. */
 RB_GENERATE_STATIC(extent_tree_szad_s, extent_node_s, link_szad,
     extent_szad_comp)
 
+static inline int
+extent_ad_comp(extent_node_t *a, extent_node_t *b)
+{
+	uintptr_t a_addr = (uintptr_t)a->addr;
+	uintptr_t b_addr = (uintptr_t)b->addr;
+
+	return ((a_addr > b_addr) - (a_addr < b_addr));
+}
+
+/* Generate red-black tree code for address-ordered extents. */
+RB_GENERATE_STATIC(extent_tree_ad_s, extent_node_s, link_ad, extent_ad_comp)
+
 /*
  * End extent tree code.
  */
 /******************************************************************************/
 /*
  * Begin chunk management functions.
  */
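
Both comparators use the (a > b) - (a < b) idiom, which yields -1, 0, or 1 and, unlike subtracting the operands, cannot overflow or truncate for size_t or pointer values. A tiny standalone check of that behavior (illustrative only):

#include <assert.h>
#include <stddef.h>

static int
cmp_size(size_t a, size_t b)
{
	return ((a > b) - (a < b));
}

int
main(void)
{
	assert(cmp_size(1, 2) == -1);
	assert(cmp_size(2, 2) == 0);
	assert(cmp_size((size_t)-1, 0) == 1);	/* Subtraction would wrap here. */
	return (0);
}
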
 
@@ -2001,16 +1996,29 @@ pages_unmap(void *addr, size_t size)
 	if (err != KERN_SUCCESS) {
 		malloc_message(_getprogname(),
 		    ": (malloc) Error in vm_deallocate(): ",
 		    mach_error_string(err), "\n");
 		if (opt_abort)
 			abort();
 	}
 }
+
+#define	VM_COPY_MIN (pagesize << 5)
+static inline void
+pages_copy(void *dest, const void *src, size_t n)
+{
+
+	assert((void *)((uintptr_t)dest & ~pagesize_mask) == dest);
+	assert(n >= VM_COPY_MIN);
+	assert((void *)((uintptr_t)src & ~pagesize_mask) == src);
+
+	vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
+	    (vm_address_t)dest);
+}
 #else /* MOZ_MEMORY_DARWIN */
 static void *
 pages_map(void *addr, size_t size)
 {
 	void *ret;
 
 	/*
 	 * We don't use MAP_FIXED here, because it can cause the *replacement*
@@ -2054,18 +2062,46 @@ pages_unmap(void *addr, size_t size)
 		_malloc_message(_getprogname(),
 		    ": (malloc) Error in munmap(): ", buf, "\n");
 		if (opt_abort)
 			abort();
 	}
 }
 #endif
 
+#ifdef MALLOC_DECOMMIT
+static inline void
+pages_decommit(void *addr, size_t size)
+{
+
+#ifdef MOZ_MEMORY_WINDOWS
+	VirtualFree(addr, size, MEM_DECOMMIT);
+#else
+	if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
+	    0) == MAP_FAILED)
+		abort();
+#endif
+}
+
+static inline void
+pages_commit(void *addr, size_t size)
+{
+
+#  ifdef MOZ_MEMORY_WINDOWS
+	VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
+#  else
+	if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
+	    MAP_ANON, -1, 0) == MAP_FAILED)
+		abort();
+#  endif
+}
+#endif
+
 #ifdef MALLOC_DSS
-static inline void *
+static void *
 chunk_alloc_dss(size_t size)
 {
 
 	malloc_mutex_lock(&dss_mtx);
 	if (dss_prev != (void *)-1) {
 		intptr_t incr;
 
 		/*
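
On the non-Windows path, pages_decommit() and pages_commit() above work by re-mmap()ing a fixed range: PROT_NONE with MAP_FIXED discards the backing memory while keeping the address range reserved, and PROT_READ|PROT_WRITE makes it usable again, zero-filled. A standalone sketch of that round trip follows (illustrative only; MAP_ANON may be spelled MAP_ANONYMOUS on some systems).

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	size_t len = (size_t)sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	assert(p != MAP_FAILED);
	memset(p, 0xa5, len);

	/* "Decommit": keep the address range reserved, discard the memory. */
	if (mmap(p, len, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
	    0) == MAP_FAILED)
		abort();

	/* "Commit": make the range usable again; contents come back zeroed. */
	if (mmap(p, len, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE |
	    MAP_ANON, -1, 0) == MAP_FAILED)
		abort();
	assert(((unsigned char *)p)[0] == 0);

	munmap(p, len);
	return (0);
}
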
@@ -2101,17 +2137,17 @@ chunk_alloc_dss(size_t size)
 			}
 		} while (dss_prev != (void *)-1);
 	}
 	malloc_mutex_unlock(&dss_mtx);
 
 	return (NULL);
 }
 
-static inline void *
+static void *
 chunk_recycle_dss(size_t size, bool zero)
 {
 	extent_node_t *node, key;
 
 	key.addr = NULL;
 	key.size = size;
 	malloc_mutex_lock(&dss_mtx);
 	node = RB_NFIND(extent_tree_szad_s, &dss_chunks_szad, &key);
@@ -2181,18 +2217,18 @@ chunk_alloc_mmap(size_t size)
 			 * Deallocate, then allocate the correct size, within
 			 * the over-sized mapping.
 			 */
 			offset = CHUNK_ADDR2OFFSET(ret);
 			pages_unmap(ret, size + chunksize);
 			if (offset == 0)
 				ret = pages_map(ret, size);
 			else {
-				ret = pages_map((void *)((uintptr_t)ret + chunksize
-				    - offset), size);
+				ret = pages_map((void *)((uintptr_t)ret +
+				    chunksize - offset), size);
 			}
 			/*
 			 * Failure here indicates a race with another thread, so
 			 * try again.
 			 */
 		}
 	}
 
@@ -2278,17 +2314,16 @@ chunk_alloc_mmap(size_t size)
 static void *
 chunk_alloc(size_t size, bool zero)
 {
 	void *ret;
 
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-
 #ifdef MALLOC_DSS
 	if (opt_dss) {
 		ret = chunk_recycle_dss(size, zero);
 		if (ret != NULL) {
 			goto RETURN;
 		}
 
 		ret = chunk_alloc_dss(size);
@@ -2316,17 +2351,17 @@ RETURN:
 		stats_chunks.highchunks = stats_chunks.curchunks;
 #endif
 
 	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
 }
 
 #ifdef MALLOC_DSS
-static inline extent_node_t *
+static extent_node_t *
 chunk_dealloc_dss_record(void *chunk, size_t size)
 {
 	extent_node_t *node, *prev, key;
 
 	key.addr = (void *)((uintptr_t)chunk + size);
 	node = RB_NFIND(extent_tree_ad_s, &dss_chunks_ad, &key);
 	/* Try to coalesce forward. */
 	if (node != NULL && node->addr == key.addr) {
@@ -2360,31 +2395,31 @@ chunk_dealloc_dss_record(void *chunk, si
 	prev = RB_PREV(extent_tree_ad_s, &dss_chunks_ad, node);
 	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
 	    chunk) {
 		/*
 		 * Coalesce chunk with the previous address range.  This does
 		 * not change the position within dss_chunks_ad, so only
 		 * remove/insert node from/into dss_chunks_szad.
 		 */
+		RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, prev);
 		RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad, prev);
-		RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, prev);
 
 		RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
 		node->addr = prev->addr;
 		node->size += prev->size;
 		RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
 
 		base_node_dealloc(prev);
 	}
 
 	return (node);
 }
 
-static inline bool
+static bool
 chunk_dealloc_dss(void *chunk, size_t size)
 {
 
 	malloc_mutex_lock(&dss_mtx);
 	if ((uintptr_t)chunk >= (uintptr_t)dss_base
 	    && (uintptr_t)chunk < (uintptr_t)dss_max) {
 		extent_node_t *node;
 
@@ -2406,45 +2441,44 @@ chunk_dealloc_dss(void *chunk, size_t si
 		 * designed multi-threaded programs.
 		 */
 		if ((void *)((uintptr_t)chunk + size) == dss_max
 		    && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
 			/* Success. */
 			dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);
 
 			if (node != NULL) {
-				RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad,
+				RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad,
 				    node);
-				RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad,
+				RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad,
 				    node);
 				base_node_dealloc(node);
 			}
 			malloc_mutex_unlock(&dss_mtx);
 		} else {
 			malloc_mutex_unlock(&dss_mtx);
 #ifdef MOZ_MEMORY_WINDOWS
 			VirtualAlloc(chunk, size, MEM_RESET, PAGE_READWRITE);
 #elif (defined(MOZ_MEMORY_DARWIN))
 			mmap(chunk, size, PROT_READ | PROT_WRITE, MAP_PRIVATE
 			    | MAP_ANON | MAP_FIXED, -1, 0);
-			//XXXmsync(chunk, size, MS_DEACTIVATE);
 #else
 			madvise(chunk, size, MADV_FREE);
 #endif
 		}
 
 		return (false);
 	}
 	malloc_mutex_unlock(&dss_mtx);
 
 	return (true);
 }
 #endif
 
-static inline void
+static void
 chunk_dealloc_mmap(void *chunk, size_t size)
 {
 
 	pages_unmap(chunk, size);
 }
 
 static void
 chunk_dealloc(void *chunk, size_t size)
@@ -2489,22 +2523,17 @@ choose_arena(void)
 
 	/*
 	 * We can only use TLS if this is a PIC library, since for the static
 	 * library version, libc's malloc is used by TLS allocation, which
 	 * introduces a bootstrapping issue.
 	 */
 #ifndef NO_TLS
 	if (__isthreaded == false) {
-	    /*
-	     * Avoid the overhead of TLS for single-threaded operation.  If the
-	     * app switches to threaded mode, the initial thread may end up
-	     * being assigned to some other arena, but this one-time switch
-	     * shouldn't cause significant issues.
-	     */
+	    /* Avoid the overhead of TLS for single-threaded operation. */
 	    return (arenas[0]);
 	}
 
 #  ifdef MOZ_MEMORY_WINDOWS
 	ret = TlsGetValue(tlsIndex);
 #  else
 	ret = arenas_map;
 #  endif
@@ -2653,59 +2682,42 @@ arena_run_comp(arena_run_t *a, arena_run
 	assert(b != NULL);
 
 	return ((a_run > b_run) - (a_run < b_run));
 }
 
 /* Generate red-black tree code for arena runs. */
 RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp)
 
-static inline extent_node_t *
-arena_node_alloc(arena_t *arena)
+static extent_node_t *
+arena_chunk_node_alloc(arena_chunk_t *chunk)
 {
 	extent_node_t *ret;
-	node_mag_t *node_mag = arena->node_mag_cur;
-
-	if (node_mag == NULL || node_mag->nnodes == 0) {
-		if (arena->node_mag_full != NULL) {
-			arena->node_mag_full->next = node_mag;
-			node_mag = arena->node_mag_full;
-			arena->node_mag_cur = node_mag;
-			arena->node_mag_full = NULL;
-		} else {
-			node_mag = base_node_mag_alloc();
-			if (node_mag == NULL)
-				return (NULL);
-			node_mag->next = arena->node_mag_cur;
-			arena->node_mag_cur = node_mag;
-		}
+
+	ret = RB_MIN(extent_tree_ad_s, &chunk->nodes);
+	if (ret != NULL)
+		RB_REMOVE(extent_tree_ad_s, &chunk->nodes, ret);
+	else {
+		ret = chunk->nodes_past;
+		chunk->nodes_past = (extent_node_t *)
+		    ((uintptr_t)chunk->nodes_past + sizeof(extent_node_t));
+		assert((uintptr_t)ret + sizeof(extent_node_t) <=
+		    (uintptr_t)chunk + (arena_chunk_header_npages <<
+		    pagesize_2pow));
 	}
 
-	node_mag->nnodes--;
-	ret = node_mag->nodes[node_mag->nnodes];
-
 	return (ret);
 }
 
-static inline void
-arena_node_dealloc(arena_t *arena, extent_node_t *node)
+static void
+arena_chunk_node_dealloc(arena_chunk_t *chunk, extent_node_t *node)
 {
-	node_mag_t *node_mag = arena->node_mag_cur;
-
-	if (node_mag->nnodes == NODE_MAG_NNODES) {
-		if (arena->node_mag_full != NULL)
-			base_node_mag_dealloc(arena->node_mag_full);
-		arena->node_mag_full = node_mag;
-		node_mag = node_mag->next;
-		arena->node_mag_cur = node_mag;
-	}
-	assert(node_mag->nnodes < NODE_MAG_NNODES);
-
-	node_mag->nodes[node_mag->nnodes] = node;
-	node_mag->nnodes++;
+
+	node->addr = (void *)node;
+	RB_INSERT(extent_tree_ad_s, &chunk->nodes, node);
 }
 
 static inline void *
 arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
 {
 	void *ret;
 	unsigned i, mask, bit, regind;
 
@@ -2830,17 +2842,17 @@ arena_run_reg_dalloc(arena_run_t *run, a
 		};
 
 		if (size <= 128)
 			regind = (diff >> log2_table[size - 1]);
 		else if (size <= 32768)
 			regind = diff >> (8 + log2_table[(size >> 8) - 1]);
 		else {
 			/*
-			 * The page size is too large for us to use the lookup
+			 * The run size is too large for us to use the lookup
 			 * table.  Use real division.
 			 */
 			regind = diff / size;
 		}
 	} else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
 	    << QUANTUM_2POW_MIN) + 2) {
 		regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
 		regind >>= SIZE_INV_SHIFT;
@@ -2861,249 +2873,183 @@ arena_run_reg_dalloc(arena_run_t *run, a
 		run->regs_minelm = elm;
 	bit = regind - (elm << (SIZEOF_INT_2POW + 3));
 	assert((run->regs_mask[elm] & (1U << bit)) == 0);
 	run->regs_mask[elm] |= (1U << bit);
 #undef SIZE_INV
 #undef SIZE_INV_SHIFT
 }
 
-static bool
-arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool zero)
+static void
+arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool small,
+    bool zero)
 {
 	arena_chunk_t *chunk;
-	unsigned run_ind, map_offset, total_pages, need_pages, rem_pages;
-	unsigned i;
-	uint32_t pos_beg, pos_end;
+	size_t run_ind, total_pages, need_pages, rem_pages, i;
 	extent_node_t *nodeA, *nodeB, key;
 
 	/* Insert a node into runs_alloced_ad for the first part of the run. */
-	nodeA = arena_node_alloc(arena);
-	if (nodeA == NULL)
-		return (true);
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+	nodeA = arena_chunk_node_alloc(chunk);
 	nodeA->addr = run;
 	nodeA->size = size;
 	RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeA);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
-	    >> pagesize_2pow);
-	total_pages = chunk->map[run_ind].npages;
-	need_pages = (unsigned)(size >> pagesize_2pow);
-	assert(need_pages > 0);
-	assert(need_pages <= total_pages);
-	rem_pages = total_pages - need_pages;
-
 	key.addr = run;
 	nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
 	assert(nodeB != NULL);
 
+	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
+	    >> pagesize_2pow);
+	total_pages = nodeB->size >> pagesize_2pow;
+	need_pages = (size >> pagesize_2pow);
+	assert(need_pages > 0);
+	assert(need_pages <= total_pages);
+	assert(need_pages <= CHUNK_MAP_POS_MASK || small == false);
+	rem_pages = total_pages - need_pages;
+
+	for (i = 0; i < need_pages; i++) {
 #ifdef MALLOC_DECOMMIT
-	if (opt_decommit) {
-		if (nodeB->ndirty != nodeB->size) {
+		/*
+		 * Commit decommitted pages if necessary.  If a decommitted
+		 * page is encountered, commit all needed adjacent decommitted
+		 * pages in one operation, in order to reduce system call
+		 * overhead.
+		 */
+		if (chunk->map[run_ind + i] & CHUNK_MAP_DECOMMITTED) {
+			size_t j;
+
 			/*
-			 * Commit the part of the run that is being allocated.
+			 * Advance i+j to just past the index of the last page
+			 * to commit.  Clear CHUNK_MAP_DECOMMITTED along the
+			 * way.
 			 */
-#  ifdef MOZ_MEMORY_WINDOWS
-			VirtualAlloc(run, size, MEM_COMMIT, PAGE_READWRITE);
-#  else
-			if (mmap(run, size, PROT_READ | PROT_WRITE,
-			    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) ==
-			    MAP_FAILED)
-				abort();
+			for (j = 0; i + j < need_pages && (chunk->map[run_ind +
+			    i + j] & CHUNK_MAP_DECOMMITTED); j++) {
+				chunk->map[run_ind + i + j] ^=
+				    CHUNK_MAP_DECOMMITTED;
+			}
+
+			pages_commit((void *)((uintptr_t)chunk + ((run_ind + i)
+			    << pagesize_2pow)), (j << pagesize_2pow));
+#  ifdef MALLOC_STATS
+			arena->stats.ncommit++;
 #  endif
-			if (nodeB->ndirty != 0 && nodeB->size > size) {
-				/*
-				 * Decommit the unused portion of the run in
-				 * order to assure a uniform state where all
-				 * pages in each part of the split are either
-				 * completely committed or completely
-				 * decommitted.
-				 */
-#  ifdef MOZ_MEMORY_WINDOWS
-				VirtualFree((void *)((uintptr_t)run + size),
-				    nodeB->size - size, MEM_DECOMMIT);
-#  else
-				if (mmap((void *)((uintptr_t)run + size),
-				    nodeB->size - size, PROT_NONE, MAP_FIXED |
-				    MAP_PRIVATE | MAP_ANON, -1, 0) ==
-				    MAP_FAILED)
-					abort();
-#  endif
-#  ifdef MALLOC_STATS
-				arena->stats.npurged += nodeB->ndirty;
-				arena->stats.nmadvise++;
-#  endif
-				arena->ndirty -= nodeB->ndirty;
-				nodeB->ndirty = 0;
+		}
+#endif
+
+		/* Zero if necessary. */
+		if (zero) {
+			if ((chunk->map[run_ind + i] & CHUNK_MAP_UNTOUCHED)
+			    == 0) {
+				memset((void *)((uintptr_t)chunk + ((run_ind
+				    + i) << pagesize_2pow)), 0, pagesize);
+				/* CHUNK_MAP_UNTOUCHED is cleared below. */
 			}
 		}
-	}
-#endif
-
-	/* Split enough pages from the front of run to fit allocation size. */
-	map_offset = run_ind;
-	pos_beg = chunk->map[map_offset].pos;
-	pos_end = chunk->map[map_offset + total_pages - 1].pos;
-	if (zero == false) {
-		for (i = 0; i < need_pages; i++) {
-			chunk->map[map_offset + i].npages = need_pages;
-			chunk->map[map_offset + i].pos = i;
-		}
-	} else {
-		/*
-		 * Handle first page specially, since we need to look for
-		 * POS_EMPTY rather than NPAGES_EMPTY.
-		 */
-		i = 0;
-		if (chunk->map[map_offset + i].pos != POS_EMPTY) {
-			memset((void *)((uintptr_t)chunk + ((map_offset + i) <<
-			    pagesize_2pow)), 0, pagesize);
+
+		/* Update dirty page accounting. */
+		if (chunk->map[run_ind + i] & CHUNK_MAP_DIRTY) {
+			chunk->ndirty--;
+			arena->ndirty--;
 		}
-		chunk->map[map_offset + i].npages = need_pages;
-		chunk->map[map_offset + i].pos = i;
-
-		/* Handle central pages. */
-		for (i++; i < need_pages - 1; i++) {
-			if (chunk->map[map_offset + i].npages != NPAGES_EMPTY) {
-				memset((void *)((uintptr_t)chunk + ((map_offset
-				    + i) << pagesize_2pow)), 0, pagesize);
-			}
-			chunk->map[map_offset + i].npages = need_pages;
-			chunk->map[map_offset + i].pos = i;
-		}
-
-		/*
-		 * Handle last page specially, since we need to look for
-		 * POS_EMPTY rather than NPAGES_EMPTY.
-		 */
-		if (i < need_pages) {
-			if (chunk->map[map_offset + i].npages != POS_EMPTY) {
-				memset((void *)((uintptr_t)chunk + ((map_offset
-				    + i) << pagesize_2pow)), 0, pagesize);
-			}
-			chunk->map[map_offset + i].npages = need_pages;
-			chunk->map[map_offset + i].pos = i;
-		}
+
+		/* Initialize the chunk map. */
+		if (small)
+			chunk->map[run_ind + i] = (uint8_t)i;
+		else
+			chunk->map[run_ind + i] = CHUNK_MAP_LARGE;
 	}
 
 	/* Keep track of trailing unused pages for later use. */
+	RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
 	if (rem_pages > 0) {
-		/* Update map for trailing pages. */
-		map_offset += need_pages;
-		chunk->map[map_offset].npages = rem_pages;
-		chunk->map[map_offset].pos = pos_beg;
-		chunk->map[map_offset + rem_pages - 1].npages = rem_pages;
-		chunk->map[map_offset + rem_pages - 1].pos = pos_end;
-
 		/*
 		 * Update nodeB in runs_avail_*.  Its position within
 		 * runs_avail_ad does not change.
 		 */
-		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
 		nodeB->addr = (void *)((uintptr_t)nodeB->addr + size);
 		nodeB->size -= size;
-		if (nodeB->ndirty > nodeB->size) {
-			arena->ndirty -= nodeB->ndirty - nodeB->size;
-			nodeB->ndirty = nodeB->size;
-		}
 		RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
 	} else {
 		/* Remove nodeB from runs_avail_*. */
-		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
 		RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
-		arena->ndirty -= nodeB->ndirty;
-		arena_node_dealloc(arena, nodeB);
+		arena_chunk_node_dealloc(chunk, nodeB);
 	}
 
 	chunk->pages_used += need_pages;
-
-	return (false);
 }
 
 static arena_chunk_t *
 arena_chunk_alloc(arena_t *arena)
 {
 	arena_chunk_t *chunk;
 	extent_node_t *node;
 
-	node = arena_node_alloc(arena);
-	if (node == NULL)
-		return (NULL);
-
 	if (arena->spare != NULL) {
 		chunk = arena->spare;
 		arena->spare = NULL;
-		node->ndirty = arena->spare_ndirty;
-		arena->spare_ndirty = 0;
 	} else {
-		unsigned i;
-
 		chunk = (arena_chunk_t *)chunk_alloc(chunksize, true);
-		if (chunk == NULL) {
-			arena_node_dealloc(arena, node);
+		if (chunk == NULL)
 			return (NULL);
-		}
 #ifdef MALLOC_STATS
 		arena->stats.mapped += chunksize;
 #endif
 
 		chunk->arena = arena;
 
 		RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
 
 		/*
 		 * Claim that no pages are in use, since the header is merely
 		 * overhead.
 		 */
 		chunk->pages_used = 0;
+		chunk->ndirty = 0;
 
 		/*
-		 * Initialize enough of the map to support one maximal free run.
+		 * Initialize the map to contain one maximal free untouched
+		 * run.
 		 */
-		i = arena_chunk_header_npages;
-		chunk->map[i].npages = chunk_npages - arena_chunk_header_npages;
-		chunk->map[i].pos = POS_EMPTY;
-
-		/* Mark the free run's central pages as untouched. */
-		for (i++; i < chunk_npages - 1; i++)
-			chunk->map[i].npages = NPAGES_EMPTY;
-
-		/* Take care when (chunk_npages == 2). */
-		if (i < chunk_npages) {
-			chunk->map[i].npages = chunk_npages -
-			    arena_chunk_header_npages;
-			chunk->map[i].pos = POS_EMPTY;
-		}
-
-		node->ndirty = 0;
+		memset(chunk->map, (CHUNK_MAP_LARGE | CHUNK_MAP_POS_MASK),
+		    arena_chunk_header_npages);
+		memset(&chunk->map[arena_chunk_header_npages],
+		    (CHUNK_MAP_UNTOUCHED
 #ifdef MALLOC_DECOMMIT
-#  ifdef MOZ_MEMORY_WINDOWS
-		if (opt_decommit) {
-			VirtualFree((void *)((uintptr_t)chunk +
-			    (arena_chunk_header_npages << pagesize_2pow)),
-			    chunksize - (arena_chunk_header_npages <<
-			    pagesize_2pow), MEM_DECOMMIT);
-		} else {
-			VirtualAlloc((void *)((uintptr_t)chunk +
-			    (arena_chunk_header_npages << pagesize_2pow)),
-			    chunksize - (arena_chunk_header_npages <<
-			    pagesize_2pow), MEM_RESET, PAGE_READWRITE);
-		}
-#  else
-		if (mmap((void *)((uintptr_t)chunk + (arena_chunk_header_npages
-		    << pagesize_2pow)), chunksize - (arena_chunk_header_npages
-		    << pagesize_2pow), PROT_NONE, MAP_FIXED | MAP_PRIVATE |
-		    MAP_ANON, -1, 0) == MAP_FAILED)
-			abort();
+		    | CHUNK_MAP_DECOMMITTED
+#endif
+		    ), (chunk_npages -
+		    arena_chunk_header_npages));
+
+		/* Initialize the tree of unused extent nodes. */
+		RB_INIT(&chunk->nodes);
+		chunk->nodes_past = (extent_node_t *)QUANTUM_CEILING(
+		    (uintptr_t)&chunk->map[chunk_npages]);
+
+#ifdef MALLOC_DECOMMIT
+		/*
+		 * Start out decommitted, in order to force a closer
+		 * correspondence between dirty pages and committed untouched
+		 * pages.
+		 */
+		pages_decommit((void *)((uintptr_t)chunk +
+		    (arena_chunk_header_npages << pagesize_2pow)),
+		    ((chunk_npages - arena_chunk_header_npages) <<
+		    pagesize_2pow));
+#  ifdef MALLOC_STATS
+		arena->stats.ndecommit++;
+		arena->stats.decommitted += (chunk_npages -
+		    arena_chunk_header_npages);
 #  endif
 #endif
 	}
 
 	/* Insert the run into the runs_avail_* red-black trees. */
+	node = arena_chunk_node_alloc(chunk);
 	node->addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
 	    pagesize_2pow));
 	node->size = chunksize - (arena_chunk_header_npages << pagesize_2pow);
 	RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, node);
 	RB_INSERT(extent_tree_ad_s, &arena->runs_avail_ad, node);
 
 	return (chunk);
 }
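[Editor's note] arena_chunk_alloc() now leaves a fresh chunk's data pages decommitted via pages_decommit(), whose definition is not part of this hunk. Judging from the platform-specific code removed above (VirtualFree on Windows, mmap PROT_NONE elsewhere), it behaves roughly like the hedged sketch below; the name, signature, and main() driver here are assumptions, not the patch's actual helper.

/*
 * Hedged sketch of a pages_decommit()-style helper, modeled on the
 * platform-specific calls this patch removes.
 */
#include <stdio.h>
#include <stdlib.h>

#ifdef _WIN32
#include <windows.h>

static void
pages_decommit_sketch(void *addr, size_t size)
{
	/* Give the physical pages back but keep the address range reserved. */
	VirtualFree(addr, size, MEM_DECOMMIT);
}

int
main(void)
{
	void *p = VirtualAlloc(NULL, 4096, MEM_RESERVE | MEM_COMMIT,
	    PAGE_READWRITE);

	pages_decommit_sketch(p, 4096);
	puts("decommitted");
	return (0);
}
#else
#include <sys/mman.h>
#ifndef MAP_ANON
#define	MAP_ANON MAP_ANONYMOUS
#endif

static void
pages_decommit_sketch(void *addr, size_t size)
{
	/*
	 * Replace the range with PROT_NONE pages; touching it again without
	 * a matching recommit would fault, which is the point.
	 */
	if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON,
	    -1, 0) == MAP_FAILED)
		abort();
}

int
main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	((char *)p)[0] = 1;	/* commit the page */
	pages_decommit_sketch(p, 4096);
	puts("decommitted");
	return (0);
}
#endif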
@@ -3111,328 +3057,308 @@ arena_chunk_alloc(arena_t *arena)
 static void
 arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 {
 	extent_node_t *node, key;
 
 	if (arena->spare != NULL) {
 		RB_REMOVE(arena_chunk_tree_s, &chunk->arena->chunks,
 		    arena->spare);
-		arena->ndirty -= arena->spare_ndirty;
+		arena->ndirty -= arena->spare->ndirty;
 		chunk_dealloc((void *)arena->spare, chunksize);
 #ifdef MALLOC_STATS
 		arena->stats.mapped -= chunksize;
 #endif
 	}
 
 	/*
 	 * Remove run from the runs trees, regardless of whether this chunk
-	 * will be cached, so that the arena does not use it.
+	 * will be cached, so that the arena does not use it.  Dirty page
+	 * flushing only uses the chunks tree, so leaving this chunk in that
+	 * tree is sufficient for that purpose.
 	 */
 	key.addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
 	    pagesize_2pow));
 	node = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
 	assert(node != NULL);
 	RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, node);
 	RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, node);
+	arena_chunk_node_dealloc(chunk, node);
 
 	arena->spare = chunk;
-	arena->spare_ndirty = node->ndirty;
-
-	arena_node_dealloc(arena, node);
 }
 
 static arena_run_t *
-arena_run_alloc(arena_t *arena, size_t size, bool zero)
+arena_run_alloc(arena_t *arena, size_t size, bool small, bool zero)
 {
 	arena_chunk_t *chunk;
 	arena_run_t *run;
 	extent_node_t *node, key;
 
 	assert(size <= (chunksize - (arena_chunk_header_npages <<
 	    pagesize_2pow)));
 	assert((size & pagesize_mask) == 0);
 
 	/* Search the arena's chunks for the lowest best fit. */
 	key.addr = NULL;
 	key.size = size;
 	node = RB_NFIND(extent_tree_szad_s, &arena->runs_avail_szad, &key);
 	if (node != NULL) {
 		run = (arena_run_t *)node->addr;
-		if (arena_run_split(arena, run, size, zero))
-			return (NULL);
+		arena_run_split(arena, run, size, small, zero);
 		return (run);
 	}
 
 	/*
 	 * No usable runs.  Create a new chunk from which to allocate the run.
 	 */
 	chunk = arena_chunk_alloc(arena);
 	if (chunk == NULL)
 		return (NULL);
 	run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
 	    pagesize_2pow));
-
 	/* Update page map. */
-	if (arena_run_split(arena, run, size, zero)) {
-		arena_chunk_dealloc(arena, chunk);
-		return (NULL);
-	}
+	arena_run_split(arena, run, size, small, zero);
 	return (run);
 }
 
 static void
 arena_purge(arena_t *arena)
 {
-	extent_node_t *node;
+	arena_chunk_t *chunk;
 #ifdef MALLOC_DEBUG
 	size_t ndirty;
 
-	ndirty = arena->spare_ndirty;
-	RB_FOREACH(node, extent_tree_ad_s, &arena->runs_avail_ad) {
-		ndirty += node->ndirty;
+	ndirty = 0;
+	RB_FOREACH(chunk, arena_chunk_tree_s, &arena->chunks) {
+		ndirty += chunk->ndirty;
 	}
 	assert(ndirty == arena->ndirty);
 #endif
-	assert(arena->ndirty > opt_free_max);
+	assert(arena->ndirty > opt_dirty_max);
+
+#ifdef MALLOC_STATS
+	arena->stats.npurge++;
+#endif
 
 	/*
-	 * Purge the spare first, even if it isn't at the lowest address of
-	 * anything currently mapped by the arena.
+	 * Iterate downward through chunks until enough dirty memory has been
+	 * purged.
 	 */
-	if (arena->spare_ndirty > 0) {
-		assert(arena->spare != NULL);
+	RB_FOREACH_REVERSE(chunk, arena_chunk_tree_s, &arena->chunks) {
+		if (chunk->ndirty > 0) {
+			size_t i;
+
+			for (i = chunk_npages - 1; i >=
+			    arena_chunk_header_npages; i--) {
+				if (chunk->map[i] & CHUNK_MAP_DIRTY) {
+					size_t npages;
+
+					chunk->map[i] = (CHUNK_MAP_LARGE |
 #ifdef MALLOC_DECOMMIT
-#  ifdef MOZ_MEMORY_WINDOWS
-		/*
-		 * Tell the kernel that we don't need the data in this
-		 * run, but only if requested via runtime
-		 * configuration.
-		 */
-		if (opt_decommit) {
-			VirtualFree((void *)((uintptr_t)arena->spare +
-			    (arena_chunk_header_npages <<
-			    pagesize_2pow)), chunksize -
-			    (arena_chunk_header_npages <<
-			    pagesize_2pow), MEM_DECOMMIT);
-		} else {
-			VirtualAlloc((void *)((uintptr_t)arena->spare +
-			    (arena_chunk_header_npages <<
-			    pagesize_2pow)), chunksize -
-			    (arena_chunk_header_npages <<
-			    pagesize_2pow), MEM_RESET, PAGE_READWRITE);
-		}
-#  else
-		if (mmap((void *)((uintptr_t)arena->spare +
-		    (arena_chunk_header_npages << pagesize_2pow)),
-		    chunksize - (arena_chunk_header_npages <<
-		    pagesize_2pow), PROT_NONE, MAP_FIXED | MAP_PRIVATE |
-		    MAP_ANON, -1, 0) == MAP_FAILED)
-			abort();
+					    CHUNK_MAP_DECOMMITTED |
+#endif
+					    CHUNK_MAP_POS_MASK);
+					chunk->ndirty--;
+					arena->ndirty--;
+					/* Find adjacent dirty run(s). */
+					for (npages = 1; i >
+					    arena_chunk_header_npages &&
+					    (chunk->map[i - 1] &
+					    CHUNK_MAP_DIRTY); npages++) {
+						i--;
+						chunk->map[i] = (CHUNK_MAP_LARGE
+#ifdef MALLOC_DECOMMIT
+						    | CHUNK_MAP_DECOMMITTED
+#endif
+						    | CHUNK_MAP_POS_MASK);
+						chunk->ndirty--;
+						arena->ndirty--;
+					}
+
+#ifdef MALLOC_DECOMMIT
+					pages_decommit((void *)((uintptr_t)
+					    chunk + (i << pagesize_2pow)),
+					    (npages << pagesize_2pow));
+#  ifdef MALLOC_STATS
+					arena->stats.ndecommit++;
+					arena->stats.decommitted += npages;
 #  endif
-#elif (defined(MOZ_MEMORY_DARWIN))
-		mmap((void *)((uintptr_t)arena->spare +
-		    (arena_chunk_header_npages << pagesize_2pow)),
-		    chunksize - (arena_chunk_header_npages <<
-		    pagesize_2pow), PROT_READ | PROT_WRITE, MAP_PRIVATE
-		    | MAP_ANON | MAP_FIXED, -1, 0);
-		//msync((void *)((uintptr_t)arena->spare +
-		//    (arena_chunk_header_npages << pagesize_2pow)),
-		//    chunksize - (arena_chunk_header_npages <<
-		//    pagesize_2pow), MS_DEACTIVATE);
 #else
-		madvise((void *)((uintptr_t)arena->spare +
-		    (arena_chunk_header_npages << pagesize_2pow)),
-		    chunksize - (arena_chunk_header_npages <<
-		    pagesize_2pow), MADV_FREE);
+					madvise((void *)((uintptr_t)chunk + (i
+					    << pagesize_2pow)), pagesize *
+					    npages, MADV_FREE);
 #endif
 #ifdef MALLOC_STATS
-		arena->stats.npurged += arena->spare_ndirty;
-		arena->stats.nmadvise++;
-#endif
-		arena->ndirty -= arena->spare_ndirty;
-		arena->spare_ndirty = 0;
-		if (arena->ndirty <= (opt_free_max >> 1))
-			return;
-	}
-
-	/*
-	 * Iterate backward through runs until enough dirty memory has been
-	 * purged.
-	 */
-	RB_FOREACH_REVERSE(node, extent_tree_ad_s, &arena->runs_avail_ad) {
-		if (node->ndirty > 0) {
-#ifdef MALLOC_DECOMMIT
-#  ifdef MOZ_MEMORY_WINDOWS
-			/*
-			 * Tell the kernel that we don't need the data
-			 * in this run, but only if requested via
-			 * runtime configuration.
-			 */
-			if (opt_decommit) {
-				VirtualFree(node->addr, node->size,
-				    MEM_DECOMMIT);
-			} else {
-				VirtualAlloc(node->addr, node->size,
-				    MEM_RESET, PAGE_READWRITE);
+					arena->stats.nmadvise++;
+					arena->stats.purged += npages;
+#endif
+				}
 			}
-#  else
-			if (mmap(node->addr, node->size, PROT_NONE,
-			    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0)
-			    == MAP_FAILED)
-				abort();
-#  endif
-#elif (defined(MOZ_MEMORY_DARWIN))
-			mmap(node->addr, node->size, PROT_READ | PROT_WRITE,
-			    MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
-			//msync(node->addr, node->size, MS_DEACTIVATE);
-#else
-			madvise(node->addr, node->size, MADV_FREE);
-#endif
-#ifdef MALLOC_STATS
-			arena->stats.npurged += node->ndirty;
-			arena->stats.nmadvise++;
-#endif
-			arena->ndirty -= node->ndirty;
-			node->ndirty = 0;
-			if (arena->ndirty <= (opt_free_max >> 1))
-				return;
 		}
 	}
 }
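[Editor's note] The rewritten arena_purge() walks each chunk's page map from the top, folds adjacent dirty pages into a single span, and purges each span with one madvise()/pages_decommit() call. A minimal standalone simulation of that grouping logic, using a simplified map encoding and plain counters instead of real pages:

/*
 * Toy model of the dirty-page sweep in arena_purge(): scan the map from
 * the top, coalesce adjacent dirty pages, and "purge" each span at once.
 */
#include <stdint.h>
#include <stdio.h>

#define	NPAGES		16
#define	HDR_PAGES	1	/* stand-in for arena_chunk_header_npages */
#define	DIRTY		0x40U	/* stand-in for CHUNK_MAP_DIRTY */

int
main(void)
{
	uint8_t map[NPAGES] = {0};
	size_t i, ndirty = 0;

	/* Mark two dirty spans: pages 3-5 and 9-12. */
	for (i = 3; i <= 5; i++) { map[i] |= DIRTY; ndirty++; }
	for (i = 9; i <= 12; i++) { map[i] |= DIRTY; ndirty++; }

	for (i = NPAGES - 1; i >= HDR_PAGES; i--) {
		if (map[i] & DIRTY) {
			size_t npages;

			map[i] &= (uint8_t)~DIRTY;
			ndirty--;
			/* Extend the span downward over adjacent dirty pages. */
			for (npages = 1; i > HDR_PAGES && (map[i - 1] & DIRTY);
			    npages++) {
				i--;
				map[i] &= (uint8_t)~DIRTY;
				ndirty--;
			}
			/*
			 * One madvise()/pages_decommit() call would cover
			 * pages [i, i + npages) here.
			 */
			printf("purge %zu pages starting at page %zu\n",
			    npages, i);
		}
	}
	printf("remaining dirty pages: %zu\n", ndirty);
	return (0);
}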
 
 static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size, size_t ndirty)
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 {
 	arena_chunk_t *chunk;
-	extent_node_t *node, key;
-	unsigned run_ind, run_pages;
+	extent_node_t *nodeA, *nodeB, *nodeC, key;
+	size_t size, run_ind, run_pages;
+
+	/* Remove run from runs_alloced_ad. */
+	key.addr = run;
+	nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+	assert(nodeB != NULL);
+	RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+	size = nodeB->size;
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-
 	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
 	    >> pagesize_2pow);
 	assert(run_ind >= arena_chunk_header_npages);
 	assert(run_ind < (chunksize >> pagesize_2pow));
 	run_pages = (size >> pagesize_2pow);
-	assert(run_pages == chunk->map[run_ind].npages);
 
 	/* Subtract pages from count of pages used in chunk. */
 	chunk->pages_used -= run_pages;
 
-	/* Mark run as deallocated. */
-	assert(chunk->map[run_ind].npages == run_pages);
-	chunk->map[run_ind].pos = POS_FREE;
-	assert(chunk->map[run_ind + run_pages - 1].npages == run_pages);
-	chunk->map[run_ind + run_pages - 1].pos = POS_FREE;
-
-	/* Remove run from runs_alloced_ad. */
-	key.addr = run;
-	node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
-	assert(node != NULL);
-	RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, node);
-
-	/* Try to coalesce with neighboring runs. */
-	if (run_ind > arena_chunk_header_npages &&
-	    chunk->map[run_ind - 1].pos >= POS_EMPTY) {
-		unsigned prev_npages;
-
-		/* Coalesce with previous run. */
-		prev_npages = chunk->map[run_ind - 1].npages;
+	if (dirty) {
+		size_t i;
+
+		for (i = 0; i < run_pages; i++) {
+			assert((chunk->map[run_ind + i] & CHUNK_MAP_DIRTY) ==
+			    0);
+			chunk->map[run_ind + i] |= CHUNK_MAP_DIRTY;
+			chunk->ndirty++;
+			arena->ndirty++;
+		}
+	}
+#ifdef MALLOC_DEBUG
+	/* Set map elements to a bogus value in order to aid error detection. */
+	{
+		size_t i;
+
+		for (i = 0; i < run_pages; i++) {
+			chunk->map[run_ind + i] |= (CHUNK_MAP_LARGE |
+			    CHUNK_MAP_POS_MASK);
+		}
+	}
+#endif
+
+	/* Try to coalesce forward. */
+	key.addr = (void *)((uintptr_t)run + size);
+	nodeC = RB_NFIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+	if (nodeC != NULL && nodeC->addr == key.addr) {
 		/*
-		 * The way run allocation currently works (lowest best fit),
-		 * it is impossible for a free run to have empty (untouched)
-		 * pages followed by dirty pages.  If the run allocation policy
-		 * changes, then we will need to account for it here.
+		 * Coalesce forward.  This does not change the position within
+		 * runs_avail_ad, so only remove/insert from/into
+		 * runs_avail_szad.
 		 */
-		assert(chunk->map[run_ind - 1].pos != POS_EMPTY);
-#if 0 /* Currently unnecessary. */
-		if (prev_npages > 1 && chunk->map[run_ind - 1].pos == POS_EMPTY)
-			chunk->map[run_ind - 1].npages = NPAGES_EMPTY;
-#endif
-		run_ind -= prev_npages;
-		assert(chunk->map[run_ind].npages == prev_npages);
-		assert(chunk->map[run_ind].pos >= POS_EMPTY);
-		run_pages += prev_npages;
-
-		chunk->map[run_ind].npages = run_pages;
-		assert(chunk->map[run_ind].pos >= POS_EMPTY);
-		chunk->map[run_ind + run_pages - 1].npages = run_pages;
-		assert(chunk->map[run_ind + run_pages - 1].pos >= POS_EMPTY);
-
-		/*
-		 * Update node in runs_avail_*.  Its position does not change
-		 * in runs_avail_ad.
-		 */
-		arena_node_dealloc(arena, node);
-		key.addr = (void *)((uintptr_t)run - (prev_npages <<
-		    pagesize_2pow));
-		node = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
-		assert(node != NULL);
-		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, node);
-		node->size = (run_pages << pagesize_2pow);
-		node->ndirty += ndirty;
-		assert(node->ndirty <= node->size);
-		RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, node);
+		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeC);
+		nodeC->addr = (void *)run;
+		nodeC->size += size;
+		RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeC);
+		arena_chunk_node_dealloc(chunk, nodeB);
+		nodeB = nodeC;
 	} else {
 		/*
-		 * Coalescing backward failed, so insert node into runs_avail_*.
+		 * Coalescing forward failed, so insert nodeB into runs_avail_*.
 		 */
-		node->ndirty = ndirty;
-		assert(node->ndirty <= node->size);
-		RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, node);
-		RB_INSERT(extent_tree_ad_s, &arena->runs_avail_ad, node);
+		RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+		RB_INSERT(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
 	}
 
-	if (run_ind + run_pages < chunk_npages &&
-	    chunk->map[run_ind + run_pages].pos >= POS_EMPTY) {
-		unsigned next_npages;
-		extent_node_t *nodeB;
-
-		/* Coalesce with next run. */
-		next_npages = chunk->map[run_ind + run_pages].npages;
-		if (next_npages > 1 && chunk->map[run_ind + run_pages].pos ==
-		    POS_EMPTY)
-			chunk->map[run_ind + run_pages].npages = NPAGES_EMPTY;
-		run_pages += next_npages;
-		assert(chunk->map[run_ind + run_pages - 1].npages ==
-		    next_npages);
-		assert(chunk->map[run_ind + run_pages - 1].pos >= POS_EMPTY);
-
-		chunk->map[run_ind].npages = run_pages;
-		assert(chunk->map[run_ind].pos >= POS_EMPTY);
-		chunk->map[run_ind + run_pages - 1].npages = run_pages;
-		assert(chunk->map[run_ind + run_pages - 1].pos >= POS_EMPTY);
-
+	/* Try to coalesce backward. */
+	nodeA = RB_PREV(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
+	if (nodeA != NULL && (void *)((uintptr_t)nodeA->addr + nodeA->size) ==
+	    (void *)run) {
 		/*
-		 * Update node.  Its position does not change in runs_avail_ad.
+		 * Coalesce with previous run.  This does not change nodeB's
+		 * position within runs_avail_ad, so only remove/insert
+		 * from/into runs_avail_szad.
 		 */
-		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, node);
-		node->size = (run_pages << pagesize_2pow);
-		RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, node);
-		/* Delete the subsumed run's node. */
-		nodeB = RB_NEXT(extent_tree_ad_s, &arena->runs_avail_ad, node);
-		assert(nodeB->size == (next_npages << pagesize_2pow));
+		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeA);
+		RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeA);
+
 		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
-		RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
-		node->ndirty += nodeB->ndirty;
-		assert(node->ndirty <= node->size);
-		arena_node_dealloc(arena, nodeB);
+		nodeB->addr = nodeA->addr;
+		nodeB->size += nodeA->size;
+		RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+
+		arena_chunk_node_dealloc(chunk, nodeA);
 	}
 
 	/* Deallocate chunk if it is now completely unused. */
 	if (chunk->pages_used == 0)
 		arena_chunk_dealloc(arena, chunk);
 
-	/* Enforce opt_free_max. */
-	arena->ndirty += ndirty;
-	if (arena->ndirty > opt_free_max)
+	/* Enforce opt_dirty_max. */
+	if (arena->ndirty > opt_dirty_max)
 		arena_purge(arena);
 }
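[Editor's note] Coalescing in arena_run_dalloc() is now driven purely by addresses: the available node at run + size is merged forward, then the predecessor in the address-ordered tree is merged backward. The sketch below mirrors those two checks with a small sorted array standing in for the red-black trees; the types and helpers are simplifications, not the patch's structures.

/*
 * Minimal model of address-based coalescing: free extents kept sorted by
 * address (standing in for runs_avail_ad), and a freed run merged with
 * the neighbor before and/or after it.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uintptr_t addr; size_t size; } extent_t;

static extent_t avail[8];
static size_t navail = 0;

static void
free_run(uintptr_t addr, size_t size)
{
	size_t i;

	/* Find the first free extent at or after the freed run. */
	for (i = 0; i < navail && avail[i].addr < addr; i++)
		;

	/* Coalesce forward: the freed run ends exactly where avail[i] begins. */
	if (i < navail && addr + size == avail[i].addr) {
		avail[i].addr = addr;
		avail[i].size += size;
	} else {
		/* No forward neighbor; insert a new extent at position i. */
		size_t j;

		for (j = navail; j > i; j--)
			avail[j] = avail[j - 1];
		avail[i].addr = addr;
		avail[i].size = size;
		navail++;
	}

	/* Coalesce backward: the previous extent ends exactly at addr. */
	if (i > 0 && avail[i - 1].addr + avail[i - 1].size == avail[i].addr) {
		size_t j;

		avail[i - 1].size += avail[i].size;
		for (j = i; j + 1 < navail; j++)
			avail[j] = avail[j + 1];
		navail--;
	}
}

int
main(void)
{
	free_run(0x3000, 0x1000);
	free_run(0x5000, 0x1000);
	free_run(0x4000, 0x1000);	/* bridges the two into one extent */
	assert(navail == 1 && avail[0].addr == 0x3000 && avail[0].size == 0x3000);
	printf("free extent: %#lx + %#zx\n",
	    (unsigned long)avail[0].addr, avail[0].size);
	return (0);
}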
 
+static void
+arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeB,
+    arena_run_t *run, size_t oldsize, size_t newsize)
+{
+	extent_node_t *nodeA;
+
+	assert(nodeB->addr == run);
+	assert(nodeB->size == oldsize);
+	assert(oldsize > newsize);
+
+	/*
+	 * Update the run's node in runs_alloced_ad.  Its position does not
+	 * change.
+	 */
+	nodeB->addr = (void *)((uintptr_t)run + (oldsize - newsize));
+	nodeB->size = newsize;
+
+	/*
+	 * Insert a node into runs_alloced_ad so that arena_run_dalloc() can
+	 * treat the leading run as separately allocated.
+	 */
+	nodeA = arena_chunk_node_alloc(chunk);
+	nodeA->addr = (void *)run;
+	nodeA->size = oldsize - newsize;
+	RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeA);
+
+	arena_run_dalloc(arena, (arena_run_t *)run, false);
+}
+
+static void
+arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeA,
+    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
+{
+	extent_node_t *nodeB;
+
+	assert(nodeA->addr == run);
+	assert(nodeA->size == oldsize);
+	assert(oldsize > newsize);
+
+	/*
+	 * Update the run's node in runs_alloced_ad.  Its position does not
+	 * change.
+	 */
+	nodeA->size = newsize;
+
+	/*
+	 * Insert a node into runs_alloced_ad so that arena_run_dalloc() can
+	 * treat the trailing run as separately allocated.
+	 */
+	nodeB = arena_chunk_node_alloc(chunk);
+	nodeB->addr = (void *)((uintptr_t)run + newsize);
+	nodeB->size = oldsize - newsize;
+	RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+
+	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
+	    dirty);
+}
+
 static arena_run_t *
 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 {
 	arena_run_t *run;
 	unsigned i, remainder;
 
 	/* Look for a usable run. */
 	if ((run = RB_MIN(arena_run_tree_s, &bin->runs)) != NULL) {
@@ -3441,17 +3367,17 @@ arena_bin_nonfull_run_get(arena_t *arena
 #ifdef MALLOC_STATS
 		bin->stats.reruns++;
 #endif
 		return (run);
 	}
 	/* No existing runs have any space available. */
 
 	/* Allocate a new run. */
-	run = arena_run_alloc(arena, bin->run_size, false);
+	run = arena_run_alloc(arena, bin->run_size, true, false);
 	if (run == NULL)
 		return (NULL);
 
 	/* Initialize run internals. */
 	run->bin = bin;
 
 	for (i = 0; i < bin->regs_mask_nelms; i++)
 		run->regs_mask[i] = UINT_MAX;
@@ -3602,549 +3528,791 @@ arena_lock_balance(arena_t *arena)
 		/*
 		 * Calculate the exponentially averaged contention for this
 		 * arena.  Due to integer math always rounding down, this value
 		 * decays somewhat faster than normal.
 		 */
 		arena->contention = (((uint64_t)arena->contention
 		    * (uint64_t)((1U << BALANCE_ALPHA_INV_2POW)-1))
 		    + (uint64_t)contention) >> BALANCE_ALPHA_INV_2POW;
-		if (arena->contention >= opt_balance_threshold) {
-			uint32_t ind;
-
-			arena->contention = 0;
+		if (arena->contention >= opt_balance_threshold)
+			arena_lock_balance_hard(arena);
+	}
+}
+
+static void
+arena_lock_balance_hard(arena_t *arena)
+{
+	uint32_t ind;
+
+	arena->contention = 0;
 #ifdef MALLOC_STATS
-			arena->stats.nbalance++;
-#endif
-			ind = PRN(balance, narenas_2pow);
-			if (arenas[ind] != NULL) {
+	arena->stats.nbalance++;
+#endif
+	ind = PRN(balance, narenas_2pow);
+	if (arenas[ind] != NULL) {
 #ifdef MOZ_MEMORY_WINDOWS
-				TlsSetValue(tlsIndex, arenas[ind]);
+		TlsSetValue(tlsIndex, arenas[ind]);
 #else
-				arenas_map = arenas[ind];
-#endif
-			} else {
-				malloc_spin_lock(&arenas_lock);
-				if (arenas[ind] != NULL) {
+		arenas_map = arenas[ind];
+#endif
+	} else {
+		malloc_spin_lock(&arenas_lock);
+		if (arenas[ind] != NULL) {
 #ifdef MOZ_MEMORY_WINDOWS
-					TlsSetValue(tlsIndex, arenas[ind]);
+			TlsSetValue(tlsIndex, arenas[ind]);
 #else
-					arenas_map = arenas[ind];
-#endif
-				} else {
+			arenas_map = arenas[ind];
+#endif
+		} else {
 #ifdef MOZ_MEMORY_WINDOWS
-					TlsSetValue(tlsIndex,
-					    arenas_extend(ind));
+			TlsSetValue(tlsIndex, arenas_extend(ind));
 #else
-					arenas_map = arenas_extend(ind);
-#endif
-				}
-				malloc_spin_unlock(&arenas_lock);
-			}
+			arenas_map = arenas_extend(ind);
+#endif
 		}
+		malloc_spin_unlock(&arenas_lock);
 	}
 }
 #endif
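[Editor's note] The contention figure tested against opt_balance_threshold above is an exponential moving average computed with shifts, so each new sample is weighted by 1/2^BALANCE_ALPHA_INV_2POW and, because the integer division truncates, the value decays a little faster than an exact average would. A tiny worked example; the alpha constant and samples here are made up for illustration.

/*
 * Worked example of the shift-based exponential moving average used for
 * arena lock contention.
 */
#include <stdint.h>
#include <stdio.h>

#define	BALANCE_ALPHA_INV_2POW	4	/* alpha = 1/16 (assumed) */

int
main(void)
{
	uint32_t contention = 0;
	uint32_t samples[] = {100, 100, 0, 0, 0, 0};
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* Same formula as in arena_lock_balance(). */
		contention = (uint32_t)((((uint64_t)contention *
		    (uint64_t)((1U << BALANCE_ALPHA_INV_2POW) - 1)) +
		    (uint64_t)samples[i]) >> BALANCE_ALPHA_INV_2POW);
		printf("sample %u -> average %u\n", samples[i], contention);
	}
	return (0);
}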
 
+static inline void *
+arena_malloc_small(arena_t *arena, size_t size, bool zero)
+{
+	void *ret;
+	arena_bin_t *bin;
+	arena_run_t *run;
+
+	if (size < small_min) {
+		/* Tiny. */
+		size = pow2_ceil(size);
+		bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
+		    1)))];
+#if (!defined(NDEBUG) || defined(MALLOC_STATS))
+		/*
+		 * Bin calculation is always correct, but we may need
+		 * to fix size for the purposes of assertions and/or
+		 * stats accuracy.
+		 */
+		if (size < (1U << TINY_MIN_2POW))
+			size = (1U << TINY_MIN_2POW);
+#endif
+	} else if (size <= small_max) {
+		/* Quantum-spaced. */
+		size = QUANTUM_CEILING(size);
+		bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
+		    - 1];
+	} else {
+		/* Sub-page. */
+		size = pow2_ceil(size);
+		bin = &arena->bins[ntbins + nqbins
+		    + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
+	}
+	assert(size == bin->reg_size);
+
+#ifdef MALLOC_BALANCE
+	arena_lock_balance(arena);
+#else
+	malloc_spin_lock(&arena->lock);
+#endif
+	if ((run = bin->runcur) != NULL && run->nfree > 0)
+		ret = arena_bin_malloc_easy(arena, bin, run);
+	else
+		ret = arena_bin_malloc_hard(arena, bin);
+
+	if (ret == NULL) {
+		malloc_spin_unlock(&arena->lock);
+		return (NULL);
+	}
+
+#ifdef MALLOC_STATS
+	bin->stats.nrequests++;
+	arena->stats.nmalloc_small++;
+	arena->stats.allocated_small += size;
+#endif
+	malloc_spin_unlock(&arena->lock);
+
+	if (zero == false) {
+#ifdef MALLOC_FILL
+		if (opt_junk)
+			memset(ret, 0xa5, size);
+		else if (opt_zero)
+			memset(ret, 0, size);
+#endif
+	} else
+		memset(ret, 0, size);
+
+	return (ret);
+}
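[Editor's note] arena_malloc_small() rounds the request into one of three bands: power-of-two "tiny" classes below small_min, quantum-spaced classes up to small_max, and power-of-two sub-page classes above that. The sketch below reproduces only the rounding; TINY_MIN_2POW, the quantum, small_max, and the page size are given assumed values here, since the real ones are configured elsewhere in jemalloc.c.

/*
 * Size-class rounding as in arena_malloc_small(), with assumed constants:
 * tiny classes 2..8 bytes, a 16-byte quantum up to 512, then power-of-two
 * sub-page classes below a 4 KiB page.
 */
#include <stdint.h>
#include <stdio.h>

#define	TINY_MIN_2POW	1
#define	QUANTUM		16
#define	SMALL_MIN	(QUANTUM / 2)	/* 8 */
#define	SMALL_MAX	512

static size_t
pow2_ceil(size_t x)
{

	x--;
	x |= x >> 1; x |= x >> 2; x |= x >> 4;
	x |= x >> 8; x |= x >> 16;
#if SIZE_MAX > 0xffffffffU
	x |= x >> 32;
#endif
	return (x + 1);
}

static size_t
small_round(size_t size)
{

	if (size < SMALL_MIN) {
		/* Tiny: power of two, at least 2^TINY_MIN_2POW. */
		size = pow2_ceil(size);
		if (size < ((size_t)1 << TINY_MIN_2POW))
			size = (size_t)1 << TINY_MIN_2POW;
	} else if (size <= SMALL_MAX) {
		/* Quantum-spaced: round up to a multiple of the quantum. */
		size = (size + QUANTUM - 1) & ~((size_t)QUANTUM - 1);
	} else {
		/* Sub-page: round up to a power of two below the page size. */
		size = pow2_ceil(size);
	}
	return (size);
}

int
main(void)
{
	size_t sizes[] = {1, 3, 7, 9, 17, 500, 513, 2000};
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%zu -> %zu\n", sizes[i], small_round(sizes[i]));
	return (0);
}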
+
 static void *
+arena_malloc_large(arena_t *arena, size_t size, bool zero)
+{
+	void *ret;
+
+	/* Large allocation. */
+	size = PAGE_CEILING(size);
+#ifdef MALLOC_BALANCE
+	arena_lock_balance(arena);
+#else
+	malloc_spin_lock(&arena->lock);
+#endif
+	ret = (void *)arena_run_alloc(arena, size, false, zero);
+	if (ret == NULL) {
+		malloc_spin_unlock(&arena->lock);
+		return (NULL);
+	}
+#ifdef MALLOC_STATS
+	arena->stats.nmalloc_large++;
+	arena->stats.allocated_large += size;
+#endif
+	malloc_spin_unlock(&arena->lock);
+
+	if (zero == false) {
+#ifdef MALLOC_FILL
+		if (opt_junk)
+			memset(ret, 0xa5, size);
+		else if (opt_zero)
+			memset(ret, 0, size);
+#endif
+	}
+
+	return (ret);
+}
+
+static inline void *
 arena_malloc(arena_t *arena, size_t size, bool zero)
 {
-	void *ret;
 
 	assert(arena != NULL);
 	assert(arena->magic == ARENA_MAGIC);
 	assert(size != 0);
 	assert(QUANTUM_CEILING(size) <= arena_maxclass);
 
 	if (size <= bin_maxclass) {
-		arena_bin_t *bin;
-		arena_run_t *run;
-
-		/* Small allocation. */
-
-		if (size < small_min) {
-			/* Tiny. */
-			size = pow2_ceil(size);
-			bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
-			    1)))];
-#if (!defined(NDEBUG) || defined(MALLOC_STATS))
-			/*
-			 * Bin calculation is always correct, but we may need
-			 * to fix size for the purposes of assertions and/or
-			 * stats accuracy.
-			 */
-			if (size < (1U << TINY_MIN_2POW))
-				size = (1U << TINY_MIN_2POW);
-#endif
-		} else if (size <= small_max) {
-			/* Quantum-spaced. */
-			size = QUANTUM_CEILING(size);
-			bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
-			    - 1];
-		} else {
-			/* Sub-page. */
-			size = pow2_ceil(size);
-			bin = &arena->bins[ntbins + nqbins
-			    + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
-		}
-		assert(size == bin->reg_size);
-
-#ifdef MALLOC_BALANCE
-		arena_lock_balance(arena);
-#else
-		malloc_spin_lock(&arena->lock);
-#endif
-		if ((run = bin->runcur) != NULL && run->nfree > 0)
-			ret = arena_bin_malloc_easy(arena, bin, run);
-		else
-			ret = arena_bin_malloc_hard(arena, bin);
-
-		if (ret == NULL) {
-			malloc_spin_unlock(&arena->lock);
-			return (NULL);
-		}
-
-#ifdef MALLOC_STATS
-		bin->stats.nrequests++;
-		arena->stats.nmalloc_small++;
-		arena->stats.allocated_small += size;
-#endif
-		malloc_spin_unlock(&arena->lock);
-
-		if (zero == false) {
-			if (opt_junk)
-				memset(ret, 0xa5, size);
-			else if (opt_zero)
-				memset(ret, 0, size);
-		} else
-			memset(ret, 0, size);
-	} else {
-		/* Large allocation. */
-		size = PAGE_CEILING(size);
-#ifdef MALLOC_BALANCE
-		arena_lock_balance(arena);
-#else
-		malloc_spin_lock(&arena->lock);
-#endif
-		ret = (void *)arena_run_alloc(arena, size, zero);
-		if (ret == NULL) {
-			malloc_spin_unlock(&arena->lock);
-			return (NULL);
-		}
-#ifdef MALLOC_STATS
-		arena->stats.nmalloc_large++;
-		arena->stats.allocated_large += size;
-#endif
-		malloc_spin_unlock(&arena->lock);
-
-		if (zero == false) {
-			if (opt_junk)
-				memset(ret, 0xa5, size);
-			else if (opt_zero)
-				memset(ret, 0, size);
-		}
-	}
-
-	return (ret);
+		return (arena_malloc_small(arena, size, zero));
+	} else
+		return (arena_malloc_large(arena, size, zero));
 }
 
-static inline void
-arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeA,
-    extent_node_t *nodeB, arena_run_t *run, size_t oldsize, size_t newsize)
+static inline void *
+imalloc(size_t size)
 {
-	unsigned i, pageind, npages;
-
-	assert(nodeB->addr == run);
-	assert(nodeB->size == oldsize);
-	assert(oldsize > newsize);
-
-	/*
-	 * Update the run's node in runs_alloced_ad.  Its position does not
-	 * change.
-	 */
-	nodeB->addr = (void *)((uintptr_t)run + (oldsize - newsize));
-	nodeB->size = newsize;
-
-	/* Update the map for the run to be kept. */
-	pageind = (((uintptr_t)nodeB->addr - (uintptr_t)chunk) >>
-	    pagesize_2pow);
-	npages = newsize >> pagesize_2pow;
-	for (i = 0; i < npages; i++) {
-		chunk->map[pageind + i].npages = npages;
-		chunk->map[pageind + i].pos = i;
-	}
-
-	/*
-	 * Insert a node into runs_alloced_ad so that arena_run_dalloc() can
-	 * treat the leading run as separately allocated.
-	 */
-	nodeA->addr = (void *)run;
-	nodeA->size = oldsize - newsize;
-	RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeA);
-
-	/*
-	 * Modify the map such that arena_run_dalloc() sees the leading run as
-	 * separately allocated.
-	 */
-	pageind = (((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow);
-	npages = (oldsize - newsize) >> pagesize_2pow;
-	chunk->map[pageind].npages = npages;
-	assert(chunk->map[pageind].pos == 0);
-	pageind += npages - 1;
-	chunk->map[pageind].npages = npages;
-	assert(chunk->map[pageind].pos == npages - 1);
-
-	arena_run_dalloc(arena, (arena_run_t *)run, oldsize - newsize,
-	    oldsize - newsize);
+
+	assert(size != 0);
+
+	if (size <= arena_maxclass)
+		return (arena_malloc(choose_arena(), size, false));
+	else
+		return (huge_malloc(size, false));
 }
 
-static inline void
-arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeA,
-    extent_node_t *nodeB, arena_run_t *run, size_t oldsize, size_t newsize,
-    size_t ndirty)
+static inline void *
+icalloc(size_t size)
 {
-	unsigned i, pageind, npages;
-
-	assert(nodeA->addr == run);
-	assert(nodeA->size == oldsize);
-	assert(oldsize > newsize);
-
-	/*
-	 * Update the run's node in runs_alloced_ad.  Its position does not
-	 * change.
-	 */
-	nodeA->size = newsize;
-
-	/* Update the map for the run to be kept. */
-	pageind = (((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow);
-	npages = newsize >> pagesize_2pow;
-	for (i = 0; i < npages; i++) {
-		chunk->map[pageind + i].npages = npages;
-		assert(chunk->map[pageind + i].pos == i);
-	}
-
-	/*
-	 * Insert a node into runs_alloced_ad so that arena_run_dalloc() can
-	 * treat the trailing run as separately allocated.
-	 */
-	nodeB->addr = (void *)((uintptr_t)run + newsize);
-	nodeB->size = oldsize - newsize;
-	RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
-
-	/*
-	 * Modify the map such that arena_run_dalloc() sees the trailing run as
-	 * separately allocated.
-	 */
-	pageind = (((uintptr_t)run + newsize - (uintptr_t)chunk) >>
-	    pagesize_2pow);
-	npages = (oldsize - newsize) >> pagesize_2pow;
-	chunk->map[pageind].npages = npages;
-	chunk->map[pageind].pos = 0;
-	pageind += npages - 1;
-	chunk->map[pageind].npages = npages;
-	chunk->map[pageind].pos = npages - 1;
-
-	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
-	    oldsize - newsize, ndirty);
+
+	if (size <= arena_maxclass)
+		return (arena_malloc(choose_arena(), size, true));
+	else
+		return (huge_malloc(size, true));
 }
 
 /* Only handles large allocations that require more than page alignment. */
 static void *
 arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
 {
 	void *ret;
 	size_t offset;
 	arena_chunk_t *chunk;
-	extent_node_t *node, *nodeB, key;
+	extent_node_t *node, key;
 
 	assert((size & pagesize_mask) == 0);
 	assert((alignment & pagesize_mask) == 0);
 
 #ifdef MALLOC_BALANCE
 	arena_lock_balance(arena);
 #else
 	malloc_spin_lock(&arena->lock);
 #endif
-	ret = (void *)arena_run_alloc(arena, alloc_size, false);
+	ret = (void *)arena_run_alloc(arena, alloc_size, false, false);
 	if (ret == NULL) {
 		malloc_spin_unlock(&arena->lock);
 		return (NULL);
 	}
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
 
 	offset = (uintptr_t)ret & (alignment - 1);
 	assert((offset & pagesize_mask) == 0);
 	assert(offset < alloc_size);
 	if (offset == 0) {
 		/*
-		 * Allocate node in advance, in order to simplify OOM recovery.
-		 */
-		if ((nodeB = arena_node_alloc(arena)) == NULL) {
-			arena_run_dalloc(arena, ret, alloc_size, 0);
-			malloc_spin_unlock(&arena->lock);
-			return (NULL);
-		}
-
-		/*
 		 * Update the run's node in runs_alloced_ad.  Its position
 		 * does not change.
 		 */
 		key.addr = ret;
 		node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
 		assert(node != NULL);
 
-		arena_run_trim_tail(arena, chunk, node, nodeB, ret, alloc_size,
-		    size, alloc_size - size);
+		arena_run_trim_tail(arena, chunk, node, ret, alloc_size, size,
+		    false);
 	} else {
-		extent_node_t *nodeA;
 		size_t leadsize, trailsize;
 
 		/*
-		 * Allocate nodes in advance, in order to simplify OOM recovery.
-		 */
-		if ((nodeA = arena_node_alloc(arena)) == NULL) {
-			arena_run_dalloc(arena, ret, alloc_size, 0);
-			malloc_spin_unlock(&arena->lock);
-			return (NULL);
-		}
-		if ((nodeB = arena_node_alloc(arena)) == NULL) {
-			arena_node_dealloc(arena, nodeA);
-			arena_run_dalloc(arena, ret, alloc_size, 0);
-			malloc_spin_unlock(&arena->lock);
-			return (NULL);
-		}
-
-		/*
 		 * Update the run's node in runs_alloced_ad.  Its position
 		 * does not change.
 		 */
 		key.addr = ret;
 		node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
 		assert(node != NULL);
 
 		leadsize = alignment - offset;
 		if (leadsize > 0) {
-			arena_run_trim_head(arena, chunk, nodeA, node, ret,
-			    alloc_size, alloc_size - leadsize);
+			arena_run_trim_head(arena, chunk, node, ret, alloc_size,
+			    alloc_size - leadsize);
 			ret = (void *)((uintptr_t)ret + leadsize);
 		}
 
 		trailsize = alloc_size - leadsize - size;
 		if (trailsize != 0) {
 			/* Trim trailing space. */
 			assert(trailsize < alloc_size);
-			arena_run_trim_tail(arena, chunk, node, nodeB, ret,
-			    size + trailsize, size, trailsize);
-		} else
-			arena_node_dealloc(arena, nodeB);
+			arena_run_trim_tail(arena, chunk, node, ret, size +
+			    trailsize, size, false);
+		}
 	}
 
 #ifdef MALLOC_STATS
 	arena->stats.nmalloc_large++;
 	arena->stats.allocated_large += size;
 #endif
 	malloc_spin_unlock(&arena->lock);
 
+#ifdef MALLOC_FILL
 	if (opt_junk)
 		memset(ret, 0xa5, size);
 	else if (opt_zero)
 		memset(ret, 0, size);
+#endif
+	return (ret);
+}
+
+static inline void *
+ipalloc(size_t alignment, size_t size)
+{
+	void *ret;
+	size_t ceil_size;
+
+	/*
+	 * Round size up to the nearest multiple of alignment.
+	 *
+	 * This done, we can take advantage of the fact that for each small
+	 * size class, every object is aligned at the smallest power of two
+	 * that is non-zero in the base two representation of the size.  For
+	 * example:
+	 *
+	 *   Size |   Base 2 | Minimum alignment
+	 *   -----+----------+------------------
+	 *     96 |  1100000 |  32
+	 *    144 | 10100000 |  32
+	 *    192 | 11000000 |  64
+	 *
+	 * Depending on runtime settings, it is possible that arena_malloc()
+	 * will further round up to a power of two, but that never causes
+	 * correctness issues.
+	 */
+	ceil_size = (size + (alignment - 1)) & (-alignment);
+	/*
+	 * (ceil_size < size) protects against the combination of maximal
+	 * alignment and size greater than maximal alignment.
+	 */
+	if (ceil_size < size) {
+		/* size_t overflow. */
+		return (NULL);
+	}
+
+	if (ceil_size <= pagesize || (alignment <= pagesize
+	    && ceil_size <= arena_maxclass))
+		ret = arena_malloc(choose_arena(), ceil_size, false);
+	else {
+		size_t run_size;
+
+		/*
+		 * We can't achieve sub-page alignment, so round up alignment
+		 * permanently; it makes later calculations simpler.
+		 */
+		alignment = PAGE_CEILING(alignment);
+		ceil_size = PAGE_CEILING(size);
+		/*
+		 * (ceil_size < size) protects against very large sizes within
+		 * pagesize of SIZE_T_MAX.
+		 *
+		 * (ceil_size + alignment < ceil_size) protects against the
+		 * combination of maximal alignment and ceil_size large enough
+		 * to cause overflow.  This is similar to the first overflow
+		 * check above, but it needs to be repeated due to the new
+		 * ceil_size value, which may now be *equal* to maximal
+		 * alignment, whereas before we only detected overflow if the
+		 * original size was *greater* than maximal alignment.
+		 */
+		if (ceil_size < size || ceil_size + alignment < ceil_size) {
+			/* size_t overflow. */
+			return (NULL);
+		}
+
+		/*
+		 * Calculate the size of the over-size run that arena_palloc()
+		 * would need to allocate in order to guarantee the alignment.
+		 */
+		if (ceil_size >= alignment)
+			run_size = ceil_size + alignment - pagesize;
+		else {
+			/*
+			 * It is possible that (alignment << 1) will cause
+			 * overflow, but it doesn't matter because we also
+			 * subtract pagesize, which in the case of overflow
+			 * leaves us with a very large run_size.  That causes
+			 * the first conditional below to fail, which means
+			 * that the bogus run_size value never gets used for
+			 * anything important.
+			 */
+			run_size = (alignment << 1) - pagesize;
+		}
+
+		if (run_size <= arena_maxclass) {
+			ret = arena_palloc(choose_arena(), alignment, ceil_size,
+			    run_size);
+		} else if (alignment <= chunksize)
+			ret = huge_malloc(ceil_size, false);
+		else
+			ret = huge_palloc(alignment, ceil_size);
+	}
+
+	assert(((uintptr_t)ret & (alignment - 1)) == 0);
 	return (ret);
 }
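[Editor's note] For reference, here is the alignment arithmetic from ipalloc() applied to concrete numbers. The page size is assumed to be 4 KiB, and `& ~(alignment - 1)` stands in for the patch's `& (-alignment)`; the two are equivalent for power-of-two alignments.

/*
 * Worked example of the over-size run calculation in ipalloc():
 * requesting 20000 bytes aligned to 16384 with a 4096-byte page.
 */
#include <assert.h>
#include <stdio.h>

#define	PAGESIZE	4096
#define	PAGE_CEILING(s)	(((s) + PAGESIZE - 1) & ~((size_t)PAGESIZE - 1))

int
main(void)
{
	size_t size = 20000, alignment = 16384;
	size_t ceil_size, run_size;

	/* Round the size up to a multiple of the alignment. */
	ceil_size = (size + (alignment - 1)) & ~(alignment - 1);
	assert(ceil_size >= size);		/* no size_t overflow here */
	printf("aligned request: %zu\n", ceil_size);	/* 32768 */

	/*
	 * Page-align both, then compute the over-size run arena_palloc()
	 * would need: any alignment-sized window inside it is guaranteed to
	 * contain a page boundary at the requested alignment.
	 */
	alignment = PAGE_CEILING(alignment);
	ceil_size = PAGE_CEILING(size);			/* 20480 */
	if (ceil_size >= alignment)
		run_size = ceil_size + alignment - PAGESIZE;	/* 32768 */
	else
		run_size = (alignment << 1) - PAGESIZE;
	printf("over-size run: %zu\n", run_size);
	return (0);
}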
 
 /* Return the size of the allocation pointed to by ptr. */
 static size_t
 arena_salloc(const void *ptr)
 {
 	size_t ret;
 	arena_chunk_t *chunk;
-	arena_chunk_map_t *mapelm;
-	unsigned pageind;
+	arena_chunk_map_t mapelm;
+	size_t pageind;
 
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
-	/*
-	 * No arena data structures that we query here can change in a way that
-	 * affects this function, so we don't need to lock.
-	 */
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
-	mapelm = &chunk->map[pageind];
-	if (mapelm->pos != 0 || ptr != (void *)(((uintptr_t)chunk) + (pageind <<
-	    pagesize_2pow))) {
+	mapelm = chunk->map[pageind];
+	if ((mapelm & CHUNK_MAP_LARGE) == 0) {
 		arena_run_t *run;
 
-		pageind -= mapelm->pos;
-
+		/* Small allocation size is in the run header. */
+		pageind -= (mapelm & CHUNK_MAP_POS_MASK);
 		run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
 		    pagesize_2pow));
 		assert(run->magic == ARENA_RUN_MAGIC);
 		ret = run->bin->reg_size;
-	} else
-		ret = mapelm->npages << pagesize_2pow;
+	} else {
+		arena_t *arena;
+		extent_node_t *node, key;
+
+		/* Large allocation size is in the extent tree. */
+		assert((mapelm & CHUNK_MAP_POS_MASK) == 0);
+		arena = chunk->arena;
+		malloc_spin_lock(&arena->lock);
+		key.addr = (void *)ptr;
+		node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+		assert(node != NULL);
+		ret = node->size;
+		malloc_spin_unlock(&arena->lock);
+	}
+
+	return (ret);
+}
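[Editor's note] For small allocations, arena_salloc() now recovers the run header by subtracting the page's position field from its page index; large runs carry CHUNK_MAP_LARGE and are looked up in runs_alloced_ad instead. Below is a simplified illustration of the small-path pointer math; the map layout and the mock "chunk" are assumptions, not jemalloc's real structures.

/*
 * Illustration of the small-allocation path in arena_salloc(): page index
 * from the pointer, then back up by the position byte to the run header.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	PAGESIZE_2POW		12
#define	PAGESIZE		((size_t)1 << PAGESIZE_2POW)
#define	NPAGES			8
#define	CHUNK_MAP_LARGE		0x20U
#define	CHUNK_MAP_POS_MASK	0x0fU	/* assumed layout */

int
main(void)
{
	uint8_t *chunk = malloc(PAGESIZE * NPAGES);	/* mock chunk */
	uint8_t map[NPAGES] = {0};
	void *ptr;
	size_t pageind;

	assert(chunk != NULL);

	/* A 3-page small run starting at page 2: position bytes 0, 1, 2. */
	map[2] = 0; map[3] = 1; map[4] = 2;

	/* Some object inside the run's third page. */
	ptr = chunk + 4 * PAGESIZE + 100;

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGESIZE_2POW;
	assert((map[pageind] & CHUNK_MAP_LARGE) == 0);
	pageind -= (map[pageind] & CHUNK_MAP_POS_MASK);
	printf("run header is at page %zu (chunk offset %#zx)\n",
	    pageind, pageind << PAGESIZE_2POW);	/* page 2, 0x2000 */

	free(chunk);
	return (0);
}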
+
+static inline size_t
+isalloc(const void *ptr)
+{
+	size_t ret;
+	arena_chunk_t *chunk;
+
+	assert(ptr != NULL);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr) {
+		/* Region. */
+		assert(chunk->arena->magic == ARENA_MAGIC);
+
+		ret = arena_salloc(ptr);
+	} else {
+		extent_node_t *node, key;
+
+		/* Chunk (huge allocation). */
+
+		malloc_mutex_lock(&huge_mtx);
+
+		/* Extract from tree of huge allocations. */
+		key.addr = __DECONST(void *, ptr);
+		node = RB_FIND(extent_tree_ad_s, &huge, &key);
+		assert(node != NULL);
+
+		ret = node->size;
+
+		malloc_mutex_unlock(&huge_mtx);
+	}
 
 	return (ret);
 }
 
-/*
- * Try to resize a large allocation, in order to avoid copying.  This will fail
- * if growing an object, and following pages are already in use.
- */
-static bool
-arena_ralloc_resize(void *ptr, size_t size, size_t oldsize)
+static inline void
+arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t pageind, arena_chunk_map_t mapelm)
+{
+	arena_run_t *run;
+	arena_bin_t *bin;
+	size_t size;
+
+	pageind -= (mapelm & CHUNK_MAP_POS_MASK);
+
+	run = (arena_run_t *)((uintptr_t)chunk + (pageind << pagesize_2pow));
+	assert(run->magic == ARENA_RUN_MAGIC);
+	bin = run->bin;
+	size = bin->reg_size;
+
+#ifdef MALLOC_FILL
+	if (opt_junk)
+		memset(ptr, 0x5a, size);
+#endif
+
+	arena_run_reg_dalloc(run, bin, ptr, size);
+	run->nfree++;
+
+	if (run->nfree == bin->nregs) {
+		/* Deallocate run. */
+		if (run == bin->runcur)
+			bin->runcur = NULL;
+		else if (bin->nregs != 1) {
+			/*
+			 * This block's conditional is necessary because if the
+			 * run only contains one region, then it never gets
+			 * inserted into the non-full runs tree.
+			 */
+			RB_REMOVE(arena_run_tree_s, &bin->runs, run);
+		}
+#ifdef MALLOC_DEBUG
+		run->magic = 0;
+#endif
+		arena_run_dalloc(arena, run, true);
+#ifdef MALLOC_STATS
+		bin->stats.curruns--;
+#endif
+	} else if (run->nfree == 1 && run != bin->runcur) {
+		/*
+		 * Make sure that bin->runcur always refers to the lowest
+		 * non-full run, if one exists.
+		 */
+		if (bin->runcur == NULL)
+			bin->runcur = run;
+		else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
+			/* Switch runcur. */
+			if (bin->runcur->nfree > 0) {
+				/* Insert runcur. */
+				RB_INSERT(arena_run_tree_s, &bin->runs,
+				    bin->runcur);
+			}
+			bin->runcur = run;
+		} else
+			RB_INSERT(arena_run_tree_s, &bin->runs, run);
+	}
+#ifdef MALLOC_STATS
+	arena->stats.allocated_small -= size;
+	arena->stats.ndalloc_small++;
+#endif
+}
+
+#ifdef MALLOC_LAZY_FREE
+static inline void
+arena_dalloc_lazy(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t pageind, arena_chunk_map_t *mapelm)
+{
+	void **free_cache = arena->free_cache;
+	unsigned i, slot;
+
+	if (__isthreaded == false || opt_lazy_free_2pow < 0) {
+		malloc_spin_lock(&arena->lock);
+		arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
+		malloc_spin_unlock(&arena->lock);
+		return;
+	}
+
+	for (i = 0; i < LAZY_FREE_NPROBES; i++) {
+		slot = PRN(lazy_free, opt_lazy_free_2pow);
+		if (atomic_cmpset_ptr((uintptr_t *)&free_cache[slot],
+		    (uintptr_t)NULL, (uintptr_t)ptr)) {
+			return;
+		}
+	}
+
+	arena_dalloc_lazy_hard(arena, chunk, ptr, pageind, mapelm);
+}
+
+static void
+arena_dalloc_lazy_hard(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t pageind, arena_chunk_map_t *mapelm)
+{
+	void **free_cache = arena->free_cache;
+	unsigned i, slot = PRN(lazy_free, opt_lazy_free_2pow);
+
+	malloc_spin_lock(&arena->lock);
+	arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
+
+	/*
+	 * Check whether another thread already cleared the cache.  It is
+	 * possible that another thread cleared the cache *and* this slot was
+	 * already refilled, which could result in a mostly fruitless cache
+	 * sweep, but such a sequence of events causes no correctness issues.
+	 */
+	if ((ptr = (void *)atomic_readandclear_ptr(
+	    (uintptr_t *)&free_cache[slot]))
+	    != NULL) {
+		unsigned lazy_free_mask;
+		
+		/*
+		 * Clear the cache, since we failed to find a slot.  It is
+		 * possible that other threads will continue to insert objects
+		 * into the cache while this one sweeps, but that is okay,
+		 * since on average the cache is still swept with the same
+		 * frequency.
+		 */
+
+		/* Handle pointer at current slot. */
+		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+		pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >>
+		    pagesize_2pow);
+		mapelm = &chunk->map[pageind];
+		arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
+
+		/* Sweep remainder of slots. */
+		lazy_free_mask = (1U << opt_lazy_free_2pow) - 1;
+		for (i = (slot + 1) & lazy_free_mask;
+		     i != slot;
+		     i = (i + 1) & lazy_free_mask) {
+			ptr = (void *)atomic_readandclear_ptr(
+			    (uintptr_t *)&free_cache[i]);
+			if (ptr != NULL) {
+				chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+				pageind = (((uintptr_t)ptr - (uintptr_t)chunk)
+				    >> pagesize_2pow);
+				mapelm = &chunk->map[pageind];
+				arena_dalloc_small(arena, chunk, ptr, pageind,
+				    *mapelm);
+			}
+		}
+	}
+
+	malloc_spin_unlock(&arena->lock);
+}
+#endif
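[Editor's note] The lazy-free path probes a few random slots of a per-arena pointer cache with an atomic compare-and-set before falling back to the locked path, where the cache is eventually swept. Below is a hedged standalone sketch of that probe-then-fallback pattern; C11 atomics and rand() stand in for jemalloc's atomic_cmpset_ptr() and PRN(), and the slot/probe counts are made up.

/*
 * Stand-alone sketch of the lazy-free probe: try to stash a pointer in a
 * random cache slot with compare-and-swap; if every probe finds the slot
 * occupied, fall back to the slow (locked) path.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define	NSLOTS	8	/* 1 << opt_lazy_free_2pow, assumed */
#define	NPROBES	2	/* stand-in for LAZY_FREE_NPROBES */

static _Atomic(void *) free_cache[NSLOTS];

static void
dalloc_slow(void *ptr)
{

	/*
	 * The real code takes the arena lock here, frees ptr, and possibly
	 * sweeps the whole cache.
	 */
	printf("slow path: %p\n", ptr);
}

static void
dalloc_lazy(void *ptr)
{
	unsigned i;

	for (i = 0; i < NPROBES; i++) {
		unsigned slot = (unsigned)rand() & (NSLOTS - 1);
		void *expected = NULL;

		if (atomic_compare_exchange_strong(&free_cache[slot],
		    &expected, ptr)) {
			printf("cached %p in slot %u\n", ptr, slot);
			return;
		}
	}
	dalloc_slow(ptr);
}

int
main(void)
{
	int objs[32];
	size_t i;

	for (i = 0; i < 32; i++)
		dalloc_lazy(&objs[i]);
	return (0);
}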
+
+static void
+arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+{
+	/* Large allocation. */
+	malloc_spin_lock(&arena->lock);
+
+#ifdef MALLOC_FILL
+#ifndef MALLOC_STATS
+	if (opt_junk)
+#endif
+#endif
+	{
+		extent_node_t *node, key;
+		size_t size;
+
+		key.addr = ptr;
+		node = RB_FIND(extent_tree_ad_s,
+		    &arena->runs_alloced_ad, &key);
+		assert(node != NULL);
+		size = node->size;
+#ifdef MALLOC_FILL
+#ifdef MALLOC_STATS
+		if (opt_junk)
+#endif
+			memset(ptr, 0x5a, size);
+#endif
+#ifdef MALLOC_STATS
+		arena->stats.allocated_large -= size;
+#endif
+	}
+#ifdef MALLOC_STATS
+	arena->stats.ndalloc_large++;
+#endif
+
+	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
+	malloc_spin_unlock(&arena->lock);
+}
+
+static inline void
+arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+{
+	size_t pageind;
+	arena_chunk_map_t *mapelm;
+
+	assert(arena != NULL);
+	assert(arena->magic == ARENA_MAGIC);
+	assert(chunk->arena == arena);
+	assert(ptr != NULL);
+	assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+	mapelm = &chunk->map[pageind];
+	if ((*mapelm & CHUNK_MAP_LARGE) == 0) {
+		/* Small allocation. */
+#ifdef MALLOC_LAZY_FREE
+		arena_dalloc_lazy(arena, chunk, ptr, pageind, mapelm);
+#else
+		malloc_spin_lock(&arena->lock);
+		arena_dalloc_small(arena, chunk, ptr, pageind, *mapelm);
+		malloc_spin_unlock(&arena->lock);
+#endif
+	} else {
+		assert((*mapelm & CHUNK_MAP_POS_MASK) == 0);
+		arena_dalloc_large(arena, chunk, ptr);
+	}
+}
+
+static inline void
+idalloc(void *ptr)
 {
 	arena_chunk_t *chunk;
-	arena_t *arena;
-	extent_node_t *nodeA, *nodeB, key;
+
+	assert(ptr != NULL);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	arena = chunk->arena;
-	assert(arena->magic == ARENA_MAGIC);
-
-	if (size < oldsize) {
-		/*
-		 * Shrink the run, and make trailing pages available for other
-		 * allocations.
-		 */
-		key.addr = (void *)((uintptr_t)ptr);
+	if (chunk != ptr)
+		arena_dalloc(chunk->arena, chunk, ptr);
+	else
+		huge_dalloc(ptr);
+}
+
+static void
+arena_ralloc_resize_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t size, size_t oldsize)
+{
+	extent_node_t *node, key;
+
+	assert(size < oldsize);
+
+	/*
+	 * Shrink the run, and make trailing pages available for other
+	 * allocations.
+	 */
+	key.addr = (void *)((uintptr_t)ptr);
 #ifdef MALLOC_BALANCE
-		arena_lock_balance(arena);
+	arena_lock_balance(arena);
 #else
-		malloc_spin_lock(&arena->lock);
-#endif
-		nodeA = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
-		    &key);
-		assert(nodeA != NULL);
-		if ((nodeB = arena_node_alloc(arena)) == NULL) {
-			malloc_spin_unlock(&arena->lock);
-			return (true);
-		}
-		arena_run_trim_tail(arena, chunk, nodeA, nodeB,
-		    (arena_run_t *)ptr, oldsize, size, oldsize - size);
+	malloc_spin_lock(&arena->lock);
+#endif
+	node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+	assert(node != NULL);
+	arena_run_trim_tail(arena, chunk, node, (arena_run_t *)ptr, oldsize,
+	    size, true);
 #ifdef MALLOC_STATS
-		arena->stats.allocated_large -= oldsize - size;
-#endif
-		malloc_spin_unlock(&arena->lock);
-		return (false);
-	}
+	arena->stats.allocated_large -= oldsize - size;
+#endif
+	malloc_spin_unlock(&arena->lock);
+}
+
+static bool
+arena_ralloc_resize_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    size_t size, size_t oldsize)
+{
+	extent_node_t *nodeC, key;
 
 	/* Try to extend the run. */
 	assert(size > oldsize);
 	key.addr = (void *)((uintptr_t)ptr + oldsize);
 #ifdef MALLOC_BALANCE
 	arena_lock_balance(arena);
 #else
 	malloc_spin_lock(&arena->lock);
 #endif
-	nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
-	if (nodeB != NULL && oldsize + nodeB->size >= size) {
-		unsigned i, pageind, npages;
+	nodeC = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+	if (nodeC != NULL && oldsize + nodeC->size >= size) {
+		extent_node_t *nodeA, *nodeB;
 
 		/*
-		 * The next run is available and sufficiently large.
-		 * Merge the two adjacent runs, then trim if necessary.
+		 * The next run is available and sufficiently large.  Split the
+		 * following run, then merge the first part with the existing
+		 * allocation.  This results in a bit more tree manipulation
+		 * than absolutely necessary, but it substantially simplifies
+		 * the code.
 		 */
-
-		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
-		RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
-		arena->ndirty -= nodeB->ndirty;
+		arena_run_split(arena, (arena_run_t *)nodeC->addr, size -
+		    oldsize, false, false);
 
 		key.addr = ptr;
 		nodeA = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
 		    &key);
 		assert(nodeA != NULL);
+
+		key.addr = (void *)((uintptr_t)ptr + oldsize);
+		nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
+		    &key);
+		assert(nodeB != NULL);
+
 		nodeA->size += nodeB->size;
 
-		chunk->pages_used += (nodeB->size >> pagesize_2pow);
-
-		/* Update the portion of the map that will be retained. */
-		pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >>
-		    pagesize_2pow);
-		npages = size >> pagesize_2pow;
-		for (i = 0; i < npages; i++) {
-			chunk->map[pageind + i].npages = npages;
-			chunk->map[pageind + i].pos = i;
-		}
-
-#ifdef MALLOC_DECOMMIT
-		if (opt_decommit) {
-			if (nodeB->ndirty != nodeB->size) {
-				/*
-				 * Commit the part of the run that is being
-				 * allocated.
-				 */
-#  ifdef MOZ_MEMORY_WINDOWS
-				VirtualAlloc(nodeB->addr, nodeA->size - oldsize,
-				    MEM_COMMIT, PAGE_READWRITE);
-#  else
-				if (mmap(nodeB->addr, nodeA->size - oldsize,
-				    PROT_READ | PROT_WRITE, MAP_FIXED |
-				    MAP_PRIVATE | MAP_ANON, -1, 0) ==
-				    MAP_FAILED)
-					abort();
-#  endif
-				if (nodeB->ndirty != 0 && nodeA->size > size) {
-					/*
-					 * Decommit the unused portion of the
-					 * run in order to assure a uniform
-					 * state where all pages in each part
-					 * of the split are either completely
-					 * committed or completely decommitted.
-					 */
-#  ifdef MOZ_MEMORY_WINDOWS
-					VirtualFree((void *)((uintptr_t)ptr +
-					    size), nodeA->size - size,
-					    MEM_DECOMMIT);
-#  else
-					if (mmap((void *)((uintptr_t)ptr +
-					    size), nodeA->size - size,
-					    PROT_NONE, MAP_FIXED | MAP_PRIVATE
-					    | MAP_ANON, -1, 0) == MAP_FAILED)
-						abort();
-#  endif
-#  ifdef MALLOC_STATS
-					arena->stats.npurged += nodeB->ndirty;
-					arena->stats.nmadvise++;
-#  endif
-					nodeB->ndirty = 0;
-				}
-			}
-		}
-#endif
-		/* Trim if necessary. */
-		if (nodeA->size > size) {
-			size_t ndirty;
-
-			if (nodeB->ndirty == 0)
-				ndirty = 0;
-			else if (nodeB->ndirty >= nodeA->size - size)
-				ndirty = nodeA->size - size;
-			else
-				ndirty = nodeB->ndirty;
-			arena_run_trim_tail(arena, chunk, nodeA, nodeB,
-			    (arena_run_t *)ptr, nodeA->size, size, ndirty);
-		} else
-			arena_node_dealloc(arena, nodeB);
+		RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+		arena_chunk_node_dealloc(chunk, nodeB);
+
 #ifdef MALLOC_STATS
 		arena->stats.allocated_large += size - oldsize;
 #endif
 		malloc_spin_unlock(&arena->lock);
 		return (false);
 	}
 	malloc_spin_unlock(&arena->lock);
 
 	return (true);
 }
 
+/*
+ * Try to resize a large allocation, in order to avoid copying.  This will
+ * always fail when growing an object if the following run is already in use.
+ */
+static bool
+arena_ralloc_resize(void *ptr, size_t size, size_t oldsize)
+{
+	arena_chunk_t *chunk;
+	arena_t *arena;
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	arena = chunk->arena;
+	assert(arena->magic == ARENA_MAGIC);
+
+	if (size < oldsize) {
+		arena_ralloc_resize_shrink(arena, chunk, ptr, size, oldsize);
+		return (false);
+	} else {
+		return (arena_ralloc_resize_grow(arena, chunk, ptr, size,
+		    oldsize));
+	}
+}
+
 static void *
 arena_ralloc(void *ptr, size_t size, size_t oldsize)
 {
 	void *ret;
+	size_t copysize;
 
 	/* Try to avoid moving the allocation. */
 	if (size < small_min) {
 		if (oldsize < small_min &&
 		    ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
 		    == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
 			goto IN_PLACE; /* Same size class. */
 	} else if (size <= small_max) {
@@ -4174,252 +4342,84 @@ arena_ralloc(void *ptr, size_t size, siz
 	 * need to move the object.  In that case, fall back to allocating new
 	 * space and copying.
 	 */
 	ret = arena_malloc(choose_arena(), size, false);
 	if (ret == NULL)
 		return (NULL);
 
 	/* Junk/zero-filling were already done by arena_malloc(). */
-	if (size < oldsize)
-		memcpy(ret, ptr, size);
+	copysize = (size < oldsize) ? size : oldsize;
+#ifdef VM_COPY_MIN
+	if (copysize >= VM_COPY_MIN)
+		pages_copy(ret, ptr, copysize);
 	else
-		memcpy(ret, ptr, oldsize);
+#endif
+		memcpy(ret, ptr, copysize);
 	idalloc(ptr);
 	return (ret);
 IN_PLACE:
+#ifdef MALLOC_FILL
 	if (opt_junk && size < oldsize)
 		memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
 	else if (opt_zero && size > oldsize)
 		memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
+#endif
 	return (ptr);
 }
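[Editor's note] When arena_ralloc() does have to move the object, it copies min(size, oldsize) bytes and, above VM_COPY_MIN, prefers a page-level copy over memcpy(). Neither VM_COPY_MIN nor pages_copy() is defined in this hunk, so the sketch below uses hypothetical stand-ins for both; only the copy-size decision mirrors the patch.

/*
 * Sketch of the copy-size decision in arena_ralloc().  The threshold and
 * pages_copy_sketch() are stand-ins for VM_COPY_MIN and pages_copy().
 */
#include <stdio.h>
#include <string.h>

#define	VM_COPY_MIN_SKETCH	(4096u * 32)	/* assumed threshold */

static void
pages_copy_sketch(void *dest, const void *src, size_t n)
{

	/*
	 * The real helper can remap whole pages (e.g. vm_copy() on Darwin);
	 * falling back to memcpy() keeps this sketch portable.
	 */
	memcpy(dest, src, n);
}

static void
move_object(void *ret, const void *ptr, size_t size, size_t oldsize)
{
	/* Never read past the old object or write past the new one. */
	size_t copysize = (size < oldsize) ? size : oldsize;

	if (copysize >= VM_COPY_MIN_SKETCH)
		pages_copy_sketch(ret, ptr, copysize);
	else
		memcpy(ret, ptr, copysize);
}

int
main(void)
{
	char oldbuf[256] = "payload", newbuf[128];

	move_object(newbuf, oldbuf, sizeof(newbuf), sizeof(oldbuf));
	printf("%s\n", newbuf);
	return (0);
}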
 
-static inline void
-arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    unsigned pageind, arena_chunk_map_t *mapelm)
-{
-	arena_run_t *run;
-	arena_bin_t *bin;
-	size_t size;
-
-	pageind -= mapelm->pos;
-
-	run = (arena_run_t *)((uintptr_t)chunk + (pageind << pagesize_2pow));
-	assert(run->magic == ARENA_RUN_MAGIC);
-	bin = run->bin;
-	size = bin->reg_size;
-
-	if (opt_junk)
-		memset(ptr, 0x5a, size);
-
-	arena_run_reg_dalloc(run, bin, ptr, size);
-	run->nfree++;
-
-	if (run->nfree == bin->nregs) {
-		/* Deallocate run. */
-		if (run == bin->runcur)
-			bin->runcur = NULL;
-		else if (bin->nregs != 1) {
-			/*
-			 * This block's conditional is necessary because if the
-			 * run only contains one region, then it never gets
-			 * inserted into the non-full runs tree.
-			 */
-			RB_REMOVE(arena_run_tree_s, &bin->runs, run);
-		}
-#ifdef MALLOC_DEBUG
-		run->magic = 0;
-#endif
-		arena_run_dalloc(arena, run, bin->run_size, bin->run_size);
-#ifdef MALLOC_STATS
-		bin->stats.curruns--;
-#endif
-	} else if (run->nfree == 1 && run != bin->runcur) {
-		/*
-		 * Make sure that bin->runcur always refers to the lowest
-		 * non-full run, if one exists.
-		 */
-		if (bin->runcur == NULL)
-			bin->runcur = run;
-		else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
-			/* Switch runcur. */
-			if (bin->runcur->nfree > 0) {
-				/* Insert runcur. */
-				RB_INSERT(arena_run_tree_s, &bin->runs,
-				    bin->runcur);
-			}
-			bin->runcur = run;
-		} else
-			RB_INSERT(arena_run_tree_s, &bin->runs, run);
-	}
-#ifdef MALLOC_STATS
-	arena->stats.allocated_small -= size;
-	arena->stats.ndalloc_small++;
-#endif
-}
-
-#ifdef MALLOC_LAZY_FREE
-static inline void
-arena_dalloc_lazy(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    unsigned pageind, arena_chunk_map_t *mapelm)
+static inline void *
+iralloc(void *ptr, size_t size)
 {
-	void **free_cache = arena->free_cache;
-	unsigned i, slot;
-
-	if (!__isthreaded || opt_lazy_free_2pow < 0) {
-		malloc_spin_lock(&arena->lock);
-		arena_dalloc_small(arena, chunk, ptr, pageind, mapelm);
-		malloc_spin_unlock(&arena->lock);
-		return;
-	}
-
-	for (i = 0; i < LAZY_FREE_NPROBES; i++) {
-		slot = PRN(lazy_free, opt_lazy_free_2pow);
-		if (atomic_cmpset_ptr((uintptr_t *)&free_cache[slot],
-		    (uintptr_t)NULL, (uintptr_t)ptr)) {
-			return;
-		}
-	}
-
-	malloc_spin_lock(&arena->lock);
-	arena_dalloc_small(arena, chunk, ptr, pageind, mapelm);
-
-	/*
-	 * Check whether another thread already cleared the cache.  It is
-	 * possible that another thread cleared the cache *and* this slot was
-	 * already refilled, which could result in a mostly fruitless cache
-	 * sweep, but such a sequence of events causes no correctness issues.
-	 */
-	if ((ptr = (void *)atomic_readandclear_ptr(
-	    (uintptr_t *)&free_cache[slot]))
-	    != NULL) {
-		unsigned lazy_free_mask;
-		
-		/*
-		 * Clear the cache, since we failed to find a slot.  It is
-		 * possible that other threads will continue to insert objects
-		 * into the cache while this one sweeps, but that is okay,
-		 * since on average the cache is still swept with the same
-		 * frequency.
-		 */
-
-		/* Handle pointer at current slot. */
-		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-		pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >>
-		    pagesize_2pow);
-		mapelm = &chunk->map[pageind];
-		arena_dalloc_small(arena, chunk, ptr, pageind, mapelm);
-
-		/* Sweep remainder of slots. */
-		lazy_free_mask = (1U << opt_lazy_free_2pow) - 1;
-		for (i = (slot + 1) & lazy_free_mask;
-		     i != slot;
-		     i = (i + 1) & lazy_free_mask) {
-			ptr = (void *)atomic_readandclear_ptr(
-			    (uintptr_t *)&free_cache[i]);
-			if (ptr != NULL) {
-				chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-				pageind = (((uintptr_t)ptr - (uintptr_t)chunk)
-				    >> pagesize_2pow);
-				mapelm = &chunk->map[pageind];
-				arena_dalloc_small(arena, chunk, ptr, pageind,
-				    mapelm);
-			}
-		}
-	}
-
-	malloc_spin_unlock(&arena->lock);
-}
-#endif
-
-static void
-arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
-{
-	unsigned pageind;
-	arena_chunk_map_t *mapelm;
-
-	assert(arena != NULL);
-	assert(arena->magic == ARENA_MAGIC);
-	assert(chunk->arena == arena);
+	size_t oldsize;
+
 	assert(ptr != NULL);
-	assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
-	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
-	mapelm = &chunk->map[pageind];
-	if (mapelm->pos != 0 || ptr != (void *)(((uintptr_t)chunk) + (pageind <<
-	    pagesize_2pow))) {
-		/* Small allocation. */
-#ifdef MALLOC_LAZY_FREE
-		arena_dalloc_lazy(arena, chunk, ptr, pageind, mapelm);
-#else
-		malloc_spin_lock(&arena->lock);
-		arena_dalloc_small(arena, chunk, ptr, pageind, mapelm);
-		malloc_spin_unlock(&arena->lock);
-#endif
-	} else {
-		size_t size;
-
-		/* Large allocation. */
-
-		size = mapelm->npages << pagesize_2pow;
-		assert((((uintptr_t)ptr) & pagesize_mask) == 0);
-
-		if (opt_junk)
-			memset(ptr, 0x5a, size);
-
-		malloc_spin_lock(&arena->lock);
-		arena_run_dalloc(arena, (arena_run_t *)ptr, size, size);
-#ifdef MALLOC_STATS
-		arena->stats.allocated_large -= size;
-		arena->stats.ndalloc_large++;
-#endif
-		malloc_spin_unlock(&arena->lock);
-	}
+	assert(size != 0);
+
+	oldsize = isalloc(ptr);
+
+	if (size <= arena_maxclass)
+		return (arena_ralloc(ptr, size, oldsize));
+	else
+		return (huge_ralloc(ptr, size, oldsize));
 }
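
[Editor's note] The rewritten iralloc() above only sizes the existing allocation and then dispatches on the requested size. A minimal standalone sketch of that dispatch (the stub functions and the ARENA_MAXCLASS constant are illustrative stand-ins, not part of the patch):

	#include <stdio.h>
	#include <stddef.h>

	#define ARENA_MAXCLASS ((size_t)1 << 20)	/* stand-in for arena_maxclass */

	/* Stubs standing in for the two real resize paths. */
	static void *arena_ralloc_stub(void *ptr, size_t size, size_t oldsize)
	{
		printf("arena path: %zu -> %zu bytes\n", oldsize, size);
		return ptr;
	}

	static void *huge_ralloc_stub(void *ptr, size_t size, size_t oldsize)
	{
		printf("huge path: %zu -> %zu bytes\n", oldsize, size);
		return ptr;
	}

	/* Mirror of the new iralloc(): requests that still fit an arena size
	 * class stay in the arenas; anything larger goes to the huge path. */
	static void *iralloc_sketch(void *ptr, size_t size, size_t oldsize)
	{
		if (size <= ARENA_MAXCLASS)
			return arena_ralloc_stub(ptr, size, oldsize);
		return huge_ralloc_stub(ptr, size, oldsize);
	}

	int main(void)
	{
		static char buf[1];
		iralloc_sketch(buf, 4096, 64);			/* stays in an arena */
		iralloc_sketch(buf, (size_t)4 << 20, 4096);	/* huge allocation */
		return 0;
	}
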
 
 static bool
 arena_new(arena_t *arena)
 {
 	unsigned i;
 	arena_bin_t *bin;
 	size_t pow2_size, prev_run_size;
 
 	if (malloc_spin_init(&arena->lock))
 		return (true);
 
 #ifdef MALLOC_STATS
 	memset(&arena->stats, 0, sizeof(arena_stats_t));
 #endif
 
-	arena->node_mag_cur = NULL;
-	arena->node_mag_full = NULL;
-
 	/* Initialize chunks. */
 	RB_INIT(&arena->chunks);
 	arena->spare = NULL;
 
 	arena->ndirty = 0;
-	arena->spare_ndirty = 0;
 
 	RB_INIT(&arena->runs_avail_szad);
 	RB_INIT(&arena->runs_avail_ad);
 	RB_INIT(&arena->runs_alloced_ad);
 
 #ifdef MALLOC_BALANCE
 	arena->contention = 0;
 #endif
 #ifdef MALLOC_LAZY_FREE
 	if (opt_lazy_free_2pow >= 0) {
-		arena->free_cache = (void **) base_alloc(sizeof(void *)
+		arena->free_cache = (void **) base_calloc(1, sizeof(void *)
 		    * (1U << opt_lazy_free_2pow));
 		if (arena->free_cache == NULL)
 			return (true);
-		memset(arena->free_cache, 0, sizeof(void *)
-		    * (1U << opt_lazy_free_2pow));
 	} else
 		arena->free_cache = NULL;
 #endif
 
 	/* Initialize bins. */
 	prev_run_size = pagesize;
 
 	/* (2^n)-spaced tiny bins. */
@@ -4522,17 +4522,17 @@ huge_malloc(size_t size, bool zero)
 	/* Allocate one or more contiguous chunks for this request. */
 
 	csize = CHUNK_CEILING(size);
 	if (csize == 0) {
 		/* size is large enough to cause size_t wrap-around. */
 		return (NULL);
 	}
 
-	/* Allocate a chunk node with which to track the chunk. */
+	/* Allocate an extent node with which to track the chunk. */
 	node = base_node_alloc();
 	if (node == NULL)
 		return (NULL);
 
 	ret = chunk_alloc(csize, zero);
 	if (ret == NULL) {
 		base_node_dealloc(node);
 		return (NULL);
@@ -4545,22 +4545,24 @@ huge_malloc(size_t size, bool zero)
 	malloc_mutex_lock(&huge_mtx);
 	RB_INSERT(extent_tree_ad_s, &huge, node);
 #ifdef MALLOC_STATS
 	huge_nmalloc++;
 	huge_allocated += csize;
 #endif
 	malloc_mutex_unlock(&huge_mtx);
 
+#ifdef MALLOC_FILL
 	if (zero == false) {
 		if (opt_junk)
 			memset(ret, 0xa5, csize);
 		else if (opt_zero)
 			memset(ret, 0, csize);
 	}
+#endif
 
 	return (ret);
 }
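
[Editor's note] The new MALLOC_FILL guard makes the junk and zero fills optional at compile time; when enabled, freshly allocated huge memory is painted 0xa5 under the "J" option or zeroed under "Z", unless the caller already asked for zeroed memory. A hedged sketch of that policy (the flag and macro names here are illustrative):

	#include <stdbool.h>
	#include <string.h>

	#define MALLOC_FILL_SKETCH	/* assumed compile-time switch */

	static bool opt_junk_sketch = true;	/* "J"/"j" */
	static bool opt_zero_sketch = false;	/* "Z"/"z" */

	/* Apply the allocation-time fill policy: junk takes precedence over
	 * zero, and memory already zeroed on request is left alone. */
	static void fill_new_memory(void *p, size_t len, bool already_zeroed)
	{
	#ifdef MALLOC_FILL_SKETCH
		if (!already_zeroed) {
			if (opt_junk_sketch)
				memset(p, 0xa5, len);
			else if (opt_zero_sketch)
				memset(p, 0, len);
		}
	#endif
	}
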
 
 /* Only handles large allocations that require more than chunk alignment. */
 static void *
 huge_palloc(size_t alignment, size_t size)
 {
@@ -4580,17 +4582,17 @@ huge_palloc(size_t alignment, size_t siz
 
 	chunk_size = CHUNK_CEILING(size);
 
 	if (size >= alignment)
 		alloc_size = chunk_size + alignment - chunksize;
 	else
 		alloc_size = (alignment << 1) - chunksize;
 
-	/* Allocate a chunk node with which to track the chunk. */
+	/* Allocate an extent node with which to track the chunk. */
 	node = base_node_alloc();
 	if (node == NULL)
 		return (NULL);
 
 	ret = chunk_alloc(alloc_size, false);
 	if (ret == NULL) {
 		base_node_dealloc(node);
 		return (NULL);
@@ -4627,62 +4629,63 @@ huge_palloc(size_t alignment, size_t siz
 	malloc_mutex_lock(&huge_mtx);
 	RB_INSERT(extent_tree_ad_s, &huge, node);
 #ifdef MALLOC_STATS
 	huge_nmalloc++;
 	huge_allocated += chunk_size;
 #endif
 	malloc_mutex_unlock(&huge_mtx);
 
+#ifdef MALLOC_FILL
 	if (opt_junk)
 		memset(ret, 0xa5, chunk_size);
 	else if (opt_zero)
 		memset(ret, 0, chunk_size);
+#endif
 
 	return (ret);
 }
 
 static void *
 huge_ralloc(void *ptr, size_t size, size_t oldsize)
 {
 	void *ret;
+	size_t copysize;
 
 	/* Avoid moving the allocation if the size class would not change. */
 	if (oldsize > arena_maxclass &&
 	    CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
+#ifdef MALLOC_FILL
 		if (opt_junk && size < oldsize) {
 			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
 			    - size);
 		} else if (opt_zero && size > oldsize) {
 			memset((void *)((uintptr_t)ptr + oldsize), 0, size
 			    - oldsize);
 		}
+#endif
 		return (ptr);
 	}
 
 	/*
 	 * If we get here, then size and oldsize are different enough that we
 	 * need to use a different size class.  In that case, fall back to
 	 * allocating new space and copying.
 	 */
 	ret = huge_malloc(size, false);
 	if (ret == NULL)
 		return (NULL);
 
-	if (CHUNK_ADDR2BASE(ptr) == ptr) {
-		/* The old allocation is a chunk. */
-		if (size < oldsize)
-			memcpy(ret, ptr, size);
-		else
-			memcpy(ret, ptr, oldsize);
-	} else {
-		/* The old allocation is a region. */
-		assert(oldsize < size);
-		memcpy(ret, ptr, oldsize);
-	}
+	copysize = (size < oldsize) ? size : oldsize;
+#ifdef VM_COPY_MIN
+	if (copysize >= VM_COPY_MIN)
+		pages_copy(ret, ptr, copysize);
+	else
+#endif
+		memcpy(ret, ptr, copysize);
 	idalloc(ptr);
 	return (ret);
 }
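
[Editor's note] The reallocation copy above now moves min(size, oldsize) bytes and, when VM_COPY_MIN is defined and the copy is large enough, prefers a page-level copy (pages_copy()) over memcpy(). A minimal sketch of that selection (the threshold value and the pages_copy stand-in are assumptions, not the real platform code):

	#include <stddef.h>
	#include <string.h>

	#define VM_COPY_MIN_SKETCH ((size_t)1 << 20)	/* assumed threshold */

	/* Stand-in for pages_copy(); the real routine can copy whole pages
	 * through the VM system instead of touching every byte. */
	static void pages_copy_sketch(void *dst, const void *src, size_t n)
	{
		memcpy(dst, src, n);
	}

	/* Copy only the bytes that survive the resize, picking the copy
	 * routine by size the same way huge_ralloc() does. */
	static void relocate_sketch(void *dst, const void *src, size_t size,
	    size_t oldsize)
	{
		size_t copysize = (size < oldsize) ? size : oldsize;

		if (copysize >= VM_COPY_MIN_SKETCH)
			pages_copy_sketch(dst, src, copysize);
		else
			memcpy(dst, src, copysize);
	}
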
 
 static void
 huge_dalloc(void *ptr)
 {
 	extent_node_t *node, key;
@@ -4700,216 +4703,26 @@ huge_dalloc(void *ptr)
 	huge_ndalloc++;
 	huge_allocated -= node->size;
 #endif
 
 	malloc_mutex_unlock(&huge_mtx);
 
 	/* Unmap chunk. */
 #ifdef MALLOC_DSS
+#ifdef MALLOC_FILL
 	if (opt_dss && opt_junk)
 		memset(node->addr, 0x5a, node->size);
 #endif
+#endif
 	chunk_dealloc(node->addr, node->size);
 
 	base_node_dealloc(node);
 }
 
-static void *
-imalloc(size_t size)
-{
-	void *ret;
-
-	assert(size != 0);
-
-	if (size <= arena_maxclass)
-		ret = arena_malloc(choose_arena(), size, false);
-	else
-		ret = huge_malloc(size, false);
-
-	return (ret);
-}
-
-static void *
-ipalloc(size_t alignment, size_t size)
-{
-	void *ret;
-	size_t ceil_size;
-
-	/*
-	 * Round size up to the nearest multiple of alignment.
-	 *
-	 * This done, we can take advantage of the fact that for each small
-	 * size class, every object is aligned at the smallest power of two
-	 * that is non-zero in the base two representation of the size.  For
-	 * example:
-	 *
-	 *   Size |   Base 2 | Minimum alignment
-	 *   -----+----------+------------------
-	 *     96 |  1100000 |  32
-	 *    144 | 10100000 |  32
-	 *    192 | 11000000 |  64
-	 *
-	 * Depending on runtime settings, it is possible that arena_malloc()
-	 * will further round up to a power of two, but that never causes
-	 * correctness issues.
-	 */
-	ceil_size = (size + (alignment - 1)) & (-alignment);
-	/*
-	 * (ceil_size < size) protects against the combination of maximal
-	 * alignment and size greater than maximal alignment.
-	 */
-	if (ceil_size < size) {
-		/* size_t overflow. */
-		return (NULL);
-	}
-
-	if (ceil_size <= pagesize || (alignment <= pagesize
-	    && ceil_size <= arena_maxclass))
-		ret = arena_malloc(choose_arena(), ceil_size, false);
-	else {
-		size_t run_size;
-
-		/*
-		 * We can't achieve sub-page alignment, so round up alignment
-		 * permanently; it makes later calculations simpler.
-		 */
-		alignment = PAGE_CEILING(alignment);
-		ceil_size = PAGE_CEILING(size);
-		/*
-		 * (ceil_size < size) protects against very large sizes within
-		 * pagesize of SIZE_T_MAX.
-		 *
-		 * (ceil_size + alignment < ceil_size) protects against the
-		 * combination of maximal alignment and ceil_size large enough
-		 * to cause overflow.  This is similar to the first overflow
-		 * check above, but it needs to be repeated due to the new
-		 * ceil_size value, which may now be *equal* to maximal
-		 * alignment, whereas before we only detected overflow if the
-		 * original size was *greater* than maximal alignment.
-		 */
-		if (ceil_size < size || ceil_size + alignment < ceil_size) {
-			/* size_t overflow. */
-			return (NULL);
-		}
-
-		/*
-		 * Calculate the size of the over-size run that arena_palloc()
-		 * would need to allocate in order to guarantee the alignment.
-		 */
-		if (ceil_size >= alignment)
-			run_size = ceil_size + alignment - pagesize;
-		else {
-			/*
-			 * It is possible that (alignment << 1) will cause
-			 * overflow, but it doesn't matter because we also
-			 * subtract pagesize, which in the case of overflow
-			 * leaves us with a very large run_size.  That causes
-			 * the first conditional below to fail, which means
-			 * that the bogus run_size value never gets used for
-			 * anything important.
-			 */
-			run_size = (alignment << 1) - pagesize;
-		}
-
-		if (run_size <= arena_maxclass) {
-			ret = arena_palloc(choose_arena(), alignment, ceil_size,
-			    run_size);
-		} else if (alignment <= chunksize)
-			ret = huge_malloc(ceil_size, false);
-		else
-			ret = huge_palloc(alignment, ceil_size);
-	}
-
-	assert(((uintptr_t)ret & (alignment - 1)) == 0);
-	return (ret);
-}
-
-static void *
-icalloc(size_t size)
-{
-	void *ret;
-
-	if (size <= arena_maxclass)
-		ret = arena_malloc(choose_arena(), size, true);
-	else
-		ret = huge_malloc(size, true);
-
-	return (ret);
-}
-
-static size_t
-isalloc(const void *ptr)
-{
-	size_t ret;
-	arena_chunk_t *chunk;
-
-	assert(ptr != NULL);
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr) {
-		/* Region. */
-		assert(chunk->arena->magic == ARENA_MAGIC);
-
-		ret = arena_salloc(ptr);
-	} else {
-		extent_node_t *node, key;
-
-		/* Chunk (huge allocation). */
-
-		malloc_mutex_lock(&huge_mtx);
-
-		/* Extract from tree of huge allocations. */
-		key.addr = __DECONST(void *, ptr);
-		node = RB_FIND(extent_tree_ad_s, &huge, &key);
-		assert(node != NULL);
-
-		ret = node->size;
-
-		malloc_mutex_unlock(&huge_mtx);
-	}
-
-	return (ret);
-}
-
-static void *
-iralloc(void *ptr, size_t size)
-{
-	void *ret;
-	size_t oldsize;
-
-	assert(ptr != NULL);
-	assert(size != 0);
-
-	oldsize = isalloc(ptr);
-
-	if (size <= arena_maxclass)
-		ret = arena_ralloc(ptr, size, oldsize);
-	else
-		ret = huge_ralloc(ptr, size, oldsize);
-
-	return (ret);
-}
-
-static void
-idalloc(void *ptr)
-{
-	arena_chunk_t *chunk;
-
-	assert(ptr != NULL);
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (chunk != ptr) {
-		/* Region. */
-		arena_dalloc(chunk->arena, chunk, ptr);
-	} else
-		huge_dalloc(ptr);
-}
-
-
 #ifdef MOZ_MEMORY_BSD
 static inline unsigned
 malloc_ncpus(void)
 {
 	unsigned ret;
 	int mib[2];
 	size_t len;
 
@@ -4963,16 +4776,17 @@ malloc_ncpus(void)
 					ret++;
 				}
 			} else
 				column = -1;
 		}
 	}
 	if (ret == 0)
 		ret = 1; /* Something went wrong in the parser. */
+	close(fd);
 
 	return (ret);
 }
 #elif (defined(MOZ_MEMORY_DARWIN))
 #include <mach/mach_init.h>
 #include <mach/mach_host.h>
 
 static inline unsigned
@@ -5067,24 +4881,36 @@ malloc_print_stats(void)
 		    "enabled",
 #endif
 		    "\n", "");
 		_malloc_message("Boolean MALLOC_OPTIONS: ",
 		    opt_abort ? "A" : "a", "", "");
 #ifdef MALLOC_DSS
 		_malloc_message(opt_dss ? "D" : "d", "", "", "");
 #endif
+#ifdef MALLOC_FILL
 		_malloc_message(opt_junk ? "J" : "j", "", "", "");
+#endif
 #ifdef MALLOC_DSS
 		_malloc_message(opt_mmap ? "M" : "m", "", "", "");
 #endif
-		_malloc_message(opt_utrace ? "PU" : "Pu",
-		    opt_sysv ? "V" : "v",
-		    opt_xmalloc ? "X" : "x",
-		    opt_zero ? "Z\n" : "z\n");
+		_malloc_message("P", "", "", "");
+#ifdef MALLOC_UTRACE
+		_malloc_message(opt_utrace ? "U" : "u", "", "", "");
+#endif
+#ifdef MALLOC_SYSV
+		_malloc_message(opt_sysv ? "V" : "v", "", "", "");
+#endif
+#ifdef MALLOC_XMALLOC
+		_malloc_message(opt_xmalloc ? "X" : "x", "", "", "");
+#endif
+#ifdef MALLOC_FILL
+		_malloc_message(opt_zero ? "Z" : "z", "", "", "");
+#endif
+		_malloc_message("\n", "", "", "");
 
 		_malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
 		_malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
 #ifdef MALLOC_LAZY_FREE
 		if (opt_lazy_free_2pow >= 0) {
 			_malloc_message("Lazy free slots: ",
 			    umax2s(1U << opt_lazy_free_2pow, s), "\n", "");
 		} else
@@ -5094,18 +4920,18 @@ malloc_print_stats(void)
 		_malloc_message("Arena balance threshold: ",
 		    umax2s(opt_balance_threshold, s), "\n", "");
 #endif
 		_malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
 		    "\n", "");
 		_malloc_message("Quantum size: ", umax2s(quantum, s), "\n", "");
 		_malloc_message("Max small size: ", umax2s(small_max, s), "\n",
 		    "");
-		_malloc_message("Max free per arena: ", umax2s(opt_free_max, s),
-		    "\n", "");
+		_malloc_message("Max dirty pages per arena: ",
+		    umax2s(opt_dirty_max, s), "\n", "");
 
 		_malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
 		_malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");
 
 #ifdef MALLOC_STATS
 		{
 			size_t allocated, mapped;
 #ifdef MALLOC_BALANCE
@@ -5197,30 +5023,34 @@ malloc_print_stats(void)
 	}
 }
 
 /*
  * FreeBSD's pthreads implementation calls malloc(3), so the malloc
  * implementation has to take pains to avoid infinite recursion during
  * initialization.
  */
-#ifndef MOZ_MEMORY_WINDOWS
-static inline
-#endif
-bool
+#if (defined(MOZ_MEMORY_WINDOWS) || defined(MOZ_MEMORY_DARWIN))
+#define	malloc_init() false
+#else
+static inline bool
 malloc_init(void)
 {
 
 	if (malloc_initialized == false)
 		return (malloc_init_hard());
 
 	return (false);
 }
-
-static bool
+#endif
+
+#ifndef MOZ_MEMORY_WINDOWS
+static
+#endif
+bool
 malloc_init_hard(void)
 {
 	unsigned i;
 	char buf[PATH_MAX + 1];
 	const char *opts;
 	long result;
 #ifndef MOZ_MEMORY_WINDOWS
 	int linklen;
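
[Editor's note] In the hunk above, malloc_init() becomes a constant-false macro on Windows and Darwin, where initialization is triggered explicitly (the patched CRT and the Darwin constructor call malloc_init_hard() directly); elsewhere it keeps the usual lazy check. A sketch of that lazy-check pattern (names are illustrative; false means success, matching the jemalloc convention):

	#include <stdbool.h>

	static bool initialized_sketch = false;

	static bool init_hard_sketch(void)
	{
		/* one-time setup would go here */
		initialized_sketch = true;
		return false;	/* false == success */
	}

	/* Lazy entry point: only the first caller pays for the hard
	 * initialization; platforms with an explicit init hook can compile
	 * this down to a constant "false". */
	static inline bool init_sketch(void)
	{
		if (initialized_sketch == false)
			return init_hard_sketch();
		return false;
	}
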
@@ -5386,48 +5216,44 @@ MALLOC_OUT:
 #endif
 					break;
 				case 'D':
 #ifdef MALLOC_DSS
 					opt_dss = true;
 #endif
 					break;
 				case 'f':
-					opt_free_max >>= 1;
+					opt_dirty_max >>= 1;
 					break;
 				case 'F':
-					if (opt_free_max == 0)
-						opt_free_max = 1;
-					else if ((opt_free_max << 1) != 0)
-						opt_free_max <<= 1;
+					if (opt_dirty_max == 0)
+						opt_dirty_max = 1;
+					else if ((opt_dirty_max << 1) != 0)
+						opt_dirty_max <<= 1;
 					break;
+#ifdef MALLOC_FILL
 				case 'j':
 					opt_junk = false;
 					break;
 				case 'J':
 					opt_junk = true;
 					break;
+#endif
 				case 'k':
 					/*
 					 * Chunks always require at least one
 					 * header page, so chunks can never be
 					 * smaller than two pages.
 					 */
 					if (opt_chunk_2pow > pagesize_2pow + 1)
 						opt_chunk_2pow--;
 					break;
 				case 'K':
-					/*
-					 * There must be fewer pages in a chunk
-					 * than can be recorded by the pos
-					 * field of arena_chunk_map_t, in order
-					 * to make POS_EMPTY/POS_FREE special.
-					 */
-					if (opt_chunk_2pow - pagesize_2pow
-					    < (sizeof(uint32_t) << 3) - 1)
+					if (opt_chunk_2pow + 1 <
+					    (sizeof(size_t) << 3))
 						opt_chunk_2pow++;
 					break;
 				case 'l':
 #ifdef MALLOC_LAZY_FREE
 					if (opt_lazy_free_2pow >= 0)
 						opt_lazy_free_2pow--;
 #endif
 					break;
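
[Editor's note] The option letters keep the convention that a lower-case letter halves a tunable and its upper-case form doubles it; here that now applies to opt_dirty_max (formerly opt_free_max), and the 'K' bound simply keeps the chunk size representable in a size_t. A small standalone sketch of the halve/double-without-wrapping idiom (the starting value is made up):

	#include <stddef.h>
	#include <stdio.h>

	int main(void)
	{
		size_t dirty_max = 512;		/* illustrative starting value */

		/* 'f': halve the limit. */
		dirty_max >>= 1;

		/* 'F': double the limit, but refuse to wrap around to zero. */
		if (dirty_max == 0)
			dirty_max = 1;
		else if ((dirty_max << 1) != 0)
			dirty_max <<= 1;

		printf("dirty_max = %zu\n", dirty_max);	/* prints 512 again */
		return 0;
	}
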
@@ -5473,43 +5299,51 @@ MALLOC_OUT:
 					    QUANTUM_2POW_MIN)
 						opt_small_max_2pow--;
 					break;
 				case 'S':
 					if (opt_small_max_2pow < pagesize_2pow
 					    - 1)
 						opt_small_max_2pow++;
 					break;
+#ifdef MALLOC_UTRACE
 				case 'u':
 					opt_utrace = false;
 					break;
 				case 'U':
 					opt_utrace = true;
 					break;
+#endif
+#ifdef MALLOC_SYSV
 				case 'v':
 					opt_sysv = false;
 					break;
 				case 'V':
 					opt_sysv = true;
 					break;
+#endif
+#ifdef MALLOC_XMALLOC
 				case 'x':
 					opt_xmalloc = false;
 					break;
 				case 'X':
 					opt_xmalloc = true;
 					break;
+#endif
+#ifdef MALLOC_FILL
 				case 'z':
 					opt_zero = false;
 					break;
 				case 'Z':
 					opt_zero = true;
 					break;
+#endif
 				default: {
 					char cbuf[2];
-					
+
 					cbuf[0] = opts[j];
 					cbuf[1] = '\0';
 					_malloc_message(_getprogname(),
 					    ": (malloc) Unsupported character "
 					    "in malloc options: '", cbuf,
 					    "'\n");
 				}
 				}
@@ -5553,23 +5387,30 @@ MALLOC_OUT:
 		small_min = 1;
 	assert(small_min <= quantum);
 
 	/* Set variables according to the value of opt_chunk_2pow. */
 	chunksize = (1LU << opt_chunk_2pow);
 	chunksize_mask = chunksize - 1;
 	chunk_npages = (chunksize >> pagesize_2pow);
 	{
-		unsigned header_size;
-
-		header_size = sizeof(arena_chunk_t) + (sizeof(arena_chunk_map_t)
-		    * (chunk_npages - 1));
-		arena_chunk_header_npages = (header_size >> pagesize_2pow);
-		if ((header_size & pagesize_mask) != 0)
-			arena_chunk_header_npages++;
+		size_t header_size;
+
+		/*
+		 * Compute the header size such that it is large
+		 * enough to contain the page map and enough nodes for the
+		 * worst case: one node per non-header page plus one extra for
+		 * situations where we briefly have one more node allocated
+		 * than we will need.
+		 */
+		header_size = sizeof(arena_chunk_t) +
+		    (sizeof(arena_chunk_map_t) * (chunk_npages - 1)) +
+		    (sizeof(extent_node_t) * chunk_npages);
+		arena_chunk_header_npages = (header_size >> pagesize_2pow) +
+		    ((header_size & pagesize_mask) != 0);
 	}
 	arena_maxclass = chunksize - (arena_chunk_header_npages <<
 	    pagesize_2pow);
 #ifdef MALLOC_LAZY_FREE
 	/*
 	 * Make sure that allocating the free_cache does not exceed the limits
 	 * of what base_alloc() can handle.
 	 */
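
[Editor's note] The header-size computation in the hunk above reserves room for the page map plus enough extent nodes for the worst case described in its comment, and rounds the total up to whole pages without a branch. A worked sketch with made-up structure sizes (every number below is illustrative, not the real jemalloc layout):

	#include <stddef.h>
	#include <stdio.h>

	int main(void)
	{
		size_t pagesize_2pow = 12;			/* 4 KiB pages */
		size_t pagesize_mask = ((size_t)1 << pagesize_2pow) - 1;
		size_t chunksize = (size_t)1 << 20;		/* 1 MiB chunks */
		size_t chunk_npages = chunksize >> pagesize_2pow;

		/* Stand-ins for sizeof(arena_chunk_t), sizeof(arena_chunk_map_t),
		 * and sizeof(extent_node_t). */
		size_t chunk_hdr = 256, map_elem = 8, node = 48;

		size_t header_size = chunk_hdr +
		    map_elem * (chunk_npages - 1) +
		    node * chunk_npages;

		/* The shift gives the floor in pages; the comparison adds one
		 * more page whenever there is a partial-page remainder. */
		size_t header_npages = (header_size >> pagesize_2pow) +
		    ((header_size & pagesize_mask) != 0);

		size_t arena_maxclass = chunksize -
		    (header_npages << pagesize_2pow);

		printf("header pages: %zu, arena_maxclass: %zu\n",
		    header_npages, arena_maxclass);
		return 0;
	}
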
@@ -5592,18 +5433,18 @@ MALLOC_OUT:
 	/* Initialize chunks data. */
 	malloc_mutex_init(&huge_mtx);
 	RB_INIT(&huge);
 #ifdef MALLOC_DSS
 	malloc_mutex_init(&dss_mtx);
 	dss_base = sbrk(0);
 	dss_prev = dss_base;
 	dss_max = dss_base;
+	RB_INIT(&dss_chunks_szad);
 	RB_INIT(&dss_chunks_ad);
-	RB_INIT(&dss_chunks_szad);
 #endif
 #ifdef MALLOC_STATS
 	huge_nmalloc = 0;
 	huge_ndalloc = 0;
 	huge_allocated = 0;
 #endif
 
 	/* Initialize base allocation data structures. */
@@ -5614,19 +5455,17 @@ MALLOC_OUT:
 	/*
 	 * Allocate a base chunk here, since it doesn't actually have to be
 	 * chunk-aligned.  Doing this before allocating any other chunks allows
 	 * the use of space that would otherwise be wasted.
 	 */
 	if (opt_dss)
 		base_pages_alloc(0);
 #endif
-	base_node_mags_avail = NULL;
-	base_node_mag = NULL;
-	base_node_mag_partial = NULL;
+	base_nodes = NULL;
 	malloc_mutex_init(&base_mtx);
 
 	if (ncpus > 1) {
 		/*
 		 * For SMP systems, create four times as many arenas as there
 		 * are CPUs by default.
 		 */
 		opt_narenas_lshift += 2;
@@ -5748,122 +5587,134 @@ MALLOC_OUT:
 	return (false);
 }
 
 /* XXX Why not just expose malloc_print_stats()? */
 #ifdef MOZ_MEMORY_WINDOWS
 void
 malloc_shutdown()
 {
+
 	malloc_print_stats();
 }
 #endif
 
 /*
  * End general internal functions.
  */
 /******************************************************************************/
 /*
  * Begin malloc(3)-compatible functions.
  */
+
+VISIBLE
 #ifdef MOZ_MEMORY_DARWIN
-__attribute__((visibility("default"))) 
 inline void *
 moz_malloc(size_t size)
 #else
 void *
 malloc(size_t size)
 #endif
 {
 	void *ret;
 
 	if (malloc_init()) {
 		ret = NULL;
 		goto RETURN;
 	}
 
 	if (size == 0) {
+#ifdef MALLOC_SYSV
 		if (opt_sysv == false)
+#endif
 			size = 1;
+#ifdef MALLOC_SYSV
 		else {
 			ret = NULL;
 			goto RETURN;
 		}
+#endif
 	}
 
 	ret = imalloc(size);
 
 RETURN:
 	if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
 		if (opt_xmalloc) {
 			_malloc_message(_getprogname(),
 			    ": (malloc) Error in malloc(): out of memory\n", "",
 			    "");
 			abort();
 		}
+#endif
 		errno = ENOMEM;
 	}
 
 	UTRACE(0, size, ret);
 	return (ret);
 }
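
[Editor's note] With MALLOC_SYSV and MALLOC_XMALLOC compiled in, the wrapper above turns a zero-byte request into a one-byte allocation by default, returns NULL under the "V" option, and on allocation failure either sets errno or aborts under "X". A standalone sketch of that decision table (the flag names and the use of the system malloc() are illustrative):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	static bool opt_sysv_sketch = false;	/* "V"/"v" */
	static bool opt_xmalloc_sketch = false;	/* "X"/"x" */

	static void *malloc_sketch(size_t size)
	{
		void *ret;

		if (size == 0) {
			if (opt_sysv_sketch)
				return NULL;	/* SYSV: malloc(0) == NULL */
			size = 1;		/* default: real 1-byte object */
		}

		ret = malloc(size);
		if (ret == NULL) {
			if (opt_xmalloc_sketch) {
				fprintf(stderr, "out of memory\n");
				abort();	/* "X": out of memory is fatal */
			}
			errno = ENOMEM;
		}
		return ret;
	}

	int main(void)
	{
		void *p = malloc_sketch(0);
		free(p);
		return 0;
	}
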
 
+VISIBLE
 #ifdef MOZ_MEMORY_DARWIN
-__attribute__((visibility("default"))) 
 inline int
 moz_posix_memalign(void **memptr, size_t alignment, size_t size)
 #else
 int
 posix_memalign(void **memptr, size_t alignment, size_t size)
 #endif
 {
 	int ret;
 	void *result;
 
 	if (malloc_init())
 		result = NULL;
 	else {
 		/* Make sure that alignment is a large enough power of 2. */
 		if (((alignment - 1) & alignment) != 0
 		    || alignment < sizeof(void *)) {
+#ifdef MALLOC_XMALLOC
 			if (opt_xmalloc) {
 				_malloc_message(_getprogname(),
 				    ": (malloc) Error in posix_memalign(): "
 				    "invalid alignment\n", "", "");
 				abort();
 			}
+#endif
 			result = NULL;
 			ret = EINVAL;
 			goto RETURN;
 		}
 
 		result = ipalloc(alignment, size);
 	}
 
 	if (result == NULL) {
+#ifdef MALLOC_XMALLOC
 		if (opt_xmalloc) {
 			_malloc_message(_getprogname(),
 			": (malloc) Error in posix_memalign(): out of memory\n",
 			"", "");
 			abort();
 		}
+#endif
 		ret = ENOMEM;
 		goto RETURN;
 	}
 
 	*memptr = result;
 	ret = 0;
 
 RETURN:
 	UTRACE(0, size, result);
 	return (ret);
 }
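
[Editor's note] The alignment test in posix_memalign() above is the standard power-of-two check: (a - 1) & a clears the lowest set bit, so the result is zero exactly when a single bit is set. A one-function sketch:

	#include <stdbool.h>
	#include <stddef.h>

	/* Valid posix_memalign() alignments are powers of two no smaller
	 * than sizeof(void *). */
	static bool alignment_ok(size_t alignment)
	{
		return ((alignment - 1) & alignment) == 0 &&
		    alignment >= sizeof(void *);
	}
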
 
+VISIBLE
 #ifdef MOZ_MEMORY_DARWIN
-__attribute__((visibility("default"))) 
 inline void *
 moz_memalign(size_t alignment, size_t size)
 #else
 void *
 memalign(size_t alignment, size_t size)
 #endif
 {
 	void *ret;
@@ -5873,34 +5724,34 @@ memalign(size_t alignment, size_t size)
 #else
 	if (posix_memalign(&ret, alignment, size) != 0)
 #endif
 		return (NULL);
 
 	return ret;
 }
 
+VISIBLE
 #ifdef MOZ_MEMORY_DARWIN
-__attribute__((visibility("default"))) 
 inline void *
 moz_valloc(size_t size)
 #else
 void *
 valloc(size_t size)
 #endif
 {
 #ifdef MOZ_MEMORY_DARWIN
 	return (moz_memalign(pagesize, size));
 #else
 	return (memalign(pagesize, size));
 #endif
 }
 
+VISIBLE
 #ifdef MOZ_MEMORY_DARWIN
-__attribute__((visibility("default"))) 
 inline void *
 moz_calloc(size_t num, size_t size)
 #else
 void *
 calloc(size_t num, size_t size)
 #endif
 {
 	void *ret;
@@ -5909,111 +5760,127 @@ calloc(size_t num, size_t size)
 	if (malloc_init()) {
 		num_size = 0;
 		ret = NULL;
 		goto RETURN;
 	}
 
 	num_size = num * size;
 	if (num_size == 0) {
+#ifdef MALLOC_SYSV
 		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
+#endif
 			num_size = 1;
+#ifdef MALLOC_SYSV
 		else {
 			ret = NULL;
 			goto RETURN;
 		}
+#endif
 	/*
 	 * Try to avoid division here.  We know that it isn't possible to
 	 * overflow during multiplication if neither operand uses any of the
 	 * most significant half of the bits in a size_t.
 	 */
 	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
 	    && (num_size / size != num)) {
 		/* size_t overflow. */
 		ret = NULL;
 		goto RETURN;
 	}
 
 	ret = icalloc(num_size);
 
 RETURN:
 	if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
 		if (opt_xmalloc) {
 			_malloc_message(_getprogname(),
 			    ": (malloc) Error in calloc(): out of memory\n", "",
 			    "");
 			abort();
 		}
+#endif
 		errno = ENOMEM;
 	}
 
 	UTRACE(0, num_size, ret);
 	return (ret);
 }
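
[Editor's note] The overflow guard above avoids a division in the common case: when neither operand has a bit set in the upper half of a size_t, the product cannot wrap, so the division test only runs for large operands. A standalone sketch of the same test (SIZE_MAX here stands in for the SIZE_T_MAX used in the file):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Returns true if num * size overflows a size_t, writing the
	 * (possibly wrapped) product to *prod either way. */
	static bool mul_overflows(size_t num, size_t size, size_t *prod)
	{
		const size_t high_half = SIZE_MAX << (sizeof(size_t) << 2);

		*prod = num * size;
		if (((num | size) & high_half) != 0 && size != 0 &&
		    *prod / size != num)
			return true;
		return false;
	}

	int main(void)
	{
		size_t p;
		printf("%d\n", (int)mul_overflows(1000, 1000, &p));	/* 0 */
		printf("%d\n", (int)mul_overflows(SIZE_MAX / 2, 4, &p));	/* 1 */
		return 0;
	}
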
 
+VISIBLE
 #ifdef MOZ_MEMORY_DARWIN
-__attribute__((visibility("default"))) 
 inline void *
 moz_realloc(void *ptr, size_t size)
 #else
 void *
 realloc(void *ptr, size_t size)
 #endif
 {
 	void *ret;
 
 	if (size == 0) {
+#ifdef MALLOC_SYSV
 		if (opt_sysv == false)
+#endif
 			size = 1;
+#ifdef MALLOC_SYSV
 		else {
 			if (ptr != NULL)
 				idalloc(ptr);
 			ret = NULL;
 			goto RETURN;
 		}
+#endif
 	}
 
 	if (ptr != NULL) {
 		assert(malloc_initialized);
 
 		ret = iralloc(ptr, size);
 
 		if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
 			if (opt_xmalloc) {
 				_malloc_message(_getprogname(),
 				    ": (malloc) Error in realloc(): out of "
 				    "memory\n", "", "");
 				abort();
 			}
+#endif
 			errno = ENOMEM;
 		}
 	} else {
 		if (malloc_init())
 			ret = NULL;
 		else
 			ret = imalloc(size);
 
 		if (ret == NULL) {
+#ifdef MALLOC_XMALLOC
 			if (opt_xmalloc) {
 				_malloc_message(_getprogname(),
 				    ": (malloc) Error in realloc(): out of "
 				    "memory\n", "", "");
 				abort();
 			}
+#endif
 			errno = ENOMEM;
 		}
 	}
 
+#ifdef MALLOC_SYSV
 RETURN:
+#endif
 	UTRACE(ptr, size, ret);
 	return (ret);
 }
 
+VISIBLE
 #ifdef MOZ_MEMORY_DARWIN
-__attribute__((visibility("default"))) 
 inline void
 moz_free(void *ptr)
 #else
 void
 free(void *ptr)
 #endif
 {
 
@@ -6028,18 +5895,18 @@ free(void *ptr)
 /*
  * End malloc(3)-compatible functions.
  */
 /******************************************************************************/
 /*
  * Begin non-standard functions.
  */
 
+VISIBLE
 #ifdef MOZ_MEMORY_DARWIN
-__attribute__((visibility("default"))) 
 inline size_t
 moz_malloc_usable_size(const void *ptr)
 #else
 size_t
 malloc_usable_size(const void *ptr)
 #endif
 {
 
@@ -6047,19 +5914,32 @@ malloc_usable_size(const void *ptr)
 
 	return (isalloc(ptr));
 }
 
 #ifdef MOZ_MEMORY_WINDOWS
 void*
 _recalloc(void *ptr, size_t count, size_t size)
 {
+	size_t oldsize = (ptr != NULL) ? isalloc(ptr) : 0;
 	size_t newsize = count * size;
 
+	/*
+	 * In order for all trailing bytes to be zeroed, the caller needs to
+	 * use calloc(), followed by recalloc().  However, the current calloc()
+	 * implementation only zeros the bytes requested, so if recalloc() is
+	 * to work 100% correctly, calloc() will need to change to zero
+	 * trailing bytes.
+	 */
+
 	ptr = realloc(ptr, newsize);
+	if (ptr != NULL && oldsize < newsize) {
+		memset((void *)((uintptr_t)ptr + oldsize), 0, newsize -
+		    oldsize);
+	}
 
 	return ptr;
 }
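
[Editor's note] _recalloc() above now zeroes the bytes newly exposed by growth, since realloc() alone leaves them uninitialized; the in-code comment records the remaining gap around calloc()'s trailing bytes. A portable sketch of the same grow-and-zero pattern, with the caller tracking the old size because standard C has no isalloc():

	#include <stdlib.h>
	#include <string.h>

	/* Resize a buffer and zero any bytes beyond its previous size.
	 * Overflow checking of count * size is omitted for brevity. */
	static void *recalloc_sketch(void *ptr, size_t oldsize, size_t count,
	    size_t size)
	{
		size_t newsize = count * size;
		void *p = realloc(ptr, newsize);

		if (p != NULL && oldsize < newsize)
			memset((char *)p + oldsize, 0, newsize - oldsize);
		return p;
	}
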
 
 /*
  * This impl of _expand doesn't ever actually expand or shrink blocks: it
  * simply replies that you may continue using a shrunk block.
  */
@@ -6070,16 +5950,17 @@ void*
 		return ptr;
 
 	return NULL;
 }
 
 size_t
 _msize(const void *ptr)
 {
+
 	return malloc_usable_size(ptr);
 }
 #endif
 
 
 /*
  * End non-standard functions.
  */
@@ -6327,24 +6208,24 @@ create_zone(void)
 	zone_introspect.log = NULL;
 	zone_introspect.force_lock = (void *)zone_force_lock;
 	zone_introspect.force_unlock = (void *)zone_force_unlock;
 	zone_introspect.statistics = NULL;
 
 	return (&zone);
 }
 
-__attribute__((visibility("default")))
+__attribute__((constructor))
 void
 jemalloc_darwin_init(void)
 {
 	extern unsigned malloc_num_zones;
 	extern malloc_zone_t **malloc_zones;
 
-	if (malloc_init())
+	if (malloc_init_hard())
 		abort();
 
 	/*
 	 * The following code is *not* thread-safe, so it's critical that
 	 * initialization be manually triggered.
 	 */
 
 	/* Register the custom zones. */
@@ -6355,15 +6236,9 @@ jemalloc_darwin_init(void)
 	 * Shift malloc_zones around so that zone is first, which makes it the
 	 * default zone.
 	 */
 	assert(malloc_num_zones > 1);
 	memmove(&malloc_zones[1], &malloc_zones[0],
 		sizeof(malloc_zone_t *) * (malloc_num_zones - 1));
 	malloc_zones[0] = &zone;
 }
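
[Editor's note] jemalloc_darwin_init() above now runs as a static constructor, calls malloc_init_hard() directly, and then moves the custom zone to slot 0 of malloc_zones so it becomes the default. A minimal sketch of the constructor attribute alone (the body is a placeholder, not the zone-registration code):

	#include <stdio.h>

	/* GCC/Clang run constructor-attributed functions before main(), which
	 * is what lets the Darwin build initialize the allocator and register
	 * its malloc zone without any explicit call from the application. */
	__attribute__((constructor))
	static void init_early_sketch(void)
	{
		fputs("initialized before main()\n", stderr);
	}

	int main(void)
	{
		return 0;
	}
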
-
-static void
-jemalloc_darwin_exit(void)
-{
-}
-
-#endif
+#endif