--- a/memory/jemalloc/Makefile.in
+++ b/memory/jemalloc/Makefile.in
@@ -68,18 +68,18 @@ libs:: $(CRT_OBJ_DIR)/build/intel/mozcrt
done
# truly awful
#XXX: get ed into mozillabuild, bug 415123
$(PERL) $(srcdir)/apply-ed-patches.pl $(srcdir)/crtsp1.diff \
$(CRT_OBJ_DIR) $(srcdir)/ed.exe
$(CRT_OBJ_DIR)/build/intel/mozcrt19.dll: \
$(CRT_OBJ_DIR)/jemalloc.c $(srcdir)/jemalloc.c $(srcdir)/jemalloc.h \
- $(srcdir)/tree.h
- cp $(srcdir)/jemalloc.c $(srcdir)/jemalloc.h $(srcdir)/tree.h $(CRT_OBJ_DIR)
+ $(srcdir)/rb.h
+ cp $(srcdir)/jemalloc.c $(srcdir)/jemalloc.h $(srcdir)/rb.h $(CRT_OBJ_DIR)
# this pretty much sucks, but nmake and make don't play well together
$(PYTHON) $(srcdir)/build-crt.py $(CRT_OBJ_DIR)
#XXX: these don't link right for some reason
rm $(CRT_OBJ_DIR)/build/intel/{libcmt,libcpmt}.lib
else
# Using a pre-built DLL, so just install it.
libs:: $(WIN32_CUSTOM_CRT_DIR)/mozcrt19.dll
$(INSTALL) $< $(FINAL_TARGET)
@@ -94,16 +94,19 @@ MODULE_OPTIMIZE_FLAGS = -xO5
endif
endif
LIBRARY_NAME = jemalloc
# Build jemalloc as a shared lib. This is mandatory for Darwin, since a library
# init function is used on that platform.
FORCE_SHARED_LIB= 1
+# -std=c99 enables the C99 variable-length arrays used by rb.h, and
+# -fgnu89-inline keeps the existing inline declarations warning-free under C99.
+OS_CFLAGS += -std=c99 -fgnu89-inline
CSRCS = \
jemalloc.c \
$(NULL)
#XXX: PGO on Linux causes problems here
# See bug 419470
NO_PROFILE_GUIDED_OPTIMIZE = 1
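
For context, rb.h (added below) presumably relies on C99 variable-length
arrays unless RB_NO_C99_VARARRAYS is defined. A minimal standalone sketch of
the feature these flags enable (hypothetical code, not part of the tree):

	#include <stdio.h>

	static int
	sum_path(unsigned depth)
	{
		int path[depth];	/* C99 VLA: length chosen at run time. */
		unsigned i;
		int sum = 0;

		for (i = 0; i < depth; i++)
			path[i] = (int)i;
		for (i = 0; i < depth; i++)
			sum += path[i];
		return (sum);
	}

	int
	main(void)
	{
		printf("%d\n", sum_path(8));	/* Prints 28. */
		return (0);
	}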
--- a/memory/jemalloc/jemalloc.c
+++ b/memory/jemalloc/jemalloc.c
@@ -41,17 +41,18 @@
*
* + Memory is managed in chunks and runs (chunks can be split into runs),
* rather than as individual pages. This provides a constant-time
* mechanism for associating allocations with particular arenas.
*
* Allocation requests are rounded up to the nearest size class, and no record
* of the original request size is maintained. Allocations are broken into
* categories according to size class. Assuming runtime defaults, 4 kB pages
- * and a 16 byte quantum, the size classes in each category are as follows:
+ * and a 16 byte quantum on a 32-bit system, the size classes in each category
+ * are as follows:
*
* |=====================================|
* | Category | Subcategory | Size |
* |=====================================|
* | Small | Tiny | 2 |
* | | | 4 |
* | | | 8 |
* | |----------------+---------|
@@ -65,19 +66,19 @@
* | |----------------+---------|
* | | Sub-page | 1 kB |
* | | | 2 kB |
* |=====================================|
* | Large | 4 kB |
* | | 8 kB |
* | | 12 kB |
* | | ... |
+ * | | 1008 kB |
* | | 1012 kB |
* | | 1016 kB |
- * | | 1020 kB |
* |=====================================|
* | Huge | 1 MB |
* | | 2 MB |
* | | 3 MB |
* | | ... |
* |=====================================|
*
* A different mechanism is used for each category:
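
To make the rounding described above concrete (an illustrative sketch with
hypothetical values, not jemalloc's code): a quantum-spaced request is rounded
up to the next multiple of the 16-byte quantum, so malloc(100) is served from
the 112-byte size class:

	#include <stdio.h>

	#define QUANTUM		((size_t)16)
	#define QUANTUM_MASK	(QUANTUM - 1)

	int
	main(void)
	{
		size_t request = 100;
		size_t rounded = (request + QUANTUM_MASK) & ~QUANTUM_MASK;

		printf("%zu -> %zu\n", request, rounded);	/* 100 -> 112 */
		return (0);
	}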
@@ -197,17 +198,16 @@
#include <stdlib.h>
#include <string.h>
#ifdef MOZ_MEMORY_WINDOWS
#include <cruntime.h>
#include <internal.h>
#include <windows.h>
#include <io.h>
-#include "tree.h"
#pragma warning( disable: 4267 4996 4146 )
#define false FALSE
#define true TRUE
#define inline __inline
#define SIZE_T_MAX SIZE_MAX
#define STDERR_FILENO 2
@@ -248,16 +248,17 @@ getenv(const char *name)
return (NULL);
}
typedef unsigned char uint8_t;
typedef unsigned uint32_t;
typedef unsigned long long uint64_t;
typedef unsigned long long uintmax_t;
+typedef long ssize_t;
#define MALLOC_DECOMMIT
#endif
#ifndef MOZ_MEMORY_WINDOWS
#ifndef MOZ_MEMORY_SOLARIS
#include <sys/cdefs.h>
#endif
@@ -281,20 +282,16 @@ typedef unsigned long long uintmax_t;
#ifndef MOZ_MEMORY
#include <sys/stddef.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#ifndef MOZ_MEMORY_SOLARIS
#include <sys/sysctl.h>
#endif
-#include "tree.h"
-#ifndef MOZ_MEMORY
-#include <sys/tree.h>
-#endif
#include <sys/uio.h>
#ifndef MOZ_MEMORY
#include <sys/ktrace.h> /* Must come after several other sys/ includes. */
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/vmparam.h>
#endif
@@ -354,16 +351,22 @@ static const bool __isthreaded = true;
# ifndef NDEBUG
# define NDEBUG
# endif
#endif
#ifndef MOZ_MEMORY_WINDOWS
#include <assert.h>
#endif
+#ifdef MOZ_MEMORY_WINDOWS
+ /* MSVC++ does not support C99 variable-length arrays. */
+# define RB_NO_C99_VARARRAYS
+#endif
+#include "rb.h"
+
#ifdef MALLOC_DEBUG
/* Disable inlining to make debugging easier. */
#ifdef inline
#undef inline
#endif
# define inline
#endif
@@ -671,31 +674,28 @@ struct chunk_stats_s {
/*
* Extent data structures.
*/
/* Tree of extents. */
typedef struct extent_node_s extent_node_t;
struct extent_node_s {
/* Linkage for the size/address-ordered tree. */
- RB_ENTRY(extent_node_s) link_szad;
+ rb_node(extent_node_t) link_szad;
/* Linkage for the address-ordered tree. */
- RB_ENTRY(extent_node_s) link_ad;
+ rb_node(extent_node_t) link_ad;
/* Pointer to the extent that this tree node is responsible for. */
void *addr;
/* Total region size. */
size_t size;
};
-typedef struct extent_tree_szad_s extent_tree_szad_t;
-RB_HEAD(extent_tree_szad_s, extent_node_s);
-typedef struct extent_tree_ad_s extent_tree_ad_t;
-RB_HEAD(extent_tree_ad_s, extent_node_s);
+typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
/*
* Arena data structures.
*/
typedef struct arena_s arena_t;
typedef struct arena_bin_s arena_bin_t;
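
For readers new to rb.h, a rough sketch of what these intrusive declarations
expand to, simplified from the rb_node()/rb_tree() macros added later in this
patch:

	/* rb_node(extent_node_t) link_szad; becomes approximately: */
	struct {
		extent_node_t *rbn_left;
		extent_node_t *rbn_right_red;	/* Color stored in bit 0. */
	} link_szad;

	/* typedef rb_tree(extent_node_t) extent_tree_t; becomes: */
	typedef struct {
		extent_node_t *rbt_root;
		extent_node_t rbt_nil;	/* Embedded sentinel, always black. */
	} extent_tree_t;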
@@ -716,49 +716,48 @@ typedef uint8_t arena_chunk_map_t;
#endif
/* Arena chunk header. */
typedef struct arena_chunk_s arena_chunk_t;
struct arena_chunk_s {
/* Arena that owns the chunk. */
arena_t *arena;
- /* Linkage for the arena's chunk tree. */
- RB_ENTRY(arena_chunk_s) link;
+ /* Linkage for the arena's chunks_all tree. */
+ rb_node(arena_chunk_t) link_all;
/*
* Number of pages in use. This is maintained in order to make
* detection of empty chunks fast.
*/
size_t pages_used;
/* Number of dirty pages. */
size_t ndirty;
/*
* Tree of extent nodes that are embedded in the arena chunk header
* page(s). These nodes are used by arena_chunk_node_alloc().
*/
- extent_tree_ad_t nodes;
+ extent_tree_t nodes;
extent_node_t *nodes_past;
/*
* Map of pages within chunk that keeps track of free/large/small. For
* free runs, only the map entries for the first and last pages are
* kept up to date, so that free runs can be quickly coalesced.
*/
arena_chunk_map_t map[1]; /* Dynamically sized. */
};
-typedef struct arena_chunk_tree_s arena_chunk_tree_t;
-RB_HEAD(arena_chunk_tree_s, arena_chunk_s);
+typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
typedef struct arena_run_s arena_run_t;
struct arena_run_s {
/* Linkage for run trees. */
- RB_ENTRY(arena_run_s) link;
+ rb_node(arena_run_t) link;
#ifdef MALLOC_DEBUG
uint32_t magic;
# define ARENA_RUN_MAGIC 0x384adf93
#endif
/* Bin this run is associated with. */
arena_bin_t *bin;
@@ -767,18 +766,17 @@ struct arena_run_s {
unsigned regs_minelm;
/* Number of free regions in run. */
unsigned nfree;
/* Bitmask of in-use regions (0: in use, 1: free). */
unsigned regs_mask[1]; /* Dynamically sized. */
};
-typedef struct arena_run_tree_s arena_run_tree_t;
-RB_HEAD(arena_run_tree_s, arena_run_s);
+typedef rb_tree(arena_run_t) arena_run_tree_t;
struct arena_bin_s {
/*
* Current run being used to service allocations of this bin's size
* class.
*/
arena_run_t *runcur;
@@ -824,20 +822,18 @@ struct arena_s {
#else
pthread_mutex_t lock;
#endif
#ifdef MALLOC_STATS
arena_stats_t stats;
#endif
- /*
- * Tree of chunks this arena manages.
- */
- arena_chunk_tree_t chunks;
+ /* Tree of all chunks this arena manages. */
+ arena_chunk_tree_t chunks_all;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
* recently freed chunk. The spare is left in the arena's chunk tree
* until it is deleted.
*
* There is one spare chunk per arena, rather than one spare total, in
@@ -854,21 +850,21 @@ struct arena_s {
*/
size_t ndirty;
/*
* Trees of this arena's available runs. Two trees are maintained
* using one set of nodes, since one is needed for first-best-fit run
* allocation, and the other is needed for coalescing.
*/
- extent_tree_szad_t runs_avail_szad;
- extent_tree_ad_t runs_avail_ad;
+ extent_tree_t runs_avail_szad;
+ extent_tree_t runs_avail_ad;
/* Tree of this arena's allocated (in-use) runs. */
- extent_tree_ad_t runs_alloced_ad;
+ extent_tree_t runs_alloced_ad;
#ifdef MALLOC_BALANCE
/*
* The arena load balancing machinery needs to keep track of how much
* lock contention there is. This value is exponentially averaged.
*/
uint32_t contention;
#endif
@@ -935,17 +931,17 @@ static size_t arena_maxclass; /* Max si
/*
* Chunks.
*/
/* Protects chunk-related data structures. */
static malloc_mutex_t huge_mtx;
/* Tree of chunks that are stand-alone huge allocations. */
-static extent_tree_ad_t huge;
+static extent_tree_t huge;
#ifdef MALLOC_DSS
/*
* Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly.
*/
static malloc_mutex_t dss_mtx;
/* Base address of the DSS. */
@@ -956,18 +952,18 @@ static void *dss_prev;
static void *dss_max;
/*
* Trees of chunks that were previously allocated (trees differ only in node
* ordering). These are used when allocating chunks, in an attempt to re-use
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
-static extent_tree_szad_t dss_chunks_szad;
-static extent_tree_ad_t dss_chunks_ad;
+static extent_tree_t dss_chunks_szad;
+static extent_tree_t dss_chunks_ad;
#endif
#ifdef MALLOC_STATS
/* Huge allocation statistics. */
static uint64_t huge_nmalloc;
static uint64_t huge_ndalloc;
static size_t huge_allocated;
#endif
@@ -1966,31 +1962,32 @@ extent_szad_comp(extent_node_t *a, exten
uintptr_t b_addr = (uintptr_t)b->addr;
ret = (a_addr > b_addr) - (a_addr < b_addr);
}
return (ret);
}
-/* Generate red-black tree code for size/address-ordered extents. */
-RB_GENERATE_STATIC(extent_tree_szad_s, extent_node_s, link_szad,
- extent_szad_comp)
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, extent_tree_szad_, extent_tree_t, extent_node_t,
+ link_szad, extent_szad_comp)
static inline int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
{
uintptr_t a_addr = (uintptr_t)a->addr;
uintptr_t b_addr = (uintptr_t)b->addr;
return ((a_addr > b_addr) - (a_addr < b_addr));
}
-/* Generate red-black tree code for address-ordered extents. */
-RB_GENERATE_STATIC(extent_tree_ad_s, extent_node_s, link_ad, extent_ad_comp)
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
+ extent_ad_comp)
/*
* End extent tree code.
*/
/******************************************************************************/
/*
* Begin chunk management functions.
*/
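
rb_wrap() (defined near the end of rb.h, beyond this excerpt) generates a
family of static functions with the given prefix. Judging from the call sites
in this patch, the family generated for the extent_tree_ad_ prefix has roughly
these signatures (a sketch, not the macro's literal output):

	static void		extent_tree_ad_new(extent_tree_t *tree);
	static extent_node_t	*extent_tree_ad_first(extent_tree_t *tree);
	static extent_node_t	*extent_tree_ad_search(extent_tree_t *tree,
	    extent_node_t *key);
	static extent_node_t	*extent_tree_ad_nsearch(extent_tree_t *tree,
	    extent_node_t *key);
	static extent_node_t	*extent_tree_ad_prev(extent_tree_t *tree,
	    extent_node_t *node);
	static void		extent_tree_ad_insert(extent_tree_t *tree,
	    extent_node_t *node);
	static void		extent_tree_ad_remove(extent_tree_t *tree,
	    extent_node_t *node);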
@@ -2180,35 +2177,35 @@ chunk_alloc_dss(size_t size)
static void *
chunk_recycle_dss(size_t size, bool zero)
{
extent_node_t *node, key;
key.addr = NULL;
key.size = size;
malloc_mutex_lock(&dss_mtx);
- node = RB_NFIND(extent_tree_szad_s, &dss_chunks_szad, &key);
+ node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);
if (node != NULL) {
void *ret = node->addr;
/* Remove node from the tree. */
- RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
+ extent_tree_szad_remove(&dss_chunks_szad, node);
if (node->size == size) {
- RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad, node);
+ extent_tree_ad_remove(&dss_chunks_ad, node);
base_node_dealloc(node);
} else {
/*
* Insert the remainder of node's address range as a
* smaller chunk. Its position within dss_chunks_ad
* does not change.
*/
assert(node->size > size);
node->addr = (void *)((uintptr_t)node->addr + size);
node->size -= size;
- RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+ extent_tree_szad_insert(&dss_chunks_szad, node);
}
malloc_mutex_unlock(&dss_mtx);
if (zero)
memset(ret, 0, size);
return (ret);
}
malloc_mutex_unlock(&dss_mtx);
@@ -2392,61 +2389,61 @@ RETURN:
#ifdef MALLOC_DSS
static extent_node_t *
chunk_dealloc_dss_record(void *chunk, size_t size)
{
extent_node_t *node, *prev, key;
key.addr = (void *)((uintptr_t)chunk + size);
- node = RB_NFIND(extent_tree_ad_s, &dss_chunks_ad, &key);
+ node = extent_tree_ad_nsearch(&dss_chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within dss_chunks_ad, so only
* remove/insert from/into dss_chunks_szad.
*/
- RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
+ extent_tree_szad_remove(&dss_chunks_szad, node);
node->addr = chunk;
node->size += size;
- RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+ extent_tree_szad_insert(&dss_chunks_szad, node);
} else {
/*
* Coalescing forward failed, so insert a new node. Drop
* dss_mtx during node allocation, since it is possible that a
* new base chunk will be allocated.
*/
malloc_mutex_unlock(&dss_mtx);
node = base_node_alloc();
malloc_mutex_lock(&dss_mtx);
if (node == NULL)
return (NULL);
node->addr = chunk;
node->size = size;
- RB_INSERT(extent_tree_ad_s, &dss_chunks_ad, node);
- RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+ extent_tree_ad_insert(&dss_chunks_ad, node);
+ extent_tree_szad_insert(&dss_chunks_szad, node);
}
/* Try to coalesce backward. */
- prev = RB_PREV(extent_tree_ad_s, &dss_chunks_ad, node);
+ prev = extent_tree_ad_prev(&dss_chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within dss_chunks_ad, so only
* remove/insert node from/into dss_chunks_szad.
*/
- RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, prev);
- RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad, prev);
-
- RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
+ extent_tree_szad_remove(&dss_chunks_szad, prev);
+ extent_tree_ad_remove(&dss_chunks_ad, prev);
+
+ extent_tree_szad_remove(&dss_chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
- RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+ extent_tree_szad_insert(&dss_chunks_szad, node);
base_node_dealloc(prev);
}
return (node);
}
static bool
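
To make the two coalescing checks in chunk_dealloc_dss_record() concrete, a
standalone sketch with hypothetical addresses (illustration only):

	#include <assert.h>
	#include <stdint.h>

	int
	main(void)
	{
		/* Free extent [0x1000,0x3000), chunk being freed
		 * [0x3000,0x4000), free extent [0x4000,0x6000). */
		uintptr_t prev_addr = 0x1000, prev_size = 0x2000;
		uintptr_t chunk = 0x3000, size = 0x1000;
		uintptr_t next_addr = 0x4000;

		/* Forward coalescing fires when node->addr == key.addr. */
		assert(chunk + size == next_addr);
		/* Backward coalescing fires when prev->addr + prev->size ==
		 * chunk, leaving a single extent [0x1000,0x6000). */
		assert(prev_addr + prev_size == chunk);
		return (0);
	}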
@@ -2476,20 +2473,18 @@ chunk_dealloc_dss(void *chunk, size_t si
* designed multi-threaded programs.
*/
if ((void *)((uintptr_t)chunk + size) == dss_max
&& (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
/* Success. */
dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);
if (node != NULL) {
- RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad,
- node);
- RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad,
- node);
+ extent_tree_szad_remove(&dss_chunks_szad, node);
+ extent_tree_ad_remove(&dss_chunks_ad, node);
base_node_dealloc(node);
}
malloc_mutex_unlock(&dss_mtx);
} else {
malloc_mutex_unlock(&dss_mtx);
#ifdef MOZ_MEMORY_WINDOWS
VirtualAlloc(chunk, size, MEM_RESET, PAGE_READWRITE);
#elif (defined(MOZ_MEMORY_DARWIN))
@@ -2633,22 +2628,17 @@ choose_arena(void)
static arena_t *
choose_arena_hard(void)
{
arena_t *ret;
assert(__isthreaded);
#ifdef MALLOC_BALANCE
- /*
- * Seed the PRNG used for arena load balancing. We can get away with
- * using the same seed here as for the lazy_free PRNG without
- * introducing autocorrelation because the PRNG parameters are
- * distinct.
- */
+ /* Seed the PRNG used for arena load balancing. */
SPRN(balance, (uint32_t)(uintptr_t)(_pthread_self()));
#endif
if (narenas > 1) {
#ifdef MALLOC_BALANCE
unsigned ind;
ind = PRN(balance, narenas_2pow);
@@ -2685,42 +2675,44 @@ arena_chunk_comp(arena_chunk_t *a, arena
uintptr_t b_chunk = (uintptr_t)b;
assert(a != NULL);
assert(b != NULL);
return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
}
-/* Generate red-black tree code for arena chunks. */
-RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp)
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, arena_chunk_tree_all_, arena_chunk_tree_t,
+ arena_chunk_t, link_all, arena_chunk_comp)
static inline int
arena_run_comp(arena_run_t *a, arena_run_t *b)
{
uintptr_t a_run = (uintptr_t)a;
uintptr_t b_run = (uintptr_t)b;
assert(a != NULL);
assert(b != NULL);
return ((a_run > b_run) - (a_run < b_run));
}
-/* Generate red-black tree code for arena runs. */
-RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp)
+/* Wrap red-black tree macros in functions. */
+rb_wrap(static, arena_run_tree_, arena_run_tree_t, arena_run_t, link,
+ arena_run_comp)
static extent_node_t *
arena_chunk_node_alloc(arena_chunk_t *chunk)
{
extent_node_t *ret;
- ret = RB_MIN(extent_tree_ad_s, &chunk->nodes);
+ ret = extent_tree_ad_first(&chunk->nodes);
if (ret != NULL)
- RB_REMOVE(extent_tree_ad_s, &chunk->nodes, ret);
+ extent_tree_ad_remove(&chunk->nodes, ret);
else {
ret = chunk->nodes_past;
chunk->nodes_past = (extent_node_t *)
((uintptr_t)chunk->nodes_past + sizeof(extent_node_t));
assert((uintptr_t)ret + sizeof(extent_node_t) <=
(uintptr_t)chunk + (arena_chunk_header_npages <<
pagesize_2pow));
}
@@ -2728,17 +2720,17 @@ arena_chunk_node_alloc(arena_chunk_t *ch
return (ret);
}
static void
arena_chunk_node_dealloc(arena_chunk_t *chunk, extent_node_t *node)
{
node->addr = (void *)node;
- RB_INSERT(extent_tree_ad_s, &chunk->nodes, node);
+ extent_tree_ad_insert(&chunk->nodes, node);
}
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
{
void *ret;
unsigned i, mask, bit, regind;
@@ -2907,20 +2899,20 @@ arena_run_split(arena_t *arena, arena_ru
size_t run_ind, total_pages, need_pages, rem_pages, i;
extent_node_t *nodeA, *nodeB, key;
/* Insert a node into runs_alloced_ad for the first part of the run. */
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
nodeA = arena_chunk_node_alloc(chunk);
nodeA->addr = run;
nodeA->size = size;
- RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeA);
+ extent_tree_ad_insert(&arena->runs_alloced_ad, nodeA);
key.addr = run;
- nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+ nodeB = extent_tree_ad_search(&arena->runs_avail_ad, &key);
assert(nodeB != NULL);
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
>> pagesize_2pow);
total_pages = nodeB->size >> pagesize_2pow;
need_pages = (size >> pagesize_2pow);
assert(need_pages > 0);
assert(need_pages <= total_pages);
@@ -2982,28 +2974,28 @@ arena_run_split(arena_t *arena, arena_ru
/* Initialize the chunk map. */
if (small)
chunk->map[run_ind + i] = (uint8_t)i;
else
chunk->map[run_ind + i] = CHUNK_MAP_LARGE;
}
/* Keep track of trailing unused pages for later use. */
- RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+ extent_tree_szad_remove(&arena->runs_avail_szad, nodeB);
if (rem_pages > 0) {
/*
* Update nodeB in runs_avail_*. Its position within
* runs_avail_ad does not change.
*/
nodeB->addr = (void *)((uintptr_t)nodeB->addr + size);
nodeB->size -= size;
- RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+ extent_tree_szad_insert(&arena->runs_avail_szad, nodeB);
} else {
/* Remove nodeB from runs_avail_*. */
- RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
+ extent_tree_ad_remove(&arena->runs_avail_ad, nodeB);
arena_chunk_node_dealloc(chunk, nodeB);
}
chunk->pages_used += need_pages;
}
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
@@ -3021,17 +3013,17 @@ arena_chunk_alloc(arena_t *arena)
VALGRIND_MALLOCLIKE_BLOCK(chunk, (arena_chunk_header_npages <<
pagesize_2pow), 0, false);
#ifdef MALLOC_STATS
arena->stats.mapped += chunksize;
#endif
chunk->arena = arena;
- RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
+ arena_chunk_tree_all_insert(&arena->chunks_all, chunk);
/*
* Claim that no pages are in use, since the header is merely
* overhead.
*/
chunk->pages_used = 0;
chunk->ndirty = 0;
@@ -3045,17 +3037,17 @@ arena_chunk_alloc(arena_t *arena)
(CHUNK_MAP_UNTOUCHED
#ifdef MALLOC_DECOMMIT
| CHUNK_MAP_DECOMMITTED
#endif
), (chunk_npages -
arena_chunk_header_npages));
/* Initialize the tree of unused extent nodes. */
- RB_INIT(&chunk->nodes);
+ extent_tree_ad_new(&chunk->nodes);
chunk->nodes_past = (extent_node_t *)QUANTUM_CEILING(
(uintptr_t)&chunk->map[chunk_npages]);
#ifdef MALLOC_DECOMMIT
/*
* Start out decommitted, in order to force a closer
* correspondence between dirty pages and committed untouched
* pages.
@@ -3072,29 +3064,29 @@ arena_chunk_alloc(arena_t *arena)
#endif
}
/* Insert the run into the runs_avail_* red-black trees. */
node = arena_chunk_node_alloc(chunk);
node->addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
pagesize_2pow));
node->size = chunksize - (arena_chunk_header_npages << pagesize_2pow);
- RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, node);
- RB_INSERT(extent_tree_ad_s, &arena->runs_avail_ad, node);
+ extent_tree_szad_insert(&arena->runs_avail_szad, node);
+ extent_tree_ad_insert(&arena->runs_avail_ad, node);
return (chunk);
}
static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
extent_node_t *node, key;
if (arena->spare != NULL) {
- RB_REMOVE(arena_chunk_tree_s, &chunk->arena->chunks,
+ arena_chunk_tree_all_remove(&chunk->arena->chunks_all,
arena->spare);
arena->ndirty -= arena->spare->ndirty;
VALGRIND_FREELIKE_BLOCK(arena->spare, 0);
chunk_dealloc((void *)arena->spare, chunksize);
#ifdef MALLOC_STATS
arena->stats.mapped -= chunksize;
#endif
}
@@ -3102,20 +3094,20 @@ arena_chunk_dealloc(arena_t *arena, aren
/*
* Remove run from the runs trees, regardless of whether this chunk
* will be cached, so that the arena does not use it. Dirty page
* flushing only uses the chunks tree, so leaving this chunk in that
* tree is sufficient for that purpose.
*/
key.addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
pagesize_2pow));
- node = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+ node = extent_tree_ad_search(&arena->runs_avail_ad, &key);
assert(node != NULL);
- RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, node);
- RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, node);
+ extent_tree_szad_remove(&arena->runs_avail_szad, node);
+ extent_tree_ad_remove(&arena->runs_avail_ad, node);
arena_chunk_node_dealloc(chunk, node);
arena->spare = chunk;
}
static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool small, bool zero)
{
@@ -3125,17 +3117,17 @@ arena_run_alloc(arena_t *arena, size_t s
assert(size <= (chunksize - (arena_chunk_header_npages <<
pagesize_2pow)));
assert((size & pagesize_mask) == 0);
/* Search the arena's chunks for the lowest best fit. */
key.addr = NULL;
key.size = size;
- node = RB_NFIND(extent_tree_szad_s, &arena->runs_avail_szad, &key);
+ node = extent_tree_szad_nsearch(&arena->runs_avail_szad, &key);
if (node != NULL) {
run = (arena_run_t *)node->addr;
arena_run_split(arena, run, size, small, zero);
return (run);
}
/*
* No usable runs. Create a new chunk from which to allocate the run.
@@ -3153,32 +3145,33 @@ arena_run_alloc(arena_t *arena, size_t s
static void
arena_purge(arena_t *arena)
{
arena_chunk_t *chunk;
#ifdef MALLOC_DEBUG
size_t ndirty;
ndirty = 0;
- RB_FOREACH(chunk, arena_chunk_tree_s, &arena->chunks) {
+ rb_foreach_begin(arena_chunk_t, link_all, &arena->chunks_all, chunk) {
ndirty += chunk->ndirty;
- }
+ } rb_foreach_end(arena_chunk_t, link_all, &arena->chunks_all, chunk)
assert(ndirty == arena->ndirty);
#endif
assert(arena->ndirty > opt_dirty_max);
#ifdef MALLOC_STATS
arena->stats.npurge++;
#endif
/*
* Iterate downward through chunks until enough dirty memory has been
* purged.
*/
- RB_FOREACH_REVERSE(chunk, arena_chunk_tree_s, &arena->chunks) {
+ rb_foreach_reverse_begin(arena_chunk_t, link_all, &arena->chunks_all,
+ chunk) {
if (chunk->ndirty > 0) {
size_t i;
for (i = chunk_npages - 1; i >=
arena_chunk_header_npages; i--) {
if (chunk->map[i] & CHUNK_MAP_DIRTY) {
size_t npages;
@@ -3219,31 +3212,32 @@ arena_purge(arena_t *arena)
#endif
#ifdef MALLOC_STATS
arena->stats.nmadvise++;
arena->stats.purged += npages;
#endif
}
}
}
- }
+ } rb_foreach_reverse_end(arena_chunk_t, link_all, &arena->chunks_all,
+ chunk)
}
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
{
arena_chunk_t *chunk;
extent_node_t *nodeA, *nodeB, *nodeC, key;
size_t size, run_ind, run_pages;
/* Remove run from runs_alloced_ad. */
key.addr = run;
- nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+ nodeB = extent_tree_ad_search(&arena->runs_alloced_ad, &key);
assert(nodeB != NULL);
- RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+ extent_tree_ad_remove(&arena->runs_alloced_ad, nodeB);
size = nodeB->size;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
>> pagesize_2pow);
assert(run_ind >= arena_chunk_header_npages);
assert(run_ind < (chunksize >> pagesize_2pow));
run_pages = (size >> pagesize_2pow);
@@ -3271,53 +3265,53 @@ arena_run_dalloc(arena_t *arena, arena_r
chunk->map[run_ind + i] |= (CHUNK_MAP_LARGE |
CHUNK_MAP_POS_MASK);
}
}
#endif
/* Try to coalesce forward. */
key.addr = (void *)((uintptr_t)run + size);
- nodeC = RB_NFIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+ nodeC = extent_tree_ad_nsearch(&arena->runs_avail_ad, &key);
if (nodeC != NULL && nodeC->addr == key.addr) {
/*
* Coalesce forward. This does not change the position within
* runs_avail_ad, so only remove/insert from/into
* runs_avail_szad.
*/
- RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeC);
+ extent_tree_szad_remove(&arena->runs_avail_szad, nodeC);
nodeC->addr = (void *)run;
nodeC->size += size;
- RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeC);
+ extent_tree_szad_insert(&arena->runs_avail_szad, nodeC);
arena_chunk_node_dealloc(chunk, nodeB);
nodeB = nodeC;
} else {
/*
* Coalescing forward failed, so insert nodeB into runs_avail_*.
*/
- RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
- RB_INSERT(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
+ extent_tree_szad_insert(&arena->runs_avail_szad, nodeB);
+ extent_tree_ad_insert(&arena->runs_avail_ad, nodeB);
}
/* Try to coalesce backward. */
- nodeA = RB_PREV(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
+ nodeA = extent_tree_ad_prev(&arena->runs_avail_ad, nodeB);
if (nodeA != NULL && (void *)((uintptr_t)nodeA->addr + nodeA->size) ==
(void *)run) {
/*
* Coalesce with previous run. This does not change nodeB's
* position within runs_avail_ad, so only remove/insert
* from/into runs_avail_szad.
*/
- RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeA);
- RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeA);
-
- RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+ extent_tree_szad_remove(&arena->runs_avail_szad, nodeA);
+ extent_tree_ad_remove(&arena->runs_avail_ad, nodeA);
+
+ extent_tree_szad_remove(&arena->runs_avail_szad, nodeB);
nodeB->addr = nodeA->addr;
nodeB->size += nodeA->size;
- RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+ extent_tree_szad_insert(&arena->runs_avail_szad, nodeB);
arena_chunk_node_dealloc(chunk, nodeA);
}
/* Deallocate chunk if it is now completely unused. */
if (chunk->pages_used == 0)
arena_chunk_dealloc(arena, chunk);
@@ -3345,17 +3339,17 @@ arena_run_trim_head(arena_t *arena, aren
/*
* Insert a node into runs_alloced_ad so that arena_run_dalloc() can
* treat the leading run as separately allocated.
*/
nodeA = arena_chunk_node_alloc(chunk);
nodeA->addr = (void *)run;
nodeA->size = oldsize - newsize;
- RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeA);
+ extent_tree_ad_insert(&arena->runs_alloced_ad, nodeA);
arena_run_dalloc(arena, (arena_run_t *)run, false);
}
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeA,
arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
{
@@ -3373,32 +3367,33 @@ arena_run_trim_tail(arena_t *arena, aren
/*
* Insert a node into runs_alloced_ad so that arena_run_dalloc() can
* treat the trailing run as separately allocated.
*/
nodeB = arena_chunk_node_alloc(chunk);
nodeB->addr = (void *)((uintptr_t)run + newsize);
nodeB->size = oldsize - newsize;
- RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+ extent_tree_ad_insert(&arena->runs_alloced_ad, nodeB);
arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
dirty);
}
static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
arena_run_t *run;
unsigned i, remainder;
/* Look for a usable run. */
- if ((run = RB_MIN(arena_run_tree_s, &bin->runs)) != NULL) {
+ run = arena_run_tree_first(&bin->runs);
+ if (run != NULL) {
/* run is guaranteed to have available space. */
- RB_REMOVE(arena_run_tree_s, &bin->runs, run);
+ arena_run_tree_remove(&bin->runs, run);
#ifdef MALLOC_STATS
bin->stats.reruns++;
#endif
return (run);
}
/* No existing runs have any space available. */
/* Allocate a new run. */
@@ -3781,30 +3776,30 @@ arena_palloc(arena_t *arena, size_t alig
assert((offset & pagesize_mask) == 0);
assert(offset < alloc_size);
if (offset == 0) {
/*
* Update the run's node in runs_alloced_ad. Its position
* does not change.
*/
key.addr = ret;
- node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+ node = extent_tree_ad_search(&arena->runs_alloced_ad, &key);
assert(node != NULL);
arena_run_trim_tail(arena, chunk, node, ret, alloc_size, size,
false);
} else {
size_t leadsize, trailsize;
/*
* Update the run's node in runs_alloced_ad. Its position
* does not change.
*/
key.addr = ret;
- node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+ node = extent_tree_ad_search(&arena->runs_alloced_ad, &key);
assert(node != NULL);
leadsize = alignment - offset;
if (leadsize > 0) {
arena_run_trim_head(arena, chunk, node, ret, alloc_size,
alloc_size - leadsize);
ret = (void *)((uintptr_t)ret + leadsize);
}
@@ -3957,17 +3952,17 @@ arena_salloc(const void *ptr)
arena_t *arena = chunk->arena;
extent_node_t *node, key;
/* Large allocation size is in the extent tree. */
assert((mapelm & CHUNK_MAP_POS_MASK) == 0);
arena = chunk->arena;
malloc_spin_lock(&arena->lock);
key.addr = (void *)ptr;
- node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+ node = extent_tree_ad_search(&arena->runs_alloced_ad, &key);
assert(node != NULL);
ret = node->size;
malloc_spin_unlock(&arena->lock);
}
return (ret);
}
@@ -4011,18 +4006,18 @@ isalloc_validate(const void *ptr)
malloc_spin_unlock(&arenas_lock);
}
for (i = 0; i < narenas; i++) {
arena = arenas[i];
if (arena != NULL) {
/* Make sure ptr is within a chunk. */
malloc_spin_lock(&arena->lock);
- if (RB_FIND(arena_chunk_tree_s, &arena->chunks,
- chunk) == chunk) {
+ if (arena_chunk_tree_all_search(
+ &arena->chunks_all, chunk) == chunk) {
malloc_spin_unlock(&arena->lock);
/*
* We only lock in arena_salloc() for
* large objects, so don't worry about
* the overhead of possibly locking
* twice.
*/
assert(chunk->arena->magic ==
@@ -4036,17 +4031,17 @@ isalloc_validate(const void *ptr)
} else {
size_t ret;
extent_node_t *node;
extent_node_t key;
/* Chunk. */
key.addr = (void *)chunk;
malloc_mutex_lock(&huge_mtx);
- node = RB_FIND(extent_tree_ad_s, &huge, &key);
+ node = extent_tree_ad_search(&huge, &key);
if (node != NULL)
ret = node->size;
else
ret = 0;
malloc_mutex_unlock(&huge_mtx);
return (ret);
}
}
@@ -4070,17 +4065,17 @@ isalloc(const void *ptr)
extent_node_t *node, key;
/* Chunk (huge allocation). */
malloc_mutex_lock(&huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = __DECONST(void *, ptr);
- node = RB_FIND(extent_tree_ad_s, &huge, &key);
+ node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
ret = node->size;
malloc_mutex_unlock(&huge_mtx);
}
return (ret);
@@ -4114,17 +4109,17 @@ arena_dalloc_small(arena_t *arena, arena
if (run == bin->runcur)
bin->runcur = NULL;
else if (bin->nregs != 1) {
/*
* This block's conditional is necessary because if the
* run only contains one region, then it never gets
* inserted into the non-full runs tree.
*/
- RB_REMOVE(arena_run_tree_s, &bin->runs, run);
+ arena_run_tree_remove(&bin->runs, run);
}
#ifdef MALLOC_DEBUG
run->magic = 0;
#endif
VALGRIND_FREELIKE_BLOCK(run, 0);
arena_run_dalloc(arena, run, true);
#ifdef MALLOC_STATS
bin->stats.curruns--;
@@ -4135,22 +4130,21 @@ arena_dalloc_small(arena_t *arena, arena
* non-full run, if one exists.
*/
if (bin->runcur == NULL)
bin->runcur = run;
else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
/* Switch runcur. */
if (bin->runcur->nfree > 0) {
/* Insert runcur. */
- RB_INSERT(arena_run_tree_s, &bin->runs,
- bin->runcur);
+ arena_run_tree_insert(&bin->runs, bin->runcur);
}
bin->runcur = run;
} else
- RB_INSERT(arena_run_tree_s, &bin->runs, run);
+ arena_run_tree_insert(&bin->runs, run);
}
#ifdef MALLOC_STATS
arena->stats.allocated_small -= size;
arena->stats.ndalloc_small++;
#endif
}
static void
@@ -4164,18 +4158,17 @@ arena_dalloc_large(arena_t *arena, arena
if (opt_junk)
#endif
#endif
{
extent_node_t *node, key;
size_t size;
key.addr = ptr;
- node = RB_FIND(extent_tree_ad_s,
- &arena->runs_alloced_ad, &key);
+ node = extent_tree_ad_search(&arena->runs_alloced_ad, &key);
assert(node != NULL);
size = node->size;
#ifdef MALLOC_FILL
#ifdef MALLOC_STATS
if (opt_junk)
#endif
memset(ptr, 0x5a, size);
#endif
@@ -4244,17 +4237,17 @@ arena_ralloc_large_shrink(arena_t *arena
* allocations.
*/
key.addr = (void *)((uintptr_t)ptr);
#ifdef MALLOC_BALANCE
arena_lock_balance(arena);
#else
malloc_spin_lock(&arena->lock);
#endif
- node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+ node = extent_tree_ad_search(&arena->runs_alloced_ad, &key);
assert(node != NULL);
arena_run_trim_tail(arena, chunk, node, (arena_run_t *)ptr, oldsize,
size, true);
#ifdef MALLOC_STATS
arena->stats.allocated_large -= oldsize - size;
#endif
malloc_spin_unlock(&arena->lock);
}
@@ -4268,43 +4261,41 @@ arena_ralloc_large_grow(arena_t *arena,
/* Try to extend the run. */
assert(size > oldsize);
key.addr = (void *)((uintptr_t)ptr + oldsize);
#ifdef MALLOC_BALANCE
arena_lock_balance(arena);
#else
malloc_spin_lock(&arena->lock);
#endif
- nodeC = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+ nodeC = extent_tree_ad_search(&arena->runs_avail_ad, &key);
if (nodeC != NULL && oldsize + nodeC->size >= size) {
extent_node_t *nodeA, *nodeB;
/*
* The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing
* allocation. This results in a bit more tree manipulation
* than absolutely necessary, but it substantially simplifies
* the code.
*/
arena_run_split(arena, (arena_run_t *)nodeC->addr, size -
oldsize, false, false);
key.addr = ptr;
- nodeA = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
- &key);
+ nodeA = extent_tree_ad_search(&arena->runs_alloced_ad, &key);
assert(nodeA != NULL);
key.addr = (void *)((uintptr_t)ptr + oldsize);
- nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
- &key);
+ nodeB = extent_tree_ad_search(&arena->runs_alloced_ad, &key);
assert(nodeB != NULL);
nodeA->size += nodeB->size;
- RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+ extent_tree_ad_remove(&arena->runs_alloced_ad, nodeB);
arena_chunk_node_dealloc(chunk, nodeB);
#ifdef MALLOC_STATS
arena->stats.allocated_large += size - oldsize;
#endif
malloc_spin_unlock(&arena->lock);
return (false);
}
@@ -4465,68 +4456,68 @@ arena_new(arena_t *arena)
if (malloc_spin_init(&arena->lock))
return (true);
#ifdef MALLOC_STATS
memset(&arena->stats, 0, sizeof(arena_stats_t));
#endif
/* Initialize chunks. */
- RB_INIT(&arena->chunks);
+ arena_chunk_tree_all_new(&arena->chunks_all);
arena->spare = NULL;
arena->ndirty = 0;
- RB_INIT(&arena->runs_avail_szad);
- RB_INIT(&arena->runs_avail_ad);
- RB_INIT(&arena->runs_alloced_ad);
+ extent_tree_szad_new(&arena->runs_avail_szad);
+ extent_tree_ad_new(&arena->runs_avail_ad);
+ extent_tree_ad_new(&arena->runs_alloced_ad);
#ifdef MALLOC_BALANCE
arena->contention = 0;
#endif
/* Initialize bins. */
prev_run_size = pagesize;
/* (2^n)-spaced tiny bins. */
for (i = 0; i < ntbins; i++) {
bin = &arena->bins[i];
bin->runcur = NULL;
- RB_INIT(&bin->runs);
+ arena_run_tree_new(&bin->runs);
bin->reg_size = (1U << (TINY_MIN_2POW + i));
prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
#ifdef MALLOC_STATS
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
}
/* Quantum-spaced bins. */
for (; i < ntbins + nqbins; i++) {
bin = &arena->bins[i];
bin->runcur = NULL;
- RB_INIT(&bin->runs);
+ arena_run_tree_new(&bin->runs);
bin->reg_size = quantum * (i - ntbins + 1);
pow2_size = pow2_ceil(quantum * (i - ntbins + 1));
prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
#ifdef MALLOC_STATS
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
}
/* (2^n)-spaced sub-page bins. */
for (; i < ntbins + nqbins + nsbins; i++) {
bin = &arena->bins[i];
bin->runcur = NULL;
- RB_INIT(&bin->runs);
+ arena_run_tree_new(&bin->runs);
bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
#ifdef MALLOC_STATS
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
#endif
@@ -4610,17 +4601,17 @@ huge_malloc(size_t size, bool zero)
#ifdef MALLOC_DECOMMIT
psize = PAGE_CEILING(size);
node->size = psize;
#else
node->size = csize;
#endif
malloc_mutex_lock(&huge_mtx);
- RB_INSERT(extent_tree_ad_s, &huge, node);
+ extent_tree_ad_insert(&huge, node);
#ifdef MALLOC_STATS
huge_nmalloc++;
# ifdef MALLOC_DECOMMIT
huge_allocated += psize;
# else
huge_allocated += csize;
# endif
#endif
@@ -4753,17 +4744,17 @@ huge_palloc(size_t alignment, size_t siz
#ifdef MALLOC_DECOMMIT
psize = PAGE_CEILING(size);
node->size = psize;
#else
node->size = chunk_size;
#endif
malloc_mutex_lock(&huge_mtx);
- RB_INSERT(extent_tree_ad_s, &huge, node);
+ extent_tree_ad_insert(&huge, node);
#ifdef MALLOC_STATS
huge_nmalloc++;
# ifdef MALLOC_DECOMMIT
huge_allocated += psize;
# else
huge_allocated += chunk_size;
# endif
#endif
@@ -4824,34 +4815,34 @@ huge_ralloc(void *ptr, size_t size, size
extent_node_t *node, key;
pages_decommit((void *)((uintptr_t)ptr + psize),
oldsize - psize);
/* Update recorded size. */
malloc_mutex_lock(&huge_mtx);
key.addr = __DECONST(void *, ptr);
- node = RB_FIND(extent_tree_ad_s, &huge, &key);
+ node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->size == oldsize);
# ifdef MALLOC_STATS
huge_allocated -= oldsize - psize;
# endif
node->size = psize;
malloc_mutex_unlock(&huge_mtx);
} else if (psize > oldsize) {
extent_node_t *node, key;
pages_commit((void *)((uintptr_t)ptr + oldsize),
psize - oldsize);
/* Update recorded size. */
malloc_mutex_lock(&huge_mtx);
key.addr = __DECONST(void *, ptr);
- node = RB_FIND(extent_tree_ad_s, &huge, &key);
+ node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->size == oldsize);
# ifdef MALLOC_STATS
huge_allocated += psize - oldsize;
# endif
node->size = psize;
malloc_mutex_unlock(&huge_mtx);
}
@@ -4889,20 +4880,20 @@ static void
huge_dalloc(void *ptr)
{
extent_node_t *node, key;
malloc_mutex_lock(&huge_mtx);
/* Extract from tree of huge allocations. */
key.addr = ptr;
- node = RB_FIND(extent_tree_ad_s, &huge, &key);
+ node = extent_tree_ad_search(&huge, &key);
assert(node != NULL);
assert(node->addr == ptr);
- RB_REMOVE(extent_tree_ad_s, &huge, node);
+ extent_tree_ad_remove(&huge, node);
#ifdef MALLOC_STATS
huge_ndalloc++;
huge_allocated -= node->size;
#endif
malloc_mutex_unlock(&huge_mtx);
@@ -5558,24 +5549,24 @@ MALLOC_OUT:
/* Various sanity checks that regard configuration. */
assert(quantum >= sizeof(void *));
assert(quantum <= pagesize);
assert(chunksize >= pagesize);
assert(quantum * 4 <= chunksize);
/* Initialize chunks data. */
malloc_mutex_init(&huge_mtx);
- RB_INIT(&huge);
+ extent_tree_ad_new(&huge);
#ifdef MALLOC_DSS
malloc_mutex_init(&dss_mtx);
dss_base = sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
- RB_INIT(&dss_chunks_szad);
- RB_INIT(&dss_chunks_ad);
+ extent_tree_szad_new(&dss_chunks_szad);
+ extent_tree_ad_new(&dss_chunks_ad);
#endif
#ifdef MALLOC_STATS
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
#endif
/* Initialize base allocation data structures. */
@@ -5695,17 +5686,17 @@ MALLOC_OUT:
TlsSetValue(tlsIndex, arenas[0]);
#else
arenas_map = arenas[0];
#endif
#endif
/*
* Seed here for the initial thread, since choose_arena_hard() is only
- * called for other threads. The seed values don't really matter.
+ * called for other threads. The seed value doesn't really matter.
*/
#ifdef MALLOC_BALANCE
SPRN(balance, 42);
#endif
malloc_spin_init(&arenas_lock);
malloc_initialized = true;
@@ -6118,25 +6109,27 @@ jemalloc_stats(jemalloc_stats_t *stats)
arena_t *arena = arenas[i];
if (arena != NULL) {
arena_chunk_t *chunk;
malloc_spin_lock(&arena->lock);
stats->allocated += arena->stats.allocated_small;
stats->allocated += arena->stats.allocated_large;
#ifdef MALLOC_DECOMMIT
- RB_FOREACH(chunk, arena_chunk_tree_s, &arena->chunks) {
+ rb_foreach_begin(arena_chunk_t, link_all,
+ &arena->chunks_all, chunk) {
size_t j;
for (j = 0; j < chunk_npages; j++) {
if ((chunk->map[j] &
CHUNK_MAP_DECOMMITTED) == 0)
stats->committed += pagesize;
}
- }
+ } rb_foreach_end(arena_chunk_t, link_all,
+ &arena->chunks_all, chunk)
#endif
stats->dirty += (arena->ndirty << pagesize_2pow);
malloc_spin_unlock(&arena->lock);
}
}
#ifndef MALLOC_DECOMMIT
stats->committed = stats->mapped;
new file mode 100644
--- /dev/null
+++ b/memory/jemalloc/rb.h
@@ -0,0 +1,982 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2008 Jason Evans <jasone@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice(s), this list of conditions and the following disclaimer
+ * unmodified other than the allowable addition of one or more
+ * copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice(s), this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************
+ *
+ * cpp macro implementation of left-leaning red-black trees.
+ *
+ * Usage:
+ *
+ * (Optional.)
+ * #define SIZEOF_PTR ...
+ * #define SIZEOF_PTR_2POW ...
+ * #define RB_NO_C99_VARARRAYS
+ *
+ * (Optional, see assert(3).)
+ * #define NDEBUG
+ *
+ * (Required.)
+ * #include <assert.h>
+ * #include <rb.h>
+ * ...
+ *
+ * All operations are done non-recursively. Parent pointers are not used, and
+ * color bits are stored in the least significant bit of right-child pointers,
+ * thus making node linkage as compact as is possible for red-black trees.
+ *
+ * Some macros use a comparison function pointer, which is expected to have the
+ * following prototype:
+ *
+ * int (a_cmp *)(a_type *a_node, a_type *a_other);
+ * ^^^^^^
+ * or a_key
+ *
+ * Interpretation of comparison function return values:
+ *
+ * -1 : a_node < a_other
+ * 0 : a_node == a_other
+ * 1 : a_node > a_other
+ *
+ * In all cases, the a_node or a_key macro argument is the first argument to the
+ * comparison function, which makes it possible to write comparison functions
+ * that treat the first argument specially.
+ *
+ ******************************************************************************/
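
A hypothetical comparator matching this contract (the names are illustrative,
not part of rb.h; rb_node() is defined just below):

	typedef struct mynode_s mynode_t;
	struct mynode_s {
		rb_node(mynode_t) link;	/* Intrusive tree linkage. */
		int key;
	};

	/* Returns -1, 0, or 1; the node (or key) being sought is always
	 * passed as the first argument. */
	static int
	mynode_cmp(mynode_t *a, mynode_t *b)
	{
		return ((a->key > b->key) - (a->key < b->key));
	}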
+
+#ifndef RB_H_
+#define RB_H_
+
+#if 0
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 178995 2008-05-14 18:33:13Z jasone $");
+#endif
+
+/* Node structure. */
+#define rb_node(a_type) \
+struct { \
+ a_type *rbn_left; \
+ a_type *rbn_right_red; \
+}
+
+/* Root structure. */
+#define rb_tree(a_type) \
+struct { \
+ a_type *rbt_root; \
+ a_type rbt_nil; \
+}
+
+/* Left accessors. */
+#define rbp_left_get(a_type, a_field, a_node) \
+ ((a_node)->a_field.rbn_left)
+#define rbp_left_set(a_type, a_field, a_node, a_left) do { \
+ (a_node)->a_field.rbn_left = a_left; \
+} while (0)
+
+/* Right accessors. */
+#define rbp_right_get(a_type, a_field, a_node) \
+ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
+ & ((ssize_t)-2)))
+#define rbp_right_set(a_type, a_field, a_node, a_right) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
+ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
+} while (0)
+
+/* Color accessors. */
+#define rbp_red_get(a_type, a_field, a_node) \
+ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
+ & ((size_t)1)))
+#define rbp_color_set(a_type, a_field, a_node, a_red) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
+ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
+ | ((ssize_t)a_red)); \
+} while (0)
+#define rbp_red_set(a_type, a_field, a_node) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
+ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \
+} while (0)
+#define rbp_black_set(a_type, a_field, a_node) do { \
+ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
+ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
+} while (0)
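
The accessors above pack the node's color into bit 0 of the right-child
pointer, which is otherwise zero because nodes are at least pointer-aligned.
A self-contained sketch of the tagging trick (hypothetical code, not part of
rb.h):

	#include <assert.h>
	#include <stdint.h>

	typedef struct demo_node_s demo_node_t;
	struct demo_node_s {
		demo_node_t *right_red;	/* Right child; color in bit 0. */
	};

	int
	main(void)
	{
		demo_node_t parent, child;

		/* Store the child pointer with the red bit set. */
		parent.right_red = (demo_node_t *)((uintptr_t)&child | 1);
		/* rbp_red_get(): test the low bit. */
		assert(((uintptr_t)parent.right_red & 1) == 1);
		/* rbp_right_get(): mask the low bit off to recover the
		 * pointer. */
		assert((demo_node_t *)((uintptr_t)parent.right_red &
		    ~(uintptr_t)1) == &child);
		return (0);
	}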
+
+/* Node initializer. */
+#define rbp_node_new(a_type, a_field, a_tree, a_node) do { \
+ rbp_left_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil); \
+ rbp_right_set(a_type, a_field, (a_node), &(a_tree)->rbt_nil); \
+ rbp_red_set(a_type, a_field, (a_node)); \
+} while (0)
+
+/* Tree initializer. */
+#define rb_new(a_type, a_field, a_tree) do { \
+ (a_tree)->rbt_root = &(a_tree)->rbt_nil; \
+ rbp_node_new(a_type, a_field, a_tree, &(a_tree)->rbt_nil); \
+ rbp_black_set(a_type, a_field, &(a_tree)->rbt_nil); \
+} while (0)
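
Continuing the mynode_t sketch from above, a hypothetical tree declaration and
initialization:

	typedef rb_tree(mynode_t) mytree_t;

	static mytree_t tree;

	static void
	tree_init(void)
	{
		/* Points rbt_root at the embedded nil sentinel and colors
		 * the sentinel black. */
		rb_new(mynode_t, link, &tree);
	}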
+
+/* Tree operations. */
+#define rbp_black_height(a_type, a_field, a_tree, r_height) do { \
+ a_type *rbp_bh_t; \
+ for (rbp_bh_t = (a_tree)->rbt_root, (r_height) = 0; \
+ rbp_bh_t != &(a_tree)->rbt_nil; \
+ rbp_bh_t = rbp_left_get(a_type, a_field, rbp_bh_t)) { \
+ if (rbp_red_get(a_type, a_field, rbp_bh_t) == false) { \
+ (r_height)++; \
+ } \
+ } \
+} while (0)
+
+#define rbp_first(a_type, a_field, a_tree, a_root, r_node) do { \
+ for ((r_node) = (a_root); \
+ rbp_left_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil; \
+ (r_node) = rbp_left_get(a_type, a_field, (r_node))) { \
+ } \
+} while (0)
+
+#define rbp_last(a_type, a_field, a_tree, a_root, r_node) do { \
+ for ((r_node) = (a_root); \
+ rbp_right_get(a_type, a_field, (r_node)) != &(a_tree)->rbt_nil; \
+ (r_node) = rbp_right_get(a_type, a_field, (r_node))) { \
+ } \
+} while (0)
+
+#define rbp_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+ if (rbp_right_get(a_type, a_field, (a_node)) \
+ != &(a_tree)->rbt_nil) { \
+ rbp_first(a_type, a_field, a_tree, rbp_right_get(a_type, \
+ a_field, (a_node)), (r_node)); \
+ } else { \
+ a_type *rbp_n_t = (a_tree)->rbt_root; \
+ assert(rbp_n_t != &(a_tree)->rbt_nil); \
+ (r_node) = &(a_tree)->rbt_nil; \
+ while (true) { \
+ int rbp_n_cmp = (a_cmp)((a_node), rbp_n_t); \
+ if (rbp_n_cmp < 0) { \
+ (r_node) = rbp_n_t; \
+ rbp_n_t = rbp_left_get(a_type, a_field, rbp_n_t); \
+ } else if (rbp_n_cmp > 0) { \
+ rbp_n_t = rbp_right_get(a_type, a_field, rbp_n_t); \
+ } else { \
+ break; \
+ } \
+ assert(rbp_n_t != &(a_tree)->rbt_nil); \
+ } \
+ } \
+} while (0)
+
+#define rbp_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+ if (rbp_left_get(a_type, a_field, (a_node)) != &(a_tree)->rbt_nil) {\
+ rbp_last(a_type, a_field, a_tree, rbp_left_get(a_type, \
+ a_field, (a_node)), (r_node)); \
+ } else { \
+ a_type *rbp_p_t = (a_tree)->rbt_root; \
+ assert(rbp_p_t != &(a_tree)->rbt_nil); \
+ (r_node) = &(a_tree)->rbt_nil; \
+ while (true) { \
+ int rbp_p_cmp = (a_cmp)((a_node), rbp_p_t); \
+ if (rbp_p_cmp < 0) { \
+ rbp_p_t = rbp_left_get(a_type, a_field, rbp_p_t); \
+ } else if (rbp_p_cmp > 0) { \
+ (r_node) = rbp_p_t; \
+ rbp_p_t = rbp_right_get(a_type, a_field, rbp_p_t); \
+ } else { \
+ break; \
+ } \
+ assert(rbp_p_t != &(a_tree)->rbt_nil); \
+ } \
+ } \
+} while (0)
+
+#define rb_first(a_type, a_field, a_tree, r_node) do { \
+ rbp_first(a_type, a_field, a_tree, (a_tree)->rbt_root, (r_node)); \
+ if ((r_node) == &(a_tree)->rbt_nil) { \
+ (r_node) = NULL; \
+ } \
+} while (0)
+
+#define rb_last(a_type, a_field, a_tree, r_node) do { \
+ rbp_last(a_type, a_field, a_tree, (a_tree)->rbt_root, r_node); \
+ if ((r_node) == &(a_tree)->rbt_nil) { \
+ (r_node) = NULL; \
+ } \
+} while (0)
+
+#define rb_next(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+ rbp_next(a_type, a_field, a_cmp, a_tree, (a_node), (r_node)); \
+ if ((r_node) == &(a_tree)->rbt_nil) { \
+ (r_node) = NULL; \
+ } \
+} while (0)
+
+#define rb_prev(a_type, a_field, a_cmp, a_tree, a_node, r_node) do { \
+ rbp_prev(a_type, a_field, a_cmp, a_tree, (a_node), (r_node)); \
+ if ((r_node) == &(a_tree)->rbt_nil) { \
+ (r_node) = NULL; \
+ } \
+} while (0)
+
+#define rb_search(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
+ int rbp_se_cmp; \
+ (r_node) = (a_tree)->rbt_root; \
+ while ((r_node) != &(a_tree)->rbt_nil \
+ && (rbp_se_cmp = (a_cmp)((a_key), (r_node))) != 0) { \
+ if (rbp_se_cmp < 0) { \
+ (r_node) = rbp_left_get(a_type, a_field, (r_node)); \
+ } else { \
+ (r_node) = rbp_right_get(a_type, a_field, (r_node)); \
+ } \
+ } \
+ if ((r_node) == &(a_tree)->rbt_nil) { \
+ (r_node) = NULL; \
+ } \
+} while (0)
+
+/*
+ * Find a match if it exists. Otherwise, find the next greater node, if one
+ * exists.
+ */
+#define rb_nsearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
+ a_type *rbp_ns_t = (a_tree)->rbt_root; \
+ (r_node) = NULL; \
+ while (rbp_ns_t != &(a_tree)->rbt_nil) { \
+ int rbp_ns_cmp = (a_cmp)((a_key), rbp_ns_t); \
+ if (rbp_ns_cmp < 0) { \
+ (r_node) = rbp_ns_t; \
+ rbp_ns_t = rbp_left_get(a_type, a_field, rbp_ns_t); \
+ } else if (rbp_ns_cmp > 0) { \
+ rbp_ns_t = rbp_right_get(a_type, a_field, rbp_ns_t); \
+ } else { \
+ (r_node) = rbp_ns_t; \
+ break; \
+ } \
+ } \
+} while (0)
+
+/*
+ * Find a match if it exists. Otherwise, find the previous lesser node, if one
+ * exists.
+ */
+#define rb_psearch(a_type, a_field, a_cmp, a_tree, a_key, r_node) do { \
+ a_type *rbp_ps_t = (a_tree)->rbt_root; \
+ (r_node) = NULL; \
+ while (rbp_ps_t != &(a_tree)->rbt_nil) { \
+ int rbp_ps_cmp = (a_cmp)((a_key), rbp_ps_t); \
+ if (rbp_ps_cmp < 0) { \
+ rbp_ps_t = rbp_left_get(a_type, a_field, rbp_ps_t); \
+ } else if (rbp_ps_cmp > 0) { \
+ (r_node) = rbp_ps_t; \
+ rbp_ps_t = rbp_right_get(a_type, a_field, rbp_ps_t); \
+ } else { \
+ (r_node) = rbp_ps_t; \
+ break; \
+ } \
+ } \
+} while (0)
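
A hypothetical lookup continuing the mynode_t sketch (rb_search() yields NULL
on a miss; rb_nsearch() would instead fall back to the next greater node):

	static mynode_t *
	tree_lookup(int key)
	{
		mynode_t search_key, *result;

		search_key.key = key;
		rb_search(mynode_t, link, mynode_cmp, &tree, &search_key,
		    result);
		return (result);	/* NULL if no exact match exists. */
	}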
+
+#define rbp_rotate_left(a_type, a_field, a_node, r_node) do { \
+ (r_node) = rbp_right_get(a_type, a_field, (a_node)); \
+ rbp_right_set(a_type, a_field, (a_node), \
+ rbp_left_get(a_type, a_field, (r_node))); \
+ rbp_left_set(a_type, a_field, (r_node), (a_node)); \
+} while (0)
+
+#define rbp_rotate_right(a_type, a_field, a_node, r_node) do { \
+ (r_node) = rbp_left_get(a_type, a_field, (a_node)); \
+ rbp_left_set(a_type, a_field, (a_node), \
+ rbp_right_get(a_type, a_field, (r_node))); \
+ rbp_right_set(a_type, a_field, (r_node), (a_node)); \
+} while (0)
+
+#define rbp_lean_left(a_type, a_field, a_node, r_node) do { \
+ bool rbp_ll_red; \
+ rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
+ rbp_ll_red = rbp_red_get(a_type, a_field, (a_node)); \
+ rbp_color_set(a_type, a_field, (r_node), rbp_ll_red); \
+ rbp_red_set(a_type, a_field, (a_node)); \
+} while (0)
+
+#define rbp_lean_right(a_type, a_field, a_node, r_node) do { \
+ bool rbp_lr_red; \
+ rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
+ rbp_lr_red = rbp_red_get(a_type, a_field, (a_node)); \
+ rbp_color_set(a_type, a_field, (r_node), rbp_lr_red); \
+ rbp_red_set(a_type, a_field, (a_node)); \
+} while (0)
+
+#define rbp_move_red_left(a_type, a_field, a_node, r_node) do { \
+ a_type *rbp_mrl_t, *rbp_mrl_u; \
+ rbp_mrl_t = rbp_left_get(a_type, a_field, (a_node)); \
+ rbp_red_set(a_type, a_field, rbp_mrl_t); \
+ rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node)); \
+ rbp_mrl_u = rbp_left_get(a_type, a_field, rbp_mrl_t); \
+ if (rbp_red_get(a_type, a_field, rbp_mrl_u)) { \
+ rbp_rotate_right(a_type, a_field, rbp_mrl_t, rbp_mrl_u); \
+ rbp_right_set(a_type, a_field, (a_node), rbp_mrl_u); \
+ rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
+ rbp_mrl_t = rbp_right_get(a_type, a_field, (a_node)); \
+ if (rbp_red_get(a_type, a_field, rbp_mrl_t)) { \
+ rbp_black_set(a_type, a_field, rbp_mrl_t); \
+ rbp_red_set(a_type, a_field, (a_node)); \
+ rbp_rotate_left(a_type, a_field, (a_node), rbp_mrl_t); \
+ rbp_left_set(a_type, a_field, (r_node), rbp_mrl_t); \
+ } else { \
+ rbp_black_set(a_type, a_field, (a_node)); \
+ } \
+ } else { \
+ rbp_red_set(a_type, a_field, (a_node)); \
+ rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
+ } \
+} while (0)
+
+#define rbp_move_red_right(a_type, a_field, a_node, r_node) do { \
+ a_type *rbp_mrr_t; \
+ rbp_mrr_t = rbp_left_get(a_type, a_field, (a_node)); \
+ if (rbp_red_get(a_type, a_field, rbp_mrr_t)) { \
+ a_type *rbp_mrr_u, *rbp_mrr_v; \
+ rbp_mrr_u = rbp_right_get(a_type, a_field, rbp_mrr_t); \
+ rbp_mrr_v = rbp_left_get(a_type, a_field, rbp_mrr_u); \
+ if (rbp_red_get(a_type, a_field, rbp_mrr_v)) { \
+ rbp_color_set(a_type, a_field, rbp_mrr_u, \
+ rbp_red_get(a_type, a_field, (a_node))); \
+ rbp_black_set(a_type, a_field, rbp_mrr_v); \
+ rbp_rotate_left(a_type, a_field, rbp_mrr_t, rbp_mrr_u); \
+ rbp_left_set(a_type, a_field, (a_node), rbp_mrr_u); \
+ rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
+ rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t); \
+ rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t); \
+ } else { \
+ rbp_color_set(a_type, a_field, rbp_mrr_t, \
+ rbp_red_get(a_type, a_field, (a_node))); \
+ rbp_red_set(a_type, a_field, rbp_mrr_u); \
+ rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
+ rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t); \
+ rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t); \
+ } \
+ rbp_red_set(a_type, a_field, (a_node)); \
+ } else { \
+ rbp_red_set(a_type, a_field, rbp_mrr_t); \
+ rbp_mrr_t = rbp_left_get(a_type, a_field, rbp_mrr_t); \
+ if (rbp_red_get(a_type, a_field, rbp_mrr_t)) { \
+ rbp_black_set(a_type, a_field, rbp_mrr_t); \
+ rbp_rotate_right(a_type, a_field, (a_node), (r_node)); \
+ rbp_rotate_left(a_type, a_field, (a_node), rbp_mrr_t); \
+ rbp_right_set(a_type, a_field, (r_node), rbp_mrr_t); \
+ } else { \
+ rbp_rotate_left(a_type, a_field, (a_node), (r_node)); \
+ } \
+ } \
+} while (0)
+
+#define rb_insert(a_type, a_field, a_cmp, a_tree, a_node) do { \
+ a_type rbp_i_s; \
+ a_type *rbp_i_g, *rbp_i_p, *rbp_i_c, *rbp_i_t, *rbp_i_u; \
+ int rbp_i_cmp = 0; \
+ rbp_i_g = &(a_tree)->rbt_nil; \
+ rbp_left_set(a_type, a_field, &rbp_i_s, (a_tree)->rbt_root); \
+ rbp_right_set(a_type, a_field, &rbp_i_s, &(a_tree)->rbt_nil); \
+ rbp_black_set(a_type, a_field, &rbp_i_s); \
+ rbp_i_p = &rbp_i_s; \
+ rbp_i_c = (a_tree)->rbt_root; \
+ /* Iteratively search down the tree for the insertion point, */\
+ /* splitting 4-nodes as they are encountered. At the end of each */\
+ /* iteration, rbp_i_g->rbp_i_p->rbp_i_c is a 3-level path down */\
+ /* the tree, assuming a sufficiently deep tree. */\
+ while (rbp_i_c != &(a_tree)->rbt_nil) { \
+ rbp_i_t = rbp_left_get(a_type, a_field, rbp_i_c); \
+ rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t); \
+ if (rbp_red_get(a_type, a_field, rbp_i_t) \
+ && rbp_red_get(a_type, a_field, rbp_i_u)) { \
+ /* rbp_i_c is the top of a logical 4-node, so split it. */\
+ /* This iteration does not move down the tree, due to the */\
+ /* disruptiveness of node splitting. */\
+ /* */\
+ /* Rotate right. */\
+ rbp_rotate_right(a_type, a_field, rbp_i_c, rbp_i_t); \
+ /* Pass red links up one level. */\
+ rbp_i_u = rbp_left_get(a_type, a_field, rbp_i_t); \
+ rbp_black_set(a_type, a_field, rbp_i_u); \
+ if (rbp_left_get(a_type, a_field, rbp_i_p) == rbp_i_c) { \
+ rbp_left_set(a_type, a_field, rbp_i_p, rbp_i_t); \
+ rbp_i_c = rbp_i_t; \
+ } else { \
+ /* rbp_i_c was the right child of rbp_i_p, so rotate */\
+ /* left in order to maintain the left-leaning */\
+ /* invariant. */\
+ assert(rbp_right_get(a_type, a_field, rbp_i_p) \
+ == rbp_i_c); \
+ rbp_right_set(a_type, a_field, rbp_i_p, rbp_i_t); \
+ rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_u); \
+ if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
+ rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_u); \
+ } else { \
+ assert(rbp_right_get(a_type, a_field, rbp_i_g) \
+ == rbp_i_p); \
+ rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_u); \
+ } \
+ rbp_i_p = rbp_i_u; \
+ rbp_i_cmp = (a_cmp)((a_node), rbp_i_p); \
+ if (rbp_i_cmp < 0) { \
+ rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_p); \
+ } else { \
+ assert(rbp_i_cmp > 0); \
+ rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_p); \
+ } \
+ continue; \
+ } \
+ } \
+ rbp_i_g = rbp_i_p; \
+ rbp_i_p = rbp_i_c; \
+ rbp_i_cmp = (a_cmp)((a_node), rbp_i_c); \
+ if (rbp_i_cmp < 0) { \
+ rbp_i_c = rbp_left_get(a_type, a_field, rbp_i_c); \
+ } else { \
+ assert(rbp_i_cmp > 0); \
+ rbp_i_c = rbp_right_get(a_type, a_field, rbp_i_c); \
+ } \
+ } \
+ /* rbp_i_p now refers to the node under which to insert. */\
+ rbp_node_new(a_type, a_field, a_tree, (a_node)); \
+ if (rbp_i_cmp > 0) { \
+ rbp_right_set(a_type, a_field, rbp_i_p, (a_node)); \
+ rbp_lean_left(a_type, a_field, rbp_i_p, rbp_i_t); \
+ if (rbp_left_get(a_type, a_field, rbp_i_g) == rbp_i_p) { \
+ rbp_left_set(a_type, a_field, rbp_i_g, rbp_i_t); \
+ } else if (rbp_right_get(a_type, a_field, rbp_i_g) == rbp_i_p) {\
+ rbp_right_set(a_type, a_field, rbp_i_g, rbp_i_t); \
+ } \
+ } else { \
+ rbp_left_set(a_type, a_field, rbp_i_p, (a_node)); \
+ } \
+ /* Update the root and make sure that it is black. */\
+ (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_i_s); \
+ rbp_black_set(a_type, a_field, (a_tree)->rbt_root); \
+} while (0)
+
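The assert(rbp_i_cmp > 0) branches above mean that a_cmp must never report equality against a node already in the tree, so duplicate keys are unsupported; callers that need logical duplicates typically break ties by node address. A minimal sketch of a conforming comparison function follows, using a hypothetical node_t that is not part of this patch and assuming the rb_node() declaration macro defined earlier in this header:

    typedef struct node_s node_t;
    struct node_s {
        rb_node(node_t) link;   /* Embedded red-black tree linkage. */
        size_t key;             /* Sort key; assumed unique (see above). */
    };

    static int
    node_comp(node_t *a, node_t *b)
    {
        /* Three-way compare, avoiding the unsigned wraparound that */
        /* subtracting size_t keys could produce. */
        return ((a->key > b->key) - (a->key < b->key));
    }
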
+#define rb_remove(a_type, a_field, a_cmp, a_tree, a_node) do { \
+ a_type rbp_r_s; \
+ a_type *rbp_r_p, *rbp_r_c, *rbp_r_xp, *rbp_r_t, *rbp_r_u; \
+ int rbp_r_cmp; \
+ rbp_left_set(a_type, a_field, &rbp_r_s, (a_tree)->rbt_root); \
+ rbp_right_set(a_type, a_field, &rbp_r_s, &(a_tree)->rbt_nil); \
+ rbp_black_set(a_type, a_field, &rbp_r_s); \
+ rbp_r_p = &rbp_r_s; \
+ rbp_r_c = (a_tree)->rbt_root; \
+ rbp_r_xp = &(a_tree)->rbt_nil; \
+ /* Iterate down the tree, but always transform 2-nodes to 3- or */\
+ /* 4-nodes in order to maintain the invariant that the current */\
+ /* node is not a 2-node. This allows simple deletion once a leaf */\
+ /* is reached. Handle the root specially though, since there may */\
+ /* be no way to convert it from a 2-node to a 3-node. */\
+ rbp_r_cmp = (a_cmp)((a_node), rbp_r_c); \
+ if (rbp_r_cmp < 0) { \
+ rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c); \
+ rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
+ if (rbp_red_get(a_type, a_field, rbp_r_t) == false \
+ && rbp_red_get(a_type, a_field, rbp_r_u) == false) { \
+ /* Apply standard transform to prepare for left move. */\
+ rbp_move_red_left(a_type, a_field, rbp_r_c, rbp_r_t); \
+ rbp_black_set(a_type, a_field, rbp_r_t); \
+ rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t); \
+ rbp_r_c = rbp_r_t; \
+ } else { \
+ /* Move left. */\
+ rbp_r_p = rbp_r_c; \
+ rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c); \
+ } \
+ } else { \
+ if (rbp_r_cmp == 0) { \
+ assert((a_node) == rbp_r_c); \
+ if (rbp_right_get(a_type, a_field, rbp_r_c) \
+ == &(a_tree)->rbt_nil) { \
+ /* Delete root node (which is also a leaf node). */\
+ if (rbp_left_get(a_type, a_field, rbp_r_c) \
+ != &(a_tree)->rbt_nil) { \
+ rbp_lean_right(a_type, a_field, rbp_r_c, rbp_r_t); \
+ rbp_right_set(a_type, a_field, rbp_r_t, \
+ &(a_tree)->rbt_nil); \
+ } else { \
+ rbp_r_t = &(a_tree)->rbt_nil; \
+ } \
+ rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t); \
+ } else { \
+ /* This is the node we want to delete, but we will */\
+ /* instead swap it with its successor and delete the */\
+ /* successor. Record enough information to do the */\
+ /* swap later. rbp_r_xp is a_node's parent. */\
+ rbp_r_xp = rbp_r_p; \
+ rbp_r_cmp = 1; /* Note that deletion is incomplete. */\
+ } \
+ } \
+ if (rbp_r_cmp == 1) { \
+ if (rbp_red_get(a_type, a_field, rbp_left_get(a_type, \
+ a_field, rbp_right_get(a_type, a_field, rbp_r_c))) \
+ == false) { \
+ rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c); \
+ if (rbp_red_get(a_type, a_field, rbp_r_t)) { \
+ /* Standard transform. */\
+ rbp_move_red_right(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ } else { \
+ /* Root-specific transform. */\
+ rbp_red_set(a_type, a_field, rbp_r_c); \
+ rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
+ if (rbp_red_get(a_type, a_field, rbp_r_u)) { \
+ rbp_black_set(a_type, a_field, rbp_r_u); \
+ rbp_rotate_right(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ rbp_rotate_left(a_type, a_field, rbp_r_c, \
+ rbp_r_u); \
+ rbp_right_set(a_type, a_field, rbp_r_t, \
+ rbp_r_u); \
+ } else { \
+ rbp_red_set(a_type, a_field, rbp_r_t); \
+ rbp_rotate_left(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ } \
+ } \
+ rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t); \
+ rbp_r_c = rbp_r_t; \
+ } else { \
+ /* Move right. */\
+ rbp_r_p = rbp_r_c; \
+ rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c); \
+ } \
+ } \
+ } \
+ if (rbp_r_cmp != 0) { \
+ while (true) { \
+ assert(rbp_r_p != &(a_tree)->rbt_nil); \
+ rbp_r_cmp = (a_cmp)((a_node), rbp_r_c); \
+ if (rbp_r_cmp < 0) { \
+ rbp_r_t = rbp_left_get(a_type, a_field, rbp_r_c); \
+ if (rbp_r_t == &(a_tree)->rbt_nil) { \
+ /* rbp_r_c now refers to the successor node to */\
+ /* relocate, and rbp_r_xp/a_node refer to the */\
+ /* context for the relocation. */\
+ if (rbp_left_get(a_type, a_field, rbp_r_xp) \
+ == (a_node)) { \
+ rbp_left_set(a_type, a_field, rbp_r_xp, \
+ rbp_r_c); \
+ } else { \
+ assert(rbp_right_get(a_type, a_field, \
+ rbp_r_xp) == (a_node)); \
+ rbp_right_set(a_type, a_field, rbp_r_xp, \
+ rbp_r_c); \
+ } \
+ rbp_left_set(a_type, a_field, rbp_r_c, \
+ rbp_left_get(a_type, a_field, (a_node))); \
+ rbp_right_set(a_type, a_field, rbp_r_c, \
+ rbp_right_get(a_type, a_field, (a_node))); \
+ rbp_color_set(a_type, a_field, rbp_r_c, \
+ rbp_red_get(a_type, a_field, (a_node))); \
+ if (rbp_left_get(a_type, a_field, rbp_r_p) \
+ == rbp_r_c) { \
+ rbp_left_set(a_type, a_field, rbp_r_p, \
+ &(a_tree)->rbt_nil); \
+ } else { \
+ assert(rbp_right_get(a_type, a_field, rbp_r_p) \
+ == rbp_r_c); \
+ rbp_right_set(a_type, a_field, rbp_r_p, \
+ &(a_tree)->rbt_nil); \
+ } \
+ break; \
+ } \
+ rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
+ if (rbp_red_get(a_type, a_field, rbp_r_t) == false \
+ && rbp_red_get(a_type, a_field, rbp_r_u) == false) { \
+ rbp_move_red_left(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ if (rbp_left_get(a_type, a_field, rbp_r_p) \
+ == rbp_r_c) { \
+ rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);\
+ } else { \
+ rbp_right_set(a_type, a_field, rbp_r_p, \
+ rbp_r_t); \
+ } \
+ rbp_r_c = rbp_r_t; \
+ } else { \
+ rbp_r_p = rbp_r_c; \
+ rbp_r_c = rbp_left_get(a_type, a_field, rbp_r_c); \
+ } \
+ } else { \
+ /* Check whether to delete this node (it has to be */\
+ /* the correct node and a leaf node). */\
+ if (rbp_r_cmp == 0) { \
+ assert((a_node) == rbp_r_c); \
+ if (rbp_right_get(a_type, a_field, rbp_r_c) \
+ == &(a_tree)->rbt_nil) { \
+ /* Delete leaf node. */\
+ if (rbp_left_get(a_type, a_field, rbp_r_c) \
+ != &(a_tree)->rbt_nil) { \
+ rbp_lean_right(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ rbp_right_set(a_type, a_field, rbp_r_t, \
+ &(a_tree)->rbt_nil); \
+ } else { \
+ rbp_r_t = &(a_tree)->rbt_nil; \
+ } \
+ if (rbp_left_get(a_type, a_field, rbp_r_p) \
+ == rbp_r_c) { \
+ rbp_left_set(a_type, a_field, rbp_r_p, \
+ rbp_r_t); \
+ } else { \
+ rbp_right_set(a_type, a_field, rbp_r_p, \
+ rbp_r_t); \
+ } \
+ break; \
+ } else { \
+ /* This is the node we want to delete, but we */\
+ /* will instead swap it with its successor */\
+ /* and delete the successor. Record enough */\
+ /* information to do the swap later. */\
+ /* rbp_r_xp is a_node's parent. */\
+ rbp_r_xp = rbp_r_p; \
+ } \
+ } \
+ rbp_r_t = rbp_right_get(a_type, a_field, rbp_r_c); \
+ rbp_r_u = rbp_left_get(a_type, a_field, rbp_r_t); \
+ if (rbp_red_get(a_type, a_field, rbp_r_u) == false) { \
+ rbp_move_red_right(a_type, a_field, rbp_r_c, \
+ rbp_r_t); \
+ if (rbp_left_get(a_type, a_field, rbp_r_p) \
+ == rbp_r_c) { \
+ rbp_left_set(a_type, a_field, rbp_r_p, rbp_r_t);\
+ } else { \
+ rbp_right_set(a_type, a_field, rbp_r_p, \
+ rbp_r_t); \
+ } \
+ rbp_r_c = rbp_r_t; \
+ } else { \
+ rbp_r_p = rbp_r_c; \
+ rbp_r_c = rbp_right_get(a_type, a_field, rbp_r_c); \
+ } \
+ } \
+ } \
+ } \
+ /* Update root. */\
+ (a_tree)->rbt_root = rbp_left_get(a_type, a_field, &rbp_r_s); \
+} while (0)
+
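Both macros can be invoked directly, as in the following hedged sketch; it reuses the hypothetical node_t above and assumes the rb_tree() declaration macro and rb_new() initializer defined earlier in this header. The rb_wrap() functions below are normally the more convenient interface:

    typedef rb_tree(node_t) node_tree_t;   /* Assumed declaration macro. */

    static void
    tree_demo(node_tree_t *tree, node_t *node)
    {
        rb_new(node_t, link, tree);                      /* Empty tree. */
        rb_insert(node_t, link, node_comp, tree, node);  /* O(lg n). */
        rb_remove(node_t, link, node_comp, tree, node);  /* O(lg n). */
    }
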
+/*
+ * The rb_wrap() macro provides a convenient way to wrap functions around the
+ * cpp macros. The main benefits of wrapping are that 1) repeated macro
+ * expansion can cause code bloat, especially for rb_{insert,remove}(), and
+ * 2) type, linkage, comparison functions, etc. need not be specified at every
+ * call point.
+ */
+
+#define rb_wrap(a_attr, a_prefix, a_tree_type, a_type, a_field, a_cmp) \
+a_attr void \
+a_prefix##new(a_tree_type *tree) { \
+ rb_new(a_type, a_field, tree); \
+} \
+a_attr a_type * \
+a_prefix##first(a_tree_type *tree) { \
+ a_type *ret; \
+ rb_first(a_type, a_field, tree, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##last(a_tree_type *tree) { \
+ a_type *ret; \
+ rb_last(a_type, a_field, tree, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##next(a_tree_type *tree, a_type *node) { \
+ a_type *ret; \
+ rb_next(a_type, a_field, a_cmp, tree, node, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##prev(a_tree_type *tree, a_type *node) { \
+ a_type *ret; \
+ rb_prev(a_type, a_field, a_cmp, tree, node, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##search(a_tree_type *tree, a_type *key) { \
+ a_type *ret; \
+ rb_search(a_type, a_field, a_cmp, tree, key, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##nsearch(a_tree_type *tree, a_type *key) { \
+ a_type *ret; \
+ rb_nsearch(a_type, a_field, a_cmp, tree, key, ret); \
+ return (ret); \
+} \
+a_attr a_type * \
+a_prefix##psearch(a_tree_type *tree, a_type *key) { \
+ a_type *ret; \
+ rb_psearch(a_type, a_field, a_cmp, tree, key, ret); \
+ return (ret); \
+} \
+a_attr void \
+a_prefix##insert(a_tree_type *tree, a_type *node) { \
+ rb_insert(a_type, a_field, a_cmp, tree, node); \
+} \
+a_attr void \
+a_prefix##remove(a_tree_type *tree, a_type *node) { \
+ rb_remove(a_type, a_field, a_cmp, tree, node); \
+}
+
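To make the wrapped form concrete, here is a sketch of instantiating rb_wrap() for the hypothetical types above. The prefix is pasted verbatim, so a prefix of node_tree_ yields node_tree_new(), node_tree_insert(), and so on:

    /* Generates static node_tree_new(), node_tree_insert(), etc. */
    rb_wrap(static, node_tree_, node_tree_t, node_t, link, node_comp)

    static node_t *
    lookup_demo(node_tree_t *tree, size_t key)
    {
        node_t key_node;     /* Stack-allocated key; only .key is read. */
        key_node.key = key;
        return (node_tree_search(tree, &key_node));
    }
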
+/*
+ * The iterators simulate recursion via an array of pointers that store the
+ * current path. This is critical to performance, since a series of calls to
+ * rb_{next,prev}() would require time proportional to (n lg n), whereas this
+ * implementation only requires time proportional to (n).
+ *
+ * Since the iterators cache a path down the tree, any tree modification may
+ * cause the cached path to become invalid. In order to continue iteration,
+ * use something like the following sequence:
+ *
+ * {
+ * a_type *node, *tnode;
+ *
+ * rb_foreach_begin(a_type, a_field, a_tree, node) {
+ * ...
+ * rb_next(a_type, a_field, a_cmp, a_tree, node, tnode);
+ * rb_remove(a_type, a_field, a_cmp, a_tree, node);
+ * rb_foreach_next(a_type, a_field, a_cmp, a_tree, tnode);
+ * ...
+ * } rb_foreach_end(a_type, a_field, a_tree, node)
+ * }
+ *
+ * Note that this idiom is not advised if every iteration modifies the tree,
+ * since in that case there is no algorithmic complexity improvement over a
+ * series of rb_{next,prev}() calls, thus making the setup overhead wasted
+ * effort.
+ */
+
+#ifdef RB_NO_C99_VARARRAYS
+ /*
+ * Avoid using variable-length arrays, at the cost of using more stack space.
+ * Size the path arrays such that they are always large enough, even if a
+ * tree consumes all available memory. Since each node must contain a minimum of
+ * two pointers, there can never be more nodes than:
+ *
+ * 1 << ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1))
+ *
+ * Since the depth of a tree is limited to 3*lg(#nodes), the maximum depth
+ * is:
+ *
+ * (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+ *
+ * This works out to a maximum depth of 87 and 180 for 32- and 64-bit
+ * systems, respectively (approximately 348 and 1440 bytes).
+ */
+# define rbp_compute_f_height(a_type, a_field, a_tree)
+# define rbp_f_height (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+# define rbp_compute_fr_height(a_type, a_field, a_tree)
+# define rbp_fr_height (3 * ((SIZEOF_PTR<<3) - (SIZEOF_PTR_2POW+1)))
+#else
+# define rbp_compute_f_height(a_type, a_field, a_tree) \
+ /* Compute the maximum possible tree depth (3X the black height). */\
+ unsigned rbp_f_height; \
+ rbp_black_height(a_type, a_field, a_tree, rbp_f_height); \
+ rbp_f_height *= 3;
+# define rbp_compute_fr_height(a_type, a_field, a_tree) \
+ /* Compute the maximum possible tree depth (3X the black height). */\
+ unsigned rbp_fr_height; \
+ rbp_black_height(a_type, a_field, a_tree, rbp_fr_height); \
+ rbp_fr_height *= 3;
+#endif
+
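Spelling out the arithmetic behind the fixed bound above, as a hedged aside (SIZEOF_PTR and SIZEOF_PTR_2POW are assumed from jemalloc's platform configuration, with SIZEOF_PTR == 1 << SIZEOF_PTR_2POW):

    /* 32-bit: SIZEOF_PTR == 4, SIZEOF_PTR_2POW == 2, so           */
    /*   #nodes <= 1 << (32 - 3) and depth <= 3 * (32 - 3) == 87,  */
    /*   i.e. 87 * 4 == 348 bytes of path array.                   */
    /* 64-bit: SIZEOF_PTR == 8, SIZEOF_PTR_2POW == 3, so           */
    /*   depth <= 3 * (64 - 4) == 180, i.e. 180 * 8 == 1440 bytes. */
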
+#define rb_foreach_begin(a_type, a_field, a_tree, a_var) { \
+ rbp_compute_f_height(a_type, a_field, a_tree) \
+ { \
+ /* Initialize the path to contain the left spine. */\
+ a_type *rbp_f_path[rbp_f_height]; \
+ a_type *rbp_f_node; \
+ bool rbp_f_synced = false; \
+ unsigned rbp_f_depth = 0; \
+ if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
+ rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root; \
+ rbp_f_depth++; \
+ while ((rbp_f_node = rbp_left_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) { \
+ rbp_f_path[rbp_f_depth] = rbp_f_node; \
+ rbp_f_depth++; \
+ } \
+ } \
+ /* While the path is non-empty, iterate. */\
+ while (rbp_f_depth > 0) { \
+ (a_var) = rbp_f_path[rbp_f_depth-1];
+
+/* Only use if modifying the tree during iteration. */
+#define rb_foreach_next(a_type, a_field, a_cmp, a_tree, a_node) \
+ /* Re-initialize the path to contain the path to a_node. */\
+ rbp_f_depth = 0; \
+ if (a_node != NULL) { \
+ if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
+ rbp_f_path[rbp_f_depth] = (a_tree)->rbt_root; \
+ rbp_f_depth++; \
+ rbp_f_node = rbp_f_path[0]; \
+ while (true) { \
+ int rbp_f_cmp = (a_cmp)((a_node), \
+ rbp_f_path[rbp_f_depth-1]); \
+ if (rbp_f_cmp < 0) { \
+ rbp_f_node = rbp_left_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1]); \
+ } else if (rbp_f_cmp > 0) { \
+ rbp_f_node = rbp_right_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1]); \
+ } else { \
+ break; \
+ } \
+ assert(rbp_f_node != &(a_tree)->rbt_nil); \
+ rbp_f_path[rbp_f_depth] = rbp_f_node; \
+ rbp_f_depth++; \
+ } \
+ } \
+ } \
+ rbp_f_synced = true;
+
+#define rb_foreach_end(a_type, a_field, a_tree, a_var) \
+ if (rbp_f_synced) { \
+ rbp_f_synced = false; \
+ continue; \
+ } \
+ /* Find the successor. */\
+ if ((rbp_f_node = rbp_right_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) { \
+ /* The successor is the left-most node in the right */\
+ /* subtree. */\
+ rbp_f_path[rbp_f_depth] = rbp_f_node; \
+ rbp_f_depth++; \
+ while ((rbp_f_node = rbp_left_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1])) != &(a_tree)->rbt_nil) { \
+ rbp_f_path[rbp_f_depth] = rbp_f_node; \
+ rbp_f_depth++; \
+ } \
+ } else { \
+ /* The successor is above the current node. Unwind */\
+ /* until a left-leaning edge is removed from the */\
+ /* path, or the path is empty. */\
+ for (rbp_f_depth--; rbp_f_depth > 0; rbp_f_depth--) { \
+ if (rbp_left_get(a_type, a_field, \
+ rbp_f_path[rbp_f_depth-1]) \
+ == rbp_f_path[rbp_f_depth]) { \
+ break; \
+ } \
+ } \
+ } \
+ } \
+ } \
+}
+
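Putting the forward iteration macros together, a hedged sketch of the removal idiom documented above, pruning every node whose key falls below a threshold (hypothetical types as in the earlier sketches):

    static void
    prune_demo(node_tree_t *tree, size_t min_key)
    {
        node_t *node, *tnode;

        rb_foreach_begin(node_t, link, tree, node) {
            if (node->key < min_key) {
                /* Capture the successor, remove, then re-sync the */
                /* cached path before continuing. */
                rb_next(node_t, link, node_comp, tree, node, tnode);
                rb_remove(node_t, link, node_comp, tree, node);
                rb_foreach_next(node_t, link, node_comp, tree, tnode);
            }
        } rb_foreach_end(node_t, link, tree, node)
    }
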
+#define rb_foreach_reverse_begin(a_type, a_field, a_tree, a_var) { \
+ rbp_compute_fr_height(a_type, a_field, a_tree) \
+ { \
+ /* Initialize the path to contain the right spine. */\
+ a_type *rbp_fr_path[rbp_fr_height]; \
+ a_type *rbp_fr_node; \
+ bool rbp_fr_synced = false; \
+ unsigned rbp_fr_depth = 0; \
+ if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
+ rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root; \
+ rbp_fr_depth++; \
+ while ((rbp_fr_node = rbp_right_get(a_type, a_field, \
+ rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) { \
+ rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
+ rbp_fr_depth++; \
+ } \
+ } \
+ /* While the path is non-empty, iterate. */\
+ while (rbp_fr_depth > 0) { \
+ (a_var) = rbp_fr_path[rbp_fr_depth-1];
+
+/* Only use if modifying the tree during iteration. */
+#define rb_foreach_reverse_prev(a_type, a_field, a_cmp, a_tree, a_node) \
+ /* Re-initialize the path to contain the path to a_node. */\
+ rbp_fr_depth = 0; \
+ if (a_node != NULL) { \
+ if ((a_tree)->rbt_root != &(a_tree)->rbt_nil) { \
+ rbp_fr_path[rbp_fr_depth] = (a_tree)->rbt_root; \
+ rbp_fr_depth++; \
+ rbp_fr_node = rbp_fr_path[0]; \
+ while (true) { \
+ int rbp_fr_cmp = (a_cmp)((a_node), \
+ rbp_fr_path[rbp_fr_depth-1]); \
+ if (rbp_fr_cmp < 0) { \
+ rbp_fr_node = rbp_left_get(a_type, a_field, \
+ rbp_fr_path[rbp_fr_depth-1]); \
+ } else if (rbp_fr_cmp > 0) { \
+ rbp_fr_node = rbp_right_get(a_type, a_field,\
+ rbp_fr_path[rbp_fr_depth-1]); \
+ } else { \
+ break; \
+ } \
+ assert(rbp_fr_node != &(a_tree)->rbt_nil); \
+ rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
+ rbp_fr_depth++; \
+ } \
+ } \
+ } \
+ rbp_fr_synced = true;
+
+#define rb_foreach_reverse_end(a_type, a_field, a_tree, a_var) \
+ if (rbp_fr_synced) { \
+ rbp_fr_synced = false; \
+ continue; \
+ } \
+ if (rbp_fr_depth == 0) { \
+ /* rb_foreach_reverse_prev() was called with a NULL */\
+ /* a_node. */\
+ break; \
+ } \
+ /* Find the predecessor. */\
+ if ((rbp_fr_node = rbp_left_get(a_type, a_field, \
+ rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) { \
+ /* The predecessor is the right-most node in the left */\
+ /* subtree. */\
+ rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
+ rbp_fr_depth++; \
+ while ((rbp_fr_node = rbp_right_get(a_type, a_field, \
+ rbp_fr_path[rbp_fr_depth-1])) != &(a_tree)->rbt_nil) {\
+ rbp_fr_path[rbp_fr_depth] = rbp_fr_node; \
+ rbp_fr_depth++; \
+ } \
+ } else { \
+ /* The predecessor is above the current node. Unwind */\
+ /* until a right-leaning edge is removed from the */\
+ /* path, or the path is empty. */\
+ for (rbp_fr_depth--; rbp_fr_depth > 0; rbp_fr_depth--) {\
+ if (rbp_right_get(a_type, a_field, \
+ rbp_fr_path[rbp_fr_depth-1]) \
+ == rbp_fr_path[rbp_fr_depth]) { \
+ break; \
+ } \
+ } \
+ } \
+ } \
+ } \
+}
+
+#endif /* RB_H_ */
deleted file mode 100644
--- a/memory/jemalloc/tree.h
+++ /dev/null
@@ -1,743 +0,0 @@
-/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
-/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
-/* $FreeBSD: src/sys/sys/tree.h,v 1.7 2007/12/28 07:03:26 jasone Exp $ */
-
-/*-
- * Copyright 2002 Niels Provos <provos@citi.umich.edu>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SYS_TREE_H_
-#define _SYS_TREE_H_
-
-/*
- * This file defines data structures for different types of trees:
- * splay trees and red-black trees.
- *
- * A splay tree is a self-organizing data structure. Every operation
- * on the tree causes a splay to happen. The splay moves the requested
- * node to the root of the tree and partly rebalances it.
- *
- * This has the benefit that request locality causes faster lookups as
- * the requested nodes move to the top of the tree. On the other hand,
- * every lookup causes memory writes.
- *
- * The Balance Theorem bounds the total access time for m operations
- * and n inserts on an initially empty tree as O((m + n)lg n). The
- * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
- *
- * A red-black tree is a binary search tree with the node color as an
- * extra attribute. It fulfills a set of conditions:
- * - every search path from the root to a leaf consists of the
- * same number of black nodes,
- * - each red node (except for the root) has a black parent,
- * - each leaf node is black.
- *
- * Every operation on a red-black tree is bounded as O(lg n).
- * The maximum height of a red-black tree is 2lg (n+1).
- */
-
-#define SPLAY_HEAD(name, type) \
-struct name { \
- struct type *sph_root; /* root of the tree */ \
-}
-
-#define SPLAY_INITIALIZER(root) \
- { NULL }
-
-#define SPLAY_INIT(root) do { \
- (root)->sph_root = NULL; \
-} while (/*CONSTCOND*/ 0)
-
-#define SPLAY_ENTRY(type) \
-struct { \
- struct type *spe_left; /* left element */ \
- struct type *spe_right; /* right element */ \
-}
-
-#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
-#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
-#define SPLAY_ROOT(head) (head)->sph_root
-#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
-
-/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
-#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
- SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
- SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
- (head)->sph_root = tmp; \
-} while (/*CONSTCOND*/ 0)
-
-#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
- SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
- SPLAY_LEFT(tmp, field) = (head)->sph_root; \
- (head)->sph_root = tmp; \
-} while (/*CONSTCOND*/ 0)
-
-#define SPLAY_LINKLEFT(head, tmp, field) do { \
- SPLAY_LEFT(tmp, field) = (head)->sph_root; \
- tmp = (head)->sph_root; \
- (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
-} while (/*CONSTCOND*/ 0)
-
-#define SPLAY_LINKRIGHT(head, tmp, field) do { \
- SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
- tmp = (head)->sph_root; \
- (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
-} while (/*CONSTCOND*/ 0)
-
-#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
- SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
- SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
- SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
- SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
-} while (/*CONSTCOND*/ 0)
-
-/* Generates prototypes and inline functions */
-
-#define SPLAY_PROTOTYPE(name, type, field, cmp) \
-void name##_SPLAY(struct name *, struct type *); \
-void name##_SPLAY_MINMAX(struct name *, int); \
-struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
-struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
- \
-/* Finds the node with the same key as elm */ \
-static __inline struct type * \
-name##_SPLAY_FIND(struct name *head, struct type *elm) \
-{ \
- if (SPLAY_EMPTY(head)) \
- return(NULL); \
- name##_SPLAY(head, elm); \
- if ((cmp)(elm, (head)->sph_root) == 0) \
- return (head->sph_root); \
- return (NULL); \
-} \
- \
-static __inline struct type * \
-name##_SPLAY_NEXT(struct name *head, struct type *elm) \
-{ \
- name##_SPLAY(head, elm); \
- if (SPLAY_RIGHT(elm, field) != NULL) { \
- elm = SPLAY_RIGHT(elm, field); \
- while (SPLAY_LEFT(elm, field) != NULL) { \
- elm = SPLAY_LEFT(elm, field); \
- } \
- } else \
- elm = NULL; \
- return (elm); \
-} \
- \
-static __inline struct type * \
-name##_SPLAY_MIN_MAX(struct name *head, int val) \
-{ \
- name##_SPLAY_MINMAX(head, val); \
- return (SPLAY_ROOT(head)); \
-}
-
-/* Main splay operation.
- * Moves node close to the key of elm to top
- */
-#define SPLAY_GENERATE(name, type, field, cmp) \
-struct type * \
-name##_SPLAY_INSERT(struct name *head, struct type *elm) \
-{ \
- if (SPLAY_EMPTY(head)) { \
- SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
- } else { \
- int __comp; \
- name##_SPLAY(head, elm); \
- __comp = (cmp)(elm, (head)->sph_root); \
- if(__comp < 0) { \
- SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
- SPLAY_RIGHT(elm, field) = (head)->sph_root; \
- SPLAY_LEFT((head)->sph_root, field) = NULL; \
- } else if (__comp > 0) { \
- SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
- SPLAY_LEFT(elm, field) = (head)->sph_root; \
- SPLAY_RIGHT((head)->sph_root, field) = NULL; \
- } else \
- return ((head)->sph_root); \
- } \
- (head)->sph_root = (elm); \
- return (NULL); \
-} \
- \
-struct type * \
-name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
-{ \
- struct type *__tmp; \
- if (SPLAY_EMPTY(head)) \
- return (NULL); \
- name##_SPLAY(head, elm); \
- if ((cmp)(elm, (head)->sph_root) == 0) { \
- if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
- (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
- } else { \
- __tmp = SPLAY_RIGHT((head)->sph_root, field); \
- (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
- name##_SPLAY(head, elm); \
- SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
- } \
- return (elm); \
- } \
- return (NULL); \
-} \
- \
-void \
-name##_SPLAY(struct name *head, struct type *elm) \
-{ \
- struct type __node, *__left, *__right, *__tmp; \
- int __comp; \
-\
- SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
- __left = __right = &__node; \
-\
- while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
- if (__comp < 0) { \
- __tmp = SPLAY_LEFT((head)->sph_root, field); \
- if (__tmp == NULL) \
- break; \
- if ((cmp)(elm, __tmp) < 0){ \
- SPLAY_ROTATE_RIGHT(head, __tmp, field); \
- if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
- break; \
- } \
- SPLAY_LINKLEFT(head, __right, field); \
- } else if (__comp > 0) { \
- __tmp = SPLAY_RIGHT((head)->sph_root, field); \
- if (__tmp == NULL) \
- break; \
- if ((cmp)(elm, __tmp) > 0){ \
- SPLAY_ROTATE_LEFT(head, __tmp, field); \
- if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
- break; \
- } \
- SPLAY_LINKRIGHT(head, __left, field); \
- } \
- } \
- SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
-} \
- \
-/* Splay with either the minimum or the maximum element \
- * Used to find minimum or maximum element in tree. \
- */ \
-void name##_SPLAY_MINMAX(struct name *head, int __comp) \
-{ \
- struct type __node, *__left, *__right, *__tmp; \
-\
- SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
- __left = __right = &__node; \
-\
- while (1) { \
- if (__comp < 0) { \
- __tmp = SPLAY_LEFT((head)->sph_root, field); \
- if (__tmp == NULL) \
- break; \
- if (__comp < 0){ \
- SPLAY_ROTATE_RIGHT(head, __tmp, field); \
- if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
- break; \
- } \
- SPLAY_LINKLEFT(head, __right, field); \
- } else if (__comp > 0) { \
- __tmp = SPLAY_RIGHT((head)->sph_root, field); \
- if (__tmp == NULL) \
- break; \
- if (__comp > 0) { \
- SPLAY_ROTATE_LEFT(head, __tmp, field); \
- if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
- break; \
- } \
- SPLAY_LINKRIGHT(head, __left, field); \
- } \
- } \
- SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
-}
-
-#define SPLAY_NEGINF -1
-#define SPLAY_INF 1
-
-#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
-#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
-#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
-#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
-#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
- : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
-#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
- : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
-
-#define SPLAY_FOREACH(x, name, head) \
- for ((x) = SPLAY_MIN(name, head); \
- (x) != NULL; \
- (x) = SPLAY_NEXT(name, head, x))
-
-/* Macros that define a red-black tree */
-#define RB_HEAD(name, type) \
-struct name { \
- struct type *rbh_root; /* root of the tree */ \
-}
-
-#define RB_INITIALIZER(root) \
- { NULL }
-
-#define RB_INIT(root) do { \
- (root)->rbh_root = NULL; \
-} while (/*CONSTCOND*/ 0)
-
-#define RB_BLACK 0
-#define RB_RED 1
-#define RB_ENTRY(type) \
-struct { \
- struct type *rbe_left; /* left element */ \
- struct type *rbe_right; /* right element */ \
- struct type *rbe_parent; /* parent element */ \
- int rbe_color; /* node color */ \
-}
-
-#define RB_LEFT(elm, field) (elm)->field.rbe_left
-#define RB_RIGHT(elm, field) (elm)->field.rbe_right
-#define RB_PARENT(elm, field) (elm)->field.rbe_parent
-#define RB_COLOR(elm, field) (elm)->field.rbe_color
-#define RB_ROOT(head) (head)->rbh_root
-#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
-
-#define RB_SET(elm, parent, field) do { \
- RB_PARENT(elm, field) = parent; \
- RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
- RB_COLOR(elm, field) = RB_RED; \
-} while (/*CONSTCOND*/ 0)
-
-#define RB_SET_BLACKRED(black, red, field) do { \
- RB_COLOR(black, field) = RB_BLACK; \
- RB_COLOR(red, field) = RB_RED; \
-} while (/*CONSTCOND*/ 0)
-
-#ifndef RB_AUGMENT
-#define RB_AUGMENT(x) do {} while (0)
-#endif
-
-#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
- (tmp) = RB_RIGHT(elm, field); \
- if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
- RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
- } \
- RB_AUGMENT(elm); \
- if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
- if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
- RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
- else \
- RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
- } else \
- (head)->rbh_root = (tmp); \
- RB_LEFT(tmp, field) = (elm); \
- RB_PARENT(elm, field) = (tmp); \
- RB_AUGMENT(tmp); \
- if ((RB_PARENT(tmp, field))) \
- RB_AUGMENT(RB_PARENT(tmp, field)); \
-} while (/*CONSTCOND*/ 0)
-
-#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
- (tmp) = RB_LEFT(elm, field); \
- if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
- RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
- } \
- RB_AUGMENT(elm); \
- if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
- if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
- RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
- else \
- RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
- } else \
- (head)->rbh_root = (tmp); \
- RB_RIGHT(tmp, field) = (elm); \
- RB_PARENT(elm, field) = (tmp); \
- RB_AUGMENT(tmp); \
- if ((RB_PARENT(tmp, field))) \
- RB_AUGMENT(RB_PARENT(tmp, field)); \
-} while (/*CONSTCOND*/ 0)
-
-/* Generates prototypes and inline functions */
-#define RB_PROTOTYPE(name, type, field, cmp) \
- RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
-#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
- RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
-#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
-attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \
-attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
-attr struct type *name##_RB_REMOVE(struct name *, struct type *); \
-attr struct type *name##_RB_INSERT(struct name *, struct type *); \
-attr struct type *name##_RB_FIND(struct name *, struct type *); \
-attr struct type *name##_RB_NFIND(struct name *, struct type *); \
-attr struct type *name##_RB_NEXT(struct type *); \
-attr struct type *name##_RB_PREV(struct type *); \
-attr struct type *name##_RB_MINMAX(struct name *, int); \
- \
-
-/* Main rb operation.
- * Moves node close to the key of elm to top
- */
-#define RB_GENERATE(name, type, field, cmp) \
- RB_GENERATE_INTERNAL(name, type, field, cmp,)
-#define RB_GENERATE_STATIC(name, type, field, cmp) \
- RB_GENERATE_INTERNAL(name, type, field, cmp, static)
-#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
-attr void \
-name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
-{ \
- struct type *parent, *gparent, *tmp; \
- while ((parent = RB_PARENT(elm, field)) != NULL && \
- RB_COLOR(parent, field) == RB_RED) { \
- gparent = RB_PARENT(parent, field); \
- if (parent == RB_LEFT(gparent, field)) { \
- tmp = RB_RIGHT(gparent, field); \
- if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
- RB_COLOR(tmp, field) = RB_BLACK; \
- RB_SET_BLACKRED(parent, gparent, field);\
- elm = gparent; \
- continue; \
- } \
- if (RB_RIGHT(parent, field) == elm) { \
- RB_ROTATE_LEFT(head, parent, tmp, field);\
- tmp = parent; \
- parent = elm; \
- elm = tmp; \
- } \
- RB_SET_BLACKRED(parent, gparent, field); \
- RB_ROTATE_RIGHT(head, gparent, tmp, field); \
- } else { \
- tmp = RB_LEFT(gparent, field); \
- if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
- RB_COLOR(tmp, field) = RB_BLACK; \
- RB_SET_BLACKRED(parent, gparent, field);\
- elm = gparent; \
- continue; \
- } \
- if (RB_LEFT(parent, field) == elm) { \
- RB_ROTATE_RIGHT(head, parent, tmp, field);\
- tmp = parent; \
- parent = elm; \
- elm = tmp; \
- } \
- RB_SET_BLACKRED(parent, gparent, field); \
- RB_ROTATE_LEFT(head, gparent, tmp, field); \
- } \
- } \
- RB_COLOR(head->rbh_root, field) = RB_BLACK; \
-} \
- \
-attr void \
-name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
-{ \
- struct type *tmp; \
- while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
- elm != RB_ROOT(head)) { \
- if (RB_LEFT(parent, field) == elm) { \
- tmp = RB_RIGHT(parent, field); \
- if (RB_COLOR(tmp, field) == RB_RED) { \
- RB_SET_BLACKRED(tmp, parent, field); \
- RB_ROTATE_LEFT(head, parent, tmp, field);\
- tmp = RB_RIGHT(parent, field); \
- } \
- if ((RB_LEFT(tmp, field) == NULL || \
- RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
- (RB_RIGHT(tmp, field) == NULL || \
- RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
- RB_COLOR(tmp, field) = RB_RED; \
- elm = parent; \
- parent = RB_PARENT(elm, field); \
- } else { \
- if (RB_RIGHT(tmp, field) == NULL || \
- RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
- struct type *oleft; \
- if ((oleft = RB_LEFT(tmp, field)) \
- != NULL) \
- RB_COLOR(oleft, field) = RB_BLACK;\
- RB_COLOR(tmp, field) = RB_RED; \
- RB_ROTATE_RIGHT(head, tmp, oleft, field);\
- tmp = RB_RIGHT(parent, field); \
- } \
- RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
- RB_COLOR(parent, field) = RB_BLACK; \
- if (RB_RIGHT(tmp, field)) \
- RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
- RB_ROTATE_LEFT(head, parent, tmp, field);\
- elm = RB_ROOT(head); \
- break; \
- } \
- } else { \
- tmp = RB_LEFT(parent, field); \
- if (RB_COLOR(tmp, field) == RB_RED) { \
- RB_SET_BLACKRED(tmp, parent, field); \
- RB_ROTATE_RIGHT(head, parent, tmp, field);\
- tmp = RB_LEFT(parent, field); \
- } \
- if ((RB_LEFT(tmp, field) == NULL || \
- RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
- (RB_RIGHT(tmp, field) == NULL || \
- RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
- RB_COLOR(tmp, field) = RB_RED; \
- elm = parent; \
- parent = RB_PARENT(elm, field); \
- } else { \
- if (RB_LEFT(tmp, field) == NULL || \
- RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
- struct type *oright; \
- if ((oright = RB_RIGHT(tmp, field)) \
- != NULL) \
- RB_COLOR(oright, field) = RB_BLACK;\
- RB_COLOR(tmp, field) = RB_RED; \
- RB_ROTATE_LEFT(head, tmp, oright, field);\
- tmp = RB_LEFT(parent, field); \
- } \
- RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
- RB_COLOR(parent, field) = RB_BLACK; \
- if (RB_LEFT(tmp, field)) \
- RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
- RB_ROTATE_RIGHT(head, parent, tmp, field);\
- elm = RB_ROOT(head); \
- break; \
- } \
- } \
- } \
- if (elm) \
- RB_COLOR(elm, field) = RB_BLACK; \
-} \
- \
-attr struct type * \
-name##_RB_REMOVE(struct name *head, struct type *elm) \
-{ \
- struct type *child, *parent, *old = elm; \
- int color; \
- if (RB_LEFT(elm, field) == NULL) \
- child = RB_RIGHT(elm, field); \
- else if (RB_RIGHT(elm, field) == NULL) \
- child = RB_LEFT(elm, field); \
- else { \
- struct type *left; \
- elm = RB_RIGHT(elm, field); \
- while ((left = RB_LEFT(elm, field)) != NULL) \
- elm = left; \
- child = RB_RIGHT(elm, field); \
- parent = RB_PARENT(elm, field); \
- color = RB_COLOR(elm, field); \
- if (child) \
- RB_PARENT(child, field) = parent; \
- if (parent) { \
- if (RB_LEFT(parent, field) == elm) \
- RB_LEFT(parent, field) = child; \
- else \
- RB_RIGHT(parent, field) = child; \
- RB_AUGMENT(parent); \
- } else \
- RB_ROOT(head) = child; \
- if (RB_PARENT(elm, field) == old) \
- parent = elm; \
- (elm)->field = (old)->field; \
- if (RB_PARENT(old, field)) { \
- if (RB_LEFT(RB_PARENT(old, field), field) == old)\
- RB_LEFT(RB_PARENT(old, field), field) = elm;\
- else \
- RB_RIGHT(RB_PARENT(old, field), field) = elm;\
- RB_AUGMENT(RB_PARENT(old, field)); \
- } else \
- RB_ROOT(head) = elm; \
- RB_PARENT(RB_LEFT(old, field), field) = elm; \
- if (RB_RIGHT(old, field)) \
- RB_PARENT(RB_RIGHT(old, field), field) = elm; \
- if (parent) { \
- left = parent; \
- do { \
- RB_AUGMENT(left); \
- } while ((left = RB_PARENT(left, field)) != NULL); \
- } \
- goto color; \
- } \
- parent = RB_PARENT(elm, field); \
- color = RB_COLOR(elm, field); \
- if (child) \
- RB_PARENT(child, field) = parent; \
- if (parent) { \
- if (RB_LEFT(parent, field) == elm) \
- RB_LEFT(parent, field) = child; \
- else \
- RB_RIGHT(parent, field) = child; \
- RB_AUGMENT(parent); \
- } else \
- RB_ROOT(head) = child; \
-color: \
- if (color == RB_BLACK) \
- name##_RB_REMOVE_COLOR(head, parent, child); \
- return (old); \
-} \
- \
-/* Inserts a node into the RB tree */ \
-attr struct type * \
-name##_RB_INSERT(struct name *head, struct type *elm) \
-{ \
- struct type *tmp; \
- struct type *parent = NULL; \
- int comp = 0; \
- tmp = RB_ROOT(head); \
- while (tmp) { \
- parent = tmp; \
- comp = (cmp)(elm, parent); \
- if (comp < 0) \
- tmp = RB_LEFT(tmp, field); \
- else if (comp > 0) \
- tmp = RB_RIGHT(tmp, field); \
- else \
- return (tmp); \
- } \
- RB_SET(elm, parent, field); \
- if (parent != NULL) { \
- if (comp < 0) \
- RB_LEFT(parent, field) = elm; \
- else \
- RB_RIGHT(parent, field) = elm; \
- RB_AUGMENT(parent); \
- } else \
- RB_ROOT(head) = elm; \
- name##_RB_INSERT_COLOR(head, elm); \
- return (NULL); \
-} \
- \
-/* Finds the node with the same key as elm */ \
-attr struct type * \
-name##_RB_FIND(struct name *head, struct type *elm) \
-{ \
- struct type *tmp = RB_ROOT(head); \
- int comp; \
- while (tmp) { \
- comp = cmp(elm, tmp); \
- if (comp < 0) \
- tmp = RB_LEFT(tmp, field); \
- else if (comp > 0) \
- tmp = RB_RIGHT(tmp, field); \
- else \
- return (tmp); \
- } \
- return (NULL); \
-} \
- \
-/* Finds the first node greater than or equal to the search key */ \
-attr struct type * \
-name##_RB_NFIND(struct name *head, struct type *elm) \
-{ \
- struct type *tmp = RB_ROOT(head); \
- struct type *res = NULL; \
- int comp; \
- while (tmp) { \
- comp = cmp(elm, tmp); \
- if (comp < 0) { \
- res = tmp; \
- tmp = RB_LEFT(tmp, field); \
- } \
- else if (comp > 0) \
- tmp = RB_RIGHT(tmp, field); \
- else \
- return (tmp); \
- } \
- return (res); \
-} \
- \
-/* ARGSUSED */ \
-attr struct type * \
-name##_RB_NEXT(struct type *elm) \
-{ \
- if (RB_RIGHT(elm, field)) { \
- elm = RB_RIGHT(elm, field); \
- while (RB_LEFT(elm, field)) \
- elm = RB_LEFT(elm, field); \
- } else { \
- if (RB_PARENT(elm, field) && \
- (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
- elm = RB_PARENT(elm, field); \
- else { \
- while (RB_PARENT(elm, field) && \
- (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
- elm = RB_PARENT(elm, field); \
- elm = RB_PARENT(elm, field); \
- } \
- } \
- return (elm); \
-} \
- \
-/* ARGSUSED */ \
-attr struct type * \
-name##_RB_PREV(struct type *elm) \
-{ \
- if (RB_LEFT(elm, field)) { \
- elm = RB_LEFT(elm, field); \
- while (RB_RIGHT(elm, field)) \
- elm = RB_RIGHT(elm, field); \
- } else { \
- if (RB_PARENT(elm, field) && \
- (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
- elm = RB_PARENT(elm, field); \
- else { \
- while (RB_PARENT(elm, field) && \
- (elm == RB_LEFT(RB_PARENT(elm, field), field)))\
- elm = RB_PARENT(elm, field); \
- elm = RB_PARENT(elm, field); \
- } \
- } \
- return (elm); \
-} \
- \
-attr struct type * \
-name##_RB_MINMAX(struct name *head, int val) \
-{ \
- struct type *tmp = RB_ROOT(head); \
- struct type *parent = NULL; \
- while (tmp) { \
- parent = tmp; \
- if (val < 0) \
- tmp = RB_LEFT(tmp, field); \
- else \
- tmp = RB_RIGHT(tmp, field); \
- } \
- return (parent); \
-}
-
-#define RB_NEGINF -1
-#define RB_INF 1
-
-#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
-#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
-#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
-#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
-#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
-#define RB_PREV(name, x, y) name##_RB_PREV(y)
-#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
-#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
-
-#define RB_FOREACH(x, name, head) \
- for ((x) = RB_MIN(name, head); \
- (x) != NULL; \
- (x) = name##_RB_NEXT(x))
-
-#define RB_FOREACH_REVERSE(x, name, head) \
- for ((x) = RB_MAX(name, head); \
- (x) != NULL; \
- (x) = name##_RB_PREV(x))
-
-#endif /* _SYS_TREE_H_ */