Adding the memory/jemalloc directory to hg. See CVS for previous history.
author jst@mozilla.com
date Wed, 06 Feb 2008 15:06:50 -0800
changeset 11286 7a9cb183d093b27901a09e97c2661f80baef148d
parent 11285 7965ba2acd6d810b8ccb7fdf2810f2f4b581624c
child 11287 f131313f5c1c6ca2719f2c3c8b10017d203162a3
push id 1
push user bsmedberg@mozilla.com
push date Thu, 20 Mar 2008 16:49:24 +0000
milestone 1.9b4pre
memory/jemalloc/Makefile.in
memory/jemalloc/apply-ed-patches.pl
memory/jemalloc/build-crt.py
memory/jemalloc/crtsp1.diff
memory/jemalloc/ed.exe
memory/jemalloc/jemalloc.c
memory/jemalloc/tree.h
new file mode 100644
--- /dev/null
+++ b/memory/jemalloc/Makefile.in
@@ -0,0 +1,102 @@
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Mozilla Foundation
+# Portions created by the Initial Developer are Copyright (C) 2008
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#  Ted Mielczarek <ted.mielczarek@gmail.com>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+DEPTH		= ../..
+topsrcdir	= @top_srcdir@
+srcdir		= @srcdir@
+VPATH		= @srcdir@
+
+include $(DEPTH)/config/autoconf.mk
+
+MODULE		= jemalloc
+
+ifeq (WINNT,$(OS_TARGET))
+# Two options for Windows, either you build the CRT from source,
+# or you use a pre-built DLL.
+ifneq (,$(WIN32_CRT_SRC_DIR))
+# Building the CRT from source
+CRT_OBJ_DIR=./$(shell basename "$(WIN32_CRT_SRC_DIR)")
+libs:: $(CRT_OBJ_DIR)/build/intel/mozcrt19.dll
+	$(INSTALL) $< $(FINAL_TARGET)
+
+# patch if necessary
+$(CRT_OBJ_DIR)/jemalloc.c: $(srcdir)/crtsp1.diff
+	rm -rf $(CRT_OBJ_DIR)
+	cp -R "$(WIN32_CRT_SRC_DIR)" .
+	# per http://forums.microsoft.com/MSDN/ShowPost.aspx?PostID=1189363&SiteID=1
+	for i in dll mt xdll xmt; do \
+	  pushd $(CRT_OBJ_DIR)/intel/$${i}_lib && lib -extract:..\\build\\intel\\$${i}_obj\\unhandld.obj eh.lib && popd; \
+	done
+	# truly awful
+	#XXX: get ed into mozillabuild, bug 415123
+	$(PERL) $(srcdir)/apply-ed-patches.pl $(srcdir)/crtsp1.diff \
+	$(CRT_OBJ_DIR) $(srcdir)/ed.exe
+
+$(CRT_OBJ_DIR)/build/intel/mozcrt19.dll: \
+  $(CRT_OBJ_DIR)/jemalloc.c $(srcdir)/jemalloc.c $(srcdir)/tree.h
+	cp $(srcdir)/jemalloc.c $(srcdir)/tree.h $(CRT_OBJ_DIR)
+# this pretty much sucks, but nmake and make don't play well together
+	$(PYTHON) $(srcdir)/build-crt.py $(CRT_OBJ_DIR)
+	#XXX: these don't link right for some reason
+	rm $(CRT_OBJ_DIR)/build/intel/{libcmt,libcpmt}.lib
+else
+# Using a pre-built DLL, so just install it.
+libs:: $(WIN32_CUSTOM_CRT_DIR)/mozcrt19.dll
+	$(INSTALL) $< $(FINAL_TARGET)
+endif
+
+else
+# for other platforms, just build jemalloc as a shared lib
+
+MODULE_OPTIMIZE_FLAGS = -O2
+LIBRARY_NAME	= jemalloc
+EXPORT_LIBRARY	= 1
+LIBXUL_LIBRARY	= 0
+FORCE_SHARED_LIB= 1
+
+CSRCS		= \
+		jemalloc.c \
+		$(NULL)
+
+endif
+
+include $(topsrcdir)/config/rules.mk
+
+ifeq (Darwin,$(OS_TARGET))
+LDFLAGS += -init _jemalloc_darwin_init
+endif
\ No newline at end of file
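
The Makefile above encodes two Windows paths: if WIN32_CRT_SRC_DIR points at the Microsoft CRT sources, it copies them aside, applies crtsp1.diff with the bundled ed.exe, drops in jemalloc.c and tree.h, and drives nmake through build-crt.py to produce mozcrt19.dll; otherwise it simply installs a pre-built mozcrt19.dll from WIN32_CUSTOM_CRT_DIR. The Python sketch below mirrors that decision for illustration only — the variable names are borrowed from the Makefile and read from the environment here as an assumption, the eh.lib/unhandld.obj workaround is omitted, and this is not how the build system actually wires the steps together.

import os
import shutil
import subprocess

def stage_mozcrt(final_target):
    # Sketch only: WIN32_CRT_SRC_DIR / WIN32_CUSTOM_CRT_DIR are treated as
    # environment variables purely for illustration.
    crt_src = os.environ.get("WIN32_CRT_SRC_DIR")
    if crt_src:
        # "Build the CRT from source" path.
        obj_dir = os.path.basename(crt_src.rstrip("/\\"))
        shutil.rmtree(obj_dir, ignore_errors=True)              # rm -rf $(CRT_OBJ_DIR)
        shutil.copytree(crt_src, obj_dir)                       # cp -R
        subprocess.check_call(["perl", "apply-ed-patches.pl",   # patch with ed
                               "crtsp1.diff", obj_dir, "ed.exe"])
        for name in ("jemalloc.c", "tree.h"):
            shutil.copy(name, obj_dir)
        subprocess.check_call(["python", "build-crt.py", obj_dir])  # nmake wrapper
        dll = os.path.join(obj_dir, "build", "intel", "mozcrt19.dll")
    else:
        # "Use a pre-built DLL" path.
        dll = os.path.join(os.environ["WIN32_CUSTOM_CRT_DIR"], "mozcrt19.dll")
    shutil.copy(dll, final_target)                              # $(INSTALL)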
new file mode 100755
--- /dev/null
+++ b/memory/jemalloc/apply-ed-patches.pl
@@ -0,0 +1,84 @@
+#!/bin/perl
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla build system.
+#
+# The Initial Developer of the Original Code is
+# Mozilla Foundation.
+# Portions created by the Initial Developer are Copyright (C) 2008
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#  Ted Mielczarek <ted.mielczarek@gmail.com>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+# Usage: apply-ed-patches.pl <source patch> <target directory> <path to ed>
+
+use FileHandle;
+
+sub do_patch {
+  my ($ed, $target_file, $patch_file, $fh) = @_;
+  # these keep winding up read only for me
+  chmod 0666, $target_file;
+  print $fh "w\n";
+  $fh->close();
+  print "$ed - $target_file < $patch_file\n";
+  system "$ed - $target_file < $patch_file\n";
+}
+
+my $header_done = 0;
+my ($target_file,$patch_file) = ('','');
+my $source_patch = $ARGV[0];
+my $srcdir = $ARGV[1];
+my $ed = $ARGV[2];
+$srcdir = "$srcdir/" unless $srcdir =~ m|/$|;
+my $pfh = new FileHandle($source_patch, 'r');
+while(<$pfh>) {
+  # skip initial comment header
+  next if !$header_done && /^#/;
+  $header_done = 1;
+
+  next if /^Only in/;
+  if (/^diff -re (\S+)/) {
+    my $new_file = $1;
+    $new_file =~ s|^crt/src/||;
+    $new_file = "$srcdir$new_file";
+    my $new_patch_file = "$new_file.patch";
+
+    if ($target_file ne '') {
+      do_patch $ed, $target_file, $patch_file, $fh;
+    }
+    $target_file = $new_file;
+    $patch_file = $new_patch_file;
+    $fh = new FileHandle($patch_file, 'w');
+    next;
+  }
+
+  print $fh $_ if $fh;
+}
+
+do_patch $ed, $target_file, $patch_file, $fh;
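
apply-ed-patches.pl walks the concatenated "diff -re" output, skips the comment header and "Only in" lines, splits everything following each "diff -re crt/src/<file>" marker into <file>.patch, appends a trailing "w" so ed writes its buffer back, and then runs "ed - <file> < <file>.patch". A rough Python equivalent is sketched below purely to illustrate that flow; it is not part of the tree and leaves out minor details such as the chmod on the target file.

import re
import subprocess

def apply_ed_patches(source_patch, srcdir, ed="ed"):
    # Split a concatenated "diff -re" script into one ed script per target file,
    # append "w" so ed saves its buffer, then run ed on each target.
    srcdir = srcdir if srcdir.endswith("/") else srcdir + "/"
    target, lines, header_done = None, [], False

    def run_ed():
        if target is None:
            return
        patch_file = target + ".patch"
        with open(patch_file, "w") as out:
            out.writelines(lines)
            out.write("w\n")                # tell ed to write the patched file
        with open(patch_file) as script:
            subprocess.check_call([ed, "-", target], stdin=script)

    with open(source_patch) as patch:
        for line in patch:
            if not header_done and line.startswith("#"):
                continue                    # skip the initial comment header
            header_done = True
            if line.startswith("Only in"):
                continue
            m = re.match(r"diff -re (\S+)", line)
            if m:
                run_ed()                    # finish the previous file first
                target = srcdir + re.sub(r"^crt/src/", "", m.group(1))
                lines = []
            elif target is not None:
                lines.append(line)
    run_ed()                                # flush the last file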
new file mode 100644
--- /dev/null
+++ b/memory/jemalloc/build-crt.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+
+import os, sys
+
+if 'MAKEFLAGS' in os.environ:
+  del os.environ['MAKEFLAGS']
+os.chdir(sys.argv[1])
+sys.exit(os.system('nmake dll_ mt RETAIL_DLL_NAME=mozcrt19 RETAIL_LIB_NAME=msvcrt'))
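
build-crt.py exists because, as the Makefile comment puts it, nmake and make don't play well together: the MAKEFLAGS string that GNU make exports would be misread by nmake, so the script deletes it, changes into the copied CRT source directory, and runs the CRT's own makefile with mozcrt19 as the retail DLL name. The Makefile invokes it as $(PYTHON) build-crt.py $(CRT_OBJ_DIR); the snippet below expresses the same call with subprocess, as a sketch only (the cwd value is a placeholder for $(CRT_OBJ_DIR)).

import os
import subprocess

env = dict(os.environ)
env.pop("MAKEFLAGS", None)   # GNU make's exported flag string confuses nmake
subprocess.check_call(
    ["nmake", "dll_", "mt", "RETAIL_DLL_NAME=mozcrt19", "RETAIL_LIB_NAME=msvcrt"],
    cwd="./crt_obj_dir",     # placeholder for $(CRT_OBJ_DIR)
    env=env,
)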
new file mode 100755
--- /dev/null
+++ b/memory/jemalloc/crtsp1.diff
@@ -0,0 +1,165 @@
+# The Microsoft C Runtime source code to which this document refers is available
+# directly from Microsoft Corporation, under a separate license.
+# Please ensure that if you are using that source code, you have appropriate
+# rights to use it.  By providing you access to this file, Mozilla Corporation
+# and its affiliates do not purport to grant any rights in that source code. 
+# Binaries are available under separate licenses at 
+# http://www.microsoft.com/downloads/details.aspx?familyid=200b2fd9-ae1a-4a14-984d-389c36f85647&displaylang=en
+diff -re crt/src/crt0.c crt-sp1/src/crt0.c
+273c
+	/*
+	 * this used to happen in _mtinit, but we need it before malloc
+	 */
+	_init_pointers();       /* initialize global function pointers */
+
+        if ( malloc_init() )                /* initialize heap */
+.
+101a
+extern BOOL malloc_init(void);
+.
+diff -re crt/src/crt0dat.c crt-sp1/src/crt0dat.c
+789d
+778d
+diff -re crt/src/crtexe.c crt-sp1/src/crtexe.c
+333,335d
+diff -re crt/src/crtheap.c crt-sp1/src/crtheap.c
+61c
+    pv = calloc(count, size);
+.
+58,59d
+diff -re crt/src/crtlib.c crt-sp1/src/crtlib.c
+779,786d
+416d
+400a
+		malloc_shutdown();
+
+.
+359d
+340d
+310,311d
+300d
+287c
+            /*
+             * this used to happen in _mtinit, but we need it before malloc
+             */
+            _init_pointers();       /* initialize global function pointers */
+
+            if ( malloc_init() )   /* initialize heap */
+.
+43a
+extern BOOL malloc_init(void);
+extern void malloc_shutdown(void);
+
+.
+diff -re crt/src/dllcrt0.c crt-sp1/src/dllcrt0.c
+236,237d
+183d
+173d
+158d
+153,155d
+diff -re crt/src/intel/_sample_.def crt-sp1/src/intel/_sample_.def
+1208d
+723,724d
+336,341d
+324d
+313,314d
+81d
+9c
+LIBRARY MOZCRT19
+.
+diff -re crt/src/internal.h crt-sp1/src/internal.h
+407a
+#endif
+.
+403a
+#if 0
+.
+diff -re crt/src/makefile crt-sp1/src/makefile
+1745a
+$(DEFFILE_DIR)\$(RETAIL_LIB_NAME).def : $(DEFFILE_DIR)\_sample_.def
+	copy $** $@
+.
+1228,1230d
+754c
+dll_ :: $(OBJROOT) $(OBJCPUDIR) $(OBJDIR_DLL) $(OBJDIR_DLL)\$(PURE_OBJ_DIR) \
+   	$(OBJDIR_DLL)\$(CPP_OBJ_DIR) \
+   	$(RELDIR_CPU) $(PDBDIR_CPU_DLL) $(MAKE_DIRS_DLL)
+.
+334c
+CC_OPTS_BASE=-c -nologo -Zlp8 -W3 -GFy -DWIND32
+.
+307,309c
+LINKER=link
+LINKLIB=link -lib
+LINKIMPLIB=link -lib
+.
+302,304c
+LINKER=link -nologo
+LINKLIB=link -lib -nologo
+LINKIMPLIB=link -lib -nologo
+.
+209d
+21,22c
+RETAIL_DLL_NAME=MOZCRT19
+RETAIL_LIB_NAME=msvcrt
+.
+diff -re crt/src/makefile.inc crt-sp1/src/makefile.inc
+1623a
+
+$(OBJDIR)\unhandld.obj: $(PREOBJDIR)\unhandld.obj
+        copy $(PREOBJDIR)\unhandld.obj $@
+.
+618d
+402d
+342,353c
+        $(OBJDIR)\jemalloc.obj \
+.
+334,335d
+329,330d
+327d
+323d
+320d
+diff -re crt/src/makefile.sub crt-sp1/src/makefile.sub
+69c
+CFLAGS=$(CFLAGS) -O2 -DMOZ_MEMORY=1 -DMOZ_MEMORY_WINDOWS=1
+.
+diff -re crt/src/malloc.h crt-sp1/src/malloc.h
+189a
+#endif
+.
+177a
+
+#if 0
+.
+161d
+83a
+#endif
+.
+70a
+#if 0
+.
+diff -re crt/src/mlock.c crt-sp1/src/mlock.c
+274c
+#endif
+.
+262a
+#if 0
+.
+diff -re crt/src/new.cpp crt-sp1/src/new.cpp
+60d
+52,55d
+37,38c
+        break;
+.
+diff -re crt/src/nothrownew.cpp crt-sp1/src/nothrownew.cpp
+37a
+#endif
+.
+31a
+#if 1
+        break;
+#else
+
+.
+diff -re crt/src/tidtable.c crt-sp1/src/tidtable.c
+393,394d
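
crtsp1.diff is not a unified diff: it is ed-script output (diff -re), which is why the tree also carries ed.exe and the apply-ed-patches.pl driver. Each per-file section is a list of ed commands — an address or range followed by c (change), a (append) or d (delete), with replacement text terminated by a lone "." — emitted from the bottom of the file upward so earlier edits do not shift later line numbers. The snippet below is a tiny, self-contained illustration of applying such a script with ed; it assumes an ed binary on PATH and has nothing to do with the CRT sources themselves.

import subprocess

with open("hello.txt", "w") as f:
    f.write("line one\nline two\nline three\n")

# "2c" replaces line 2 with the text up to the lone "."; "w" writes the buffer back.
ed_script = "2c\nline two (patched)\n.\nw\n"
subprocess.run(["ed", "-", "hello.txt"], input=ed_script, text=True, check=True)

with open("hello.txt") as f:
    print(f.read())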
new file mode 100755
index 0000000000000000000000000000000000000000..fc812ea2dbbfe486643cf38a8f6ee68886e24819
GIT binary patch
literal 116736
[base85-encoded binary literal for memory/jemalloc/ed.exe (116736 bytes) omitted; it is truncated in this capture and carries no readable content]
zuM5!hv?Sah_Gvm^v%2hpPVEWZsWva4^Y6f;S9Ktxr4H=84+E%LaQx1&XYq3Copf4a
zr(BeY5}B#arvqufwFC7t13Qr1T<fHdu?{%npH(4n0wA26h3gbK3!k%}k~7le`o#$V
zlSa8xe0x`@yr<-t2NmwKl~+PTT~aG+rOvtsMaVhKjcW8Xd3uhz_Y;Cyt-A$xIq6%b
z3W_O4W`|D%BE?$xX#hdCK>ps^<hk(HdUibJa1asMS4c1;B6XaRJp6!{OS79BfK!XM
z>A^&Om{|^|T{6f!p7I*_jhOarW+SNFn8mTwYqX~$7wdx91&gR}HOPlRnvSDxnhsug
z-Bbt66g9+|fVr8Wr$fI{Ysmp(oooPls3LkWhsjIBYtq?`H_faCRV@+2ldRvq^e_$R
zVJ&L~N#fo}LjGOyvN+RjIX^yhj~6u%5!5+t9wCn3O9;W5xh7R4&R|p#C3Aq{J++4j
zFAvEDL>7K{`I`x2W^>(Wkv2Za`L%BR7vynZMl;<=ZmApnawz66pA2WjUn`FU$@TKC
znvq^2*#$&$LLQg4IOW<z!Vub2!HA?}3(|nk{X`nIe8lgEfwh?3>rM7h=WM1}vHO;k
z3C#3K8m`l4TM`!wbp$m5mn|V-OrdH}n(T_E7`knN+F+EToMU$*)AKQSCJ9J+nVw+5
zQl8b~5i<Kpm}(tZdVe$4fynz+iE)-QlNoj$C?|*cSO%oq9BA`x!75NUiy2n7grO|X
zz{Tye!|qpAX7euh$18o6<pQ8!_a_1OZTa$_lPD2<Vq9%N{<De%gbQ!s%IaK8i7;8X
z1x))bCp;eR#4g$#ET$=n5Wor+<Ig9?VgsEXd(A)+gvBd!$Zs3hu*z(#K&gQaiM)+M
zMpZZzebPXne~%Si$rP^DtiH9ZhV`sW1<SbwZvn7JLpR7j0dj9;(uY6}si1G8<rF~w
zT^A-^g!H7{zetiKK0^TlWeXU4fu3w5xPcmul2@QifrVW&xmba0QCw98Gh#X=mhBmB
zI!NM0#td}d#hS-O5pXZO4s4BaOpqT!8)0fQVVHX8T<ACQRD&i#EC`bqh4b6rt|dJO
za7?VM8IFm`e*nk)=x)v<u)b?@%+}L<SQVA{gE;1Z=DN`$$FxlSwQl?u9P?k(n(0Q1
z9P`o?m1AP|YjDi0-@-9zuBYb!JrOwq=j4L>9Vbw5FWGXEi?hDs#v9DvmC*#Eafyp!
z1tFy<yBfGXs{s@;V1J-Bk@pBK=}bS1w;v?U>>}!B7v5sZY@}&7YFKh@;87WkyF#z}
zwAK#1n0H}9O$h73@*CM^Aach@&XZB=2|_!2u;AU;3*_0Zk7h7cy^2h#P}SNxx@J(W
zas7J+<BcAVltW*EU*4hs|L*L!r#~1o^w92CWrZ;BpAg3`Qv^cSPNev1*X@`T=iZH<
z$KZwIBO~1vYTX+#RBVlVwdN0l?1S46A^|Mu_BYNK95FLg_8&>6RmcWKP*aUScbx5j
zwXVsm;c`T@IQ*h|<S=iXH>Rok*zP0#`(NDu3Voue56HZ^K5y#borar$5ruEuGxq*)
zxh<VM+IsPXytT5vXXWKqF`MN{dYacrI*{ah9BHMaQ^9Ab_nKQN&j3_bnkuRS+DF$q
za?e6{E08g}gOc182HOd@9+9oWs---q+D}~vr(Wff<6ED0-YJ%C$z|Z%T5`Mtf<hB6
ztO9s8$-!MZ-lhXe>PbaZnNb^t6;<9Wg*)1o{6IKx2`vOtZwITF&NaocO5)i@l+P(A
zL_XXuR*!h0!0<Y(AIStt>0@e(pB!sMPmj&+e|g~r6K}1bBtDIogjnk*F3qymi&9tk
zXSrmp4_!XqTHktUDv0dMdx8L8+>zYveEiAe+NzB-&vn-N5UC^lv%K|2q!stEjm#Ts
zBd##+eRNZ&yVhFYZu!sF`mm+jto3au(4X{)wZ5%+<3_sY^F3?*P0I&b>pL&qYOTLP
zdItVk-U06f-LUZn>R<=+O1HyWAF*uBM%?LAiV1ir(qG`8<<PuQdfHk)M6^E$>2y22
zwLVp}jiiSx(Vhhl8)<(#)kFK+!|~|3XhUjZk5B&_{vBQd|16(799A}ABOU-2ogJx)
zJkb`*9+pR0>!*q~PZPe~E!yjvct|ltp`?P1QB?Cm_O`s;#%LPAd}SzAP$=4~aK~UT
z{4?A}S$C_C%UchB9HH^;@Mo|CG4%CW{Q7r%wEqbUzJ^m-H+(03%*BuRxP093e^Uyc
zR-nh_Hw=G`Qtrhu{tN5<boe?-Igfeh@yXlavHvI3!}j8e7;NAn^iLZ8Fi`qNBV`ZA
z?D)RC+wgsq(iXvqX0G8gAX%A-8NS8>6!y{feItKgE^h3Ix!%KELorumYaB==M%+7s
z&c)vQD#Kin(-`}m*?2X*G5cQx&};~NawofnWvldQs=&+v_p5Syp~m0PpA(zGYJL4V
zK~jRHAs+Yef>cL#JZ1_E7-`G(+9cYv!fTf!$>4*RqOy~VKvu1$Vb8<mYP3nciX+7@
z%S4ceA9Vd?67~><TGvzX$UKh2IkSCRVEYGRSy~TtY7>)kq_)-i9y!%%J#>Ox=eh?Q
zMoGr?2#8&<4DXl1)ax+EXA7FkCfas0f=KBwT-8^V)!aLk`l;aM2(D)kfvR1XCn66w
zRk%NLotdcmtGUm(j!bNhhqLt~jCgmRG17DjR$Ylq#OPZeAsH@KyO8VL>iXwIKgQ{3
z=2}mnKY*{!g7BTf7C0E^o0Itt6{;B{)D_UmGEiOV()$;y5rWWM4gI#a{uBflaa?EC
z!$|(@>P?Lk=P7ZmK?M8E)d2_E=Tq_~eA7D~X0^-Kj;Cu2y`V1edQ#A|jdbXd2e8@I
zYl>>_Cy4?N5l)?#KWD+BqUhYh!UcuVz4PV_jqZ&n2z~vJV`<MpGWJ24h3;|9G1B3f
zmf8+%m%%5BwlE|%6`1AU6X`^efw|A)CV_QxGU9vU1cquQ46Y7-u{gUX7VX=t{XVYM
z*&J`MgVorz-`b_lzq3nkWU(%t&c3Qk`O=)cyaDhhOqBA*Nt2{mlDQ~*LE3_%bOfq!
zHGM-T&P@VccETj83>&^OiD|OqCaI!K7hLtZ21^2&HCt6ttNiy@G<c}>UYdASy>%0{
z{qqtgg~s!JbG=;6_mo-$9)dU!fL~AgeE`fI&j9$JNml{TU*7WdwMd&+^zg{S+}ufc
z2QB%!<P6AJxM*HJd>)rQa*{AIB^w^$Um-OOURC~Ba`F?V2-g~@$H|icDyb8F_K`<T
zW^g$)ftfq1?Wbz^;U&>F6s2k4R;F++O}lEwW6an-o*cPK>IR>jSvqo!8$V7Es`bG+
z@@Lg)!8$?JZJqokryOGrx3o_Kn$1Qz)@d`o$1LpOoF^MNh>1b6{-A-~hPT7m>m(yK
z2Z|M4eT^G}nZ^VXkZ|*4FY<8cx7{PPMRqxmitAnLdLol{0)RzZKQ!FGK?zcO*D3U>
zt8Xhl<pOn}!Bhd&&SVaq-;@88F9PU&y>pt=8UVC25VVUYFy(<5;4<<SU?Cy+zCwYV
zNg_F7QC@HnRWKKAN_Z6>JTK<Em+GOhh$!AD|J73<M3}Cp8F*lgfWh9t1`FCi+d8a+
z8FXC_V`SAD*H@Xi44EqU25CHRB93p6#`31v25B~JLd2udNPPD)$k?|ale~CbuEyi4
zFoHBd@?>~VGiJbBCj2JAZxZfc@X1DVA1aA=XJCitPG-ALcZ^}KfMra)#vC=R5Oq-x
z<O%Y<dih?1<6eWj3`PAkQyqD)Ws`eLn5rV%MtR#v3702CK-Ns2F~c|GJ-!(u<q7DO
zmcXx^PNIx-!b<NNG)n7}JTh9IVk8%}COU`&jhj3ap@i2OGK#bX%ADFqo)k;XOT2lX
zjNfMXWQ>z1#gp$6%b4Js@xu&eeMb}7lkAGt5ec|OQ!^qBmpu7{a1>VdTB|d~bvec<
z;<{oWq3KD^ml*{u!NYQ793asIbck7*3`M64q<BylV4&8T97LDYOo3he^ER6Xp0VUJ
zWTr@aH)S%He&ikA^Vv9?^I<lf?EW>x!g-_EW!${?H9>}CkP^`~9WxhUW<H=Wqbo_x
z%wSRz+zK-{a+lIY{xyq~nNN!NTqSO>fszd+<j%@A5<6jFC~?z12y~#7p&q(jLknbL
zEv6gR(OV!^?x8d^&;hG(Cq`tED2i2>6SGL{dO;j{#yF&;2&Godn*({%XahOSiHm+3
zQ~p3<2S9GmWK1UL7ZS%F9);o<i8;z9l+Yl{zj&7Mqcr*K5I2)fHB}J50U<UU+KFrU
zZy1j!(}(dqMt%m_6iKLnSR_&?`qO0^lPRDMU?3W?`MvZrAsr268D!y9w>z8RKsYm<
z#`<uc7c*7P)4C4JOWj<*Tp+G<ZwBK!)-Boh+(-TpG*Rv|n!e!mzXwEIje=sEZk#ty
z=*oFBZr(E99LISx%tF@ngK7U5A21aG5yXS)jcx*XUNMO?<b+sOGQ*b{^YfDg4g{|T
z62{Cm##}dsfu|b5^XM1`o;npgH@fzoC-B^P7Xy#)IRRK088W|2j?(~@H4LXw(~vs?
zU!KPAtdy6cw@Hp{qRH#%-a!7ju||iD@;VsN0a0KD6d;%RV)Mn8-N2eNhT;079_Al~
z@olo1uY@CqSZ*}A+Ogm^322)`JqvNhH8YZC3Nt68^EFxOAT!$Ppg@9yIa<V%yDn4;
z(v1>^B#}c0uOf;2s=jl;8)`>WUz&@I<D*|jal2PEnbCpI%@epAOGirrXc7SDHs~+W
zkK2FJRhla&HVQ(QVI&$o(dwldyzr{gY$FRox*aWj)$YAa{)!$tPzdnfp|fJ31tB{?
z_JBx%9DKq#+_I2sq+y5xuZ~3Q4O-a7vRpTmg~<o8BpMN-ls@U@&z5zt+&NH+^{pRP
zJIL`sI`83CDi_o?(2T}({Xi@;J_rH#LOL<kLFqL-#J+O|9@cQ(cNde`s)_vZ^3<^-
z@luo_o`LWW(BYreijuL*SyYX*A*S9rSLgna;AdGLG*GmUU}uOb?gi?3DHSit>H?L*
znA<@MrL8D<zTf^7!f05hnN=xx1xs5HA~8PHD_P7{DF?-4(<$6DTIqP97=2z>EJpv7
zU8ZxU{h^iSwqn`S1&~mhX>4mLhVtgnbZbqxwK00xmw1_ITRgummRHIBv4zs^6%<7z
zj0i|OZ*?=rio3-!L-|?gplzEpb|t!LlwYz|hs)*If_%C1a!>hr2_9V~7`N;R?1*U_
zntGqYo@o~0!D#@?s#K{B3r$kG%e)pcl@qfZf&+8C4}f9!SeJ)jj}$ZvMi0#iz#oco
zhP|8xWyX$RM@X4#Ij$F_!Sa59kAV~$^j|9G@tq6~CPd;z;4(%FLz{fDaj`n!5!Gm$
zPi8jfS>#6??K+i$c3woO*;scg`8@gxlQ~_242!K>iv1Oi$)*b8$az5|4OxPiT@sHD
zPSfGUcgBw1!Y~Ez+Qb}|#faibY?lrb@P_VzqODC3bAb<nU%dNOEbC!ADB8|JcUSWD
z&vFIwy?_>*E0FOidKe(udcy-|t!7!IZtz*^1n?M1bpVA!VhALfPgl1>1tXfBki^Cb
zFo4b}&|+y1RA*|)75Y<w?sv&VG3tQYz9ZOLrFX9MG&MC{tnIbA<cJ0Slaz~_EdXnC
ztI8AM)~X<3xAkCf%nlIW!()_^$`Gg_@xZbvsEVwj`gGqB>sUU5nUnUV5yuzH+S|63
z?hx%=JQNsK1*U4`%^IQgxnv6hoEd_kg6CNUt{y4Kylz6iP~kr45>qsj&Kcf+v23Ky
zR$=ZBFo{N(+=zhcTslJ-nH~tK6F3D58dj4AN?#`BNIQ}Si1z23FnYyE40H1_q_2bd
zvOf-K?*9<|Q%yc|cQ1Ux8oS_p*|VCogFc);kL3Ao<N3~De@-a3vPkd@H>T<=mj_}*
z9}I-qsDY+5oi=jeDcxEfgm*9Coxx&R3iM61;Z5L(Qy=1QPH7d3Q-Mw4wIA42Pi(52
zcj%(6lz*Y&|AZ-c$)InJ2Y8MQESric*(^o0jiCoX$`Ju&2B|q>SPt_MrKfgEQXPN0
zM&I$fUsuzAY4C$f62%Vbp~TZKDBi#>I}C;41D@Of>@Qr|(hzX|{VIEC%t?<YS$u9Y
za160*bAUf9`b)~&SvV09=m(}fYy?F$GXE@Zty)<fG!SsfTmK~V-@%m;nNAd25!c*_
zvlc9W19jXOV84O-M72Ka7K5qco+_;J<VtURDF5~c2DYDb&G!5JTKfZ=X+Ou?zF&R(
z{^-SYb=e<KpW44)t1m}t=Tjep&S~#?FkCHB42f2EfjB#-V~Ag-(1Zi0O(ui%^1fmj
zuyAN5LhTRKAMUL`LOy%Xz0`!;btFkOTbt_KHLlsX8(U?nb8UxoIa2j^;p-6{?gNQo
zK6fKUnE>D0-ARl-*f~w-vn*Y`o~h;g2bBNf4ptuc_-`tIs>#2+(_7v*d0}(a`keQt
z_ywkes)se{*T-2b8o37FUO!HEM(Ekv_~0GHx6zNY%5`EGQ=g9v6PS;01o=>N|7lZ2
z)x)`k`MG(62a%EUu3RZ|LH>hTv!uD>@PM+fe>W5%5g4j~S=}=i*8BN)Xtv11bD7QU
zp*vI?XJ-`$2vL*B9vOy5&7`hZcSQBaIT%JDA?eODjDhENwE0xIx0C!3#rqMhMxngQ
zK?xvWzLQNw6?7vL&3OQ$M~3pV9>Yj{Ok-ORV|dQ<<d9xR6Ibi!VGgi+1B$X)cDav{
zTAV!;)nfUwd#AsBlZ1_0rte_HUI&Y$4Z38ElgG1E?%=1qU=2sd6w@}O7j#ElRVfda
z$LK+>euNyTK@|S{4%MI<sd-*V#wgAEq)M<<_AS}lYux=!GCMyCAc-PsGu;kdY|W1Z
z`#Emn*FO&-FAMHoF;!o(rIwauShK5Yf(f3fOj5k`BaQg}L>#elpT{qvD8qM(ZUD5{
z)D4A7*YR!?M5YUuCUFPcfe!Z)Lu}VNmTfg<Gaqn&8vwtoU;^A6_^EEKauC=1H*&b?
z#1;JZuQ1D76bT`g+qVrm!B0Id_&4CFOy$tOytGju<2Y@xI0lo_7-cjaq!Q1i2%gR`
z9Oqiv7<b)2oN3~EicSG5`Aj_Ez6w1kNKYp1E7RvZBl?pJ=YDZwDY+bBGu>4Dq)o2~
zqi0eC@xaAJ)DsMB-j+?$Rm#4eMX1nwCWOHNDCFoJ>ekuOB<7o13{8kgToVk4C)vhW
z#~^HF3ckAjhLGN0vIQZEGwX^yN}Bp}h#Lv6W!=<p7T?lINV!hcchJEikHtZGGXYwv
z2tnE&a68*BKm+za1y0zIJ|)5DG$C4(Anl)t#M3%9e0(5}A1ndx^!<f5B&1k9CSu?i
zq=>1Bg0}J2Z<vbk2K5Z6tC9JtRJ(fIt}Q68pkbN|%JGXH&qxWw(MEH%N%{G9<`w|&
zG(cwYTe-4#o_PTO6YwuOAfE1|+`@&qh0*Z?;$oxI^QGLvg2H)4xzXu!=219KpYPE^
zG<y-EQ|2t3l|MUoj+T%%OPV!17mI62lcZVsb7mFJQL7T7r{>=8eYv_Vzgo1kj0N+3
za%2@Qcp$f^2%cx<vGb^t?w*vLJ~k)STv!MtQy1h*g$wc~<w`mHF$apwG0&F9Q+UxL
zfxqI(R$=O>apSUu%#?9sQ}AI@`ot;e6XC&keH%M(cHx4e1rJEt-Xa^|E&2Bs6$~2`
z7dJTifm!qC<vki3-RzO`tIH1_V2WNePnwGfg9jBgU+h=PkHzS!`T6tA^S!eaJt-aj
zvP?qs_(>B7`R;cTe~DwhF+}D&T7e?J8aNd46`NTv(8J?t^bjQ4-V(?|T*~F~+1jl_
zZr16=&uAws9I7Fw8az_L_t7fmvr|RA;u%qVJ|6>Ryo|-3Wd1nCES9Da&kemLTjJR1
zW#GIxK;XPM#+p<E4zngT3`bg%8eSBgnhvQE(Sl|_qOL0sX!Z(6C+wf~a<E)0+J=*N
zyI!>Y9K;@0+=ps#JM6rDn`oban<dw1aa?wFYJ}hzQj5_JL2GV@T60-Q-|7@02>^Bo
zWwv0cHx9Ix54>~;8~AIVvG>LBu1rENli}F85GlGYqz{x2lG+0?ABIvc`(V%kKSt8d
zPZ8W_UwM%xBuGlOSD1HOhGTAFac^GCd;``Vmd|SMhK3&trbJ-bllasdpN^2dhnF2F
zuA{<hg3R3>7>>8>V!i~-w-KL4<I{WiG!>uTz^8}t>E*hR80sq(SdK|DJ~`ouynS$&
z3GZv<re|6k_%fu4C9Cr(7z3IXTqlr+)&M^@Rp2FJ0+SGl=i@nF+|@&=hK%`asnM#b
z&1Z+2*4@sR8FQPGV%GY?)}{(&Z6nEqvB*<jy`8Z~7JmL)DCw%WO_j_>@#G_z{HB&1
z&6C}j{H&TR+Iyn=%}ZRARpI_moU)xNC@^Iuro3*g4-xI3Lr=6x%ms>)60xF>a9`va
z2sbDWQ9LfLI<>Q)*nUHSX<5qMkgcIySODt`-J<yYh1Zo)6mo=6eYa?DYPfbq<Fb*q
zfVEerMhZ$OdixB=@`FWt*>5gBc)$?&7-<ret>`1#P@asvGLUJb)BW&_ofFp$nF)`j
zQ-dCmZ&wn~Tb}re^%4;4J4D7WvRA1Mqwtkipf{J>NPQs9eA0S(y7_p6{C&9vY7y6U
ze+}C}KcWk-KOpy1j$gTUZxa)Ut~()La&9z%gk9o6*Q<qnN-jSE-Cfk)m({sTaiZT?
zz$W@%CYb2M<Pfj`NzM!A(VpMX@(`bv|1=5)Uvde(78dp{xtsuOcTro`vT~7Q+`GKl
zIBfQ)w^^nig}G4bd&Tk@*-V@9w{p2uKH0)|y#MHxRV{zV{RW22&3|7!7`>XqNoS*X
z5~Queu}Ki>E0{9J#0>mC&F8zWnePcc--TwrNBVpZ4*1RwMRkg+rqh<;?8sg%J10^_
z=hat2v5j;mOecJFZ5zb)b3mqBjm&)=&5XJx%Jjy<f6&VK4m|`(J#kfR$Lgo%+<)HC
z1z+0XRJea)5QRJiu<}UL;h#k}tB7Lmy1oZweHC+x>)IKiw2l>f7ubjSq{cczk#%$c
zMt5N2AZmy*eZC>d+t4O;Ju|<&k)qOl!>QK1V*Snd{udUKk%}sr9$#9s5BG3B1x?(v
zzKz-^=i`W$<aN1dDZ7}Jic^_TkIER@NYRvnh{{r?v$zWz&^c5&PZ#SV_~_<yUNe#!
zIXNzBw^8(tsf>}`!gt7z)UBxVNhGoG3369e7P9pw91qr*cBB1P?t}|7N)tgQey|NE
zvJDII5JB5gBoa9Aq&Y!~&dyM`<aWecFtdu!`3t0?TuCTgBt!#0iS9iI|Gz5U588+w
zoepELNl??62k=}SGgUh}{rEofjtC#X;)$Ry3SPZH`+2Abw)>Iyx#>sTVtRDoE0}=0
zU6GnL1scT4eu9wvDt>Zr#ZARbHN;EJU17%;(>DBGw(#c3<Mq%&u{_lv_heypLH}({
z8~<423CQhwq`y};bacmRo$<KTMmh70QRSx=GbVFmKb8xNu*Oltcs-2ozT7HrmMeYa
zN!%I85p8dxy;qk4Z}OoLwhFULzMTaj67A+7Z-uNCsBiK$>T~G=>szE(EBsK`Ooe*2
z!ds}qg#q>5dUbsUwLbN3Y`^-}w&nHR)J%O+zHeP8<waqp3dAoUY`j54x&{7O*4eSj
zIFpA}Y6Z2)^-5wBPPh4|n!A{)%MR|iF|wKdG*0NE2<L)u+D6S|n&E%?;c0!#5?!1$
zJSoRqN>4YUO@E1p?t_M>0=(@^motL)rz}f?Oy=(`OM(ZP4^-<yQ>>eVm5D4p-ux+~
z$C=kv>!RI1sUt>2n+mPy02<0KO;l-|l!8s{P~WJ*Pn*;RKWyR+zTMP(gU>bb1~*WH
zA70hqPkniV@qP`q@oTW5QEl*iBX7{%*nERuHD04Zv)W)`qi=(w8hslarZs3%8|>Rc
zgB_b|Q1z$b#uw7ki({DwUX|-{7U@HEEugQ<Vwv%+nyhhno)|L?L8^m7N)5;{y^Er~
z|06eQ<EM|JwIU6Vb~z+tmZKvuplHV!ydKx+n1mi8eH>%ZB`MZ13HKc19An5_802Fn
zLN^h#@l7<)aSw`uWxvb}MbYv%Vp*`QLh7U0r=vyNMBD}p6>X#WcHjv3O{IM1!|3^_
z7P=Yt2O$4Hbu9l&I+p)gonQVwf%)qMmjATC^8Y0G<VW#-54*;p3+0#YP70MK15r-k
zc}~iV@yDYR8jkXjrNsV?Jcd~xpTjO;+~L=N5Lk^dSvFUm=xN=OMu+>MjSqne<vp&~
zZ^4sTc3SLlzqB{mfnw#&&?I!>Ht0ex=t4wk5Z)}8YTIW1345ww<c#W|oDsO^aEv{~
z!Ji5K2D!iTCdA{fy9zhPx}jZ-8jbUB^nD{W+DG2($1kz{LC}78+&b`SpJE@WZ7YFo
z?+<Ld2HVEVB3RpNd;7ILgtcvyHwVanztVoM-`{?DquPEvwjajYhlTQ^o2mWi-_!mh
zjlZjXMpZyg8R)a)XoK;r9Mdscw9zpguYBs`n2xsISjTk4m^jCD^cabkchF{-Xgf;x
zg&_V;<Mffm=_BL+Tl$y=z{v*SWckrY4*7%n(nmCR62g!fk#roDZG2?$co}qQLgr0)
z#37^7)$<-CkF{*Jp19c$TUy_wlE=#bOZo`+Z}pj8|3ms%b(3G)oqGMB(+9&(?8SO^
zb4fJLN0@&<oY5+sX#ND)e&X+^4_-d78x?@<$|7$(P=0<9eYgUbzLMVR(zl}b)k~kX
zx4QI28Od{PBG2yU$h9c&d;yu~X*-TMI^VHf7|o;O%n0m6gOPi{yMlNpB%e++8TmFD
z-VK1A_yYM{cfS6bD(nYIy$ru6qgOpzCn3uM8<UfRd6Q<R>&0j3M5m4G;~-at+bbm6
z1o#5fJXfiaT4rLu&$4T9J!Mq&2q?#5e~|Xz-Bsn|#cjtVqpL@>z$mpEcoEsV$;TDC
zW9&t{4L2w5mwH(idtyNjW|5)L?a?)|Czob}Q{7*fXhioi|4qCP(Y9MUWw|_Da#=3N
znzy=t@P&8adClMZ?r!pX)#A~!L#z?VOQH9-M6*rLYS-sInQ!A?*qt1H=0BRUhi_J$
z3N<7NsGnqUqpw#XUeYA>XH2mNNO*#I8jykfO+uxV0;JH>hhNmRqRH9sTR*?2!8wVx
zQ7?BFeDp!Q*<RXS9VCp9JG-~?`=W3@qS-kuPw)4>N)c9~=Q920>Afc4{cXQ@{l*>3
zUg-a2hkwQh{}zT9P5dgqAvwYLWrtS}8id=<^$yw}@V+PDecD5=cK`0@Ex$M5`@Vqp
zDFOK>2fR-Rc%K^ZJ~7}uJK%j%!25xK_P-8z&kD#NH0{9B$6x%i!$01H_q-8XbOG;i
z!3=H{>}RBI;?cIxJiW8S-;TofG3zG?0q;Qp?>8BqzA{I5W4P^<LFhN2rsJT1`i^fq
z@#v+EJ3Bo2IOcz|?a_eu9|PWx1iT*$c)wBbuRki_-4xLNPXXm`2>9MU;JsbIdxwDc
zjsfps0q<hKdw9V6xq$xk3V>hdfc#wp-a7@n2j&m_9)gEneBOft=C3xOKf40ny9oaE
z=>y6i4#@vQ!27IAQ%c*!*LHaGVWDFDzL0-z?)$-s%D68({L2k>-t$h|)t&wozvBP3
zcm45ARd@R&w9wK52^NL6YE;S(1=b=8<<}D0Kw&LJAmBE!r0s2+ktSVo!>?D~RRJZ7
zK!pvQqH8BQEQ;b#i$g7fEg++X7Aerp0tJh7Yh?%(`H=TH=O%6Ph3fY1kN4gCddtH-
z&pFTep6~gdbMC#lNp7SX?Ye19_eRxg86~|EH*5`_zeoRcbkm49&H9aq&#<qzHLm>a
z#jj`NYqmZEKKKvE!%2B`iYd3x2eBIOA5lLsdaH`?K7?xs??rfT!ut}gCHxk`8R3ur
zC3HL(*`kQPi0FHg^6rGI36CH=jPM?WcOyKK@NmK#AAHI(VSKgr@o4Zd&;Pvb<mNlZ
z1@Tk&#{Ti^&DGkqI#!f9tR~0u-l;GC8pKbN@-wBXllKMd^ZFb<zr2(8QH9RO1j36a
zT^ze1`jEDy8tu>TSzb%#Bc1QP$^0%=rCob9X#V|)=ywzT3E>rl-$K@3U&2fFMrdwX
zSgn2TOU!?}^OBzQM@9Tw`__i-JKl@cyt4`RN8R3I0?{9~esyWwk7G4&Td^L7f4G#c
z=O;=16@=eQ>>b#3_q+Mi<21k9g>wCrLjC=1cQ2bAr&%Mx{(XlBgolOpr#Io<3GYL=
zhP2n4@Oua!Ncb%zJ~4#%Bb<@;?;`f@CR|I#D}t1-B=LKS@NPuUh`sKlypoJp4^pls
zJdE&tbiDR_{)dgNhqTY{=j&mODxB!Y5uQwVFBZxtO&dMF>Ah<0hs$8kI{9xZ+CS<a
ziMMY1#rWX)qdzb>$)0*h`}hOO`D6fTPt$hbH;)>swX0%K{<ifi{kyw711zzcUGK6#
ztGb)tP&Z!wK;DAm(R=>m7vYnMKYK`gs|f#?@Q(=JOZXAOkAYte;#$(5;*5gcWAduC
z8zNAi-)l!tQm!WacEY<6ek<W6KbEgN_hGg6z3KdX8*ytNQm!NX@rIv%vU`2!@%1Hg
z{L_;7-9h*e!iN&xpYTt}{EQ>ztH}EC5dJZ#UqyH&;kOa{x6<PW;RomF;x9cKr}6GX
z{fXm-4j|=4uMgWaa8$MSokEnSe_CJi`iOPGydTlW6Mj44cM{&0@Wj7gEe&43I#Pa7
zRTEsy7|D3EcA6)c$B_DMLt<w}H67Bv#mn;}M;GeHOzj!OBUz}v#`@LK;PXo?u~$~R
zV6<^e=k;tfDUTw&@oen-+g>@OeSfrR65Gw#ezKnC&!`{$$)wKX!{ZNsb$8T|8ts->
zS;_urU06?IFOrVeJx71h1M^|qaK)dZ$oBsj`C-uY7~v&R&zj2~z<B;#8PEM>K9xLt
z?A6;QS8IQLO<BL}-ofTz|DGWJ{gTXI^{#K8|FSg}@lp6{QZ7wyQoS~?Gk#B#^0B0U
zV+dbP^c7=wy;63v^Ll+S8P5p9HGh4m`g#_Q?@z-2ZfOrzEcorkyG)0)WjK*qRYQNs
zdXd)?g!d)(!U&(gAawrpAmyWe7Ftdy3n%((L@<cNLqqh5V_nhl_a4%|eirdaT{NMF
zTrW)|^>^zZDtJ&nq%9i7*H7eCRqy-cb1`w6XQnIj#Y^JdpR^Z6cr@XI2=7kgH6iM=
zwL$00iuF&;mS4pA!_3EL>ODnIUpYE+X0^8XD%zi<v1`crwj1Fp!ut}gCR`hK^Vcf@
zZXnkKy-5596TXgIpYJBG2Q9Zo*S_(m8tpG9;{3J$v_1T@(Dv)e`Snx6j}o3puTRPA
zss4oDLHGc|`w-rj@Vf|C5gtW&FT&M?-${5F;n9SL6CO`^cfxNWTtoQXghvv74}E<~
z`0a#i3GYXE9O1VTt|Pn$;jx6@MtBV2-3X7Mug^aSJ^sH;u8+(+S%rZ=-wywK&bn^(
zJ{0CvE#DoXdV(>%Ki~PqypDUUX3yD^XF4h8%UdE!2fk%sUd_rQ?l4cwygfdRx1Y$F
z7Pfj)*h@UQa=-ihg)^SZirzEtTB&9m|A>=M9?EY_yKk!s8>rc2V9h68*L~i3*SHye
zTh<rz0vG<sefh$ly}e?POsZJ9U;gAhp1XP>(|gmO2!rt}7s{3AhXw3=`{~{iAGI1R
zTJnERi|6r16GWr81RBllY&59BQ$Y<@iv~TH_1;I&-bu9gzUcq(^r@4kq@>Kff8=mu
z+VEM^(x(ldGCOVN@QkU`ry$esPif5o1M`_gaBg^>uIKDENOtm9@}BsC-g(M%EBIq0
zy75oKmFg?`rw~)&IsZn=EAS#Erc@8=nH67^GE96ABc*?-sgesK5?|mu<vV97LrT;5
zF!W33YUOby@%(%L!jaB+J?xp+E?o4uuZds(p4aYmi)a*A8OsgWN;NRM+Ff2{+{I~?
zv4ZbB#`@A2-kx!<lDkjIZ7A(8at|uGHA-${X}rj-6Li!-kFlv#ha8Wwxpbh|x0DVM
z`?k_yVt=W5brk<VUd8g3u-ugu?ktQK4@vVY;r{nC9^r4YlwVh#127*&kjZhL+j(bJ
zr-Z#!A@&h3HS%A+PVhf3pkDcfy5__v<!im~Rs`M8hduszNY9=NJDdC9>`vZ)GMC@U
zOV7>`6KdA-7L(T<5%m7S%8T0#ZuNG{s`T)3)seI%ts-)PKm|e*2*y7Ug4M=y?$fV&
z@<7y!K%8_(B^_BUwe!y?hzHFz^H)*HZJrbE&o6vV=9lN}YI8Y%8cEX0KJOa&TVMQf
zE*)R0PVX7`N@9}endnwfe~KOpFE_|q-jz$s$Bl7EdFruH_V0<Ty>OwqHN4B;)eCG7
zc~0SIGJ!w;+NpW`Jl`*mWW&WTf}9_^8);9Y=b$&js}J)vD#sJ?me(+^Ii||k$X)Pe
zM0n>#R~efGG2$rWF*kTV^Hh2=>fM>i^$W*&_IXZRx$H65ug+-3S{=}#tw*4}=0<Nu
zlgHfb$!PH!o4w|i<$H&9{QezgRg`$+E$Ij>RC?OI=7uYmQS-{>3)?U)lIJxnjPqnP
zdX_bLxveG)mB-w2;qActtM3~dJbRY^{Z4nnxMdBC!js+g&M2?Be(1%xWepu3J%#G$
z`FAmkVZL04daI5Pg>!Yar#)?b-pQz|ZC-V}_FUvgXqZ#p!K<}jAn$XfG_uOmzUufD
zcSEgI?>TnmoX2=D`Rbyv$yenOtBuY4Lx$o(1<=C6ea3?Wj$z8d#A%plj{L}DZmexR
zKZbwy(^#KuJh<?HH{;;YFXPZm#zFs)Ea-SX#53;_;*Y?4-0e!lmtDgccq*%m{KLdp
z#4Td6WHh-=7=U~I15o9;oV4Gwtl5j|MiFmRH@A3~UF$gBAl|){c<)EVzeL3AMdWvT
zjZNIbg^J`83wtK{F1+Lay{Pv;THbaihSA+Gxpq;HBww<-$@!e}D>myfJ{XjpJf1^$
zdomhCEE^HiW7s!%PSiGY9$5w3wdZ(N?H59DMx>h~_i*N^zjDP>>#3@hny#EnKCwuf
ze4@|F2N0@p#-=_`^26(l^Mg9R^yf=hRU7*L-D1KYBU~oDknk0R+Xycrd^6#%5x$A=
z+2sAk5>nno_^$}RMmQt(1`^(%@KJ=P5&kgYJ7~N}xrT5P;qiozA-tW~4<qGz!ea=}
zAv~J!y~O@58egI>COnmJ8{v(_UOFk439ljgL{dJ4@W%*$mhcv0Z#*g25ne&`5v07F
zl)p*W7t!w{<@JPbA$&fm|1{w%2){(v6Dc1?cr(#ILdsVWo=<oY;j@XoeMiXrC*?7O
zw-J3K;dO-95I&y9mz1{<9!BREJ$?}VCBoYYPb2j!NO?Ko>2$r3az@G<NV%Srn+V@a
z^v#4v6Ma140}0m=zJu6HrSq50FT#fqK8&t6!uu0GitsUnKTP;R;_sV;HxXV!_-4X4
zg>V)_?mrM-LU<|RHH04~{0QNv2tQ5u8N!<gXZ=F?SyFzE@Mgl#6aEF^7YP3w;jM(X
z5&k9N7YYA6;g<;iitx*XUm?7m@UIEKO87T~Un9Ke#WSlGKTxgR#_w;A=$rNQ<%{d%
zZm-q8)SFezShDqS+wncIkKA@x`|A(b{EZvGnDN<+wnNVAN3>79jPm@^)tc3(RNjmu
z+LiOz{D|=Nc@^Y-9ld`?Ki{#3@V$h8On4>XJ;?n(dOxr`;VQx-3D=)}=FKz9VgDAU
zd*j%~Ed98P-?W{H)2vv<-|v~zUPSmW2hW`xXE>r={v6_Qynl_C@F%bQ^e;=NfWM&d
zFq67@Qv4C^2g6v&{w)(q*(6)E=i?gfZ@B(b^C`SOn0J0r&h@|-ruf|lw#^`%6Y1}_
z|5saaUp*_TG<0yy?fTI21j5JbSsRcrAv8a*4ff}kR2alt@x#CapCKs(?hmTx0+I>W
z2k|LDx}R_M+oRj9gcm;*>eri3>seY5Zz~QhH~mg8=6zu9*Zf}3rc5{Mq#WHs$;qEg
zp-UVwGPDiN7CjpPi~>wR0Z;^N1YQOH2<!tIfs26VRp<a2SPRtjU|nu1KYr-vHHn6h
za>GsZ!CT{EgFs;W&NZ#SF-y0Nwi3DpY<_>d`1ji$tLdCE?I%@i(-9Rb4$9lrpki+#
zFa2<5UJLSUwJIhD<%Ru4#eRjnRYyAW;*qzd?q;i0*~(G252z2)v>@FML?2bLcpwqb
z1G9k~pb&TrC<aP^9l&0o4rm720nMkV2j~G4kORm-F|Y;L1=IjdKsyj~40=Ehn1Fmh
z28w`UU=y$d*bCGFO+Z*Z>H)g?PTz+iJqAbxOu!>RJ|F`{LAs}r-UPe`>;et~jX(>~
z4ul;?AAopZ2rwQ<10Df#fI{FgU<FVNYy!3bJAhrlKA;Y01e$?M06T%00x^IN7y={$
zV}Mk^1Uv%d12RwqJPkYxYzAHf%7MK=4bT8I1D61Xc@quv2L=L(z<3}Hm<`MaY`|h*
z1yIcA8L;JKXH0e=y$@&z%KLtPd_O<BnjcT7W=eS}Hit|4k+cN-dCq4PN_J5G(;F{y
z{*YU8E}3Joxy7$>;M{1)$uY=|0;Hpu!6{9)+9X4v)n%AIi~mUXASMi^S?oDB34A<D
zaY`0hO0_zrY}w&lf_ka`JW-YLYm`)Jp*35Y>hM31iu=Nhi><QB>ByE`{2JyBHrZ<T
zH@sb>MLX<mk)CXIy5xX4_JPQoE-m(F)v(D9XMsh|fGJC!qB+X=uZ*Nyvz!(u?|TD(
z${{z;pKu7p-75NHBnCD_<T<SiCH`nE0~^V?XhN2q)-1OyxrFTp1Nl?!xuRiQ@l0CM
z5fU~!P(I7LlxK_$aPh<~#wG~vw%GztT!hsWo|kTM$;vOxuqlE&@~qhwn{a6sT(IW}
z2QqkBh9k#q6Cw6nU~EQbzTymWf5!88m@%wQCDg1SkO!V7S@<O;`%j)XE8pRiJDqw)
z<SlW@Qo(F1GF+k$NI}1am+|yZ9QNQia9i7W*&O9ckn=Z!IG<$O1$Wro1;I59?3jvs
zW3bsA+3e3E%?F3Ep6YamCC5LO27bFF-GQ)(!V&5Ww@tQAS|Urc9doTYQcAuB;bW<4
zlO!$R{m3y{5%ss#vm_<92oP+3%z03}KjYkN&lhpaF)q%Q#KY3iV_D+%)ZNT>__ctC
zfcI%`H=9d#W)~D%4eTdMx{&)8&N8#}3miFIGni%ONphi;llxev)n$?8C1TBF7G{gt
zjk$od151+YXR}P$$#L@mF^jYn4KS9?GTj9h*8;yyZnVH+wOgEdE|v&hkd>M3c4ih>
z77N>%g_)U>eWBIKqd!sMxdo!vxONA3Nphy8urO0rrXzE(OpD7UIc21mz<&uVQyf2#
z_PfcRWto!GX?O7S?^$MUj^F7rq^-6rcP`Sei?n2u?0Is&^}$hG`!4cqE~Wf{h)1Tw
zm7CwOR?Z_Xjx5G5@c3ja9zCkc#H3Q3$V8gAl&3<>{0<kXco%-gL&H{xw9>ohRh=>G
zr?zBaGP8I!4n+R`+@;2>;gbv7J?d<mWO1TTjNPkN6vNfI9sNyI=jPhnu6&*zsm|r&
z=-`WIj5=5NldR4y#DbK0zs4f%mb2MJbuNy3IqZjgFY!5;>k8DHqRw69w93NvkJK=S
z5#s5YYD^1TVZH_FpQ~*Sd!EuSr@ElS*Tw1rsQ}Z9r<d|He_9K+U#L4=U{9#w0vcMS
zh710RYt#_h_)?MBTAp60j6<>7WzFMH55%@!?dmYHLG5y7TkOK#Mm3+(7SYbLBAw5d
z0sD<wnf5&Wyjq#|JpBSsOUhdMEl=B&IbF)rcBR7al(eYu5>FRqIr*gC%+vk`Ugl|6
zp{V<cNXw$#S9w|%rw+DF?Ggtian4gC)3Hdb(YKLy7Z!?iG)r+PXQ8Qfm&~8k$ncsr
z#bLHv?N-@pu_=dUwu8;!$MMcAeqHZ}A$5+yH1+<GBU01Txi$uQ|85BU_Lg|iFSa4b
z`!*D~7G^tTMLiz;dngEwiwQh{8R^E(;C{dTpWS-+^8Gh>x-tHJjv+V=@lUTkjJ*c?
zRgC`xd9Y3;?lb9ck{=m{*KIf0#p?i-xZihE{l1&%x&4wZ^z}$JH{$g{;KSnBiqGSi
z5Y(s5;Jd;*Ie(IxPVpI5l!`@Rtc$HkX}PFK+3`OlDk>^iMMDK^JMaptYxA+~r+sWq
zosTW3^s%&ZA4@3n;i3Up*MO7RPR6n}Fg6V*u8}5IcLw?@=(j?@7J3)-bD=c@Q=y*#
zeG>Eu&<}tfcDHYbehu^spihH7f!|)c&iEBSW6(3`#a&VpI|F?c^jo1{3%v{axzL-L
zk4=Ss0`y7HCqO>{`ZnlKLth7dCG_RcmqA|!{W|EELZ1cwH0Vb{?}Oe4y$^aH^gif)
z&|k-21O9g6Zv*~Z_%jK88}z54uY<l4`f}*G^)jf}LA?~}EU2eJJrepe&{sjf75cT%
zyP%)Tcn=!TgPrKX2K2y%9+=PrUmfgj-wyp6=odhr27N*o(P7MY2Kp-Kw?e-bdKdI_
z6PV8oJzo6noB(|i^a;=pfW8g-)6myJUkQCV^kvYOLB9_ArO@Nr&NU7Ck^J5#^gif)
z(EFhGLGOb<aHs(e?Sw-c;1GY6VuC|$(4U6B4*E*y%b_oWzKiL@&Ol!U{Z{DLLhpip
zF7#bYAK0y9+o4|r{Q~IIpih9ls{qyc&Ol!U{Z{DLLXSxM=CZB=ROf4h{xtM;&{slV
z4n2a?MS$vjn9sl(U;&T@B=FnCT?FXj#f$9RxpVBqi4*M5p+oGWk3M4Wyz>q#D=T9!
zzW5?rw{9JK^2sOJ(xpq8-EL=DSy^n(oH=aTv}tVO#EER=$dPQ&pg}A)HkS47-J7+2
z<71mkO0bUkiuQ?r7$9ZG|9s;9b;rH`e{f^pQR6$x6;Hmc`akc|KUM!<^#42i*XnEi
z7J(A-t^|zv%Wtk=LFG3Qu#j>pXl0}vo`w`7TWCaaE8hnTI?oir$&hl<gouwY9#XE@
z_H~SZtG|s9^R0>*77&E?p_Ta88eGDHobK$3VxhIeQjq^0yfq};9qo6z<zror*A4pr
zZn^)Bq*njb;fbL4m~Im8(9D}ph@jlo)*DJYvf2Om#;<*JI^~@_eucxYWBBccIcyfo
z#Q#P%6RD|e2Ad8(75`60ns4uiwO;3X{t1IM(`&j1_WZp={!0d7?8f&8W5qSnEG9E2
zvobr&gEcF&F$tFKESK?{BA3N=QX(4#B#XU~KkA*Ovc9mF0!;y~?JUqQfj1T~@$!>R
z0W;&OYdl*d{zkyMow>1hptU?U5t^B*7hoeDZDymE9j7Cu?rfHiJQr#gLhFFqDJ<*!
zxw+sH{&ZjtFd6nOB5xe>mLQM+7DBf0N+<GjL`|iiStyl7Z7cRVp~_)D5Ps><ibJS#
zVJDz)3xy=gW%wqEJ?90&AMQhENucLbM4u$#yA9j~J6TBa-s(uN$BF-W?VCsI2hchY
zjU{4qc#nU8+(NJsXjO(EHZjT!e%^tY2F5WJaG@^mcOi6ESP{1%Fp4>BEZVb)k(bzT
z(UMhoB#VeCeOxRephQp>8h-DsPf#0kL?2y2wiDsYNFa%g##|XCSRz;w{^#2uywXsK
zIi^I>E+W)9dw6SnD?&{^KP2>(D^mP1;C|+belzweyyMpS%+$fBz|5FMW(F^F_(#&x
z5s7b~pOZyDJKLWv#*)v30%3K5xPS`G^=)hz?D4oNDM_UG_8{v4J2U)#TmAhF#Lphm
z?-3&AQc#>sVh!ZLw`}+wyvB_FI873MhU)03eAk$aWxf11tZbo(kOaGK;UjOwB=WKm
zb?);*fY0e9QP;_aqV6n=jRb{N^e(t=U`6Em*VtSUgZtqDkG~T4MICt~#Hw~fnJwnJ
zG9D80`8<`y$T0RO8;myRioZZC_^6wZO2J<DFU@d%d}HFc%g6b~qatGi#SvAR5qu5X
Sf@T%pBEy=xe1-YHW&1DE>B}Ji
new file mode 100644
--- /dev/null
+++ b/memory/jemalloc/jemalloc.c
@@ -0,0 +1,6369 @@
+/* -*- Mode: C; tab-width: 4; c-basic-offset: 4 -*- */
+/*-
+ * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice(s), this list of conditions and the following disclaimer as
+ *    the first lines of this file unmodified other than the possible
+ *    addition of one or more copyright notices.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice(s), this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *******************************************************************************
+ *
+ * This allocator implementation is designed to provide scalable performance
+ * for multi-threaded programs on multi-processor systems.  The following
+ * features are included for this purpose:
+ *
+ *   + Multiple arenas are used if there are multiple CPUs, which reduces lock
+ *     contention and cache sloshing.
+ *
+ *   + Cache line sharing between arenas is avoided for internal data
+ *     structures.
+ *
+ *   + Memory is managed in chunks and runs (chunks can be split into runs),
+ *     rather than as individual pages.  This provides a constant-time
+ *     mechanism for associating allocations with particular arenas.
+ *
+ * Allocation requests are rounded up to the nearest size class, and no record
+ * of the original request size is maintained.  Allocations are broken into
+ * categories according to size class.  Assuming runtime defaults, 4 kB pages
+ * and a 16 byte quantum, the size classes in each category are as follows:
+ *
+ *   |=====================================|
+ *   | Category | Subcategory    |    Size |
+ *   |=====================================|
+ *   | Small    | Tiny           |       2 |
+ *   |          |                |       4 |
+ *   |          |                |       8 |
+ *   |          |----------------+---------|
+ *   |          | Quantum-spaced |      16 |
+ *   |          |                |      32 |
+ *   |          |                |      48 |
+ *   |          |                |     ... |
+ *   |          |                |     480 |
+ *   |          |                |     496 |
+ *   |          |                |     512 |
+ *   |          |----------------+---------|
+ *   |          | Sub-page       |    1 kB |
+ *   |          |                |    2 kB |
+ *   |=====================================|
+ *   | Large                     |    4 kB |
+ *   |                           |    8 kB |
+ *   |                           |   12 kB |
+ *   |                           |     ... |
+ *   |                           | 1012 kB |
+ *   |                           | 1016 kB |
+ *   |                           | 1020 kB |
+ *   |=====================================|
+ *   | Huge                      |    1 MB |
+ *   |                           |    2 MB |
+ *   |                           |    3 MB |
+ *   |                           |     ... |
+ *   |=====================================|
+ *
+ * A different mechanism is used for each category:
+ *
+ *   Small : Each size class is segregated into its own set of runs.  Each run
+ *           maintains a bitmap of which regions are free/allocated.
+ *
+ *   Large : Each allocation is backed by a dedicated run.  Metadata are stored
+ *           in the associated arena chunk header maps.
+ *
+ *   Huge : Each allocation is backed by a dedicated contiguous set of chunks.
+ *          Metadata are stored in a separate red-black tree.
+ *
+ *******************************************************************************
+ */
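/*
 * Illustrative sketch, not part of this patch: how a request size maps onto
 * the size classes tabulated above, with the default 16 byte quantum, 4 kB
 * pages and 1 MB chunks hard-coded purely for illustration (the real code
 * derives these values at run time, and the exact large/huge boundary also
 * depends on the chunk header size).
 */
static size_t
example_round_to_size_class(size_t size)
{

	if (size <= 512) {
		if (size <= 8) {
			/* Tiny: power-of-two classes 2, 4, 8. */
			size_t s = 2;

			while (s < size)
				s <<= 1;
			return (s);
		}
		/* Quantum-spaced: round up to a multiple of 16. */
		return ((size + 15) & ~(size_t)15);
	}
	if (size <= 2048) {
		/* Sub-page: 1 kB or 2 kB. */
		return ((size <= 1024) ? 1024 : 2048);
	}
	if (size <= (1024 * 1024) - 4096) {
		/* Large: round up to a multiple of the 4 kB page size. */
		return ((size + 4095) & ~(size_t)4095);
	}
	/* Huge: round up to a multiple of the 1 MB chunk size. */
	return ((size + (1024 * 1024) - 1) & ~(size_t)((1024 * 1024) - 1));
}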
+
+/*
+ * MALLOC_PRODUCTION disables assertions and statistics gathering.  It also
+ * defaults the A and J runtime options to off.  These settings are appropriate
+ * for production systems.
+ */
+#ifndef MOZ_MEMORY_DEBUG
+#  define	MALLOC_PRODUCTION
+#endif
+
+#ifndef MALLOC_PRODUCTION
+   /*
+    * MALLOC_DEBUG enables assertions and other sanity checks, and disables
+    * inline functions.
+    */
+#  define MALLOC_DEBUG
+
+   /* MALLOC_STATS enables statistics calculation. */
+#  define MALLOC_STATS
+#endif
+
+/*
+ * MALLOC_LAZY_FREE enables the use of a per-thread vector of slots that free()
+ * can atomically stuff object pointers into.  This can reduce arena lock
+ * contention.
+ */
+/* #define	MALLOC_LAZY_FREE */
+
+/*
+ * MALLOC_BALANCE enables monitoring of arena lock contention and dynamically
+ * re-balances arena load if exponentially averaged contention exceeds a
+ * certain threshold.
+ */
+/* #define	MALLOC_BALANCE */
+
+/*
+ * MALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
+ * segment (DSS).  In an ideal world, this functionality would be completely
+ * unnecessary, but we are burdened by history and the lack of resource limits
+ * for anonymous mapped memory.
+ */
+#if (!defined(MOZ_MEMORY_DARWIN) && !defined(MOZ_MEMORY_WINDOWS))
+#define	MALLOC_DSS
+#endif
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+#ifdef MOZ_MEMORY_WINDOWS
+#include <cruntime.h>
+#include <internal.h>
+#include <windows.h>
+#include <io.h>
+#include "tree.h"
+
+#pragma warning( disable: 4267 4996 4146 )
+
+#define bool BOOL
+#define false FALSE
+#define true TRUE
+#define inline __inline
+#define SIZE_T_MAX ULONG_MAX
+#define STDERR_FILENO 2
+#define PATH_MAX MAX_PATH
+#define vsnprintf _vsnprintf
+#define assert(f) /* we can't assert in the CRT */
+
+static unsigned long tlsIndex = 0xffffffff;
+
+#define __thread
+#define _pthread_self() __threadid()
+#define issetugid() 0
+
+/* use MSVC intrinsics */
+#pragma intrinsic(_BitScanForward)
+static __forceinline int
+ffs(int x)
+{
+	unsigned long i;
+
+	if (_BitScanForward(&i, x) != 0)
+		return (i + 1);
+
+	return (0);
+}
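/*
 * For reference (not part of this patch): like the POSIX ffs() used on the
 * other platforms, this shim returns the 1-based index of the least
 * significant set bit, e.g. ffs(0x01) == 1, ffs(0x18) == 4, and ffs(0) == 0.
 */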
+
+/* Implement getenv without using malloc */
+static char mozillaMallocOptionsBuf[64];
+
+#define getenv xgetenv
+static char *
+getenv(const char *name)
+{
+
+	if (GetEnvironmentVariableA(name, (LPSTR)&mozillaMallocOptionsBuf,
+		    sizeof(mozillaMallocOptionsBuf)) > 0)
+		return (mozillaMallocOptionsBuf);
+
+	return (NULL);
+}
+
+typedef unsigned uint32_t;
+typedef unsigned long long uint64_t;
+typedef unsigned long long uintmax_t;
+
+#define MALLOC_DECOMMIT
+#endif
+
+/* XXX Temporary, for testing on Linux. */
+#if 0
+#  define MALLOC_DECOMMIT
+/*
+ * The decommit code for Unix doesn't bother to make sure deallocated DSS
+ * chunks are writable.
+ */
+#  undef MALLOC_DSS
+#endif
+
+#ifndef MOZ_MEMORY_WINDOWS
+#include <sys/cdefs.h>
+#ifndef __DECONST
+#  define __DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
+#endif
+#ifndef MOZ_MEMORY
+__FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.161 2008/01/03 23:22:13 jasone Exp $");
+#include "libc_private.h"
+#ifdef MALLOC_DEBUG
+#  define _LOCK_DEBUG
+#endif
+#include "spinlock.h"
+#include "namespace.h"
+#endif
+#include <sys/mman.h>
+#ifndef MADV_FREE
+#  define MADV_FREE	MADV_DONTNEED
+#endif
+#include <sys/param.h>
+#ifndef MOZ_MEMORY
+#include <sys/stddef.h>
+#endif
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include "tree.h"
+#ifndef MOZ_MEMORY
+#include <sys/tree.h>
+#endif
+#include <sys/uio.h>
+#ifndef MOZ_MEMORY
+#include <sys/ktrace.h> /* Must come after several other sys/ includes. */
+
+#include <machine/atomic.h>
+#include <machine/cpufunc.h>
+#include <machine/vmparam.h>
+#endif
+
+#include <errno.h>
+#include <limits.h>
+#ifndef SIZE_T_MAX
+#  define SIZE_T_MAX	ULONG_MAX
+#endif
+#include <pthread.h>
+#ifdef MOZ_MEMORY_DARWIN
+#define _pthread_self pthread_self
+#define _pthread_mutex_init pthread_mutex_init
+#define _pthread_mutex_trylock pthread_mutex_trylock
+#define _pthread_mutex_lock pthread_mutex_lock
+#define _pthread_mutex_unlock pthread_mutex_unlock
+#endif
+#include <sched.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef MOZ_MEMORY_DARWIN
+#include <strings.h>
+#endif
+#include <unistd.h>
+
+#ifdef MOZ_MEMORY_DARWIN
+#include <libkern/OSAtomic.h>
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#include <malloc/malloc.h>
+#endif
+
+#ifndef MOZ_MEMORY
+#include "un-namespace.h"
+#endif
+
+#endif
+
+#ifdef MOZ_MEMORY_DARWIN
+static const bool __isthreaded = true;
+#endif
+
+#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
+
+#ifdef MALLOC_DEBUG
+#  ifdef NDEBUG
+#    undef NDEBUG
+#  endif
+#else
+#  ifndef NDEBUG
+#    define NDEBUG
+#  endif
+#endif
+#ifndef MOZ_MEMORY_WINDOWS
+#include <assert.h>
+#endif
+
+#ifdef MALLOC_DEBUG
+   /* Disable inlining to make debugging easier. */
+#ifdef inline
+#undef inline
+#endif
+
+#  define inline
+#endif
+
+/* Size of stack-allocated buffer passed to strerror_r(). */
+#define	STRERROR_BUF		64
+
+/* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
+#  define QUANTUM_2POW_MIN      4
+#ifdef MOZ_MEMORY_SIZEOF_PTR_2POW
+#  define SIZEOF_PTR_2POW		MOZ_MEMORY_SIZEOF_PTR_2POW
+#else
+#  define SIZEOF_PTR_2POW       2
+#endif
+#define PIC
+#ifndef MOZ_MEMORY_DARWIN
+static const bool __isthreaded = true;
+#else
+#  define NO_TLS
+#endif
+#if 0
+#ifdef __i386__
+#  define QUANTUM_2POW_MIN	4
+#  define SIZEOF_PTR_2POW	2
+#  define CPU_SPINWAIT		__asm__ volatile("pause")
+#endif
+#ifdef __ia64__
+#  define QUANTUM_2POW_MIN	4
+#  define SIZEOF_PTR_2POW	3
+#endif
+#ifdef __alpha__
+#  define QUANTUM_2POW_MIN	4
+#  define SIZEOF_PTR_2POW	3
+#  define NO_TLS
+#endif
+#ifdef __sparc64__
+#  define QUANTUM_2POW_MIN	4
+#  define SIZEOF_PTR_2POW	3
+#  define NO_TLS
+#endif
+#ifdef __amd64__
+#  define QUANTUM_2POW_MIN	4
+#  define SIZEOF_PTR_2POW	3
+#  define CPU_SPINWAIT		__asm__ volatile("pause")
+#endif
+#ifdef __arm__
+#  define QUANTUM_2POW_MIN	3
+#  define SIZEOF_PTR_2POW	2
+#  define NO_TLS
+#endif
+#ifdef __powerpc__
+#  define QUANTUM_2POW_MIN	4
+#  define SIZEOF_PTR_2POW	2
+#endif
+#endif
+
+#define	SIZEOF_PTR		(1U << SIZEOF_PTR_2POW)
+
+/* sizeof(int) == (1U << SIZEOF_INT_2POW). */
+#ifndef SIZEOF_INT_2POW
+#  define SIZEOF_INT_2POW	2
+#endif
+
+/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
+#if (!defined(PIC) && !defined(NO_TLS))
+#  define NO_TLS
+#endif
+
+#ifdef NO_TLS
+   /* MALLOC_BALANCE requires TLS. */
+#  ifdef MALLOC_BALANCE
+#    undef MALLOC_BALANCE
+#  endif
+   /* MALLOC_LAZY_FREE requires TLS. */
+#  ifdef MALLOC_LAZY_FREE
+#    undef MALLOC_LAZY_FREE
+#  endif
+#endif
+
+/*
+ * Size and alignment of memory chunks that are allocated by the OS's virtual
+ * memory system.
+ */
+#define	CHUNK_2POW_DEFAULT	20
+
+/*
+ * Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
+ * so over-estimates are okay (up to a point), but under-estimates will
+ * negatively affect performance.
+ */
+#define	CACHELINE_2POW		6
+#define	CACHELINE		((size_t)(1U << CACHELINE_2POW))
+
+/* Smallest size class to support. */
+#define	TINY_MIN_2POW		1
+
+/*
+ * Maximum size class that is a multiple of the quantum, but not (necessarily)
+ * a power of 2.  Above this size, allocations are rounded up to the nearest
+ * power of 2.
+ */
+#define	SMALL_MAX_2POW_DEFAULT	9
+#define	SMALL_MAX_DEFAULT	(1U << SMALL_MAX_2POW_DEFAULT)
+
+/*
+ * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
+ * as small as possible such that this setting is still honored, without
+ * violating other constraints.  The goal is to make runs as small as possible
+ * without exceeding a per run external fragmentation threshold.
+ *
+ * We use binary fixed point math for overhead computations, where the binary
+ * point is implicitly RUN_BFP bits to the left.
+ *
+ * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
+ * honored for some/all object sizes, since there is one bit of header overhead
+ * per object (plus a constant).  This constraint is relaxed (ignored) for runs
+ * that are so small that the per-region overhead is greater than:
+ *
+ *   (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)))
+ */
+#define	RUN_BFP			12
+/*                                    \/   Implicit binary fixed point. */
+#define	RUN_MAX_OVRHD		0x0000003dU
+#define	RUN_MAX_OVRHD_RELAX	0x00001800U
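/*
 * Illustrative sketch, not part of this patch: one way to read the fixed
 * point constraint above.  RUN_MAX_OVRHD is 0x3d == 61, i.e. 61/4096, or
 * roughly 1.5% maximum header overhead.  The hypothetical helper below
 * accepts a (header size, run size) pair when the overhead stays within
 * that bound; e.g. a 48 byte header in a 4096 byte run gives
 * (48 << 12) == 196608 <= 61 * 4096 == 249856, so it is accepted.
 */
static int
example_run_overhead_ok(size_t hdr_size, size_t run_size)
{

	return ((hdr_size << RUN_BFP) <= RUN_MAX_OVRHD * run_size);
}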
+
+/* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
+#define	RUN_MAX_SMALL_2POW	15
+#define	RUN_MAX_SMALL		(1U << RUN_MAX_SMALL_2POW)
+
+#ifdef MALLOC_LAZY_FREE
+   /* Default size of each arena's lazy free cache. */
+#  define LAZY_FREE_2POW_DEFAULT 8
+   /*
+    * Number of pseudo-random probes to conduct before considering the cache to
+    * be overly full.  It takes on average n probes to detect fullness of
+    * (n-1)/n.  However, we are effectively doing multiple non-independent
+    * trials (each deallocation is a trial), so the actual average threshold
+    * for clearing the cache is somewhat lower.
+    */
+#  define LAZY_FREE_NPROBES	5
+#endif
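/*
 * Illustrative arithmetic, not from this patch: if a fraction f of the cache
 * slots are occupied and probes were independent, the expected number of
 * probes needed to find an empty slot would be 1 / (1 - f).  With
 * f = (n-1)/n that is exactly n probes, which is where LAZY_FREE_NPROBES = 5
 * (f = 4/5) comes from; as the comment above notes, the effective threshold
 * is somewhat lower because successive frees are not independent trials.
 */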
+
+/*
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in
+ * order to yield to another virtual CPU.  If no such instruction is defined
+ * above, make CPU_SPINWAIT a no-op.
+ */
+#ifndef CPU_SPINWAIT
+#  define CPU_SPINWAIT
+#endif
+
+/*
+ * Adaptive spinning must eventually switch to blocking, in order to avoid the
+ * potential for priority inversion deadlock.  Backing off past a certain point
+ * can actually waste time.
+ */
+#define	SPIN_LIMIT_2POW		11
+
+/*
+ * Conversion from spinning to blocking is expensive; we use (1U <<
+ * BLOCK_COST_2POW) to estimate how many more times costly blocking is than
+ * worst-case spinning.
+ */
+#define	BLOCK_COST_2POW		4
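/*
 * Illustrative sketch, not part of this patch: the general shape of an
 * adaptive lock that spins for up to 2^SPIN_LIMIT_2POW iterations before
 * giving up and blocking.  The pthread calls here are only stand-ins for
 * whatever trylock/lock primitives the platform provides.
 */
static void
example_spin_then_block(pthread_mutex_t *lock)
{
	unsigned i;

	if (pthread_mutex_trylock(lock) == 0)
		return;
	for (i = 0; i < (1U << SPIN_LIMIT_2POW); i++) {
		CPU_SPINWAIT;
		if (pthread_mutex_trylock(lock) == 0)
			return;
	}
	/* Spinning did not pay off; block until the lock is released. */
	pthread_mutex_lock(lock);
}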
+
+#ifdef MALLOC_BALANCE
+   /*
+    * We use an exponential moving average to track recent lock contention,
+    * where the size of the history window is N, and alpha=2/(N+1).
+    *
+    * Due to integer math rounding, very small values here can cause
+    * substantial degradation in accuracy, thus making the moving average decay
+    * faster than it would with precise calculation.
+    */
+#  define BALANCE_ALPHA_INV_2POW	9
+
+   /*
+    * Threshold value for the exponential moving contention average at which to
+    * re-assign a thread.
+    */
+#  define BALANCE_THRESHOLD_DEFAULT	(1U << (SPIN_LIMIT_2POW-4))
+#endif
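/*
 * Illustrative sketch, not part of this patch (and it assumes MALLOC_BALANCE
 * is defined): an exponential moving average with alpha = 2^-k can be kept
 * with pure integer math, which is what makes the scheme above cheap enough
 * to run inside the allocator:
 *
 *   avg <- ((2^k - 1) * avg + sample) / 2^k
 */
static uint32_t
example_ema_update(uint32_t avg, uint32_t sample)
{

	return ((uint32_t)((((uint64_t)avg *
	    ((1ULL << BALANCE_ALPHA_INV_2POW) - 1)) + sample) >>
	    BALANCE_ALPHA_INV_2POW));
}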
+
+/******************************************************************************/
+
+/*
+ * Mutexes based on spinlocks.  We can't use normal pthread spinlocks in all
+ * places, because they require malloc()ed memory, which causes bootstrapping
+ * issues in some cases.
+ */
+#if defined(MOZ_MEMORY_WINDOWS)
+#define malloc_mutex_t CRITICAL_SECTION
+#define malloc_spinlock_t CRITICAL_SECTION
+#elif defined(MOZ_MEMORY_DARWIN)
+typedef struct {
+	OSSpinLock	lock;
+} malloc_mutex_t;
+typedef struct {
+	OSSpinLock	lock;
+} malloc_spinlock_t;
+#elif defined(MOZ_MEMORY)
+typedef pthread_mutex_t malloc_mutex_t;
+typedef pthread_mutex_t malloc_spinlock_t;
+#else
+/* XXX these should be #ifdef'ed for freebsd (and linux?) only */
+typedef struct {
+	spinlock_t	lock;
+} malloc_mutex_t;
+typedef malloc_mutex_t malloc_spinlock_t;
+#endif
+
+/* Set to true once the allocator has been initialized. */
+static bool malloc_initialized = false;
+
+#if defined(MOZ_MEMORY_WINDOWS)
+/* No init lock for Windows. */
+#elif defined(MOZ_MEMORY_DARWIN)
+static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
+#elif defined(MOZ_MEMORY)
+static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
+#else
+static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
+#endif
+
+/******************************************************************************/
+/*
+ * Statistics data structures.
+ */
+
+#ifdef MALLOC_STATS
+
+typedef struct malloc_bin_stats_s malloc_bin_stats_t;
+struct malloc_bin_stats_s {
+	/*
+	 * Number of allocation requests that corresponded to the size of this
+	 * bin.
+	 */
+	uint64_t	nrequests;
+
+	/* Total number of runs created for this bin's size class. */
+	uint64_t	nruns;
+
+	/*
+	 * Total number of runs reused by extracting them from the runs tree for
+	 * this bin's size class.
+	 */
+	uint64_t	reruns;
+
+	/* High-water mark for this bin. */
+	unsigned long	highruns;
+
+	/* Current number of runs in this bin. */
+	unsigned long	curruns;
+};
+
+typedef struct arena_stats_s arena_stats_t;
+struct arena_stats_s {
+	/* Number of bytes currently mapped. */
+	size_t		mapped;
+
+	/*
+	 * Total number of bytes purged in order to keep dirty unused memory
+	 * under control, and the number of madvise calls made while purging.
+	 */
+	uint64_t	npurged;
+	uint64_t	nmadvise;
+
+	/* Per-size-category statistics. */
+	size_t		allocated_small;
+	uint64_t	nmalloc_small;
+	uint64_t	ndalloc_small;
+
+	size_t		allocated_large;
+	uint64_t	nmalloc_large;
+	uint64_t	ndalloc_large;
+
+#ifdef MALLOC_BALANCE
+	/* Number of times this arena reassigned a thread due to contention. */
+	uint64_t	nbalance;
+#endif
+};
+
+typedef struct chunk_stats_s chunk_stats_t;
+struct chunk_stats_s {
+	/* Number of chunks that were allocated. */
+	uint64_t	nchunks;
+
+	/* High-water mark for number of chunks allocated. */
+	unsigned long	highchunks;
+
+	/*
+	 * Current number of chunks allocated.  This value isn't maintained for
+	 * any other purpose, so keep track of it in order to be able to set
+	 * highchunks.
+	 */
+	unsigned long	curchunks;
+};
+
+#endif /* #ifdef MALLOC_STATS */
+
+/******************************************************************************/
+/*
+ * Extent data structures.
+ */
+
+/* Tree of extents. */
+typedef struct extent_node_s extent_node_t;
+struct extent_node_s {
+	/* Linkage for the address-ordered tree. */
+	RB_ENTRY(extent_node_s) link_ad;
+
+	/* Linkage for the size/address-ordered tree. */
+	RB_ENTRY(extent_node_s) link_szad;
+
+	/* Pointer to the extent that this tree node is responsible for. */
+	void	*addr;
+
+	/* Total region size. */
+	size_t	size;
+
+	/*
+	 * Number of dirty bytes in unused run.  This field is only used by the
+	 * runs_avail_* tree nodes.
+	 */
+	size_t	ndirty;
+};
+typedef struct extent_tree_ad_s extent_tree_ad_t;
+RB_HEAD(extent_tree_ad_s, extent_node_s);
+typedef struct extent_tree_szad_s extent_tree_szad_t;
+RB_HEAD(extent_tree_szad_s, extent_node_s);
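/*
 * Illustrative sketch, not part of this patch: what "size/address ordered"
 * means for the szad tree.  Nodes compare primarily by size and secondarily
 * by address, so a best-fit lookup finds the smallest extent that is large
 * enough, at the lowest address among equals.
 */
static int
example_extent_szad_cmp(extent_node_t *a, extent_node_t *b)
{

	if (a->size != b->size)
		return ((a->size < b->size) ? -1 : 1);
	if (a->addr != b->addr)
		return (((uintptr_t)a->addr < (uintptr_t)b->addr) ? -1 : 1);
	return (0);
}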
+
+/*
+ * Magazine of extent nodes.  Arenas each use an unpredictable number of extent
+ * nodes, and they need to be allocated somehow via base_alloc().  We use
+ * magazines in order to amortize the locking cost of acquiring the nodes from
+ * a single source for all threads.
+ */
+typedef struct node_mag_s node_mag_t;
+struct node_mag_s {
+	/* Used to link a stack of magazines. */
+	node_mag_t	*next;
+
+	/* Slots nodes[0..nnodes) contain pointers to available nodes. */
+#define	NODE_MAG_NNODES	254 /* Strange value plays nicely with base_alloc(). */
+	unsigned	nnodes;
+	extent_node_t	*nodes[NODE_MAG_NNODES];
+};
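/*
 * Illustrative sketch, not part of this patch: a magazine is just a LIFO
 * stack of preallocated nodes, so taking or returning a node is O(1) and
 * only the (rare) refill/flush of a whole magazine needs the shared lock.
 */
static extent_node_t *
example_node_mag_pop(node_mag_t *mag)
{

	if (mag->nnodes == 0)
		return (NULL);	/* Empty; caller must refill via base_alloc(). */
	mag->nnodes--;
	return (mag->nodes[mag->nnodes]);
}

static bool
example_node_mag_push(node_mag_t *mag, extent_node_t *node)
{

	if (mag->nnodes == NODE_MAG_NNODES)
		return (true);	/* Full; caller switches to another magazine. */
	mag->nodes[mag->nnodes++] = node;
	return (false);
}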
+
+/******************************************************************************/
+/*
+ * Arena data structures.
+ */
+
+typedef struct arena_s arena_t;
+typedef struct arena_bin_s arena_bin_t;
+
+typedef struct arena_chunk_map_s arena_chunk_map_t;
+struct arena_chunk_map_s {
+	/*
+	 * Number of pages in run.  For a free run that has never been touched,
+	 * this is NPAGES_EMPTY for the central pages, which allows us to avoid
+	 * zero-filling untouched pages for calloc().
+	 */
+#define	NPAGES_EMPTY ((uint32_t)0x0U)
+	uint32_t	npages;
+	/*
+	 * Position within run.  For a free run, this is POS_EMPTY/POS_FREE for
+	 * the first and last pages.  The special values make it possible to
+	 * quickly coalesce free runs.  POS_EMPTY indicates that the run has
+	 * never been touched, which allows us to avoid zero-filling untouched
+	 * pages for calloc().
+	 *
+	 * This is the limiting factor for chunksize; there can be at most 2^31
+	 * pages in a run.
+	 *
+	 * POS_EMPTY is assumed by arena_run_dalloc() to be less than POS_FREE.
+	 */
+#define	POS_EMPTY ((uint32_t)0xfffffffeU)
+#define	POS_FREE ((uint32_t)0xffffffffU)
+	uint32_t	pos;
+};
+
+/* Arena chunk header. */
+typedef struct arena_chunk_s arena_chunk_t;
+struct arena_chunk_s {
+	/* Arena that owns the chunk. */
+	arena_t *arena;
+
+	/* Linkage for the arena's chunk tree. */
+	RB_ENTRY(arena_chunk_s) link;
+
+	/*
+	 * Number of pages in use.  This is maintained in order to make
+	 * detection of empty chunks fast.
+	 */
+	uint32_t pages_used;
+
+	/*
+	 * Map of pages within chunk that keeps track of free/large/small.  For
+	 * free runs, only the map entries for the first and last pages are
+	 * kept up to date, so that free runs can be quickly coalesced.
+	 */
+	arena_chunk_map_t map[1]; /* Dynamically sized. */
+};
+typedef struct arena_chunk_tree_s arena_chunk_tree_t;
+RB_HEAD(arena_chunk_tree_s, arena_chunk_s);
+
+typedef struct arena_run_s arena_run_t;
+struct arena_run_s {
+	/* Linkage for run trees. */
+	RB_ENTRY(arena_run_s) link;
+
+#ifdef MALLOC_DEBUG
+	uint32_t	magic;
+#  define ARENA_RUN_MAGIC 0x384adf93
+#endif
+
+	/* Bin this run is associated with. */
+	arena_bin_t	*bin;
+
+	/* Index of first element that might have a free region. */
+	unsigned	regs_minelm;
+
+	/* Number of free regions in run. */
+	unsigned	nfree;
+
+	/* Bitmask of in-use regions (0: in use, 1: free). */
+	unsigned	regs_mask[1]; /* Dynamically sized. */
+};
+typedef struct arena_run_tree_s arena_run_tree_t;
+RB_HEAD(arena_run_tree_s, arena_run_s);
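/*
 * Illustrative sketch, not part of this patch: finding a free region in a
 * run is a matter of scanning regs_mask with ffs(), since a set bit means
 * "free" per the comment above.  regs_minelm lets the scan skip elements
 * that are known to be fully allocated.
 */
static int
example_find_free_region(arena_run_t *run, unsigned regs_mask_nelms)
{
	unsigned i;

	for (i = run->regs_minelm; i < regs_mask_nelms; i++) {
		int bit = ffs((int)run->regs_mask[i]);

		if (bit != 0) {
			/* Region index within the run (32 bits per element). */
			return ((int)(i << (SIZEOF_INT_2POW + 3)) + (bit - 1));
		}
	}
	return (-1);	/* No free regions; the run is completely in use. */
}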
+
+struct arena_bin_s {
+	/*
+	 * Current run being used to service allocations of this bin's size
+	 * class.
+	 */
+	arena_run_t	*runcur;
+
+	/*
+	 * Tree of non-full runs.  This tree is used when looking for an
+	 * existing run when runcur is no longer usable.  We choose the
+	 * non-full run that is lowest in memory; this policy tends to keep
+	 * objects packed well, and it can also help reduce the number of
+	 * almost-empty chunks.
+	 */
+	arena_run_tree_t runs;
+
+	/* Size of regions in a run for this bin's size class. */
+	size_t		reg_size;
+
+	/* Total size of a run for this bin's size class. */
+	size_t		run_size;
+
+	/* Total number of regions in a run for this bin's size class. */
+	uint32_t	nregs;
+
+	/* Number of elements in a run's regs_mask for this bin's size class. */
+	uint32_t	regs_mask_nelms;
+
+	/* Offset of first region in a run for this bin's size class. */
+	uint32_t	reg0_offset;
+
+#ifdef MALLOC_STATS
+	/* Bin statistics. */
+	malloc_bin_stats_t stats;
+#endif
+};
+
+struct arena_s {
+#ifdef MALLOC_DEBUG
+	uint32_t		magic;
+#  define ARENA_MAGIC 0x947d3d24
+#endif
+
+	/* All operations on this arena require that lock be locked. */
+#ifdef MOZ_MEMORY
+	malloc_spinlock_t	lock;
+#else
+	pthread_mutex_t		lock;
+#endif
+
+#ifdef MALLOC_STATS
+	arena_stats_t		stats;
+#endif
+
+	/*
+	 * Node magazines, used by arena_node_[de]alloc().  In order to reduce
+	 * lock contention for base_node_mag_[de]alloc(), we keep up to two
+	 * full magazines on hand.
+	 */
+	node_mag_t		*node_mag_cur;
+	node_mag_t		*node_mag_full;
+
+	/*
+	 * Tree of chunks this arena manages.
+	 */
+	arena_chunk_tree_t	chunks;
+
+	/*
+	 * In order to avoid rapid chunk allocation/deallocation when an arena
+	 * oscillates right on the cusp of needing a new chunk, cache the most
+	 * recently freed chunk.
+	 *
+	 * There is one spare chunk per arena, rather than one spare total, in
+	 * order to avoid interactions between multiple threads that could make
+	 * a single spare inadequate.
+	 */
+	arena_chunk_t		*spare;
+
+	/*
+	 * Current count of bytes within unused runs that are potentially
+	 * dirty, and for which madvise(... MADV_FREE) has not been called.  By
+	 * tracking this, we can institute a limit on how much dirty unused
+	 * memory is mapped for each arena.
+	 */
+	size_t			ndirty;
+	/*
+	 * Number of dirty bytes for spare.  All other dirty bytes are tracked
+	 * in the runs_avail_* tree nodes.
+	 */
+	size_t			spare_ndirty;
+
+	/*
+	 * Trees of this arena's available runs.  Two trees are maintained
+	 * using one set of nodes, since one is needed for first-best-fit run
+	 * allocation, and the other is needed for coalescing.
+	 */
+	extent_tree_szad_t	runs_avail_szad;
+	extent_tree_ad_t	runs_avail_ad;
+
+	/*
+	 * Tree of this arena's allocated (in-use) runs.  This tree is
+	 * maintained solely to guarantee that a node is available in
+	 * arena_run_dalloc(), since there is no way to recover from OOM during
+	 * deallocation.
+	 */
+	extent_tree_ad_t	runs_alloced_ad;
+
+#ifdef MALLOC_BALANCE
+	/*
+	 * The arena load balancing machinery needs to keep track of how much
+	 * lock contention there is.  This value is exponentially averaged.
+	 */
+	uint32_t		contention;
+#endif
+
+#ifdef MALLOC_LAZY_FREE
+	/*
+	 * Deallocation of small objects can be lazy, in which case free_cache
+	 * stores pointers to those objects that have not yet been deallocated.
+	 * In order to avoid lock contention, slots are chosen randomly.  Empty
+	 * slots contain NULL.
+	 */
+	void			**free_cache;
+#endif
+
+	/*
+	 * bins is used to store rings of free regions of the following sizes,
+	 * assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
+	 *
+	 *   bins[i] | size |
+	 *   --------+------+
+	 *        0  |    2 |
+	 *        1  |    4 |
+	 *        2  |    8 |
+	 *   --------+------+
+	 *        3  |   16 |
+	 *        4  |   32 |
+	 *        5  |   48 |
+	 *        6  |   64 |
+	 *           :      :
+	 *           :      :
+	 *       33  |  496 |
+	 *       34  |  512 |
+	 *   --------+------+
+	 *       35  | 1024 |
+	 *       36  | 2048 |
+	 *   --------+------+
+	 */
+	arena_bin_t		bins[1]; /* Dynamically sized. */
+};
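/*
 * Illustrative sketch, not part of this patch: the bin index implied by the
 * table above, with the default quantum (16 bytes), 3 tiny bins and 32
 * quantum-spaced bins hard-coded purely for illustration.
 */
static unsigned
example_size_to_bin(size_t size)
{

	if (size <= 8) {
		/* Tiny: bins 0..2 hold sizes 2, 4 and 8. */
		unsigned i = 0;
		size_t s = 2;

		while (s < size) {
			s <<= 1;
			i++;
		}
		return (i);
	}
	if (size <= 512) {
		/* Quantum-spaced: bins 3..34 hold 16, 32, ..., 512. */
		return (3 + (unsigned)((size + 15) >> 4) - 1);
	}
	/* Sub-page: bins 35 and 36 hold 1 kB and 2 kB. */
	return ((size <= 1024) ? 35 : 36);
}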
+
+/******************************************************************************/
+/*
+ * Data.
+ */
+
+/* Number of CPUs. */
+static unsigned		ncpus;
+
+/* VM page size. */
+static size_t		pagesize;
+static size_t		pagesize_mask;
+static size_t		pagesize_2pow;
+
+/* Various bin-related settings. */
+static size_t		bin_maxclass; /* Max size class for bins. */
+static unsigned		ntbins; /* Number of (2^n)-spaced tiny bins. */
+static unsigned		nqbins; /* Number of quantum-spaced bins. */
+static unsigned		nsbins; /* Number of (2^n)-spaced sub-page bins. */
+static size_t		small_min;
+static size_t		small_max;
+
+/* Various quantum-related settings. */
+static size_t		quantum;
+static size_t		quantum_mask; /* (quantum - 1). */
+
+/* Various chunk-related settings. */
+static size_t		chunksize;
+static size_t		chunksize_mask; /* (chunksize - 1). */
+static unsigned		chunk_npages;
+static unsigned		arena_chunk_header_npages;
+static size_t		arena_maxclass; /* Max size class for arenas. */
+
+/********/
+/*
+ * Chunks.
+ */
+
+/* Protects chunk-related data structures. */
+static malloc_mutex_t	huge_mtx;
+
+/* Tree of chunks that are stand-alone huge allocations. */
+static extent_tree_ad_t	huge;
+
+#ifdef MALLOC_DSS
+/*
+ * Protects sbrk() calls.  This avoids malloc races among threads, though it
+ * does not protect against races with threads that call sbrk() directly.
+ */
+static malloc_mutex_t	dss_mtx;
+/* Base address of the DSS. */
+static void		*dss_base;
+/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
+static void		*dss_prev;
+/* Current upper limit on DSS addresses. */
+static void		*dss_max;
+
+/*
+ * Trees of chunks that were previously allocated (trees differ only in node
+ * ordering).  These are used when allocating chunks, in an attempt to re-use
+ * address space.  Depending on function, different tree orderings are needed,
+ * which is why there are two trees with the same contents.
+ */
+static extent_tree_ad_t	dss_chunks_ad;
+static extent_tree_szad_t dss_chunks_szad;
+#endif
+
+#ifdef MALLOC_STATS
+/* Huge allocation statistics. */
+static uint64_t		huge_nmalloc;
+static uint64_t		huge_ndalloc;
+static size_t		huge_allocated;
+#endif
+
+/****************************/
+/*
+ * base (internal allocation).
+ */
+
+/*
+ * Current pages that are being used for internal memory allocations.  These
+ * pages are carved up in cacheline-size quanta, so that there is no chance of
+ * false cache line sharing.
+ */
+static void		*base_pages;
+static void		*base_next_addr;
+static void		*base_past_addr; /* Addr immediately past base_pages. */
+static node_mag_t	*base_node_mags_avail; /* LIFO cache of full mags. */
+static node_mag_t	*base_node_mag; /* For base_node_[de]alloc(). */
+static node_mag_t	*base_node_mag_partial; /* For OOM leak prevention. */
+static malloc_mutex_t	base_mtx;
+#ifdef MALLOC_STATS
+static size_t		base_mapped;
+#endif
+
+/********/
+/*
+ * Arenas.
+ */
+
+/*
+ * Arenas that are used to service external requests.  Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ */
+static arena_t		**arenas;
+static unsigned		narenas;
+#ifndef NO_TLS
+#  ifdef MALLOC_BALANCE
+static unsigned		narenas_2pow;
+#  else
+static unsigned		next_arena;
+#  endif
+#endif
+#ifdef MOZ_MEMORY
+static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
+#else
+static pthread_mutex_t arenas_lock; /* Protects arenas initialization. */
+#endif
+
+#ifndef NO_TLS
+/*
+ * Map of pthread_self() --> arenas[???], used for selecting an arena to use
+ * for allocations.
+ */
+#ifndef MOZ_MEMORY_WINDOWS
+static __thread arena_t	*arenas_map;
+#endif
+#endif
+
+#ifdef MALLOC_STATS
+/* Chunk statistics. */
+static chunk_stats_t	stats_chunks;
+#endif
+
+/*******************************/
+/*
+ * Runtime configuration options.
+ */
+const char	*_malloc_options
+#ifdef MOZ_MEMORY_WINDOWS
+= "A10n3F"
+#elif (defined(MOZ_MEMORY_DARWIN))
+= "AP10n"
+#endif
+;
+
+#ifdef MALLOC_DECOMMIT
+static bool opt_decommit = true;
+#endif
+
+#ifndef MALLOC_PRODUCTION
+static bool	opt_abort = true;
+static bool	opt_junk = true;
+#else
+static bool	opt_abort = false;
+static bool	opt_junk = false;
+#endif
+#ifdef MALLOC_DSS
+static bool	opt_dss = true;
+static bool	opt_mmap = true;
+#endif
+static size_t	opt_free_max = (1U << CHUNK_2POW_DEFAULT);
+#ifdef MALLOC_LAZY_FREE
+static int	opt_lazy_free_2pow = LAZY_FREE_2POW_DEFAULT;
+#endif
+#ifdef MALLOC_BALANCE
+static uint64_t	opt_balance_threshold = BALANCE_THRESHOLD_DEFAULT;
+#endif
+static bool	opt_print_stats = false;
+static size_t	opt_quantum_2pow = QUANTUM_2POW_MIN;
+static size_t	opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
+static size_t	opt_chunk_2pow = CHUNK_2POW_DEFAULT;
+static bool	opt_utrace = false;
+static bool	opt_sysv = false;
+static bool	opt_xmalloc = false;
+static bool	opt_zero = false;
+static int	opt_narenas_lshift = 0;
+
+typedef struct {
+	void	*p;
+	size_t	s;
+	void	*r;
+} malloc_utrace_t;
+
+#if 0
+#define UTRACE(a, b, c) do {						\
+	if (a == NULL && b == 0 && c == NULL)				\
+		malloc_printf("%d x USER malloc_init()\n", getpid());	\
+	else if (a == NULL && c != 0) {					\
+		malloc_printf("%d x USER %p = malloc(%zu)\n", getpid(),	\
+		    c, b);						\
+	} else if (a != NULL && c != NULL) {				\
+		malloc_printf("%d x USER %p = realloc(%p, %zu)\n",	\
+		    getpid(), c, a, b);					\
+	} else								\
+		malloc_printf("%d x USER free(%p)\n", getpid(), a);	\
+} while (0)
+#elif (defined(MOZ_MEMORY))
+#define	UTRACE(a, b, c)
+#else
+#define	UTRACE(a, b, c)							\
+	if (opt_utrace) {						\
+		malloc_utrace_t ut = {a, b, c};				\
+		utrace(&ut, sizeof(ut));				\
+	}
+#endif
+
+/******************************************************************************/
+/*
+ * Begin function prototypes for non-inline static functions.
+ */
+
+static bool	malloc_mutex_init(malloc_mutex_t *mutex);
+static bool	malloc_spin_init(malloc_spinlock_t *lock);
+static void	wrtmessage(const char *p1, const char *p2, const char *p3,
+		const char *p4);
+#ifdef MALLOC_STATS
+#ifdef MOZ_MEMORY_DARWIN
+/* Avoid namespace collision with OS X's malloc APIs. */
+#define malloc_printf xmalloc_printf
+#endif
+static void	malloc_printf(const char *format, ...);
+#endif
+static char	*umax2s(uintmax_t x, char *s);
+static bool	base_pages_alloc(size_t minsize);
+static void	*base_alloc(size_t size);
+static void	*base_calloc(size_t number, size_t size);
+static extent_node_t *base_node_alloc(void);
+static void	base_node_dealloc(extent_node_t *node);
+#ifdef MALLOC_STATS
+static void	stats_print(arena_t *arena);
+#endif
+static void	*pages_map(void *addr, size_t size);
+static void	pages_unmap(void *addr, size_t size);
+static void	*chunk_alloc(size_t size, bool zero);
+static void	chunk_dealloc(void *chunk, size_t size);
+#ifndef NO_TLS
+static arena_t	*choose_arena_hard(void);
+#endif
+static bool	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
+    bool zero);
+static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
+static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
+static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool zero);
+static void	arena_purge(arena_t *arena);
+static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size,
+    size_t ndirty);
+static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
+static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
+static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
+static void	*arena_malloc(arena_t *arena, size_t size, bool zero);
+static void	*arena_palloc(arena_t *arena, size_t alignment, size_t size,
+    size_t alloc_size);
+static size_t	arena_salloc(const void *ptr);
+static bool	arena_ralloc_resize(void *ptr, size_t size, size_t oldsize);
+static void	*arena_ralloc(void *ptr, size_t size, size_t oldsize);
+static void	arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
+static bool	arena_new(arena_t *arena);
+static arena_t	*arenas_extend(unsigned ind);
+static void	*huge_malloc(size_t size, bool zero);
+static void	*huge_palloc(size_t alignment, size_t size);
+static void	*huge_ralloc(void *ptr, size_t size, size_t oldsize);
+static void	huge_dalloc(void *ptr);
+static void	*imalloc(size_t size);
+static void	*ipalloc(size_t alignment, size_t size);
+static void	*icalloc(size_t size);
+static size_t	isalloc(const void *ptr);
+static void	*iralloc(void *ptr, size_t size);
+static void	idalloc(void *ptr);
+static void	malloc_print_stats(void);
+static bool	malloc_init_hard(void);
+
+/*
+ * End function prototypes.
+ */
+/******************************************************************************/
+/*
+ * Begin mutex.  We can't use normal pthread mutexes in all places, because
+ * they require malloc()ed memory, which causes bootstrapping issues in some
+ * cases.
+ */
+
+static bool
+malloc_mutex_init(malloc_mutex_t *mutex)
+{
+#if defined(MOZ_MEMORY_WINDOWS)
+	if (__isthreaded)
+		if (! __crtInitCritSecAndSpinCount(mutex, _CRT_SPINCOUNT))
+			return (true);
+#elif defined(MOZ_MEMORY_DARWIN)
+	mutex->lock = OS_SPINLOCK_INIT;
+#elif defined(MOZ_MEMORY)
+	if (pthread_mutex_init(mutex, NULL) != 0)
+		return (true);
+#else
+	static const spinlock_t lock = _SPINLOCK_INITIALIZER;
+
+	mutex->lock = lock;
+#endif
+	return (false);
+}
+
+static inline void
+malloc_mutex_lock(malloc_mutex_t *mutex)
+{
+
+#if defined(MOZ_MEMORY_WINDOWS)
+	EnterCriticalSection(mutex);
+#elif defined(MOZ_MEMORY_DARWIN)
+	OSSpinLockLock(&mutex->lock);
+#elif defined(MOZ_MEMORY)
+	pthread_mutex_lock(mutex);
+#else
+	if (__isthreaded)
+		_SPINLOCK(&mutex->lock);
+#endif
+}
+
+static inline void
+malloc_mutex_unlock(malloc_mutex_t *mutex)
+{
+
+#if defined(MOZ_MEMORY_WINDOWS)
+	LeaveCriticalSection(mutex);
+#elif defined(MOZ_MEMORY_DARWIN)
+	OSSpinLockUnlock(&mutex->lock);
+#elif defined(MOZ_MEMORY)
+	pthread_mutex_unlock(mutex);
+#else
+	if (__isthreaded)
+		_SPINUNLOCK(&mutex->lock);
+#endif
+}
+
+static bool
+malloc_spin_init(malloc_spinlock_t *lock)
+{
+#if defined(MOZ_MEMORY_WINDOWS)
+	if (__isthreaded)
+		if (! __crtInitCritSecAndSpinCount(lock, _CRT_SPINCOUNT))
+			return (true);
+#elif defined(MOZ_MEMORY_DARWIN)
+	lock->lock = OS_SPINLOCK_INIT;
+#elif defined(MOZ_MEMORY)
+	if (pthread_mutex_init(lock, NULL) != 0)
+		return (true);
+#else
+	lock->lock = _SPINLOCK_INITIALIZER;
+#endif
+	return (false);
+}
+
+static inline void
+malloc_spin_lock(malloc_spinlock_t *lock)
+{
+
+#if defined(MOZ_MEMORY_WINDOWS)
+	EnterCriticalSection(lock);
+#elif defined(MOZ_MEMORY_DARWIN)
+	OSSpinLockLock(&lock->lock);
+#elif defined(MOZ_MEMORY)
+	pthread_mutex_lock(lock);
+#else
+	if (__isthreaded)
+		_SPINLOCK(&lock->lock);
+#endif
+}
+
+static inline void
+malloc_spin_unlock(malloc_spinlock_t *lock)
+{
+#if defined(MOZ_MEMORY_WINDOWS)
+	LeaveCriticalSection(lock);
+#elif defined(MOZ_MEMORY_DARWIN)
+	OSSpinLockUnlock(&lock->lock);
+#elif defined(MOZ_MEMORY)
+	pthread_mutex_unlock(lock);
+#else
+	if (__isthreaded)
+		_SPINUNLOCK(&lock->lock);
+#endif
+}
+
+/*
+ * End mutex.
+ */
+/******************************************************************************/
+/*
+ * Begin spin lock.  Spin locks here are actually adaptive mutexes that block
+ * after a period of spinning, because unbounded spinning would allow for
+ * priority inversion.
+ */
+
+
+#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_DARWIN)
+#  define	malloc_spin_init	malloc_mutex_init
+#  define	malloc_spin_lock	malloc_mutex_lock
+#  define	malloc_spin_unlock	malloc_mutex_unlock
+#endif
+
+#ifndef MOZ_MEMORY
+/*
+ * We use an unpublished interface to initialize pthread mutexes with an
+ * allocation callback, in order to avoid infinite recursion.
+ */
+int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+    void *(calloc_cb)(size_t, size_t));
+
+__weak_reference(_pthread_mutex_init_calloc_cb_stub,
+    _pthread_mutex_init_calloc_cb);
+
+int
+_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
+    void *(calloc_cb)(size_t, size_t))
+{
+
+	return (0);
+}
+
+static bool
+malloc_spin_init(pthread_mutex_t *lock)
+{
+
+	if (_pthread_mutex_init_calloc_cb(lock, base_calloc) != 0)
+		return (true);
+
+	return (false);
+}
+
+static inline unsigned
+malloc_spin_lock(pthread_mutex_t *lock)
+{
+	unsigned ret = 0;
+
+	if (__isthreaded) {
+		if (_pthread_mutex_trylock(lock) != 0) {
+			unsigned i;
+			volatile unsigned j;
+
+			/* Exponentially back off. */
+			for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
+				for (j = 0; j < (1U << i); j++)
+					ret++;
+
+				CPU_SPINWAIT;
+				if (_pthread_mutex_trylock(lock) == 0)
+					return (ret);
+			}
+
+			/*
+			 * Spinning failed.  Block until the lock becomes
+			 * available, in order to avoid indefinite priority
+			 * inversion.
+			 */
+			_pthread_mutex_lock(lock);
+			assert((ret << BLOCK_COST_2POW) != 0);
+			return (ret << BLOCK_COST_2POW);
+		}
+	}
+
+	return (ret);
+}
+
+static inline void
+malloc_spin_unlock(pthread_mutex_t *lock)
+{
+
+	if (__isthreaded)
+		_pthread_mutex_unlock(lock);
+}
+#endif
+
+/*
+ * End spin lock.
+ */
+
+/******************************************************************************/
+/*
+ * Begin Utility functions/macros.
+ */
+
+/* Return the chunk address for allocation address a. */
+#define	CHUNK_ADDR2BASE(a)						\
+	((void *)((uintptr_t)(a) & ~chunksize_mask))
+
+/* Return the chunk offset of address a. */
+#define	CHUNK_ADDR2OFFSET(a)						\
+	((size_t)((uintptr_t)(a) & chunksize_mask))
+
+/* Return the smallest chunk multiple that is >= s. */
+#define	CHUNK_CEILING(s)						\
+	(((s) + chunksize_mask) & ~chunksize_mask)
+
+/* Return the smallest cacheline multiple that is >= s. */
+#define	CACHELINE_CEILING(s)						\
+	(((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
+
+/* Return the smallest quantum multiple that is >= a. */
+#define	QUANTUM_CEILING(a)						\
+	(((a) + quantum_mask) & ~quantum_mask)
+
+/* Return the smallest pagesize multiple that is >= s. */
+#define	PAGE_CEILING(s)							\
+	(((s) + pagesize_mask) & ~pagesize_mask)
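+
+/*
+ * Illustration of the ceiling macros above, assuming a 4 kB page size:
+ * PAGE_CEILING(1) == 4096, PAGE_CEILING(4096) == 4096, and
+ * PAGE_CEILING(5000) == 8192.  The same mask-and-add pattern applies to the
+ * chunk, cacheline, and quantum variants.
+ */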
+
+/* Compute the smallest power of 2 that is >= x. */
+static inline size_t
+pow2_ceil(size_t x)
+{
+
+	x--;
+	x |= x >> 1;
+	x |= x >> 2;
+	x |= x >> 4;
+	x |= x >> 8;
+	x |= x >> 16;
+#if (SIZEOF_PTR == 8)
+	x |= x >> 32;
+#endif
+	x++;
+	return (x);
+}
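+
+/*
+ * Examples: pow2_ceil(1) == 1, pow2_ceil(33) == 64, pow2_ceil(4096) == 4096.
+ * The shift/or sequence above smears the highest set bit of (x - 1) into all
+ * lower bit positions, so the final increment lands on the next power of two.
+ */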
+
+#if (defined(MALLOC_LAZY_FREE) || defined(MALLOC_BALANCE))
+/*
+ * Use a simple linear congruential pseudo-random number generator:
+ *
+ *   prn(x) = (a*x + c) % m
+ *
+ * where the following constants ensure maximal period:
+ *
+ *   a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
+ *   c == Odd number (relatively prime to 2^n).
+ *   m == 2^32
+ *
+ * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
+ *
+ * This choice of m has the disadvantage that the quality of the bits is
+ * proportional to bit position.  For example, the lowest bit has a cycle of 2,
+ * the next has a cycle of 4, etc.  For this reason, we prefer to use the upper
+ * bits.
+ */
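+/*
+ * Worked example of the recurrence above: with a == 12345 and c == 12347 (the
+ * lazy_free parameters used below), starting from x == 1 the next state is
+ * (12345 * 1 + 12347) % 2^32 == 24692.  The generated prn_*() functions
+ * return the top lg_range bits of the new state, so a call like
+ * PRN(suffix, 3) yields a value in [0, 8) drawn from the higher-quality upper
+ * bits.
+ */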
+#  define PRN_DEFINE(suffix, var, a, c)					\
+static inline void							\
+sprn_##suffix(uint32_t seed)						\
+{									\
+	var = seed;							\
+}									\
+									\
+static inline uint32_t							\
+prn_##suffix(uint32_t lg_range)						\
+{									\
+	uint32_t ret, x;						\
+									\
+	assert(lg_range > 0);						\
+	assert(lg_range <= 32);						\
+									\
+	x = (var * (a)) + (c);						\
+	var = x;							\
+	ret = x >> (32 - lg_range);					\
+									\
+	return (ret);							\
+}
+#  define SPRN(suffix, seed)	sprn_##suffix(seed)
+#  define PRN(suffix, lg_range)	prn_##suffix(lg_range)
+#endif
+
+/*
+ * Define PRNGs, one for each purpose, in order to avoid auto-correlation
+ * problems.
+ */
+
+#ifdef MALLOC_LAZY_FREE
+/* Define the per-thread PRNG used for lazy deallocation. */
+static __thread uint32_t lazy_free_x;
+PRN_DEFINE(lazy_free, lazy_free_x, 12345, 12347)
+#endif
+
+#ifdef MALLOC_BALANCE
+/* Define the PRNG used for arena assignment. */
+static __thread uint32_t balance_x;
+PRN_DEFINE(balance, balance_x, 1297, 1301)
+#endif
+
+static inline const char *
+_getprogname(void)
+{
+
+	return ("<jemalloc>");
+}
+
+static void
+wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
+{
+#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_WINDOWS)
+#define	_write	write
+#endif
+	_write(STDERR_FILENO, p1, (unsigned int) strlen(p1));
+	_write(STDERR_FILENO, p2, (unsigned int) strlen(p2));
+	_write(STDERR_FILENO, p3, (unsigned int) strlen(p3));
+	_write(STDERR_FILENO, p4, (unsigned int) strlen(p4));
+}
+
+#define _malloc_message malloc_message
+
+void	(*_malloc_message)(const char *p1, const char *p2, const char *p3,
+	    const char *p4) = wrtmessage;
+
+#ifdef MALLOC_STATS
+/*
+ * Print to stderr in such a way as to (hopefully) avoid memory allocation.
+ */
+static void
+malloc_printf(const char *format, ...)
+{
+	char buf[4096];
+	va_list ap;
+
+	va_start(ap, format);
+	vsnprintf(buf, sizeof(buf), format, ap);
+	va_end(ap);
+	_malloc_message(buf, "", "", "");
+}
+#endif
+
+/*
+ * We don't want to depend on vsnprintf() for production builds, since that can
+ * cause unnecessary bloat for static binaries.  umax2s() provides minimal
+ * integer printing functionality, so that malloc_printf() use can be limited to
+ * MALLOC_STATS code.
+ */
+#define	UMAX2S_BUFSIZE	21
+static char *
+umax2s(uintmax_t x, char *s)
+{
+	unsigned i;
+
+	/* Make sure UMAX2S_BUFSIZE is large enough. */
+	assert(sizeof(uintmax_t) <= 8);
+
+	i = UMAX2S_BUFSIZE - 1;
+	s[i] = '\0';
+	do {
+		i--;
+		s[i] = "0123456789"[x % 10];
+		x /= 10;
+	} while (x > 0);
+
+	return (&s[i]);
+}
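+
+/*
+ * Example use of umax2s(): given char buf[UMAX2S_BUFSIZE], umax2s(123, buf)
+ * stores "123" (NUL-terminated) at the tail of buf and returns a pointer to
+ * the '1'.  The caller must supply a buffer of at least UMAX2S_BUFSIZE bytes.
+ */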
+
+/******************************************************************************/
+
+#ifdef MALLOC_DSS
+static inline bool
+base_pages_alloc_dss(size_t minsize)
+{
+
+	/*
+	 * Do special DSS allocation here, since base allocations don't need to
+	 * be chunk-aligned.
+	 */
+	malloc_mutex_lock(&dss_mtx);
+	if (dss_prev != (void *)-1) {
+		intptr_t incr;
+		size_t csize = CHUNK_CEILING(minsize);
+
+		do {
+			/* Get the current end of the DSS. */
+			dss_max = sbrk(0);
+
+			/*
+			 * Calculate how much padding is necessary to
+			 * chunk-align the end of the DSS.  Don't worry about
+			 * dss_max not being chunk-aligned though.
+			 */
+			incr = (intptr_t)chunksize
+			    - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
+			assert(incr >= 0);
+			if ((size_t)incr < minsize)
+				incr += csize;
+
+			dss_prev = sbrk(incr);
+			if (dss_prev == dss_max) {
+				/* Success. */
+				dss_max = (void *)((intptr_t)dss_prev + incr);
+				base_pages = dss_prev;
+				base_next_addr = base_pages;
+				base_past_addr = dss_max;
+#ifdef MALLOC_STATS
+				base_mapped += incr;
+#endif
+				malloc_mutex_unlock(&dss_mtx);
+				return (false);
+			}
+		} while (dss_prev != (void *)-1);
+	}
+	malloc_mutex_unlock(&dss_mtx);
+
+	return (true);
+}
+#endif
+
+static inline bool
+base_pages_alloc_mmap(size_t minsize)
+{
+	size_t csize;
+
+	assert(minsize != 0);
+	csize = PAGE_CEILING(minsize);
+	base_pages = pages_map(NULL, csize);
+	if (base_pages == NULL)
+		return (true);
+	base_next_addr = base_pages;
+	base_past_addr = (void *)((uintptr_t)base_pages + csize);
+#ifdef MALLOC_STATS
+	base_mapped += csize;
+#endif
+
+	return (false);
+}
+
+static bool
+base_pages_alloc(size_t minsize)
+{
+
+#ifdef MALLOC_DSS
+	if (opt_dss) {
+		if (base_pages_alloc_dss(minsize) == false)
+			return (false);
+	}
+
+	if (opt_mmap && minsize != 0)
+#endif
+	{
+		if (base_pages_alloc_mmap(minsize) == false)
+			return (false);
+	}
+
+	return (true);
+}
+
+static inline void *
+base_alloc_locked(size_t size)
+{
+	void *ret;
+	size_t csize;
+
+	/* Round size up to nearest multiple of the cacheline size. */
+	csize = CACHELINE_CEILING(size);
+
+	/* Make sure there's enough space for the allocation. */
+	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
+		if (base_pages_alloc(csize))
+			return (NULL);
+	}
+
+	/* Allocate. */
+	ret = base_next_addr;
+	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
+
+	return (ret);
+}
+
+static void *
+base_alloc(size_t size)
+{
+	void *ret;
+
+	malloc_mutex_lock(&base_mtx);
+	ret = base_alloc_locked(size);
+	malloc_mutex_unlock(&base_mtx);
+
+	return (ret);
+}
+
+static void *
+base_calloc(size_t number, size_t size)
+{
+	void *ret;
+
+	ret = base_alloc(number * size);
+	if (ret != NULL)
+		memset(ret, 0, number * size);
+
+	return (ret);
+}
+
+static inline node_mag_t *
+base_node_mag_alloc_locked(void)
+{
+	node_mag_t *ret;
+
+	if (base_node_mags_avail != NULL) {
+		ret = base_node_mags_avail;
+		base_node_mags_avail = base_node_mags_avail->next;
+	} else {
+		extent_node_t *node;
+		unsigned i;
+
+		if (base_node_mag_partial == NULL) {
+			ret = base_alloc_locked(sizeof(node_mag_t));
+			if (ret == NULL)
+				return (NULL);
+		} else {
+			/*
+			 * Try to complete partial initialization that was
+			 * impeded by OOM.
+			 */
+			ret = base_node_mag_partial;
+			base_node_mag_partial = NULL;
+		}
+		ret->next = NULL;
+		ret->nnodes = NODE_MAG_NNODES;
+		for (i = 0; i < NODE_MAG_NNODES; i++) {
+			if (ret->nodes[i] == NULL) {
+				node = (extent_node_t *)base_alloc_locked(
+				    sizeof(extent_node_t));
+				if (node == NULL) {
+					/*
+					 * Stash the magazine for later
+					 * completion of initialization.
+					 */
+					base_node_mag_partial = ret;
+					return (NULL);
+				}
+				ret->nodes[i] = node;
+			}
+		}
+	}
+
+	return (ret);
+}
+
+static inline node_mag_t *
+base_node_mag_alloc(void)
+{
+	node_mag_t *ret;
+
+	malloc_mutex_lock(&base_mtx);
+	ret = base_node_mag_alloc_locked();
+	malloc_mutex_unlock(&base_mtx);
+
+	return (ret);
+}
+
+static inline void
+base_node_mag_dealloc_locked(node_mag_t *mag)
+{
+
+	mag->next = base_node_mags_avail;
+	base_node_mags_avail = mag;
+}
+
+static inline void
+base_node_mag_dealloc(node_mag_t *mag)
+{
+
+	malloc_mutex_lock(&base_mtx);
+	base_node_mag_dealloc_locked(mag);
+	malloc_mutex_unlock(&base_mtx);
+}
+
+static extent_node_t *
+base_node_alloc(void)
+{
+	extent_node_t *ret;
+
+	malloc_mutex_lock(&base_mtx);
+	if (base_node_mag == NULL || base_node_mag->nnodes == 0) {
+		node_mag_t *node_mag = base_node_mag_alloc_locked();
+		if (node_mag == NULL) {
+			malloc_mutex_unlock(&base_mtx);
+			return (NULL);
+		}
+		node_mag->next = base_node_mag;
+		base_node_mag = node_mag;
+	}
+	base_node_mag->nnodes--;
+	ret = base_node_mag->nodes[base_node_mag->nnodes];
+	malloc_mutex_unlock(&base_mtx);
+
+	return (ret);
+}
+
+static void
+base_node_dealloc(extent_node_t *node)
+{
+
+	malloc_mutex_lock(&base_mtx);
+	if (base_node_mag->nnodes == NODE_MAG_NNODES) {
+		/*
+		 * Move full magazine to base_node_mags_avail.  This will leave
+		 * an empty magazine in base_node_mag.
+		 */
+		node_mag_t *node_mag = base_node_mag;
+		base_node_mag = base_node_mag->next;
+		base_node_mag_dealloc_locked(node_mag);
+	}
+	base_node_mag->nodes[base_node_mag->nnodes] = node;
+	base_node_mag->nnodes++;
+	malloc_mutex_unlock(&base_mtx);
+}
+
+/******************************************************************************/
+
+#ifdef MALLOC_STATS
+static void
+stats_print(arena_t *arena)
+{
+	unsigned i, gap_start;
+
+	malloc_printf("            allocated      nmalloc      ndalloc\n");
+#ifdef MOZ_MEMORY_WINDOWS
+	malloc_printf("small:   %12Iu %12I64u %12I64u\n",
+	    arena->stats.allocated_small, arena->stats.nmalloc_small,
+	    arena->stats.ndalloc_small);
+	malloc_printf("large:   %12Iu %12I64u %12I64u\n",
+	    arena->stats.allocated_large, arena->stats.nmalloc_large,
+	    arena->stats.ndalloc_large);
+	malloc_printf("total:   %12Iu %12I64u %12I64u\n",
+	    arena->stats.allocated_small + arena->stats.allocated_large,
+	    arena->stats.nmalloc_small + arena->stats.nmalloc_large,
+	    arena->stats.ndalloc_small + arena->stats.ndalloc_large);
+	malloc_printf("mapped:  %12Iu\n", arena->stats.mapped);
+	malloc_printf("dirty:   %12Iu\n", arena->ndirty);
+	malloc_printf("purged:  %12I64u\n", arena->stats.npurged);
+	malloc_printf("nmadvise:%12I64u\n", arena->stats.nmadvise);
+#else
+	malloc_printf("small:   %12zu %12llu %12llu\n",
+	    arena->stats.allocated_small, arena->stats.nmalloc_small,
+	    arena->stats.ndalloc_small);
+	malloc_printf("large:   %12zu %12llu %12llu\n",
+	    arena->stats.allocated_large, arena->stats.nmalloc_large,
+	    arena->stats.ndalloc_large);
+	malloc_printf("total:   %12zu %12llu %12llu\n",
+	    arena->stats.allocated_small + arena->stats.allocated_large,
+	    arena->stats.nmalloc_small + arena->stats.nmalloc_large,
+	    arena->stats.ndalloc_small + arena->stats.ndalloc_large);
+	malloc_printf("mapped:  %12zu\n", arena->stats.mapped);
+	malloc_printf("dirty:   %12zu\n", arena->ndirty);
+	malloc_printf("purged:  %12llu\n", arena->stats.npurged);
+	malloc_printf("nmadvise:%12llu\n", arena->stats.nmadvise);
+#endif
+	malloc_printf("bins:     bin   size regs pgs  requests   newruns"
+	    "    reruns maxruns curruns\n");
+	for (i = 0, gap_start = UINT_MAX; i < ntbins + nqbins + nsbins; i++) {
+		if (arena->bins[i].stats.nrequests == 0) {
+			if (gap_start == UINT_MAX)
+				gap_start = i;
+		} else {
+			if (gap_start != UINT_MAX) {
+				if (i > gap_start + 1) {
+					/* Gap of more than one size class. */
+					malloc_printf("[%u..%u]\n",
+					    gap_start, i - 1);
+				} else {
+					/* Gap of one size class. */
+					malloc_printf("[%u]\n", gap_start);
+				}
+				gap_start = UINT_MAX;
+			}
+			malloc_printf(
+#if defined(MOZ_MEMORY_WINDOWS)
+			    "%13u %1s %4u %4u %3u %9I64u %9I64u"
+			    " %9I64u %7u %7u\n",
+#else
+			    "%13u %1s %4u %4u %3u %9llu %9llu"
+			    " %9llu %7lu %7lu\n",
+#endif
+			    i,
+			    i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S",
+			    arena->bins[i].reg_size,
+			    arena->bins[i].nregs,
+			    arena->bins[i].run_size >> pagesize_2pow,
+			    arena->bins[i].stats.nrequests,
+			    arena->bins[i].stats.nruns,
+			    arena->bins[i].stats.reruns,
+			    arena->bins[i].stats.highruns,
+			    arena->bins[i].stats.curruns);
+		}
+	}
+	if (gap_start != UINT_MAX) {
+		if (i > gap_start + 1) {
+			/* Gap of more than one size class. */
+			malloc_printf("[%u..%u]\n", gap_start, i - 1);
+		} else {
+			/* Gap of one size class. */
+			malloc_printf("[%u]\n", gap_start);
+		}
+	}
+}
+#endif
+
+/*
+ * End Utility functions/macros.
+ */
+/******************************************************************************/
+/*
+ * Begin extent tree code.
+ */
+
+static inline int
+extent_ad_comp(extent_node_t *a, extent_node_t *b)
+{
+	uintptr_t a_addr = (uintptr_t)a->addr;
+	uintptr_t b_addr = (uintptr_t)b->addr;
+
+	return ((a_addr > b_addr) - (a_addr < b_addr));
+}
+
+/* Generate red-black tree code for address-ordered extents. */
+RB_GENERATE_STATIC(extent_tree_ad_s, extent_node_s, link_ad, extent_ad_comp)
+
+static inline int
+extent_szad_comp(extent_node_t *a, extent_node_t *b)
+{
+	int ret;
+	size_t a_size = a->size;
+	size_t b_size = b->size;
+
+	ret = (a_size > b_size) - (a_size < b_size);
+	if (ret == 0) {
+		uintptr_t a_addr = (uintptr_t)a->addr;
+		uintptr_t b_addr = (uintptr_t)b->addr;
+
+		ret = (a_addr > b_addr) - (a_addr < b_addr);
+	}
+
+	return (ret);
+}
+
+/* Generate red-black tree code for size/address-ordered extents. */
+RB_GENERATE_STATIC(extent_tree_szad_s, extent_node_s, link_szad,
+    extent_szad_comp)
+
+/*
+ * End extent tree code.
+ */
+/******************************************************************************/
+/*
+ * Begin chunk management functions.
+ */
+
+#ifdef MOZ_MEMORY_WINDOWS
+static void *
+pages_map(void *addr, size_t size)
+{
+	void *ret;
+
+	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
+	    PAGE_READWRITE);
+
+	return (ret);
+}
+
+static void
+pages_unmap(void *addr, size_t size)
+{
+
+	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
+		_malloc_message(_getprogname(),
+		    ": (malloc) Error in VirtualFree()\n", "", "");
+		if (opt_abort)
+			abort();
+	}
+}
+#elif (defined(MOZ_MEMORY_DARWIN))
+static void *
+pages_map(void *addr, size_t size)
+{
+	void *ret;
+	kern_return_t err;
+	int flags;
+
+	if (addr != NULL) {
+		ret = addr;
+		flags = 0;
+	} else
+		flags = VM_FLAGS_ANYWHERE;
+
+	err = vm_allocate((vm_map_t)mach_task_self(), (vm_address_t *)&ret,
+	    (vm_size_t)size, flags);
+	if (err != KERN_SUCCESS)
+		ret = NULL;
+
+	assert(ret == NULL || (addr == NULL && ret != addr)
+	    || (addr != NULL && ret == addr));
+	return (ret);
+}
+
+static void
+pages_unmap(void *addr, size_t size)
+{
+	kern_return_t err;
+
+	err = vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)addr,
+	    (vm_size_t)size);
+	if (err != KERN_SUCCESS) {
+		malloc_message(_getprogname(),
+		    ": (malloc) Error in vm_deallocate(): ",
+		    mach_error_string(err), "\n");
+		if (opt_abort)
+			abort();
+	}
+}
+#else /* MOZ_MEMORY_DARWIN */
+static void *
+pages_map(void *addr, size_t size)
+{
+	void *ret;
+
+	/*
+	 * We don't use MAP_FIXED here, because it can cause the *replacement*
+	 * of existing mappings, and we only want to create new mappings.
+	 */
+	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+	    -1, 0);
+	assert(ret != NULL);
+
+	if (ret == MAP_FAILED)
+		ret = NULL;
+	else if (addr != NULL && ret != addr) {
+		/*
+		 * We succeeded in mapping memory, but not in the right place.
+		 */
+		if (munmap(ret, size) == -1) {
+			char buf[STRERROR_BUF];
+
+			strerror_r(errno, buf, sizeof(buf));
+			_malloc_message(_getprogname(),
+			    ": (malloc) Error in munmap(): ", buf, "\n");
+			if (opt_abort)
+				abort();
+		}
+		ret = NULL;
+	}
+
+	assert(ret == NULL || (addr == NULL && ret != addr)
+	    || (addr != NULL && ret == addr));
+	return (ret);
+}
+
+static void
+pages_unmap(void *addr, size_t size)
+{
+
+	if (munmap(addr, size) == -1) {
+		char buf[STRERROR_BUF];
+
+		strerror_r(errno, buf, sizeof(buf));
+		_malloc_message(_getprogname(),
+		    ": (malloc) Error in munmap(): ", buf, "\n");
+		if (opt_abort)
+			abort();
+	}
+}
+#endif
+
+#ifdef MALLOC_DSS
+static inline void *
+chunk_alloc_dss(size_t size)
+{
+
+	malloc_mutex_lock(&dss_mtx);
+	if (dss_prev != (void *)-1) {
+		intptr_t incr;
+
+		/*
+		 * The loop is necessary to recover from races with other
+		 * threads that are using the DSS for something other than
+		 * malloc.
+		 */
+		do {
+			void *ret;
+
+			/* Get the current end of the DSS. */
+			dss_max = sbrk(0);
+
+			/*
+			 * Calculate how much padding is necessary to
+			 * chunk-align the end of the DSS.
+			 */
+			incr = (intptr_t)size
+			    - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
+			if (incr == (intptr_t)size)
+				ret = dss_max;
+			else {
+				ret = (void *)((intptr_t)dss_max + incr);
+				incr += size;
+			}
+
+			dss_prev = sbrk(incr);
+			if (dss_prev == dss_max) {
+				/* Success. */
+				dss_max = (void *)((intptr_t)dss_prev + incr);
+				malloc_mutex_unlock(&dss_mtx);
+				return (ret);
+			}
+		} while (dss_prev != (void *)-1);
+	}
+	malloc_mutex_unlock(&dss_mtx);
+
+	return (NULL);
+}
+
+static inline void *
+chunk_recycle_dss(size_t size, bool zero)
+{
+	extent_node_t *node, key;
+
+	key.addr = NULL;
+	key.size = size;
+	malloc_mutex_lock(&dss_mtx);
+	node = RB_NFIND(extent_tree_szad_s, &dss_chunks_szad, &key);
+	if (node != NULL) {
+		void *ret = node->addr;
+
+		/* Remove node from the tree. */
+		RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
+		if (node->size == size) {
+			RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad, node);
+			base_node_dealloc(node);
+		} else {
+			/*
+			 * Insert the remainder of node's address range as a
+			 * smaller chunk.  Its position within dss_chunks_ad
+			 * does not change.
+			 */
+			assert(node->size > size);
+			node->addr = (void *)((uintptr_t)node->addr + size);
+			node->size -= size;
+			RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+		}
+		malloc_mutex_unlock(&dss_mtx);
+
+		if (zero)
+			memset(ret, 0, size);
+		return (ret);
+	}
+	malloc_mutex_unlock(&dss_mtx);
+
+	return (NULL);
+}
+#endif
+
+#ifdef MOZ_MEMORY_WINDOWS
+static inline void *
+chunk_alloc_mmap(size_t size)
+{
+	void *ret;
+	size_t offset;
+
+	/*
+	 * Windows requires that there be a 1:1 mapping between VM
+	 * allocation/deallocation operations.  Therefore, take care here to
+	 * acquire the final result via one mapping operation.  This means
+	 * unmapping any preliminary result that is not correctly aligned.
+	 */
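+	/*
+	 * For illustration, assuming a 1 MB chunk size: if the first mapping
+	 * comes back with chunk offset 0x40000, it is released in full and a
+	 * single new mapping is requested at (ret + size - 0x40000).  Only if
+	 * that also fails does the loop below fall back to probing with an
+	 * over-sized (size + chunksize) request.
+	 */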
+
+	ret = pages_map(NULL, size);
+	if (ret == NULL)
+		return (NULL);
+
+	offset = CHUNK_ADDR2OFFSET(ret);
+	if (offset != 0) {
+		/* Deallocate, then try to allocate at (ret + size - offset). */
+		pages_unmap(ret, size);
+		ret = pages_map((void *)((uintptr_t)ret + size - offset), size);
+		while (ret == NULL) {
+			/*
+			 * Over-allocate in order to map a memory region that
+			 * is definitely large enough.
+			 */
+			ret = pages_map(NULL, size + chunksize);
+			if (ret == NULL)
+				return (NULL);
+			/*
+			 * Deallocate, then allocate the correct size, within
+			 * the over-sized mapping.
+			 */
+			offset = CHUNK_ADDR2OFFSET(ret);
+			pages_unmap(ret, size + chunksize);
+			if (offset == 0)
+				ret = pages_map(ret, size);
+			else {
+				ret = pages_map((void *)((uintptr_t)ret + chunksize
+				    - offset), size);
+			}
+			/*
+			 * Failure here indicates a race with another thread, so
+			 * try again.
+			 */
+		}
+	}
+
+	return (ret);
+}
+#else
+static inline void *
+chunk_alloc_mmap(size_t size)
+{
+	void *ret;
+	size_t offset;
+
+	/*
+	 * Ideally, there would be a way to specify alignment to mmap() (like
+	 * NetBSD has), but in the absence of such a feature, we have to work
+	 * hard to efficiently create aligned mappings.  The reliable, but
+	 * expensive method is to create a mapping that is over-sized, then
+	 * trim the excess.  However, that always results in at least one call
+	 * to pages_unmap().
+	 *
+	 * A more optimistic approach is to try mapping precisely the right
+	 * amount, then try to append another mapping if alignment is off.  In
+	 * practice, this works out well as long as the application is not
+	 * interleaving mappings via direct mmap() calls.  If we do run into a
+	 * situation where there is an interleaved mapping and we are unable to
+	 * extend an unaligned mapping, our best option is to momentarily
+	 * revert to the reliable-but-expensive method.  This will tend to
+	 * leave a gap in the memory map that is too small to cause later
+	 * problems for the optimistic method.
+	 */
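+	/*
+	 * For illustration, assuming a 1 MB chunk size: if the initial
+	 * mapping lands at an address with chunk offset 0x40000, the code
+	 * below tries to map an additional (0x100000 - 0x40000) == 0xc0000
+	 * bytes immediately after it.  On success the leading 0xc0000 bytes
+	 * are unmapped and the chunk-aligned remainder is returned; otherwise
+	 * it falls back to the over-allocate-and-trim approach.
+	 */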
+
+	ret = pages_map(NULL, size);
+	if (ret == NULL)
+		return (NULL);
+
+	offset = CHUNK_ADDR2OFFSET(ret);
+	if (offset != 0) {
+		/* Try to extend chunk boundary. */
+		if (pages_map((void *)((uintptr_t)ret + size),
+		    chunksize - offset) == NULL) {
+			/*
+			 * Extension failed.  Clean up, then revert to the
+			 * reliable-but-expensive method.
+			 */
+			pages_unmap(ret, size);
+
+			/* Beware size_t wrap-around. */
+			if (size + chunksize <= size)
+				return NULL;
+
+			ret = pages_map(NULL, size + chunksize);
+			if (ret == NULL)
+				return (NULL);
+
+			/* Clean up unneeded leading/trailing space. */
+			offset = CHUNK_ADDR2OFFSET(ret);
+			if (offset != 0) {
+				/* Leading space. */
+				pages_unmap(ret, chunksize - offset);
+
+				ret = (void *)((uintptr_t)ret +
+				    (chunksize - offset));
+
+				/* Trailing space. */
+				pages_unmap((void *)((uintptr_t)ret + size),
+				    offset);
+			} else {
+				/* Trailing space only. */
+				pages_unmap((void *)((uintptr_t)ret + size),
+				    chunksize);
+			}
+		} else {
+			/* Clean up unneeded leading space. */
+			pages_unmap(ret, chunksize - offset);
+			ret = (void *)((uintptr_t)ret + (chunksize - offset));
+		}
+	}
+
+	return (ret);
+}
+#endif
+
+static void *
+chunk_alloc(size_t size, bool zero)
+{
+	void *ret;
+
+	assert(size != 0);
+	assert((size & chunksize_mask) == 0);
+
+
+#ifdef MALLOC_DSS
+	if (opt_dss) {
+		ret = chunk_recycle_dss(size, zero);
+		if (ret != NULL) {
+			goto RETURN;
+		}
+
+		ret = chunk_alloc_dss(size);
+		if (ret != NULL)
+			goto RETURN;
+	}
+
+	if (opt_mmap)
+#endif
+	{
+		ret = chunk_alloc_mmap(size);
+		if (ret != NULL)
+			goto RETURN;
+	}
+
+	/* All strategies for allocation failed. */
+	ret = NULL;
+RETURN:
+#ifdef MALLOC_STATS
+	if (ret != NULL) {
+		stats_chunks.nchunks += (size / chunksize);
+		stats_chunks.curchunks += (size / chunksize);
+	}
+	if (stats_chunks.curchunks > stats_chunks.highchunks)
+		stats_chunks.highchunks = stats_chunks.curchunks;
+#endif
+
+	assert(CHUNK_ADDR2BASE(ret) == ret);
+	return (ret);
+}
+
+#ifdef MALLOC_DSS
+static inline extent_node_t *
+chunk_dealloc_dss_record(void *chunk, size_t size)
+{
+	extent_node_t *node, *prev, key;
+
+	key.addr = (void *)((uintptr_t)chunk + size);
+	node = RB_NFIND(extent_tree_ad_s, &dss_chunks_ad, &key);
+	/* Try to coalesce forward. */
+	if (node != NULL && node->addr == key.addr) {
+		/*
+		 * Coalesce chunk with the following address range.  This does
+		 * not change the position within dss_chunks_ad, so only
+		 * remove/insert from/into dss_chunks_szad.
+		 */
+		RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
+		node->addr = chunk;
+		node->size += size;
+		RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+	} else {
+		/*
+		 * Coalescing forward failed, so insert a new node.  Drop
+		 * dss_mtx during node allocation, since it is possible that a
+		 * new base chunk will be allocated.
+		 */
+		malloc_mutex_unlock(&dss_mtx);
+		node = base_node_alloc();
+		malloc_mutex_lock(&dss_mtx);
+		if (node == NULL)
+			return (NULL);
+		node->addr = chunk;
+		node->size = size;
+		RB_INSERT(extent_tree_ad_s, &dss_chunks_ad, node);
+		RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+	}
+
+	/* Try to coalesce backward. */
+	prev = RB_PREV(extent_tree_ad_s, &dss_chunks_ad, node);
+	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
+	    chunk) {
+		/*
+		 * Coalesce chunk with the previous address range.  This does
+		 * not change the position within dss_chunks_ad, so only
+		 * remove/insert node from/into dss_chunks_szad.
+		 */
+		RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad, prev);
+		RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, prev);
+
+		RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad, node);
+		node->addr = prev->addr;
+		node->size += prev->size;
+		RB_INSERT(extent_tree_szad_s, &dss_chunks_szad, node);
+
+		base_node_dealloc(prev);
+	}
+
+	return (node);
+}
+
+static inline bool
+chunk_dealloc_dss(void *chunk, size_t size)
+{
+
+	malloc_mutex_lock(&dss_mtx);
+	if ((uintptr_t)chunk >= (uintptr_t)dss_base
+	    && (uintptr_t)chunk < (uintptr_t)dss_max) {
+		extent_node_t *node;
+
+		/* Try to coalesce with other unused chunks. */
+		node = chunk_dealloc_dss_record(chunk, size);
+		if (node != NULL) {
+			chunk = node->addr;
+			size = node->size;
+		}
+
+		/* Get the current end of the DSS. */
+		dss_max = sbrk(0);
+
+		/*
+		 * Try to shrink the DSS if this chunk is at the end of the
+		 * DSS.  The sbrk() call here is subject to a race condition
+		 * with threads that use brk(2) or sbrk(2) directly, but the
+		 * alternative would be to leak memory for the sake of poorly
+		 * designed multi-threaded programs.
+		 */
+		if ((void *)((uintptr_t)chunk + size) == dss_max
+		    && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
+			/* Success. */
+			dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);
+
+			if (node != NULL) {
+				RB_REMOVE(extent_tree_ad_s, &dss_chunks_ad,
+				    node);
+				RB_REMOVE(extent_tree_szad_s, &dss_chunks_szad,
+				    node);
+				base_node_dealloc(node);
+			}
+			malloc_mutex_unlock(&dss_mtx);
+		} else {
+			malloc_mutex_unlock(&dss_mtx);
+#ifdef MOZ_MEMORY_WINDOWS
+			VirtualAlloc(chunk, size, MEM_RESET, PAGE_READWRITE);
+#elif (defined(MOZ_MEMORY_DARWIN))
+			mmap(chunk, size, PROT_READ | PROT_WRITE, MAP_PRIVATE
+			    | MAP_ANON | MAP_FIXED, -1, 0);
+			//XXXmsync(chunk, size, MS_DEACTIVATE);
+#else
+			madvise(chunk, size, MADV_FREE);
+#endif
+		}
+
+		return (false);
+	}
+	malloc_mutex_unlock(&dss_mtx);
+
+	return (true);
+}
+#endif
+
+static inline void
+chunk_dealloc_mmap(void *chunk, size_t size)
+{
+
+	pages_unmap(chunk, size);
+}
+
+static void
+chunk_dealloc(void *chunk, size_t size)
+{
+
+	assert(chunk != NULL);
+	assert(CHUNK_ADDR2BASE(chunk) == chunk);
+	assert(size != 0);
+	assert((size & chunksize_mask) == 0);
+
+#ifdef MALLOC_STATS
+	stats_chunks.curchunks -= (size / chunksize);
+#endif
+
+#ifdef MALLOC_DSS
+	if (opt_dss) {
+		if (chunk_dealloc_dss(chunk, size) == false)
+			return;
+	}
+
+	if (opt_mmap)
+#endif
+		chunk_dealloc_mmap(chunk, size);
+}
+
+/*
+ * End chunk management functions.
+ */
+/******************************************************************************/
+/*
+ * Begin arena.
+ */
+
+/*
+ * Choose an arena based on a per-thread value (fast-path code, calls slow-path
+ * code if necessary).
+ */
+static inline arena_t *
+choose_arena(void)
+{
+	arena_t *ret;
+
+	/*
+	 * We can only use TLS if this is a PIC library, since for the static
+	 * library version, libc's malloc is used by TLS allocation, which
+	 * introduces a bootstrapping issue.
+	 */
+#ifndef NO_TLS
+	if (__isthreaded == false) {
+	    /*
+	     * Avoid the overhead of TLS for single-threaded operation.  If the
+	     * app switches to threaded mode, the initial thread may end up
+	     * being assigned to some other arena, but this one-time switch
+	     * shouldn't cause significant issues.
+	     */
+	    return (arenas[0]);
+	}
+
+#  ifdef MOZ_MEMORY_WINDOWS
+	ret = TlsGetValue(tlsIndex);
+#  else
+	ret = arenas_map;
+#  endif
+
+	if (ret == NULL) {
+		ret = choose_arena_hard();
+		assert(ret != NULL);
+	}
+#else
+	if (__isthreaded && narenas > 1) {
+		unsigned long ind;
+
+		/*
+		 * Hash _pthread_self() to one of the arenas.  There is a prime
+		 * number of arenas, so this has a reasonable chance of
+		 * working.  Even so, the hashing can be easily thwarted by
+		 * inconvenient _pthread_self() values.  Without specific
+		 * knowledge of how _pthread_self() calculates values, we can't
+		 * easily do much better than this.
+		 */
+		ind = (unsigned long) _pthread_self() % narenas;
+
+		/*
+		 * Optimistically assume that arenas[ind] has been initialized.
+		 * At worst, we find out that some other thread has already
+		 * done so, after acquiring the lock in preparation.  Note that
+		 * this lazy locking also has the effect of lazily forcing
+		 * cache coherency; without the lock acquisition, there's no
+		 * guarantee that modification of arenas[ind] by another thread
+		 * would be seen on this CPU for an arbitrary amount of time.
+		 *
+		 * In general, this approach to modifying a synchronized value
+		 * isn't a good idea, but in this case we only ever modify the
+		 * value once, so things work out well.
+		 */
+		ret = arenas[ind];
+		if (ret == NULL) {
+			/*
+			 * Avoid races with another thread that may have already
+			 * initialized arenas[ind].
+			 */
+			malloc_spin_lock(&arenas_lock);
+			if (arenas[ind] == NULL)
+				ret = arenas_extend((unsigned)ind);
+			else
+				ret = arenas[ind];
+			malloc_spin_unlock(&arenas_lock);
+		}
+	} else
+		ret = arenas[0];
+#endif
+
+	assert(ret != NULL);
+	return (ret);
+}
+
+#ifndef NO_TLS
+/*
+ * Choose an arena based on a per-thread value (slow-path code only, called
+ * only by choose_arena()).
+ */
+static arena_t *
+choose_arena_hard(void)
+{
+	arena_t *ret;
+
+	assert(__isthreaded);
+
+#ifdef MALLOC_LAZY_FREE
+	/*
+	 * Seed the PRNG used for lazy deallocation.  Since seeding only occurs
+	 * on the first allocation by a thread, it is possible for a thread to
+	 * deallocate before seeding.  This is not a critical issue though,
+	 * since it is extremely unusual for an application to use threads
+	 * that deallocate but *never* allocate, and because even if seeding
+	 * never occurs for multiple threads, they will tend to drift apart
+	 * unless some aspect of the application forces deallocation
+	 * synchronization.
+	 */
+	SPRN(lazy_free, (uint32_t)(uintptr_t)(_pthread_self()));
+#endif
+
+#ifdef MALLOC_BALANCE
+	/*
+	 * Seed the PRNG used for arena load balancing.  We can get away with
+	 * using the same seed here as for the lazy_free PRNG without
+	 * introducing autocorrelation because the PRNG parameters are
+	 * distinct.
+	 */
+	SPRN(balance, (uint32_t)(uintptr_t)(_pthread_self()));
+#endif
+
+	if (narenas > 1) {
+#ifdef MALLOC_BALANCE
+		unsigned ind;
+
+		ind = PRN(balance, narenas_2pow);
+		if ((ret = arenas[ind]) == NULL) {
+			malloc_spin_lock(&arenas_lock);
+			if ((ret = arenas[ind]) == NULL)
+				ret = arenas_extend(ind);
+			malloc_spin_unlock(&arenas_lock);
+		}
+#else
+		malloc_spin_lock(&arenas_lock);
+		if ((ret = arenas[next_arena]) == NULL)
+			ret = arenas_extend(next_arena);
+		next_arena = (next_arena + 1) % narenas;
+		malloc_spin_unlock(&arenas_lock);
+#endif
+	} else
+		ret = arenas[0];
+
+#ifdef MOZ_MEMORY_WINDOWS
+	TlsSetValue(tlsIndex, ret);
+#else
+	arenas_map = ret;
+#endif
+
+	return (ret);
+}
+#endif
+
+static inline int
+arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b)
+{
+	uintptr_t a_chunk = (uintptr_t)a;
+	uintptr_t b_chunk = (uintptr_t)b;
+
+	assert(a != NULL);
+	assert(b != NULL);
+
+	return ((a_chunk > b_chunk) - (a_chunk < b_chunk));
+}
+
+/* Generate red-black tree code for arena chunks. */
+RB_GENERATE_STATIC(arena_chunk_tree_s, arena_chunk_s, link, arena_chunk_comp)
+
+static inline int
+arena_run_comp(arena_run_t *a, arena_run_t *b)
+{
+	uintptr_t a_run = (uintptr_t)a;
+	uintptr_t b_run = (uintptr_t)b;
+
+	assert(a != NULL);
+	assert(b != NULL);
+
+	return ((a_run > b_run) - (a_run < b_run));
+}
+
+/* Generate red-black tree code for arena runs. */
+RB_GENERATE_STATIC(arena_run_tree_s, arena_run_s, link, arena_run_comp)
+
+static inline extent_node_t *
+arena_node_alloc(arena_t *arena)
+{
+	extent_node_t *ret;
+	node_mag_t *node_mag = arena->node_mag_cur;
+
+	if (node_mag == NULL || node_mag->nnodes == 0) {
+		if (arena->node_mag_full != NULL) {
+			arena->node_mag_full->next = node_mag;
+			node_mag = arena->node_mag_full;
+			arena->node_mag_cur = node_mag;
+			arena->node_mag_full = NULL;
+		} else {
+			node_mag = base_node_mag_alloc();
+			if (node_mag == NULL)
+				return (NULL);
+			node_mag->next = arena->node_mag_cur;
+			arena->node_mag_cur = node_mag;
+		}
+	}
+
+	node_mag->nnodes--;
+	ret = node_mag->nodes[node_mag->nnodes];
+
+	return (ret);
+}
+
+static inline void
+arena_node_dealloc(arena_t *arena, extent_node_t *node)
+{
+	node_mag_t *node_mag = arena->node_mag_cur;
+
+	if (node_mag->nnodes == NODE_MAG_NNODES) {
+		if (arena->node_mag_full != NULL)
+			base_node_mag_dealloc(arena->node_mag_full);
+		arena->node_mag_full = node_mag;
+		node_mag = node_mag->next;
+		arena->node_mag_cur = node_mag;
+	}
+	assert(node_mag->nnodes < NODE_MAG_NNODES);
+
+	node_mag->nodes[node_mag->nnodes] = node;
+	node_mag->nnodes++;
+}
+
+static inline void *
+arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
+{
+	void *ret;
+	unsigned i, mask, bit, regind;
+
+	assert(run->magic == ARENA_RUN_MAGIC);
+	assert(run->regs_minelm < bin->regs_mask_nelms);
+
+	/*
+	 * Move the first check outside the loop, so that run->regs_minelm can
+	 * be updated unconditionally, without the possibility of updating it
+	 * multiple times.
+	 */
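+	/*
+	 * Illustrative note, assuming 4-byte ints (SIZEOF_INT_2POW == 2):
+	 * each regs_mask[] element covers 32 regions, so element i maps to
+	 * region indices [i * 32, i * 32 + 31], and the bit found by ffs()
+	 * selects the region within that word.
+	 */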
+	i = run->regs_minelm;
+	mask = run->regs_mask[i];
+	if (mask != 0) {
+		/* Usable allocation found. */
+		bit = ffs((int)mask) - 1;
+
+		regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+		assert(regind < bin->nregs);
+		ret = (void *)(((uintptr_t)run) + bin->reg0_offset
+		    + (bin->reg_size * regind));
+
+		/* Clear bit. */
+		mask ^= (1U << bit);
+		run->regs_mask[i] = mask;
+
+		return (ret);
+	}
+
+	for (i++; i < bin->regs_mask_nelms; i++) {
+		mask = run->regs_mask[i];
+		if (mask != 0) {
+			/* Usable allocation found. */
+			bit = ffs((int)mask) - 1;
+
+			regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+			assert(regind < bin->nregs);
+			ret = (void *)(((uintptr_t)run) + bin->reg0_offset
+			    + (bin->reg_size * regind));
+
+			/* Clear bit. */
+			mask ^= (1U << bit);
+			run->regs_mask[i] = mask;
+
+			/*
+			 * Make a note that nothing before this element
+			 * contains a free region.
+			 */
+			run->regs_minelm = i; /* Low payoff: + (mask == 0); */
+
+			return (ret);
+		}
+	}
+	/* Not reached. */
+	assert(0);
+	return (NULL);
+}
+
+static inline void
+arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
+{
+	/*
+	 * To divide by a number D that is not a power of two we multiply
+	 * by (2^21 / D) and then right shift by 21 positions.
+	 *
+	 *   X / D
+	 *
+	 * becomes
+	 *
+	 *   (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT
+	 */
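+	/*
+	 * Worked example of the reciprocal trick: for D == 48,
+	 * (2^21 / 48) + 1 == 43691, and (960 * 43691) >> 21 == 20 == 960 / 48.
+	 * The +1 compensates for truncation in the integer division; the
+	 * assert below (diff == regind * size) checks that the result is
+	 * exact for the size classes used here.
+	 */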
+#define	SIZE_INV_SHIFT 21
+#define	SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1)
+	static const unsigned size_invs[] = {
+	    SIZE_INV(3),
+	    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
+	    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
+	    SIZE_INV(12),SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
+	    SIZE_INV(16),SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
+	    SIZE_INV(20),SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
+	    SIZE_INV(24),SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
+	    SIZE_INV(28),SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
+#if (QUANTUM_2POW_MIN < 4)
+	    ,
+	    SIZE_INV(32), SIZE_INV(33), SIZE_INV(34), SIZE_INV(35),
+	    SIZE_INV(36), SIZE_INV(37), SIZE_INV(38), SIZE_INV(39),
+	    SIZE_INV(40), SIZE_INV(41), SIZE_INV(42), SIZE_INV(43),
+	    SIZE_INV(44), SIZE_INV(45), SIZE_INV(46), SIZE_INV(47),
+	    SIZE_INV(48), SIZE_INV(49), SIZE_INV(50), SIZE_INV(51),
+	    SIZE_INV(52), SIZE_INV(53), SIZE_INV(54), SIZE_INV(55),
+	    SIZE_INV(56), SIZE_INV(57), SIZE_INV(58), SIZE_INV(59),
+	    SIZE_INV(60), SIZE_INV(61), SIZE_INV(62), SIZE_INV(63)
+#endif
+	};
+	unsigned diff, regind, elm, bit;
+
+	assert(run->magic == ARENA_RUN_MAGIC);
+	assert(((sizeof(size_invs)) / sizeof(unsigned)) + 3
+	    >= (SMALL_MAX_DEFAULT >> QUANTUM_2POW_MIN));
+
+	/*
+	 * Avoid doing division with a variable divisor if possible.  Using
+	 * actual division here can reduce allocator throughput by over 20%!
+	 */
+	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset);
+	if ((size & (size - 1)) == 0) {
+		/*
+		 * log2_table allows fast division of a power of two in the
+		 * [1..128] range.
+		 *
+		 * (x / divisor) becomes (x >> log2_table[divisor - 1]).
+		 */
+		static const unsigned char log2_table[] = {
+		    0, 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4,
+		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
+		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
+		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
+		};
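+		/*
+		 * E.g. for divisor 16, log2_table[15] == 4, so (x / 16)
+		 * becomes (x >> 4); sizes in (128, 32768] reuse the table on
+		 * (size >> 8), as handled below.
+		 */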
+
+		if (size <= 128)
+			regind = (diff >> log2_table[size - 1]);
+		else if (size <= 32768)
+			regind = diff >> (8 + log2_table[(size >> 8) - 1]);
+		else {
+			/*
+			 * The page size is too large for us to use the lookup
+			 * table.  Use real division.
+			 */
+			regind = diff / size;
+		}
+	} else if (size <= ((sizeof(size_invs) / sizeof(unsigned))
+	    << QUANTUM_2POW_MIN) + 2) {
+		regind = size_invs[(size >> QUANTUM_2POW_MIN) - 3] * diff;
+		regind >>= SIZE_INV_SHIFT;
+	} else {
+		/*
+		 * size_invs isn't large enough to handle this size class, so
+		 * calculate regind using actual division.  This only happens
+		 * if the user increases small_max via the 'S' runtime
+		 * configuration option.
+		 */
+		regind = diff / size;
+	}
+	assert(diff == regind * size);
+	assert(regind < bin->nregs);
+
+	elm = regind >> (SIZEOF_INT_2POW + 3);
+	if (elm < run->regs_minelm)
+		run->regs_minelm = elm;
+	bit = regind - (elm << (SIZEOF_INT_2POW + 3));
+	assert((run->regs_mask[elm] & (1U << bit)) == 0);
+	run->regs_mask[elm] |= (1U << bit);
+#undef SIZE_INV
+#undef SIZE_INV_SHIFT
+}
+
+static bool
+arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool zero)
+{
+	arena_chunk_t *chunk;
+	unsigned run_ind, map_offset, total_pages, need_pages, rem_pages;
+	unsigned i;
+	uint32_t pos_beg, pos_end;
+	extent_node_t *nodeA, *nodeB, key;
+
+	/* Insert a node into runs_alloced_ad for the first part of the run. */
+	nodeA = arena_node_alloc(arena);
+	if (nodeA == NULL)
+		return (true);
+	nodeA->addr = run;
+	nodeA->size = size;
+	RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeA);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
+	    >> pagesize_2pow);
+	total_pages = chunk->map[run_ind].npages;
+	need_pages = (unsigned)(size >> pagesize_2pow);
+	assert(need_pages > 0);
+	assert(need_pages <= total_pages);
+	rem_pages = total_pages - need_pages;
+
+	key.addr = run;
+	nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+	assert(nodeB != NULL);
+
+#ifdef MALLOC_DECOMMIT
+	if (opt_decommit) {
+		if (nodeB->ndirty != nodeB->size) {
+			/*
+			 * Commit the part of the run that is being allocated.
+			 */
+#  ifdef MOZ_MEMORY_WINDOWS
+			VirtualAlloc(run, size, MEM_COMMIT, PAGE_READWRITE);
+#  else
+			if (mmap(run, size, PROT_READ | PROT_WRITE,
+			    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) ==
+			    MAP_FAILED)
+				abort();
+#  endif
+			if (nodeB->ndirty != 0 && nodeB->size > size) {
+				/*
+				 * Decommit the unused portion of the run in
+				 * order to assure a uniform state where all
+				 * pages in each part of the split are either
+				 * completely committed or completely
+				 * decommitted.
+				 */
+#  ifdef MOZ_MEMORY_WINDOWS
+				VirtualFree((void *)((uintptr_t)run + size),
+				    nodeB->size - size, MEM_DECOMMIT);
+#  else
+				if (mmap((void *)((uintptr_t)run + size),
+				    nodeB->size - size, PROT_NONE, MAP_FIXED |
+				    MAP_PRIVATE | MAP_ANON, -1, 0) ==
+				    MAP_FAILED)
+					abort();
+#  endif
+#  ifdef MALLOC_STATS
+				arena->stats.npurged += nodeB->ndirty;
+				arena->stats.nmadvise++;
+#  endif
+				arena->ndirty -= nodeB->ndirty;
+				nodeB->ndirty = 0;
+			}
+		}
+	}
+#endif
+
+	/* Split enough pages from the front of run to fit allocation size. */
+	map_offset = run_ind;
+	pos_beg = chunk->map[map_offset].pos;
+	pos_end = chunk->map[map_offset + total_pages - 1].pos;
+	if (zero == false) {
+		for (i = 0; i < need_pages; i++) {
+			chunk->map[map_offset + i].npages = need_pages;
+			chunk->map[map_offset + i].pos = i;
+		}
+	} else {
+		/*
+		 * Handle first page specially, since we need to look for
+		 * POS_EMPTY rather than NPAGES_EMPTY.
+		 */
+		i = 0;
+		if (chunk->map[map_offset + i].pos != POS_EMPTY) {
+			memset((void *)((uintptr_t)chunk + ((map_offset + i) <<
+			    pagesize_2pow)), 0, pagesize);
+		}
+		chunk->map[map_offset + i].npages = need_pages;
+		chunk->map[map_offset + i].pos = i;
+
+		/* Handle central pages. */
+		for (i++; i < need_pages - 1; i++) {
+			if (chunk->map[map_offset + i].npages != NPAGES_EMPTY) {
+				memset((void *)((uintptr_t)chunk + ((map_offset
+				    + i) << pagesize_2pow)), 0, pagesize);
+			}
+			chunk->map[map_offset + i].npages = need_pages;
+			chunk->map[map_offset + i].pos = i;
+		}
+
+		/*
+		 * Handle last page specially, since we need to look for
+		 * POS_EMPTY rather than NPAGES_EMPTY.
+		 */
+		if (i < need_pages) {
+			if (chunk->map[map_offset + i].npages != POS_EMPTY) {
+				memset((void *)((uintptr_t)chunk + ((map_offset
+				    + i) << pagesize_2pow)), 0, pagesize);
+			}
+			chunk->map[map_offset + i].npages = need_pages;
+			chunk->map[map_offset + i].pos = i;
+		}
+	}
+
+	/* Keep track of trailing unused pages for later use. */
+	if (rem_pages > 0) {
+		/* Update map for trailing pages. */
+		map_offset += need_pages;
+		chunk->map[map_offset].npages = rem_pages;
+		chunk->map[map_offset].pos = pos_beg;
+		chunk->map[map_offset + rem_pages - 1].npages = rem_pages;
+		chunk->map[map_offset + rem_pages - 1].pos = pos_end;
+
+		/*
+		 * Update nodeB in runs_avail_*.  Its position within
+		 * runs_avail_ad does not change.
+		 */
+		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+		nodeB->addr = (void *)((uintptr_t)nodeB->addr + size);
+		nodeB->size -= size;
+		if (nodeB->ndirty > nodeB->size) {
+			arena->ndirty -= nodeB->ndirty - nodeB->size;
+			nodeB->ndirty = nodeB->size;
+		}
+		RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+	} else {
+		/* Remove nodeB from runs_avail_*. */
+		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+		RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
+		arena->ndirty -= nodeB->ndirty;
+		arena_node_dealloc(arena, nodeB);
+	}
+
+	chunk->pages_used += need_pages;
+
+	return (false);
+}
+
+static arena_chunk_t *
+arena_chunk_alloc(arena_t *arena)
+{
+	arena_chunk_t *chunk;
+	extent_node_t *node;
+
+	node = arena_node_alloc(arena);
+	if (node == NULL)
+		return (NULL);
+
+	if (arena->spare != NULL) {
+		chunk = arena->spare;
+		arena->spare = NULL;
+		node->ndirty = arena->spare_ndirty;
+		arena->spare_ndirty = 0;
+	} else {
+		unsigned i;
+
+		chunk = (arena_chunk_t *)chunk_alloc(chunksize, true);
+		if (chunk == NULL) {
+			arena_node_dealloc(arena, node);
+			return (NULL);
+		}
+#ifdef MALLOC_STATS
+		arena->stats.mapped += chunksize;
+#endif
+
+		chunk->arena = arena;
+
+		RB_INSERT(arena_chunk_tree_s, &arena->chunks, chunk);
+
+		/*
+		 * Claim that no pages are in use, since the header is merely
+		 * overhead.
+		 */
+		chunk->pages_used = 0;
+
+		/*
+		 * Initialize enough of the map to support one maximal free run.
+		 */
+		i = arena_chunk_header_npages;
+		chunk->map[i].npages = chunk_npages - arena_chunk_header_npages;
+		chunk->map[i].pos = POS_EMPTY;
+
+		/* Mark the free run's central pages as untouched. */
+		for (i++; i < chunk_npages - 1; i++)
+			chunk->map[i].npages = NPAGES_EMPTY;
+
+		/* Take care when (chunk_npages == 2). */
+		if (i < chunk_npages) {
+			chunk->map[i].npages = chunk_npages -
+			    arena_chunk_header_npages;
+			chunk->map[i].pos = POS_EMPTY;
+		}
+
+		node->ndirty = 0;
+#ifdef MALLOC_DECOMMIT
+#  ifdef MOZ_MEMORY_WINDOWS
+		if (opt_decommit) {
+			VirtualFree((void *)((uintptr_t)chunk +
+			    (arena_chunk_header_npages << pagesize_2pow)),
+			    chunksize - (arena_chunk_header_npages <<
+			    pagesize_2pow), MEM_DECOMMIT);
+		} else {
+			VirtualAlloc((void *)((uintptr_t)chunk +
+			    (arena_chunk_header_npages << pagesize_2pow)),
+			    chunksize - (arena_chunk_header_npages <<
+			    pagesize_2pow), MEM_RESET, PAGE_READWRITE);
+		}
+#  else
+		if (mmap((void *)((uintptr_t)chunk + (arena_chunk_header_npages
+		    << pagesize_2pow)), chunksize - (arena_chunk_header_npages
+		    << pagesize_2pow), PROT_NONE, MAP_FIXED | MAP_PRIVATE |
+		    MAP_ANON, -1, 0) == MAP_FAILED)
+			abort();
+#  endif
+#endif
+	}
+
+	/* Insert the run into the runs_avail_* red-black trees. */
+	node->addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
+	    pagesize_2pow));
+	node->size = chunksize - (arena_chunk_header_npages << pagesize_2pow);
+	RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, node);
+	RB_INSERT(extent_tree_ad_s, &arena->runs_avail_ad, node);
+
+	return (chunk);
+}
+
+static void
+arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+{
+	extent_node_t *node, key;
+
+	if (arena->spare != NULL) {
+		RB_REMOVE(arena_chunk_tree_s, &chunk->arena->chunks,
+		    arena->spare);
+		arena->ndirty -= arena->spare_ndirty;
+		chunk_dealloc((void *)arena->spare, chunksize);
+#ifdef MALLOC_STATS
+		arena->stats.mapped -= chunksize;
+#endif
+	}
+
+	/*
+	 * Remove run from the runs trees, regardless of whether this chunk
+	 * will be cached, so that the arena does not use it.
+	 */
+	key.addr = (void *)((uintptr_t)chunk + (arena_chunk_header_npages <<
+	    pagesize_2pow));
+	node = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+	assert(node != NULL);
+	RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, node);
+	RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, node);
+
+	arena->spare = chunk;
+	arena->spare_ndirty = node->ndirty;
+
+	arena_node_dealloc(arena, node);
+}
+
+static arena_run_t *
+arena_run_alloc(arena_t *arena, size_t size, bool zero)
+{
+	arena_chunk_t *chunk;
+	arena_run_t *run;
+	extent_node_t *node, key;
+
+	assert(size <= (chunksize - (arena_chunk_header_npages <<
+	    pagesize_2pow)));
+	assert((size & pagesize_mask) == 0);
+
+	/* Search the arena's chunks for the lowest best fit. */
+	key.addr = NULL;
+	key.size = size;
+	node = RB_NFIND(extent_tree_szad_s, &arena->runs_avail_szad, &key);
+	if (node != NULL) {
+		run = (arena_run_t *)node->addr;
+		if (arena_run_split(arena, run, size, zero))
+			return (NULL);
+		return (run);
+	}
+
+	/*
+	 * No usable runs.  Create a new chunk from which to allocate the run.
+	 */
+	chunk = arena_chunk_alloc(arena);
+	if (chunk == NULL)
+		return (NULL);
+	run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages <<
+	    pagesize_2pow));
+
+	/* Update page map. */
+	if (arena_run_split(arena, run, size, zero)) {
+		arena_chunk_dealloc(arena, chunk);
+		return (NULL);
+	}
+	return (run);
+}
+
+static void
+arena_purge(arena_t *arena)
+{
+	extent_node_t *node;
+#ifdef MALLOC_DEBUG
+	size_t ndirty;
+
+	ndirty = arena->spare_ndirty;
+	RB_FOREACH(node, extent_tree_ad_s, &arena->runs_avail_ad) {
+		ndirty += node->ndirty;
+	}
+	assert(ndirty == arena->ndirty);
+#endif
+	assert(arena->ndirty > opt_free_max);
+
+	/*
+	 * Purge the spare first, even if it isn't at the lowest address of
+	 * anything currently mapped by the arena.
+	 */
+	if (arena->spare_ndirty > 0) {
+		assert(arena->spare != NULL);
+#ifdef MALLOC_DECOMMIT
+#  ifdef MOZ_MEMORY_WINDOWS
+		/*
+		 * Tell the kernel that we don't need the data in this
+		 * run, but only if requested via runtime
+		 * configuration.
+		 */
+		if (opt_decommit) {
+			VirtualFree((void *)((uintptr_t)arena->spare +
+			    (arena_chunk_header_npages <<
+			    pagesize_2pow)), chunksize -
+			    (arena_chunk_header_npages <<
+			    pagesize_2pow), MEM_DECOMMIT);
+		} else {
+			VirtualAlloc((void *)((uintptr_t)arena->spare +
+			    (arena_chunk_header_npages <<
+			    pagesize_2pow)), chunksize -
+			    (arena_chunk_header_npages <<
+			    pagesize_2pow), MEM_RESET, PAGE_READWRITE);
+		}
+#  else
+		if (mmap((void *)((uintptr_t)arena->spare +
+		    (arena_chunk_header_npages << pagesize_2pow)),
+		    chunksize - (arena_chunk_header_npages <<
+		    pagesize_2pow), PROT_NONE, MAP_FIXED | MAP_PRIVATE |
+		    MAP_ANON, -1, 0) == MAP_FAILED)
+			abort();
+#  endif
+#elif (defined(MOZ_MEMORY_DARWIN))
+		mmap((void *)((uintptr_t)arena->spare +
+		    (arena_chunk_header_npages << pagesize_2pow)),
+		    chunksize - (arena_chunk_header_npages <<
+		    pagesize_2pow), PROT_READ | PROT_WRITE, MAP_PRIVATE
+		    | MAP_ANON | MAP_FIXED, -1, 0);
+		//msync((void *)((uintptr_t)arena->spare +
+		//    (arena_chunk_header_npages << pagesize_2pow)),
+		//    chunksize - (arena_chunk_header_npages <<
+		//    pagesize_2pow), MS_DEACTIVATE);
+#else
+		madvise((void *)((uintptr_t)arena->spare +
+		    (arena_chunk_header_npages << pagesize_2pow)),
+		    chunksize - (arena_chunk_header_npages <<
+		    pagesize_2pow), MADV_FREE);
+#endif
+#ifdef MALLOC_STATS
+		arena->stats.npurged += arena->spare_ndirty;
+		arena->stats.nmadvise++;
+#endif
+		arena->ndirty -= arena->spare_ndirty;
+		arena->spare_ndirty = 0;
+		if (arena->ndirty <= (opt_free_max >> 1))
+			return;
+	}
+
+	/*
+	 * Iterate backward through runs until enough dirty memory has been
+	 * purged.
+	 */
+	RB_FOREACH_REVERSE(node, extent_tree_ad_s, &arena->runs_avail_ad) {
+		if (node->ndirty > 0) {
+#ifdef MALLOC_DECOMMIT
+#  ifdef MOZ_MEMORY_WINDOWS
+			/*
+			 * Tell the kernel that we don't need the data
+			 * in this run, but only if requested via
+			 * runtime configuration.
+			 */
+			if (opt_decommit) {
+				VirtualFree(node->addr, node->size,
+				    MEM_DECOMMIT);
+			} else {
+				VirtualAlloc(node->addr, node->size,
+				    MEM_RESET, PAGE_READWRITE);
+			}
+#  else
+			if (mmap(node->addr, node->size, PROT_NONE,
+			    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0)
+			    == MAP_FAILED)
+				abort();
+#  endif
+#elif (defined(MOZ_MEMORY_DARWIN))
+			mmap(node->addr, node->size, PROT_READ | PROT_WRITE,
+			    MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+			//msync(node->addr, node->size, MS_DEACTIVATE);
+#else
+			madvise(node->addr, node->size, MADV_FREE);
+#endif
+#ifdef MALLOC_STATS
+			arena->stats.npurged += node->ndirty;
+			arena->stats.nmadvise++;
+#endif
+			arena->ndirty -= node->ndirty;
+			node->ndirty = 0;
+			if (arena->ndirty <= (opt_free_max >> 1))
+				return;
+		}
+	}
+}
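+
+/*
+ * Note on the purge policy above: arena_run_dalloc() triggers a purge once
+ * an arena's dirty byte total exceeds opt_free_max, and arena_purge() stops
+ * as soon as the total drops to opt_free_max/2.  The gap provides
+ * hysteresis, so a workload hovering around the threshold does not purge on
+ * every deallocation.  For example, with a hypothetical opt_free_max of
+ * 4MB, one purge pass releases dirty pages until at most 2MB remain dirty.
+ */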
+
+static void
+arena_run_dalloc(arena_t *arena, arena_run_t *run, size_t size, size_t ndirty)
+{
+	arena_chunk_t *chunk;
+	extent_node_t *node, key;
+	unsigned run_ind, run_pages;
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+
+	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
+	    >> pagesize_2pow);
+	assert(run_ind >= arena_chunk_header_npages);
+	assert(run_ind < (chunksize >> pagesize_2pow));
+	run_pages = (size >> pagesize_2pow);
+	assert(run_pages == chunk->map[run_ind].npages);
+
+	/* Subtract pages from count of pages used in chunk. */
+	chunk->pages_used -= run_pages;
+
+	/* Mark run as deallocated. */
+	assert(chunk->map[run_ind].npages == run_pages);
+	chunk->map[run_ind].pos = POS_FREE;
+	assert(chunk->map[run_ind + run_pages - 1].npages == run_pages);
+	chunk->map[run_ind + run_pages - 1].pos = POS_FREE;
+
+	/* Remove run from runs_alloced_ad. */
+	key.addr = run;
+	node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+	assert(node != NULL);
+	RB_REMOVE(extent_tree_ad_s, &arena->runs_alloced_ad, node);
+
+	/* Try to coalesce with neighboring runs. */
+	if (run_ind > arena_chunk_header_npages &&
+	    chunk->map[run_ind - 1].pos >= POS_EMPTY) {
+		unsigned prev_npages;
+
+		/* Coalesce with previous run. */
+		prev_npages = chunk->map[run_ind - 1].npages;
+		/*
+		 * The way run allocation currently works (lowest best fit),
+		 * it is impossible for a free run to have empty (untouched)
+		 * pages followed by dirty pages.  If the run allocation policy
+		 * changes, then we will need to account for it here.
+		 */
+		assert(chunk->map[run_ind - 1].pos != POS_EMPTY);
+#if 0 /* Currently unnecessary. */
+		if (prev_npages > 1 && chunk->map[run_ind - 1].pos == POS_EMPTY)
+			chunk->map[run_ind - 1].npages = NPAGES_EMPTY;
+#endif
+		run_ind -= prev_npages;
+		assert(chunk->map[run_ind].npages == prev_npages);
+		assert(chunk->map[run_ind].pos >= POS_EMPTY);
+		run_pages += prev_npages;
+
+		chunk->map[run_ind].npages = run_pages;
+		assert(chunk->map[run_ind].pos >= POS_EMPTY);
+		chunk->map[run_ind + run_pages - 1].npages = run_pages;
+		assert(chunk->map[run_ind + run_pages - 1].pos >= POS_EMPTY);
+
+		/*
+		 * Update node in runs_avail_*.  Its position does not change
+		 * in runs_avail_ad.
+		 */
+		arena_node_dealloc(arena, node);
+		key.addr = (void *)((uintptr_t)run - (prev_npages <<
+		    pagesize_2pow));
+		node = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+		assert(node != NULL);
+		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, node);
+		node->size = (run_pages << pagesize_2pow);
+		node->ndirty += ndirty;
+		assert(node->ndirty <= node->size);
+		RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, node);
+	} else {
+		/*
+		 * Coalescing backward failed, so insert node into runs_avail_*.
+		 */
+		node->ndirty = ndirty;
+		assert(node->ndirty <= node->size);
+		RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, node);
+		RB_INSERT(extent_tree_ad_s, &arena->runs_avail_ad, node);
+	}
+
+	if (run_ind + run_pages < chunk_npages &&
+	    chunk->map[run_ind + run_pages].pos >= POS_EMPTY) {
+		unsigned next_npages;
+		extent_node_t *nodeB;
+
+		/* Coalesce with next run. */
+		next_npages = chunk->map[run_ind + run_pages].npages;
+		if (next_npages > 1 && chunk->map[run_ind + run_pages].pos ==
+		    POS_EMPTY)
+			chunk->map[run_ind + run_pages].npages = NPAGES_EMPTY;
+		run_pages += next_npages;
+		assert(chunk->map[run_ind + run_pages - 1].npages ==
+		    next_npages);
+		assert(chunk->map[run_ind + run_pages - 1].pos >= POS_EMPTY);
+
+		chunk->map[run_ind].npages = run_pages;
+		assert(chunk->map[run_ind].pos >= POS_EMPTY);
+		chunk->map[run_ind + run_pages - 1].npages = run_pages;
+		assert(chunk->map[run_ind + run_pages - 1].pos >= POS_EMPTY);
+
+		/*
+		 * Update node.  Its position does not change in runs_avail_ad.
+		 */
+		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, node);
+		node->size = (run_pages << pagesize_2pow);
+		RB_INSERT(extent_tree_szad_s, &arena->runs_avail_szad, node);
+		/* Delete the subsumed run's node. */
+		nodeB = RB_NEXT(extent_tree_ad_s, &arena->runs_avail_ad, node);
+		assert(nodeB->size == (next_npages << pagesize_2pow));
+		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+		RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
+		node->ndirty += nodeB->ndirty;
+		assert(node->ndirty <= node->size);
+		arena_node_dealloc(arena, nodeB);
+	}
+
+	/* Deallocate chunk if it is now completely unused. */
+	if (chunk->pages_used == 0)
+		arena_chunk_dealloc(arena, chunk);
+
+	/* Enforce opt_free_max. */
+	arena->ndirty += ndirty;
+	if (arena->ndirty > opt_free_max)
+		arena_purge(arena);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
+{
+	arena_run_t *run;
+	unsigned i, remainder;
+
+	/* Look for a usable run. */
+	if ((run = RB_MIN(arena_run_tree_s, &bin->runs)) != NULL) {
+		/* run is guaranteed to have available space. */
+		RB_REMOVE(arena_run_tree_s, &bin->runs, run);
+#ifdef MALLOC_STATS
+		bin->stats.reruns++;
+#endif
+		return (run);
+	}
+	/* No existing runs have any space available. */
+
+	/* Allocate a new run. */
+	run = arena_run_alloc(arena, bin->run_size, false);
+	if (run == NULL)
+		return (NULL);
+
+	/* Initialize run internals. */
+	run->bin = bin;
+
+	for (i = 0; i < bin->regs_mask_nelms; i++)
+		run->regs_mask[i] = UINT_MAX;
+	remainder = bin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
+	if (remainder != 0) {
+		/* The last element has spare bits that need to be unset. */
+		run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
+		    - remainder));
+	}
+
+	run->regs_minelm = 0;
+
+	run->nfree = bin->nregs;
+#ifdef MALLOC_DEBUG
+	run->magic = ARENA_RUN_MAGIC;
+#endif
+
+#ifdef MALLOC_STATS
+	bin->stats.nruns++;
+	bin->stats.curruns++;
+	if (bin->stats.curruns > bin->stats.highruns)
+		bin->stats.highruns = bin->stats.curruns;
+#endif
+	return (run);
+}
+
+/* bin->runcur must have space available before this function is called. */
+static inline void *
+arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
+{
+	void *ret;
+
+	assert(run->magic == ARENA_RUN_MAGIC);
+	assert(run->nfree > 0);
+
+	ret = arena_run_reg_alloc(run, bin);
+	assert(ret != NULL);
+	run->nfree--;
+
+	return (ret);
+}
+
+/* Re-fill bin->runcur, then call arena_bin_malloc_easy(). */
+static void *
+arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
+{
+
+	bin->runcur = arena_bin_nonfull_run_get(arena, bin);
+	if (bin->runcur == NULL)
+		return (NULL);
+	assert(bin->runcur->magic == ARENA_RUN_MAGIC);
+	assert(bin->runcur->nfree > 0);
+
+	return (arena_bin_malloc_easy(arena, bin, bin->runcur));
+}
+
+/*
+ * Calculate bin->run_size such that it meets the following constraints:
+ *
+ *   *) bin->run_size >= min_run_size
+ *   *) bin->run_size <= arena_maxclass
+ *   *) bin->run_size <= RUN_MAX_SMALL
+ *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
+ *
+ * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are
+ * also calculated here, since these settings are all interdependent.
+ */
+static size_t
+arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
+{
+	size_t try_run_size, good_run_size;
+	unsigned good_nregs, good_mask_nelms, good_reg0_offset;
+	unsigned try_nregs, try_mask_nelms, try_reg0_offset;
+
+	assert(min_run_size >= pagesize);
+	assert(min_run_size <= arena_maxclass);
+	assert(min_run_size <= RUN_MAX_SMALL);
+
+	/*
+	 * Calculate known-valid settings before entering the run_size
+	 * expansion loop, so that the first part of the loop always copies
+	 * valid settings.
+	 *
+	 * The do..while loop iteratively reduces the number of regions until
+	 * the run header and the regions no longer overlap.  A closed formula
+	 * would be quite messy, since there is an interdependency between the
+	 * header's mask length and the number of regions.
+	 */
+	try_run_size = min_run_size;
+	try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size)
+	    + 1; /* Counter-act try_nregs-- in loop. */
+	do {
+		try_nregs--;
+		try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
+		    ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
+		try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
+	} while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
+	    > try_reg0_offset);
+
+	/* run_size expansion loop. */
+	do {
+		/*
+		 * Copy valid settings before trying more aggressive settings.
+		 */
+		good_run_size = try_run_size;
+		good_nregs = try_nregs;
+		good_mask_nelms = try_mask_nelms;
+		good_reg0_offset = try_reg0_offset;
+
+		/* Try more aggressive settings. */
+		try_run_size += pagesize;
+		try_nregs = ((try_run_size - sizeof(arena_run_t)) /
+		    bin->reg_size) + 1; /* Counter-act try_nregs-- in loop. */
+		do {
+			try_nregs--;
+			try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
+			    ((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ?
+			    1 : 0);
+			try_reg0_offset = try_run_size - (try_nregs *
+			    bin->reg_size);
+		} while (sizeof(arena_run_t) + (sizeof(unsigned) *
+		    (try_mask_nelms - 1)) > try_reg0_offset);
+	} while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL
+	    && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
+	    && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size);
+
+	assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
+	    <= good_reg0_offset);
+	assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
+
+	/* Copy final settings. */
+	bin->run_size = good_run_size;
+	bin->nregs = good_nregs;
+	bin->regs_mask_nelms = good_mask_nelms;
+	bin->reg0_offset = good_reg0_offset;
+
+	return (good_run_size);
+}
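+
+/*
+ * Worked example of the fitting loop above, using hypothetical parameters
+ * (run size 4096, reg_size 64, sizeof(arena_run_t) == 40, 32-bit unsigned,
+ * i.e. SIZEOF_INT_2POW == 2):
+ *
+ *   try_nregs starts at (4096 - 40) / 64 + 1 == 64; one pass of the inner
+ *   loop gives try_nregs == 63, try_mask_nelms == 2 and
+ *   try_reg0_offset == 4096 - 63 * 64 == 64, and since
+ *   40 + 4 * (2 - 1) == 44 <= 64 the header no longer overlaps region 0.
+ *
+ * The header thus consumes reg0_offset == 64 bytes (~1.6% of the run);
+ * whether the outer loop expands the run to reduce relative overhead
+ * further depends on RUN_MAX_OVRHD/RUN_BFP and RUN_MAX_OVRHD_RELAX.
+ */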
+
+#ifdef MALLOC_BALANCE
+static inline void
+arena_lock_balance(arena_t *arena)
+{
+	unsigned contention;
+
+	contention = malloc_spin_lock(&arena->lock);
+	if (narenas > 1) {
+		/*
+		 * Calculate the exponentially averaged contention for this
+		 * arena.  Due to integer math always rounding down, this value
+		 * decays somewhat faster than normal.
+		 */
+		arena->contention = (((uint64_t)arena->contention
+		    * (uint64_t)((1U << BALANCE_ALPHA_INV_2POW)-1))
+		    + (uint64_t)contention) >> BALANCE_ALPHA_INV_2POW;
+		if (arena->contention >= opt_balance_threshold) {
+			uint32_t ind;
+
+			arena->contention = 0;
+#ifdef MALLOC_STATS
+			arena->stats.nbalance++;
+#endif
+			ind = PRN(balance, narenas_2pow);
+			if (arenas[ind] != NULL) {
+#ifdef MOZ_MEMORY_WINDOWS
+				TlsSetValue(tlsIndex, arenas[ind]);
+#else
+				arenas_map = arenas[ind];
+#endif
+			} else {
+				malloc_spin_lock(&arenas_lock);
+				if (arenas[ind] != NULL) {
+#ifdef MOZ_MEMORY_WINDOWS
+					TlsSetValue(tlsIndex, arenas[ind]);
+#else
+					arenas_map = arenas[ind];
+#endif
+				} else {
+#ifdef MOZ_MEMORY_WINDOWS
+					TlsSetValue(tlsIndex,
+					    arenas_extend(ind));
+#else
+					arenas_map = arenas_extend(ind);
+#endif
+				}
+				malloc_spin_unlock(&arenas_lock);
+			}
+		}
+	}
+}
+#endif
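+
+/*
+ * Example of the exponential averaging above, assuming a hypothetical
+ * BALANCE_ALPHA_INV_2POW of 4: each lock acquisition computes
+ *
+ *   contention' = (contention * 15 + c) / 16
+ *
+ * where c is the contention value reported by malloc_spin_lock().  A single
+ * contended acquisition nudges the average up only slightly, while
+ * sustained contention drives it past opt_balance_threshold, at which
+ * point this thread is reassigned to a pseudo-randomly chosen arena.
+ */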
+
+static void *
+arena_malloc(arena_t *arena, size_t size, bool zero)
+{
+	void *ret;
+
+	assert(arena != NULL);
+	assert(arena->magic == ARENA_MAGIC);
+	assert(size != 0);
+	assert(QUANTUM_CEILING(size) <= arena_maxclass);
+
+	if (size <= bin_maxclass) {
+		arena_bin_t *bin;
+		arena_run_t *run;
+
+		/* Small allocation. */
+
+		if (size < small_min) {
+			/* Tiny. */
+			size = pow2_ceil(size);
+			bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
+			    1)))];
+#if (!defined(NDEBUG) || defined(MALLOC_STATS))
+			/*
+			 * Bin calculation is always correct, but we may need
+			 * to fix size for the purposes of assertions and/or
+			 * stats accuracy.
+			 */
+			if (size < (1U << TINY_MIN_2POW))
+				size = (1U << TINY_MIN_2POW);
+#endif
+		} else if (size <= small_max) {
+			/* Quantum-spaced. */
+			size = QUANTUM_CEILING(size);
+			bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
+			    - 1];
+		} else {
+			/* Sub-page. */
+			size = pow2_ceil(size);
+			bin = &arena->bins[ntbins + nqbins
+			    + (ffs((int)(size >> opt_small_max_2pow)) - 2)];
+		}
+		assert(size == bin->reg_size);
+
+#ifdef MALLOC_BALANCE
+		arena_lock_balance(arena);
+#else
+		malloc_spin_lock(&arena->lock);
+#endif
+		if ((run = bin->runcur) != NULL && run->nfree > 0)
+			ret = arena_bin_malloc_easy(arena, bin, run);
+		else
+			ret = arena_bin_malloc_hard(arena, bin);
+
+		if (ret == NULL) {
+			malloc_spin_unlock(&arena->lock);
+			return (NULL);
+		}
+
+#ifdef MALLOC_STATS
+		bin->stats.nrequests++;
+		arena->stats.nmalloc_small++;
+		arena->stats.allocated_small += size;
+#endif
+		malloc_spin_unlock(&arena->lock);
+
+		if (zero == false) {
+			if (opt_junk)
+				memset(ret, 0xa5, size);
+			else if (opt_zero)
+				memset(ret, 0, size);
+		} else
+			memset(ret, 0, size);
+	} else {
+		/* Large allocation. */
+		size = PAGE_CEILING(size);
+#ifdef MALLOC_BALANCE
+		arena_lock_balance(arena);
+#else
+		malloc_spin_lock(&arena->lock);
+#endif
+		ret = (void *)arena_run_alloc(arena, size, zero);
+		if (ret == NULL) {
+			malloc_spin_unlock(&arena->lock);
+			return (NULL);
+		}
+#ifdef MALLOC_STATS
+		arena->stats.nmalloc_large++;
+		arena->stats.allocated_large += size;
+#endif
+		malloc_spin_unlock(&arena->lock);
+
+		if (zero == false) {
+			if (opt_junk)
+				memset(ret, 0xa5, size);
+			else if (opt_zero)
+				memset(ret, 0, size);
+		}
+	}
+
+	return (ret);
+}
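+
+/*
+ * Example of the size class selection above, assuming for illustration
+ * quantum == 16, small_min == 9, small_max == 512 and pagesize == 4096:
+ *
+ *   size 8    -> tiny:           pow2_ceil(8) == 8.
+ *   size 100  -> quantum-spaced: QUANTUM_CEILING(100) == 112.
+ *   size 600  -> sub-page:       pow2_ceil(600) == 1024.
+ *   size 5000 -> large:          PAGE_CEILING(5000) == 8192, served by
+ *                                arena_run_alloc() rather than a bin.
+ *
+ * bin_maxclass is pagesize/2 == 2048 with these settings, so anything
+ * larger is treated as a large allocation.
+ */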
+
+static inline void
+arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeA,
+    extent_node_t *nodeB, arena_run_t *run, size_t oldsize, size_t newsize)
+{
+	unsigned i, pageind, npages;
+
+	assert(nodeB->addr == run);
+	assert(nodeB->size == oldsize);
+	assert(oldsize > newsize);
+
+	/*
+	 * Update the run's node in runs_alloced_ad.  Its position does not
+	 * change.
+	 */
+	nodeB->addr = (void *)((uintptr_t)run + (oldsize - newsize));
+	nodeB->size = newsize;
+
+	/* Update the map for the run to be kept. */
+	pageind = (((uintptr_t)nodeB->addr - (uintptr_t)chunk) >>
+	    pagesize_2pow);
+	npages = newsize >> pagesize_2pow;
+	for (i = 0; i < npages; i++) {
+		chunk->map[pageind + i].npages = npages;
+		chunk->map[pageind + i].pos = i;
+	}
+
+	/*
+	 * Insert a node into runs_alloced_ad so that arena_run_dalloc() can
+	 * treat the leading run as separately allocated.
+	 */
+	nodeA->addr = (void *)run;
+	nodeA->size = oldsize - newsize;
+	RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeA);
+
+	/*
+	 * Modify the map such that arena_run_dalloc() sees the leading run as
+	 * separately allocated.
+	 */
+	pageind = (((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow);
+	npages = (oldsize - newsize) >> pagesize_2pow;
+	chunk->map[pageind].npages = npages;
+	assert(chunk->map[pageind].pos == 0);
+	pageind += npages - 1;
+	chunk->map[pageind].npages = npages;
+	assert(chunk->map[pageind].pos == npages - 1);
+
+	arena_run_dalloc(arena, (arena_run_t *)run, oldsize - newsize,
+	    oldsize - newsize);
+}
+
+static inline void
+arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, extent_node_t *nodeA,
+    extent_node_t *nodeB, arena_run_t *run, size_t oldsize, size_t newsize,
+    size_t ndirty)
+{
+	unsigned i, pageind, npages;
+
+	assert(nodeA->addr == run);
+	assert(nodeA->size == oldsize);
+	assert(oldsize > newsize);
+
+	/*
+	 * Update the run's node in runs_alloced_ad.  Its position does not
+	 * change.
+	 */
+	nodeA->size = newsize;
+
+	/* Update the map for the run to be kept. */
+	pageind = (((uintptr_t)run - (uintptr_t)chunk) >> pagesize_2pow);
+	npages = newsize >> pagesize_2pow;
+	for (i = 0; i < npages; i++) {
+		chunk->map[pageind + i].npages = npages;
+		assert(chunk->map[pageind + i].pos == i);
+	}
+
+	/*
+	 * Insert a node into runs_alloced_ad so that arena_run_dalloc() can
+	 * treat the trailing run as separately allocated.
+	 */
+	nodeB->addr = (void *)((uintptr_t)run + newsize);
+	nodeB->size = oldsize - newsize;
+	RB_INSERT(extent_tree_ad_s, &arena->runs_alloced_ad, nodeB);
+
+	/*
+	 * Modify the map such that arena_run_dalloc() sees the trailing run as
+	 * separately allocated.
+	 */
+	pageind = (((uintptr_t)run + newsize - (uintptr_t)chunk) >>
+	    pagesize_2pow);
+	npages = (oldsize - newsize) >> pagesize_2pow;
+	chunk->map[pageind].npages = npages;
+	chunk->map[pageind].pos = 0;
+	pageind += npages - 1;
+	chunk->map[pageind].npages = npages;
+	chunk->map[pageind].pos = npages - 1;
+
+	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
+	    oldsize - newsize, ndirty);
+}
+
+/* Only handles large allocations that require more than page alignment. */
+static void *
+arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
+{
+	void *ret;
+	size_t offset;
+	arena_chunk_t *chunk;
+	extent_node_t *node, *nodeB, key;
+
+	assert((size & pagesize_mask) == 0);
+	assert((alignment & pagesize_mask) == 0);
+
+#ifdef MALLOC_BALANCE
+	arena_lock_balance(arena);
+#else
+	malloc_spin_lock(&arena->lock);
+#endif
+	ret = (void *)arena_run_alloc(arena, alloc_size, false);
+	if (ret == NULL) {
+		malloc_spin_unlock(&arena->lock);
+		return (NULL);
+	}
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
+
+	offset = (uintptr_t)ret & (alignment - 1);
+	assert((offset & pagesize_mask) == 0);
+	assert(offset < alloc_size);
+	if (offset == 0) {
+		/*
+		 * Allocate node in advance, in order to simplify OOM recovery.
+		 */
+		if ((nodeB = arena_node_alloc(arena)) == NULL) {
+			arena_run_dalloc(arena, ret, alloc_size, 0);
+			malloc_spin_unlock(&arena->lock);
+			return (NULL);
+		}
+
+		/*
+		 * Update the run's node in runs_alloced_ad.  Its position
+		 * does not change.
+		 */
+		key.addr = ret;
+		node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+		assert(node != NULL);
+
+		arena_run_trim_tail(arena, chunk, node, nodeB, ret, alloc_size,
+		    size, alloc_size - size);
+	} else {
+		extent_node_t *nodeA;
+		size_t leadsize, trailsize;
+
+		/*
+		 * Allocate nodes in advance, in order to simplify OOM recovery.
+		 */
+		if ((nodeA = arena_node_alloc(arena)) == NULL) {
+			arena_run_dalloc(arena, ret, alloc_size, 0);
+			malloc_spin_unlock(&arena->lock);
+			return (NULL);
+		}
+		if ((nodeB = arena_node_alloc(arena)) == NULL) {
+			arena_node_dealloc(arena, nodeA);
+			arena_run_dalloc(arena, ret, alloc_size, 0);
+			malloc_spin_unlock(&arena->lock);
+			return (NULL);
+		}
+
+		/*
+		 * Update the run's node in runs_alloced_ad.  Its position
+		 * does not change.
+		 */
+		key.addr = ret;
+		node = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad, &key);
+		assert(node != NULL);
+
+		leadsize = alignment - offset;
+		if (leadsize > 0) {
+			arena_run_trim_head(arena, chunk, nodeA, node, ret,
+			    alloc_size, alloc_size - leadsize);
+			ret = (void *)((uintptr_t)ret + leadsize);
+		}
+
+		trailsize = alloc_size - leadsize - size;
+		if (trailsize != 0) {
+			/* Trim trailing space. */
+			assert(trailsize < alloc_size);
+			arena_run_trim_tail(arena, chunk, node, nodeB, ret,
+			    size + trailsize, size, trailsize);
+		} else
+			arena_node_dealloc(arena, nodeB);
+	}
+
+#ifdef MALLOC_STATS
+	arena->stats.nmalloc_large++;
+	arena->stats.allocated_large += size;
+#endif
+	malloc_spin_unlock(&arena->lock);
+
+	if (opt_junk)
+		memset(ret, 0xa5, size);
+	else if (opt_zero)
+		memset(ret, 0, size);
+	return (ret);
+}
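+
+/*
+ * Example of the trimming above, with hypothetical values pagesize == 4096,
+ * alignment == 8192, size == 8192 and alloc_size == 12288 (as computed by
+ * ipalloc()): runs are page-aligned, so offset is either 0 or 4096.  If
+ * offset == 4096, leadsize == 4096 is trimmed from the head and
+ * trailsize == 12288 - 4096 - 8192 == 0; if offset == 0, only the 4096-byte
+ * tail is trimmed.  Either way an 8192-byte, 8192-aligned run remains.
+ */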
+
+/* Return the size of the allocation pointed to by ptr. */
+static size_t
+arena_salloc(const void *ptr)
+{
+	size_t ret;
+	arena_chunk_t *chunk;
+	arena_chunk_map_t *mapelm;
+	unsigned pageind;
+
+	assert(ptr != NULL);
+	assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+	/*
+	 * No arena data structures that we query here can change in a way that
+	 * affects this function, so we don't need to lock.
+	 */
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+	mapelm = &chunk->map[pageind];
+	if (mapelm->pos != 0 || ptr != (void *)(((uintptr_t)chunk) + (pageind <<
+	    pagesize_2pow))) {
+		arena_run_t *run;
+
+		pageind -= mapelm->pos;
+
+		run = (arena_run_t *)((uintptr_t)chunk + (pageind <<
+		    pagesize_2pow));
+		assert(run->magic == ARENA_RUN_MAGIC);
+		ret = run->bin->reg_size;
+	} else
+		ret = mapelm->npages << pagesize_2pow;
+
+	return (ret);
+}
+
+/*
+ * Try to resize a large allocation, in order to avoid copying.  This will fail
+ * if the object would grow and the pages that follow it are already in use.
+ */
+static bool
+arena_ralloc_resize(void *ptr, size_t size, size_t oldsize)
+{
+	arena_chunk_t *chunk;
+	arena_t *arena;
+	extent_node_t *nodeA, *nodeB, key;
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	arena = chunk->arena;
+	assert(arena->magic == ARENA_MAGIC);
+
+	if (size < oldsize) {
+		/*
+		 * Shrink the run, and make trailing pages available for other
+		 * allocations.
+		 */
+		key.addr = (void *)((uintptr_t)ptr);
+#ifdef MALLOC_BALANCE
+		arena_lock_balance(arena);
+#else
+		malloc_spin_lock(&arena->lock);
+#endif
+		nodeA = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
+		    &key);
+		assert(nodeA != NULL);
+		if ((nodeB = arena_node_alloc(arena)) == NULL) {
+			malloc_spin_unlock(&arena->lock);
+			return (true);
+		}
+		arena_run_trim_tail(arena, chunk, nodeA, nodeB,
+		    (arena_run_t *)ptr, oldsize, size, oldsize - size);
+#ifdef MALLOC_STATS
+		arena->stats.allocated_large -= oldsize - size;
+#endif
+		malloc_spin_unlock(&arena->lock);
+		return (false);
+	}
+
+	/* Try to extend the run. */
+	assert(size > oldsize);
+	key.addr = (void *)((uintptr_t)ptr + oldsize);
+#ifdef MALLOC_BALANCE
+	arena_lock_balance(arena);
+#else
+	malloc_spin_lock(&arena->lock);
+#endif
+	nodeB = RB_FIND(extent_tree_ad_s, &arena->runs_avail_ad, &key);
+	if (nodeB != NULL && oldsize + nodeB->size >= size) {
+		unsigned i, pageind, npages;
+
+		/*
+		 * The next run is available and sufficiently large.
+		 * Merge the two adjacent runs, then trim if necessary.
+		 */
+
+		RB_REMOVE(extent_tree_szad_s, &arena->runs_avail_szad, nodeB);
+		RB_REMOVE(extent_tree_ad_s, &arena->runs_avail_ad, nodeB);
+		arena->ndirty -= nodeB->ndirty;
+
+		key.addr = ptr;
+		nodeA = RB_FIND(extent_tree_ad_s, &arena->runs_alloced_ad,
+		    &key);
+		assert(nodeA != NULL);
+		nodeA->size += nodeB->size;
+
+		chunk->pages_used += (nodeB->size >> pagesize_2pow);
+
+		/* Update the portion of the map that will be retained. */
+		pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >>
+		    pagesize_2pow);
+		npages = size >> pagesize_2pow;
+		for (i = 0; i < npages; i++) {
+			chunk->map[pageind + i].npages = npages;
+			chunk->map[pageind + i].pos = i;
+		}
+
+#ifdef MALLOC_DECOMMIT
+		if (opt_decommit) {
+			if (nodeB->ndirty != nodeB->size) {
+				/*
+				 * Commit the part of the run that is being
+				 * allocated.
+				 */
+#  ifdef MOZ_MEMORY_WINDOWS
+				VirtualAlloc(nodeB->addr, nodeA->size - oldsize,
+				    MEM_COMMIT, PAGE_READWRITE);
+#  else
+				if (mmap(nodeB->addr, nodeA->size - oldsize,
+				    PROT_READ | PROT_WRITE, MAP_FIXED |
+				    MAP_PRIVATE | MAP_ANON, -1, 0) ==
+				    MAP_FAILED)
+					abort();
+#  endif
+				if (nodeB->ndirty != 0 && nodeA->size > size) {
+					/*
+					 * Decommit the unused portion of the
+					 * run in order to assure a uniform
+					 * state where all pages in each part
+					 * of the split are either completely
+					 * committed or completely decommitted.
+					 */
+#  ifdef MOZ_MEMORY_WINDOWS
+					VirtualFree((void *)((uintptr_t)ptr +
+					    size), nodeA->size - size,
+					    MEM_DECOMMIT);
+#  else
+					if (mmap((void *)((uintptr_t)ptr +
+					    size), nodeA->size - size,
+					    PROT_NONE, MAP_FIXED | MAP_PRIVATE
+					    | MAP_ANON, -1, 0) == MAP_FAILED)
+						abort();
+#  endif
+#  ifdef MALLOC_STATS
+					arena->stats.npurged += nodeB->ndirty;
+					arena->stats.nmadvise++;
+#  endif
+					nodeB->ndirty = 0;
+				}
+			}
+		}
+#endif
+		/* Trim if necessary. */
+		if (nodeA->size > size) {
+			size_t ndirty;
+
+			if (nodeB->ndirty == 0)
+				ndirty = 0;
+			else if (nodeB->ndirty >= nodeA->size - size)
+				ndirty = nodeA->size - size;
+			else
+				ndirty = nodeB->ndirty;
+			arena_run_trim_tail(arena, chunk, nodeA, nodeB,
+			    (arena_run_t *)ptr, nodeA->size, size, ndirty);
+		} else
+			arena_node_dealloc(arena, nodeB);
+#ifdef MALLOC_STATS
+		arena->stats.allocated_large += size - oldsize;
+#endif
+		malloc_spin_unlock(&arena->lock);
+		return (false);
+	}
+	malloc_spin_unlock(&arena->lock);
+
+	return (true);
+}
+
+static void *
+arena_ralloc(void *ptr, size_t size, size_t oldsize)
+{
+	void *ret;
+
+	/* Try to avoid moving the allocation. */
+	if (size < small_min) {
+		if (oldsize < small_min &&
+		    ffs((int)(pow2_ceil(size) >> (TINY_MIN_2POW + 1)))
+		    == ffs((int)(pow2_ceil(oldsize) >> (TINY_MIN_2POW + 1))))
+			goto IN_PLACE; /* Same size class. */
+	} else if (size <= small_max) {
+		if (oldsize >= small_min && oldsize <= small_max &&
+		    (QUANTUM_CEILING(size) >> opt_quantum_2pow)
+		    == (QUANTUM_CEILING(oldsize) >> opt_quantum_2pow))
+			goto IN_PLACE; /* Same size class. */
+	} else if (size <= bin_maxclass) {
+		if (oldsize > small_max && oldsize <= bin_maxclass &&
+		    pow2_ceil(size) == pow2_ceil(oldsize))
+			goto IN_PLACE; /* Same size class. */
+	} else if (oldsize > bin_maxclass && oldsize <= arena_maxclass) {
+		size_t psize;
+
+		assert(size > bin_maxclass);
+		psize = PAGE_CEILING(size);
+
+		if (psize == oldsize)
+			goto IN_PLACE; /* Same size class. */
+
+		if (arena_ralloc_resize(ptr, psize, oldsize) == false)
+			goto IN_PLACE;
+	}
+
+	/*
+	 * If we get here, then size and oldsize are different enough that we
+	 * need to move the object.  In that case, fall back to allocating new
+	 * space and copying.
+	 */
+	ret = arena_malloc(choose_arena(), size, false);
+	if (ret == NULL)
+		return (NULL);
+
+	/* Junk/zero-filling were already done by arena_malloc(). */
+	if (size < oldsize)
+		memcpy(ret, ptr, size);
+	else
+		memcpy(ret, ptr, oldsize);
+	idalloc(ptr);
+	return (ret);
+IN_PLACE:
+	if (opt_junk && size < oldsize)
+		memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size);
+	else if (opt_zero && size > oldsize)
+		memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize);
+	return (ptr);
+}
+
+static inline void
+arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    unsigned pageind, arena_chunk_map_t *mapelm)
+{
+	arena_run_t *run;
+	arena_bin_t *bin;
+	size_t size;
+
+	pageind -= mapelm->pos;
+
+	run = (arena_run_t *)((uintptr_t)chunk + (pageind << pagesize_2pow));
+	assert(run->magic == ARENA_RUN_MAGIC);
+	bin = run->bin;
+	size = bin->reg_size;
+
+	if (opt_junk)
+		memset(ptr, 0x5a, size);
+
+	arena_run_reg_dalloc(run, bin, ptr, size);
+	run->nfree++;
+
+	if (run->nfree == bin->nregs) {
+		/* Deallocate run. */
+		if (run == bin->runcur)
+			bin->runcur = NULL;
+		else if (bin->nregs != 1) {
+			/*
+			 * This block's conditional is necessary because if the
+			 * run only contains one region, then it never gets
+			 * inserted into the non-full runs tree.
+			 */
+			RB_REMOVE(arena_run_tree_s, &bin->runs, run);
+		}
+#ifdef MALLOC_DEBUG
+		run->magic = 0;
+#endif
+		arena_run_dalloc(arena, run, bin->run_size, bin->run_size);
+#ifdef MALLOC_STATS
+		bin->stats.curruns--;
+#endif
+	} else if (run->nfree == 1 && run != bin->runcur) {
+		/*
+		 * Make sure that bin->runcur always refers to the lowest
+		 * non-full run, if one exists.
+		 */
+		if (bin->runcur == NULL)
+			bin->runcur = run;
+		else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
+			/* Switch runcur. */
+			if (bin->runcur->nfree > 0) {
+				/* Insert runcur. */
+				RB_INSERT(arena_run_tree_s, &bin->runs,
+				    bin->runcur);
+			}
+			bin->runcur = run;
+		} else
+			RB_INSERT(arena_run_tree_s, &bin->runs, run);
+	}
+#ifdef MALLOC_STATS
+	arena->stats.allocated_small -= size;
+	arena->stats.ndalloc_small++;
+#endif
+}
+
+#ifdef MALLOC_LAZY_FREE
+static inline void
+arena_dalloc_lazy(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+    unsigned pageind, arena_chunk_map_t *mapelm)
+{
+	void **free_cache = arena->free_cache;
+	unsigned i, slot;
+
+	if (!__isthreaded || opt_lazy_free_2pow < 0) {
+		malloc_spin_lock(&arena->lock);
+		arena_dalloc_small(arena, chunk, ptr, pageind, mapelm);
+		malloc_spin_unlock(&arena->lock);
+		return;
+	}
+
+	for (i = 0; i < LAZY_FREE_NPROBES; i++) {
+		slot = PRN(lazy_free, opt_lazy_free_2pow);
+		if (atomic_cmpset_ptr((uintptr_t *)&free_cache[slot],
+		    (uintptr_t)NULL, (uintptr_t)ptr)) {
+			return;
+		}
+	}
+
+	malloc_spin_lock(&arena->lock);
+	arena_dalloc_small(arena, chunk, ptr, pageind, mapelm);
+
+	/*
+	 * Check whether another thread already cleared the cache.  It is
+	 * possible that another thread cleared the cache *and* this slot was
+	 * already refilled, which could result in a mostly fruitless cache
+	 * sweep, but such a sequence of events causes no correctness issues.
+	 */
+	if ((ptr = (void *)atomic_readandclear_ptr(
+	    (uintptr_t *)&free_cache[slot]))
+	    != NULL) {
+		unsigned lazy_free_mask;
+
+		/*
+		 * Clear the cache, since we failed to find a slot.  It is
+		 * possible that other threads will continue to insert objects
+		 * into the cache while this one sweeps, but that is okay,
+		 * since on average the cache is still swept with the same
+		 * frequency.
+		 */
+
+		/* Handle pointer at current slot. */
+		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+		pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >>
+		    pagesize_2pow);
+		mapelm = &chunk->map[pageind];
+		arena_dalloc_small(arena, chunk, ptr, pageind, mapelm);
+
+		/* Sweep remainder of slots. */
+		lazy_free_mask = (1U << opt_lazy_free_2pow) - 1;
+		for (i = (slot + 1) & lazy_free_mask;
+		     i != slot;
+		     i = (i + 1) & lazy_free_mask) {
+			ptr = (void *)atomic_readandclear_ptr(
+			    (uintptr_t *)&free_cache[i]);
+			if (ptr != NULL) {
+				chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+				pageind = (((uintptr_t)ptr - (uintptr_t)chunk)
+				    >> pagesize_2pow);
+				mapelm = &chunk->map[pageind];
+				arena_dalloc_small(arena, chunk, ptr, pageind,
+				    mapelm);
+			}
+		}
+	}
+
+	malloc_spin_unlock(&arena->lock);
+}
+#endif
+
+static void
+arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+{
+	unsigned pageind;
+	arena_chunk_map_t *mapelm;
+
+	assert(arena != NULL);
+	assert(arena->magic == ARENA_MAGIC);
+	assert(chunk->arena == arena);
+	assert(ptr != NULL);
+	assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+	pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> pagesize_2pow);
+	mapelm = &chunk->map[pageind];
+	if (mapelm->pos != 0 || ptr != (void *)(((uintptr_t)chunk) + (pageind <<
+	    pagesize_2pow))) {
+		/* Small allocation. */
+#ifdef MALLOC_LAZY_FREE
+		arena_dalloc_lazy(arena, chunk, ptr, pageind, mapelm);
+#else
+		malloc_spin_lock(&arena->lock);
+		arena_dalloc_small(arena, chunk, ptr, pageind, mapelm);
+		malloc_spin_unlock(&arena->lock);
+#endif
+	} else {
+		size_t size;
+
+		/* Large allocation. */
+
+		size = mapelm->npages << pagesize_2pow;
+		assert((((uintptr_t)ptr) & pagesize_mask) == 0);
+
+		if (opt_junk)
+			memset(ptr, 0x5a, size);
+
+		malloc_spin_lock(&arena->lock);
+		arena_run_dalloc(arena, (arena_run_t *)ptr, size, size);
+#ifdef MALLOC_STATS
+		arena->stats.allocated_large -= size;
+		arena->stats.ndalloc_large++;
+#endif
+		malloc_spin_unlock(&arena->lock);
+	}
+}
+
+static bool
+arena_new(arena_t *arena)
+{
+	unsigned i;
+	arena_bin_t *bin;
+	size_t pow2_size, prev_run_size;
+
+	if (malloc_spin_init(&arena->lock))
+		return (true);
+
+#ifdef MALLOC_STATS
+	memset(&arena->stats, 0, sizeof(arena_stats_t));
+#endif
+
+	arena->node_mag_cur = NULL;
+	arena->node_mag_full = NULL;
+
+	/* Initialize chunks. */
+	RB_INIT(&arena->chunks);
+	arena->spare = NULL;
+
+	arena->ndirty = 0;
+	arena->spare_ndirty = 0;
+
+	RB_INIT(&arena->runs_avail_szad);
+	RB_INIT(&arena->runs_avail_ad);
+	RB_INIT(&arena->runs_alloced_ad);
+
+#ifdef MALLOC_BALANCE
+	arena->contention = 0;
+#endif
+#ifdef MALLOC_LAZY_FREE
+	if (opt_lazy_free_2pow >= 0) {
+		arena->free_cache = (void **) base_alloc(sizeof(void *)
+		    * (1U << opt_lazy_free_2pow));
+		if (arena->free_cache == NULL)
+			return (true);
+		memset(arena->free_cache, 0, sizeof(void *)
+		    * (1U << opt_lazy_free_2pow));
+	} else
+		arena->free_cache = NULL;
+#endif
+
+	/* Initialize bins. */
+	prev_run_size = pagesize;
+
+	/* (2^n)-spaced tiny bins. */
+	for (i = 0; i < ntbins; i++) {
+		bin = &arena->bins[i];
+		bin->runcur = NULL;
+		RB_INIT(&bin->runs);
+
+		bin->reg_size = (1U << (TINY_MIN_2POW + i));
+
+		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+#ifdef MALLOC_STATS
+		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+#endif
+	}
+
+	/* Quantum-spaced bins. */
+	for (; i < ntbins + nqbins; i++) {
+		bin = &arena->bins[i];
+		bin->runcur = NULL;
+		RB_INIT(&bin->runs);
+
+		bin->reg_size = quantum * (i - ntbins + 1);
+
+		pow2_size = pow2_ceil(quantum * (i - ntbins + 1));
+		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+#ifdef MALLOC_STATS
+		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+#endif
+	}
+
+	/* (2^n)-spaced sub-page bins. */
+	for (; i < ntbins + nqbins + nsbins; i++) {
+		bin = &arena->bins[i];
+		bin->runcur = NULL;
+		RB_INIT(&bin->runs);
+
+		bin->reg_size = (small_max << (i - (ntbins + nqbins) + 1));
+
+		prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+
+#ifdef MALLOC_STATS
+		memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+#endif
+	}
+
+#ifdef MALLOC_DEBUG
+	arena->magic = ARENA_MAGIC;
+#endif
+
+	return (false);
+}
+
+/* Create a new arena and insert it into the arenas array at index ind. */
+static arena_t *
+arenas_extend(unsigned ind)
+{
+	arena_t *ret;
+
+	/* Allocate enough space for trailing bins. */
+	ret = (arena_t *)base_alloc(sizeof(arena_t)
+	    + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
+	if (ret != NULL && arena_new(ret) == false) {
+		arenas[ind] = ret;
+		return (ret);
+	}
+	/* Only reached if there is an OOM error. */
+
+	/*
+	 * OOM here is quite inconvenient to propagate, since dealing with it
+	 * would require a check for failure in the fast path.  Instead, punt
+	 * by using arenas[0].  In practice, this is an extremely unlikely
+	 * failure.
+	 */
+	_malloc_message(_getprogname(),
+	    ": (malloc) Error initializing arena\n", "", "");
+	if (opt_abort)
+		abort();
+
+	return (arenas[0]);
+}
+
+/*
+ * End arena.
+ */
+/******************************************************************************/
+/*
+ * Begin general internal functions.
+ */
+
+static void *
+huge_malloc(size_t size, bool zero)
+{
+	void *ret;
+	size_t csize;
+	extent_node_t *node;
+
+	/* Allocate one or more contiguous chunks for this request. */
+
+	csize = CHUNK_CEILING(size);
+	if (csize == 0) {
+		/* size is large enough to cause size_t wrap-around. */
+		return (NULL);
+	}
+
+	/* Allocate a chunk node with which to track the chunk. */
+	node = base_node_alloc();
+	if (node == NULL)
+		return (NULL);
+
+	ret = chunk_alloc(csize, zero);
+	if (ret == NULL) {
+		base_node_dealloc(node);
+		return (NULL);
+	}
+
+	/* Insert node into huge. */
+	node->addr = ret;
+	node->size = csize;
+
+	malloc_mutex_lock(&huge_mtx);
+	RB_INSERT(extent_tree_ad_s, &huge, node);
+#ifdef MALLOC_STATS
+	huge_nmalloc++;
+	huge_allocated += csize;
+#endif
+	malloc_mutex_unlock(&huge_mtx);
+
+	if (zero == false) {
+		if (opt_junk)
+			memset(ret, 0xa5, csize);
+		else if (opt_zero)
+			memset(ret, 0, csize);
+	}
+
+	return (ret);
+}
+
+/* Only handles large allocations that require more than chunk alignment. */
+static void *
+huge_palloc(size_t alignment, size_t size)
+{
+	void *ret;
+	size_t alloc_size, chunk_size, offset;
+	extent_node_t *node;
+
+	/*
+	 * This allocation requires alignment that is even larger than chunk
+	 * alignment.  This means that huge_malloc() isn't good enough.
+	 *
+	 * Allocate almost twice as many chunks as are demanded by the size or
+	 * alignment, in order to assure the alignment can be achieved, then
+	 * unmap leading and trailing chunks.
+	 */
+	assert(alignment >= chunksize);
+
+	chunk_size = CHUNK_CEILING(size);
+
+	if (size >= alignment)
+		alloc_size = chunk_size + alignment - chunksize;
+	else
+		alloc_size = (alignment << 1) - chunksize;
+
+	/* Allocate a chunk node with which to track the chunk. */
+	node = base_node_alloc();
+	if (node == NULL)
+		return (NULL);
+
+	ret = chunk_alloc(alloc_size, false);
+	if (ret == NULL) {
+		base_node_dealloc(node);
+		return (NULL);
+	}
+
+	offset = (uintptr_t)ret & (alignment - 1);
+	assert((offset & chunksize_mask) == 0);
+	assert(offset < alloc_size);
+	if (offset == 0) {
+		/* Trim trailing space. */
+		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
+		    - chunk_size);
+	} else {
+		size_t trailsize;
+
+		/* Trim leading space. */
+		chunk_dealloc(ret, alignment - offset);
+
+		ret = (void *)((uintptr_t)ret + (alignment - offset));
+
+		trailsize = alloc_size - (alignment - offset) - chunk_size;
+		if (trailsize != 0) {
+		    /* Trim trailing space. */
+		    assert(trailsize < alloc_size);
+		    chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
+			trailsize);
+		}
+	}
+
+	/* Insert node into huge. */
+	node->addr = ret;
+	node->size = chunk_size;
+
+	malloc_mutex_lock(&huge_mtx);
+	RB_INSERT(extent_tree_ad_s, &huge, node);
+#ifdef MALLOC_STATS
+	huge_nmalloc++;
+	huge_allocated += chunk_size;
+#endif
+	malloc_mutex_unlock(&huge_mtx);
+
+	if (opt_junk)
+		memset(ret, 0xa5, chunk_size);
+	else if (opt_zero)
+		memset(ret, 0, chunk_size);
+
+	return (ret);
+}
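+
+/*
+ * Example of the over-allocation above, with a hypothetical chunksize of
+ * 1MB, alignment == 2MB and size == 1MB: since size < alignment,
+ * alloc_size == (2MB << 1) - 1MB == 3MB.  The returned region is
+ * chunk-aligned, so offset is either 0 or 1MB; trimming the leading and/or
+ * trailing chunks therefore always leaves a 1MB chunk that is 2MB-aligned.
+ */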
+
+static void *
+huge_ralloc(void *ptr, size_t size, size_t oldsize)
+{
+	void *ret;
+
+	/* Avoid moving the allocation if the size class would not change. */
+	if (oldsize > arena_maxclass &&
+	    CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) {
+		if (opt_junk && size < oldsize) {
+			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize
+			    - size);
+		} else if (opt_zero && size > oldsize) {
+			memset((void *)((uintptr_t)ptr + oldsize), 0, size
+			    - oldsize);
+		}
+		return (ptr);
+	}
+
+	/*
+	 * If we get here, then size and oldsize are different enough that we
+	 * need to use a different size class.  In that case, fall back to
+	 * allocating new space and copying.
+	 */
+	ret = huge_malloc(size, false);
+	if (ret == NULL)
+		return (NULL);
+
+	if (CHUNK_ADDR2BASE(ptr) == ptr) {
+		/* The old allocation is a chunk. */
+		if (size < oldsize)
+			memcpy(ret, ptr, size);
+		else
+			memcpy(ret, ptr, oldsize);
+	} else {
+		/* The old allocation is a region. */
+		assert(oldsize < size);
+		memcpy(ret, ptr, oldsize);
+	}
+	idalloc(ptr);
+	return (ret);
+}
+
+static void
+huge_dalloc(void *ptr)
+{
+	extent_node_t *node, key;
+
+	malloc_mutex_lock(&huge_mtx);
+
+	/* Extract from tree of huge allocations. */
+	key.addr = ptr;
+	node = RB_FIND(extent_tree_ad_s, &huge, &key);
+	assert(node != NULL);
+	assert(node->addr == ptr);
+	RB_REMOVE(extent_tree_ad_s, &huge, node);
+
+#ifdef MALLOC_STATS
+	huge_ndalloc++;
+	huge_allocated -= node->size;
+#endif
+
+	malloc_mutex_unlock(&huge_mtx);
+
+	/* Unmap chunk. */
+#ifdef MALLOC_DSS
+	if (opt_dss && opt_junk)
+		memset(node->addr, 0x5a, node->size);
+#endif
+	chunk_dealloc(node->addr, node->size);
+
+	base_node_dealloc(node);
+}
+
+static void *
+imalloc(size_t size)
+{
+	void *ret;
+
+	assert(size != 0);
+
+	if (size <= arena_maxclass)
+		ret = arena_malloc(choose_arena(), size, false);
+	else
+		ret = huge_malloc(size, false);
+
+	return (ret);
+}
+
+static void *
+ipalloc(size_t alignment, size_t size)
+{
+	void *ret;
+	size_t ceil_size;
+
+	/*
+	 * Round size up to the nearest multiple of alignment.
+	 *
+	 * This done, we can take advantage of the fact that for each small
+	 * size class, every object is aligned at the smallest power of two
+	 * that is non-zero in the base two representation of the size.  For
+	 * example:
+	 *
+	 *   Size |   Base 2 | Minimum alignment
+	 *   -----+----------+------------------
+	 *     96 |  1100000 |  32
+	 *    144 | 10100000 |  32
+	 *    192 | 11000000 |  64
+	 *
+	 * Depending on runtime settings, it is possible that arena_malloc()
+	 * will further round up to a power of two, but that never causes
+	 * correctness issues.
+	 */
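+	/*
+	 * For example (hypothetical values): size == 100 with alignment == 64
+	 * yields ceil_size == 128 below.  128 is 10000000 in base 2, so every
+	 * object in the 128-byte size class is at least 128-byte aligned,
+	 * which satisfies the requested 64-byte alignment without resorting
+	 * to the over-sized-run path.
+	 */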
+	ceil_size = (size + (alignment - 1)) & (-alignment);
+	/*
+	 * (ceil_size < size) protects against the combination of maximal
+	 * alignment and size greater than maximal alignment.
+	 */
+	if (ceil_size < size) {
+		/* size_t overflow. */
+		return (NULL);
+	}
+
+	if (ceil_size <= pagesize || (alignment <= pagesize
+	    && ceil_size <= arena_maxclass))
+		ret = arena_malloc(choose_arena(), ceil_size, false);
+	else {
+		size_t run_size;
+
+		/*
+		 * We can't achieve sub-page alignment, so round up alignment
+		 * permanently; it makes later calculations simpler.
+		 */
+		alignment = PAGE_CEILING(alignment);
+		ceil_size = PAGE_CEILING(size);
+		/*
+		 * (ceil_size < size) protects against very large sizes within
+		 * pagesize of SIZE_T_MAX.
+		 *
+		 * (ceil_size + alignment < ceil_size) protects against the
+		 * combination of maximal alignment and ceil_size large enough
+		 * to cause overflow.  This is similar to the first overflow
+		 * check above, but it needs to be repeated due to the new
+		 * ceil_size value, which may now be *equal* to maximal
+		 * alignment, whereas before we only detected overflow if the
+		 * original size was *greater* than maximal alignment.
+		 */
+		if (ceil_size < size || ceil_size + alignment < ceil_size) {
+			/* size_t overflow. */
+			return (NULL);
+		}
+
+		/*
+		 * Calculate the size of the over-size run that arena_palloc()
+		 * would need to allocate in order to guarantee the alignment.
+		 */
+		if (ceil_size >= alignment)
+			run_size = ceil_size + alignment - pagesize;
+		else {
+			/*
+			 * It is possible that (alignment << 1) will cause
+			 * overflow, but it doesn't matter because we also
+			 * subtract pagesize, which in the case of overflow
+			 * leaves us with a very large run_size.  That causes
+			 * the first conditional below to fail, which means
+			 * that the bogus run_size value never gets used for
+			 * anything important.
+			 */
+			run_size = (alignment << 1) - pagesize;
+		}
+
+		if (run_size <= arena_maxclass) {
+			ret = arena_palloc(choose_arena(), alignment, ceil_size,
+			    run_size);
+		} else if (alignment <= chunksize)
+			ret = huge_malloc(ceil_size, false);
+		else
+			ret = huge_palloc(alignment, ceil_size);
+	}
+
+	assert(((uintptr_t)ret & (alignment - 1)) == 0);
+	return (ret);
+}
+
+static void *
+icalloc(size_t size)
+{
+	void *ret;
+
+	if (size <= arena_maxclass)
+		ret = arena_malloc(choose_arena(), size, true);
+	else
+		ret = huge_malloc(size, true);
+
+	return (ret);
+}
+
+static size_t
+isalloc(const void *ptr)
+{
+	size_t ret;
+	arena_chunk_t *chunk;
+
+	assert(ptr != NULL);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr) {
+		/* Region. */
+		assert(chunk->arena->magic == ARENA_MAGIC);
+
+		ret = arena_salloc(ptr);
+	} else {
+		extent_node_t *node, key;
+
+		/* Chunk (huge allocation). */
+
+		malloc_mutex_lock(&huge_mtx);
+
+		/* Extract from tree of huge allocations. */
+		key.addr = __DECONST(void *, ptr);
+		node = RB_FIND(extent_tree_ad_s, &huge, &key);
+		assert(node != NULL);
+
+		ret = node->size;
+
+		malloc_mutex_unlock(&huge_mtx);
+	}
+
+	return (ret);
+}
+
+static void *
+iralloc(void *ptr, size_t size)
+{
+	void *ret;
+	size_t oldsize;
+
+	assert(ptr != NULL);
+	assert(size != 0);
+
+	oldsize = isalloc(ptr);
+
+	if (size <= arena_maxclass)
+		ret = arena_ralloc(ptr, size, oldsize);
+	else
+		ret = huge_ralloc(ptr, size, oldsize);
+
+	return (ret);
+}
+
+static void
+idalloc(void *ptr)
+{
+	arena_chunk_t *chunk;
+
+	assert(ptr != NULL);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr) {
+		/* Region. */
+		arena_dalloc(chunk->arena, chunk, ptr);
+	} else
+		huge_dalloc(ptr);
+}
+
+#ifdef MOZ_MEMORY_BSD
+static inline unsigned
+malloc_ncpus(void)
+{
+	unsigned ret;
+	int mib[2];
+	size_t len;
+
+	mib[0] = CTL_HW;
+	mib[1] = HW_NCPU;
+	len = sizeof(ret);
+	if (sysctl(mib, 2, &ret, &len, (void *) 0, 0) == -1) {
+		/* Error. */
+		return (1);
+	}
+
+	return (ret);
+}
+#elif (defined(MOZ_MEMORY_LINUX))
+#include <fcntl.h>
+
+static inline unsigned
+malloc_ncpus(void)
+{
+	unsigned ret;
+	int fd, nread, column;
+	char buf[1];
+	static const char matchstr[] = "processor\t:";
+
+	/*
+	 * sysconf(3) would be the preferred method for determining the number
+	 * of CPUs, but it uses malloc internally, which causes untenable
+	 * recursion during malloc initialization.
+	 */
+	fd = open("/proc/cpuinfo", O_RDONLY);
+	if (fd == -1)
+		return (1); /* Error. */
+	/*
+	 * Count the number of occurrences of matchstr at the beginnings of
+	 * lines.  This treats hyperthreaded CPUs as multiple processors.
+	 */
+	column = 0;
+	ret = 0;
+	while (true) {
+		nread = read(fd, &buf, sizeof(buf));
+		if (nread <= 0)
+			break; /* EOF or error. */
+
+		if (buf[0] == '\n')
+			column = 0;
+		else if (column != -1) {
+			if (buf[0] == matchstr[column]) {
+				column++;
+				if (column == sizeof(matchstr) - 1) {
+					column = -1;
+					ret++;
+				}
+			} else
+				column = -1;
+		}
+	}
+	if (ret == 0)
+		ret = 1; /* Something went wrong in the parser. */
+
+	return (ret);
+}
+#elif (defined(MOZ_MEMORY_DARWIN))
+#include <mach/mach_init.h>
+#include <mach/mach_host.h>
+
+static inline unsigned
+malloc_ncpus(void)
+{
+	kern_return_t error;
+	natural_t n;
+	processor_info_array_t pinfo;
+	mach_msg_type_number_t pinfocnt;
+
+	error = host_processor_info(mach_host_self(), PROCESSOR_BASIC_INFO,
+				    &n, &pinfo, &pinfocnt);
+	if (error != KERN_SUCCESS)
+		return (1); /* Error. */
+	else
+		return (n);
+}
+#elif (defined(MOZ_MEMORY_SOLARIS))
+#include <kstat.h>
+
+static inline unsigned
+malloc_ncpus(void)
+{
+	unsigned ret;
+	kstat_ctl_t *ctl;
+	kstat_t *kstat;
+	kstat_named_t *named;
+	unsigned i;
+
+	if ((ctl = kstat_open()) == NULL)
+		return (1); /* Error. */
+
+	if ((kstat = kstat_lookup(ctl, "unix", -1, "system_misc")) == NULL)
+		return (1); /* Error. */
+
+	if (kstat_read(ctl, kstat, NULL) == -1)
+		return (1); /* Error. */
+
+	named = KSTAT_NAMED_PTR(kstat);
+
+	for (i = 0; i < kstat->ks_ndata; i++) {
+		if (strcmp(named[i].name, "ncpus") == 0) {
+			/* Figure out which one of these to actually use. */
+			switch(named[i].data_type) {
+			case KSTAT_DATA_INT32:
+				ret = named[i].value.i32;
+				break;
+			case KSTAT_DATA_UINT32:
+				ret = named[i].value.ui32;
+				break;
+			case KSTAT_DATA_INT64:
+				ret = named[i].value.i64;
+				break;
+			case KSTAT_DATA_UINT64:
+				ret = named[i].value.ui64;
+				break;
+			default:
+				return (1); /* Error. */
+			}
+		}
+	}
+
+	kstat_close(ctl); /* Don't bother checking for an error. */
+
+	return (ret);
+}
+#else
+static inline unsigned
+malloc_ncpus(void)
+{
+
+	/*
+	 * We lack a way to determine the number of CPUs on this platform, so
+	 * assume 1 CPU.
+	 */
+	return (1);
+}
+#endif
+
+static void
+malloc_print_stats(void)
+{
+
+	if (opt_print_stats) {
+		char s[UMAX2S_BUFSIZE];
+		_malloc_message("___ Begin malloc statistics ___\n", "", "",
+		    "");
+		_malloc_message("Assertions ",
+#ifdef NDEBUG
+		    "disabled",
+#else
+		    "enabled",
+#endif
+		    "\n", "");
+		_malloc_message("Boolean MALLOC_OPTIONS: ",
+		    opt_abort ? "A" : "a", "", "");
+#ifdef MALLOC_DSS
+		_malloc_message(opt_dss ? "D" : "d", "", "", "");
+#endif
+		_malloc_message(opt_junk ? "J" : "j", "", "", "");
+#ifdef MALLOC_DSS
+		_malloc_message(opt_mmap ? "M" : "m", "", "", "");
+#endif
+		_malloc_message(opt_utrace ? "PU" : "Pu",
+		    opt_sysv ? "V" : "v",
+		    opt_xmalloc ? "X" : "x",
+		    opt_zero ? "Z\n" : "z\n");
+
+		_malloc_message("CPUs: ", umax2s(ncpus, s), "\n", "");
+		_malloc_message("Max arenas: ", umax2s(narenas, s), "\n", "");
+#ifdef MALLOC_LAZY_FREE
+		if (opt_lazy_free_2pow >= 0) {
+			_malloc_message("Lazy free slots: ",
+			    umax2s(1U << opt_lazy_free_2pow, s), "\n", "");
+		} else
+			_malloc_message("Lazy free slots: 0\n", "", "", "");
+#endif
+#ifdef MALLOC_BALANCE
+		_malloc_message("Arena balance threshold: ",
+		    umax2s(opt_balance_threshold, s), "\n", "");
+#endif
+		_malloc_message("Pointer size: ", umax2s(sizeof(void *), s),
+		    "\n", "");
+		_malloc_message("Quantum size: ", umax2s(quantum, s), "\n", "");
+		_malloc_message("Max small size: ", umax2s(small_max, s), "\n",
+		    "");
+		_malloc_message("Max free per arena: ", umax2s(opt_free_max, s),
+		    "\n", "");
+
+		_malloc_message("Chunk size: ", umax2s(chunksize, s), "", "");
+		_malloc_message(" (2^", umax2s(opt_chunk_2pow, s), ")\n", "");
+
+#ifdef MALLOC_STATS
+		{
+			size_t allocated, mapped;
+#ifdef MALLOC_BALANCE
+			uint64_t nbalance = 0;
+#endif
+			unsigned i;
+			arena_t *arena;
+
+			/* Calculate and print allocated/mapped stats. */
+
+			/* arenas. */
+			for (i = 0, allocated = 0; i < narenas; i++) {
+				if (arenas[i] != NULL) {
+					malloc_spin_lock(&arenas[i]->lock);
+					allocated +=
+					    arenas[i]->stats.allocated_small;
+					allocated +=
+					    arenas[i]->stats.allocated_large;
+#ifdef MALLOC_BALANCE
+					nbalance += arenas[i]->stats.nbalance;
+#endif
+					malloc_spin_unlock(&arenas[i]->lock);
+				}
+			}
+
+			/* huge/base. */
+			malloc_mutex_lock(&huge_mtx);
+			allocated += huge_allocated;
+			mapped = stats_chunks.curchunks * chunksize;
+			malloc_mutex_unlock(&huge_mtx);
+
+			malloc_mutex_lock(&base_mtx);
+			mapped += base_mapped;
+			malloc_mutex_unlock(&base_mtx);
+
+#ifdef MOZ_MEMORY_WINDOWS
+			malloc_printf("Allocated: %lu, mapped: %lu\n",
+			    allocated, mapped);
+#else
+			malloc_printf("Allocated: %zu, mapped: %zu\n",
+			    allocated, mapped);
+#endif
+
+#ifdef MALLOC_BALANCE
+			malloc_printf("Arena balance reassignments: %llu\n",
+			    nbalance);
+#endif
+
+			/* Print chunk stats. */
+			{
+				chunk_stats_t chunks_stats;
+
+				malloc_mutex_lock(&huge_mtx);
+				chunks_stats = stats_chunks;
+				malloc_mutex_unlock(&huge_mtx);
+
+				malloc_printf("chunks: nchunks   "
+				    "highchunks    curchunks\n");
+				malloc_printf("  %13llu%13lu%13lu\n",
+				    chunks_stats.nchunks,
+				    chunks_stats.highchunks,
+				    chunks_stats.curchunks);
+			}
+
+			/* Print huge allocation stats. */
+			malloc_printf(
+			    "huge: nmalloc      ndalloc    allocated\n");
+#ifdef MOZ_MEMORY_WINDOWS
+			malloc_printf(" %12llu %12llu %12lu\n",
+			    huge_nmalloc, huge_ndalloc, huge_allocated);
+#else
+			malloc_printf(" %12llu %12llu %12zu\n",
+			    huge_nmalloc, huge_ndalloc, huge_allocated);
+#endif
+			/* Print stats for each arena. */
+			for (i = 0; i < narenas; i++) {
+				arena = arenas[i];
+				if (arena != NULL) {
+					malloc_printf(
+					    "\narenas[%u]:\n", i);
+					malloc_spin_lock(&arena->lock);
+					stats_print(arena);
+					malloc_spin_unlock(&arena->lock);
+				}
+			}
+		}
+#endif /* #ifdef MALLOC_STATS */
+		_malloc_message("--- End malloc statistics ---\n", "", "", "");
+	}
+}
+
+/*
+ * FreeBSD's pthreads implementation calls malloc(3), so the malloc
+ * implementation has to take pains to avoid infinite recursion during
+ * initialization.
+ */
+#ifndef MOZ_MEMORY_WINDOWS
+static inline
+#endif
+bool
+malloc_init(void)
+{
+
+	if (malloc_initialized == false)
+		return (malloc_init_hard());
+
+	return (false);
+}
+
+static bool
+malloc_init_hard(void)
+{
+	unsigned i;
+	char buf[PATH_MAX + 1];
+	const char *opts;
+	long result;
+#ifndef MOZ_MEMORY_WINDOWS
+	int linklen;
+#endif
+
+#ifndef MOZ_MEMORY_WINDOWS
+	malloc_mutex_lock(&init_lock);
+#endif
+
+	if (malloc_initialized) {
+		/*
+		 * Another thread initialized the allocator before this one
+		 * acquired init_lock.
+		 */
+#ifndef MOZ_MEMORY_WINDOWS
+		malloc_mutex_unlock(&init_lock);
+#endif
+		return (false);
+	}
+
+#ifdef MOZ_MEMORY_WINDOWS
+	/* get a thread local storage index */
+	tlsIndex = TlsAlloc();
+#endif
+
+	/* Get page size and number of CPUs */
+#ifdef MOZ_MEMORY_WINDOWS
+	{
+		SYSTEM_INFO info;
+
+		GetSystemInfo(&info);
+		result = info.dwPageSize;
+
+		pagesize = (unsigned) result;
+
+		ncpus = info.dwNumberOfProcessors;
+	}
+#else
+	ncpus = malloc_ncpus();
+
+	result = sysconf(_SC_PAGESIZE);
+	assert(result != -1);
+
+	pagesize = (unsigned) result;
+#endif
+
+	/*
+	 * We assume that pagesize is a power of 2 when calculating
+	 * pagesize_mask and pagesize_2pow.
+	 */
+	assert(((result - 1) & result) == 0);
+	pagesize_mask = result - 1;
+	pagesize_2pow = ffs((int)result) - 1;
+
+#ifdef MALLOC_LAZY_FREE
+	if (ncpus == 1)
+		opt_lazy_free_2pow = -1;
+#endif
+
+	for (i = 0; i < 3; i++) {
+		unsigned j;
+
+		/* Get runtime configuration. */
+		switch (i) {
+		case 0:
+#ifndef MOZ_MEMORY_WINDOWS
+			if ((linklen = readlink("/etc/malloc.conf", buf,
+						sizeof(buf) - 1)) != -1) {
+				/*
+				 * Use the contents of the "/etc/malloc.conf"
+				 * symbolic link's name.
+				 */
+				buf[linklen] = '\0';
+				opts = buf;
+			} else
+#endif
+			{
+				/* No configuration specified. */
+				buf[0] = '\0';
+				opts = buf;
+			}
+			break;
+		case 1:
+			if (issetugid() == 0 && (opts =
+			    getenv("MALLOC_OPTIONS")) != NULL) {
+				/*
+				 * Do nothing; opts is already initialized to
+				 * the value of the MALLOC_OPTIONS environment
+				 * variable.
+				 */
+			} else {
+				/* No configuration specified. */
+				buf[0] = '\0';
+				opts = buf;
+			}
+			break;
+		case 2:
+			if (_malloc_options != NULL) {
+				/*
+				 * Use options that were compiled into the
+				 * program.
+				 */
+				opts = _malloc_options;
+			} else {
+				/* No configuration specified. */
+				buf[0] = '\0';
+				opts = buf;
+			}
+			break;
+		default:
+			/* NOTREACHED */
+			buf[0] = '\0';
+			opts = buf;
+			assert(false);
+		}
+
+		for (j = 0; opts[j] != '\0'; j++) {
+			unsigned k, nreps;
+			bool nseen;
+
+			/* Parse repetition count, if any. */
+			for (nreps = 0, nseen = false;; j++, nseen = true) {
+				switch (opts[j]) {
+					case '0': case '1': case '2': case '3':
+					case '4': case '5': case '6': case '7':
+					case '8': case '9':
+						nreps *= 10;
+						nreps += opts[j] - '0';
+						break;
+					default:
+						goto MALLOC_OUT;
+				}
+			}
+MALLOC_OUT:
+			if (nseen == false)
+				nreps = 1;
+
+			for (k = 0; k < nreps; k++) {
+				switch (opts[j]) {
+				case 'a':
+					opt_abort = false;
+					break;
+				case 'A':
+					opt_abort = true;
+					break;
+				case 'b':
+#ifdef MALLOC_BALANCE
+					opt_balance_threshold >>= 1;
+#endif
+					break;
+				case 'B':
+#ifdef MALLOC_BALANCE
+					if (opt_balance_threshold == 0)
+						opt_balance_threshold = 1;
+					else if ((opt_balance_threshold << 1)
+					    > opt_balance_threshold)
+						opt_balance_threshold <<= 1;
+#endif
+					break;
+				case 'd':
+#ifdef MALLOC_DSS
+					opt_dss = false;
+#endif
+					break;
+				case 'D':
+#ifdef MALLOC_DSS
+					opt_dss = true;
+#endif
+					break;
+				case 'f':
+					opt_free_max >>= 1;
+					break;
+				case 'F':
+					if (opt_free_max == 0)
+						opt_free_max = 1;
+					else if ((opt_free_max << 1) != 0)
+						opt_free_max <<= 1;
+					break;
+				case 'j':
+					opt_junk = false;
+					break;
+				case 'J':
+					opt_junk = true;
+					break;
+				case 'k':
+					/*
+					 * Chunks always require at least one
+					 * header page, so chunks can never be
+					 * smaller than two pages.
+					 */
+					if (opt_chunk_2pow > pagesize_2pow + 1)
+						opt_chunk_2pow--;
+					break;
+				case 'K':
+					/*
+					 * There must be fewer pages in a chunk
+					 * than can be recorded by the pos
+					 * field of arena_chunk_map_t, in order
+					 * to make POS_EMPTY/POS_FREE special.
+					 */
+					if (opt_chunk_2pow - pagesize_2pow
+					    < (sizeof(uint32_t) << 3) - 1)
+						opt_chunk_2pow++;
+					break;
+				case 'l':
+#ifdef MALLOC_LAZY_FREE
+					if (opt_lazy_free_2pow >= 0)
+						opt_lazy_free_2pow--;
+#endif
+					break;
+				case 'L':
+#ifdef MALLOC_LAZY_FREE
+					if (ncpus > 1)
+						opt_lazy_free_2pow++;
+#endif
+					break;
+				case 'm':
+#ifdef MALLOC_DSS
+					opt_mmap = false;
+#endif
+					break;
+				case 'M':
+#ifdef MALLOC_DSS
+					opt_mmap = true;
+#endif
+					break;
+				case 'n':
+					opt_narenas_lshift--;
+					break;
+				case 'N':
+					opt_narenas_lshift++;
+					break;
+				case 'p':
+					opt_print_stats = false;
+					break;
+				case 'P':
+					opt_print_stats = true;
+					break;
+				case 'q':
+					if (opt_quantum_2pow > QUANTUM_2POW_MIN)
+						opt_quantum_2pow--;
+					break;
+				case 'Q':
+					if (opt_quantum_2pow < pagesize_2pow -
+					    1)
+						opt_quantum_2pow++;
+					break;
+				case 's':
+					if (opt_small_max_2pow >
+					    QUANTUM_2POW_MIN)
+						opt_small_max_2pow--;
+					break;
+				case 'S':
+					if (opt_small_max_2pow < pagesize_2pow
+					    - 1)
+						opt_small_max_2pow++;
+					break;
+				case 'u':
+					opt_utrace = false;
+					break;
+				case 'U':
+					opt_utrace = true;
+					break;
+				case 'v':
+					opt_sysv = false;
+					break;
+				case 'V':
+					opt_sysv = true;
+					break;
+				case 'x':
+					opt_xmalloc = false;
+					break;
+				case 'X':
+					opt_xmalloc = true;
+					break;
+				case 'z':
+					opt_zero = false;
+					break;
+				case 'Z':
+					opt_zero = true;
+					break;
+				default: {
+					char cbuf[2];
+					
+					cbuf[0] = opts[j];
+					cbuf[1] = '\0';
+					_malloc_message(_getprogname(),
+					    ": (malloc) Unsupported character "
+					    "in malloc options: '", cbuf,
+					    "'\n");
+				}
+				}
+			}
+		}
+	}
+
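+	/*
+	 * For example, MALLOC_OPTIONS="3N" applies 'N' three times, raising
+	 * opt_narenas_lshift by 3.  For boolean options the lowercase letter
+	 * disables what its uppercase counterpart enables (e.g. 'j'/'J' for
+	 * junk filling); for sized options the pair decreases/increases the
+	 * value.  The environment variable is ignored for setuid/setgid
+	 * programs (issetugid()).
+	 */
+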
+#ifdef MALLOC_DSS
+	/* Make sure that there is some method for acquiring memory. */
+	if (opt_dss == false && opt_mmap == false)
+		opt_mmap = true;
+#endif
+
+	/* Take care to call atexit() only once. */
+	if (opt_print_stats) {
+#ifndef MOZ_MEMORY_WINDOWS
+		/* Print statistics at exit. */
+		atexit(malloc_print_stats);
+#endif
+	}
+
+	/* Set variables according to the value of opt_small_max_2pow. */
+	if (opt_small_max_2pow < opt_quantum_2pow)
+		opt_small_max_2pow = opt_quantum_2pow;
+	small_max = (1U << opt_small_max_2pow);
+
+	/* Set bin-related variables. */
+	bin_maxclass = (pagesize >> 1);
+	assert(opt_quantum_2pow >= TINY_MIN_2POW);
+	ntbins = opt_quantum_2pow - TINY_MIN_2POW;
+	assert(ntbins <= opt_quantum_2pow);
+	nqbins = (small_max >> opt_quantum_2pow);
+	nsbins = pagesize_2pow - opt_small_max_2pow - 1;
+
+	/* Set variables according to the value of opt_quantum_2pow. */
+	quantum = (1U << opt_quantum_2pow);
+	quantum_mask = quantum - 1;
+	if (ntbins > 0)
+		small_min = (quantum >> 1) + 1;
+	else
+		small_min = 1;
+	assert(small_min <= quantum);
+
+	/* Set variables according to the value of opt_chunk_2pow. */
+	chunksize = (1LU << opt_chunk_2pow);
+	chunksize_mask = chunksize - 1;
+	chunk_npages = (chunksize >> pagesize_2pow);
+	{
+		unsigned header_size;
+
+		header_size = sizeof(arena_chunk_t) + (sizeof(arena_chunk_map_t)
+		    * (chunk_npages - 1));
+		arena_chunk_header_npages = (header_size >> pagesize_2pow);
+		if ((header_size & pagesize_mask) != 0)
+			arena_chunk_header_npages++;
+	}
+	arena_maxclass = chunksize - (arena_chunk_header_npages <<
+	    pagesize_2pow);
+#ifdef MALLOC_LAZY_FREE
+	/*
+	 * Make sure that allocating the free_cache does not exceed the limits
+	 * of what base_alloc() can handle.
+	 */
+	while ((sizeof(void *) << opt_lazy_free_2pow) > chunksize)
+		opt_lazy_free_2pow--;
+#endif
+
+	UTRACE(0, 0, 0);
+
+#ifdef MALLOC_STATS
+	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
+#endif
+
+	/* Various sanity checks that regard configuration. */
+	assert(quantum >= sizeof(void *));
+	assert(quantum <= pagesize);
+	assert(chunksize >= pagesize);
+	assert(quantum * 4 <= chunksize);
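+
+	/*
+	 * For example, with 4 KB pages, a 16 byte quantum, and 1 MB chunks
+	 * (a typical configuration), pagesize_2pow is 12, quantum_mask is
+	 * 0xf, chunk_npages is 256, and all four assertions above hold.
+	 */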
+
+	/* Initialize chunks data. */
+	malloc_mutex_init(&huge_mtx);
+	RB_INIT(&huge);
+#ifdef MALLOC_DSS
+	malloc_mutex_init(&dss_mtx);
+	dss_base = sbrk(0);
+	dss_prev = dss_base;
+	dss_max = dss_base;
+	RB_INIT(&dss_chunks_ad);
+	RB_INIT(&dss_chunks_szad);
+#endif
+#ifdef MALLOC_STATS
+	huge_nmalloc = 0;
+	huge_ndalloc = 0;
+	huge_allocated = 0;
+#endif
+
+	/* Initialize base allocation data structures. */
+#ifdef MALLOC_STATS
+	base_mapped = 0;
+#endif
+#ifdef MALLOC_DSS
+	/*
+	 * Allocate a base chunk here, since it doesn't actually have to be
+	 * chunk-aligned.  Doing this before allocating any other chunks allows
+	 * the use of space that would otherwise be wasted.
+	 */
+	if (opt_dss)
+		base_pages_alloc(0);
+#endif
+	base_node_mags_avail = NULL;
+	base_node_mag = NULL;
+	base_node_mag_partial = NULL;
+	malloc_mutex_init(&base_mtx);
+
+	if (ncpus > 1) {
+		/*
+		 * For SMP systems, create four times as many arenas as there
+		 * are CPUs by default.
+		 */
+		opt_narenas_lshift += 2;
+	}
+
+	/* Determine how many arenas to use. */
+	narenas = ncpus;
+	if (opt_narenas_lshift > 0) {
+		if ((narenas << opt_narenas_lshift) > narenas)
+			narenas <<= opt_narenas_lshift;
+		/*
+		 * Make sure not to exceed the limits of what base_alloc() can
+		 * handle.
+		 */
+		if (narenas * sizeof(arena_t *) > chunksize)
+			narenas = chunksize / sizeof(arena_t *);
+	} else if (opt_narenas_lshift < 0) {
+		if ((narenas >> -opt_narenas_lshift) < narenas)
+			narenas >>= -opt_narenas_lshift;
+		/* Make sure there is at least one arena. */
+		if (narenas == 0)
+			narenas = 1;
+	}
+#ifdef MALLOC_BALANCE
+	assert(narenas != 0);
+	for (narenas_2pow = 0;
+	     (narenas >> (narenas_2pow + 1)) != 0;
+	     narenas_2pow++);
+#endif
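+
+	/*
+	 * For example, on a 4-CPU system with no 'n'/'N' flags set,
+	 * opt_narenas_lshift is 2 after the SMP adjustment above, so narenas
+	 * becomes 4 << 2 == 16.
+	 */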
+
+#ifdef NO_TLS
+	if (narenas > 1) {
+		static const unsigned primes[] = {1, 3, 5, 7, 11, 13, 17, 19,
+		    23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83,
+		    89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
+		    151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
+		    223, 227, 229, 233, 239, 241, 251, 257, 263};
+		unsigned nprimes, parenas;
+
+		/*
+		 * Pick a prime number of hash arenas that is more than narenas
+		 * so that direct hashing of pthread_self() pointers tends to
+		 * spread allocations evenly among the arenas.
+		 */
+		assert((narenas & 1) == 0); /* narenas must be even. */
+		nprimes = (sizeof(primes) >> SIZEOF_INT_2POW);
+		parenas = primes[nprimes - 1]; /* In case not enough primes. */
+		for (i = 1; i < nprimes; i++) {
+			if (primes[i] > narenas) {
+				parenas = primes[i];
+				break;
+			}
+		}
+		narenas = parenas;
+	}
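+
+	/*
+	 * For example, if narenas were 16, the first prime in the table
+	 * larger than 16 is 17, so 17 arenas would be used for hashing
+	 * pthread_self() values.
+	 */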
+#endif
+
+#ifndef NO_TLS
+#  ifndef MALLOC_BALANCE
+	next_arena = 0;
+#  endif
+#endif
+
+	/* Allocate and initialize arenas. */
+	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
+	if (arenas == NULL) {
+#ifndef MOZ_MEMORY_WINDOWS
+		malloc_mutex_unlock(&init_lock);
+#endif
+		return (true);
+	}
+	/*
+	 * Zero the array.  In practice, this should always be pre-zeroed,
+	 * since it was just mmap()ed, but let's be sure.
+	 */
+	memset(arenas, 0, sizeof(arena_t *) * narenas);
+
+	/*
+	 * Initialize one arena here.  The rest are lazily created in
+	 * choose_arena_hard().
+	 */
+	arenas_extend(0);
+	if (arenas[0] == NULL) {
+#ifndef MOZ_MEMORY_WINDOWS
+		malloc_mutex_unlock(&init_lock);
+#endif
+		return (true);
+	}
+#ifndef NO_TLS
+	/*
+	 * Assign the initial arena to the initial thread, in order to avoid
+	 * spurious creation of an extra arena if the application switches to
+	 * threaded mode.
+	 */
+#ifdef MOZ_MEMORY_WINDOWS
+	TlsSetValue(tlsIndex, arenas[0]);
+#else
+	arenas_map = arenas[0];
+#endif
+#endif
+
+	/*
+	 * Seed here for the initial thread, since choose_arena_hard() is only
+	 * called for other threads.  The seed values don't really matter.
+	 */
+#ifdef MALLOC_LAZY_FREE
+	SPRN(lazy_free, 42);
+#endif
+#ifdef MALLOC_BALANCE
+	SPRN(balance, 42);
+#endif
+
+	malloc_spin_init(&arenas_lock);
+
+	malloc_initialized = true;
+#ifndef MOZ_MEMORY_WINDOWS
+	malloc_mutex_unlock(&init_lock);
+#endif
+	return (false);
+}
+
+/* XXX Why not just expose malloc_print_stats()? */
+#ifdef MOZ_MEMORY_WINDOWS
+void
+malloc_shutdown()
+{
+	malloc_print_stats();
+}
+#endif
+
+/*
+ * End general internal functions.
+ */
+/******************************************************************************/
+/*
+ * Begin malloc(3)-compatible functions.
+ */
+#ifdef MOZ_MEMORY_DARWIN
+__attribute__((visibility("default"))) 
+inline void *
+moz_malloc(size_t size)
+#else
+void *
+malloc(size_t size)
+#endif
+{
+	void *ret;
+
+	if (malloc_init()) {
+		ret = NULL;
+		goto RETURN;
+	}
+
+	if (size == 0) {
+		if (opt_sysv == false)
+			size = 1;
+		else {
+			ret = NULL;
+			goto RETURN;
+		}
+	}
+
+	ret = imalloc(size);
+
+RETURN:
+	if (ret == NULL) {
+		if (opt_xmalloc) {
+			_malloc_message(_getprogname(),
+			    ": (malloc) Error in malloc(): out of memory\n", "",
+			    "");
+			abort();
+		}
+		errno = ENOMEM;
+	}
+
+	UTRACE(0, size, ret);
+	return (ret);
+}
+
+#ifdef MOZ_MEMORY_DARWIN
+__attribute__((visibility("default"))) 
+inline int
+moz_posix_memalign(void **memptr, size_t alignment, size_t size)
+#else
+int
+posix_memalign(void **memptr, size_t alignment, size_t size)
+#endif
+{
+	int ret;
+	void *result;
+
+	if (malloc_init())
+		result = NULL;
+	else {
+		/* Make sure that alignment is a large enough power of 2. */
+		if (((alignment - 1) & alignment) != 0
+		    || alignment < sizeof(void *)) {
+			if (opt_xmalloc) {
+				_malloc_message(_getprogname(),
+				    ": (malloc) Error in posix_memalign(): "
+				    "invalid alignment\n", "", "");
+				abort();
+			}
+			result = NULL;
+			ret = EINVAL;
+			goto RETURN;
+		}
+
+		result = ipalloc(alignment, size);
+	}
+
+	if (result == NULL) {
+		if (opt_xmalloc) {
+			_malloc_message(_getprogname(),
+			": (malloc) Error in posix_memalign(): out of memory\n",
+			"", "");
+			abort();
+		}
+		ret = ENOMEM;
+		goto RETURN;
+	}
+
+	*memptr = result;
+	ret = 0;
+
+RETURN:
+	UTRACE(0, size, result);
+	return (ret);
+}
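+
+/*
+ * For example, posix_memalign(&p, 64, 100) requests 100 bytes aligned to a
+ * 64 byte boundary; an alignment that is not a power of two, or that is
+ * smaller than sizeof(void *), fails with EINVAL.
+ */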
+
+#ifdef MOZ_MEMORY_DARWIN
+__attribute__((visibility("default"))) 
+inline void *
+moz_memalign(size_t alignment, size_t size)
+#else
+void *
+memalign(size_t alignment, size_t size)
+#endif
+{
+	void *ret;
+
+#ifdef MOZ_MEMORY_DARWIN
+	if (moz_posix_memalign(&ret, alignment, size) != 0)
+#else
+	if (posix_memalign(&ret, alignment, size) != 0)
+#endif
+		return (NULL);
+
+	return (ret);
+}
+
+#ifdef MOZ_MEMORY_DARWIN
+__attribute__((visibility("default"))) 
+inline void *
+moz_valloc(size_t size)
+#else
+void *
+valloc(size_t size)
+#endif
+{
+#ifdef MOZ_MEMORY_DARWIN
+	return (moz_memalign(pagesize, size));
+#else
+	return (memalign(pagesize, size));
+#endif
+}
+
+#ifdef MOZ_MEMORY_DARWIN
+__attribute__((visibility("default"))) 
+inline void *
+moz_calloc(size_t num, size_t size)
+#else
+void *
+calloc(size_t num, size_t size)
+#endif
+{
+	void *ret;
+	size_t num_size;
+
+	if (malloc_init()) {
+		num_size = 0;
+		ret = NULL;
+		goto RETURN;
+	}
+
+	num_size = num * size;
+	if (num_size == 0) {
+		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
+			num_size = 1;
+		else {
+			ret = NULL;
+			goto RETURN;
+		}
+	/*
+	 * Try to avoid division here.  We know that it isn't possible to
+	 * overflow during multiplication if neither operand uses any of the
+	 * most significant half of the bits in a size_t.
+	 */
+	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
+	    && (num_size / size != num)) {
+		/* size_t overflow. */
+		ret = NULL;
+		goto RETURN;
+	}
+
+	ret = icalloc(num_size);
+
+RETURN:
+	if (ret == NULL) {
+		if (opt_xmalloc) {
+			_malloc_message(_getprogname(),
+			    ": (malloc) Error in calloc(): out of memory\n", "",
+			    "");
+			abort();
+		}
+		errno = ENOMEM;
+	}
+
+	UTRACE(0, num_size, ret);
+	return (ret);
+}
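+
+/*
+ * In the overflow check above, with a 32-bit size_t the mask is
+ * SIZE_T_MAX << 16; if neither num nor size uses its upper 16 bits, the
+ * product fits in 32 bits and cannot overflow, so the division is skipped.
+ */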
+
+#ifdef MOZ_MEMORY_DARWIN
+__attribute__((visibility("default"))) 
+inline void *
+moz_realloc(void *ptr, size_t size)
+#else
+void *
+realloc(void *ptr, size_t size)
+#endif
+{
+	void *ret;
+
+	if (size == 0) {
+		if (opt_sysv == false)
+			size = 1;
+		else {
+			if (ptr != NULL)
+				idalloc(ptr);
+			ret = NULL;
+			goto RETURN;
+		}
+	}
+
+	if (ptr != NULL) {
+		assert(malloc_initialized);
+
+		ret = iralloc(ptr, size);
+
+		if (ret == NULL) {
+			if (opt_xmalloc) {
+				_malloc_message(_getprogname(),
+				    ": (malloc) Error in realloc(): out of "
+				    "memory\n", "", "");
+				abort();
+			}
+			errno = ENOMEM;
+		}
+	} else {
+		if (malloc_init())
+			ret = NULL;
+		else
+			ret = imalloc(size);
+
+		if (ret == NULL) {
+			if (opt_xmalloc) {
+				_malloc_message(_getprogname(),
+				    ": (malloc) Error in realloc(): out of "
+				    "memory\n", "", "");
+				abort();
+			}
+			errno = ENOMEM;
+		}
+	}
+
+RETURN:
+	UTRACE(ptr, size, ret);
+	return (ret);
+}
+
+#ifdef MOZ_MEMORY_DARWIN
+__attribute__((visibility("default"))) 
+inline void
+moz_free(void *ptr)
+#else
+void
+free(void *ptr)
+#endif
+{
+
+	UTRACE(ptr, 0, 0);
+	if (ptr != NULL) {
+		assert(malloc_initialized);
+
+		idalloc(ptr);
+	}
+}
+
+/*
+ * End malloc(3)-compatible functions.
+ */
+/******************************************************************************/
+/*
+ * Begin non-standard functions.
+ */
+
+#ifdef MOZ_MEMORY_DARWIN
+__attribute__((visibility("default"))) 
+inline size_t
+moz_malloc_usable_size(const void *ptr)
+#else
+size_t
+malloc_usable_size(const void *ptr)
+#endif
+{
+
+	assert(ptr != NULL);
+
+	return (isalloc(ptr));
+}
+
+#ifdef MOZ_MEMORY_WINDOWS
+void*
+_recalloc(void *ptr, size_t count, size_t size)
+{
+	size_t newsize = count * size;
+
+	/*
+	 * Note that the count * size multiplication is not checked for
+	 * overflow here, and any region beyond the original allocation is
+	 * not zero-filled.
+	 */
+	ptr = realloc(ptr, newsize);
+
+	return ptr;
+}
+
+/*
+ * This impl of _expand doesn't ever actually expand or shrink blocks: it
+ * simply replies that you may continue using a shrunk block.
+ */
+void*
+_expand(void *ptr, size_t newsize)
+{
+	if (isalloc(ptr) >= newsize)
+		return ptr;
+
+	return NULL;
+}
+
+size_t
+_msize(const void *ptr)
+{
+	return malloc_usable_size(ptr);
+}
+#endif
+
+
+/*
+ * End non-standard functions.
+ */
+/******************************************************************************/
+/*
+ * Begin library-private functions, used by threading libraries for protection
+ * of malloc during fork().  These functions are only called if the program is
+ * running in threaded mode, so there is no need to check whether the program
+ * is threaded here.
+ */
+
+void
+_malloc_prefork(void)
+{
+	unsigned i;
+
+	/* Acquire all mutexes in a safe order. */
+
+	malloc_spin_lock(&arenas_lock);
+	for (i = 0; i < narenas; i++) {
+		if (arenas[i] != NULL)
+			malloc_spin_lock(&arenas[i]->lock);
+	}
+	malloc_spin_unlock(&arenas_lock);
+
+	malloc_mutex_lock(&base_mtx);
+
+	malloc_mutex_lock(&huge_mtx);
+
+#ifdef MALLOC_DSS
+	malloc_mutex_lock(&dss_mtx);
+#endif
+}
+
+void
+_malloc_postfork(void)
+{
+	unsigned i;
+
+	/* Release all mutexes, now that fork() has completed. */
+
+#ifdef MALLOC_DSS
+	malloc_mutex_unlock(&dss_mtx);
+#endif
+
+	malloc_mutex_unlock(&huge_mtx);
+
+	malloc_mutex_unlock(&base_mtx);
+
+	malloc_spin_lock(&arenas_lock);
+	for (i = 0; i < narenas; i++) {
+		if (arenas[i] != NULL)
+			malloc_spin_unlock(&arenas[i]->lock);
+	}
+	malloc_spin_unlock(&arenas_lock);
+}
+
+/*
+ * End library-private functions.
+ */
+/******************************************************************************/
+
+
+#ifdef MOZ_MEMORY_DARWIN
+static malloc_zone_t zone;
+static struct malloc_introspection_t zone_introspect;
+
+static size_t
+zone_size(malloc_zone_t *zone, void *ptr)
+{
+	size_t ret = 0;
+	arena_chunk_t *chunk;
+
+	/*
+	 * There appear to be places within Darwin (such as setenv(3)) that
+	 * cause calls to this function with pointers that *no* zone owns.  If
+	 * we knew that all pointers were owned by *some* zone, we could split
+	 * our zone into two parts, and use one as the default allocator and
+	 * the other as the default deallocator/reallocator.  Since that will
+	 * not work in practice, we must check all pointers to assure that they
+	 * reside within a mapped chunk before determining size.
+	 */
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr) {
+		arena_t *arena;
+		unsigned i;
+		arena_t *arenas_snapshot[narenas];
+
+		/*
+		 * Make a copy of the arenas vector while holding arenas_lock in
+		 * order to assure that all elements are up to date in this
+		 * processor's cache.  Do this outside the following loop in
+		 * order to reduce lock acquisitions.
+		 */
+		malloc_spin_lock(&arenas_lock);
+		memcpy(&arenas_snapshot, arenas, sizeof(arena_t *) * narenas);
+		malloc_spin_unlock(&arenas_lock);
+
+		/* Region. */
+		for (i = 0; i < narenas; i++) {
+			arena = arenas_snapshot[i];
+
+			if (arena != NULL) {
+				bool own;
+
+				/* Make sure ptr is within a chunk. */
+				malloc_spin_lock(&arena->lock);
+				if (RB_FIND(arena_chunk_tree_s, &arena->chunks,
+				    chunk) == chunk)
+					own = true;
+				else
+					own = false;
+				malloc_spin_unlock(&arena->lock);
+
+				if (own) {
+					ret = arena_salloc(ptr);
+					goto RETURN;
+				}
+			}
+		}
+	} else {
+		extent_node_t *node;
+		extent_node_t key;
+
+		/* Chunk. */
+		key.addr = (void *)chunk;
+		malloc_mutex_lock(&huge_mtx);
+		node = RB_FIND(extent_tree_ad_s, &huge, &key);
+		if (node != NULL)
+			ret = node->size;
+		else
+			ret = 0;
+		malloc_mutex_unlock(&huge_mtx);
+	}
+
+RETURN:
+	return (ret);
+}
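+
+/*
+ * A return value of 0 from zone_size() is treated by Darwin's malloc as
+ * meaning that this zone does not own ptr; pointers inside one of our arena
+ * chunks report their size via arena_salloc(), while chunk-aligned pointers
+ * are looked up in the huge allocation tree.
+ */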
+
+static void *
+zone_malloc(malloc_zone_t *zone, size_t size)
+{
+
+	return (moz_malloc(size));
+}
+
+static void *
+zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
+{
+
+	return (moz_calloc(num, size));
+}
+
+static void *
+zone_valloc(malloc_zone_t *zone, size_t size)
+{
+	void *ret = NULL; /* Assignment avoids useless compiler warning. */
+
+	moz_posix_memalign(&ret, pagesize, size);
+
+	return (ret);
+}
+
+static void
+zone_free(malloc_zone_t *zone, void *ptr)
+{
+
+	moz_free(ptr);
+}
+
+static void *
+zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
+{
+
+	return (moz_realloc(ptr, size));
+}
+
+static void *
+zone_destroy(malloc_zone_t *zone)
+{
+
+	/* This function should never be called. */
+	assert(false);
+	return (NULL);
+}
+
+static size_t
+zone_good_size(malloc_zone_t *zone, size_t size)
+{
+	size_t ret;
+	void *p;
+
+	/*
+	 * Actually create an object of the appropriate size, then find out
+	 * how large it could have been without moving up to the next size
+	 * class.
+	 */
+	p = moz_malloc(size);
+	if (p != NULL) {
+		ret = isalloc(p);
+		moz_free(p);
+	} else
+		ret = size;
+
+	return (ret);
+}
+
+static void
+zone_force_lock(malloc_zone_t *zone)
+{
+
+	_malloc_prefork();
+}
+
+static void
+zone_force_unlock(malloc_zone_t *zone)
+{
+
+	_malloc_postfork();
+}
+
+static malloc_zone_t *
+create_zone(void)
+{
+
+	assert(malloc_initialized);
+
+	zone.size = (void *)zone_size;
+	zone.malloc = (void *)zone_malloc;
+	zone.calloc = (void *)zone_calloc;
+	zone.valloc = (void *)zone_valloc;
+	zone.free = (void *)zone_free;
+	zone.realloc = (void *)zone_realloc;
+	zone.destroy = (void *)zone_destroy;
+	zone.zone_name = "jemalloc_zone";
+	zone.batch_malloc = NULL;
+	zone.batch_free = NULL;
+	zone.introspect = &zone_introspect;
+
+	zone_introspect.enumerator = NULL;
+	zone_introspect.good_size = (void *)zone_good_size;
+	zone_introspect.check = NULL;
+	zone_introspect.print = NULL;
+	zone_introspect.log = NULL;
+	zone_introspect.force_lock = (void *)zone_force_lock;
+	zone_introspect.force_unlock = (void *)zone_force_unlock;
+	zone_introspect.statistics = NULL;
+
+	return (&zone);
+}
+
+__attribute__((visibility("default")))
+void
+jemalloc_darwin_init(void)
+{
+	extern unsigned malloc_num_zones;
+	extern malloc_zone_t **malloc_zones;
+
+	if (malloc_init())
+		abort();
+
+	/*
+	 * The following code is *not* thread-safe, so it's critical that
+	 * initialization be manually triggered.
+	 */
+
+	/* Register the custom zones. */
+	malloc_zone_register(create_zone());
+	assert(malloc_zones[malloc_num_zones - 1] == &zone);
+
+	/*
+	 * Shift malloc_zones around so that zone is first, which makes it the
+	 * default zone.
+	 */
+	assert(malloc_num_zones > 1);
+	memmove(&malloc_zones[1], &malloc_zones[0],
+		sizeof(malloc_zone_t *) * (malloc_num_zones - 1));
+	malloc_zones[0] = &zone;
+}
+
+static void
+jemalloc_darwin_exit(void)
+{
+}
+
+#endif
new file mode 100644
--- /dev/null
+++ b/memory/jemalloc/tree.h
@@ -0,0 +1,764 @@
+/*	$NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $	*/
+/*	$OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $	*/
+/* $FreeBSD: src/sys/sys/tree.h,v 1.7 2007/12/28 07:03:26 jasone Exp $ */
+
+/*-
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef	_SYS_TREE_H_
+#define	_SYS_TREE_H_
+
+/*
+ * This file defines data structures for different types of trees:
+ * splay trees and red-black trees.
+ *
+ * A splay tree is a self-organizing data structure.  Every operation
+ * on the tree causes a splay to happen.  The splay moves the requested
+ * node to the root of the tree and partly rebalances it.
+ *
+ * This has the benefit that request locality causes faster lookups as
+ * the requested nodes move to the top of the tree.  On the other hand,
+ * every lookup causes memory writes.
+ *
+ * The Balance Theorem bounds the total access time for m operations
+ * and n inserts on an initially empty tree as O((m + n)lg n).  The
+ * amortized cost for a sequence of m accesses to a splay tree is O(lg n).
+ *
+ * A red-black tree is a binary search tree with the node color as an
+ * extra attribute.  It fulfills a set of conditions:
+ *	- every search path from the root to a leaf consists of the
+ *	  same number of black nodes,
+ *	- each red node (except for the root) has a black parent,
+ *	- each leaf node is black.
+ *
+ * Every operation on a red-black tree is bounded as O(lg n).
+ * The maximum height of a red-black tree is 2lg (n+1).
+ */
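+
+/*
+ * An illustrative usage sketch (the type and function names here are
+ * hypothetical): a node type embeds an RB_ENTRY, a tree head is declared
+ * with RB_HEAD, and RB_GENERATE() emits the functions that RB_INSERT(),
+ * RB_FIND() and RB_FOREACH() expand to:
+ *
+ *	struct node {
+ *		int key;
+ *		RB_ENTRY(node) link;
+ *	};
+ *	RB_HEAD(tree, node) head = RB_INITIALIZER(&head);
+ *
+ *	static int
+ *	node_cmp(struct node *a, struct node *b)
+ *	{
+ *		return ((a->key > b->key) - (a->key < b->key));
+ *	}
+ *	RB_PROTOTYPE(tree, node, link, node_cmp)
+ *	RB_GENERATE(tree, node, link, node_cmp)
+ */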
+
+#define SPLAY_HEAD(name, type)						\
+struct name {								\
+	struct type *sph_root; /* root of the tree */			\
+}
+
+#define SPLAY_INITIALIZER(root)						\
+	{ NULL }
+
+#define SPLAY_INIT(root) do {						\
+	(root)->sph_root = NULL;					\
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ENTRY(type)						\
+struct {								\
+	struct type *spe_left; /* left element */			\
+	struct type *spe_right; /* right element */			\
+}
+
+#define SPLAY_LEFT(elm, field)		(elm)->field.spe_left
+#define SPLAY_RIGHT(elm, field)		(elm)->field.spe_right
+#define SPLAY_ROOT(head)		(head)->sph_root
+#define SPLAY_EMPTY(head)		(SPLAY_ROOT(head) == NULL)
+
+/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
+#define SPLAY_ROTATE_RIGHT(head, tmp, field) do {			\
+	SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field);	\
+	SPLAY_RIGHT(tmp, field) = (head)->sph_root;			\
+	(head)->sph_root = tmp;						\
+} while (/*CONSTCOND*/ 0)
+	
+#define SPLAY_ROTATE_LEFT(head, tmp, field) do {			\
+	SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field);	\
+	SPLAY_LEFT(tmp, field) = (head)->sph_root;			\
+	(head)->sph_root = tmp;						\
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKLEFT(head, tmp, field) do {				\
+	SPLAY_LEFT(tmp, field) = (head)->sph_root;			\
+	tmp = (head)->sph_root;						\
+	(head)->sph_root = SPLAY_LEFT((head)->sph_root, field);		\
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_LINKRIGHT(head, tmp, field) do {				\
+	SPLAY_RIGHT(tmp, field) = (head)->sph_root;			\
+	tmp = (head)->sph_root;						\
+	(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);	\
+} while (/*CONSTCOND*/ 0)
+
+#define SPLAY_ASSEMBLE(head, node, left, right, field) do {		\
+	SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field);	\
+	SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
+	SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field);	\
+	SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field);	\
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+
+#define SPLAY_PROTOTYPE(name, type, field, cmp)				\
+void name##_SPLAY(struct name *, struct type *);			\
+void name##_SPLAY_MINMAX(struct name *, int);				\
+struct type *name##_SPLAY_INSERT(struct name *, struct type *);		\
+struct type *name##_SPLAY_REMOVE(struct name *, struct type *);		\
+									\
+/* Finds the node with the same key as elm */				\
+static __inline struct type *						\
+name##_SPLAY_FIND(struct name *head, struct type *elm)			\
+{									\
+	if (SPLAY_EMPTY(head))						\
+		return(NULL);						\
+	name##_SPLAY(head, elm);					\
+	if ((cmp)(elm, (head)->sph_root) == 0)				\
+		return (head->sph_root);				\
+	return (NULL);							\
+}									\
+									\
+static __inline struct type *						\
+name##_SPLAY_NEXT(struct name *head, struct type *elm)			\
+{									\
+	name##_SPLAY(head, elm);					\
+	if (SPLAY_RIGHT(elm, field) != NULL) {				\
+		elm = SPLAY_RIGHT(elm, field);				\
+		while (SPLAY_LEFT(elm, field) != NULL) {		\
+			elm = SPLAY_LEFT(elm, field);			\
+		}							\
+	} else								\
+		elm = NULL;						\
+	return (elm);							\
+}									\
+									\
+static __inline struct type *						\
+name##_SPLAY_MIN_MAX(struct name *head, int val)			\
+{									\
+	name##_SPLAY_MINMAX(head, val);					\
+        return (SPLAY_ROOT(head));					\
+}
+
+/* Main splay operation.
+ * Moves node close to the key of elm to top
+ */
+#define SPLAY_GENERATE(name, type, field, cmp)				\
+struct type *								\
+name##_SPLAY_INSERT(struct name *head, struct type *elm)		\
+{									\
+    if (SPLAY_EMPTY(head)) {						\
+	    SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL;	\
+    } else {								\
+	    int __comp;							\
+	    name##_SPLAY(head, elm);					\
+	    __comp = (cmp)(elm, (head)->sph_root);			\
+	    if(__comp < 0) {						\
+		    SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
+		    SPLAY_RIGHT(elm, field) = (head)->sph_root;		\
+		    SPLAY_LEFT((head)->sph_root, field) = NULL;		\
+	    } else if (__comp > 0) {					\
+		    SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
+		    SPLAY_LEFT(elm, field) = (head)->sph_root;		\
+		    SPLAY_RIGHT((head)->sph_root, field) = NULL;	\
+	    } else							\
+		    return ((head)->sph_root);				\
+    }									\
+    (head)->sph_root = (elm);						\
+    return (NULL);							\
+}									\
+									\
+struct type *								\
+name##_SPLAY_REMOVE(struct name *head, struct type *elm)		\
+{									\
+	struct type *__tmp;						\
+	if (SPLAY_EMPTY(head))						\
+		return (NULL);						\
+	name##_SPLAY(head, elm);					\
+	if ((cmp)(elm, (head)->sph_root) == 0) {			\
+		if (SPLAY_LEFT((head)->sph_root, field) == NULL) {	\
+			(head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
+		} else {						\
+			__tmp = SPLAY_RIGHT((head)->sph_root, field);	\
+			(head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
+			name##_SPLAY(head, elm);			\
+			SPLAY_RIGHT((head)->sph_root, field) = __tmp;	\
+		}							\
+		return (elm);						\
+	}								\
+	return (NULL);							\
+}									\
+									\
+void									\
+name##_SPLAY(struct name *head, struct type *elm)			\
+{									\
+	struct type __node, *__left, *__right, *__tmp;			\
+	int __comp;							\
+\
+	SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+	__left = __right = &__node;					\
+\
+	while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) {		\
+		if (__comp < 0) {					\
+			__tmp = SPLAY_LEFT((head)->sph_root, field);	\
+			if (__tmp == NULL)				\
+				break;					\
+			if ((cmp)(elm, __tmp) < 0){			\
+				SPLAY_ROTATE_RIGHT(head, __tmp, field);	\
+				if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+					break;				\
+			}						\
+			SPLAY_LINKLEFT(head, __right, field);		\
+		} else if (__comp > 0) {				\
+			__tmp = SPLAY_RIGHT((head)->sph_root, field);	\
+			if (__tmp == NULL)				\
+				break;					\
+			if ((cmp)(elm, __tmp) > 0){			\
+				SPLAY_ROTATE_LEFT(head, __tmp, field);	\
+				if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+					break;				\
+			}						\
+			SPLAY_LINKRIGHT(head, __left, field);		\
+		}							\
+	}								\
+	SPLAY_ASSEMBLE(head, &__node, __left, __right, field);		\
+}									\
+									\
+/* Splay with either the minimum or the maximum element			\
+ * Used to find minimum or maximum element in tree.			\
+ */									\
+void name##_SPLAY_MINMAX(struct name *head, int __comp) \
+{									\
+	struct type __node, *__left, *__right, *__tmp;			\
+\
+	SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+	__left = __right = &__node;					\
+\
+	while (1) {							\
+		if (__comp < 0) {					\
+			__tmp = SPLAY_LEFT((head)->sph_root, field);	\
+			if (__tmp == NULL)				\
+				break;					\
+			if (__comp < 0){				\
+				SPLAY_ROTATE_RIGHT(head, __tmp, field);	\
+				if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+					break;				\
+			}						\
+			SPLAY_LINKLEFT(head, __right, field);		\
+		} else if (__comp > 0) {				\
+			__tmp = SPLAY_RIGHT((head)->sph_root, field);	\
+			if (__tmp == NULL)				\
+				break;					\
+			if (__comp > 0) {				\
+				SPLAY_ROTATE_LEFT(head, __tmp, field);	\
+				if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+					break;				\
+			}						\
+			SPLAY_LINKRIGHT(head, __left, field);		\
+		}							\
+	}								\
+	SPLAY_ASSEMBLE(head, &__node, __left, __right, field);		\
+}
+
+#define SPLAY_NEGINF	-1
+#define SPLAY_INF	1
+
+#define SPLAY_INSERT(name, x, y)	name##_SPLAY_INSERT(x, y)
+#define SPLAY_REMOVE(name, x, y)	name##_SPLAY_REMOVE(x, y)
+#define SPLAY_FIND(name, x, y)		name##_SPLAY_FIND(x, y)
+#define SPLAY_NEXT(name, x, y)		name##_SPLAY_NEXT(x, y)
+#define SPLAY_MIN(name, x)		(SPLAY_EMPTY(x) ? NULL	\
+					: name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
+#define SPLAY_MAX(name, x)		(SPLAY_EMPTY(x) ? NULL	\
+					: name##_SPLAY_MIN_MAX(x, SPLAY_INF))
+
+#define SPLAY_FOREACH(x, name, head)					\
+	for ((x) = SPLAY_MIN(name, head);				\
+	     (x) != NULL;						\
+	     (x) = SPLAY_NEXT(name, head, x))
+
+/* Macros that define a red-black tree */
+#define RB_HEAD(name, type)						\
+struct name {								\
+	struct type *rbh_root; /* root of the tree */			\
+}
+
+#define RB_INITIALIZER(root)						\
+	{ NULL }
+
+#define RB_INIT(root) do {						\
+	(root)->rbh_root = NULL;					\
+} while (/*CONSTCOND*/ 0)
+
+#define RB_BLACK	0
+#define RB_RED		1
+#define RB_ENTRY(type)							\
+struct {								\
+	struct type *rbe_left;		/* left element */		\
+	struct type *rbe_right;		/* right element */		\
+	struct type *rbe_parent;	/* parent element */		\
+	int rbe_color;			/* node color */		\
+}
+
+#define RB_LEFT(elm, field)		(elm)->field.rbe_left
+#define RB_RIGHT(elm, field)		(elm)->field.rbe_right
+#define RB_PARENT(elm, field)		(elm)->field.rbe_parent
+#define RB_COLOR(elm, field)		(elm)->field.rbe_color
+#define RB_ROOT(head)			(head)->rbh_root
+#define RB_EMPTY(head)			(RB_ROOT(head) == NULL)
+
+#define RB_SET(elm, parent, field) do {					\
+	RB_PARENT(elm, field) = parent;					\
+	RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL;		\
+	RB_COLOR(elm, field) = RB_RED;					\
+} while (/*CONSTCOND*/ 0)
+
+#define RB_SET_BLACKRED(black, red, field) do {				\
+	RB_COLOR(black, field) = RB_BLACK;				\
+	RB_COLOR(red, field) = RB_RED;					\
+} while (/*CONSTCOND*/ 0)
+
+#ifndef RB_AUGMENT
+#define RB_AUGMENT(x)	do {} while (0)
+#endif
+
+#define RB_ROTATE_LEFT(head, elm, tmp, field) do {			\
+	(tmp) = RB_RIGHT(elm, field);					\
+	if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) {	\
+		RB_PARENT(RB_LEFT(tmp, field), field) = (elm);		\
+	}								\
+	RB_AUGMENT(elm);						\
+	if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) {	\
+		if ((elm) == RB_LEFT(RB_PARENT(elm, field), field))	\
+			RB_LEFT(RB_PARENT(elm, field), field) = (tmp);	\
+		else							\
+			RB_RIGHT(RB_PARENT(elm, field), field) = (tmp);	\
+	} else								\
+		(head)->rbh_root = (tmp);				\
+	RB_LEFT(tmp, field) = (elm);					\
+	RB_PARENT(elm, field) = (tmp);					\
+	RB_AUGMENT(tmp);						\
+	if ((RB_PARENT(tmp, field)))					\
+		RB_AUGMENT(RB_PARENT(tmp, field));			\
+} while (/*CONSTCOND*/ 0)
+
+#define RB_ROTATE_RIGHT(head, elm, tmp, field) do {			\
+	(tmp) = RB_LEFT(elm, field);					\
+	if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) {	\
+		RB_PARENT(RB_RIGHT(tmp, field), field) = (elm);		\
+	}								\
+	RB_AUGMENT(elm);						\
+	if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) {	\
+		if ((elm) == RB_LEFT(RB_PARENT(elm, field), field))	\
+			RB_LEFT(RB_PARENT(elm, field), field) = (tmp);	\
+		else							\
+			RB_RIGHT(RB_PARENT(elm, field), field) = (tmp);	\
+	} else								\
+		(head)->rbh_root = (tmp);				\
+	RB_RIGHT(tmp, field) = (elm);					\
+	RB_PARENT(elm, field) = (tmp);					\
+	RB_AUGMENT(tmp);						\
+	if ((RB_PARENT(tmp, field)))					\
+		RB_AUGMENT(RB_PARENT(tmp, field));			\
+} while (/*CONSTCOND*/ 0)
+
+/* Generates prototypes and inline functions */
+#define	RB_PROTOTYPE(name, type, field, cmp)				\
+	RB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
+#define	RB_PROTOTYPE_STATIC(name, type, field, cmp)			\
+	RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
+#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr)		\
+attr void name##_RB_INSERT_COLOR(struct name *, struct type *);		\
+attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
+attr struct type *name##_RB_REMOVE(struct name *, struct type *);	\
+attr struct type *name##_RB_INSERT(struct name *, struct type *);	\
+attr struct type *name##_RB_FIND(struct name *, struct type *);		\
+attr struct type *name##_RB_NFIND(struct name *, struct type *);	\
+attr struct type *name##_RB_NEXT(struct type *);			\
+attr struct type *name##_RB_PREV(struct type *);			\
+attr struct type *name##_RB_MINMAX(struct name *, int);			\
+									\
+
+/* Main rb operation.
+ * Moves node close to the key of elm to top
+ */
+#define	RB_GENERATE(name, type, field, cmp)				\
+	RB_GENERATE_INTERNAL(name, type, field, cmp,)
+#define	RB_GENERATE_STATIC(name, type, field, cmp)			\
+	RB_GENERATE_INTERNAL(name, type, field, cmp, static)
+#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr)		\
+attr void								\
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm)		\
+{									\
+	struct type *parent, *gparent, *tmp;				\
+	while ((parent = RB_PARENT(elm, field)) != NULL &&		\
+	    RB_COLOR(parent, field) == RB_RED) {			\
+		gparent = RB_PARENT(parent, field);			\
+		if (parent == RB_LEFT(gparent, field)) {		\
+			tmp = RB_RIGHT(gparent, field);			\
+			if (tmp && RB_COLOR(tmp, field) == RB_RED) {	\
+				RB_COLOR(tmp, field) = RB_BLACK;	\
+				RB_SET_BLACKRED(parent, gparent, field);\
+				elm = gparent;				\
+				continue;				\
+			}						\
+			if (RB_RIGHT(parent, field) == elm) {		\
+				RB_ROTATE_LEFT(head, parent, tmp, field);\
+				tmp = parent;				\
+				parent = elm;				\
+				elm = tmp;				\
+			}						\
+			RB_SET_BLACKRED(parent, gparent, field);	\
+			RB_ROTATE_RIGHT(head, gparent, tmp, field);	\
+		} else {						\
+			tmp = RB_LEFT(gparent, field);			\
+			if (tmp && RB_COLOR(tmp, field) == RB_RED) {	\
+				RB_COLOR(tmp, field) = RB_BLACK;	\
+				RB_SET_BLACKRED(parent, gparent, field);\
+				elm = gparent;				\
+				continue;				\
+			}						\
+			if (RB_LEFT(parent, field) == elm) {		\
+				RB_ROTATE_RIGHT(head, parent, tmp, field);\
+				tmp = parent;				\
+				parent = elm;				\
+				elm = tmp;				\
+			}						\
+			RB_SET_BLACKRED(parent, gparent, field);	\
+			RB_ROTATE_LEFT(head, gparent, tmp, field);	\
+		}							\
+	}								\
+	RB_COLOR(head->rbh_root, field) = RB_BLACK;			\
+}									\
+									\
+attr void								\
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
+{									\
+	struct type *tmp;						\
+	while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) &&	\
+	    elm != RB_ROOT(head)) {					\
+		if (RB_LEFT(parent, field) == elm) {			\
+			tmp = RB_RIGHT(parent, field);			\
+			if (RB_COLOR(tmp, field) == RB_RED) {		\
+				RB_SET_BLACKRED(tmp, parent, field);	\
+				RB_ROTATE_LEFT(head, parent, tmp, field);\
+				tmp = RB_RIGHT(parent, field);		\
+			}						\
+			if ((RB_LEFT(tmp, field) == NULL ||		\
+			    RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+			    (RB_RIGHT(tmp, field) == NULL ||		\
+			    RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+				RB_COLOR(tmp, field) = RB_RED;		\
+				elm = parent;				\
+				parent = RB_PARENT(elm, field);		\
+			} else {					\
+				if (RB_RIGHT(tmp, field) == NULL ||	\
+				    RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
+					struct type *oleft;		\
+					if ((oleft = RB_LEFT(tmp, field)) \
+					    != NULL)			\
+						RB_COLOR(oleft, field) = RB_BLACK;\
+					RB_COLOR(tmp, field) = RB_RED;	\
+					RB_ROTATE_RIGHT(head, tmp, oleft, field);\
+					tmp = RB_RIGHT(parent, field);	\
+				}					\
+				RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+				RB_COLOR(parent, field) = RB_BLACK;	\
+				if (RB_RIGHT(tmp, field))		\
+					RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
+				RB_ROTATE_LEFT(head, parent, tmp, field);\
+				elm = RB_ROOT(head);			\
+				break;					\
+			}						\
+		} else {						\
+			tmp = RB_LEFT(parent, field);			\
+			if (RB_COLOR(tmp, field) == RB_RED) {		\
+				RB_SET_BLACKRED(tmp, parent, field);	\
+				RB_ROTATE_RIGHT(head, parent, tmp, field);\
+				tmp = RB_LEFT(parent, field);		\
+			}						\
+			if ((RB_LEFT(tmp, field) == NULL ||		\
+			    RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+			    (RB_RIGHT(tmp, field) == NULL ||		\
+			    RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+				RB_COLOR(tmp, field) = RB_RED;		\
+				elm = parent;				\
+				parent = RB_PARENT(elm, field);		\
+			} else {					\
+				if (RB_LEFT(tmp, field) == NULL ||	\
+				    RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
+					struct type *oright;		\
+					if ((oright = RB_RIGHT(tmp, field)) \
+					    != NULL)			\
+						RB_COLOR(oright, field) = RB_BLACK;\
+					RB_COLOR(tmp, field) = RB_RED;	\
+					RB_ROTATE_LEFT(head, tmp, oright, field);\
+					tmp = RB_LEFT(parent, field);	\
+				}					\
+				RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+				RB_COLOR(parent, field) = RB_BLACK;	\
+				if (RB_LEFT(tmp, field))		\
+					RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
+				RB_ROTATE_RIGHT(head, parent, tmp, field);\
+				elm = RB_ROOT(head);			\
+				break;					\
+			}						\
+		}							\
+	}								\
+	if (elm)							\
+		RB_COLOR(elm, field) = RB_BLACK;			\
+}									\
+									\
+attr struct type *							\
+name##_RB_REMOVE(struct name *head, struct type *elm)			\
+{									\
+	struct type *child, *parent, *old = elm;			\
+	int color;							\
+	if (RB_LEFT(elm, field) == NULL)				\
+		child = RB_RIGHT(elm, field);				\
+	else if (RB_RIGHT(elm, field) == NULL)				\
+		child = RB_LEFT(elm, field);				\
+	else {								\
+		struct type *left;					\
+		elm = RB_RIGHT(elm, field);				\
+		while ((left = RB_LEFT(elm, field)) != NULL)		\
+			elm = left;					\
+		child = RB_RIGHT(elm, field);				\
+		parent = RB_PARENT(elm, field);				\
+		color = RB_COLOR(elm, field);				\
+		if (child)						\
+			RB_PARENT(child, field) = parent;		\
+		if (parent) {						\
+			if (RB_LEFT(parent, field) == elm)		\
+				RB_LEFT(parent, field) = child;		\
+			else						\
+				RB_RIGHT(parent, field) = child;	\
+			RB_AUGMENT(parent);				\
+		} else							\
+			RB_ROOT(head) = child;				\
+		if (RB_PARENT(elm, field) == old)			\
+			parent = elm;					\
+		(elm)->field = (old)->field;				\
+		if (RB_PARENT(old, field)) {				\
+			if (RB_LEFT(RB_PARENT(old, field), field) == old)\
+				RB_LEFT(RB_PARENT(old, field), field) = elm;\
+			else						\
+				RB_RIGHT(RB_PARENT(old, field), field) = elm;\
+			RB_AUGMENT(RB_PARENT(old, field));		\
+		} else							\
+			RB_ROOT(head) = elm;				\
+		RB_PARENT(RB_LEFT(old, field), field) = elm;		\
+		if (RB_RIGHT(old, field))				\
+			RB_PARENT(RB_RIGHT(old, field), field) = elm;	\
+		if (parent) {						\
+			left = parent;					\
+			do {						\
+				RB_AUGMENT(left);			\
+			} while ((left = RB_PARENT(left, field)) != NULL); \
+		}							\
+		goto color;						\
+	}								\
+	parent = RB_PARENT(elm, field);					\
+	color = RB_COLOR(elm, field);					\
+	if (child)							\
+		RB_PARENT(child, field) = parent;			\
+	if (parent) {							\
+		if (RB_LEFT(parent, field) == elm)			\
+			RB_LEFT(parent, field) = child;			\
+		else							\
+			RB_RIGHT(parent, field) = child;		\
+		RB_AUGMENT(parent);					\
+	} else								\
+		RB_ROOT(head) = child;					\
+color:									\
+	if (color == RB_BLACK)						\
+		name##_RB_REMOVE_COLOR(head, parent, child);		\
+	return (old);							\
+}									\
+									\
+/* Inserts a node into the RB tree */					\
+attr struct type *							\
+name##_RB_INSERT(struct name *head, struct type *elm)			\
+{									\
+	struct type *tmp;						\
+	struct type *parent = NULL;					\
+	int comp = 0;							\
+	tmp = RB_ROOT(head);						\
+	while (tmp) {							\
+		parent = tmp;						\
+		comp = (cmp)(elm, parent);				\
+		if (comp < 0)						\
+			tmp = RB_LEFT(tmp, field);			\
+		else if (comp > 0)					\
+			tmp = RB_RIGHT(tmp, field);			\
+		else							\
+			return (tmp);					\
+	}								\
+	RB_SET(elm, parent, field);					\
+	if (parent != NULL) {						\
+		if (comp < 0)						\
+			RB_LEFT(parent, field) = elm;			\
+		else							\
+			RB_RIGHT(parent, field) = elm;			\
+		RB_AUGMENT(parent);					\
+	} else								\
+		RB_ROOT(head) = elm;					\
+	name##_RB_INSERT_COLOR(head, elm);				\
+	return (NULL);							\
+}									\
+									\
+/* Finds the node with the same key as elm */				\
+attr struct type *							\
+name##_RB_FIND(struct name *head, struct type *elm)			\
+{									\
+	struct type *tmp = RB_ROOT(head);				\
+	int comp;							\
+	while (tmp) {							\
+		comp = cmp(elm, tmp);					\
+		if (comp < 0)						\
+			tmp = RB_LEFT(tmp, field);			\
+		else if (comp > 0)					\
+			tmp = RB_RIGHT(tmp, field);			\
+		else							\
+			return (tmp);					\
+	}								\
+	return (NULL);							\
+}									\
+									\
+/* Finds the first node greater than or equal to the search key */	\
+attr struct type *							\
+name##_RB_NFIND(struct name *head, struct type *elm)			\
+{									\
+	struct type *tmp = RB_ROOT(head);				\
+	struct type *res = NULL;					\
+	int comp;							\
+	while (tmp) {							\
+		comp = cmp(elm, tmp);					\
+		if (comp < 0) {						\
+			res = tmp;					\
+			tmp = RB_LEFT(tmp, field);			\
+		}							\
+		else if (comp > 0)					\
+			tmp = RB_RIGHT(tmp, field);			\
+		else							\
+			return (tmp);					\
+	}								\
+	return (res);							\
+}									\
+									\
+/* ARGSUSED */								\
+attr struct type *							\
+name##_RB_NEXT(struct type *elm)					\
+{									\
+	if (RB_RIGHT(elm, field)) {					\
+		elm = RB_RIGHT(elm, field);				\
+		while (RB_LEFT(elm, field))				\
+			elm = RB_LEFT(elm, field);			\
+	} else {							\
+		if (RB_PARENT(elm, field) &&				\
+		    (elm == RB_LEFT(RB_PARENT(elm, field), field)))	\
+			elm = RB_PARENT(elm, field);			\
+		else {							\
+			while (RB_PARENT(elm, field) &&			\
+			    (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
+				elm = RB_PARENT(elm, field);		\
+			elm = RB_PARENT(elm, field);			\
+		}							\
+	}								\
+	return (elm);							\
+}									\
+									\
+/* ARGSUSED */								\
+attr struct type *							\
+name##_RB_PREV(struct type *elm)					\
+{									\
+	if (RB_LEFT(elm, field)) {					\
+		elm = RB_LEFT(elm, field);				\
+		while (RB_RIGHT(elm, field))				\
+			elm = RB_RIGHT(elm, field);			\
+	} else {							\
+		if (RB_PARENT(elm, field) &&				\
+		    (elm == RB_RIGHT(RB_PARENT(elm, field), field)))	\
+			elm = RB_PARENT(elm, field);			\
+		else {							\
+			while (RB_PARENT(elm, field) &&			\
+			    (elm == RB_LEFT(RB_PARENT(elm, field), field)))\
+				elm = RB_PARENT(elm, field);		\
+			elm = RB_PARENT(elm, field);			\
+		}							\
+	}								\
+	return (elm);							\
+}									\
+									\
+attr struct type *							\
+name##_RB_MINMAX(struct name *head, int val)				\
+{									\
+	struct type *tmp = RB_ROOT(head);				\
+	struct type *parent = NULL;					\
+	while (tmp) {							\
+		parent = tmp;						\
+		if (val < 0)						\
+			tmp = RB_LEFT(tmp, field);			\
+		else							\
+			tmp = RB_RIGHT(tmp, field);			\
+	}								\
+	return (parent);						\
+}
+
+#define RB_NEGINF	-1
+#define RB_INF	1
+
+#define RB_INSERT(name, x, y)	name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y)	name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y)	name##_RB_FIND(x, y)
+#define RB_NFIND(name, x, y)	name##_RB_NFIND(x, y)
+#define RB_NEXT(name, x, y)	name##_RB_NEXT(y)
+#define RB_PREV(name, x, y)	name##_RB_PREV(y)
+#define RB_MIN(name, x)		name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x)		name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head)					\
+	for ((x) = RB_MIN(name, head);					\
+	     (x) != NULL;						\
+	     (x) = name##_RB_NEXT(x))
+
+#define RB_FOREACH_REVERSE(x, name, head)				\
+	for ((x) = RB_MAX(name, head);					\
+	     (x) != NULL;						\
+	     (x) = name##_RB_PREV(x))
+
+#endif	/* _SYS_TREE_H_ */