First check-in, pretty horrible
author      jorendorff@mozilla.com
date        Wed, 28 Nov 2007 10:09:48 -0600
changeset   7:66abaa75a3727ffeba67074a9210006ecdcb1e64
child       8:6e30b55406170d46017ea7e169b27bfba22e3e5f
push id     1
push user   bsmedberg@mozilla.com
push date   Mon, 21 Apr 2008 01:54:18 +0000
files
DO-NOT-USE
configure-with-threadsafe-mmgc
mmgc-bit-checks
mmgc-decref-microfix
mmgc-graphviz
mmgc-remove-precollect-hook
mmgc-rm-isd
mmgc-threadsafe-take2
mmgc-threadsafe-v1
series
sm-features
status
diff --git a/DO-NOT-USE b/DO-NOT-USE
new file mode 100644
--- /dev/null
+++ b/DO-NOT-USE
@@ -0,0 +1,118 @@
+diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
+--- a/MMgc/GC.cpp
++++ b/MMgc/GC.cpp
+@@ -2083,6 +2083,17 @@ bail:
+ 
+ #endif
+ 
++#ifdef MMGC_INTERIOR_PTRS
++	inline bool IsLargeAllocPage(int bits) {
++		return (bits == GC::kGCLargeAllocPageFirst
++				|| bits == GC::kGCLargeAllocPageRest);
++	}
++#else
++	inline bool IsLargeAllocPage(int bits) {
++		return bits == GC::kGCLargeAllocPageFirst;
++	}
++#endif
++
+ 	void GC::MarkItem(GCWorkItem &wi, GCStack<GCWorkItem> &work)
+ 	{
+ 		size_t size = wi.GetSize();
+@@ -2131,27 +2142,32 @@ bail:
+ 			// normalize and divide by 4K to get index
+ 			int bits = GetPageMapValue(val); 
+ 			
+-			if (bits == 1)
++			if (bits == kGCAllocPage)
+ 			{
+-				//GCAlloc::ConservativeMark(work, (void*) (val&~7), workitem.ptr);
+-				const void* item = (void*) (val&~7);
+-
+-				GCAlloc::GCBlock *block = (GCAlloc::GCBlock*) ((uintptr) item & ~0xFFF);
+-
++				const void *item;
++				GCAlloc::GCBlock *block = (GCAlloc::GCBlock*) (val & ~0xFFF);
++
++#ifdef MMGC_INTERIOR_PTRS
++				item = (void*) val;
++#else
+ 				// back up to real beginning
+-				item = GetRealPointer((const void*) item);
++				item = GetRealPointer((const void*) (val & ~7));
++#endif
+ 
+ 				// guard against bogus pointers to the block header
+ 				if(item < block->items)
+ 					continue;
+ 
+-				// make sure this is a pointer to the beginning
+-
+ 				int itemNum = GCAlloc::GetIndex(block, item);
+-
++#ifdef MMGC_INTERIOR_PTRS
++				// adjust |item| to the beginning of the allocation
++				item = block->items + itemNum * block->size;
++#else
++				// if |item| doesn't point to the beginning of an allocation,
++				// it's not considered a pointer.
+ 				if(block->items + itemNum * block->size != item)
+ 					continue;
+-
++#endif
+ 				if (m_edgeCallbacks)
+ 					FireFoundEdgeTo(item);
+ 
+@@ -2205,15 +2221,34 @@ bail:
+ 					#endif
+ 				}
+ 			}
+-			else if (bits == 3)
++			else if (IsLargeAllocPage(bits))
+ 			{
+ 				//largeAlloc->ConservativeMark(work, (void*) (val&~7), workitem.ptr);
+-				const void* item = (void*) (val&~7);
+-
++				const void* item;
++
++#ifdef MMGC_INTERIOR_PTRS
++				if (bits == kGCLargeAllocPageFirst)
++				{
++					// guard against bogus pointers to the block header
++					if ((val & 0xfff) < sizeof(GCLargeAlloc::LargeBlock))
++						continue;
++
++					item = (void *) ((val & ~0xfff) |
++									 sizeof(GCLargeAlloc::LargeBlock));
++				}
++				else
++				{
++					item = FindBeginning((void *) val);
++				}
++#else
+ 				// back up to real beginning
+-				item = GetRealPointer((const void*) item);
+-
+-				if(((uintptr) item & 0xfff) == sizeof(GCLargeAlloc::LargeBlock))
++				item = GetRealPointer((const void*) (val & ~7));
++
++				// If |item| doesn't point to the start of the page, it's not
++				// really a pointer.
++				if(((uintptr) item & 0xfff) != sizeof(GCLargeAlloc::LargeBlock))
++					continue;
++#endif
+ 				{
+ 					if (m_edgeCallbacks)
+ 						FireFoundEdgeTo(item);
+diff --git a/MMgc/GC.h b/MMgc/GC.h
+--- a/MMgc/GC.h
++++ b/MMgc/GC.h
+@@ -232,7 +232,7 @@ namespace MMgc
+ 		 * corresponding methods of <code>class GC</code> are
+ 		 * private.)
+ 		 */
+-		virtual void lastmark(GCStack<GCWorkItem> &work) {}
++		virtual void lastmark(GCStack<GCWorkItem> &) {}
+ 
+ 		/**
+ 		 * This method is invoked after all marking and before any
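
The DO-NOT-USE patch above makes the conservative marker tolerate interior pointers: under MMGC_INTERIOR_PTRS, a pointer landing anywhere inside an allocation is normalized to the allocation's start instead of being discarded. A standalone sketch of the normalization arithmetic (not MMgc code; the block layout, names, and 4K page size are invented for illustration):

    // A page-aligned block holds fixed-size items after a 64-byte header.
    // An interior pointer is mapped to the start of the item containing it,
    // mirroring the MMGC_INTERIOR_PTRS branch of GC::MarkItem.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main()
    {
        const uintptr_t kPageMask = ~uintptr_t(0xFFF); // 4K pages, as in MMgc
        uintptr_t block = 0x10000;             // page-aligned block address
        uintptr_t items = block + 64;          // first item in the block
        size_t    size  = 48;                  // fixed per-block item size

        uintptr_t val = items + 2 * size + 17; // interior pointer into item 2
        assert((val & kPageMask) == block);    // page mask recovers the block

        size_t itemNum = (val - items) / size;    // index of containing item
        uintptr_t start = items + itemNum * size; // normalized item start
        assert(itemNum == 2 && start == items + 2 * size);
        return 0;
    }

Without MMGC_INTERIOR_PTRS the same index computation runs, but a pointer that does not land exactly on an item start is rejected rather than adjusted.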
diff --git a/configure-with-threadsafe-mmgc b/configure-with-threadsafe-mmgc
new file mode 100644
--- /dev/null
+++ b/configure-with-threadsafe-mmgc
@@ -0,0 +1,87 @@
+diff --git a/configure b/configure
+--- a/configure
++++ b/configure
+@@ -35,5 +35,12 @@
+ #
+ # ***** END LICENSE BLOCK *****
+ 
+-echo "running $PYTHON $0.py --ignore-unknown-flags $@"
+-exec $PYTHON $0.py --ignore-unknown-flags $@
++args=''
++while [ $# -ge 1 ]; do
++    args="$args '$1'"
++    shift
++done
++
++echo "running $PYTHON $0.py --ignore-unknown-flags $args"
++
++eval "'$PYTHON' '$0.py' --ignore-unknown-flags $args"
+diff --git a/configure.py b/configure.py
+--- a/configure.py
++++ b/configure.py
+@@ -71,15 +71,28 @@ DEBUG_CXXFLAGS = ""
+ DEBUG_CXXFLAGS = ""
+ DEBUG_LDFLAGS = ""
+ OS_LIBS = []
++OS_LDFLAGS = ""
+ MMGC_CPPFLAGS = ""
+ AVMSHELL_CPPFLAGS = ""
+ AVMSHELL_LDFLAGS = ""
+ MMGC_DEFINES = {'SOFT_ASSERTS': None}
++NSPR_INCLUDES=""
++NSPR_LDOPTS=""
+ 
+ MMGC_DYNAMIC = o.getBoolArg('mmgc-shared', False)
+ if MMGC_DYNAMIC:
+     MMGC_DEFINES['MMGC_DLL'] = None
+     MMGC_CPPFLAGS += "-DMMGC_IMPL "
++
++MMGC_THREADSAFE = o.getBoolArg('threadsafe-mmgc', False)
++if MMGC_THREADSAFE:
++    MMGC_DEFINES['MMGC_THREADSAFE'] = None
++    NSPR_INCLUDES = o.getStringArg('nspr-includes')
++    MMGC_CPPFLAGS += NSPR_INCLUDES + " "
++    APP_CPPFLAGS += NSPR_INCLUDES + " "
++
++    NSPR_LDOPTS = o.getStringArg('nspr-ldopts')
++    OS_LDFLAGS += " " + NSPR_LDOPTS
+ 
+ if config.COMPILER_IS_GCC:
+     APP_CXXFLAGS = "-fno-exceptions -Werror -Wall -Wno-reorder -Wno-switch -Wno-invalid-offsetof -Wno-uninitialized -Wno-strict-aliasing -fmessage-length=0 -finline-functions -finline-limit=65536 "
+@@ -178,6 +191,7 @@ config.subst("DEBUG_CXXFLAGS", DEBUG_CXX
+ config.subst("DEBUG_CXXFLAGS", DEBUG_CXXFLAGS)
+ config.subst("DEBUG_LDFLAGS", DEBUG_LDFLAGS)
+ config.subst("OS_LIBS", " ".join(OS_LIBS))
++config.subst("OS_LDFLAGS", OS_LDFLAGS)
+ config.subst("MMGC_CPPFLAGS", MMGC_CPPFLAGS)
+ config.subst("AVMSHELL_CPPFLAGS", AVMSHELL_CPPFLAGS)
+ config.subst("AVMSHELL_LDFLAGS", AVMSHELL_LDFLAGS)
+diff --git a/pcre/config.h b/pcre/config.h
+--- a/pcre/config.h
++++ b/pcre/config.h
+@@ -40,7 +40,9 @@ them both to 0; an emulation function wi
+ #define HAVE_LIMITS_H 1
+ 
+ /* Define to 1 if the system has the type `long long'. */
++#ifndef HAVE_LONG_LONG
+ #define HAVE_LONG_LONG 1
++#endif
+ 
+ /* Define to 1 if you have the `memmove' function. */
+ #define HAVE_MEMMOVE 1
+diff --git a/shell/avmshell.h b/shell/avmshell.h
+--- a/shell/avmshell.h
++++ b/shell/avmshell.h
+@@ -48,6 +48,12 @@
+ // #define AVMPLUS_INTERACTIVE
+ 
+ using namespace avmplus;
++
++// avmplus and NSPR both typedef some basic types: we must disambiguate
++using avmplus::uint64;
++using avmplus::uint32;
++using avmplus::uint16;
++using avmplus::uint8;
+ 
+ namespace avmshell
+ {
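
The configure shim above fixes argument re-splitting: the old `exec $PYTHON $0.py --ignore-unknown-flags $@` loses quoting, so a flag value containing spaces (say, --nspr-includes='-I/a -I/b') reaches configure.py as several arguments. The patch re-wraps each argument in single quotes and dispatches through eval. A sketch of the same quoting rule (like the shell version, it still breaks on arguments that themselves contain a single quote):

    // Rebuild a command tail with each argument single-quoted, mirroring the
    // while/shift loop added to configure.
    #include <iostream>
    #include <string>

    int main(int argc, char **argv)
    {
        std::string args;
        for (int i = 1; i < argc; ++i) {
            args += " '";      // opening quote preserves embedded spaces
            args += argv[i];
            args += "'";
        }
        std::cout << "running $PYTHON configure.py --ignore-unknown-flags"
                  << args << "\n";
        return 0;
    }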
diff --git a/mmgc-bit-checks b/mmgc-bit-checks
new file mode 100644
--- /dev/null
+++ b/mmgc-bit-checks
@@ -0,0 +1,259 @@
+diff -r af09bd95194a MMgc/GC.cpp
+--- a/MMgc/GC.cpp	Fri Aug 24 18:49:48 2007 -0400
++++ b/MMgc/GC.cpp	Fri Aug 24 18:52:02 2007 -0400
+@@ -2388,27 +2388,6 @@ bail:
+ 		collecting = false;
+ 		marking = false;
+ 	}
+-
+-	int GC::IsWhite(const void *item)
+-	{
+-		// back up to real beginning
+-		item = GetRealPointer((const void*) item);
+-
+-		// normalize and divide by 4K to get index
+-		if(!IsPointerToGCPage(item))
+-			return false;
+-		int bits = GetPageMapValue((uintptr)item);	
+-		switch(bits) {
+-		case 1:
+-			return GCAlloc::IsWhite(item);
+-		case 3:
+-			// FIXME: we only want pointers to the first page for large items, fix
+-			// this by marking the first page and subsequent pages of large items differently
+-			// in the page map (ie use 2).
+-			return GCLargeAlloc::IsWhite(item);
+-		}
+-		return false;
+-	}
+ 	
+ 	// TODO: fix headers so this can be declared there and inlined
+ 	void GC::WriteBarrierWrite(const void *address, const void *value)
+@@ -2510,11 +2489,6 @@ bail:
+ 	{
+ 		int bits = GetPageMapValue((uintptr)item);
+ 		return (bits != 0);
+-	}
+-
+-	bool GC::IsQueued(const void *item)
+-	{
+-		return !GetMark(item) && !IsWhite(item);
+ 	}
+ 
+ 	uint32 *GC::GetBits(int numBytes, int sizeClass)
+diff -r af09bd95194a MMgc/GC.h
+--- a/MMgc/GC.h	Fri Aug 24 18:49:48 2007 -0400
++++ b/MMgc/GC.h	Fri Aug 24 18:52:02 2007 -0400
+@@ -587,13 +587,31 @@ namespace MMgc
+ 
+ 		}
+ 
++#if defined(MEMORY_INFO)
++#define MMGC_CHECK_ITEM(item) \
++		do { \
++			GCAssert(item != NULL); \
++			GC *gc = GetGC(item); \
++			item = GetRealPointer(item); \
++			GCAssert(gc->IsPointerToGCPage(item)); \
++		} while (0)
++#elif defined(DEBUG)
++#define MMGC_CHECK_ITEM(item) \
++		do { \
++			GCAssert(item != NULL); \
++			item = GetRealPointer(item); \
++		} while (0)
++#else
++#define MMGC_CHECK_ITEM(item)
++#endif
++
+ 		/**
+ 		 * Tracers should employ GetMark and SetMark to
+ 		 * set the mark bits during the mark pass.
+ 		 */
+ 		static int GetMark(const void *item)
+ 		{
+-			item = GetRealPointer(item);
++			MMGC_CHECK_ITEM(item);
+ 			if (GCLargeAlloc::IsLargeBlock(item)) {
+ 				return GCLargeAlloc::GetMark(item);
+ 			} else {
+@@ -603,25 +621,27 @@ namespace MMgc
+ 
+ 		static int SetMark(const void *item)
+ 		{
+-			GCAssert(item != NULL);
+-#ifdef MEMORY_INFO
+-			GC *gc = GetGC(item);	
+-			item = GetRealPointer(item);
+-			GCAssert(gc->IsPointerToGCPage(item));
+-#endif 			
++			MMGC_CHECK_ITEM(item);
+ 			if (GCLargeAlloc::IsLargeBlock(item)) {
+ 				return GCLargeAlloc::SetMark(item);
+ 			} else {
+ 				return GCAlloc::SetMark(item);
+ 			}
+ 		}
+-		
+-		void SetQueued(const void *item)
+-		{
+-#ifdef MEMORY_INFO
+-			item = GetRealPointer(item);
+-			GCAssert(IsPointerToGCPage(item));
+-#endif 			
++
++		static bool IsQueued(const void *item)
++		{
++			MMGC_CHECK_ITEM(item);
++			if (GCLargeAlloc::IsLargeBlock(item)) {
++				return GCLargeAlloc::IsQueued(item);
++			} else {
++				return GCAlloc::IsQueued(item);
++			}
++		}
++
++		static void SetQueued(const void *item)
++		{
++			MMGC_CHECK_ITEM(item);
+ 			if (GCLargeAlloc::IsLargeBlock(item)) {
+ 				GCLargeAlloc::SetQueued(item);
+ 			} else {
+@@ -629,13 +649,22 @@ namespace MMgc
+ 			}
+ 		}
+ 
++		/**
++		 * True if the item is neither marked nor queued.
++		 */
++		static int IsWhite(const void *item)
++		{
++			MMGC_CHECK_ITEM(item);
++			if (GCLargeAlloc::IsLargeBlock(item)) {
++				return GCLargeAlloc::IsWhite(item);
++			} else {
++				return GCAlloc::IsWhite(item);
++			}
++		}
++
+ 		static void ClearFinalized(const void *item)
+ 		{
+-#ifdef MEMORY_INFO
+-			GC *gc = GetGC(item);	
+-			item = GetRealPointer(item);
+-			GCAssert(gc->IsPointerToGCPage(item));
+-#endif 			
++			MMGC_CHECK_ITEM(item);
+ 			if (GCLargeAlloc::IsLargeBlock(item)) {
+ 				GCLargeAlloc::ClearFinalized(item);
+ 			} else {
+@@ -645,11 +674,7 @@ namespace MMgc
+ 
+ 		static void SetFinalize(const void *item)
+ 		{
+-#ifdef MEMORY_INFO
+-			GC *gc = GetGC(item);	
+-			item = GetRealPointer(item);
+-			GCAssert(gc->IsPointerToGCPage(item));
+-#endif 			
++			MMGC_CHECK_ITEM(item);
+ 			if (GCLargeAlloc::IsLargeBlock(item)) {
+ 				GCLargeAlloc::SetFinalize(item);
+ 			} else {
+@@ -659,11 +684,7 @@ namespace MMgc
+ 
+ 		static int IsFinalized(const void *item)
+ 		{
+-#ifdef MEMORY_INFO
+-			GC *gc = GetGC(item);	
+-			item = GetRealPointer(item);
+-			GCAssert(gc->IsPointerToGCPage(item));
+-#endif 			
++			MMGC_CHECK_ITEM(item);
+ 			if (GCLargeAlloc::IsLargeBlock(item)) {
+ 				return GCLargeAlloc::IsFinalized(item);
+ 			} else {
+@@ -673,17 +694,15 @@ namespace MMgc
+ 
+ 		static int HasWeakRef(const void *item)
+ 		{
+-#ifdef MEMORY_INFO
+-			GC *gc = GetGC(item);	
+-			item = GetRealPointer(item);
+-			GCAssert(gc->IsPointerToGCPage(item));
+-#endif 			
++			MMGC_CHECK_ITEM(item);
+ 			if (GCLargeAlloc::IsLargeBlock(item)) {
+ 				return GCLargeAlloc::HasWeakRef(item);
+ 			} else {
+ 				return GCAlloc::HasWeakRef(item);
+ 			}
+ 		}
++
++#undef MMGC_CHECK_ITEM
+ 
+ 		/**
+ 		 * Utility function: Returns the GC object
+@@ -824,8 +843,6 @@ namespace MMgc
+ 
+ 		bool IsGCMemory (const void *);
+ 
+-		bool IsQueued(const void *item);
+-
+ 		static uint64 GetPerformanceCounter();
+ 		static uint64 GetPerformanceFrequency();
+ 		
+@@ -922,7 +939,6 @@ namespace MMgc
+ 		GCStack<GCWorkItem> m_incrementalWork;
+ 		void StartIncrementalMark();
+ 		void FinishIncrementalMark();
+-		int IsWhite(const void *item);
+ 		
+ 		uint64 lastMarkTicks;
+ 		uint64 lastSweepTicks;
+diff -r af09bd95194a MMgc/GCAlloc.h
+--- a/MMgc/GCAlloc.h	Fri Aug 24 18:49:48 2007 -0400
++++ b/MMgc/GCAlloc.h	Fri Aug 24 18:52:02 2007 -0400
+@@ -110,6 +110,13 @@ namespace MMgc
+ 			return SetBit(block, GetIndex(block, item), kQueued);
+ 		}
+ 		
++		static bool IsQueued(const void *item)
++		{
++			// Zero low 12 bits of address to get to the Block header
++			GCBlock *block = (GCBlock*) ((uintptr)item & ~0xFFF);
++			return GetBit(block, GetIndex(block, item), kQueued) != 0;
++		}
++
+ 		static int SetFinalize(const void *item)
+ 		{
+ 			// Zero low 12 bits of address to get to the Block header
+@@ -123,11 +130,8 @@ namespace MMgc
+ 			GCBlock *block = (GCBlock*) ((uintptr)item & ~0xFFF);
+ 
+ 			// not a real item
+-			if(item < block->items)
+-				return false;
+-
+-			if(FindBeginning(item) != item)
+-				return false;
++			GCAssert(item >= block->items);
++			GCAssert(FindBeginning(item) == item);
+ 
+ 			return IsWhite(block, GetIndex(block, item));
+ 		}
+diff -r af09bd95194a MMgc/GCLargeAlloc.h
+--- a/MMgc/GCLargeAlloc.h	Fri Aug 24 18:49:48 2007 -0400
++++ b/MMgc/GCLargeAlloc.h	Fri Aug 24 18:52:02 2007 -0400
+@@ -101,6 +101,12 @@ namespace MMgc
+ 			block->flags |= kQueuedFlag;
+ 		}
+ 
++		static bool IsQueued(const void *item)
++		{
++			LargeBlock *block = GetBlockHeader(item);
++			return (block->flags & kQueuedFlag) != 0;
++		}
++
+ 		static void SetFinalize(const void *item)
+ 		{
+ 			LargeBlock *block = GetBlockHeader(item);
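
The mmgc-bit-checks patch above folds four copies of the MEMORY_INFO/debug preamble into one MMGC_CHECK_ITEM macro and gives every per-item predicate the same shape: test whether the item lives in a large block, then read either the per-block flag (GCLargeAlloc) or the per-item bit (GCAlloc). A standalone sketch of that dispatch shape (types and layout invented; not the real GCBlock or LargeBlock):

    #include <cassert>
    #include <cstdint>

    struct Block {                 // stands in for both block kinds
        uint32_t itemBits[32];     // per-item bits, as in GCAlloc
        bool     isLarge;          // stands in for IsLargeBlock()
        uint32_t largeFlags;       // per-block flags, as in GCLargeAlloc
    };

    const uint32_t kQueued = 1;

    bool IsQueued(const Block *b, int itemNum)
    {
        if (b->isLarge)            // large block: one flag for the object
            return (b->largeFlags & kQueued) != 0;
        return (b->itemBits[itemNum] & kQueued) != 0; // small: per-item bit
    }

    int main()
    {
        Block b = {};
        b.itemBits[3] = kQueued;
        assert(IsQueued(&b, 3) && !IsQueued(&b, 4));
        b.isLarge = true;          // large blocks consult flags instead
        assert(!IsQueued(&b, 3));
        return 0;
    }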
diff --git a/mmgc-decref-microfix b/mmgc-decref-microfix
new file mode 100644
--- /dev/null
+++ b/mmgc-decref-microfix
@@ -0,0 +1,12 @@
+diff --git a/MMgc/WriteBarrier.h b/MMgc/WriteBarrier.h
+--- a/MMgc/WriteBarrier.h
++++ b/MMgc/WriteBarrier.h
+@@ -147,7 +147,7 @@ namespace MMgc
+ 		~WriteBarrierRC() 
+ 		{
+ 			if(t != 0) {
+-				((RCObject*)t)->DecrementRef();
++				t->DecrementRef();
+ 				t=0;
+ 			}
+ 		}
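
The one-line mmgc-decref-microfix above drops a cast that only obscured the type: the pointer held by WriteBarrierRC already has DecrementRef in scope. A minimal sketch of why the direct call compiles, assuming (as the patch implies) a template whose parameter derives from RCObject; the names here are invented:

    #include <cassert>

    struct RCObject {
        int refs;
        RCObject() : refs(1) {}
        void DecrementRef() { --refs; }
    };

    template <class T>
    struct WriteBarrierRC {        // toy stand-in for MMgc's class
        T *t;
        WriteBarrierRC() : t(0) {}
        ~WriteBarrierRC()
        {
            if (t != 0) {
                t->DecrementRef(); // no (RCObject*) cast needed
                t = 0;
            }
        }
    };

    struct MyObject : RCObject {};

    int main()
    {
        MyObject obj;
        {
            WriteBarrierRC<MyObject> wb;
            wb.t = &obj;
        }                          // destructor releases the reference
        assert(obj.refs == 0);
        return 0;
    }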
diff --git a/mmgc-graphviz b/mmgc-graphviz
new file mode 100644
--- /dev/null
+++ b/mmgc-graphviz
@@ -0,0 +1,333 @@
+diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
+--- a/MMgc/GC.cpp
++++ b/MMgc/GC.cpp
+@@ -280,6 +280,9 @@ namespace MMgc
+ #else
+ 		  disableThreadCheck(false)
+ #endif
++#ifdef MMGC_GRAPHVIZ
++		  , m_gvFile(NULL)
++#endif
+ 	{
+ 		// sanity check for all our types
+ 		GCAssert (sizeof(int8) == 1);
+@@ -352,6 +355,10 @@ namespace MMgc
+ 
+ 	GC::~GC()
+ 	{
++#ifdef MMGC_GRAPHVIZ
++		StopGraphing();
++#endif
++
+ 		// Force all objects to be destroyed
+ 		destroying = true;
+ 		ClearMarks();
+@@ -545,7 +552,19 @@ namespace MMgc
+ 				cb->enterExclusiveGCNoLock();
+ 		}
+ 
++#ifdef MMGC_GRAPHVIZ
++		if (m_gvFile) {
++			fprintf(m_gvFile, "digraph C%d {\n", sweeps);
++			fprintf(m_gvFile, "    node [shape=box];\n");
++		}
++#endif
++
+ 		CollectImpl();
++
++#ifdef MMGC_GRAPHVIZ
++		if (m_gvFile)
++			fprintf(m_gvFile, "}\n");
++#endif
+ 
+ #ifdef MMGC_THREADSAFE
+ 		m_lock.Acquire();
+@@ -619,6 +638,46 @@ namespace MMgc
+ #endif
+ 	}
+ 
++#define GRAPHVIZ_ROOT_STYLE "style=\"filled\", color=\"#ccddbb\""
++#define GRAPHVIZ_OBJECT_STYLE "style=\"filled\", color=\"#b3cc99\""
++#define GRAPHVIZ_CONSERVATIVE_EDGE_STYLE "color=\"#808080\""
++
++#ifdef MMGC_GRAPHVIZ
++	std::string Quote(const std::string &s)
++	{
++		// XXX BOGUS FIXME - should at least escape quotes and newlines
++		return '"' + s + '"';
++	}
++
++	void GC::GraphNode(const void *obj, const char *style)
++	{
++		if (m_gvFile != NULL && obj != NULL) {
++			if (IsGCMemory(obj)) {
++				std::string label;
++				if (IsFinalized(obj)) {
++					label = ((const GCFinalizedObject *) obj)->GetRepr();
++				} else {
++					label = typeid(*(const GCObject *) obj).name();
++				}
++				label = Quote(label);
++				fprintf(m_gvFile, "    P%p [label=%s, %s];\n",
++						obj, label.c_str(), style);
++			} else {
++				fprintf(m_gvFile, "    P%p [%s];\n", obj, style);
++			}
++		}
++	}
++
++	void GC::GraphEdge(const void *fromObj,
++					   const void *fromField,
++					   const void *toObj,
++					   const char *style)
++	{
++		if (m_gvFile)
++			fprintf(m_gvFile, "    P%p -> P%p [%s];\n", fromObj, toObj, style);
++	}
++#endif
++
+ 	void GC::Trace(const void *stackStart/*=NULL*/, size_t stackSize/*=0*/)
+ 	{
+ 		ASSERT_EXCLUSIVE_GC();
+@@ -638,6 +697,16 @@ namespace MMgc
+ 			GCRoot *r = m_roots;
+ 			while(r) {
+ 				GCWorkItem item = r->GetWorkItem();
++#ifdef MMGC_GRAPHVIZ
++				if (m_gvFile) {
++					fprintf(m_gvFile,
++							"    P%p [label=\"root at %p\", %s];\n",
++							item.ptr,
++							item.ptr,
++							GRAPHVIZ_ROOT_STYLE);
++				}
++#endif
++
+ 				MarkItem(item, work);
+ 				r = r->next;
+ 			}
+@@ -2248,7 +2317,6 @@ bail:
+ #undef ALLOCA_AND_FILL_WITH_SPACES
+ #endif
+ 
+-
+ 	void GC::StartIncrementalMark()
+ 	{
+ 		GCAssert(!marking);
+@@ -2406,6 +2474,11 @@ bail:
+ 
+ 	void GC::MarkItem(GCWorkItem &wi, GCStack<GCWorkItem> &work)
+ 	{
++#ifdef MMGC_GRAPHVIZ
++		if (wi.IsGCItem())
++			GraphNode(wi.ptr, GRAPHVIZ_OBJECT_STYLE);
++#endif
++
+ 		size_t size = wi.GetSize();
+ 		uintptr *p = (uintptr*) wi.ptr;
+ 
+@@ -2486,6 +2559,9 @@ bail:
+ 				{
+ 					if(block->alloc->ContainsPointers())
+ 					{
++#ifdef MMGC_GRAPHVIZ
++						GraphEdge(wi.ptr, p, item, GRAPHVIZ_CONSERVATIVE_EDGE_STYLE);
++#endif
+ 						// try custom marking
+ 						if ((bits2 & (GCAlloc::kFinalize<<shift)) != 0
+ 							&& ((GCFinalizedObject *) GetUserPointer(item))->CustomMark())
+@@ -2545,6 +2621,9 @@ bail:
+ 						size_t usize = b->usableSize;
+ 						if((b->flags & GCLargeAlloc::kContainsPointers) != 0) 
+ 						{
++#ifdef MMGC_GRAPHVIZ
++							GraphEdge(wi.ptr, p, item, GRAPHVIZ_CONSERVATIVE_EDGE_STYLE);
++#endif
+ 							// try custom marking
+ 							if ((b->flags & GCLargeAlloc::kFinalizeFlag) != 0
+ 								&& ((GCFinalizedObject *) GetUserPointer(item))->CustomMark())
+@@ -3169,5 +3248,4 @@ bail:
+ 	}
+ #endif
+ 
+-
+ }
+diff --git a/MMgc/GC.h b/MMgc/GC.h
+--- a/MMgc/GC.h
++++ b/MMgc/GC.h
+@@ -38,6 +38,10 @@
+ 
+ #ifndef __GC__
+ #define __GC__
++
++#ifdef MMGC_GRAPHVIZ
++#include <typeinfo>
++#endif
+ 
+ #if defined MMGC_IA32
+ 
+@@ -1515,8 +1519,8 @@ private:
+ #endif
+ #endif
+ 
+-public:
+ #ifdef MEMORY_INFO
++	public:
+ 		void DumpBackPointerChain(void *o);
+ 
+ 		// debugging routine that records who marked who, can be used to
+@@ -1526,6 +1530,7 @@ public:
+ 		static void WriteBackPointer(const void *item, const void *container, size_t itemSize);
+ #endif
+ #ifdef _DEBUG
++	public:
+ 		// Dump a list of objects that have pointers to the given location.
+ 		void WhosPointingAtMe(void* me, int recurseDepth=0, int currentDepth=0);
+ 
+@@ -1539,8 +1544,9 @@ public:
+ #ifdef MMGC_THREADSAFE
+ 	public:
+ 		/**
+-		 * True if marking or sweeping is happening.  This implies that no
+-		 * threads in requests are running right now.
++		 * True if marking or sweeping is happening.  In an MMGC_THREADSAFE
++		 * build, this implies that no threads in requests are running right
++		 * now.
+ 		 *
+ 		 * Contrast Collecting().
+ 		 *
+@@ -1664,6 +1670,31 @@ public:
+ 		GCThread *m_exclusiveGCThread;
+ 
+ 		/**
++		 * This is notified whenever m_exclusiveGCThread becomes NULL.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		GCCondition m_condDone;
++
++		/**
++		 * The number of threads currently in active requests.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		int m_requestCount;
++
++		/**
++		 * This is notified whenever m_requestCount becomes zero.
++		 *
++		 * At most one thread is ever waiting on this condition at a time; if
++		 * a thread is waiting on it, that thread is `m_exclusiveGCThread` and
++		 * `m_gcRunning` is false.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		GCCondition m_condNoRequests;
++
++		/**
+ 		 * True if a thread is doing GC-related work, having already ensured
+ 		 * that no threads are in active requests.
+ 		 *
+@@ -1674,31 +1705,31 @@ public:
+ 		 * @access ReadWrite(request || exclusiveGC || m_lock, exclusiveGC && m_lock)
+ 		 */
+ 		bool m_gcRunning;
+-
+-		/**
+-		 * This is notified whenever m_exclusiveGCThread becomes NULL.
+-		 *
+-		 * @access Requires(m_lock)
+-		 */
+-		GCCondition m_condDone;
+-
+-		/**
+-		 * The number of threads currently in active requests.
+-		 *
+-		 * @access Requires(m_lock)
+-		 */
+-		int m_requestCount;
+-
+-		/**
+-		 * This is notified whenever m_requestCount becomes zero.
+-		 *
+-		 * At most one thread is ever waiting on this condition at a time; if
+-		 * a thread is waiting on it, that thread is `m_exclusiveGCThread` and
+-		 * `m_gcRunning` is false.
+-		 *
+-		 * @access Requires(m_lock)
+-		 */
+-		GCCondition m_condNoRequests;
++#endif
++
++#ifdef MMGC_GRAPHVIZ
++	public:
++		void StartGraphing(const char *filename)
++		{
++			StopGraphing();
++			m_gvFile = fopen(filename, "w");
++		}
++
++		void StopGraphing()
++		{
++			if (m_gvFile)
++				{ fclose(m_gvFile); m_gvFile = NULL; }
++		}
++
++		void GraphNode(const void *obj, const char *style);
++
++		void GraphEdge(const void *fromObj,
++					   const void *fromField,
++					   const void *toObj,
++					   const char *style);
++
++	private:
++		FILE *m_gvFile;
+ #endif
+ 	};
+ 
+diff --git a/MMgc/GCObject.cpp b/MMgc/GCObject.cpp
+--- a/MMgc/GCObject.cpp
++++ b/MMgc/GCObject.cpp
+@@ -60,6 +60,19 @@ namespace MMgc
+ 		GC::GetGC(gcObject)->Free(gcObject);
+ 	}		
+ 
++	std::string GCFinalizedObject::GetRepr() const
++	{
++		std::string s;
++		char buf[60];
++		int n;
++
++		n = snprintf(buf, sizeof(buf), "%s at %p",
++					 typeid(*this).name(),
++					 dynamic_cast<const void *>(this));
++		s.assign(buf, n < 0 ? 0 : (n < (int)sizeof(buf) ? (size_t)n : sizeof(buf) - 1));
++		return s;
++	}
++
+ 	void* GCFinalizedObjectOptIn::operator new(size_t size, GC *gc, size_t extra)
+ 	{
+ 		return gc->Alloc(size + extra, GC::kContainsPointers|GC::kZero, 4);
+diff --git a/MMgc/GCObject.h b/MMgc/GCObject.h
+--- a/MMgc/GCObject.h
++++ b/MMgc/GCObject.h
+@@ -136,6 +136,8 @@ namespace MMgc
+ 		 */
+ 		virtual bool CustomMark() { return false; }
+ 
++		virtual std::string GetRepr() const;
++
+ 		static void *operator new(size_t size, GC *gc, size_t extra = 0);
+ 		static void operator delete (void *gcObject);
+ 	};
+diff --git a/MMgc/macbuild.h b/MMgc/macbuild.h
+--- a/MMgc/macbuild.h
++++ b/MMgc/macbuild.h
+@@ -66,6 +66,7 @@
+  */
+ #ifdef DEBUG
+ #define MEMORY_INFO
++#define MMGC_GRAPHVIZ
+ #endif
+ 
+ /**
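
The mmgc-graphviz patch above dumps one dot digraph per collection: a styled box node per root and per marked object, and a gray edge per conservatively discovered pointer. A standalone sketch of the output format GraphNode/GraphEdge produce, using two stack variables as fake objects:

    #include <cstdio>

    int main()
    {
        int obj1 = 0, obj2 = 0;    // stand-ins for two heap objects
        FILE *f = stdout;          // the patch writes to m_gvFile instead

        fprintf(f, "digraph C%d {\n", 1);  // one graph per collection
        fprintf(f, "    node [shape=box];\n");
        fprintf(f, "    P%p [label=\"root at %p\", style=\"filled\", color=\"#ccddbb\"];\n",
                (void *)&obj1, (void *)&obj1);
        fprintf(f, "    P%p [style=\"filled\", color=\"#b3cc99\"];\n",
                (void *)&obj2);
        fprintf(f, "    P%p -> P%p [color=\"#808080\"];\n",
                (void *)&obj1, (void *)&obj2);
        fprintf(f, "}\n");
        return 0;
    }

Piping the output through `dot -Tpng` renders the heap snapshot.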
diff --git a/mmgc-remove-precollect-hook b/mmgc-remove-precollect-hook
new file mode 100644
--- /dev/null
+++ b/mmgc-remove-precollect-hook
@@ -0,0 +1,43 @@
+diff -r 7b346c66ebc4 MMgc/GC.cpp
+--- a/MMgc/GC.cpp	Wed Aug 08 11:51:24 2007 -0400
++++ b/MMgc/GC.cpp	Wed Aug 08 11:51:30 2007 -0400
+@@ -383,16 +383,7 @@ namespace MMgc
+ 
+ 	void GC::Collect()
+ 	{
+-		// invoke precollect callback
+-		bool vetoed = false;
+-		GCCallback *cb = m_callbacks;
+-		while (cb) {
+-			if (!cb->precollect())
+-				vetoed = true;
+-			cb = cb->nextCB;
+-		}
+-
+-		if (vetoed || nogc || collecting) {
++		if (nogc || collecting) {
+ 
+ 			return;
+ 		}
+diff -r 7b346c66ebc4 MMgc/GC.h
+--- a/MMgc/GC.h	Wed Aug 08 11:51:24 2007 -0400
++++ b/MMgc/GC.h	Wed Aug 08 11:51:30 2007 -0400
+@@ -208,18 +208,6 @@ namespace MMgc
+ 		GC *GetGC() const { return gc; }
+ 		/** if your object goes away after the GC is deleted this can be useful */
+ 		void Destroy();
+-
+-		/**
+-		 * This method is called from GC::Collect(), before
+-		 * anything else, even in cases where it's logically
+-		 * certain that collection <em>will not start</em>
+-		 * (e.g. because we're already collecting, or GC is
+-		 * disabled).
+-		 *
+-		 * Returns true to allow collection to happen, false
+-		 * to veto.
+-		 */
+-		virtual bool precollect() { return true; }
+ 
+ 		/**
+ 		 * This method is invoked during the last mark phase
diff --git a/mmgc-rm-isd b/mmgc-rm-isd
new file mode 100644
--- /dev/null
+++ b/mmgc-rm-isd
@@ -0,0 +1,15 @@
+diff --git a/MMgc/GC.h b/MMgc/GC.h
+--- a/MMgc/GC.h
++++ b/MMgc/GC.h
+@@ -530,11 +530,6 @@ namespace MMgc
+ 		bool validateDefRef;		
+ 		bool keepDRCHistory;
+ 
+-		/**
+-		 * incremental space divisor
+-		 */
+-		int ISD;
+-
+ 		size_t collectThreshold;
+ 
+ 		bool gcstats;
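
The last and largest patch, mmgc-threadsafe-take2 (below), builds Collect() around a request/exclusive-GC handshake: mutator threads run inside requests; a collector claims m_exclusiveGCThread, waits on m_condNoRequests until m_requestCount drains to zero, collects, and finally notifies m_condDone. A standalone sketch of that handshake using standard C++ threads instead of MMgc's GCLock/GCCondition (the suspend-your-own-request wrinkle and the stack rooting are omitted):

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    std::mutex gcLock;                    // stands in for GC::m_lock
    std::condition_variable condNoRequests, condDone;
    int requestCount = 0;                 // threads in active requests
    bool collecting = false;              // stands in for m_exclusiveGCThread

    void EnterRequest()
    {
        std::unique_lock<std::mutex> l(gcLock);
        condDone.wait(l, [] { return !collecting; }); // don't start mid-GC
        ++requestCount;
    }

    void LeaveRequest()
    {
        std::lock_guard<std::mutex> l(gcLock);
        if (--requestCount == 0)
            condNoRequests.notify_one();  // at most one collector waits
    }

    void Collect()
    {
        std::unique_lock<std::mutex> l(gcLock);
        collecting = true;                // claim exclusive-GC status
        condNoRequests.wait(l, [] { return requestCount == 0; });
        std::cout << "mark and sweep with every request stopped\n";
        collecting = false;               // relinquish and wake waiters
        condDone.notify_all();
    }

    int main()
    {
        std::thread mutator([] { EnterRequest(); LeaveRequest(); });
        Collect();
        mutator.join();
        return 0;
    }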
diff --git a/mmgc-threadsafe-take2 b/mmgc-threadsafe-take2
new file mode 100644
--- /dev/null
+++ b/mmgc-threadsafe-take2
@@ -0,0 +1,2349 @@
+diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
+--- a/MMgc/GC.cpp
++++ b/MMgc/GC.cpp
+@@ -198,8 +198,23 @@ namespace MMgc
+ 	};
+ 	const size_t kLargestAlloc = 1968;
+ 
++#ifdef MMGC_THREADSAFE
++#define USING_CALLBACK_LIST(gc)  GCAcquireSpinlock _cblock((gc)->m_callbackListLock)
++#define USING_PAGE_MAP()         GCAcquireSpinlock _pmlock(pageMapLock)
++#define ASSERT_GC_LOCK()         GCAssert(m_lock.IsHeld() || destroying)
++#define ASSERT_EXCLUSIVE_GC() \
++    GCAssert((m_gcRunning \
++			  && m_exclusiveGCThread == GCThread::GetCurrentThread()) \
++             || destroying)
++#else
++#define USING_CALLBACK_LIST(gc)  ((gc)->CheckThread())
++#define USING_PAGE_MAP()         ((void) 0)
++#define ASSERT_GC_LOCK()         ((void) 0)
++#define ASSERT_EXCLUSIVE_GC()    ((void) 0)
++#endif
++
+ 	GC::GC(GCHeap *gcheap)
+-		: disableThreadCheck(false),
++		:
+ #ifdef MMGC_DRC
+ 		  zct(gcheap),
+ #endif
+@@ -255,8 +270,17 @@ namespace MMgc
+ 		  heapSizeAtLastAlloc(gcheap->GetTotalHeapSize()),
+ 		  finalizedValue(true),
+ 		  // Expand, don't collect, until we hit this threshold
+-		  collectThreshold(256)
+-	{		
++		  collectThreshold(256),
++#ifdef MMGC_THREADSAFE
++		  m_exclusiveGCThread(NULL),
++		  m_gcRunning(false),
++		  m_condDone(m_lock),
++		  m_requestCount(0),
++		  m_condNoRequests(m_lock)
++#else
++		  disableThreadCheck(false)
++#endif
++	{
+ 		// sanity check for all our types
+ 		GCAssert (sizeof(int8) == 1);
+ 		GCAssert (sizeof(uint8) == 1);		
+@@ -372,7 +396,9 @@ namespace MMgc
+ 
+ 		heap->Free(pageMap);
+ 
++#ifndef MMGC_THREADSAFE
+ 		CheckThread();
++#endif
+ 
+ 		GCAssert(!m_roots);
+ 		GCAssert(!m_callbacks);
+@@ -380,15 +406,43 @@ namespace MMgc
+ 
+ 	void GC::Collect()
+ 	{
++		CollectWithBookkeeping(false, false);
++	}
++
++	void GC::CollectWithBookkeeping(bool callerHoldsLock,
++									bool callerHasActiveRequest)
++	{
++#ifdef MMGC_THREADSAFE
++		GCAssert(callerHoldsLock == m_lock.IsHeld());
++#ifdef _DEBUG
++		GCAssert(callerHasActiveRequest == GCThread::GetCurrentThread()->IsInActiveRequest());
++#endif
++#else
++		CheckThread();
++#endif
++
+ 		// invoke precollect callback
+ 		bool vetoed = false;
+-		GCCallback *cb = m_callbacks;
+-		while (cb) {
+-			if (!cb->precollect())
+-				vetoed = true;
+-			cb = cb->nextCB;
+-		}
+-
++		{
++			USING_CALLBACK_LIST(this);
++			GCCallback *cb = m_callbacks;
++			while (cb) {
++				if (!cb->precollect())
++					vetoed = true;
++				cb = cb->nextCB;
++			}
++		}
++
++#ifdef MMGC_THREADSAFE
++		if (!callerHoldsLock)
++			m_lock.Acquire();
++
++		if (vetoed || nogc || m_gcRunning || zct.reaping) {
++			if (!callerHoldsLock)
++				m_lock.Release();
++			return;
++		}
++#else
+ 		if (vetoed || nogc || collecting) {
+ 
+ 			return;
+@@ -400,8 +454,136 @@ namespace MMgc
+ 
+ 			return;
+ 		}
+-
++#endif
++
++#ifdef MMGC_THREADSAFE
++		GCThread *thisThread = GCThread::GetCurrentThread();
++		if (m_exclusiveGCThread != NULL) {
++			// Someone is already collecting or waiting to collect.
++			//
++			// If it's the current thread, then we're being called
++			// recursively.  This maybe shouldn't happen, but Collect can be
++			// called indirectly from a finalizer.  Don't recurse, just ignore
++			// it.
++			//
++			// If it's some other thread, wait for that thread to finish.
++			//
++			if (m_exclusiveGCThread != thisThread) {
++				GCRoot *stackRoot;
++				GCRoot *stackRoot = NULL;
++				// Before waiting, this thread must suspend any active
++				// requests.  This avoids a deadlock where m_exclusiveGCThread
++				// is waiting for m_requestCount to go to zero while we're
++				// waiting for m_exclusiveGCThread to be done.
++				//
++				// When we do this, we root the calling thread's stack; this
++				// way each collection scans the stack of every thread that is
++				// in a suspended request.
++				//
++				if (callerHasActiveRequest) {
++					void *stack;
++					size_t stackSize;
++
++					MMGC_GET_STACK_EXTENTS(this, stack, stackSize);
++					stackRoot = new GCRoot(this, stack, stackSize);
++
++					OnLeaveRequestAlreadyLocked();
++				}
++
++				while (m_exclusiveGCThread != NULL)
++					m_condDone.Wait();
++
++				if (callerHasActiveRequest) {
++					// Resume previously suspended request.
++					delete stackRoot;
++					OnEnterRequestAlreadyLocked();
++				}
++			}
++
++			if (!callerHoldsLock)
++				m_lock.Release();
++			return;
++		}
++
++		// If we get here, this thread will collect.
++		m_exclusiveGCThread = thisThread;
++		if (callerHasActiveRequest) {
++			// We must suspend any active requests on this thread.
++			OnLeaveRequestAlreadyLocked();
++		}
++		// Wait for other threads to suspend or end their active requests,
++		// if any.
++		while (m_requestCount > 0)
++			m_condNoRequests.Wait();
++
++		// These should not have changed while we were waiting, even though we
++		// released the lock.
++		GCAssert(m_requestCount == 0);
++		GCAssert(m_exclusiveGCThread == thisThread);
++
++		// This thread has acquired exclusiveGC.
++		m_gcRunning = true;
++#endif
++
++		{
++			USING_CALLBACK_LIST(this);
++			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
++				cb->enterExclusiveGC();
++		}
++
++#ifdef MMGC_THREADSAFE
++		m_lock.Release();
++#endif
++
++		{
++			USING_CALLBACK_LIST(this);
++			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
++				cb->enterExclusiveGCNoLock();
++		}
++
++		CollectImpl();
++
++#ifdef MMGC_THREADSAFE
++		m_lock.Acquire();
++
++		// These should not have changed during collection,
++		// even though we released the lock.
++		GCAssert(m_requestCount == 0);
++		GCAssert(m_exclusiveGCThread == thisThread);
++#endif
++
++		{
++			USING_CALLBACK_LIST(this);
++			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
++				cb->leaveExclusiveGC();
++		}
++
++#ifdef MMGC_THREADSAFE
++		// This thread is relinquishing exclusiveGC.
++		m_gcRunning = false;
++
++		m_exclusiveGCThread = NULL;
++		if (callerHasActiveRequest)
++			OnEnterRequestAlreadyLocked();
++
++		m_condDone.NotifyAll();
++
++		if (!callerHoldsLock)
++			m_lock.Release();
++#endif
++
++		{
++			USING_CALLBACK_LIST(this);
++			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
++				cb->postcollect();
++		}
++	}
++
++	void GC::CollectImpl()
++	{
++#ifndef MMGC_THREADSAFE
+ 		ReapZCT();
++#endif
+ 
+ 		// if we're in the middle of an incremental mark kill it
+ 		// FIXME: we should just push it to completion 
+@@ -428,8 +610,6 @@ namespace MMgc
+ 		FindUnmarkedPointers();
+ #endif
+ 
+-		CheckThread();
+-
+ #ifdef DEBUGGER
+ 		StopGCActivity();
+ #endif
+@@ -437,6 +617,8 @@ namespace MMgc
+ 
+ 	void GC::Trace(const void *stackStart/*=NULL*/, size_t stackSize/*=0*/)
+ 	{
++		ASSERT_EXCLUSIVE_GC();
++
+ 		SAMPLE_FRAME("[mark]", core());
+ 
+ 		// Clear all mark bits.
+@@ -460,8 +642,11 @@ namespace MMgc
+ 		SAMPLE_CHECK();
+ 
+ 		// invoke lastmark on all callbacks
+-		for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
+-			cb->lastmark(work);
++		{
++			USING_CALLBACK_LIST(this);
++			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
++				cb->lastmark(work);
++		}
+ 
+ 		if(stackStart == NULL) {
+ 			MarkQueueAndStack(work);
+@@ -476,6 +661,17 @@ namespace MMgc
+ 
+ 	void *GC::Alloc(size_t size, int flags/*0*/, int skip/*3*/)
+ 	{
++#ifdef MMGC_THREADSAFE
++		GCAutoLock _lock(m_lock);
++		GCAssert(!m_gcRunning);
++#else
++		CheckThread();
++#endif
++		return AllocAlreadyLocked(size, flags, skip);
++	}
++
++	void *GC::AllocAlreadyLocked(size_t size, int flags/*0*/, int skip/*3*/)
++	{
+ #ifdef DEBUGGER
+ 		avmplus::AvmCore *core = (avmplus::AvmCore*)GetGCContextVariable(GCV_AVMCORE);
+ 		if(core)
+@@ -487,9 +683,8 @@ namespace MMgc
+ 
+ #ifdef _DEBUG
+ 		GCAssert(size + 7 > size);
+-		CheckThread();
+ 		if (GC::greedy) {
+-			Collect();
++			CollectWithBookkeeping(true, true);
+ 		}
+ 		// always be marking in pedantic mode
+ 		if(incrementalValidationPedantic) {
+@@ -572,15 +767,21 @@ namespace MMgc
+ 
+ 	void GC::Free(void *item)
+ 	{
++#ifdef MMGC_THREADSAFE
++		GCAutoLock _lock(m_lock);
++		GCAssert(!m_gcRunning);
++#else
+ 		CheckThread();
++#endif
++
++		if(item == NULL) {
++			return;
++		}
++
++		bool isLarge;
++
+ 		// we can't allow free'ing something during Sweeping, otherwise alloc counters
+ 		// get decremented twice and destructors will be called twice.
+-		if(item == NULL) {
+-			return;
+-		}
+-
+-		bool isLarge;
+-
+ 		if(collecting) {
+ 			goto bail;
+ 		}
+@@ -629,6 +830,8 @@ bail:
+ 
+ 	void GC::ClearMarks()
+ 	{
++		ASSERT_EXCLUSIVE_GC();
++
+ 		for (int i=0; i < kNumSizeClasses; i++) {
+ #ifdef MMGC_DRC
+ 			containsPointersRCAllocs[i]->ClearMarks();
+@@ -641,6 +844,8 @@ bail:
+ 
+ 	void GC::Finalize()
+ 	{
++		ASSERT_EXCLUSIVE_GC();
++
+ 		for(int i=kNumSizeClasses; i-- > 0;) {
+ #ifdef MMGC_DRC
+ 			containsPointersRCAllocs[i]->Finalize();
+@@ -662,7 +867,9 @@ bail:
+ 	}
+ 
+ 	void GC::Sweep(bool force)
+-	{	
++	{
++		ASSERT_EXCLUSIVE_GC();
++
+ 		SAMPLE_FRAME("[sweep]", core());
+ 		sweeps++;
+ 
+@@ -681,10 +888,13 @@ bail:
+ 		collecting = true;
+ 
+ 		// invoke presweep on all callbacks
+-		GCCallback *cb = m_callbacks;
+-		while(cb) {
+-			cb->presweep();
+-			cb = cb->nextCB;
++		{
++			USING_CALLBACK_LIST(this);
++			GCCallback *cb = m_callbacks;
++			while(cb) {
++				cb->presweep();
++				cb = cb->nextCB;
++			}
+ 		}
+ 
+ 		SAMPLE_CHECK();
+@@ -737,12 +947,15 @@ bail:
+ 		collecting = false;
+ 
+ 		// invoke postsweep callback
+-		cb = m_callbacks;
+-		while(cb) {
+-			cb->postsweep();
+-			cb = cb->nextCB;
+-		}
+-		
++		{
++			USING_CALLBACK_LIST(this);
++			GCCallback *cb = m_callbacks;
++			while(cb) {
++				cb->postsweep();
++				cb = cb->nextCB;
++			}
++		}
++
+ 		SAMPLE_CHECK();
+ 
+ 		allocsSinceCollect = 0;
+@@ -779,6 +992,7 @@ bail:
+ 
+ 	void* GC::AllocBlock(int size, int pageType, bool zero)
+ 	{
++		ASSERT_GC_LOCK();
+ #ifdef DEBUGGER
+ 		AllocActivity(size);
+ #endif
+@@ -796,7 +1010,7 @@ bail:
+ 			if(incremental)
+ 				StartIncrementalMark();
+ 			else
+-				Collect();
++				CollectWithBookkeeping(true, true);
+ 		}
+ 
+ 		void *item;
+@@ -837,6 +1051,8 @@ bail:
+ 
+ 	void* GC::AllocBlockIncremental(int size, bool zero)
+ 	{
++		ASSERT_GC_LOCK();
++
+ 		if(!nogc && !collecting) {
+ 			uint64 now = GetPerformanceCounter();
+ 			if (marking) {		
+@@ -864,12 +1080,14 @@ bail:
+ 
+ 	void* GC::AllocBlockNonIncremental(int size, bool zero)
+ 	{
++		ASSERT_GC_LOCK();
++
+ 		void *item = heap->Alloc(size, false, zero);
+ 		if (!item) {
+ 			if (heap->GetTotalHeapSize() >= collectThreshold &&
+ 				allocsSinceCollect >= totalGCPages / kFreeSpaceDivisor) 
+ 			{
+-				Collect();
++				CollectWithBookkeeping(true, true);
+ 				item = heap->Alloc(size, false, zero);
+ 			}
+ 		}
+@@ -878,6 +1096,11 @@ bail:
+ 
+ 	void GC::FreeBlock(void *ptr, uint32 size)
+ 	{
++#ifdef MMGC_THREADSAFE
++		GCAssert(m_lock.IsHeld() || destroying
++				 || (m_gcRunning
++					 && m_exclusiveGCThread == GCThread::GetCurrentThread()));
++#endif
+ #ifdef DEBUGGER
+ 		AllocActivity(- (int)size);
+ #endif
+@@ -905,6 +1128,7 @@ bail:
+ 
+ 	void GC::Mark(GCStack<GCWorkItem> &work)
+ 	{
++		ASSERT_EXCLUSIVE_GC();
+ 		while(work.Count()) {
+ 			MarkItem(work);
+ 		}
+@@ -912,6 +1136,7 @@ bail:
+ 
+ 	void GC::MarkGCPages(void *item, uint32 numPages, int to)
+ 	{
++		USING_PAGE_MAP();
+ 		uintptr addr = (uintptr)item;
+ 		uint32 shiftAmount=0;
+ 		unsigned char *dst = pageMap;
+@@ -954,7 +1179,7 @@ bail:
+ 		addr = (uintptr)item;
+ 		while(numPages--)
+ 		{
+-			GCAssert(GetPageMapValue(addr) == 0);
++			GCAssert(GetPageMapValueAlreadyLocked(addr) == 0);
+ 			SetPageMapValue(addr, to);
+ 			addr += GCHeap::kBlockSize;
+ 		}
+@@ -963,6 +1188,8 @@ bail:
+ 	void GC::UnmarkGCPages(void *item, uint32 numpages)
+ 	{
+ 		uintptr addr = (uintptr) item;
++
++		USING_PAGE_MAP();
+ 		while(numpages--)
+ 		{
+ 			ClearPageMapValue(addr);
+@@ -1128,6 +1355,7 @@ bail:
+ 		gc->Mark(work);
+ 	}
+ 
++#ifndef MMGC_THREADSAFE
+ 	void GC::CheckThread()
+ 	{
+ #ifdef _DEBUG
+@@ -1136,12 +1364,13 @@ bail:
+ #endif
+ #endif
+ 	}
+-
++#endif
+ 
+ 	bool GC::IsPointerToGCPage(const void *item)
+ 	{
++		USING_PAGE_MAP();
+ 		if((uintptr)item >= memStart && (uintptr)item < memEnd)
+-			return GetPageMapValue((uintptr) item) != 0;
++			return GetPageMapValueAlreadyLocked((uintptr) item) != 0;
+ 		return false;
+ 	}
+ 
+@@ -1182,10 +1411,13 @@ bail:
+ 		// note if we add things while reaping we could delete the object
+ 		// here if we had a way to monitor our stack usage
+ 		if(reaping && PLENTY_OF_STACK()) {
+-			GCCallback *cb = gc->m_callbacks;
+-			while(cb) {
+-				cb->prereap(obj);
+-				cb = cb->nextCB;
++			{
++				USING_CALLBACK_LIST(gc);
++				GCCallback *cb = gc->m_callbacks;
++				while(cb) {
++					cb->prereap(obj);
++					cb = cb->nextCB;
++				}
+ 			}
+ 			if(gc->IsFinalized(obj))
+ 				((GCFinalizedObject*)obj)->~GCFinalizedObject();
+@@ -1376,10 +1608,13 @@ bail:
+ 		nextPinnedIndex = 0;
+ 
+ 		// invoke prereap on all callbacks
+-		GCCallback *cb = gc->m_callbacks;
+-		while(cb) {
+-			cb->prereap();
+-			cb = cb->nextCB;
++		{
++			USING_CALLBACK_LIST(gc);
++			GCCallback *cb = gc->m_callbacks;
++			while(cb) {
++				cb->prereap();
++				cb = cb->nextCB;
++			}
+ 		}
+ 
+ #ifdef _DEBUG
+@@ -1420,10 +1655,13 @@ bail:
+ 				}
+ #endif
+ 				// invoke prereap on all callbacks
+-				GCCallback *cbb = gc->m_callbacks;
+-				while(cbb) {
+-					cbb->prereap(rcobj);
+-					cbb = cbb->nextCB;
++				{
++					USING_CALLBACK_LIST(gc);
++					GCCallback *cbb = gc->m_callbacks;
++					while(cbb) {
++						cbb->prereap(rcobj);
++						cbb = cbb->nextCB;
++					}
+ 				}
+ 
+ 				GCAssert(*(int*)rcobj != 0);
+@@ -1457,10 +1695,13 @@ bail:
+ 		zctIndex = nextPinnedIndex = 0;
+ 
+ 		// invoke postreap on all callbacks
+-		cb = gc->m_callbacks;
+-		while(cb) {
+-			cb->postreap();
+-			cb = cb->nextCB;
++		{
++			USING_CALLBACK_LIST(gc);
++			GCCallback *cb = gc->m_callbacks;
++			while(cb) {
++				cb->postreap();
++				cb = cb->nextCB;
++			}
+ 		}
+ #ifdef DEBUGGER
+ 		if(gc->gcstats && numObjects) {
+@@ -1607,7 +1848,8 @@ bail:
+ 		va_end(argptr);
+ 
+ 		GCAssert(strlen(buf) < 4096);
+-			
++
++		USING_CALLBACK_LIST(this);
+ 		GCCallback *cb = m_callbacks;
+ 		while(cb) {
+ 			cb->log(buf);
+@@ -1659,23 +1901,27 @@ bail:
+ 
+ 	bool GC::IsRCObject(const void *item)
+ 	{
+-		if((uintptr)item >= memStart && (uintptr)item < memEnd && ((uintptr)item&0xfff) != 0)
+-		{
+-			int bits = GetPageMapValue((uintptr)item);		
+-			item = GetRealPointer(item);
+-			switch(bits)
+-			{
+-			case kGCAllocPage:
+-				if((char*)item < ((GCAlloc::GCBlock*)((uintptr)item&~0xfff))->items)
+-					return false;
+-				return GCAlloc::IsRCObject(item);
+-			case kGCLargeAllocPageFirst:
+-				return GCLargeAlloc::IsRCObject(item);
+-			default:
++		int bits;
++		{
++			USING_PAGE_MAP();
++
++			if ((uintptr)item < memStart || (uintptr)item >= memEnd || ((uintptr)item&0xfff) == 0)
+ 				return false;
+-			}
+-		}
+-		return false;
++			bits = GetPageMapValueAlreadyLocked((uintptr)item);
++		}
++
++		item = GetRealPointer(item);
++		switch(bits)
++		{
++		case kGCAllocPage:
++			if((char*)item < ((GCAlloc::GCBlock*)((uintptr)item&~0xfff))->items)
++				return false;
++			return GCAlloc::IsRCObject(item);
++		case kGCLargeAllocPageFirst:
++			return GCLargeAlloc::IsRCObject(item);
++		default:
++			return false;
++		}
+ 	}
+ 
+ #endif // MMGC_DRC
+@@ -1761,7 +2007,7 @@ bail:
+ 				continue;
+ 			
+ 			// normalize and divide by 4K to get index
+-			int bits = GetPageMapValue(val);
++			int bits = GetPageMapValueAlreadyLocked(val);
+ 			switch(bits)
+ 			{
+ 			case 0:
+@@ -1782,6 +2028,8 @@ bail:
+ 
+ 	void GC::FindUnmarkedPointers()
+ 	{
++		ASSERT_EXCLUSIVE_GC();
++
+ 		if(findUnmarkedPointers)
+ 		{
+ 			uintptr m = memStart;
+@@ -1920,6 +2168,11 @@ bail:
+ 	 */
+ 	void GC::WhosPointingAtMe(void* me, int recurseDepth, int currentDepth)
+ 	{
++#ifdef MMGC_THREADSAFE
++		if (currentDepth == 0)
++			pageMapLock.Acquire();
++#endif
++
+ 		uintptr val = (uintptr)me;
+ 		uintptr m = memStart;
+ 
+@@ -1942,7 +2195,7 @@ bail:
+ #endif
+ 
+ 			// divide by 4K to get index
+-			int bits = GetPageMapValue(m);
++			int bits = GetPageMapValueAlreadyLocked(m);
+ 			if(bits == kNonGC) 
+ 			{
+ 				ProbeForMatch((const void*)m, GCHeap::kBlockSize, val, recurseDepth, currentDepth);
+@@ -1986,6 +2239,11 @@ bail:
+ 			
+ 			}
+ 		}
++
++#ifdef MMGC_THREADSAFE
++		if (currentDepth == 0)
++			pageMapLock.Release();
++#endif
+ 	}
+ #undef ALLOCA_AND_FILL_WITH_SPACES
+ #endif
+@@ -2354,9 +2612,11 @@ bail:
+ 		return 1000000;
+ 		#endif
+ 	}
+-	
++
+ 	void GC::IncrementalMark(uint32 time)
+ 	{
++		ASSERT_EXCLUSIVE_GC();
++
+ 		SAMPLE_FRAME("[mark]", core());
+ 		if(m_incrementalWork.Count() == 0 || hitZeroObjects) {
+ 			FinishIncrementalMark();
+@@ -2411,6 +2671,8 @@ bail:
+ 
+ 	void GC::FinishIncrementalMark()
+ 	{
++		ASSERT_EXCLUSIVE_GC();
++
+ 		// Don't finish an incremental mark (i.e., sweep) if we
+ 		// are in the midst of a ZCT reap.
+ 		if (zct.reaping)
+@@ -2442,8 +2704,11 @@ bail:
+ 		}
+ 
+ 		// invoke lastmark on all callbacks
+-		for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
+-			cb->lastmark(m_incrementalWork);
++		{
++			USING_CALLBACK_LIST(this);
++			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
++				cb->lastmark(m_incrementalWork);
++		}
+ 
+ 		MarkQueueAndStack(m_incrementalWork);
+ 
+@@ -2594,6 +2859,7 @@ bail:
+ 	{
+ 		uint32 *bits;
+ 
++		ASSERT_GC_LOCK();
+ 		GCAssert(numBytes % 4 == 0);
+ 
+ 		#ifdef MMGC_64BIT // we use first 8-byte slot for the free list
+@@ -2665,7 +2931,8 @@ bail:
+ 
+ 	void GC::AddCallback(GCCallback *cb)
+ 	{
+-		CheckThread();
++		USING_CALLBACK_LIST(this);
++
+ 		cb->prevCB = NULL;
+ 		cb->nextCB = m_callbacks;
+ 		if(m_callbacks)
+@@ -2675,7 +2942,8 @@ bail:
+ 
+ 	void GC::RemoveCallback(GCCallback *cb)
+ 	{
+-		CheckThread();
++		USING_CALLBACK_LIST(this);
++
+ 		if( m_callbacks == cb )
+ 			m_callbacks = cb->nextCB;
+ 		else
+@@ -2687,7 +2955,8 @@ bail:
+ 
+ 	void GC::AddEdgeCallback(GCEdgeCallback *cb)
+ 	{
+-		CheckThread();
++		USING_CALLBACK_LIST(this);
++
+ 		cb->prevCB = NULL;
+ 		cb->nextCB = m_edgeCallbacks;
+ 		if(m_edgeCallbacks)
+@@ -2697,7 +2966,8 @@ bail:
+ 
+ 	void GC::RemoveEdgeCallback(GCEdgeCallback *cb)
+ 	{
+-		CheckThread();
++		USING_CALLBACK_LIST(this);
++
+ 		if( m_edgeCallbacks == cb )
+ 			m_edgeCallbacks = cb->nextCB;
+ 		else
+@@ -2709,12 +2979,12 @@ bail:
+ 
+ 	void GC::FireFoundEdgeTo(const void *p)
+ 	{
++		// Don't acquire the spinlock here because (a) that would really hurt
++		// performance; (b) the m_edgeCallbacks list, unlike the m_callbacks
++		// list, is protected by the request model.
+ 		p = GetUserPointer(p);
+-		GCEdgeCallback *cb = m_edgeCallbacks;
+-		while(cb) {
++		for (GCEdgeCallback *cb = m_edgeCallbacks; cb; cb = cb->nextCB)
+ 			cb->foundEdgeTo(p);
+-			cb = cb->nextCB;
+-		}
+ 	}
+ 
+ 	void GC::PushWorkItem(GCStack<GCWorkItem> &stack, GCWorkItem item)
+@@ -2727,9 +2997,18 @@ bail:
+ 	GCWeakRef* GC::GetWeakRef(const void *item) 
+ 	{
+ 		GC *gc = GetGC(item);
++#ifdef MMGC_THREADSAFE
++		GCAutoLock _lock(gc->m_lock);
++#endif
+ 		GCWeakRef *ref = (GCWeakRef*) gc->weakRefs.get(item);
++
+ 		if(ref == NULL) {
++#ifdef MMGC_THREADSAFE
++			void *p = gc->AllocAlreadyLocked(sizeof(GCWeakRef), GC::kFinalize, 4);
++			ref = new (p) GCWeakRef(item);
++#else
+ 			ref = new (gc) GCWeakRef(item);
++#endif
+ 			gc->weakRefs.put(item, ref);
+ 			item = GetRealPointer(item);
+ 			if (GCLargeAlloc::IsLargeBlock(item)) {
+@@ -2789,6 +3068,8 @@ bail:
+ 
+ 	void GC::FindMissingWriteBarriers()
+ 	{
++		ASSERT_EXCLUSIVE_GC();
++
+ 		if(!incrementalValidation)
+ 			return;
+ 
+@@ -2850,6 +3131,7 @@ bail:
+ 	void GC::StartGCActivity()
+ 	{
+ 		// invoke postsweep callback
++		USING_CALLBACK_LIST(this);
+ 		GCCallback *cb = m_callbacks;
+ 		while(cb) {
+ 			cb->startGCActivity();
+@@ -2860,6 +3142,7 @@ bail:
+ 	void GC::StopGCActivity()
+ 	{
+ 		// invoke postsweep callback
++		USING_CALLBACK_LIST(this);
+ 		GCCallback *cb = m_callbacks;
+ 		while(cb) {
+ 			cb->stopGCActivity();
+@@ -2870,6 +3153,7 @@ bail:
+ 	void GC::AllocActivity(int blocks)
+ 	{
+ 		// invoke postsweep callback
++		USING_CALLBACK_LIST(this);
+ 		GCCallback *cb = m_callbacks;
+ 		while(cb) {
+ 			cb->allocActivity(blocks);
+diff --git a/MMgc/GC.h b/MMgc/GC.h
+--- a/MMgc/GC.h
++++ b/MMgc/GC.h
+@@ -195,7 +195,10 @@ namespace MMgc
+ 	private:
+ 		FixedMalloc* fm;
+ 		GC * gc;
++
++		/** @access Requires(gc->m_rootListLock) */
+ 		GCRoot *next;
++		/** @access Requires(gc->m_rootListLock) */
+ 		GCRoot *prev;
+ 		const void *object;
+ 		size_t size;
+@@ -220,16 +223,25 @@ namespace MMgc
+ 		void Destroy();
+ 
+ 		/**
+-		 * This method is called from GC::Collect(), before
+-		 * anything else, even in cases where it's logically
+-		 * certain that collection <em>will not start</em>
+-		 * (e.g. because we're already collecting, or GC is
++		 * GC::Collect() fires this callback before anything else, even in
++		 * cases where it's logically certain that collection <em>will not
++		 * start</em> (e.g. because we're already collecting, or GC is
+ 		 * disabled).
++		 *
++		 * In an MMGC_THREADSAFE build, it is unspecified whether the calling
++		 * thread holds the gc-wide lock when this callback is called.
++		 * Multiple threads may fire this callback concurrently.
+ 		 *
+ 		 * Returns true to allow collection to happen, false
+ 		 * to veto.
+ 		 */
+ 		virtual bool precollect() { return true; }
++
++		/**
++		 * GC::Collect() fires this callback after everything else,
++		 * only if collection actually happened in this thread.
++		 */
++		virtual void postcollect() {}
+ 
+ 		/**
+ 		 * This method is invoked during the last mark phase
+@@ -241,6 +253,8 @@ namespace MMgc
+ 		 * provided by the GCCallback base class.  The
+ 		 * corresponding methods of <code>class GC</code> are
+ 		 * private.)
++		 *
++		 * @access Requires(gc->exclusiveGC)
+ 		 */
+ 		virtual void lastmark(GCStack<GCWorkItem> &) {}
+ 
+@@ -248,11 +262,15 @@ namespace MMgc
+ 		 * This method is invoked after all marking and before any
+ 		 * sweeping, useful for bookkeeping based on whether things
+ 		 * got marked
++		 *
++		 * @access Requires(gc->exclusiveGC)
+ 		 */
+ 		virtual void presweep() {}
+ 
+ 		/**
+ 		 * This method is invoked after all sweeping
++		 *
++		 * @access Requires(gc->exclusiveGC)
+ 		 */
+ 		virtual void postsweep() {}
+ 
+@@ -268,6 +286,43 @@ namespace MMgc
+ 		// called after a ZCT reap completes
+ 		virtual void postreap() {}
+ #endif
++
++		/**
++		 * This callback is the first thing a collecting thread does once it
++		 * acquires exclusiveGC status.  When this is called, no other threads
++		 * are running in requests and the calling thread holds gc->m_lock.
++		 *
++		 * Although this makes the most sense in an MMGC_THREADSAFE build, the
++		 * callback is nonetheless called at the same point during collection
++		 * even in non-MMGC_THREADSAFE builds.  This is so programs that build
++		 * in both thread-safe and non-thread-safe configurations will see the
++		 * same behavior in both.  The same idea applies to
++		 * enterExclusiveGCNoLock and leaveExclusiveGC.
++		 *
++		 * @access Requires(gc->exclusiveGC && gc->m_lock)
++		 */
++		virtual void enterExclusiveGC() {}
++
++		/**
++		 * This callback happens after enterExclusiveGC.  The only difference
++		 * is that this is called after the GC has released gc->m_lock.
++		 *
++		 * See note at GCCallback::enterExclusiveGC().
++		 *
++		 * @access Requires(gc->exclusiveGC && !gc->m_lock)
++		 */
++		virtual void enterExclusiveGCNoLock() {}
++
++		/**
++		 * This callback is the last thing a collecting thread does before it
++		 * relinquishes exclusiveGC status, allowing other threads to run in
++		 * requests.
++		 *
++		 * See note at GCCallback::enterExclusiveGC().
++		 *
++		 * @access Requires(gc->exclusiveGC && gc->m_lock)
++		 */
++		virtual void leaveExclusiveGC() {}
+ 
+ 		/**
+ 		 * This method is called before an RC object is reaped
+@@ -312,6 +367,17 @@ namespace MMgc
+ 	{
+ 		friend class GC;
+ 	public:
++		/**
++		 * Create an edge callback.
++		 *
++		 * If `gc` is non-null, this constructor adds this GCEdgeCallback to a
++		 * list of callbacks managed by `gc`.  This means the new object
++		 * immediately starts receiving calls, even if it isn't fully
++		 * constructed yet, potentially resulting in "Pure virtual method
++		 * called" or some other crash.  To avoid this race condition, call
++		 * this constructor with a non-null `gc` only from a thread that is in
++		 * a request on `gc`.
++		 */
+ 		GCEdgeCallback(GC *gc = NULL);
+ 		virtual ~GCEdgeCallback();
+ 
+@@ -509,12 +575,19 @@ namespace MMgc
+ 		 * will cause a garbage collection.  This makes code run
+ 		 * abysmally slow, but can be useful for detecting mark
+ 		 * bugs.
++		 *
++		 * The GC reads this flag only when holding the GC lock.  It is best
++		 * to set it as soon as the GC is created.
++		 *
++		 * @access Requires(m_lock)
+ 		 */
+ 		bool greedy;
+ 
+ 		/**
+ 		 * nogc is a debugging flag.  When set, garbage collection
+ 		 * never happens.
++		 *
++		 * @access Requires(m_lock)
+ 		 */
+ 		bool nogc;
+ 
+@@ -530,8 +603,28 @@ namespace MMgc
+ 		bool validateDefRef;		
+ 		bool keepDRCHistory;
+ 
++		/**
++		 * Expand, don't collect, until we reach this threshold. Units are
++		 * pages, not KB, just like GCHeap::GetTotalHeapSize().
++		 *
++		 * In an MMGC_THREADSAFE build, the GC reads this configuration value
++		 * only when holding the GC lock.  Set it during
++		 * initialization, before the GC is visible to multiple threads.
++		 *
++		 * @access Requires(m_lock)
++		 */
+ 		size_t collectThreshold;
+ 
++		/**
++		 * If true, log some statistics during each collection.
++		 * The output goes to gclog().
++		 *
++		 * In an MMGC_THREADSAFE build, the GC reads this flag only during
++		 * stop-the-world collection.  Set it during initialization, before
++		 * the GC is visible to multiple threads.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
+ 		bool gcstats;
+ 
+ 		bool dontAddToZCTDuringCollection;
+@@ -541,6 +634,14 @@ namespace MMgc
+ 		bool incrementalValidationPedantic;
+ #endif
+ 
++		/**
++		 * Configuration flag enabling incremental collection.
++		 *
++		 * In an MMGC_THREADSAFE build, the GC reads this flag only when
++		 * holding the GC lock.  Set it during initialization.
++		 *
++		 * @access Requires(m_lock)
++		 */
+ 		bool incremental;
+ 
+ 		// -- Interface
+@@ -548,7 +649,13 @@ namespace MMgc
+ 		virtual ~GC();
+ 		
+ 		/**
+-		 * Causes an immediate garbage collection
++		 * Causes an immediate garbage collection.
++		 *
++		 * In an MMGC_THREADSAFE build, the caller must not be inside a
++		 * request.  If the caller is inside a request, call
++		 * CollectFromRequest() instead.
++		 *
++		 * @access Requires(!m_lock && !request)
+ 		 */
+ 		void Collect();
+ 
+@@ -573,15 +680,26 @@ namespace MMgc
+ 
+ 		/**
+ 		 * Main interface for allocating memory.  Default flags is no
+-		 * finalization, contains pointers is set and zero is set
++		 * finalization, contains pointers is set and zero is set.
++		 *
++		 * Do not call this from a finalizer.
++		 *
++		 * @access Requires(request)
+ 		 */
+ 		void *Alloc(size_t size, int flags=0, int skip=3);
++
++		/** @access Requires(request && m_lock) */
++		void *AllocAlreadyLocked(size_t size, int flags=0, int skip=3);
+ 
+ 		
+ 		/**
+ 		 * overflow checking way to call Alloc for a # of n size'd items,
+ 		 * all instance of Alloc(num*sizeof(thing)) should be replaced with:
+ 		 * Calloc(num, sizeof(thing))
++		 *
++		 * Do not call this from a finalizer.
++		 *
++		 * @access Requires(request)
+ 		 */
+ 		void *Calloc(size_t num, size_t elsize, int flags=0, int skip=3);
+ 
+@@ -589,11 +707,15 @@ namespace MMgc
+ 		 * One can free a GC allocated pointer, this will throw an assertion
+ 		 * if called during the Sweep phase (ie via a finalizer) it can only be
+ 		 * used outside the scope of a collection
++		 *
++		 * @access Requires(request)
+ 		 */
+ 		void Free(void *ptr);
+ 
+ 		/**
+ 		 * return the size of a piece of memory, may be bigger than what was asked for
++		 *
++		 * @access Requires(request || exclusiveGC)
+ 		 */
+ 		static size_t Size(const void *ptr)
+ 		{
+@@ -607,6 +729,8 @@ namespace MMgc
+ 		/**
+ 		 * Tracers should employ GetMark and SetMark to
+ 		 * set the mark bits during the mark pass.
++		 *
++		 * @access Requires(request || exclusiveGC)
+ 		 */
+ 		static int GetMark(const void *item)
+ 		{
+@@ -713,9 +837,13 @@ namespace MMgc
+ 		}
+ 
+ 		/**
+-		 * Used by sub-allocators to obtain memory
++		 * Used by sub-allocators to obtain memory.
++		 *
++		 * @access Requires(m_lock)
+ 		 */
+ 		void* AllocBlock(int size, int pageType, bool zero=true);
++
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		void FreeBlock(void *ptr, uint32 size);
+ 
+ 		GCHeap *GetGCHeap() const { return heap; }
+@@ -743,11 +871,19 @@ namespace MMgc
+ 		 */
+ 		bool IncrementalMarking() { return marking; }
+ 
+-		// a magical write barriers that finds the container's address and the GC, just
+-		// make sure address is a pointer to a GC page, only used by WB smart pointers
++		/**
++		 * A magical write barrier that finds the container's address and the
++		 * GC, just make sure @a address is a pointer to a GC page. Only used
++		 * by WB smart pointers.
++		 *
++		 * @access Requires(request)
++		 */
+ 		static void WriteBarrier(const void *address, const void *value);
++
++		/** @access Requires(request) */
+ 		static void WriteBarrierNoSub(const void *address, const void *value);
+ 
++		/** @access Requires(request) */
+ 		void writeBarrier(const void *container, const void *address, const void *value)
+ 		{
+ 			GCAssert(IsPointerToGCPage(container));
+@@ -759,13 +895,19 @@ namespace MMgc
+ 			WriteBarrierWrite(address, value);
+ 		}
+ 
+-		// optimized version with no RC checks or pointer masking
++		/**
++		 * optimized version with no RC checks or pointer masking
++		 *
++		 * @access Requires(request)
++		 */
+ 		void writeBarrierRC(const void *container, const void *address, const void *value);
+ 
+ 		/**
+-		Write barrier when the value could be a pointer with anything in the lower 3 bits
+-		FIXME: maybe assert that the lower 3 bits are either zero or a pointer type signature,
+-		this would require the application to tell us what bit patterns are pointers.
++		 * Write barrier when the value could be a pointer with anything in the lower 3 bits
++		 * FIXME: maybe assert that the lower 3 bits are either zero or a pointer type signature,
++		 * this would require the application to tell us what bit patterns are pointers.
++		 *
++		 * @access Requires(request)
+ 		 */
+ 		__forceinline void WriteBarrierNoSubstitute(const void *container, const void *value)
+ 		{
+@@ -773,9 +915,11 @@ namespace MMgc
+ 		}
+ 			
+ 		/**
+-		 AVM+ write barrier, valuePtr is known to be pointer and the caller
+-		does the write
+-		*/
++		 * AVM+ write barrier, valuePtr is known to be pointer and the caller
++		 * does the write.
++		 *
++		 * @access Requires(request)
++		 */
+ 		__forceinline void WriteBarrierTrap(const void *container, const void *valuePtr)
+ 		{
+ 			GCAssert(IsPointerToGCPage(container));
+@@ -799,8 +943,10 @@ namespace MMgc
+ 
+ 	public:
+ 
++		/** @access Requires(request || exclusiveGC) */
+ 		bool ContainsPointers(const void *item);
+ 
++		/** @access Requires(request) */
+ 		void *FindBeginning(const void *gcItem)
+ 		{
+ 			GCAssert(gcItem != NULL);
+@@ -836,11 +982,23 @@ namespace MMgc
+ 		bool IsRCObject(const void *) { return false; }
+ #endif
+ 
+-
+-		bool Collecting() const { return collecting; }
+-
++		/**
++		 * True during Sweep phase.  Application code can use this to
++		 * determine if it's being called (directly or indirectly) from a
++		 * finalizer.
++		 *
++		 * @see IsGCRunning()
++		 * @access Requires(request || exclusiveGC)
++		 */
++		bool Collecting() const
++		{
++			return collecting;
++		}
++
++		/** @access Requires(request || exclusiveGC) */
+ 		bool IsGCMemory (const void *);
+ 
++		/** @access Requires(request || exclusiveGC) */
+ 		bool IsQueued(const void *item);
+ 
+ 		static uint64 GetPerformanceCounter();
+@@ -851,30 +1009,80 @@ namespace MMgc
+ 			return (double(GC::GetPerformanceCounter() - start) * 1000) / GC::GetPerformanceFrequency();
+ 		}
+ 
++#ifndef MMGC_THREADSAFE
+ 		void DisableThreadCheck() { disableThreadCheck = true; }
+-		
+-		uint64 t0;
++#endif
++
++		/** GC initialization time, in ticks.  Used for logging. */
++		const uint64 t0;
+ 
+ 		static uint64 ticksToMicros(uint64 ticks) { return (ticks*1000000)/GetPerformanceFrequency(); }
+ 
+ 		static uint64 ticksToMillis(uint64 ticks) { return (ticks*1000)/GetPerformanceFrequency(); }
+ 
+-		// marking rate counter
++		/**
++		 * Total number of bytes of pointer-containing memory scanned by this
++		 * GC.  Used to measure marking rate, which is
++		 * <code>bytesMarked/ticksToMillis(markTicks)</code>.
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint64 bytesMarked;
++
++		/**
++		 * Total time spent doing incremental marking, in ticks.  See
++		 * bytesMarked.
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint64 markTicks;
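++
++		/*
++		 * Example (sketch): computing the marking rate described above, in
++		 * bytes per millisecond.  |gc| is a hypothetical GC*.
++		 *
++		 *     uint64 ms = GC::ticksToMillis(gc->markTicks);
++		 *     double rate = ms ? double(gc->bytesMarked) / double(ms) : 0.0;
++		 */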
+ 
+ 		// calls to mark item
+ 		uint32 lastStartMarkIncrementCount;
+ 		uint32 markIncrements;
++
++		/**
++		 * Number of calls to MarkItem().
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint32 marks;
++
++		/**
++		 * Number of calls to Sweep().
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+         uint32 sweeps;
+ 
++		/**
++		 * Number of calls to MarkItem() during the current (or most recent)
++		 * IncrementalMark().
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint32 numObjects;
++
++		/**
++		 * Number of bytes scanned in MarkItem() during the current (or most
++		 * recent) IncrementalMark().
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint32 objSize;
+ 
++		/**
++		 * Time of the latest FinishIncrementalMark() call, in ticks.
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint64 sweepStart;
+ 
+-		// if we hit zero in a marking incremental force completion in the next one
++		/**
++		 * True if we emptied the work queue during the most recent
++		 * incremental mark.  This means the next mark will force the GC cycle
++		 * through to completion.
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		bool hitZeroObjects;
+ 
+ 		// called at some apropos moment from the mutator, ideally at a point
+@@ -886,6 +1094,7 @@ namespace MMgc
+ 
+ 		bool Destroying() { return destroying; }
+ 
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		void ClearWeakRef(const void *obj);
+ 
+ 		uintptr	GetStackTop() const;		
+@@ -901,7 +1110,10 @@ namespace MMgc
+ 		// FIXME: only used for FixedAlloc, GCAlloc sized dynamically
+ 		const static int kPageUsableSpace = 3936;
+ 
++		/** @access Requires(request && m_lock) */
+ 		uint32 *GetBits(int numBytes, int sizeClass);
++
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		void FreeBits(uint32 *bits, int sizeClass)
+ 		{
+ #ifdef _DEBUG
+@@ -910,35 +1122,60 @@ namespace MMgc
+ 			*(uint32**)bits = m_bitsFreelists[sizeClass];
+ 			m_bitsFreelists[sizeClass] = bits;
+ 		}
++
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		uint32 *m_bitsFreelists[kNumSizeClasses];
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		uint32 *m_bitsNext;
+ 
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		GCHashtable weakRefs;
+ 		friend class GCObject;
+ 		friend class GCFinalizedObject;
++
++		/** @access Requires(request) */
+ 		static GCWeakRef *GetWeakRef(const void *obj);
+ 
+ 		bool destroying;
+ 
+-		// we track the heap size so if it expands due to fixed memory allocations
+-		// we can trigger a mark/sweep, other wise we can have lots of small GC 
+-		// objects allocating tons of fixed memory which results in huge heaps
+-		// and possible out of memory situations.
++		/**
++		 * We track the heap size so if it expands due to fixed memory
++		 * allocations we can trigger a mark/sweep. Otherwise we can have lots
++		 * of small GC objects allocating tons of fixed memory, which results
++		 * in huge heaps and possible out of memory situations.
++		 *
++		 * @access Requires(m_lock)
++		 */
+ 		size_t heapSizeAtLastAlloc;
+ 
+ 		bool stackCleaned;
+ 		const void *rememberedStackTop;
+ 
++#ifndef MMGC_THREADSAFE
+ 		// for external code that does thread-safe multi-threaded AS execution
+ 		bool disableThreadCheck;
+-
++#endif
++
++		/**
++		 * True if incremental marking is on and some objects have been marked.
++		 * This means write barriers are enabled.
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 *
++		 * The GC thread may read and write this flag.  Application threads in
++		 * requests have read-only access.
++		 */
+ 		bool marking;
+ 		GCStack<GCWorkItem> m_incrementalWork;
+ 		void StartIncrementalMark();
+ 		void FinishIncrementalMark();
++
++		/** @access Requires(request || exclusiveGC) */
+ 		int IsWhite(const void *item);
+-		
++
++		/** @access ReadWrite(request, exclusiveGC) */
+ 		uint64 lastMarkTicks;
++		/** @access ReadWrite(request, exclusiveGC) */
+ 		uint64 lastSweepTicks;
+ 
+ 		const static int16 kSizeClasses[kNumSizeClasses];		
+@@ -950,14 +1187,44 @@ namespace MMgc
+ 		// 0 - not in use
+ 		// 1 - used by GCAlloc
+ 		// 3 - used by GCLargeAlloc
++
++		/** @access Requires(pageMapLock) */
+ 		uintptr memStart;
++		/** @access Requires(pageMapLock) */
+ 		uintptr memEnd;
+ 
++		/** @access Requires(m_lock) */
+ 		size_t totalGCPages;
+ 
++		/**
++		 * The bitmap for what pages are in use.  Any access to either the
++		 * pageMap pointer or the bitmap requires pageMapLock.
++		 *
++		 * (Note: A better synchronization scheme might be to use atomic
++		 * operations to read and write the pageMap pointer, writing it only
++		 * from within m_lock; and then using atomic read and write
++		 * operations--on Intel x86, these are just ordinary reads and
++		 * writes--to access the bitmap, with writes again only from within
++		 * m_lock.  This would require reallocating the pageMap more often,
++		 * but at least write barriers wouldn't have to acquire the spinlock.)
++		 *
++		 * @access Requires(pageMapLock)
++		 */
+ 		unsigned char *pageMap;
+ 
++		/**
++		 * This spinlock covers memStart, memEnd, and the contents of pageMap.
++		 */
++		mutable GCSpinLock pageMapLock;
++
+ 		inline int GetPageMapValue(uintptr addr) const
++		{
++			GCAcquireSpinlock lock(pageMapLock);
++			return GetPageMapValueAlreadyLocked(addr);
++		}
++
++		/** @access Requires(pageMapLock) */
++		inline int GetPageMapValueAlreadyLocked(uintptr addr) const
+ 		{
+ 			uintptr index = (addr-memStart) >> 12;
+ 
+@@ -969,7 +1236,20 @@ namespace MMgc
+ 			//return (pageMap[addr >> 2] & (3<<shiftAmount)) >> shiftAmount;
+ 			return (pageMap[index >> 2] >> shiftAmount) & 3;
+ 		}
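++
++		/*
++		 * Example (sketch): pageMap packs one 2-bit entry per 4KB page, four
++		 * entries per byte, so the lookup above amounts to:
++		 *
++		 *     uintptr index = (addr - memStart) >> 12;  // page number
++		 *     int shift = (index & 0x3) * 2;            // entry within byte
++		 *     int bits  = (pageMap[index >> 2] >> shift) & 3;
++		 *     // 0 - not in use, 1 - GCAlloc, 3 - GCLargeAlloc
++		 */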
++
++		/**
++		 * Set the pageMap bits for the given address.  Those bits must be
++		 * zero beforehand.
++		 *
++		 * @access Requires(pageMapLock)
++		 */
+ 		void SetPageMapValue(uintptr addr, int val);
++
++		/**
++		 * Zero out the pageMap bits for the given address.
++		 *
++		 * @access Requires(pageMapLock)
++		 */
+ 		void ClearPageMapValue(uintptr addr);
+ 
+ 		void MarkGCPages(void *item, uint32 numpages, int val);
+@@ -988,69 +1268,163 @@ namespace MMgc
+ 		GCAlloc *noPointersAllocs[kNumSizeClasses];
+ 		GCLargeAlloc *largeAlloc;
+ 		GCHeap *heap;
+-		
++
++		/** @access Requires(m_lock) */
+ 		void* AllocBlockIncremental(int size, bool zero=true);
++
++		/** @access Requires(m_lock) */
+ 		void* AllocBlockNonIncremental(int size, bool zero=true);
+ 
++	protected:
++		/**
++		 * Collect in a thread-safe, recursion-preventing way, with
++		 * callbacks.
++		 *
++		 * Both parameters are ignored in non-MMGC_THREADSAFE builds.  In an
++		 * MMGC_THREADSAFE build, callerHoldsLock must be true iff the calling
++		 * thread already holds m_lock, and callerHasActiveRequest must be
++		 * true iff the calling thread is already in an active request.
++		 */
++		void CollectWithBookkeeping(bool callerHoldsLock,
++									bool callerHasActiveRequest);
++
++	private:
++		/**
++		 * Just collect.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
++		void CollectImpl();
++
++		/** @access Requires(exclusiveGC) */
+ 		void ClearMarks();
+ #ifdef _DEBUG
+ 		public:
+ 		// sometimes useful for mutator to call this
++		/** @access Requires(exclusiveGC) */
+ 		void Trace(const void *stackStart=NULL, size_t stackSize=0);
+ 		private:
+ #else
++		/** @access Requires(exclusiveGC) */
+ 		void Trace(const void *stackStart=NULL, size_t stackSize=0);
+ #endif
+ 
++		/** @access Requires(exclusiveGC) */
+ 		void Finalize();
++		/** @access Requires(exclusiveGC) */
+ 		void Sweep(bool force=false);
++		/** @access Requires(exclusiveGC) */
+ 		void ForceSweep() { Sweep(true); }
++		/** @access Requires(exclusiveGC) */
+ 		void Mark(GCStack<GCWorkItem> &work);
++		/** @access Requires(exclusiveGC) */
+ 		void MarkQueueAndStack(GCStack<GCWorkItem> &work);
++		/** @access Requires(exclusiveGC) */
+ 		void MarkItem(GCStack<GCWorkItem> &work)
+ 		{
+ 			GCWorkItem workitem = work.Pop();
+ 			MarkItem(workitem, work);
+ 		}
++		/** @access Requires(exclusiveGC) */
+ 		void MarkItem(GCWorkItem &workitem, GCStack<GCWorkItem> &work);
+-		// Write barrier slow path
+-
++
++		/**
++		 * Write barrier slow path.  Queues the white object.
++		 *
++		 * @access Requires(request)
++		 */
+ 		void TrapWrite(const void *black, const void *white);
+ 
++		/**
++		 * Number of pages allocated from the GCHeap since the start of the
++		 * current GC cycle.
++		 *
++		 * @access Requires((request && m_lock) || exclusiveGC)
++		 */
+ 		unsigned int allocsSinceCollect;
+-		// The collecting flag prevents an unwanted recursive collection.
++
++		/**
++		 * True during the sweep phase of collection.  Several things have to
++		 * behave a little differently during this phase.  For example,
++		 * GC::Free() does nothing during sweep phase; otherwise finalizers
++		 * could be called twice.
++		 *
++		 * Also, Collect() uses this to protect itself from recursive calls
++		 * (from badly behaved finalizers).
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		bool collecting;
+- 
++
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		bool finalizedValue;
+ 
+-		// list of pages to be swept, built up in Finalize
++		/** @access Requires(exclusiveGC) */
+ 		void AddToSmallEmptyBlockList(GCAlloc::GCBlock *b)
+ 		{
+ 			b->next = smallEmptyPageList;
+ 			smallEmptyPageList = b;
+ 		}
++
++		/**
++		 * list of pages to be swept, built up in Finalize
++		 * @access Requires(exclusiveGC)
++		 */ 
+ 		GCAlloc::GCBlock *smallEmptyPageList;
+ 		
+-		// list of pages to be swept, built up in Finalize
++		/** @access Requires(exclusiveGC) */
+ 		void AddToLargeEmptyBlockList(GCLargeAlloc::LargeBlock *lb)
+ 		{
+ 			lb->next = largeEmptyPageList;
+ 			largeEmptyPageList = lb;
+ 		}
++
++		/**
++		 * list of pages to be swept, built up in Finalize
++		 * @access Requires(exclusiveGC)
++		 */
+ 		GCLargeAlloc::LargeBlock *largeEmptyPageList;
+ 		
+ #ifdef GCHEAP_LOCK
+ 		GCSpinLock m_rootListLock;
+ #endif
+ 
++		/** @access Requires(m_rootListLock) */
+ 		GCRoot *m_roots;
+ 		void AddRoot(GCRoot *root);
+ 		void RemoveRoot(GCRoot *root);
+-		
++
++#ifdef MMGC_THREADSAFE
++		GCSpinLock m_callbackListLock;
++#endif
++
++		/**
++		 * Points to the head of a linked list of callback objects.
++		 *
++		 * @access Requires(m_callbackListLock)
++		 */
+ 		GCCallback *m_callbacks;
+ 		void AddCallback(GCCallback *cb);
+ 		void RemoveCallback(GCCallback *cb);
+ 
++		/**
++		 * Points to the head of a linked list of edge callback objects.
++		 *
++		 * @access Requires((request && m_callbackListLock) || exclusiveGC)
++		 *
++		 * In an MMGC_THREADSAFE build, this linked list is protected by the
++		 * request model.  A thread must only access the list (a) from
++		 * application code that is within a request AND holds
++		 * m_callbackListLock; or (b) from MMgc code in the
++		 * m_exclusiveGCThread.
++		 *
++		 * This policy is different from the one that covers m_callbacks for
++		 * two reasons.  First, m_callbacks can fire the precollect callback
++		 * even if the calling thread is not in a request at all, so this
++		 * policy would be insufficient for m_callbacks.  Second,
++		 * m_edgeCallbacks fires very frequently during marking, so a
++		 * lock-free policy is probably much faster.
++		 */
+ 		GCEdgeCallback *m_edgeCallbacks;
+ 		void AddEdgeCallback(GCEdgeCallback *cb);
+ 		void RemoveEdgeCallback(GCEdgeCallback *cb);
+@@ -1058,9 +1432,10 @@ namespace MMgc
+ 		/**
+ 		 * Notify GCEdgeCallbacks of an edge.
+ 		 *
+-		 * p is a "real pointer".  This method converts it to
+-		 * a "user pointer" using GetUserPointer() before
+-		 * calling callbacks.
++		 * p is a "real pointer".  This method converts it to a "user pointer"
++		 * using GetUserPointer() before calling callbacks.
++		 *
++		 * @access Requires(exclusiveGC)
+ 		 */
+ 		void FireFoundEdgeTo(const void *p);
+ 
+@@ -1083,7 +1458,9 @@ private:
+ private:
+ #endif
+ 
++#ifndef MMGC_THREADSAFE
+ 		void CheckThread();
++#endif
+ 
+ 		void PushWorkItem(GCStack<GCWorkItem> &stack, GCWorkItem item);
+ 
+@@ -1100,17 +1477,37 @@ private:
+ 		void CheckFreelists();
+ 
+ 		int m_gcLastStackTrace;
++
++		/**
++		 * Used by FindUnmarkedPointers.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
+ 		void UnmarkedScan(const void *mem, size_t size);
+-		// find unmarked pointers in entire heap
++
++		/**
++		 * Find unmarked pointers in the entire heap.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
+ 		void FindUnmarkedPointers();
+ 
+ 		// methods for incremental verification
+ 
+-		// scan a region of memory for white pointers, used by FindMissingWriteBarriers
++		/**
++		 * Scan a region of memory for white pointers. Used by
++		 * FindMissingWriteBarriers.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
+ 		void WhitePointerScan(const void *mem, size_t size);
+ 
+-		// scan all GC memory (skipping roots) If a GC object is black make sure
+-		// it has no white pointers, skip it if its grey.
++		/**
++		 * Scan all GC memory (skipping roots).  If a GC object is black, make
++		 * sure it has no pointers to white objects.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
+ 		void FindMissingWriteBarriers();
+ #ifdef WIN32
+ 		// store a handle to the thread that create the GC to ensure thread safety
+@@ -1131,7 +1528,187 @@ public:
+ #ifdef _DEBUG
+ 		// Dump a list of objects that have pointers to the given location.
+ 		void WhosPointingAtMe(void* me, int recurseDepth=0, int currentDepth=0);
++
++		/**
++		 * Used by WhosPointingAtMe.
++		 * @access Requires(pageMapLock)
++		 */
+     	void ProbeForMatch(const void *mem, size_t size, uintptr value, int recurseDepth, int currentDepth);
++#endif
++
++#ifdef MMGC_THREADSAFE
++	public:
++		/**
++		 * True if marking or sweeping is happening.  In an MMGC_THREADSAFE
++		 * build, this implies that no threads in requests are running right
++		 * now.
++		 *
++		 * Contrast Collecting().
++		 *
++		 * @access Requires(request || exclusiveGC || m_lock)
++		 */
++		bool IsGCRunning() const { return m_gcRunning; }
++
++	protected:
++		/** @access Requires(m_lock) */
++		void OnEnterRequestAlreadyLocked()
++		{
++			WaitForGCDone();
++			m_requestCount++;
++#ifdef _DEBUG
++			GCThread::GetCurrentThread()->OnEnterRequest();
++#endif
++		}
++
++		/** @access Requires(m_lock) */
++		void OnLeaveRequestAlreadyLocked()
++		{
++			GCAssert(m_requestCount > 0);
++			m_requestCount--;
++			if (m_requestCount == 0)
++				m_condNoRequests.NotifyAll();
++#ifdef _DEBUG
++			GCThread::GetCurrentThread()->OnLeaveRequest();
++#endif
++		}
++
++	public:
++		/**
++		 * Call this when the number of active requests on a thread goes from
++		 * zero to one.
++		 */
++		void OnEnterRequest()
++		{
++			GCAutoLock _lock(m_lock);
++			OnEnterRequestAlreadyLocked();
++		}
++
++		/**
++		 * Call this when the number of active requests on a thread goes from 
++		 * one to zero.
++		 */
++		void OnLeaveRequest()
++		{
++			GCAutoLock _lock(m_lock);
++			OnLeaveRequestAlreadyLocked();
++		}
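++
++		/*
++		 * Example (sketch): how an embedder might bracket GC work with a
++		 * request.  |gc| is a hypothetical GC*.
++		 *
++		 *     gc->OnEnterRequest();     // requests on this thread: 0 -> 1
++		 *     void *p = gc->Alloc(32);  // safe: we are in a request
++		 *     gc->OnLeaveRequest();     // requests on this thread: 1 -> 0
++		 */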
++
++		/**
++		 * Exactly like Collect(), except that the caller must be inside a
++		 * request.
++		 *
++		 * @access Requires(request && !m_lock)
++		 */
++		void CollectFromRequest()
++		{
++			CollectWithBookkeeping(false, true);
++		}
++
++	protected:
++		/**
++		 * Wait for the current GC operation, if any, to finish.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		void WaitForGCDone()
++		{
++			GCAssert(m_exclusiveGCThread != GCThread::GetCurrentThread());
++			while (m_exclusiveGCThread != NULL)
++				m_condDone.Wait();
++		}
++
++		/**
++		 * This lock protects m_exclusiveGCThread and m_requestCount.
++		 * Policies built on m_lock and m_exclusiveGCThread govern how threads
++		 * access the rest of MMgc (both data and code).
++		 *
++		 * These policies are documented with "\@access" comments, which is a
++		 * kind of shorthand.  Here are a few illustrative examples:
++		 *
++		 * <code>\@access Requires(m_lock)</code> on a member variable or
++		 * method means it must be used only by a thread that holds m_lock.
++		 *
++		 * <code>\@access Requires(exclusiveGC)</code> means the member is to
++		 * be used only by the GC thread, and only when no threads are running
++		 * in requests.  This applies to methods like Trace(), MarkItem(),
++		 * Sweep(), and Finalize().
++		 *
++		 * <code>\@access Requires(request)</code> means the member must be
++		 * used only by a thread that is in a request.  This applies to
++		 * Alloc(), Calloc(), and Free().
++		 *
++		 * <code>\@access Requires(exclusiveGC || request)</code> requires
++		 * that the caller be <em>either</em> in a request <em>or</em> that it
++		 * be the GC thread, with no threads running in requests.  (Of course
++		 * a thread can never be both.)  This is just like
++		 * <code>Requires(request)</code> except that a member marked this way
++		 * is also safe to use from finalizer code.  (Finalizers run with
++		 * <code>exclusiveGC</code>, not in a request.)
++		 *
++		 * <code>\@access ReadWrite(request, exclusiveGC)</code> applies to
++		 * several data members.  This denotes a read-write locking scheme.
++		 * Any thread with <code>request || exclusiveGC</code> can safely
++		 * <em>read</em> these data members, but only a thread with
++		 * <code>exclusiveGC</code> can <em>write</em> to them.
++		 *
++		 * Other policies are occasionally used.  For example, the list of
++		 * GCRoots has its own spinlock, so the head of the list and each of
++		 * the links has <code>\@access Requires(m_rootListLock)</code>.
++		 *
++		 * XXX TODO - worry about less-than-one-word fields (just bools, I
++		 * think) that are packed with other fields that have different access
++		 * policies!
++		 *
++		 * Some data structures, like the list of GCRoots, are protected by
++		 * separate spinlocks.  The rule to avoid deadlock is: do not try to
++		 * acquire m_lock if you already hold any spinlock.
++		 */
++		mutable GCLock m_lock;
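++
++		/*
++		 * Example (sketch): the deadlock rule above in practice.  Taking a
++		 * spinlock while holding m_lock is fine; the reverse is not.
++		 *
++		 *     GCAutoLock _lock(m_lock);               // big lock first
++		 *     {
++		 *         GCAcquireSpinlock _s(pageMapLock);  // spinlock second: OK
++		 *     }
++		 *     // Wrong: acquiring m_lock while pageMapLock (or any other
++		 *     // spinlock) is held can deadlock.
++		 */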
++
++		/**
++		 * The thread currently doing GC-related work (or waiting to do
++		 * GC-related work); or NULL if there is no such thread.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		GCThread *m_exclusiveGCThread;
++
++		/**
++		 * True if a thread is doing GC-related work, having already ensured
++		 * that no threads are in active requests.
++		 *
++		 * (The complicated thread-safety policy here makes reads lock-free.
++		 * It also supports checking the status even if the currently running
++		 * code isn't in a request at all; Collect() needs that.)
++		 *
++		 * @access ReadWrite(request || exclusiveGC || m_lock, exclusiveGC && m_lock)
++		 */
++		bool m_gcRunning;
++
++		/**
++		 * This is notified whenever m_exclusiveGCThread becomes NULL.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		GCCondition m_condDone;
++
++		/**
++		 * The number of threads currently in active requests.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		int m_requestCount;
++
++		/**
++		 * This is notified whenever m_requestCount becomes zero.
++		 *
++		 * At most one thread is ever waiting on this condition at a time; if
++		 * a thread is waiting on it, that thread is m_exclusiveGCThread and
++		 * m_gcRunning is false.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		GCCondition m_condNoRequests;
+ #endif
+ 	};
+ 
+diff --git a/MMgc/GCAlloc.cpp b/MMgc/GCAlloc.cpp
+--- a/MMgc/GCAlloc.cpp
++++ b/MMgc/GCAlloc.cpp
+@@ -127,6 +127,10 @@ namespace MMgc
+ 
+ 	GCAlloc::GCBlock* GCAlloc::CreateChunk()
+ 	{
++#ifdef MMGC_THREADSAFE
++		GCAssert(m_gc->m_lock.IsHeld());
++#endif
++
+ 		// Get space in the bitmap.  Do this before allocating the actual block,
+ 		// since we might call GC::AllocBlock for more bitmap space and thus
+ 		// cause some incremental marking.
+@@ -225,6 +229,9 @@ namespace MMgc
+ 
+ 	void* GCAlloc::Alloc(size_t size, int flags)
+ 	{
++#ifdef MMGC_THREADSAFE
++		GCAssert(m_gc->m_lock.IsHeld());
++#endif
+ 		(void)size;
+ 		GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");
+ start:
+@@ -374,6 +381,11 @@ start:
+ 
+ 	void GCAlloc::Finalize()
+ 	{
++#ifdef MMGC_THREADSAFE
++		GCAssert(m_gc->m_exclusiveGCThread == GCThread::GetCurrentThread()
++		         || m_gc->destroying);
++#endif
++
+ 		m_finalized = true;
+ 		// Go through every item of every block.  Look for items
+ 		// that are in use but not marked as reachable, and delete
+diff --git a/MMgc/GCLargeAlloc.cpp b/MMgc/GCLargeAlloc.cpp
+--- a/MMgc/GCLargeAlloc.cpp
++++ b/MMgc/GCLargeAlloc.cpp
+@@ -53,6 +53,9 @@ namespace MMgc
+ 
+ 	void* GCLargeAlloc::Alloc(size_t size, int flags)
+ 	{
++#ifdef MMGC_THREADSAFE
++		GCAssert(m_gc->m_lock.IsHeld());
++#endif
+ 		int blocks = (size+sizeof(LargeBlock)+GCHeap::kBlockSize-1) / GCHeap::kBlockSize;
+ 		
+ 		LargeBlock *block = (LargeBlock*) m_gc->AllocBlock(blocks, GC::kGCLargeAllocPageFirst, (flags&GC::kZero) != 0);
+diff --git a/MMgc/GCTests.cpp b/MMgc/GCTests.cpp
+--- a/MMgc/GCTests.cpp
++++ b/MMgc/GCTests.cpp
+@@ -53,8 +53,23 @@ namespace MMgc
+ {
+ 	GC *gc;
+ 
+-	GCWeakRef* createWeakRef(int extra=0)
++	GCWeakRef* createWeakRef(int extra = 0)
+ 	{
++		// Bogusly use up some extra stack.
++		//
++		// Certain compilers/platforms require this (at least gcc/MacIntel).
++		// A pointer to the temporary GCObject can be left on the stack, so
++		// far down that CleanStack doesn't dare touch it.  Leaving the
++		// pointer there causes the temporary to be marked, and not collected,
++		// which causes tests to fail with assertions.
++		//
++		// The extra 32 bytes here cause the temporary to end up higher on
++		// the stack (numerically lesser address, on Intel at least) where
++		// CleanStack will clobber it.
++		//
++		char buf[32];
++		sprintf(buf, "%d", extra);  // don't optimize away buf
++
+ 		return (new (gc, extra) GCObject())->GetWeakRef();
+ 	}
+ 
+diff --git a/MMgc/GCThreads.cpp b/MMgc/GCThreads.cpp
+new file mode 100644
+--- /dev/null
++++ b/MMgc/GCThreads.cpp
+@@ -0,0 +1,100 @@
++/* ***** BEGIN LICENSE BLOCK *****
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
++ *
++ * The contents of this file are subject to the Mozilla Public License Version
++ * 1.1 (the "License"); you may not use this file except in compliance with
++ * the License. You may obtain a copy of the License at
++ * http://www.mozilla.org/MPL/
++ *
++ * Software distributed under the License is distributed on an "AS IS" basis,
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
++ * for the specific language governing rights and limitations under the
++ * License.
++ *
++ * The Original Code is [Open Source Virtual Machine.].
++ *
++ * The Initial Developer of the Original Code is
++ * Adobe System Incorporated.
++ * Portions created by the Initial Developer are Copyright (C) 2004-2006
++ * the Initial Developer. All Rights Reserved.
++ *
++ * Contributor(s):
++ *   Jason Orendorff
++ *
++ * Alternatively, the contents of this file may be used under the terms of
++ * either the GNU General Public License Version 2 or later (the "GPL"), or
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
++ * in which case the provisions of the GPL or the LGPL are applicable instead
++ * of those above. If you wish to allow use of your version of this file only
++ * under the terms of either the GPL or the LGPL, and not to allow others to
++ * use your version of this file under the terms of the MPL, indicate your
++ * decision by deleting the provisions above and replace them with the notice
++ * and other provisions required by the GPL or the LGPL. If you do not delete
++ * the provisions above, a recipient may use your version of this file under
++ * the terms of any one of the MPL, the GPL or the LGPL.
++ *
++ * ***** END LICENSE BLOCK ***** */
++
++#include "MMgc.h"
++
++#ifdef MMGC_THREADSAFE
++#include "prtypes.h"
++
++using namespace MMgc;
++
++PRUintn GCThread::g_tlsIndex;
++GCThread *GCThread::g_first = NULL;
++static PRLock *g_threadListLock = NULL;
++
++void (PR_CALLBACK DestroyGCThread)(void *obj)
++{
++	delete static_cast<GCThread *>(obj);
++}
++
++void GCThread::Init()
++{
++    GCAssert(g_threadListLock == NULL);
++	g_threadListLock = PR_NewLock();
++	GCAssert(g_threadListLock != NULL);
++	if (g_threadListLock != NULL) {
++		PRStatus st = PR_NewThreadPrivateIndex(&g_tlsIndex, DestroyGCThread);
++		(void) st;
++		GCAssert(st == PR_SUCCESS);
++	}
++}
++
++void GCThread::Destroy()
++{
++    GCAssert(g_threadListLock != NULL);
++
++	PR_DestroyLock(g_threadListLock);
++	g_threadListLock = NULL;
++    // NSPR provides no way to release g_tlsIndex to the system.
++}
++
++GCThread::GCThread()
++#ifdef _DEBUG
++	: m_inRequest(false)
++#endif
++{
++	PR_Lock(g_threadListLock);
++	m_next = g_first;
++	g_first = this;
++	PR_Unlock(g_threadListLock);
++}
++
++GCThread * GCThread::GetCurrentThread()
++{
++    GCAssert(g_threadListLock != NULL);
++
++    GCThread *t = static_cast<GCThread *>(PR_GetThreadPrivate(g_tlsIndex));
++    if (t == NULL) {
++        t = new GCThread;
++        PRStatus st = PR_SetThreadPrivate(g_tlsIndex, t);
++		(void) st;
++        GCAssert(st == PR_SUCCESS);
++    }
++    return t;
++}
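++
++// Example (sketch): after GCThread::Init(), any thread can look up its
++// per-thread state.  The first call on a thread allocates the GCThread;
++// the NSPR destructor hook (DestroyGCThread above) frees it at thread
++// exit.
++//
++//     GCThread *self = GCThread::GetCurrentThread();
++//     self->OnEnterRequest();  // debug-only bookkeeping in _DEBUG builds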
++
++#endif  // MMGC_THREADSAFE
+diff --git a/MMgc/GCThreads.h b/MMgc/GCThreads.h
+new file mode 100644
+--- /dev/null
++++ b/MMgc/GCThreads.h
+@@ -0,0 +1,227 @@
++/* ***** BEGIN LICENSE BLOCK *****
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
++ *
++ * The contents of this file are subject to the Mozilla Public License Version
++ * 1.1 (the "License"); you may not use this file except in compliance with
++ * the License. You may obtain a copy of the License at
++ * http://www.mozilla.org/MPL/
++ *
++ * Software distributed under the License is distributed on an "AS IS" basis,
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
++ * for the specific language governing rights and limitations under the
++ * License.
++ *
++ * The Original Code is [Open Source Virtual Machine.].
++ *
++ * The Initial Developer of the Original Code is
++ * Adobe System Incorporated.
++ * Portions created by the Initial Developer are Copyright (C) 2004-2006
++ * the Initial Developer. All Rights Reserved.
++ *
++ * Contributor(s):
++ *   Jason Orendorff
++ *
++ * Alternatively, the contents of this file may be used under the terms of
++ * either the GNU General Public License Version 2 or later (the "GPL"), or
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
++ * in which case the provisions of the GPL or the LGPL are applicable instead
++ * of those above. If you wish to allow use of your version of this file only
++ * under the terms of either the GPL or the LGPL, and not to allow others to
++ * use your version of this file under the terms of the MPL, indicate your
++ * decision by deleting the provisions above and replace them with the notice
++ * and other provisions required by the GPL or the LGPL. If you do not delete
++ * the provisions above, a recipient may use your version of this file under
++ * the terms of any one of the MPL, the GPL or the LGPL.
++ *
++ * ***** END LICENSE BLOCK ***** */
++
++#ifndef __MMGC_GCTHREADS__
++#define __MMGC_GCTHREADS__
++
++#ifdef MMGC_THREADSAFE
++
++#include "prthread.h"
++#include "prlock.h"
++#include "prcvar.h"
++
++namespace MMgc
++{
++	/** A non-reentrant mutex. */
++	class GCLock
++	{
++		friend class GCCondition;
++	public:
++		GCLock() 
++			: m_lock(PR_NewLock())
++#ifdef _DEBUG
++			, m_holder(NULL)
++#endif
++		{
++			GCAssert(m_lock != NULL);
++		}
++
++		~GCLock() { PR_DestroyLock(m_lock); }
++
++		void Acquire()
++		{
++#ifdef _DEBUG
++			PRThread *thr = PR_GetCurrentThread();
++			GCAssertMsg(m_holder != thr, "deadlock in GCLock::Acquire");
++#endif
++			PR_Lock(m_lock);
++#ifdef _DEBUG
++			m_holder = thr;
++#endif
++		}
++
++		void Release()
++		{
++#ifdef _DEBUG
++			GCAssert(IsHeld());
++			m_holder = NULL;
++#endif
++			PR_Unlock(m_lock);
++		}
++
++#ifdef _DEBUG
++		/**
++		 * True if the calling thread owns the lock.
++		 *
++		 * Warning: This is for use in debug-mode assertions about the lock
++		 * state ONLY.  It's theoretically possible, on some non-IA32
++		 * platforms, that reading m_holder outside the lock like this could
++		 * return random junk, causing a false positive here.  On IA32, it's
++		 * safe.
++		 */
++		bool IsHeld()
++		{
++			return m_holder == PR_GetCurrentThread();
++		}
++#endif
++
++	private:
++		// Prohibit copying.
++		GCLock(const GCLock &);
++		GCLock & operator=(const GCLock &);
++
++		PRLock *m_lock;
++#ifdef _DEBUG
++		PRThread *m_holder;
++#endif
++	};
++
++	class GCAutoLock {
++	public:
++		explicit GCAutoLock(GCLock &lock)
++			: m_lock(lock)
++		{
++			lock.Acquire();
++		}
++
++		~GCAutoLock() { m_lock.Release(); }
++
++	private:
++		// Prohibit copying.
++		GCAutoLock(const GCAutoLock &);
++		GCAutoLock & operator=(const GCAutoLock &);
++
++		GCLock &m_lock;
++	};
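++
++	/*
++	 * Example (sketch): typical use of GCLock through GCAutoLock, so the
++	 * lock is released on every path out of the scope.
++	 *
++	 *     GCLock lock;
++	 *     void Update()
++	 *     {
++	 *         GCAutoLock _lock(lock);  // Acquire() here
++	 *         // ... touch the state the lock protects ...
++	 *     }                            // ~GCAutoLock() calls Release()
++	 */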
++
++	/** A pthreads-style condition variable. */
++	class GCCondition {
++	public:
++		explicit GCCondition(GCLock &lock)
++			: m_cvar(PR_NewCondVar(lock.m_lock))
++#ifdef _DEBUG
++			, m_lockObject(lock)
++#endif
++		{
++			GCAssert(m_cvar != NULL);
++		}
++
++		~GCCondition() { PR_DestroyCondVar(m_cvar); }
++
++		void Wait()
++		{
++			PRStatus st;
++
++#ifdef _DEBUG
++			m_lockObject.m_holder = NULL;
++#endif
++			st = PR_WaitCondVar(m_cvar, PR_INTERVAL_NO_TIMEOUT);
++#ifdef _DEBUG
++			m_lockObject.m_holder = PR_GetCurrentThread();
++#endif
++			(void)st;
++			GCAssert(st == PR_SUCCESS);
++		}
++
++		void NotifyAll()
++		{
++			PRStatus st;
++
++			st = PR_NotifyAllCondVar(m_cvar);
++			(void)st;
++			GCAssert(st == PR_SUCCESS);
++		}
++
++	private:
++		// Prohibit copying.
++		GCCondition(const GCCondition &);
++		GCCondition & operator=(const GCCondition &);
++
++		PRCondVar *m_cvar;
++#ifdef _DEBUG
++		GCLock &m_lockObject;
++#endif
++    };
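++
++	/*
++	 * Example (sketch): the wait-in-a-loop pattern GCCondition supports.
++	 * The waiter must hold the lock the condition was built on and must
++	 * recheck its predicate after every Wait().
++	 *
++	 *     GCLock lock;
++	 *     GCCondition cond(lock);
++	 *     bool done = false;
++	 *
++	 *     // waiter, with lock held:
++	 *     while (!done)
++	 *         cond.Wait();
++	 *
++	 *     // signaler, with lock held:
++	 *     done = true;
++	 *     cond.NotifyAll();
++	 */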
++
++	/** A catchall object for per-thread state. */
++	class GCThread {
++	public:
++		static void Init();
++		static void Destroy();
++
++		static GCThread * GetCurrentThread();
++
++#ifdef _DEBUG
++	public:
++		void OnEnterRequest()
++		{
++			GCAssert(!m_inRequest);
++			m_inRequest = true;
++		}
++
++		void OnLeaveRequest()
++		{
++			GCAssert(m_inRequest);
++			m_inRequest = false;
++		}
++
++		bool IsInActiveRequest() { return m_inRequest; }
++
++	private:
++		bool m_inRequest;
++#else
++	public:
++		void OnEnterRequest() {}
++		void OnLeaveRequest() {}
++#endif
++
++	private:
++		// Prohibit copying.
++		GCThread(const GCThread &);
++		GCThread & operator=(const GCThread &);
++
++		GCThread();
++
++		static PRUintn g_tlsIndex;
++		static GCThread *g_first;
++		GCThread *m_next;
++	};
++}
++
++#endif // MMGC_THREADSAFE
++
++#endif // __MMGC_GCTHREADS__
+diff --git a/MMgc/GCWeakRef.h b/MMgc/GCWeakRef.h
+--- a/MMgc/GCWeakRef.h
++++ b/MMgc/GCWeakRef.h
+@@ -63,6 +63,11 @@ namespace MMgc
+ 		{
+ 			return gc->Alloc(size, GC::kFinalize, 4);
+ 		}
++		// Placement new.
++		void *operator new(size_t, void *p)
++		{
++			return p;
++		}
+ 		// private, only GC can access
+ 		GCWeakRef(const void *obj) : m_obj(obj) 
+ 		{
+diff --git a/MMgc/MMgc.h b/MMgc/MMgc.h
+--- a/MMgc/MMgc.h
++++ b/MMgc/MMgc.h
+@@ -125,6 +125,7 @@ namespace MMgc
+ 
+ #include "GCTypes.h"
+ #include "GCStack.h"
++#include "GCThreads.h"
+ #include "GCAllocObject.h"
+ #include "GCHeap.h"
+ #include "GCAlloc.h"
+diff --git a/MMgc/manifest.mk b/MMgc/manifest.mk
+--- a/MMgc/manifest.mk
++++ b/MMgc/manifest.mk
+@@ -56,6 +56,7 @@ MMgc_CXXSRCS := $(MMgc_CXXSRCS) \
+   $(curdir)/GCMemoryProfiler.cpp \
+   $(curdir)/GCObject.cpp \
+   $(curdir)/GCTests.cpp \
++  $(curdir)/GCThreads.cpp \
+   $(NULL)
+ 
+ ifeq (windows,$(TARGET_OS))
+diff --git a/configure.py b/configure.py
+--- a/configure.py
++++ b/configure.py
+@@ -87,6 +87,7 @@ MMGC_THREADSAFE = o.getBoolArg('threadsa
+ MMGC_THREADSAFE = o.getBoolArg('threadsafe-mmgc', False)
+ if MMGC_THREADSAFE:
+     MMGC_DEFINES['MMGC_THREADSAFE'] = None
++    MMGC_DEFINES['MMGC_INTERIOR_PTRS'] = None
+     NSPR_INCLUDES = o.getStringArg('nspr-includes')
+     MMGC_CPPFLAGS += NSPR_INCLUDES + " "
+     APP_CPPFLAGS += NSPR_INCLUDES + " "
new file mode 100644
--- /dev/null
+++ b/mmgc-threadsafe-v1
@@ -0,0 +1,834 @@
+diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
+--- a/MMgc/GC.cpp
++++ b/MMgc/GC.cpp
+@@ -219,6 +219,8 @@ namespace MMgc
+ 		  memEnd(0),
+ 		  heap(gcheap),
+ 		  allocsSinceCollect(0),
++		  busy(false),
++		  restartRequested(false),
+ 		  collecting(false),
+ 		  m_roots(0),
+ 		  m_callbacks(0),
+@@ -246,6 +248,13 @@ namespace MMgc
+ 		  finalizedValue(true),
+ 		  // Expand, don't collect, until we hit this threshold
+ 		  collectThreshold(256)
++#ifdef MMGC_THREADSAFE
++		  ,
++		  condNotBusy(statusLock),
++		  condNoRequests(statusLock),
++		  requestCount(0)
++#endif
++
+ 	{		
+ 		// sanity check for all our types
+ 		GCAssert (sizeof(int8) == 1);
+@@ -383,7 +392,61 @@ namespace MMgc
+ 
+ 	void GC::Collect()
+ 	{
+-		// invoke precollect callback
++		AssertLockNotHeld();
++
++		{
++#ifdef MMGC_THREADSAFE
++			GC::AutoLock _lock(this);
++#endif
++			if (nogc || collecting)
++				return;
++
++			// Don't start a collection if we are in the midst of a ZCT reap.
++			if (zct.reaping)
++				return;
++
++			ReapZCT();
++
++			// if we're in the middle of an incremental mark kill it
++			// FIXME: we should just push it to completion 
++			if(marking) 
++			{
++				marking = false;
++				m_incrementalWork.Keep(0);
++			}
++
++			busy = true;
++		}
++
++#ifdef DEBUGGER
++		StartGCActivity();
++#endif
++
++		// Dumping the stack trace at GC time can be very helpful with stack walk bugs
++		// where something was created, stored away in untraced, unmanaged memory and not 
++		// reachable by the conservative stack walk
++		//DumpStackTrace();
++
++		Trace();
++		
++		Sweep();
++
++#ifdef _DEBUG
++		FindUnmarkedPointers();
++#endif
++
++		CheckThread();
++
++#ifdef DEBUGGER
++		StopGCActivity();
++#endif
++	}
++
++	void GC::CollectWithBookkeeping()
++	{
++		AssertLockNotHeld();
++
++		// Invoke the precollect callback.
+ 		bool vetoed = false;
+ 		GCCallback *cb = m_callbacks;
+ 		while (cb) {
+@@ -391,55 +454,55 @@ namespace MMgc
+ 				vetoed = true;
+ 			cb = cb->nextCB;
+ 		}
+-
+-		if (vetoed || nogc || collecting) {
+-
++		if (vetoed)
+ 			return;
+-		}
+-
+-		// Don't start a collection if we are in the midst of a ZCT reap.
+-		if (zct.reaping)
+-		{
+-
++
++#ifdef MMGC_THREADSAFE
++		// Lock before collecting, to update the GC status and detect
++		// cases where we shouldn't collect.
++		{
++			GC::AutoLock _lock(this);
++
++			if (nogc)
++				return;
++
++			// If we're already collecting on this thread, we've been called
++			// from a finalizer.  Don't recurse.
++			
++		
++			// If we're already collecting on another thread, just wait for that
++			// to finish.
++			???;
++
++			// We're going to do a collection.
++			//
++			// Suspend this thread's requests, update status, and wait for
++			// other threads to end or suspend their requests.
++			???;
++
++			// No requests are active.
++			???;
++		}
++#else
++		if (nogc || collecting || zct.reaping)
+ 			return;
+-		}
+-
+-		ReapZCT();
+-
+-		// if we're in the middle of an incremental mark kill it
+-		// FIXME: we should just push it to completion 
+-		if(marking) 
+-		{
+-			marking = false;
+-			m_incrementalWork.Keep(0);
+-		}
+-
+-#ifdef DEBUGGER
+-		StartGCActivity();
+-#endif
+-
+-		// Dumping the stack trace at GC time can be very helpful with stack walk bugs
+-		// where something was created, stored away in untraced, unmanaged memory and not 
+-		// reachable by the conservative stack walk
+-		//DumpStackTrace();
+-
+-		Trace();
++#endif
++
++		CollectImpl();
++
++#ifdef MMGC_THREADSAFE
++		// Lock again afterwards to update status, allowing other threads
++		// to proceed.
++		{
++			GC::AutoLock _lock(this);
++			???;
++		}
++#endif
++	}
++
++	void GC::Trace(const void *stackStart/*=NULL*/, size_t stackSize/*=0*/)
++	{
+ 		
+-		Sweep();
+-
+-#ifdef _DEBUG
+-		FindUnmarkedPointers();
+-#endif
+-
+-		CheckThread();
+-
+-#ifdef DEBUGGER
+-		StopGCActivity();
+-#endif
+-	}
+-
+-	void GC::Trace(const void *stackStart/*=NULL*/, size_t stackSize/*=0*/)
+-	{
+ 		SAMPLE_FRAME("[mark]", core());
+ 
+ 		// Clear all mark bits.
+@@ -479,6 +542,12 @@ namespace MMgc
+ 
+ 	void *GC::Alloc(size_t size, int flags/*0*/, int skip/*3*/)
+ 	{
++#ifdef MMGC_THREADSAFE
++		AssertLockNotHeld();
++		GC::AutoLock _lock(this);
++		GCAssert(!busy);
++#endif
++
+ #ifdef DEBUGGER
+ 		avmplus::AvmCore *core = (avmplus::AvmCore*)GetGCContextVariable(GCV_AVMCORE);
+ 		if(core)
+@@ -492,12 +561,12 @@ namespace MMgc
+ 		GCAssert(size + 7 > size);
+ 		CheckThread();
+ 		if (GC::greedy) {
+-			Collect();
++			CollectImpl(true);
+ 		}
+ 		// always be marking in pedantic mode
+ 		if(incrementalValidationPedantic) {
+ 			if(!marking)
+-				StartIncrementalMark();
++				StartIncrementalMarkImpl(true);
+ 		}
+ #endif
+ 
+@@ -575,19 +644,25 @@ namespace MMgc
+ 
+ 	void GC::Free(void *item)
+ 	{
++		if (item == NULL)
++			return;
++
++#ifdef MMGC_THREADSAFE
++		AssertLockNotHeld();
++		GC::AutoLock _lock(this);
++		GCAssert(!busy);
++#else
+ 		CheckThread();
++#endif
++
+ 		// we can't allow free'ing something during Sweeping, otherwise alloc counters
+ 		// get decremented twice and destructors will be called twice.
+-		if(item == NULL) {
+-			return;
+-		}
+ 
+ 		bool isLarge;
+ 
+ 		if(collecting) {
+ 			goto bail;
+ 		}
+-
+ 		isLarge = GCLargeAlloc::IsLargeBlock(GetRealPointer(item));
+ 
+ 		if (marking) {
+@@ -665,7 +740,10 @@ bail:
+ 	}
+ 
+ 	void GC::Sweep(bool force)
+-	{	
++	{
++		AssertLockNotHeld();
++		GCAssert(collecting);
++
+ 		SAMPLE_FRAME("[sweep]", core());
+ 		sweeps++;
+ 
+@@ -677,7 +755,7 @@ bail:
+ 			DumpMemoryInfo();
+ 		}
+ #endif
+-		
++
+ 		// collecting must be true because it indicates allocations should
+ 		// start out marked, we can't rely on write barriers below since 
+ 		// presweep could write a new GC object to a root
+@@ -736,8 +814,17 @@ bail:
+ #endif
+ 
+ 		// don't want postsweep to fire WB's
+-		marking = false;
+-		collecting = false;
++		{
++#ifdef MMGC_THREADSAFE
++			GC::AutoLock _lock(this);
++#endif
++			marking = false;
++			collecting = false;
++			busy = false;
++#ifdef MMGC_THREADSAFE
++			condNotBusy.NotifyAll();
++#endif
++		}
+ 
+ 		// invoke postsweep callback
+ 		cb = m_callbacks;
+@@ -782,11 +869,12 @@ bail:
+ 
+ 	void* GC::AllocBlock(int size, int pageType, bool zero)
+ 	{
++		AssertLockHeld();
+ #ifdef DEBUGGER
+ 		AllocActivity(size);
+ #endif
+ 		GCAssert(size > 0);
+-	
++
+ 		// perform gc if heap expanded due to fixed memory allocations
+ 		// utilize burst logic to prevent this from happening back to back
+ 		// this logic is here to apply to incremental and non-incremental
+@@ -799,7 +887,7 @@ bail:
+ 			if(incremental)
+ 				StartIncrementalMark();
+ 			else
+-				Collect();
++				CollectImpl(true);
+ 		}
+ 
+ 		void *item;
+@@ -840,6 +928,8 @@ bail:
+ 
+ 	void* GC::AllocBlockIncremental(int size, bool zero)
+ 	{
++		AssertLockHeld();
++
+ 		if(!nogc && !collecting) {
+ 			uint64 now = GetPerformanceCounter();
+ 			if (marking) {		
+@@ -1133,7 +1223,6 @@ bail:
+ #endif
+ #endif
+ 	}
+-
+ 
+ 	bool GC::IsPointerToGCPage(const void *item)
+ 	{
+@@ -1280,7 +1369,70 @@ bail:
+ 			}
+ 		}
+ 	}
+-			
++
++#ifdef MMGC_THREADSAFE
++
++	void GC::AcquireLock()
++	{
++	}
++
++	void GC::ReleaseLock()
++	{
++	}
++
++	void GCRequestContext::BeginRequest()
++	{
++		if (m_requestDepth != 0) {
++			// Nested request.
++			m_requestDepth++;
++			return;
++		}
++
++		GC::AutoLock _lock(m_gc);
++		m_gc->WaitForNotCollecting();
++		m_gc->IncrementRequestCount();
++		m_requestDepth = 1;
++	}
++
++	void GCRequestContext::EndRequest()
++	{
++		GCAssert(m_requestDepth != 0);
++		if (m_requestDepth > 1) {
++			// Nested request.
++			m_requestDepth--;
++			return;
++		}
++
++		GC::AutoLock _lock(m_gc);
++		m_gc->DecrementRequestCount();
++		m_requestDepth = 0;
++	}
++
++	void GCRequestContext::SuspendRequest(GCSuspendedRequestState *state)
++	{
++		void *stack;
++		size_t stackSize;
++
++		MMGC_GET_STACK_EXTENTS(m_gc, stack, stackSize);
++		state->stack = new GCRoot(m_gc, stack, stackSize);
++
++		state->count = 0;
++		while (m_requestDepth) {
++			EndRequest();
++			state->count++;
++		}
++	}
++
++	void GCRequestContext::ResumeRequest(GCSuspendedRequestState *state)
++	{
++		for (int i = 0; i < state->count; i++)
++			BeginRequest();
++
++		delete state->stack;
++	}
++
++#endif // MMGC_THREADSAFE
++
+ #ifdef _DEBUG
+ 
+ 	void GC::RCObjectZeroCheck(RCObject *item)
+@@ -1930,6 +2082,7 @@ bail:
+ 
+ 	void GC::StartIncrementalMark()
+ 	{
++		AssertLockHeld();
+ 		GCAssert(!marking);
+ 		GCAssert(!collecting);
+ 
+@@ -2300,7 +2453,7 @@ bail:
+ #ifdef DEBUGGER
+ 		StartGCActivity();
+ #endif
+-		
++
+ 		markIncrements++;
+ 		// FIXME: tune this so that getPerformanceCounter() overhead is noise
+ 		static unsigned int checkTimeIncrements = 100;
+@@ -2584,7 +2737,7 @@ bail:
+ 	}
+ 
+ 	void GC::RemoveRoot(GCRoot *root)
+-	{		
++	{
+ #ifdef GCHEAP_LOCK
+ 		GCAcquireSpinlock lock(m_rootListLock);
+ #endif
+diff --git a/MMgc/GC.h b/MMgc/GC.h
+--- a/MMgc/GC.h
++++ b/MMgc/GC.h
+@@ -431,6 +431,149 @@ namespace MMgc
+ 		{
+ 			return obj >= zct && obj < (RCObject**)(zct+zctSize/sizeof(RCObject*));
+ 		}
++	};
++
++#ifdef MMGC_THREADSAFE
++	/** State saved by GCRequestContext::SuspendRequest. */
++	struct GCSuspendedRequestState
++	{
++		/** Number of nested BeginRequest calls that were suspended. */
++		int count;
++		/** Rooted stack. */
++		GCRoot *stack;
++	};
++
++	/**
++	 * An execution context in which requests may run.  Each GCRequestContext
++	 * is single-threaded.  See MMgc::GCRequest.
++	 */
++	class GCRequestContext
++	{
++	public:
++		GCRequestContext();
++		virtual ~GCRequestContext() {}
++
++		/**
++		 * Enter a request.  This is the low-level method that implements
++		 * MMgc::GCRequest.
++		 */
++		void BeginRequest();
++
++		/** Leave the current request. */
++		void EndRequest();
++
++		/**
++		 * Suspend the current request.  This is the method that implements
++		 * MMgc::GCSuspendRequest.
++		 *
++		 * The caller must provide `state`, which is a place to store
++		 * information that will be needed later to resume the request.
++		 * `state` must be non-null, and a GCSuspendedRequestState object can
++		 * only be used for one suspended request at a time.
++		 */
++		void SuspendRequest(GCSuspendedRequestState *state);
++
++		/**
++		 * Resume a suspended request.
++		 *
++		 * `state` must point to a struct previously passed to SuspendRequest.
++		 */
++		void ResumeRequest(GCSuspendedRequestState *state);
++
++	private:
++		GC *m_gc;
++		int m_requestDepth;
++	};
++
++#else
++	/*
++	 * Dummy implementations of the request-related classes.  The compiler
++	 * should optimize away calls to the *Request methods.
++	 */
++
++	struct GCSuspendedRequestState {};
++
++	class GCRequestContext {
++	public:
++		explicit GCRequestContext(GC *) {}
++		virtual ~GCRequestContext() {}
++
++		void BeginRequest() {}
++		void EndRequest() {}
++		void SuspendRequest(GCSuspendedRequestState *) {}
++		void ResumeRequest(GCSuspendedRequestState *) {}
++	};
++#endif // MMGC_THREADSAFE
++
++
++	/**
++	 * Sentry object to protect application code that uses MMgc.
++	 *
++	 * When MMgc is compiled with MMGC_THREADSAFE defined, the application
++	 * must use the request model to ensure that allocation and garbage
++	 * collection interact safely. Wrapping some code in a request ensures two
++	 * things: (1) garbage collection will not collide with that code, because
++	 * the GC treats threads that are in requests with extra care; (2) when GC
++	 * does happen, the stack of the thread that is in the request will be
++	 * scanned.
++	 *
++	 * Threads that are not in requests do not get these guarantees.
++	 * Therefore code must not interact with the GC or GC-managed objects
++	 * outside a request!
++	 *
++	 * Code inside a request should not block or do CPU-intensive processing,
++	 * as this would lock out garbage collection.  See GCSuspendRequest.
++	 */
++	class GCRequest
++	{
++	public:
++		explicit GCRequest(GCRequestContext *cx) : m_cx(cx)
++		{
++			cx->BeginRequest();
++		}
++
++		~GCRequest()
++		{
++			m_cx->EndRequest();
++		}
++
++	private:
++		// Prohibit copying.
++		GCRequest(const GCRequest &);
++		GCRequest & operator=(const GCRequest &);
++
++		GCRequestContext *m_cx;
++	};
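++
++	/*
++	 * Example (sketch): scoping application code inside a request.  The
++	 * function name is hypothetical.
++	 *
++	 *     void RunScript(GCRequestContext *cx, GC *gc)
++	 *     {
++	 *         GCRequest _request(cx);     // BeginRequest()
++	 *         void *obj = gc->Alloc(64);  // safe inside the request
++	 *     }                               // ~GCRequest() calls EndRequest()
++	 */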
++
++	/**
++	 * Sentry object to suspend a request temporarily. Suspending a request
++	 * allows the GC to work while a thread is performing blocking IO or
++	 * CPU-intensive work.
++	 *
++	 * While a request is suspended, the calling thread's stack is temporarily
++	 * rooted so that if GC does happen, the appropriate stack frames will be
++	 * scanned.
++	 */
++	class GCSuspendRequest
++	{
++	public:
++		explicit GCSuspendRequest(GCRequestContext *cx) : m_cx(cx)
++		{
++			cx->SuspendRequest(&m_state);
++		}
++
++		~GCSuspendRequest()
++		{
++			m_cx->ResumeRequest(&m_state);
++		}
++
++	private:
++		// Prohibit copying.
++		GCSuspendRequest(const GCSuspendRequest &);
++		GCSuspendRequest & operator=(const GCSuspendRequest &);
++
++		GCRequestContext *m_cx;
++		GCSuspendedRequestState m_state;
+ 	};
+ 
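++
++	/*
++	 * Example (sketch): releasing the request around blocking work so the
++	 * GC can run meanwhile.  ReadFromNetwork is a hypothetical blocking
++	 * call; |cx| is the thread's GCRequestContext.
++	 *
++	 *     {
++	 *         GCSuspendRequest _suspend(cx);  // stack rooted; GC may run
++	 *         ReadFromNetwork();
++	 *     }                                   // request resumed here
++	 */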
+ 	/**
+@@ -475,8 +618,20 @@ namespace MMgc
+ 		friend class GCLargeAlloc;
+ 		friend class RCObject;
+ 		friend class GCInterval;
++		friend class GCRequestContext;
+ 		friend class ZCT;
+ 	public:
++
++#ifdef MMGC_THREADSAFE
++		void AcquireLock();
++		void ReleaseLock();
++
++		void AssertLockHeld()    { GCAssert(statusLock.IsHeld()); }
++		void AssertLockNotHeld() { GCAssert(!statusLock.IsHeld()); }
++#else
++		void AssertLockHeld() {}
++		void AssertLockNotHeld() {}
++#endif // MMGC_THREADSAFE
+ 
+ 		/**
+ 		 * If you need context vars use this!
+@@ -708,7 +863,10 @@ namespace MMgc
+ 		}
+ 
+ 		/**
+-		 * Used by sub-allocators to obtain memory
++		 * Used by sub-allocators to obtain memory.
++		 *
++		 * In MMGC_THREADSAFE builds, the caller must hold the GC-wide
++		 * lock. See GC::AcquireLock().
+ 		 */
+ 		void* AllocBlock(int size, int pageType, bool zero=true);
+ 		void FreeBlock(void *ptr, uint32 size);
+@@ -831,8 +989,17 @@ namespace MMgc
+ 		bool IsRCObject(const void *) { return false; }
+ #endif
+ 
+-
+-		bool Collecting() const { return collecting; }
++		/**
++		 * True during finalization; false during ordinary or incremental
++		 * marking.  See GC::collecting.
++		 */
++		bool Collecting() const
++		{
++#ifdef MMGC_THREADSAFE
++			GC::AutoLock _lock(this);
++#endif
++			return collecting;
++		}
+ 
+ 		bool IsGCMemory (const void *);
+ 
+@@ -932,7 +1099,22 @@ namespace MMgc
+ 		// for external which does thread safe multi-thread AS execution
+ 		bool disableThreadCheck;
+ 
++		/**
++		 * True if incremental marking has started for the current GC cycle.
++		 * As long as this is false, write barriers do not need to do
++		 * anything.
++		 *
++		 * This is true from the time StartIncrementalMark() is called to the
++		 * end of the cycle, that is, until Collect() or
++		 * FinishIncrementalMark() completes successfully.
++		 *
++		 * While `busy` and `collecting` indicate that the GC is actively
++		 * doing something, `marking` only indicates that marking has
++		 * started and is true even in between calls to IncrementalMark(),
++		 * when no GC-related code is running.
++		 */
+ 		bool marking;
++
+ 		GCStack<GCWorkItem> m_incrementalWork;
+ 		void StartIncrementalMark();
+ 		void FinishIncrementalMark();
+@@ -1002,6 +1184,22 @@ namespace MMgc
+ 		void Trace(const void *stackStart=NULL, size_t stackSize=0);
+ #endif
+ 
++		/**
++		 * A thread-wrangling wrapper around CollectImpl().
++		 *
++		 * Ordinarily this just calls CollectImpl, but in an MMGC_THREADSAFE
++		 * build this does more work, implementing the GC side of the request
++		 * model, and arranging for GC to restart if multiple threads request
++		 * it.
++		 */
++		void CollectThreadSafe();
++
++		/**
++		 * Actually do garbage collection.  This should only be
++		 * called from CollectThreadSafe().
++		 */
++		void CollectImpl();
++
+ 		void Finalize();
+ 		void Sweep(bool force=false);
+ 		void ForceSweep() { Sweep(true); }
+@@ -1018,9 +1216,31 @@ namespace MMgc
+ 		void TrapWrite(const void *black, const void *white);
+ 
+ 		unsigned int allocsSinceCollect;
+-		// The collecting flag prevents an unwanted recursive collection.
++
++		/**
++		 * True whenever any thread is (a) doing GC; (b) doing incremental
++		 * marking; or (c) waiting for all threads to be ready for GC.
++		 *
++		 * In an MMGC_THREADSAFE build, this field is protected by statusLock.
++		 */
++		bool busy;
++
++		/**
++		 * True when another thread has requested GC while GC or incremental
++		 * marking is already underway.  This indicates that after the current
++		 * GC, a new full GC will happen.  This field is protected by
++		 * statusLock.
++		 *
++		 * In a non-MMGC_THREADSAFE build, this is always false.
++		 */
++		bool restartRequested;
++
++		/**
++		 * True when Sweep() is running.  This is used to determine whether
++		 * newly allocated objects should start out marked.
++		 */
+ 		bool collecting;
+- 
++
+ 		bool finalizedValue;
+ 
+ 		// list of pages to be swept, built up in Finalize
+@@ -1084,6 +1304,107 @@ private:
+ #endif
+ 
+ 		void CheckThread();
++
++#ifdef MMGC_THREADSAFE
++		/**
++		 * The GC-wide lock exposed via AcquireLock() and ReleaseLock().
++		 *
++		 * This lock protects all gc-wide "status" information: GC::busy,
++		 * GC::collecting, GC::marking, GC::requestCount, and
++		 * GCRequestContext::requestDepth (but not ZCT::reaping, yet; DRC is
++		 * not yet thread-safe in any sense). These fields may be read or
++		 * written only by a thread that holds this lock.
++		 *
++		 * A slightly different locking scheme protects the GCAlloc and
++		 * GCLargeAlloc objects and all the GCBlocks and per-object bits. We
++		 * divide code that may touch these objects into two categories:
++		 * application code (running in GCRequests) and collector code. At any
++		 * given time, either one collector thread or multiple application
++		 * threads may be running. That is, there is an invariant that
++		 *
++		 *    !(gc->busy && gc->requestCount > 0)
++		 *
++		 * So the collector may touch allocation-related data freely (because
++		 * it makes sure there's no application code running before it begins)
++		 * but application code must do so only when holding the lock.
++		 *
++		 * This lock does NOT protect m_roots, GCRoot::next, and
++		 * GCRoot::prev. Those are protected by a separate spinlock. To avoid
++		 * deadlock, never lock statusLock and m_rootListLock at the same
++		 * time.
++		 */
++		GCLock statusLock;
++
++		/** This condition is notified when `busy` becomes false. */
++		GCCondition condNotBusy;
++
++		/** This condition is notified when `requestCount==0` becomes true. */
++		GCCondition condNoRequests;
++
++		/**
++		 * The number of GCRequestContext objects with active requests.
++		 *
++		 * (Note: GCRequestContext doesn't have any data members that tell
++		 * whether it's active or not. In particular,
++		 * GCRequestContext::requestDepth is no indication; it may be nonzero
++		 * while the request is temporarily inactive because GC is happening.)
++		 */
++		int requestCount;
++
++		/**
++		 * Sentry object that acquires the GC-wide status lock, then releases
++		 * it when it leaves scope.
++		 */
++		class AutoLock
++		{
++		public:
++			explicit AutoLock(GC *gc) : m_gc(gc) { gc->AcquireLock(); }
++			~AutoLock() { m_gc->ReleaseLock(); }
++
++		private:
++			// Prohibit copying
++			AutoLock(const AutoLock &);
++			AutoLock & operator=(const AutoLock &);
++
++			GC *m_gc;
++		};
++
++		/**
++		 * Wait until garbage collection is *not* happening and it is safe for
++		 * code within a request to proceed.
++		 *
++		 * The caller must hold `this->statusLock`.
++		 */
++		inline void WaitForNotBusy()
++		{
++			while (busy)
++				condNotBusy.Wait();
++		}
++
++		/**
++		 * GCRequestContext::BeginRequest calls this to notify the GC that a
++		 * context has entered a request.
++		 *
++		 * Caller must hold `this->statusLock`.
++		 */
++		inline void IncrementRequestCount()
++		{
++			requestCount++;
++		}
++
++		/**
++		 * GCRequestContext::EndRequest calls this to notify the GC that a
++		 * context has left an outermost request.
++		 *
++		 * Caller must hold `this->statusLock`.
++		 */
++		inline void DecrementRequestCount()
++		{
++			GCAssert(requestCount > 0);
++			if (--requestCount == 0)
++				condNoRequests.NotifyAll();
++		}
++#endif // MMGC_THREADSAFE
+ 
+ 		void PushWorkItem(GCStack<GCWorkItem> &stack, GCWorkItem item);
+ 
+diff --git a/MMgc/MMgc.h b/MMgc/MMgc.h
+--- a/MMgc/MMgc.h
++++ b/MMgc/MMgc.h
+@@ -107,6 +107,8 @@
+ #endif
+ #endif
+ 
++#include "GCThreads.h"
++
+ namespace MMgc
+ {
+ 	class GC;
new file mode 100644
--- /dev/null
+++ b/series
@@ -0,0 +1,10 @@
+configure-with-threadsafe-mmgc
+mmgc-rm-isd
+mmgc-threadsafe-take2
+mmgc-graphviz
+sm-features
+mmgc-threadsafe-v1
+DO-NOT-USE
+mmgc-decref-microfix
+mmgc-remove-precollect-hook
+mmgc-bit-checks
new file mode 100644
new file mode 100644
--- /dev/null
+++ b/status
@@ -0,0 +1,1 @@
+8316d35f2b9d61b194d02b6eab63cda1ffad1188:configure-with-threadsafe-mmgc