Preliminarily split mmgc-threadsafe into two parts, mmgc-threadsafe-baseline containing only the changes that are in tamarin-central, and mmgc-threadsafe containing additional actionmonkey-specific stuff on top of that.
author jorendorff@mozilla.com
Fri, 04 Jan 2008 16:11:56 -0600
changeset 32 5900e6424cbd
parent 31 e76f410f999e
child 33 89e259bfffb2
push id 1
push user bsmedberg@mozilla.com
push date Mon, 21 Apr 2008 01:54:18 +0000
mmgc-threadsafe
mmgc-threadsafe-baseline
series
--- a/mmgc-threadsafe
+++ b/mmgc-threadsafe
@@ -1,898 +1,145 @@
 diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 --- a/MMgc/GC.cpp
 +++ b/MMgc/GC.cpp
-@@ -199,8 +199,16 @@ namespace MMgc
- 	};
- 	const size_t kLargestAlloc = 1968;
- 
-+#ifdef MMGC_THREADSAFE
-+#define USING_CALLBACK_LIST(gc)  GCAcquireSpinlock _cblock((gc)->m_callbackListLock)
-+#define USING_PAGE_MAP()         GCAcquireSpinlock _pmlock(pageMapLock)
-+#else
-+#define USING_CALLBACK_LIST(gc)  ((gc)->CheckThread())
-+#define USING_PAGE_MAP()         ((void) 0)
-+#endif
-+
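For readers skimming the patch: USING_CALLBACK_LIST and USING_PAGE_MAP only work because GCAcquireSpinlock is a scoped (RAII) guard, so the spinlock is held for exactly the enclosing block. A minimal sketch of such a guard, assuming an Acquire/Release interface on GCSpinLock (the real class ships with MMgc; this is only an illustration):

    // Hypothetical sketch of an RAII spinlock guard: acquire in the
    // constructor, release in the destructor, so every early return
    // out of the guarded block still unlocks.
    class GCAcquireSpinlock
    {
    public:
        explicit GCAcquireSpinlock(GCSpinLock &lock) : m_lock(lock)
        {
            m_lock.Acquire();
        }
        ~GCAcquireSpinlock()
        {
            m_lock.Release();
        }
    private:
        GCSpinLock &m_lock;
    };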
- 	GC::GC(GCHeap *gcheap)
--		: disableThreadCheck(false),
-+		:
- #ifdef MMGC_DRC
- 		  zct(gcheap),
- #endif
-@@ -256,8 +264,17 @@ namespace MMgc
- 		  heapSizeAtLastAlloc(gcheap->GetTotalHeapSize()),
- 		  finalizedValue(true),
- 		  // Expand, don't collect, until we hit this threshold
--		  collectThreshold(256)
--	{		
-+		  collectThreshold(256),
-+#if MMGC_THREADSAFE
-+		  m_exclusiveGCThread(NULL),
-+		  m_gcRunning(false),
-+		  m_condDone(m_lock),
-+		  m_requestCount(0),
-+		  m_condNoRequests(m_lock)
-+#else
-+		  disableThreadCheck(false)
-+#endif
-+	{
- 		// sanity check for all our types
- 		GCAssert (sizeof(int8) == 1);
- 		GCAssert (sizeof(uint8) == 1);		
-@@ -373,7 +390,9 @@ namespace MMgc
- 
- 		heap->Free(pageMap);
- 
-+#ifndef MMGC_THREADSAFE
- 		CheckThread();
-+#endif
- 
- 		GCAssert(!m_roots);
- 		GCAssert(!m_callbacks);
-@@ -393,20 +412,181 @@ namespace MMgc
- 
- 	void GC::Collect()
+@@ -418,15 +418,6 @@ namespace MMgc
+ 	void GC::CollectWithBookkeeping(bool callerHoldsLock,
+ 									bool callerHasActiveRequest)
  	{
-+		CollectWithBookkeeping(false, false);
-+	}
-+
-+	void GC::CollectWithBookkeeping(bool callerHoldsLock,
-+									bool callerHasActiveRequest)
-+	{
-+#ifdef MMGC_THREADSAFE
-+		GCAssert(callerHoldsLock == m_lock.IsHeld());
-+#ifdef _DEBUG
-+		GCAssert(callerHasActiveRequest == GCThread::GetCurrentThread()->IsInActiveRequest());
-+#endif
-+#else
-+		CheckThread();
-+#endif
-+
- 		// invoke precollect callback
- 		bool vetoed = false;
+-		// invoke precollect callback
+-		bool vetoed = false;
 -		GCCallback *cb = m_callbacks;
 -		while (cb) {
 -			if (!cb->precollect())
 -				vetoed = true;
 -			cb = cb->nextCB;
 -		}
 -
-+		{
-+			USING_CALLBACK_LIST(this);
-+			GCCallback *cb = m_callbacks;
-+			while (cb) {
-+				if (!cb->precollect())
-+					vetoed = true;
-+				cb = cb->nextCB;
-+			}
-+		}
-+
-+#ifdef MMGC_THREADSAFE
-+		if (!callerHoldsLock)
-+			m_lock.Acquire();
-+
-+		if (vetoed || nogc || m_gcRunning || zct.reaping) {
-+			if (!callerHoldsLock)
-+				m_lock.Release();
-+			return;
-+		}
-+#else
- 		if (vetoed || nogc || collecting || zct.reaping) {
- 			return;
- 		}
--
--		ReapZCT();
-+#endif
-+
-+#ifdef MMGC_THREADSAFE
-+		GCThread *thisThread = GCThread::GetCurrentThread();
-+		if (m_exclusiveGCThread != NULL) {
-+			// Someone is already collecting or waiting to collect.
-+			//
-+			// If it's the current thread, then we're being called
-+			// recursively.  This maybe shouldn't happen, but Collect can be
-+			// called indirectly from a finalizer.  Don't recurse, just ignore
-+			// it.
-+			//
-+			// If it's some other thread, wait for that thread to finish.
-+			//
-+			if (m_exclusiveGCThread != thisThread) {
-+				GCRoot *stackRoot;
+ #ifdef MMGC_THREADSAFE
+ 		GCAssert(callerHoldsLock == m_lock.IsHeld());
+ #ifdef _DEBUG
+@@ -467,6 +458,14 @@ namespace MMgc
+ 			//
+ 			if (m_exclusiveGCThread != thisThread) {
+ 				GCRoot *stackRoot;
 +				void *stack;
 +				size_t stackSize;
 +
 +				// Call this macro here, not under "if
 +				// (callerHasActiveRequest)", because it introduces local
 +				// variables that must survive for the whole lifetime of
 +				// stackRoot.
 +				MMGC_GET_STACK_EXTENTS(this, stack, stackSize);
-+
-+				// Before waiting, this thread must suspend any active
-+				// requests.  This avoids a deadlock where m_exclusiveGCThread
-+				// is waiting for m_requestCount to go to zero while we're
-+				// waiting for m_exclusiveGCThread to be done.
-+				//
-+				// When we do this, we root the calling thread's stack; this
-+				// way each collection scans the stack of every thread that is
-+				// in a suspended request.
-+				//
-+				if (callerHasActiveRequest) {
-+					stackRoot = new GCRoot(this, stack, stackSize);
-+
-+					OnLeaveRequestAlreadyLocked();
-+				}
-+
-+				while (m_exclusiveGCThread != NULL)
-+					m_condDone.Wait();
-+
-+				if (callerHasActiveRequest) {
-+					// Resume previously suspended request.
-+					delete stackRoot;
-+					OnEnterRequestAlreadyLocked();
-+				}
-+			}
-+
-+			if (!callerHoldsLock)
-+				m_lock.Release();
-+			return;
-+		}
-+
-+		// If we get here, this thread will collect.
-+		m_exclusiveGCThread = thisThread;
-+		if (callerHasActiveRequest) {
-+			// We must suspend any active requests on this thread.
-+			OnLeaveRequestAlreadyLocked();
-+		}
-+		// Wait for other threads to suspend or end their active requests,
-+		// if any.
-+		while (m_requestCount > 0)
-+			m_condNoRequests.Wait();
-+
-+		// These should not have changed while we were waiting, even though we
-+		// released the lock.
-+		GCAssert(m_requestCount == 0);
-+		GCAssert(m_exclusiveGCThread == thisThread);
-+
-+		// This thread has acquired exclusiveGC.
-+		m_gcRunning = true;
-+#endif
-+
-+		{
-+			USING_CALLBACK_LIST(this);
-+			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
-+				cb->enterExclusiveGC();
-+		}
-+
-+#ifdef MMGC_THREADSAFE
-+		m_lock.Release();
-+#endif
-+
-+		{
-+			USING_CALLBACK_LIST(this);
-+			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
-+				cb->enterExclusiveGCNoLock();
-+		}
-+
-+		CollectImpl();
-+
-+#ifdef MMGC_THREADSAFE
-+		m_lock.Acquire();
-+
-+		// These should not have changed during collection,
-+		// even though we released the lock.
-+		GCAssert(m_requestCount == 0);
-+		GCAssert(m_exclusiveGCThread == thisThread);
-+#endif
-+
-+		{
-+			USING_CALLBACK_LIST(this);
-+			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
-+				cb->leaveExclusiveGC();
-+		}
-+
-+#ifdef MMGC_THREADSAFE
-+		// This thread is relinquishing exclusiveGC.
-+		m_gcRunning = false;
-+
-+		m_exclusiveGCThread = NULL;
-+		if (callerHasActiveRequest)
-+			OnEnterRequestAlreadyLocked();
-+
-+		m_condDone.NotifyAll();
-+
-+		if (!callerHoldsLock)
-+			m_lock.Release();
-+#endif
+ 
+ 				// Before waiting, this thread must suspend any active
+ 				// requests.  This avoids a deadlock where m_exclusiveGCThread
+@@ -478,10 +477,6 @@ namespace MMgc
+ 				// in a suspended request.
+ 				//
+ 				if (callerHasActiveRequest) {
+-					void *stack;
+-					size_t stackSize;
+-
+-					MMGC_GET_STACK_EXENTS(this, stack, stackSize);
+ 					stackRoot = new GCRoot(this, stack, stackSize);
+ 
+ 					OnLeaveRequestAlreadyLocked();
+@@ -568,6 +563,12 @@ namespace MMgc
+ 		if (!callerHoldsLock)
+ 			m_lock.Release();
+ #endif
 +
 +		{
 +			USING_CALLBACK_LIST(this);
 +			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 +				cb->postcollect();
 +		}
-+	}
-+
-+	void GC::CollectImpl()
-+	{
-+#ifndef MMGC_THREADSAFE
-+ 		ReapZCT();
-+#endif
-+
- 		if(!marking)
- 			StartIncrementalMark();
- 		if(marking)
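Stripped of diff noise, the MMGC_THREADSAFE handshake that CollectWithBookkeeping implements above reduces to roughly the following sketch (callback invocations and the callerHoldsLock/callerHasActiveRequest bookkeeping omitted):

    // Only one thread at a time may claim m_exclusiveGCThread; every
    // other caller either waits for it or, on a recursive call, bails.
    m_lock.Acquire();
    if (m_exclusiveGCThread != NULL) {
        if (m_exclusiveGCThread != thisThread) {
            // Root this thread's stack and suspend its request first,
            // so the collector is not deadlocked on m_requestCount.
            while (m_exclusiveGCThread != NULL)
                m_condDone.Wait();
        }
        m_lock.Release();              // recursive call: just return
        return;
    }
    m_exclusiveGCThread = thisThread;  // claim the collection
    while (m_requestCount > 0)         // wait for requests to drain
        m_condNoRequests.Wait();
    m_gcRunning = true;
    m_lock.Release();
    CollectImpl();                     // mark and sweep, exclusively
    m_lock.Acquire();
    m_gcRunning = false;
    m_exclusiveGCThread = NULL;
    m_condDone.NotifyAll();            // wake threads waiting on us
    m_lock.Release();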
-@@ -418,13 +598,14 @@ namespace MMgc
- 		// reachable by the conservative stack walk
- 		//DumpStackTrace();
- 		FindUnmarkedPointers();
--		CheckThread();
- #endif
  	}
  
- #ifdef _DEBUG
- 	void GC::Trace(const void *stackStart/*=NULL*/, size_t stackSize/*=0*/)
- 	{
-+		MMGC_ASSERT_EXCLUSIVE_GC(this);
-+
- 		SAMPLE_FRAME("[mark]", core());
- 
- 		// Clear all mark bits.
-@@ -448,8 +629,11 @@ namespace MMgc
+ 	void GC::CollectImpl()
+@@ -618,8 +619,11 @@ namespace MMgc
  		SAMPLE_CHECK();
  
  		// invoke lastmark on all callbacks
 -		for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 -			cb->lastmark(work);
 +		{
 +			USING_CALLBACK_LIST(this);
 +			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 +				cb->lastmark(work);
 +		}
  
  		if(stackStart == NULL) {
  			MarkQueueAndStack(work);
-@@ -465,6 +649,20 @@ namespace MMgc
+@@ -646,6 +650,9 @@ namespace MMgc
  
- 	void *GC::Alloc(size_t size, int flags/*0*/, int skip/*3*/)
+ 	void *GC::AllocAlreadyLocked(size_t size, int flags/*0*/, int skip/*3*/)
  	{
 +#ifdef MMGC_THREADSAFE
-+		GCAutoLock _lock(m_lock);
-+		GCAssert(!m_gcRunning);
-+#else
-+		CheckThread();
-+#endif
-+		return AllocAlreadyLocked(size, flags, skip);
-+	}
-+
-+	void *GC::AllocAlreadyLocked(size_t size, int flags/*0*/, int skip/*3*/)
-+	{
-+#ifdef MMGC_THREADSAFE
 +		GCAssert(GCThread::GetCurrentThread()->IsInActiveRequest());
 +#endif
  #ifdef DEBUGGER
  		avmplus::AvmCore *core = (avmplus::AvmCore*)GetGCContextVariable(GCV_AVMCORE);
  		if(core)
-@@ -476,9 +674,8 @@ namespace MMgc
- 
- #ifdef _DEBUG
- 		GCAssert(size + 7 > size);
--		CheckThread();
- 		if (GC::greedy) {
--			Collect();
-+			CollectWithBookkeeping(true, true);
- 		}
- 		// always be marking in pedantic mode
- 		if(incrementalValidationPedantic) {
-@@ -561,15 +758,21 @@ namespace MMgc
- 
- 	void GC::Free(void *item)
- 	{
-+#ifdef MMGC_THREADSAFE
-+		GCAutoLock _lock(m_lock);
-+		GCAssert(!m_gcRunning);
-+#else
- 		CheckThread();
-+#endif
-+
-+		if(item == NULL) {
-+			return;
-+		}
-+
-+		bool isLarge;
-+
- 		// we can't allow free'ing something during Sweeping, otherwise alloc counters
- 		// get decremented twice and destructors will be called twice.
--		if(item == NULL) {
--			return;
--		}
--
--		bool isLarge;
--
- 		if(collecting) {
- 			goto bail;
- 		}
-@@ -618,6 +821,8 @@ bail:
- 
- 	void GC::ClearMarks()
- 	{
-+		MMGC_ASSERT_EXCLUSIVE_GC(this);
-+
- 		for (int i=0; i < kNumSizeClasses; i++) {
- #ifdef MMGC_DRC
- 			containsPointersRCAllocs[i]->ClearMarks();
-@@ -630,6 +835,8 @@ bail:
- 
- 	void GC::Finalize()
- 	{
-+		MMGC_ASSERT_EXCLUSIVE_GC(this);
-+
- 		for(int i=kNumSizeClasses; i-- > 0;) {
- #ifdef MMGC_DRC
- 			containsPointersRCAllocs[i]->Finalize();
-@@ -652,6 +859,8 @@ bail:
- 
- 	void GC::Sweep(bool force)
- 	{	
-+		MMGC_ASSERT_EXCLUSIVE_GC(this);
-+
- 		// collecting must be true because it indicates allocations should
- 		// start out marked, we can't rely on write barriers below since 
- 		// presweep could write a new GC object to a root
-@@ -671,10 +880,13 @@ bail:
- 		
- 
- 		// invoke presweep on all callbacks
--		GCCallback *cb = m_callbacks;
--		while(cb) {
--			cb->presweep();
--			cb = cb->nextCB;
-+		{
-+			USING_CALLBACK_LIST(this);
-+			GCCallback *cb = m_callbacks;
-+			while(cb) {
-+				cb->presweep();
-+				cb = cb->nextCB;
-+			}
- 		}
- 
- 		SAMPLE_CHECK();
-@@ -735,12 +947,15 @@ bail:
- 		collecting = false;
- 
- 		// invoke postsweep callback
--		cb = m_callbacks;
--		while(cb) {
--			cb->postsweep();
--			cb = cb->nextCB;
--		}
--		
-+		{
-+			USING_CALLBACK_LIST(this);
-+			GCCallback *cb = m_callbacks;
-+			while(cb) {
-+				cb->postsweep();
-+				cb = cb->nextCB;
-+			}
-+		}
-+
- 		SAMPLE_CHECK();
- 
- 		allocsSinceCollect = 0;
-@@ -777,6 +992,7 @@ bail:
- 
- 	void* GC::AllocBlock(int size, int pageType, bool zero)
- 	{
-+		MMGC_ASSERT_GC_LOCK(this);
- #ifdef DEBUGGER
- 		AllocActivity(size);
- #endif
-@@ -794,7 +1010,7 @@ bail:
- 			if(incremental)
- 				StartIncrementalMark();
- 			else
--				Collect();
-+				CollectWithBookkeeping(true, true);
- 		}
- 
- 		void *item;
-@@ -835,6 +1051,8 @@ bail:
- 
- 	void* GC::AllocBlockIncremental(int size, bool zero)
- 	{
-+		MMGC_ASSERT_GC_LOCK(this);
-+
- 		if(!nogc && !collecting) {
- 			uint64 now = GetPerformanceCounter();
- 			if (marking) {		
-@@ -862,12 +1080,14 @@ bail:
- 
- 	void* GC::AllocBlockNonIncremental(int size, bool zero)
- 	{
-+		MMGC_ASSERT_GC_LOCK(this);
-+
- 		void *item = heap->Alloc(size, false, zero);
- 		if (!item) {
- 			if (heap->GetTotalHeapSize() >= collectThreshold &&
- 				allocsSinceCollect >= totalGCPages / kFreeSpaceDivisor) 
- 			{
--				Collect();
-+				CollectWithBookkeeping(true, true);
- 				item = heap->Alloc(size, false, zero);
- 			}
- 		}
-@@ -876,6 +1096,11 @@ bail:
- 
- 	void GC::FreeBlock(void *ptr, uint32 size)
- 	{
-+#ifdef MMGC_THREADSAFE
-+		GCAssert(m_lock.IsHeld() || destroying
-+				 || (m_gcRunning
-+					 && m_exclusiveGCThread == GCThread::GetCurrentThread()));
-+#endif
- #ifdef DEBUGGER
- 		AllocActivity(- (int)size);
- #endif
-@@ -903,6 +1128,7 @@ bail:
- 
- 	void GC::Mark(GCStack<GCWorkItem> &work)
- 	{
-+		MMGC_ASSERT_EXCLUSIVE_GC(this);
- 		while(work.Count()) {
- 			MarkItem(work);
- 		}
-@@ -910,6 +1136,7 @@ bail:
- 
- 	void GC::MarkGCPages(void *item, uint32 numPages, int to)
- 	{
-+		USING_PAGE_MAP();
- 		uintptr addr = (uintptr)item;
- 		uint32 shiftAmount=0;
- 		unsigned char *dst = pageMap;
-@@ -952,7 +1179,7 @@ bail:
- 		addr = (uintptr)item;
- 		while(numPages--)
- 		{
--			GCAssert(GetPageMapValue(addr) == 0);
-+			GCAssert(GetPageMapValueAlreadyLocked(addr) == 0);
- 			SetPageMapValue(addr, to);
- 			addr += GCHeap::kBlockSize;
- 		}
-@@ -961,6 +1188,8 @@ bail:
- 	void GC::UnmarkGCPages(void *item, uint32 numpages)
- 	{
- 		uintptr addr = (uintptr) item;
-+
-+		USING_PAGE_MAP();
- 		while(numpages--)
- 		{
- 			ClearPageMapValue(addr);
-@@ -1126,6 +1355,7 @@ bail:
- 		gc->Mark(work);
- 	}
- 
-+#ifndef MMGC_THREADSAFE
- 	void GC::CheckThread()
- 	{
- #ifdef _DEBUG
-@@ -1134,12 +1364,13 @@ bail:
- #endif
- #endif
- 	}
--
-+#endif
- 
- 	bool GC::IsPointerToGCPage(const void *item)
- 	{
-+		USING_PAGE_MAP();
- 		if((uintptr)item >= memStart && (uintptr)item < memEnd)
--			return GetPageMapValue((uintptr) item) != 0;
-+			return GetPageMapValueAlreadyLocked((uintptr) item) != 0;
- 		return false;
- 	}
- 
-@@ -1180,10 +1411,13 @@ bail:
- 		// note if we add things while reaping we could delete the object
- 		// here if we had a way to monitor our stack usage
- 		if(reaping && PLENTY_OF_STACK()) {
--			GCCallback *cb = gc->m_callbacks;
--			while(cb) {
--				cb->prereap(obj);
--				cb = cb->nextCB;
-+			{
-+				USING_CALLBACK_LIST(gc);
-+				GCCallback *cb = gc->m_callbacks;
-+				while(cb) {
-+					cb->prereap(obj);
-+					cb = cb->nextCB;
-+				}
- 			}
- 			if(gc->IsFinalized(obj))
- 				((GCFinalizable*)obj)->~GCFinalizable();
-@@ -1378,10 +1612,13 @@ bail:
- 		nextPinnedIndex = 0;
- 
- 		// invoke prereap on all callbacks
--		GCCallback *cb = gc->m_callbacks;
--		while(cb) {
--			cb->prereap();
--			cb = cb->nextCB;
-+		{
-+			USING_CALLBACK_LIST(gc);
-+			GCCallback *cb = gc->m_callbacks;
-+			while(cb) {
-+				cb->prereap();
-+				cb = cb->nextCB;
-+			}
- 		}
- 
- #ifdef _DEBUG
-@@ -1422,10 +1659,13 @@ bail:
- 				}
- #endif
- 				// invoke prereap on all callbacks
--				GCCallback *cbb = gc->m_callbacks;
--				while(cbb) {
--					cbb->prereap(rcobj);
--					cbb = cbb->nextCB;
-+				{
-+					USING_CALLBACK_LIST(gc);
-+					GCCallback *cbb = gc->m_callbacks;
-+					while(cbb) {
-+						cbb->prereap(rcobj);
-+						cbb = cbb->nextCB;
-+					}
- 				}
- 
- 				GCAssert(*(int*)rcobj != 0);
-@@ -1459,10 +1699,13 @@ bail:
- 		zctIndex = nextPinnedIndex = 0;
- 
- 		// invoke postreap on all callbacks
--		cb = gc->m_callbacks;
--		while(cb) {
--			cb->postreap();
--			cb = cb->nextCB;
-+		{
-+			USING_CALLBACK_LIST(gc);
-+			GCCallback *cb = gc->m_callbacks;
-+			while(cb) {
-+				cb->postreap();
-+				cb = cb->nextCB;
-+			}
- 		}
- #ifdef DEBUGGER
- 		if(gc->gcstats && numObjects) {
-@@ -1615,7 +1858,8 @@ bail:
- 		va_end(argptr);
- 
- 		GCAssert(strlen(buf) < 4096);
--			
-+
-+		USING_CALLBACK_LIST(this);
- 		GCCallback *cb = m_callbacks;
- 		while(cb) {
- 			cb->log(buf);
-@@ -1667,23 +1911,27 @@ bail:
- 
- 	bool GC::IsRCObject(const void *item)
- 	{
--		if((uintptr)item >= memStart && (uintptr)item < memEnd && ((uintptr)item&0xfff) != 0)
--		{
--			int bits = GetPageMapValue((uintptr)item);		
--			item = GetRealPointer(item);
--			switch(bits)
--			{
--			case kGCAllocPage:
--				if((char*)item < ((GCAlloc::GCBlock*)((uintptr)item&~0xfff))->items)
--					return false;
--				return GCAlloc::IsRCObject(item);
--			case kGCLargeAllocPageFirst:
--				return GCLargeAlloc::IsRCObject(item);
--			default:
-+		int bits;
-+		{
-+			USING_PAGE_MAP();
-+
-+			if ((uintptr)item < memStart || (uintptr)item >= memEnd || ((uintptr)item&0xfff) == 0)
- 				return false;
--			}
--		}
--		return false;
-+			bits = GetPageMapValueAlreadyLocked((uintptr)item);
-+		}
-+
-+		item = GetRealPointer(item);
-+		switch(bits)
-+		{
-+		case kGCAllocPage:
-+			if((char*)item < ((GCAlloc::GCBlock*)((uintptr)item&~0xfff))->items)
-+				return false;
-+			return GCAlloc::IsRCObject(item);
-+		case kGCLargeAllocPageFirst:
-+			return GCLargeAlloc::IsRCObject(item);
-+		default:
-+			return false;
-+		}
- 	}
- 
- #endif // MMGC_DRC
-@@ -1769,7 +2017,7 @@ bail:
- 				continue;
- 			
- 			// normalize and divide by 4K to get index
--			int bits = GetPageMapValue(val);
-+			int bits = GetPageMapValueAlreadyLocked(val);
- 			switch(bits)
- 			{
- 			case 0:
-@@ -1790,6 +2038,8 @@ bail:
- 
- 	void GC::FindUnmarkedPointers()
- 	{
-+		MMGC_ASSERT_EXCLUSIVE_GC(this);
-+
- 		if(findUnmarkedPointers)
- 		{
- 			uintptr m = memStart;
-@@ -1928,6 +2178,11 @@ bail:
- 	 */
- 	void GC::WhosPointingAtMe(void* me, int recurseDepth, int currentDepth)
- 	{
-+#ifdef MMGC_THREADSAFE
-+		if (currentDepth == 0)
-+			pageMapLock.Acquire();
-+#endif
-+
- 		uintptr val = (uintptr)me;
- 		uintptr m = memStart;
- 
-@@ -1950,7 +2205,7 @@ bail:
- #endif
- 
- 			// divide by 4K to get index
--			int bits = GetPageMapValue(m);
-+			int bits = GetPageMapValueAlreadyLocked(m);
- 			if(bits == kNonGC) 
- 			{
- 				ProbeForMatch((const void*)m, GCHeap::kBlockSize, val, recurseDepth, currentDepth);
-@@ -1994,6 +2249,11 @@ bail:
- 			
- 			}
- 		}
-+
-+#ifdef MMGC_THREADSAFE
-+		if (currentDepth == 0)
-+			pageMapLock.Release();
-+#endif
- 	}
- #undef ALLOCA_AND_FILL_WITH_SPACES
- #endif
-@@ -2395,9 +2655,11 @@ bail:
- 		return 1000000;
- 		#endif
- 	}
--	
-+
- 	void GC::IncrementalMark(uint32 time)
- 	{
-+		MMGC_ASSERT_EXCLUSIVE_GC(this);
-+
- 		SAMPLE_FRAME("[mark]", core());
- 		if(m_incrementalWork.Count() == 0 || hitZeroObjects) {
- 			FinishIncrementalMark();
-@@ -2452,6 +2714,8 @@ bail:
- 
- 	void GC::FinishIncrementalMark()
- 	{
-+		MMGC_ASSERT_EXCLUSIVE_GC(this);
-+
- 		// Don't finish an incremental mark (i.e., sweep) if we
- 		// are in the midst of a ZCT reap.
- 		if (zct.reaping)
-@@ -2483,8 +2747,11 @@ bail:
+@@ -2730,8 +2737,11 @@ bail:
  		}
  
  		// invoke lastmark on all callbacks
 -		for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 -			cb->lastmark(m_incrementalWork);
 +		{
 +			USING_CALLBACK_LIST(this);
 +			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 +				cb->lastmark(m_incrementalWork);
 +		}
  
  		MarkQueueAndStack(m_incrementalWork);
  
-@@ -2635,6 +2902,7 @@ bail:
- 	{
- 		uint32 *bits;
- 
-+		MMGC_ASSERT_GC_LOCK(this);
- 		GCAssert(numBytes % 4 == 0);
- 
- 		#ifdef MMGC_64BIT // we use first 8-byte slot for the free list
-@@ -2706,7 +2974,8 @@ bail:
- 
- 	void GC::AddCallback(GCCallback *cb)
- 	{
--		CheckThread();
-+		USING_CALLBACK_LIST(this);
-+
- 		cb->prevCB = NULL;
- 		cb->nextCB = m_callbacks;
- 		if(m_callbacks)
-@@ -2716,7 +2985,8 @@ bail:
- 
- 	void GC::RemoveCallback(GCCallback *cb)
- 	{
--		CheckThread();
-+		USING_CALLBACK_LIST(this);
-+
- 		if( m_callbacks == cb )
- 			m_callbacks = cb->nextCB;
- 		else
-@@ -2728,7 +2998,8 @@ bail:
+@@ -2978,7 +2988,8 @@ bail:
  
  	void GC::AddEdgeCallback(GCEdgeCallback *cb)
  	{
 -		CheckThread();
 +		USING_CALLBACK_LIST(this);
 +
  		cb->prevCB = NULL;
  		cb->nextCB = m_edgeCallbacks;
  		if(m_edgeCallbacks)
-@@ -2738,7 +3009,8 @@ bail:
+@@ -2988,7 +2999,8 @@ bail:
  
  	void GC::RemoveEdgeCallback(GCEdgeCallback *cb)
  	{
 -		CheckThread();
 +		USING_CALLBACK_LIST(this);
 +
  		if( m_edgeCallbacks == cb )
  			m_edgeCallbacks = cb->nextCB;
  		else
-@@ -2750,12 +3022,12 @@ bail:
+@@ -3000,12 +3012,12 @@ bail:
  
  	void GC::FireFoundEdgeTo(const void *p)
  	{
 +		// Don't acquire the spinlock here because (a) that would really hurt
 +		// performance; (b) the m_edgeCallbacks list, unlike the m_callbacks
 +		// list, is protected by the request model.
  		p = GetUserPointer(p);
 -		GCEdgeCallback *cb = m_edgeCallbacks;
 -		while(cb) {
 +		for (GCEdgeCallback *cb = m_edgeCallbacks; cb; cb = cb->nextCB)
  			cb->foundEdgeTo(p);
 -			cb = cb->nextCB;
 -		}
  	}
  
  	void GC::PushWorkItem(GCStack<GCWorkItem> &stack, GCWorkItem item)
-@@ -2768,7 +3040,11 @@ bail:
- 	GCWeakRef* GC::GetWeakRef(const void *item) 
- 	{
- 		GC *gc = GetGC(item);
-+#ifdef MMGC_THREADSAFE
-+		GCAutoLock _lock(gc->m_lock);
-+#endif
- 		GCWeakRef *ref = (GCWeakRef*) gc->weakRefs.get(item);
-+
- 		if(ref == NULL) {
- 			ref = new (gc) GCWeakRef(item);
- 			gc->weakRefs.put(item, ref);
-@@ -2830,6 +3106,8 @@ bail:
- 
- 	void GC::FindMissingWriteBarriers()
- 	{
-+		MMGC_ASSERT_EXCLUSIVE_GC(this);
-+
- 		if(!incrementalValidation)
- 			return;
- 
-@@ -2891,6 +3169,7 @@ bail:
- 	void GC::StartGCActivity()
- 	{
- 		// invoke postsweep callback
-+		USING_CALLBACK_LIST(this);
- 		GCCallback *cb = m_callbacks;
- 		while(cb) {
- 			cb->startGCActivity();
-@@ -2901,6 +3180,7 @@ bail:
- 	void GC::StopGCActivity()
- 	{
- 		// invoke postsweep callback
-+		USING_CALLBACK_LIST(this);
- 		GCCallback *cb = m_callbacks;
- 		while(cb) {
- 			cb->stopGCActivity();
-@@ -2911,6 +3191,7 @@ bail:
- 	void GC::AllocActivity(int blocks)
- 	{
- 		// invoke postsweep callback
-+		USING_CALLBACK_LIST(this);
- 		GCCallback *cb = m_callbacks;
- 		while(cb) {
- 			cb->allocActivity(blocks);
 diff --git a/MMgc/GC.h b/MMgc/GC.h
 --- a/MMgc/GC.h
 +++ b/MMgc/GC.h
-@@ -144,23 +144,33 @@
- #define MMGC_GET_STACK_EXTENTS(_gc, _stack, _size) \
- 		int regs[7];\
- 		asm("stmia %0,{r4-r10}" : : "r" (regs));\
--		asm("mov %0,sp" : "=r" (_stack));\
--		_size = (uintptr)StackTop - (uintptr)_stack;
-+               asm("mov %0,sp" : "=r" (_stack));\
-+               _size = (uintptr)StackTop - (uintptr)_stack;
- 
- #elif defined MMGC_SPARC
- #define MMGC_GET_STACK_EXTENTS(_gc, _stack, _size) \
--		asm("ta 3");\
--		_stack = (void *) _getsp();\
--		_size = (uintptr)_gc->GetStackTop() - (uintptr)_stack;
-+               asm("ta 3");\
-+               _stack = (void *) _getsp();\
-+               _size = (uintptr)_gc->GetStackTop() - (uintptr)_stack;
-+#endif
-+
-+#ifdef MMGC_THREADSAFE
-+#define MMGC_ASSERT_GC_LOCK(gc)  GCAssert((gc)->m_lock.IsHeld() || (gc)->destroying)
-+#define MMGC_ASSERT_EXCLUSIVE_GC(gc) \
-+    GCAssert(((gc)->m_gcRunning \
-+                         && (gc)->m_exclusiveGCThread == GCThread::GetCurrentThread()) \
-+                        || (gc)->destroying)
-+#else
-+#define MMGC_ASSERT_GC_LOCK(gc)      ((void) 0)
-+#define MMGC_ASSERT_EXCLUSIVE_GC(gc) ((void) 0)
- #endif
- 
- #ifdef DEBUGGER
- namespace avmplus
- {
--	class AvmCore;
-+       class AvmCore;
- }
- #endif
--
- namespace MMgc
- {
- 	/**
-@@ -219,7 +229,10 @@ namespace MMgc
- 	private:
- 		FixedMalloc* fm;
- 		GC * gc;
-+
-+		/** @access Requires(gc->m_rootListLock) */
- 		GCRoot *next;
-+		/** @access Requires(gc->m_rootListLock) */
- 		GCRoot *prev;
- 		const void *object;
- 		size_t size;
-@@ -244,16 +257,25 @@ namespace MMgc
+@@ -258,16 +258,25 @@ namespace MMgc
  		void Destroy();
  
  		/**
 -		 * This method is called from GC::Collect(), before
 -		 * anything else, even in cases where it's logically
 -		 * certain that collection <em>will not start</em>
 -		 * (e.g. because we're already collecting, or GC is
 +		 * GC::Collect() fires this callback before anything else, even in
@@ -912,86 +159,26 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		/**
 +		 * GC::Collect() fires this callback after everything else,
 +		 * only if collection actually happened in this thread.
 +		 */
 +		virtual void postcollect() {}
  
  		/**
  		 * This method is invoked during the last mark phase
-@@ -265,6 +287,8 @@ namespace MMgc
+@@ -279,6 +288,8 @@ namespace MMgc
  		 * provided by the GCCallback base class.  The
  		 * corresponding methods of <code>class GC</code> are
  		 * private.)
 +		 *
 +		 * @access Requires(gc->exclusiveGC)
  		 */
  		virtual void lastmark(GCStack<GCWorkItem> &) {}
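For illustration, a callback using these hooks might look like the sketch below. The subclass, its constructor signature, and the bodies are assumptions (the base constructor is presumed to register with the GC, as GCEdgeCallback's documented constructor does later in this file); the hook signatures are the ones declared here:

    class MyCallback : public MMgc::GCCallback
    {
    public:
        explicit MyCallback(MMgc::GC *gc) : GCCallback(gc) {}  // assumed ctor

        virtual bool precollect()
        {
            return true;     // returning false vetoes this collection
        }
        virtual void postcollect()
        {
            // runs only on the thread that actually collected
        }
        virtual void lastmark(MMgc::GCStack<MMgc::GCWorkItem> &work)
        {
            (void) work;     // push any extra roots onto the mark queue
        }
    };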
  
-@@ -272,11 +296,15 @@ namespace MMgc
- 		 * This method is invoked after all marking and before any
- 		 * sweeping, useful for bookkeeping based on whether things
- 		 * got marked
-+		 *
-+		 * @access Requires(gc->exclusiveGC)
- 		 */
- 		virtual void presweep() {}
- 
- 		/**
- 		 * This method is invoked after all sweeping
-+		 *
-+		 * @access Requires(gc->exclusiveGC)
- 		 */
- 		virtual void postsweep() {}
- 
-@@ -292,6 +320,43 @@ namespace MMgc
- 		// called after a ZCT reap completes
- 		virtual void postreap() {}
- #endif
-+
-+		/**
-+		 * This callback is the first thing a collecting thread does once it
-+		 * acquires exclusiveGC status.  When this is called, no other threads
-+		 * are running in requests and the calling thread holds gc->m_lock.
-+		 *
-+		 * Although this makes the most sense in an MMGC_THREADSAFE build, the
-+		 * callback is nonetheless called at the same point during collection
-+		 * even in non-MMGC_THREADSAFE builds.  This is so programs that build
-+		 * in both thread-safe and non-thread-safe configurations will see the
-+		 * same behavior in both.  The same idea applies to
-+		 * enterExclusiveGCNoLock and leaveExclusiveGC.
-+		 *
-+		 * @access Requires(gc->exclusiveGC && gc->m_lock)
-+		 */
-+		virtual void enterExclusiveGC() {}
-+
-+		/**
-+		 * This callback happens after enterExclusiveGC.  The only difference
-+		 * is that this is called after the GC has released gc->m_lock.
-+		 *
-+		 * See note at GCCallback::enterExclusiveGC().
-+		 *
-+		 * @access Requires(gc->exclusiveGC && !gc->m_lock)
-+		 */
-+		virtual void enterExclusiveGCNoLock() {}
-+
-+		/**
-+		 * This callback is the last thing a collecting thread does before it
-+		 * relinquishes exclusiveGC status, allowing other threads to run in
-+		 * requests.
-+		 *
-+		 * See note at GCCallback::enterExclusiveGC().
-+		 *
-+		 * @access Requires(gc->exclusiveGC && gc->m_lock)
-+		 */
-+		virtual void leaveExclusiveGC() {}
- 
- 		/**
- 		 * This method is called before an RC object is reaped
-@@ -336,6 +401,17 @@ namespace MMgc
+@@ -397,6 +408,17 @@ namespace MMgc
  	{
  		friend class GC;
  	public:
 +		/**
 +		 * Create an edge callback.
 +		 *
 +		 * If `gc` is non-null, this constructor adds this GCEdgeCallback to a
 +		 * list of callbacks managed by `gc`.  This means the new object
@@ -999,644 +186,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * constructed yet, potentially resulting in "Pure virtual method
 +		 * called" or some other crash.  To avoid this race condition, call
 +		 * this constructor with a non-null `gc` only from a thread that is in
 +		 * a request on `gc`.
 +		 */
  		GCEdgeCallback(GC *gc = NULL);
  		virtual ~GCEdgeCallback();
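Concretely, the safe pattern implied by the constructor comment is to subclass and register from a thread that is in a request on `gc` (the subclass here is hypothetical; foundEdgeTo's signature is inferred from FireFoundEdgeTo below):

    // Safe only if the constructing thread is in a request on gc, so no
    // collection can fire callbacks at the half-constructed object.
    class EdgeLogger : public MMgc::GCEdgeCallback
    {
    public:
        explicit EdgeLogger(MMgc::GC *gc) : GCEdgeCallback(gc) {}
        virtual void foundEdgeTo(const void *p)
        {
            (void) p;    // record or log the edge
        }
    };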
  
-@@ -533,12 +609,19 @@ namespace MMgc
- 		 * will cause a garbage collection.  This makes code run
- 		 * abysmally slow, but can be useful for detecting mark
- 		 * bugs.
-+		 *
-+		 * The GC reads this flag only when holding the GC lock.  It is best
-+		 * to set it as soon as the GC is created.
-+		 *
-+		 * @access Requires(m_lock)
- 		 */
- 		bool greedy;
- 
- 		/**
- 		 * nogc is a debugging flag.  When set, garbage collection
- 		 * never happens.
-+		 *
-+		 * @access Requires(m_lock)
- 		 */
- 		bool nogc;
- 
-@@ -554,8 +637,28 @@ namespace MMgc
- 		bool validateDefRef;		
- 		bool keepDRCHistory;
- 
-+		/**
-+		 * Expand, don't collect, until we reach this threshold. Units are
-+		 * pages, not KB, just like GCHeap::GetTotalHeapSize().
-+		 *
-+		 * In an MMGC_THREADSAFE build, the GC reads this configuration value
-+		 * only when holding the GC lock.  Set it during
-+		 * initialization, before the GC is visible to multiple threads.
-+		 *
-+		 * @access Requires(m_lock)
-+		 */
- 		size_t collectThreshold;
- 
-+		/**
-+		 * If true, log some statistics during each collection.
-+		 * The output goes to gclog().
-+		 *
-+		 * In an MMGC_THREADSAFE build, the GC reads this flag only during
-+		 * stop-the-world collection.  Set it during initialization, before
-+		 * the GC is visible to multiple threads.
-+		 *
-+		 * @access Requires(exclusiveGC)
-+		 */
- 		bool gcstats;
- 
- 		bool dontAddToZCTDuringCollection;
-@@ -565,6 +668,14 @@ namespace MMgc
- 		bool incrementalValidationPedantic;
- #endif
- 
-+		/**
-+		 * Configuration flag enabling incremental collection.
-+		 *
-+		 * In an MMGC_THREADSAFE build, the GC reads this flag only when
-+		 * holding the GC lock.  Set it during initialization.
-+		 *
-+		 * @access Requires(m_lock)
-+		 */
- 		bool incremental;
- 
- 		// -- Interface
-@@ -572,7 +683,13 @@ namespace MMgc
- 		virtual ~GC();
- 		
- 		/**
--		 * Causes an immediate garbage collection
-+		 * Causes an immediate garbage collection.
-+		 *
-+		 * In an MMGC_THREADSAFE build, the caller must not be inside a
-+		 * request.  If the caller is inside a request, call
-+		 * CollectFromRequest() instead.
-+		 *
-+		 * @access Requires(!m_lock && !request)
- 		 */
- 		void Collect();
- 
-@@ -597,15 +714,26 @@ namespace MMgc
- 
- 		/**
- 		 * Main interface for allocating memory.  Default flags is no
--		 * finalization, contains pointers is set and zero is set
-+		 * finalization, contains pointers is set and zero is set.
-+		 *
-+		 * Do not call this from a finalizer.
-+		 *
-+		 * @access Requires(request)
- 		 */
- 		void *Alloc(size_t size, int flags=0, int skip=3);
-+
-+		/** @access Requires(request && m_lock) */
-+		void *AllocAlreadyLocked(size_t size, int flags=0, int skip=3);
- 
- 		
- 		/**
- 		 * overflow checking way to call Alloc for a # of n size'd items,
- 		 * all instance of Alloc(num*sizeof(thing)) should be replaced with:
- 		 * Calloc(num, sizeof(thing))
-+		 *
-+		 * Do not call this from a finalizer.
-+		 *
-+		 * @access Requires(request)
- 		 */
- 		void *Calloc(size_t num, size_t elsize, int flags=0, int skip=3);
- 
-@@ -613,11 +741,15 @@ namespace MMgc
- 		 * One can free a GC allocated pointer, this will throw an assertion
- 		 * if called during the Sweep phase (ie via a finalizer) it can only be
- 		 * used outside the scope of a collection
-+		 *
-+		 * @access Requires(request)
- 		 */
- 		void Free(void *ptr);
- 
- 		/**
- 		 * return the size of a piece of memory, may be bigger than what was asked for
-+		 *
-+		 * @access Requires(request || exclusiveGC)
- 		 */
- 		static size_t Size(const void *ptr)
- 		{
-@@ -631,6 +763,8 @@ namespace MMgc
- 		/**
- 		 * Tracers should employ GetMark and SetMark to
- 		 * set the mark bits during the mark pass.
-+		 *
-+		 * @access Requires(request || exclusiveGC)
- 		 */
- 		static int GetMark(const void *item)
- 		{
-@@ -737,9 +871,13 @@ namespace MMgc
- 		}
- 
- 		/**
--		 * Used by sub-allocators to obtain memory
-+		 * Used by sub-allocators to obtain memory.
-+		 *
-+		 * @access Requires(m_lock)
- 		 */
- 		void* AllocBlock(int size, int pageType, bool zero=true);
-+
-+		/** @access Requires((request && m_lock) || exclusiveGC) */
- 		void FreeBlock(void *ptr, uint32 size);
- 
- 		GCHeap *GetGCHeap() const { return heap; }
-@@ -767,11 +905,19 @@ namespace MMgc
- 		 */
- 		bool IncrementalMarking() { return marking; }
- 
--		// a magical write barriers that finds the container's address and the GC, just
--		// make sure address is a pointer to a GC page, only used by WB smart pointers
-+		/**
-+		 * A magical write barrier that finds the container's address and the
-+		 * GC, just make sure @a address is a pointer to a GC page. Only used
-+		 * by WB smart pointers.
-+		 *
-+		 * @access Requires(request)
-+		 */
- 		static void WriteBarrier(const void *address, const void *value);
-+
-+		/** @access Requires(request) */
- 		static void WriteBarrierNoSub(const void *address, const void *value);
- 
-+		/** @access Requires(request) */
- 		void writeBarrier(const void *container, const void *address, const void *value)
- 		{
- 			GCAssert(IsPointerToGCPage(container));
-@@ -783,13 +929,19 @@ namespace MMgc
- 			WriteBarrierWrite(address, value);
- 		}
- 
--		// optimized version with no RC checks or pointer masking
-+		/**
-+		 * optimized version with no RC checks or pointer masking
-+		 *
-+		 * @access Requires(request)
-+		 */
- 		void writeBarrierRC(const void *container, const void *address, const void *value);
- 
- 		/**
--		Write barrier when the value could be a pointer with anything in the lower 3 bits
--		FIXME: maybe assert that the lower 3 bits are either zero or a pointer type signature,
--		this would require the application to tell us what bit patterns are pointers.
-+		 * Write barrier when the value could be a pointer with anything in the lower 3 bits
-+		 * FIXME: maybe assert that the lower 3 bits are either zero or a pointer type signature,
-+		 * this would require the application to tell us what bit patterns are pointers.
-+		 *
-+		 * @access Requires(request)
- 		 */
- 		__forceinline void WriteBarrierNoSubstitute(const void *container, const void *value)
- 		{
-@@ -797,9 +949,11 @@ namespace MMgc
- 		}
- 			
- 		/**
--		 AVM+ write barrier, valuePtr is known to be pointer and the caller
--		does the write
--		*/
-+		 * AVM+ write barrier, valuePtr is known to be pointer and the caller
-+		 * does the write.
-+		 *
-+		 * @access Requires(request)
-+		 */
- 		__forceinline void WriteBarrierTrap(const void *container, const void *valuePtr)
- 		{
- 			GCAssert(IsPointerToGCPage(container));
-@@ -823,8 +977,10 @@ namespace MMgc
- 
- 	public:
- 
-+		/** @access Requires(request || exclusiveGC) */
- 		bool ContainsPointers(const void *item);
- 
-+		/** @access Requires(request) */
- 		void *FindBeginning(const void *gcItem)
- 		{
- 			GCAssert(gcItem != NULL);
-@@ -860,11 +1016,23 @@ namespace MMgc
- 		bool IsRCObject(const void *) { return false; }
- #endif
- 
--
--		bool Collecting() const { return collecting; }
--
-+		/**
-+		 * True during Sweep phase.  Application code can use this to
-+		 * determine if it's being called (directly or indirectly) from a
-+		 * finalizer.
-+		 *
-+		 * @see IsGCRunning()
-+		 * @access Requires(request || exclusiveGC)
-+		 */
-+		bool Collecting() const
-+		{
-+			return collecting;
-+		}
-+
-+		/** @access Requires(request || exclusiveGC) */
- 		bool IsGCMemory (const void *);
- 
-+		/** @access Requires(request || exclusiveGC) */
- 		bool IsQueued(const void *item);
- 
- 		static uint64 GetPerformanceCounter();
-@@ -875,9 +1043,12 @@ namespace MMgc
- 			return (double(GC::GetPerformanceCounter() - start) * 1000) / GC::GetPerformanceFrequency();
- 		}
- 
-+#ifndef MMGC_THREADSAFE
- 		void DisableThreadCheck() { disableThreadCheck = true; }
-+#endif
- 		
--		uint64 t0;
-+		/** GC initialization time, in ticks.  Used for logging. */
-+		const uint64 t0;
- 
- 		// a tick is the unit of GetPerformanceCounter()
- 		static uint64 ticksToMicros(uint64 ticks) 
-@@ -900,21 +1071,69 @@ namespace MMgc
- 		}
- 
- 		// marking rate counter
-+		/**
-+		 * Total number of bytes of pointer-containing memory scanned by this
-+		 * GC.  Used to measure marking rate, which is
-+		 * <code>bytesMarked/ticksToMillis(markTicks)</code>.
-+		 *
-+		 * @access ReadWrite(request, exclusiveGC);
-+		 */
- 		uint64 bytesMarked;
-+
-+		/**
-+		 * Total time spent doing incremental marking, in ticks.  See
-+		 * bytesMarked.
-+		 *
-+		 * @access ReadWrite(request, exclusiveGC)
-+		 */
- 		uint64 markTicks;
- 
- 		// calls to mark item
- 		uint32 lastStartMarkIncrementCount;
- 		uint32 markIncrements;
-+
-+		/**
-+		 * Number of calls to MarkItem().
-+		 * @access ReadWrite(request, exclusiveGC)
-+		 */
- 		uint32 marks;
-+
-+		/**
-+		 * Number of calls to Sweep().
-+		 * @access ReadWrite(request, exclusiveGC)
-+		 */
-         uint32 sweeps;
- 
-+		/**
-+		 * Number of calls to MarkItem() during the current (or most recent)
-+		 * IncrementalMark().
-+		 *
-+		 * @access ReadWrite(request, exclusiveGC)
-+		 */
- 		uint32 numObjects;
-+
-+		/**
-+		 * Number of bytes scanned in MarkItem() during the current (or most
-+		 * recent) IncrementalMark().
-+		 *
-+		 * @access ReadWrite(request, exclusiveGC)
-+		 */
- 		uint32 objSize;
- 
-+		/**
-+		 * Time of the latest FinishIncrementalMark() call, in ticks.
-+		 *
-+		 * @access ReadWrite(request, exclusiveGC)
-+		 */
- 		uint64 sweepStart;
- 
--		// if we hit zero in a marking incremental force completion in the next one
-+		/**
-+		 * True if we emptied the work queue during the most recent
-+		 * incremental mark.  This means the next mark will force the GC cycle
-+		 * through to completion.
-+		 *
-+		 * @access ReadWrite(request, exclusiveGC)
-+		 */
- 		bool hitZeroObjects;
- 
- 		// called at some apropos moment from the mutator, ideally at a point
-@@ -926,7 +1145,10 @@ namespace MMgc
- 
- 		bool Destroying() { return destroying; }
- 
-+		/** @access Requires(request) */
- 		static GCWeakRef *GetWeakRef(const void *obj);
-+
-+		/** @access Requires((request && m_lock) || exclusiveGC) */
- 		void ClearWeakRef(const void *obj);
- 
- 		uintptr	GetStackTop() const;
-@@ -943,7 +1165,10 @@ namespace MMgc
- 		// FIXME: only used for FixedAlloc, GCAlloc sized dynamically
- 		const static int kPageUsableSpace = 3936;
- 
-+		/** @access Requires(request && m_lock) */
- 		uint32 *GetBits(int numBytes, int sizeClass);
-+
-+		/** @access Requires((request && m_lock) || exclusiveGC) */
- 		void FreeBits(uint32 *bits, int sizeClass)
- 		{
- #ifdef _DEBUG
-@@ -952,32 +1177,55 @@ namespace MMgc
- 			*(uint32**)bits = m_bitsFreelists[sizeClass];
- 			m_bitsFreelists[sizeClass] = bits;
- 		}
-+
-+		/** @access Requires((request && m_lock) || exclusiveGC) */
- 		uint32 *m_bitsFreelists[kNumSizeClasses];
-+		/** @access Requires((request && m_lock) || exclusiveGC) */
- 		uint32 *m_bitsNext;
- 
-+		/** @access Requires((request && m_lock) || exclusiveGC) */
- 		GCHashtable weakRefs;
- 
- 		bool destroying;
- 
--		// we track the heap size so if it expands due to fixed memory allocations
--		// we can trigger a mark/sweep, other wise we can have lots of small GC 
--		// objects allocating tons of fixed memory which results in huge heaps
--		// and possible out of memory situations.
-+		/**
-+		 * We track the heap size so if it expands due to fixed memory
-+		 * allocations we can trigger a mark/sweep. Otherwise we can have lots
-+		 * of small GC objects allocating tons of fixed memory, which results
-+		 * in huge heaps and possible out of memory situations.
-+		 *
-+		 * @access Requires(m_lock)
-+		 */
- 		size_t heapSizeAtLastAlloc;
- 
- 		bool stackCleaned;
- 		const void *rememberedStackTop;
- 
-+#ifndef MMGC_THREADSAFE
- 		// for external which does thread safe multi-thread AS execution
- 		bool disableThreadCheck;
--
-+#endif
-+
-+		/**
-+		 * True if incremental marking is on and some objects have been marked.
-+		 * This means write barriers are enabled.
-+		 *
-+		 * @access ReadWrite(request, exclusiveGC)
-+		 *
-+		 * The GC thread may read and write this flag.  Application threads in
-+		 * requests have read-only access.
-+		 */
- 		bool marking;
- 		GCStack<GCWorkItem> m_incrementalWork;
- 		void StartIncrementalMark();
- 		void FinishIncrementalMark();
-+
-+		/** @access Requires(request || exclusiveGC) */
- 		int IsWhite(const void *item);
--		
-+
-+		/** @access ReadWrite(request, exclusiveGC) */
- 		uint64 lastMarkTicks;
-+		/** @access ReadWrite(request, exclusiveGC) */
- 		uint64 lastSweepTicks;
- 
- 		const static int16 kSizeClasses[kNumSizeClasses];		
-@@ -989,14 +1237,44 @@ namespace MMgc
- 		// 0 - not in use
- 		// 1 - used by GCAlloc
- 		// 3 - used by GCLargeAlloc
-+
-+		/** @access Requires(pageMapLock) */
- 		uintptr memStart;
-+		/** @access Requires(pageMapLock) */
- 		uintptr memEnd;
- 
-+		/** @access Requires(m_lock) */
- 		size_t totalGCPages;
- 
-+		/**
-+		 * The bitmap for what pages are in use.  Any access to either the
-+		 * pageMap pointer or the bitmap requires pageMapLock.
-+		 *
-+		 * (Note: A better synchronization scheme might be to use atomic
-+		 * operations to read and write the pageMap pointer, writing it only
-+		 * from within m_lock; and then using atomic read and write
-+		 * operations--on Intel x86, these are just ordinary reads and
-+		 * writes--to access the bitmap, with writes again only from within
-+		 * m_lock.  This would require reallocating the pageMap more often,
-+		 * but at least write barriers wouldn't have to acquire the spinlock.)
-+		 *
-+		 * @access Requires(pageMapLock)
-+		 */
- 		unsigned char *pageMap;
- 
-+		/**
-+		 * This spinlock covers memStart, memEnd, and the contents of pageMap.
-+		 */
-+		mutable GCSpinLock pageMapLock;
-+
- 		inline int GetPageMapValue(uintptr addr) const
-+		{
-+			GCAcquireSpinlock lock(pageMapLock);
-+			return GetPageMapValueAlreadyLocked(addr);
-+		}
-+
-+		/** @access Requires(pageMapLock) */
-+		inline int GetPageMapValueAlreadyLocked(uintptr addr) const
- 		{
- 			uintptr index = (addr-memStart) >> 12;
- 
-@@ -1008,7 +1286,20 @@ namespace MMgc
- 			//return (pageMap[addr >> 2] & (3<<shiftAmount)) >> shiftAmount;
- 			return (pageMap[index >> 2] >> shiftAmount) & 3;
- 		}
-+
-+		/**
-+		 * Set the pageMap bits for the given address.  Those bits must be
-+		 * zero beforehand.
-+		 *
-+		 * @access Requires(pageMapLock)
-+		 */
- 		void SetPageMapValue(uintptr addr, int val);
-+
-+		/**
-+		 * Zero out the pageMap bits for the given address.
-+		 *
-+		 * @access Requires(pageMapLock)
-+		 */
- 		void ClearPageMapValue(uintptr addr);
- 
- 		void MarkGCPages(void *item, uint32 numpages, int val);
-@@ -1028,12 +1319,37 @@ namespace MMgc
- 		GCLargeAlloc *largeAlloc;
- 		GCHeap *heap;
- 		
-+		/** @access Requires(m_lock) */
- 		void* AllocBlockIncremental(int size, bool zero=true);
-+
-+		/** @access Requires(m_lock) */
- 		void* AllocBlockNonIncremental(int size, bool zero=true);
-+
-+	protected:
-+		/**
-+		 * Collect in a thread-safe, recursion-preventing way, with
-+		 * callbacks.
-+		 *
-+		 * Both parameters are ignored in non-MMGC_THREADSAFE builds.  In an
-+		 * MMGC_THREADSAFE build, callerHoldsLock must be true iff the calling
-+		 * thread already holds m_lock, and callerHasActiveRequest must be
-+		 * true iff the calling thread is already in an active request.
-+		 */
-+		void CollectWithBookkeeping(bool callerHoldsLock,
-+									bool callerHasActiveRequest);
-+
-+	private:
-+		/**
-+		 * Just collect.
-+		 *
-+		 * @access Requires(exclusiveGC)
-+		 */
-+		void CollectImpl();
- 
- #ifdef _DEBUG
- 		public:
- #endif
-+		/** @access Requires(exclusiveGC) */
- 		void ClearMarks();
- #ifdef _DEBUG
- 		private:
-@@ -1043,59 +1359,127 @@ namespace MMgc
- #ifdef _DEBUG
- 		public:
- 		// sometimes useful for mutator to call this
-+		/** @access Requires(exclusiveGC) */
- 		void Trace(const void *stackStart=NULL, size_t stackSize=0);
- 		private:
- #endif
- 
-+		/** @access Requires(exclusiveGC) */
- 		void Finalize();
-+		/** @access Requires(exclusiveGC) */
- 		void Sweep(bool force=false);
-+		/** @access Requires(exclusiveGC) */
- 		void ForceSweep() { Sweep(true); }
-+		/** @access Requires(exclusiveGC) */
- 		void Mark(GCStack<GCWorkItem> &work);
-+		/** @access Requires(exclusiveGC) */
- 		void MarkQueueAndStack(GCStack<GCWorkItem> &work);
-+		/** @access Requires(exclusiveGC) */
- 		void MarkItem(GCStack<GCWorkItem> &work)
- 		{
- 			GCWorkItem workitem = work.Pop();
- 			MarkItem(workitem, work);
- 		}
-+		/** @access Requires(exclusiveGC) */
- 		void MarkItem(GCWorkItem &workitem, GCStack<GCWorkItem> &work);
--		// Write barrier slow path
--
-+
-+		/**
-+		 * Write barrier slow path.  Queues the white object.
-+		 *
-+		 * @access Requires(request)
-+		 */
- 		void TrapWrite(const void *black, const void *white);
- 
-+		/**
-+		 * Number of pages allocated from the GCHeap since the start of the
-+		 * current GC cycle.
-+		 *
-+		 * @access Requires((request && m_lock) || exclusiveGC)
-+		 */
- 		unsigned int allocsSinceCollect;
--		// The collecting flag prevents an unwanted recursive collection.
-+
-+		/**
-+		 * True during the sweep phase of collection.  Several things have to
-+		 * behave a little differently during this phase.  For example,
-+		 * GC::Free() does nothing during sweep phase; otherwise finalizers
-+		 * could be called twice.
-+		 *
-+		 * Also, Collect() uses this to protect itself from recursive calls
-+		 * (from badly behaved finalizers).
-+		 *
-+		 * @access ReadWrite(request, exclusiveGC)
-+		 */
- 		bool collecting;
-  
-+		/** @access Requires((request && m_lock) || exclusiveGC) */
- 		bool finalizedValue;
- 
--		// list of pages to be swept, built up in Finalize
-+		/** @access Requires(exclusiveGC) */
- 		void AddToSmallEmptyBlockList(GCAlloc::GCBlock *b)
- 		{
- 			b->next = smallEmptyPageList;
- 			smallEmptyPageList = b;
- 		}
-+
-+		/**
-+		 * list of pages to be swept, built up in Finalize
-+		 * @access Requires(exclusiveGC)
-+		 */
- 		GCAlloc::GCBlock *smallEmptyPageList;
- 		
--		// list of pages to be swept, built up in Finalize
-+		/** @access Requires(exclusiveGC) */
- 		void AddToLargeEmptyBlockList(GCLargeAlloc::LargeBlock *lb)
- 		{
- 			lb->next = largeEmptyPageList;
- 			largeEmptyPageList = lb;
- 		}
-+
-+		/**
-+		 * list of pages to be swept, built up in Finalize
-+		 * @access Requires(exclusiveGC)
-+		 */
- 		GCLargeAlloc::LargeBlock *largeEmptyPageList;
- 		
- #ifdef GCHEAP_LOCK
- 		GCSpinLock m_rootListLock;
- #endif
- 
-+		/** @access Requires(m_rootListLock) */
- 		GCRoot *m_roots;
- 		void AddRoot(GCRoot *root);
- 		void RemoveRoot(GCRoot *root);
- 		
-+#ifdef MMGC_THREADSAFE
-+		GCSpinLock m_callbackListLock;
-+#endif
-+
-+		/**
-+		 * Points to the head of a linked list of callback objects.
-+		 *
-+		 * @access Requires(m_callbackListLock)
-+		 */
- 		GCCallback *m_callbacks;
+@@ -1446,6 +1468,24 @@ namespace MMgc
  		void AddCallback(GCCallback *cb);
  		void RemoveCallback(GCCallback *cb);
  
 +		/**
 +		 * Points to the head of a linked list of edge callback objects.
 +		 *
 +		 * @access Requires((request && m_callbackListLock) || exclusiveGC)
 +		 *
@@ -1651,739 +211,22 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * even if the calling thread is not in a request at all, so this
 +		 * policy would be insufficient for m_callbacks.  Second,
 +		 * m_edgeCallbacks fires very frequently during marking, so a
 +		 * lock-free policy is probably much faster.
 +		 */
  		GCEdgeCallback *m_edgeCallbacks;
  		void AddEdgeCallback(GCEdgeCallback *cb);
  		void RemoveEdgeCallback(GCEdgeCallback *cb);
-@@ -1103,9 +1487,10 @@ namespace MMgc
+@@ -1453,9 +1493,10 @@ namespace MMgc
  		/**
  		 * Notify GCEdgeCallbacks of an edge.
  		 *
 -		 * p is a "real pointer".  This method converts it to
 -		 * a "user pointer" using GetUserPointer() before
 -		 * calling callbacks.
 +		 * p is a "real pointer".  This method converts it to a "user pointer"
 +		 * using GetUserPointer() before calling callbacks.
 +		 *
 +		 * @access Requires(exclusiveGC)
  		 */
  		void FireFoundEdgeTo(const void *p);
  
-@@ -1128,7 +1513,9 @@ private:
- private:
- #endif
- 
-+#ifndef MMGC_THREADSAFE
- 		void CheckThread();
-+#endif
- 
- 		void PushWorkItem(GCStack<GCWorkItem> &stack, GCWorkItem item);
- 
-@@ -1145,17 +1532,37 @@ private:
- 		void CheckFreelists();
- 
- 		int m_gcLastStackTrace;
-+
-+		/**
-+		 * Used by FindUnmarkedPointers.
-+		 *
-+		 * @access Requires(exclusiveGC)
-+		 */
- 		void UnmarkedScan(const void *mem, size_t size);
--		// find unmarked pointers in entire heap
-+
-+		/**
-+		 * Find unmarked pointers in the entire heap.
-+		 *
-+		 * @access Requires(exclusiveGC)
-+		 */
- 		void FindUnmarkedPointers();
- 
- 		// methods for incremental verification
- 
--		// scan a region of memory for white pointers, used by FindMissingWriteBarriers
-+		/**
-+		 * Scan a region of memory for white pointers. Used by
-+		 * FindMissingWriteBarriers.
-+		 *
-+		 * @access Requires(exclusiveGC)
-+		 */
- 		void WhitePointerScan(const void *mem, size_t size);
- 
--		// scan all GC memory (skipping roots) If a GC object is black make sure
--		// it has no white pointers, skip it if its grey.
-+		/**
-+		 * Scan all GC memory (skipping roots). If a GC object is black make sure
-+		 * it has no pointers to white objects.
-+		 *
-+		 * @access Requires(exclusiveGC)
-+		 */
- 		void FindMissingWriteBarriers();
- #ifdef WIN32
- 		// store a handle to the thread that create the GC to ensure thread safety
-@@ -1176,7 +1583,187 @@ public:
- #ifdef _DEBUG
- 		// Dump a list of objects that have pointers to the given location.
- 		void WhosPointingAtMe(void* me, int recurseDepth=0, int currentDepth=0);
-+
-+		/**
-+		 * Used by WhosPointingAtMe.
-+		 * @access Requires(pageMapLock)
-+		 */
-     	void ProbeForMatch(const void *mem, size_t size, uintptr value, int recurseDepth, int currentDepth);
-+#endif
-+
-+#ifdef MMGC_THREADSAFE
-+	public:
-+		/**
-+		 * True if marking or sweeping is happening.  In an MMGC_THREADSAFE
-+		 * build, this implies that no threads in requests are running right
-+		 * now.
-+		 *
-+		 * Contrast Collecting().
-+		 *
-+		 * @access Requires(request || exclusiveGC || m_lock)
-+		 */
-+		bool IsGCRunning() const { return m_gcRunning; }
-+
-+	protected:
-+		/** @access Requires(m_lock) */
-+		void OnEnterRequestAlreadyLocked()
-+		{
-+			WaitForGCDone();
-+			m_requestCount++;
-+#ifdef _DEBUG
-+			GCThread::GetCurrentThread()->OnEnterRequest();
-+#endif
-+		}
-+
-+		/** @access Requires(m_lock) */
-+		void OnLeaveRequestAlreadyLocked()
-+		{
-+			GCAssert(m_requestCount > 0);
-+			m_requestCount--;
-+			if (m_requestCount == 0)
-+				m_condNoRequests.NotifyAll();
-+#ifdef _DEBUG
-+			GCThread::GetCurrentThread()->OnLeaveRequest();
-+#endif
-+		}
-+
-+	public:
-+		/**
-+		 * Call this when the number of active requests on a thread goes from
-+		 * zero to one.
-+		 */
-+		void OnEnterRequest()
-+		{
-+			GCAutoLock _lock(m_lock);
-+			OnEnterRequestAlreadyLocked();
-+		}
-+
-+		/**
-+		 * Call this when the number of active requests on a thread goes from 
-+		 * one to zero.
-+		 */
-+		void OnLeaveRequest()
-+		{
-+			GCAutoLock _lock(m_lock);
-+			OnLeaveRequestAlreadyLocked();
-+		}
-+
-+		/**
-+		 * Exactly like Collect(), except that the caller must be inside a
-+		 * request.
-+		 *
-+		 * @access Requires(request && !m_lock)
-+		 */
-+		void CollectFromRequest()
-+		{
-+			CollectWithBookkeeping(false, true);
-+		}
-+
-+	protected:
-+		/**
-+		 * Wait for the current GC operation, if any, to finish.
-+		 *
-+		 * @access Requires(m_lock)
-+		 */
-+		void WaitForGCDone()
-+		{
-+			GCAssert(m_exclusiveGCThread != GCThread::GetCurrentThread());
-+			while (m_exclusiveGCThread != NULL)
-+				m_condDone.Wait();
-+		}
-+
-+		/**
-+		 * This lock protects m_exclusiveGCThread and m_requestCount.
-+		 * Policies built on m_lock and m_exclusiveGCThread govern how threads
-+		 * access the rest of MMgc (both data and code).
-+		 *
-+		 * These policies are documented with "\@access" comments, which is a
-+		 * kind of shorthand.  Here are a few illustrative examples:
-+		 *
-+		 * <code>\@access Requires(m_lock)</code> on a member variable or
-+		 * method means it must be used only by a thread that holds m_lock.
-+		 *
-+		 * <code>\@access Requires(exclusiveGC)</code> means the member is to
-+		 * be used only by the GC thread, and only when no threads are running
-+		 * in requests.  This applies to methods like Trace(), MarkItem(),
-+		 * Sweep(), and Finalize().
-+		 *
-+		 * <code>\@access Requires(request)</code> means the member must be
-+		 * used only by a thread that is in a request.  This applies to
-+		 * Alloc(), Calloc(), and Free().
-+		 *
-+		 * <code>\@access Requires(exclusiveGC || request)</code> requires
-+		 * that the caller be <em>either</em> in a request <em>or</em> that it
-+		 * be the GC thread, with no threads running in requests.  (Of course
-+		 * a thread can never be both.)  This is just like
-+		 * <code>Requires(request)</code> except that a member marked this way
-+		 * is also safe to use from finalizer code.  (Finalizers run with
-+		 * <code>exclusiveGC</code>, not in a request.)
-+		 *
-+		 * <code>\@access ReadWrite(request, exclusiveGC)</code> applies to
-+		 * several data members.  This denotes a read-write locking scheme.
-+		 * Any thread with <code>request || exclusiveGC</code> can safely
-+		 * <em>read</em> these data members, but only a thread with
-+		 * <code>exclusiveGC</code> can <em>write</em> to them.
-+		 *
-+		 * Other policies are occasionally used.  For example, the list of
-+		 * GCRoots has its own spinlock, so the head of the list and each of
-+		 * the links has <code>\@access Requires(m_rootListLock)</code>.
-+		 *
-+		 * XXX TODO - worry about less-than-one-word fields (just bools, I
-+		 * think) that are packed with other fields that have different access
-+		 * policies!
-+		 *
-+		 * Some data structures, like the list of GCRoots, are protected by
-+		 * separate spinlocks.  The rule to avoid deadlock is: do not try to
-+		 * acquire m_lock if you already hold any spinlock.
-+		 */
-+		mutable GCLock m_lock;
-+
-+		/**
-+		 * The thread currently doing GC-related work (or waiting to do
-+		 * GC-related work); or NULL if there is no such thread.
-+		 *
-+		 * @access Requires(m_lock)
-+		 */
-+		GCThread *m_exclusiveGCThread;
-+
-+		/**
-+		 * True if a thread is doing GC-related work, having already ensured
-+		 * that no threads are in active requests.
-+		 *
-+		 * (The complicated thread-safety policy here makes reads lock-free.
-+		 * It also supports checking the status even if the currently running
-+		 * code isn't in a request at all; Collect() needs that.)
-+		 *
-+		 * @access ReadWrite(request || exclusiveGC || m_lock, exclusiveGC && m_lock)
-+		 */
-+		bool m_gcRunning;
-+
-+		/**
-+		 * This is notified whenever m_exclusiveGCThread becomes NULL.
-+		 *
-+		 * @access Requires(m_lock)
-+		 */
-+		GCCondition m_condDone;
-+
-+		/**
-+		 * The number of threads currently in active requests.
-+		 *
-+		 * @access Requires(m_lock)
-+		 */
-+		int m_requestCount;
-+
-+		/**
-+		 * This is notified whenever m_requestCount becomes zero.
-+		 *
-+		 * At most one thread is ever waiting on this condition at a time; if
-+		 * a thread is waiting on it, that thread is `m_exclusiveGCThread` and
-+		 * `m_gcRunning` is false.
-+		 *
-+		 * @access Requires(m_lock)
-+		 */
-+		GCCondition m_condNoRequests;
- #endif
- 	};
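Read with the @access shorthand documented above, the request discipline an embedding is expected to follow comes out roughly like this (hypothetical embedding code, not part of the patch):

    void MutatorThread(MMgc::GC *gc)
    {
        gc->OnEnterRequest();        // this thread: 0 -> 1 active requests
        void *p = gc->Alloc(32);     // Requires(request): now satisfied
        gc->CollectFromRequest();    // Requires(request && !m_lock)
        gc->OnLeaveRequest();        // 1 -> 0; a collector may now run
        gc->Collect();               // Requires(!m_lock && !request)
        (void) p;
    }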
- 
-diff --git a/MMgc/GCAlloc.cpp b/MMgc/GCAlloc.cpp
---- a/MMgc/GCAlloc.cpp
-+++ b/MMgc/GCAlloc.cpp
-@@ -127,6 +127,8 @@ namespace MMgc
- 
- 	GCAlloc::GCBlock* GCAlloc::CreateChunk()
- 	{
-+		MMGC_ASSERT_GC_LOCK(m_gc);
-+
- 		// Get space in the bitmap.  Do this before allocating the actual block,
- 		// since we might call GC::AllocBlock for more bitmap space and thus
- 		// cause some incremental marking.
-@@ -229,6 +231,7 @@ namespace MMgc
- 
- 	void* GCAlloc::Alloc(size_t size, int flags)
- 	{
-+		MMGC_ASSERT_GC_LOCK(m_gc);
- 		(void)size;
- 		GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");
- start:
-@@ -379,6 +382,8 @@ start:
- 
- 	void GCAlloc::Finalize()
- 	{
-+		MMGC_ASSERT_EXCLUSIVE_GC(m_gc);
-+
- 		m_finalized = true;
- 		// Go through every item of every block.  Look for items
- 		// that are in use but not marked as reachable, and delete
-diff --git a/MMgc/GCLargeAlloc.cpp b/MMgc/GCLargeAlloc.cpp
---- a/MMgc/GCLargeAlloc.cpp
-+++ b/MMgc/GCLargeAlloc.cpp
-@@ -53,6 +53,9 @@ namespace MMgc
- 
- 	void* GCLargeAlloc::Alloc(size_t size, int flags)
- 	{
-+#ifdef MMGC_THREADSAFE
-+		GCAssert(m_gc->m_lock.IsHeld());
-+#endif
- 		int blocks = (size+sizeof(LargeBlock)+GCHeap::kBlockSize-1) / GCHeap::kBlockSize;
- 		
- 		LargeBlock *block = (LargeBlock*) m_gc->AllocBlock(blocks, GC::kGCLargeAllocPageFirst, (flags&GC::kZero) != 0);
-diff --git a/MMgc/GCTests.cpp b/MMgc/GCTests.cpp
---- a/MMgc/GCTests.cpp
-+++ b/MMgc/GCTests.cpp
-@@ -37,7 +37,9 @@
- 
-  
- #include "MMgc.h"
-- 
-+
-+#include <stdio.h>
-+
- #ifdef _MSC_VER
- // "behavior change: an object of POD type constructed with an initializer of the form () will be default-initialized"
- #pragma warning(disable:4345) // b/c GCObject doesn't have a ctor
-@@ -53,8 +55,23 @@ namespace MMgc
- {
- 	GC *gc;
- 
--	GCWeakRef* createWeakRef(int extra=0)
-+	GCWeakRef* createWeakRef(int extra = 0)
- 	{
-+		// Bogusly use up some extra stack.
-+		//
-+		// Certain compilers/platforms require this (at least gcc/MacIntel).
-+		// A pointer to the temporary GCObject can be left on the stack, so
-+		// far down that CleanStack doesn't dare touch it.  Leaving the
-+		// pointer there causes the temporary to be marked, and not collected,
-+		// which causes tests to fail with assertions.
-+		//
-+		// The extra 32 bytes here causes the temporary to end up higher on
-+		// the stack (numerically lesser address, on Intel at least) where
-+		// CleanStack will clobber it.
-+		//
-+		char buf[32];
-+		sprintf(buf, "%d", extra);  // don't optimize away buf
-+
- 		return (new (gc, extra) GCObject())->GetWeakRef();
- 	}
- 
-diff --git a/MMgc/GCThreads.cpp b/MMgc/GCThreads.cpp
-new file mode 100644
---- /dev/null
-+++ b/MMgc/GCThreads.cpp
-@@ -0,0 +1,100 @@
-+/* ***** BEGIN LICENSE BLOCK *****
-+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
-+ *
-+ * The contents of this file are subject to the Mozilla Public License Version
-+ * 1.1 (the "License"); you may not use this file except in compliance with
-+ * the License. You may obtain a copy of the License at
-+ * http://www.mozilla.org/MPL/
-+ *
-+ * Software distributed under the License is distributed on an "AS IS" basis,
-+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
-+ * for the specific language governing rights and limitations under the
-+ * License.
-+ *
-+ * The Original Code is [Open Source Virtual Machine.].
-+ *
-+ * The Initial Developer of the Original Code is
-+ * Adobe System Incorporated.
-+ * Portions created by the Initial Developer are Copyright (C) 2004-2006
-+ * the Initial Developer. All Rights Reserved.
-+ *
-+ * Contributor(s):
-+ *   Jason Orendorff
-+ *
-+ * Alternatively, the contents of this file may be used under the terms of
-+ * either the GNU General Public License Version 2 or later (the "GPL"), or
-+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
-+ * in which case the provisions of the GPL or the LGPL are applicable instead
-+ * of those above. If you wish to allow use of your version of this file only
-+ * under the terms of either the GPL or the LGPL, and not to allow others to
-+ * use your version of this file under the terms of the MPL, indicate your
-+ * decision by deleting the provisions above and replace them with the notice
-+ * and other provisions required by the GPL or the LGPL. If you do not delete
-+ * the provisions above, a recipient may use your version of this file under
-+ * the terms of any one of the MPL, the GPL or the LGPL.
-+ *
-+ * ***** END LICENSE BLOCK ***** */
-+
-+#include "MMgc.h"
-+
-+#ifdef MMGC_THREADSAFE
-+#include "prtypes.h"
-+
-+using namespace MMgc;
-+
-+PRUintn GCThread::g_tlsIndex;
-+GCThread *GCThread::g_first = NULL;
-+static PRLock *g_threadListLock = NULL;
-+
-+void (PR_CALLBACK DestroyGCThread)(void *obj)
-+{
-+	delete static_cast<GCThread *>(obj);
-+}
-+
-+void GCThread::Init()
-+{
-+    GCAssert(g_threadListLock == NULL);
-+	g_threadListLock = PR_NewLock();
-+	GCAssert(g_threadListLock != NULL);
-+	if (g_threadListLock != NULL) {
-+		PRStatus st = PR_NewThreadPrivateIndex(&g_tlsIndex, DestroyGCThread);
-+		(void) st;
-+		GCAssert(st == PR_SUCCESS);
-+	}
-+}
-+
-+void GCThread::Destroy()
-+{
-+    GCAssert(g_threadListLock != NULL);
-+
-+	PR_DestroyLock(g_threadListLock);
-+	g_threadListLock = NULL;
-+    // NSPR provides no way to release g_tlsIndex to the system.
-+}
-+
-+GCThread::GCThread()
-+#ifdef _DEBUG
-+	: m_inRequest(false)
-+#endif
-+{
-+	PR_Lock(g_threadListLock);
-+	m_next = g_first;
-+	g_first = this;
-+	PR_Unlock(g_threadListLock);
-+}
-+
-+GCThread * GCThread::GetCurrentThread()
-+{
-+    GCAssert(g_threadListLock != NULL);
-+
-+    GCThread *t = static_cast<GCThread *>(PR_GetThreadPrivate(g_tlsIndex));
-+    if (t == NULL) {
-+        t = new GCThread;
-+        PRStatus st = PR_SetThreadPrivate(g_tlsIndex, t);
-+		(void) st;
-+        GCAssert(st == PR_SUCCESS);
-+    }
-+    return t;
-+}
-+
-+#endif  // MMGC_THREADSAFE
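
GCThread::GetCurrentThread() above bootstraps a per-thread record on first use via NSPR thread-private storage. The same shape in a stand-alone sketch (using C++11 thread_local purely for illustration, since this codebase predates it; the real version also chains each GCThread onto g_first under g_threadListLock, and relies on the PR_NewThreadPrivateIndex destructor callback for cleanup at thread exit):

    struct ThreadState {
        ThreadState() : inRequest(false) {}
        bool inRequest;
    };

    ThreadState *CurrentThreadState()
    {
        // Created on first touch by each thread, destroyed at thread exit,
        // which is the role the TLS destructor callback plays above.
        static thread_local ThreadState state;
        return &state;
    }
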
-diff --git a/MMgc/GCThreads.h b/MMgc/GCThreads.h
-new file mode 100644
---- /dev/null
-+++ b/MMgc/GCThreads.h
-@@ -0,0 +1,227 @@
-+/* ***** BEGIN LICENSE BLOCK *****
-+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
-+ *
-+ * The contents of this file are subject to the Mozilla Public License Version
-+ * 1.1 (the "License"); you may not use this file except in compliance with
-+ * the License. You may obtain a copy of the License at
-+ * http://www.mozilla.org/MPL/
-+ *
-+ * Software distributed under the License is distributed on an "AS IS" basis,
-+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
-+ * for the specific language governing rights and limitations under the
-+ * License.
-+ *
-+ * The Original Code is [Open Source Virtual Machine.].
-+ *
-+ * The Initial Developer of the Original Code is
-+ * Adobe System Incorporated.
-+ * Portions created by the Initial Developer are Copyright (C) 2004-2006
-+ * the Initial Developer. All Rights Reserved.
-+ *
-+ * Contributor(s):
-+ *   Jason Orendorff
-+ *
-+ * Alternatively, the contents of this file may be used under the terms of
-+ * either the GNU General Public License Version 2 or later (the "GPL"), or
-+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
-+ * in which case the provisions of the GPL or the LGPL are applicable instead
-+ * of those above. If you wish to allow use of your version of this file only
-+ * under the terms of either the GPL or the LGPL, and not to allow others to
-+ * use your version of this file under the terms of the MPL, indicate your
-+ * decision by deleting the provisions above and replace them with the notice
-+ * and other provisions required by the GPL or the LGPL. If you do not delete
-+ * the provisions above, a recipient may use your version of this file under
-+ * the terms of any one of the MPL, the GPL or the LGPL.
-+ *
-+ * ***** END LICENSE BLOCK ***** */
-+
-+#ifndef __MMGC_GCTHREADS__
-+#define __MMGC_GCTHREADS__
-+
-+#ifdef MMGC_THREADSAFE
-+
-+#include "prthread.h"
-+#include "prlock.h"
-+#include "prcvar.h"
-+
-+namespace MMgc
-+{
-+	/** A non-reentrant mutex. */
-+	class GCLock
-+	{
-+		friend class GCCondition;
-+	public:
-+		GCLock() 
-+			: m_lock(PR_NewLock())
-+#ifdef _DEBUG
-+			, m_holder(NULL)
-+#endif
-+		{
-+			GCAssert(m_lock != NULL);
-+		}
-+
-+		~GCLock() { PR_DestroyLock(m_lock); }
-+
-+		void Acquire()
-+		{
-+#ifdef _DEBUG
-+			PRThread *thr = PR_GetCurrentThread();
-+			GCAssertMsg(m_holder != thr, "deadlock in GCLock::Acquire");
-+#endif
-+			PR_Lock(m_lock);
-+#ifdef _DEBUG
-+			m_holder = thr;
-+#endif
-+		}
-+
-+		void Release()
-+		{
-+#ifdef _DEBUG
-+			GCAssert(IsHeld());
-+			m_holder = NULL;
-+#endif
-+			PR_Unlock(m_lock);
-+		}
-+
-+#ifdef _DEBUG
-+		/**
-+		 * True if the calling thread owns the lock.
-+		 *
-+		 * Warning: This is for use in debug-mode assertions about the lock
-+		 * state ONLY.  It's theoretically possible, on some non-IA32
-+		 * platforms, that reading m_holder outside the lock like this could
-+		 * return random junk, causing a false positive here.  On IA32, it's
-+		 * safe.
-+		 */
-+		bool IsHeld()
-+		{
-+			return m_holder == PR_GetCurrentThread();
-+		}
-+#endif
-+
-+	private:
-+		// Prohibit copying.
-+		GCLock(const GCLock &);
-+		GCLock & operator=(const GCLock &);
-+
-+		PRLock *m_lock;
-+#ifdef _DEBUG
-+		PRThread *m_holder;
-+#endif
-+	};
-+
-+	class GCAutoLock {
-+	public:
-+		explicit GCAutoLock(GCLock &lock)
-+			: m_lock(lock)
-+		{
-+			lock.Acquire();
-+		}
-+
-+		~GCAutoLock() { m_lock.Release(); }
-+
-+	private:
-+		// Prohibit copying.
-+		GCAutoLock(const GCAutoLock &);
-+		GCAutoLock & operator=(const GCAutoLock &);
-+
-+		GCLock &m_lock;
-+	};
-+
-+	/** A pthreads-style condition variable. */
-+	class GCCondition {
-+	public:
-+		explicit GCCondition(GCLock &lock)
-+			: m_cvar(PR_NewCondVar(lock.m_lock))
-+#ifdef _DEBUG
-+			, m_lockObject(lock)
-+#endif
-+		{
-+			GCAssert(m_cvar != NULL);
-+		}
-+
-+		~GCCondition() { PR_DestroyCondVar(m_cvar); }
-+
-+		void Wait()
-+		{
-+			PRStatus st;
-+
-+#ifdef _DEBUG
-+			m_lockObject.m_holder = NULL;
-+#endif
-+			st = PR_WaitCondVar(m_cvar, PR_INTERVAL_NO_TIMEOUT);
-+#ifdef _DEBUG
-+			m_lockObject.m_holder = PR_GetCurrentThread();
-+#endif
-+			(void)st;
-+			GCAssert(st == PR_SUCCESS);
-+		}
-+
-+		void NotifyAll()
-+		{
-+			PRStatus st;
-+
-+			st = PR_NotifyAllCondVar(m_cvar);
-+			(void)st;
-+			GCAssert(st == PR_SUCCESS);
-+		}
-+
-+	private:
-+		// Prohibit copying.
-+		GCCondition(const GCCondition &);
-+		GCCondition & operator=(const GCCondition &);
-+
-+		PRCondVar *m_cvar;
-+#ifdef _DEBUG
-+		GCLock &m_lockObject;
-+#endif
-+    };
-+
-+	/** A catchall object for per-thread state. */
-+	class GCThread {
-+	public:
-+		static void Init();
-+		static void Destroy();
-+
-+		static GCThread * GetCurrentThread();
-+
-+#ifdef _DEBUG
-+	public:
-+		void OnEnterRequest()
-+		{
-+			GCAssert(!m_inRequest);
-+			m_inRequest = true;
-+		}
-+
-+		void OnLeaveRequest()
-+		{
-+			GCAssert(m_inRequest);
-+			m_inRequest = false;
-+		}
-+
-+		bool IsInActiveRequest() { return m_inRequest; }
-+
-+	private:
-+		bool m_inRequest;
-+#else
-+	public:
-+		void OnEnterRequest() {}
-+		void OnLeaveRequest() {}
-+#endif
-+
-+	private:
-+		// Prohibit copying.
-+		GCThread(const GCThread &);
-+		GCThread & operator=(const GCThread &);
-+
-+		GCThread();
-+
-+		static PRUintn g_tlsIndex;
-+		static GCThread *g_first;
-+		GCThread *m_next;
-+	};
-+}
-+
-+#endif // MMGC_THREADSAFE
-+
-+#endif // __MMGC_GCTHREADS__
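
A note on GCCondition::Wait() above: PR_WaitCondVar releases the underlying lock while blocked, so the debug shadow m_holder has to be cleared before the wait and restored after, or GCLock::IsHeld() would lie across the wait. The same bookkeeping against std::condition_variable (a sketch; the names here are illustrative, not MMgc API):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::mutex lock;
    std::condition_variable cond;
    std::thread::id holder;   // debug shadow of "who holds lock", like GCLock::m_holder

    void WaitUntil(std::unique_lock<std::mutex> &l, const bool &ready)
    {
        holder = std::thread::id();            // wait() is about to release the lock
        cond.wait(l, [&ready] { return ready; });
        holder = std::this_thread::get_id();   // lock re-acquired before wait() returns
    }
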
-diff --git a/MMgc/GCWeakRef.h b/MMgc/GCWeakRef.h
---- a/MMgc/GCWeakRef.h
-+++ b/MMgc/GCWeakRef.h
-@@ -58,10 +58,21 @@ namespace MMgc
- 			}
- 		}
- 	private:
--		// override new so we can be tell the GC we don't contain pointers
-+		/**
-+		 * When allocating a GCWeakRef, tell the GC we don't contain pointers
-+		 * (overriding the default base-class behavior).
-+		 *
-+		 * In MMGC_THREADSAFE builds, the caller always holds m_lock.
-+		 *
-+		 * @access Requires(gc->m_lock)
-+		 */
- 		static void *operator new(size_t size, GC *gc)
- 		{
-+#ifdef MMGC_THREADSAFE
-+			return gc->AllocAlreadyLocked(size, GC::kFinalize, 4);
-+#else
- 			return gc->Alloc(size, GC::kFinalize, 4);
-+#endif
- 		}
- 		// private, only GC can access
- 		GCWeakRef(const void *obj) : m_obj(obj) 
-diff --git a/MMgc/MMgc.h b/MMgc/MMgc.h
---- a/MMgc/MMgc.h
-+++ b/MMgc/MMgc.h
-@@ -125,6 +125,7 @@ namespace MMgc
- 
- #include "GCTypes.h"
- #include "GCStack.h"
-+#include "GCThreads.h"
- #include "GCAllocObject.h"
- #include "GCHeap.h"
- #include "GCAlloc.h"
-diff --git a/MMgc/manifest.mk b/MMgc/manifest.mk
---- a/MMgc/manifest.mk
-+++ b/MMgc/manifest.mk
-@@ -56,6 +56,7 @@ MMgc_CXXSRCS := $(MMgc_CXXSRCS) \
-   $(curdir)/GCMemoryProfiler.cpp \
-   $(curdir)/GCObject.cpp \
-   $(curdir)/GCTests.cpp \
-+  $(curdir)/GCThreads.cpp \
-   $(NULL)
- 
- ifeq (windows,$(TARGET_OS))
-diff --git a/configure.py b/configure.py
---- a/configure.py
-+++ b/configure.py
-@@ -95,6 +95,7 @@ MMGC_THREADSAFE = o.getBoolArg('threadsa
- MMGC_THREADSAFE = o.getBoolArg('threadsafe-mmgc', False)
- if MMGC_THREADSAFE:
-     MMGC_DEFINES['MMGC_THREADSAFE'] = None
-+    MMGC_DEFINES['MMGC_INTERIOR_PTRS'] = None
-     NSPR_INCLUDES = o.getStringArg('nspr-includes')
-     MMGC_CPPFLAGS += NSPR_INCLUDES + " "
-     APP_CPPFLAGS += NSPR_INCLUDES + " "
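
One detail of the GCThreads.h primitives above deserves a note before the baseline patch: GCLock is non-reentrant, and its debug-mode Acquire() catches self-deadlock by remembering the owning thread. The same check in a stand-alone sketch (CheckedLock is illustrative, not MMgc API; the unsynchronized read of the owner field carries the same debug-only caveat as GCLock::IsHeld()):

    #include <cassert>
    #include <mutex>
    #include <thread>

    class CheckedLock {
    public:
        void Acquire()
        {
            // Re-acquiring a non-reentrant lock on the same thread would
            // deadlock, so catch it eagerly in debug builds.
            assert(m_holder != std::this_thread::get_id() && "deadlock in Acquire");
            m_mutex.lock();
            m_holder = std::this_thread::get_id();
        }
        void Release()
        {
            m_holder = std::thread::id();
            m_mutex.unlock();
        }
    private:
        std::mutex m_mutex;
        std::thread::id m_holder;   // shadow owner, like GCLock::m_holder
    };
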
new file mode 100644
--- /dev/null
+++ b/mmgc-threadsafe-baseline
@@ -0,0 +1,2166 @@
+diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
+--- a/MMgc/GC.cpp
++++ b/MMgc/GC.cpp
+@@ -199,8 +199,16 @@ namespace MMgc
+ 	};
+ 	const size_t kLargestAlloc = 1968;
+ 
++#ifdef MMGC_THREADSAFE
++#define USING_CALLBACK_LIST(gc)  GCAcquireSpinlock _cblock((gc)->m_callbackListLock)
++#define USING_PAGE_MAP()         GCAcquireSpinlock _pmlock(pageMapLock)
++#else
++#define USING_CALLBACK_LIST(gc)  ((gc)->CheckThread())
++#define USING_PAGE_MAP()         ((void) 0)
++#endif
++
+ 	GC::GC(GCHeap *gcheap)
+-		: disableThreadCheck(false),
++		:
+ #ifdef MMGC_DRC
+ 		  zct(gcheap),
+ #endif
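
The two USING_ macros above expand to a scoped guard whose constructor takes a spinlock and whose destructor drops it at the end of the enclosing block; in non-threadsafe builds they degenerate to a thread check or a no-op. A minimal stand-alone model of such a guard (SpinGuard is illustrative; MMgc's real GCAcquireSpinlock wraps its own GCSpinLock):

    #include <atomic>

    // Usage: std::atomic_flag pageMapFlag = ATOMIC_FLAG_INIT;
    //        { SpinGuard _pmlock(pageMapFlag); /* read or write the page map */ }
    class SpinGuard {
    public:
        explicit SpinGuard(std::atomic_flag &f) : m_flag(f)
        {
            while (m_flag.test_and_set(std::memory_order_acquire)) { /* spin */ }
        }
        ~SpinGuard() { m_flag.clear(std::memory_order_release); }
    private:
        SpinGuard(const SpinGuard &);              // non-copyable, as in MMgc
        SpinGuard &operator=(const SpinGuard &);
        std::atomic_flag &m_flag;
    };
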
+@@ -256,8 +264,17 @@ namespace MMgc
+ 		  heapSizeAtLastAlloc(gcheap->GetTotalHeapSize()),
+ 		  finalizedValue(true),
+ 		  // Expand, don't collect, until we hit this threshold
+-		  collectThreshold(256)
+-	{		
++		  collectThreshold(256),
++#if MMGC_THREADSAFE
++		  m_exclusiveGCThread(NULL),
++		  m_gcRunning(false),
++		  m_condDone(m_lock),
++		  m_requestCount(0),
++		  m_condNoRequests(m_lock)
++#else
++		  disableThreadCheck(false)
++#endif
++	{
+ 		// sanity check for all our types
+ 		GCAssert (sizeof(int8) == 1);
+ 		GCAssert (sizeof(uint8) == 1);		
+@@ -373,7 +390,9 @@ namespace MMgc
+ 
+ 		heap->Free(pageMap);
+ 
++#ifndef MMGC_THREADSAFE
+ 		CheckThread();
++#endif
+ 
+ 		GCAssert(!m_roots);
+ 		GCAssert(!m_callbacks);
+@@ -392,6 +411,12 @@ namespace MMgc
+ 	}
+ 
+ 	void GC::Collect()
++	{
++		CollectWithBookkeeping(false, false);
++	}
++
++	void GC::CollectWithBookkeeping(bool callerHoldsLock,
++									bool callerHasActiveRequest)
+ 	{
+ 		// invoke precollect callback
+ 		bool vetoed = false;
+@@ -402,11 +427,155 @@ namespace MMgc
+ 			cb = cb->nextCB;
+ 		}
+ 
++#ifdef MMGC_THREADSAFE
++		GCAssert(callerHoldsLock == m_lock.IsHeld());
++#ifdef _DEBUG
++		GCAssert(callerHasActiveRequest == GCThread::GetCurrentThread()->IsInActiveRequest());
++#endif
++#else
++		(void) callerHoldsLock;
++		(void) callerHasActiveRequest;
++		CheckThread();
++#endif
++
++#ifdef MMGC_THREADSAFE
++		if (!callerHoldsLock)
++			m_lock.Acquire();
++
++		if (vetoed || nogc || m_gcRunning || zct.reaping) {
++			if (!callerHoldsLock)
++				m_lock.Release();
++			return;
++		}
++#else
+ 		if (vetoed || nogc || collecting || zct.reaping) {
+ 			return;
+ 		}
+-
++#endif
++
++#ifdef MMGC_THREADSAFE
++		GCThread *thisThread = GCThread::GetCurrentThread();
++		if (m_exclusiveGCThread != NULL) {
++			// Someone is already collecting or waiting to collect.
++			//
++			// If it's the current thread, then we're being called
++			// recursively.  This maybe shouldn't happen, but Collect can be
++			// called indirectly from a finalizer.  Don't recurse, just ignore
++			// it.
++			//
++			// If it's some other thread, wait for that thread to finish.
++			//
++			if (m_exclusiveGCThread != thisThread) {
++				GCRoot *stackRoot;
++
++				// Before waiting, this thread must suspend any active
++				// requests.  This avoids a deadlock where m_exclusiveGCThread
++				// is waiting for m_requestCount to go to zero while we're
++				// waiting for m_exclusiveGCThread to be done.
++				//
++				// When we do this, we root the calling thread's stack; this
++				// way each collection scans the stack of every thread that is
++				// in a suspended request.
++				//
++				if (callerHasActiveRequest) {
++					void *stack;
++					size_t stackSize;
++
++					MMGC_GET_STACK_EXENTS(this, stack, stackSize);
++					stackRoot = new GCRoot(this, stack, stackSize);
++
++					OnLeaveRequestAlreadyLocked();
++				}
++
++				while (m_exclusiveGCThread != NULL)
++					m_condDone.Wait();
++
++				if (callerHasActiveRequest) {
++					// Resume previously suspended request.
++					delete stackRoot;
++					OnEnterRequestAlreadyLocked();
++				}
++			}
++
++			if (!callerHoldsLock)
++				m_lock.Release();
++			return;
++		}
++
++		// If we get here, this thread will collect.
++		m_exclusiveGCThread = thisThread;
++		if (callerHasActiveRequest) {
++			// We must suspend any active requests on this thread.
++			OnLeaveRequestAlreadyLocked();
++		}
++		// Wait for other threads to suspend or end their active requests,
++		// if any.
++		while (m_requestCount > 0)
++			m_condNoRequests.Wait();
++
++		// These should not have changed while we were waiting, even though we
++		// released the lock.
++		GCAssert(m_requestCount == 0);
++		GCAssert(m_exclusiveGCThread == thisThread);
++
++		// This thread has acquired exclusiveGC.
++		m_gcRunning = true;
++#endif
++
++		{
++			USING_CALLBACK_LIST(this);
++			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
++				cb->enterExclusiveGC();
++		}
++
++#ifdef MMGC_THREADSAFE
++		m_lock.Release();
++#endif
++
++		{
++			USING_CALLBACK_LIST(this);
++			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
++				cb->enterExclusiveGCNoLock();
++		}
++
++		CollectImpl();
++
++#ifdef MMGC_THREADSAFE
++		m_lock.Acquire();
++
++		// These should not have changed during collection,
++		// even though we released the lock.
++		GCAssert(m_requestCount == 0);
++		GCAssert(m_exclusiveGCThread == thisThread);
++#endif
++
++		{
++			USING_CALLBACK_LIST(this);
++			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
++				cb->leaveExclusiveGC();
++		}
++
++#ifdef MMGC_THREADSAFE
++		// This thread is relinquishing exclusiveGC.
++		m_gcRunning = false;
++
++		m_exclusiveGCThread = NULL;
++		if (callerHasActiveRequest)
++			OnEnterRequestAlreadyLocked();
++
++		m_condDone.NotifyAll();
++
++		if (!callerHoldsLock)
++			m_lock.Release();
++#endif
++	}
++
++	void GC::CollectImpl()
++	{
++#ifndef MMGC_THREADSAFE
+ 		ReapZCT();
++#endif
++
+ 		if(!marking)
+ 			StartIncrementalMark();
+ 		if(marking)
+@@ -418,13 +587,14 @@ namespace MMgc
+ 		// reachable by the conservative stack walk
+ 		//DumpStackTrace();
+ 		FindUnmarkedPointers();
+-		CheckThread();
+ #endif
+ 	}
+ 
+ #ifdef _DEBUG
+ 	void GC::Trace(const void *stackStart/*=NULL*/, size_t stackSize/*=0*/)
+ 	{
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
++
+ 		SAMPLE_FRAME("[mark]", core());
+ 
+ 		// Clear all mark bits.
+@@ -465,6 +635,17 @@ namespace MMgc
+ 
+ 	void *GC::Alloc(size_t size, int flags/*0*/, int skip/*3*/)
+ 	{
++#ifdef MMGC_THREADSAFE
++		GCAutoLock _lock(m_lock);
++		GCAssert(!m_gcRunning);
++#else
++		CheckThread();
++#endif
++		return AllocAlreadyLocked(size, flags, skip);
++	}
++
++	void *GC::AllocAlreadyLocked(size_t size, int flags/*0*/, int skip/*3*/)
++	{
+ #ifdef DEBUGGER
+ 		avmplus::AvmCore *core = (avmplus::AvmCore*)GetGCContextVariable(GCV_AVMCORE);
+ 		if(core)
+@@ -476,9 +657,8 @@ namespace MMgc
+ 
+ #ifdef _DEBUG
+ 		GCAssert(size + 7 > size);
+-		CheckThread();
+ 		if (GC::greedy) {
+-			Collect();
++			CollectWithBookkeeping(true, true);
+ 		}
+ 		// always be marking in pedantic mode
+ 		if(incrementalValidationPedantic) {
+@@ -561,15 +741,21 @@ namespace MMgc
+ 
+ 	void GC::Free(void *item)
+ 	{
++#ifdef MMGC_THREADSAFE
++		GCAutoLock _lock(m_lock);
++		GCAssert(!m_gcRunning);
++#else
+ 		CheckThread();
++#endif
++
++		if(item == NULL) {
++			return;
++		}
++
++		bool isLarge;
++
+ 		// we can't allow free'ing something during Sweeping, otherwise alloc counters
+ 		// get decremented twice and destructors will be called twice.
+-		if(item == NULL) {
+-			return;
+-		}
+-
+-		bool isLarge;
+-
+ 		if(collecting) {
+ 			goto bail;
+ 		}
+@@ -618,6 +804,8 @@ bail:
+ 
+ 	void GC::ClearMarks()
+ 	{
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
++
+ 		for (int i=0; i < kNumSizeClasses; i++) {
+ #ifdef MMGC_DRC
+ 			containsPointersRCAllocs[i]->ClearMarks();
+@@ -630,6 +818,8 @@ bail:
+ 
+ 	void GC::Finalize()
+ 	{
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
++
+ 		for(int i=kNumSizeClasses; i-- > 0;) {
+ #ifdef MMGC_DRC
+ 			containsPointersRCAllocs[i]->Finalize();
+@@ -652,6 +842,8 @@ bail:
+ 
+ 	void GC::Sweep(bool force)
+ 	{	
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
++
+ 		// collecting must be true because it indicates allocations should
+ 		// start out marked, we can't rely on write barriers below since 
+ 		// presweep could write a new GC object to a root
+@@ -671,10 +863,13 @@ bail:
+ 		
+ 
+ 		// invoke presweep on all callbacks
+-		GCCallback *cb = m_callbacks;
+-		while(cb) {
+-			cb->presweep();
+-			cb = cb->nextCB;
++		{
++			USING_CALLBACK_LIST(this);
++			GCCallback *cb = m_callbacks;
++			while(cb) {
++				cb->presweep();
++				cb = cb->nextCB;
++			}
+ 		}
+ 
+ 		SAMPLE_CHECK();
+@@ -735,12 +930,15 @@ bail:
+ 		collecting = false;
+ 
+ 		// invoke postsweep callback
+-		cb = m_callbacks;
+-		while(cb) {
+-			cb->postsweep();
+-			cb = cb->nextCB;
+-		}
+-		
++		{
++			USING_CALLBACK_LIST(this);
++			GCCallback *cb = m_callbacks;
++			while(cb) {
++				cb->postsweep();
++				cb = cb->nextCB;
++			}
++		}
++
+ 		SAMPLE_CHECK();
+ 
+ 		allocsSinceCollect = 0;
+@@ -777,6 +975,7 @@ bail:
+ 
+ 	void* GC::AllocBlock(int size, int pageType, bool zero)
+ 	{
++		MMGC_ASSERT_GC_LOCK(this);
+ #ifdef DEBUGGER
+ 		AllocActivity(size);
+ #endif
+@@ -794,7 +993,7 @@ bail:
+ 			if(incremental)
+ 				StartIncrementalMark();
+ 			else
+-				Collect();
++				CollectWithBookkeeping(true, true);
+ 		}
+ 
+ 		void *item;
+@@ -835,6 +1034,8 @@ bail:
+ 
+ 	void* GC::AllocBlockIncremental(int size, bool zero)
+ 	{
++		MMGC_ASSERT_GC_LOCK(this);
++
+ 		if(!nogc && !collecting) {
+ 			uint64 now = GetPerformanceCounter();
+ 			if (marking) {		
+@@ -862,12 +1063,14 @@ bail:
+ 
+ 	void* GC::AllocBlockNonIncremental(int size, bool zero)
+ 	{
++		MMGC_ASSERT_GC_LOCK(this);
++
+ 		void *item = heap->Alloc(size, false, zero);
+ 		if (!item) {
+ 			if (heap->GetTotalHeapSize() >= collectThreshold &&
+ 				allocsSinceCollect >= totalGCPages / kFreeSpaceDivisor) 
+ 			{
+-				Collect();
++				CollectWithBookkeeping(true, true);
+ 				item = heap->Alloc(size, false, zero);
+ 			}
+ 		}
+@@ -876,6 +1079,11 @@ bail:
+ 
+ 	void GC::FreeBlock(void *ptr, uint32 size)
+ 	{
++#ifdef MMGC_THREADSAFE
++		GCAssert(m_lock.IsHeld() || destroying
++				 || (m_gcRunning
++					 && m_exclusiveGCThread == GCThread::GetCurrentThread()));
++#endif
+ #ifdef DEBUGGER
+ 		AllocActivity(- (int)size);
+ #endif
+@@ -903,6 +1111,7 @@ bail:
+ 
+ 	void GC::Mark(GCStack<GCWorkItem> &work)
+ 	{
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
+ 		while(work.Count()) {
+ 			MarkItem(work);
+ 		}
+@@ -910,6 +1119,7 @@ bail:
+ 
+ 	void GC::MarkGCPages(void *item, uint32 numPages, int to)
+ 	{
++		USING_PAGE_MAP();
+ 		uintptr addr = (uintptr)item;
+ 		uint32 shiftAmount=0;
+ 		unsigned char *dst = pageMap;
+@@ -952,7 +1162,7 @@ bail:
+ 		addr = (uintptr)item;
+ 		while(numPages--)
+ 		{
+-			GCAssert(GetPageMapValue(addr) == 0);
++			GCAssert(GetPageMapValueAlreadyLocked(addr) == 0);
+ 			SetPageMapValue(addr, to);
+ 			addr += GCHeap::kBlockSize;
+ 		}
+@@ -961,6 +1171,8 @@ bail:
+ 	void GC::UnmarkGCPages(void *item, uint32 numpages)
+ 	{
+ 		uintptr addr = (uintptr) item;
++
++		USING_PAGE_MAP();
+ 		while(numpages--)
+ 		{
+ 			ClearPageMapValue(addr);
+@@ -1126,6 +1338,7 @@ bail:
+ 		gc->Mark(work);
+ 	}
+ 
++#ifndef MMGC_THREADSAFE
+ 	void GC::CheckThread()
+ 	{
+ #ifdef _DEBUG
+@@ -1134,12 +1347,13 @@ bail:
+ #endif
+ #endif
+ 	}
+-
++#endif
+ 
+ 	bool GC::IsPointerToGCPage(const void *item)
+ 	{
++		USING_PAGE_MAP();
+ 		if((uintptr)item >= memStart && (uintptr)item < memEnd)
+-			return GetPageMapValue((uintptr) item) != 0;
++			return GetPageMapValueAlreadyLocked((uintptr) item) != 0;
+ 		return false;
+ 	}
+ 
+@@ -1180,10 +1394,13 @@ bail:
+ 		// note if we add things while reaping we could delete the object
+ 		// here if we had a way to monitor our stack usage
+ 		if(reaping && PLENTY_OF_STACK()) {
+-			GCCallback *cb = gc->m_callbacks;
+-			while(cb) {
+-				cb->prereap(obj);
+-				cb = cb->nextCB;
++			{
++				USING_CALLBACK_LIST(gc);
++				GCCallback *cb = gc->m_callbacks;
++				while(cb) {
++					cb->prereap(obj);
++					cb = cb->nextCB;
++				}
+ 			}
+ 			if(gc->IsFinalized(obj))
+ 				((GCFinalizable*)obj)->~GCFinalizable();
+@@ -1378,10 +1595,13 @@ bail:
+ 		nextPinnedIndex = 0;
+ 
+ 		// invoke prereap on all callbacks
+-		GCCallback *cb = gc->m_callbacks;
+-		while(cb) {
+-			cb->prereap();
+-			cb = cb->nextCB;
++		{
++			USING_CALLBACK_LIST(gc);
++			GCCallback *cb = gc->m_callbacks;
++			while(cb) {
++				cb->prereap();
++				cb = cb->nextCB;
++			}
+ 		}
+ 
+ #ifdef _DEBUG
+@@ -1422,10 +1642,13 @@ bail:
+ 				}
+ #endif
+ 				// invoke prereap on all callbacks
+-				GCCallback *cbb = gc->m_callbacks;
+-				while(cbb) {
+-					cbb->prereap(rcobj);
+-					cbb = cbb->nextCB;
++				{
++					USING_CALLBACK_LIST(gc);
++					GCCallback *cbb = gc->m_callbacks;
++					while(cbb) {
++						cbb->prereap(rcobj);
++						cbb = cbb->nextCB;
++					}
+ 				}
+ 
+ 				GCAssert(*(int*)rcobj != 0);
+@@ -1459,10 +1682,13 @@ bail:
+ 		zctIndex = nextPinnedIndex = 0;
+ 
+ 		// invoke postreap on all callbacks
+-		cb = gc->m_callbacks;
+-		while(cb) {
+-			cb->postreap();
+-			cb = cb->nextCB;
++		{
++			USING_CALLBACK_LIST(gc);
++			GCCallback *cb = gc->m_callbacks;
++			while(cb) {
++				cb->postreap();
++				cb = cb->nextCB;
++			}
+ 		}
+ #ifdef DEBUGGER
+ 		if(gc->gcstats && numObjects) {
+@@ -1615,7 +1841,8 @@ bail:
+ 		va_end(argptr);
+ 
+ 		GCAssert(strlen(buf) < 4096);
+-			
++
++		USING_CALLBACK_LIST(this);
+ 		GCCallback *cb = m_callbacks;
+ 		while(cb) {
+ 			cb->log(buf);
+@@ -1667,23 +1894,27 @@ bail:
+ 
+ 	bool GC::IsRCObject(const void *item)
+ 	{
+-		if((uintptr)item >= memStart && (uintptr)item < memEnd && ((uintptr)item&0xfff) != 0)
+-		{
+-			int bits = GetPageMapValue((uintptr)item);		
+-			item = GetRealPointer(item);
+-			switch(bits)
+-			{
+-			case kGCAllocPage:
+-				if((char*)item < ((GCAlloc::GCBlock*)((uintptr)item&~0xfff))->items)
+-					return false;
+-				return GCAlloc::IsRCObject(item);
+-			case kGCLargeAllocPageFirst:
+-				return GCLargeAlloc::IsRCObject(item);
+-			default:
++		int bits;
++		{
++			USING_PAGE_MAP();
++
++			if ((uintptr)item < memStart || (uintptr)item >= memEnd || ((uintptr)item&0xfff) == 0)
+ 				return false;
+-			}
+-		}
+-		return false;
++			bits = GetPageMapValueAlreadyLocked((uintptr)item);
++		}
++
++		item = GetRealPointer(item);
++		switch(bits)
++		{
++		case kGCAllocPage:
++			if((char*)item < ((GCAlloc::GCBlock*)((uintptr)item&~0xfff))->items)
++				return false;
++			return GCAlloc::IsRCObject(item);
++		case kGCLargeAllocPageFirst:
++			return GCLargeAlloc::IsRCObject(item);
++		default:
++			return false;
++		}
+ 	}
+ 
+ #endif // MMGC_DRC
+@@ -1769,7 +2000,7 @@ bail:
+ 				continue;
+ 			
+ 			// normalize and divide by 4K to get index
+-			int bits = GetPageMapValue(val);
++			int bits = GetPageMapValueAlreadyLocked(val);
+ 			switch(bits)
+ 			{
+ 			case 0:
+@@ -1790,6 +2021,8 @@ bail:
+ 
+ 	void GC::FindUnmarkedPointers()
+ 	{
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
++
+ 		if(findUnmarkedPointers)
+ 		{
+ 			uintptr m = memStart;
+@@ -1928,6 +2161,11 @@ bail:
+ 	 */
+ 	void GC::WhosPointingAtMe(void* me, int recurseDepth, int currentDepth)
+ 	{
++#ifdef MMGC_THREADSAFE
++		if (currentDepth == 0)
++			pageMapLock.Acquire();
++#endif
++
+ 		uintptr val = (uintptr)me;
+ 		uintptr m = memStart;
+ 
+@@ -1950,7 +2188,7 @@ bail:
+ #endif
+ 
+ 			// divide by 4K to get index
+-			int bits = GetPageMapValue(m);
++			int bits = GetPageMapValueAlreadyLocked(m);
+ 			if(bits == kNonGC) 
+ 			{
+ 				ProbeForMatch((const void*)m, GCHeap::kBlockSize, val, recurseDepth, currentDepth);
+@@ -1994,6 +2232,11 @@ bail:
+ 			
+ 			}
+ 		}
++
++#ifdef MMGC_THREADSAFE
++		if (currentDepth == 0)
++			pageMapLock.Release();
++#endif
+ 	}
+ #undef ALLOCA_AND_FILL_WITH_SPACES
+ #endif
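
WhosPointingAtMe above brackets only its outermost invocation (currentDepth == 0) with pageMapLock, because the spinlock is non-reentrant and the recursive calls already run under it. The shape, stand-alone (Walk and pageLock are illustrative):

    #include <mutex>

    std::mutex pageLock;   // non-reentrant, like GCSpinLock

    void Walk(int depth)
    {
        if (depth == 0)
            pageLock.lock();      // only the outermost frame takes the lock

        // ... inspect pages; matches recurse via Walk(depth + 1),
        //     which skips locking since the lock is already held ...

        if (depth == 0)
            pageLock.unlock();
    }
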
+@@ -2395,9 +2638,11 @@ bail:
+ 		return 1000000;
+ 		#endif
+ 	}
+-	
++
+ 	void GC::IncrementalMark(uint32 time)
+ 	{
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
++
+ 		SAMPLE_FRAME("[mark]", core());
+ 		if(m_incrementalWork.Count() == 0 || hitZeroObjects) {
+ 			FinishIncrementalMark();
+@@ -2452,6 +2697,8 @@ bail:
+ 
+ 	void GC::FinishIncrementalMark()
+ 	{
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
++
+ 		// Don't finish an incremental mark (i.e., sweep) if we
+ 		// are in the midst of a ZCT reap.
+ 		if (zct.reaping)
+@@ -2635,6 +2882,7 @@ bail:
+ 	{
+ 		uint32 *bits;
+ 
++		MMGC_ASSERT_GC_LOCK(this);
+ 		GCAssert(numBytes % 4 == 0);
+ 
+ 		#ifdef MMGC_64BIT // we use first 8-byte slot for the free list
+@@ -2706,7 +2954,8 @@ bail:
+ 
+ 	void GC::AddCallback(GCCallback *cb)
+ 	{
+-		CheckThread();
++		USING_CALLBACK_LIST(this);
++
+ 		cb->prevCB = NULL;
+ 		cb->nextCB = m_callbacks;
+ 		if(m_callbacks)
+@@ -2716,7 +2965,8 @@ bail:
+ 
+ 	void GC::RemoveCallback(GCCallback *cb)
+ 	{
+-		CheckThread();
++		USING_CALLBACK_LIST(this);
++
+ 		if( m_callbacks == cb )
+ 			m_callbacks = cb->nextCB;
+ 		else
+@@ -2768,7 +3018,11 @@ bail:
+ 	GCWeakRef* GC::GetWeakRef(const void *item) 
+ 	{
+ 		GC *gc = GetGC(item);
++#ifdef MMGC_THREADSAFE
++		GCAutoLock _lock(gc->m_lock);
++#endif
+ 		GCWeakRef *ref = (GCWeakRef*) gc->weakRefs.get(item);
++
+ 		if(ref == NULL) {
+ 			ref = new (gc) GCWeakRef(item);
+ 			gc->weakRefs.put(item, ref);
+@@ -2830,6 +3084,8 @@ bail:
+ 
+ 	void GC::FindMissingWriteBarriers()
+ 	{
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
++
+ 		if(!incrementalValidation)
+ 			return;
+ 
+@@ -2891,6 +3147,7 @@ bail:
+ 	void GC::StartGCActivity()
+ 	{
+ 		// invoke postsweep callback
++		USING_CALLBACK_LIST(this);
+ 		GCCallback *cb = m_callbacks;
+ 		while(cb) {
+ 			cb->startGCActivity();
+@@ -2901,6 +3158,7 @@ bail:
+ 	void GC::StopGCActivity()
+ 	{
+ 		// invoke postsweep callback
++		USING_CALLBACK_LIST(this);
+ 		GCCallback *cb = m_callbacks;
+ 		while(cb) {
+ 			cb->stopGCActivity();
+@@ -2911,6 +3169,7 @@ bail:
+ 	void GC::AllocActivity(int blocks)
+ 	{
+ 		// invoke postsweep callback
++		USING_CALLBACK_LIST(this);
+ 		GCCallback *cb = m_callbacks;
+ 		while(cb) {
+ 			cb->allocActivity(blocks);
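
A pattern worth naming before the GC.h half of the patch: throughout GC.cpp above, a public entry point (Alloc, GetPageMapValue, OnEnterRequest) acquires the lock and then delegates to an *AlreadyLocked variant that merely asserts, so internal callers that already hold the lock can skip a second, self-deadlocking acquire. Reduced to a stand-alone sketch (Counter is illustrative, not MMgc API):

    #include <cassert>
    #include <mutex>
    #include <thread>

    class Counter {
    public:
        int Get()
        {
            std::lock_guard<std::mutex> g(m_lock);
            m_holder = std::this_thread::get_id();
            int v = GetAlreadyLocked();
            m_holder = std::thread::id();
            return v;
        }
        int GetAlreadyLocked()
        {
            // Caller must hold m_lock; MMgc spells this GCAssert(m_lock.IsHeld()).
            assert(m_holder == std::this_thread::get_id());
            return m_value;
        }
    private:
        std::mutex m_lock;
        std::thread::id m_holder;
        int m_value = 0;
    };
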
+diff --git a/MMgc/GC.h b/MMgc/GC.h
+--- a/MMgc/GC.h
++++ b/MMgc/GC.h
+@@ -154,6 +154,17 @@
+ 		_size = (uintptr)_gc->GetStackTop() - (uintptr)_stack;
+ #endif
+ 
++#ifdef MMGC_THREADSAFE
++#define MMGC_ASSERT_GC_LOCK(gc)  GCAssert((gc)->m_lock.IsHeld() || (gc)->destroying)
++#define MMGC_ASSERT_EXCLUSIVE_GC(gc) \
++    GCAssert(((gc)->m_gcRunning \
++			  && (gc)->m_exclusiveGCThread == GCThread::GetCurrentThread()) \
++			 || (gc)->destroying)
++#else
++#define MMGC_ASSERT_GC_LOCK(gc)      ((void) 0)
++#define MMGC_ASSERT_EXCLUSIVE_GC(gc) ((void) 0)
++#endif
++
+ #ifdef DEBUGGER
+ namespace avmplus
+ {
+@@ -219,7 +230,10 @@ namespace MMgc
+ 	private:
+ 		FixedMalloc* fm;
+ 		GC * gc;
++
++		/** @access Requires(gc->m_rootListLock) */
+ 		GCRoot *next;
++		/** @access Requires(gc->m_rootListLock) */
+ 		GCRoot *prev;
+ 		const void *object;
+ 		size_t size;
+@@ -272,11 +286,15 @@ namespace MMgc
+ 		 * This method is invoked after all marking and before any
+ 		 * sweeping, useful for bookkeeping based on whether things
+ 		 * got marked
++		 *
++		 * @access Requires(gc->exclusiveGC)
+ 		 */
+ 		virtual void presweep() {}
+ 
+ 		/**
+ 		 * This method is invoked after all sweeping
++		 *
++		 * @access Requires(gc->exclusiveGC)
+ 		 */
+ 		virtual void postsweep() {}
+ 
+@@ -292,6 +310,49 @@ namespace MMgc
+ 		// called after a ZCT reap completes
+ 		virtual void postreap() {}
+ #endif
++
++		/**
++		 * This callback is the first thing a collecting thread does once it
++		 * acquires exclusiveGC status.  When this is called, no other threads
++		 * are running in requests and the calling thread holds gc->m_lock.
++		 *
++		 * Although this makes the most sense in an MMGC_THREADSAFE build, the
++		 * callback is nonetheless called at the same point during collection
++		 * even in non-MMGC_THREADSAFE builds.  This is so programs that build
++		 * in both thread-safe and non-thread-safe configurations will see the
++		 * same behavior in both.  The same idea applies to
++		 * enterExclusiveGCNoLock and leaveExclusiveGC.
++		 *
++		 * @warning
++		 *     Because the system calls this callback while holding the
++		 *     GC-wide lock, implementations must keep it simple and not block
++		 *     or call back into the GC, to avoid deadlock.
++		 *
++		 * @access Requires(gc->exclusiveGC && gc->m_lock)
++		 */
++		virtual void enterExclusiveGC() {}
++
++		/**
++		 * This callback happens after enterExclusiveGC.  The only difference
++		 * is that this is called after the GC has released gc->m_lock.
++		 *
++		 * See notes and warning at GCCallback::enterExclusiveGC().
++		 *
++		 * @access Requires(gc->exclusiveGC && !gc->m_lock)
++		 */
++		virtual void enterExclusiveGCNoLock() {}
++
++		/**
++		 * This callback is the last thing a collecting thread does before it
++		 * relinquishes exclusiveGC status, allowing other threads to run in
++		 * requests.
++		 *
++		 * In an MMGC_THREADSAFE build, this is called while the GC is holding
++		 * the GC-wide lock.  See warning at GCCallback::enterExclusiveGC().
++		 *
++		 * @access Requires(gc->exclusiveGC && gc->m_lock)
++		 */
++		virtual void leaveExclusiveGC() {}
+ 
+ 		/**
+ 		 * This method is called before an RC object is reaped
+@@ -533,12 +594,19 @@ namespace MMgc
+ 		 * will cause a garbage collection.  This makes code run
+ 		 * abysmally slow, but can be useful for detecting mark
+ 		 * bugs.
++		 *
++		 * The GC reads this flag only when holding the GC lock.  It is best
++		 * to set it as soon as the GC is created.
++		 *
++		 * @access Requires(m_lock)
+ 		 */
+ 		bool greedy;
+ 
+ 		/**
+ 		 * nogc is a debugging flag.  When set, garbage collection
+ 		 * never happens.
++		 *
++		 * @access Requires(m_lock)
+ 		 */
+ 		bool nogc;
+ 
+@@ -554,8 +622,28 @@ namespace MMgc
+ 		bool validateDefRef;		
+ 		bool keepDRCHistory;
+ 
++		/**
++		 * Expand, don't collect, until we reach this threshold. Units are
++		 * pages, not KB, just like GCHeap::GetTotalHeapSize().
++		 *
++		 * In an MMGC_THREADSAFE build, the GC reads this configuration value
++		 * only when holding the GC lock.  Set it during
++		 * initialization, before the GC is visible to multiple threads.
++		 *
++		 * @access Requires(m_lock)
++		 */
+ 		size_t collectThreshold;
+ 
++		/**
++		 * If true, log some statistics during each collection.
++		 * The output goes to gclog().
++		 *
++		 * In an MMGC_THREADSAFE build, the GC reads this flag only during
++		 * stop-the-world collection.  Set it during initialization, before
++		 * the GC is visible to multiple threads.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
+ 		bool gcstats;
+ 
+ 		bool dontAddToZCTDuringCollection;
+@@ -565,6 +653,14 @@ namespace MMgc
+ 		bool incrementalValidationPedantic;
+ #endif
+ 
++		/**
++		 * Configuration flag enabling incremental collection.
++		 *
++		 * In an MMGC_THREADSAFE build, the GC reads this flag only when
++		 * holding the GC lock.  Set it during initialization.
++		 *
++		 * @access Requires(m_lock)
++		 */
+ 		bool incremental;
+ 
+ 		// -- Interface
+@@ -572,7 +668,13 @@ namespace MMgc
+ 		virtual ~GC();
+ 		
+ 		/**
+-		 * Causes an immediate garbage collection
++		 * Causes an immediate garbage collection.
++		 *
++		 * In an MMGC_THREADSAFE build, the caller must not be inside a
++		 * request.  If the caller is inside a request, call
++		 * CollectFromRequest() instead.
++		 *
++		 * @access Requires(!m_lock && !request)
+ 		 */
+ 		void Collect();
+ 
+@@ -597,15 +699,26 @@ namespace MMgc
+ 
+ 		/**
+ 		 * Main interface for allocating memory.  Default flags is no
+-		 * finalization, contains pointers is set and zero is set
++		 * finalization, contains pointers is set and zero is set.
++		 *
++		 * Do not call this from a finalizer.
++		 *
++		 * @access Requires(request)
+ 		 */
+ 		void *Alloc(size_t size, int flags=0, int skip=3);
++
++		/** @access Requires(request && m_lock) */
++		void *AllocAlreadyLocked(size_t size, int flags=0, int skip=3);
+ 
+ 		
+ 		/**
+ 		 * overflow checking way to call Alloc for a # of n size'd items,
+ 		 * all instance of Alloc(num*sizeof(thing)) should be replaced with:
+ 		 * Calloc(num, sizeof(thing))
++		 *
++		 * Do not call this from a finalizer.
++		 *
++		 * @access Requires(request)
+ 		 */
+ 		void *Calloc(size_t num, size_t elsize, int flags=0, int skip=3);
+ 
+@@ -613,11 +726,15 @@ namespace MMgc
+ 		 * One can free a GC allocated pointer, this will throw an assertion
+ 		 * if called during the Sweep phase (ie via a finalizer) it can only be
+ 		 * used outside the scope of a collection
++		 *
++		 * @access Requires(request)
+ 		 */
+ 		void Free(void *ptr);
+ 
+ 		/**
+ 		 * return the size of a piece of memory, may be bigger than what was asked for
++		 *
++		 * @access Requires(request || exclusiveGC)
+ 		 */
+ 		static size_t Size(const void *ptr)
+ 		{
+@@ -631,6 +748,8 @@ namespace MMgc
+ 		/**
+ 		 * Tracers should employ GetMark and SetMark to
+ 		 * set the mark bits during the mark pass.
++		 *
++		 * @access Requires(request || exclusiveGC)
+ 		 */
+ 		static int GetMark(const void *item)
+ 		{
+@@ -737,9 +856,13 @@ namespace MMgc
+ 		}
+ 
+ 		/**
+-		 * Used by sub-allocators to obtain memory
++		 * Used by sub-allocators to obtain memory.
++		 *
++		 * @access Requires(m_lock)
+ 		 */
+ 		void* AllocBlock(int size, int pageType, bool zero=true);
++
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		void FreeBlock(void *ptr, uint32 size);
+ 
+ 		GCHeap *GetGCHeap() const { return heap; }
+@@ -767,11 +890,19 @@ namespace MMgc
+ 		 */
+ 		bool IncrementalMarking() { return marking; }
+ 
+-		// a magical write barriers that finds the container's address and the GC, just
+-		// make sure address is a pointer to a GC page, only used by WB smart pointers
++		/**
++		 * A magical write barrier that finds the container's address and the
++		 * GC, just make sure @a address is a pointer to a GC page. Only used
++		 * by WB smart pointers.
++		 *
++		 * @access Requires(request)
++		 */
+ 		static void WriteBarrier(const void *address, const void *value);
++
++		/** @access Requires(request) */
+ 		static void WriteBarrierNoSub(const void *address, const void *value);
+ 
++		/** @access Requires(request) */
+ 		void writeBarrier(const void *container, const void *address, const void *value)
+ 		{
+ 			GCAssert(IsPointerToGCPage(container));
+@@ -783,13 +914,19 @@ namespace MMgc
+ 			WriteBarrierWrite(address, value);
+ 		}
+ 
+-		// optimized version with no RC checks or pointer masking
++		/**
++		 * optimized version with no RC checks or pointer masking
++		 *
++		 * @access Requires(request)
++		 */
+ 		void writeBarrierRC(const void *container, const void *address, const void *value);
+ 
+ 		/**
+-		Write barrier when the value could be a pointer with anything in the lower 3 bits
+-		FIXME: maybe assert that the lower 3 bits are either zero or a pointer type signature,
+-		this would require the application to tell us what bit patterns are pointers.
++		 * Write barrier when the value could be a pointer with anything in the lower 3 bits
++		 * FIXME: maybe assert that the lower 3 bits are either zero or a pointer type signature,
++		 * this would require the application to tell us what bit patterns are pointers.
++		 *
++		 * @access Requires(request)
+ 		 */
+ 		__forceinline void WriteBarrierNoSubstitute(const void *container, const void *value)
+ 		{
+@@ -797,9 +934,11 @@ namespace MMgc
+ 		}
+ 			
+ 		/**
+-		 AVM+ write barrier, valuePtr is known to be pointer and the caller
+-		does the write
+-		*/
++		 * AVM+ write barrier, valuePtr is known to be pointer and the caller
++		 * does the write.
++		 *
++		 * @access Requires(request)
++		 */
+ 		__forceinline void WriteBarrierTrap(const void *container, const void *valuePtr)
+ 		{
+ 			GCAssert(IsPointerToGCPage(container));
+@@ -823,8 +962,10 @@ namespace MMgc
+ 
+ 	public:
+ 
++		/** @access Requires(request || exclusiveGC) */
+ 		bool ContainsPointers(const void *item);
+ 
++		/** @access Requires(request) */
+ 		void *FindBeginning(const void *gcItem)
+ 		{
+ 			GCAssert(gcItem != NULL);
+@@ -860,11 +1001,23 @@ namespace MMgc
+ 		bool IsRCObject(const void *) { return false; }
+ #endif
+ 
+-
+-		bool Collecting() const { return collecting; }
+-
++		/**
++		 * True during Sweep phase.  Application code can use this to
++		 * determine if it's being called (directly or indirectly) from a
++		 * finalizer.
++		 *
++		 * @see IsGCRunning()
++		 * @access Requires(request || exclusiveGC)
++		 */
++		bool Collecting() const
++		{
++			return collecting;
++		}
++
++		/** @access Requires(request || exclusiveGC) */
+ 		bool IsGCMemory (const void *);
+ 
++		/** @access Requires(request || exclusiveGC) */
+ 		bool IsQueued(const void *item);
+ 
+ 		static uint64 GetPerformanceCounter();
+@@ -875,9 +1028,12 @@ namespace MMgc
+ 			return (double(GC::GetPerformanceCounter() - start) * 1000) / GC::GetPerformanceFrequency();
+ 		}
+ 
++#ifndef MMGC_THREADSAFE
+ 		void DisableThreadCheck() { disableThreadCheck = true; }
+-		
+-		uint64 t0;
++#endif
++
++		/** GC initialization time, in ticks.  Used for logging. */
++		const uint64 t0;
+ 
+ 		// a tick is the unit of GetPerformanceCounter()
+ 		static uint64 ticksToMicros(uint64 ticks) 
+@@ -899,22 +1055,69 @@ namespace MMgc
+ #endif
+ 		}
+ 
+-		// marking rate counter
++		/**
++		 * Total number of bytes of pointer-containing memory scanned by this
++		 * GC.  Used to measure marking rate, which is
++		 * <code>bytesMarked/ticksToMillis(markTicks)</code>.
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint64 bytesMarked;
++
++		/**
++		 * Total time spent doing incremental marking, in ticks.  See
++		 * bytesMarked.
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint64 markTicks;
+ 
+ 		// calls to mark item
+ 		uint32 lastStartMarkIncrementCount;
+ 		uint32 markIncrements;
++
++		/**
++		 * Number of calls to MarkItem().
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint32 marks;
++
++		/**
++		 * Number of calls to Sweep().
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+         uint32 sweeps;
+ 
++		/**
++		 * Number of calls to MarkItem() during the current (or most recent)
++		 * IncrementalMark().
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint32 numObjects;
++
++		/**
++		 * Number of bytes scanned in MarkItem() during the current (or most
++		 * recent) IncrementalMark().
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint32 objSize;
+ 
++		/**
++		 * Time of the latest FinishIncrementalMark() call, in ticks.
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		uint64 sweepStart;
+ 
+-		// if we hit zero in a marking incremental force completion in the next one
++		/**
++		 * True if we emptied the work queue during the most recent
++		 * incremental mark.  This means the next mark will force the GC cycle
++		 * through to completion.
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		bool hitZeroObjects;
+ 
+ 		// called at some apropos moment from the mutator, ideally at a point
+@@ -926,7 +1129,10 @@ namespace MMgc
+ 
+ 		bool Destroying() { return destroying; }
+ 
++		/** @access Requires(request) */
+ 		static GCWeakRef *GetWeakRef(const void *obj);
++
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		void ClearWeakRef(const void *obj);
+ 
+ 		uintptr	GetStackTop() const;
+@@ -943,7 +1149,10 @@ namespace MMgc
+ 		// FIXME: only used for FixedAlloc, GCAlloc sized dynamically
+ 		const static int kPageUsableSpace = 3936;
+ 
++		/** @access Requires(request && m_lock) */
+ 		uint32 *GetBits(int numBytes, int sizeClass);
++
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		void FreeBits(uint32 *bits, int sizeClass)
+ 		{
+ #ifdef _DEBUG
+@@ -952,32 +1161,55 @@ namespace MMgc
+ 			*(uint32**)bits = m_bitsFreelists[sizeClass];
+ 			m_bitsFreelists[sizeClass] = bits;
+ 		}
++
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		uint32 *m_bitsFreelists[kNumSizeClasses];
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		uint32 *m_bitsNext;
+ 
++		/** @access Requires((request && m_lock) || exclusiveGC) */
+ 		GCHashtable weakRefs;
+ 
+ 		bool destroying;
+ 
+-		// we track the heap size so if it expands due to fixed memory allocations
+-		// we can trigger a mark/sweep, other wise we can have lots of small GC 
+-		// objects allocating tons of fixed memory which results in huge heaps
+-		// and possible out of memory situations.
++		/**
++		 * We track the heap size so if it expands due to fixed memory
++		 * allocations we can trigger a mark/sweep. Otherwise we can have lots
++		 * of small GC objects allocating tons of fixed memory, which results
++		 * in huge heaps and possible out-of-memory situations.
++		 *
++		 * @access Requires(m_lock)
++		 */
+ 		size_t heapSizeAtLastAlloc;
+ 
+ 		bool stackCleaned;
+ 		const void *rememberedStackTop;
+ 
++#ifndef MMGC_THREADSAFE
+ 		// for external which does thread safe multi-thread AS execution
+ 		bool disableThreadCheck;
+-
++#endif
++
++		/**
++		 * True if incremental marking is on and some objects have been marked.
++		 * This means write barriers are enabled.
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 *
++		 * The GC thread may read and write this flag.  Application threads in
++		 * requests have read-only access.
++		 */
+ 		bool marking;
+ 		GCStack<GCWorkItem> m_incrementalWork;
+ 		void StartIncrementalMark();
+ 		void FinishIncrementalMark();
++
++		/** @access Requires(request || exclusiveGC) */
+ 		int IsWhite(const void *item);
+ 		
++		/** @access ReadWrite(request, exclusiveGC) */
+ 		uint64 lastMarkTicks;
++		/** @access ReadWrite(request, exclusiveGC) */
+ 		uint64 lastSweepTicks;
+ 
+ 		const static int16 kSizeClasses[kNumSizeClasses];		
+@@ -989,14 +1221,44 @@ namespace MMgc
+ 		// 0 - not in use
+ 		// 1 - used by GCAlloc
+ 		// 3 - used by GCLargeAlloc
++
++		/** @access Requires(pageMapLock) */
+ 		uintptr memStart;
++		/** @access Requires(pageMapLock) */
+ 		uintptr memEnd;
+ 
++		/** @access Requires(m_lock) */
+ 		size_t totalGCPages;
+ 
++		/**
++		 * The bitmap for what pages are in use.  Any access to either the
++		 * pageMap pointer or the bitmap requires pageMapLock.
++		 *
++		 * (Note: A better synchronization scheme might be to use atomic
++		 * operations to read and write the pageMap pointer, writing it only
++		 * from within m_lock; and then using atomic read and write
++		 * operations--on Intel x86, these are just ordinary reads and
++		 * writes--to access the bitmap, with writes again only from within
++		 * m_lock.  This would require reallocating the pageMap more often,
++		 * but at least write barriers wouldn't have to acquire the spinlock.)
++		 *
++		 * @access Requires(pageMapLock)
++		 */
+ 		unsigned char *pageMap;
+ 
++		/**
++		 * This spinlock covers memStart, memEnd, and the contents of pageMap.
++		 */
++		mutable GCSpinLock pageMapLock;
++
+ 		inline int GetPageMapValue(uintptr addr) const
++		{
++			GCAcquireSpinlock lock(pageMapLock);
++			return GetPageMapValueAlreadyLocked(addr);
++		}
++
++		/** @access Requires(pageMapLock) */
++		inline int GetPageMapValueAlreadyLocked(uintptr addr) const
+ 		{
+ 			uintptr index = (addr-memStart) >> 12;
+ 
+@@ -1008,7 +1270,20 @@ namespace MMgc
+ 			//return (pageMap[addr >> 2] & (3<<shiftAmount)) >> shiftAmount;
+ 			return (pageMap[index >> 2] >> shiftAmount) & 3;
+ 		}
++
++		/**
++		 * Set the pageMap bits for the given address.  Those bits must be
++		 * zero beforehand.
++		 *
++		 * @access Requires(pageMapLock)
++		 */
+ 		void SetPageMapValue(uintptr addr, int val);
++
++		/**
++		 * Zero out the pageMap bits for the given address.
++		 *
++		 * @access Requires(pageMapLock)
++		 */
+ 		void ClearPageMapValue(uintptr addr);
+ 
+ 		void MarkGCPages(void *item, uint32 numpages, int val);
+@@ -1028,12 +1303,37 @@ namespace MMgc
+ 		GCLargeAlloc *largeAlloc;
+ 		GCHeap *heap;
+ 		
++		/** @access Requires(m_lock) */
+ 		void* AllocBlockIncremental(int size, bool zero=true);
++
++		/** @access Requires(m_lock) */
+ 		void* AllocBlockNonIncremental(int size, bool zero=true);
++
++	protected:
++		/**
++		 * Collect in a thread-safe, recursion-preventing way, with
++		 * callbacks.
++		 *
++		 * Both parameters are ignored in non-MMGC_THREADSAFE builds.  In an
++		 * MMGC_THREADSAFE build, callerHoldsLock must be true iff the calling
++		 * thread already holds m_lock, and callerHasActiveRequest must be
++		 * true iff the calling thread is already in an active request.
++		 */
++		void CollectWithBookkeeping(bool callerHoldsLock,
++									bool callerHasActiveRequest);
++
++	private:
++		/**
++		 * Just collect.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
++		void CollectImpl();
+ 
+ #ifdef _DEBUG
+ 		public:
+ #endif
++		/** @access Requires(exclusiveGC) */
+ 		void ClearMarks();
+ #ifdef _DEBUG
+ 		private:
+@@ -1043,55 +1343,105 @@ namespace MMgc
+ #ifdef _DEBUG
+ 		public:
+ 		// sometimes useful for mutator to call this
++		/** @access Requires(exclusiveGC) */
+ 		void Trace(const void *stackStart=NULL, size_t stackSize=0);
+ 		private:
+ #endif
+ 
++		/** @access Requires(exclusiveGC) */
+ 		void Finalize();
++		/** @access Requires(exclusiveGC) */
+ 		void Sweep(bool force=false);
++		/** @access Requires(exclusiveGC) */
+ 		void ForceSweep() { Sweep(true); }
++		/** @access Requires(exclusiveGC) */
+ 		void Mark(GCStack<GCWorkItem> &work);
++		/** @access Requires(exclusiveGC) */
+ 		void MarkQueueAndStack(GCStack<GCWorkItem> &work);
++		/** @access Requires(exclusiveGC) */
+ 		void MarkItem(GCStack<GCWorkItem> &work)
+ 		{
+ 			GCWorkItem workitem = work.Pop();
+ 			MarkItem(workitem, work);
+ 		}
++		/** @access Requires(exclusiveGC) */
+ 		void MarkItem(GCWorkItem &workitem, GCStack<GCWorkItem> &work);
+-		// Write barrier slow path
+-
++
++		/**
++		 * Write barrier slow path. Queues the white object.
++		 *
++		 * @access Requires(request)
++		 */
+ 		void TrapWrite(const void *black, const void *white);
+ 
++		/**
++		 * Number of pages allocated from the GCHeap since the start of the
++		 * current GC cycle.
++		 *
++		 * @access Requires((request && m_lock) || exclusiveGC)
++		 */
+ 		unsigned int allocsSinceCollect;
+-		// The collecting flag prevents an unwanted recursive collection.
++
++		/**
++		 * True during the sweep phase of collection.  Several things have to
++		 * behave a little differently during this phase.  For example,
++		 * GC::Free() does nothing during sweep phase; otherwise finalizers
++		 * could be called twice.
++		 *
++		 * Also, Collect() uses this to protect itself from recursive calls
++		 * (from badly behaved finalizers).
++		 *
++		 * @access ReadWrite(request, exclusiveGC)
++		 */
+ 		bool collecting;
+  
++		/** @access Requires((request && m_lock) || exclusiveGC) */ 
+ 		bool finalizedValue;
+ 
+-		// list of pages to be swept, built up in Finalize
++		/** @access Requires(exclusiveGC) */
+ 		void AddToSmallEmptyBlockList(GCAlloc::GCBlock *b)
+ 		{
+ 			b->next = smallEmptyPageList;
+ 			smallEmptyPageList = b;
+ 		}
++
++		/**
++		 * List of pages to be swept, built up in Finalize.
++		 * @access Requires(exclusiveGC)
++		 */
+ 		GCAlloc::GCBlock *smallEmptyPageList;
+ 		
+-		// list of pages to be swept, built up in Finalize
++		/** @access Requires(exclusiveGC) */
+ 		void AddToLargeEmptyBlockList(GCLargeAlloc::LargeBlock *lb)
+ 		{
+ 			lb->next = largeEmptyPageList;
+ 			largeEmptyPageList = lb;
+ 		}
++
++		/**
++		 * List of pages to be swept, built up in Finalize.
++		 * @access Requires(exclusiveGC)
++		 */
+ 		GCLargeAlloc::LargeBlock *largeEmptyPageList;
+ 		
+ #ifdef GCHEAP_LOCK
+ 		GCSpinLock m_rootListLock;
+ #endif
+ 
++		/** @access Requires(m_rootListLock) */
+ 		GCRoot *m_roots;
+ 		void AddRoot(GCRoot *root);
+ 		void RemoveRoot(GCRoot *root);
+ 		
++#ifdef MMGC_THREADSAFE
++		GCSpinLock m_callbackListLock;
++#endif
++
++		/**
++		 * Points to the head of a linked list of callback objects.
++		 *
++		 * @access Requires(m_callbackListLock)
++		 */
+ 		GCCallback *m_callbacks;
+ 		void AddCallback(GCCallback *cb);
+ 		void RemoveCallback(GCCallback *cb);
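
Taken together, the callback-list changes above mean a GCCallback can now be registered and invoked from several threads, with the list itself under m_callbackListLock. A sketch of a callback using the new exclusive-GC hooks documented earlier in this header (the GCCallback(GC*) constructor is assumed from MMgc's existing API and may differ; PausePrinter is illustrative):

    class PausePrinter : public MMgc::GCCallback {
    public:
        explicit PausePrinter(MMgc::GC *gc)
            : MMgc::GCCallback(gc), m_pauses(0) {}

        // World is stopped and m_lock is held: keep this tiny, never block.
        virtual void enterExclusiveGC() { m_pauses++; }

        // Still stopped, but m_lock has been released.
        virtual void enterExclusiveGCNoLock() {}

        // Last call before request threads may run again.
        virtual void leaveExclusiveGC() {}

    private:
        int m_pauses;
    };
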
+@@ -1128,7 +1478,9 @@ private:
+ private:
+ #endif
+ 
++#ifndef MMGC_THREADSAFE
+ 		void CheckThread();
++#endif
+ 
+ 		void PushWorkItem(GCStack<GCWorkItem> &stack, GCWorkItem item);
+ 
+@@ -1145,17 +1497,37 @@ private:
+ 		void CheckFreelists();
+ 
+ 		int m_gcLastStackTrace;
++
++		/**
++		 * Used by FindUnmarkedPointers.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
+ 		void UnmarkedScan(const void *mem, size_t size);
+-		// find unmarked pointers in entire heap
++
++		/**
++		 * Find unmarked pointers in the entire heap.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
+ 		void FindUnmarkedPointers();
+ 
+ 		// methods for incremental verification
+ 
+-		// scan a region of memory for white pointers, used by FindMissingWriteBarriers
++		/**
++		 * Scan a region of memory for white pointers. Used by
++		 * FindMissingWriteBarriers.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
+ 		void WhitePointerScan(const void *mem, size_t size);
+ 
+-		// scan all GC memory (skipping roots) If a GC object is black make sure
+-		// it has no white pointers, skip it if its grey.
++		/**
++		 * Scan all GC memory (skipping roots). If a GC object is black make sure
++		 * it has no pointers to white objects.
++		 *
++		 * @access Requires(exclusiveGC)
++		 */
+ 		void FindMissingWriteBarriers();
+ #ifdef WIN32
+ 		// store a handle to the thread that created the GC to ensure thread safety
+@@ -1176,7 +1548,187 @@ public:
+ #ifdef _DEBUG
+ 		// Dump a list of objects that have pointers to the given location.
+ 		void WhosPointingAtMe(void* me, int recurseDepth=0, int currentDepth=0);
++
++		/**
++		 * Used by WhosPointingAtMe.
++		 * @access Requires(pageMapLock)
++		 */
+     	void ProbeForMatch(const void *mem, size_t size, uintptr value, int recurseDepth, int currentDepth);
++#endif
++
++#ifdef MMGC_THREADSAFE
++	public:
++		/**
++		 * True if marking or sweeping is happening.  In an MMGC_THREADSAFE
++		 * build, this implies that no threads in requests are running right
++		 * now.
++		 *
++		 * Contrast Collecting().
++		 *
++		 * @access Requires(request || exclusiveGC || m_lock)
++		 */
++		bool IsGCRunning() const { return m_gcRunning; }
++
++	protected:
++		/** @access Requires(m_lock) */
++		void OnEnterRequestAlreadyLocked()
++		{
++			WaitForGCDone();
++			m_requestCount++;
++#ifdef _DEBUG
++			GCThread::GetCurrentThread()->OnEnterRequest();
++#endif
++		}
++
++		/** @access Requires(m_lock) */
++		void OnLeaveRequestAlreadyLocked()
++		{
++			GCAssert(m_requestCount > 0);
++			m_requestCount--;
++			if (m_requestCount == 0)
++				m_condNoRequests.NotifyAll();
++#ifdef _DEBUG
++			GCThread::GetCurrentThread()->OnLeaveRequest();
++#endif
++		}
++
++	public:
++		/**
++		 * Call this when the number of active requests on a thread goes from
++		 * zero to one.
++		 */
++		void OnEnterRequest()
++		{
++			GCAutoLock _lock(m_lock);
++			OnEnterRequestAlreadyLocked();
++		}
++
++		/**
++		 * Call this when the number of active requests on a thread goes from 
++		 * one to zero.
++		 */
++		void OnLeaveRequest()
++		{
++			GCAutoLock _lock(m_lock);
++			OnLeaveRequestAlreadyLocked();
++		}
++
++		/**
++		 * Exactly like Collect(), except that the caller must be inside a
++		 * request.
++		 *
++		 * @access Requires(request && !m_lock)
++		 */
++		void CollectFromRequest()
++		{
++			CollectWithBookkeeping(false, true);
++		}
++
++	protected:
++		/**
++		 * Wait for the current GC operation, if any, to finish.
++		 *
++		 * @access Requires(!request && m_lock)
++		 */
++		void WaitForGCDone()
++		{
++			GCAssert(m_exclusiveGCThread != GCThread::GetCurrentThread());
++			while (m_exclusiveGCThread != NULL)
++				m_condDone.Wait();
++		}
++
++		/**
++		 * This lock protects m_exclusiveGCThread and m_requestCount.
++		 * Policies built on m_lock and m_exclusiveGCThread govern how threads
++		 * access the rest of MMgc (both data and code).
++		 *
++		 * These policies are documented with "\@access" comments, which is a
++		 * kind of shorthand.  Here are a few illustrative examples:
++		 *
++		 * <code>\@access Requires(m_lock)</code> on a member variable or
++		 * method means it must be used only by a thread that holds m_lock.
++		 *
++		 * <code>\@access Requires(exclusiveGC)</code> means the member is to
++		 * be used only by the GC thread, and only when no threads are running
++		 * in requests.  This applies to methods like Trace(), MarkItem(),
++		 * Sweep(), and Finalize().
++		 *
++		 * <code>\@access Requires(request)</code> means the member must be
++		 * used only by a thread that is in a request.  This applies to
++		 * Alloc(), Calloc(), and Free().
++		 *
++		 * <code>\@access Requires(exclusiveGC || request)</code> requires
++		 * that the caller be <em>either</em> in a request <em>or</em> that it
++		 * be the GC thread, with no threads running in requests.  (Of course
++		 * a thread can never be both.)  This is just like
++		 * <code>Requires(request)</code> except that a member marked this way
++		 * is also safe to use from finalizer code.  (Finalizers run with
++		 * <code>exclusiveGC</code>, not in a request.)
++		 *
++		 * <code>\@access ReadWrite(request, exclusiveGC)</code> applies to
++		 * several data members.  This denotes a read-write locking scheme.
++		 * Any thread with <code>request || exclusiveGC</code> can safely
++		 * <em>read</em> these data members, but only a thread with
++		 * <code>exclusiveGC</code> can <em>write</em> to them.
++		 *
++		 * Other policies are occasionally used.  For example, the list of
++		 * GCRoots has its own spinlock, so the head of the list and each of
++		 * the links have <code>\@access Requires(m_rootListLock)</code>.
++		 *
++		 * XXX TODO - worry about less-than-one-word fields (just bools, I
++		 * think) that are packed with other fields that have different access
++		 * policies!
++		 *
++		 * Some data structures, like the list of GCRoots, are protected by
++		 * separate spinlocks.  The rule to avoid deadlock is: do not try to
++		 * acquire m_lock if you already hold any spinlock.
++		 */
++		mutable GCLock m_lock;
++
++		/**
++		 * The thread currently doing GC-related work (or waiting to do
++		 * GC-related work); or NULL if there is no such thread.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		GCThread *m_exclusiveGCThread;
++
++		/**
++		 * True if a thread is doing GC-related work, having already ensured
++		 * that no threads are in active requests.
++		 *
++		 * (The complicated thread-safety policy here makes reads lock-free.
++		 * It also supports checking the status even if the currently running
++		 * code isn't in a request at all; Collect() needs that.)
++		 *
++		 * @access ReadWrite(request || exclusiveGC || m_lock, exclusiveGC && m_lock)
++		 */
++		bool m_gcRunning;
++
++		/**
++		 * This is notified whenever m_exclusiveGCThread becomes NULL.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		GCCondition m_condDone;
++
++		/**
++		 * The number of threads currently in active requests.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		int m_requestCount;
++
++		/**
++		 * This is notified whenever m_requestCount becomes zero.
++		 *
++		 * At most one thread is ever waiting on this condition at a time; if
++		 * a thread is waiting on it, that thread is <code>m_exclusiveGCThread</code>
++		 * and <code>m_gcRunning</code> is false.
++		 *
++		 * @access Requires(m_lock)
++		 */
++		GCCondition m_condNoRequests;
+ #endif
+ 	};
+ 
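To make the @access protocol above concrete, here is a condensed,
illustrative C++ sketch of the handshake these members imply -- how a
would-be collector claims exclusivity and drains active requests.
BecomeExclusiveGCThread is a hypothetical helper name, not code from this
patch; the real logic lives in CollectWithBookkeeping.

	// Illustrative sketch only, using the members documented above.
	void GC::BecomeExclusiveGCThread()      // hypothetical helper
	{
		GCAutoLock _lock(m_lock);           // m_lock guards every field below
		while (m_exclusiveGCThread != NULL) // another thread owns the GC;
			m_condDone.Wait();              // wait until it finishes
		m_exclusiveGCThread = GCThread::GetCurrentThread();
		while (m_requestCount > 0)          // drain active requests;
			m_condNoRequests.Wait();        // notified when the count hits zero
		m_gcRunning = true;                 // write is safe: exclusiveGC && m_lock
	}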
+diff --git a/MMgc/GCAlloc.cpp b/MMgc/GCAlloc.cpp
+--- a/MMgc/GCAlloc.cpp
++++ b/MMgc/GCAlloc.cpp
+@@ -127,6 +127,8 @@ namespace MMgc
+ 
+ 	GCAlloc::GCBlock* GCAlloc::CreateChunk()
+ 	{
++		MMGC_ASSERT_GC_LOCK(m_gc);
++
+ 		// Get space in the bitmap.  Do this before allocating the actual block,
+ 		// since we might call GC::AllocBlock for more bitmap space and thus
+ 		// cause some incremental marking.
+@@ -229,6 +231,7 @@ namespace MMgc
+ 
+ 	void* GCAlloc::Alloc(size_t size, int flags)
+ 	{
++		MMGC_ASSERT_GC_LOCK(m_gc);
+ 		(void)size;
+ 		GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");
+ start:
+@@ -379,6 +382,8 @@ start:
+ 
+ 	void GCAlloc::Finalize()
+ 	{
++		MMGC_ASSERT_EXCLUSIVE_GC(m_gc);
++
+ 		m_finalized = true;
+ 		// Go through every item of every block.  Look for items
+ 		// that are in use but not marked as reachable, and delete
+diff --git a/MMgc/GCLargeAlloc.cpp b/MMgc/GCLargeAlloc.cpp
+--- a/MMgc/GCLargeAlloc.cpp
++++ b/MMgc/GCLargeAlloc.cpp
+@@ -53,6 +53,9 @@ namespace MMgc
+ 
+ 	void* GCLargeAlloc::Alloc(size_t size, int flags)
+ 	{
++		MMGC_ASSERT_GC_LOCK(m_gc);
+ 		int blocks = (size+sizeof(LargeBlock)+GCHeap::kBlockSize-1) / GCHeap::kBlockSize;
+ 		
+ 		LargeBlock *block = (LargeBlock*) m_gc->AllocBlock(blocks, GC::kGCLargeAllocPageFirst, (flags&GC::kZero) != 0);
+diff --git a/MMgc/GCTests.cpp b/MMgc/GCTests.cpp
+--- a/MMgc/GCTests.cpp
++++ b/MMgc/GCTests.cpp
+@@ -55,6 +55,21 @@ namespace MMgc
+ 
+ 	GCWeakRef* createWeakRef(int extra=0)
+ 	{
++		// Bogusly use up some extra stack.
++		//
++		// Certain compilers/platforms require this (at least gcc/MacIntel).
++		// A pointer to the temporary GCObject can be left on the stack, so
++		// far down that CleanStack doesn't dare touch it.  Leaving the
++		// pointer there causes the temporary to be marked, and not collected,
++		// which causes tests to fail with assertions.
++		//
++		// The extra 32 bytes here causes the temporary to end up higher on
++		// the stack (numerically lesser address, on Intel at least) where
++		// CleanStack will clobber it.
++		//
++		char buf[32];
++		sprintf(buf, "%d", extra);  // don't optimize away buf
++
+ 		return (new (gc, extra) GCObject())->GetWeakRef();
+ 	}
+ 
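The stack-padding workaround above addresses a conservative-scanning hazard
that is easy to state in miniature (illustrative C++ only, not code from the
patch):

	// The temporary GCObject* below may linger in a stack slot after this
	// statement.  A conservative scanner that sees the stale word treats it
	// as a root, so the object survives collection and its weak ref never
	// clears -- exactly the false positive the buf padding defends against.
	GCWeakRef *w = (new (gc, 0) GCObject())->GetWeakRef();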
+diff --git a/MMgc/GCThreads.cpp b/MMgc/GCThreads.cpp
+new file mode 100644
+--- /dev/null
++++ b/MMgc/GCThreads.cpp
+@@ -0,0 +1,100 @@
++/* ***** BEGIN LICENSE BLOCK *****
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
++ *
++ * The contents of this file are subject to the Mozilla Public License Version
++ * 1.1 (the "License"); you may not use this file except in compliance with
++ * the License. You may obtain a copy of the License at
++ * http://www.mozilla.org/MPL/
++ *
++ * Software distributed under the License is distributed on an "AS IS" basis,
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
++ * for the specific language governing rights and limitations under the
++ * License.
++ *
++ * The Original Code is [Open Source Virtual Machine.].
++ *
++ * The Initial Developer of the Original Code is
++ * Adobe System Incorporated.
++ * Portions created by the Initial Developer are Copyright (C) 2004-2006
++ * the Initial Developer. All Rights Reserved.
++ *
++ * Contributor(s):
++ *   Jason Orendorff
++ *
++ * Alternatively, the contents of this file may be used under the terms of
++ * either the GNU General Public License Version 2 or later (the "GPL"), or
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
++ * in which case the provisions of the GPL or the LGPL are applicable instead
++ * of those above. If you wish to allow use of your version of this file only
++ * under the terms of either the GPL or the LGPL, and not to allow others to
++ * use your version of this file under the terms of the MPL, indicate your
++ * decision by deleting the provisions above and replace them with the notice
++ * and other provisions required by the GPL or the LGPL. If you do not delete
++ * the provisions above, a recipient may use your version of this file under
++ * the terms of any one of the MPL, the GPL or the LGPL.
++ *
++ * ***** END LICENSE BLOCK ***** */
++
++#include "MMgc.h"
++
++#ifdef MMGC_THREADSAFE
++#include "prtypes.h"
++
++using namespace MMgc;
++
++PRUintn GCThread::g_tlsIndex;
++GCThread *GCThread::g_first = NULL;
++static PRLock *g_threadListLock = NULL;
++
++void (PR_CALLBACK DestroyGCThread)(void *obj)
++{
++	delete static_cast<GCThread *>(obj);
++}
++
++void GCThread::Init()
++{
++	GCAssert(g_threadListLock == NULL);
++	g_threadListLock = PR_NewLock();
++	GCAssert(g_threadListLock != NULL);
++	if (g_threadListLock != NULL) {
++		PRStatus st = PR_NewThreadPrivateIndex(&g_tlsIndex, DestroyGCThread);
++		(void) st;
++		GCAssert(st == PR_SUCCESS);
++	}
++}
++
++void GCThread::Destroy()
++{
++	GCAssert(g_threadListLock != NULL);
++
++	PR_DestroyLock(g_threadListLock);
++	g_threadListLock = NULL;
++	// NSPR provides no way to release g_tlsIndex to the system.
++}
++
++GCThread::GCThread()
++#ifdef _DEBUG
++	: m_inRequest(false)
++#endif
++{
++	PR_Lock(g_threadListLock);
++	m_next = g_first;
++	g_first = this;
++	PR_Unlock(g_threadListLock);
++}
++
++GCThread * GCThread::GetCurrentThread()
++{
++	GCAssert(g_threadListLock != NULL);
++
++	GCThread *t = static_cast<GCThread *>(PR_GetThreadPrivate(g_tlsIndex));
++	if (t == NULL) {
++		t = new GCThread;
++		PRStatus st = PR_SetThreadPrivate(g_tlsIndex, t);
++		(void) st;
++		GCAssert(st == PR_SUCCESS);
++	}
++	return t;
++}
++
++#endif  // MMGC_THREADSAFE
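Taken together, the per-thread state has a simple lifecycle: Init() once at
startup, lazy creation on first use, and cleanup via NSPR's TLS destructor
when each thread dies.  A minimal sketch of hypothetical host code (the
function names here are assumptions, not part of the patch):

	// Hypothetical embedding code, assuming NSPR threads.
	void HostRuntimeStartup()
	{
		GCThread::Init();       // create the TLS index and thread-list lock
	}

	void AnyThreadTouchingTheGC()
	{
		GCThread *t = GCThread::GetCurrentThread();
		(void) t;               // lazily allocates and registers this thread's
	}                           // GCThread; DestroyGCThread frees it on exit

	void HostRuntimeShutdown()
	{
		GCThread::Destroy();    // after all GC-using threads have exited
	}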
+diff --git a/MMgc/GCThreads.h b/MMgc/GCThreads.h
+new file mode 100644
+--- /dev/null
++++ b/MMgc/GCThreads.h
+@@ -0,0 +1,227 @@
++/* ***** BEGIN LICENSE BLOCK *****
++ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
++ *
++ * The contents of this file are subject to the Mozilla Public License Version
++ * 1.1 (the "License"); you may not use this file except in compliance with
++ * the License. You may obtain a copy of the License at
++ * http://www.mozilla.org/MPL/
++ *
++ * Software distributed under the License is distributed on an "AS IS" basis,
++ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
++ * for the specific language governing rights and limitations under the
++ * License.
++ *
++ * The Original Code is [Open Source Virtual Machine.].
++ *
++ * The Initial Developer of the Original Code is
++ * Adobe System Incorporated.
++ * Portions created by the Initial Developer are Copyright (C) 2004-2006
++ * the Initial Developer. All Rights Reserved.
++ *
++ * Contributor(s):
++ *   Jason Orendorff
++ *
++ * Alternatively, the contents of this file may be used under the terms of
++ * either the GNU General Public License Version 2 or later (the "GPL"), or
++ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
++ * in which case the provisions of the GPL or the LGPL are applicable instead
++ * of those above. If you wish to allow use of your version of this file only
++ * under the terms of either the GPL or the LGPL, and not to allow others to
++ * use your version of this file under the terms of the MPL, indicate your
++ * decision by deleting the provisions above and replace them with the notice
++ * and other provisions required by the GPL or the LGPL. If you do not delete
++ * the provisions above, a recipient may use your version of this file under
++ * the terms of any one of the MPL, the GPL or the LGPL.
++ *
++ * ***** END LICENSE BLOCK ***** */
++
++#ifndef __MMGC_GCTHREADS__
++#define __MMGC_GCTHREADS__
++
++#ifdef MMGC_THREADSAFE
++
++#include "prthread.h"
++#include "prlock.h"
++#include "prcvar.h"
++
++namespace MMgc
++{
++	/** A non-reentrant mutex. */
++	class GCLock
++	{
++		friend class GCCondition;
++	public:
++		GCLock() 
++			: m_lock(PR_NewLock())
++#ifdef _DEBUG
++			, m_holder(NULL)
++#endif
++		{
++			GCAssert(m_lock != NULL);
++		}
++
++		~GCLock() { PR_DestroyLock(m_lock); }
++
++		void Acquire()
++		{
++#ifdef _DEBUG
++			PRThread *thr = PR_GetCurrentThread();
++			GCAssertMsg(m_holder != thr, "deadlock in GCLock::Acquire");
++#endif
++			PR_Lock(m_lock);
++#ifdef _DEBUG
++			m_holder = thr;
++#endif
++		}
++
++		void Release()
++		{
++#ifdef _DEBUG
++			GCAssert(IsHeld());
++			m_holder = NULL;
++#endif
++			PR_Unlock(m_lock);
++		}
++
++#ifdef _DEBUG
++		/**
++		 * True if the calling thread owns the lock.
++		 *
++		 * Warning: This is for use in debug-mode assertions about the lock
++		 * state ONLY.  It's theoretically possible, on some non-IA32
++		 * platforms, that reading m_holder outside the lock like this could
++		 * return random junk, causing a false positive here.  On IA32, it's
++		 * safe.
++		 */
++		bool IsHeld()
++		{
++			return m_holder == PR_GetCurrentThread();
++		}
++#endif
++
++	private:
++		// Prohibit copying.
++		GCLock(const GCLock &);
++		GCLock & operator=(const GCLock &);
++
++		PRLock *m_lock;
++#ifdef _DEBUG
++		PRThread *m_holder;
++#endif
++	};
++
++	class GCAutoLock {
++	public:
++		explicit GCAutoLock(GCLock &lock)
++			: m_lock(lock)
++		{
++			lock.Acquire();
++		}
++
++		~GCAutoLock() { m_lock.Release(); }
++
++	private:
++		// Prohibit copying.
++		GCAutoLock(const GCAutoLock &);
++		GCAutoLock & operator=(const GCAutoLock &);
++
++		GCLock &m_lock;
++	};
++
++	/** A pthreads-style condition variable. */
++	class GCCondition {
++	public:
++		explicit GCCondition(GCLock &lock)
++			: m_cvar(PR_NewCondVar(lock.m_lock))
++#ifdef _DEBUG
++			, m_lockObject(lock)
++#endif
++		{
++			GCAssert(m_cvar != NULL);
++		}
++
++		~GCCondition() { PR_DestroyCondVar(m_cvar); }
++
++		void Wait()
++		{
++			PRStatus st;
++
++#ifdef _DEBUG
++			m_lockObject.m_holder = NULL;
++#endif
++			st = PR_WaitCondVar(m_cvar, PR_INTERVAL_NO_TIMEOUT);
++#ifdef _DEBUG
++			m_lockObject.m_holder = PR_GetCurrentThread();
++#endif
++			(void)st;
++			GCAssert(st == PR_SUCCESS);
++		}
++
++		void NotifyAll()
++		{
++			PRStatus st;
++
++			st = PR_NotifyAllCondVar(m_cvar);
++			(void)st;
++			GCAssert(st == PR_SUCCESS);
++		}
++
++	private:
++		// Prohibit copying.
++		GCCondition(const GCCondition &);
++		GCCondition & operator=(const GCCondition &);
++
++		PRCondVar *m_cvar;
++#ifdef _DEBUG
++		GCLock &m_lockObject;
++#endif
++	};
++
++	/** A catchall object for per-thread state. */
++	class GCThread {
++	public:
++		static void Init();
++		static void Destroy();
++
++		static GCThread * GetCurrentThread();
++
++#ifdef _DEBUG
++	public:
++		void OnEnterRequest()
++		{
++			GCAssert(!m_inRequest);
++			m_inRequest = true;
++		}
++
++		void OnLeaveRequest()
++		{
++			GCAssert(m_inRequest);
++			m_inRequest = false;
++		}
++
++		bool IsInActiveRequest() { return m_inRequest; }
++
++	private:
++		bool m_inRequest;
++#else
++	public:
++		void OnEnterRequest() {}
++		void OnLeaveRequest() {}
++#endif
++
++	private:
++		// Prohibit copying.
++		GCThread(const GCThread &);
++		GCThread & operator=(const GCThread &);
++
++		GCThread();
++
++		static PRUintn g_tlsIndex;
++		static GCThread *g_first;
++		GCThread *m_next;
++	};
++}
++
++#endif // MMGC_THREADSAFE
++
++#endif // __MMGC_GCTHREADS__
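These primitives compose in the standard monitor pattern.  A self-contained
usage sketch (hypothetical code, not from the patch; the names Waiter,
Signaler, and ready are invented for illustration):

	// Hypothetical usage of GCLock, GCAutoLock, and GCCondition.
	static GCLock lock;
	static GCCondition cond(lock);  // a condition is bound to its lock
	static bool ready = false;

	void Waiter()
	{
		GCAutoLock _lock(lock);     // scoped Acquire()/Release()
		while (!ready)              // always loop: waits can wake spuriously
			cond.Wait();            // atomically releases and reacquires lock
	}

	void Signaler()
	{
		GCAutoLock _lock(lock);
		ready = true;
		cond.NotifyAll();           // wake all waiters; each rechecks ready
	}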
+diff --git a/MMgc/GCWeakRef.h b/MMgc/GCWeakRef.h
+--- a/MMgc/GCWeakRef.h
++++ b/MMgc/GCWeakRef.h
+@@ -58,10 +58,21 @@ namespace MMgc
+ 			}
+ 		}
+ 	private:
+-		// override new so we can be tell the GC we don't contain pointers
++		/**
++		 * When allocating a GCWeakRef, tell the GC we don't contain pointers
++		 * (overriding the default base-class behavior).
++		 *
++		 * In MMGC_THREADSAFE builds, the caller always holds m_lock.
++		 *
++		 * @access Requires(gc->m_lock)
++		 */
+ 		static void *operator new(size_t size, GC *gc)
+ 		{
++#ifdef MMGC_THREADSAFE
++			return gc->AllocAlreadyLocked(size, GC::kFinalize, 4);
++#else
+ 			return gc->Alloc(size, GC::kFinalize, 4);
++#endif
+ 		}
+ 		// private, only GC can access
+ 		GCWeakRef(const void *obj) : m_obj(obj) 
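The AllocAlreadyLocked entry point exists because GCLock is non-reentrant: a
caller already holding m_lock would self-deadlock (and, in debug builds, trip
the assertion in Acquire) if it went through the public Alloc.  The split
presumably looks something like the following sketch; this is an assumption,
since the patch shows only the call site:

	// Assumed relationship (illustrative); exact signatures may differ.
	void *GC::Alloc(size_t size, int flags, int skip)
	{
		GCAutoLock _lock(m_lock);   // public entry point takes the lock,
		return AllocAlreadyLocked(size, flags, skip);  // then delegates to
	}                               // the already-locked variant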
+diff --git a/MMgc/MMgc.h b/MMgc/MMgc.h
+--- a/MMgc/MMgc.h
++++ b/MMgc/MMgc.h
+@@ -125,6 +125,7 @@ namespace MMgc
+ 
+ #include "GCTypes.h"
+ #include "GCStack.h"
++#include "GCThreads.h"
+ #include "GCAllocObject.h"
+ #include "GCHeap.h"
+ #include "GCAlloc.h"
+diff --git a/MMgc/manifest.mk b/MMgc/manifest.mk
+--- a/MMgc/manifest.mk
++++ b/MMgc/manifest.mk
+@@ -56,6 +56,7 @@ MMgc_CXXSRCS := $(MMgc_CXXSRCS) \
+   $(curdir)/GCMemoryProfiler.cpp \
+   $(curdir)/GCObject.cpp \
+   $(curdir)/GCTests.cpp \
++  $(curdir)/GCThreads.cpp \
+   $(NULL)
+ 
+ ifeq (windows,$(TARGET_OS))
--- a/series
+++ b/series
@@ -1,7 +1,8 @@
 tweak-esc-main.sh
+mmgc-threadsafe-baseline
 mmgc-threadsafe
 unmerged-edits-to-mmgc-threadsafe
 mmgc-threadsafe-gctests
 mmgc-maybegc
 mmgc-graphviz
 mmgc-bit-checks