Merge changes from jorendorff
author: benjamin@smedbergs.us
date: Mon, 10 Dec 2007 14:12:50 -0500
changeset 19: 9440555b77389c85b8100c0e8215f3b460613e2d
parent 13: 6a29ec7e67a19d85a0800edaa50ce47243b56e53
parent 18: 964b0d139a110b363e3eba3d08426300b59ed477
child 20: 458babdaa518471a7a2e4b09f514955c86b7c60f
push id: 1
push user: bsmedberg@mozilla.com
push date: Mon, 21 Apr 2008 01:54:18 +0000
files:
.hgignore
mmgc-threadsafe-take2
series
rename from mmgc-threadsafe-take2
rename to mmgc-threadsafe
--- a/mmgc-threadsafe-take2
+++ b/mmgc-threadsafe
@@ -1,37 +1,30 @@
 diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 --- a/MMgc/GC.cpp
 +++ b/MMgc/GC.cpp
-@@ -198,8 +198,23 @@ namespace MMgc
+@@ -198,8 +198,16 @@ namespace MMgc
  	};
  	const size_t kLargestAlloc = 1968;
  
 +#ifdef MMGC_THREADSAFE
 +#define USING_CALLBACK_LIST(gc)  GCAcquireSpinlock _cblock((gc)->m_callbackListLock)
 +#define USING_PAGE_MAP()         GCAcquireSpinlock _pmlock(pageMapLock)
-+#define ASSERT_GC_LOCK()         GCAssert(m_lock.IsHeld() || destroying)
-+#define ASSERT_EXCLUSIVE_GC() \
-+    GCAssert((m_gcRunning \
-+			  && m_exclusiveGCThread == GCThread::GetCurrentThread()) \
-+             || destroying)
 +#else
 +#define USING_CALLBACK_LIST(gc)  ((gc)->CheckThread())
 +#define USING_PAGE_MAP()         ((void) 0)
-+#define ASSERT_GC_LOCK()         ((void) 0)
-+#define ASSERT_EXCLUSIVE_GC()    ((void) 0)
 +#endif
 +
  	GC::GC(GCHeap *gcheap)
 -		: disableThreadCheck(false),
 +		:
  #ifdef MMGC_DRC
  		  zct(gcheap),
  #endif
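
Note: the USING_CALLBACK_LIST and USING_PAGE_MAP macros above expand to a GCAcquireSpinlock local, an RAII guard that holds a spinlock for the rest of the enclosing scope. A minimal sketch of that pattern, assuming a hypothetical GCSpinLock; MMgc's real types live in GCThreads.h and their bodies are not copied here:

    #include <atomic>

    // Sketch only: the real GCSpinLock/GCAcquireSpinlock are declared in
    // MMgc/GCThreads.h; these implementations are assumed.
    class GCSpinLock {
    public:
        void Acquire() { while (m_flag.test_and_set(std::memory_order_acquire)) {} }
        void Release() { m_flag.clear(std::memory_order_release); }
    private:
        std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
    };

    class GCAcquireSpinlock {
    public:
        explicit GCAcquireSpinlock(GCSpinLock &lock) : m_lock(lock) { m_lock.Acquire(); }
        ~GCAcquireSpinlock() { m_lock.Release(); }  // released on every scope exit
    private:
        GCSpinLock &m_lock;
    };

Because the guard is a local object, the lock is released on every exit path from the scope, including early returns; that is why the macros expand to a declaration rather than paired Acquire/Release calls.
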
-@@ -255,8 +270,17 @@ namespace MMgc
+@@ -255,8 +263,17 @@ namespace MMgc
  		  heapSizeAtLastAlloc(gcheap->GetTotalHeapSize()),
  		  finalizedValue(true),
  		  // Expand, don't collect, until we hit this threshold
 -		  collectThreshold(256)
 -	{		
 +		  collectThreshold(256),
 +#if MMGC_THREADSAFE
 +		  m_exclusiveGCThread(NULL),
@@ -41,27 +34,27 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +		  m_condNoRequests(m_lock)
 +#else
 +		  disableThreadCheck(false)
 +#endif
 +	{
  		// sanity check for all our types
  		GCAssert (sizeof(int8) == 1);
  		GCAssert (sizeof(uint8) == 1);		
-@@ -372,7 +396,9 @@ namespace MMgc
+@@ -372,7 +389,9 @@ namespace MMgc
  
  		heap->Free(pageMap);
  
 +#ifndef MMGC_THREADSAFE
  		CheckThread();
 +#endif
  
  		GCAssert(!m_roots);
  		GCAssert(!m_callbacks);
-@@ -380,15 +406,43 @@ namespace MMgc
+@@ -380,15 +399,43 @@ namespace MMgc
  
  	void GC::Collect()
  	{
 +		CollectWithBookkeeping(false, false);
 +	}
 +
 +	void GC::CollectWithBookkeeping(bool callerHoldsLock,
 +									bool callerHasActiveRequest)
@@ -102,17 +95,17 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +			if (!callerHoldsLock)
 +				m_lock.Release();
 +			return;
 +		}
 +#else
  		if (vetoed || nogc || collecting) {
  
  			return;
-@@ -400,8 +454,136 @@ namespace MMgc
+@@ -400,8 +447,140 @@ namespace MMgc
  
  			return;
  		}
 -
 +#endif
 +
 +#ifdef MMGC_THREADSAFE
 +		GCThread *thisThread = GCThread::GetCurrentThread();
@@ -123,31 +116,35 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +			// recursively.  This maybe shouldn't happen, but Collect can be
 +			// called indirectly from a finalizer.  Don't recurse, just ignore
 +			// it.
 +			//
 +			// If it's some other thread, wait for that thread to finish.
 +			//
 +			if (m_exclusiveGCThread != thisThread) {
 +				GCRoot *stackRoot;
++				void *stack;
++				size_t stackSize;
++
++				// Call this macro here, not under "if
++				// (callerHasActiveRequest)", because it introduces local
++				// variables that must survive for the whole lifetime of
++				// stackRoot.
++				MMGC_GET_STACK_EXTENTS(this, stack, stackSize);
 +
 +				// Before waiting, this thread must suspend any active
 +				// requests.  This avoids a deadlock where m_exclusiveGCThread
 +				// is waiting for m_requestCount to go to zero while we're
 +				// waiting for m_exclusiveGCThread to be done.
 +				//
 +				// When we do this, we root the calling thread's stack; this
 +				// way each collection scans the stack of every thread that is
 +				// in a suspended request.
 +				//
 +				if (callerHasActiveRequest) {
-+					void *stack;
-+					size_t stackSize;
-+
-+					MMGC_GET_STACK_EXENTS(this, stack, stackSize);
 +					stackRoot = new GCRoot(this, stack, stackSize);
 +
 +					OnLeaveRequestAlreadyLocked();
 +				}
 +
 +				while (m_exclusiveGCThread != NULL)
 +					m_condDone.Wait();
 +
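
Note: the comments in this hunk describe the deadlock being avoided: the exclusive thread waits for m_requestCount to reach zero while a requesting thread waits for m_exclusiveGCThread to clear. Condensed, the wait path now reads roughly as follows (a paraphrase of the lines above, not literal code; the re-entry after the wait falls outside the hunk shown):

    if (m_exclusiveGCThread != thisThread) {
        GCRoot *stackRoot = NULL;
        void *stack;
        size_t stackSize;

        // Expanded here, not inside the if below, because the macro
        // introduces locals that must outlive stackRoot.
        MMGC_GET_STACK_EXTENTS(this, stack, stackSize);

        if (callerHasActiveRequest) {
            // Root this thread's stack, then suspend the request so the
            // collector's wait for m_requestCount can complete.
            stackRoot = new GCRoot(this, stack, stackSize);
            OnLeaveRequestAlreadyLocked();
        }

        while (m_exclusiveGCThread != NULL)
            m_condDone.Wait();   // assumed: Wait() releases and reacquires m_lock

        // ... the request is re-entered and stackRoot destroyed ...
    }
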
@@ -240,63 +237,66 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +	void GC::CollectImpl()
 +	{
 +#ifndef MMGC_THREADSAFE
  		ReapZCT();
 +#endif
  
  		// if we're in the middle of an incremental mark kill it
  		// FIXME: we should just push it to completion 
-@@ -428,8 +610,6 @@ namespace MMgc
+@@ -428,8 +607,6 @@ namespace MMgc
  		FindUnmarkedPointers();
  #endif
  
 -		CheckThread();
 -
  #ifdef DEBUGGER
  		StopGCActivity();
  #endif
-@@ -437,6 +617,8 @@ namespace MMgc
+@@ -437,6 +614,8 @@ namespace MMgc
  
  	void GC::Trace(const void *stackStart/*=NULL*/, size_t stackSize/*=0*/)
  	{
-+		ASSERT_EXCLUSIVE_GC();
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		SAMPLE_FRAME("[mark]", core());
  
  		// Clear all mark bits.
-@@ -460,8 +642,11 @@ namespace MMgc
+@@ -460,8 +639,11 @@ namespace MMgc
  		SAMPLE_CHECK();
  
  		// invoke lastmark on all callbacks
 -		for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 -			cb->lastmark(work);
 +		{
 +			USING_CALLBACK_LIST(this);
 +			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 +				cb->lastmark(work);
 +		}
  
  		if(stackStart == NULL) {
  			MarkQueueAndStack(work);
-@@ -476,6 +661,17 @@ namespace MMgc
+@@ -476,6 +658,20 @@ namespace MMgc
  
  	void *GC::Alloc(size_t size, int flags/*0*/, int skip/*3*/)
  	{
 +#ifdef MMGC_THREADSAFE
 +		GCAutoLock _lock(m_lock);
 +		GCAssert(!m_gcRunning);
 +#else
 +		CheckThread();
 +#endif
 +		return AllocAlreadyLocked(size, flags, skip);
 +	}
 +
 +	void *GC::AllocAlreadyLocked(size_t size, int flags/*0*/, int skip/*3*/)
 +	{
++#ifdef MMGC_THREADSAFE
++		GCAssert(GCThread::GetCurrentThread()->IsInActiveRequest());
++#endif
  #ifdef DEBUGGER
  		avmplus::AvmCore *core = (avmplus::AvmCore*)GetGCContextVariable(GCV_AVMCORE);
  		if(core)
 @@ -487,9 +683,8 @@ namespace MMgc
  
  #ifdef _DEBUG
  		GCAssert(size + 7 > size);
 -		CheckThread();
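
Note: the Alloc/AllocAlreadyLocked split above is the patch's recurring idiom for avoiding recursive lock acquisition: the public entry point takes m_lock, and internal callers that already hold it (GetWeakRef, for instance) call the AlreadyLocked variant directly. A generic sketch of the idiom, with invented types standing in for MMgc's GCAutoLock and m_lock:

    #include <cstddef>
    #include <mutex>
    #include <new>

    // Illustration only; not MMgc code.
    class Allocator {
    public:
        void *Alloc(std::size_t size) {
            std::lock_guard<std::mutex> guard(m_lock);  // public entry: take the lock
            return AllocAlreadyLocked(size);
        }
    private:
        void *AllocAlreadyLocked(std::size_t size) {
            // Callers must already hold m_lock, so no second acquisition
            // happens here; internal paths call this variant directly.
            return ::operator new(size);
        }
        std::mutex m_lock;
    };
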
@@ -333,37 +333,37 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 -
  		if(collecting) {
  			goto bail;
  		}
 @@ -629,6 +830,8 @@ bail:
  
  	void GC::ClearMarks()
  	{
-+		ASSERT_EXCLUSIVE_GC();
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		for (int i=0; i < kNumSizeClasses; i++) {
  #ifdef MMGC_DRC
  			containsPointersRCAllocs[i]->ClearMarks();
 @@ -641,6 +844,8 @@ bail:
  
  	void GC::Finalize()
  	{
-+		ASSERT_EXCLUSIVE_GC();
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		for(int i=kNumSizeClasses; i-- > 0;) {
  #ifdef MMGC_DRC
  			containsPointersRCAllocs[i]->Finalize();
 @@ -662,7 +867,9 @@ bail:
  	}
  
  	void GC::Sweep(bool force)
 -	{	
 +	{
-+		ASSERT_EXCLUSIVE_GC();
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		SAMPLE_FRAME("[sweep]", core());
  		sweeps++;
  
 @@ -681,10 +888,13 @@ bail:
  		collecting = true;
  
  		// invoke presweep on all callbacks
@@ -402,43 +402,43 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +
  		SAMPLE_CHECK();
  
  		allocsSinceCollect = 0;
 @@ -779,6 +992,7 @@ bail:
  
  	void* GC::AllocBlock(int size, int pageType, bool zero)
  	{
-+		ASSERT_GC_LOCK();
++		MMGC_ASSERT_GC_LOCK(this);
  #ifdef DEBUGGER
  		AllocActivity(size);
  #endif
 @@ -796,7 +1010,7 @@ bail:
  			if(incremental)
  				StartIncrementalMark();
  			else
 -				Collect();
 +				CollectWithBookkeeping(true, true);
  		}
  
  		void *item;
 @@ -837,6 +1051,8 @@ bail:
  
  	void* GC::AllocBlockIncremental(int size, bool zero)
  	{
-+		ASSERT_GC_LOCK();
++		MMGC_ASSERT_GC_LOCK(this);
 +
  		if(!nogc && !collecting) {
  			uint64 now = GetPerformanceCounter();
  			if (marking) {		
 @@ -864,12 +1080,14 @@ bail:
  
  	void* GC::AllocBlockNonIncremental(int size, bool zero)
  	{
-+		ASSERT_GC_LOCK();
++		MMGC_ASSERT_GC_LOCK(this);
 +
  		void *item = heap->Alloc(size, false, zero);
  		if (!item) {
  			if (heap->GetTotalHeapSize() >= collectThreshold &&
  				allocsSinceCollect >= totalGCPages / kFreeSpaceDivisor) 
  			{
 -				Collect();
 +				CollectWithBookkeeping(true, true);
@@ -456,17 +456,17 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +#endif
  #ifdef DEBUGGER
  		AllocActivity(- (int)size);
  #endif
 @@ -905,6 +1128,7 @@ bail:
  
  	void GC::Mark(GCStack<GCWorkItem> &work)
  	{
-+		ASSERT_EXCLUSIVE_GC();
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
  		while(work.Count()) {
  			MarkItem(work);
  		}
 @@ -912,6 +1136,7 @@ bail:
  
  	void GC::MarkGCPages(void *item, uint32 numPages, int to)
  	{
 +		USING_PAGE_MAP();
@@ -649,17 +649,17 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +			int bits = GetPageMapValueAlreadyLocked(val);
  			switch(bits)
  			{
  			case 0:
 @@ -1782,6 +2028,8 @@ bail:
  
  	void GC::FindUnmarkedPointers()
  	{
-+		ASSERT_EXCLUSIVE_GC();
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		if(findUnmarkedPointers)
  		{
  			uintptr m = memStart;
 @@ -1920,6 +2168,11 @@ bail:
  	 */
  	void GC::WhosPointingAtMe(void* me, int recurseDepth, int currentDepth)
  	{
@@ -695,26 +695,26 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 @@ -2389,9 +2647,11 @@ bail:
  		return 1000000;
  		#endif
  	}
 -	
 +
  	void GC::IncrementalMark(uint32 time)
  	{
-+		ASSERT_EXCLUSIVE_GC();
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		SAMPLE_FRAME("[mark]", core());
  		if(m_incrementalWork.Count() == 0 || hitZeroObjects) {
  			FinishIncrementalMark();
 @@ -2446,6 +2706,8 @@ bail:
  
  	void GC::FinishIncrementalMark()
  	{
-+		ASSERT_EXCLUSIVE_GC();
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		// Don't finish an incremental mark (i.e., sweep) if we
  		// are in the midst of a ZCT reap.
  		if (zct.reaping)
 @@ -2477,8 +2739,11 @@ bail:
  		}
  
  		// invoke lastmark on all callbacks
@@ -727,17 +727,17 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +		}
  
  		MarkQueueAndStack(m_incrementalWork);
  
 @@ -2629,6 +2894,7 @@ bail:
  	{
  		uint32 *bits;
  
-+		ASSERT_GC_LOCK();
++		MMGC_ASSERT_GC_LOCK(this);
  		GCAssert(numBytes % 4 == 0);
  
  		#ifdef MMGC_64BIT // we use first 8-byte slot for the free list
 @@ -2700,7 +2966,8 @@ bail:
  
  	void GC::AddCallback(GCCallback *cb)
  	{
 -		CheckThread();
@@ -788,83 +788,117 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 -		while(cb) {
 +		for (GCEdgeCallback *cb = m_edgeCallbacks; cb; cb = cb->nextCB)
  			cb->foundEdgeTo(p);
 -			cb = cb->nextCB;
 -		}
  	}
  
  	void GC::PushWorkItem(GCStack<GCWorkItem> &stack, GCWorkItem item)
-@@ -2762,9 +3032,18 @@ bail:
+@@ -2762,7 +3032,11 @@ bail:
  	GCWeakRef* GC::GetWeakRef(const void *item) 
  	{
  		GC *gc = GetGC(item);
 +#ifdef MMGC_THREADSAFE
 +		GCAutoLock _lock(gc->m_lock);
 +#endif
  		GCWeakRef *ref = (GCWeakRef*) gc->weakRefs.get(item);
 +
  		if(ref == NULL) {
-+#ifdef MMGC_THREADSAFE
-+			void *p = gc->AllocAlreadyLocked(sizeof(GCWeakRef), GC::kFinalize, 4);
-+			ref = new (p) GCWeakRef(item);
-+#else
  			ref = new (gc) GCWeakRef(item);
-+#endif
  			gc->weakRefs.put(item, ref);
- 			item = GetRealPointer(item);
- 			if (GCLargeAlloc::IsLargeBlock(item)) {
-@@ -2824,6 +3103,8 @@ bail:
+@@ -2824,6 +3098,8 @@ bail:
  
  	void GC::FindMissingWriteBarriers()
  	{
-+		ASSERT_EXCLUSIVE_GC();
++		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		if(!incrementalValidation)
  			return;
  
-@@ -2885,6 +3166,7 @@ bail:
+@@ -2885,6 +3161,7 @@ bail:
  	void GC::StartGCActivity()
  	{
  		// invoke postsweep callback
 +		USING_CALLBACK_LIST(this);
  		GCCallback *cb = m_callbacks;
  		while(cb) {
  			cb->startGCActivity();
-@@ -2895,6 +3177,7 @@ bail:
+@@ -2895,6 +3172,7 @@ bail:
  	void GC::StopGCActivity()
  	{
  		// invoke postsweep callback
 +		USING_CALLBACK_LIST(this);
  		GCCallback *cb = m_callbacks;
  		while(cb) {
  			cb->stopGCActivity();
-@@ -2905,6 +3188,7 @@ bail:
+@@ -2905,6 +3183,7 @@ bail:
  	void GC::AllocActivity(int blocks)
  	{
  		// invoke postsweep callback
 +		USING_CALLBACK_LIST(this);
  		GCCallback *cb = m_callbacks;
  		while(cb) {
  			cb->allocActivity(blocks);
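
Note: the three hunks above give every callback-list walk the same shape: take the list lock (or just CheckThread() in non-threadsafe builds) for the duration of the loop. Reconstructed from the context lines, StartGCActivity after the patch reads approximately:

    void GC::StartGCActivity()
    {
        // Hold m_callbackListLock while walking m_callbacks; the stale
        // "invoke postsweep callback" comment from the original source
        // is dropped in this reconstruction.
        USING_CALLBACK_LIST(this);
        GCCallback *cb = m_callbacks;
        while (cb) {
            cb->startGCActivity();
            cb = cb->nextCB;
        }
    }
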
 diff --git a/MMgc/GC.h b/MMgc/GC.h
 --- a/MMgc/GC.h
 +++ b/MMgc/GC.h
-@@ -195,7 +195,10 @@ namespace MMgc
+@@ -144,23 +144,33 @@
+ #define MMGC_GET_STACK_EXTENTS(_gc, _stack, _size) \
+ 		int regs[7];\
+ 		asm("stmia %0,{r4-r10}" : : "r" (regs));\
+-		asm("mov %0,sp" : "=r" (_stack));\
+-		_size = (uintptr)StackTop - (uintptr)_stack;
++               asm("mov %0,sp" : "=r" (_stack));\
++               _size = (uintptr)StackTop - (uintptr)_stack;
+ 
+ #elif defined MMGC_SPARC
+ #define MMGC_GET_STACK_EXTENTS(_gc, _stack, _size) \
+-		asm("ta 3");\
+-		_stack = (void *) _getsp();\
+-		_size = (uintptr)_gc->GetStackTop() - (uintptr)_stack;
++               asm("ta 3");\
++               _stack = (void *) _getsp();\
++               _size = (uintptr)_gc->GetStackTop() - (uintptr)_stack;
++#endif
++
++#ifdef MMGC_THREADSAFE
++#define MMGC_ASSERT_GC_LOCK(gc)  GCAssert((gc)->m_lock.IsHeld() || (gc)->destroying)
++#define MMGC_ASSERT_EXCLUSIVE_GC(gc) \
++    GCAssert(((gc)->m_gcRunning \
++                         && (gc)->m_exclusiveGCThread == GCThread::GetCurrentThread()) \
++                        || (gc)->destroying)
++#else
++#define MMGC_ASSERT_GC_LOCK(gc)      ((void) 0)
++#define MMGC_ASSERT_EXCLUSIVE_GC(gc) ((void) 0)
+ #endif
+ 
+ #ifdef DEBUGGER
+ namespace avmplus
+ {
+-	class AvmCore;
++       class AvmCore;
+ }
+ #endif
+-
+ namespace MMgc
+ {
+ 	/**
+@@ -198,7 +208,10 @@ namespace MMgc
  	private:
  		FixedMalloc* fm;
  		GC * gc;
 +
 +		/** @access Requires(gc->m_rootListLock) */
  		GCRoot *next;
 +		/** @access Requires(gc->m_rootListLock) */
  		GCRoot *prev;
  		const void *object;
  		size_t size;
-@@ -220,16 +223,25 @@ namespace MMgc
+@@ -223,16 +236,25 @@ namespace MMgc
  		void Destroy();
  
  		/**
 -		 * This method is called from GC::Collect(), before
 -		 * anything else, even in cases where it's logically
 -		 * certain that collection <em>will not start</em>
 -		 * (e.g. because we're already collecting, or GC is
 +		 * GC::Collect() fires this callback before anything else, even in
@@ -884,42 +918,42 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		/**
 +		 * GC::Collect() fires this callback after everything else,
 +		 * only if collection actually happened in this thread.
 +		 */
 +		virtual void postcollect() {}
  
  		/**
  		 * This method is invoked during the last mark phase
-@@ -241,6 +253,8 @@ namespace MMgc
+@@ -244,6 +266,8 @@ namespace MMgc
  		 * provided by the GCCallback base class.  The
  		 * corresponding methods of <code>class GC</code> are
  		 * private.)
 +		 *
 +		 * @access Requires(gc->exclusiveGC)
  		 */
  		virtual void lastmark(GCStack<GCWorkItem> &) {}
  
-@@ -248,11 +262,15 @@ namespace MMgc
+@@ -251,11 +275,15 @@ namespace MMgc
  		 * This method is invoked after all marking and before any
  		 * sweeping, useful for bookkeeping based on whether things
  		 * got marked
 +		 *
 +		 * @access Requires(gc->exclusiveGC)
  		 */
  		virtual void presweep() {}
  
  		/**
  		 * This method is invoked after all sweeping
 +		 *
 +		 * @access Requires(gc->exclusiveGC)
  		 */
  		virtual void postsweep() {}
  
-@@ -268,6 +286,43 @@ namespace MMgc
+@@ -271,6 +299,43 @@ namespace MMgc
  		// called after a ZCT reap completes
  		virtual void postreap() {}
  #endif
 +
 +		/**
 +		 * This callback is the first thing a collecting thread does once it
 +		 * acquires exclusiveGC status.  When this is called, no other threads
 +		 * are running in requests and the calling thread holds gc->m_lock.
@@ -953,17 +987,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * See note at GCCallback::enterExclusiveGC().
 +		 *
 +		 * @access Requires(gc->exclusiveGC && gc->m_lock)
 +		 */
 +		virtual void leaveExclusiveGC() {}
  
  		/**
  		 * This method is called before an RC object is reaped
-@@ -312,6 +367,17 @@ namespace MMgc
+@@ -315,6 +380,17 @@ namespace MMgc
  	{
  		friend class GC;
  	public:
 +		/**
 +		 * Create an edge callback.
 +		 *
 +		 * If `gc` is non-null, this constructor adds this GCEdgeCallback to a
 +		 * list of callbacks managed by `gc`.  This means the new object
@@ -971,17 +1005,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * constructed yet, potentially resulting in "Pure virtual method
 +		 * called" or some other crash.  To avoid this race condition, call
 +		 * this constructor with a non-null `gc` only from a thread that is in
 +		 * a request on `gc`.
 +		 */
  		GCEdgeCallback(GC *gc = NULL);
  		virtual ~GCEdgeCallback();
  
-@@ -509,12 +575,19 @@ namespace MMgc
+@@ -512,12 +588,19 @@ namespace MMgc
  		 * will cause a garbage collection.  This makes code run
  		 * abysmally slow, but can be useful for detecting mark
  		 * bugs.
 +		 *
 +		 * The GC reads this flag only when holding the GC lock.  It is best
 +		 * to set it as soon as the GC is created.
 +		 *
 +		 * @access Requires(m_lock)
@@ -991,19 +1025,19 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
  		/**
  		 * nogc is a debugging flag.  When set, garbage collection
  		 * never happens.
 +		 *
 +		 * @access Requires(m_lock)
  		 */
  		bool nogc;
  
-@@ -535,8 +608,28 @@ namespace MMgc
- 		 */
- 		int ISD;
+@@ -533,8 +616,28 @@ namespace MMgc
+ 		bool validateDefRef;		
+ 		bool keepDRCHistory;
  
 +		/**
 +		 * Expand, don't collect, until we reach this threshold. Units are
 +		 * pages, not KB, just like GCHeap::GetTotalHeapSize().
 +		 *
 +		 * In an MMGC_THREADSAFE build, the GC reads this configuration value
 +		 * only when holding the GC lock.  Set it during
 +		 * initialization, before the GC is visible to multiple threads.
@@ -1020,47 +1054,47 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * stop-the-world collection.  Set it during initialization, before
 +		 * the GC is visible to multiple threads.
 +		 *
 +		 * @access Requires(exclusiveGC)
 +		 */
  		bool gcstats;
  
  		bool dontAddToZCTDuringCollection;
-@@ -546,6 +639,14 @@ namespace MMgc
+@@ -544,6 +647,14 @@ namespace MMgc
  		bool incrementalValidationPedantic;
  #endif
  
 +		/**
 +		 * Configuration flag enabling incremental collection.
 +		 *
 +		 * In an MMGC_THREADSAFE build, the GC reads this flag only when
 +		 * holding the GC lock.  Set it during initialization.
 +		 *
 +		 * @access Requires(m_lock)
 +		 */
  		bool incremental;
  
  		// -- Interface
-@@ -553,7 +654,13 @@ namespace MMgc
+@@ -551,7 +662,13 @@ namespace MMgc
  		virtual ~GC();
  		
  		/**
 -		 * Causes an immediate garbage collection
 +		 * Causes an immediate garbage collection.
 +		 *
 +		 * In an MMGC_THREADSAFE build, the caller must not be inside a
 +		 * request.  If the caller is inside a request, call
 +		 * CollectFromRequest() instead.
 +		 *
 +		 * @access Requires(!m_lock && !request)
  		 */
  		void Collect();
  
-@@ -578,15 +685,26 @@ namespace MMgc
+@@ -576,15 +693,26 @@ namespace MMgc
  
  		/**
  		 * Main interface for allocating memory.  Default flags is no
 -		 * finalization, contains pointers is set and zero is set
 +		 * finalization, contains pointers is set and zero is set.
 +		 *
 +		 * Do not call this from a finalizer.
 +		 *
@@ -1078,57 +1112,57 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
  		 * Calloc(num, sizeof(thing))
 +		 *
 +		 * Do not call this from a finalizer.
 +		 *
 +		 * @access Requires(request)
  		 */
  		void *Calloc(size_t num, size_t elsize, int flags=0, int skip=3);
  
-@@ -594,11 +712,15 @@ namespace MMgc
+@@ -592,11 +720,15 @@ namespace MMgc
  		 * One can free a GC allocated pointer, this will throw an assertion
  		 * if called during the Sweep phase (ie via a finalizer) it can only be
  		 * used outside the scope of a collection
 +		 *
 +		 * @access Requires(request)
  		 */
  		void Free(void *ptr);
  
  		/**
  		 * return the size of a piece of memory, may be bigger than what was asked for
 +		 *
 +		 * @access Requires(request || exclusiveGC)
  		 */
  		static size_t Size(const void *ptr)
  		{
-@@ -612,6 +734,8 @@ namespace MMgc
+@@ -610,6 +742,8 @@ namespace MMgc
  		/**
  		 * Tracers should employ GetMark and SetMark to
  		 * set the mark bits during the mark pass.
 +		 *
 +		 * @access Requires(request || exclusiveGC)
  		 */
  		static int GetMark(const void *item)
  		{
-@@ -718,9 +842,13 @@ namespace MMgc
+@@ -716,9 +850,13 @@ namespace MMgc
  		}
  
  		/**
 -		 * Used by sub-allocators to obtain memory
 +		 * Used by sub-allocators to obtain memory.
 +		 *
 +		 * @access Requires(m_lock)
  		 */
  		void* AllocBlock(int size, int pageType, bool zero=true);
 +
 +		/** @access Requires((request && m_lock) || exclusiveGC) */
  		void FreeBlock(void *ptr, uint32 size);
  
  		GCHeap *GetGCHeap() const { return heap; }
-@@ -748,11 +876,19 @@ namespace MMgc
+@@ -746,11 +884,19 @@ namespace MMgc
  		 */
  		bool IncrementalMarking() { return marking; }
  
 -		// a magical write barriers that finds the container's address and the GC, just
 -		// make sure address is a pointer to a GC page, only used by WB smart pointers
 +		/**
 +		 * A magical write barrier that finds the container's address and the
 +		 * GC, just make sure @a address is a pointer to a GC page. Only used
@@ -1140,17 +1174,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +
 +		/** @access Requires(request) */
  		static void WriteBarrierNoSub(const void *address, const void *value);
  
 +		/** @access Requires(request) */
  		void writeBarrier(const void *container, const void *address, const void *value)
  		{
  			GCAssert(IsPointerToGCPage(container));
-@@ -764,13 +900,19 @@ namespace MMgc
+@@ -762,13 +908,19 @@ namespace MMgc
  			WriteBarrierWrite(address, value);
  		}
  
 -		// optimized version with no RC checks or pointer masking
 +		/**
 +		 * optimized version with no RC checks or pointer masking
 +		 *
 +		 * @access Requires(request)
@@ -1164,43 +1198,43 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * Write barrier when the value could be a pointer with anything in the lower 3 bits
 +		 * FIXME: maybe assert that the lower 3 bits are either zero or a pointer type signature,
 +		 * this would require the application to tell us what bit patterns are pointers.
 +		 *
 +		 * @access Requires(request)
  		 */
  		__forceinline void WriteBarrierNoSubstitute(const void *container, const void *value)
  		{
-@@ -778,9 +920,11 @@ namespace MMgc
+@@ -776,9 +928,11 @@ namespace MMgc
  		}
  			
  		/**
 -		 AVM+ write barrier, valuePtr is known to be pointer and the caller
 -		does the write
 -		*/
 +		 * AVM+ write barrier, valuePtr is known to be pointer and the caller
 +		 * does the write.
 +		 *
 +		 * @access Requires(request)
 +		 */
  		__forceinline void WriteBarrierTrap(const void *container, const void *valuePtr)
  		{
  			GCAssert(IsPointerToGCPage(container));
-@@ -804,8 +948,10 @@ namespace MMgc
+@@ -802,8 +956,10 @@ namespace MMgc
  
  	public:
  
 +		/** @access Requires(request || exclusiveGC) */
  		bool ContainsPointers(const void *item);
  
 +		/** @access Requires(request) */
  		void *FindBeginning(const void *gcItem)
  		{
  			GCAssert(gcItem != NULL);
-@@ -841,11 +987,23 @@ namespace MMgc
+@@ -839,11 +995,23 @@ namespace MMgc
  		bool IsRCObject(const void *) { return false; }
  #endif
  
 -
 -		bool Collecting() const { return collecting; }
 -
 +		/**
 +		 * True during Sweep phase.  Application code can use this to
@@ -1217,17 +1251,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +
 +		/** @access Requires(request || exclusiveGC) */
  		bool IsGCMemory (const void *);
  
 +		/** @access Requires(request || exclusiveGC) */
  		bool IsQueued(const void *item);
  
  		static uint64 GetPerformanceCounter();
-@@ -856,30 +1014,81 @@ namespace MMgc
+@@ -854,30 +1022,81 @@ namespace MMgc
  			return (double(GC::GetPerformanceCounter() - start) * 1000) / GC::GetPerformanceFrequency();
  		}
  
 +#ifndef MMGC_THREADSAFE
  		void DisableThreadCheck() { disableThreadCheck = true; }
 -		
 -		uint64 t0;
 +#endif
@@ -1304,39 +1338,39 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * incremental mark.  This means the next mark will force the GC cycle
 +		 * through to completion.
 +		 *
 +		 * @access ReadWrite(request, exclusiveGC)
 +		 */
  		bool hitZeroObjects;
  
  		// called at some apropos moment from the mututor, ideally at a point
-@@ -891,7 +1100,10 @@ namespace MMgc
+@@ -889,7 +1108,10 @@ namespace MMgc
  
  		bool Destroying() { return destroying; }
  
 +		/** @access Requires(request) */
  		static GCWeakRef *GetWeakRef(const void *obj);
 +
 +		/** @access Requires((request && m_lock) || exclusiveGC) */
  		void ClearWeakRef(const void *obj);
  
  		uintptr	GetStackTop() const;		
-@@ -907,7 +1119,10 @@ namespace MMgc
+@@ -905,7 +1127,10 @@ namespace MMgc
  		// FIXME: only used for FixedAlloc, GCAlloc sized dynamically
  		const static int kPageUsableSpace = 3936;
  
 +		/** @access Requires(request && m_lock) */
  		uint32 *GetBits(int numBytes, int sizeClass);
 +
 +		/** @access Requires((request && m_lock) || exclusiveGC) */
  		void FreeBits(uint32 *bits, int sizeClass)
  		{
  #ifdef _DEBUG
-@@ -916,32 +1131,55 @@ namespace MMgc
+@@ -914,32 +1139,55 @@ namespace MMgc
  			*(uint32**)bits = m_bitsFreelists[sizeClass];
  			m_bitsFreelists[sizeClass] = bits;
  		}
 +
 +		/** @access Requires((request && m_lock) || exclusiveGC) */
  		uint32 *m_bitsFreelists[kNumSizeClasses];
 +		/** @access Requires((request && m_lock) || exclusiveGC) */
  		uint32 *m_bitsNext;
@@ -1388,17 +1422,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 -		
 +
 +		/** @access ReadWrite(request, exclusiveGC) */
  		uint64 lastMarkTicks;
 +		/** @access ReadWrite(request, exclusiveGC) */
  		uint64 lastSweepTicks;
  
  		const static int16 kSizeClasses[kNumSizeClasses];		
-@@ -953,14 +1191,44 @@ namespace MMgc
+@@ -951,14 +1199,44 @@ namespace MMgc
  		// 0 - not in use
  		// 1 - used by GCAlloc
  		// 3 - used by GCLargeAlloc
 +
 +		/** @access Requires(pageMapLock) */
  		uintptr memStart;
 +		/** @access Requires(pageMapLock) */
  		uintptr memEnd;
@@ -1433,17 +1467,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +			return GetPageMapValueAlreadyLocked(addr);
 +		}
 +
 +		/** @access Requires(pageMapLock) */
 +		inline int GetPageMapValueAlreadyLocked(uintptr addr) const
  		{
  			uintptr index = (addr-memStart) >> 12;
  
-@@ -972,7 +1240,20 @@ namespace MMgc
+@@ -970,7 +1248,20 @@ namespace MMgc
  			//return (pageMap[addr >> 2] & (3<<shiftAmount)) >> shiftAmount;
  			return (pageMap[index >> 2] >> shiftAmount) & 3;
  		}
 +
 +		/**
 +		 * Set the pageMap bits for the given address.  Those bits must be
 +		 * zero beforehand.
 +		 *
@@ -1454,17 +1488,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		/**
 +		 * Zero out the pageMap bits for the given address.
 +		 *
 +		 * @access Requires(pageMapLock)
 +		 */
  		void ClearPageMapValue(uintptr addr);
  
  		void MarkGCPages(void *item, uint32 numpages, int val);
-@@ -991,69 +1272,163 @@ namespace MMgc
+@@ -989,69 +1280,163 @@ namespace MMgc
  		GCAlloc *noPointersAllocs[kNumSizeClasses];
  		GCLargeAlloc *largeAlloc;
  		GCHeap *heap;
 -		
 +
 +		/** @access Requires(m_lock) */
  		void* AllocBlockIncremental(int size, bool zero=true);
 +
@@ -1626,41 +1660,41 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * even if the calling thread is not in a request at all, so this
 +		 * policy would be insufficient for m_callbacks.  Second,
 +		 * m_edgeCallbacks fires very frequently during marking, so a
 +		 * lock-free policy is probably much faster.
 +		 */
  		GCEdgeCallback *m_edgeCallbacks;
  		void AddEdgeCallback(GCEdgeCallback *cb);
  		void RemoveEdgeCallback(GCEdgeCallback *cb);
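
Note: the comment above explains why m_edgeCallbacks gets no lock: registration is documented (in the GCEdgeCallback constructor comment earlier in this patch) to happen only from a thread inside a request, and firing happens only during marking. A hypothetical subclass showing the prescribed usage:

    // EdgeLogger is invented for illustration.  Constructing it with a
    // non-null gc self-registers it, so this must run on a thread that
    // is inside a request on gc; otherwise a collection could fire
    // foundEdgeTo() before the vtable is fully set up.
    class EdgeLogger : public GCEdgeCallback {
    public:
        explicit EdgeLogger(GC *gc) : GCEdgeCallback(gc) {}
        virtual void foundEdgeTo(const void *p) { (void)p; /* record the edge */ }
    };
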
-@@ -1061,9 +1436,10 @@ namespace MMgc
+@@ -1059,9 +1444,10 @@ namespace MMgc
  		/**
  		 * Notify GCEdgeCallbacks of an edge.
  		 *
 -		 * p is a "real pointer".  This method converts it to
 -		 * a "user pointer" using GetUserPointer() before
 -		 * calling callbacks.
 +		 * p is a "real pointer".  This method converts it to a "user pointer"
 +		 * using GetUserPointer() before calling callbacks.
 +		 *
 +		 * @access Requires(exclusiveGC)
  		 */
  		void FireFoundEdgeTo(const void *p);
  
-@@ -1086,7 +1462,9 @@ private:
+@@ -1084,7 +1470,9 @@ private:
  private:
  #endif
  
 +#ifndef MMGC_THREADSAFE
  		void CheckThread();
 +#endif
  
  		void PushWorkItem(GCStack<GCWorkItem> &stack, GCWorkItem item);
  
-@@ -1103,17 +1481,37 @@ private:
+@@ -1101,17 +1489,37 @@ private:
  		void CheckFreelists();
  
  		int m_gcLastStackTrace;
 +
 +		/**
 +		 * Used by FindUnmarkedPointers.
 +		 *
 +		 * @access Requires(exclusiveGC)
@@ -1692,17 +1726,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * Scan all GC memory (skipping roots). If a GC object is black make sure
 +		 * it has no pointers to white objects.
 +		 *
 +		 * @access Requires(exclusiveGC)
 +		 */
  		void FindMissingWriteBarriers();
  #ifdef WIN32
  		// store a handle to the thread that create the GC to ensure thread safety
-@@ -1134,7 +1532,187 @@ public:
+@@ -1132,7 +1540,187 @@ public:
  #ifdef _DEBUG
  		// Dump a list of objects that have pointers to the given location.
  		void WhosPointingAtMe(void* me, int recurseDepth=0, int currentDepth=0);
 +
 +		/**
 +		 * Used by WhosPointingAtMe.
 +		 * @access Requires(pageMapLock)
 +		 */
@@ -1883,45 +1917,38 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 */
 +		GCCondition m_condNoRequests;
  #endif
  	};
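
Note: taken together, the new GC.h members describe a two-state discipline: mutator threads bracket heap access with requests, and a collecting thread takes exclusiveGC status once m_requestCount drains. In outline (the pairing shown is an assumption about intended usage, built from OnEnterRequest/OnLeaveRequest and the Collect/CollectFromRequest split documented above):

    void MutatorThread(GC *gc)
    {
        gc->OnEnterRequest();      // Alloc/Free/GC reads are legal only in here
        void *obj = gc->Alloc(32);
        // ... use obj ...
        gc->OnLeaveRequest();      // a collector may now exclude this thread
    }

    void CollectorThread(GC *gc)
    {
        gc->Collect();             // Requires(!m_lock && !request); from inside
                                   // a request, call CollectFromRequest() instead
    }
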
  
 diff --git a/MMgc/GCAlloc.cpp b/MMgc/GCAlloc.cpp
 --- a/MMgc/GCAlloc.cpp
 +++ b/MMgc/GCAlloc.cpp
-@@ -127,6 +127,10 @@ namespace MMgc
+@@ -127,6 +127,8 @@ namespace MMgc
  
  	GCAlloc::GCBlock* GCAlloc::CreateChunk()
  	{
-+#ifdef MMGC_THREADSAFE
-+		GCAssert(m_gc->m_lock.IsHeld());
-+#endif
++		MMGC_ASSERT_GC_LOCK(m_gc);
 +
  		// Get space in the bitmap.  Do this before allocating the actual block,
  		// since we might call GC::AllocBlock for more bitmap space and thus
  		// cause some incremental marking.
-@@ -225,6 +229,9 @@ namespace MMgc
+@@ -225,6 +227,7 @@ namespace MMgc
  
  	void* GCAlloc::Alloc(size_t size, int flags)
  	{
-+#ifdef MMGC_THREADSAFE
-+		GCAssert(m_gc->m_lock.IsHeld());
-+#endif
++		MMGC_ASSERT_GC_LOCK(m_gc);
  		(void)size;
  		GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");
  start:
-@@ -374,6 +381,11 @@ start:
+@@ -374,6 +377,8 @@ start:
  
  	void GCAlloc::Finalize()
  	{
-+#ifdef MMGC_THREADSAFE
-+		GCAssert(m_gc->m_exclusiveGCThread == GCThread::GetCurrentThread()
-+		         || m_gc->destroying);
-+#endif
++		MMGC_ASSERT_EXCLUSIVE_GC(m_gc);
 +
  		m_finalized = true;
  		// Go through every item of every block.  Look for items
  		// that are in use but not marked as reachable, and delete
 diff --git a/MMgc/GCLargeAlloc.cpp b/MMgc/GCLargeAlloc.cpp
 --- a/MMgc/GCLargeAlloc.cpp
 +++ b/MMgc/GCLargeAlloc.cpp
 @@ -53,6 +53,9 @@ namespace MMgc
@@ -2308,28 +2335,39 @@ new file mode 100644
 +}
 +
 +#endif // MMGC_THREADSAFE
 +
 +#endif // __MMGC_GCTHREADS__
 diff --git a/MMgc/GCWeakRef.h b/MMgc/GCWeakRef.h
 --- a/MMgc/GCWeakRef.h
 +++ b/MMgc/GCWeakRef.h
-@@ -63,6 +63,11 @@ namespace MMgc
- 		{
- 			return gc->Alloc(size, GC::kFinalize, 4);
+@@ -58,10 +58,21 @@ namespace MMgc
+ 			}
  		}
-+		// Placement new.
-+		void *operator new(size_t, void *p)
-+		{
-+			return p;
-+		}
+ 	private:
+-		// override new so we can be tell the GC we don't contain pointers
++		/**
++		 * When allocating a GCWeakRef, tell the GC we don't contain pointers
++		 * (overriding the default base-class behavior).
++		 *
++		 * In MMGC_THREADSAFE builds, the caller always holds m_lock.
++		 *
++		 * @access Requires(gc->m_lock)
++		 */
+ 		static void *operator new(size_t size, GC *gc)
+ 		{
++#ifdef MMGC_THREADSAFE
++			return gc->AllocAlreadyLocked(size, GC::kFinalize, 4);
++#else
+ 			return gc->Alloc(size, GC::kFinalize, 4);
++#endif
+ 		}
  		// private, only GC can access
  		GCWeakRef(const void *obj) : m_obj(obj) 
- 		{
 diff --git a/MMgc/MMgc.h b/MMgc/MMgc.h
 --- a/MMgc/MMgc.h
 +++ b/MMgc/MMgc.h
 @@ -125,6 +125,7 @@ namespace MMgc
  
  #include "GCTypes.h"
  #include "GCStack.h"
 +#include "GCThreads.h"
new file mode 100644
--- /dev/null
+++ b/mmgc-threadsafe-gctests
@@ -0,0 +1,57 @@
+diff --git a/MMgc/GCTests.cpp b/MMgc/GCTests.cpp
+--- a/MMgc/GCTests.cpp
++++ b/MMgc/GCTests.cpp
+@@ -55,6 +55,15 @@ namespace MMgc
+ {
+ 	GC *gc;
+ 
++	void collect()
++	{
++#ifdef MMGC_THREADSAFE
++		gc->CollectFromRequest();
++#else
++		gc->Collect();
++#endif
++	}
++
+ 	GCWeakRef* createWeakRef(int extra = 0)
+ 	{
+ 		// Bogusly use up some extra stack.
+@@ -78,9 +87,9 @@ namespace MMgc
+ 	void weakRefSweepSmall()
+ 	{
+ 		GCWeakRef *ref = createWeakRef();
+-		gc->Collect();
++		collect();
+ 		gc->CleanStack(true);
+-		gc->Collect();
++		collect();
+ 		(void)ref;
+ 		GCAssert(ref->get() == NULL);
+ 	}
+@@ -88,9 +97,9 @@ namespace MMgc
+ 	void weakRefSweepLarge()
+ 	{
+ 		GCWeakRef *ref = createWeakRef(5000);
+-		gc->Collect();
++		collect();
+ 		gc->CleanStack(true);
+-		gc->Collect();
++		collect();
+ 		(void)ref;
+ 		GCAssert(ref->get() == NULL);
+ 	}
+@@ -166,11 +175,13 @@ namespace MMgc
+ 	void RunGCTests(GC *g)
+ 	{
+ 		gc = g;
++		gc->OnEnterRequest();
+ 		weakRefSweepSmall();
+ 		weakRefSweepLarge();
+ 		weakRefFreeSmall();
+ 		weakRefFreeLarge();
+ 		drcApolloTest();
++		gc->OnLeaveRequest();
+ 	}
+ }
+ 
--- a/series
+++ b/series
@@ -1,9 +1,11 @@
+tweak-esc-main.sh
 const-workitem.patch
 gcstack-access
 workitems-notgc-noassert
 gc-graph #+graphviz
 alloc-backtrace #+graphviz
 configure-with-threadsafe-mmgc #+threadsafe
-mmgc-threadsafe-take2 #+threadsafe
+mmgc-threadsafe #+threadsafe
+mmgc-threadsafe-gctests #+threadsafe
 mmgc-graphviz #+jorendorff-graphviz
 mmgc-bit-checks #+threadsafe
new file mode 100644
--- /dev/null
+++ b/tweak-esc-main.sh
@@ -0,0 +1,7 @@
+diff --git a/esc/build/main.sh b/esc/build/main.sh
+--- a/esc/build/main.sh
++++ b/esc/build/main.sh
+@@ -1,1 +1,2 @@
+-/work/tamarin/bin/shell ../bin/debug.es.abc ../bin/ast.es.abc ../bin/ast-decode.es.abc ../bin/ast-encode.es.abc ../bin/util.es.abc ../bin/lex-char.es.abc ../bin/lex-token.es.abc ../bin/lex-scan.es.abc ../bin/parse.es.abc ../bin/util-tamarin.es.abc ../bin/bytes-tamarin.es.abc ../bin/util-tamarin.es.abc ../bin/asm.es.abc ../bin/abc.es.abc ../bin/emit.es.abc ../bin/cogen.es.abc ../bin/cogen-stmt.es.abc ../bin/cogen-expr.es.abc ../bin/main.es.abc -- $1
++AVM=${AVM:-/work/tamarin/bin/shell}
++$AVM ../bin/debug.es.abc ../bin/ast.es.abc ../bin/ast-decode.es.abc ../bin/ast-encode.es.abc ../bin/util.es.abc ../bin/lex-char.es.abc ../bin/lex-token.es.abc ../bin/lex-scan.es.abc ../bin/parse.es.abc ../bin/util-tamarin.es.abc ../bin/bytes-tamarin.es.abc ../bin/util-tamarin.es.abc ../bin/asm.es.abc ../bin/abc.es.abc ../bin/emit.es.abc ../bin/cogen.es.abc ../bin/cogen-stmt.es.abc ../bin/cogen-expr.es.abc ../bin/main.es.abc -- $1