Merge changes from jorendorff's actionmonkey-tamarin-patches
author benjamin@smedbergs.us
Thu, 20 Dec 2007 10:57:35 -0500
changeset 27 c1c4660628af
parent 26 9b49c7cbe92c (current diff)
parent 25 bc21193f2b1c (diff)
child 28 70c871a7a5e6
push id 1
push user bsmedberg@mozilla.com
push date Mon, 21 Apr 2008 01:54:18 +0000
deleted file mode 100644
--- a/configure-with-threadsafe-mmgc
+++ /dev/null
@@ -1,89 +0,0 @@
-diff --git a/configure b/configure
---- a/configure
-+++ b/configure
-@@ -35,5 +35,12 @@
- #
- # ***** END LICENSE BLOCK *****
- 
--echo "running $PYTHON $0.py --ignore-unknown-flags $@"
--exec $PYTHON $0.py --ignore-unknown-flags $@
-+args=''
-+while [ $# -ge 1 ]; do
-+    args="$args '$1'"
-+    shift
-+done
-+
-+echo "running $PYTHON $0.py --ignore-unknown-flags $args"
-+
-+eval "'$PYTHON' '$0.py' --ignore-unknown-flags $args"
-diff --git a/configure.py b/configure.py
---- a/configure.py
-+++ b/configure.py
-@@ -75,10 +75,13 @@ DEBUG_CXXFLAGS = ""
- DEBUG_CXXFLAGS = ""
- DEBUG_LDFLAGS = ""
- OS_LIBS = []
-+OS_LDFLAGS = ""
- MMGC_CPPFLAGS = ""
- AVMSHELL_CPPFLAGS = ""
- AVMSHELL_LDFLAGS = ""
- MMGC_DEFINES = {'SOFT_ASSERTS': None}
-+NSPR_INCLUDES = ""
-+NSPR_LDOPTS = ""
- 
- MMGC_INTERIOR_PTRS = o.getBoolArg('mmgc-interior-pointers', True)
- if MMGC_INTERIOR_PTRS:
-@@ -88,6 +91,16 @@ if MMGC_DYNAMIC:
- if MMGC_DYNAMIC:
-     MMGC_DEFINES['MMGC_DLL'] = None
-     MMGC_CPPFLAGS += "-DMMGC_IMPL "
-+
-+MMGC_THREADSAFE = o.getBoolArg('threadsafe-mmgc', False)
-+if MMGC_THREADSAFE:
-+    MMGC_DEFINES['MMGC_THREADSAFE'] = None
-+    NSPR_INCLUDES = o.getStringArg('nspr-includes')
-+    MMGC_CPPFLAGS += NSPR_INCLUDES + " "
-+    APP_CPPFLAGS += NSPR_INCLUDES + " "
-+
-+    NSPR_LDOPTS = o.getStringArg('nspr-ldopts')
-+    OS_LDFLAGS += " " + NSPR_LDOPTS
- 
- if config.COMPILER_IS_GCC:
-     APP_CXXFLAGS = "-fno-exceptions -Werror -Wall -Wno-reorder -Wno-switch -Wno-invalid-offsetof -Wno-uninitialized -Wno-strict-aliasing -fmessage-length=0 -finline-functions -finline-limit=65536 "
-@@ -188,6 +201,7 @@ config.subst("DEBUG_CXXFLAGS", DEBUG_CXX
- config.subst("DEBUG_CXXFLAGS", DEBUG_CXXFLAGS)
- config.subst("DEBUG_LDFLAGS", DEBUG_LDFLAGS)
- config.subst("OS_LIBS", " ".join(OS_LIBS))
-+config.subst("OS_LDFLAGS", OS_LDFLAGS)
- config.subst("MMGC_CPPFLAGS", MMGC_CPPFLAGS)
- config.subst("AVMSHELL_CPPFLAGS", AVMSHELL_CPPFLAGS)
- config.subst("AVMSHELL_LDFLAGS", AVMSHELL_LDFLAGS)
-diff --git a/pcre/config.h b/pcre/config.h
---- a/pcre/config.h
-+++ b/pcre/config.h
-@@ -40,7 +40,9 @@ them both to 0; an emulation function wi
- #define HAVE_LIMITS_H 1
- 
- /* Define to 1 if the system has the type `long long'. */
-+#ifndef HAVE_LONG_LONG
- #define HAVE_LONG_LONG 1
-+#endif
- 
- /* Define to 1 if you have the `memmove' function. */
- #define HAVE_MEMMOVE 1
-diff --git a/shell/avmshell.h b/shell/avmshell.h
---- a/shell/avmshell.h
-+++ b/shell/avmshell.h
-@@ -48,6 +48,12 @@
- // #define AVMPLUS_INTERACTIVE
- 
- using namespace avmplus;
-+
-+// avmplus and NSPR both typedef some basic types: we must disambiguate
-+using avmplus::uint64;
-+using avmplus::uint32;
-+using avmplus::uint16;
-+using avmplus::uint8;
- 
- namespace avmshell
- {
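
A small pattern worth noting in the pcre/config.h hunk of the deleted patch above: wrapping a hardcoded default in #ifndef keeps it overridable from the compiler command line (e.g. -DHAVE_LONG_LONG=0). A minimal standalone sketch of the same idea; only HAVE_LONG_LONG comes from the patch, the rest is illustrative:

    /* Overridable default: a build system may pass -DHAVE_LONG_LONG=0. */
    #ifndef HAVE_LONG_LONG
    #define HAVE_LONG_LONG 1
    #endif

    #if HAVE_LONG_LONG
    typedef unsigned long long sketch_uint64;                /* native 64-bit type */
    #else
    typedef struct { unsigned long hi, lo; } sketch_uint64;  /* emulated fallback */
    #endif
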
new file mode 100644
--- /dev/null
+++ b/mmgc-maybegc
@@ -0,0 +1,70 @@
+diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
+--- a/MMgc/GC.cpp
++++ b/MMgc/GC.cpp
+@@ -745,6 +745,42 @@ namespace MMgc
+ 		return item;
+ 	}
+ 
++	void GC::MaybeGC(bool callerHasActiveRequest)
++	{
++		if (greedy) {
++			CollectWithBookkeeping(false, callerHasActiveRequest);
++		} else if (marking) {
++			IncrementalMark();
++		} else {
++#ifdef MMGC_THREADSAFE
++			GCAutoLock _lock(m_lock);
++#endif
++
++			// Burst logic to prevent collections from happening back to back.
++			uint64 now = GetPerformanceCounter();
++			if (now - lastSweepTicks <= kMarkSweepBurstTicks)
++				return;
++
++			// Definitely start GC if the heap expanded due to FixedMalloc
++			// allocations.  The same heuristic applies to incremental and
++			// non-incremental.
++			bool force = (heapSizeAtLastAlloc > collectThreshold &&
++						  heapSizeAtLastAlloc < heap->GetTotalHeapSize());
++
++			if (incremental) {
++				if (force || (totalGCPages > collectThreshold &&
++							  allocsSinceCollect * kFreeSpaceDivisor >= totalGCPages)) {
++					StartIncrementalMark();
++				}
++			} else {
++				// Collect only if the heap is completely full (a conservative
++				// heuristic).
++				if (force || heap->GetFreeHeapSize() == 0)
++					CollectWithBookkeeping(true, callerHasActiveRequest);
++			}
++		}
++	}
++
+ 	void *GC::Calloc(size_t num, size_t elsize, int flags, int skip)
+ 	{
+ 		uint64 size = (uint64)num * (uint64)elsize;
+diff --git a/MMgc/GC.h b/MMgc/GC.h
+--- a/MMgc/GC.h
++++ b/MMgc/GC.h
+@@ -673,6 +673,20 @@ namespace MMgc
+ 		void Collect();
+ 
+ 		/**
++		 * Perform some GC-related work if needed.  Call this during
++		 * application down time.
++		 *
++		 * In incremental mode, this may result in a call to
++		 * StartIncrementalMark() or IncrementalMark(), which may in turn push
++		 * the current GC cycle to completion.  In non-incremental mode, this
++		 * heuristically decides whether to do a full Collect().
++		 *
++		 * @param callerHasActiveRequest
++		 *     Must be true iff the calling thread is already in a request.
++		 */
++		void MaybeGC(bool callerHasActiveRequest=false);
++
++		/**
+ 		* flags to be passed as second argument to alloc
+ 		*/
+ 		enum AllocFlags
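
The doc comment above gives MaybeGC's contract; from the embedder's side, the intent is to call it when the application is otherwise idle. A minimal sketch under that assumption; the idle hook and everything apart from GC::MaybeGC itself are invented for illustration:

    #include "MMgc.h"   // assumed umbrella header providing MMgc::GC

    // Hypothetical host idle hook: let the GC do bookkeeping during down time.
    // Pass true only when this thread has already entered a request, per the
    // callerHasActiveRequest contract documented above.
    static void OnHostIdle(MMgc::GC *gc)
    {
        gc->MaybeGC(/* callerHasActiveRequest = */ false);
    }
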
--- a/mmgc-threadsafe
+++ b/mmgc-threadsafe
@@ -1,12 +1,12 @@
 diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 --- a/MMgc/GC.cpp
 +++ b/MMgc/GC.cpp
-@@ -198,8 +198,16 @@ namespace MMgc
+@@ -199,8 +199,16 @@ namespace MMgc
  	};
  	const size_t kLargestAlloc = 1968;
  
 +#ifdef MMGC_THREADSAFE
 +#define USING_CALLBACK_LIST(gc)  GCAcquireSpinlock _cblock((gc)->m_callbackListLock)
 +#define USING_PAGE_MAP()         GCAcquireSpinlock _pmlock(pageMapLock)
 +#else
 +#define USING_CALLBACK_LIST(gc)  ((gc)->CheckThread())
@@ -14,17 +14,17 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +#endif
 +
  	GC::GC(GCHeap *gcheap)
 -		: disableThreadCheck(false),
 +		:
  #ifdef MMGC_DRC
  		  zct(gcheap),
  #endif
-@@ -255,8 +263,17 @@ namespace MMgc
+@@ -256,8 +264,17 @@ namespace MMgc
  		  heapSizeAtLastAlloc(gcheap->GetTotalHeapSize()),
  		  finalizedValue(true),
  		  // Expand, don't collect, until we hit this threshold
 -		  collectThreshold(256)
 -	{		
 +		  collectThreshold(256),
 +#if MMGC_THREADSAFE
 +		  m_exclusiveGCThread(NULL),
@@ -34,27 +34,27 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +		  m_condNoRequests(m_lock)
 +#else
 +		  disableThreadCheck(false)
 +#endif
 +	{
  		// sanity check for all our types
  		GCAssert (sizeof(int8) == 1);
  		GCAssert (sizeof(uint8) == 1);		
-@@ -372,7 +389,9 @@ namespace MMgc
+@@ -373,7 +390,9 @@ namespace MMgc
  
  		heap->Free(pageMap);
  
 +#ifndef MMGC_THREADSAFE
  		CheckThread();
 +#endif
  
  		GCAssert(!m_roots);
  		GCAssert(!m_callbacks);
-@@ -380,15 +399,43 @@ namespace MMgc
+@@ -393,20 +412,181 @@ namespace MMgc
  
  	void GC::Collect()
  	{
 +		CollectWithBookkeeping(false, false);
 +	}
 +
 +	void GC::CollectWithBookkeeping(bool callerHoldsLock,
 +									bool callerHasActiveRequest)
@@ -92,24 +92,21 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +			m_lock.Acquire();
 +
 +		if (vetoed || nogc || m_gcRunning || zct.reaping) {
 +			if (!callerHoldsLock)
 +				m_lock.Release();
 +			return;
 +		}
 +#else
- 		if (vetoed || nogc || collecting) {
- 
- 			return;
-@@ -400,8 +447,140 @@ namespace MMgc
- 
+ 		if (vetoed || nogc || collecting || zct.reaping) {
  			return;
  		}
 -
+-		ReapZCT();
 +#endif
 +
 +#ifdef MMGC_THREADSAFE
 +		GCThread *thisThread = GCThread::GetCurrentThread();
 +		if (m_exclusiveGCThread != NULL) {
 +			// Someone is already collecting or waiting to collect.
 +			//
 +			// If it's the current thread, then we're being called
@@ -232,54 +229,53 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 +				cb->postcollect();
 +		}
 +	}
 +
 +	void GC::CollectImpl()
 +	{
 +#ifndef MMGC_THREADSAFE
- 		ReapZCT();
++ 		ReapZCT();
 +#endif
- 
- 		// if we're in the middle of an incremental mark kill it
- 		// FIXME: we should just push it to completion 
-@@ -428,8 +607,6 @@ namespace MMgc
++
+ 		if(!marking)
+ 			StartIncrementalMark();
+ 		if(marking)
+@@ -418,13 +598,14 @@ namespace MMgc
+ 		// reachable by the conservative stack walk
+ 		//DumpStackTrace();
  		FindUnmarkedPointers();
+-		CheckThread();
  #endif
+ 	}
  
--		CheckThread();
--
- #ifdef DEBUGGER
- 		StopGCActivity();
- #endif
-@@ -437,6 +614,8 @@ namespace MMgc
- 
+ #ifdef _DEBUG
  	void GC::Trace(const void *stackStart/*=NULL*/, size_t stackSize/*=0*/)
  	{
 +		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		SAMPLE_FRAME("[mark]", core());
  
  		// Clear all mark bits.
-@@ -460,8 +639,11 @@ namespace MMgc
+@@ -448,8 +629,11 @@ namespace MMgc
  		SAMPLE_CHECK();
  
  		// invoke lastmark on all callbacks
 -		for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 -			cb->lastmark(work);
 +		{
 +			USING_CALLBACK_LIST(this);
 +			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 +				cb->lastmark(work);
 +		}
  
  		if(stackStart == NULL) {
  			MarkQueueAndStack(work);
-@@ -476,6 +658,20 @@ namespace MMgc
+@@ -465,6 +649,20 @@ namespace MMgc
  
  	void *GC::Alloc(size_t size, int flags/*0*/, int skip/*3*/)
  	{
 +#ifdef MMGC_THREADSAFE
 +		GCAutoLock _lock(m_lock);
 +		GCAssert(!m_gcRunning);
 +#else
 +		CheckThread();
@@ -290,28 +286,28 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +	void *GC::AllocAlreadyLocked(size_t size, int flags/*0*/, int skip/*3*/)
 +	{
 +#ifdef MMGC_THREADSAFE
 +		GCAssert(GCThread::GetCurrentThread()->IsInActiveRequest());
 +#endif
  #ifdef DEBUGGER
  		avmplus::AvmCore *core = (avmplus::AvmCore*)GetGCContextVariable(GCV_AVMCORE);
  		if(core)
-@@ -487,9 +683,8 @@ namespace MMgc
+@@ -476,9 +674,8 @@ namespace MMgc
  
  #ifdef _DEBUG
  		GCAssert(size + 7 > size);
 -		CheckThread();
  		if (GC::greedy) {
 -			Collect();
 +			CollectWithBookkeeping(true, true);
  		}
  		// always be marking in pedantic mode
  		if(incrementalValidationPedantic) {
-@@ -572,15 +767,21 @@ namespace MMgc
+@@ -561,15 +758,21 @@ namespace MMgc
  
  	void GC::Free(void *item)
  	{
 +#ifdef MMGC_THREADSAFE
 +		GCAutoLock _lock(m_lock);
 +		GCAssert(!m_gcRunning);
 +#else
  		CheckThread();
@@ -329,64 +325,62 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 -			return;
 -		}
 -
 -		bool isLarge;
 -
  		if(collecting) {
  			goto bail;
  		}
-@@ -629,6 +830,8 @@ bail:
+@@ -618,6 +821,8 @@ bail:
  
  	void GC::ClearMarks()
  	{
 +		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		for (int i=0; i < kNumSizeClasses; i++) {
  #ifdef MMGC_DRC
  			containsPointersRCAllocs[i]->ClearMarks();
-@@ -641,6 +844,8 @@ bail:
+@@ -630,6 +835,8 @@ bail:
  
  	void GC::Finalize()
  	{
 +		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		for(int i=kNumSizeClasses; i-- > 0;) {
  #ifdef MMGC_DRC
  			containsPointersRCAllocs[i]->Finalize();
-@@ -662,7 +867,9 @@ bail:
- 	}
+@@ -652,6 +859,8 @@ bail:
  
  	void GC::Sweep(bool force)
--	{	
-+	{
+ 	{	
 +		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
- 		SAMPLE_FRAME("[sweep]", core());
- 		sweeps++;
- 
-@@ -681,10 +888,13 @@ bail:
- 		collecting = true;
+ 		// collecting must be true because it indicates allocations should
+ 		// start out marked, we can't rely on write barriers below since 
+ 		// presweep could write a new GC object to a root
+@@ -671,10 +880,13 @@ bail:
+ 		
  
  		// invoke presweep on all callbacks
 -		GCCallback *cb = m_callbacks;
 -		while(cb) {
 -			cb->presweep();
 -			cb = cb->nextCB;
 +		{
 +			USING_CALLBACK_LIST(this);
 +			GCCallback *cb = m_callbacks;
 +			while(cb) {
 +				cb->presweep();
 +				cb = cb->nextCB;
 +			}
  		}
  
  		SAMPLE_CHECK();
-@@ -737,12 +947,15 @@ bail:
+@@ -735,12 +947,15 @@ bail:
  		collecting = false;
  
  		// invoke postsweep callback
 -		cb = m_callbacks;
 -		while(cb) {
 -			cb->postsweep();
 -			cb = cb->nextCB;
 -		}
@@ -398,129 +392,129 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +				cb->postsweep();
 +				cb = cb->nextCB;
 +			}
 +		}
 +
  		SAMPLE_CHECK();
  
  		allocsSinceCollect = 0;
-@@ -779,6 +992,7 @@ bail:
+@@ -777,6 +992,7 @@ bail:
  
  	void* GC::AllocBlock(int size, int pageType, bool zero)
  	{
 +		MMGC_ASSERT_GC_LOCK(this);
  #ifdef DEBUGGER
  		AllocActivity(size);
  #endif
-@@ -796,7 +1010,7 @@ bail:
+@@ -794,7 +1010,7 @@ bail:
  			if(incremental)
  				StartIncrementalMark();
  			else
 -				Collect();
 +				CollectWithBookkeeping(true, true);
  		}
  
  		void *item;
-@@ -837,6 +1051,8 @@ bail:
+@@ -835,6 +1051,8 @@ bail:
  
  	void* GC::AllocBlockIncremental(int size, bool zero)
  	{
 +		MMGC_ASSERT_GC_LOCK(this);
 +
  		if(!nogc && !collecting) {
  			uint64 now = GetPerformanceCounter();
  			if (marking) {		
-@@ -864,12 +1080,14 @@ bail:
+@@ -862,12 +1080,14 @@ bail:
  
  	void* GC::AllocBlockNonIncremental(int size, bool zero)
  	{
 +		MMGC_ASSERT_GC_LOCK(this);
 +
  		void *item = heap->Alloc(size, false, zero);
  		if (!item) {
  			if (heap->GetTotalHeapSize() >= collectThreshold &&
  				allocsSinceCollect >= totalGCPages / kFreeSpaceDivisor) 
  			{
 -				Collect();
 +				CollectWithBookkeeping(true, true);
  				item = heap->Alloc(size, false, zero);
  			}
  		}
-@@ -878,6 +1096,11 @@ bail:
+@@ -876,6 +1096,11 @@ bail:
  
  	void GC::FreeBlock(void *ptr, uint32 size)
  	{
 +#ifdef MMGC_THREADSAFE
 +		GCAssert(m_lock.IsHeld() || destroying
 +				 || (m_gcRunning
 +					 && m_exclusiveGCThread == GCThread::GetCurrentThread()));
 +#endif
  #ifdef DEBUGGER
  		AllocActivity(- (int)size);
  #endif
-@@ -905,6 +1128,7 @@ bail:
+@@ -903,6 +1128,7 @@ bail:
  
  	void GC::Mark(GCStack<GCWorkItem> &work)
  	{
 +		MMGC_ASSERT_EXCLUSIVE_GC(this);
  		while(work.Count()) {
  			MarkItem(work);
  		}
-@@ -912,6 +1136,7 @@ bail:
+@@ -910,6 +1136,7 @@ bail:
  
  	void GC::MarkGCPages(void *item, uint32 numPages, int to)
  	{
 +		USING_PAGE_MAP();
  		uintptr addr = (uintptr)item;
  		uint32 shiftAmount=0;
  		unsigned char *dst = pageMap;
-@@ -954,7 +1179,7 @@ bail:
+@@ -952,7 +1179,7 @@ bail:
  		addr = (uintptr)item;
  		while(numPages--)
  		{
 -			GCAssert(GetPageMapValue(addr) == 0);
 +			GCAssert(GetPageMapValueAlreadyLocked(addr) == 0);
  			SetPageMapValue(addr, to);
  			addr += GCHeap::kBlockSize;
  		}
-@@ -963,6 +1188,8 @@ bail:
+@@ -961,6 +1188,8 @@ bail:
  	void GC::UnmarkGCPages(void *item, uint32 numpages)
  	{
  		uintptr addr = (uintptr) item;
 +
 +		USING_PAGE_MAP();
  		while(numpages--)
  		{
  			ClearPageMapValue(addr);
-@@ -1128,6 +1355,7 @@ bail:
+@@ -1126,6 +1355,7 @@ bail:
  		gc->Mark(work);
  	}
  
 +#ifndef MMGC_THREADSAFE
  	void GC::CheckThread()
  	{
  #ifdef _DEBUG
-@@ -1136,12 +1364,13 @@ bail:
+@@ -1134,12 +1364,13 @@ bail:
  #endif
  #endif
  	}
 -
 +#endif
  
  	bool GC::IsPointerToGCPage(const void *item)
  	{
 +		USING_PAGE_MAP();
  		if((uintptr)item >= memStart && (uintptr)item < memEnd)
 -			return GetPageMapValue((uintptr) item) != 0;
 +			return GetPageMapValueAlreadyLocked((uintptr) item) != 0;
  		return false;
  	}
  
-@@ -1182,10 +1411,13 @@ bail:
+@@ -1180,10 +1411,13 @@ bail:
  		// note if we add things while reaping we could delete the object
  		// here if we had a way to monitor our stack usage
  		if(reaping && PLENTY_OF_STACK()) {
 -			GCCallback *cb = gc->m_callbacks;
 -			while(cb) {
 -				cb->prereap(obj);
 -				cb = cb->nextCB;
 +			{
@@ -528,17 +522,17 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +				GCCallback *cb = gc->m_callbacks;
 +				while(cb) {
 +					cb->prereap(obj);
 +					cb = cb->nextCB;
 +				}
  			}
  			if(gc->IsFinalized(obj))
  				((GCFinalizable*)obj)->~GCFinalizable();
-@@ -1376,10 +1608,13 @@ bail:
+@@ -1378,10 +1612,13 @@ bail:
  		nextPinnedIndex = 0;
  
  		// invoke prereap on all callbacks
 -		GCCallback *cb = gc->m_callbacks;
 -		while(cb) {
 -			cb->prereap();
 -			cb = cb->nextCB;
 +		{
@@ -546,17 +540,17 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +			GCCallback *cb = gc->m_callbacks;
 +			while(cb) {
 +				cb->prereap();
 +				cb = cb->nextCB;
 +			}
  		}
  
  #ifdef _DEBUG
-@@ -1420,10 +1655,13 @@ bail:
+@@ -1422,10 +1659,13 @@ bail:
  				}
  #endif
  				// invoke prereap on all callbacks
 -				GCCallback *cbb = gc->m_callbacks;
 -				while(cbb) {
 -					cbb->prereap(rcobj);
 -					cbb = cbb->nextCB;
 +				{
@@ -564,17 +558,17 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +					GCCallback *cbb = gc->m_callbacks;
 +					while(cbb) {
 +						cbb->prereap(rcobj);
 +						cbb = cbb->nextCB;
 +					}
  				}
  
  				GCAssert(*(int*)rcobj != 0);
-@@ -1457,10 +1695,13 @@ bail:
+@@ -1459,10 +1699,13 @@ bail:
  		zctIndex = nextPinnedIndex = 0;
  
  		// invoke postreap on all callbacks
 -		cb = gc->m_callbacks;
 -		while(cb) {
 -			cb->postreap();
 -			cb = cb->nextCB;
 +		{
@@ -582,27 +576,27 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +			GCCallback *cb = gc->m_callbacks;
 +			while(cb) {
 +				cb->postreap();
 +				cb = cb->nextCB;
 +			}
  		}
  #ifdef DEBUGGER
  		if(gc->gcstats && numObjects) {
-@@ -1607,7 +1848,8 @@ bail:
+@@ -1615,7 +1858,8 @@ bail:
  		va_end(argptr);
  
  		GCAssert(strlen(buf) < 4096);
 -			
 +
 +		USING_CALLBACK_LIST(this);
  		GCCallback *cb = m_callbacks;
  		while(cb) {
  			cb->log(buf);
-@@ -1659,23 +1901,27 @@ bail:
+@@ -1667,23 +1911,27 @@ bail:
  
  	bool GC::IsRCObject(const void *item)
  	{
 -		if((uintptr)item >= memStart && (uintptr)item < memEnd && ((uintptr)item&0xfff) != 0)
 -		{
 -			int bits = GetPageMapValue((uintptr)item);		
 -			item = GetRealPointer(item);
 -			switch(bits)
@@ -636,206 +630,206 @@ diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
 +		case kGCLargeAllocPageFirst:
 +			return GCLargeAlloc::IsRCObject(item);
 +		default:
 +			return false;
 +		}
  	}
  
  #endif // MMGC_DRC
-@@ -1761,7 +2007,7 @@ bail:
+@@ -1769,7 +2017,7 @@ bail:
  				continue;
  			
  			// normalize and divide by 4K to get index
 -			int bits = GetPageMapValue(val);
 +			int bits = GetPageMapValueAlreadyLocked(val);
  			switch(bits)
  			{
  			case 0:
-@@ -1782,6 +2028,8 @@ bail:
+@@ -1790,6 +2038,8 @@ bail:
  
  	void GC::FindUnmarkedPointers()
  	{
 +		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		if(findUnmarkedPointers)
  		{
  			uintptr m = memStart;
-@@ -1920,6 +2168,11 @@ bail:
+@@ -1928,6 +2178,11 @@ bail:
  	 */
  	void GC::WhosPointingAtMe(void* me, int recurseDepth, int currentDepth)
  	{
 +#ifdef MMGC_THREADSAFE
 +		if (currentDepth == 0)
 +			pageMapLock.Acquire();
 +#endif
 +
  		uintptr val = (uintptr)me;
  		uintptr m = memStart;
  
-@@ -1942,7 +2195,7 @@ bail:
+@@ -1950,7 +2205,7 @@ bail:
  #endif
  
  			// divide by 4K to get index
 -			int bits = GetPageMapValue(m);
 +			int bits = GetPageMapValueAlreadyLocked(m);
  			if(bits == kNonGC) 
  			{
  				ProbeForMatch((const void*)m, GCHeap::kBlockSize, val, recurseDepth, currentDepth);
-@@ -1986,6 +2239,11 @@ bail:
+@@ -1994,6 +2249,11 @@ bail:
  			
  			}
  		}
 +
 +#ifdef MMGC_THREADSAFE
 +		if (currentDepth == 0)
 +			pageMapLock.Release();
 +#endif
  	}
  #undef ALLOCA_AND_FILL_WITH_SPACES
  #endif
-@@ -2389,9 +2647,11 @@ bail:
+@@ -2395,9 +2655,11 @@ bail:
  		return 1000000;
  		#endif
  	}
 -	
 +
  	void GC::IncrementalMark(uint32 time)
  	{
 +		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		SAMPLE_FRAME("[mark]", core());
  		if(m_incrementalWork.Count() == 0 || hitZeroObjects) {
  			FinishIncrementalMark();
-@@ -2446,6 +2706,8 @@ bail:
+@@ -2452,6 +2714,8 @@ bail:
  
  	void GC::FinishIncrementalMark()
  	{
 +		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		// Don't finish an incremental mark (i.e., sweep) if we
  		// are in the midst of a ZCT reap.
  		if (zct.reaping)
-@@ -2477,8 +2739,11 @@ bail:
+@@ -2483,8 +2747,11 @@ bail:
  		}
  
  		// invoke lastmark on all callbacks
 -		for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 -			cb->lastmark(m_incrementalWork);
 +		{
 +			USING_CALLBACK_LIST(this);
 +			for (GCCallback *cb = m_callbacks; cb; cb = cb->nextCB)
 +				cb->lastmark(m_incrementalWork);
 +		}
  
  		MarkQueueAndStack(m_incrementalWork);
  
-@@ -2629,6 +2894,7 @@ bail:
+@@ -2635,6 +2902,7 @@ bail:
  	{
  		uint32 *bits;
  
 +		MMGC_ASSERT_GC_LOCK(this);
  		GCAssert(numBytes % 4 == 0);
  
  		#ifdef MMGC_64BIT // we use first 8-byte slot for the free list
-@@ -2700,7 +2966,8 @@ bail:
+@@ -2706,7 +2974,8 @@ bail:
  
  	void GC::AddCallback(GCCallback *cb)
  	{
 -		CheckThread();
 +		USING_CALLBACK_LIST(this);
 +
  		cb->prevCB = NULL;
  		cb->nextCB = m_callbacks;
  		if(m_callbacks)
-@@ -2710,7 +2977,8 @@ bail:
+@@ -2716,7 +2985,8 @@ bail:
  
  	void GC::RemoveCallback(GCCallback *cb)
  	{
 -		CheckThread();
 +		USING_CALLBACK_LIST(this);
 +
  		if( m_callbacks == cb )
  			m_callbacks = cb->nextCB;
  		else
-@@ -2722,7 +2990,8 @@ bail:
+@@ -2728,7 +2998,8 @@ bail:
  
  	void GC::AddEdgeCallback(GCEdgeCallback *cb)
  	{
 -		CheckThread();
 +		USING_CALLBACK_LIST(this);
 +
  		cb->prevCB = NULL;
  		cb->nextCB = m_edgeCallbacks;
  		if(m_edgeCallbacks)
-@@ -2732,7 +3001,8 @@ bail:
+@@ -2738,7 +3009,8 @@ bail:
  
  	void GC::RemoveEdgeCallback(GCEdgeCallback *cb)
  	{
 -		CheckThread();
 +		USING_CALLBACK_LIST(this);
 +
  		if( m_edgeCallbacks == cb )
  			m_edgeCallbacks = cb->nextCB;
  		else
-@@ -2744,12 +3014,12 @@ bail:
+@@ -2750,12 +3022,12 @@ bail:
  
  	void GC::FireFoundEdgeTo(const void *p)
  	{
 +		// Don't acquire the spinlock here because (a) that would really hurt
 +		// performance; (b) the m_edgeCallbacks list, unlike the m_callbacks
 +		// list, is protected by the request model.
  		p = GetUserPointer(p);
 -		GCEdgeCallback *cb = m_edgeCallbacks;
 -		while(cb) {
 +		for (GCEdgeCallback *cb = m_edgeCallbacks; cb; cb = cb->nextCB)
  			cb->foundEdgeTo(p);
 -			cb = cb->nextCB;
 -		}
  	}
  
  	void GC::PushWorkItem(GCStack<GCWorkItem> &stack, GCWorkItem item)
-@@ -2762,7 +3032,11 @@ bail:
+@@ -2768,7 +3040,11 @@ bail:
  	GCWeakRef* GC::GetWeakRef(const void *item) 
  	{
  		GC *gc = GetGC(item);
 +#ifdef MMGC_THREADSAFE
 +		GCAutoLock _lock(gc->m_lock);
 +#endif
  		GCWeakRef *ref = (GCWeakRef*) gc->weakRefs.get(item);
 +
  		if(ref == NULL) {
  			ref = new (gc) GCWeakRef(item);
  			gc->weakRefs.put(item, ref);
-@@ -2824,6 +3098,8 @@ bail:
+@@ -2830,6 +3106,8 @@ bail:
  
  	void GC::FindMissingWriteBarriers()
  	{
 +		MMGC_ASSERT_EXCLUSIVE_GC(this);
 +
  		if(!incrementalValidation)
  			return;
  
-@@ -2885,6 +3161,7 @@ bail:
+@@ -2891,6 +3169,7 @@ bail:
  	void GC::StartGCActivity()
  	{
  		// invoke postsweep callback
 +		USING_CALLBACK_LIST(this);
  		GCCallback *cb = m_callbacks;
  		while(cb) {
  			cb->startGCActivity();
-@@ -2895,6 +3172,7 @@ bail:
+@@ -2901,6 +3180,7 @@ bail:
  	void GC::StopGCActivity()
  	{
  		// invoke postsweep callback
 +		USING_CALLBACK_LIST(this);
  		GCCallback *cb = m_callbacks;
  		while(cb) {
  			cb->stopGCActivity();
-@@ -2905,6 +3183,7 @@ bail:
+@@ -2911,6 +3191,7 @@ bail:
  	void GC::AllocActivity(int blocks)
  	{
  		// invoke postsweep callback
 +		USING_CALLBACK_LIST(this);
  		GCCallback *cb = m_callbacks;
  		while(cb) {
  			cb->allocActivity(blocks);
 diff --git a/MMgc/GC.h b/MMgc/GC.h
@@ -1251,32 +1245,32 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +
 +		/** @access Requires(request || exclusiveGC) */
  		bool IsGCMemory (const void *);
  
 +		/** @access Requires(request || exclusiveGC) */
  		bool IsQueued(const void *item);
  
  		static uint64 GetPerformanceCounter();
-@@ -854,30 +1022,81 @@ namespace MMgc
+@@ -854,9 +1022,12 @@ namespace MMgc
  			return (double(GC::GetPerformanceCounter() - start) * 1000) / GC::GetPerformanceFrequency();
  		}
  
 +#ifndef MMGC_THREADSAFE
  		void DisableThreadCheck() { disableThreadCheck = true; }
--		
++#endif
+ 		
 -		uint64 t0;
-+#endif
-+
 +		/** GC initialization time, in ticks.  Used for logging. */
 +		const uint64 t0;
  
- 		static uint64 ticksToMicros(uint64 ticks) { return (ticks*1000000)/GetPerformanceFrequency(); }
- 
- 		static uint64 ticksToMillis(uint64 ticks) { return (ticks*1000)/GetPerformanceFrequency(); }
+ 		// a tick is the unit of GetPerformanceCounter()
+ 		static uint64 ticksToMicros(uint64 ticks) 
+@@ -879,21 +1050,69 @@ namespace MMgc
+ 		}
  
  		// marking rate counter
 +		/**
 +		 * Total number of bytes of pointer-containing memory scanned by this
 +		 * GC.  Used to measure marking rate, which is
 +		 * <code>bytesMarked/ticksToMillis(markTicks)</code>.
 +		 *
 +		 * @access ReadWrite(request, exclusiveGC);
@@ -1295,25 +1289,23 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
  		uint32 lastStartMarkIncrementCount;
  		uint32 markIncrements;
 +
 +		/**
 +		 * Number of calls to MarkItem().
 +		 * @access ReadWrite(request, exclusiveGC)
 +		 */
  		uint32 marks;
--        uint32 sweeps;
--
 +
 +		/**
 +		 * Number of calls to Sweep().
 +		 * @access ReadWrite(request, exclusiveGC)
 +		 */
-+		uint32 sweeps;
-+
+         uint32 sweeps;
+ 
 +		/**
 +		 * Number of calls to MarkItem() during the current (or most recent)
 +		 * IncrementalMark().
 +		 *
 +		 * @access ReadWrite(request, exclusiveGC)
 +		 */
  		uint32 numObjects;
 +
@@ -1338,39 +1330,39 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * incremental mark.  This means the next mark will force the GC cycle
 +		 * through to completion.
 +		 *
 +		 * @access ReadWrite(request, exclusiveGC)
 +		 */
  		bool hitZeroObjects;
  
 		// called at some apropos moment from the mutator, ideally at a point
-@@ -889,7 +1108,10 @@ namespace MMgc
+@@ -905,7 +1124,10 @@ namespace MMgc
  
  		bool Destroying() { return destroying; }
  
 +		/** @access Requires(request) */
  		static GCWeakRef *GetWeakRef(const void *obj);
 +
 +		/** @access Requires((request && m_lock) || exclusiveGC) */
  		void ClearWeakRef(const void *obj);
  
- 		uintptr	GetStackTop() const;		
-@@ -905,7 +1127,10 @@ namespace MMgc
+ 		uintptr	GetStackTop() const;
+@@ -922,7 +1144,10 @@ namespace MMgc
  		// FIXME: only used for FixedAlloc, GCAlloc sized dynamically
  		const static int kPageUsableSpace = 3936;
  
 +		/** @access Requires(request && m_lock) */
  		uint32 *GetBits(int numBytes, int sizeClass);
 +
 +		/** @access Requires((request && m_lock) || exclusiveGC) */
  		void FreeBits(uint32 *bits, int sizeClass)
  		{
  #ifdef _DEBUG
-@@ -914,32 +1139,55 @@ namespace MMgc
+@@ -931,32 +1156,55 @@ namespace MMgc
  			*(uint32**)bits = m_bitsFreelists[sizeClass];
  			m_bitsFreelists[sizeClass] = bits;
  		}
 +
 +		/** @access Requires((request && m_lock) || exclusiveGC) */
  		uint32 *m_bitsFreelists[kNumSizeClasses];
 +		/** @access Requires((request && m_lock) || exclusiveGC) */
  		uint32 *m_bitsNext;
@@ -1422,17 +1414,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 -		
 +
 +		/** @access ReadWrite(request, exclusiveGC) */
  		uint64 lastMarkTicks;
 +		/** @access ReadWrite(request, exclusiveGC) */
  		uint64 lastSweepTicks;
  
  		const static int16 kSizeClasses[kNumSizeClasses];		
-@@ -951,14 +1199,44 @@ namespace MMgc
+@@ -968,14 +1216,44 @@ namespace MMgc
  		// 0 - not in use
  		// 1 - used by GCAlloc
  		// 3 - used by GCLargeAlloc
 +
 +		/** @access Requires(pageMapLock) */
  		uintptr memStart;
 +		/** @access Requires(pageMapLock) */
  		uintptr memEnd;
@@ -1467,17 +1459,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +			return GetPageMapValueAlreadyLocked(addr);
 +		}
 +
 +		/** @access Requires(pageMapLock) */
 +		inline int GetPageMapValueAlreadyLocked(uintptr addr) const
  		{
  			uintptr index = (addr-memStart) >> 12;
  
-@@ -970,7 +1248,20 @@ namespace MMgc
+@@ -987,7 +1265,20 @@ namespace MMgc
  			//return (pageMap[addr >> 2] & (3<<shiftAmount)) >> shiftAmount;
  			return (pageMap[index >> 2] >> shiftAmount) & 3;
  		}
 +
 +		/**
 +		 * Set the pageMap bits for the given address.  Those bits must be
 +		 * zero beforehand.
 +		 *
@@ -1488,28 +1480,26 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		/**
 +		 * Zero out the pageMap bits for the given address.
 +		 *
 +		 * @access Requires(pageMapLock)
 +		 */
  		void ClearPageMapValue(uintptr addr);
  
  		void MarkGCPages(void *item, uint32 numpages, int val);
-@@ -989,69 +1280,163 @@ namespace MMgc
- 		GCAlloc *noPointersAllocs[kNumSizeClasses];
+@@ -1007,12 +1298,37 @@ namespace MMgc
  		GCLargeAlloc *largeAlloc;
  		GCHeap *heap;
--		
-+
+ 		
 +		/** @access Requires(m_lock) */
  		void* AllocBlockIncremental(int size, bool zero=true);
 +
 +		/** @access Requires(m_lock) */
  		void* AllocBlockNonIncremental(int size, bool zero=true);
- 
++
 +	protected:
 +		/**
 +		 * Collect in a thread-safe, recursion-preventing way, with
 +		 * callbacks.
 +		 *
 +		 * Both parameters are ignored in non-MMGC_THREADSAFE builds.  In an
 +		 * MMGC_THREADSAFE build, callerHoldsLock must be true iff the calling
 +		 * thread already holds m_lock, and callerHasActiveRequest must be
@@ -1520,28 +1510,31 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +
 +	private:
 +		/**
 +		 * Just collect.
 +		 *
 +		 * @access Requires(exclusiveGC)
 +		 */
 +		void CollectImpl();
-+
+ 
+ #ifdef _DEBUG
+ 		public:
+ #endif
 +		/** @access Requires(exclusiveGC) */
  		void ClearMarks();
  #ifdef _DEBUG
+ 		private:
+@@ -1022,59 +1338,127 @@ namespace MMgc
+ #ifdef _DEBUG
  		public:
  		// sometimes useful for mutator to call this
 +		/** @access Requires(exclusiveGC) */
  		void Trace(const void *stackStart=NULL, size_t stackSize=0);
  		private:
- #else
-+		/** @access Requires(exclusiveGC) */
- 		void Trace(const void *stackStart=NULL, size_t stackSize=0);
  #endif
  
 +		/** @access Requires(exclusiveGC) */
  		void Finalize();
 +		/** @access Requires(exclusiveGC) */
  		void Sweep(bool force=false);
 +		/** @access Requires(exclusiveGC) */
  		void ForceSweep() { Sweep(true); }
@@ -1583,33 +1576,32 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * could be called twice.
 +		 *
 +		 * Also, Collect() uses this to protect itself from recursive calls
 +		 * (from badly behaved finalizers).
 +		 *
 +		 * @access ReadWrite(request, exclusiveGC)
 +		 */
  		bool collecting;
-- 
-+
+  
 +		/** @access Requires((request && m_lock) || exclusiveGC) */
  		bool finalizedValue;
  
 -		// list of pages to be swept, built up in Finalize
 +		/** @access Requires(exclusiveGC) */
  		void AddToSmallEmptyBlockList(GCAlloc::GCBlock *b)
  		{
  			b->next = smallEmptyPageList;
  			smallEmptyPageList = b;
  		}
 +
 +		/**
 +		 * list of pages to be swept, built up in Finalize
 +		 * @access Requires(exclusiveGC)
-+		 */ 
++		 */
  		GCAlloc::GCBlock *smallEmptyPageList;
  		
 -		// list of pages to be swept, built up in Finalize
 +		/** @access Requires(exclusiveGC) */
  		void AddToLargeEmptyBlockList(GCLargeAlloc::LargeBlock *lb)
  		{
  			lb->next = largeEmptyPageList;
  			largeEmptyPageList = lb;
@@ -1624,18 +1616,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
  #ifdef GCHEAP_LOCK
  		GCSpinLock m_rootListLock;
  #endif
  
 +		/** @access Requires(m_rootListLock) */
  		GCRoot *m_roots;
  		void AddRoot(GCRoot *root);
  		void RemoveRoot(GCRoot *root);
--		
-+
+ 		
 +#ifdef MMGC_THREADSAFE
 +		GCSpinLock m_callbackListLock;
 +#endif
 +
 +		/**
 +		 * Points to the head of a linked list of callback objects.
 +		 *
 +		 * @access Requires(m_callbackListLock)
@@ -1660,41 +1651,41 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * even if the calling thread is not in a request at all, so this
 +		 * policy would be insufficient for m_callbacks.  Second,
 +		 * m_edgeCallbacks fires very frequently during marking, so a
 +		 * lock-free policy is probably much faster.
 +		 */
  		GCEdgeCallback *m_edgeCallbacks;
  		void AddEdgeCallback(GCEdgeCallback *cb);
  		void RemoveEdgeCallback(GCEdgeCallback *cb);
-@@ -1059,9 +1444,10 @@ namespace MMgc
+@@ -1082,9 +1466,10 @@ namespace MMgc
  		/**
  		 * Notify GCEdgeCallbacks of an edge.
  		 *
 -		 * p is a "real pointer".  This method converts it to
 -		 * a "user pointer" using GetUserPointer() before
 -		 * calling callbacks.
 +		 * p is a "real pointer".  This method converts it to a "user pointer"
 +		 * using GetUserPointer() before calling callbacks.
 +		 *
 +		 * @access Requires(exclusiveGC)
  		 */
  		void FireFoundEdgeTo(const void *p);
  
-@@ -1084,7 +1470,9 @@ private:
+@@ -1107,7 +1492,9 @@ private:
  private:
  #endif
  
 +#ifndef MMGC_THREADSAFE
  		void CheckThread();
 +#endif
  
  		void PushWorkItem(GCStack<GCWorkItem> &stack, GCWorkItem item);
  
-@@ -1101,17 +1489,37 @@ private:
+@@ -1124,17 +1511,37 @@ private:
  		void CheckFreelists();
  
  		int m_gcLastStackTrace;
 +
 +		/**
 +		 * Used by FindUnmarkedPointers.
 +		 *
 +		 * @access Requires(exclusiveGC)
@@ -1726,17 +1717,17 @@ diff --git a/MMgc/GC.h b/MMgc/GC.h
 +		 * Scan all GC memory (skipping roots). If a GC object is black make sure
 +		 * it has no pointers to white objects.
 +		 *
 +		 * @access Requires(exclusiveGC)
 +		 */
  		void FindMissingWriteBarriers();
  #ifdef WIN32
 		// store a handle to the thread that created the GC to ensure thread safety
-@@ -1132,7 +1540,187 @@ public:
+@@ -1155,7 +1562,187 @@ public:
  #ifdef _DEBUG
  		// Dump a list of objects that have pointers to the given location.
  		void WhosPointingAtMe(void* me, int recurseDepth=0, int currentDepth=0);
 +
 +		/**
 +		 * Used by WhosPointingAtMe.
 +		 * @access Requires(pageMapLock)
 +		 */
@@ -1926,25 +1917,25 @@ diff --git a/MMgc/GCAlloc.cpp b/MMgc/GCA
  
  	GCAlloc::GCBlock* GCAlloc::CreateChunk()
  	{
 +		MMGC_ASSERT_GC_LOCK(m_gc);
 +
  		// Get space in the bitmap.  Do this before allocating the actual block,
  		// since we might call GC::AllocBlock for more bitmap space and thus
  		// cause some incremental marking.
-@@ -225,6 +227,7 @@ namespace MMgc
+@@ -229,6 +231,7 @@ namespace MMgc
  
  	void* GCAlloc::Alloc(size_t size, int flags)
  	{
 +		MMGC_ASSERT_GC_LOCK(m_gc);
  		(void)size;
  		GCAssertMsg(((size_t)m_itemSize >= size), "allocator itemsize too small");
  start:
-@@ -374,6 +377,8 @@ start:
+@@ -379,6 +382,8 @@ start:
  
  	void GCAlloc::Finalize()
  	{
 +		MMGC_ASSERT_EXCLUSIVE_GC(m_gc);
 +
  		m_finalized = true;
  		// Go through every item of every block.  Look for items
  		// that are in use but not marked as reachable, and delete
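
A recurring move in the mmgc-threadsafe patch above is replacing bare walks of m_callbacks with USING_CALLBACK_LIST(this), which in MMGC_THREADSAFE builds expands to a scoped spinlock acquisition (GCAcquireSpinlock). The value of the scoped-guard shape is that every exit path from the block, including early returns, releases the lock. A compilable sketch of the idiom using stand-in classes and modern C++ for brevity; the real GCSpinLock/GCAcquireSpinlock differ in detail:

    #include <atomic>

    class SpinLock {
        std::atomic_flag flag = ATOMIC_FLAG_INIT;
    public:
        void Acquire() { while (flag.test_and_set(std::memory_order_acquire)) {} }
        void Release() { flag.clear(std::memory_order_release); }
    };

    // Guard object: the constructor locks, the destructor unlocks, so an
    // early return inside the guarded scope cannot leak the lock.
    class AcquireSpinLock {
        SpinLock &m_lock;
    public:
        explicit AcquireSpinLock(SpinLock &lock) : m_lock(lock) { m_lock.Acquire(); }
        ~AcquireSpinLock() { m_lock.Release(); }
        AcquireSpinLock(const AcquireSpinLock &) = delete;
        AcquireSpinLock &operator=(const AcquireSpinLock &) = delete;
    };

    // Analogue of USING_CALLBACK_LIST: names a scoped guard on the list lock.
    #define USING_LIST_SKETCH(lock) AcquireSpinLock _guard(lock)
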
--- a/series
+++ b/series
@@ -1,14 +1,15 @@
 tweak-esc-main.sh
 const-workitem.patch
 gcstack-access
 workitems-notgc-noassert
 gc-graph #+graphviz
 alloc-backtrace #+graphviz
-configure-with-threadsafe-mmgc #+threadsafe
+unmerged-edits-to-mmgc-threadsafe #+threadsafe
 mmgc-threadsafe #+threadsafe
 mmgc-threadsafe-gctests #+threadsafe
+mmgc-maybegc #+threadsafe
 mmgc-graphviz #+jorendorff-graphviz
 mmgc-bit-checks #+threadsafe
 enable-traces
 debug-print-finalizers
 finalizable-merge-fixup
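
For readers unfamiliar with the series file: the #+threadsafe suffixes are Mercurial Queues guards, so a patch marked #+threadsafe is applied only while that guard is selected. Assuming stock mq behavior, enabling and applying the threadsafe patches looks like:

    hg qselect threadsafe
    hg qpush -a
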
new file mode 100644
--- /dev/null
+++ b/unmerged-edits-to-mmgc-threadsafe
@@ -0,0 +1,12 @@
+diff --git a/MMgc/GC.cpp b/MMgc/GC.cpp
+--- a/MMgc/GC.cpp
++++ b/MMgc/GC.cpp
+@@ -424,6 +424,8 @@ namespace MMgc
+ 		GCAssert(callerHasActiveRequest == GCThread::GetCurrentThread()->IsInActiveRequest());
+ #endif
+ #else
++		(void) callerHoldsLock;
++		(void) callerHasActiveRequest;
+ 		CheckThread();
+ #endif
+
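
The two (void) casts added above are the conventional way to mark parameters as deliberately unused: in non-MMGC_THREADSAFE builds neither argument is otherwise referenced, and compilers that warn about unused parameters (MSVC at /W4, or GCC with -Wextra) would turn the warning into a build break under warnings-as-errors. A minimal sketch; Demo is an invented name:

    void Demo(bool callerHoldsLock)
    {
    #ifdef MMGC_THREADSAFE
        if (callerHoldsLock) { /* threadsafe-only use of the parameter */ }
    #else
        (void) callerHoldsLock;   // deliberately unused in this configuration
    #endif
    }
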