Merged with tamarin-tracing (Moved SideExit and GuardRecord out of nanojit, improved labelling)
author: David Anderson <danderson@mozilla.com>
Tue, 08 Jul 2008 17:09:53 -0700
changeset 17516 842d94f41381b26c8e05d8fdbbc9f8a7c168cbdc
parent 17514 eeec9337de5e49c86806fd8959d887681cccdb71
child 17517 1dd22bd88472936afb0e2bf45bf9b2850244ee2c
push id: 1452
push user: shaver@mozilla.com
push date: Fri, 22 Aug 2008 00:08:22 +0000
treeherder: autoland@d13bb0868596 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
milestone: 1.9.1a1pre
Merged with tamarin-tracing (Moved SideExit and GuardRecord out of nanojit, improved labelling)
js/src/nanojit/Assembler.cpp
js/src/nanojit/Assembler.h
js/src/nanojit/Fragmento.cpp
js/src/nanojit/Fragmento.h
js/src/nanojit/LIR.cpp
js/src/nanojit/LIR.h
js/src/nanojit/Native.h
js/src/nanojit/NativeARM.h
js/src/nanojit/Nativei386.cpp
js/src/nanojit/TraceTreeDrawer.cpp
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -33,16 +33,20 @@
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "nanojit.h"
 
+#if defined(AVMPLUS_LINUX) && defined(AVMPLUS_ARM)
+#include <asm/unistd.h>
+#endif
+
 namespace nanojit
 {
 	#ifdef FEATURE_NANOJIT
 
 
 	class DeadCodeFilter: public LirFilter
 	{
 		Assembler *assm;
@@ -202,20 +206,17 @@ namespace nanojit
 		_resvFree = r->arIndex;
 		r->reg = UnknownReg;
 		r->arIndex = 0;
 		if (!item) 
 			setError(ResvFull); 
 
         if (i->isconst() || i->isconstq())
             r->cost = 0;
-        else if (i->isop(LIR_ld) && 
-                 i->oprnd1() == _thisfrag->param0 &&
-                 (i->oprnd2()->isconstval(offsetof(avmplus::InterpState,sp)) ||
-                  i->oprnd2()->isconstval(offsetof(avmplus::InterpState,rp))))
+        else if (i == _thisfrag->sp || i == _thisfrag->rp)
             r->cost = 2;
         else
             r->cost = 1;
 
         i->setresv(item);
 		return r;
 	}
 
@@ -553,25 +554,25 @@ namespace nanojit
 			CMP(ra, rb);
 		}
 	}
 
     void Assembler::patch(GuardRecord *lr)
     {
         Fragment *frag = lr->target;
 		NanoAssert(frag->fragEntry);
-		NIns* was = asm_adjustBranch(lr->jmp, frag->fragEntry);
+		NIns* was = asm_adjustBranch((NIns*)lr->jmp, frag->fragEntry);
 		if (!lr->origTarget) lr->origTarget = was;
 		verbose_only(verbose_outputf("patching jump at %p to target %p (was %p)\n",
 			lr->jmp, frag->fragEntry, was);)
     }
 
     void Assembler::unpatch(GuardRecord *lr)
     {
-		NIns* was = asm_adjustBranch(lr->jmp, lr->origTarget);
+		NIns* was = asm_adjustBranch((NIns*)lr->jmp, (NIns*)lr->origTarget);
 		(void)was;
 		verbose_only(verbose_outputf("unpatching jump at %p to original target %p (was %p)\n",
 			lr->jmp, lr->origTarget, was);)
     }
 
     NIns* Assembler::asm_exit(LInsp guard)
     {
 		SideExit *exit = guard->exit();
@@ -611,17 +612,23 @@ namespace nanojit
 		releaseRegisters();
 		
 		swapptrs();
 		_inExit = true;
 		
 		//verbose_only( verbose_outputf("         LIR_xend swapptrs, _nIns is now %08X(%08X), _nExitIns is now %08X(%08X)",_nIns, *_nIns,_nExitIns,*_nExitIns) );
 		debug_only( _sv_fpuStkDepth = _fpuStkDepth; _fpuStkDepth = 0; )
 
-		GuardRecord *lr = nFragExit(guard); (void)lr;
+		nFragExit(guard);
+
+		// if/when we patch this exit to jump over to another fragment,
+		// that fragment will need its parameters set up just like ours.
+        LInsp stateins = _thisfrag->state;
+		Register state = findSpecificRegFor(stateins, Register(stateins->imm8()));
+		asm_bailout(guard, state);
 
 		mergeRegisterState(capture);
 
 		// this can be useful for breaking whenever an exit is taken
 		//INT3();
 		//NOP();
 
 		// we are done producing the exit logic for the guard so demark where our exit block code begins
@@ -683,21 +690,21 @@ namespace nanojit
 	
 	void Assembler::assemble(Fragment* frag,  NInsList& loopJumps)
 	{
 		if (error()) return;	
 		AvmCore *core = _frago->core();
 		GC *gc = core->gc;
         _thisfrag = frag;
 
-		// set up backwards pipeline: assembler -> StoreFilter -> LirReader
+		// set up backwards pipeline: assembler -> StackFilter -> LirReader
 		LirReader bufreader(frag->lastIns);
-		StoreFilter storefilter(&bufreader, gc,
-			frag->param0, frag->sp, frag->rp);
-		DeadCodeFilter deadfilter(&storefilter, this);
+		StackFilter storefilter1(&bufreader, gc, frag, frag->sp);
+		StackFilter storefilter2(&storefilter1, gc, frag, frag->rp);
+		DeadCodeFilter deadfilter(&storefilter2, this);
 		LirFilter* rdr = &deadfilter;
 		verbose_only(
 			VerboseBlockReader vbr(rdr, this, frag->lirbuf->names);
 			if (verbose_enabled())
 				rdr = &vbr;
 		)
 
 		verbose_only(_thisfrag->compileNbr++; )
@@ -748,20 +755,42 @@ namespace nanojit
 		}
 		
 		AvmAssertMsg(error() || _fpuStkDepth == 0, ("_fpuStkDepth %d\n",_fpuStkDepth));
 
 		internalReset();  // clear the reservation tables and regalloc
 		NanoAssert(_branchStateMap->isEmpty());
 		_branchStateMap = 0;
 		
-		#ifdef UNDER_CE
+		#if defined(UNDER_CE)
 		// If we've modified the code, we need to flush so we don't end up trying 
 		// to execute junk
 		FlushInstructionCache(GetCurrentProcess(), NULL, NULL);
+		#elif defined(AVMPLUS_LINUX) && defined(AVMPLUS_ARM)
+			// N A S T Y - obviously have to fix this
+		// determine our page range
+
+		Page *page=0, *first=0, *last=0;
+		for (int i=2;i!=0;i--) {
+			page = first = last = (i==2 ? _nativePages : _nativeExitPages);
+			while (page)
+			{
+				if (page<first)
+					first = page;
+				if (page>last)
+					last = page;
+				page = page->next;
+			}
+	
+			register unsigned long _beg __asm("a1") = (unsigned long)(first);
+			register unsigned long _end __asm("a2") = (unsigned long)(last+NJ_PAGE_SIZE);
+			register unsigned long _flg __asm("a3") = 0;
+			register unsigned long _swi __asm("r7") = 0xF0002;
+			__asm __volatile ("swi 0 	@ sys_cacheflush" : "=r" (_beg) : "0" (_beg), "r" (_end), "r" (_flg), "r" (_swi));
+		}
 		#endif
 	}
 	
 	void Assembler::copyRegisters(RegAlloc* copyTo)
 	{
 		*copyTo = _allocator;
 	}
 
@@ -1196,19 +1225,19 @@ namespace nanojit
 
                     #ifdef NJ_VERBOSE
                     // branching from this frag to ourself.
                     if (_frago->core()->config.show_stats)
                         LDi(argRegs[1], int((Fragment*)_thisfrag));
                     #endif
 
 					// restore first parameter, the only one we use
-                    LInsp param0 = _thisfrag->param0;
-                    Register a0 = Register(param0->imm8());
-					findSpecificRegFor(param0, a0); 
+                    LInsp state = _thisfrag->state;
+                    Register a0 = Register(state->imm8());
+					findSpecificRegFor(state, a0); 
 					break;
 				}
 #ifndef NJ_SOFTFLOAT
 				case LIR_feq:
 				case LIR_fle:
 				case LIR_flt:
 				case LIR_fgt:
 				case LIR_fge:
@@ -1327,16 +1356,17 @@ namespace nanojit
 #ifndef NJ_SOFTFLOAT
                     if (op == LIR_fcall)
                     {
 						rr = asm_prep_fcall(rR, ins);
                     }
                     else
 #endif
                     {
+						(void)rR;
                         rr = retRegs[0];
 						prepResultReg(ins, rmask(rr));
                     }
 
 					// do this after we've handled the call result, so we dont
 					// force the call result to be spilled unnecessarily.
 					restoreCallerSaved();
 
@@ -1562,32 +1592,31 @@ namespace nanojit
 	 * The last guard record is used for the unconditional jump
 	 * at the end of the trace. 
 	 * 
 	 * NOTE:  It is also not guaranteed that the native code 
 	 *        is contained on a single page.
 	 */
 	GuardRecord* Assembler::placeGuardRecord(LInsp guard)
 	{
+		// we align the guards to 4Byte boundary
+		size_t size = GuardRecordSize(guard);
 		SideExit *exit = guard->exit();
-		// we align the guards to 4Byte boundary
-		NIns* ptr = (NIns*)alignTo(_nIns-sizeof(GuardRecord), 4);
+		NIns* ptr = (NIns*)alignTo(_nIns-size, 4);
 		underrunProtect( (int)_nIns-(int)ptr );  // either got us a new page or there is enough space for us
-		GuardRecord* rec = (GuardRecord*) alignTo(_nIns-sizeof(GuardRecord),4);
+		GuardRecord* rec = (GuardRecord*) alignTo(_nIns-size,4);
 		rec->outgoing = _latestGuard;
 		_latestGuard = rec;
 		_nIns = (NIns*)rec;
 		rec->next = 0;
 		rec->origTarget = 0;		
 		rec->target = exit->target;
 		rec->from = _thisfrag;
 		rec->guard = guard;
-		rec->calldepth = exit->calldepth;
-		rec->vmprivate = exit->vmprivate;
-		verbose_only( rec->sid = exit->sid; )
+		initGuardRecord(guard,rec);
 		if (exit->target) 
 			exit->target->addLink(rec);
 		verbose_only( rec->compileNbr = _thisfrag->compileNbr; )
 		return rec;
 	}
 
 	void Assembler::setCallTable(const CallInfo* functions)
 	{
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -36,18 +36,16 @@
  *
  * ***** END LICENSE BLOCK ***** */
 
 
 #ifndef __nanojit_Assembler__
 #define __nanojit_Assembler__
 
 
-namespace avmplus { class InterpState; }
-
 namespace nanojit
 {
 	/**
 	 * Some notes on this Assembler (Emitter).
 	 * 
 	 *     LIR_call is a generic call operation that is encoded using form [2].  The 24bit
 	 *     integer is used as an index into a function look-up table that contains information
 	 *     about the target that is to be called; including address, # parameters, etc.
@@ -63,35 +61,16 @@ namespace nanojit
 	 * 
 	 *   A negative is that we require state as we 'push' and 'pop' nodes along the tree.  
 	 *   Also, this is most easily performed using recursion which may not be desirable in 
 	 *   the mobile environment. 
 	 *   
 	 */
 
 	/**
-	 * These objects lie directly in the native code pages and are used to house 
-	 * state information across the edge of side exits from a fragment.  
-	 */
-	struct GuardRecord
-	{
-		GuardRecord* next;			/* link to next record in chain for branches that target the same fragment */
-        Fragment *   from;
-        Fragment *   target;
-		NIns*		 jmp;
-		NIns*        origTarget;
-		int32_t		 calldepth;
-		LInsp		 guard;
-		GuardRecord* outgoing;			/* list of guards in a fragment */
-		void*        vmprivate;
-		verbose_only( uint32_t sid; )
-		verbose_only( uint32_t compileNbr; )
-	};
-
-	/**
 	 * The Assembler is only concerned with transforming LIR to native instructions
 	 */
     struct Reservation
 	{
 		uint32_t arIndex:16;	/* index into stack frame.  displ is -4*arIndex */
 		Register reg:8;			/* register UnkownReg implies not in register */
         int cost:8;
 	};
@@ -245,16 +224,17 @@ namespace nanojit
 			
 			void		gen(LirFilter* toCompile, NInsList& loopJumps);
 			NIns*		genPrologue(RegisterMask);
 			NIns*		genEpilogue(RegisterMask);
 
 			bool		ignoreInstruction(LInsp ins);
 
 			GuardRecord* placeGuardRecord(LInsp guard);
+			void		initGuardRecord(LInsp guard, GuardRecord*);
 
 			uint32_t	arReserve(LIns* l);
 			uint32_t	arFree(uint32_t idx);
 			void		arReset();
 
 			Register	registerAlloc(RegisterMask allow);
 			void		registerResetAll();
 			void		restoreCallerSaved();
@@ -326,28 +306,29 @@ namespace nanojit
 			bool		asm_qlo(LInsp ins, LInsp q);
 			void		asm_fneg(LInsp ins);
 			void		asm_farg(LInsp ins);
 			void		asm_fop(LInsp ins);
 			void		asm_i2f(LInsp ins);
 			void		asm_u2f(LInsp ins);
 			Register	asm_prep_fcall(Reservation *rR, LInsp ins);
 			void		asm_nongp_copy(Register r, Register s);
+			void		asm_bailout(LInsp guard, Register state);
 
 			// platform specific implementation (see NativeXXX.cpp file)
 			void		nInit(uint32_t flags);
 			void		nInit(AvmCore *);
 			Register	nRegisterAllocFromSet(int32_t set);
 			void		nRegisterResetAll(RegAlloc& a);
 			void		nMarkExecute(Page* page, int32_t count=1, bool enable=true);
 			void		nPostCallCleanup(const CallInfo* call);
 			void		nArgEmitted(const CallInfo* call, uint32_t stackSlotCount, uint32_t iargs, uint32_t fargs);
 			void		nFrameRestore(RegisterMask rmask);
 			static void	nPatchBranch(NIns* branch, NIns* location);
-			GuardRecord *nFragExit(LInsp guard);
+			void		nFragExit(LIns* guard);
 
 			// platform specific methods
         public:
 			DECLARE_PLATFORM_ASSEMBLER()
 
 		private:
 			debug_only( int32_t	_fpuStkDepth; )
 			debug_only( int32_t	_sv_fpuStkDepth; )
--- a/js/src/nanojit/Fragmento.cpp
+++ b/js/src/nanojit/Fragmento.cpp
@@ -52,17 +52,16 @@ namespace nanojit
 	{
 #ifdef MEMORY_INFO
 		_allocList.set_meminfo_name("Fragmento._allocList");
 #endif
 		_core = core;
 		GC *gc = core->GetGC();
 		_frags = new (gc) FragmentMap(gc, 128);
 		_assm = new (gc) nanojit::Assembler(this);
-		_pageGrowth = 1;
 		verbose_only( enterCounts = new (gc) BlockHist(gc); )
 		verbose_only( mergeCounts = new (gc) BlockHist(gc); )
 	}
 
 	Fragmento::~Fragmento()
 	{
 		debug_only( clearFrags() );
         _frags->clear();		
@@ -75,21 +74,18 @@ namespace nanojit
 			_gcHeap->Free( _allocList.removeLast() );	
 		}
 		NanoAssert(_stats.freePages == _stats.pages );
 	}
 
 	Page* Fragmento::pageAlloc()
 	{
         NanoAssert(sizeof(Page) == NJ_PAGE_SIZE);
-		if (!_pageList) {
-			pagesGrow(_pageGrowth);	// try to get more mem
-            if ((_pageGrowth << 1) < (uint32_t)NJ_PAGES)
-                _pageGrowth <<= 1;
-		}
+		if (!_pageList)
+			pagesGrow(NJ_PAGES);	// try to get more mem
 		Page *page = _pageList;
 		if (page)
 		{
 			_pageList = page->next;
 			verbose_only(_stats.freePages--;)
 		}
 		//fprintf(stderr, "Fragmento::pageAlloc %X,  %d free pages of %d\n", (int)page, _stats.freePages, _stats.pages);
 		NanoAssert(pageCount()==_stats.freePages);
@@ -109,22 +105,16 @@ namespace nanojit
 
 	void Fragmento::pagesGrow(int32_t count)
 	{
 		NanoAssert(!_pageList);
 		MMGC_MEM_TYPE("NanojitFragmentoMem"); 
 		Page* memory = 0;
 		if (NJ_UNLIMITED_GROWTH || _stats.pages < (uint32_t)NJ_PAGES)
 		{
-		    // make sure we don't grow beyond NJ_PAGES
-		    if (_stats.pages + count > (uint32_t)NJ_PAGES) 
-		        count = NJ_PAGES - _stats.pages;
-		    if (count < 0)
-		        count = 0;
-		    
 			// @todo nastiness that needs a fix'n
 			_gcHeap = _core->GetGC()->GetGCHeap();
 			NanoAssert(NJ_PAGE_SIZE<=_gcHeap->kNativePageSize);
 			
 			// convert NJ_PAGES to gc page count 
 			int32_t gcpages = (count*NJ_PAGE_SIZE) / _gcHeap->kNativePageSize;
 			MMGC_MEM_TYPE("NanojitMem"); 
 			memory = (Page*)_gcHeap->Alloc(gcpages);
@@ -255,28 +245,18 @@ namespace nanojit
 		for(Page* page=_pageList; page; page = page->next)
 			n++;
 		return n;
 	}
 
 	void Fragmento::dumpFragStats(Fragment *f, int level, int& size,
 		uint64_t &traceDur, uint64_t &interpDur)
     {
-        avmplus::String *filep = f->file;
-        if (!filep)
-            filep = _core->k_str[avmplus::kstrconst_emptyString];
-        avmplus::StringNullTerminatedUTF8 file(_core->gc, filep);
-        const char *s = file.c_str();
-        const char *t = strrchr(s,'\\');
-        if (!t) t = strrchr(s,'/');
-        if (t) s = t+1;
-
-        char buf[500];
-		int namewidth = 35;
-        sprintf(buf, "%*c%s %.*s:%d", 1+level, ' ', labels->format(f), namewidth, s, f->line);
+        char buf[50];
+        sprintf(buf, "%*c%s", 1+level, ' ', labels->format(f));
 
         int called = f->hits();
         if (called >= 0)
             called += f->_called;
         else
             called = -(1<<f->blacklistLevel) - called - 1;
 
         uint32_t main = f->_native - f->_exitNative;
@@ -289,20 +269,19 @@ namespace nanojit
 				sprintf(cause,"%s %s", f->_token, labels->format(f->eot_target));
 			} else {
 	            strcpy(cause, f->_token);
 			}
 		}
         else
             cause[0] = 0;
         
-        		const void* ip = f->ip;
-        _assm->outputf("%-*s %7d %6d %6d %6d %4d %9llu %9llu %-12s %s", namewidth, buf,
+        _assm->outputf("%-10s %7d %6d %6d %6d %4d %9llu %9llu %-12s %s", buf,
             called, f->guardCount, main, f->_native, f->compileNbr, f->traceTicks/1000, f->interpTicks/1000,
-			cause, core()->interp.labels->format(ip));
+			cause, labels->format(f->ip));
         
         size += main;
 		traceDur += f->traceTicks;
 		interpDur += f->interpTicks;
 
 		for (Fragment *x = f->branches; x != 0; x = x->nextbranch)
 			if (x->kind != MergeTrace)
 	            dumpFragStats(x,level+1,size,traceDur,interpDur);
@@ -369,17 +348,17 @@ namespace nanojit
             return;
 		}
 
         _assm->outputf("\nFragment statistics");
 		_assm->outputf("  loop trees:     %d", count);
 		_assm->outputf("  flushes:        %d", flushes);
 		_assm->outputf("  compiles:       %d / %d", _stats.compiles, _stats.totalCompiles);
 		_assm->outputf("  used:           %dk / %dk", (pages-free)<<(NJ_LOG2_PAGE_SIZE-10), pages<<(NJ_LOG2_PAGE_SIZE-10));
-		_assm->output("\n         location                     calls guards   main native  gen   T-trace  T-interp");
+		_assm->output("\ntrace         calls guards   main native  gen   T-trace  T-interp");
 
 		avmplus::SortedMap<uint64_t, DurData, avmplus::LIST_NonGCObjects> durs(_core->gc);
 		uint64_t totaldur=0;
 		uint64_t totaltrace=0;
 		int totalsize=0;
         for (int32_t i=0; i<count; i++)
         {
             Fragment *f = _frags->at(i);
@@ -402,22 +381,23 @@ namespace nanojit
 			totaltrace/1000, int(100.0*totaltrace/totaldur),
 			(totaldur-totaltrace)/1000, int(100.0*(totaldur-totaltrace)/totaldur));
 		_assm->outputf("");
 		_assm->outputf("trace      ticks            trace           interp           size");
 		for (int32_t i=durs.size()-1; i >= 0; i--) {
 			uint64_t bothDur = durs.keyAt(i);
 			DurData d = durs.get(bothDur);
 			int size = d.size;
-			_assm->outputf("%-4s %9lld (%2d%%)  %9lld (%2d%%)  %9lld (%2d%%)  %6d (%2d%%)", 
+			_assm->outputf("%-4s %9lld (%2d%%)  %9lld (%2d%%)  %9lld (%2d%%)  %6d (%2d%%)  %s", 
 				labels->format(d.frag),
 				bothDur/1000, int(100.0*bothDur/totaldur),
 				d.traceDur/1000, int(100.0*d.traceDur/totaldur),
 				d.interpDur/1000, int(100.0*d.interpDur/totaldur),
-				size, int(100.0*size/totalsize));
+				size, int(100.0*size/totalsize),
+				labels->format(d.frag->ip));
 		}
 
 		_assm->_verbose = vsave;
 
 	}
 
 	void Fragmento::countBlock(BlockHist *hist, const void* ip)
 	{
@@ -590,22 +570,16 @@ namespace nanojit
         _hits = -(1<<blacklistLevel);
     }
 
     Fragment *Fragmento::newFrag(const void* ip)
     {
 		GC *gc = _core->gc;
         Fragment *f = new (gc) Fragment(ip);
 		f->blacklistLevel = 5;
-#ifdef AVMPLUS_VERBOSE
-        if (_core->interp.currentState->f->filename) {
-            f->line = _core->interp.currentState->f->linenum;
-            f->file = _core->interp.currentState->f->filename;
-        }
-#endif
         return f;
     }
 
 	Fragment *Fragmento::newBranch(Fragment *from, const void* ip)
 	{
 		Fragment *f = newFrag(ip);
 		f->anchor = from->anchor;
 		f->root = from->root;
--- a/js/src/nanojit/Fragmento.h
+++ b/js/src/nanojit/Fragmento.h
@@ -128,36 +128,22 @@ namespace nanojit
 
 		private:
 			void		pagesGrow(int32_t count);
 
 			AvmCore*			_core;
 			DWB(Assembler*)		_assm;
 			DWB(FragmentMap*)	_frags;		/* map from ip -> Fragment ptr  */
 			Page*			_pageList;
-			uint32_t        _pageGrowth;
 
 			/* unmanaged mem */
 			AllocList	_allocList;
 			GCHeap*		_gcHeap;
 	};
 
-    struct SideExit
-    {
-		int32_t f_adj;
-        int32_t ip_adj;
-		int32_t sp_adj;
-		int32_t rp_adj;
-        Fragment *target;
-		int32_t calldepth;
-		void* vmprivate;
-		verbose_only( uint32_t sid; )
-		verbose_only(Fragment *from;)
-    };
-
 	enum TraceKind {
 		LoopTrace,
 		BranchTrace,
 		MergeTrace
 	};
 	
 	/**
 	 * Fragments are linear sequences of native code that have a single entry 
@@ -196,18 +182,16 @@ namespace nanojit
 			
 			verbose_only( uint32_t		_called; )
 			verbose_only( uint32_t		_native; )
             verbose_only( uint32_t      _exitNative; )
 			verbose_only( uint32_t		_lir; )
 			verbose_only( const char*	_token; )
             verbose_only( uint64_t      traceTicks; )
             verbose_only( uint64_t      interpTicks; )
-            verbose_only( int32_t line; )
-            verbose_only( DRCWB(avmplus::String *)file; )
 			verbose_only( DWB(Fragment*) eot_target; )
 			verbose_only( uint32_t mergeid;)
 			verbose_only( uint32_t		sid;)
 			verbose_only( uint32_t		compileNbr;)
 
             DWB(Fragment*) treeBranches;
             DWB(Fragment*) branches;
             DWB(Fragment*) nextbranch;
@@ -220,17 +204,17 @@ namespace nanojit
 			GuardRecord*	outbound;
 			
 			TraceKind kind;
 			const void* ip;
 			uint32_t guardCount;
             uint32_t xjumpCount;
             int32_t blacklistLevel;
             NIns* fragEntry;
-            LInsp param0,param1,sp,rp;
+            LInsp state,param1,sp,rp;
 			int32_t calldepth;
 			void* vmprivate;
 			
 		private:
 			NIns*			_code;		// ptr to start of code
 			GuardRecord*	_links;		// code which is linked (or pending to be) to this fragment
 			int32_t			_hits;
 			Page*			_pages;		// native code pages 
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -327,17 +327,17 @@ namespace nanojit
 
 	LInsp LirBufWriter::insLoad(LOpcode op, LInsp base, LInsp d)
 	{
 		return ins2(op,base,d);
 	}
 
 	LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, SideExit *x)
 	{
-		LInsp data = skip(sizeof(SideExit));
+		LInsp data = skip(SideExitSize(x));
 		*((SideExit*)data->payload()) = *x;
 		return ins2(op, c, data);
 	}
 
 	LInsp LirBufWriter::insImm8(LOpcode op, int32_t a, int32_t b)
 	{
 		ensureRoom(1);
 		LInsp l = _buf->next();
@@ -458,17 +458,17 @@ namespace nanojit
     
 	bool LIns::isCall() const
 	{
 		return (u.code&~LIR64) == LIR_call;
 	}
 
 	bool LIns::isGuard() const
 	{
-		return u.code==LIR_x || u.code==LIR_xf || u.code==LIR_xt;
+		return u.code==LIR_x || u.code==LIR_xf || u.code==LIR_xt || u.code==LIR_loop;
 	}
 
     bool LIns::isStore() const
     {
 		int c = u.code & ~LIR64;
         return c == LIR_st || c == LIR_sti;
     }
 
@@ -805,17 +805,17 @@ namespace nanojit
 			return i;
 		}
 
 		return out->ins2(v, oprnd1, oprnd2);
 	}
 
 	LIns* ExprFilter::insGuard(LOpcode v, LInsp c, SideExit *x)
 	{
-		if (v != LIR_x) {
+		if (v == LIR_xt || v == LIR_xf) {
 			if (c->isconst()) {
 				if (v == LIR_xt && !c->constval() || v == LIR_xf && c->constval()) {
 					return 0; // no guard needed
 				}
 				else {
 					// need a way to EOT now, since this is trace end.
 					return out->insGuard(LIR_x, 0, x);
 				}
@@ -924,47 +924,35 @@ namespace nanojit
 			ins1(k_argmap[(argt&3)-1], args[i]);
 		}
 
 		return insImm8(op==LIR_callh ? LIR_call : op, fid, argc);
 	}
 
     using namespace avmplus;
 
-	StoreFilter::StoreFilter(LirFilter *in, GC *gc, LInsp p0, LInsp sp, LInsp rp) 
-		: LirFilter(in), gc(gc), param0(p0), sp(sp), rp(rp), stop(0), rtop(0)
+	StackFilter::StackFilter(LirFilter *in, GC *gc, Fragment *frag, LInsp sp) 
+		: LirFilter(in), gc(gc), frag(frag), sp(sp), top(0)
 	{}
 
-	LInsp StoreFilter::read() 
+	LInsp StackFilter::read() 
 	{
 		for (;;) 
 		{
 			LInsp i = in->read();
 			if (!i)
 				return i;
 			bool remove = false;
 			if (i->isStore())
 			{
 				LInsp base = i->oprnd2();
-				if (base == param0) 
-				{
-					// update stop/rstop
-					int d = i->immdisp();
-					if (d == offsetof(InterpState,sp)) {
-						stop = i->oprnd1()->oprnd2()->constval() >> 2;
-						NanoAssert(!(stop&1));
-					}
-					else if (d == offsetof(InterpState,rp))
-						rtop = i->oprnd1()->oprnd2()->constval() >> 2;
-				}
-				else if (base == sp) 
+				if (base == sp) 
 				{
 					LInsp v = i->oprnd1();
 					int d = i->immdisp() >> 2;
-					int top = stop+2;
 					if (d >= top) {
 						remove = true;
 					} else {
 						d = top - d;
 						if (v->isQuad()) {
 							// storing 8 bytes
 							if (stk.get(d) && stk.get(d-1)) {
 								remove = true;
@@ -977,38 +965,21 @@ namespace nanojit
 							// storing 4 bytes
 							if (stk.get(d))
 								remove = true;
 							else
 								stk.set(gc, d);
 						}
 					}
 				}
-				else if (base == rp) 
-				{
-					int d = i->immdisp() >> 2;
-					if (d >= rtop) {
-						remove = true;
-					} else {
-						d = rtop - d;
-						if (rstk.get(d))
-							remove = true;
-						else
-							rstk.set(gc, d);
-					}
-				}
 			}
 			else if (i->isGuard())
 			{
-				rstk.reset();
 				stk.reset();
-				SideExit *exit = i->exit();
-				stop = exit->sp_adj >> 2;
-				rtop = exit->rp_adj >> 2;
-				NanoAssert(!(stop&1));
+				top = getTop(i) >> 2;
 			}
 			if (!remove)
 				return i;
 		}
 	}
 
 	//
 	// inlined/separated version of SuperFastHash
@@ -1345,17 +1316,17 @@ namespace nanojit
                 live.put(i,use);
             }
 		}
         void retire(LInsp i, GC *gc) {
             RetiredEntry *e = new (gc) RetiredEntry(gc);
             e->i = i;
             for (int j=0, n=live.size(); j < n; j++) {
                 LInsp l = live.keyAt(j);
-                if (!l->isStore() && !l->isGuard() && !l->isArg() && !l->isop(LIR_loop))
+                if (!l->isStore() && !l->isGuard() && !l->isArg())
                     e->live.add(l);
             }
             int size=0;
 		    if ((size = e->live.size()) > maxlive)
 			    maxlive = size;
 
             live.remove(i);
             retired.add(e);
@@ -1370,33 +1341,34 @@ namespace nanojit
 		// traverse backwards to find live exprs and a few other stats.
 
 		LInsp sp = frag->sp;
 		LInsp rp = frag->rp;
 		LiveTable live(gc);
 		uint32_t exits = 0;
 		LirBuffer *lirbuf = frag->lirbuf;
         LirReader br(lirbuf);
-		StoreFilter r(&br, gc, frag->param0, sp, rp);
+		StackFilter sf(&br, gc, frag, sp);
+		StackFilter r(&sf, gc, frag, rp);
         bool skipargs = false;
         int total = 0;
-        live.add(frag->param0, r.pos());
+        live.add(frag->state, r.pos());
 		for (LInsp i = r.read(); i != 0; i = r.read())
 		{
             total++;
 
             if (i->isArg()) {
                 if (!skipargs)
                     live.add(i->oprnd1(),0);
             } else {
                 skipargs = false;
             }
 
             // first handle side-effect instructions
-			if (i->isStore() || i->isGuard() || i->isop(LIR_loop) ||
+			if (i->isStore() || i->isGuard() ||
 				i->isCall() && !assm->callInfoFor(i->imm8())->_cse)
 			{
 				live.add(i,0);
                 if (i->isGuard())
                     exits++;
 			}
 
 			// now propagate liveness
@@ -1501,17 +1473,17 @@ namespace nanojit
 		}
 		return labels->dup(buffer);
 	}
 
 	const char* LirNameMap::formatIns(LIns* i)
 	{
 		char sbuf[200];
 		char *s = sbuf;
-		if (!i->isStore() && !i->isGuard() && !i->isop(LIR_trace) && !i->isop(LIR_loop)) {
+		if (!i->isStore() && !i->isGuard() && !i->isop(LIR_trace)) {
 			sprintf(s, "%s = ", formatRef(i));
 			s += strlen(s);
 		}
 
 		LOpcode op = i->opcode();
 		switch(op)
 		{
 			case LIR_short:
@@ -1545,52 +1517,37 @@ namespace nanojit
 				sprintf(s, ")");
 				break;
 			}
 
 			case LIR_param:
                 sprintf(s, "%s %s", lirNames[op], gpn(i->imm8()));
 				break;
 
-			case LIR_x: {
-                SideExit *x = (SideExit*) i->oprnd2()->payload();
-				uint32_t ip = uint32_t(x->from->ip) + x->ip_adj;
-				sprintf(s, "%s: %s -> %s sp%+d rp%+d f%+d", 
-					formatRef(i), lirNames[op],
-					labels->format((void*)ip),
-					x->sp_adj, x->rp_adj, x->f_adj);
-                break;
-			}
-
             case LIR_callh:
 			case LIR_neg:
 			case LIR_fneg:
 			case LIR_arg:
 			case LIR_farg:
 			case LIR_i2f:
 			case LIR_u2f:
 			case LIR_qlo:
 			case LIR_qhi:
 			case LIR_ref:
             case LIR_ov:
             case LIR_cs:
 				sprintf(s, "%s %s", lirNames[op], formatRef(i->oprnd1()));
 				break;
 
+			case LIR_x:
 			case LIR_xt:
-			case LIR_xf: {
-                SideExit *x = (SideExit*) i->oprnd2()->payload();
-				uint32_t ip = int32_t(x->from->ip) + x->ip_adj;
-				sprintf(s, "%s: %s %s -> %s sp%+d rp%+d f%+d",
-					formatRef(i), lirNames[op],
-					formatRef(i->oprnd1()),
-					labels->format((void*)ip),
-					x->sp_adj, x->rp_adj, x->f_adj);
+			case LIR_xf:
+				formatGuard(i, s);
 				break;
-            }
+
 			case LIR_add:
 			case LIR_sub: 
 		 	case LIR_mul: 
 			case LIR_fadd:
 			case LIR_fsub: 
 		 	case LIR_fmul: 
 			case LIR_fdiv: 
 			case LIR_and: 
@@ -1848,23 +1805,25 @@ namespace nanojit
 
 #if defined(NJ_VERBOSE)
     LabelMap::LabelMap(AvmCore *core, LabelMap* parent)
         : parent(parent), names(core->gc), addrs(core->config.verbose_addrs), end(buf), core(core)
 	{}
 
     void LabelMap::add(const void *p, size_t size, size_t align, const char *name)
 	{
-		if (!this) return;
+		if (!this || names.containsKey(p))
+			return;
 		add(p, size, align, core->newString(name));
 	}
 
     void LabelMap::add(const void *p, size_t size, size_t align, Stringp name)
     {
-		if (!this) return;
+		if (!this || names.containsKey(p))
+			return;
 		Entry *e = new (core->gc) Entry(name, size<<align, align);
 		names.put(p, e);
     }
 
     const char *LabelMap::format(const void *p)
     {
 		char b[200];
 		int i = names.findNear(p);
@@ -1884,16 +1843,23 @@ namespace nanojit
 			else if (p > start && p < end) {
 				int d = (int(p)-int(start)) >> e->align;
 				if (addrs)
 					sprintf(b, "%p %s+%d", p, name, d);
 				else
 					sprintf(b,"%s+%d", name, d);
 				return dup(b);
 			}
+			else {
+				if (parent)
+					return parent->format(p);
+
+				sprintf(b, "%p", p);
+				return dup(b);
+			}
 		}
 		if (parent)
 			return parent->format(p);
 
 		sprintf(b, "%p", p);
 		return dup(b);
     }
 
@@ -1904,11 +1870,20 @@ namespace nanojit
 		end += need;
 		if (end > buf+sizeof(buf)) {
 			s = buf;
 			end = s+need;
 		}
 		strcpy(s, b);
 		return s;
 	}
+
+	// copy all labels to parent, adding newbase to label addresses
+	void LabelMap::promoteAll(const void *newbase)
+	{
+		for (int i=0, n=names.size(); i < n; i++) {
+			void *base = (char*)newbase + (int)names.keyAt(i);
+			parent->names.put(base, names.at(i));
+		}
+	}
 #endif // NJ_VERBOSE
 }
 	
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -395,16 +395,17 @@ namespace nanojit
         void formatAddr(const void *p, char *buf);
     public:
 		AvmCore *core;
         LabelMap(AvmCore *, LabelMap* parent);
         void add(const void *p, size_t size, size_t align, const char *name);
 		void add(const void *p, size_t size, size_t align, avmplus::String*);
 		const char *dup(const char *);
 		const char *format(const void *p);
+		void promoteAll(const void *newbase);
     };
 
 	class LirNameMap
 	{
 		class CountMap: public avmplus::SortedMap<int, int, avmplus::LIST_NonGCObjects> {
 		public:
 			CountMap(GC*gc) : avmplus::SortedMap<int, int, avmplus::LIST_NonGCObjects>(gc) {};
 			int add(int i) {
@@ -436,18 +437,20 @@ namespace nanojit
 			labels(r)
 		{}
 
 		void addName(LInsp i, const char *s);
 		void addName(LInsp i, avmplus::String *s);
 		void copyName(LInsp i, const char *s, int suffix);
         const char *formatRef(LIns *ref);
 		const char *formatIns(LInsp i);
+		void formatGuard(LInsp i, char *buf);
 	};
 
+
 	class VerboseWriter : public LirWriter
 	{
 		avmplus::List<LInsp, avmplus::LIST_NonGCObjects> code;
 		LirNameMap *names;
     public:
 		VerboseWriter(GC *gc, LirWriter *out, LirNameMap* names) 
 			: LirWriter(out), code(gc), names(names) 
 		{}
@@ -671,25 +674,27 @@ namespace nanojit
 	};
 
     class Assembler;
 
     void compile(Assembler *assm, Fragment *frag);
     verbose_only( void printTracker(const char* s, avmplus::RegionTracker& trk, Assembler* assm); )
 	verbose_only(void live(GC *gc, Assembler *assm, Fragment *frag);)
 
-	class StoreFilter: public LirFilter
+	class StackFilter: public LirFilter
 	{
 		GC *gc;
-		LInsp param0, sp, rp;
-		avmplus::BitSet rstk, stk;
-        int stop, rtop;
+		Fragment *frag;
+		LInsp sp;
+		avmplus::BitSet stk;
+        int top;
+		int getTop(LInsp guard);
 	public:
-		StoreFilter(LirFilter *in, GC *gc, LInsp p0, LInsp sp, LInsp rp); 
-		virtual ~StoreFilter() {}
+		StackFilter(LirFilter *in, GC *gc, Fragment *frag, LInsp sp); 
+		virtual ~StackFilter() {}
 		LInsp read();
 	};
 
 	class CseReader: public LirFilter
 	{
 		LInsHashSet *exprs;
 		const CallInfo *functions;
 	public:
--- a/js/src/nanojit/Native.h
+++ b/js/src/nanojit/Native.h
@@ -42,17 +42,17 @@
 
 
 #ifdef NANOJIT_IA32
 #include "Nativei386.h"
 #elif defined(NANOJIT_ARM)
 #ifdef THUMB
 #include "NativeThumb.h"
 #else
-#include "NativeArm.h"
+#include "NativeARM.h"
 #endif
 #elif defined(NANOJIT_PPC)
 #include "NativePpc.h"
 #else
 #error "unknown nanojit architecture"
 #endif
 
 	#ifdef NJ_STACK_GROWTH_UP
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -607,23 +607,16 @@ ShiftOperator;
 
 #define B_cond(_c,_t)\
 	underrunProtect(4);\
 	intptr_t tt = (intptr_t)(_t) - ((intptr_t)_nIns + 4);\
 	*(--_nIns) = (NIns)( ((_c)<<28) | (0xA<<24) | ((tt >>2)& 0xFFFFFF) );	\
 	asm_output2("b(cond) 0x%08x (%tX)",(_t), tt);
 
 
-//#define B(c,t) \
-//	intptr_t tt = (intptr_t)t - (intptr_t)_nIns;	\
-//	NanoAssert( ((int)tt < 256) && ((int)tt>-255) );						\
-//	underrunProtect(2);							\
-//	*(--_nIns) = (NIns)(0xD000 | (c<<8) | (tt>>1)&0xFF );
-
-
 #define JA(t)	do {B_cond(HI,t); asm_output1("ja 0x%08x",t); } while(0)
 #define JNA(t)	do {B_cond(LS,t); asm_output1("jna 0x%08x",t); } while(0)
 #define JB(t)	do {B_cond(CC,t); asm_output1("jb 0x%08x",t); } while(0)
 #define JNB(t)	do {B_cond(CS,t); asm_output1("jnb 0x%08x",t); } while(0)
 #define JE(t)	do {B_cond(EQ,t); asm_output1("je 0x%08x",t); } while(0)
 #define JNE(t)	do {B_cond(NE,t); asm_output1("jne 0x%08x",t); } while(0)						
 #define JBE(t)	do {B_cond(LS,t); asm_output1("jbe 0x%08x",t); } while(0)
 #define JNBE(t) do {B_cond(HI,t); asm_output1("jnbe 0x%08x",t); } while(0)
--- a/js/src/nanojit/Nativei386.cpp
+++ b/js/src/nanojit/Nativei386.cpp
@@ -119,17 +119,17 @@ namespace nanojit
 		// here is cheap.
 		ANDi(SP, -NJ_ALIGN_STACK);
 		MR(FP,SP);
 		PUSHr(FP); // Save caller's FP.
 
 		return patchEntry;
 	}
 
-	GuardRecord * Assembler::nFragExit(LInsp guard)
+	void Assembler::nFragExit(LInsp guard)
 	{
 		SideExit *exit = guard->exit();
 		bool trees = _frago->core()->config.tree_opt;
         Fragment *frag = exit->target;
         GuardRecord *lr = 0;
 		bool destKnown = (frag && frag->fragEntry);
 		if (destKnown && !trees)
 		{
@@ -160,38 +160,18 @@ namespace nanojit
 			// will make use of this when calling fragenter().
             int fromfrag = int((Fragment*)_thisfrag);
             LDi(argRegs[1], fromfrag);
         }
         #endif
 
 		// return value is GuardRecord*
         LDi(EAX, int(lr));
-
-		// if/when we patch this exit to jump over to another fragment,
-		// that fragment will need its parameters set up just like ours.
-        LInsp param0 = _thisfrag->param0;
-		Register state = findSpecificRegFor(param0, Register(param0->imm8()));
-
-        // update InterpState
-        
-        if (exit->rp_adj)
-            ADDmi((int32_t)offsetof(avmplus::InterpState, rp), state, exit->rp_adj);
+	}
 
-        if (exit->sp_adj)
-            ADDmi((int32_t)offsetof(avmplus::InterpState, sp), state, exit->sp_adj);
-
-        if (exit->ip_adj)
-			ADDmi((int32_t)offsetof(avmplus::InterpState, ip), state, exit->ip_adj);
-
-        if (exit->f_adj)
-            ADDmi((int32_t)offsetof(avmplus::InterpState, f), state, exit->f_adj);
-
-        return lr;
-	}
 
     NIns *Assembler::genEpilogue(RegisterMask restore)
     {
         RET();
         POPr(FP); // Restore caller's FP.
         MR(SP,FP); // Undo forced alignment.
 
 		// Restore saved registers.
--- a/js/src/nanojit/TraceTreeDrawer.cpp
+++ b/js/src/nanojit/TraceTreeDrawer.cpp
@@ -33,17 +33,17 @@
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include <stdio.h>
 #include "nanojit.h"
 
-#define verbose_draw_only(x)
+#define verbose_draw_only(x) 
 
 namespace nanojit {
 
 #ifdef AVMPLUS_VERBOSE
 	using namespace avmplus;
 
 	TraceTreeDrawer::TraceTreeDrawer(Fragmento *frago, AvmCore *core, AvmString fileName) {
 		this->_frago = frago;
@@ -51,33 +51,30 @@ namespace nanojit {
 		this->_labels = frago->labels;
 		this->_fileName = fileName;
 	}
 	
 	TraceTreeDrawer::~TraceTreeDrawer() {
 	}
 	
 	void TraceTreeDrawer::addNode(Fragment *fragment, const char *color) {
-		avmplus::String *filep = fragment->file;
-        if (!filep)
-            filep = _core->k_str[avmplus::kstrconst_emptyString];
-        avmplus::StringNullTerminatedUTF8 file(_core->gc, filep);
-        const char *fileName = file.c_str();
-		
         fprintf(_fstream, "<node id=\"%d\">\n"
         		"<data key=\"nodeGraphicsID\">\n"
             	"<y:ShapeNode>\n"
             	"<y:Shape type=\"roundrectangle\"/>\n"
             	"<y:NodeLabel alignment=\"center\">%s"
-            	" %s:%d"	//filename:line number
+            	" %s"	//fragment->ip
             	"</y:NodeLabel>\n"
             	"<y:Fill color=\"#%s\" transparent=\"false\"/>\n"
             	"</y:ShapeNode>\n"
             	"</data>\n"
-            	"</node>\n", (int)fragment, _labels->format(fragment), fileName, fragment->line, color);
+            	"</node>\n", 
+				(int)fragment,
+				_labels->format(fragment), 
+				_labels->format(fragment->ip), color);
 	}
 	
 	void TraceTreeDrawer::addNode(Fragment *fragment) {
 		if (!fragment->isAnchor())
     		addNode(fragment, "FFCC99");	// standard color
 		else
 			addNode(fragment, "CCFFFF");	// Root node
 	}
@@ -116,18 +113,20 @@ namespace nanojit {
 				addMergeNode(treeBranch);
 			} // end ifelse
     	}	// end for loop
     }
     
     void TraceTreeDrawer::addBackEdges(Fragment *root) {
     	// At the end of a tree, find out where it goes
     	if (isCrossFragment(root)) {
-    		verbose_draw_only(printf("Found a cross fragment %s TO %s \n", _labels->format(root), _labels->format(root->eot_target)));
-    		this->addEdge(root, root->eot_target);
+			if (root->eot_target) {
+    			verbose_draw_only(printf("Found a cross fragment %s TO %s \n", _labels->format(root), _labels->format(root->eot_target)));
+    			this->addEdge(root, root->eot_target);
+			}
     	}
     	else if (isBackEdgeSideExit(root)) {
 			verbose_draw_only(printf("Adding anchor branch edge from %s TO %s\n", _labels->format(root), _labels->format(root->anchor)));
 			this->addEdge(root, root->anchor);
     	}
     	else if (isSingleTrace(root)) {
     		verbose_draw_only(printf("Found a single trace %s\n", _labels->format(root)));
     		this->addEdge(root, root);
@@ -169,19 +168,20 @@ namespace nanojit {
 		
 		verbose_draw_only(printf("\nFinished drawing, printing status\n"));
 		verbose_draw_only(this->printTreeStatus(root));
     }
     
     void TraceTreeDrawer::createGraphHeader() {
     	Stringp graphMLExtension = _core->newString(".graphml");
     	Stringp outputFileName = _core->concatStrings(this->_fileName, graphMLExtension);
+		StringNullTerminatedUTF8 fn(_core->gc, outputFileName);
     	
-    	verbose_draw_only(printf("output file name is %s\n", (char *)(outputFileName->getData())));
-    	this->_fstream = fopen((char *)outputFileName->getData(), "w");
+    	verbose_draw_only(printf("output file name is %s\n", fn.c_str());)
+    	this->_fstream = fopen(fn.c_str(), "w");
     	
 		fprintf(_fstream, "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n"
     			"<graphml xmlns=\"http://graphml.graphdrawing.org/xmlns/graphml\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:y=\"http://www.yworks.com/xml/graphml\" xsi:schemaLocation=\"http://graphml.graphdrawing.org/xmlns/graphml http://www.yworks.com/xml/schema/graphml/1.0/ygraphml.xsd\">\n"
     			"<key for=\"node\" id=\"nodeGraphicsID\" yfiles.type=\"nodegraphics\"/>\n"
     			"<key attr.name=\"description\" attr.type=\"string\" for=\"node\" id=\"nodeDescID\"/>\n"
     			"<key for=\"edge\" id=\"edgeGraphicsID\" yfiles.type=\"edgegraphics\"/>\n"
     			"<key attr.name=\"description\" attr.type=\"string\" for=\"edge\" id=\"edgeDescID\"/>\n"
     			"<graph edgedefault=\"directed\" id=\"rootGraph\">\n");