Merge tracemonkey to mozilla-central.
author: Robert Sayre <sayrer@gmail.com>
date: Thu, 28 May 2009 18:52:29 -0400
changeset: 28818:ac3e487c5fffad1d1ef9f3353f3cadad297b1f66
parent: 28809:bfb383af1903bc620eef56a3baabbc33276cc86d (current diff)
parent: 28817:26f38f9b8f493b1c381d6c89763a531a493f82cd
child: 28819:8bbbfa16fe349d87077506715223f7bbb0a23b5c
push id: 7273
push user: rsayre@mozilla.com
push date: Thu, 28 May 2009 22:52:43 +0000
treeherder: mozilla-central@ac3e487c5fff
milestone: 1.9.2a1pre
Merge tracemonkey to mozilla-central.
--- a/js/src/Makefile.in
+++ b/js/src/Makefile.in
@@ -70,17 +70,17 @@ ifdef INTEL_CXX
 # icc gets special optimize flags
 ifdef MOZ_PROFILE_GENERATE
 MODULE_OPTIMIZE_FLAGS = -O0
 else
 MODULE_OPTIMIZE_FLAGS = -O2 -ip
 #XXX: do we want different INTERP_OPTIMIZER flags here?
 endif
 else # not INTEL_CXX
-MODULE_OPTIMIZE_FLAGS = -Os -fstrict-aliasing $(MOZ_OPTIMIZE_SIZE_TWEAK)
+MODULE_OPTIMIZE_FLAGS = -O3 -fstrict-aliasing $(MOZ_OPTIMIZE_SIZE_TWEAK)
 # Special optimization flags for jsinterp.c
 INTERP_OPTIMIZER = -O3 -fstrict-aliasing
 endif
 else # not GNU_CXX
 ifeq ($(OS_ARCH),SunOS)
 MODULE_OPTIMIZE_FLAGS = -xO4
 endif
 ifeq ($(OS_ARCH),WINNT)
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -7111,26 +7111,24 @@ js_Interpret(JSContext *cx)
 #if !JS_THREADED_INTERP
         } /* switch (op) */
     } /* for (;;) */
 #endif /* !JS_THREADED_INTERP */
 
   error:
     if (fp->imacpc && cx->throwing) {
         // To keep things simple, we hard-code imacro exception handlers here.
-        if (*fp->imacpc == JSOP_NEXTITER) {
+        if (*fp->imacpc == JSOP_NEXTITER && js_ValueIsStopIteration(cx->exception)) {
             // pc may point to JSOP_DUP here due to bug 474854.
             JS_ASSERT(*regs.pc == JSOP_CALL || *regs.pc == JSOP_DUP || *regs.pc == JSOP_TRUE);
-            if (js_ValueIsStopIteration(cx->exception)) {
-                cx->throwing = JS_FALSE;
-                cx->exception = JSVAL_VOID;
-                regs.sp[-1] = JSVAL_HOLE;
-                PUSH(JSVAL_FALSE);
-                goto end_imacro;
-            }
+            cx->throwing = JS_FALSE;
+            cx->exception = JSVAL_VOID;
+            regs.sp[-1] = JSVAL_HOLE;
+            PUSH(JSVAL_FALSE);
+            goto end_imacro;
         }
 
         // Handle other exceptions as if they came from the imacro-calling pc.
         regs.pc = fp->imacpc;
         fp->imacpc = NULL;
         atoms = script->atomMap.vector;
     }
 
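Note on the jsinterp.cpp hunk above: it folds the js_ValueIsStopIteration() test into the outer condition, so the hard-coded JSOP_NEXTITER imacro handler (and its opcode assertion) only runs when the pending exception really is StopIteration, while any other exception takes the generic path that rethrows from the imacro-calling pc. A minimal, self-contained sketch of that control-flow shape (not SpiderMonkey code; the exception type and helper here are purely illustrative):

    #include <cstdio>

    // Illustrative stand-in for the StopIteration sentinel.
    struct StopIteration {};

    // Models the "fetch next element" step: throws StopIteration when the
    // sequence is exhausted, and could throw anything else on real errors.
    static int next(int &i) {
        if (i >= 3)
            throw StopIteration();
        return i++;
    }

    int main() {
        int i = 0;
        for (;;) {
            try {
                std::printf("got %d\n", next(i));
            } catch (const StopIteration &) {
                // The NEXTITER-style handler: convert the sentinel into
                // "iteration done" and leave the loop normally.
                break;
            }
            // Other exception types are not caught here; they propagate to
            // the caller, mirroring the generic "rethrow from the
            // imacro-calling pc" path in the patch.
        }
        return 0;
    }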
--- a/js/src/jsparse.cpp
+++ b/js/src/jsparse.cpp
@@ -289,16 +289,23 @@ JSCompiler::newFunctionBox(JSObject *obj
     funbox->object = obj;
     funbox->node = fn;
     funbox->siblings = tc->functionList;
     tc->functionList = funbox;
     ++tc->compiler->functionCount;
     funbox->kids = NULL;
     funbox->parent = tc->funbox;
     funbox->queued = false;
+    funbox->inLoop = false;
+    for (JSStmtInfo *stmt = tc->topStmt; stmt; stmt = stmt->down) {
+        if (STMT_IS_LOOP(stmt)) {
+            funbox->inLoop = true;
+            break;
+        }
+    }
     funbox->level = tc->staticLevel;
     funbox->tcflags = TCF_IN_FUNCTION | (tc->flags & TCF_COMPILE_N_GO);
     return funbox;
 }
 
 void
 JSCompiler::trace(JSTracer *trc)
 {
@@ -739,16 +746,18 @@ JSCompiler::parse(JSObject *chain)
         } else {
             if (!js_FoldConstants(context, pn, &tc))
                 pn = NULL;
         }
     }
     return pn;
 }
 
+JS_STATIC_ASSERT(FREE_STATIC_LEVEL == JS_BITMASK(JSFB_LEVEL_BITS));
+
 static inline bool
 SetStaticLevel(JSTreeContext *tc, uintN staticLevel)
 {
     /*
      * Reserve FREE_STATIC_LEVEL (0xffff) in order to reserve FREE_UPVAR_COOKIE
      * (0xffffffff) and other cookies with that level.
      *
      * This is a lot simpler than error-checking every MAKE_UPVAR_COOKIE, and
@@ -801,67 +810,71 @@ JSCompiler::compileScript(JSContext *cx,
     JS_INIT_ARENA_POOL(&codePool, "code", 1024, sizeof(jsbytecode),
                        &cx->scriptStackQuota);
     JS_INIT_ARENA_POOL(&notePool, "note", 1024, sizeof(jssrcnote),
                        &cx->scriptStackQuota);
 
     JSCodeGenerator cg(&jsc, &codePool, &notePool, jsc.tokenStream.lineno);
 
     MUST_FLOW_THROUGH("out");
+
+    /* Null script early in case of error, to reduce our code footprint. */
+    script = NULL;
+
     cg.flags |= (uint16) tcflags;
     cg.scopeChain = scopeChain;
     if (!SetStaticLevel(&cg, TCF_GET_STATIC_LEVEL(tcflags)))
-        return NULL;
+        goto out;
 
     /*
      * If funbox is non-null after we create the new script, callerFrame->fun
      * was saved in the 0th object table entry.
      */
-    JSObjectBox *funbox = NULL;
+    JSObjectBox *funbox;
+    funbox = NULL;
 
     if (tcflags & TCF_COMPILE_N_GO) {
         if (source) {
             /*
              * Save eval program source in script->atomMap.vector[0] for the
              * eval cache (see obj_eval in jsobj.cpp).
              */
             JSAtom *atom = js_AtomizeString(cx, source, 0);
             if (!atom || !cg.atomList.add(&jsc, atom))
-                return NULL;
+                goto out;
         }
 
         if (callerFrame && callerFrame->fun) {
             /*
              * An eval script in a caller frame needs to have its enclosing
              * function captured in case it refers to an upvar, and someone
              * wishes to decompile it while it's running.
              */
             funbox = jsc.newObjectBox(FUN_OBJECT(callerFrame->fun));
             if (!funbox)
-                return NULL;
+                goto out;
             funbox->emitLink = cg.objectList.lastbox;
             cg.objectList.lastbox = funbox;
             cg.objectList.length++;
         }
     }
 
     /*
      * Inline Statements to emit as we go to save AST space. We must generate
      * our script-body blockid since we aren't calling Statements.
      */
     uint32 bodyid;
     if (!GenerateBlockId(&cg, bodyid))
-        return NULL;
+        goto out;
     cg.bodyid = bodyid;
 
-    /* Null script early in case of error, to reduce our code footprint. */
-    script = NULL;
 #if JS_HAS_XML_SUPPORT
     pn = NULL;
-    bool onlyXML = true;
+    bool onlyXML;
+    onlyXML = true;
 #endif
 
     for (;;) {
         jsc.tokenStream.flags |= TSF_OPERAND;
         tt = js_PeekToken(cx, &jsc.tokenStream);
         jsc.tokenStream.flags &= ~TSF_OPERAND;
         if (tt <= TOK_EOF) {
             if (tt == TOK_EOF)
@@ -1496,16 +1509,17 @@ JSCompiler::compileFunctionBody(JSContex
             pn = NULL;
         } else if (funcg.functionList &&
                    !jsc.analyzeFunctions(funcg.functionList, funcg.flags)) {
             pn = NULL;
         } else {
             if (fn->pn_body) {
                 JS_ASSERT(PN_TYPE(fn->pn_body) == TOK_ARGSBODY);
                 fn->pn_body->append(pn);
+                fn->pn_body->pn_pos = pn->pn_pos;
                 pn = fn->pn_body;
             }
 
             if (!js_EmitFunctionScript(cx, &funcg, pn))
                 pn = NULL;
         }
     }
 
@@ -1951,60 +1965,65 @@ JSCompiler::setFunctionKinds(JSFunctionB
                         JSFunctionBox *afunbox = funbox;
                         uintN lexdepLevel = lexdep->frameLevel();
 
                         JS_ASSERT(lexdepLevel <= funbox->level);
                         while (afunbox->level != lexdepLevel) {
                             afunbox = afunbox->parent;
 
                             /*
-                             * afunbox cannot be null here. That is, we are
-                             * sure to find a function box whose level ==
-                             * lexdepLevel before walking off the top of the
-                             * funbox tree.
+                             * afunbox can't be null because we are sure
+                             * to find a function box whose level == lexdepLevel
+                             * before walking off the top of the funbox tree.
+                             * See bug 493260 comments 16-18.
                              *
-                             * Proof: lexdepLevel is at least the base
-                             * staticLevel for this compilation (often 0 but
-                             * nonzero when compiling for local eval) and at
-                             * most funbox->level. The path we are walking
-                             * includes one function box each of precisely that
-                             * range of levels.
-                             *
-                             * Assert but check anyway (bug 493260 comment 16).
+                             * Assert but check anyway, to check future changes
+                             * that bind eval upvars in the parser.
                              */
                             JS_ASSERT(afunbox);
 
                             /*
                              * If this function is reaching up across an
                              * enclosing funarg, we cannot make a flat
                              * closure. The display stops working once the
                              * funarg escapes.
                              */
                             if (!afunbox || afunbox->node->isFunArg())
                                 goto break2;
                         }
 
                         /*
+                         * If afunbox's function (which is at the same level as
+                         * lexdep) is in a loop, pessimistically assume the
+                         * variable initializer may be in the same loop. A flat
+                         * closure would then be unsafe, as the captured
+                         * variable could be assigned after the closure is
+                         * created. See bug 493232.
+                         */
+                        if (afunbox->inLoop)
+                            break;
+
+                        /*
                          * with and eval defeat lexical scoping; eval anywhere
                          * in a variable's scope can assign to it. Both defeat
                          * the flat closure optimization. The parser detects
                          * these cases and flags the function heavyweight.
                          */
                         JSFunctionBox *parentbox = afunbox->parent ? afunbox->parent : afunbox;
                         if (parentbox->tcflags & TCF_FUN_HEAVYWEIGHT)
                             break;
 
                         /*
-                         * If afunbox's function (which is at the same level as
-                         * lexdep) is not a lambda, it will be hoisted, so it
-                         * could capture the undefined value that by default
-                         * initializes var/let/const bindings. And if lexdep is
-                         * a function that comes at (meaning a function refers
-                         * to its own name) or strictly after afunbox, we also
-                         * break to defeat the flat closure optimization.
+                         * If afunbox's function is not a lambda, it will be
+                         * hoisted, so it could capture the undefined value
+                         * that by default initializes var/let/const
+                         * bindings. And if lexdep is a function that comes at
+                         * (meaning a function refers to its own name) or
+                         * strictly after afunbox, we also break to defeat the
+                         * flat closure optimization.
                          */
                         JSFunction *afun = (JSFunction *) afunbox->object;
                         if (!(afun->flags & JSFUN_LAMBDA)) {
                             if (lexdep->isBindingForm())
                                 break;
                             if (lexdep->pn_pos >= afunbox->node->pn_pos)
                                 break;
                         }
@@ -2761,20 +2780,22 @@ FunctionDef(JSContext *cx, JSTokenStream
     } else {
         op = JSOP_NOP;
     }
 
     funbox->kids = funtc.functionList;
 
     pn->pn_funbox = funbox;
     pn->pn_op = op;
-    if (pn->pn_body)
+    if (pn->pn_body) {
         pn->pn_body->append(body);
-    else
+        pn->pn_body->pn_pos = body->pn_pos;
+    } else {
         pn->pn_body = body;
+    }
 
     pn->pn_blockid = tc->blockid();
 
     if (!LeaveFunction(pn, &funtc, tc, funAtom, lambda))
         return NULL;
 
     return result;
 }
@@ -3283,17 +3304,17 @@ NoteLValue(JSContext *cx, JSParseNode *p
             dflag = PND_INITIALIZED;
         }
 
         dn->pn_dflags |= dflag;
 
         if (dn->frameLevel() != tc->staticLevel) {
             /*
              * The above condition takes advantage of the all-ones nature of
-             * FREE_UPVAR_COOKIE, and the reserved frame level JS_BITMASK(16).
+             * FREE_UPVAR_COOKIE, and the reserved level FREE_STATIC_LEVEL.
              * We make a stronger assertion by excluding FREE_UPVAR_COOKIE.
              */
             JS_ASSERT_IF(dn->pn_cookie != FREE_UPVAR_COOKIE,
                          dn->frameLevel() < tc->staticLevel);
             tc->flags |= TCF_FUN_SETS_OUTER_NAME;
         }
     }
 
@@ -6079,25 +6100,32 @@ class CompExprTransplanter {
     bool transplant(JSParseNode *pn);
 };
 
 /*
  * Any definitions nested within the comprehension expression of a generator
  * expression must move "down" one static level, which of course increases the
  * upvar-frame-skip count.
  */
-static void
+static bool
 BumpStaticLevel(JSParseNode *pn, JSTreeContext *tc)
 {
     if (pn->pn_cookie != FREE_UPVAR_COOKIE) {
         uintN level = UPVAR_FRAME_SKIP(pn->pn_cookie) + 1;
 
         JS_ASSERT(level >= tc->staticLevel);
+        if (level >= FREE_STATIC_LEVEL) {
+            JS_ReportErrorNumber(tc->compiler->context, js_GetErrorMessage, NULL,
+                                 JSMSG_TOO_DEEP, js_function_str);
+            return false;
+        }
+
         pn->pn_cookie = MAKE_UPVAR_COOKIE(level, UPVAR_FRAME_SLOT(pn->pn_cookie));
     }
+    return true;
 }
 
 static void
 AdjustBlockId(JSParseNode *pn, uintN adjust, JSTreeContext *tc)
 {
     JS_ASSERT(pn->pn_arity == PN_LIST || pn->pn_arity == PN_FUNC || pn->pn_arity == PN_NAME);
     pn->pn_blockid += adjust;
     if (pn->pn_blockid >= tc->blockidGen)
@@ -6168,18 +6196,18 @@ CompExprTransplanter::transplant(JSParse
       }
 
       case PN_NAME:
         transplant(pn->maybeExpr());
         if (pn->pn_arity == PN_FUNC)
             --funcLevel;
 
         if (pn->pn_defn) {
-            if (genexp)
-                BumpStaticLevel(pn, tc);
+            if (genexp && !BumpStaticLevel(pn, tc))
+                return false;
         } else if (pn->pn_used) {
             JS_ASSERT(pn->pn_op != JSOP_NOP);
             JS_ASSERT(pn->pn_cookie == FREE_UPVAR_COOKIE);
 
             JSDefinition *dn = pn->pn_lexdef;
             JS_ASSERT(dn->pn_defn);
 
             /*
@@ -6187,41 +6215,41 @@ CompExprTransplanter::transplant(JSParse
              * to the left of the root node, and if pn is the last use visited
              * in the comprehension expression (to avoid adjusting the blockid
              * multiple times).
              *
              * Non-placeholder definitions within the comprehension expression
              * will be visited further below.
              */
             if (dn->isPlaceholder() && dn->pn_pos >= root->pn_pos && dn->dn_uses == pn) {
-                if (genexp)
-                    BumpStaticLevel(dn, tc);
+                if (genexp && !BumpStaticLevel(dn, tc))
+                    return false;
                 AdjustBlockId(dn, adjust, tc);
             }
 
             JSAtom *atom = pn->pn_atom;
 #ifdef DEBUG
             JSStmtInfo *stmt = js_LexicalLookup(tc, atom, NULL);
             JS_ASSERT(!stmt || stmt != tc->topStmt);
 #endif
             if (genexp && PN_OP(dn) != JSOP_CALLEE) {
                 JS_ASSERT(!tc->decls.lookup(atom));
 
                 if (dn->pn_pos < root->pn_pos || dn->isPlaceholder()) {
                     JSAtomListElement *ale = tc->lexdeps.add(tc->compiler, dn->pn_atom);
                     if (!ale)
-                        return NULL;
+                        return false;
 
                     if (dn->pn_pos >= root->pn_pos) {
                         tc->parent->lexdeps.remove(tc->compiler, atom);
                     } else {
                         JSDefinition *dn2 = (JSDefinition *)
                             NewNameNode(tc->compiler->context, TS(tc->compiler), dn->pn_atom, tc);
                         if (!dn2)
-                            return NULL;
+                            return false;
 
                         dn2->pn_type = dn->pn_type;
                         dn2->pn_pos = root->pn_pos;
                         dn2->pn_defn = true;
                         dn2->pn_dflags |= PND_PLACEHOLDER;
 
                         JSParseNode **pnup = &dn->dn_uses;
                         JSParseNode *pnu;
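Note on the jsparse.cpp hunks above: newFunctionBox() now records whether the function is being parsed inside an enclosing loop by walking the tree context's statement stack, and setFunctionKinds() refuses the flat-closure optimization for such functions (bug 493232), because a captured variable could be reassigned on a later loop iteration after the closure was created. A rough, self-contained sketch of that statement-stack walk, with hypothetical StmtInfo/isLoop names standing in for JSStmtInfo and STMT_IS_LOOP:

    #include <cassert>

    // Hypothetical stand-ins for JSStmtInfo / STMT_IS_LOOP.
    struct StmtInfo {
        bool isLoop;        // true for for/while/do statements
        StmtInfo *down;     // next enclosing statement, outermost last
    };

    // Returns true if any enclosing statement is a loop, mirroring the
    // funbox->inLoop computation added to newFunctionBox().
    static bool inEnclosingLoop(const StmtInfo *top) {
        for (const StmtInfo *stmt = top; stmt; stmt = stmt->down) {
            if (stmt->isLoop)
                return true;
        }
        return false;
    }

    int main() {
        StmtInfo block = { false, nullptr };   // e.g. a plain { } block
        StmtInfo loop  = { true,  &block };    // a for-loop inside it
        StmtInfo inner = { false, &loop };     // an if inside the loop body

        assert(inEnclosingLoop(&inner));       // nested inside a loop
        assert(!inEnclosingLoop(&block));      // outermost block: no loop
        return 0;
    }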
--- a/js/src/jsparse.h
+++ b/js/src/jsparse.h
@@ -747,24 +747,27 @@ JSParseNode::setFunArg()
 }
 
 struct JSObjectBox {
     JSObjectBox         *traceLink;
     JSObjectBox         *emitLink;
     JSObject            *object;
 };
 
+#define JSFB_LEVEL_BITS 14
+
 struct JSFunctionBox : public JSObjectBox
 {
     JSParseNode         *node;
     JSFunctionBox       *siblings;
     JSFunctionBox       *kids;
     JSFunctionBox       *parent;
     uint32              queued:1,
-                        level:15,
+                        inLoop:1,               /* in a loop in parent function */
+                        level:JSFB_LEVEL_BITS,
                         tcflags:16;
 };
 
 struct JSFunctionBoxQueue {
     JSFunctionBox       **vector;
     size_t              head, tail;
     size_t              lengthMask;
 
--- a/js/src/jsscript.h
+++ b/js/src/jsscript.h
@@ -82,17 +82,17 @@ typedef struct JSObjectArray {
 } JSObjectArray;
 
 typedef struct JSUpvarArray {
     uint32          *vector;    /* array of indexed upvar cookies */
     uint32          length;     /* count of indexed upvar cookies */
 } JSUpvarArray;
 
 #define CALLEE_UPVAR_SLOT               0xffff
-#define FREE_STATIC_LEVEL               0xffff
+#define FREE_STATIC_LEVEL               0x3fff
 #define FREE_UPVAR_COOKIE               0xffffffff
 #define MAKE_UPVAR_COOKIE(skip,slot)    ((skip) << 16 | (slot))
 #define UPVAR_FRAME_SKIP(cookie)        ((uint32)(cookie) >> 16)
 #define UPVAR_FRAME_SLOT(cookie)        ((uint16)(cookie))
 
 #define JS_OBJECT_ARRAY_SIZE(length)                                          \
     (offsetof(JSObjectArray, vector) + sizeof(JSObject *) * (length))
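Note on the jsparse.h and jsscript.h hunks above: JSFunctionBox::level shrinks from 15 to JSFB_LEVEL_BITS (14) bits to make room for the new inLoop bit, FREE_STATIC_LEVEL drops from 0xffff to 0x3fff to match, and the new JS_STATIC_ASSERT in jsparse.cpp ties the two definitions together. A small stand-alone sketch of the arithmetic, using plain static_assert and local copies of the constants rather than the real JS_BITMASK/MAKE_UPVAR_COOKIE macros:

    #include <cstdint>

    // Local copies of the patched constants, for illustration only.
    constexpr unsigned JSFB_LEVEL_BITS   = 14;
    constexpr uint32_t FREE_STATIC_LEVEL = 0x3fff;
    constexpr uint32_t FREE_UPVAR_COOKIE = 0xffffffff;

    // The skip/slot packing performed by MAKE_UPVAR_COOKIE(skip, slot).
    constexpr uint32_t makeUpvarCookie(uint32_t skip, uint32_t slot) {
        return (skip << 16) | slot;
    }

    // FREE_STATIC_LEVEL must be exactly the all-ones value of a
    // JSFB_LEVEL_BITS-wide field; this is what the added
    // JS_STATIC_ASSERT(FREE_STATIC_LEVEL == JS_BITMASK(JSFB_LEVEL_BITS))
    // checks in jsparse.cpp.
    static_assert(FREE_STATIC_LEVEL == (1u << JSFB_LEVEL_BITS) - 1,
                  "reserved level is the maximum 14-bit value");

    // The all-ones "free" cookie decodes to a frame skip of 0xffff, which
    // is above any legal 14-bit static level, so it can never collide with
    // a real upvar cookie.
    static_assert((FREE_UPVAR_COOKIE >> 16) > FREE_STATIC_LEVEL,
                  "free cookie's skip field is outside the legal level range");

    // Example: a cookie for an upvar two frames up, in slot 5.
    constexpr uint32_t example = makeUpvarCookie(2, 5);
    static_assert((example >> 16) == 2 && (example & 0xffff) == 5,
                  "cookie round-trips skip and slot");

    int main() { return 0; }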
 
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -1230,26 +1230,26 @@ TypeMap::captureTypes(JSContext* cx, Slo
     setLength(js_NativeStackSlots(cx, callDepth) + ngslots);
     uint8* map = data();
     uint8* m = map;
     FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth,
         uint8 type = getCoercedType(*vp);
         if ((type == JSVAL_INT) && oracle.isStackSlotUndemotable(cx, unsigned(m - map)))
             type = JSVAL_DOUBLE;
         JS_ASSERT(type != JSVAL_BOXED);
-        debug_only_v(printf("capture stack type %s%d: %d=%c\n", vpname, vpnum, type, typeChar[type]);)
+        debug_only_v(nj_dprintf("capture stack type %s%d: %d=%c\n", vpname, vpnum, type, typeChar[type]);)
         JS_ASSERT(uintptr_t(m - map) < length());
         *m++ = type;
     );
     FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
         uint8 type = getCoercedType(*vp);
         if ((type == JSVAL_INT) && oracle.isGlobalSlotUndemotable(cx, gslots[n]))
             type = JSVAL_DOUBLE;
         JS_ASSERT(type != JSVAL_BOXED);
-        debug_only_v(printf("capture global type %s%d: %d=%c\n", vpname, vpnum, type, typeChar[type]);)
+        debug_only_v(nj_dprintf("capture global type %s%d: %d=%c\n", vpname, vpnum, type, typeChar[type]);)
         JS_ASSERT(uintptr_t(m - map) < length());
         *m++ = type;
     );
     JS_ASSERT(uintptr_t(m - map) == length());
 }
 
 JS_REQUIRES_STACK void
 TypeMap::captureMissingGlobalTypes(JSContext* cx, SlotList& slots, unsigned stackSlots)
@@ -1263,17 +1263,17 @@ TypeMap::captureMissingGlobalTypes(JSCon
     uint8* map = data() + stackSlots;
     uint8* m = map;
     FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
         if (n >= oldSlots) {
             uint8 type = getCoercedType(*vp);
             if ((type == JSVAL_INT) && oracle.isGlobalSlotUndemotable(cx, gslots[n]))
                 type = JSVAL_DOUBLE;
             JS_ASSERT(type != JSVAL_BOXED);
-            debug_only_v(printf("capture global type %s%d: %d=%c\n", vpname, vpnum, type, typeChar[type]);)
+            debug_only_v(nj_dprintf("capture global type %s%d: %d=%c\n", vpname, vpnum, type, typeChar[type]);)
             *m = type;
             JS_ASSERT((m > map + oldSlots) || (*m == type));
         }
         m++;
     );
 }
 
 /* Compare this type map to another one and see whether they match. */
@@ -1350,19 +1350,19 @@ TraceRecorder::TraceRecorder(JSContext* 
     this->wasRootFragment = _fragment == _fragment->root;
     this->outer = outer;
     this->outerArgc = outerArgc;
     this->pendingTraceableNative = NULL;
     this->newobj_ins = NULL;
     this->generatedTraceableNative = new JSTraceableNative();
     JS_ASSERT(generatedTraceableNative);
 
-    debug_only_v(printf("recording starting from %s:%u@%u\n",
-                        ti->treeFileName, ti->treeLineNumber, ti->treePCOffset);)
-    debug_only_v(printf("globalObj=%p, shape=%d\n", (void*)this->globalObj, OBJ_SHAPE(this->globalObj));)
+    debug_only_v(nj_dprintf("recording starting from %s:%u@%u\n",
+                            ti->treeFileName, ti->treeLineNumber, ti->treePCOffset);)
+    debug_only_v(nj_dprintf("globalObj=%p, shape=%d\n", (void*)this->globalObj, OBJ_SHAPE(this->globalObj));)
 
     lir = lir_buf_writer = new (&gc) LirBufWriter(lirbuf);
     debug_only_v(lir = verbose_filter = new (&gc) VerboseWriter(&gc, lir, lirbuf->names);)
     if (nanojit::AvmCore::config.soft_float)
         lir = float_filter = new (&gc) SoftFloatFilter(lir);
     else
         float_filter = 0;
     lir = cse_filter = new (&gc) CseFilter(lir, &gc);
@@ -1455,17 +1455,17 @@ TraceRecorder::~TraceRecorder()
 
 void TraceRecorder::removeFragmentoReferences()
 {
     fragment = NULL;
 }
 
 void TraceRecorder::deepAbort()
 {
-    debug_only_v(printf("deep abort");)
+    debug_only_v(nj_dprintf("deep abort");)
     deepAborted = true;
 }
 
 /* Add debug information to a LIR instruction as we emit it. */
 inline LIns*
 TraceRecorder::addName(LIns* ins, const char* name)
 {
 #ifdef JS_JIT_SPEW
@@ -1588,70 +1588,70 @@ static void
 ValueToNative(JSContext* cx, jsval v, uint8 type, double* slot)
 {
     unsigned tag = JSVAL_TAG(v);
     switch (type) {
       case JSVAL_OBJECT:
         JS_ASSERT(tag == JSVAL_OBJECT);
         JS_ASSERT(!JSVAL_IS_NULL(v) && !HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)));
         *(JSObject**)slot = JSVAL_TO_OBJECT(v);
-        debug_only_v(printf("object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
-                            JSVAL_IS_NULL(v)
-                            ? "null"
-                            : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);)
+        debug_only_v(nj_dprintf("object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
+                                JSVAL_IS_NULL(v)
+                                ? "null"
+                                : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);)
         return;
       case JSVAL_INT:
         jsint i;
         if (JSVAL_IS_INT(v))
             *(jsint*)slot = JSVAL_TO_INT(v);
         else if ((tag == JSVAL_DOUBLE) && JSDOUBLE_IS_INT(*JSVAL_TO_DOUBLE(v), i))
             *(jsint*)slot = i;
         else
             JS_ASSERT(JSVAL_IS_INT(v));
-        debug_only_v(printf("int<%d> ", *(jsint*)slot);)
+        debug_only_v(nj_dprintf("int<%d> ", *(jsint*)slot);)
         return;
       case JSVAL_DOUBLE:
         jsdouble d;
         if (JSVAL_IS_INT(v))
             d = JSVAL_TO_INT(v);
         else
             d = *JSVAL_TO_DOUBLE(v);
         JS_ASSERT(JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v));
         *(jsdouble*)slot = d;
-        debug_only_v(printf("double<%g> ", d);)
+        debug_only_v(nj_dprintf("double<%g> ", d);)
         return;
       case JSVAL_BOXED:
         JS_NOT_REACHED("found boxed type in an entry type map");
         return;
       case JSVAL_STRING:
         JS_ASSERT(tag == JSVAL_STRING);
         *(JSString**)slot = JSVAL_TO_STRING(v);
-        debug_only_v(printf("string<%p> ", (void*)(*(JSString**)slot));)
+        debug_only_v(nj_dprintf("string<%p> ", (void*)(*(JSString**)slot));)
         return;
       case JSVAL_TNULL:
         JS_ASSERT(tag == JSVAL_OBJECT);
         *(JSObject**)slot = NULL;
-        debug_only_v(printf("null ");)
+        debug_only_v(nj_dprintf("null ");)
         return;
       case JSVAL_BOOLEAN:
         /* Watch out for pseudo-booleans. */
         JS_ASSERT(tag == JSVAL_BOOLEAN);
         *(JSBool*)slot = JSVAL_TO_PSEUDO_BOOLEAN(v);
-        debug_only_v(printf("boolean<%d> ", *(JSBool*)slot);)
+        debug_only_v(nj_dprintf("boolean<%d> ", *(JSBool*)slot);)
         return;
       case JSVAL_TFUN: {
         JS_ASSERT(tag == JSVAL_OBJECT);
         JSObject* obj = JSVAL_TO_OBJECT(v);
         *(JSObject**)slot = obj;
 #ifdef DEBUG
         JSFunction* fun = GET_FUNCTION_PRIVATE(cx, obj);
-        debug_only_v(printf("function<%p:%s> ", (void*) obj,
-                            fun->atom
-                            ? JS_GetStringBytes(ATOM_TO_STRING(fun->atom))
-                            : "unnamed");)
+        debug_only_v(nj_dprintf("function<%p:%s> ", (void*) obj,
+                                fun->atom
+                                ? JS_GetStringBytes(ATOM_TO_STRING(fun->atom))
+                                : "unnamed");)
 #endif
         return;
       }
     }
 
     JS_NOT_REACHED("unexpected type");
 }
 
@@ -1720,34 +1720,34 @@ NativeToValue(JSContext* cx, jsval& v, u
 {
     jsint i;
     jsdouble d;
     switch (type) {
       case JSVAL_OBJECT:
         v = OBJECT_TO_JSVAL(*(JSObject**)slot);
         JS_ASSERT(JSVAL_TAG(v) == JSVAL_OBJECT); /* if this fails the pointer was not aligned */
         JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */
-        debug_only_v(printf("object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
-                            JSVAL_IS_NULL(v)
-                            ? "null"
-                            : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);)
+        debug_only_v(nj_dprintf("object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
+                                JSVAL_IS_NULL(v)
+                                ? "null"
+                                : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);)
         break;
       case JSVAL_INT:
         i = *(jsint*)slot;
-        debug_only_v(printf("int<%d> ", i);)
+        debug_only_v(nj_dprintf("int<%d> ", i);)
       store_int:
         if (INT_FITS_IN_JSVAL(i)) {
             v = INT_TO_JSVAL(i);
             break;
         }
         d = (jsdouble)i;
         goto store_double;
       case JSVAL_DOUBLE:
         d = *slot;
-        debug_only_v(printf("double<%g> ", d);)
+        debug_only_v(nj_dprintf("double<%g> ", d);)
         if (JSDOUBLE_IS_INT(d, i))
             goto store_int;
       store_double: {
         /* Its not safe to trigger the GC here, so use an emergency heap if we are out of
            double boxes. */
         if (cx->doubleFreeList) {
 #ifdef DEBUG
             JSBool ok =
@@ -1759,84 +1759,84 @@ NativeToValue(JSContext* cx, jsval& v, u
         v = AllocateDoubleFromReservedPool(cx);
         JS_ASSERT(JSVAL_IS_DOUBLE(v) && *JSVAL_TO_DOUBLE(v) == 0.0);
         *JSVAL_TO_DOUBLE(v) = d;
         return;
       }
       case JSVAL_BOXED:
         v = *(jsval*)slot;
         JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */
-        debug_only_v(printf("box<%p> ", (void*)v));
+        debug_only_v(nj_dprintf("box<%p> ", (void*)v));
         break;
       case JSVAL_STRING:
         v = STRING_TO_JSVAL(*(JSString**)slot);
         JS_ASSERT(JSVAL_TAG(v) == JSVAL_STRING); /* if this fails the pointer was not aligned */
-        debug_only_v(printf("string<%p> ", (void*)(*(JSString**)slot));)
+        debug_only_v(nj_dprintf("string<%p> ", (void*)(*(JSString**)slot));)
         break;
       case JSVAL_TNULL:
         JS_ASSERT(*(JSObject**)slot == NULL);
         v = JSVAL_NULL;
-        debug_only_v(printf("null<%p> ", (void*)(*(JSObject**)slot)));
+        debug_only_v(nj_dprintf("null<%p> ", (void*)(*(JSObject**)slot)));
         break;
       case JSVAL_BOOLEAN:
         /* Watch out for pseudo-booleans. */
         v = PSEUDO_BOOLEAN_TO_JSVAL(*(JSBool*)slot);
-        debug_only_v(printf("boolean<%d> ", *(JSBool*)slot);)
+        debug_only_v(nj_dprintf("boolean<%d> ", *(JSBool*)slot);)
         break;
       case JSVAL_TFUN: {
         JS_ASSERT(HAS_FUNCTION_CLASS(*(JSObject**)slot));
         v = OBJECT_TO_JSVAL(*(JSObject**)slot);
 #ifdef DEBUG
         JSFunction* fun = GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v));
-        debug_only_v(printf("function<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
-                            fun->atom
-                            ? JS_GetStringBytes(ATOM_TO_STRING(fun->atom))
-                            : "unnamed");)
+        debug_only_v(nj_dprintf("function<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
+                                fun->atom
+                                ? JS_GetStringBytes(ATOM_TO_STRING(fun->atom))
+                                : "unnamed");)
 #endif
         break;
       }
     }
 }
 
 /* Attempt to unbox the given list of interned globals onto the native global frame. */
 static JS_REQUIRES_STACK void
 BuildNativeGlobalFrame(JSContext* cx, unsigned ngslots, uint16* gslots, uint8* mp, double* np)
 {
-    debug_only_v(printf("global: ");)
+    debug_only_v(nj_dprintf("global: ");)
     FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
         ValueToNative(cx, *vp, *mp, np + gslots[n]);
         ++mp;
     );
-    debug_only_v(printf("\n");)
+    debug_only_v(nj_dprintf("\n");)
 }
 
 /* Attempt to unbox the given JS frame onto a native frame. */
 static JS_REQUIRES_STACK void
 BuildNativeStackFrame(JSContext* cx, unsigned callDepth, uint8* mp, double* np)
 {
-    debug_only_v(printf("stack: ");)
+    debug_only_v(nj_dprintf("stack: ");)
     FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth,
-        debug_only_v(printf("%s%u=", vpname, vpnum);)
+        debug_only_v(nj_dprintf("%s%u=", vpname, vpnum);)
         ValueToNative(cx, *vp, *mp, np);
         ++mp; ++np;
     );
-    debug_only_v(printf("\n");)
+    debug_only_v(nj_dprintf("\n");)
 }
 
 /* Box the given native frame into a JS frame. This is infallible. */
 static JS_REQUIRES_STACK int
 FlushNativeGlobalFrame(JSContext* cx, unsigned ngslots, uint16* gslots, uint8* mp, double* np)
 {
     uint8* mp_base = mp;
     FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
-        debug_only_v(printf("%s%u=", vpname, vpnum);)
+        debug_only_v(nj_dprintf("%s%u=", vpname, vpnum);)
         NativeToValue(cx, *vp, *mp, np + gslots[n]);
         ++mp;
     );
-    debug_only_v(printf("\n");)
+    debug_only_v(nj_dprintf("\n");)
     return mp - mp_base;
 }
 
 /*
  * Builtin to get an upvar on trace. See js_GetUpvar for the meaning
  * of the first three arguments. The value of the upvar is stored in
  * *result as an unboxed native. The return value is the typemap type.
  */
@@ -1905,17 +1905,17 @@ static JS_REQUIRES_STACK int
 FlushNativeStackFrame(JSContext* cx, unsigned callDepth, uint8* mp, double* np,
                       JSStackFrame* stopFrame)
 {
     jsval* stopAt = stopFrame ? &stopFrame->argv[-2] : NULL;
     uint8* mp_base = mp;
     /* Root all string and object references first (we don't need to call the GC for this). */
     FORALL_SLOTS_IN_PENDING_FRAMES(cx, callDepth,
         if (vp == stopAt) goto skip;
-        debug_only_v(printf("%s%u=", vpname, vpnum);)
+        debug_only_v(nj_dprintf("%s%u=", vpname, vpnum);)
         NativeToValue(cx, *vp, *mp, np);
         ++mp; ++np
     );
 skip:
     // Restore thisp from the now-restored argv[-1] in each pending frame.
     // Keep in mind that we didn't restore frames at stopFrame and above!
     // Scope to keep |fp| from leaking into the macros we're using.
     {
@@ -1974,17 +1974,17 @@ skip:
                     }
                 }
                 fp->thisp = JSVAL_TO_OBJECT(fp->argv[-1]);
                 if (fp->flags & JSFRAME_CONSTRUCTING) // constructors always compute 'this'
                     fp->flags |= JSFRAME_COMPUTED_THIS;
             }
         }
     }
-    debug_only_v(printf("\n");)
+    debug_only_v(nj_dprintf("\n");)
     return mp - mp_base;
 }
 
 /* Emit load instructions onto the trace that read the initial stack state. */
 JS_REQUIRES_STACK void
 TraceRecorder::import(LIns* base, ptrdiff_t offset, jsval* p, uint8 t,
                       const char *prefix, uintN index, JSStackFrame *fp)
 {
@@ -2038,18 +2038,18 @@ TraceRecorder::import(LIns* base, ptrdif
 
     if (mark)
         JS_ARENA_RELEASE(&cx->tempPool, mark);
     addName(ins, name);
 
     static const char* typestr[] = {
         "object", "int", "double", "boxed", "string", "null", "boolean", "function"
     };
-    debug_only_v(printf("import vp=%p name=%s type=%s flags=%d\n",
-                        (void*)p, name, typestr[t & 7], t >> 3);)
+    debug_only_v(nj_dprintf("import vp=%p name=%s type=%s flags=%d\n",
+                            (void*)p, name, typestr[t & 7], t >> 3);)
 #endif
 }
 
 JS_REQUIRES_STACK void
 TraceRecorder::import(TreeInfo* treeInfo, LIns* sp, unsigned stackSlots, unsigned ngslots,
                       unsigned callDepth, uint8* typeMap)
 {
     /* If we get a partial list that doesn't have all the types (i.e. recording from a side
@@ -2224,17 +2224,17 @@ TraceRecorder::known(jsval* p)
  * The dslots of the global object are sometimes reallocated by the interpreter.
  * This function check for that condition and re-maps the entries of the tracker
  * accordingly.
  */
 JS_REQUIRES_STACK void
 TraceRecorder::checkForGlobalObjectReallocation()
 {
     if (global_dslots != globalObj->dslots) {
-        debug_only_v(printf("globalObj->dslots relocated, updating tracker\n");)
+        debug_only_v(nj_dprintf("globalObj->dslots relocated, updating tracker\n");)
         jsval* src = global_dslots;
         jsval* dst = globalObj->dslots;
         jsuint length = globalObj->dslots[-1] - JS_INITIAL_NSLOTS;
         LIns** map = (LIns**)alloca(sizeof(LIns*) * length);
         for (jsuint n = 0; n < length; ++n) {
             map[n] = tracker.get(src);
             tracker.set(src++, NULL);
         }
@@ -2479,19 +2479,19 @@ TraceRecorder::guard(bool expected, LIns
     if (!cond->isCond()) {
         expected = !expected;
         cond = lir->ins_eq0(cond);
     }
 
     LIns* guardIns =
         lir->insGuard(expected ? LIR_xf : LIR_xt, cond, guardRec);
     if (guardIns) {
-        debug_only_v(printf("    SideExit=%p exitType=%d\n", (void*)exit, exit->exitType);)
+        debug_only_v(nj_dprintf("    SideExit=%p exitType=%d\n", (void*)exit, exit->exitType);)
     } else {
-        debug_only_v(printf("    redundant guard, eliminated\n");)
+        debug_only_v(nj_dprintf("    redundant guard, eliminated\n");)
     }
 }
 
 JS_REQUIRES_STACK VMSideExit*
 TraceRecorder::copy(VMSideExit* copy)
 {
     size_t typemap_size = copy->numGlobalSlots + copy->numStackSlots;
     LIns* data = lir->insSkip(sizeof(VMSideExit) + typemap_size * sizeof(uint8));
@@ -2532,41 +2532,41 @@ TraceRecorder::guard(bool expected, LIns
  * @param stage_count   Outparam for set() buffer count.
  * @return              True if types are compatible, false otherwise.
  */
 JS_REQUIRES_STACK bool
 TraceRecorder::checkType(jsval& v, uint8 t, jsval*& stage_val, LIns*& stage_ins,
                          unsigned& stage_count)
 {
     if (t == JSVAL_INT) { /* initially all whole numbers cause the slot to be demoted */
-        debug_only_v(printf("checkType(tag=1, t=%d, isnum=%d, i2f=%d) stage_count=%d\n",
-                            t,
-                            isNumber(v),
-                            isPromoteInt(get(&v)),
-                            stage_count);)
+        debug_only_v(nj_dprintf("checkType(tag=1, t=%d, isnum=%d, i2f=%d) stage_count=%d\n",
+                                t,
+                                isNumber(v),
+                                isPromoteInt(get(&v)),
+                                stage_count);)
         if (!isNumber(v))
             return false; /* not a number? type mismatch */
         LIns* i = get(&v);
         /* This is always a type mismatch, we can't close a double to an int. */
         if (!isPromoteInt(i))
             return false;
         /* Looks good, slot is an int32, the last instruction should be promotable. */
         JS_ASSERT(isInt32(v) && isPromoteInt(i));
         /* Overwrite the value in this slot with the argument promoted back to an integer. */
         stage_val = &v;
         stage_ins = f2i(i);
         stage_count++;
         return true;
     }
     if (t == JSVAL_DOUBLE) {
-        debug_only_v(printf("checkType(tag=2, t=%d, isnum=%d, promote=%d) stage_count=%d\n",
-                            t,
-                            isNumber(v),
-                            isPromoteInt(get(&v)),
-                            stage_count);)
+        debug_only_v(nj_dprintf("checkType(tag=2, t=%d, isnum=%d, promote=%d) stage_count=%d\n",
+                                t,
+                                isNumber(v),
+                                isPromoteInt(get(&v)),
+                                stage_count);)
         if (!isNumber(v))
             return false; /* not a number? type mismatch */
         LIns* i = get(&v);
         /* We sink i2f conversions into the side exit, but at the loop edge we have to make
            sure we promote back to double if at loop entry we want a double. */
         if (isPromoteInt(i)) {
             stage_val = &v;
             stage_ins = lir->ins1(LIR_i2f, i);
@@ -2580,22 +2580,22 @@ TraceRecorder::checkType(jsval& v, uint8
         return !JSVAL_IS_PRIMITIVE(v) && HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v));
     if (t == JSVAL_OBJECT)
         return !JSVAL_IS_PRIMITIVE(v) && !HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v));
 
     /* for non-number types we expect a precise match of the type */
     uint8 vt = getCoercedType(v);
 #ifdef DEBUG
     if (vt != t) {
-        debug_only_v(printf("Type mismatch: val %c, map %c ", typeChar[vt],
-                            typeChar[t]);)
+        debug_only_v(nj_dprintf("Type mismatch: val %c, map %c ", typeChar[vt],
+                                typeChar[t]);)
     }
 #endif
-    debug_only_v(printf("checkType(vt=%d, t=%d) stage_count=%d\n",
-                        (int) vt, t, stage_count);)
+    debug_only_v(nj_dprintf("checkType(vt=%d, t=%d) stage_count=%d\n",
+                            (int) vt, t, stage_count);)
     return vt == t;
 }
 
 /**
  * Make sure that the current values in the given stack frame and all stack frames
  * up and including entryFrame are type-compatible with the entry map.
  *
  * @param root_peer         First fragment in peer list.
@@ -2623,35 +2623,35 @@ TraceRecorder::deduceTypeStability(Fragm
     unsigned stage_count;
     jsval** stage_vals = (jsval**)alloca(sizeof(jsval*) * (treeInfo->typeMap.length()));
     LIns** stage_ins = (LIns**)alloca(sizeof(LIns*) * (treeInfo->typeMap.length()));
 
     /* First run through and see if we can close ourselves - best case! */
     stage_count = 0;
     success = false;
 
-    debug_only_v(printf("Checking type stability against self=%p\n", (void*)fragment);)
+    debug_only_v(nj_dprintf("Checking type stability against self=%p\n", (void*)fragment);)
 
     m = typemap = treeInfo->globalTypeMap();
     FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
-        debug_only_v(printf("%s%d ", vpname, vpnum);)
+        debug_only_v(nj_dprintf("%s%d ", vpname, vpnum);)
         if (!checkType(*vp, *m, stage_vals[stage_count], stage_ins[stage_count], stage_count)) {
             /* If the failure was an int->double, tell the oracle. */
             if (*m == JSVAL_INT && isNumber(*vp) && !isPromoteInt(get(vp))) {
                 oracle.markGlobalSlotUndemotable(cx, gslots[n]);
                 demote = true;
             } else {
                 goto checktype_fail_1;
             }
         }
         ++m;
     );
     m = typemap = treeInfo->stackTypeMap();
     FORALL_SLOTS_IN_PENDING_FRAMES(cx, 0,
-        debug_only_v(printf("%s%d ", vpname, vpnum);)
+        debug_only_v(nj_dprintf("%s%d ", vpname, vpnum);)
         if (!checkType(*vp, *m, stage_vals[stage_count], stage_ins[stage_count], stage_count)) {
             if (*m == JSVAL_INT && isNumber(*vp) && !isPromoteInt(get(vp))) {
                 oracle.markStackSlotUndemotable(cx, unsigned(m - typemap));
                 demote = true;
             } else {
                 goto checktype_fail_1;
             }
         }
@@ -2674,17 +2674,17 @@ checktype_fail_1:
     demote = false;
 
     /* At this point the tree is about to be incomplete, so let's see if we can connect to any
      * peer fragment that is type stable.
      */
     Fragment* f;
     TreeInfo* ti;
     for (f = root_peer; f != NULL; f = f->peer) {
-        debug_only_v(printf("Checking type stability against peer=%p (code=%p)\n", (void*)f, f->code());)
+        debug_only_v(nj_dprintf("Checking type stability against peer=%p (code=%p)\n", (void*)f, f->code());)
         if (!f->code())
             continue;
         ti = (TreeInfo*)f->vmprivate;
         /* Don't allow varying stack depths */
         if ((ti->nStackTypes != treeInfo->nStackTypes) ||
             (ti->typeMap.length() != treeInfo->typeMap.length()) ||
             (ti->globalSlots->length() != treeInfo->globalSlots->length()))
             continue;
@@ -2770,29 +2770,29 @@ checktype_fail_2:
 }
 
 static JS_REQUIRES_STACK void
 FlushJITCache(JSContext* cx)
 {
     if (!TRACING_ENABLED(cx))
         return;
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
-    debug_only_v(printf("Flushing cache.\n");)
+    debug_only_v(nj_dprintf("Flushing cache.\n");)
     if (tm->recorder)
         js_AbortRecording(cx, "flush cache");
     TraceRecorder* tr;
     while ((tr = tm->abortStack) != NULL) {
         tr->removeFragmentoReferences();
         tr->deepAbort();
         tr->popAbortStack();
     }
     Fragmento* fragmento = tm->fragmento;
     if (fragmento) {
         if (tm->prohibitFlush) {
-            debug_only_v(printf("Deferring fragmento flush due to deep bail.\n");)
+            debug_only_v(nj_dprintf("Deferring fragmento flush due to deep bail.\n");)
             tm->needFlush = JS_TRUE;
             return;
         }
 
         fragmento->clearFrags();
 #ifdef DEBUG
         JS_ASSERT(fragmento->labels);
         fragmento->labels->clear();
@@ -2820,31 +2820,31 @@ JS_REQUIRES_STACK void
 TraceRecorder::compile(JSTraceMonitor* tm)
 {
     if (tm->needFlush) {
         FlushJITCache(cx);
         return;
     }
     Fragmento* fragmento = tm->fragmento;
     if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) {
-        debug_only_v(printf("Blacklist: excessive stack use.\n"));
+        debug_only_v(nj_dprintf("Blacklist: excessive stack use.\n"));
         js_Blacklist((jsbytecode*) fragment->root->ip);
         return;
     }
     if (anchor && anchor->exitType != CASE_EXIT)
         ++treeInfo->branchCount;
     if (lirbuf->outOMem()) {
         fragmento->assm()->setError(nanojit::OutOMem);
         return;
     }
     ::compile(fragmento->assm(), fragment);
     if (fragmento->assm()->error() == nanojit::OutOMem)
         return;
     if (fragmento->assm()->error() != nanojit::None) {
-        debug_only_v(printf("Blacklisted: error during compilation\n");)
+        debug_only_v(nj_dprintf("Blacklisted: error during compilation\n");)
         js_Blacklist((jsbytecode*) fragment->root->ip);
         return;
     }
     js_resetRecordingAttempts(cx, (jsbytecode*) fragment->ip);
     js_resetRecordingAttempts(cx, (jsbytecode*) fragment->root->ip);
     if (anchor) {
 #ifdef NANOJIT_IA32
         if (anchor->exitType == CASE_EXIT)
@@ -2902,17 +2902,17 @@ TraceRecorder::closeLoop(JSTraceMonitor*
     JS_ASSERT((*cx->fp->regs->pc == JSOP_LOOP || *cx->fp->regs->pc == JSOP_NOP) && !cx->fp->imacpc);
 
     bool stable;
     Fragment* peer;
     VMFragment* peer_root;
     Fragmento* fragmento = tm->fragmento;
 
     if (callDepth != 0) {
-        debug_only_v(printf("Blacklisted: stack depth mismatch, possible recursion.\n");)
+        debug_only_v(nj_dprintf("Blacklisted: stack depth mismatch, possible recursion.\n");)
         js_Blacklist((jsbytecode*) fragment->root->ip);
         trashSelf = true;
         return;
     }
 
     VMSideExit* exit = snapshot(UNSTABLE_LOOP_EXIT);
     JS_ASSERT(exit->numStackSlots == treeInfo->nStackTypes);
 
@@ -2923,17 +2923,17 @@ TraceRecorder::closeLoop(JSTraceMonitor*
     stable = deduceTypeStability(peer_root, &peer, demote);
 
 #if DEBUG
     if (!stable)
         AUDIT(unstableLoopVariable);
 #endif
 
     if (trashSelf) {
-        debug_only_v(printf("Trashing tree from type instability.\n");)
+        debug_only_v(nj_dprintf("Trashing tree from type instability.\n");)
         return;
     }
 
     if (stable && demote) {
         JS_ASSERT(fragment->kind == LoopTrace);
         return;
     }
 
@@ -2945,57 +2945,57 @@ TraceRecorder::closeLoop(JSTraceMonitor*
          * hope it becomes stable later.
          */
         if (!peer) {
             /*
              * If such a fragment does not exist, let's compile the loop ahead
              * of time anyway.  Later, if the loop becomes type stable, we will
              * connect these two fragments together.
              */
-            debug_only_v(printf("Trace has unstable loop variable with no stable peer, "
+            debug_only_v(nj_dprintf("Trace has unstable loop variable with no stable peer, "
                                 "compiling anyway.\n");)
             UnstableExit* uexit = new UnstableExit;
             uexit->fragment = fragment;
             uexit->exit = exit;
             uexit->next = treeInfo->unstableExits;
             treeInfo->unstableExits = uexit;
         } else {
             JS_ASSERT(peer->code());
             exit->target = peer;
-            debug_only_v(printf("Joining type-unstable trace to target fragment %p.\n", (void*)peer);)
+            debug_only_v(nj_dprintf("Joining type-unstable trace to target fragment %p.\n", (void*)peer);)
             stable = true;
             ((TreeInfo*)peer->vmprivate)->dependentTrees.addUnique(fragment->root);
             treeInfo->linkedTrees.addUnique(peer);
         }
     } else {
         exit->target = fragment->root;
         fragment->lastIns = lir->insGuard(LIR_loop, lir->insImm(1), createGuardRecord(exit));
     }
     compile(tm);
 
     if (fragmento->assm()->error() != nanojit::None)
         return;
 
     joinEdgesToEntry(fragmento, peer_root);
 
-    debug_only_v(printf("updating specializations on dependent and linked trees\n"))
+    debug_only_v(nj_dprintf("updating specializations on dependent and linked trees\n"))
     if (fragment->root->vmprivate)
         specializeTreesToMissingGlobals(cx, (TreeInfo*)fragment->root->vmprivate);
 
     /* 
      * If this is a newly formed tree, and the outer tree has not been compiled yet, we
      * should try to compile the outer tree again.
      */
     if (outer)
         js_AttemptCompilation(cx, tm, globalObj, outer, outerArgc);
     
-    debug_only_v(printf("recording completed at %s:%u@%u via closeLoop\n",
-                        cx->fp->script->filename,
-                        js_FramePCToLineNumber(cx, cx->fp),
-                        FramePCOffset(cx->fp));)
+    debug_only_v(nj_dprintf("recording completed at %s:%u@%u via closeLoop\n",
+                            cx->fp->script->filename,
+                            js_FramePCToLineNumber(cx, cx->fp),
+                            FramePCOffset(cx->fp));)
 }
 
 JS_REQUIRES_STACK void
 TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root)
 {
     if (fragment->kind == LoopTrace) {
         TreeInfo* ti;
         Fragment* peer;
@@ -3009,17 +3009,17 @@ TraceRecorder::joinEdgesToEntry(Fragment
                 continue;
             ti = (TreeInfo*)peer->vmprivate;
             uexit = ti->unstableExits;
             unext = &ti->unstableExits;
             while (uexit != NULL) {
                 bool remove = js_JoinPeersIfCompatible(fragmento, fragment, treeInfo, uexit->exit);
                 JS_ASSERT(!remove || fragment != peer);
                 debug_only_v(if (remove) {
-                             printf("Joining type-stable trace to target exit %p->%p.\n",
+                             nj_dprintf("Joining type-stable trace to target exit %p->%p.\n",
                                     (void*)uexit->fragment, (void*)uexit->exit); });
                 if (!remove) {
                     /* See if this exit contains mismatch demotions, which imply trashing a tree.
                        This is actually faster than trashing the original tree as soon as the
                        instability is detected, since we could have compiled a fairly stable
                        tree that ran faster with integers. */
                     unsigned stackCount = 0;
                     unsigned globalCount = 0;
@@ -3073,17 +3073,17 @@ TraceRecorder::joinEdgesToEntry(Fragment
                                       peer_root->globalShape, peer_root->argc);)
 }
 
 /* Emit an always-exit guard and compile the tree (used for break statements. */
 JS_REQUIRES_STACK void
 TraceRecorder::endLoop(JSTraceMonitor* tm)
 {
     if (callDepth != 0) {
-        debug_only_v(printf("Blacklisted: stack depth mismatch, possible recursion.\n");)
+        debug_only_v(nj_dprintf("Blacklisted: stack depth mismatch, possible recursion.\n");)
         js_Blacklist((jsbytecode*) fragment->root->ip);
         trashSelf = true;
         return;
     }
 
     fragment->lastIns =
         lir->insGuard(LIR_x, lir->insImm(1), createGuardRecord(snapshot(LOOP_EXIT)));
     compile(tm);
@@ -3091,31 +3091,31 @@ TraceRecorder::endLoop(JSTraceMonitor* t
     if (tm->fragmento->assm()->error() != nanojit::None)
         return;
 
     VMFragment* root = (VMFragment*)fragment->root;
     joinEdgesToEntry(tm->fragmento, getLoop(tm, root->ip, root->globalObj, root->globalShape, root->argc));
 
     /* Note: this must always be done, in case we added new globals on trace and haven't yet 
        propagated those to linked and dependent trees. */
-    debug_only_v(printf("updating specializations on dependent and linked trees\n"))
+    debug_only_v(nj_dprintf("updating specializations on dependent and linked trees\n"))
     if (fragment->root->vmprivate)
         specializeTreesToMissingGlobals(cx, (TreeInfo*)fragment->root->vmprivate);
 
     /* 
      * If this is a newly formed tree, and the outer tree has not been compiled yet, we
      * should try to compile the outer tree again.
      */
     if (outer)
         js_AttemptCompilation(cx, tm, globalObj, outer, outerArgc);
     
-    debug_only_v(printf("recording completed at %s:%u@%u via endLoop\n",
-                        cx->fp->script->filename,
-                        js_FramePCToLineNumber(cx, cx->fp),
-                        FramePCOffset(cx->fp));)
+    debug_only_v(nj_dprintf("recording completed at %s:%u@%u via endLoop\n",
+                            cx->fp->script->filename,
+                            js_FramePCToLineNumber(cx, cx->fp),
+                            FramePCOffset(cx->fp));)
 }
 
 /* Emit code to adjust the stack to match the inner tree's stack expectations. */
 JS_REQUIRES_STACK void
 TraceRecorder::prepareTreeCall(Fragment* inner)
 {
     TreeInfo* ti = (TreeInfo*)inner->vmprivate;
     inner_sp_ins = lirbuf->sp;
@@ -3126,18 +3126,18 @@ TraceRecorder::prepareTreeCall(Fragment*
     if (callDepth > 0) {
         /* Calculate the amount we have to lift the native stack pointer by to compensate for
            any outer frames that the inner tree doesn't expect but the outer tree has. */
         ptrdiff_t sp_adj = nativeStackOffset(&cx->fp->argv[-2]);
         /* Calculate the amount we have to lift the call stack by */
         ptrdiff_t rp_adj = callDepth * sizeof(FrameInfo*);
         /* Guard that we have enough stack space for the tree we are trying to call on top
            of the new value for sp. */
-        debug_only_v(printf("sp_adj=%d outer=%d inner=%d\n",
-                          sp_adj, treeInfo->nativeStackBase, ti->nativeStackBase));
+        debug_only_v(nj_dprintf("sp_adj=%d outer=%d inner=%d\n",
+                                sp_adj, treeInfo->nativeStackBase, ti->nativeStackBase));
         LIns* sp_top = lir->ins2i(LIR_piadd, lirbuf->sp,
                 - treeInfo->nativeStackBase /* rebase sp to beginning of outer tree's stack */
                 + sp_adj /* adjust for stack in outer frame inner tree can't see */
                 + ti->maxNativeStackSlots * sizeof(double)); /* plus the inner tree's stack */
         guard(true, lir->ins2(LIR_lt, sp_top, eos_ins), OOM_EXIT);
         /* Guard that we have enough call stack space. */
         LIns* rp_top = lir->ins2i(LIR_piadd, lirbuf->rp, rp_adj +
                 ti->maxCallDepth * sizeof(FrameInfo*));
@@ -3212,17 +3212,17 @@ TraceRecorder::emitIf(jsbytecode* pc, bo
         exitType = LOOP_EXIT;
 
         /*
          * If we are about to walk out of the loop, generate code for the inverse loop
          * condition, pretending we recorded the case that stays on trace.
          */
         if ((*pc == JSOP_IFEQ || *pc == JSOP_IFEQX) == cond) {
             JS_ASSERT(*pc == JSOP_IFNE || *pc == JSOP_IFNEX || *pc == JSOP_IFEQ || *pc == JSOP_IFEQX);
-            debug_only_v(printf("Walking out of the loop, terminating it anyway.\n");)
+            debug_only_v(nj_dprintf("Walking out of the loop, terminating it anyway.\n");)
             cond = !cond;
         }
 
         /*
          * Conditional guards do not have to be emitted if the condition is constant. We
          * make a note whether the loop condition is true or false here, so we later know
          * whether to emit a loop edge or a loop end.
          */
@@ -3405,19 +3405,19 @@ CheckGlobalObjectShape(JSContext* cx, JS
     uint32 globalShape = OBJ_SHAPE(globalObj);
 
     if (tm->recorder) {
         VMFragment* root = (VMFragment*)tm->recorder->getFragment()->root;
         TreeInfo* ti = tm->recorder->getTreeInfo();
         /* Check the global shape matches the recorder's treeinfo's shape. */
         if (globalObj != root->globalObj || globalShape != root->globalShape) {
             AUDIT(globalShapeMismatchAtEntry);
-            debug_only_v(printf("Global object/shape mismatch (%p/%u vs. %p/%u), flushing cache.\n",
-                                (void*)globalObj, globalShape, (void*)root->globalObj,
-                                root->globalShape);)
+            debug_only_v(nj_dprintf("Global object/shape mismatch (%p/%u vs. %p/%u), flushing cache.\n",
+                                    (void*)globalObj, globalShape, (void*)root->globalObj,
+                                    root->globalShape);)
             js_Backoff(cx, (jsbytecode*) root->ip);
             FlushJITCache(cx);
             return false;
         }
         if (shape)
             *shape = globalShape;
         if (slots)
             *slots = ti->globalSlots;
@@ -3441,18 +3441,18 @@ CheckGlobalObjectShape(JSContext* cx, JS
             if (slots)
                 *slots = state.globalSlots;
             return true;
         }
     }
 
     /* No currently-tracked-global found and no room to allocate, abort. */
     AUDIT(globalShapeMismatchAtEntry);
-    debug_only_v(printf("No global slotlist for global shape %u, flushing cache.\n",
-                        globalShape));
+    debug_only_v(nj_dprintf("No global slotlist for global shape %u, flushing cache.\n",
+                            globalShape));
     FlushJITCache(cx);
     return false;
 }
 
 static JS_REQUIRES_STACK bool
 js_StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti,
                  unsigned stackSlots, unsigned ngslots, uint8* typeMap,
                  VMSideExit* expectedInnerExit, jsbytecode* outer, uint32 outerArgc)
@@ -3482,17 +3482,17 @@ js_StartRecorder(JSContext* cx, VMSideEx
 static void
 js_TrashTree(JSContext* cx, Fragment* f)
 {
     JS_ASSERT((!f->code()) == (!f->vmprivate));
     JS_ASSERT(f == f->root);
     if (!f->code())
         return;
     AUDIT(treesTrashed);
-    debug_only_v(printf("Trashing tree info.\n");)
+    debug_only_v(nj_dprintf("Trashing tree info.\n");)
     Fragmento* fragmento = JS_TRACE_MONITOR(cx).fragmento;
     TreeInfo* ti = (TreeInfo*)f->vmprivate;
     f->vmprivate = NULL;
     f->releaseCode(fragmento);
     Fragment** data = ti->dependentTrees.data();
     unsigned length = ti->dependentTrees.length();
     for (unsigned n = 0; n < length; ++n)
         js_TrashTree(cx, data[n]);
@@ -3729,17 +3729,17 @@ js_RecordTree(JSContext* cx, JSTraceMoni
     }
 
     f->root = f;
     f->lirbuf = tm->lirbuf;
 
     if (f->lirbuf->outOMem() || js_OverfullFragmento(tm, tm->fragmento)) {
         js_Backoff(cx, (jsbytecode*) f->root->ip);
         FlushJITCache(cx);
-        debug_only_v(printf("Out of memory recording new tree, flushing cache.\n");)
+        debug_only_v(nj_dprintf("Out of memory recording new tree, flushing cache.\n");)
         return false;
     }
 
     JS_ASSERT(!f->code() && !f->vmprivate);
 
     /* set up the VM-private treeInfo structure for this fragment */
     TreeInfo* ti = new (&gc) TreeInfo(f, globalSlots);
 
@@ -3937,17 +3937,17 @@ js_AttemptToExtendTree(JSContext* cx, VM
 
     /*
      * If we are recycling a fragment, it might have a different ip so reset it here. This
      * can happen when attaching a branch to a NESTED_EXIT, which might extend along separate paths
      * (i.e. after the loop edge, and after a return statement).
      */
     c->ip = cx->fp->regs->pc;
 
-    debug_only_v(printf("trying to attach another branch to the tree (hits = %d)\n", c->hits());)
+    debug_only_v(nj_dprintf("trying to attach another branch to the tree (hits = %d)\n", c->hits());)
 
     int32_t& hits = c->hits();
     if (outer || (hits++ >= HOTEXIT && hits <= HOTEXIT+MAXEXIT)) {
         /* start tracing secondary trace from this point */
         c->lirbuf = f->lirbuf;
         unsigned stackSlots;
         unsigned ngslots;
         uint8* typeMap;
@@ -4009,19 +4009,19 @@ js_RecordLoopEdge(JSContext* cx, TraceRe
 
     /* Does this branch go to an inner loop? */
     Fragment* first = getLoop(&JS_TRACE_MONITOR(cx), cx->fp->regs->pc,
                               root->globalObj, root->globalShape, cx->fp->argc);
     if (!first) {
         /* Not an inner loop we can call, abort trace. */
         AUDIT(returnToDifferentLoopHeader);
         JS_ASSERT(!cx->fp->imacpc);
-        debug_only_v(printf("loop edge to %d, header %d\n",
-                            cx->fp->regs->pc - cx->fp->script->code,
-                            (jsbytecode*)r->getFragment()->root->ip - cx->fp->script->code));
+        debug_only_v(nj_dprintf("loop edge to %d, header %d\n",
+                                cx->fp->regs->pc - cx->fp->script->code,
+                                (jsbytecode*)r->getFragment()->root->ip - cx->fp->script->code));
         js_AbortRecording(cx, "Loop edge does not return to header");
         return false;
     }
 
     /* Make sure inner tree call will not run into an out-of-memory condition. */
     if (tm->reservedDoublePoolPtr < (tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) &&
         !js_ReplenishReservedPool(cx, tm)) {
         js_AbortRecording(cx, "Couldn't call inner tree (out of memory)");
@@ -4031,20 +4031,20 @@ js_RecordLoopEdge(JSContext* cx, TraceRe
     /* Make sure the shape of the global object still matches (this might flush
        the JIT cache). */
     JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
     uint32 globalShape = -1;
     SlotList* globalSlots = NULL;
     if (!CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots))
         return false;
 
-    debug_only_v(printf("Looking for type-compatible peer (%s:%d@%d)\n",
-                        cx->fp->script->filename,
-                        js_FramePCToLineNumber(cx, cx->fp),
-                        FramePCOffset(cx->fp));)
+    debug_only_v(nj_dprintf("Looking for type-compatible peer (%s:%d@%d)\n",
+                            cx->fp->script->filename,
+                            js_FramePCToLineNumber(cx, cx->fp),
+                            FramePCOffset(cx->fp));)
 
     // Find a matching inner tree. If none can be found, compile one.
     Fragment* f = r->findNestedCompatiblePeer(first);
     if (!f || !f->code()) {
         AUDIT(noCompatInnerTrees);
 
         VMFragment* outerFragment = (VMFragment*) tm->recorder->getFragment()->root;
         jsbytecode* outer = (jsbytecode*) outerFragment->ip;
@@ -4094,75 +4094,75 @@ js_RecordLoopEdge(JSContext* cx, TraceRe
         /* abort recording so the inner loop can become type stable. */
         js_AbortRecording(cx, "Inner tree is trying to stabilize, abort outer recording");
         return js_AttemptToStabilizeTree(cx, lr, outer, outerFragment->argc);
       case BRANCH_EXIT:
         /* abort recording the outer tree, extend the inner tree */
         js_AbortRecording(cx, "Inner tree is trying to grow, abort outer recording");
         return js_AttemptToExtendTree(cx, lr, NULL, outer);
       default:
-        debug_only_v(printf("exit_type=%d\n", lr->exitType);)
+        debug_only_v(nj_dprintf("exit_type=%d\n", lr->exitType);)
             js_AbortRecording(cx, "Inner tree not suitable for calling");
         return false;
     }
 }
 
 static bool
 js_IsEntryTypeCompatible(jsval* vp, uint8* m)
 {
     unsigned tag = JSVAL_TAG(*vp);
 
-    debug_only_v(printf("%c/%c ", tagChar[tag], typeChar[*m]);)
+    debug_only_v(nj_dprintf("%c/%c ", tagChar[tag], typeChar[*m]);)
 
     switch (*m) {
       case JSVAL_OBJECT:
         if (tag == JSVAL_OBJECT && !JSVAL_IS_NULL(*vp) &&
             !HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*vp))) {
             return true;
         }
-        debug_only_v(printf("object != tag%u ", tag);)
+        debug_only_v(nj_dprintf("object != tag%u ", tag);)
         return false;
       case JSVAL_INT:
         jsint i;
         if (JSVAL_IS_INT(*vp))
             return true;
         if ((tag == JSVAL_DOUBLE) && JSDOUBLE_IS_INT(*JSVAL_TO_DOUBLE(*vp), i))
             return true;
-        debug_only_v(printf("int != tag%u(value=%lu) ", tag, (unsigned long)*vp);)
+        debug_only_v(nj_dprintf("int != tag%u(value=%lu) ", tag, (unsigned long)*vp);)
         return false;
       case JSVAL_DOUBLE:
         if (JSVAL_IS_INT(*vp) || tag == JSVAL_DOUBLE)
             return true;
-        debug_only_v(printf("double != tag%u ", tag);)
+        debug_only_v(nj_dprintf("double != tag%u ", tag);)
         return false;
       case JSVAL_BOXED:
         JS_NOT_REACHED("shouldn't see boxed type in entry");
         return false;
       case JSVAL_STRING:
         if (tag == JSVAL_STRING)
             return true;
-        debug_only_v(printf("string != tag%u ", tag);)
+        debug_only_v(nj_dprintf("string != tag%u ", tag);)
         return false;
       case JSVAL_TNULL:
         if (JSVAL_IS_NULL(*vp))
             return true;
-        debug_only_v(printf("null != tag%u ", tag);)
+        debug_only_v(nj_dprintf("null != tag%u ", tag);)
         return false;
       case JSVAL_BOOLEAN:
         if (tag == JSVAL_BOOLEAN)
             return true;
-        debug_only_v(printf("bool != tag%u ", tag);)
+        debug_only_v(nj_dprintf("bool != tag%u ", tag);)
         return false;
       default:
         JS_ASSERT(*m == JSVAL_TFUN);
         if (tag == JSVAL_OBJECT && !JSVAL_IS_NULL(*vp) &&
             HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*vp))) {
             return true;
         }
-        debug_only_v(printf("fun != tag%u ", tag);)
+        debug_only_v(nj_dprintf("fun != tag%u ", tag);)
         return false;
     }
 }
 
 JS_REQUIRES_STACK Fragment*
 TraceRecorder::findNestedCompatiblePeer(Fragment* f)
 {
     JSTraceMonitor* tm;
@@ -4173,17 +4173,17 @@ TraceRecorder::findNestedCompatiblePeer(
 
     TreeInfo* ti;
     for (; f != NULL; f = f->peer) {
         if (!f->code())
             continue;
 
         ti = (TreeInfo*)f->vmprivate;
 
-        debug_only_v(printf("checking nested types %p: ", (void*)f);)
+        debug_only_v(nj_dprintf("checking nested types %p: ", (void*)f);)
 
         if (ngslots > ti->nGlobalTypes())
             specializeTreesToMissingGlobals(cx, ti);
 
         uint8* typemap = ti->typeMap.data();
 
         /*
          * Determine whether the typemap of the inner tree matches the outer tree's
@@ -4191,42 +4191,42 @@ TraceRecorder::findNestedCompatiblePeer(
          * doesn't guarantee an integer for that slot, we mark the slot undemotable
          * and mismatch here. This will force a new tree to be compiled that accepts
          * a double for the slot. If the inner tree expects a double, but the outer
          * tree has an integer, we can proceed, but we mark the location undemotable.
          */
         bool ok = true;
         uint8* m = typemap;
         FORALL_SLOTS_IN_PENDING_FRAMES(cx, 0,
-            debug_only_v(printf("%s%d=", vpname, vpnum);)
+            debug_only_v(nj_dprintf("%s%d=", vpname, vpnum);)
             if (!js_IsEntryTypeCompatible(vp, m)) {
                 ok = false;
             } else if (!isPromoteInt(get(vp)) && *m == JSVAL_INT) {
                 oracle.markStackSlotUndemotable(cx, unsigned(m - typemap));
                 ok = false;
             } else if (JSVAL_IS_INT(*vp) && *m == JSVAL_DOUBLE) {
                 oracle.markStackSlotUndemotable(cx, unsigned(m - typemap));
             }
             m++;
         );
         FORALL_GLOBAL_SLOTS(cx, ngslots, gslots,
-            debug_only_v(printf("%s%d=", vpname, vpnum);)
+            debug_only_v(nj_dprintf("%s%d=", vpname, vpnum);)
             if (!js_IsEntryTypeCompatible(vp, m)) {
                 ok = false;
             } else if (!isPromoteInt(get(vp)) && *m == JSVAL_INT) {
                 oracle.markGlobalSlotUndemotable(cx, gslots[n]);
                 ok = false;
             } else if (JSVAL_IS_INT(*vp) && *m == JSVAL_DOUBLE) {
                 oracle.markGlobalSlotUndemotable(cx, gslots[n]);
             }
             m++;
         );
         JS_ASSERT(unsigned(m - ti->typeMap.data()) == ti->typeMap.length());
 
-        debug_only_v(printf(" %s\n", ok ? "match" : "");)
+        debug_only_v(nj_dprintf(" %s\n", ok ? "match" : "");)
 
         if (ok)
             return f;
     }
 
     return NULL;
 }
 
@@ -4249,29 +4249,29 @@ js_CheckEntryTypes(JSContext* cx, TreeIn
         specializeTreesToMissingGlobals(cx, ti);
 
     uint8* m = ti->typeMap.data();
 
     JS_ASSERT(ti->typeMap.length() == js_NativeStackSlots(cx, 0) + ngslots);
     JS_ASSERT(ti->typeMap.length() == ti->nStackTypes + ngslots);
     JS_ASSERT(ti->nGlobalTypes() == ngslots);
     FORALL_SLOTS(cx, ngslots, gslots, 0,
-        debug_only_v(printf("%s%d=", vpname, vpnum);)
+        debug_only_v(nj_dprintf("%s%d=", vpname, vpnum);)
         JS_ASSERT(*m != 0xCD);
         if (!js_IsEntryTypeCompatible(vp, m))
             goto check_fail;
         m++;
     );
     JS_ASSERT(unsigned(m - ti->typeMap.data()) == ti->typeMap.length());
 
-    debug_only_v(printf("\n");)
+    debug_only_v(nj_dprintf("\n");)
     return true;
 
 check_fail:
-    debug_only_v(printf("\n");)
+    debug_only_v(nj_dprintf("\n");)
     return false;
 }
 
 /**
  * Find an acceptable entry tree given a PC.
  *
  * @param cx            Context.
  * @param f             First peer fragment.
@@ -4280,17 +4280,17 @@ check_fail:
  */
 static JS_REQUIRES_STACK Fragment*
 js_FindVMCompatiblePeer(JSContext* cx, Fragment* f, uintN& count)
 {
     count = 0;
     for (; f != NULL; f = f->peer) {
         if (f->vmprivate == NULL)
             continue;
-        debug_only_v(printf("checking vm types %p (ip: %p): ", (void*)f, f->ip);)
+        debug_only_v(nj_dprintf("checking vm types %p (ip: %p): ", (void*)f, f->ip);)
         if (js_CheckEntryTypes(cx, (TreeInfo*)f->vmprivate))
             return f;
         ++count;
     }
     return NULL;
 }
 
 static void
@@ -4360,22 +4360,22 @@ js_ExecuteTree(JSContext* cx, Fragment* 
 
 #ifdef DEBUG
     memset(stack_buffer, 0xCD, sizeof(stack_buffer));
     memset(global, 0xCD, (globalFrameSize+1)*sizeof(double));
     JS_ASSERT(globalFrameSize <= MAX_GLOBAL_SLOTS);
 #endif
 
     debug_only(*(uint64*)&global[globalFrameSize] = 0xdeadbeefdeadbeefLL;)
-    debug_only_v(printf("entering trace at %s:%u@%u, native stack slots: %u code: %p\n",
-                        cx->fp->script->filename,
-                        js_FramePCToLineNumber(cx, cx->fp),
-                        FramePCOffset(cx->fp),
-                        ti->maxNativeStackSlots,
-                        f->code());)
+    debug_only_v(nj_dprintf("entering trace at %s:%u@%u, native stack slots: %u code: %p\n",
+                            cx->fp->script->filename,
+                            js_FramePCToLineNumber(cx, cx->fp),
+                            FramePCOffset(cx->fp),
+                            ti->maxNativeStackSlots,
+                            f->code());)
 
     JS_ASSERT(ti->nGlobalTypes() == ngslots);
 
     if (ngslots)
         BuildNativeGlobalFrame(cx, ngslots, gslots, ti->globalTypeMap(), global);
     BuildNativeStackFrame(cx, 0/*callDepth*/, ti->typeMap.data(), stack_buffer);
 
     union { NIns *code; GuardRecord* (FASTCALL *func)(InterpState*, Fragment*); } u;
@@ -4520,21 +4520,21 @@ LeaveTree(InterpState& state, VMSideExit
     JS_ARENA_RELEASE(&cx->stackPool, state.stackMark);
     while (callstack < rp) {
         /* Synthesize a stack frame and write out the values in it using the type map pointer
            on the native call stack. */
         js_SynthesizeFrame(cx, **callstack);
         int slots = FlushNativeStackFrame(cx, 1/*callDepth*/, (uint8*)(*callstack+1), stack, cx->fp);
 #ifdef DEBUG
         JSStackFrame* fp = cx->fp;
-        debug_only_v(printf("synthesized deep frame for %s:%u@%u, slots=%d\n",
-                            fp->script->filename,
-                            js_FramePCToLineNumber(cx, fp),
-                            FramePCOffset(fp),
-                            slots);)
+        debug_only_v(nj_dprintf("synthesized deep frame for %s:%u@%u, slots=%d\n",
+                                fp->script->filename,
+                                js_FramePCToLineNumber(cx, fp),
+                                FramePCOffset(fp),
+                                slots);)
 #endif
         /* Keep track of the additional frames we put on the interpreter stack and the native
            stack slots we consumed. */
         ++*state.inlineCallCountp;
         ++callstack;
         stack += slots;
     }
 
@@ -4543,19 +4543,19 @@ LeaveTree(InterpState& state, VMSideExit
     JS_ASSERT(rp == callstack);
     unsigned calldepth = innermost->calldepth;
     unsigned calldepth_slots = 0;
     for (unsigned n = 0; n < calldepth; ++n) {
         calldepth_slots += js_SynthesizeFrame(cx, *callstack[n]);
         ++*state.inlineCallCountp;
 #ifdef DEBUG
         JSStackFrame* fp = cx->fp;
-        debug_only_v(printf("synthesized shallow frame for %s:%u@%u\n",
-                            fp->script->filename, js_FramePCToLineNumber(cx, fp),
-                            FramePCOffset(fp));)
+        debug_only_v(nj_dprintf("synthesized shallow frame for %s:%u@%u\n",
+                                fp->script->filename, js_FramePCToLineNumber(cx, fp),
+                                FramePCOffset(fp));)
 #endif
     }
 
     /* Adjust sp and pc relative to the tree we exited from (not the tree we entered into).
        These are our final values for sp and pc since js_SynthesizeFrame has already taken
        care of all frames in between. But first we recover fp->blockChain, which comes from
        the side exit struct. */
     JSStackFrame* fp = cx->fp;
@@ -4572,27 +4572,27 @@ LeaveTree(InterpState& state, VMSideExit
                  js_ReconstructStackDepth(cx, fp->script, fp->regs->pc) == fp->regs->sp);
 
 #ifdef EXECUTE_TREE_TIMER
     uint64 cycles = rdtsc() - state.startTime;
 #elif defined(JS_JIT_SPEW)
     uint64 cycles = 0;
 #endif
 
-    debug_only_v(printf("leaving trace at %s:%u@%u, op=%s, lr=%p, exitType=%d, sp=%d, "
-                        "calldepth=%d, cycles=%llu\n",
-                        fp->script->filename,
-                        js_FramePCToLineNumber(cx, fp),
-                        FramePCOffset(fp),
-                        js_CodeName[fp->imacpc ? *fp->imacpc : *fp->regs->pc],
-                        (void*)lr,
-                        lr->exitType,
-                        fp->regs->sp - StackBase(fp),
-                        calldepth,
-                        cycles));
+    debug_only_v(nj_dprintf("leaving trace at %s:%u@%u, op=%s, lr=%p, exitType=%d, sp=%d, "
+                            "calldepth=%d, cycles=%llu\n",
+                            fp->script->filename,
+                            js_FramePCToLineNumber(cx, fp),
+                            FramePCOffset(fp),
+                            js_CodeName[fp->imacpc ? *fp->imacpc : *fp->regs->pc],
+                            (void*)lr,
+                            lr->exitType,
+                            fp->regs->sp - StackBase(fp),
+                            calldepth,
+                            cycles));
 
     /* If this trace is part of a tree, later branches might have added additional globals for
        which we don't have any type information available in the side exit. We merge in this
        information from the entry type-map. See also comment in the constructor of TraceRecorder
        why this is always safe to do. */
     TreeInfo* outermostTree = state.outermostTree;
     uint16* gslots = outermostTree->globalSlots->data();
     unsigned ngslots = outermostTree->globalSlots->length();
@@ -4719,28 +4719,28 @@ js_MonitorLoopEdge(JSContext* cx, uintN&
         if (++f->hits() < HOTLOOP)
             return false;
         /* We can give RecordTree the root peer. If that peer is already taken, it will
            walk the peer list and find us a free slot or allocate a new tree if needed. */
         return js_RecordTree(cx, tm, f->first, NULL, 0, globalObj, globalShape, 
                              globalSlots, argc);
     }
 
-    debug_only_v(printf("Looking for compat peer %d@%d, from %p (ip: %p)\n",
-                        js_FramePCToLineNumber(cx, cx->fp),
-                        FramePCOffset(cx->fp), (void*)f, f->ip);)
+    debug_only_v(nj_dprintf("Looking for compat peer %d@%d, from %p (ip: %p)\n",
+                            js_FramePCToLineNumber(cx, cx->fp),
+                            FramePCOffset(cx->fp), (void*)f, f->ip);)
 
     uintN count;
     Fragment* match = js_FindVMCompatiblePeer(cx, f, count);
     if (!match) {
         if (count < MAXPEERS)
             goto record;
         /* If we hit the max peers ceiling, don't try to look up fragments all the time. That's
            expensive. This must be a rather type-unstable loop. */
-        debug_only_v(printf("Blacklisted: too many peer trees.\n");)
+        debug_only_v(nj_dprintf("Blacklisted: too many peer trees.\n");)
         js_Blacklist((jsbytecode*) f->root->ip);
         return false;
     }
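The comment above motivates the MAXPEERS cutoff. A tiny sketch of that control flow, with invented names (monitorSketch, MonitorAction) rather than the real helpers:

    enum MonitorAction { ExecuteMatch, RecordNewTree, Blacklisted };

    MonitorAction monitorSketch(bool haveMatch, unsigned peerCount, unsigned maxPeers)
    {
        if (haveMatch)
            return ExecuteMatch;     // a type-compatible tree exists: run it
        if (peerCount < maxPeers)
            return RecordNewTree;    // still cheap enough to specialize once more
        return Blacklisted;          // too many peers: stop re-checking this loop
    }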
 
     VMSideExit* lr = NULL;
     VMSideExit* innermostNestedGuard = NULL;
 
     lr = js_ExecuteTree(cx, match, inlineCallCount, &innermostNestedGuard);
@@ -4869,24 +4869,24 @@ js_AbortRecording(JSContext* cx, const c
     if (!f || f->lastIns) {
         js_DeleteRecorder(cx);
         return;
     }
 
     JS_ASSERT(!f->vmprivate);
 #ifdef DEBUG
     TreeInfo* ti = tm->recorder->getTreeInfo();
-    debug_only_a(printf("Abort recording of tree %s:%d@%d at %s:%d@%d: %s.\n",
-                        ti->treeFileName,
-                        ti->treeLineNumber,
-                        ti->treePCOffset,
-                        cx->fp->script->filename,
-                        js_FramePCToLineNumber(cx, cx->fp),
-                        FramePCOffset(cx->fp),
-                        reason);)
+    debug_only_a(nj_dprintf("Abort recording of tree %s:%d@%d at %s:%d@%d: %s.\n",
+                            ti->treeFileName,
+                            ti->treeLineNumber,
+                            ti->treePCOffset,
+                            cx->fp->script->filename,
+                            js_FramePCToLineNumber(cx, cx->fp),
+                            FramePCOffset(cx->fp),
+                            reason);)
 #endif
 
     js_Backoff(cx, (jsbytecode*) f->root->ip, f->root);
 
     /*
      * If js_DeleteRecorder flushed the code cache, we can't rely on f any more.
      */
     if (!js_DeleteRecorder(cx))
@@ -5210,26 +5210,26 @@ js_InitJIT(JSTraceMonitor *tm)
 #endif
 }
 
 void
 js_FinishJIT(JSTraceMonitor *tm)
 {
 #ifdef JS_JIT_SPEW
     if (js_verboseStats && jitstats.recorderStarted) {
-        printf("recorder: started(%llu), aborted(%llu), completed(%llu), different header(%llu), "
-               "trees trashed(%llu), slot promoted(%llu), unstable loop variable(%llu), "
-               "breaks(%llu), returns(%llu), unstableInnerCalls(%llu)\n",
-               jitstats.recorderStarted, jitstats.recorderAborted, jitstats.traceCompleted,
-               jitstats.returnToDifferentLoopHeader, jitstats.treesTrashed, jitstats.slotPromoted,
-               jitstats.unstableLoopVariable, jitstats.breakLoopExits, jitstats.returnLoopExits,
-               jitstats.noCompatInnerTrees);
-        printf("monitor: triggered(%llu), exits(%llu), type mismatch(%llu), "
-               "global mismatch(%llu)\n", jitstats.traceTriggered, jitstats.sideExitIntoInterpreter,
-               jitstats.typeMapMismatchAtEntry, jitstats.globalShapeMismatchAtEntry);
+        nj_dprintf("recorder: started(%llu), aborted(%llu), completed(%llu), different header(%llu), "
+                   "trees trashed(%llu), slot promoted(%llu), unstable loop variable(%llu), "
+                   "breaks(%llu), returns(%llu), unstableInnerCalls(%llu)\n",
+                   jitstats.recorderStarted, jitstats.recorderAborted, jitstats.traceCompleted,
+                   jitstats.returnToDifferentLoopHeader, jitstats.treesTrashed, jitstats.slotPromoted,
+                   jitstats.unstableLoopVariable, jitstats.breakLoopExits, jitstats.returnLoopExits,
+                   jitstats.noCompatInnerTrees);
+        nj_dprintf("monitor: triggered(%llu), exits(%llu), type mismatch(%llu), "
+                   "global mismatch(%llu)\n", jitstats.traceTriggered, jitstats.sideExitIntoInterpreter,
+                   jitstats.typeMapMismatchAtEntry, jitstats.globalShapeMismatchAtEntry);
     }
 #endif
     if (tm->fragmento != NULL) {
         JS_ASSERT(tm->reservedDoublePool);
         verbose_only(delete tm->fragmento->labels;)
 #ifdef DEBUG
         delete tm->lirbuf->names;
         tm->lirbuf->names = NULL;
@@ -5307,28 +5307,28 @@ js_PurgeScriptRecordingAttempts(JSDHashT
     return JS_DHASH_NEXT;
 }
 
 JS_REQUIRES_STACK void
 js_PurgeScriptFragments(JSContext* cx, JSScript* script)
 {
     if (!TRACING_ENABLED(cx))
         return;
-    debug_only_v(printf("Purging fragments for JSScript %p.\n", (void*)script);)
+    debug_only_v(nj_dprintf("Purging fragments for JSScript %p.\n", (void*)script);)
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
     for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
         for (VMFragment **f = &(tm->vmfragments[i]); *f; ) {
             VMFragment* frag = *f;
             /* Disable future use of any script-associated VMFragment.*/
             if (JS_UPTRDIFF(frag->ip, script->code) < script->length) {
                 JS_ASSERT(frag->root == frag);
-                debug_only_v(printf("Disconnecting VMFragment %p "
-                                    "with ip %p, in range [%p,%p).\n",
-                                    (void*)frag, frag->ip, script->code,
-                                    script->code + script->length));
+                debug_only_v(nj_dprintf("Disconnecting VMFragment %p "
+                                        "with ip %p, in range [%p,%p).\n",
+                                        (void*)frag, frag->ip, script->code,
+                                        script->code + script->length));
                 VMFragment* next = frag->next;
                 js_TrashTree(cx, frag);
                 *f = next;
             } else {
                 f = &((*f)->next);
             }
         }
     }
@@ -5402,17 +5402,17 @@ js_DeepBail(JSContext *cx)
     JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
     JSContext *tracecx = tm->tracecx;
 
     /* It's a bug if a non-FAIL_STATUS builtin gets here. */
     JS_ASSERT(tracecx->bailExit);
 
     tm->tracecx = NULL;
     tm->prohibitFlush++;
-    debug_only_v(printf("Deep bail.\n");)
+    debug_only_v(nj_dprintf("Deep bail.\n");)
     LeaveTree(*tracecx->interpState, tracecx->bailExit);
     tracecx->bailExit = NULL;
     tracecx->interpState->builtinStatus |= JSBUILTIN_BAILED;
 }
 
 JS_REQUIRES_STACK jsval&
 TraceRecorder::argval(unsigned n) const
 {
@@ -6843,22 +6843,22 @@ TraceRecorder::record_EnterFrame()
 
     if (++callDepth >= MAX_CALLDEPTH)
         ABORT_TRACE("exceeded maximum call depth");
     // FIXME: Allow and attempt to inline a single level of recursion until we compile
     //        recursive calls as independent trees (459301).
     if (fp->script == fp->down->script && fp->down->down && fp->down->down->script == fp->script)
         ABORT_TRACE("recursive call");
 
-    debug_only_v(printf("EnterFrame %s, callDepth=%d\n",
-                        js_AtomToPrintableString(cx, cx->fp->fun->atom),
-                        callDepth);)
+    debug_only_v(nj_dprintf("EnterFrame %s, callDepth=%d\n",
+                            js_AtomToPrintableString(cx, cx->fp->fun->atom),
+                            callDepth);)
     debug_only_v(
         js_Disassemble(cx, cx->fp->script, JS_TRUE, stdout);
-        printf("----\n");)
+        nj_dprintf("----\n");)
     LIns* void_ins = INS_CONST(JSVAL_TO_PSEUDO_BOOLEAN(JSVAL_VOID));
 
     jsval* vp = &fp->argv[fp->argc];
     jsval* vpstop = vp + ptrdiff_t(fp->fun->nargs) - ptrdiff_t(fp->argc);
     while (vp < vpstop) {
         if (vp >= fp->down->regs->sp)
             nativeFrameTracker.set(vp, (LIns*)0);
         set(vp++, void_ins, true);
@@ -6871,19 +6871,19 @@ TraceRecorder::record_EnterFrame()
     return JSRS_CONTINUE;
 }
 
 JS_REQUIRES_STACK JSRecordingStatus
 TraceRecorder::record_LeaveFrame()
 {
     debug_only_v(
         if (cx->fp->fun)
-            printf("LeaveFrame (back to %s), callDepth=%d\n",
-                   js_AtomToPrintableString(cx, cx->fp->fun->atom),
-                   callDepth);
+            nj_dprintf("LeaveFrame (back to %s), callDepth=%d\n",
+                       js_AtomToPrintableString(cx, cx->fp->fun->atom),
+                       callDepth);
         );
     if (callDepth-- <= 0)
         ABORT_TRACE("returned out of a loop we started tracing");
 
     // LeaveFrame gets called after the interpreter popped the frame and
     // stored rval, so cx->fp not cx->fp->down, and -1 not 0.
     atoms = FrameAtomBase(cx, cx->fp);
     set(&stackval(-1), rval_ins, true);
@@ -6938,17 +6938,17 @@ TraceRecorder::record_JSOP_RETURN()
     jsval& rval = stackval(-1);
     JSStackFrame *fp = cx->fp;
     if ((cx->fp->flags & JSFRAME_CONSTRUCTING) && JSVAL_IS_PRIMITIVE(rval)) {
         JS_ASSERT(OBJECT_TO_JSVAL(fp->thisp) == fp->argv[-1]);
         rval_ins = get(&fp->argv[-1]);
     } else {
         rval_ins = get(&rval);
     }
-    debug_only_v(printf("returning from %s\n", js_AtomToPrintableString(cx, cx->fp->fun->atom));)
+    debug_only_v(nj_dprintf("returning from %s\n", js_AtomToPrintableString(cx, cx->fp->fun->atom));)
     clearFrameSlotsFromCache();
 
     return JSRS_CONTINUE;
 }
 
 JS_REQUIRES_STACK JSRecordingStatus
 TraceRecorder::record_JSOP_GOTO()
 {
@@ -10699,34 +10699,34 @@ js_DumpPeerStability(JSTraceMonitor* tm,
     Fragment* f;
     TreeInfo* ti;
     bool looped = false;
     unsigned length = 0;
 
     for (f = getLoop(tm, ip, globalObj, globalShape, argc); f != NULL; f = f->peer) {
         if (!f->vmprivate)
             continue;
-        printf("fragment %p:\nENTRY: ", (void*)f);
+        nj_dprintf("fragment %p:\nENTRY: ", (void*)f);
         ti = (TreeInfo*)f->vmprivate;
         if (looped)
             JS_ASSERT(ti->nStackTypes == length);
         for (unsigned i = 0; i < ti->nStackTypes; i++)
-            printf("S%d ", ti->stackTypeMap()[i]);
+            nj_dprintf("S%d ", ti->stackTypeMap()[i]);
         for (unsigned i = 0; i < ti->nGlobalTypes(); i++)
-            printf("G%d ", ti->globalTypeMap()[i]);
-        printf("\n");
+            nj_dprintf("G%d ", ti->globalTypeMap()[i]);
+        nj_dprintf("\n");
         UnstableExit* uexit = ti->unstableExits;
         while (uexit != NULL) {
-            printf("EXIT:  ");
+            nj_dprintf("EXIT:  ");
             uint8* m = getFullTypeMap(uexit->exit);
             for (unsigned i = 0; i < uexit->exit->numStackSlots; i++)
-                printf("S%d ", m[i]);
+                nj_dprintf("S%d ", m[i]);
             for (unsigned i = 0; i < uexit->exit->numGlobalSlots; i++)
-                printf("G%d ", m[uexit->exit->numStackSlots + i]);
-            printf("\n");
+                nj_dprintf("G%d ", m[uexit->exit->numStackSlots + i]);
+            nj_dprintf("\n");
             uexit = uexit->next;
         }
         length = ti->nStackTypes;
         looped = true;
     }
 }
 #endif
 
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -316,17 +316,17 @@ namespace nanojit
 		else if (samepage(_nIns,_stats.codeStart))
 			main = bytesBetween(_stats.codeStart, _nIns);
 		else
 		{
 			pages--;
 			main = ((intptr_t)_stats.codeStart & (NJ_PAGE_SIZE-1)) ? bytesFromTop(_stats.codeStart)+1 : 0;
 			main += bytesToBottom(_nIns)+1;
 		}
-		//fprintf(stderr,"size %d, exit is %d, main is %d, page count %d, sizeof %d\n", (int)((pages) * NJ_PAGE_SIZE + main + exit),(int)exit, (int)main, (int)_stats.pages, (int)sizeof(Page));
+		//nj_dprintf("size %d, exit is %d, main is %d, page count %d, sizeof %d\n", (int)((pages) * NJ_PAGE_SIZE + main + exit),(int)exit, (int)main, (int)_stats.pages, (int)sizeof(Page));
 		return (pages) * NJ_PAGE_SIZE + main + exit;		
 	}
 
 	#undef bytesFromTop
 	#undef bytesToBottom
 	#undef byteBetween
 	
 	Page* Assembler::handoverPages(bool exitPages)
@@ -745,17 +745,17 @@ namespace nanojit
 
 		// make sure we got at least one page of memory
 		if (error()) return;
 			
 #ifdef PERFM
 		_stats.pages = 0;
 		_stats.codeStart = _nIns-1;
 		_stats.codeExitStart = _nExitIns-1;		
-		//fprintf(stderr,"pageReset %d start %x exit start %x\n", _stats.pages, (int)_stats.codeStart, (int)_stats.codeExitStart);
+		//nj_dprintf("pageReset %d start %x exit start %x\n", _stats.pages, (int)_stats.codeStart, (int)_stats.codeExitStart);
 #endif /* PERFM */
 
         _epilogue = genEpilogue();
 		_branchStateMap = branchStateMap;
         _labels.clear();
         _patches.clear();
 
 		verbose_only( outputAddr=true; )
@@ -783,17 +783,17 @@ namespace nanojit
 
 		verbose_only(_thisfrag->compileNbr++; )
 		verbose_only(_frago->_stats.compiles++; )
 		verbose_only(_frago->_stats.totalCompiles++; )
 		_inExit = false;	
         gen(rdr, loopJumps);
 		frag->loopEntry = _nIns;
 		//frag->outbound = core->config.tree_opt? _latestGuard : 0;
-		//fprintf(stderr, "assemble frag %X entry %X\n", (int)frag, (int)frag->fragEntry);
+		//nj_dprintf("assemble frag %X entry %X\n", (int)frag, (int)frag->fragEntry);
 
         if (!error()) {
 		    // patch all branches
 		    while(!_patches.isEmpty())
 		    {
 			    NIns* where = _patches.lastKey();
 			    LInsp targ = _patches.removeLast();
                 LabelState *label = _labels.get(targ);
@@ -857,17 +857,17 @@ namespace nanojit
             frag->fragEntry = fragEntry;
 			NIns* code = _nIns;
 #ifdef PERFM
 			_nvprof("code", codeBytes());  // requires that all pages are released between begin/endAssembly(), otherwise we double count
 #endif
 			// let the fragment manage the pages if we're using trees and there are branches
 			Page* manage = (_frago->core()->config.tree_opt) ? handoverPages() : 0;			
 			frag->setCode(code, manage); // root of tree should manage all pages
-			//fprintf(stderr, "endAssembly frag %X entry %X\n", (int)frag, (int)frag->fragEntry);
+			//nj_dprintf("endAssembly frag %X entry %X\n", (int)frag, (int)frag->fragEntry);
 		}
 		else
 		{
             // In case of failure, reset _nIns ready for the next assembly run.
             resetInstructionPointer();
 		}
 		
 		NanoAssertMsgf(error() || _fpuStkDepth == 0,"_fpuStkDepth %d",_fpuStkDepth);
@@ -1842,17 +1842,17 @@ namespace nanojit
 			if (_outputCache)
 			{
 				char* str = (char*)_gc->Alloc(strlen(s)+1);
 				strcpy(str, s);
 				_outputCache->add(str);
 			}
 			else
 			{
-				_frago->core()->console << s << "\n";
+                nj_dprintf("%s\n", s);
 			}
 		}
 
 		void Assembler::output_asm(const char* s)
 		{
 			if (!verbose_enabled())
 				return;
 				output(s);
--- a/js/src/nanojit/Fragmento.cpp
+++ b/js/src/nanojit/Fragmento.cpp
@@ -110,17 +110,17 @@ namespace nanojit
 	{
         AllocEntry *entry;
 
 		clearFrags();
         _frags.clear();		
 		_freePages.clear();
 		while( _allocList.size() > 0 )
 		{
-			//fprintf(stderr,"dealloc %x\n", (intptr_t)_allocList.get(_allocList.size()-1));
+			//nj_dprintf("dealloc %x\n", (intptr_t)_allocList.get(_allocList.size()-1));
 #ifdef MEMORY_INFO
 			ChangeSizeExplicit("NanoJitMem", -1, _gcHeap->Size(_allocList.last()));
 #endif
             entry = _allocList.removeLast();
 			_gcHeap->Free( entry->page, entry->allocSize );
             NJ_DELETE(entry);
 		}
         NJ_DELETE(_assm);
@@ -188,28 +188,28 @@ namespace nanojit
 			// convert _max_pages to gc page count 
 			int32_t gcpages = (count*NJ_PAGE_SIZE) / _gcHeap->kNativePageSize;
 			MMGC_MEM_TYPE("NanoJitMem"); 
 			memory = (Page*)_gcHeap->Alloc(gcpages);
 #ifdef MEMORY_INFO
 			ChangeSizeExplicit("NanoJitMem", 1, _gcHeap->Size(memory));
 #endif
 			NanoAssert((int*)memory == pageTop(memory));
-			//fprintf(stderr,"head alloc of %d at %x of %d pages using nj page size of %d\n", gcpages, (intptr_t)memory, (intptr_t)_gcHeap->kNativePageSize, NJ_PAGE_SIZE);
+			//nj_dprintf("head alloc of %d at %x of %d pages using nj page size of %d\n", gcpages, (intptr_t)memory, (intptr_t)_gcHeap->kNativePageSize, NJ_PAGE_SIZE);
 
             entry = NJ_NEW(gc, AllocEntry);
             entry->page = memory;
             entry->allocSize = gcpages;
             _allocList.add(entry);
 
 			_stats.pages += count;
 			Page* page = memory;
 			while(--count >= 0)
 			{
-				//fprintf(stderr,"Fragmento::pageGrow adding page %x ; %d\n", (unsigned)page, _freePages.size()+1);
+				//nj_dprintf("Fragmento::pageGrow adding page %x ; %d\n", (unsigned)page, _freePages.size()+1);
 				_freePages.add(page++);
 			}
 			trackPages();
 		}
 	}
 	
 	// Clear the fragment. This *does not* remove the fragment from the
 	// map--the caller must take care of this.
@@ -234,17 +234,17 @@ namespace nanojit
         while (!_frags.isEmpty()) {
             clearFragment(_frags.removeLast());
 		}
 
 		verbose_only( enterCounts->clear();)
 		verbose_only( mergeCounts->clear();)
 		verbose_only( _stats.flushes++ );
 		verbose_only( _stats.compiles = 0 );
-		//fprintf(stderr, "Fragmento.clearFrags %d free pages of %d\n", _stats.freePages, _stats.pages);
+		//nj_dprintf("Fragmento.clearFrags %d free pages of %d\n", _stats.freePages, _stats.pages);
 	}
 
 	Assembler* Fragmento::assm()
 	{
 		return _assm;
 	}
 
 	AvmCore* Fragmento::core()
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -39,16 +39,29 @@
 #include "nanojit.h"
 #include <stdio.h>
 #include <ctype.h>
 
 #ifdef PERFM
 #include "../vprof/vprof.h"
 #endif /* PERFM */
 
+
+#if defined(NJ_VERBOSE)
+void nj_dprintf( const char* format, ... )
+{
+	va_list vargs;
+	va_start(vargs, format);
+	vfprintf(stdout, format, vargs);
+	va_end(vargs);
+}
+#endif /* NJ_VERBOSE */
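The point of funnelling all spew through one chokepoint is that the sink can be changed in a single place. A stand-alone sketch of the same va_list forwarding pattern with a caller-chosen FILE* (the name nj_dprintf_to is invented, not part of the patch):

    #include <stdarg.h>
    #include <stdio.h>

    static void nj_dprintf_to(FILE* sink, const char* format, ...)
    {
        va_list vargs;
        va_start(vargs, format);
        vfprintf(sink, format, vargs);  /* forward the argument list unchanged */
        va_end(vargs);
    }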
+
+
+
 namespace nanojit
 {
     using namespace avmplus;
 	#ifdef FEATURE_NANOJIT
 
 	const uint8_t operandCount[] = {
 #define OPDEF(op, number, operands) \
         operands,
@@ -1419,41 +1432,41 @@ namespace nanojit
 				}
 				else if (i->isCall()) {
 					for (int j=0, c=i->argc(); j < c; j++)
 						live.add(i->arg(j),i);
 				}
 			}
 		}
  
-		printf("live instruction count %d, total %u, max pressure %d\n",
+		nj_dprintf("live instruction count %d, total %u, max pressure %d\n",
 			live.retired.size(), total, live.maxlive);
-        printf("side exits %u\n", exits);
+        nj_dprintf("side exits %u\n", exits);
 
 		// print live exprs, going forwards
 		LirNameMap *names = lirbuf->names;
         bool newblock = true;
 		for (int j=live.retired.size()-1; j >= 0; j--) 
         {
             RetiredEntry *e = live.retired[j];
             char livebuf[4000], *s=livebuf;
             *s = 0;
             if (!newblock && e->i->isop(LIR_label)) {
-                printf("\n");
+                nj_dprintf("\n");
             }
             newblock = false;
             for (int k=0,n=e->live.size(); k < n; k++) {
 				strcpy(s, names->formatRef(e->live[k]));
 				s += strlen(s);
 				*s++ = ' '; *s = 0;
 				NanoAssert(s < livebuf+sizeof(livebuf));
             }
-			printf("%-60s %s\n", livebuf, names->formatIns(e->i));
+			nj_dprintf("%-60s %s\n", livebuf, names->formatIns(e->i));
             if (e->i->isGuard() || e->i->isBranch() || e->i->isRet()) {
-				printf("\n");
+				nj_dprintf("\n");
                 newblock = true;
             }
 		}
 	}
 
     LabelMap::Entry::~Entry()
     {
     }
@@ -1874,17 +1887,17 @@ namespace nanojit
 		NInsList loopJumps(gc);
 #ifdef MEMORY_INFO
 //		loopJumps.set_meminfo_name("LIR loopjumps");
 #endif
 		assm->beginAssembly(triggerFrag, &regMap);
 		if (assm->error())
 			return;
 
-		//fprintf(stderr, "recompile trigger %X kind %d\n", (int)triggerFrag, triggerFrag->kind);
+		//nj_dprintf("recompile trigger %X kind %d\n", (int)triggerFrag, triggerFrag->kind);
 		Fragment* root = triggerFrag;
 		if (treeCompile)
 		{
 			// recompile the entire tree
 			root = triggerFrag->root;
 			root->fragEntry = 0;
 			root->loopEntry = 0;
 			root->releaseCode(frago);
@@ -1917,16 +1930,21 @@ namespace nanojit
 		assm->assemble(root, loopJumps);
 		verbose_only(if (assm->_verbose) 
 			assm->outputf("compiling trunk %s",
 				frago->labels->format(root));)
 		NanoAssert(!frago->core()->config.tree_opt || root == root->anchor || root->kind == MergeTrace);			
 		assm->endAssembly(root, loopJumps);
 			
 		// reverse output so that assembly is displayed low-to-high
+		// Up to this point, assm->_outputCache has been non-NULL, and so
+		// has been accumulating output.  Now we set it to NULL, traverse
+		// the entire list of stored strings, and hand them a second time
+		// to assm->output.  Since _outputCache is now NULL, outputf just
+		// hands these strings directly onwards to nj_dprintf.
 		verbose_only( assm->_outputCache = 0; )
 		verbose_only(for(int i=asmOutput.size()-1; i>=0; --i) { assm->outputf("%s",asmOutput.get(i)); } );
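A self-contained sketch of the accumulate-then-replay scheme the comment above describes, using invented names (SpewSink, flushReversed) rather than the Assembler API:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct SpewSink {
        std::vector<std::string>* cache;
        SpewSink() : cache(0) {}

        void output(const char* s) {
            if (cache)
                cache->push_back(s);    // buffering pass: remember the line
            else
                std::printf("%s\n", s); // direct pass (nj_dprintf in nanojit)
        }

        void flushReversed(std::vector<std::string>& lines) {
            cache = 0;                              // switch to direct mode
            for (size_t i = lines.size(); i > 0; --i)
                output(lines[i - 1].c_str());       // replay so output reads low-to-high
        }
    };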
 
 		if (assm->error()) {
 			root->fragEntry = 0;
 			root->loopEntry = 0;
 		}
     }
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -533,20 +533,20 @@ namespace nanojit
             return i;
         }
 
 		void flush()
 		{
             int n = code.size();
             if (n) {
 			    for (int i=0; i < n; i++)
-				    printf("    %s\n",names->formatIns(code[i]));
+				    nj_dprintf("    %s\n",names->formatIns(code[i]));
 			    code.clear();
                 if (n > 1)
-        			printf("\n");
+        			nj_dprintf("\n");
             }
 		}
 
 		LIns* insGuard(LOpcode op, LInsp cond, LIns *x) {
 			return add_flush(out->insGuard(op,cond,x));
 		}
 
 		LIns* insBranch(LOpcode v, LInsp condition, LInsp to) {
--- a/js/src/nanojit/NativeARM.cpp
+++ b/js/src/nanojit/NativeARM.cpp
@@ -840,17 +840,17 @@ Assembler::nativePageReset()
     _nExitSlot = 0;
 }
 
 void
 Assembler::nativePageSetup()
 {
     if (!_nIns)      _nIns     = pageAlloc();
     if (!_nExitIns)  _nExitIns = pageAlloc(true);
-    //fprintf(stderr, "assemble onto %x exits into %x\n", (int)_nIns, (int)_nExitIns);
+    //nj_dprintf("assemble onto %x exits into %x\n", (int)_nIns, (int)_nExitIns);
 
     if (!_nSlot)
     {
         // This needs to be done or the samepage macro gets confused; pageAlloc
         // gives us a pointer to just past the end of the page.
         _nIns--;
         _nExitIns--;
 
@@ -944,17 +944,17 @@ Assembler::JMP_far(NIns* addr)
     }
 }
 
 void
 Assembler::BL(NIns* addr)
 {
     intptr_t offs = PC_OFFSET_FROM(addr,_nIns-1);
 
-    //fprintf (stderr, "BL: 0x%x (offs: %d [%x]) @ 0x%08x\n", addr, offs, offs, (intptr_t)(_nIns-1));
+    //nj_dprintf ("BL: 0x%x (offs: %d [%x]) @ 0x%08x\n", addr, offs, offs, (intptr_t)(_nIns-1));
 
     // try to do this with a single S24 call
     if (isS24(offs>>2)) {
         underrunProtect(4);
 
         // recompute offset in case underrunProtect had to allocate a new page.
         offs = PC_OFFSET_FROM(addr,_nIns-1);
         *(--_nIns) = (NIns)( COND_AL | (0xB<<24) | ((offs>>2) & 0xFFFFFF) );
@@ -1095,17 +1095,17 @@ Assembler::asm_ld_imm(Register d, int32_
 // Otherwise, emit the conditional load into pc from a nearby constant,
 // and emit a jump to jump over it in case the condition fails.
 //
 // NB: JMP_nochk depends on this not calling samepage() when _c == AL
 void
 Assembler::B_cond_chk(ConditionCode _c, NIns* _t, bool _chk)
 {
     int32_t offs = PC_OFFSET_FROM(_t,_nIns-1);
-    //fprintf(stderr, "B_cond_chk target: 0x%08x offset: %d @0x%08x\n", _t, offs, _nIns-1);
+    //nj_dprintf("B_cond_chk target: 0x%08x offset: %d @0x%08x\n", _t, offs, _nIns-1);
 
     // optimistically check if this will fit in 24 bits
     if (isS24(offs>>2)) {
         if (_chk) underrunProtect(4);
         // recalculate the offset, because underrunProtect may have
         // moved _nIns to a new page
         offs = PC_OFFSET_FROM(_t,_nIns-1);
     }
--- a/js/src/nanojit/NativeARM.h
+++ b/js/src/nanojit/NativeARM.h
@@ -196,17 +196,17 @@ verbose_only( extern const char* shiftNa
     void asm_ldr_chk(Register d, Register b, int32_t off, bool chk);    \
     void asm_ld_imm(Register d, int32_t imm);                           \
     void asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd);        \
     int* _nSlot;                                                        \
     int* _startingSlot;                                                \
     int* _nExitSlot;
 
 
-//printf("jmp_l_n count=%d, nins=%X, %X = %X\n", (_c), nins, _nIns, ((intptr_t)(nins+(_c))-(intptr_t)_nIns - 4) );
+//nj_dprintf("jmp_l_n count=%d, nins=%X, %X = %X\n", (_c), nins, _nIns, ((intptr_t)(nins+(_c))-(intptr_t)_nIns - 4) );
 
 #define swapptrs()  {                                                   \
         NIns* _tins = _nIns; _nIns=_nExitIns; _nExitIns=_tins;          \
         int* _nslot = _nSlot;                                           \
         _nSlot = _nExitSlot;                                            \
         _nExitSlot = _nslot;                                            \
     }
 
--- a/js/src/nanojit/NativeThumb.cpp
+++ b/js/src/nanojit/NativeThumb.cpp
@@ -260,17 +260,17 @@ namespace nanojit
 
 		// This is ALWAYS going to be a long branch (using the BL instruction)
 		// Which is really 2 instructions, so we need to modify both
 		// XXX -- this is B, not BL, at least on non-Thumb..
 
 		// branch+2 because PC is always 2 instructions ahead on ARM/Thumb
 		int32_t offset = int(target) - int(branch+2);
 
-		//printf("---patching branch at 0x%08x to location 0x%08x (%d-0x%08x)\n", branch, target, offset, offset);
+		//nj_dprintf("---patching branch at 0x%08x to location 0x%08x (%d-0x%08x)\n", branch, target, offset, offset);
 
 		NanoAssert(-(1<<21) <= offset && offset < (1<<21)); 
 		*branch++ = (NIns)(0xF000 | (offset>>12)&0x7FF);
 		*branch =   (NIns)(0xF800 | (offset>>1)&0x7FF);
 	}
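A worked example of the encoding above, assuming NIns is a 16-bit Thumb slot so that branch+2 is four bytes (two instructions) ahead:

    // branch at 0x1000, target at 0x1238
    //   offset      = 0x1238 - (0x1000 + 4)            = 0x234
    //   first half  = 0xF000 | ((0x234 >> 12) & 0x7FF) = 0xF000
    //   second half = 0xF800 | ((0x234 >>  1) & 0x7FF) = 0xF91A
    // i.e. the halfword-aligned offset is split across two 11-bit immediate fields.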
 
 	RegisterMask Assembler::hint(LIns* i, RegisterMask allow /* = ~0 */)
 	{
@@ -905,17 +905,17 @@ namespace nanojit
 			_nExitPool = 0;
 			_nExitSlot = 0;
 	}
 
 	void Assembler::nativePageSetup()
 	{
 		if (!_nIns)		 _nIns	   = pageAlloc();
 		if (!_nExitIns)  _nExitIns = pageAlloc(true);
-		//fprintf(stderr, "assemble onto %x exits into %x\n", (int)_nIns, (int)_nExitIns);
+		//nj_dprintf("assemble onto %x exits into %x\n", (int)_nIns, (int)_nExitIns);
 	
 		if (!_nPool) {
 			_nSlot = _nPool = (int*)_nIns;
 
 			// Make original pool at end of page. Currently
 			// we are pointing off the end of the original page,
 			// so back up 1+NJ_CPOOL_SIZE
 			_nPool = (int*)((int)_nIns - (sizeof(int32_t)*NJ_CPOOL_SIZE));
--- a/js/src/nanojit/TraceTreeDrawer.cpp
+++ b/js/src/nanojit/TraceTreeDrawer.cpp
@@ -96,17 +96,17 @@ namespace nanojit {
     	
     	addBackEdges(root);
     	
         Fragment *lastDrawnBranch = root;
     	for (Fragment *treeBranch = root->branches; treeBranch != 0; treeBranch = treeBranch->nextbranch) {
 			if (!isMergeFragment(treeBranch)) {
 				struct SideExit* exit = treeBranch->spawnedFrom->exit();
 				if (isValidSideExit(exit) && isCompiled(treeBranch)) {
-					verbose_draw_only(printf("Adding edge between %s and %s\n", _labels->format(lastDrawnBranch), _labels->format(treeBranch)));
+					verbose_draw_only(nj_dprintf("Adding edge between %s and %s\n", _labels->format(lastDrawnBranch), _labels->format(treeBranch)));
 					
 					this->addEdge(lastDrawnBranch, treeBranch);
 					lastDrawnBranch = treeBranch;
 				}
 				
                 recursiveDraw(treeBranch);
 			}
 			else {
@@ -114,77 +114,77 @@ namespace nanojit {
 			} // end ifelse
     	}	// end for loop
     }
     
     void TraceTreeDrawer::addBackEdges(Fragment *root) {
     	// At the end of a tree, find out where it goes
     	if (isCrossFragment(root)) {
 			if (root->eot_target) {
-    			verbose_draw_only(printf("Found a cross fragment %s TO %s \n", _labels->format(root), _labels->format(root->eot_target)));
+    			verbose_draw_only(nj_dprintf("Found a cross fragment %s TO %s \n", _labels->format(root), _labels->format(root->eot_target)));
     			this->addEdge(root, root->eot_target);
 			}
     	}
     	else if (isBackEdgeSideExit(root)) {
-			verbose_draw_only(printf("Adding anchor branch edge from %s TO %s\n", _labels->format(root), _labels->format(root->anchor)));
+			verbose_draw_only(nj_dprintf("Adding anchor branch edge from %s TO %s\n", _labels->format(root), _labels->format(root->anchor)));
 			this->addEdge(root, root->anchor);
     	}
     	else if (isSingleTrace(root)) {
-    		verbose_draw_only(printf("Found a single trace %s\n", _labels->format(root)));
+    		verbose_draw_only(nj_dprintf("Found a single trace %s\n", _labels->format(root)));
     		this->addEdge(root, root);
     	}
     	else if (isSpawnedTrace(root)) {
     		struct SideExit *exit = root->spawnedFrom->exit();
 			if (isValidSideExit(exit) && isCompiled(root)) {
-				verbose_draw_only(printf("Found a spawned side exit from %s that is a spawn and compiled %s\n", _labels->format(root), _labels->format(exit->from)));
+				verbose_draw_only(nj_dprintf("Found a spawned side exit from %s that is a spawn and compiled %s\n", _labels->format(root), _labels->format(exit->from)));
 				this->addEdge(root, root->parent);
 			}
     	}
     	else if (hasEndOfTraceFrag(root)) {
-    		verbose_draw_only(printf("%s has an EOT to %s\n", _labels->format(root), _labels->format(root->eot_target)));
+    		verbose_draw_only(nj_dprintf("%s has an EOT to %s\n", _labels->format(root), _labels->format(root->eot_target)));
     		addEdge(root, root->eot_target);
     	}
     }
     
 	void TraceTreeDrawer::addMergeNode(Fragment *mergeRoot) {
-        verbose_draw_only(printf("Found a merge fragment %s and anchor %s\n", _labels->format(mergeRoot), _labels->format(mergeRoot->anchor)));
+        verbose_draw_only(nj_dprintf("Found a merge fragment %s and anchor %s\n", _labels->format(mergeRoot), _labels->format(mergeRoot->anchor)));
         
 		if (hasCompiledBranch(mergeRoot)) {
-			verbose_draw_only(printf("Found a branch to %s\n", _labels->format(mergeRoot->branches)));
+			verbose_draw_only(nj_dprintf("Found a branch to %s\n", _labels->format(mergeRoot->branches)));
 			addEdge(mergeRoot, mergeRoot->branches);
 			recursiveDraw(mergeRoot->branches);
 		}
 		
 		if (hasEndOfTraceFrag(mergeRoot)) {
-            verbose_draw_only(printf("Merge with an EOT to %s\n", _labels->format(mergeRoot->eot_target)));
+            verbose_draw_only(nj_dprintf("Merge with an EOT to %s\n", _labels->format(mergeRoot->eot_target)));
 			addEdge(mergeRoot, mergeRoot->eot_target);
 		}
 		else {
-            verbose_draw_only(printf("Merge to anchor %s\n", _labels->format(mergeRoot->anchor)));
+            verbose_draw_only(nj_dprintf("Merge to anchor %s\n", _labels->format(mergeRoot->anchor)));
 			addEdge(mergeRoot, mergeRoot->anchor);
 		}
 	}
 					    
     void TraceTreeDrawer::draw(Fragment *root) {
 		this->recursiveDraw(root);
 		
-		verbose_draw_only(printf("\nFinished drawing, printing status\n"));
+		verbose_draw_only(nj_dprintf("\nFinished drawing, printing status\n"));
 		verbose_draw_only(this->printTreeStatus(root));
     }
     
     void TraceTreeDrawer::createGraphHeader() {
     	char outputFileName[128];
     	const char *graphMLExtension = ".graphml";
     	
     	int fileNameLength = strlen(this->_fileName);
     	memset(outputFileName, 0, sizeof(outputFileName));
     	strncat(outputFileName, this->_fileName, 128);
     	strncat(outputFileName + fileNameLength - 1, graphMLExtension, 128);	// -1 to overwrite the \0
     	
-    	verbose_draw_only(printf("output file name is %s\n", outputFileName));
+    	verbose_draw_only(nj_dprintf("output file name is %s\n", outputFileName));
     	this->_fstream = fopen(outputFileName, "w");
     	
 		fprintf(_fstream, "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n"
     			"<graphml xmlns=\"http://graphml.graphdrawing.org/xmlns/graphml\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:y=\"http://www.yworks.com/xml/graphml\" xsi:schemaLocation=\"http://graphml.graphdrawing.org/xmlns/graphml http://www.yworks.com/xml/schema/graphml/1.0/ygraphml.xsd\">\n"
     			"<key for=\"node\" id=\"nodeGraphicsID\" yfiles.type=\"nodegraphics\"/>\n"
     			"<key attr.name=\"description\" attr.type=\"string\" for=\"node\" id=\"nodeDescID\"/>\n"
     			"<key for=\"edge\" id=\"edgeGraphicsID\" yfiles.type=\"edgegraphics\"/>\n"
     			"<key attr.name=\"description\" attr.type=\"string\" for=\"edge\" id=\"edgeDescID\"/>\n"
@@ -252,34 +252,34 @@ namespace nanojit {
 				"</data>\n");
     }
 	
 	void TraceTreeDrawer::printTreeStatus(Fragment *root) {
 		if (!isCompiled(root)) {
 			return;
 		}
 		
-    	printf("\nRoot is %s\n", _labels->format(root));
+    	nj_dprintf("\nRoot is %s\n", _labels->format(root));
     	if (root->spawnedFrom) {
 			if (root->compileNbr) {
-					printf("Found a root that is a spawn and compiled %s\n", _labels->format(root->parent));
+					nj_dprintf("Found a root that is a spawn and compiled %s\n", _labels->format(root->parent));
 			}
     	}
     	
     	for (Fragment *x = root->branches; x != 0; x = x->nextbranch) {
     			if (x->kind != MergeTrace) {
     				struct SideExit* exit = x->spawnedFrom->exit();
     				if (exit && x->compileNbr) {
-    					printf("Found one with an SID and compiled %s\n", _labels->format(x));
+    					nj_dprintf("Found one with an SID and compiled %s\n", _labels->format(x));
     				}
     				
     				printTreeStatus(x);
     			}
     	}
-    	printf("\n");
+    	nj_dprintf("\n");
     }
 #endif
 }
 	
 
 void drawTraceTrees(nanojit::Fragmento *frago, nanojit::FragmentMap * _frags, avmplus::AvmCore *core, char *fileName) {
 #ifdef AVMPLUS_VERBOSE
 	nanojit::TraceTreeDrawer *traceDrawer = new (core->gc) nanojit::TraceTreeDrawer(frago, core, fileName);
--- a/js/src/nanojit/nanojit.h
+++ b/js/src/nanojit/nanojit.h
@@ -237,16 +237,36 @@ namespace nanojit
 #define alignTo(x,s)		((((uintptr_t)(x)))&~(((uintptr_t)s)-1))
 #define alignUp(x,s)		((((uintptr_t)(x))+(((uintptr_t)s)-1))&~(((uintptr_t)s)-1))
 
 #define pageTop(x)			( (int*)alignTo(x,NJ_PAGE_SIZE) )
 #define pageDataStart(x)    ( (int*)(alignTo(x,NJ_PAGE_SIZE) + sizeof(PageHeader)) )
 #define pageBottom(x)		( (int*)(alignTo(x,NJ_PAGE_SIZE)+NJ_PAGE_SIZE)-1 )
 #define samepage(x,y)		(pageTop(x) == pageTop(y))
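A quick worked example of these alignment macros, assuming NJ_PAGE_SIZE is 4096:

    /* alignTo (0x12345, 4096) == 0x12000   -- round down to the page base
       alignUp (0x12345, 4096) == 0x13000   -- round up to the next page base
       samepage(0x12345, 0x12fff)           -- true: both round down to 0x12000
       samepage(0x12345, 0x13000)           -- false: 0x13000 starts the next page */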
 
+
+/* Debug printing stuff.  All Nanojit debug printing should be routed
+   through this function.  Don't use ad-hoc calls to printf,
+   fprintf(stderr, ...) etc. */
+
+#if defined(NJ_VERBOSE)
+
+# if defined(__GNUC__)
+# define PRINTF_CHECK(x, y) __attribute__((format(__printf__, x, y)))
+# else
+# define PRINTF_CHECK(x, y)
+# endif
+
+/* is in LIR.cpp */
+void nj_dprintf( const char* format, ... ) PRINTF_CHECK(1,2);
+
+#endif /* NJ_VERBOSE */
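PRINTF_CHECK gives nj_dprintf the same compile-time format checking that printf gets. A minimal stand-alone sketch of the mechanism, with the invented names MY_PRINTF_CHECK and my_log:

    #if defined(__GNUC__)
    # define MY_PRINTF_CHECK(fmt, args) __attribute__((format(__printf__, fmt, args)))
    #else
    # define MY_PRINTF_CHECK(fmt, args)
    #endif

    void my_log(const char* format, ...) MY_PRINTF_CHECK(1, 2);

    /* my_log("%s", 42) now draws a -Wformat warning from GCC, exactly as a bad
       printf call would; converting raw printf calls to nj_dprintf keeps that
       checking thanks to PRINTF_CHECK(1,2) on the declaration above. */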
+
+
+
 #include "Native.h"
 #include "LIR.h"
 #include "RegAlloc.h"
 #include "Fragmento.h"
 #include "Assembler.h"
 #include "TraceTreeDrawer.h"
 
 #endif // FEATURE_NANOJIT