Bug 531687 - Duplicate node names in TMFLAGS=aftersf printout. r=edwsmith.
author: Nicholas Nethercote <nnethercote@mozilla.com>
date: Thu, 18 Mar 2010 10:42:30 +1100
changeset: 40298 42a26f80a36ca8379a4be46b8cb39b20f9fc2d3b
parent: 40297 7e0ebb967dfefb3dbefdec275848c6fa0e921f2c
child: 40299 f929faee2964b4b52c3f0e27e41f6f1dbd448df1
push id: 12610
push user: rsayre@mozilla.com
push date: Mon, 05 Apr 2010 17:26:41 +0000
treeherder: mozilla-central@1942c0b4e101
reviewers: edwsmith
bugs: 531687
milestone: 1.9.3a3pre
js/src/lirasm/lirasm.cpp
js/src/nanojit/Assembler.cpp
js/src/nanojit/Assembler.h
js/src/nanojit/LIR.cpp
js/src/nanojit/LIR.h
js/src/nanojit/VMPI.h
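
The gist of the patch: the LIR formatting routines move from LirNameMap into a new LInsPrinter class and write into caller-supplied InsBuf/RefBuf buffers via VMPI_snprintf, instead of returning pointers into LabelMap's shared internal buffer; name generation stays in LirNameMap (addName/createName/lookupName), and LabelMap's address lookup becomes AddrNameMap. A minimal sketch of the new verbose-printing call pattern, assuming NJ_VERBOSE is defined; the helper and its arguments are hypothetical, while the fields and types are the ones this patch introduces:

    #ifdef NJ_VERBOSE
    // Hypothetical helper (not part of the patch) showing the new call pattern:
    // the caller owns the output buffer, so nested formatRef() calls within a
    // single format operation no longer overwrite each other's storage.
    static void printOneIns(nanojit::Fragment* frag, nanojit::LIns* ins,
                            nanojit::LogControl* logc)
    {
        using namespace nanojit;
        InsBuf b;                                      // 1000-byte stack buffer (see LIR.h)
        LInsPrinter* printer = frag->lirbuf->printer;  // field added by this patch
        logc->printf("  %s\n", printer->formatIns(&b, ins));
    }
    #endif

Call sites that format several operands into one string declare one RefBuf per operand (b1, b2, ...), as the updated formatGuard() and formatIns() bodies in the diff below do.
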
--- a/js/src/lirasm/lirasm.cpp
+++ b/js/src/lirasm/lirasm.cpp
@@ -92,44 +92,42 @@ struct LasmSideExit : public SideExit {
 int
 nanojit::StackFilter::getTop(LIns*)
 {
     return 0;
 }
 
 #if defined NJ_VERBOSE
 void
-nanojit::LirNameMap::formatGuard(LIns *i, char *out)
+nanojit::LInsPrinter::formatGuard(InsBuf *buf, LIns *ins)
 {
-    LasmSideExit *x;
-
-    x = (LasmSideExit *)i->record()->exit;
-    sprintf(out,
+    RefBuf b1, b2;
+    LasmSideExit *x = (LasmSideExit *)ins->record()->exit;
+    snprintf(buf->buf, buf->len,
             "%s: %s %s -> line=%ld (GuardID=%03d)",
-            formatRef(i),
-            lirNames[i->opcode()],
-            i->oprnd1() ? formatRef(i->oprnd1()) : "",
+            formatRef(&b1, ins),
+            lirNames[ins->opcode()],
+            ins->oprnd1() ? formatRef(&b2, ins->oprnd1()) : "",
             (long)x->line,
-            i->record()->profGuardID);
+            ins->record()->profGuardID);
 }
 
 void
-nanojit::LirNameMap::formatGuardXov(LIns *i, char *out)
+nanojit::LInsPrinter::formatGuardXov(InsBuf *buf, LIns *ins)
 {
-    LasmSideExit *x;
-
-    x = (LasmSideExit *)i->record()->exit;
-    sprintf(out,
+    RefBuf b1, b2, b3;
+    LasmSideExit *x = (LasmSideExit *)ins->record()->exit;
+    snprintf(buf->buf, buf->len,
             "%s = %s %s, %s -> line=%ld (GuardID=%03d)",
-            formatRef(i),
-            lirNames[i->opcode()],
-            formatRef(i->oprnd1()),
-            formatRef(i->oprnd2()),
+            formatRef(&b1, ins),
+            lirNames[ins->opcode()],
+            formatRef(&b2, ins->oprnd1()),
+            formatRef(&b3, ins->oprnd2()),
             (long)x->line,
-            i->record()->profGuardID);
+            ins->record()->profGuardID);
 }
 #endif
 
 typedef int32_t (FASTCALL *RetInt)();
 typedef double (FASTCALL *RetFloat)();
 typedef GuardRecord* (FASTCALL *RetGuard)();
 
 struct Function {
@@ -264,17 +262,16 @@ public:
     Lirasm(bool verbose);
     ~Lirasm();
 
     void assemble(istream &in, bool optimize);
     void assembleRandom(int nIns, bool optimize);
     bool lookupFunction(const string &name, CallInfo *&ci);
 
     LirBuffer *mLirbuf;
-    verbose_only( LabelMap *mLabelMap; )
     LogControl mLogc;
     avmplus::AvmCore mCore;
     Allocator mAlloc;
     CodeAlloc mCodeAlloc;
     bool mVerbose;
     Fragments mFragments;
     Assembler mAssm;
     map<string, LOpcode> mOpMap;
@@ -519,17 +516,17 @@ FragmentAssembler::FragmentAssembler(Lir
 #ifdef DEBUG
     if (optimize) {     // don't re-validate if no optimization has taken place
         mLir = mValidateWriter2 = new ValidateWriter(mLir, "end of writer pipeline");
     }
 #endif
 #ifdef DEBUG
     if (mParent.mVerbose) {
         mLir = mVerboseWriter = new VerboseWriter(mParent.mAlloc, mLir,
-                                                  mParent.mLirbuf->names,
+                                                  mParent.mLirbuf->printer,
                                                   &mParent.mLogc);
     }
 #endif
     if (optimize) {
         mLir = mCseFilter = new CseFilter(mLir, mParent.mAlloc);
     }
 #if NJ_SOFTFLOAT_SUPPORTED
     if (avmplus::AvmCore::config.soft_float) {
@@ -800,17 +797,17 @@ FragmentAssembler::endFragment()
         cerr << "warning: multiple return types in fragment '"
              << mFragName << "'" << endl;
     }
 
     mFragment->lastIns =
         mLir->insGuard(LIR_x, NULL, createGuardRecord(createSideExit()));
 
     mParent.mAssm.compile(mFragment, mParent.mAlloc, optimize
-              verbose_only(, mParent.mLabelMap));
+              verbose_only(, mParent.mLirbuf->printer));
 
     if (mParent.mAssm.error() != nanojit::None) {
         cerr << "error during assembly: ";
         switch (mParent.mAssm.error()) {
           case nanojit::ConditionalBranchTooFar: cerr << "ConditionalBranchTooFar"; break;
           case nanojit::StackFull: cerr << "StackFull"; break;
           case nanojit::UnknownBranch:  cerr << "UnknownBranch"; break;
           case nanojit::None: cerr << "None"; break;
@@ -1963,18 +1960,17 @@ Lirasm::Lirasm(bool verbose) :
 {
     mVerbose = verbose;
     mLogc.lcbits = 0;
 
     mLirbuf = new (mAlloc) LirBuffer(mAlloc);
 #ifdef DEBUG
     if (mVerbose) {
         mLogc.lcbits = LC_ReadLIR | LC_Assembly | LC_RegAlloc | LC_Activation;
-        mLabelMap = new (mAlloc) LabelMap(mAlloc, &mLogc);
-        mLirbuf->names = new (mAlloc) LirNameMap(mAlloc, mLabelMap);
+        mLirbuf->printer = new (mAlloc) LInsPrinter(mAlloc);
     }
 #endif
 
     // Populate the mOpMap table.
 #define OP___(op, number, repKind, retType, isCse) \
     mOpMap[#op] = LIR_##op;
 #include "nanojit/LIRopcode.tbl"
 #undef OP___
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -642,19 +642,20 @@ namespace nanojit
         asm_spilli(ins, pop);
         return r;
     }
 
     void Assembler::asm_spilli(LInsp ins, bool pop)
     {
         int d = ins->isInAr() ? arDisp(ins) : 0;
         Register r = ins->getReg();
-        verbose_only( if (d && (_logc->lcbits & LC_Assembly)) {
+        verbose_only( RefBuf b;
+                      if (d && (_logc->lcbits & LC_Assembly)) {
                          setOutputForEOL("  <= spill %s",
-                         _thisfrag->lirbuf->names->formatRef(ins)); } )
+                         _thisfrag->lirbuf->printer->formatRef(&b, ins)); } )
         asm_spill(r, d, pop, ins->isN64());
     }
 
     // XXX: This function is error-prone and should be phased out; see bug 513615.
     void Assembler::deprecated_freeRsrcOf(LIns *ins, bool pop)
     {
         if (ins->isInReg()) {
             asm_spilli(ins, pop);
@@ -705,19 +706,20 @@ namespace nanojit
         // Not free, need to steal.
         counter_increment(steals);
 
         Register r = vic->getReg();
 
         NanoAssert(!_allocator.isFree(r));
         NanoAssert(vic == _allocator.getActive(r));
 
-        verbose_only( if (_logc->lcbits & LC_Assembly) {
+        verbose_only( RefBuf b;
+                      if (_logc->lcbits & LC_Assembly) {
                         setOutputForEOL("  <= restore %s",
-                        _thisfrag->lirbuf->names->formatRef(vic)); } )
+                        _thisfrag->lirbuf->printer->formatRef(&b, vic)); } )
         asm_restore(vic, r);
 
         _allocator.retire(r);
         vic->clearReg();
 
         // At this point 'vic' is unused (if rematerializable), or in a spill
         // slot (if not).
     }
@@ -820,17 +822,17 @@ namespace nanojit
         debug_only( _fpuStkDepth = _sv_fpuStkDepth; _sv_fpuStkDepth = 9999; )
 #endif
 
         verbose_only(_stats.exitnative += (_stats.native-nativeSave));
 
         return jmpTarget;
     }
 
-    void Assembler::compile(Fragment* frag, Allocator& alloc, bool optimize verbose_only(, LabelMap* labels))
+    void Assembler::compile(Fragment* frag, Allocator& alloc, bool optimize verbose_only(, LInsPrinter* printer))
     {
         verbose_only(
         bool anyVerb = (_logc->lcbits & 0xFFFF & ~LC_FragProfile) > 0;
         bool asmVerb = (_logc->lcbits & 0xFFFF & LC_Assembly) > 0;
         bool liveVerb = (_logc->lcbits & 0xFFFF & LC_Liveness) > 0;
         )
 
         /* BEGIN decorative preamble */
@@ -867,19 +869,19 @@ namespace nanojit
 
         //_logc->printf("recompile trigger %X kind %d\n", (int)frag, frag->kind);
 
         verbose_only( if (anyVerb) {
             _logc->printf("=== Translating LIR fragments into assembly:\n");
         })
 
         // now the the main trunk
+        verbose_only( RefBuf b; )
         verbose_only( if (anyVerb) {
-            _logc->printf("=== -- Compile trunk %s: begin\n",
-                          labels->format(frag));
+            _logc->printf("=== -- Compile trunk %s: begin\n", printer->formatAddr(&b, frag));
         })
 
         // Used for debug printing, if needed
         debug_only(ValidateReader *validate = NULL;)
         verbose_only(
         ReverseLister *pp_init = NULL;
         ReverseLister *pp_after_sf = NULL;
         )
@@ -893,50 +895,49 @@ namespace nanojit
 #ifdef DEBUG
         // VALIDATION
         validate = new (alloc) ValidateReader(lir);
         lir = validate;
 #endif
 
         // INITIAL PRINTING
         verbose_only( if (_logc->lcbits & LC_ReadLIR) {
-        pp_init = new (alloc) ReverseLister(lir, alloc, frag->lirbuf->names, _logc,
+        pp_init = new (alloc) ReverseLister(lir, alloc, frag->lirbuf->printer, _logc,
                                     "Initial LIR");
         lir = pp_init;
         })
 
         // STACKFILTER
         if (optimize) {
             StackFilter* stackfilter = new (alloc) StackFilter(lir, alloc, frag->lirbuf->sp);
             lir = stackfilter;
         }
 
         verbose_only( if (_logc->lcbits & LC_AfterSF) {
-        pp_after_sf = new (alloc) ReverseLister(lir, alloc, frag->lirbuf->names, _logc,
+        pp_after_sf = new (alloc) ReverseLister(lir, alloc, frag->lirbuf->printer, _logc,
                                                 "After StackFilter");
         lir = pp_after_sf;
         })
 
         assemble(frag, lir);
 
         // If we were accumulating debug info in the various ReverseListers,
         // call finish() to emit whatever contents they have accumulated.
         verbose_only(
         if (pp_init)        pp_init->finish();
         if (pp_after_sf)    pp_after_sf->finish();
         )
 
         verbose_only( if (anyVerb) {
-            _logc->printf("=== -- Compile trunk %s: end\n",
-                         labels->format(frag));
+            _logc->printf("=== -- Compile trunk %s: end\n", printer->formatAddr(&b, frag));
         })
 
         verbose_only(
             if (asmVerb)
-                outputf("## compiling trunk %s", labels->format(frag));
+                outputf("## compiling trunk %s", printer->formatAddr(&b, frag));
         )
         endAssembly(frag);
 
         // Reverse output so that assembly is displayed low-to-high.
         // Up to this point, _outputCache has been non-NULL, and so has been
         // accumulating output.  Now we set it to NULL, traverse the entire
         // list of stored strings, and hand them a second time to output.
         // Since _outputCache is now NULL, outputf just hands these strings
@@ -1599,17 +1600,18 @@ namespace nanojit
                     bool has_back_edges = false;
 
                     // Merge the regstates of labels we have already seen.
                     for (uint32_t i = count; i-- > 0;) {
                         LIns* to = ins->getTarget(i);
                         LabelState *lstate = _labels.get(to);
                         if (lstate) {
                             unionRegisterState(lstate->regs);
-                            asm_output("   %u: [&%s]", i, _thisfrag->lirbuf->names->formatRef(to));
+                            verbose_only( RefBuf b; )
+                            asm_output("   %u: [&%s]", i, _thisfrag->lirbuf->printer->formatRef(&b, to));
                         } else {
                             has_back_edges = true;
                         }
                     }
                     asm_output("forward edges");
 
                     // In a multi-way jump, the register allocator has no ability to deal
                     // with two existing edges that have conflicting register assignments, unlike
@@ -1622,17 +1624,18 @@ namespace nanojit
                     if (has_back_edges) {
                         handleLoopCarriedExprs(pending_lives);
                         // save merged (empty) register state at target labels we haven't seen yet
                         for (uint32_t i = count; i-- > 0;) {
                             LIns* to = ins->getTarget(i);
                             LabelState *lstate = _labels.get(to);
                             if (!lstate) {
                                 _labels.add(to, 0, _allocator);
-                                asm_output("   %u: [&%s]", i, _thisfrag->lirbuf->names->formatRef(to));
+                                verbose_only( RefBuf b; )
+                                asm_output("   %u: [&%s]", i, _thisfrag->lirbuf->printer->formatRef(&b, to));
                             }
                         }
                         asm_output("backward edges");
                     }
 
                     // Emit the jump instruction, which allocates 1 register for the jump index.
                     NIns** native_table = new (_dataAlloc) NIns*[count];
                     asm_output("[%p]:", (void*)native_table);
@@ -1657,18 +1660,20 @@ namespace nanojit
                     }
                     else {
                         // we're at the top of a loop
                         NanoAssert(label->addr == 0);
                         //evictAllActiveRegs();
                         intersectRegisterState(label->regs);
                         label->addr = _nIns;
                     }
-                    verbose_only( if (_logc->lcbits & LC_Assembly) {
-                        asm_output("[%s]", _thisfrag->lirbuf->names->formatRef(ins));
+                    verbose_only(
+                        RefBuf b;
+                        if (_logc->lcbits & LC_Assembly) {
+                            asm_output("[%s]", _thisfrag->lirbuf->printer->formatRef(&b, ins));
                     })
                     break;
                 }
                 case LIR_xbarrier: {
                     break;
                 }
 #ifdef NANOJIT_IA32
                 case LIR_xtbl: {
@@ -1793,40 +1798,41 @@ namespace nanojit
             // code, because Assembler::outputf() prints everything in reverse.
             //
             // Note that some live LIR instructions won't be printed.  Eg. an
             // immediate won't be printed unless it is explicitly loaded into
             // a register (as opposed to being incorporated into an immediate
             // field in another machine instruction).
             //
             if (_logc->lcbits & LC_Assembly) {
-                LirNameMap* names = _thisfrag->lirbuf->names;
-                outputf("    %s", names->formatIns(ins));
+                InsBuf b;
+                LInsPrinter* printer = _thisfrag->lirbuf->printer;
+                outputf("    %s", printer->formatIns(&b, ins));
                 if (ins->isGuard() && ins->oprnd1() && ins->oprnd1()->isCmp()) {
                     // Special case: code is generated for guard conditions at
                     // the same time that code is generated for the guard
                     // itself.  If the condition is only used by the guard, we
                     // must print it now otherwise it won't get printed.  So
                     // we do print it now, with an explanatory comment.  If
                     // the condition *is* used again we'll end up printing it
                     // twice, but that's ok.
                     outputf("    %s       # codegen'd with the %s",
-                            names->formatIns(ins->oprnd1()), lirNames[op]);
+                            printer->formatIns(&b, ins->oprnd1()), lirNames[op]);
 
                 } else if (ins->isCmov()) {
                     // Likewise for cmov conditions.
                     outputf("    %s       # codegen'd with the %s",
-                            names->formatIns(ins->oprnd1()), lirNames[op]);
+                            printer->formatIns(&b, ins->oprnd1()), lirNames[op]);
 
                 }
 #if defined NANOJIT_IA32 || defined NANOJIT_X64
                 else if (ins->isop(LIR_mod)) {
                     // There's a similar case when a div feeds into a mod.
                     outputf("    %s       # codegen'd with the mod",
-                            names->formatIns(ins->oprnd1()));
+                            printer->formatIns(&b, ins->oprnd1()));
                 }
 #endif
             }
 #endif
 
             if (error())
                 return;
 
@@ -1935,17 +1941,18 @@ namespace nanojit
         VMPI_sprintf(s, "RR");
         s += VMPI_strlen(s);
 
         for (Register r = FirstReg; r <= LastReg; r = nextreg(r)) {
             LIns *ins = _allocator.getActive(r);
             if (ins) {
                 NanoAssertMsg(!_allocator.isFree(r),
                               "Coding error; register is both free and active! " );
-                const char* n = _thisfrag->lirbuf->names->formatRef(ins);
+                RefBuf b;
+                const char* n = _thisfrag->lirbuf->printer->formatRef(&b, ins);
 
                 if (ins->isop(LIR_param) && ins->paramKind()==1 &&
                     r == Assembler::savedRegs[ins->paramArg()])
                 {
                     // dont print callee-saved regs that arent used
                     continue;
                 }
 
@@ -1964,17 +1971,18 @@ namespace nanojit
         VMPI_sprintf(s, "AR");
         s += VMPI_strlen(s);
 
         LIns* ins = 0;
         uint32_t nStackSlots = 0;
         int32_t arIndex = 0;
         for (AR::Iter iter(_activation); iter.next(ins, nStackSlots, arIndex); )
         {
-            const char* n = _thisfrag->lirbuf->names->formatRef(ins);
+            RefBuf b;
+            const char* n = _thisfrag->lirbuf->printer->formatRef(&b, ins);
             if (nStackSlots > 1) {
                 VMPI_sprintf(s," %d-%d(%s)", 4*arIndex, 4*(arIndex+nStackSlots-1), n);
             }
             else {
                 VMPI_sprintf(s," %d(%s)", 4*arIndex, n);
             }
             s += VMPI_strlen(s);
         }
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -292,17 +292,17 @@ namespace nanojit
         public:
             #ifdef VTUNE
             avmplus::CodegenLIR *cgen;
             #endif
 
             Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc, const Config& config);
 
             void        compile(Fragment *frag, Allocator& alloc, bool optimize
-                                verbose_only(, LabelMap*));
+                                verbose_only(, LInsPrinter*));
 
             void        endAssembly(Fragment* frag);
             void        assemble(Fragment* frag, LirFilter* reader);
             void        beginAssembly(Fragment *frag);
 
             void        releaseRegisters();
             void        patch(GuardRecord *lr);
             void        patch(SideExit *exit);
--- a/js/src/nanojit/LIR.cpp
+++ b/js/src/nanojit/LIR.cpp
@@ -92,34 +92,35 @@ namespace nanojit
             _logc->printf("  %02d: %s\n", j++, p->head);
         _logc->printf("=== END %s ===\n", _title);
         _logc->printf("\n");
     }
 
     LInsp ReverseLister::read()
     {
         LInsp i = in->read();
-        const char* str = _names->formatIns(i);
+        InsBuf b;
+        const char* str = _printer->formatIns(&b, i);
         char* cpy = new (_alloc) char[strlen(str)+1];
         VMPI_strcpy(cpy, str);
         _strs.insert(cpy);
         return i;
     }
 #endif
 
 #ifdef NJ_PROFILE
     // @todo fixup move to nanojit.h
     #undef counter_value
     #define counter_value(x)        x
 #endif /* NJ_PROFILE */
 
     // LCompressedBuffer
     LirBuffer::LirBuffer(Allocator& alloc) :
 #ifdef NJ_VERBOSE
-          names(NULL),
+          printer(NULL),
 #endif
           abi(ABI_FASTCALL), state(NULL), param1(NULL), sp(NULL), rp(NULL),
           _allocator(alloc)
     {
         clear();
     }
 
     void LirBuffer::clear()
@@ -1647,284 +1648,342 @@ namespace nanojit
         logc->printf("  Live instruction count %d, total %u, max pressure %d\n",
                      live.retiredCount, total, live.maxlive);
         if (exits > 0)
             logc->printf("  Side exits %u\n", exits);
         logc->printf("  Showing LIR instructions with live-after variables\n");
         logc->printf("\n");
 
         // print live exprs, going forwards
-        LirNameMap *names = frag->lirbuf->names;
+        LInsPrinter *printer = frag->lirbuf->printer;
         bool newblock = true;
         for (Seq<RetiredEntry*>* p = live.retired.get(); p != NULL; p = p->tail) {
             RetiredEntry* e = p->head;
+            InsBuf ib;
+            RefBuf rb;
             char livebuf[4000], *s=livebuf;
             *s = 0;
             if (!newblock && e->i->isop(LIR_label)) {
                 logc->printf("\n");
             }
             newblock = false;
             for (Seq<LIns*>* p = e->live; p != NULL; p = p->tail) {
-                VMPI_strcpy(s, names->formatRef(p->head));
+                VMPI_strcpy(s, printer->formatRef(&rb, p->head));
                 s += VMPI_strlen(s);
                 *s++ = ' '; *s = 0;
                 NanoAssert(s < livebuf+sizeof(livebuf));
             }
             /* If the LIR insn is pretty short, print it and its
                live-after set on the same line.  If not, put
                live-after set on a new line, suitably indented. */
-            const char* insn_text = names->formatIns(e->i);
+            const char* insn_text = printer->formatIns(&ib, e->i);
             if (VMPI_strlen(insn_text) >= 30-2) {
-                logc->printf("  %-30s\n  %-30s %s\n", names->formatIns(e->i), "", livebuf);
+                logc->printf("  %-30s\n  %-30s %s\n", insn_text, "", livebuf);
             } else {
-                logc->printf("  %-30s %s\n", names->formatIns(e->i), livebuf);
+                logc->printf("  %-30s %s\n", insn_text, livebuf);
             }
 
             if (e->i->isGuard() || e->i->isBranch() || e->i->isRet()) {
                 logc->printf("\n");
                 newblock = true;
             }
         }
     }
 
-    void LirNameMap::addName(LInsp i, const char* name) {
-        if (!names.containsKey(i)) {
-            char *copy = new (alloc) char[VMPI_strlen(name)+1];
-            VMPI_strcpy(copy, name);
+
+    void LirNameMap::addNameWithSuffix(LInsp ins, const char *name, int suffix,
+                                       bool ignoreOneSuffix) {
+        // The lookup may succeed, ie. we may already have a name for this
+        // instruction.  This can happen because of CSE.  Eg. if we have this:
+        //
+        //   ins = addName("foo", insImm(0))
+        //
+        // that assigns the name "foo1" to 'ins'.  If we later do this:
+        //
+        //   ins2 = addName("foo", insImm(0))
+        //
+        // then CSE will cause 'ins' and 'ins2' to be equal.  So 'ins2'
+        // already has a name ("foo1") and there's no need to generate a new
+        // name "foo2".
+        //
+        if (!names.containsKey(ins)) {
+            const int N = 100;
+            char name2[N];
+            if (suffix == 1 && ignoreOneSuffix) {
+                VMPI_snprintf(name2, N, "%s", name);                // don't add '1' suffix
+            } else if (VMPI_isdigit(name[VMPI_strlen(name)-1])) {
+                VMPI_snprintf(name2, N, "%s_%d", name, suffix);     // use '_' to avoid confusion
+            } else {
+                VMPI_snprintf(name2, N, "%s%d", name, suffix);      // normal case
+            }
+
+            char *copy = new (alloc) char[VMPI_strlen(name2)+1];
+            VMPI_strcpy(copy, name2);
             Entry *e = new (alloc) Entry(copy);
-            names.put(i, e);
+            names.put(ins, e);
         }
     }
 
-    char* LirNameMap::formatAccSet(LInsp ins, bool isLoad, char* buf) {
+    void LirNameMap::addName(LInsp ins, const char* name) {
+        addNameWithSuffix(ins, name, namecounts.add(name), /*ignoreOneSuffix*/true);
+    }
+
+    const char* LirNameMap::createName(LInsp ins) {
+        if (ins->isCall()) {
+#if NJ_SOFTFLOAT_SUPPORTED
+            if (ins->isop(LIR_callh)) {
+                ins = ins->oprnd1();    // we've presumably seen the other half already
+            } else
+#endif
+            {
+                addNameWithSuffix(ins, ins->callInfo()->_name, funccounts.add(ins->callInfo()),
+                                  /*ignoreOneSuffix*/false);
+            }
+        } else {
+            addNameWithSuffix(ins, lirNames[ins->opcode()], lircounts.add(ins->opcode()),
+                              /*ignoreOneSuffix*/false);
+                
+        }
+        return names.get(ins)->name;
+    }
+
+    const char* LirNameMap::lookupName(LInsp ins)
+    {
+        Entry* e = names.get(ins);
+        return e ? e->name : NULL;
+    }
+
+
+    char* LInsPrinter::formatAccSet(RefBuf* buf, LInsp ins, bool isLoad) {
         AccSet accSet = ins->accSet();
         int i = 0;
         if ((isLoad && accSet == ACC_LOAD_ANY) ||
             (!isLoad && accSet == ACC_STORE_ANY))
         {
             // boring, don't bother with a suffix
         } else {
-            buf[i++] = '.';
-            if (accSet & ACC_READONLY) { buf[i++] = 'r'; accSet &= ~ACC_READONLY; }
-            if (accSet & ACC_STACK)    { buf[i++] = 's'; accSet &= ~ACC_STACK; }
-            if (accSet & ACC_OTHER)    { buf[i++] = 'o'; accSet &= ~ACC_OTHER; }
+            buf->buf[i++] = '.';
+            if (accSet & ACC_READONLY) { buf->buf[i++] = 'r'; accSet &= ~ACC_READONLY; }
+            if (accSet & ACC_STACK)    { buf->buf[i++] = 's'; accSet &= ~ACC_STACK; }
+            if (accSet & ACC_OTHER)    { buf->buf[i++] = 'o'; accSet &= ~ACC_OTHER; }
             // This assertion will fail if we add a new accSet value but
             // forget to handle it here.
             NanoAssert(accSet == 0);
         }
-        buf[i] = 0;
-        return buf;
+        buf->buf[i] = 0;
+        return buf->buf;
     }
 
-    void LirNameMap::copyName(LInsp i, const char *s, int suffix) {
-        char s2[200];
-        if (VMPI_isdigit(s[VMPI_strlen(s)-1])) {
-            // if s ends with a digit, add '_' to clarify the suffix
-            VMPI_sprintf(s2,"%s_%d", s, suffix);
-        } else {
-            VMPI_sprintf(s2,"%s%d", s, suffix);
-        }
-        addName(i, s2);
-    }
-
-    void LirNameMap::formatImm(int32_t c, char *buf) {
+    void LInsPrinter::formatImm(RefBuf* buf, int32_t c) {
         if (-10000 < c || c < 10000) {
-            VMPI_sprintf(buf,"%d", c);
+            VMPI_snprintf(buf->buf, buf->len, "%d", c);
         } else {
 #if !defined NANOJIT_64BIT
-            VMPI_sprintf(buf, "%s", labels->format((void*)c));
+            formatAddr(buf, (void*)c);
 #else
-            VMPI_sprintf(buf, "0x%x", (unsigned int)c);
+            VMPI_snprintf(buf->buf, buf->len, "0x%x", (unsigned int)c);
 #endif
         }
     }
 
-    void LirNameMap::formatImmq(uint64_t c, char *buf) {
+    void LInsPrinter::formatImmq(RefBuf* buf, uint64_t c) {
         if (-10000 < (int64_t)c || c < 10000) {
-            VMPI_sprintf(buf, "%dLL", (int)c);
+            VMPI_snprintf(buf->buf, buf->len, "%dLL", (int)c);
         } else {
 #if defined NANOJIT_64BIT
-            VMPI_sprintf(buf, "%s", labels->format((void*)c));
+            formatAddr(buf, (void*)c);
 #else
-            VMPI_sprintf(buf, "0x%llxLL", c);
+            VMPI_snprintf(buf->buf, buf->len, "0x%llxLL", c);
 #endif
         }
     }
 
-    const char* LirNameMap::formatRef(LIns *ref)
+    char* LInsPrinter::formatAddr(RefBuf* buf, void* p)
     {
-        char buffer[200], *buf=buffer;
-        buf[0]=0;
-        if (names.containsKey(ref)) {
-            const char* name = names.get(ref)->name;
-            VMPI_strcat(buf, name);
+        char*   name;
+        int32_t offset;
+        addrNameMap->lookupAddr(p, name, offset);
+
+        if (name) {
+            if (offset != 0) {
+                VMPI_snprintf(buf->buf, buf->len, "%p %s+%d", p, name, offset);
+            } else {
+                VMPI_snprintf(buf->buf, buf->len, "%p %s", p, name);
+            }
+        } else {
+            VMPI_snprintf(buf->buf, buf->len, "%p", p);
+        }
+
+        return buf->buf;
+    }
+
+    char* LInsPrinter::formatRef(RefBuf* buf, LIns *ref)
+    {
+        // - If 'ref' already has a name, use it.
+        // - Otherwise, if it's a constant, use the constant.
+        // - Otherwise, give it a name and use it.
+        const char* name = lirNameMap->lookupName(ref);
+        if (name) {
+            VMPI_snprintf(buf->buf, buf->len, "%s", name);
         }
         else if (ref->isconstf()) {
-            VMPI_sprintf(buf, "%g", ref->imm64f());
+            VMPI_snprintf(buf->buf, buf->len, "%g", ref->imm64f());
         }
         else if (ref->isconstq()) {
-            formatImmq(ref->imm64(), buf);
+            formatImmq(buf, ref->imm64());
         }
         else if (ref->isconst()) {
-            formatImm(ref->imm32(), buf);
+            formatImm(buf, ref->imm32());
         }
         else {
-            if (ref->isCall()) {
-#if NJ_SOFTFLOAT_SUPPORTED
-                if (ref->isop(LIR_callh)) {
-                    // we've presumably seen the other half already
-                    ref = ref->oprnd1();
-                } else
-#endif
-                {
-                    copyName(ref, ref->callInfo()->_name, funccounts.add(ref->callInfo()));
-                }
-            } else {
-                NanoAssert(size_t(ref->opcode()) < sizeof(lirNames) / sizeof(lirNames[0]));
-                copyName(ref, lirNames[ref->opcode()], lircounts.add(ref->opcode()));
-            }
-            const char* name = names.get(ref)->name;
-            VMPI_strcat(buf, name);
+            name = lirNameMap->createName(ref);
+            VMPI_snprintf(buf->buf, buf->len, "%s", name);
         }
-        return labels->dup(buffer);
+        return buf->buf;
     }
 
-    const char* LirNameMap::formatIns(LIns* i)
+    char* LInsPrinter::formatIns(InsBuf* buf, LIns* i)
     {
-        char sbuf[4096];
-        char *s = sbuf;
+        char *s = buf->buf;
+        size_t n = buf->len;
+        RefBuf b1, b2, b3, b4;
         LOpcode op = i->opcode();
         switch (op)
         {
             case LIR_int:
-                VMPI_sprintf(s, "%s = %s %d", formatRef(i), lirNames[op], i->imm32());
+                VMPI_snprintf(s, n, "%s = %s %d", formatRef(&b1, i), lirNames[op], i->imm32());
                 break;
 
             case LIR_alloc:
-                VMPI_sprintf(s, "%s = %s %d", formatRef(i), lirNames[op], i->size());
+                VMPI_snprintf(s, n, "%s = %s %d", formatRef(&b1, i), lirNames[op], i->size());
                 break;
 
 #ifdef NANOJIT_64BIT
             case LIR_quad:
-                VMPI_sprintf(s, "%s = %s %X:%X", formatRef(i), lirNames[op],
+                VMPI_snprintf(s, n, "%s = %s %X:%X", formatRef(&b1, i), lirNames[op],
                              i->imm64_1(), i->imm64_0());
                 break;
 #endif
 
             case LIR_float:
-                VMPI_sprintf(s, "%s = %s %g", formatRef(i), lirNames[op], i->imm64f());
+                VMPI_snprintf(s, n, "%s = %s %g", formatRef(&b1, i), lirNames[op], i->imm64f());
                 break;
 
             case LIR_start:
             case LIR_regfence:
-                VMPI_sprintf(s, "%s", lirNames[op]);
+                VMPI_snprintf(s, n, "%s", lirNames[op]);
                 break;
 
             case LIR_icall:
             case LIR_fcall:
             CASE64(LIR_qcall:) {
                 const CallInfo* call = i->callInfo();
                 int32_t argc = i->argc();
+                ssize_t m = n;
                 if (call->isIndirect())
-                    VMPI_sprintf(s, "%s = %s [%s] ( ", formatRef(i), lirNames[op], formatRef(i->arg(--argc)));
+                    m -= VMPI_snprintf(s, m, "%s = %s [%s] ( ", formatRef(&b1, i), lirNames[op],
+                                       formatRef(&b2, i->arg(--argc)));
                 else
-                    VMPI_sprintf(s, "%s = %s #%s ( ", formatRef(i), lirNames[op], call->_name);
+                    m -= VMPI_snprintf(s, m, "%s = %s #%s ( ", formatRef(&b1, i), lirNames[op],
+                                       call->_name);
+                if (m < 0) break;
                 for (int32_t j = argc - 1; j >= 0; j--) {
                     s += VMPI_strlen(s);
-                    VMPI_sprintf(s, "%s ",formatRef(i->arg(j)));
+                    m -= VMPI_snprintf(s, m, "%s ",formatRef(&b2, i->arg(j)));
+                    if (m < 0) break;
                 }
                 s += VMPI_strlen(s);
-                VMPI_sprintf(s, ")");
+                m -= VMPI_snprintf(s, m, ")");
                 break;
             }
 
-            case LIR_jtbl:
-                VMPI_sprintf(s, "%s %s [ ", lirNames[op], formatRef(i->oprnd1()));
-                for (uint32_t j = 0, n = i->getTableSize(); j < n; j++) {
-                    if (VMPI_strlen(sbuf) + 50 > sizeof(sbuf)) {
-                        s += VMPI_strlen(s);
-                        VMPI_sprintf(s, "... ");
-                        break;
-                    }
+            case LIR_jtbl: {
+                ssize_t m = n;
+                m -= VMPI_snprintf(s, m, "%s %s [ ", lirNames[op], formatRef(&b1, i->oprnd1()));
+                if (m < 0) break;
+                for (uint32_t j = 0, sz = i->getTableSize(); j < sz; j++) {
                     LIns* target = i->getTarget(j);
                     s += VMPI_strlen(s);
-                    VMPI_sprintf(s, "%s ", target ? formatRef(target) : "unpatched");
+                    m -= VMPI_snprintf(s, m, "%s ", target ? formatRef(&b2, target) : "unpatched");
+                    if (m < 0) break;
                 }
                 s += VMPI_strlen(s);
-                VMPI_sprintf(s, "]");
+                m -= VMPI_snprintf(s, m, "]");
                 break;
+            }
 
             case LIR_param: {
                 uint32_t arg = i->paramArg();
                 if (!i->paramKind()) {
                     if (arg < sizeof(Assembler::argRegs)/sizeof(Assembler::argRegs[0])) {
-                        VMPI_sprintf(s, "%s = %s %d %s", formatRef(i), lirNames[op],
+                        VMPI_snprintf(s, n, "%s = %s %d %s", formatRef(&b1, i), lirNames[op],
                             arg, gpn(Assembler::argRegs[arg]));
                     } else {
-                        VMPI_sprintf(s, "%s = %s %d", formatRef(i), lirNames[op], arg);
+                        VMPI_snprintf(s, n, "%s = %s %d", formatRef(&b1, i), lirNames[op], arg);
                     }
                 } else {
-                    VMPI_sprintf(s, "%s = %s %d %s", formatRef(i), lirNames[op],
+                    VMPI_snprintf(s, n, "%s = %s %d %s", formatRef(&b1, i), lirNames[op],
                         arg, gpn(Assembler::savedRegs[arg]));
                 }
                 break;
             }
 
             case LIR_label:
-                VMPI_sprintf(s, "%s:", formatRef(i));
+                VMPI_snprintf(s, n, "%s:", formatRef(&b1, i));
                 break;
 
             case LIR_jt:
             case LIR_jf:
-                VMPI_sprintf(s, "%s %s -> %s", lirNames[op], formatRef(i->oprnd1()),
-                    i->oprnd2() ? formatRef(i->oprnd2()) : "unpatched");
+                VMPI_snprintf(s, n, "%s %s -> %s", lirNames[op], formatRef(&b1, i->oprnd1()),
+                    i->oprnd2() ? formatRef(&b2, i->oprnd2()) : "unpatched");
                 break;
 
             case LIR_j:
-                VMPI_sprintf(s, "%s -> %s", lirNames[op],
-                    i->oprnd2() ? formatRef(i->oprnd2()) : "unpatched");
+                VMPI_snprintf(s, n, "%s -> %s", lirNames[op],
+                    i->oprnd2() ? formatRef(&b1, i->oprnd2()) : "unpatched");
                 break;
 
             case LIR_live:
             case LIR_flive:
             CASE64(LIR_qlive:)
             case LIR_ret:
             CASE64(LIR_qret:)
             case LIR_fret:
-                VMPI_sprintf(s, "%s %s", lirNames[op], formatRef(i->oprnd1()));
+                VMPI_snprintf(s, n, "%s %s", lirNames[op], formatRef(&b1, i->oprnd1()));
                 break;
 
             CASESF(LIR_callh:)
             case LIR_neg:
             case LIR_fneg:
             case LIR_i2f:
             case LIR_u2f:
             CASESF(LIR_qlo:)
             CASESF(LIR_qhi:)
             case LIR_not:
             CASE86(LIR_mod:)
             CASE64(LIR_i2q:)
             CASE64(LIR_u2q:)
             CASE64(LIR_q2i:)
             case LIR_f2i:
-                VMPI_sprintf(s, "%s = %s %s", formatRef(i), lirNames[op], formatRef(i->oprnd1()));
+                VMPI_snprintf(s, n, "%s = %s %s", formatRef(&b1, i), lirNames[op],
+                             formatRef(&b2, i->oprnd1()));
                 break;
 
             case LIR_x:
             case LIR_xt:
             case LIR_xf:
             case LIR_xbarrier:
             case LIR_xtbl:
-                formatGuard(i, s);
+                formatGuard(buf, i);
                 break;
 
             case LIR_addxov:
             case LIR_subxov:
             case LIR_mulxov:
-                formatGuardXov(i, s);
+                formatGuardXov(buf, i);
                 break;
 
             case LIR_add:       CASE64(LIR_qiadd:)
             case LIR_addp:
             case LIR_sub:
             case LIR_mul:
             CASE86(LIR_div:)
             case LIR_fadd:
@@ -1946,74 +2005,64 @@ namespace nanojit
             case LIR_ule:       CASE64(LIR_qule:)
             case LIR_ugt:       CASE64(LIR_qugt:)
             case LIR_uge:       CASE64(LIR_quge:)
             case LIR_feq:
             case LIR_flt:
             case LIR_fle:
             case LIR_fgt:
             case LIR_fge:
-                VMPI_sprintf(s, "%s = %s %s, %s", formatRef(i), lirNames[op],
-                    formatRef(i->oprnd1()),
-                    formatRef(i->oprnd2()));
-                break;
-
 #if NJ_SOFTFLOAT_SUPPORTED
             case LIR_qjoin:
-                VMPI_sprintf(s, "%s (%s), %s", lirNames[op],
-                     formatRef(i->oprnd1()),
-                     formatRef(i->oprnd2()));
-                 break;
 #endif
+                VMPI_snprintf(s, n, "%s = %s %s, %s", formatRef(&b1, i), lirNames[op],
+                    formatRef(&b2, i->oprnd1()),
+                    formatRef(&b3, i->oprnd2()));
+                break;
 
             CASE64(LIR_qcmov:)
             case LIR_cmov:
-                VMPI_sprintf(s, "%s = %s %s ? %s : %s", formatRef(i), lirNames[op],
-                    formatRef(i->oprnd1()),
-                    formatRef(i->oprnd2()),
-                    formatRef(i->oprnd3()));
+                VMPI_snprintf(s, n, "%s = %s %s ? %s : %s", formatRef(&b1, i), lirNames[op],
+                    formatRef(&b2, i->oprnd1()),
+                    formatRef(&b3, i->oprnd2()),
+                    formatRef(&b4, i->oprnd3()));
                 break;
 
             case LIR_ld:
             CASE64(LIR_ldq:)
             case LIR_ldf:
             case LIR_ldzb:
             case LIR_ldzs:
             case LIR_ldsb:
             case LIR_ldss:
-            case LIR_ld32f: {
-                char b[32];
-                VMPI_sprintf(s, "%s = %s%s %s[%d]", formatRef(i), lirNames[op],
-                    formatAccSet(i, /*isLoad*/true, b),
-                    formatRef(i->oprnd1()),
+            case LIR_ld32f:
+                VMPI_snprintf(s, n, "%s = %s%s %s[%d]", formatRef(&b1, i), lirNames[op],
+                    formatAccSet(&b2, i, /*isLoad*/true),
+                    formatRef(&b3, i->oprnd1()),
                     i->disp());
                 break;
-            }
 
             case LIR_sti:
             CASE64(LIR_stqi:)
             case LIR_stfi:
             case LIR_stb:
             case LIR_sts:
-            case LIR_st32f: {
-                char b[32];
-                VMPI_sprintf(s, "%s%s %s[%d] = %s", lirNames[op],
-                    formatAccSet(i, /*isLoad*/false, b),
-                    formatRef(i->oprnd2()),
+            case LIR_st32f:
+                VMPI_snprintf(s, n, "%s%s %s[%d] = %s", lirNames[op],
+                    formatAccSet(&b1, i, /*isLoad*/false),
+                    formatRef(&b2, i->oprnd2()),
                     i->disp(),
-                    formatRef(i->oprnd1()));
+                    formatRef(&b3, i->oprnd1()));
                 break;
-            }
 
             default:
                 NanoAssertMsgf(0, "Can't handle opcode %s\n", lirNames[op]);
                 break;
         }
-        NanoAssert(VMPI_strlen(sbuf) < sizeof(sbuf)-1);
-        return labels->dup(sbuf);
+        return buf->buf;
     }
 #endif
 
 
     CseFilter::CseFilter(LirWriter *out, Allocator& alloc)
         : LirWriter(out)
     {
         uint32_t kInitialCaps[LInsLast + 1];
@@ -2382,69 +2431,52 @@ namespace nanojit
         return out->insCall(ci, args);
     }
 #endif // NJ_SOFTFLOAT_SUPPORTED
 
 
     #endif /* FEATURE_NANOJIT */
 
 #if defined(NJ_VERBOSE)
-    LabelMap::LabelMap(Allocator& a, LogControl *logc)
-        : allocator(a), names(a), logc(logc), end(buf)
+    AddrNameMap::AddrNameMap(Allocator& a)
+        : allocator(a), names(a)
     {}
 
-    void LabelMap::add(const void *p, size_t size, size_t align, const char *name)
+    void AddrNameMap::addAddrRange(const void *p, size_t size, size_t align, const char *name)
     {
         if (!this || names.containsKey(p))
             return;
         char* copy = new (allocator) char[VMPI_strlen(name)+1];
         VMPI_strcpy(copy, name);
         Entry *e = new (allocator) Entry(copy, size << align, align);
         names.put(p, e);
     }
 
-    const char *LabelMap::format(const void *p)
+    void AddrNameMap::lookupAddr(void *p, char*& name, int32_t& offset)
     {
-        char b[200];
-
         const void *start = names.findNear(p);
-        if (start != NULL) {
+        if (start) {
             Entry *e = names.get(start);
             const void *end = (const char*)start + e->size;
-            const char *name = e->name;
             if (p == start) {
-                VMPI_sprintf(b, "%p %s", p, name);
-                return dup(b);
+                name = e->name;
+                offset = 0;
             }
             else if (p > start && p < end) {
-                int32_t d = int32_t(intptr_t(p)-intptr_t(start)) >> e->align;
-                VMPI_sprintf(b, "%p %s+%d", p, name, d);
-                return dup(b);
+                name = e->name;
+                offset = int32_t(intptr_t(p)-intptr_t(start)) >> e->align;
             }
             else {
-                VMPI_sprintf(b, "%p", p);
-                return dup(b);
+                name = NULL;
+                offset = 0;
             }
+        } else {
+            name = NULL;
+            offset = 0;
         }
-        VMPI_sprintf(b, "%p", p);
-        return dup(b);
-    }
-
-    const char *LabelMap::dup(const char *b)
-    {
-        size_t need = VMPI_strlen(b)+1;
-        NanoAssert(need <= sizeof(buf));
-        char *s = end;
-        end += need;
-        if (end > buf+sizeof(buf)) {
-            s = buf;
-            end = s+need;
-        }
-        VMPI_strcpy(s, b);
-        return s;
     }
 
     // ---------------------------------------------------------------
     // START debug-logging definitions
     // ---------------------------------------------------------------
 
     void LogControl::printf( const char* format, ... )
     {
--- a/js/src/nanojit/LIR.h
+++ b/js/src/nanojit/LIR.h
@@ -1509,103 +1509,139 @@ namespace nanojit
             return insStorei(value, base, d, ACC_STORE_ANY);
         }
     };
 
 
 #ifdef NJ_VERBOSE
     extern const char* lirNames[];
 
-    /**
-     * map address ranges to meaningful names.
-     */
-    class LabelMap
+    // Maps address ranges to meaningful names.
+    class AddrNameMap
     {
         Allocator& allocator;
         class Entry
         {
         public:
             Entry(int) : name(0), size(0), align(0) {}
-            Entry(char *n, size_t s, size_t a) : name(n),size(s),align(a) {}
+            Entry(char *n, size_t s, size_t a) : name(n), size(s), align(a) {}
             char* name;
             size_t size:29, align:3;
         };
-        TreeMap<const void*, Entry*> names;
-        LogControl *logc;
-        char buf[5000], *end;
-        void formatAddr(const void *p, char *buf);
+        TreeMap<const void*, Entry*> names;     // maps code regions to names
     public:
-        LabelMap(Allocator& allocator, LogControl* logc);
-        void add(const void *p, size_t size, size_t align, const char *name);
-        const char *dup(const char *);
-        const char *format(const void *p);
+        AddrNameMap(Allocator& allocator);
+        void addAddrRange(const void *p, size_t size, size_t align, const char *name);
+        void lookupAddr(void *p, char*& name, int32_t& offset);
     };
 
+    // Maps LIR instructions to meaningful names.
     class LirNameMap
     {
+    private:
         Allocator& alloc;
 
         template <class Key>
         class CountMap: public HashMap<Key, int> {
         public:
             CountMap(Allocator& alloc) : HashMap<Key, int>(alloc) {}
             int add(Key k) {
                 int c = 1;
                 if (containsKey(k)) {
                     c = 1+get(k);
                 }
                 put(k,c);
                 return c;
             }
         };
+
         CountMap<int> lircounts;
         CountMap<const CallInfo *> funccounts;
+        CountMap<const char *> namecounts;
+
+        void addNameWithSuffix(LInsp i, const char *s, int suffix, bool ignoreOneSuffix);
 
         class Entry
         {
         public:
             Entry(int) : name(0) {}
             Entry(char* n) : name(n) {}
             char* name;
         };
+
         HashMap<LInsp, Entry*> names;
-        void formatImm(int32_t c, char *buf);
-        void formatImmq(uint64_t c, char *buf);
 
     public:
-        LabelMap *labels;
-        LirNameMap(Allocator& alloc, LabelMap *lm)
+        LirNameMap(Allocator& alloc)
             : alloc(alloc),
             lircounts(alloc),
             funccounts(alloc),
-            names(alloc),
-            labels(lm)
+            namecounts(alloc),
+            names(alloc)
         {}
 
-        void addName(LInsp i, const char *s);
-        void copyName(LInsp i, const char *s, int suffix);
-        char* formatAccSet(LInsp ins, bool isLoad, char* buf);
-        const char *formatRef(LIns *ref);
-        const char *formatIns(LInsp i);
-        void formatGuard(LInsp i, char *buf);
-        void formatGuardXov(LInsp i, char *buf);
+        void        addName(LInsp ins, const char *s);  // gives 'ins' a special name
+        const char* createName(LInsp ins);              // gives 'ins' a generic name
+        const char* lookupName(LInsp ins);
+    };
+
+    // We use big buffers for cases where we need to fit a whole instruction,
+    // and smaller buffers for all the others.  These should easily be long
+    // enough, but for safety the formatXyz() functions check and won't exceed
+    // those limits.
+    class InsBuf {
+    public:
+        static const size_t len = 1000;
+        char buf[len];
+    };
+    class RefBuf {
+    public:
+        static const size_t len = 200;
+        char buf[len];
+    };
+
+    class LInsPrinter
+    {
+    private:
+        Allocator& alloc;
+
+        void formatImm(RefBuf* buf, int32_t c);
+        void formatImmq(RefBuf* buf, uint64_t c);
+        void formatGuard(InsBuf* buf, LInsp ins);
+        void formatGuardXov(InsBuf* buf, LInsp ins);
+        char* formatAccSet(RefBuf* buf, LInsp ins, bool isLoad);
+
+    public:
+        LInsPrinter(Allocator& alloc)
+            : alloc(alloc)
+        {
+            addrNameMap = new (alloc) AddrNameMap(alloc);
+            lirNameMap = new (alloc) LirNameMap(alloc);
+        }
+
+        char *formatAddr(RefBuf* buf, void* p);
+        char *formatRef(RefBuf* buf, LInsp ref);
+        char *formatIns(InsBuf* buf, LInsp ins);
+
+        AddrNameMap* addrNameMap;
+        LirNameMap* lirNameMap;
     };
 
 
     class VerboseWriter : public LirWriter
     {
         InsList code;
-        LirNameMap* names;
+        LInsPrinter* printer;
         LogControl* logc;
         const char* const prefix;
         bool const always_flush;
     public:
-        VerboseWriter(Allocator& alloc, LirWriter *out,
-                      LirNameMap* names, LogControl* logc, const char* prefix = "", bool always_flush = false)
-            : LirWriter(out), code(alloc), names(names), logc(logc), prefix(prefix), always_flush(always_flush)
+        VerboseWriter(Allocator& alloc, LirWriter *out, LInsPrinter* printer, LogControl* logc,
+                      const char* prefix = "", bool always_flush = false)
+            : LirWriter(out), code(alloc), printer(printer), logc(logc), prefix(prefix), always_flush(always_flush)
         {}
 
         LInsp add(LInsp i) {
             if (i) {
                 code.add(i);
                 if (always_flush)
                     flush();
             }
@@ -1616,19 +1652,20 @@ namespace nanojit
             if ((i = add(i)) != 0)
                 flush();
             return i;
         }
 
         void flush()
         {
             if (!code.isEmpty()) {
+                InsBuf b;
                 int32_t count = 0;
                 for (Seq<LIns*>* p = code.get(); p != NULL; p = p->tail) {
-                    logc->printf("%s    %s\n",prefix,names->formatIns(p->head));
+                    logc->printf("%s    %s\n", prefix, printer->formatIns(&b, p->head));
                     count++;
                 }
                 code.clear();
                 if (count > 1)
                     logc->printf("\n");
             }
         }
 
@@ -1817,17 +1854,17 @@ namespace nanojit
     class LirBuffer
     {
         public:
             LirBuffer(Allocator& alloc);
             void        clear();
             uintptr_t   makeRoom(size_t szB);   // make room for an instruction
 
             debug_only (void validate() const;)
-            verbose_only(LirNameMap* names;)
+            verbose_only(LInsPrinter* printer;)
 
             int32_t insCount();
             size_t  byteCount();
 
             // stats
             struct
             {
                 uint32_t lir;    // # instructions
@@ -2067,26 +2104,26 @@ namespace nanojit
     /* A listing filter for LIR, going through backwards.  It merely
        passes its input to its output, but notes it down too.  When
        finish() is called, prints out what went through.  Is intended to be
        used to print arbitrary intermediate transformation stages of
        LIR. */
     class ReverseLister : public LirFilter
     {
         Allocator&   _alloc;
-        LirNameMap*  _names;
+        LInsPrinter* _printer;
         const char*  _title;
         StringList   _strs;
         LogControl*  _logc;
     public:
         ReverseLister(LirFilter* in, Allocator& alloc,
-                      LirNameMap* names, LogControl* logc, const char* title)
+                      LInsPrinter* printer, LogControl* logc, const char* title)
             : LirFilter(in)
             , _alloc(alloc)
-            , _names(names)
+            , _printer(printer)
             , _title(title)
             , _strs(alloc)
             , _logc(logc)
         { }
 
         void finish();
         LInsp read();
     };
--- a/js/src/nanojit/VMPI.h
+++ b/js/src/nanojit/VMPI.h
@@ -80,16 +80,17 @@ typedef unsigned __int64 uint64_t;
 #endif
 
 #define VMPI_strlen strlen
 #define VMPI_strcat strcat
 #define VMPI_strcmp strcmp
 #define VMPI_strncat strncat
 #define VMPI_strcpy strcpy
 #define VMPI_sprintf sprintf
+#define VMPI_snprintf snprintf
 #define VMPI_vfprintf vfprintf
 #define VMPI_memset memset
 #define VMPI_isdigit isdigit
 #define VMPI_getDate()
 
 extern void VMPI_setPageProtection(void *address,
                                    size_t size,
                                    bool executableFlag,