Bug 1020468 - IonMonkey: More debugging output. r=bhackett
author      Dan Gohman <sunfish@mozilla.com>
date        Fri, 06 Jun 2014 15:59:17 -0700
changeset   206596  5f6ae40e368d7acf69cd21df653a761c5784d858
parent      206595  6cf2f1cd655cfcb1f2290b9387b6cd86f3f06764
child       206597  b0561d529f9587dd26c9174150e031a41eed433f
push id     3741
push user   asasaki@mozilla.com
push date   Mon, 21 Jul 2014 20:25:18 +0000
reviewers   bhackett
bugs        1020468
milestone   32.0a1
js/src/assembler/jit/ExecutableAllocator.h
js/src/jit/BacktrackingAllocator.cpp
js/src/jit/LIR.cpp
js/src/jit/LiveRangeAllocator.cpp
js/src/jit/MIR.cpp
js/src/jit/RegisterAllocator.cpp
js/src/jit/shared/CodeGenerator-shared.cpp
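
All of the hunks below add or re-indent messages on SpiderMonkey's IonSpew debug-logging channels (enabled per channel via IONFLAGS in debug builds). For context, a minimal sketch of the guarded-spew pattern these hunks rely on; IonSpew, IonSpewEnabled, and IonSpew_RegAlloc come from the tree, while ExampleSpew itself is hypothetical:

    void
    ExampleSpew(LiveInterval *interval)
    {
        // Fixed-cost messages call IonSpew() directly; it is a no-op when
        // the channel is disabled.
        IonSpew(IonSpew_RegAlloc, "  processing interval v%u", interval->vreg());

        // Messages with expensive-to-build arguments (rangesToString()
        // builds a string) are guarded by IonSpewEnabled() first, matching
        // the pattern used throughout BacktrackingAllocator.cpp below.
        if (IonSpewEnabled(IonSpew_RegAlloc)) {
            IonSpew(IonSpew_RegAlloc, "  ranges: %s", interval->rangesToString());
        }
    }
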
--- a/js/src/assembler/jit/ExecutableAllocator.h
+++ b/js/src/assembler/jit/ExecutableAllocator.h
@@ -116,18 +116,17 @@ private:
     size_t m_baselineCodeBytes;
     size_t m_regexpCodeBytes;
     size_t m_otherCodeBytes;
 
 public:
     void release(bool willDestroy = false)
     {
         JS_ASSERT(m_refCount != 0);
-        // XXX: disabled, see bug 654820.
-        //JS_ASSERT_IF(willDestroy, m_refCount == 1);
+        JS_ASSERT_IF(willDestroy, m_refCount == 1);
         if (--m_refCount == 0)
             js_delete(this);
     }
     void release(size_t n, CodeKind kind)
     {
         switch (kind) {
           case ION_CODE:
             m_ionCodeBytes -= n;
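
The hunk above also re-enables the JS_ASSERT_IF that had been disabled for bug 654820. For reference, JS_ASSERT_IF(cond, expr) asserts expr only when cond holds, and compiles away in non-DEBUG builds; a minimal sketch of its intent, not the tree's actual definition:

    #ifdef DEBUG
    # define JS_ASSERT_IF(cond, expr)  do { if (cond) JS_ASSERT(expr); } while (0)
    #else
    # define JS_ASSERT_IF(cond, expr)  ((void) 0)
    #endif
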
--- a/js/src/jit/BacktrackingAllocator.cpp
+++ b/js/src/jit/BacktrackingAllocator.cpp
@@ -286,16 +286,19 @@ BacktrackingAllocator::tryGroupReusedReg
         return false;
 
     LiveIntervalVector newIntervals;
     if (!newIntervals.append(preInterval) || !newIntervals.append(postInterval))
         return false;
 
     distributeUses(interval, newIntervals);
 
+    IonSpew(IonSpew_RegAlloc, "  splitting reused input at %u to try to help grouping",
+            inputOf(reg.ins()));
+
     if (!split(interval, newIntervals))
         return false;
 
     JS_ASSERT(usedReg.numIntervals() == 2);
 
     usedReg.setCanonicalSpillExclude(inputOf(reg.ins()));
 
     return tryGroupRegisters(use, def);
@@ -390,17 +393,17 @@ BacktrackingAllocator::groupAndQueueRegi
 static const size_t MAX_ATTEMPTS = 2;
 
 bool
 BacktrackingAllocator::tryAllocateFixed(LiveInterval *interval, bool *success,
                                         bool *pfixed, LiveInterval **pconflicting)
 {
     // Spill intervals which are required to be in a certain stack slot.
     if (!interval->requirement()->allocation().isRegister()) {
-        IonSpew(IonSpew_RegAlloc, "stack allocation requirement");
+        IonSpew(IonSpew_RegAlloc, "  stack allocation requirement");
         interval->setAllocation(interval->requirement()->allocation());
         *success = true;
         return true;
     }
 
     AnyRegister reg = interval->requirement()->allocation().toRegister();
     return tryAllocateRegister(registers[reg.code()], interval, success, pfixed, pconflicting);
 }
@@ -580,32 +583,32 @@ BacktrackingAllocator::setIntervalRequir
     interval->setRequirement(Requirement());
 
     BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
 
     // Set a hint if another interval in the same group is in a register.
     if (VirtualRegisterGroup *group = reg->group()) {
         if (group->allocation.isRegister()) {
             if (IonSpewEnabled(IonSpew_RegAlloc)) {
-                IonSpew(IonSpew_RegAlloc, "Hint %s, used by group allocation",
+                IonSpew(IonSpew_RegAlloc, "  Hint %s, used by group allocation",
                         group->allocation.toString());
             }
             interval->setHint(Requirement(group->allocation));
         }
     }
 
     if (interval->index() == 0) {
         // The first interval is the definition, so deal with any definition
         // constraints/hints.
 
         LDefinition::Policy policy = reg->def()->policy();
         if (policy == LDefinition::PRESET) {
             // Preset policies get a FIXED requirement.
             if (IonSpewEnabled(IonSpew_RegAlloc)) {
-                IonSpew(IonSpew_RegAlloc, "Requirement %s, preset by definition",
+                IonSpew(IonSpew_RegAlloc, "  Requirement %s, preset by definition",
                         reg->def()->output()->toString());
             }
             interval->setRequirement(Requirement(*reg->def()->output()));
         } else if (reg->ins()->isPhi()) {
             // Phis don't have any requirements, but they should prefer their
             // input allocations. This is captured by the group hints above.
         } else {
             // Non-phis get a REGISTER requirement.
@@ -618,17 +621,17 @@ BacktrackingAllocator::setIntervalRequir
          iter != interval->usesEnd();
          iter++)
     {
         LUse::Policy policy = iter->use->policy();
         if (policy == LUse::FIXED) {
             AnyRegister required = GetFixedRegister(reg->def(), iter->use);
 
             if (IonSpewEnabled(IonSpew_RegAlloc)) {
-                IonSpew(IonSpew_RegAlloc, "Requirement %s, due to use at %u",
+                IonSpew(IonSpew_RegAlloc, "  Requirement %s, due to use at %u",
                         required.name(), iter->pos.pos());
             }
 
             // If there are multiple fixed registers which the interval is
             // required to use, fail. The interval will need to be split before
             // it can be allocated.
             if (!interval->addRequirement(Requirement(LAllocation(required))))
                 return false;
@@ -706,35 +709,35 @@ BacktrackingAllocator::tryAllocateRegist
     JS_ASSERT_IF(interval->requirement()->kind() == Requirement::FIXED,
                  interval->requirement()->allocation() == LAllocation(r.reg));
 
     for (size_t i = 0; i < interval->numRanges(); i++) {
         AllocatedRange range(interval, interval->getRange(i)), existing;
         if (r.allocations.contains(range, &existing)) {
             if (existing.interval->hasVreg()) {
                 if (IonSpewEnabled(IonSpew_RegAlloc)) {
-                    IonSpew(IonSpew_RegAlloc, "%s collides with v%u [%u,%u> [weight %lu]",
+                    IonSpew(IonSpew_RegAlloc, "  %s collides with v%u [%u,%u> [weight %lu]",
                             r.reg.name(), existing.interval->vreg(),
                             existing.range->from.pos(), existing.range->to.pos(),
                             computeSpillWeight(existing.interval));
                 }
                 if (!*pconflicting || computeSpillWeight(existing.interval) < computeSpillWeight(*pconflicting))
                     *pconflicting = existing.interval;
             } else {
                 if (IonSpewEnabled(IonSpew_RegAlloc)) {
-                    IonSpew(IonSpew_RegAlloc, "%s collides with fixed use [%u,%u>",
+                    IonSpew(IonSpew_RegAlloc, "  %s collides with fixed use [%u,%u>",
                             r.reg.name(), existing.range->from.pos(), existing.range->to.pos());
                 }
                 *pfixed = true;
             }
             return true;
         }
     }
 
-    IonSpew(IonSpew_RegAlloc, "allocated to %s", r.reg.name());
+    IonSpew(IonSpew_RegAlloc, "  allocated to %s", r.reg.name());
 
     for (size_t i = 0; i < interval->numRanges(); i++) {
         AllocatedRange range(interval, interval->getRange(i));
         if (!r.allocations.insert(range))
             return false;
     }
 
     // Set any register hint for allocating other intervals in the same group.
@@ -747,17 +750,17 @@ BacktrackingAllocator::tryAllocateRegist
     *success = true;
     return true;
 }
 
 bool
 BacktrackingAllocator::evictInterval(LiveInterval *interval)
 {
     if (IonSpewEnabled(IonSpew_RegAlloc)) {
-        IonSpew(IonSpew_RegAlloc, "Evicting interval v%u: %s",
+        IonSpew(IonSpew_RegAlloc, "  Evicting interval v%u: %s",
                 interval->vreg(), interval->rangesToString());
     }
 
     JS_ASSERT(interval->getAllocation()->isRegister());
 
     AnyRegister reg(interval->getAllocation()->toRegister());
     PhysicalRegister &physical = registers[reg.code()];
     JS_ASSERT(physical.reg == reg && physical.allocatable);
@@ -800,20 +803,20 @@ BacktrackingAllocator::distributeUses(Li
     }
 }
 
 bool
 BacktrackingAllocator::split(LiveInterval *interval,
                              const LiveIntervalVector &newIntervals)
 {
     if (IonSpewEnabled(IonSpew_RegAlloc)) {
-        IonSpew(IonSpew_RegAlloc, "splitting interval v%u %s into:",
+        IonSpew(IonSpew_RegAlloc, "    splitting interval v%u %s into:",
                 interval->vreg(), interval->rangesToString());
         for (size_t i = 0; i < newIntervals.length(); i++)
-            IonSpew(IonSpew_RegAlloc, "    %s", newIntervals[i]->rangesToString());
+            IonSpew(IonSpew_RegAlloc, "      %s", newIntervals[i]->rangesToString());
     }
 
     JS_ASSERT(newIntervals.length() >= 2);
 
     // Find the earliest interval in the new list.
     LiveInterval *first = newIntervals[0];
     for (size_t i = 1; i < newIntervals.length(); i++) {
         if (newIntervals[i]->start() < first->start())
@@ -841,53 +844,53 @@ bool BacktrackingAllocator::requeueInter
             return false;
     }
     return true;
 }
 
 void
 BacktrackingAllocator::spill(LiveInterval *interval)
 {
-    IonSpew(IonSpew_RegAlloc, "Spilling interval");
+    IonSpew(IonSpew_RegAlloc, "  Spilling interval");
 
     JS_ASSERT(interval->requirement()->kind() == Requirement::NONE);
     JS_ASSERT(!interval->getAllocation()->isStackSlot());
 
     // We can't spill bogus intervals.
     JS_ASSERT(interval->hasVreg());
 
     BacktrackingVirtualRegister *reg = &vregs[interval->vreg()];
 
     bool useCanonical = !reg->hasCanonicalSpillExclude()
         || interval->start() < reg->canonicalSpillExclude();
 
     if (useCanonical) {
         if (reg->canonicalSpill()) {
-            IonSpew(IonSpew_RegAlloc, "  Picked canonical spill location %s",
+            IonSpew(IonSpew_RegAlloc, "    Picked canonical spill location %s",
                     reg->canonicalSpill()->toString());
             interval->setAllocation(*reg->canonicalSpill());
             return;
         }
 
         if (reg->group() && !reg->group()->spill.isUse()) {
-            IonSpew(IonSpew_RegAlloc, "  Reusing group spill location %s",
+            IonSpew(IonSpew_RegAlloc, "    Reusing group spill location %s",
                     reg->group()->spill.toString());
             interval->setAllocation(reg->group()->spill);
             reg->setCanonicalSpill(reg->group()->spill);
             return;
         }
     }
 
     uint32_t stackSlot = stackSlotAllocator.allocateSlot(reg->type());
     JS_ASSERT(stackSlot <= stackSlotAllocator.stackHeight());
 
     LStackSlot alloc(stackSlot);
     interval->setAllocation(alloc);
 
-    IonSpew(IonSpew_RegAlloc, "  Allocating spill location %s", alloc.toString());
+    IonSpew(IonSpew_RegAlloc, "    Allocating spill location %s", alloc.toString());
 
     if (useCanonical) {
         reg->setCanonicalSpill(alloc);
         if (reg->group())
             reg->group()->spill = alloc;
     }
 }
 
@@ -1041,16 +1044,18 @@ BacktrackingAllocator::isRegisterDefinit
         return false;
 
     return true;
 }
 
 bool
 BacktrackingAllocator::reifyAllocations()
 {
+    IonSpew(IonSpew_RegAlloc, "Reifying Allocations");
+
     // Virtual register number 0 is unused.
     JS_ASSERT(vregs[0u].numIntervals() == 0);
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         VirtualRegister *reg = &vregs[i];
 
         if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
             return false;
 
@@ -1101,16 +1106,18 @@ BacktrackingAllocator::reifyAllocations(
 
     graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
     return true;
 }
 
 bool
 BacktrackingAllocator::populateSafepoints()
 {
+    IonSpew(IonSpew_RegAlloc, "Populating Safepoints");
+
     size_t firstSafepoint = 0;
 
     // Virtual register number 0 is unused.
     JS_ASSERT(!vregs[0u].def());
     for (uint32_t i = 1; i < vregs.numVirtualRegisters(); i++) {
         BacktrackingVirtualRegister *reg = &vregs[i];
 
         if (!reg->def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
@@ -1251,27 +1258,27 @@ BacktrackingAllocator::dumpLiveness()
 
             fprintf(stderr, "\n");
         }
     }
 
     fprintf(stderr, "\nLive Ranges:\n\n");
 
     for (size_t i = 0; i < AnyRegister::Total; i++)
-        if (registers[i].allocatable)
+        if (registers[i].allocatable && fixedIntervals[i]->numRanges() != 0)
             fprintf(stderr, "reg %s: %s\n", AnyRegister::FromCode(i).name(), fixedIntervals[i]->rangesToString());
 
     // Virtual register number 0 is unused.
     JS_ASSERT(vregs[0u].numIntervals() == 0);
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         fprintf(stderr, "v%lu:", static_cast<unsigned long>(i));
         VirtualRegister &vreg = vregs[i];
         for (size_t j = 0; j < vreg.numIntervals(); j++) {
             if (j)
-                fprintf(stderr, " *");
+                fprintf(stderr, " /");
             fprintf(stderr, "%s", vreg.getInterval(j)->rangesToString());
         }
         fprintf(stderr, "\n");
     }
 
     fprintf(stderr, "\n");
 #endif // DEBUG
 }
@@ -1302,27 +1309,27 @@ BacktrackingAllocator::dumpAllocations()
 
     // Virtual register number 0 is unused.
     JS_ASSERT(vregs[0u].numIntervals() == 0);
     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
         fprintf(stderr, "v%lu:", static_cast<unsigned long>(i));
         VirtualRegister &vreg = vregs[i];
         for (size_t j = 0; j < vreg.numIntervals(); j++) {
             if (j)
-                fprintf(stderr, " *");
+                fprintf(stderr, " /");
             LiveInterval *interval = vreg.getInterval(j);
-            fprintf(stderr, "%s :: %s", interval->rangesToString(), interval->getAllocation()->toString());
+            fprintf(stderr, "%s in %s", interval->rangesToString(), interval->getAllocation()->toString());
         }
         fprintf(stderr, "\n");
     }
 
     fprintf(stderr, "\n");
 
     for (size_t i = 0; i < AnyRegister::Total; i++) {
-        if (registers[i].allocatable) {
+        if (registers[i].allocatable && !registers[i].allocations.empty()) {
             fprintf(stderr, "reg %s:\n", AnyRegister::FromCode(i).name());
             registers[i].allocations.forEach(PrintLiveIntervalRange());
         }
     }
 
     fprintf(stderr, "\n");
 #endif // DEBUG
 }
@@ -1502,29 +1509,36 @@ BacktrackingAllocator::trySplitAcrossHot
         AllocatedRange range(interval, interval->getRange(i)), existing;
         if (hotcode.contains(range, &existing)) {
             hotRange = existing.range;
             break;
         }
     }
 
     // Don't split if there is no hot code in the interval.
-    if (!hotRange)
+    if (!hotRange) {
+        IonSpew(IonSpew_RegAlloc, "  interval does not contain hot code");
         return true;
+    }
 
     // Don't split if there is no cold code in the interval.
     bool coldCode = false;
     for (size_t i = 0; i < interval->numRanges(); i++) {
         if (!hotRange->contains(interval->getRange(i))) {
             coldCode = true;
             break;
         }
     }
-    if (!coldCode)
+    if (!coldCode) {
+        IonSpew(IonSpew_RegAlloc, "  interval does not contain cold code");
         return true;
+    }
+
+    IonSpew(IonSpew_RegAlloc, "  split across hot range [%u,%u>",
+            hotRange->from.pos(), hotRange->to.pos());
 
     SplitPositionVector splitPositions;
     if (!splitPositions.append(hotRange->from) || !splitPositions.append(hotRange->to))
         return false;
     *success = true;
     return splitAt(interval, splitPositions);
 }
 
@@ -1551,20 +1565,28 @@ BacktrackingAllocator::trySplitAfterLast
         if (!conflict || outputOf(ins) < conflict->start()) {
             if (isRegisterUse(use, ins, /* considerCopy = */ true)) {
                 lastRegisterFrom = inputOf(ins);
                 lastRegisterTo = iter->pos.next();
             }
         }
     }
 
-    if (!lastRegisterFrom.pos() || lastRegisterFrom == lastUse) {
-        // Can't trim non-register uses off the end by splitting.
+    // Can't trim non-register uses off the end by splitting.
+    if (!lastRegisterFrom.pos()) {
+        IonSpew(IonSpew_RegAlloc, "  interval has no register uses");
         return true;
     }
+    if (lastRegisterFrom == lastUse) {
+        IonSpew(IonSpew_RegAlloc, "  interval's last use is a register use");
+        return true;
+    }
+
+    IonSpew(IonSpew_RegAlloc, "  split after last register use at %u",
+            lastRegisterTo.pos());
 
     SplitPositionVector splitPositions;
     if (!splitPositions.append(lastRegisterTo))
         return false;
     *success = true;
     return splitAt(interval, splitPositions);
 }
 
@@ -1572,16 +1594,18 @@ bool
 BacktrackingAllocator::splitAtAllRegisterUses(LiveInterval *interval)
 {
     // Split this interval so that all its register uses become minimal
     // intervals and allow the vreg to be spilled throughout its range.
 
     LiveIntervalVector newIntervals;
     uint32_t vreg = interval->vreg();
 
+    IonSpew(IonSpew_RegAlloc, "  split at all register uses");
+
     // If this LiveInterval is the result of an earlier split which created a
     // spill interval, that spill interval covers the whole range, so we don't
     // need to create a new one.
     bool spillIntervalIsNew = false;
     LiveInterval *spillInterval = interval->spillInterval();
     if (!spillInterval) {
         spillInterval = LiveInterval::New(alloc(), vreg, 0);
         spillIntervalIsNew = true;
@@ -1789,16 +1813,24 @@ BacktrackingAllocator::splitAcrossCalls(
         const LiveInterval::Range *range = fixedIntervalsUnion->getRange(i - 1);
         if (interval->covers(range->from) && interval->covers(range->from.previous())) {
             if (!callPositions.append(range->from))
                 return false;
         }
     }
     JS_ASSERT(callPositions.length());
 
+#ifdef DEBUG
+    IonSpewStart(IonSpew_RegAlloc, "  split across calls at ");
+    for (size_t i = 0; i < callPositions.length(); ++i) {
+        IonSpewCont(IonSpew_RegAlloc, "%s%u", i != 0 ? ", " : "", callPositions[i].pos());
+    }
+    IonSpewFin(IonSpew_RegAlloc);
+#endif
+
     return splitAt(interval, callPositions);
 }
 
 bool
 BacktrackingAllocator::chooseIntervalSplit(LiveInterval *interval, LiveInterval *conflict)
 {
     bool success = false;
 
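The splitAcrossCalls hunk above assembles one spew line piecewise inside an #ifdef DEBUG block. As a standalone sketch of that three-part API, where IonSpewStart opens the line, IonSpewCont appends to it without a newline, and IonSpewFin terminates it (callPositions as in the hunk):

    // Prints, e.g., "  split across calls at 10, 24, 57" as a single line.
    IonSpewStart(IonSpew_RegAlloc, "  split across calls at ");
    for (size_t i = 0; i < callPositions.length(); ++i)
        IonSpewCont(IonSpew_RegAlloc, "%s%u", i != 0 ? ", " : "", callPositions[i].pos());
    IonSpewFin(IonSpew_RegAlloc);
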
--- a/js/src/jit/LIR.cpp
+++ b/js/src/jit/LIR.cpp
@@ -157,16 +157,20 @@ LBlock::getExitMoveGroup(TempAllocator &
     insertBefore(*rbegin(), exitMoveGroup_);
     return exitMoveGroup_;
 }
 
 void
 LBlock::dump(FILE *fp)
 {
     fprintf(fp, "block%u:\n", mir()->id());
+    for (size_t i = 0; i < numPhis(); ++i) {
+        getPhi(i)->dump(fp);
+        fprintf(fp, "\n");
+    }
     for (LInstructionIterator iter = begin(); iter != end(); iter++) {
         iter->dump(fp);
         fprintf(fp, "\n");
     }
 }
 
 void
 LBlock::dump()
@@ -453,23 +457,25 @@ LInstruction::assignSnapshot(LSnapshot *
         fprintf(IonSpewFile, ")\n");
     }
 #endif
 }
 
 void
 LInstruction::dump(FILE *fp)
 {
-    fprintf(fp, "{");
-    for (size_t i = 0; i < numDefs(); i++) {
-        PrintDefinition(fp, *getDef(i));
-        if (i != numDefs() - 1)
-            fprintf(fp, ", ");
+    if (numDefs() != 0) {
+        fprintf(fp, "{");
+        for (size_t i = 0; i < numDefs(); i++) {
+            PrintDefinition(fp, *getDef(i));
+            if (i != numDefs() - 1)
+                fprintf(fp, ", ");
+        }
+        fprintf(fp, "} <- ");
     }
-    fprintf(fp, "} <- ");
 
     printName(fp);
     printInfo(fp);
 
     if (numTemps()) {
         fprintf(fp, " t=(");
         for (size_t i = 0; i < numTemps(); i++) {
             PrintDefinition(fp, *getTemp(i));
--- a/js/src/jit/LiveRangeAllocator.cpp
+++ b/js/src/jit/LiveRangeAllocator.cpp
@@ -898,11 +898,13 @@ LiveInterval::rangesToString() const
 #else
     return " ???";
 #endif
 }
 
 void
 LiveInterval::dump()
 {
-    fprintf(stderr, "v%u: index=%u allocation=%s %s\n",
-            vreg(), index(), getAllocation()->toString(), rangesToString());
+    if (hasVreg())
+        fprintf(stderr, "v%u: ", vreg());
+    fprintf(stderr, "index=%u allocation=%s %s\n",
+            index(), getAllocation()->toString(), rangesToString());
 }
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -650,17 +650,20 @@ MParameter::New(TempAllocator &alloc, in
 {
     return new(alloc) MParameter(index, types);
 }
 
 void
 MParameter::printOpcode(FILE *fp) const
 {
     PrintOpcodeName(fp, op());
-    fprintf(fp, " %d", index());
+    if (index() == THIS_SLOT)
+        fprintf(fp, " THIS_SLOT");
+    else
+        fprintf(fp, " %d", index());
 }
 
 HashNumber
 MParameter::valueHash() const
 {
     return index_; // Why not?
 }
 
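The MIR.cpp hunk prints the symbolic name THIS_SLOT rather than a raw index when dumping the |this| parameter. A hedged sketch of a call site, assuming THIS_SLOT is MParameter's sentinel index for |this| (alloc and thisTypes are hypothetical):

    // The |this| argument is created with the sentinel index instead of a
    // numbered formal-argument slot, so printOpcode() can name it.
    MParameter *thisParam = MParameter::New(alloc, MParameter::THIS_SLOT, thisTypes);
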
--- a/js/src/jit/RegisterAllocator.cpp
+++ b/js/src/jit/RegisterAllocator.cpp
@@ -426,33 +426,35 @@ AllocationIntegrityState::dump()
                 fprintf(stderr, " [use %s", info.inputs[index++].toString());
                 fprintf(stderr, " %s]", alloc->toString());
             }
 
             fprintf(stderr, "\n");
         }
     }
 
-    fprintf(stderr, "\nIntermediate Allocations:\n\n");
-
     // Print discovered allocations at the ends of blocks, in the order they
     // were discovered.
 
     Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
     seenOrdered.appendN(IntegrityItem(), seen.count());
 
     for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
         IntegrityItem item = iter.front();
         seenOrdered[item.index] = item;
     }
 
-    for (size_t i = 0; i < seenOrdered.length(); i++) {
-        IntegrityItem item = seenOrdered[i];
-        fprintf(stderr, "block %u reg v%u alloc %s\n",
-               item.block->mir()->id(), item.vreg, item.alloc.toString());
+    if (!seenOrdered.empty()) {
+        fprintf(stderr, "\nIntermediate Allocations:\n\n");
+
+        for (size_t i = 0; i < seenOrdered.length(); i++) {
+            IntegrityItem item = seenOrdered[i];
+            fprintf(stderr, "block %u reg v%u alloc %s\n",
+                   item.block->mir()->id(), item.vreg, item.alloc.toString());
+        }
     }
 
     fprintf(stderr, "\n");
 #endif
 }
 
 const CodePosition CodePosition::MAX(UINT_MAX);
 const CodePosition CodePosition::MIN(0);
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -85,16 +85,19 @@ CodeGeneratorShared::CodeGeneratorShared
 }
 
 bool
 CodeGeneratorShared::generateOutOfLineCode()
 {
     for (size_t i = 0; i < outOfLineCode_.length(); i++) {
         if (!gen->alloc().ensureBallast())
             return false;
+
+        IonSpew(IonSpew_Codegen, "# Emitting out of line code");
+
         masm.setFramePushed(outOfLineCode_[i]->framePushed());
         lastPC_ = outOfLineCode_[i]->pc();
         if (!sps_.prepareForOOL())
             return false;
         sps_.setPushed(outOfLineCode_[i]->script());
         outOfLineCode_[i]->bind(&masm);
 
         oolIns = outOfLineCode_[i];