Bug 822148 (part 9) - DMD: rename "groups" as "records". r=jlebar.
author Nicholas Nethercote <nnethercote@mozilla.com>
Mon, 17 Dec 2012 21:54:08 -0800
changeset 125709 eb2a563b2786298fe4bef5a383ca200d2b0e07d0
parent 125708 f8a12acba9edd5ad5ba629888846fcdcdd47a24f
child 125710 cc3d5a4f662c0b55fc1fe6de9b2bf64c2411010e
push id 2151
push user lsblakk@mozilla.com
push date Tue, 19 Feb 2013 18:06:57 +0000
treeherder mozilla-beta@4952e88741ec [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers jlebar
bugs 822148
milestone 20.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 822148 (part 9) - DMD: rename "groups" as "records". r=jlebar.
memory/replace/dmd/DMD.cpp
memory/replace/dmd/test-expected.dmd
--- a/memory/replace/dmd/DMD.cpp
+++ b/memory/replace/dmd/DMD.cpp
@@ -581,20 +581,20 @@ class LocationService
     size_t SizeOfExcludingThis() {
       // Don't measure mLibrary because it's externally owned.
       return MallocSizeOf(mFunction);
     }
   };
 
   // A direct-mapped cache.  When doing a dump just after starting desktop
   // Firefox (which is similar to dumping after a longer-running session,
-  // thanks to the limit on how many groups we dump), a cache with 2^24 entries
-  // (which approximates an infinite-entry cache) has a ~91% hit rate.  A cache
-  // with 2^12 entries has a ~83% hit rate, and takes up ~85 KiB (on 32-bit
-  // platforms) or ~150 KiB (on 64-bit platforms).
+  // thanks to the limit on how many records we dump), a cache with 2^24
+  // entries (which approximates an infinite-entry cache) has a ~91% hit rate.
+  // A cache with 2^12 entries has a ~83% hit rate, and takes up ~85 KiB (on
+  // 32-bit platforms) or ~150 KiB (on 64-bit platforms).
   static const size_t kNumEntries = 1 << 12;
   static const size_t kMask = kNumEntries - 1;
   Entry mEntries[kNumEntries];
 
   size_t mNumCacheHits;
   size_t mNumCacheMisses;
 
 public:
@@ -1192,65 +1192,65 @@ replace_free(void* aPtr)
   FreeCallback(aPtr, t);
   gMallocTable->free(aPtr);
 }
 
 namespace mozilla {
 namespace dmd {
 
 //---------------------------------------------------------------------------
-// Block groups
+// Stack trace records
 //---------------------------------------------------------------------------
 
-class BlockGroupKey
+class TraceRecordKey
 {
 public:
   const StackTrace* const mAllocStackTrace;   // never null
 protected:
   const StackTrace* const mReportStackTrace1; // nullptr if unreported
   const StackTrace* const mReportStackTrace2; // nullptr if not 2x-reported
 
 public:
-  BlockGroupKey(const Block& aB)
+  TraceRecordKey(const Block& aB)
     : mAllocStackTrace(aB.AllocStackTrace()),
       mReportStackTrace1(aB.ReportStackTrace1()),
       mReportStackTrace2(aB.ReportStackTrace2())
   {
     MOZ_ASSERT(mAllocStackTrace);
   }
 
   // Hash policy.
 
-  typedef BlockGroupKey Lookup;
-
-  static uint32_t hash(const BlockGroupKey& aKey)
+  typedef TraceRecordKey Lookup;
+
+  static uint32_t hash(const TraceRecordKey& aKey)
   {
     return mozilla::HashGeneric(aKey.mAllocStackTrace,
                                 aKey.mReportStackTrace1,
                                 aKey.mReportStackTrace2);
   }
 
-  static bool match(const BlockGroupKey& aA, const BlockGroupKey& aB)
+  static bool match(const TraceRecordKey& aA, const TraceRecordKey& aB)
   {
     return aA.mAllocStackTrace   == aB.mAllocStackTrace &&
            aA.mReportStackTrace1 == aB.mReportStackTrace1 &&
            aA.mReportStackTrace2 == aB.mReportStackTrace2;
   }
 };
 
-class GroupSize
+class RecordSize
 {
   static const size_t kReqBits = sizeof(size_t) * 8 - 1;  // 31 or 63
 
   size_t mReq;              // size requested
   size_t mSlop:kReqBits;    // slop bytes
   size_t mSampled:1;        // were one or more blocks contributing to this
-                            //   GroupSize sampled?
+                            //   RecordSize sampled?
 public:
-  GroupSize()
+  RecordSize()
     : mReq(0),
       mSlop(0),
       mSampled(false)
   {}
 
   size_t Req()    const { return mReq; }
   size_t Slop()   const { return mSlop; }
   size_t Usable() const { return mReq + mSlop; }
@@ -1259,114 +1259,113 @@ public:
 
   void Add(const Block& aB)
   {
     mReq  += aB.ReqSize();
     mSlop += aB.SlopSize();
     mSampled = mSampled || aB.IsSampled();
   }
 
-  void Add(const GroupSize& aGroupSize)
+  void Add(const RecordSize& aRecordSize)
   {
-    mReq  += aGroupSize.Req();
-    mSlop += aGroupSize.Slop();
-    mSampled = mSampled || aGroupSize.IsSampled();
+    mReq  += aRecordSize.Req();
+    mSlop += aRecordSize.Slop();
+    mSampled = mSampled || aRecordSize.IsSampled();
   }
 
-  static int Cmp(const GroupSize& aA, const GroupSize& aB)
+  static int Cmp(const RecordSize& aA, const RecordSize& aB)
   {
     // Primary sort: put bigger usable sizes first.
     if (aA.Usable() > aB.Usable()) return -1;
     if (aA.Usable() < aB.Usable()) return  1;
 
     // Secondary sort: put bigger requested sizes first.
     if (aA.Req() > aB.Req()) return -1;
     if (aA.Req() < aB.Req()) return  1;
 
-    // Tertiary sort: put non-sampled groups before sampled groups.
+    // Tertiary sort: put non-sampled records before sampled records.
     if (!aA.mSampled &&  aB.mSampled) return -1;
     if ( aA.mSampled && !aB.mSampled) return  1;
 
     return 0;
   }
 };
 
-// A group of one or more heap blocks with a common BlockGroupKey.
-class BlockGroup : public BlockGroupKey
+// A collection of one or more heap blocks with a common TraceRecordKey.
+class TraceRecord : public TraceRecordKey
 {
-  // The BlockGroupKey base class serves as the key in BlockGroupTables.  These
-  // two fields constitute the value, so it's ok for them to be |mutable|.
-  mutable uint32_t  mNumBlocks;     // number of blocks with this BlockGroupKey
-  mutable GroupSize mGroupSize;     // combined size of those blocks
+  // The TraceRecordKey base class serves as the key in TraceRecordTables.
+  // These two fields constitute the value, so it's ok for them to be
+  // |mutable|.
+  mutable uint32_t    mNumBlocks; // number of blocks with this TraceRecordKey
+  mutable RecordSize mRecordSize; // combined size of those blocks
 
 public:
-  explicit BlockGroup(const BlockGroupKey& aKey)
-    : BlockGroupKey(aKey),
+  explicit TraceRecord(const TraceRecordKey& aKey)
+    : TraceRecordKey(aKey),
       mNumBlocks(0),
-      mGroupSize()
+      mRecordSize()
   {}
 
   uint32_t NumBlocks() const { return mNumBlocks; }
 
-  const GroupSize& GetGroupSize() const { return mGroupSize; }
+  const RecordSize& GetRecordSize() const { return mRecordSize; }
 
   // This is |const| thanks to the |mutable| fields above.
   void Add(const Block& aB) const
   {
     mNumBlocks++;
-    mGroupSize.Add(aB);
+    mRecordSize.Add(aB);
   }
 
-  static const char* const kName;   // for PrintSortedGroups
+  static const char* const kRecordKind;   // for PrintSortedRecords
 
   void Print(const Writer& aWriter, LocationService* aLocService,
              uint32_t aM, uint32_t aN, const char* aStr, const char* astr,
              size_t aCategoryUsableSize, size_t aCumulativeUsableSize,
              size_t aTotalUsableSize) const;
 
   static int QsortCmp(const void* aA, const void* aB)
   {
-    const BlockGroup* const a =
-      *static_cast<const BlockGroup* const*>(aA);
-    const BlockGroup* const b =
-      *static_cast<const BlockGroup* const*>(aB);
-
-    return GroupSize::Cmp(a->mGroupSize, b->mGroupSize);
+    const TraceRecord* const a = *static_cast<const TraceRecord* const*>(aA);
+    const TraceRecord* const b = *static_cast<const TraceRecord* const*>(aB);
+
+    return RecordSize::Cmp(a->mRecordSize, b->mRecordSize);
   }
 };
 
-const char* const BlockGroup::kName = "block";
-
-typedef js::HashSet<BlockGroup, BlockGroup, InfallibleAllocPolicy>
-        BlockGroupTable;
+const char* const TraceRecord::kRecordKind = "trace";
+
+typedef js::HashSet<TraceRecord, TraceRecord, InfallibleAllocPolicy>
+        TraceRecordTable;
 
 void
-BlockGroup::Print(const Writer& aWriter, LocationService* aLocService,
-                  uint32_t aM, uint32_t aN, const char* aStr, const char* astr,
-                  size_t aCategoryUsableSize, size_t aCumulativeUsableSize,
-                  size_t aTotalUsableSize) const
+TraceRecord::Print(const Writer& aWriter, LocationService* aLocService,
+                   uint32_t aM, uint32_t aN, const char* aStr, const char* astr,
+                   size_t aCategoryUsableSize, size_t aCumulativeUsableSize,
+                   size_t aTotalUsableSize) const
 {
-  bool showTilde = mGroupSize.IsSampled();
-
-  W("%s: %s block%s in block group %s of %s\n",
+  bool showTilde = mRecordSize.IsSampled();
+
+  W("%s: %s block%s in stack trace record %s of %s\n",
     aStr,
     Show(mNumBlocks, gBuf1, kBufLen, showTilde), Plural(mNumBlocks),
     Show(aM, gBuf2, kBufLen),
     Show(aN, gBuf3, kBufLen));
 
   W(" %s bytes (%s requested / %s slop)\n",
-    Show(mGroupSize.Usable(), gBuf1, kBufLen, showTilde),
-    Show(mGroupSize.Req(),    gBuf2, kBufLen, showTilde),
-    Show(mGroupSize.Slop(),   gBuf3, kBufLen, showTilde));
+    Show(mRecordSize.Usable(), gBuf1, kBufLen, showTilde),
+    Show(mRecordSize.Req(),    gBuf2, kBufLen, showTilde),
+    Show(mRecordSize.Slop(),   gBuf3, kBufLen, showTilde));
 
   W(" %4.2f%% of the heap (%4.2f%% cumulative); "
     " %4.2f%% of %s (%4.2f%% cumulative)\n",
-    Percent(mGroupSize.Usable(), aTotalUsableSize),
+    Percent(mRecordSize.Usable(), aTotalUsableSize),
     Percent(aCumulativeUsableSize, aTotalUsableSize),
-    Percent(mGroupSize.Usable(), aCategoryUsableSize),
+    Percent(mRecordSize.Usable(), aCategoryUsableSize),
     astr,
     Percent(aCumulativeUsableSize, aCategoryUsableSize));
 
   W(" Allocated at\n");
   mAllocStackTrace->Print(aWriter, aLocService);
 
   if (mReportStackTrace1) {
     W("\n Reported at\n");
@@ -1376,108 +1375,108 @@ BlockGroup::Print(const Writer& aWriter,
     W("\n Reported again at\n");
     mReportStackTrace2->Print(aWriter, aLocService);
   }
 
   W("\n");
 }
 
 //---------------------------------------------------------------------------
-// Stack frame groups
+// Stack frame records
 //---------------------------------------------------------------------------
 
-// A group of one or more stack frames (from heap block allocation stack
+// A collection of one or more stack frames (from heap block allocation stack
 // traces) with a common PC.
-class FrameGroup
+class FrameRecord
 {
-  // mPc is used as the key in FrameGroupTable, and the other members
+  // mPc is used as the key in FrameRecordTable, and the other members
   // constitute the value, so it's ok for them to be |mutable|.
-  const void* const mPc;
-  mutable size_t    mNumBlocks;
-  mutable size_t    mNumBlockGroups;
-  mutable GroupSize mGroupSize;
+  const void* const  mPc;
+  mutable size_t     mNumBlocks;
+  mutable size_t     mNumTraceRecords;
+  mutable RecordSize mRecordSize;
 
 public:
-  explicit FrameGroup(const void* aPc)
+  explicit FrameRecord(const void* aPc)
     : mPc(aPc),
       mNumBlocks(0),
-      mNumBlockGroups(0),
-      mGroupSize()
+      mNumTraceRecords(0),
+      mRecordSize()
   {}
 
-  const GroupSize& GetGroupSize() const { return mGroupSize; }
+  const RecordSize& GetRecordSize() const { return mRecordSize; }
 
   // This is |const| thanks to the |mutable| fields above.
-  void Add(const BlockGroup& aBg) const
+  void Add(const TraceRecord& aTr) const
   {
-    mNumBlocks += aBg.NumBlocks();
-    mNumBlockGroups++;
-    mGroupSize.Add(aBg.GetGroupSize());
+    mNumBlocks += aTr.NumBlocks();
+    mNumTraceRecords++;
+    mRecordSize.Add(aTr.GetRecordSize());
   }
 
   void Print(const Writer& aWriter, LocationService* aLocService,
              uint32_t aM, uint32_t aN, const char* aStr, const char* astr,
              size_t aCategoryUsableSize, size_t aCumulativeUsableSize,
              size_t aTotalUsableSize) const;
 
   static int QsortCmp(const void* aA, const void* aB)
   {
-    const FrameGroup* const a = *static_cast<const FrameGroup* const*>(aA);
-    const FrameGroup* const b = *static_cast<const FrameGroup* const*>(aB);
-
-    return GroupSize::Cmp(a->mGroupSize, b->mGroupSize);
+    const FrameRecord* const a = *static_cast<const FrameRecord* const*>(aA);
+    const FrameRecord* const b = *static_cast<const FrameRecord* const*>(aB);
+
+    return RecordSize::Cmp(a->mRecordSize, b->mRecordSize);
   }
 
-  static const char* const kName;   // for PrintSortedGroups
+  static const char* const kRecordKind;   // for PrintSortedRecords
 
   // Hash policy.
 
   typedef const void* Lookup;
 
   static uint32_t hash(const void* const& aPc)
   {
     return mozilla::HashGeneric(aPc);
   }
 
-  static bool match(const FrameGroup& aFg, const void* const& aPc)
+  static bool match(const FrameRecord& aFr, const void* const& aPc)
   {
-    return aFg.mPc == aPc;
+    return aFr.mPc == aPc;
   }
 };
 
-const char* const FrameGroup::kName = "frame";
-
-typedef js::HashSet<FrameGroup, FrameGroup, InfallibleAllocPolicy>
-        FrameGroupTable;
+const char* const FrameRecord::kRecordKind = "frame";
+
+typedef js::HashSet<FrameRecord, FrameRecord, InfallibleAllocPolicy>
+        FrameRecordTable;
 
 void
-FrameGroup::Print(const Writer& aWriter, LocationService* aLocService,
-                  uint32_t aM, uint32_t aN, const char* aStr, const char* astr,
-                  size_t aCategoryUsableSize, size_t aCumulativeUsableSize,
-                  size_t aTotalUsableSize) const
+FrameRecord::Print(const Writer& aWriter, LocationService* aLocService,
+                   uint32_t aM, uint32_t aN, const char* aStr, const char* astr,
+                   size_t aCategoryUsableSize, size_t aCumulativeUsableSize,
+                   size_t aTotalUsableSize) const
 {
   (void)aCumulativeUsableSize;
 
-  bool showTilde = mGroupSize.IsSampled();
-
-  W("%s: %s block%s and %s block group%s in frame group %s of %s\n",
+  bool showTilde = mRecordSize.IsSampled();
+
+  W("%s: %s block%s from %s stack trace record%s in stack frame record %s of %s\n",
     aStr,
     Show(mNumBlocks, gBuf1, kBufLen, showTilde), Plural(mNumBlocks),
-    Show(mNumBlockGroups, gBuf2, kBufLen, showTilde), Plural(mNumBlockGroups),
+    Show(mNumTraceRecords, gBuf2, kBufLen, showTilde), Plural(mNumTraceRecords),
     Show(aM, gBuf3, kBufLen),
     Show(aN, gBuf4, kBufLen));
 
   W(" %s bytes (%s requested / %s slop)\n",
-    Show(mGroupSize.Usable(), gBuf1, kBufLen, showTilde),
-    Show(mGroupSize.Req(),    gBuf2, kBufLen, showTilde),
-    Show(mGroupSize.Slop(),   gBuf3, kBufLen, showTilde));
+    Show(mRecordSize.Usable(), gBuf1, kBufLen, showTilde),
+    Show(mRecordSize.Req(),    gBuf2, kBufLen, showTilde),
+    Show(mRecordSize.Slop(),   gBuf3, kBufLen, showTilde));
 
   W(" %4.2f%% of the heap;  %4.2f%% of %s\n",
-    Percent(mGroupSize.Usable(), aTotalUsableSize),
-    Percent(mGroupSize.Usable(), aCategoryUsableSize),
+    Percent(mRecordSize.Usable(), aTotalUsableSize),
+    Percent(mRecordSize.Usable(), aCategoryUsableSize),
     astr);
 
   W(" PC is\n");
   aLocService->WriteLocation(aWriter, mPc);
   W("\n");
 }
 
 //---------------------------------------------------------------------------
@@ -1746,117 +1745,118 @@ ReportOnAlloc(const void* aPtr)
 {
   ReportHelper(aPtr, /* onAlloc */ true);
 }
 
 //---------------------------------------------------------------------------
 // DMD output
 //---------------------------------------------------------------------------
 
-// This works for BlockGroups and FrameGroups.
-template <class TGroup>
+// This works for both TraceRecords and StackFrameRecords.
+template <class Record>
 static void
-PrintSortedGroups(const Writer& aWriter, LocationService* aLocService,
-                  const char* aStr, const char* astr,
-                  const js::HashSet<TGroup, TGroup, InfallibleAllocPolicy>& aTGroupTable,
-                  size_t aCategoryUsableSize, size_t aTotalUsableSize)
+PrintSortedRecords(const Writer& aWriter, LocationService* aLocService,
+                   const char* aStr, const char* astr,
+                   const js::HashSet<Record, Record, InfallibleAllocPolicy>&
+                         aRecordTable,
+                   size_t aCategoryUsableSize, size_t aTotalUsableSize)
 {
-  const char* name = TGroup::kName;
-  StatusMsg("  creating and sorting %s %s group array...\n", astr, name);
+  const char* kind = Record::kRecordKind;
+  StatusMsg("  creating and sorting %s stack %s record array...\n", astr, kind);
 
   // Convert the table into a sorted array.
-  js::Vector<const TGroup*, 0, InfallibleAllocPolicy> tgArray;
-  tgArray.reserve(aTGroupTable.count());
-  typedef js::HashSet<TGroup, TGroup, InfallibleAllocPolicy> TGroupTable;
-  for (typename TGroupTable::Range r = aTGroupTable.all();
+  js::Vector<const Record*, 0, InfallibleAllocPolicy> recordArray;
+  recordArray.reserve(aRecordTable.count());
+  typedef js::HashSet<Record, Record, InfallibleAllocPolicy> RecordTable;
+  for (typename RecordTable::Range r = aRecordTable.all();
        !r.empty();
        r.popFront()) {
-    tgArray.infallibleAppend(&r.front());
+    recordArray.infallibleAppend(&r.front());
   }
-  qsort(tgArray.begin(), tgArray.length(), sizeof(tgArray[0]),
-        TGroup::QsortCmp);
-
-  WriteTitle("%s %ss\n", aStr, name);
-
-  if (tgArray.length() == 0) {
+  qsort(recordArray.begin(), recordArray.length(), sizeof(recordArray[0]),
+        Record::QsortCmp);
+
+  WriteTitle("%s stack %s records\n", aStr, kind);
+
+  if (recordArray.length() == 0) {
     W("(none)\n\n");
     return;
   }
 
-  // Limit the number of block groups printed, because fix-linux-stack.pl is
-  // too damn slow.  Note that we don't break out of this loop because we need
-  // to keep adding to |cumulativeUsableSize|.
-  static const uint32_t MaxTGroups = 1000;
-  uint32_t numTGroups = tgArray.length();
-
-  StatusMsg("  printing %s %s group array...\n", astr, name);
+  // Limit the number of records printed, because fix-linux-stack.pl is too
+  // damn slow.  Note that we don't break out of this loop because we need to
+  // keep adding to |cumulativeUsableSize|.
+  static const uint32_t MaxRecords = 1000;
+  uint32_t numRecords = recordArray.length();
+
+  StatusMsg("  printing %s stack %s record array...\n", astr, kind);
   size_t cumulativeUsableSize = 0;
-  for (uint32_t i = 0; i < numTGroups; i++) {
-    const TGroup* tg = tgArray[i];
-    cumulativeUsableSize += tg->GetGroupSize().Usable();
-    if (i < MaxTGroups) {
-      tg->Print(aWriter, aLocService, i+1, numTGroups, aStr, astr,
-                aCategoryUsableSize, cumulativeUsableSize, aTotalUsableSize);
-    } else if (i == MaxTGroups) {
-      W("%s: stopping after %s %s groups\n\n", aStr,
-        Show(MaxTGroups, gBuf1, kBufLen), name);
+  for (uint32_t i = 0; i < numRecords; i++) {
+    const Record* r = recordArray[i];
+    cumulativeUsableSize += r->GetRecordSize().Usable();
+    if (i < MaxRecords) {
+      r->Print(aWriter, aLocService, i+1, numRecords, aStr, astr,
+               aCategoryUsableSize, cumulativeUsableSize, aTotalUsableSize);
+    } else if (i == MaxRecords) {
+      W("%s: stopping after %s stack %s records\n\n", aStr,
+        Show(MaxRecords, gBuf1, kBufLen), kind);
     }
   }
 
   MOZ_ASSERT(aCategoryUsableSize == kNoSize ||
              aCategoryUsableSize == cumulativeUsableSize);
 }
 
 static void
-PrintSortedBlockAndFrameGroups(const Writer& aWriter,
-                               LocationService* aLocService,
-                               const char* aStr, const char* astr,
-                               const BlockGroupTable& aBlockGroupTable,
-                               size_t aCategoryUsableSize,
-                               size_t aTotalUsableSize)
+PrintSortedTraceAndFrameRecords(const Writer& aWriter,
+                                LocationService* aLocService,
+                                const char* aStr, const char* astr,
+                                const TraceRecordTable& aTraceRecordTable,
+                                size_t aCategoryUsableSize,
+                                size_t aTotalUsableSize)
 {
-  PrintSortedGroups(aWriter, aLocService, aStr, astr, aBlockGroupTable,
-                    aCategoryUsableSize, aTotalUsableSize);
-
-  // Frame groups are totally dependent on vagaries of stack traces, so we
+  PrintSortedRecords(aWriter, aLocService, aStr, astr, aTraceRecordTable,
+                     aCategoryUsableSize, aTotalUsableSize);
+
+  // Frame records are totally dependent on vagaries of stack traces, so we
   // can't show them in test mode.
   if (gMode == Test) {
     return;
   }
 
-  FrameGroupTable frameGroupTable;
-  (void)frameGroupTable.init(2048);
-  for (BlockGroupTable::Range r = aBlockGroupTable.all();
+  FrameRecordTable frameRecordTable;
+  (void)frameRecordTable.init(2048);
+  for (TraceRecordTable::Range r = aTraceRecordTable.all();
        !r.empty();
        r.popFront()) {
-    const BlockGroup& bg = r.front();
-    const StackTrace* st = bg.mAllocStackTrace;
+    const TraceRecord& tr = r.front();
+    const StackTrace* st = tr.mAllocStackTrace;
 
     // A single PC can appear multiple times in a stack trace.  We ignore
     // duplicates by first sorting and then ignoring adjacent duplicates.
     StackTrace sorted(*st);
     sorted.Sort();              // sorts the copy, not the original
     void* prevPc = (void*)intptr_t(-1);
     for (uint32_t i = 0; i < sorted.Length(); i++) {
       void* pc = sorted.Pc(i);
       if (pc == prevPc) {
         continue;               // ignore duplicate
       }
       prevPc = pc;
 
-      FrameGroupTable::AddPtr p = frameGroupTable.lookupForAdd(pc);
+      FrameRecordTable::AddPtr p = frameRecordTable.lookupForAdd(pc);
       if (!p) {
-        FrameGroup fg(pc);
-        (void)frameGroupTable.add(p, fg);
+        FrameRecord fr(pc);
+        (void)frameRecordTable.add(p, fr);
       }
-      p->Add(bg);
+      p->Add(tr);
     }
   }
-  PrintSortedGroups(aWriter, aLocService, aStr, astr, frameGroupTable, kNoSize,
-                    aTotalUsableSize);
+  PrintSortedRecords(aWriter, aLocService, aStr, astr, frameRecordTable,
+                     kNoSize, aTotalUsableSize);
 }
 
 // Note that, unlike most SizeOf* functions, this function does not take a
 // |nsMallocSizeOfFun| argument.  That's because those arguments are primarily
 // to aid DMD track heap blocks... but DMD deliberately doesn't track heap
 // blocks it allocated for itself!
 MOZ_EXPORT void
 SizeOf(Sizes* aSizes)
@@ -1901,59 +1901,59 @@ Dump(Writer aWriter)
   }
 
   AutoBlockIntercepts block(Thread::Fetch());
   AutoLockState lock;
 
   static int dumpCount = 1;
   StatusMsg("Dump %d {\n", dumpCount++);
 
-  StatusMsg("  gathering block groups...\n");
-
-  BlockGroupTable unreportedBlockGroupTable;
-  (void)unreportedBlockGroupTable.init(1024);
+  StatusMsg("  gathering stack trace records...\n");
+
+  TraceRecordTable unreportedTraceRecordTable;
+  (void)unreportedTraceRecordTable.init(1024);
   size_t unreportedUsableSize = 0;
   size_t unreportedNumBlocks = 0;
 
-  BlockGroupTable onceReportedBlockGroupTable;
-  (void)onceReportedBlockGroupTable.init(1024);
+  TraceRecordTable onceReportedTraceRecordTable;
+  (void)onceReportedTraceRecordTable.init(1024);
   size_t onceReportedUsableSize = 0;
   size_t onceReportedNumBlocks = 0;
 
-  BlockGroupTable twiceReportedBlockGroupTable;
-  (void)twiceReportedBlockGroupTable.init(0);
+  TraceRecordTable twiceReportedTraceRecordTable;
+  (void)twiceReportedTraceRecordTable.init(0);
   size_t twiceReportedUsableSize = 0;
   size_t twiceReportedNumBlocks = 0;
 
   bool anyBlocksSampled = false;
 
   for (BlockTable::Range r = gBlockTable->all(); !r.empty(); r.popFront()) {
     const Block& b = r.front();
 
-    BlockGroupTable* table;
+    TraceRecordTable* table;
     uint32_t numReports = b.NumReports();
     if (numReports == 0) {
       unreportedUsableSize += b.UsableSize();
       unreportedNumBlocks++;
-      table = &unreportedBlockGroupTable;
+      table = &unreportedTraceRecordTable;
     } else if (numReports == 1) {
       onceReportedUsableSize += b.UsableSize();
       onceReportedNumBlocks++;
-      table = &onceReportedBlockGroupTable;
+      table = &onceReportedTraceRecordTable;
     } else {
       MOZ_ASSERT(numReports == 2);
       twiceReportedUsableSize += b.UsableSize();
       twiceReportedNumBlocks++;
-      table = &twiceReportedBlockGroupTable;
+      table = &twiceReportedTraceRecordTable;
     }
-    BlockGroupKey key(b);
-    BlockGroupTable::AddPtr p = table->lookupForAdd(key);
+    TraceRecordKey key(b);
+    TraceRecordTable::AddPtr p = table->lookupForAdd(key);
     if (!p) {
-      BlockGroup bg(b);
-      (void)table->add(p, bg);
+      TraceRecord tr(b);
+      (void)table->add(p, tr);
     }
     p->Add(b);
 
     anyBlocksSampled = anyBlocksSampled || b.IsSampled();
   }
   size_t totalUsableSize =
     unreportedUsableSize + onceReportedUsableSize + twiceReportedUsableSize;
   size_t totalNumBlocks =
@@ -1961,28 +1961,28 @@ Dump(Writer aWriter)
 
   WriteTitle("Invocation\n");
   W("$DMD = '%s'\n", gDMDEnvVar);
   W("Sample-below size = %lld\n\n", (long long)(gSampleBelowSize));
 
   // Allocate this on the heap instead of the stack because it's fairly large.
   LocationService* locService = InfallibleAllocPolicy::new_<LocationService>();
 
-  PrintSortedGroups(aWriter, locService, "Twice-reported", "twice-reported",
-                    twiceReportedBlockGroupTable, twiceReportedUsableSize,
-                    totalUsableSize);
-
-  PrintSortedBlockAndFrameGroups(aWriter, locService,
-                                 "Unreported", "unreported",
-                                 unreportedBlockGroupTable,
-                                 unreportedUsableSize, totalUsableSize);
-
-  PrintSortedBlockAndFrameGroups(aWriter, locService,
+  PrintSortedRecords(aWriter, locService, "Twice-reported", "twice-reported",
+                     twiceReportedTraceRecordTable, twiceReportedUsableSize,
+                     totalUsableSize);
+
+  PrintSortedTraceAndFrameRecords(aWriter, locService,
+                                  "Unreported", "unreported",
+                                  unreportedTraceRecordTable,
+                                  unreportedUsableSize, totalUsableSize);
+
+  PrintSortedTraceAndFrameRecords(aWriter, locService,
                                  "Once-reported", "once-reported",
-                                 onceReportedBlockGroupTable,
+                                 onceReportedTraceRecordTable,
                                  onceReportedUsableSize, totalUsableSize);
 
   bool showTilde = anyBlocksSampled;
   WriteTitle("Summary\n");
 
   W("Total:          %12s bytes (%6.2f%%) in %7s blocks (%6.2f%%)\n",
     Show(totalUsableSize, gBuf1, kBufLen, showTilde),
     100.0,
@@ -2029,35 +2029,35 @@ Dump(Writer aWriter)
     W("  Block table:          %10s bytes (%s entries, %s used)\n",
       Show(sizes.mBlockTable,       gBuf1, kBufLen),
       Show(gBlockTable->capacity(), gBuf2, kBufLen),
       Show(gBlockTable->count(),    gBuf3, kBufLen));
 
     W("\nData structures that are destroyed after Dump() ends:\n");
 
     size_t unreportedSize =
-      unreportedBlockGroupTable.sizeOfIncludingThis(MallocSizeOf);
+      unreportedTraceRecordTable.sizeOfIncludingThis(MallocSizeOf);
     W("  Unreported table:     %10s bytes (%s entries, %s used)\n",
-      Show(unreportedSize,                       gBuf1, kBufLen),
-      Show(unreportedBlockGroupTable.capacity(), gBuf2, kBufLen),
-      Show(unreportedBlockGroupTable.count(),    gBuf3, kBufLen));
+      Show(unreportedSize,                        gBuf1, kBufLen),
+      Show(unreportedTraceRecordTable.capacity(), gBuf2, kBufLen),
+      Show(unreportedTraceRecordTable.count(),    gBuf3, kBufLen));
 
     size_t onceReportedSize =
-      onceReportedBlockGroupTable.sizeOfIncludingThis(MallocSizeOf);
+      onceReportedTraceRecordTable.sizeOfIncludingThis(MallocSizeOf);
     W("  Once-reported table:  %10s bytes (%s entries, %s used)\n",
-      Show(onceReportedSize,                       gBuf1, kBufLen),
-      Show(onceReportedBlockGroupTable.capacity(), gBuf2, kBufLen),
-      Show(onceReportedBlockGroupTable.count(),    gBuf3, kBufLen));
+      Show(onceReportedSize,                        gBuf1, kBufLen),
+      Show(onceReportedTraceRecordTable.capacity(), gBuf2, kBufLen),
+      Show(onceReportedTraceRecordTable.count(),    gBuf3, kBufLen));
 
     size_t twiceReportedSize =
-      twiceReportedBlockGroupTable.sizeOfIncludingThis(MallocSizeOf);
+      twiceReportedTraceRecordTable.sizeOfIncludingThis(MallocSizeOf);
     W("  Twice-reported table: %10s bytes (%s entries, %s used)\n",
-      Show(twiceReportedSize,                       gBuf1, kBufLen),
-      Show(twiceReportedBlockGroupTable.capacity(), gBuf2, kBufLen),
-      Show(twiceReportedBlockGroupTable.count(),    gBuf3, kBufLen));
+      Show(twiceReportedSize,                        gBuf1, kBufLen),
+      Show(twiceReportedTraceRecordTable.capacity(), gBuf2, kBufLen),
+      Show(twiceReportedTraceRecordTable.count(),    gBuf3, kBufLen));
 
     W("  Location service:     %10s bytes\n",
       Show(locService->SizeOfIncludingThis(), gBuf1, kBufLen));
 
     W("\nCounts:\n");
 
     size_t hits   = locService->NumCacheHits();
     size_t misses = locService->NumCacheMisses();
@@ -2257,22 +2257,22 @@ RunTestMode(FILE* fp)
 
   // Reset the counter just in case |sample-size| was specified in $DMD.
   // Otherwise the assertions fail.
   gSmallBlockActualSizeCounter = 0;
   gSampleBelowSize = 128;
 
   char* s;
 
-  // This equals the sample size, and so is recorded exactly.  It should be
-  // listed before groups of the same size that are sampled.
+  // This equals the sample size, and so is reported exactly.  It should be
+  // listed before records of the same size that are sampled.
   s = (char*) malloc(128);
   UseItOrLoseIt(s);
 
-  // This exceeds the sample size, and so is recorded exactly.
+  // This exceeds the sample size, and so is reported exactly.
   s = (char*) malloc(144);
   UseItOrLoseIt(s);
 
   // These together constitute exactly one sample.
   for (int i = 0; i < 16; i++) {
     s = (char*) malloc(8);
     UseItOrLoseIt(s);
   }
@@ -2297,19 +2297,19 @@ RunTestMode(FILE* fp)
 
   // This gets to another full sample.
   for (int i = 0; i < 5; i++) {
     s = (char*) malloc(8);
     UseItOrLoseIt(s);
   }
   MOZ_ASSERT(gSmallBlockActualSizeCounter == 0);
 
-  // This allocates 16, 32, ..., 128 bytes, which results a block group that
-  // contains a mix of sample and non-sampled blocks, and so should be printed
-  // with '~' signs.
+  // This allocates 16, 32, ..., 128 bytes, which results in a stack trace
+  // record that contains a mix of sample and non-sampled blocks, and so should
+  // be printed with '~' signs.
   for (int i = 1; i <= 8; i++) {
     s = (char*) malloc(i * 16);
     UseItOrLoseIt(s);
   }
   MOZ_ASSERT(gSmallBlockActualSizeCounter == 64);
 
   // At the end we're 64 bytes into the current sample so we report ~1,424
   // bytes of allocation overall, which is 64 less than the real value 1,488.
--- a/memory/replace/dmd/test-expected.dmd
+++ b/memory/replace/dmd/test-expected.dmd
@@ -1,29 +1,29 @@
 ------------------------------------------------------------------
 Invocation
 ------------------------------------------------------------------
 
 $DMD = '--mode=test'
 Sample-below size = 1
 
 ------------------------------------------------------------------
-Twice-reported blocks
+Twice-reported stack trace records
 ------------------------------------------------------------------
 
 (none)
 
 ------------------------------------------------------------------
-Unreported blocks
+Unreported stack trace records
 ------------------------------------------------------------------
 
 (none)
 
 ------------------------------------------------------------------
-Once-reported blocks
+Once-reported stack trace records
 ------------------------------------------------------------------
 
 (none)
 
 ------------------------------------------------------------------
 Summary
 ------------------------------------------------------------------
 
@@ -35,190 +35,190 @@ Twice-reported:            0 bytes (  0.
 ------------------------------------------------------------------
 Invocation
 ------------------------------------------------------------------
 
 $DMD = '--mode=test'
 Sample-below size = 1
 
 ------------------------------------------------------------------
-Twice-reported blocks
+Twice-reported stack trace records
 ------------------------------------------------------------------
 
-Twice-reported: 1 block in block group 1 of 4
+Twice-reported: 1 block in stack trace record 1 of 4
  80 bytes (79 requested / 1 slop)
  0.53% of the heap (0.53% cumulative);  29.41% of twice-reported (29.41% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
  Reported again at
    (stack omitted due to test mode)
 
-Twice-reported: 1 block in block group 2 of 4
+Twice-reported: 1 block in stack trace record 2 of 4
  80 bytes (78 requested / 2 slop)
  0.53% of the heap (1.05% cumulative);  29.41% of twice-reported (58.82% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
  Reported again at
    (stack omitted due to test mode)
 
-Twice-reported: 1 block in block group 3 of 4
+Twice-reported: 1 block in stack trace record 3 of 4
  80 bytes (77 requested / 3 slop)
  0.53% of the heap (1.58% cumulative);  29.41% of twice-reported (88.24% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
  Reported again at
    (stack omitted due to test mode)
 
-Twice-reported: 1 block in block group 4 of 4
+Twice-reported: 1 block in stack trace record 4 of 4
  32 bytes (30 requested / 2 slop)
  0.21% of the heap (1.79% cumulative);  11.76% of twice-reported (100.00% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
  Reported again at
    (stack omitted due to test mode)
 
 ------------------------------------------------------------------
-Unreported blocks
+Unreported stack trace records
 ------------------------------------------------------------------
 
-Unreported: 1 block in block group 1 of 4
+Unreported: 1 block in stack trace record 1 of 4
  4,096 bytes (1 requested / 4,095 slop)
  27.00% of the heap (27.00% cumulative);  76.88% of unreported (76.88% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
-Unreported: 9 blocks in block group 2 of 4
+Unreported: 9 blocks in stack trace record 2 of 4
  1,008 bytes (900 requested / 108 slop)
  6.65% of the heap (33.65% cumulative);  18.92% of unreported (95.80% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
-Unreported: 2 blocks in block group 3 of 4
+Unreported: 2 blocks in stack trace record 3 of 4
  112 bytes (112 requested / 0 slop)
  0.74% of the heap (34.39% cumulative);  2.10% of unreported (97.90% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
-Unreported: 2 blocks in block group 4 of 4
+Unreported: 2 blocks in stack trace record 4 of 4
  112 bytes (112 requested / 0 slop)
  0.74% of the heap (35.13% cumulative);  2.10% of unreported (100.00% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
 ------------------------------------------------------------------
-Once-reported blocks
+Once-reported stack trace records
 ------------------------------------------------------------------
 
-Once-reported: 1 block in block group 1 of 11
+Once-reported: 1 block in stack trace record 1 of 11
  8,192 bytes (4,097 requested / 4,095 slop)
  54.01% of the heap (54.01% cumulative);  85.62% of once-reported (85.62% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 1 block in block group 2 of 11
+Once-reported: 1 block in stack trace record 2 of 11
  512 bytes (512 requested / 0 slop)
  3.38% of the heap (57.38% cumulative);  5.35% of once-reported (90.97% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 2 blocks in block group 3 of 11
+Once-reported: 2 blocks in stack trace record 3 of 11
  240 bytes (240 requested / 0 slop)
  1.58% of the heap (58.97% cumulative);  2.51% of once-reported (93.48% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 2 blocks in block group 4 of 11
+Once-reported: 2 blocks in stack trace record 4 of 11
  240 bytes (240 requested / 0 slop)
  1.58% of the heap (60.55% cumulative);  2.51% of once-reported (95.99% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 1 block in block group 5 of 11
+Once-reported: 1 block in stack trace record 5 of 11
  96 bytes (96 requested / 0 slop)
  0.63% of the heap (61.18% cumulative);  1.00% of once-reported (96.99% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 1 block in block group 6 of 11
+Once-reported: 1 block in stack trace record 6 of 11
  96 bytes (96 requested / 0 slop)
  0.63% of the heap (61.81% cumulative);  1.00% of once-reported (97.99% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 1 block in block group 7 of 11
+Once-reported: 1 block in stack trace record 7 of 11
  80 bytes (80 requested / 0 slop)
  0.53% of the heap (62.34% cumulative);  0.84% of once-reported (98.83% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 1 block in block group 8 of 11
+Once-reported: 1 block in stack trace record 8 of 11
  80 bytes (80 requested / 0 slop)
  0.53% of the heap (62.87% cumulative);  0.84% of once-reported (99.67% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 1 block in block group 9 of 11
+Once-reported: 1 block in stack trace record 9 of 11
  16 bytes (10 requested / 6 slop)
  0.11% of the heap (62.97% cumulative);  0.17% of once-reported (99.83% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 1 block in block group 10 of 11
+Once-reported: 1 block in stack trace record 10 of 11
  8 bytes (0 requested / 8 slop)
  0.05% of the heap (63.03% cumulative);  0.08% of once-reported (99.92% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 1 block in block group 11 of 11
+Once-reported: 1 block in stack trace record 11 of 11
  8 bytes (0 requested / 8 slop)
  0.05% of the heap (63.08% cumulative);  0.08% of once-reported (100.00% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
@@ -234,97 +234,97 @@ Twice-reported:          272 bytes (  1.
 ------------------------------------------------------------------
 Invocation
 ------------------------------------------------------------------
 
 $DMD = '--mode=test'
 Sample-below size = 1
 
 ------------------------------------------------------------------
-Twice-reported blocks
+Twice-reported stack trace records
 ------------------------------------------------------------------
 
-Twice-reported: 1 block in block group 1 of 2
+Twice-reported: 1 block in stack trace record 1 of 2
  80 bytes (77 requested / 3 slop)
  2.82% of the heap (2.82% cumulative);  90.91% of twice-reported (90.91% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
  Reported again at
    (stack omitted due to test mode)
 
-Twice-reported: 1 block in block group 2 of 2
+Twice-reported: 1 block in stack trace record 2 of 2
  8 bytes (0 requested / 8 slop)
  0.28% of the heap (3.10% cumulative);  9.09% of twice-reported (100.00% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
  Reported again at
    (stack omitted due to test mode)
 
 ------------------------------------------------------------------
-Unreported blocks
+Unreported stack trace records
 ------------------------------------------------------------------
 
-Unreported: 9 blocks in block group 1 of 3
+Unreported: 9 blocks in stack trace record 1 of 3
  1,008 bytes (900 requested / 108 slop)
  35.49% of the heap (35.49% cumulative);  48.84% of unreported (48.84% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
-Unreported: 6 blocks in block group 2 of 3
+Unreported: 6 blocks in stack trace record 2 of 3
  528 bytes (528 requested / 0 slop)
  18.59% of the heap (54.08% cumulative);  25.58% of unreported (74.42% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
-Unreported: 6 blocks in block group 3 of 3
+Unreported: 6 blocks in stack trace record 3 of 3
  528 bytes (528 requested / 0 slop)
  18.59% of the heap (72.68% cumulative);  25.58% of unreported (100.00% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
 ------------------------------------------------------------------
-Once-reported blocks
+Once-reported stack trace records
 ------------------------------------------------------------------
 
-Once-reported: 1 block in block group 1 of 4
+Once-reported: 1 block in stack trace record 1 of 4
  512 bytes (512 requested / 0 slop)
  18.03% of the heap (18.03% cumulative);  74.42% of once-reported (74.42% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 1 block in block group 2 of 4
+Once-reported: 1 block in stack trace record 2 of 4
  80 bytes (79 requested / 1 slop)
  2.82% of the heap (20.85% cumulative);  11.63% of once-reported (86.05% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 1 block in block group 3 of 4
+Once-reported: 1 block in stack trace record 3 of 4
  80 bytes (78 requested / 2 slop)
  2.82% of the heap (23.66% cumulative);  11.63% of once-reported (97.67% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
-Once-reported: 1 block in block group 4 of 4
+Once-reported: 1 block in stack trace record 4 of 4
  16 bytes (10 requested / 6 slop)
  0.56% of the heap (24.23% cumulative);  2.33% of once-reported (100.00% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
  Reported at
    (stack omitted due to test mode)
 
@@ -340,69 +340,69 @@ Twice-reported:           88 bytes (  3.
 ------------------------------------------------------------------
 Invocation
 ------------------------------------------------------------------
 
 $DMD = '--mode=test'
 Sample-below size = 128
 
 ------------------------------------------------------------------
-Twice-reported blocks
+Twice-reported stack trace records
 ------------------------------------------------------------------
 
 (none)
 
 ------------------------------------------------------------------
-Unreported blocks
+Unreported stack trace records
 ------------------------------------------------------------------
 
-Unreported: ~4 blocks in block group 1 of 7
+Unreported: ~4 blocks in stack trace record 1 of 7
  ~512 bytes (~512 requested / ~0 slop)
  35.96% of the heap (35.96% cumulative);  35.96% of unreported (35.96% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
-Unreported: 1 block in block group 2 of 7
+Unreported: 1 block in stack trace record 2 of 7
  256 bytes (256 requested / 0 slop)
  17.98% of the heap (53.93% cumulative);  17.98% of unreported (53.93% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
-Unreported: 1 block in block group 3 of 7
+Unreported: 1 block in stack trace record 3 of 7
  144 bytes (144 requested / 0 slop)
  10.11% of the heap (64.04% cumulative);  10.11% of unreported (64.04% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
-Unreported: 1 block in block group 4 of 7
+Unreported: 1 block in stack trace record 4 of 7
  128 bytes (128 requested / 0 slop)
  8.99% of the heap (73.03% cumulative);  8.99% of unreported (73.03% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
-Unreported: ~1 block in block group 5 of 7
+Unreported: ~1 block in stack trace record 5 of 7
  ~128 bytes (~128 requested / ~0 slop)
  8.99% of the heap (82.02% cumulative);  8.99% of unreported (82.02% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
-Unreported: ~1 block in block group 6 of 7
+Unreported: ~1 block in stack trace record 6 of 7
  ~128 bytes (~128 requested / ~0 slop)
  8.99% of the heap (91.01% cumulative);  8.99% of unreported (91.01% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
-Unreported: ~1 block in block group 7 of 7
+Unreported: ~1 block in stack trace record 7 of 7
  ~128 bytes (~128 requested / ~0 slop)
  8.99% of the heap (100.00% cumulative);  8.99% of unreported (100.00% cumulative)
  Allocated at
    (stack omitted due to test mode)
 
 ------------------------------------------------------------------
-Once-reported blocks
+Once-reported stack trace records
 ------------------------------------------------------------------
 
 (none)
 
 ------------------------------------------------------------------
 Summary
 ------------------------------------------------------------------