Bug 820652 (part 1) - DMD: Split BlockKey in two, and fix the ensuing fall-out. r=jlebar.
author Nicholas Nethercote <nnethercote@mozilla.com>
Tue, 11 Dec 2012 19:08:38 -0800
changeset 115984 f5fbf4fc5df282891115bee516bfdb1f373fed03
parent 115983 6464b79a1cf9a03d3bae3ed31c39f803c574fdbc
child 115985 a77821dd8903272559f491197d5acd1f0c2fb26e
push id 19664
push user nnethercote@mozilla.com
push date Thu, 13 Dec 2012 23:31:59 +0000
treeherder mozilla-inbound@8c77de31ddc4
reviewers jlebar
bugs 820652
milestone 20.0a1
memory/replace/dmd/DMD.cpp
--- a/memory/replace/dmd/DMD.cpp
+++ b/memory/replace/dmd/DMD.cpp
@@ -637,185 +637,143 @@ StackTrace::Get(Thread* aT)
 }
 
 //---------------------------------------------------------------------------
 // Heap blocks
 //---------------------------------------------------------------------------
 
 static const char* gUnreportedName = "unreported";
 
-// This is used by both |Block|s and |BlockGroups|.
-class BlockKey
+// This is used by both LiveBlocks and LiveBlockGroups.
+class LiveBlockKey
 {
-protected:
-  enum Kind {
-    Live,               // for all live blocks, reported or not
-    DoubleReport        // for blocks that have been double-reported
-  };
-
-  const Kind mKind;
-
 public:
   const StackTrace* const mAllocStackTrace;     // never null
 
 protected:
-  union
-  {
-    // Blocks can be reported in two ways.
-    // - The most common is via a memory reporter traversal -- the block is
-    //   reported when the reporter runs, causing DMD to mark it as reported,
-    //   and DMD must clear the marking once it has finished its analysis.
-    // - Less common are ones that are reported immediately on allocation.  DMD
-    //   must *not* clear the markings of these blocks once it has finished its
-    //   analysis.  The |mReportedOnAlloc| field is set for such blocks.
-    struct
-    {
-      const StackTrace* mReportStackTrace;  // nullptr if unreported
-      const char*       mReporterName;      // gUnreportedName if unreported
-      bool              mReportedOnAlloc;   // true if block was reported
-    } mLive;                                //   immediately on allocation
-
-    struct
-    {
-      // When double-reports occur we record (and later print) the stack trace
-      // and reporter name of *both* the reporting locations.
-      // Nb: These are really |* const|, but that confuses some compilers.
-      const StackTrace* mReportStackTrace1;   // const, never null
-      const StackTrace* mReportStackTrace2;   // const, never null
-      const char*       mReporterName1;       // const, never gUnreportedName
-      const char*       mReporterName2;       // const, never gUnreportedName
-    } mDoubleReport;
-  };
-
-  // Use these safer accessors where possible instead of raw union accesses.
-
-  #define GETTER(kind, type, name) \
-    type name() const { \
-      MOZ_ASSERT(mKind == kind); \
-      return m##kind.m##name; \
-    }
-  #define GETTER_AND_SETTER(kind, type, name) \
-    GETTER(kind, type, name) \
-    void Set##name(type a##name) { \
-      MOZ_ASSERT(mKind == kind); \
-      m##kind.m##name = a##name; \
-    }
-
-  GETTER_AND_SETTER(Live, const StackTrace*, ReportStackTrace)
-  GETTER_AND_SETTER(Live, const char*,       ReporterName)
-  GETTER_AND_SETTER(Live, bool,              ReportedOnAlloc)
-
-  GETTER(DoubleReport, const StackTrace*, ReportStackTrace1)
-  GETTER(DoubleReport, const StackTrace*, ReportStackTrace2)
-  GETTER(DoubleReport, const char*,       ReporterName1)
-  GETTER(DoubleReport, const char*,       ReporterName2)
-
-  #undef GETTER
-  #undef SETTER
+  // Live blocks can be reported in two ways.
+  // - The most common is via a memory reporter traversal -- the block is
+  //   reported when the reporter runs, causing DMD to mark it as reported,
+  //   and DMD must clear the marking once it has finished its analysis.
+  // - Less common are ones that are reported immediately on allocation.  DMD
+  //   must *not* clear the markings of these blocks once it has finished its
+  //   analysis.  The |mReportedOnAlloc| field is set for such blocks.
+  const StackTrace* mReportStackTrace;  // nullptr if unreported
+  const char*       mReporterName;      // gUnreportedName if unreported
+  bool              mReportedOnAlloc;   // true if block was reported
 
 public:
-  // This constructor is used for |Live| Blocks.
-  BlockKey(const StackTrace* aAllocStackTrace)
-    : mKind(Live),
-      mAllocStackTrace(aAllocStackTrace)
+  LiveBlockKey(const StackTrace* aAllocStackTrace)
+    : mAllocStackTrace(aAllocStackTrace),
+      mReportStackTrace(nullptr),
+      mReporterName(gUnreportedName),
+      mReportedOnAlloc(false)
   {
-    mLive.mReportStackTrace = nullptr;
-    mLive.mReporterName = gUnreportedName;
-    mLive.mReportedOnAlloc = false;
-    MOZ_ASSERT(IsSaneLiveBlock());
+    MOZ_ASSERT(IsSane());
   }
 
-  // This constructor is used for |DoubleReport| Blocks.
-  BlockKey(const StackTrace* aAllocStackTrace,
-           const StackTrace* aReportStackTrace1,
-           const StackTrace* aReportStackTrace2,
-           const char* aReporterName1, const char* aReporterName2)
-    : mKind(DoubleReport),
-      mAllocStackTrace(aAllocStackTrace)
-  {
-    mDoubleReport.mReportStackTrace1 = aReportStackTrace1;
-    mDoubleReport.mReportStackTrace2 = aReportStackTrace2;
-    mDoubleReport.mReporterName1 = aReporterName1;
-    mDoubleReport.mReporterName2 = aReporterName2;
-    MOZ_ASSERT(IsSaneDoubleReportBlock());
-  }
-
-  bool IsSaneLiveBlock() const
+  bool IsSane() const
   {
-    bool hasReporterName = ReporterName() != gUnreportedName;
-    return mKind == Live &&
-           mAllocStackTrace &&
-           (( ReportStackTrace() &&  hasReporterName) ||
-            (!ReportStackTrace() && !hasReporterName && !ReportedOnAlloc()));
+    bool hasReporterName = mReporterName != gUnreportedName;
+    return mAllocStackTrace &&
+           (( mReportStackTrace &&  hasReporterName) ||
+            (!mReportStackTrace && !hasReporterName && !mReportedOnAlloc));
   }
 
-  bool IsSaneDoubleReportBlock() const
-  {
-    return mKind == DoubleReport &&
-           mAllocStackTrace &&
-           ReportStackTrace1() &&
-           ReportStackTrace2() &&
-           ReporterName1() != gUnreportedName &&
-           ReporterName2() != gUnreportedName;
-  }
-
-  bool IsLive() const { return mKind == Live; }
-
   bool IsReported() const
   {
-    MOZ_ASSERT(IsSaneLiveBlock());  // should only call this on live blocks
-    bool isRep = ReporterName() != gUnreportedName;
+    MOZ_ASSERT(IsSane());
+    bool isRep = mReporterName != gUnreportedName;
     return isRep;
   }
 
-  // Quasi-hash policy (used by BlockGroup's hash policy).
+  // Quasi-hash policy (used by LiveBlockGroup's hash policy).
   //
   // Hash() and Match() both assume that identical reporter names have
   // identical pointers.  In practice this always happens because they are
   // static strings (as specified in the NS_MEMORY_REPORTER_MALLOC_SIZEOF_FUN
   // macro).  This is true even for multi-reporters.  (If it ever became
   // untrue, the worst that would happen is that some blocks that should be in
   // the same block group would end up in separate block groups.)
 
-  static uint32_t Hash(const BlockKey& aKey)
+  static uint32_t Hash(const LiveBlockKey& aKey)
   {
-    if (aKey.mKind == Live) {
-      return mozilla::HashGeneric(aKey.mAllocStackTrace,
-                                  aKey.ReportStackTrace(),
-                                  aKey.ReporterName());
-    }
-
-    if (aKey.mKind == DoubleReport) {
-      return mozilla::HashGeneric(aKey.mAllocStackTrace,
-                                  aKey.ReportStackTrace1(),
-                                  aKey.ReportStackTrace2(),
-                                  aKey.ReporterName1(),
-                                  aKey.ReporterName2());
-    }
-
-    MOZ_CRASH();
+    return mozilla::HashGeneric(aKey.mAllocStackTrace,
+                                aKey.mReportStackTrace,
+                                aKey.mReporterName);
   }
 
-  static bool Match(const BlockKey& aA, const BlockKey& aB)
+  static bool Match(const LiveBlockKey& aA, const LiveBlockKey& aB)
   {
-    if (aA.mKind == Live && aB.mKind == Live) {
-      return aA.mAllocStackTrace   == aB.mAllocStackTrace &&
-             aA.ReportStackTrace() == aB.ReportStackTrace() &&
-             aA.ReporterName()     == aB.ReporterName();
-    }
+    return aA.mAllocStackTrace  == aB.mAllocStackTrace &&
+           aA.mReportStackTrace == aB.mReportStackTrace &&
+           aA.mReporterName     == aB.mReporterName;
+  }
+};
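
The two reporting modes described in LiveBlockKey's comment have different
clearing semantics, implemented by LiveBlock::Report() and
UnreportIfNotReportedOnAlloc() later in this patch. A minimal standalone
sketch of those semantics, using simplified, assumed names rather than DMD's
actual types:

    // Not DMD code: a simplified model of the two report modes.
    struct Block {
      const char* mReporterName = "unreported";
      bool mReportedOnAlloc = false;

      void Report(const char* aName, bool aOnAlloc) {
        mReporterName = aName;
        mReportedOnAlloc = aOnAlloc;
      }

      // Mirrors LiveBlock::UnreportIfNotReportedOnAlloc() below.
      void UnreportIfNotReportedOnAlloc() {
        if (!mReportedOnAlloc) {
          mReporterName = "unreported";
        }
      }
    };

    int main() {
      Block traversal, onAlloc;
      traversal.Report("my-reporter", /* aOnAlloc */ false);
      onAlloc.Report("my-reporter", /* aOnAlloc */ true);
      // After each analysis DMD clears only traversal-reported marks:
      traversal.UnreportIfNotReportedOnAlloc();  // reverts to "unreported"
      onAlloc.UnreportIfNotReportedOnAlloc();    // keeps its mark
      return 0;
    }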
+
+// This is used by DoubleReportBlockGroups.
+class DoubleReportBlockKey
+{
+public:
+  const StackTrace* const mAllocStackTrace;     // never null
+
+protected:
+  // When double-reports occur we record (and later print) the stack trace
+  // and reporter name of *both* the reporting locations.
+  const StackTrace* const mReportStackTrace1;   // never null
+  const StackTrace* const mReportStackTrace2;   // never null
+  const char*       const mReporterName1;       // never gUnreportedName
+  const char*       const mReporterName2;       // never gUnreportedName
 
-    if (aA.mKind == DoubleReport && aB.mKind == DoubleReport) {
-      return aA.mAllocStackTrace    == aB.mAllocStackTrace &&
-             aA.ReportStackTrace1() == aB.ReportStackTrace1() &&
-             aA.ReportStackTrace2() == aB.ReportStackTrace2() &&
-             aA.ReporterName1()     == aB.ReporterName1() &&
-             aA.ReporterName2()     == aB.ReporterName2();
-    }
+public:
+  DoubleReportBlockKey(const StackTrace* aAllocStackTrace,
+                       const StackTrace* aReportStackTrace1,
+                       const StackTrace* aReportStackTrace2,
+                       const char* aReporterName1,
+                       const char* aReporterName2)
+    : mAllocStackTrace(aAllocStackTrace),
+      mReportStackTrace1(aReportStackTrace1),
+      mReportStackTrace2(aReportStackTrace2),
+      mReporterName1(aReporterName1),
+      mReporterName2(aReporterName2)
+  {
+    MOZ_ASSERT(IsSane());
+  }
 
-    MOZ_CRASH();  // Nb: aA.mKind should always equal aB.mKind.
+  bool IsSane() const
+  {
+    return mAllocStackTrace &&
+           mReportStackTrace1 &&
+           mReportStackTrace2 &&
+           mReporterName1 != gUnreportedName &&
+           mReporterName2 != gUnreportedName;
+  }
+
+  // Quasi-hash policy (used by DoubleReportBlockGroup's hash policy).
+  //
+  // Hash() and Match() both assume that identical reporter names have
+  // identical pointers.  See LiveBlockKey for more.
+
+  static uint32_t Hash(const DoubleReportBlockKey& aKey)
+  {
+    return mozilla::HashGeneric(aKey.mAllocStackTrace,
+                                aKey.mReportStackTrace1,
+                                aKey.mReportStackTrace2,
+                                aKey.mReporterName1,
+                                aKey.mReporterName2);
+  }
+
+  static bool Match(const DoubleReportBlockKey& aA,
+                    const DoubleReportBlockKey& aB)
+  {
+    return aA.mAllocStackTrace   == aB.mAllocStackTrace &&
+           aA.mReportStackTrace1 == aB.mReportStackTrace1 &&
+           aA.mReportStackTrace2 == aB.mReportStackTrace2 &&
+           aA.mReporterName1     == aB.mReporterName1 &&
+           aA.mReporterName2     == aB.mReporterName2;
   }
 };
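
Both key classes compare reporter names by pointer, not by content, as the
quasi-hash-policy comments above note. A standalone sketch of that
pointer-identity assumption, using std::hash in place of mozilla::HashGeneric
and illustrative names:

    #include <cassert>
    #include <cstdint>
    #include <functional>

    // One static string, hence one address, shared by every use of the name.
    static const char* gName = "my-reporter";

    struct Key {
      const void* mAllocStackTrace;
      const char* mReporterName;

      static uint32_t Hash(const Key& aKey) {
        // Hash the pointer values themselves, not the string contents.
        return uint32_t(std::hash<const void*>()(aKey.mAllocStackTrace) ^
                        std::hash<const void*>()(aKey.mReporterName));
      }

      static bool Match(const Key& aA, const Key& aB) {
        return aA.mAllocStackTrace == aB.mAllocStackTrace &&
               aA.mReporterName == aB.mReporterName;  // pointer equality only
      }
    };

    int main() {
      Key a{nullptr, gName};
      Key b{nullptr, gName};
      assert(Key::Match(a, b) && Key::Hash(a) == Key::Hash(b));
      return 0;
    }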
 
 class BlockSize
 {
   static const size_t kSlopBits = sizeof(size_t) * 8 - 1;  // 31 or 63
 
 public:
@@ -854,37 +812,37 @@ public:
     if (!aA.mSampled &&  aB.mSampled) return -1;
     if ( aA.mSampled && !aB.mSampled) return  1;
 
     return 0;
   }
 };
 
 // A live heap block.
-class Block : public BlockKey
+class LiveBlock : public LiveBlockKey
 {
 public:
   const BlockSize mBlockSize;
 
 public:
-  Block(size_t aReqSize, size_t aSlopSize, const StackTrace* aAllocStackTrace,
-        bool aIsExact)
-    : BlockKey(aAllocStackTrace),
+  LiveBlock(size_t aReqSize, size_t aSlopSize,
+            const StackTrace* aAllocStackTrace, bool aIsExact)
+    : LiveBlockKey(aAllocStackTrace),
       mBlockSize(aReqSize, aSlopSize, aIsExact)
   {}
 
   void Report(Thread* aT, const char* aReporterName, bool aReportedOnAlloc);
 
   void UnreportIfNotReportedOnAlloc();
 };
 
 // Nb: js::DefaultHasher<void*> is a high quality hasher.
-typedef js::HashMap<const void*, Block, js::DefaultHasher<const void*>,
-                    InfallibleAllocPolicy> BlockTable;
-static BlockTable* gLiveBlockTable = nullptr;
+typedef js::HashMap<const void*, LiveBlock, js::DefaultHasher<const void*>,
+                    InfallibleAllocPolicy> LiveBlockTable;
+static LiveBlockTable* gLiveBlockTable = nullptr;
 
 //---------------------------------------------------------------------------
 // malloc/free callbacks
 //---------------------------------------------------------------------------
 
 static size_t gSampleBelowSize = 0;
 static size_t gSmallBlockActualSizeCounter = 0;
 
@@ -907,23 +865,23 @@ AllocCallback(void* aPtr, size_t aReqSiz
     // If this allocation is smaller than the sample-below size, increment the
     // cumulative counter.  Then, if that counter now exceeds the sample size,
     // blame this allocation for gSampleBelowSize bytes.  This precludes the
     // measurement of slop.
     gSmallBlockActualSizeCounter += actualSize;
     if (gSmallBlockActualSizeCounter >= gSampleBelowSize) {
       gSmallBlockActualSizeCounter -= gSampleBelowSize;
 
-      Block b(gSampleBelowSize, /* slopSize */ 0, StackTrace::Get(aT),
-              /* sampled */ true);
+      LiveBlock b(gSampleBelowSize, /* slopSize */ 0, StackTrace::Get(aT),
+                  /* sampled */ true);
       (void)gLiveBlockTable->putNew(aPtr, b);
     }
   } else {
     // If this block size is larger than the sample size, record it exactly.
-    Block b(aReqSize, slopSize, StackTrace::Get(aT), /* sampled */ false);
+    LiveBlock b(aReqSize, slopSize, StackTrace::Get(aT), /* sampled */ false);
     (void)gLiveBlockTable->putNew(aPtr, b);
   }
 }
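
The sampling scheme in AllocCallback() above can be summarized as: small
allocations feed a cumulative counter, and whenever the counter crosses the
sample-below size, one allocation is blamed for a full sample's worth of
bytes. A self-contained sketch, where the assumed constant stands in for
DMD's gSampleBelowSize:

    #include <cstddef>
    #include <cstdio>

    static const size_t kSampleBelowSize = 4096;
    static size_t gCounter = 0;

    // Returns the number of bytes this allocation gets blamed for.
    static size_t RecordAlloc(size_t aActualSize) {
      if (aActualSize >= kSampleBelowSize) {
        return aActualSize;            // large blocks are recorded exactly
      }
      gCounter += aActualSize;
      if (gCounter >= kSampleBelowSize) {
        gCounter -= kSampleBelowSize;  // carry the remainder forward
        return kSampleBelowSize;       // blame this block for a whole sample
      }
      return 0;                        // skipped: counter not yet full
    }

    int main() {
      size_t blamed = 0;
      for (int i = 0; i < 100; i++) {
        blamed += RecordAlloc(100);    // 100 small allocations of 100 bytes
      }
      // Exactly 2 * 4096 = 8192 bytes blamed; the rest stays in the counter.
      printf("blamed %zu of %d bytes\n", blamed, 100 * 100);
      return 0;
    }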
 
 static void
 FreeCallback(void* aPtr, Thread* aT)
 {
   MOZ_ASSERT(gIsDMDRunning);
@@ -1075,131 +1033,204 @@ replace_free(void* aPtr)
   FreeCallback(aPtr, t);
   gMallocTable->free(aPtr);
 }
 
 namespace mozilla {
 namespace dmd {
 
 //---------------------------------------------------------------------------
-// Block groups
+// Live and double-report block groups
 //---------------------------------------------------------------------------
 
-// A group of one or more heap blocks with a common BlockKey.
-class BlockGroup : public BlockKey
+class BlockGroup
 {
-  friend class FrameGroup;      // FrameGroups are created from BlockGroups
-
-  // The (inherited) BlockKey is used as the key in BlockGroupTable, and the
-  // other members constitute the value, so it's ok for them to be |mutable|.
-private:
-  mutable uint32_t  mNumBlocks;     // number of blocks with this BlockKey
+protected:
+  // {Live,DoubleReport}BlockKey serve as the key in
+  // {Live,DoubleReport}BlockGroupTable.  These two fields constitute the value,
+  // so it's ok for them to be |mutable|.
+  mutable uint32_t  mNumBlocks;     // number of blocks with this key
   mutable BlockSize mCombinedSize;  // combined size of those blocks
 
 public:
-  explicit BlockGroup(const BlockKey& aKey)
-    : BlockKey(aKey),
-      mNumBlocks(0),
+  BlockGroup()
+    : mNumBlocks(0),
       mCombinedSize()
   {}
 
   const BlockSize& CombinedSize() const { return mCombinedSize; }
 
   // The |const| qualifier is something of a lie, but is necessary so this type
   // can be used in js::HashSet, and it fits with the |mutable| fields above.
-  void Add(const Block& aB) const
+  void Add(const LiveBlock& aB) const
   {
     mNumBlocks++;
     mCombinedSize.Add(aB.mBlockSize);
   }
 
+  static const char* const kName;   // for PrintSortedGroups
+};
+
+const char* const BlockGroup::kName = "block";
+
+// A group of one or more live heap blocks with a common LiveBlockKey.
+class LiveBlockGroup : public LiveBlockKey, public BlockGroup
+{
+  friend class FrameGroup;      // FrameGroups are created from LiveBlockGroups
+
+public:
+  explicit LiveBlockGroup(const LiveBlockKey& aKey)
+    : LiveBlockKey(aKey),
+      BlockGroup()
+  {}
+
   void Print(const Writer& aWriter, uint32_t aM, uint32_t aN,
              const char* aStr, const char* astr,
              size_t aCategoryUsableSize, size_t aCumulativeUsableSize,
              size_t aTotalUsableSize) const;
 
   static int QsortCmp(const void* aA, const void* aB)
   {
-    const BlockGroup* const a = *static_cast<const BlockGroup* const*>(aA);
-    const BlockGroup* const b = *static_cast<const BlockGroup* const*>(aB);
+    const LiveBlockGroup* const a =
+      *static_cast<const LiveBlockGroup* const*>(aA);
+    const LiveBlockGroup* const b =
+      *static_cast<const LiveBlockGroup* const*>(aB);
 
     return BlockSize::Cmp(a->mCombinedSize, b->mCombinedSize);
   }
 
-  static const char* const kName;   // for PrintSortedGroups
-
   // Hash policy
 
-  typedef BlockKey Lookup;
+  typedef LiveBlockKey Lookup;
 
-  static uint32_t hash(const BlockKey& aKey)
+  static uint32_t hash(const LiveBlockKey& aKey)
   {
-    return BlockKey::Hash(aKey);
+    return LiveBlockKey::Hash(aKey);
   }
 
-  static bool match(const BlockGroup& aBg, const BlockKey& aKey)
+  static bool match(const LiveBlockGroup& aBg, const LiveBlockKey& aKey)
   {
-    return BlockKey::Match(aBg, aKey);
+    return LiveBlockKey::Match(aBg, aKey);
   }
 };
 
-const char* const BlockGroup::kName = "block";
-
-typedef js::HashSet<BlockGroup, BlockGroup, InfallibleAllocPolicy>
-        BlockGroupTable;
-BlockGroupTable* gDoubleReportBlockGroupTable = nullptr;
+typedef js::HashSet<LiveBlockGroup, LiveBlockGroup, InfallibleAllocPolicy>
+        LiveBlockGroupTable;
 
 void
-BlockGroup::Print(const Writer& aWriter, uint32_t aM, uint32_t aN,
-                  const char* aStr, const char* astr,
-                  size_t aCategoryUsableSize, size_t aCumulativeUsableSize,
-                  size_t aTotalUsableSize) const
+LiveBlockGroup::Print(const Writer& aWriter, uint32_t aM, uint32_t aN,
+                      const char* aStr, const char* astr,
+                      size_t aCategoryUsableSize, size_t aCumulativeUsableSize,
+                      size_t aTotalUsableSize) const
 {
   bool showTilde = mCombinedSize.mSampled;
 
   W("%s: %s block%s in block group %s of %s\n",
     aStr,
     Show(mNumBlocks, gBuf1, kBufLen, showTilde), Plural(mNumBlocks),
     Show(aM, gBuf2, kBufLen),
     Show(aN, gBuf3, kBufLen));
 
   W(" %s bytes (%s requested / %s slop)\n",
     Show(mCombinedSize.Usable(), gBuf1, kBufLen, showTilde),
     Show(mCombinedSize.mReq,     gBuf2, kBufLen, showTilde),
     Show(mCombinedSize.mSlop,    gBuf3, kBufLen, showTilde));
 
-  if (mKind == BlockKey::Live) {
-    W(" %4.2f%% of the heap (%4.2f%% cumulative); "
-      " %4.2f%% of %s (%4.2f%% cumulative)\n",
-      Percent(mCombinedSize.Usable(), aTotalUsableSize),
-      Percent(aCumulativeUsableSize, aTotalUsableSize),
-      Percent(mCombinedSize.Usable(), aCategoryUsableSize),
-      astr,
-      Percent(aCumulativeUsableSize, aCategoryUsableSize));
-  }
+  W(" %4.2f%% of the heap (%4.2f%% cumulative); "
+    " %4.2f%% of %s (%4.2f%% cumulative)\n",
+    Percent(mCombinedSize.Usable(), aTotalUsableSize),
+    Percent(aCumulativeUsableSize, aTotalUsableSize),
+    Percent(mCombinedSize.Usable(), aCategoryUsableSize),
+    astr,
+    Percent(aCumulativeUsableSize, aCategoryUsableSize));
 
   W(" Allocated at\n");
   mAllocStackTrace->Print(aWriter);
 
-  if (mKind == BlockKey::Live) {
-    if (IsReported()) {
-      W("\n Reported by '%s' at\n", ReporterName());
-      ReportStackTrace()->Print(aWriter);
-    }
+  if (IsReported()) {
+    W("\n Reported by '%s' at\n", mReporterName);
+    mReportStackTrace->Print(aWriter);
+  }
+
+  W("\n");
+}
+
+// A group of one or more double-reported heap blocks with a common
+// DoubleReportBlockKey.
+class DoubleReportBlockGroup : public DoubleReportBlockKey, public BlockGroup
+{
+public:
+  explicit DoubleReportBlockGroup(const DoubleReportBlockKey& aKey)
+    : DoubleReportBlockKey(aKey),
+      BlockGroup()
+  {}
+
+  void Print(const Writer& aWriter, uint32_t aM, uint32_t aN,
+             const char* aStr, const char* astr,
+             size_t aCategoryUsableSize, size_t aCumulativeUsableSize,
+             size_t aTotalUsableSize) const;
+
+  static int QsortCmp(const void* aA, const void* aB)
+  {
+    const DoubleReportBlockGroup* const a =
+      *static_cast<const DoubleReportBlockGroup* const*>(aA);
+    const DoubleReportBlockGroup* const b =
+      *static_cast<const DoubleReportBlockGroup* const*>(aB);
+
+    return BlockSize::Cmp(a->mCombinedSize, b->mCombinedSize);
+  }
+
+  // Hash policy
+
+  typedef DoubleReportBlockKey Lookup;
 
-  } else if (mKind == BlockKey::DoubleReport) {
-    W("\n Previously reported by '%s' at\n", ReporterName1());
-    ReportStackTrace1()->Print(aWriter);
+  static uint32_t hash(const DoubleReportBlockKey& aKey)
+  {
+    return DoubleReportBlockKey::Hash(aKey);
+  }
+
+  static bool match(const DoubleReportBlockGroup& aBg,
+                    const DoubleReportBlockKey& aKey)
+  {
+    return DoubleReportBlockKey::Match(aBg, aKey);
+  }
+};
+
+typedef js::HashSet<DoubleReportBlockGroup, DoubleReportBlockGroup,
+                    InfallibleAllocPolicy> DoubleReportBlockGroupTable;
+DoubleReportBlockGroupTable* gDoubleReportBlockGroupTable = nullptr;
 
-    W("\n Now reported by '%s' at\n", ReporterName2());
-    ReportStackTrace2()->Print(aWriter);
+void
+DoubleReportBlockGroup::Print(const Writer& aWriter, uint32_t aM, uint32_t aN,
+                              const char* aStr, const char* astr,
+                              size_t aCategoryUsableSize,
+                              size_t aCumulativeUsableSize,
+                              size_t aTotalUsableSize) const
+{
+  bool showTilde = mCombinedSize.mSampled;
 
-  } else {
-    MOZ_NOT_REACHED();
-  }
+  W("%s: %s block%s in block group %s of %s\n",
+    aStr,
+    Show(mNumBlocks, gBuf1, kBufLen, showTilde), Plural(mNumBlocks),
+    Show(aM, gBuf2, kBufLen),
+    Show(aN, gBuf3, kBufLen));
+
+  W(" %s bytes (%s requested / %s slop)\n",
+    Show(mCombinedSize.Usable(), gBuf1, kBufLen, showTilde),
+    Show(mCombinedSize.mReq,     gBuf2, kBufLen, showTilde),
+    Show(mCombinedSize.mSlop,    gBuf3, kBufLen, showTilde));
+
+  W(" Allocated at\n");
+  mAllocStackTrace->Print(aWriter);
+
+  W("\n Previously reported by '%s' at\n", mReporterName1);
+  mReportStackTrace1->Print(aWriter);
+
+  W("\n Now reported by '%s' at\n", mReporterName2);
+  mReportStackTrace2->Print(aWriter);
 
   W("\n");
 }
 
 //---------------------------------------------------------------------------
 // Stack frame groups
 //---------------------------------------------------------------------------
 
@@ -1221,17 +1252,17 @@ public:
       mNumBlockGroups(0),
       mCombinedSize()
   {}
 
   const BlockSize& CombinedSize() const { return mCombinedSize; }
 
   // The |const| qualifier is something of a lie, but is necessary so this type
   // can be used in js::HashSet, and it fits with the |mutable| fields above.
-  void Add(const BlockGroup& aBg) const
+  void Add(const LiveBlockGroup& aBg) const
   {
     mNumBlocks += aBg.mNumBlocks;
     mNumBlockGroups++;
     mCombinedSize.Add(aBg.mCombinedSize);
   }
 
   void Print(const Writer& aWriter, uint32_t aM, uint32_t aN,
              const char* aStr, const char* astr,
@@ -1247,22 +1278,22 @@ public:
   }
 
   static const char* const kName;   // for PrintSortedGroups
 
   // Hash policy
 
   typedef const void* Lookup;
 
-  static uint32_t hash(const Lookup& aPc)
+  static uint32_t hash(const void* const& aPc)
   {
     return mozilla::HashGeneric(aPc);
   }
 
-  static bool match(const FrameGroup& aFg, const Lookup& aPc)
+  static bool match(const FrameGroup& aFg, const void* const& aPc)
   {
     return aFg.mPc == aPc;
   }
 };
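
The signature change in FrameGroup's hash policy above is purely notational:
given |typedef const void* Lookup|, the parameter type |const Lookup&|
already denotes |const void* const&|, so both spellings name the same type.
A minimal sketch demonstrating this, with assumed names:

    typedef const void* Lookup;

    // Both parameters have the identical type: reference to const pointer.
    static bool Same(const Lookup& aA, const void* const& aB) {
      return aA == aB;
    }

    int main() {
      int x = 0;
      return Same(&x, &x) ? 0 : 1;  // returns 0: the addresses match
    }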
 
 const char* const FrameGroup::kName = "frame";
 
 typedef js::HashSet<FrameGroup, FrameGroup, InfallibleAllocPolicy>
@@ -1479,20 +1510,21 @@ Init(const malloc_table_t* aMallocTable)
 
   gSmallBlockActualSizeCounter = 0;
 
   DMD_CREATE_TLS_INDEX(gTlsIndex);
 
   gStackTraceTable = InfallibleAllocPolicy::new_<StackTraceTable>();
   gStackTraceTable->init(8192);
 
-  gLiveBlockTable = InfallibleAllocPolicy::new_<BlockTable>();
+  gLiveBlockTable = InfallibleAllocPolicy::new_<LiveBlockTable>();
   gLiveBlockTable->init(8192);
 
-  gDoubleReportBlockGroupTable = InfallibleAllocPolicy::new_<BlockGroupTable>();
+  gDoubleReportBlockGroupTable =
+    InfallibleAllocPolicy::new_<DoubleReportBlockGroupTable>();
   gDoubleReportBlockGroupTable->init(0);
 
   if (gMode == Test) {
     // OpenTestOrStressFile() can allocate.  So do this before setting
     // gIsDMDRunning so those allocations don't show up in our results.  Once
     // gIsDMDRunning is set we are intercepting malloc et al. in earnest.
     FILE* fp = OpenTestOrStressFile("test.dmd");
     gIsDMDRunning = true;
@@ -1517,68 +1549,60 @@ Init(const malloc_table_t* aMallocTable)
 
   gIsDMDRunning = true;
 }
 
 //---------------------------------------------------------------------------
 // DMD reporting and unreporting
 //---------------------------------------------------------------------------
 
-static void
-AddBlockToBlockGroupTable(BlockGroupTable& aTable, const BlockKey& aKey,
-                          const Block& aBlock)
+void
+LiveBlock::Report(Thread* aT, const char* aReporterName, bool aOnAlloc)
 {
-  BlockGroupTable::AddPtr p = aTable.lookupForAdd(aKey);
-  if (!p) {
-    BlockGroup bg(aKey);
-    (void)aTable.add(p, bg);
-  }
-  p->Add(aBlock);
-}
+  if (IsReported()) {
+    DoubleReportBlockKey doubleReportKey(mAllocStackTrace,
+                                         mReportStackTrace, StackTrace::Get(aT),
+                                         mReporterName, aReporterName);
+    DoubleReportBlockGroupTable::AddPtr p =
+      gDoubleReportBlockGroupTable->lookupForAdd(doubleReportKey);
+    if (!p) {
+      DoubleReportBlockGroup bg(doubleReportKey);
+      (void)gDoubleReportBlockGroupTable->add(p, bg);
+    }
+    p->Add(*this);
 
-void
-Block::Report(Thread* aT, const char* aReporterName, bool aOnAlloc)
-{
-  MOZ_ASSERT(mKind == BlockKey::Live);
-  if (IsReported()) {
-    BlockKey doubleReportKey(mAllocStackTrace,
-                             ReportStackTrace(), StackTrace::Get(aT),
-                             ReporterName(), aReporterName);
-    AddBlockToBlockGroupTable(*gDoubleReportBlockGroupTable,
-                              doubleReportKey, *this);
   } else {
-    SetReporterName(aReporterName);
-    SetReportStackTrace(StackTrace::Get(aT));
-    SetReportedOnAlloc(aOnAlloc);
+    mReporterName     = aReporterName;
+    mReportStackTrace = StackTrace::Get(aT);
+    mReportedOnAlloc  = aOnAlloc;
   }
 }
 
 void
-Block::UnreportIfNotReportedOnAlloc()
+LiveBlock::UnreportIfNotReportedOnAlloc()
 {
-  MOZ_ASSERT(mKind == BlockKey::Live);
-  if (!ReportedOnAlloc()) {
-    SetReporterName(gUnreportedName);
-    SetReportStackTrace(nullptr);
+  if (!mReportedOnAlloc) {
+    mReporterName     = gUnreportedName;
+    mReportStackTrace = nullptr;
   }
 }
 
 static void
 ReportHelper(const void* aPtr, const char* aReporterName, bool aOnAlloc)
 {
   if (!gIsDMDRunning || !aPtr) {
     return;
   }
 
   Thread* t = Thread::Fetch();
 
   AutoBlockIntercepts block(t);
   AutoLockState lock;
 
-  if (BlockTable::Ptr p = gLiveBlockTable->lookup(aPtr)) {
+  if (LiveBlockTable::Ptr p = gLiveBlockTable->lookup(aPtr)) {
     p->value.Report(t, aReporterName, aOnAlloc);
   } else {
     // We have no record of the block.  Do nothing.  Either:
     // - We're sampling and we skipped this block.  This is likely.
     // - It's a bogus pointer.  This is unlikely because Report() is almost
     //   always called in conjunction with a malloc_size_of-style function.
   }
 }
@@ -1594,17 +1618,17 @@ ReportOnAlloc(const void* aPtr, const ch
 {
   ReportHelper(aPtr, aReporterName, /* onAlloc */ true);
 }
 
 //---------------------------------------------------------------------------
 // DMD output
 //---------------------------------------------------------------------------
 
-// This works for both BlockGroups and FrameGroups.
+// This works for LiveBlockGroups, DoubleReportBlockGroups and FrameGroups.
 template <class TGroup>
 static void
 PrintSortedGroups(const Writer& aWriter, const char* aStr, const char* astr,
                   const js::HashSet<TGroup, TGroup, InfallibleAllocPolicy>& aTGroupTable,
                   size_t aCategoryUsableSize, size_t aTotalUsableSize)
 {
   const char* name = TGroup::kName;
   StatusMsg("  creating and sorting %s %s group array...\n", astr, name);
@@ -1650,37 +1674,36 @@ PrintSortedGroups(const Writer& aWriter,
 
   MOZ_ASSERT(aCategoryUsableSize == kNoSize ||
              aCategoryUsableSize == cumulativeUsableSize);
 }
 
 static void
 PrintSortedBlockAndFrameGroups(const Writer& aWriter,
                                const char* aStr, const char* astr,
-                               const BlockGroupTable& aBlockGroupTable,
+                               const LiveBlockGroupTable& aLiveBlockGroupTable,
                                size_t aCategoryUsableSize,
                                size_t aTotalUsableSize)
 {
-  PrintSortedGroups(aWriter, aStr, astr, aBlockGroupTable, aCategoryUsableSize,
-                    aTotalUsableSize);
+  PrintSortedGroups(aWriter, aStr, astr, aLiveBlockGroupTable,
+                    aCategoryUsableSize, aTotalUsableSize);
 
   // Frame groups are totally dependent on vagaries of stack traces, so we
   // can't show them in test mode.
   if (gMode == Test) {
     return;
   }
 
   FrameGroupTable frameGroupTable;
   frameGroupTable.init(2048);
-  for (BlockGroupTable::Range r = aBlockGroupTable.all();
+  for (LiveBlockGroupTable::Range r = aLiveBlockGroupTable.all();
        !r.empty();
        r.popFront()) {
-    const BlockGroup& bg = r.front();
+    const LiveBlockGroup& bg = r.front();
     const StackTrace* st = bg.mAllocStackTrace;
-    MOZ_ASSERT(bg.IsLive());
 
     // A single PC can appear multiple times in a stack trace.  We ignore
     // duplicates by first sorting and then ignoring adjacent duplicates.
     StackTrace sorted(*st);
     sorted.Sort();              // sorts the copy, not the original
     void* prevPc = (void*)intptr_t(-1);
     for (uint32_t i = 0; i < sorted.Length(); i++) {
       void* pc = sorted.Pc(i);
@@ -1732,17 +1755,19 @@ SizeOf(Sizes* aSizes)
     gDoubleReportBlockGroupTable->sizeOfIncludingThis(MallocSizeOf);
 }
 
 static void
 ClearState()
 {
   // Unreport all blocks, except those that were reported on allocation,
   // because they need to keep their reported marking.
-  for (BlockTable::Range r = gLiveBlockTable->all(); !r.empty(); r.popFront()) {
+  for (LiveBlockTable::Range r = gLiveBlockTable->all();
+       !r.empty();
+       r.popFront()) {
     r.front().value.UnreportIfNotReportedOnAlloc();
   }
 
   // Clear errors.
   gDoubleReportBlockGroupTable->finish();
   gDoubleReportBlockGroupTable->init();
 }
 
@@ -1759,52 +1784,61 @@ Dump(Writer aWriter)
   AutoBlockIntercepts block(Thread::Fetch());
   AutoLockState lock;
 
   static int dumpCount = 1;
   StatusMsg("Dump %d {\n", dumpCount++);
 
   StatusMsg("  gathering live block groups...\n");
 
-  BlockGroupTable unreportedBlockGroupTable;
-  (void)unreportedBlockGroupTable.init(1024);
+  LiveBlockGroupTable unreportedLiveBlockGroupTable;
+  (void)unreportedLiveBlockGroupTable.init(1024);
   size_t unreportedUsableSize = 0;
 
-  BlockGroupTable reportedBlockGroupTable;
-  (void)reportedBlockGroupTable.init(1024);
+  LiveBlockGroupTable reportedLiveBlockGroupTable;
+  (void)reportedLiveBlockGroupTable.init(1024);
   size_t reportedUsableSize = 0;
 
   bool anyBlocksSampled = false;
 
-  for (BlockTable::Range r = gLiveBlockTable->all(); !r.empty(); r.popFront()) {
-    const Block& b = r.front().value;
-    if (!b.IsReported()) {
-      unreportedUsableSize += b.mBlockSize.Usable();
-      AddBlockToBlockGroupTable(unreportedBlockGroupTable, b, b);
-    } else {
-      reportedUsableSize += b.mBlockSize.Usable();
-      AddBlockToBlockGroupTable(reportedBlockGroupTable, b, b);
+  for (LiveBlockTable::Range r = gLiveBlockTable->all();
+       !r.empty();
+       r.popFront()) {
+    const LiveBlock& b = r.front().value;
+
+    size_t& size = !b.IsReported() ? unreportedUsableSize : reportedUsableSize;
+    size += b.mBlockSize.Usable();
+
+    LiveBlockGroupTable& table = !b.IsReported()
+                               ? unreportedLiveBlockGroupTable
+                               : reportedLiveBlockGroupTable;
+    LiveBlockGroupTable::AddPtr p = table.lookupForAdd(b);
+    if (!p) {
+      LiveBlockGroup bg(b);
+      (void)table.add(p, bg);
     }
+    p->Add(b);
+
     anyBlocksSampled = anyBlocksSampled || b.mBlockSize.mSampled;
   }
   size_t totalUsableSize = unreportedUsableSize + reportedUsableSize;
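
The loop above uses js::HashMap's lookupForAdd() pattern: find the group for
a key, insert an empty group if absent, then accumulate into it. A rough
equivalent with std::unordered_map and illustrative names:

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <unordered_map>

    // Illustrative stand-in for BlockGroup's Add().
    struct Group {
      unsigned mNumBlocks = 0;
      size_t mCombinedSize = 0;
      void Add(size_t aSize) { mNumBlocks++; mCombinedSize += aSize; }
    };

    int main() {
      std::unordered_map<std::string, Group> table;
      // operator[] inserts an empty Group on first lookup, much like
      // lookupForAdd() followed by add(); every block with the same key
      // joins the same group.
      table["key-A"].Add(64);
      table["key-A"].Add(128);
      table["key-B"].Add(32);
      for (const auto& e : table) {
        printf("%s: %u blocks, %zu bytes\n",
               e.first.c_str(), e.second.mNumBlocks, e.second.mCombinedSize);
      }
      return 0;
    }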
 
   WriteTitle("Invocation\n");
   W("$DMD = '%s'\n", gDMDEnvVar);
   W("Sample-below size = %lld\n\n", (long long)(gSampleBelowSize));
 
   PrintSortedGroups(aWriter, "Double-reported", "double-reported",
                     *gDoubleReportBlockGroupTable, kNoSize, kNoSize);
 
   PrintSortedBlockAndFrameGroups(aWriter, "Unreported", "unreported",
-                                 unreportedBlockGroupTable,
+                                 unreportedLiveBlockGroupTable,
                                  unreportedUsableSize, totalUsableSize);
 
   PrintSortedBlockAndFrameGroups(aWriter, "Reported", "reported",
-                                 reportedBlockGroupTable,
+                                 reportedLiveBlockGroupTable,
                                  reportedUsableSize, totalUsableSize);
 
   bool showTilde = anyBlocksSampled;
   WriteTitle("Summary\n");
   W("Total:      %s bytes\n",
     Show(totalUsableSize, gBuf1, kBufLen, showTilde));
   W("Reported:   %s bytes (%5.2f%%)\n",
     Show(reportedUsableSize, gBuf1, kBufLen, showTilde),
@@ -1840,28 +1874,28 @@ Dump(Writer aWriter)
     W("\nData structures that are cleared after Dump() ends:\n");
 
     W("  Double-report table: %10s bytes (%s entries, %s used)\n",
       Show(sizes.mDoubleReportTable,                 gBuf1, kBufLen),
       Show(gDoubleReportBlockGroupTable->capacity(), gBuf2, kBufLen),
       Show(gDoubleReportBlockGroupTable->count(),    gBuf3, kBufLen));
 
     size_t unreportedSize =
-      unreportedBlockGroupTable.sizeOfIncludingThis(MallocSizeOf);
+      unreportedLiveBlockGroupTable.sizeOfIncludingThis(MallocSizeOf);
     W("  Unreported table:    %10s bytes (%s entries, %s used)\n",
-      Show(unreportedSize,                       gBuf1, kBufLen),
-      Show(unreportedBlockGroupTable.capacity(), gBuf2, kBufLen),
-      Show(unreportedBlockGroupTable.count(),    gBuf3, kBufLen));
+      Show(unreportedSize,                           gBuf1, kBufLen),
+      Show(unreportedLiveBlockGroupTable.capacity(), gBuf2, kBufLen),
+      Show(unreportedLiveBlockGroupTable.count(),    gBuf3, kBufLen));
 
     size_t reportedSize =
-      reportedBlockGroupTable.sizeOfIncludingThis(MallocSizeOf);
+      reportedLiveBlockGroupTable.sizeOfIncludingThis(MallocSizeOf);
     W("  Reported table:      %10s bytes (%s entries, %s used)\n",
-      Show(reportedSize,                       gBuf1, kBufLen),
-      Show(reportedBlockGroupTable.capacity(), gBuf2, kBufLen),
-      Show(reportedBlockGroupTable.count(),    gBuf3, kBufLen));
+      Show(reportedSize,                           gBuf1, kBufLen),
+      Show(reportedLiveBlockGroupTable.capacity(), gBuf2, kBufLen),
+      Show(reportedLiveBlockGroupTable.count(),    gBuf3, kBufLen));
 
     W("\n");
   }
 
   ClearState();
 
   StatusMsg("}\n");
 }