Bug 820652 (part 4) - DMD: Inline BlockSize into LiveBlock. r=jlebar.
author: Nicholas Nethercote <nnethercote@mozilla.com>
date: Wed, 12 Dec 2012 20:16:20 -0800
changeset: 125109 2c2785739e2563840ceb7beaedad4343201bea58
parent: 125108 f54a6f692591a32581373200fbe4618cc13a3854
child: 125110 5ac16858d004081963395e018ef4d4c1423429f1
push id: 2151
push user: lsblakk@mozilla.com
push date: Tue, 19 Feb 2013 18:06:57 +0000
treeherder: mozilla-beta@4952e88741ec
reviewers: jlebar
bugs: 820652
milestone: 20.0a1
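The patch below removes the separate BlockSize class and folds its two bit-fields (the requested size and the sampled flag) directly into LiveBlock, renaming the accessors Req/Slop/Usable to ReqSize/SlopSize/UsableSize and adding an overflow check on the requested size. The following standalone sketch (hypothetical names such as PackedSize; not DMD's actual code) illustrates the underlying layout: the requested size occupies all but one bit of a size_t and the sampled flag occupies the remaining bit, so both fields together still fit in a single word on common ABIs, and a request too large for the narrow bit-field is caught by comparing the truncated value back against the original.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

class PackedSize
{
  // All but one bit of a size_t: 31 bits on 32-bit platforms, 63 on 64-bit.
  static const size_t kReqBits = sizeof(size_t) * 8 - 1;

  size_t mReqSize : kReqBits;   // size requested
  size_t mSampled : 1;          // was this block sampled?

public:
  PackedSize(size_t aReqSize, bool aSampled)
    : mReqSize(aReqSize), mSampled(aSampled)
  {
    // Mirrors the patch's overflow check: the bit-field silently truncates,
    // so a request of 2^kReqBits bytes or more is detected by comparing back.
    if (mReqSize != aReqSize) {
      abort();
    }
  }

  size_t ReqSize() const { return mReqSize; }
  bool IsSampled() const { return mSampled; }
};

// On common ABIs both bit-fields share a single size_t-sized allocation unit.
static_assert(sizeof(PackedSize) == sizeof(size_t),
              "both bit-fields pack into a single word");

int main()
{
  PackedSize p(4096, /* aSampled = */ true);
  printf("req=%zu sampled=%d\n", p.ReqSize(), (int)p.IsSampled());
  return 0;
}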
memory/replace/dmd/DMD.cpp
--- a/memory/replace/dmd/DMD.cpp
+++ b/memory/replace/dmd/DMD.cpp
@@ -778,58 +778,51 @@ public:
     return aA.mAllocStackTrace   == aB.mAllocStackTrace &&
            aA.mReportStackTrace1 == aB.mReportStackTrace1 &&
            aA.mReportStackTrace2 == aB.mReportStackTrace2 &&
            aA.mReporterName1     == aB.mReporterName1 &&
            aA.mReporterName2     == aB.mReporterName2;
   }
 };
 
-class BlockSize
+// A live heap block.
+class LiveBlock : public LiveBlockKey
 {
   static const size_t kReqBits = sizeof(size_t) * 8 - 1;    // 31 or 63
 
   // This assumes that we'll never request an allocation of 2 GiB or more on
   // 32-bit platforms.
-  const size_t mReq:kReqBits;   // size requested
-  const size_t mSampled:1;      // was this block sampled?  (if so, slop == 0)
-
-public:
-  BlockSize(size_t aReq, bool aSampled)
-    : mReq(aReq),
-      mSampled(aSampled)
-  {}
-
-  size_t Req() const { return mReq; }
-
-  // Sampled blocks always have zero slop.
-  size_t Slop(const void* aPtr) const
-  {
-    return mSampled ? 0 : MallocSizeOf(aPtr) - mReq;
-  }
-
-  size_t Usable(const void* aPtr) const
-  {
-    return mSampled ? mReq : MallocSizeOf(aPtr);
-  }
-
-  bool IsSampled() const { return mSampled; }
-};
-
-// A live heap block.
-class LiveBlock : public LiveBlockKey
-{
-public:
-  const BlockSize mBlockSize;
+  const size_t mReqSize:kReqBits; // size requested
+  const size_t mSampled:1;        // was this block sampled? (if so, slop == 0)
 
 public:
   LiveBlock(size_t aReqSize, const StackTrace* aAllocStackTrace, bool aSampled)
     : LiveBlockKey(aAllocStackTrace),
-      mBlockSize(aReqSize, aSampled)
-  {}
+      mReqSize(aReqSize),
+      mSampled(aSampled)
+  {
+    if (mReqSize != aReqSize) {
+      MOZ_CRASH();              // overflowed mReqSize
+    }
+  }
+
+  size_t ReqSize() const { return mReqSize; }
+
+  // Sampled blocks always have zero slop.
+  size_t SlopSize(const void* aPtr) const
+  {
+    return mSampled ? 0 : MallocSizeOf(aPtr) - mReqSize;
+  }
+
+  size_t UsableSize(const void* aPtr) const
+  {
+    return mSampled ? mReqSize : MallocSizeOf(aPtr);
+  }
+
+  bool IsSampled() const { return mSampled; }
 
   void Report(Thread* aT, const void* aPtr, const char* aReporterName,
               bool aReportedOnAlloc);
 
   void UnreportIfNotReportedOnAlloc();
 };
 
 // Nb: js::DefaultHasher<void*> is a high quality hasher.
@@ -1053,21 +1046,21 @@ public:
   {}
 
   size_t Req()    const { return mReq; }
   size_t Slop()   const { return mSlop; }
   size_t Usable() const { return mReq + mSlop; }
 
   bool IsSampled() const { return mSampled; }
 
-  void Add(const void* aPtr, const BlockSize& aBlockSize)
+  void Add(const void* aPtr, const LiveBlock& aB)
   {
-    mReq  += aBlockSize.Req();
-    mSlop += aBlockSize.Slop(aPtr);
-    mSampled = mSampled || aBlockSize.IsSampled();
+    mReq  += aB.ReqSize();
+    mSlop += aB.SlopSize(aPtr);
+    mSampled = mSampled || aB.IsSampled();
   }
 
   void Add(const GroupSize& aGroupSize)
   {
     mReq  += aGroupSize.Req();
     mSlop += aGroupSize.Slop();
     mSampled = mSampled || aGroupSize.IsSampled();
   }
@@ -1103,17 +1096,17 @@ public:
 
   const GroupSize& GroupSize() const { return mGroupSize; }
 
   // The |const| qualifier is something of a lie, but is necessary so this type
   // can be used in js::HashSet, and it fits with the |mutable| fields above.
   void Add(const void* aPtr, const LiveBlock& aB) const
   {
     mNumBlocks++;
-    mGroupSize.Add(aPtr, aB.mBlockSize);
+    mGroupSize.Add(aPtr, aB);
   }
 
   static const char* const kName;   // for PrintSortedGroups
 };
 
 const char* const BlockGroup::kName = "block";
 
 // A group of one or more live heap blocks with a common LiveBlockKey.
@@ -1811,29 +1804,29 @@ Dump(Writer aWriter)
 
   for (LiveBlockTable::Range r = gLiveBlockTable->all();
        !r.empty();
        r.popFront()) {
     const void* pc = r.front().key;
     const LiveBlock& b = r.front().value;
 
     size_t& size = !b.IsReported() ? unreportedUsableSize : reportedUsableSize;
-    size += b.mBlockSize.Usable(pc);
+    size += b.UsableSize(pc);
 
     LiveBlockGroupTable& table = !b.IsReported()
                                ? unreportedLiveBlockGroupTable
                                : reportedLiveBlockGroupTable;
     LiveBlockGroupTable::AddPtr p = table.lookupForAdd(b);
     if (!p) {
       LiveBlockGroup bg(b);
       (void)table.add(p, bg);
     }
     p->Add(pc, b);
 
-    anyBlocksSampled = anyBlocksSampled || b.mBlockSize.IsSampled();
+    anyBlocksSampled = anyBlocksSampled || b.IsSampled();
   }
   size_t totalUsableSize = unreportedUsableSize + reportedUsableSize;
 
   WriteTitle("Invocation\n");
   W("$DMD = '%s'\n", gDMDEnvVar);
   W("Sample-below size = %lld\n\n", (long long)(gSampleBelowSize));
 
   PrintSortedGroups(aWriter, "Double-reported", "double-reported",