Bug 820652 (part 4) - DMD: Inline BlockSize into LiveBlock. r+a=jlebar
author Nicholas Nethercote <nnethercote@mozilla.com>
Wed, 12 Dec 2012 20:16:20 -0800
changeset 118908 b33ca67de6137349ba6a815d905f79ed720f2a81
parent 118907 1c4ab9d6959d311b0898d28dd1d4ef38c77d851c
child 118909 935851593ad461e22706b7b0350cfb6a6e9e806b
push id 2984
push user ryanvm@gmail.com
push date Tue, 18 Dec 2012 03:08:28 +0000
treeherder mozilla-aurora@68ae24dc739c [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
bugs 820652
milestone 19.0a2
Bug 820652 (part 4) - DMD: Inline BlockSize into LiveBlock. r+a=jlebar
memory/replace/dmd/DMD.cpp
--- a/memory/replace/dmd/DMD.cpp
+++ b/memory/replace/dmd/DMD.cpp
@@ -743,58 +743,51 @@ public:
     return aA.mAllocStackTrace   == aB.mAllocStackTrace &&
            aA.mReportStackTrace1 == aB.mReportStackTrace1 &&
            aA.mReportStackTrace2 == aB.mReportStackTrace2 &&
            aA.mReporterName1     == aB.mReporterName1 &&
            aA.mReporterName2     == aB.mReporterName2;
   }
 };
 
-class BlockSize
+// A live heap block.
+class LiveBlock : public LiveBlockKey
 {
   static const size_t kReqBits = sizeof(size_t) * 8 - 1;    // 31 or 63
 
   // This assumes that we'll never request an allocation of 2 GiB or more on
   // 32-bit platforms.
-  const size_t mReq:kReqBits;   // size requested
-  const size_t mSampled:1;      // was this block sampled?  (if so, slop == 0)
-
-public:
-  BlockSize(size_t aReq, bool aSampled)
-    : mReq(aReq),
-      mSampled(aSampled)
-  {}
-
-  size_t Req() const { return mReq; }
-
-  // Sampled blocks always have zero slop.
-  size_t Slop(const void* aPtr) const
-  {
-    return mSampled ? 0 : MallocSizeOf(aPtr) - mReq;
-  }
-
-  size_t Usable(const void* aPtr) const
-  {
-    return mSampled ? mReq : MallocSizeOf(aPtr);
-  }
-
-  bool IsSampled() const { return mSampled; }
-};
-
-// A live heap block.
-class LiveBlock : public LiveBlockKey
-{
-public:
-  const BlockSize mBlockSize;
+  const size_t mReqSize:kReqBits; // size requested
+  const size_t mSampled:1;        // was this block sampled? (if so, slop == 0)
 
 public:
   LiveBlock(size_t aReqSize, const StackTrace* aAllocStackTrace, bool aSampled)
     : LiveBlockKey(aAllocStackTrace),
-      mBlockSize(aReqSize, aSampled)
-  {}
+      mReqSize(aReqSize),
+      mSampled(aSampled)
+  {
+    if (mReqSize != aReqSize) {
+      MOZ_CRASH();              // overflowed mReqSize
+    }
+  }
+
+  size_t ReqSize() const { return mReqSize; }
+
+  // Sampled blocks always have zero slop.
+  size_t SlopSize(const void* aPtr) const
+  {
+    return mSampled ? 0 : MallocSizeOf(aPtr) - mReqSize;
+  }
+
+  size_t UsableSize(const void* aPtr) const
+  {
+    return mSampled ? mReqSize : MallocSizeOf(aPtr);
+  }
+
+  bool IsSampled() const { return mSampled; }
 
   void Report(Thread* aT, const void* aPtr, const char* aReporterName,
               bool aReportedOnAlloc);
 
   void UnreportIfNotReportedOnAlloc();
 };
 
 // Nb: js::DefaultHasher<void*> is a high quality hasher.
@@ -1018,21 +1011,21 @@ public:
   {}
 
   size_t Req()    const { return mReq; }
   size_t Slop()   const { return mSlop; }
   size_t Usable() const { return mReq + mSlop; }
 
   bool IsSampled() const { return mSampled; }
 
-  void Add(const void* aPtr, const BlockSize& aBlockSize)
+  void Add(const void* aPtr, const LiveBlock& aB)
   {
-    mReq  += aBlockSize.Req();
-    mSlop += aBlockSize.Slop(aPtr);
-    mSampled = mSampled || aBlockSize.IsSampled();
+    mReq  += aB.ReqSize();
+    mSlop += aB.SlopSize(aPtr);
+    mSampled = mSampled || aB.IsSampled();
   }
 
   void Add(const GroupSize& aGroupSize)
   {
     mReq  += aGroupSize.Req();
     mSlop += aGroupSize.Slop();
     mSampled = mSampled || aGroupSize.IsSampled();
   }
@@ -1068,17 +1061,17 @@ public:
 
   const GroupSize& GroupSize() const { return mGroupSize; }
 
   // The |const| qualifier is something of a lie, but is necessary so this type
   // can be used in js::HashSet, and it fits with the |mutable| fields above.
   void Add(const void* aPtr, const LiveBlock& aB) const
   {
     mNumBlocks++;
-    mGroupSize.Add(aPtr, aB.mBlockSize);
+    mGroupSize.Add(aPtr, aB);
   }
 
   static const char* const kName;   // for PrintSortedGroups
 };
 
 const char* const BlockGroup::kName = "block";
 
 // A group of one or more live heap blocks with a common LiveBlockKey.
@@ -1776,29 +1769,29 @@ Dump(Writer aWriter)
 
   for (LiveBlockTable::Range r = gLiveBlockTable->all();
        !r.empty();
        r.popFront()) {
     const void* pc = r.front().key;
     const LiveBlock& b = r.front().value;
 
     size_t& size = !b.IsReported() ? unreportedUsableSize : reportedUsableSize;
-    size += b.mBlockSize.Usable(pc);
+    size += b.UsableSize(pc);
 
     LiveBlockGroupTable& table = !b.IsReported()
                                ? unreportedLiveBlockGroupTable
                                : reportedLiveBlockGroupTable;
     LiveBlockGroupTable::AddPtr p = table.lookupForAdd(b);
     if (!p) {
       LiveBlockGroup bg(b);
       (void)table.add(p, bg);
     }
     p->Add(pc, b);
 
-    anyBlocksSampled = anyBlocksSampled || b.mBlockSize.IsSampled();
+    anyBlocksSampled = anyBlocksSampled || b.IsSampled();
   }
   size_t totalUsableSize = unreportedUsableSize + reportedUsableSize;
 
   WriteTitle("Invocation\n");
   W("$DMD = '%s'\n", gDMDEnvVar);
   W("Sample-below size = %lld\n\n", (long long)(gSampleBelowSize));
 
   PrintSortedGroups(aWriter, "Double-reported", "double-reported",