Bug 820652 (part 5) - DMD: Store the block address in LiveBlock. r+a=jlebar
author     Nicholas Nethercote <nnethercote@mozilla.com>
date       Wed, 12 Dec 2012 21:16:28 -0800
changeset  117654 dfbe0ae6fd8a02ea8f45818b2e38c353d47c6928
parent     117653 3f3f1cd001148196b9bca64de3b1420e83411be6
child      117655 32a54dfc62e845b0fd018587124418f3fa93b2f6
push id    47
push user  ryanvm@gmail.com
push date  Tue, 18 Dec 2012 03:10:02 +0000
bugs       820652
milestone  18.0
memory/replace/dmd/DMD.cpp
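The crux of the change: each LiveBlock now records its own address (mPtr), so gLiveBlockTable can become a js::HashSet whose element type doubles as its own hash policy, instead of a js::HashMap that stored the address a second time as a separate key. Below is a minimal sketch of that pattern, not the DMD code itself; Block and BlockTable are illustrative names, and InfallibleAllocPolicy stands for DMD's infallible allocation policy defined earlier in DMD.cpp.

  #include "js/HashTable.h"
  #include "mozilla/HashFunctions.h"

  class Block
  {
    const void* mPtr;   // the block's address, stored in the element itself

  public:
    explicit Block(const void* aPtr) : mPtr(aPtr) {}

    // Hash policy: entries are hashed and matched by address alone.
    typedef const void* Lookup;

    static uint32_t hash(const void* const& aPtr)
    {
      return mozilla::HashGeneric(aPtr);
    }

    static bool match(const Block& aB, const void* const& aPtr)
    {
      return aB.mPtr == aPtr;
    }
  };

  // The element type and the hash policy are the same class, so the table
  // stores no separate key per entry.
  typedef js::HashSet<Block, Block, InfallibleAllocPolicy> BlockTable;

Insertion still supplies the lookup key alongside the element, as in putNew(ptr, block); lookups and iteration then deal in Block directly, with no .key/.value pair.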
--- a/memory/replace/dmd/DMD.cpp
+++ b/memory/replace/dmd/DMD.cpp
@@ -623,19 +623,22 @@ public:
 protected:
   // Live blocks can be reported in two ways.
   // - The most common is via a memory reporter traversal -- the block is
   //   reported when the reporter runs, causing DMD to mark it as reported,
   //   and DMD must clear the marking once it has finished its analysis.
   // - Less common are ones that are reported immediately on allocation.  DMD
   //   must *not* clear the markings of these blocks once it has finished its
   //   analysis.  The |mReportedOnAlloc| field is set for such blocks.
-  const StackTrace* mReportStackTrace;  // nullptr if unreported
-  const char*       mReporterName;      // gUnreportedName if unreported
-  bool              mReportedOnAlloc;   // true if block was reported
+  //
+  // These fields are not part of LiveBlock's key in the table; they act as
+  // the entry's value, so it's ok for them to be |mutable|.
+  mutable const StackTrace* mReportStackTrace;  // nullptr if unreported
+  mutable const char*       mReporterName;      // gUnreportedName if unreported
+  mutable bool              mReportedOnAlloc;   // true if block was reported
 
 public:
   LiveBlockKey(const StackTrace* aAllocStackTrace)
     : mAllocStackTrace(aAllocStackTrace),
       mReportStackTrace(nullptr),
       mReporterName(gUnreportedName),
       mReportedOnAlloc(false)
   {
@@ -746,58 +749,74 @@ public:
            aA.mReporterName1     == aB.mReporterName1 &&
            aA.mReporterName2     == aB.mReporterName2;
   }
 };
 
 // A live heap block.
 class LiveBlock : public LiveBlockKey
 {
-  static const size_t kReqBits = sizeof(size_t) * 8 - 1;    // 31 or 63
+  const void*  mPtr;
 
   // This assumes that we'll never request an allocation of 2 GiB or more on
   // 32-bit platforms.
+  static const size_t kReqBits = sizeof(size_t) * 8 - 1;    // 31 or 63
   const size_t mReqSize:kReqBits; // size requested
   const size_t mSampled:1;        // was this block sampled? (if so, slop == 0)
 
 public:
-  LiveBlock(size_t aReqSize, const StackTrace* aAllocStackTrace, bool aSampled)
+  LiveBlock(const void* aPtr, size_t aReqSize,
+            const StackTrace* aAllocStackTrace, bool aSampled)
     : LiveBlockKey(aAllocStackTrace),
+      mPtr(aPtr),
       mReqSize(aReqSize),
       mSampled(aSampled)
   {
     if (mReqSize != aReqSize) {
       MOZ_CRASH();              // overflowed mReqSize
     }
   }
 
   size_t ReqSize() const { return mReqSize; }
 
   // Sampled blocks always have zero slop.
-  size_t SlopSize(const void* aPtr) const
+  size_t SlopSize() const
   {
-    return mSampled ? 0 : MallocSizeOf(aPtr) - mReqSize;
+    return mSampled ? 0 : MallocSizeOf(mPtr) - mReqSize;
   }
 
-  size_t UsableSize(const void* aPtr) const
+  size_t UsableSize() const
   {
-    return mSampled ? mReqSize : MallocSizeOf(aPtr);
+    return mSampled ? mReqSize : MallocSizeOf(mPtr);
   }
 
   bool IsSampled() const { return mSampled; }
 
-  void Report(Thread* aT, const void* aPtr, const char* aReporterName,
-              bool aReportedOnAlloc);
+  // This is |const| thanks to the |mutable| fields above.
+  void Report(Thread* aT, const char* aReporterName,
+              bool aOnAlloc) const;
+
+  void UnreportIfNotReportedOnAlloc() const;
+
+  // Hash policy.
+
+  typedef const void* Lookup;
 
-  void UnreportIfNotReportedOnAlloc();
+  static uint32_t hash(const void* const& aPtr)
+  {
+    return mozilla::HashGeneric(aPtr);
+  }
+
+  static bool match(const LiveBlock& aB, const void* const& aPtr)
+  {
+    return aB.mPtr == aPtr;
+  }
 };
 
-// Nb: js::DefaultHasher<void*> is a high quality hasher.
-typedef js::HashMap<const void*, LiveBlock, js::DefaultHasher<const void*>,
-                    InfallibleAllocPolicy> LiveBlockTable;
+typedef js::HashSet<LiveBlock, LiveBlock, InfallibleAllocPolicy> LiveBlockTable;
 static LiveBlockTable* gLiveBlockTable = nullptr;
 
 //---------------------------------------------------------------------------
 // malloc/free callbacks
 //---------------------------------------------------------------------------
 
 static size_t gSampleBelowSize = 0;
 static size_t gSmallBlockActualSizeCounter = 0;
@@ -820,22 +840,23 @@ AllocCallback(void* aPtr, size_t aReqSiz
     // If this allocation is smaller than the sample-below size, increment the
     // cumulative counter.  Then, if that counter now exceeds the sample size,
     // blame this allocation for gSampleBelowSize bytes.  This precludes the
     // measurement of slop.
     gSmallBlockActualSizeCounter += actualSize;
     if (gSmallBlockActualSizeCounter >= gSampleBelowSize) {
       gSmallBlockActualSizeCounter -= gSampleBelowSize;
 
-      LiveBlock b(gSampleBelowSize, StackTrace::Get(aT), /* sampled */ true);
+      LiveBlock b(aPtr, gSampleBelowSize, StackTrace::Get(aT),
+                  /* sampled */ true);
       (void)gLiveBlockTable->putNew(aPtr, b);
     }
   } else {
     // If this block size is larger than the sample size, record it exactly.
-    LiveBlock b(aReqSize, StackTrace::Get(aT), /* sampled */ false);
+    LiveBlock b(aPtr, aReqSize, StackTrace::Get(aT), /* sampled */ false);
     (void)gLiveBlockTable->putNew(aPtr, b);
   }
 }
 
 static void
 FreeCallback(void* aPtr, Thread* aT)
 {
   MOZ_ASSERT(gIsDMDRunning);
@@ -1011,20 +1032,20 @@ public:
   {}
 
   size_t Req()    const { return mReq; }
   size_t Slop()   const { return mSlop; }
   size_t Usable() const { return mReq + mSlop; }
 
   bool IsSampled() const { return mSampled; }
 
-  void Add(const void* aPtr, const LiveBlock& aB)
+  void Add(const LiveBlock& aB)
   {
     mReq  += aB.ReqSize();
-    mSlop += aB.SlopSize(aPtr);
+    mSlop += aB.SlopSize();
     mSampled = mSampled || aB.IsSampled();
   }
 
   void Add(const GroupSize& aGroupSize)
   {
     mReq  += aGroupSize.Req();
     mSlop += aGroupSize.Slop();
     mSampled = mSampled || aGroupSize.IsSampled();
@@ -1056,22 +1077,21 @@ protected:
 public:
   BlockGroup()
     : mNumBlocks(0),
       mGroupSize()
   {}
 
   const GroupSize& GroupSize() const { return mGroupSize; }
 
-  // The |const| qualifier is something of a lie, but is necessary so this type
-  // can be used in js::HashSet, and it fits with the |mutable| fields above.
-  void Add(const void* aPtr, const LiveBlock& aB) const
+  // This is |const| thanks to the |mutable| fields above.
+  void Add(const LiveBlock& aB) const
   {
     mNumBlocks++;
-    mGroupSize.Add(aPtr, aB);
+    mGroupSize.Add(aB);
   }
 
   static const char* const kName;   // for PrintSortedGroups
 };
 
 const char* const BlockGroup::kName = "block";
 
 // A group of one or more live heap blocks with a common LiveBlockKey.
@@ -1224,18 +1244,17 @@ public:
     : mPc(aPc),
       mNumBlocks(0),
       mNumBlockGroups(0),
       mGroupSize()
   {}
 
   const GroupSize& GroupSize() const { return mGroupSize; }
 
-  // The |const| qualifier is something of a lie, but is necessary so this type
-  // can be used in js::HashSet, and it fits with the |mutable| fields above.
+  // This is |const| thanks to the |mutable| fields above.
   void Add(const LiveBlockGroup& aBg) const
   {
     mNumBlocks += aBg.mNumBlocks;
     mNumBlockGroups++;
     mGroupSize.Add(aBg.mGroupSize);
   }
 
   void Print(const Writer& aWriter, uint32_t aM, uint32_t aN,
@@ -1248,17 +1267,17 @@ public:
     const FrameGroup* const a = *static_cast<const FrameGroup* const*>(aA);
     const FrameGroup* const b = *static_cast<const FrameGroup* const*>(aB);
 
     return GroupSize::Cmp(a->mGroupSize, b->mGroupSize);
   }
 
   static const char* const kName;   // for PrintSortedGroups
 
-  // Hash policy
+  // Hash policy.
 
   typedef const void* Lookup;
 
   static uint32_t hash(const void* const& aPc)
   {
     return mozilla::HashGeneric(aPc);
   }
 
@@ -1524,40 +1543,39 @@ Init(const malloc_table_t* aMallocTable)
   gIsDMDRunning = true;
 }
 
 //---------------------------------------------------------------------------
 // DMD reporting and unreporting
 //---------------------------------------------------------------------------
 
 void
-LiveBlock::Report(Thread* aT, const void* aPtr, const char* aReporterName,
-                  bool aOnAlloc)
+LiveBlock::Report(Thread* aT, const char* aReporterName, bool aOnAlloc) const
 {
   if (IsReported()) {
     DoubleReportBlockKey doubleReportKey(mAllocStackTrace,
                                          mReportStackTrace, StackTrace::Get(aT),
                                          mReporterName, aReporterName);
     DoubleReportBlockGroupTable::AddPtr p =
       gDoubleReportBlockGroupTable->lookupForAdd(doubleReportKey);
     if (!p) {
       DoubleReportBlockGroup bg(doubleReportKey);
       (void)gDoubleReportBlockGroupTable->add(p, bg);
     }
-    p->Add(aPtr, *this);
+    p->Add(*this);
 
   } else {
     mReporterName     = aReporterName;
     mReportStackTrace = StackTrace::Get(aT);
     mReportedOnAlloc  = aOnAlloc;
   }
 }
 
 void
-LiveBlock::UnreportIfNotReportedOnAlloc()
+LiveBlock::UnreportIfNotReportedOnAlloc() const
 {
   if (!mReportedOnAlloc) {
     mReporterName     = gUnreportedName;
     mReportStackTrace = nullptr;
   }
 }
 
 static void
@@ -1568,17 +1586,17 @@ ReportHelper(const void* aPtr, const cha
   }
 
   Thread* t = Thread::Fetch();
 
   AutoBlockIntercepts block(t);
   AutoLockState lock;
 
   if (LiveBlockTable::Ptr p = gLiveBlockTable->lookup(aPtr)) {
-    p->value.Report(t, aPtr, aReporterName, aOnAlloc);
+    p->Report(t, aReporterName, aOnAlloc);
   } else {
     // We have no record of the block.  Do nothing.  Either:
     // - We're sampling and we skipped this block.  This is likely.
     // - It's a bogus pointer.  This is unlikely because Report() is almost
     //   always called in conjunction with a malloc_size_of-style function.
   }
 }
 
@@ -1726,17 +1744,17 @@ SizeOf(Sizes* aSizes)
 static void
 ClearState()
 {
   // Unreport all blocks, except those that were reported on allocation,
   // because they need to keep their reported marking.
   for (LiveBlockTable::Range r = gLiveBlockTable->all();
        !r.empty();
        r.popFront()) {
-    r.front().value.UnreportIfNotReportedOnAlloc();
+    r.front().UnreportIfNotReportedOnAlloc();
   }
 
   // Clear errors.
   gDoubleReportBlockGroupTable->finish();
   gDoubleReportBlockGroupTable->init();
 }
 
 MOZ_EXPORT void
@@ -1765,31 +1783,30 @@ Dump(Writer aWriter)
   (void)reportedLiveBlockGroupTable.init(1024);
   size_t reportedUsableSize = 0;
 
   bool anyBlocksSampled = false;
 
   for (LiveBlockTable::Range r = gLiveBlockTable->all();
        !r.empty();
        r.popFront()) {
-    const void* pc = r.front().key;
-    const LiveBlock& b = r.front().value;
+    const LiveBlock& b = r.front();
 
     size_t& size = !b.IsReported() ? unreportedUsableSize : reportedUsableSize;
-    size += b.UsableSize(pc);
+    size += b.UsableSize();
 
     LiveBlockGroupTable& table = !b.IsReported()
                                ? unreportedLiveBlockGroupTable
                                : reportedLiveBlockGroupTable;
     LiveBlockGroupTable::AddPtr p = table.lookupForAdd(b);
     if (!p) {
       LiveBlockGroup bg(b);
       (void)table.add(p, bg);
     }
-    p->Add(pc, b);
+    p->Add(b);
 
     anyBlocksSampled = anyBlocksSampled || b.IsSampled();
   }
   size_t totalUsableSize = unreportedUsableSize + reportedUsableSize;
 
   WriteTitle("Invocation\n");
   W("$DMD = '%s'\n", gDMDEnvVar);
   W("Sample-below size = %lld\n\n", (long long)(gSampleBelowSize));