Bug 1027921 - Part 2: Switch to nsClassHashtable. r=froydnj
author: Eric Rahm <erahm@mozilla.com>
Mon, 04 Aug 2014 16:15:46 -0700 (2014-08-04)
changeset 197758 88235d9f38823cbcac31f7cf7e8385e3d1c74c23
parent 197757 64e5930f032398edb8c82c78c912e6c6a3ff3f5b
child 197759 9c16b710b1c79e6eb8389a948ba704f33fdb8cd1
push id: 27250
push user: emorley@mozilla.com
push date: Tue, 05 Aug 2014 14:25:40 +0000 (2014-08-05)
treeherder: mozilla-central@2aaedcdf69f6 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: froydnj
bugs: 1027921
milestone34.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1027921 - Part 2: Switch to nsClassHashtable. r=froydnj
xpcom/glue/DeadlockDetector.h
--- a/xpcom/glue/DeadlockDetector.h
+++ b/xpcom/glue/DeadlockDetector.h
@@ -5,19 +5,19 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #ifndef mozilla_DeadlockDetector_h
 #define mozilla_DeadlockDetector_h
 
 #include "mozilla/Attributes.h"
 
 #include <stdlib.h>
 
-#include "plhash.h"
 #include "prlock.h"
 
+#include "nsClassHashtable.h"
 #include "nsTArray.h"
 
 #ifdef NS_TRACE_MALLOC
 #  include "nsTraceMalloc.h"
 #endif  // ifdef NS_TRACE_MALLOC
 
 namespace mozilla {
 
@@ -172,17 +172,18 @@ public:
       mResource = aFrom.mResource;
       mCallContext = aFrom.mCallContext;
       return *this;
     }
   };
   typedef nsTArray<ResourceAcquisition> ResourceAcquisitionArray;
 
 private:
-  typedef nsTArray<PLHashEntry*> HashEntryArray;
+  struct OrderingEntry;
+  typedef nsTArray<OrderingEntry*> HashEntryArray;
   typedef typename HashEntryArray::index_type index_type;
   typedef typename HashEntryArray::size_type size_type;
   static const index_type NoIndex = HashEntryArray::NoIndex;
 
   /**
    * Value type for the ordering table.  Contains the other
    * resources on which an ordering constraint |key < other|
    * exists.  The catch is that we also store the calling context at
@@ -201,117 +202,83 @@ private:
     {
     }
 
     CallStack mFirstSeen; // first site from which the resource appeared
     HashEntryArray mOrderedLT; // this <_o Other
     const T* mResource;
   };
 
-  static void* TableAlloc(void* /*aPool*/, size_t aSize)
-  {
-    return operator new(aSize);
-  }
-  static void TableFree(void* /*aPool*/, void* aItem)
-  {
-    operator delete(aItem);
-  }
-  static PLHashEntry* EntryAlloc(void* /*aPool*/, const void* aKey)
-  {
-    return new PLHashEntry;
-  }
-  static void EntryFree(void* /*aPool*/, PLHashEntry* aEntry, unsigned aFlag)
-  {
-    delete static_cast<T*>(const_cast<void*>(aEntry->key));
-    delete static_cast<OrderingEntry*>(aEntry->value);
-    aEntry->value = 0;
-    if (aFlag == HT_FREE_ENTRY) {
-      delete aEntry;
-    }
-  }
-  static PLHashNumber HashKey(const void* aKey)
-  {
-    return static_cast<PLHashNumber>(NS_PTR_TO_INT32(aKey) >> 2);
-  }
-  static const PLHashAllocOps kAllocOps;
-
   // Hash table "interface" the rest of the code should use
 
-  PLHashEntry** GetEntry(const T* aKey)
-  {
-    return PL_HashTableRawLookup(mOrdering, HashKey(aKey), aKey);
-  }
-
   void PutEntry(T* aKey)
   {
-    PL_HashTableAdd(mOrdering, aKey, new OrderingEntry(aKey));
+    mOrdering.Put(aKey, new OrderingEntry(aKey));
   }
 
   // XXX need these helper methods because OrderingEntry doesn't have
   // XXX access to underlying PLHashEntry
 
   /**
    * Add the order |aFirst <_o aSecond|.
    *
    * WARNING: this does not check whether it's sane to add this
    * order.  In the "best" bad case, when this order already exists,
    * adding it anyway may unnecessarily result in O(n^2) space.  In
    * the "worst" bad case, adding it anyway will cause
    * |InTransitiveClosure()| to diverge.
    */
-  void AddOrder(PLHashEntry* aLT, PLHashEntry* aGT)
+  void AddOrder(OrderingEntry* aLT, OrderingEntry* aGT)
   {
-    static_cast<OrderingEntry*>(aLT->value)->mOrderedLT
+    aLT->mOrderedLT
       .InsertElementSorted(aGT);
   }
 
   /**
    * Return true iff the order |aFirst < aSecond| has been
    * *explicitly* added.
    *
    * Does not consider transitivity.
    */
-  bool IsOrdered(const PLHashEntry* aFirst, const PLHashEntry* aSecond)
+  bool IsOrdered(const OrderingEntry* aFirst, const OrderingEntry* aSecond)
   const
   {
-    const OrderingEntry* entry =
-      static_cast<const OrderingEntry*>(aFirst->value);
-    return entry->mOrderedLT.BinaryIndexOf(aSecond) != NoIndex;
+    return aFirst->mOrderedLT.BinaryIndexOf(aSecond) != NoIndex;
   }
 
   /**
    * Return a pointer to the array of all elements "that" for
    * which the order |this < that| has been explicitly added.
    *
    * NOTE: this does *not* consider transitive orderings.
    */
-  PLHashEntry* const* GetOrders(const PLHashEntry* aEntry) const
+  OrderingEntry* const* GetOrders(const OrderingEntry* aEntry) const
   {
     return
-      static_cast<const OrderingEntry*>(aEntry->value)->mOrderedLT.Elements();
+      aEntry->mOrderedLT.Elements();
   }
 
   /**
    * Return the number of elements "that" for which the order
    * |this < that| has been explicitly added.
    *
    * NOTE: this does *not* consider transitive orderings.
    */
-  size_type NumOrders(const PLHashEntry* aEntry) const
+  size_type NumOrders(const OrderingEntry* aEntry) const
   {
     return
-      static_cast<const OrderingEntry*>(aEntry->value)->mOrderedLT.Length();
+      aEntry->mOrderedLT.Length();
   }
 
   /** Make a ResourceAcquisition out of |aEntry|. */
-  ResourceAcquisition MakeResourceAcquisition(const PLHashEntry* aEntry) const
+  ResourceAcquisition MakeResourceAcquisition(const OrderingEntry* aEntry) const
   {
     return ResourceAcquisition(
-      static_cast<const T*>(aEntry->key),
-      static_cast<const OrderingEntry*>(aEntry->value)->mFirstSeen);
+      aEntry->mResource,
+      aEntry->mFirstSeen);
   }
 
   // Throwaway RAII lock to make the following code safer.
   struct PRAutoLock
   {
     explicit PRAutoLock(PRLock* aLock) : mLock(aLock) { PR_Lock(mLock); }
     ~PRAutoLock() { PR_Unlock(mLock); }
     PRLock* mLock;
@@ -323,39 +290,31 @@ public:
   /**
    * DeadlockDetector
    * Create a new deadlock detector.
    *
    * @param aNumResourcesGuess Guess at approximate number of resources
    *        that will be checked.
    */
   explicit DeadlockDetector(uint32_t aNumResourcesGuess = kDefaultNumBuckets)
+    : mOrdering(aNumResourcesGuess)
   {
-    mOrdering = PL_NewHashTable(aNumResourcesGuess,
-                                HashKey,
-                                PL_CompareValues, PL_CompareValues,
-                                &kAllocOps, 0);
-    if (!mOrdering) {
-      NS_RUNTIMEABORT("couldn't initialize resource ordering table");
-    }
-
     mLock = PR_NewLock();
     if (!mLock) {
       NS_RUNTIMEABORT("couldn't allocate deadlock detector lock");
     }
   }
 
   /**
    * ~DeadlockDetector
    *
    * *NOT* thread safe.
    */
   ~DeadlockDetector()
   {
-    PL_HashTableDestroy(mOrdering);
     PR_DestroyLock(mLock);
   }
 
   /**
    * Add
    * Make the deadlock detector aware of |aResource|.
    *
    * WARNING: The deadlock detector owns |aResource|.
@@ -401,82 +360,84 @@ public:
    */
   ResourceAcquisitionArray* CheckAcquisition(const T* aLast,
                                              const T* aProposed,
                                              const CallStack& aCallContext)
   {
     NS_ASSERTION(aProposed, "null resource");
     PRAutoLock _(mLock);
 
-    PLHashEntry* second = *GetEntry(aProposed);
-    OrderingEntry* e = static_cast<OrderingEntry*>(second->value);
-    if (CallStack::kNone == e->mFirstSeen) {
-      e->mFirstSeen = aCallContext;
+    OrderingEntry* proposed = mOrdering.Get(aProposed);
+    NS_ASSERTION(proposed, "missing ordering entry");
+
+    if (CallStack::kNone == proposed->mFirstSeen) {
+      proposed->mFirstSeen = aCallContext;
     }
 
     if (!aLast) {
       // don't check if |0 < aProposed|; just vamoose
       return 0;
     }
 
-    PLHashEntry* first = *GetEntry(aLast);
+    OrderingEntry* current = mOrdering.Get(aLast);
+    NS_ASSERTION(current, "missing ordering entry");
 
     // this is the crux of the deadlock detector algorithm
 
-    if (first == second) {
+    if (current == proposed) {
       // reflexive deadlock.  fastpath b/c InTransitiveClosure is
       // not applicable here.
       ResourceAcquisitionArray* cycle = new ResourceAcquisitionArray();
       if (!cycle) {
         NS_RUNTIMEABORT("can't allocate dep. cycle array");
       }
-      cycle->AppendElement(MakeResourceAcquisition(first));
+      cycle->AppendElement(MakeResourceAcquisition(current));
       cycle->AppendElement(ResourceAcquisition(aProposed,
                                                aCallContext));
       return cycle;
     }
-    if (InTransitiveClosure(first, second)) {
+    if (InTransitiveClosure(current, proposed)) {
       // we've already established |aLast < aProposed|.  all is well.
       return 0;
     }
-    if (InTransitiveClosure(second, first)) {
+    if (InTransitiveClosure(proposed, current)) {
       // the order |aProposed < aLast| has been deduced, perhaps
       // transitively.  we're attempting to violate that
       // constraint by acquiring resources in the order
       // |aLast < aProposed|, and thus we may deadlock under the
       // right conditions.
-      ResourceAcquisitionArray* cycle = GetDeductionChain(second, first);
+      ResourceAcquisitionArray* cycle = GetDeductionChain(proposed, current);
       // show how acquiring |aProposed| would complete the cycle
       cycle->AppendElement(ResourceAcquisition(aProposed,
                                                aCallContext));
       return cycle;
     }
     // |aLast|, |aProposed| are unordered according to our
     // poset.  this is fine, but we now need to add this
     // ordering constraint.
-    AddOrder(first, second);
+    AddOrder(current, proposed);
     return 0;
   }
 
   /**
    * Return true iff |aTarget| is in the transitive closure of |aStart|
    * over the ordering relation `<_this'.
    *
    * @precondition |aStart != aTarget|
    */
-  bool InTransitiveClosure(const PLHashEntry* aStart,
-                           const PLHashEntry* aTarget) const
+  bool InTransitiveClosure(const OrderingEntry* aStart,
+                           const OrderingEntry* aTarget) const
   {
     if (IsOrdered(aStart, aTarget)) {
       return true;
     }
 
     index_type i = 0;
     size_type len = NumOrders(aStart);
-    for (const PLHashEntry* const* it = GetOrders(aStart); i < len; ++i, ++it) {
+    for (const OrderingEntry* const* it = GetOrders(aStart); i < len; ++i, ++it) {
       if (InTransitiveClosure(*it, aTarget)) {
         return true;
       }
     }
     return false;
   }
 
   /**
@@ -490,79 +451,73 @@ public:
    * depth-first search.
    *
    * Nb: |InTransitiveClosure| could be replaced by this function.
    * However, this one is more expensive because we record the DFS
    * search stack on the heap whereas the other doesn't.
    *
    * @precondition |aStart != aTarget|
    */
-  ResourceAcquisitionArray* GetDeductionChain(const PLHashEntry* aStart,
-                                              const PLHashEntry* aTarget)
+  ResourceAcquisitionArray* GetDeductionChain(const OrderingEntry* aStart,
+                                              const OrderingEntry* aTarget)
   {
     ResourceAcquisitionArray* chain = new ResourceAcquisitionArray();
     if (!chain) {
       NS_RUNTIMEABORT("can't allocate dep. cycle array");
     }
     chain->AppendElement(MakeResourceAcquisition(aStart));
 
     NS_ASSERTION(GetDeductionChain_Helper(aStart, aTarget, chain),
                  "GetDeductionChain called when there's no deadlock");
     return chain;
   }
 
   // precondition: |aStart != aTarget|
   // invariant: |aStart| is the last element in |aChain|
-  bool GetDeductionChain_Helper(const PLHashEntry* aStart,
-                                const PLHashEntry* aTarget,
+  bool GetDeductionChain_Helper(const OrderingEntry* aStart,
+                                const OrderingEntry* aTarget,
                                 ResourceAcquisitionArray* aChain)
   {
     if (IsOrdered(aStart, aTarget)) {
       aChain->AppendElement(MakeResourceAcquisition(aTarget));
       return true;
     }
 
     index_type i = 0;
     size_type len = NumOrders(aStart);
-    for (const PLHashEntry* const* it = GetOrders(aStart); i < len; ++i, ++it) {
+    for (const OrderingEntry* const* it = GetOrders(aStart); i < len; ++i, ++it) {
       aChain->AppendElement(MakeResourceAcquisition(*it));
       if (GetDeductionChain_Helper(*it, aTarget, aChain)) {
         return true;
       }
       aChain->RemoveElementAt(aChain->Length() - 1);
     }
     return false;
   }
 
   /**
    * The partial order on resource acquisitions used by the deadlock
    * detector.
    */
-  PLHashTable* mOrdering;     // T* -> PLHashEntry<OrderingEntry>
+  nsClassHashtable<nsPtrHashKey<const T>, OrderingEntry> mOrdering;
+
 
   /**
    * Protects contentious methods.
    * Nb: can't use mozilla::Mutex since we are used as its deadlock
    * detector.
    */
   PRLock* mLock;
 
 private:
   DeadlockDetector(const DeadlockDetector& aDD) MOZ_DELETE;
   DeadlockDetector& operator=(const DeadlockDetector& aDD) MOZ_DELETE;
 };
 
 
 template<typename T>
-const PLHashAllocOps DeadlockDetector<T>::kAllocOps = {
-  DeadlockDetector<T>::TableAlloc, DeadlockDetector<T>::TableFree,
-  DeadlockDetector<T>::EntryAlloc, DeadlockDetector<T>::EntryFree
-};
-
-
-template<typename T>
 // FIXME bug 456272: tune based on average workload
 const uint32_t DeadlockDetector<T>::kDefaultNumBuckets = 64;
 
 
 } // namespace mozilla
 
 #endif // ifndef mozilla_DeadlockDetector_h