Bug 1057912 - Privatize most of PLDHashTable's fields. r=roc.
author: Nicholas Nethercote <nnethercote@mozilla.com>
Mon, 25 Aug 2014 16:56:33 -0700
changeset 224670 c840195920bd2874bda1c7639bce046878bbf4dd
parent 224669 36da0216faedd8d085b521784cf4eff0e4698363
child 224671 fff4d503ad889519d0e97359fa2536a747a463a8
push id: unknown
push user: unknown
push date: unknown
reviewers: roc
bugs: 1057912
milestone: 34.0a1
Bug 1057912 - Privatize most of PLDHashTable's fields. r=roc.
content/base/src/nsContentList.cpp
content/base/src/nsContentUtils.cpp
dom/plugins/base/nsJSNPRuntime.cpp
js/xpconnect/src/XPCMaps.cpp
js/xpconnect/src/XPCMaps.h
layout/style/nsCSSRuleProcessor.cpp
layout/style/nsHTMLStyleSheet.cpp
layout/style/nsRuleNode.cpp
layout/tables/SpanningCellSorter.cpp
modules/libpref/Preferences.cpp
modules/libpref/nsPrefBranch.cpp
modules/libpref/prefapi.cpp
netwerk/base/src/nsLoadGroup.cpp
netwerk/protocol/http/nsHttp.cpp
parser/htmlparser/nsHTMLEntities.cpp
rdf/base/nsInMemoryDataSource.cpp
uriloader/base/nsDocLoader.cpp
xpcom/base/nsCycleCollector.cpp
xpcom/ds/nsAtomTable.cpp
xpcom/ds/nsPersistentProperties.cpp
xpcom/ds/nsStaticNameTable.cpp
xpcom/glue/nsBaseHashtable.h
xpcom/glue/nsRefPtrHashtable.h
xpcom/glue/nsTHashtable.h
xpcom/glue/pldhash.cpp
xpcom/glue/pldhash.h
--- a/content/base/src/nsContentList.cpp
+++ b/content/base/src/nsContentList.cpp
@@ -988,17 +988,17 @@ nsContentList::RemoveFromHashtable()
 
   if (!gContentListHashTable.ops)
     return;
 
   PL_DHashTableOperate(&gContentListHashTable,
                        &key,
                        PL_DHASH_REMOVE);
 
-  if (gContentListHashTable.entryCount == 0) {
+  if (gContentListHashTable.EntryCount() == 0) {
     PL_DHashTableFinish(&gContentListHashTable);
     gContentListHashTable.ops = nullptr;
   }
 }
 
 void
 nsContentList::BringSelfUpToDate(bool aDoFlush)
 {
@@ -1031,17 +1031,17 @@ nsCacheableFuncStringContentList::Remove
     return;
   }
 
   nsFuncStringCacheKey key(mRootNode, mFunc, mString);
   PL_DHashTableOperate(&gFuncStringContentListHashTable,
                        &key,
                        PL_DHASH_REMOVE);
 
-  if (gFuncStringContentListHashTable.entryCount == 0) {
+  if (gFuncStringContentListHashTable.EntryCount() == 0) {
     PL_DHashTableFinish(&gFuncStringContentListHashTable);
     gFuncStringContentListHashTable.ops = nullptr;
   }
 }
 
 #ifdef DEBUG_CONTENT_LIST
 void
 nsContentList::AssertInSync()
--- a/content/base/src/nsContentUtils.cpp
+++ b/content/base/src/nsContentUtils.cpp
@@ -1776,29 +1776,29 @@ nsContentUtils::Shutdown()
   delete sAtomEventTable;
   sAtomEventTable = nullptr;
   delete sStringEventTable;
   sStringEventTable = nullptr;
   delete sUserDefinedEvents;
   sUserDefinedEvents = nullptr;
 
   if (sEventListenerManagersHash.ops) {
-    NS_ASSERTION(sEventListenerManagersHash.entryCount == 0,
+    NS_ASSERTION(sEventListenerManagersHash.EntryCount() == 0,
                  "Event listener manager hash not empty at shutdown!");
 
     // See comment above.
 
     // However, we have to handle this table differently.  If it still
     // has entries, we want to leak it too, so that we can keep it alive
     // in case any elements are destroyed.  Because if they are, we need
     // their event listener managers to be destroyed too, or otherwise
     // it could leave dangling references in DOMClassInfo's preserved
     // wrapper table.
 
-    if (sEventListenerManagersHash.entryCount == 0) {
+    if (sEventListenerManagersHash.EntryCount() == 0) {
       PL_DHashTableFinish(&sEventListenerManagersHash);
       sEventListenerManagersHash.ops = nullptr;
     }
   }
 
   NS_ASSERTION(!sBlockedScriptRunners ||
                sBlockedScriptRunners->Length() == 0,
                "How'd this happen?");
--- a/dom/plugins/base/nsJSNPRuntime.cpp
+++ b/dom/plugins/base/nsJSNPRuntime.cpp
@@ -269,17 +269,17 @@ OnWrapperDestroyed()
 
       // No more wrappers, and our hash was initialized. Finish the
       // hash to prevent leaking it.
       sJSObjWrappers.finish();
       sJSObjWrappersAccessible = false;
     }
 
     if (sNPObjWrappers.ops) {
-      MOZ_ASSERT(sNPObjWrappers.entryCount == 0);
+      MOZ_ASSERT(sNPObjWrappers.EntryCount() == 0);
 
       // No more wrappers, and our hash was initialized. Finish the
       // hash to prevent leaking it.
       PL_DHashTableFinish(&sNPObjWrappers);
 
       sNPObjWrappers.ops = nullptr;
     }
 
@@ -1758,24 +1758,24 @@ nsNPObjWrapper::GetNewOrUsed(NPP npp, JS
       return nullptr;
     }
     return obj;
   }
 
   entry->mNPObj = npobj;
   entry->mNpp = npp;
 
-  uint32_t generation = sNPObjWrappers.generation;
+  uint32_t generation = sNPObjWrappers.Generation();
 
   // No existing JSObject, create one.
 
   JS::Rooted<JSObject*> obj(cx, ::JS_NewObject(cx, &sNPObjectJSWrapperClass, JS::NullPtr(),
                                                JS::NullPtr()));
 
-  if (generation != sNPObjWrappers.generation) {
+  if (generation != sNPObjWrappers.Generation()) {
       // Reload entry if the JS_NewObject call caused a GC and reallocated
       // the table (see bug 445229). This is guaranteed to succeed.
 
       entry = static_cast<NPObjWrapperHashEntry *>
         (PL_DHashTableOperate(&sNPObjWrappers, npobj, PL_DHASH_LOOKUP));
       NS_ASSERTION(entry && PL_DHASH_ENTRY_IS_BUSY(entry),
                    "Hashtable didn't find what we just added?");
   }
--- a/js/xpconnect/src/XPCMaps.cpp
+++ b/js/xpconnect/src/XPCMaps.cpp
@@ -480,17 +480,17 @@ IID2ThisTranslatorMap::Entry::Match(PLDH
 {
     return ((const nsID*)key)->Equals(((Entry*)entry)->key);
 }
 
 void
 IID2ThisTranslatorMap::Entry::Clear(PLDHashTable *table, PLDHashEntryHdr *entry)
 {
     static_cast<Entry*>(entry)->value = nullptr;
-    memset(entry, 0, table->entrySize);
+    memset(entry, 0, table->EntrySize());
 }
 
 const struct PLDHashTableOps IID2ThisTranslatorMap::Entry::sOps =
 {
     PL_DHashAllocTable,
     PL_DHashFreeTable,
     HashIIDPtrKey,
     Match,
--- a/js/xpconnect/src/XPCMaps.h
+++ b/js/xpconnect/src/XPCMaps.h
@@ -142,17 +142,17 @@ public:
         MOZ_ASSERT(!wrapperInMap || wrapperInMap == wrapper,
                    "About to remove a different wrapper with the same "
                    "nsISupports identity! This will most likely cause serious "
                    "problems!");
 #endif
         PL_DHashTableOperate(mTable, wrapper->GetIdentityObject(), PL_DHASH_REMOVE);
     }
 
-    inline uint32_t Count() {return mTable->entryCount;}
+    inline uint32_t Count() { return mTable->EntryCount(); }
     inline uint32_t Enumerate(PLDHashEnumerator f, void *arg)
         {return PL_DHashTableEnumerate(mTable, f, arg);}
 
     size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
 
     ~Native2WrappedNativeMap();
 private:
     Native2WrappedNativeMap();    // no implementation
@@ -204,17 +204,17 @@ public:
     }
 
     inline void Remove(nsXPCWrappedJSClass* clazz)
     {
         NS_PRECONDITION(clazz,"bad param");
         PL_DHashTableOperate(mTable, &clazz->GetIID(), PL_DHASH_REMOVE);
     }
 
-    inline uint32_t Count() {return mTable->entryCount;}
+    inline uint32_t Count() { return mTable->EntryCount(); }
     inline uint32_t Enumerate(PLDHashEnumerator f, void *arg)
         {return PL_DHashTableEnumerate(mTable, f, arg);}
 
     ~IID2WrappedJSClassMap();
 private:
     IID2WrappedJSClassMap();    // no implementation
     IID2WrappedJSClassMap(int size);
 private:
@@ -261,17 +261,17 @@ public:
     }
 
     inline void Remove(XPCNativeInterface* iface)
     {
         NS_PRECONDITION(iface,"bad param");
         PL_DHashTableOperate(mTable, iface->GetIID(), PL_DHASH_REMOVE);
     }
 
-    inline uint32_t Count() {return mTable->entryCount;}
+    inline uint32_t Count() { return mTable->EntryCount(); }
     inline uint32_t Enumerate(PLDHashEnumerator f, void *arg)
         {return PL_DHashTableEnumerate(mTable, f, arg);}
 
     size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
 
     ~IID2NativeInterfaceMap();
 private:
     IID2NativeInterfaceMap();    // no implementation
@@ -320,17 +320,17 @@ public:
     }
 
     inline void Remove(nsIClassInfo* info)
     {
         NS_PRECONDITION(info,"bad param");
         PL_DHashTableOperate(mTable, info, PL_DHASH_REMOVE);
     }
 
-    inline uint32_t Count() {return mTable->entryCount;}
+    inline uint32_t Count() { return mTable->EntryCount(); }
     inline uint32_t Enumerate(PLDHashEnumerator f, void *arg)
         {return PL_DHashTableEnumerate(mTable, f, arg);}
 
     // ClassInfo2NativeSetMap holds pointers to *some* XPCNativeSets.
     // So we don't want to count those XPCNativeSets, because they are better
     // counted elsewhere (i.e. in XPCJSRuntime::mNativeSetMap, which holds
     // pointers to *all* XPCNativeSets).  Hence the "Shallow".
     size_t ShallowSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
@@ -380,17 +380,17 @@ public:
     }
 
     inline void Remove(nsIClassInfo* info)
     {
         NS_PRECONDITION(info,"bad param");
         PL_DHashTableOperate(mTable, info, PL_DHASH_REMOVE);
     }
 
-    inline uint32_t Count() {return mTable->entryCount;}
+    inline uint32_t Count() { return mTable->EntryCount(); }
     inline uint32_t Enumerate(PLDHashEnumerator f, void *arg)
         {return PL_DHashTableEnumerate(mTable, f, arg);}
 
     size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
 
     ~ClassInfo2WrappedNativeProtoMap();
 private:
     ClassInfo2WrappedNativeProtoMap();    // no implementation
@@ -453,17 +453,17 @@ public:
     inline void Remove(XPCNativeSet* set)
     {
         NS_PRECONDITION(set,"bad param");
 
         XPCNativeSetKey key(set, nullptr, 0);
         PL_DHashTableOperate(mTable, &key, PL_DHASH_REMOVE);
     }
 
-    inline uint32_t Count() {return mTable->entryCount;}
+    inline uint32_t Count() { return mTable->EntryCount(); }
     inline uint32_t Enumerate(PLDHashEnumerator f, void *arg)
         {return PL_DHashTableEnumerate(mTable, f, arg);}
 
     size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
 
     ~NativeSetMap();
 private:
     NativeSetMap();    // no implementation
@@ -520,17 +520,17 @@ public:
         return obj;
     }
 
     inline void Remove(REFNSIID iid)
     {
         PL_DHashTableOperate(mTable, &iid, PL_DHASH_REMOVE);
     }
 
-    inline uint32_t Count() {return mTable->entryCount;}
+    inline uint32_t Count() { return mTable->EntryCount(); }
     inline uint32_t Enumerate(PLDHashEnumerator f, void *arg)
         {return PL_DHashTableEnumerate(mTable, f, arg);}
 
     ~IID2ThisTranslatorMap();
 private:
     IID2ThisTranslatorMap();    // no implementation
     IID2ThisTranslatorMap(int size);
 private:
@@ -557,17 +557,17 @@ public:
         static const struct PLDHashTableOps sOps;
     };
 
     static XPCNativeScriptableSharedMap* newMap(int length);
 
     bool GetNewOrUsed(uint32_t flags, char* name, uint32_t interfacesBitmap,
                       XPCNativeScriptableInfo* si);
 
-    inline uint32_t Count() {return mTable->entryCount;}
+    inline uint32_t Count() { return mTable->EntryCount(); }
     inline uint32_t Enumerate(PLDHashEnumerator f, void *arg)
         {return PL_DHashTableEnumerate(mTable, f, arg);}
 
     ~XPCNativeScriptableSharedMap();
 private:
     XPCNativeScriptableSharedMap();    // no implementation
     XPCNativeScriptableSharedMap(int size);
 private:
@@ -595,17 +595,17 @@ public:
     }
 
     inline void Remove(XPCWrappedNativeProto* proto)
     {
         NS_PRECONDITION(proto,"bad param");
         PL_DHashTableOperate(mTable, proto, PL_DHASH_REMOVE);
     }
 
-    inline uint32_t Count() {return mTable->entryCount;}
+    inline uint32_t Count() { return mTable->EntryCount(); }
     inline uint32_t Enumerate(PLDHashEnumerator f, void *arg)
         {return PL_DHashTableEnumerate(mTable, f, arg);}
 
     ~XPCWrappedNativeProtoMap();
 private:
     XPCWrappedNativeProtoMap();    // no implementation
     XPCWrappedNativeProtoMap(int size);
 private:
--- a/layout/style/nsCSSRuleProcessor.cpp
+++ b/layout/style/nsCSSRuleProcessor.cpp
@@ -2545,17 +2545,17 @@ nsCSSRuleProcessor::RulesMatching(Pseudo
   }
 }
 
 /* virtual */ void
 nsCSSRuleProcessor::RulesMatching(AnonBoxRuleProcessorData* aData)
 {
   RuleCascadeData* cascade = GetRuleCascade(aData->mPresContext);
 
-  if (cascade && cascade->mAnonBoxRules.entryCount) {
+  if (cascade && cascade->mAnonBoxRules.EntryCount()) {
     RuleHashTagTableEntry* entry = static_cast<RuleHashTagTableEntry*>
       (PL_DHashTableOperate(&cascade->mAnonBoxRules, aData->mPseudoTag,
                             PL_DHASH_LOOKUP));
     if (PL_DHASH_ENTRY_IS_BUSY(entry)) {
       nsTArray<RuleValue>& rules = entry->mRules;
       for (RuleValue *value = rules.Elements(), *end = value + rules.Length();
            value != end; ++value) {
         value->mRule->RuleMatched();
@@ -2566,17 +2566,17 @@ nsCSSRuleProcessor::RulesMatching(AnonBo
 }
 
 #ifdef MOZ_XUL
 /* virtual */ void
 nsCSSRuleProcessor::RulesMatching(XULTreeRuleProcessorData* aData)
 {
   RuleCascadeData* cascade = GetRuleCascade(aData->mPresContext);
 
-  if (cascade && cascade->mXULTreeRules.entryCount) {
+  if (cascade && cascade->mXULTreeRules.EntryCount()) {
     RuleHashTagTableEntry* entry = static_cast<RuleHashTagTableEntry*>
       (PL_DHashTableOperate(&cascade->mXULTreeRules, aData->mPseudoTag,
                             PL_DHASH_LOOKUP));
     if (PL_DHASH_ENTRY_IS_BUSY(entry)) {
       NodeMatchContext nodeContext(EventStates(),
                                    nsCSSRuleProcessor::IsLink(aData->mElement));
       nsTArray<RuleValue>& rules = entry->mRules;
       for (RuleValue *value = rules.Elements(), *end = value + rules.Length();
@@ -3504,17 +3504,17 @@ nsCSSRuleProcessor::RefreshRuleCascade(n
         return; /* out of memory */
 
       for (uint32_t i = 0; i < mSheets.Length(); ++i) {
         if (!CascadeSheet(mSheets.ElementAt(i), &data))
           return; /* out of memory */
       }
 
       // Sort the hash table of per-weight linked lists by weight.
-      uint32_t weightCount = data.mRulesByWeight.entryCount;
+      uint32_t weightCount = data.mRulesByWeight.EntryCount();
       nsAutoArrayPtr<PerWeightData> weightArray(new PerWeightData[weightCount]);
       FillWeightArrayData fwData(weightArray);
       PL_DHashTableEnumerate(&data.mRulesByWeight, FillWeightArray, &fwData);
       NS_QuickSort(weightArray, weightCount, sizeof(PerWeightData),
                    CompareWeightData, nullptr);
 
       // Put things into the rule hash.
       // The primary sort is by weight...
--- a/layout/style/nsHTMLStyleSheet.cpp
+++ b/layout/style/nsHTMLStyleSheet.cpp
@@ -495,22 +495,22 @@ nsHTMLStyleSheet::UniqueMappedAttributes
 
 void
 nsHTMLStyleSheet::DropMappedAttributes(nsMappedAttributes* aMapped)
 {
   NS_ENSURE_TRUE_VOID(aMapped);
 
   NS_ASSERTION(mMappedAttrTable.ops, "table uninitialized");
 #ifdef DEBUG
-  uint32_t entryCount = mMappedAttrTable.entryCount - 1;
+  uint32_t entryCount = mMappedAttrTable.EntryCount() - 1;
 #endif
 
   PL_DHashTableOperate(&mMappedAttrTable, aMapped, PL_DHASH_REMOVE);
 
-  NS_ASSERTION(entryCount == mMappedAttrTable.entryCount, "not removed");
+  NS_ASSERTION(entryCount == mMappedAttrTable.EntryCount(), "not removed");
 }
 
 nsIStyleRule*
 nsHTMLStyleSheet::LangRuleFor(const nsString& aLanguage)
 {
   if (!mLangRuleTable.ops) {
     PL_DHashTableInit(&mLangRuleTable, &LangRuleTable_Ops,
                       nullptr, sizeof(LangRuleTableEntry));
--- a/layout/style/nsRuleNode.cpp
+++ b/layout/style/nsRuleNode.cpp
@@ -9000,19 +9000,19 @@ nsRuleNode::SweepChildren(nsTArray<nsRul
   NS_ASSERTION(!(mDependentBits & NS_RULE_NODE_GC_MARK),
                "missing DestroyIfNotMarked() call");
   NS_ASSERTION(HaveChildren(),
                "why call SweepChildren with no children?");
   uint32_t childrenDestroyed = 0;
   nsRuleNode* survivorsWithChildren = nullptr;
   if (ChildrenAreHashed()) {
     PLDHashTable* children = ChildrenHash();
-    uint32_t oldChildCount = children->entryCount;
+    uint32_t oldChildCount = children->EntryCount();
     PL_DHashTableEnumerate(children, SweepHashEntry, &survivorsWithChildren);
-    childrenDestroyed = oldChildCount - children->entryCount;
+    childrenDestroyed = oldChildCount - children->EntryCount();
     if (childrenDestroyed == oldChildCount) {
       PL_DHashTableDestroy(children);
       mChildren.asVoid = nullptr;
     }
   } else {
     for (nsRuleNode** children = ChildrenListPtr(); *children; ) {
       nsRuleNode* next = (*children)->mNextSibling;
       if ((*children)->DestroyIfNotMarked()) {
--- a/layout/tables/SpanningCellSorter.cpp
+++ b/layout/tables/SpanningCellSorter.cpp
@@ -148,30 +148,30 @@ SpanningCellSorter::GetNext(int32_t *aCo
                 ++mEnumerationIndex;
                 return result;
             }
             /* prepare to enumerate the hash */
             mState = ENUMERATING_HASH;
             mEnumerationIndex = 0;
             if (mHashTable.ops) {
                 HashTableEntry **sh =
-                    new HashTableEntry*[mHashTable.entryCount];
+                    new HashTableEntry*[mHashTable.EntryCount()];
                 if (!sh) {
                     // give up
                     mState = DONE;
                     return nullptr;
                 }
                 PL_DHashTableEnumerate(&mHashTable, FillSortedArray, sh);
-                NS_QuickSort(sh, mHashTable.entryCount, sizeof(sh[0]),
+                NS_QuickSort(sh, mHashTable.EntryCount(), sizeof(sh[0]),
                              SortArray, nullptr);
                 mSortedHashTable = sh;
             }
             /* fall through */
         case ENUMERATING_HASH:
-            if (mHashTable.ops && mEnumerationIndex < mHashTable.entryCount) {
+            if (mHashTable.ops && mEnumerationIndex < mHashTable.EntryCount()) {
                 Item *result = mSortedHashTable[mEnumerationIndex]->mItems;
                 *aColSpan = mSortedHashTable[mEnumerationIndex]->mColSpan;
                 NS_ASSERTION(result, "holes in hash table");
 #ifdef DEBUG_SPANNING_CELL_SORTER
                 printf("SpanningCellSorter[%p]:"
                        " returning list for colspan=%d from hash\n",
                        static_cast<void*>(this), *aColSpan);
 #endif
--- a/modules/libpref/Preferences.cpp
+++ b/modules/libpref/Preferences.cpp
@@ -740,17 +740,17 @@ Preferences::GetPreference(PrefSetting* 
     return;
 
   pref_GetPrefFromEntry(entry, aPref);
 }
 
 void
 Preferences::GetPreferences(InfallibleTArray<PrefSetting>* aPrefs)
 {
-  aPrefs->SetCapacity(PL_DHASH_TABLE_CAPACITY(&gHashTable));
+  aPrefs->SetCapacity(gHashTable.Capacity());
   PL_DHashTableEnumerate(&gHashTable, pref_GetPrefs, aPrefs);
 }
 
 NS_IMETHODIMP
 Preferences::GetBranch(const char *aPrefRoot, nsIPrefBranch **_retval)
 {
   nsresult rv;
 
@@ -966,33 +966,33 @@ Preferences::WritePrefFile(nsIFile* aFil
                                        -1,
                                        0600);
   if (NS_FAILED(rv)) 
       return rv;
   rv = NS_NewBufferedOutputStream(getter_AddRefs(outStream), outStreamSink, 4096);
   if (NS_FAILED(rv)) 
       return rv;  
 
-  nsAutoArrayPtr<char*> valueArray(new char*[gHashTable.entryCount]);
-  memset(valueArray, 0, gHashTable.entryCount * sizeof(char*));
+  nsAutoArrayPtr<char*> valueArray(new char*[gHashTable.EntryCount()]);
+  memset(valueArray, 0, gHashTable.EntryCount() * sizeof(char*));
   pref_saveArgs saveArgs;
   saveArgs.prefArray = valueArray;
   saveArgs.saveTypes = SAVE_ALL;
   
   // get the lines that we're supposed to be writing to the file
   PL_DHashTableEnumerate(&gHashTable, pref_savePref, &saveArgs);
     
   /* Sort the preferences to make a readable file on disk */
-  NS_QuickSort(valueArray, gHashTable.entryCount, sizeof(char *), pref_CompareStrings, nullptr);
+  NS_QuickSort(valueArray, gHashTable.EntryCount(), sizeof(char *), pref_CompareStrings, nullptr);
   
   // write out the file header
   outStream->Write(outHeader, sizeof(outHeader) - 1, &writeAmount);
 
   char** walker = valueArray;
-  for (uint32_t valueIdx = 0; valueIdx < gHashTable.entryCount; valueIdx++, walker++) {
+  for (uint32_t valueIdx = 0; valueIdx < gHashTable.EntryCount(); valueIdx++, walker++) {
     if (*walker) {
       outStream->Write(*walker, strlen(*walker), &writeAmount);
       outStream->Write(NS_LINEBREAK, NS_LINEBREAK_LEN, &writeAmount);
       NS_Free(*walker);
     }
   }
 
   // tell the safe output stream to overwrite the real prefs file
--- a/modules/libpref/nsPrefBranch.cpp
+++ b/modules/libpref/nsPrefBranch.cpp
@@ -890,9 +890,9 @@ NS_IMETHODIMP nsRelativeFilePref::GetRel
 }
 
 NS_IMETHODIMP nsRelativeFilePref::SetRelativeToKey(const nsACString& aRelativeToKey)
 {
   mRelativeToKey.Assign(aRelativeToKey);
   return NS_OK;
 }
 
-#undef ENSURE_MAIN_PROCESS
\ No newline at end of file
+#undef ENSURE_MAIN_PROCESS
--- a/modules/libpref/prefapi.cpp
+++ b/modules/libpref/prefapi.cpp
@@ -42,17 +42,17 @@ clearPrefEntry(PLDHashTable *table, PLDH
         if (pref->defaultPref.stringVal)
             PL_strfree(pref->defaultPref.stringVal);
         if (pref->userPref.stringVal)
             PL_strfree(pref->userPref.stringVal);
     }
     // don't need to free this as it's allocated in memory owned by
     // gPrefNameArena
     pref->key = nullptr;
-    memset(entry, 0, table->entrySize);
+    memset(entry, 0, table->EntrySize());
 }
 
 static bool
 matchPrefEntry(PLDHashTable*, const PLDHashEntryHdr* entry,
                const void* key)
 {
     const PrefHashEntry *prefEntry =
         static_cast<const PrefHashEntry*>(entry);
@@ -60,17 +60,17 @@ matchPrefEntry(PLDHashTable*, const PLDH
     if (prefEntry->key == key) return true;
 
     if (!prefEntry->key || !key) return false;
 
     const char *otherKey = reinterpret_cast<const char*>(key);
     return (strcmp(prefEntry->key, otherKey) == 0);
 }
 
-PLDHashTable        gHashTable = { nullptr };
+PLDHashTable        gHashTable;
 static PLArenaPool  gPrefNameArena;
 bool                gDirty = false;
 
 static struct CallbackNode* gCallbacks = nullptr;
 static bool         gIsAnyPrefLocked = false;
 // These are only used during the call to pref_DoCallback
 static bool         gCallbacksInProgress = false;
 static bool         gShouldCleanupDeadNodes = false;
--- a/netwerk/base/src/nsLoadGroup.cpp
+++ b/netwerk/base/src/nsLoadGroup.cpp
@@ -225,17 +225,17 @@ AppendRequestsToArray(PLDHashTable *tabl
 
 NS_IMETHODIMP
 nsLoadGroup::Cancel(nsresult status)
 {
     MOZ_ASSERT(NS_IsMainThread());
 
     NS_ASSERTION(NS_FAILED(status), "shouldn't cancel with a success code");
     nsresult rv;
-    uint32_t count = mRequests.entryCount;
+    uint32_t count = mRequests.EntryCount();
 
     nsAutoTArray<nsIRequest*, 8> requests;
 
     PL_DHashTableEnumerate(&mRequests, AppendRequestsToArray,
                            static_cast<nsTArray<nsIRequest*> *>(&requests));
 
     if (requests.Length() != count) {
         for (uint32_t i = 0, len = requests.Length(); i < len; ++i) {
@@ -296,32 +296,32 @@ nsLoadGroup::Cancel(nsresult status)
         // Remember the first failure and return it...
         if (NS_FAILED(rv) && NS_SUCCEEDED(firstError))
             firstError = rv;
 
         NS_RELEASE(request);
     }
 
 #if defined(DEBUG)
-    NS_ASSERTION(mRequests.entryCount == 0, "Request list is not empty.");
+    NS_ASSERTION(mRequests.EntryCount() == 0, "Request list is not empty.");
     NS_ASSERTION(mForegroundCount == 0, "Foreground URLs are active.");
 #endif
 
     mStatus = NS_OK;
     mIsCanceling = false;
 
     return firstError;
 }
 
 
 NS_IMETHODIMP
 nsLoadGroup::Suspend()
 {
     nsresult rv, firstError;
-    uint32_t count = mRequests.entryCount;
+    uint32_t count = mRequests.EntryCount();
 
     nsAutoTArray<nsIRequest*, 8> requests;
 
     PL_DHashTableEnumerate(&mRequests, AppendRequestsToArray,
                            static_cast<nsTArray<nsIRequest*> *>(&requests));
 
     if (requests.Length() != count) {
         for (uint32_t i = 0, len = requests.Length(); i < len; ++i) {
@@ -363,17 +363,17 @@ nsLoadGroup::Suspend()
     return firstError;
 }
 
 
 NS_IMETHODIMP
 nsLoadGroup::Resume()
 {
     nsresult rv, firstError;
-    uint32_t count = mRequests.entryCount;
+    uint32_t count = mRequests.EntryCount();
 
     nsAutoTArray<nsIRequest*, 8> requests;
 
     PL_DHashTableEnumerate(&mRequests, AppendRequestsToArray,
                            static_cast<nsTArray<nsIRequest*> *>(&requests));
 
     if (requests.Length() != count) {
         for (uint32_t i = 0, len = requests.Length(); i < len; ++i) {
@@ -484,17 +484,17 @@ nsLoadGroup::AddRequest(nsIRequest *requ
 {
     nsresult rv;
 
 #if defined(PR_LOGGING)
     {
         nsAutoCString nameStr;
         request->GetName(nameStr);
         LOG(("LOADGROUP [%x]: Adding request %x %s (count=%d).\n",
-             this, request, nameStr.get(), mRequests.entryCount));
+             this, request, nameStr.get(), mRequests.EntryCount()));
     }
 #endif /* PR_LOGGING */
 
 #ifdef DEBUG
     {
       RequestMapEntry *entry =
           static_cast<RequestMapEntry *>
                      (PL_DHashTableOperate(&mRequests, request,
@@ -597,17 +597,17 @@ nsLoadGroup::RemoveRequest(nsIRequest *r
     NS_ENSURE_ARG_POINTER(request);
     nsresult rv;
 
 #if defined(PR_LOGGING)
     {
         nsAutoCString nameStr;
         request->GetName(nameStr);
         LOG(("LOADGROUP [%x]: Removing request %x %s status %x (count=%d).\n",
-            this, request, nameStr.get(), aStatus, mRequests.entryCount-1));
+            this, request, nameStr.get(), aStatus, mRequests.EntryCount() - 1));
     }
 #endif
 
     // Make sure we have a owning reference to the request we're about
     // to remove.
 
     nsCOMPtr<nsIRequest> kungFuDeathGrip(request);
 
@@ -659,17 +659,17 @@ nsLoadGroup::RemoveRequest(nsIRequest *r
                     Telemetry::HTTP_SUBITEM_FIRST_BYTE_LATENCY_TIME,
                     mDefaultRequestCreationTime, timeStamp);
             }
 
             TelemetryReportChannel(timedChannel, false);
         }
     }
 
-    if (mRequests.entryCount == 0) {
+    if (mRequests.EntryCount() == 0) {
         TelemetryReport();
     }
 
     // Undo any group priority delta...
     if (mPriority != 0)
         RescheduleRequest(request, -mPriority);
 
     nsLoadFlags flags;
@@ -715,17 +715,17 @@ AppendRequestsToCOMArray(PLDHashTable *t
     static_cast<nsCOMArray<nsIRequest>*>(arg)->AppendObject(e->mKey);
     return PL_DHASH_NEXT;
 }
 
 NS_IMETHODIMP
 nsLoadGroup::GetRequests(nsISimpleEnumerator * *aRequests)
 {
     nsCOMArray<nsIRequest> requests;
-    requests.SetCapacity(mRequests.entryCount);
+    requests.SetCapacity(mRequests.EntryCount());
 
     PL_DHashTableEnumerate(&mRequests, AppendRequestsToCOMArray, &requests);
 
     return NS_NewArrayEnumerator(aRequests, requests);
 }
 
 NS_IMETHODIMP
 nsLoadGroup::SetGroupObserver(nsIRequestObserver* aObserver)
--- a/netwerk/protocol/http/nsHttp.cpp
+++ b/netwerk/protocol/http/nsHttp.cpp
@@ -37,17 +37,17 @@ enum {
 // the atom table is destroyed.  The structure and value string are allocated
 // as one contiguous block.
 
 struct HttpHeapAtom {
     struct HttpHeapAtom *next;
     char                 value[1];
 };
 
-static struct PLDHashTable  sAtomTable = {0};
+static struct PLDHashTable  sAtomTable;
 static struct HttpHeapAtom *sHeapAtoms = nullptr;
 static Mutex               *sLock = nullptr;
 
 HttpHeapAtom *
 NewHeapAtom(const char *value) {
     int len = strlen(value);
 
     HttpHeapAtom *a =
--- a/parser/htmlparser/nsHTMLEntities.cpp
+++ b/parser/htmlparser/nsHTMLEntities.cpp
@@ -68,18 +68,18 @@ static const PLDHashTableOps UnicodeToEn
   hashUnicodeValue,
   matchNodeUnicode,
   PL_DHashMoveEntryStub,
   PL_DHashClearEntryStub,
   PL_DHashFinalizeStub,
   nullptr,
 };
 
-static PLDHashTable gEntityToUnicode = { 0 };
-static PLDHashTable gUnicodeToEntity = { 0 };
+static PLDHashTable gEntityToUnicode;
+static PLDHashTable gUnicodeToEntity;
 static nsrefcnt gTableRefCnt = 0;
 
 #define HTML_ENTITY(_name, _value) { #_name, _value },
 static const EntityNode gEntityArray[] = {
 #include "nsHTMLEntityList.h"
 };
 #undef HTML_ENTITY
 
--- a/rdf/base/nsInMemoryDataSource.cpp
+++ b/rdf/base/nsInMemoryDataSource.cpp
@@ -1311,17 +1311,17 @@ InMemoryDataSource::LockedUnassert(nsIRD
                 if (hdr) {
                     Entry* entry = reinterpret_cast<Entry*>(hdr);
                     entry->mNode = aProperty;
                     entry->mAssertions = next->mNext;
                 }
             }
             else {
                 // If this second-level hash empties out, clean it up.
-                if (!root->u.hash.mPropertyHash->entryCount) {
+                if (!root->u.hash.mPropertyHash->EntryCount()) {
                     root->Release();
                     SetForwardArcs(aSource, nullptr);
                 }
             }
         }
         else {
             prev->mNext = next->mNext;
         }
@@ -1656,17 +1656,17 @@ InMemoryDataSource::ResourceEnumerator(P
     return PL_DHASH_NEXT;
 }
 
 
 NS_IMETHODIMP
 InMemoryDataSource::GetAllResources(nsISimpleEnumerator** aResult)
 {
     nsCOMArray<nsIRDFNode> nodes;
-    nodes.SetCapacity(mForwardArcs.entryCount);
+    nodes.SetCapacity(mForwardArcs.EntryCount());
 
     // Enumerate all of our entries into an nsCOMArray
     PL_DHashTableEnumerate(&mForwardArcs, ResourceEnumerator, &nodes);
 
     return NS_NewArrayEnumerator(aResult, nodes);
 }
 
 NS_IMETHODIMP
@@ -1915,17 +1915,17 @@ InMemoryDataSource::SweepForwardArcsEntr
     Assertion* as = entry->mAssertions;
     if (as && (as->mHashEntry))
     {
         // Stuff in sub-hashes must be swept recursively (max depth: 1)
         PL_DHashTableEnumerate(as->u.hash.mPropertyHash,
                                SweepForwardArcsEntries, info);
 
         // If the sub-hash is now empty, clean it up.
-        if (!as->u.hash.mPropertyHash->entryCount) {
+        if (!as->u.hash.mPropertyHash->EntryCount()) {
             as->Release();
             result = PL_DHASH_REMOVE;
         }
 
         return result;
     }
 
     Assertion* prev = nullptr;
--- a/uriloader/base/nsDocLoader.cpp
+++ b/uriloader/base/nsDocLoader.cpp
@@ -1380,17 +1380,17 @@ static PLDHashOperator
 RemoveInfoCallback(PLDHashTable *table, PLDHashEntryHdr *hdr, uint32_t number,
                    void *arg)
 {
   return PL_DHASH_REMOVE;
 }
 
 void nsDocLoader::ClearRequestInfoHash(void)
 {
-  if (!mRequestInfoHash.ops || !mRequestInfoHash.entryCount) {
+  if (!mRequestInfoHash.ops || !mRequestInfoHash.EntryCount()) {
     // No hash, or the hash is empty, nothing to do here then...
 
     return;
   }
 
   PL_DHashTableEnumerate(&mRequestInfoHash, RemoveInfoCallback, nullptr);
 }
 
--- a/xpcom/base/nsCycleCollector.cpp
+++ b/xpcom/base/nsCycleCollector.cpp
@@ -873,17 +873,17 @@ public:
 #endif
 
   PtrInfo* FindNode(void* aPtr);
   PtrToNodeEntry* AddNodeToMap(void* aPtr);
   void RemoveNodeFromMap(void* aPtr);
 
   uint32_t MapCount() const
   {
-    return mPtrToNodeMap.entryCount;
+    return mPtrToNodeMap.EntryCount();
   }
 
   void SizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
                            size_t* aNodesSize, size_t* aEdgesSize,
                            size_t* aWeakMapsSize) const
   {
     *aNodesSize = mNodes.SizeOfExcludingThis(aMallocSizeOf);
     *aEdgesSize = mEdges.SizeOfExcludingThis(aMallocSizeOf);
--- a/xpcom/ds/nsAtomTable.cpp
+++ b/xpcom/ds/nsAtomTable.cpp
@@ -338,23 +338,22 @@ NS_PurgeAtomTable()
   delete gStaticAtomTable;
 
   if (gAtomTable.ops) {
 #ifdef DEBUG
     const char* dumpAtomLeaks = PR_GetEnv("MOZ_DUMP_ATOM_LEAKS");
     if (dumpAtomLeaks && *dumpAtomLeaks) {
       uint32_t leaked = 0;
       printf("*** %d atoms still exist (including permanent):\n",
-             gAtomTable.entryCount);
+             gAtomTable.EntryCount());
       PL_DHashTableEnumerate(&gAtomTable, DumpAtomLeaks, &leaked);
       printf("*** %u non-permanent atoms leaked\n", leaked);
     }
 #endif
     PL_DHashTableFinish(&gAtomTable);
-    gAtomTable.entryCount = 0;
     gAtomTable.ops = nullptr;
   }
 }
 
 AtomImpl::AtomImpl(const nsAString& aString, uint32_t aHash)
 {
   mLength = aString.Length();
   nsRefPtr<nsStringBuffer> buf = nsStringBuffer::FromString(aString);
@@ -401,19 +400,19 @@ AtomImpl::~AtomImpl()
 {
   NS_PRECONDITION(gAtomTable.ops, "uninitialized atom hashtable");
   // Permanent atoms are removed from the hashtable at shutdown, and we
   // don't want to remove them twice.  See comment above in
   // |AtomTableClearEntry|.
   if (!IsPermanentInDestructor()) {
     AtomTableKey key(mString, mLength, mHash);
     PL_DHashTableOperate(&gAtomTable, &key, PL_DHASH_REMOVE);
-    if (gAtomTable.entryCount == 0) {
+    if (gAtomTable.ops && gAtomTable.EntryCount() == 0) {
       PL_DHashTableFinish(&gAtomTable);
-      NS_ASSERTION(gAtomTable.entryCount == 0,
+      NS_ASSERTION(gAtomTable.EntryCount() == 0,
                    "PL_DHashTableFinish changed the entry count");
     }
   }
 
   nsStringBuffer::FromData(mString)->Release();
 }
 
 NS_IMPL_ISUPPORTS(AtomImpl, nsIAtom)
@@ -553,31 +552,31 @@ static inline AtomTableEntry*
 GetAtomHashEntry(const char* aString, uint32_t aLength, uint32_t* aHashOut)
 {
   MOZ_ASSERT(NS_IsMainThread(), "wrong thread");
   EnsureTableExists();
   AtomTableKey key(aString, aLength, aHashOut);
   AtomTableEntry* e = static_cast<AtomTableEntry*>(
     PL_DHashTableOperate(&gAtomTable, &key, PL_DHASH_ADD));
   if (!e) {
-    NS_ABORT_OOM(gAtomTable.entryCount * gAtomTable.entrySize);
+    NS_ABORT_OOM(gAtomTable.EntryCount() * gAtomTable.EntrySize());
   }
   return e;
 }
 
 static inline AtomTableEntry*
 GetAtomHashEntry(const char16_t* aString, uint32_t aLength, uint32_t* aHashOut)
 {
   MOZ_ASSERT(NS_IsMainThread(), "wrong thread");
   EnsureTableExists();
   AtomTableKey key(aString, aLength, aHashOut);
   AtomTableEntry* e = static_cast<AtomTableEntry*>(
     PL_DHashTableOperate(&gAtomTable, &key, PL_DHASH_ADD));
   if (!e) {
-    NS_ABORT_OOM(gAtomTable.entryCount * gAtomTable.entrySize);
+    NS_ABORT_OOM(gAtomTable.EntryCount() * gAtomTable.EntrySize());
   }
   return e;
 }
 
 class CheckStaticAtomSizes
 {
   CheckStaticAtomSizes()
   {
@@ -714,17 +713,17 @@ NS_NewPermanentAtom(const nsAString& aUT
 
   // No need to addref since permanent atoms aren't refcounted anyway
   return atom;
 }
 
 nsrefcnt
 NS_GetNumberOfAtoms(void)
 {
-  return gAtomTable.entryCount;
+  return gAtomTable.EntryCount();
 }
 
 nsIAtom*
 NS_GetStaticAtom(const nsAString& aUTF16String)
 {
   NS_PRECONDITION(gStaticAtomTable, "Static atom table not created yet.");
   NS_PRECONDITION(gStaticAtomTableSealed, "Static atom table not sealed yet.");
   StaticAtomEntry* entry = gStaticAtomTable->GetEntry(aUTF16String);
--- a/xpcom/ds/nsPersistentProperties.cpp
+++ b/xpcom/ds/nsPersistentProperties.cpp
@@ -602,21 +602,21 @@ AddElemToArray(PLDHashTable* aTable, PLD
 
 
 NS_IMETHODIMP
 nsPersistentProperties::Enumerate(nsISimpleEnumerator** aResult)
 {
   nsCOMArray<nsIPropertyElement> props;
 
   // We know the necessary size; we can avoid growing it while adding elements
-  props.SetCapacity(mTable.entryCount);
+  props.SetCapacity(mTable.EntryCount());
 
   // Step through hash entries populating a transient array
   uint32_t n = PL_DHashTableEnumerate(&mTable, AddElemToArray, (void*)&props);
-  if (n < mTable.entryCount) {
+  if (n < mTable.EntryCount()) {
     return NS_ERROR_OUT_OF_MEMORY;
   }
 
   return NS_NewArrayEnumerator(aResult, props);
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 // XXX Some day we'll unify the nsIPersistentProperties interface with
--- a/xpcom/ds/nsStaticNameTable.cpp
+++ b/xpcom/ds/nsStaticNameTable.cpp
@@ -111,17 +111,17 @@ nsStaticCaseInsensitiveNameTable::nsStat
   MOZ_COUNT_CTOR(nsStaticCaseInsensitiveNameTable);
   mNameTable.ops = nullptr;
 }
 
 nsStaticCaseInsensitiveNameTable::~nsStaticCaseInsensitiveNameTable()
 {
   if (mNameArray) {
     // manually call the destructor on placement-new'ed objects
-    for (uint32_t index = 0; index < mNameTable.entryCount; index++) {
+    for (uint32_t index = 0; index < mNameTable.EntryCount(); index++) {
       mNameArray[index].~nsDependentCString();
     }
     nsMemory::Free((void*)mNameArray);
   }
   if (mNameTable.ops) {
     PL_DHashTableFinish(&mNameTable);
   }
   MOZ_COUNT_DTOR(nsStaticCaseInsensitiveNameTable);
@@ -226,13 +226,13 @@ nsStaticCaseInsensitiveNameTable::Lookup
 }
 
 const nsAFlatCString&
 nsStaticCaseInsensitiveNameTable::GetStringValue(int32_t aIndex)
 {
   NS_ASSERTION(mNameArray, "not inited");
   NS_ASSERTION(mNameTable.ops, "not inited");
 
-  if ((NOT_FOUND < aIndex) && ((uint32_t)aIndex < mNameTable.entryCount)) {
+  if ((NOT_FOUND < aIndex) && ((uint32_t)aIndex < mNameTable.EntryCount())) {
     return mNameArray[aIndex];
   }
   return mNullStr;
 }
--- a/xpcom/glue/nsBaseHashtable.h
+++ b/xpcom/glue/nsBaseHashtable.h
@@ -119,17 +119,17 @@ public:
    * put a new value for the associated key
    * @param aKey the key to put
    * @param aData the new data
    * @return always true, unless memory allocation failed
    */
   void Put(KeyType aKey, const UserDataType& aData)
   {
     if (!Put(aKey, aData, fallible_t())) {
-      NS_ABORT_OOM(this->mTable.entrySize * this->mTable.entryCount);
+      NS_ABORT_OOM(this->mTable.EntrySize() * this->mTable.EntryCount());
     }
   }
 
   NS_WARN_UNUSED_RESULT bool Put(KeyType aKey, const UserDataType& aData,
                                  const fallible_t&)
   {
     EntryType* ent = this->PutEntry(aKey);
     if (!ent) {
--- a/xpcom/glue/nsRefPtrHashtable.h
+++ b/xpcom/glue/nsRefPtrHashtable.h
@@ -143,17 +143,17 @@ nsRefPtrHashtable<KeyClass, RefPtr>::Get
 }
 
 template<class KeyClass, class RefPtr>
 void
 nsRefPtrHashtable<KeyClass, RefPtr>::Put(KeyType aKey,
                                          already_AddRefed<RefPtr> aData)
 {
   if (!Put(aKey, mozilla::Move(aData), mozilla::fallible_t())) {
-    NS_ABORT_OOM(this->mTable.entrySize * this->mTable.entryCount);
+    NS_ABORT_OOM(this->mTable.EntrySize() * this->mTable.EntryCount());
   }
 }
 
 template<class KeyClass, class RefPtr>
 bool
 nsRefPtrHashtable<KeyClass, RefPtr>::Put(KeyType aKey,
                                          already_AddRefed<RefPtr> aData,
                                          const mozilla::fallible_t&)
--- a/xpcom/glue/nsTHashtable.h
+++ b/xpcom/glue/nsTHashtable.h
@@ -94,33 +94,33 @@ public:
   ~nsTHashtable();
 
   nsTHashtable(nsTHashtable<EntryType>&& aOther);
 
   /**
    * Return the generation number for the table. This increments whenever
    * the table data items are moved.
    */
-  uint32_t GetGeneration() const { return mTable.generation; }
+  uint32_t GetGeneration() const { return mTable.Generation(); }
 
   /**
    * KeyType is typedef'ed for ease of use.
    */
   typedef typename EntryType::KeyType KeyType;
 
   /**
    * KeyTypePointer is typedef'ed for ease of use.
    */
   typedef typename EntryType::KeyTypePointer KeyTypePointer;
 
   /**
    * Return the number of entries in the table.
    * @return    number of entries
    */
-  uint32_t Count() const { return mTable.entryCount; }
+  uint32_t Count() const { return mTable.EntryCount(); }
 
   /**
    * Get the entry associated with a key.
    * @param     aKey the key to retrieve
    * @return    pointer to the entry class, if the key exists; nullptr if the
    *            key doesn't exist
    */
   EntryType* GetEntry(KeyType aKey) const
@@ -145,17 +145,17 @@ public:
    * @param     aKey the key to retrieve
    * @return    pointer to the entry class retreived; nullptr only if memory
                 can't be allocated
    */
   EntryType* PutEntry(KeyType aKey)
   {
     EntryType* e = PutEntry(aKey, fallible_t());
     if (!e) {
-      NS_ABORT_OOM(mTable.entrySize * mTable.entryCount);
+      NS_ABORT_OOM(mTable.EntrySize() * mTable.EntryCount());
     }
     return e;
   }
 
   EntryType* PutEntry(KeyType aKey, const fallible_t&) NS_WARN_UNUSED_RESULT {
     NS_ASSERTION(mTable.ops, "nsTHashtable was not initialized properly.");
 
     return static_cast<EntryType*>(PL_DHashTableOperate(
--- a/xpcom/glue/pldhash.cpp
+++ b/xpcom/glue/pldhash.cpp
@@ -110,37 +110,56 @@ PL_DHashMatchStringKey(PLDHashTable* aTa
   const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;
 
   /* XXX tolerate null keys on account of sloppy Mozilla callers. */
   return stub->key == aKey ||
          (stub->key && aKey &&
           strcmp((const char*)stub->key, (const char*)aKey) == 0);
 }
 
+MOZ_ALWAYS_INLINE void
+PLDHashTable::MoveEntryStub(const PLDHashEntryHdr* aFrom,
+                            PLDHashEntryHdr* aTo)
+{
+  memcpy(aTo, aFrom, entrySize);
+}
+
 void
 PL_DHashMoveEntryStub(PLDHashTable* aTable,
                       const PLDHashEntryHdr* aFrom,
                       PLDHashEntryHdr* aTo)
 {
-  memcpy(aTo, aFrom, aTable->entrySize);
+  aTable->MoveEntryStub(aFrom, aTo);
+}
+
+MOZ_ALWAYS_INLINE void
+PLDHashTable::ClearEntryStub(PLDHashEntryHdr* aEntry)
+{
+  memset(aEntry, 0, entrySize);
 }
 
 void
 PL_DHashClearEntryStub(PLDHashTable* aTable, PLDHashEntryHdr* aEntry)
 {
-  memset(aEntry, 0, aTable->entrySize);
+  aTable->ClearEntryStub(aEntry);
+}
+
+MOZ_ALWAYS_INLINE void
+PLDHashTable::FreeStringKey(PLDHashEntryHdr* aEntry)
+{
+  const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;
+
+  free((void*)stub->key);
+  memset(aEntry, 0, entrySize);
 }
 
 void
 PL_DHashFreeStringKey(PLDHashTable* aTable, PLDHashEntryHdr* aEntry)
 {
-  const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;
-
-  free((void*)stub->key);
-  memset(aEntry, 0, aTable->entrySize);
+  aTable->FreeStringKey(aEntry);
 }
 
 void
 PL_DHashFinalizeStub(PLDHashTable* aTable)
 {
 }
 
 static const PLDHashTableOps stub_ops = {
@@ -215,72 +234,79 @@ MinLoad(uint32_t aCapacity)
 }
 
 static inline uint32_t
 MinCapacity(uint32_t aLength)
 {
   return (aLength * 4 + (3 - 1)) / 3;   // == ceil(aLength * 4 / 3)
 }
 
-bool
-PL_DHashTableInit(PLDHashTable* aTable, const PLDHashTableOps* aOps,
-                  void* aData, uint32_t aEntrySize, const fallible_t&,
-                  uint32_t aLength)
+MOZ_ALWAYS_INLINE bool
+PLDHashTable::Init(const PLDHashTableOps* aOps, void* aData,
+                   uint32_t aEntrySize, const fallible_t&, uint32_t aLength)
 {
 #ifdef DEBUG
   if (aEntrySize > 16 * sizeof(void*)) {
     printf_stderr(
-      "pldhash: for the aTable at address %p, the given aEntrySize"
+      "pldhash: for the table at address %p, the given aEntrySize"
       " of %lu definitely favors chaining over double hashing.\n",
-      (void*)aTable,
+      (void*)this,
       (unsigned long) aEntrySize);
   }
 #endif
 
   if (aLength > PL_DHASH_MAX_INITIAL_LENGTH) {
     return false;
   }
 
-  aTable->ops = aOps;
-  aTable->data = aData;
+  ops = aOps;
+  data = aData;
 
   // Compute the smallest capacity allowing |aLength| elements to be inserted
   // without rehashing.
   uint32_t capacity = MinCapacity(aLength);
   if (capacity < PL_DHASH_MIN_CAPACITY) {
     capacity = PL_DHASH_MIN_CAPACITY;
   }
 
   int log2 = CeilingLog2(capacity);
 
   capacity = 1u << log2;
   MOZ_ASSERT(capacity <= PL_DHASH_MAX_CAPACITY);
-  aTable->hashShift = PL_DHASH_BITS - log2;
-  aTable->entrySize = aEntrySize;
-  aTable->entryCount = aTable->removedCount = 0;
-  aTable->generation = 0;
+  hashShift = PL_DHASH_BITS - log2;
+  entrySize = aEntrySize;
+  entryCount = removedCount = 0;
+  generation = 0;
   uint32_t nbytes;
   if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
     return false;  // overflowed
   }
 
-  aTable->entryStore = (char*)aOps->allocTable(aTable, nbytes);
-  if (!aTable->entryStore) {
+  entryStore = (char*)aOps->allocTable(this, nbytes);
+  if (!entryStore) {
     return false;
   }
-  memset(aTable->entryStore, 0, nbytes);
-  METER(memset(&aTable->stats, 0, sizeof(aTable->stats)));
+  memset(entryStore, 0, nbytes);
+  METER(memset(&stats, 0, sizeof(stats)));
 
 #ifdef DEBUG
-  aTable->recursionLevel = 0;
+  recursionLevel = 0;
 #endif
 
   return true;
 }
 
+bool
+PL_DHashTableInit(PLDHashTable* aTable, const PLDHashTableOps* aOps,
+                  void* aData, uint32_t aEntrySize,
+                  const fallible_t& aFallible, uint32_t aLength)
+{
+  return aTable->Init(aOps, aData, aEntrySize, aFallible, aLength);
+}
+
 void
 PL_DHashTableInit(PLDHashTable* aTable, const PLDHashTableOps* aOps,
                   void* aData, uint32_t aEntrySize, uint32_t aLength)
 {
   if (!PL_DHashTableInit(aTable, aOps, aData, aEntrySize, fallible_t(),
                          aLength)) {
     if (aLength > PL_DHASH_MAX_INITIAL_LENGTH) {
       MOZ_CRASH();          // the asked-for length was too big
@@ -302,93 +328,93 @@ PL_DHashTableInit(PLDHashTable* aTable, 
 
 /*
  * Reserve keyHash 0 for free entries and 1 for removed-entry sentinels.  Note
  * that a removed-entry sentinel need be stored only if the removed entry had
  * a colliding entry added after it.  Therefore we can use 1 as the collision
  * flag in addition to the removed-entry sentinel value.  Multiplicative hash
  * uses the high order bits of keyHash, so this least-significant reservation
  * should not hurt the hash function's effectiveness much.
- *
- * If you change any of these magic numbers, also update PL_DHASH_ENTRY_IS_LIVE
- * in pldhash.h.  It used to be private to pldhash.c, but then became public to
- * assist iterator writers who inspect table->entryStore directly.
  */
 #define COLLISION_FLAG              ((PLDHashNumber) 1)
 #define MARK_ENTRY_FREE(entry)      ((entry)->keyHash = 0)
 #define MARK_ENTRY_REMOVED(entry)   ((entry)->keyHash = 1)
 #define ENTRY_IS_REMOVED(entry)     ((entry)->keyHash == 1)
-#define ENTRY_IS_LIVE(entry)        PL_DHASH_ENTRY_IS_LIVE(entry)
+#define ENTRY_IS_LIVE(entry)        ((entry)->keyHash >= 2)
 #define ENSURE_LIVE_KEYHASH(hash0)  if (hash0 < 2) hash0 -= 2; else (void)0
 
 /* Match an entry's keyHash against an unstored one computed from a key. */
 #define MATCH_ENTRY_KEYHASH(entry,hash0) \
     (((entry)->keyHash & ~COLLISION_FLAG) == (hash0))
 
 /* Compute the address of the indexed entry in table. */
 #define ADDRESS_ENTRY(table, index) \
     ((PLDHashEntryHdr *)((table)->entryStore + (index) * (table)->entrySize))
 
-void
-PL_DHashTableFinish(PLDHashTable* aTable)
+MOZ_ALWAYS_INLINE void
+PLDHashTable::Finish()
 {
-  INCREMENT_RECURSION_LEVEL(aTable);
+  INCREMENT_RECURSION_LEVEL(this);
 
   /* Call finalize before clearing entries, so it can enumerate them. */
-  aTable->ops->finalize(aTable);
+  ops->finalize(this);
 
   /* Clear any remaining live entries. */
-  char* entryAddr = aTable->entryStore;
-  uint32_t entrySize = aTable->entrySize;
-  char* entryLimit = entryAddr + PL_DHASH_TABLE_CAPACITY(aTable) * entrySize;
+  char* entryAddr = entryStore;
+  char* entryLimit = entryAddr + Capacity() * entrySize;
   while (entryAddr < entryLimit) {
     PLDHashEntryHdr* entry = (PLDHashEntryHdr*)entryAddr;
     if (ENTRY_IS_LIVE(entry)) {
-      METER(aTable->stats.removeEnums++);
-      aTable->ops->clearEntry(aTable, entry);
+      METER(stats.removeEnums++);
+      ops->clearEntry(this, entry);
     }
     entryAddr += entrySize;
   }
 
-  DECREMENT_RECURSION_LEVEL(aTable);
-  MOZ_ASSERT(RECURSION_LEVEL_SAFE_TO_FINISH(aTable));
+  DECREMENT_RECURSION_LEVEL(this);
+  MOZ_ASSERT(RECURSION_LEVEL_SAFE_TO_FINISH(this));
 
   /* Free entry storage last. */
-  aTable->ops->freeTable(aTable, aTable->entryStore);
+  ops->freeTable(this, entryStore);
 }
 
-static PLDHashEntryHdr* PL_DHASH_FASTCALL
-SearchTable(PLDHashTable* aTable, const void* aKey, PLDHashNumber aKeyHash,
-            PLDHashOperator aOp)
+void
+PL_DHashTableFinish(PLDHashTable* aTable)
 {
-  METER(aTable->stats.searches++);
+  aTable->Finish();
+}
+
+PLDHashEntryHdr* PL_DHASH_FASTCALL
+PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash,
+                          PLDHashOperator aOp)
+{
+  METER(stats.searches++);
   NS_ASSERTION(!(aKeyHash & COLLISION_FLAG),
                "!(aKeyHash & COLLISION_FLAG)");
 
   /* Compute the primary hash address. */
-  int hashShift = aTable->hashShift;
   PLDHashNumber hash1 = HASH1(aKeyHash, hashShift);
-  PLDHashEntryHdr* entry = ADDRESS_ENTRY(aTable, hash1);
+  PLDHashEntryHdr* entry = ADDRESS_ENTRY(this, hash1);
 
   /* Miss: return space for a new entry. */
   if (PL_DHASH_ENTRY_IS_FREE(entry)) {
-    METER(aTable->stats.misses++);
+    METER(stats.misses++);
     return entry;
   }
 
   /* Hit: return entry. */
-  PLDHashMatchEntry matchEntry = aTable->ops->matchEntry;
+  PLDHashMatchEntry matchEntry = ops->matchEntry;
   if (MATCH_ENTRY_KEYHASH(entry, aKeyHash) &&
-      matchEntry(aTable, entry, aKey)) {
-    METER(aTable->stats.hits++);
+      matchEntry(this, entry, aKey)) {
+    METER(stats.hits++);
     return entry;
   }
 
   /* Collision: double hash. */
-  int sizeLog2 = PL_DHASH_BITS - aTable->hashShift;
+  int sizeLog2 = PL_DHASH_BITS - hashShift;
   PLDHashNumber hash2 = HASH2(aKeyHash, sizeLog2, hashShift);
   uint32_t sizeMask = (1u << sizeLog2) - 1;
 
   /* Save the first removed entry pointer so PL_DHASH_ADD can recycle it. */
   PLDHashEntryHdr* firstRemoved = nullptr;
 
   for (;;) {
     if (MOZ_UNLIKELY(ENTRY_IS_REMOVED(entry))) {
@@ -396,29 +422,29 @@ SearchTable(PLDHashTable* aTable, const 
         firstRemoved = entry;
       }
     } else {
       if (aOp == PL_DHASH_ADD) {
         entry->keyHash |= COLLISION_FLAG;
       }
     }
 
-    METER(aTable->stats.steps++);
+    METER(stats.steps++);
     hash1 -= hash2;
     hash1 &= sizeMask;
 
-    entry = ADDRESS_ENTRY(aTable, hash1);
+    entry = ADDRESS_ENTRY(this, hash1);
     if (PL_DHASH_ENTRY_IS_FREE(entry)) {
-      METER(aTable->stats.misses++);
+      METER(stats.misses++);
       return (firstRemoved && aOp == PL_DHASH_ADD) ? firstRemoved : entry;
     }
 
     if (MATCH_ENTRY_KEYHASH(entry, aKeyHash) &&
-        matchEntry(aTable, entry, aKey)) {
-      METER(aTable->stats.hits++);
+        matchEntry(this, entry, aKey)) {
+      METER(stats.hits++);
       return entry;
     }
   }
 
   /* NOTREACHED */
   return nullptr;
 }
 
@@ -427,261 +453,268 @@ SearchTable(PLDHashTable* aTable, const 
  *   1. assume |aOp == PL_DHASH_ADD|,
  *   2. assume that |aKey| will never match an existing entry, and
  *   3. assume that no entries have been removed from the current table
  *      structure.
  * Avoiding the need for |aKey| means we can avoid needing a way to map
  * entries to keys, which means callers can use complex key types more
  * easily.
  */
-static PLDHashEntryHdr* PL_DHASH_FASTCALL
-FindFreeEntry(PLDHashTable* aTable, PLDHashNumber aKeyHash)
+PLDHashEntryHdr* PL_DHASH_FASTCALL
+PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash)
 {
-  METER(aTable->stats.searches++);
+  METER(stats.searches++);
   NS_ASSERTION(!(aKeyHash & COLLISION_FLAG),
                "!(aKeyHash & COLLISION_FLAG)");
 
   /* Compute the primary hash address. */
-  int hashShift = aTable->hashShift;
   PLDHashNumber hash1 = HASH1(aKeyHash, hashShift);
-  PLDHashEntryHdr* entry = ADDRESS_ENTRY(aTable, hash1);
+  PLDHashEntryHdr* entry = ADDRESS_ENTRY(this, hash1);
 
   /* Miss: return space for a new entry. */
   if (PL_DHASH_ENTRY_IS_FREE(entry)) {
-    METER(aTable->stats.misses++);
+    METER(stats.misses++);
     return entry;
   }
 
   /* Collision: double hash. */
-  int sizeLog2 = PL_DHASH_BITS - aTable->hashShift;
+  int sizeLog2 = PL_DHASH_BITS - hashShift;
   PLDHashNumber hash2 = HASH2(aKeyHash, sizeLog2, hashShift);
   uint32_t sizeMask = (1u << sizeLog2) - 1;
 
   for (;;) {
     NS_ASSERTION(!ENTRY_IS_REMOVED(entry),
                  "!ENTRY_IS_REMOVED(entry)");
     entry->keyHash |= COLLISION_FLAG;
 
-    METER(aTable->stats.steps++);
+    METER(stats.steps++);
     hash1 -= hash2;
     hash1 &= sizeMask;
 
-    entry = ADDRESS_ENTRY(aTable, hash1);
+    entry = ADDRESS_ENTRY(this, hash1);
     if (PL_DHASH_ENTRY_IS_FREE(entry)) {
-      METER(aTable->stats.misses++);
+      METER(stats.misses++);
       return entry;
     }
   }
 
   /* NOTREACHED */
   return nullptr;
 }
 
-static bool
-ChangeTable(PLDHashTable* aTable, int aDeltaLog2)
+bool
+PLDHashTable::ChangeTable(int aDeltaLog2)
 {
   /* Look, but don't touch, until we succeed in getting new entry store. */
-  int oldLog2 = PL_DHASH_BITS - aTable->hashShift;
+  int oldLog2 = PL_DHASH_BITS - hashShift;
   int newLog2 = oldLog2 + aDeltaLog2;
   uint32_t newCapacity = 1u << newLog2;
   if (newCapacity > PL_DHASH_MAX_CAPACITY) {
     return false;
   }
 
-  uint32_t entrySize = aTable->entrySize;
   uint32_t nbytes;
   if (!SizeOfEntryStore(newCapacity, entrySize, &nbytes)) {
     return false;   // overflowed
   }
 
-  char* newEntryStore = (char*)aTable->ops->allocTable(aTable, nbytes);
+  char* newEntryStore = (char*)ops->allocTable(this, nbytes);
   if (!newEntryStore) {
     return false;
   }
 
   /* We can't fail from here on, so update table parameters. */
 #ifdef DEBUG
-  uint32_t recursionLevel = aTable->recursionLevel;
+  uint32_t recursionLevelTmp = recursionLevel;
 #endif
-  aTable->hashShift = PL_DHASH_BITS - newLog2;
-  aTable->removedCount = 0;
-  aTable->generation++;
+  hashShift = PL_DHASH_BITS - newLog2;
+  removedCount = 0;
+  generation++;
 
   /* Assign the new entry store to table. */
   memset(newEntryStore, 0, nbytes);
   char* oldEntryStore;
   char* oldEntryAddr;
-  oldEntryAddr = oldEntryStore = aTable->entryStore;
-  aTable->entryStore = newEntryStore;
-  PLDHashMoveEntry moveEntry = aTable->ops->moveEntry;
+  oldEntryAddr = oldEntryStore = entryStore;
+  entryStore = newEntryStore;
+  PLDHashMoveEntry moveEntry = ops->moveEntry;
 #ifdef DEBUG
-  aTable->recursionLevel = recursionLevel;
+  recursionLevel = recursionLevelTmp;
 #endif
 
   /* Copy only live entries, leaving removed ones behind. */
   uint32_t oldCapacity = 1u << oldLog2;
   for (uint32_t i = 0; i < oldCapacity; ++i) {
     PLDHashEntryHdr* oldEntry = (PLDHashEntryHdr*)oldEntryAddr;
     if (ENTRY_IS_LIVE(oldEntry)) {
       oldEntry->keyHash &= ~COLLISION_FLAG;
-      PLDHashEntryHdr* newEntry = FindFreeEntry(aTable, oldEntry->keyHash);
+      PLDHashEntryHdr* newEntry = FindFreeEntry(oldEntry->keyHash);
       NS_ASSERTION(PL_DHASH_ENTRY_IS_FREE(newEntry),
                    "PL_DHASH_ENTRY_IS_FREE(newEntry)");
-      moveEntry(aTable, oldEntry, newEntry);
+      moveEntry(this, oldEntry, newEntry);
       newEntry->keyHash = oldEntry->keyHash;
     }
     oldEntryAddr += entrySize;
   }
 
-  aTable->ops->freeTable(aTable, oldEntryStore);
+  ops->freeTable(this, oldEntryStore);
   return true;
 }
 
-PLDHashEntryHdr* PL_DHASH_FASTCALL
-PL_DHashTableOperate(PLDHashTable* aTable, const void* aKey, PLDHashOperator aOp)
+MOZ_ALWAYS_INLINE PLDHashEntryHdr*
+PLDHashTable::Operate(const void* aKey, PLDHashOperator aOp)
 {
   PLDHashEntryHdr* entry;
 
-  MOZ_ASSERT(aOp == PL_DHASH_LOOKUP || aTable->recursionLevel == 0);
-  INCREMENT_RECURSION_LEVEL(aTable);
+  MOZ_ASSERT(aOp == PL_DHASH_LOOKUP || recursionLevel == 0);
+  INCREMENT_RECURSION_LEVEL(this);
 
-  PLDHashNumber keyHash = aTable->ops->hashKey(aTable, aKey);
+  PLDHashNumber keyHash = ops->hashKey(this, aKey);
   keyHash *= PL_DHASH_GOLDEN_RATIO;
 
   /* Avoid 0 and 1 hash codes, they indicate free and removed entries. */
   ENSURE_LIVE_KEYHASH(keyHash);
   keyHash &= ~COLLISION_FLAG;
 
   switch (aOp) {
     case PL_DHASH_LOOKUP:
-      METER(aTable->stats.lookups++);
-      entry = SearchTable(aTable, aKey, keyHash, aOp);
+      METER(stats.lookups++);
+      entry = SearchTable(aKey, keyHash, aOp);
       break;
 
     case PL_DHASH_ADD: {
       /*
        * If alpha is >= .75, grow or compress the table.  If aKey is already
        * in the table, we may grow once more than necessary, but only if we
        * are on the edge of being overloaded.
        */
-      uint32_t capacity = PL_DHASH_TABLE_CAPACITY(aTable);
-      if (aTable->entryCount + aTable->removedCount >= MaxLoad(capacity)) {
+      uint32_t capacity = Capacity();
+      if (entryCount + removedCount >= MaxLoad(capacity)) {
         /* Compress if a quarter or more of all entries are removed. */
         int deltaLog2;
-        if (aTable->removedCount >= capacity >> 2) {
-          METER(aTable->stats.compresses++);
+        if (removedCount >= capacity >> 2) {
+          METER(stats.compresses++);
           deltaLog2 = 0;
         } else {
-          METER(aTable->stats.grows++);
+          METER(stats.grows++);
           deltaLog2 = 1;
         }
 
         /*
-         * Grow or compress aTable.  If ChangeTable() fails, allow
+         * Grow or compress the table.  If ChangeTable() fails, allow
          * overloading up to the secondary max.  Once we hit the secondary
          * max, return null.
          */
-        if (!ChangeTable(aTable, deltaLog2) &&
-            aTable->entryCount + aTable->removedCount >=
+        if (!ChangeTable(deltaLog2) &&
+            entryCount + removedCount >=
             MaxLoadOnGrowthFailure(capacity)) {
-          METER(aTable->stats.addFailures++);
+          METER(stats.addFailures++);
           entry = nullptr;
           break;
         }
       }
 
       /*
        * Look for entry after possibly growing, so we don't have to add it,
        * then skip it while growing the table and re-add it after.
        */
-      entry = SearchTable(aTable, aKey, keyHash, aOp);
+      entry = SearchTable(aKey, keyHash, aOp);
       if (!ENTRY_IS_LIVE(entry)) {
         /* Initialize the entry, indicating that it's no longer free. */
-        METER(aTable->stats.addMisses++);
+        METER(stats.addMisses++);
         if (ENTRY_IS_REMOVED(entry)) {
-          METER(aTable->stats.addOverRemoved++);
-          aTable->removedCount--;
+          METER(stats.addOverRemoved++);
+          removedCount--;
           keyHash |= COLLISION_FLAG;
         }
-        if (aTable->ops->initEntry &&
-            !aTable->ops->initEntry(aTable, entry, aKey)) {
+        if (ops->initEntry && !ops->initEntry(this, entry, aKey)) {
           /* We haven't claimed entry yet; fail with null return. */
-          memset(entry + 1, 0, aTable->entrySize - sizeof(*entry));
+          memset(entry + 1, 0, entrySize - sizeof(*entry));
           entry = nullptr;
           break;
         }
         entry->keyHash = keyHash;
-        aTable->entryCount++;
+        entryCount++;
       }
       METER(else {
-        aTable->stats.addHits++;
+        stats.addHits++;
       });
       break;
     }
 
     case PL_DHASH_REMOVE:
-      entry = SearchTable(aTable, aKey, keyHash, aOp);
+      entry = SearchTable(aKey, keyHash, aOp);
       if (ENTRY_IS_LIVE(entry)) {
         /* Clear this entry and mark it as "removed". */
-        METER(aTable->stats.removeHits++);
-        PL_DHashTableRawRemove(aTable, entry);
+        METER(stats.removeHits++);
+        PL_DHashTableRawRemove(this, entry);
 
-        /* Shrink if alpha is <= .25 and aTable isn't too small already. */
-        uint32_t capacity = PL_DHASH_TABLE_CAPACITY(aTable);
+        /* Shrink if alpha is <= .25 and the table isn't too small already. */
+        uint32_t capacity = Capacity();
         if (capacity > PL_DHASH_MIN_CAPACITY &&
-            aTable->entryCount <= MinLoad(capacity)) {
-          METER(aTable->stats.shrinks++);
-          (void) ChangeTable(aTable, -1);
+            entryCount <= MinLoad(capacity)) {
+          METER(stats.shrinks++);
+          (void) ChangeTable(-1);
         }
       }
       METER(else {
-        aTable->stats.removeMisses++;
+        stats.removeMisses++;
       });
       entry = nullptr;
       break;
 
     default:
       NS_NOTREACHED("0");
       entry = nullptr;
   }
 
-  DECREMENT_RECURSION_LEVEL(aTable);
+  DECREMENT_RECURSION_LEVEL(this);
 
   return entry;
 }
 
+PLDHashEntryHdr* PL_DHASH_FASTCALL
+PL_DHashTableOperate(PLDHashTable* aTable, const void* aKey, PLDHashOperator aOp)
+{
+  return aTable->Operate(aKey, aOp);
+}
+
+MOZ_ALWAYS_INLINE void
+PLDHashTable::RawRemove(PLDHashEntryHdr* aEntry)
+{
+  MOZ_ASSERT(recursionLevel != IMMUTABLE_RECURSION_LEVEL);
+
+  NS_ASSERTION(ENTRY_IS_LIVE(aEntry), "ENTRY_IS_LIVE(aEntry)");
+
+  /* Load keyHash first in case clearEntry() goofs it. */
+  PLDHashNumber keyHash = aEntry->keyHash;
+  ops->clearEntry(this, aEntry);
+  if (keyHash & COLLISION_FLAG) {
+    MARK_ENTRY_REMOVED(aEntry);
+    removedCount++;
+  } else {
+    METER(stats.removeFrees++);
+    MARK_ENTRY_FREE(aEntry);
+  }
+  entryCount--;
+}
+
 void
 PL_DHashTableRawRemove(PLDHashTable* aTable, PLDHashEntryHdr* aEntry)
 {
-  MOZ_ASSERT(aTable->recursionLevel != IMMUTABLE_RECURSION_LEVEL);
-
-  NS_ASSERTION(PL_DHASH_ENTRY_IS_LIVE(aEntry),
-               "PL_DHASH_ENTRY_IS_LIVE(aEntry)");
-
-  /* Load keyHash first in case clearEntry() goofs it. */
-  PLDHashNumber keyHash = aEntry->keyHash;
-  aTable->ops->clearEntry(aTable, aEntry);
-  if (keyHash & COLLISION_FLAG) {
-    MARK_ENTRY_REMOVED(aEntry);
-    aTable->removedCount++;
-  } else {
-    METER(aTable->stats.removeFrees++);
-    MARK_ENTRY_FREE(aEntry);
-  }
-  aTable->entryCount--;
+  aTable->RawRemove(aEntry);
 }
 
-uint32_t
-PL_DHashTableEnumerate(PLDHashTable* aTable, PLDHashEnumerator aEtor, void* aArg)
+MOZ_ALWAYS_INLINE uint32_t
+PLDHashTable::Enumerate(PLDHashEnumerator aEtor, void* aArg)
 {
-  INCREMENT_RECURSION_LEVEL(aTable);
+  INCREMENT_RECURSION_LEVEL(this);
 
-  char* entryAddr = aTable->entryStore;
-  uint32_t entrySize = aTable->entrySize;
-  uint32_t capacity = PL_DHASH_TABLE_CAPACITY(aTable);
+  char* entryAddr = entryStore;
+  uint32_t capacity = Capacity();
   uint32_t tableSize = capacity * entrySize;
   char* entryLimit = entryAddr + tableSize;
   uint32_t i = 0;
   bool didRemove = false;
 
   if (ChaosMode::isActive()) {
     // Start iterating at a random point in the hashtable. It would be
     // even more chaotic to iterate in fully random order, but that's a lot
@@ -690,63 +723,70 @@ PL_DHashTableEnumerate(PLDHashTable* aTa
     if (entryAddr >= entryLimit) {
       entryAddr -= tableSize;
     }
   }
 
   for (uint32_t e = 0; e < capacity; ++e) {
     PLDHashEntryHdr* entry = (PLDHashEntryHdr*)entryAddr;
     if (ENTRY_IS_LIVE(entry)) {
-      PLDHashOperator op = aEtor(aTable, entry, i++, aArg);
+      PLDHashOperator op = aEtor(this, entry, i++, aArg);
       if (op & PL_DHASH_REMOVE) {
-        METER(aTable->stats.removeEnums++);
-        PL_DHashTableRawRemove(aTable, entry);
+        METER(stats.removeEnums++);
+        PL_DHashTableRawRemove(this, entry);
         didRemove = true;
       }
       if (op & PL_DHASH_STOP) {
         break;
       }
     }
     entryAddr += entrySize;
     if (entryAddr >= entryLimit) {
       entryAddr -= tableSize;
     }
   }
 
-  MOZ_ASSERT(!didRemove || aTable->recursionLevel == 1);
+  MOZ_ASSERT(!didRemove || recursionLevel == 1);
 
   /*
    * Shrink or compress if a quarter or more of all entries are removed, or
    * if the table is underloaded according to the minimum alpha, and is not
    * minimal-size already.  Do this only if we removed above, so non-removing
-   * enumerations can count on stable aTable->entryStore until the next
+   * enumerations can count on stable |entryStore| until the next
    * non-lookup-Operate or removing-Enumerate.
    */
   if (didRemove &&
-      (aTable->removedCount >= capacity >> 2 ||
+      (removedCount >= capacity >> 2 ||
        (capacity > PL_DHASH_MIN_CAPACITY &&
-        aTable->entryCount <= MinLoad(capacity)))) {
-    METER(aTable->stats.enumShrinks++);
-    capacity = aTable->entryCount;
+        entryCount <= MinLoad(capacity)))) {
+    METER(stats.enumShrinks++);
+    capacity = entryCount;
     capacity += capacity >> 1;
     if (capacity < PL_DHASH_MIN_CAPACITY) {
       capacity = PL_DHASH_MIN_CAPACITY;
     }
 
     uint32_t ceiling = CeilingLog2(capacity);
-    ceiling -= PL_DHASH_BITS - aTable->hashShift;
+    ceiling -= PL_DHASH_BITS - hashShift;
 
-    (void) ChangeTable(aTable, ceiling);
+    (void) ChangeTable(ceiling);
   }
 
-  DECREMENT_RECURSION_LEVEL(aTable);
+  DECREMENT_RECURSION_LEVEL(this);
 
   return i;
 }
 
+uint32_t
+PL_DHashTableEnumerate(PLDHashTable* aTable, PLDHashEnumerator aEtor,
+                       void* aArg)
+{
+  return aTable->Enumerate(aEtor, aArg);
+}
+
 struct SizeOfEntryExcludingThisArg
 {
   size_t total;
   PLDHashSizeOfEntryExcludingThisFun sizeOfEntryExcludingThis;
   MallocSizeOf mallocSizeOf;
   void* arg;  // the arg passed by the user
 };
 
@@ -754,155 +794,180 @@ static PLDHashOperator
 SizeOfEntryExcludingThisEnumerator(PLDHashTable* aTable, PLDHashEntryHdr* aHdr,
                                    uint32_t aNumber, void* aArg)
 {
   SizeOfEntryExcludingThisArg* e = (SizeOfEntryExcludingThisArg*)aArg;
   e->total += e->sizeOfEntryExcludingThis(aHdr, e->mallocSizeOf, e->arg);
   return PL_DHASH_NEXT;
 }
 
+MOZ_ALWAYS_INLINE size_t
+PLDHashTable::SizeOfExcludingThis(
+    PLDHashSizeOfEntryExcludingThisFun aSizeOfEntryExcludingThis,
+    MallocSizeOf aMallocSizeOf, void* aArg /* = nullptr */) const
+{
+  size_t n = 0;
+  n += aMallocSizeOf(entryStore);
+  if (aSizeOfEntryExcludingThis) {
+    SizeOfEntryExcludingThisArg arg2 = {
+      0, aSizeOfEntryExcludingThis, aMallocSizeOf, aArg
+    };
+    PL_DHashTableEnumerate(const_cast<PLDHashTable*>(this),
+                           SizeOfEntryExcludingThisEnumerator, &arg2);
+    n += arg2.total;
+  }
+  return n;
+}
+
+MOZ_ALWAYS_INLINE size_t
+PLDHashTable::SizeOfIncludingThis(
+    PLDHashSizeOfEntryExcludingThisFun aSizeOfEntryExcludingThis,
+    MallocSizeOf aMallocSizeOf, void* aArg /* = nullptr */) const
+{
+  return aMallocSizeOf(this) +
+         SizeOfExcludingThis(aSizeOfEntryExcludingThis, aMallocSizeOf, aArg);
+}
+
 size_t
 PL_DHashTableSizeOfExcludingThis(
     const PLDHashTable* aTable,
     PLDHashSizeOfEntryExcludingThisFun aSizeOfEntryExcludingThis,
     MallocSizeOf aMallocSizeOf, void* aArg /* = nullptr */)
 {
-  size_t n = 0;
-  n += aMallocSizeOf(aTable->entryStore);
-  if (aSizeOfEntryExcludingThis) {
-    SizeOfEntryExcludingThisArg arg2 = {
-      0, aSizeOfEntryExcludingThis, aMallocSizeOf, aArg
-    };
-    PL_DHashTableEnumerate(const_cast<PLDHashTable*>(aTable),
-                           SizeOfEntryExcludingThisEnumerator, &arg2);
-    n += arg2.total;
-  }
-  return n;
+  return aTable->SizeOfExcludingThis(aSizeOfEntryExcludingThis,
+                                     aMallocSizeOf, aArg);
 }
 
 size_t
 PL_DHashTableSizeOfIncludingThis(
     const PLDHashTable* aTable,
     PLDHashSizeOfEntryExcludingThisFun aSizeOfEntryExcludingThis,
     MallocSizeOf aMallocSizeOf, void* aArg /* = nullptr */)
 {
-  return aMallocSizeOf(aTable) +
-         PL_DHashTableSizeOfExcludingThis(aTable, aSizeOfEntryExcludingThis,
-                                          aMallocSizeOf, aArg);
+  return aTable->SizeOfIncludingThis(aSizeOfEntryExcludingThis,
+                                     aMallocSizeOf, aArg);
 }
 
 #ifdef DEBUG
+MOZ_ALWAYS_INLINE void
+PLDHashTable::MarkImmutable()
+{
+  recursionLevel = IMMUTABLE_RECURSION_LEVEL;
+}
+
 void
 PL_DHashMarkTableImmutable(PLDHashTable* aTable)
 {
-  aTable->recursionLevel = IMMUTABLE_RECURSION_LEVEL;
+  aTable->MarkImmutable();
 }
 #endif
 
 #ifdef PL_DHASHMETER
 #include <math.h>
 
 void
-PL_DHashTableDumpMeter(PLDHashTable* aTable, PLDHashEnumerator aDump, FILE* aFp)
+PLDHashTable::DumpMeter(PLDHashEnumerator aDump, FILE* aFp)
 {
   PLDHashNumber hash1, hash2, maxChainHash1, maxChainHash2;
   double sqsum, mean, variance, sigma;
   PLDHashEntryHdr* entry;
 
-  char* entryAddr = aTable->entryStore;
-  uint32_t entrySize = aTable->entrySize;
-  int hashShift = aTable->hashShift;
+  char* entryAddr = entryStore;
   int sizeLog2 = PL_DHASH_BITS - hashShift;
-  uint32_t capacity = PL_DHASH_TABLE_CAPACITY(aTable);
+  uint32_t capacity = Capacity();
   uint32_t sizeMask = (1u << sizeLog2) - 1;
   uint32_t chainCount = 0, maxChainLen = 0;
   hash2 = 0;
   sqsum = 0;
 
   for (uint32_t i = 0; i < capacity; i++) {
     entry = (PLDHashEntryHdr*)entryAddr;
     entryAddr += entrySize;
     if (!ENTRY_IS_LIVE(entry)) {
       continue;
     }
     hash1 = HASH1(entry->keyHash & ~COLLISION_FLAG, hashShift);
     PLDHashNumber saveHash1 = hash1;
-    PLDHashEntryHdr* probe = ADDRESS_ENTRY(aTable, hash1);
+    PLDHashEntryHdr* probe = ADDRESS_ENTRY(this, hash1);
     uint32_t chainLen = 1;
     if (probe == entry) {
       /* Start of a (possibly unit-length) chain. */
       chainCount++;
     } else {
       hash2 = HASH2(entry->keyHash & ~COLLISION_FLAG, sizeLog2,
                     hashShift);
       do {
         chainLen++;
         hash1 -= hash2;
         hash1 &= sizeMask;
-        probe = ADDRESS_ENTRY(aTable, hash1);
+        probe = ADDRESS_ENTRY(this, hash1);
       } while (probe != entry);
     }
     sqsum += chainLen * chainLen;
     if (chainLen > maxChainLen) {
       maxChainLen = chainLen;
       maxChainHash1 = saveHash1;
       maxChainHash2 = hash2;
     }
   }
 
-  uint32_t entryCount = aTable->entryCount;
   if (entryCount && chainCount) {
     mean = (double)entryCount / chainCount;
     variance = chainCount * sqsum - entryCount * entryCount;
     if (variance < 0 || chainCount == 1) {
       variance = 0;
     } else {
       variance /= chainCount * (chainCount - 1);
     }
     sigma = sqrt(variance);
   } else {
     mean = sigma = 0;
   }
 
   fprintf(aFp, "Double hashing statistics:\n");
   fprintf(aFp, "    table size (in entries): %u\n", tableSize);
-  fprintf(aFp, "          number of entries: %u\n", aTable->entryCount);
-  fprintf(aFp, "  number of removed entries: %u\n", aTable->removedCount);
-  fprintf(aFp, "         number of searches: %u\n", aTable->stats.searches);
-  fprintf(aFp, "             number of hits: %u\n", aTable->stats.hits);
-  fprintf(aFp, "           number of misses: %u\n", aTable->stats.misses);
+  fprintf(aFp, "          number of entries: %u\n", entryCount);
+  fprintf(aFp, "  number of removed entries: %u\n", removedCount);
+  fprintf(aFp, "         number of searches: %u\n", stats.searches);
+  fprintf(aFp, "             number of hits: %u\n", stats.hits);
+  fprintf(aFp, "           number of misses: %u\n", stats.misses);
   fprintf(aFp, "      mean steps per search: %g\n",
-          aTable->stats.searches ?
-            (double)aTable->stats.steps / aTable->stats.searches : 0.);
+          stats.searches ? (double)stats.steps / stats.searches : 0.);
   fprintf(aFp, "     mean hash chain length: %g\n", mean);
   fprintf(aFp, "         standard deviation: %g\n", sigma);
   fprintf(aFp, "  maximum hash chain length: %u\n", maxChainLen);
-  fprintf(aFp, "          number of lookups: %u\n", aTable->stats.lookups);
-  fprintf(aFp, " adds that made a new entry: %u\n", aTable->stats.addMisses);
-  fprintf(aFp, "adds that recycled removeds: %u\n", aTable->stats.addOverRemoved);
-  fprintf(aFp, "   adds that found an entry: %u\n", aTable->stats.addHits);
-  fprintf(aFp, "               add failures: %u\n", aTable->stats.addFailures);
-  fprintf(aFp, "             useful removes: %u\n", aTable->stats.removeHits);
-  fprintf(aFp, "            useless removes: %u\n", aTable->stats.removeMisses);
-  fprintf(aFp, "removes that freed an entry: %u\n", aTable->stats.removeFrees);
-  fprintf(aFp, "  removes while enumerating: %u\n", aTable->stats.removeEnums);
-  fprintf(aFp, "            number of grows: %u\n", aTable->stats.grows);
-  fprintf(aFp, "          number of shrinks: %u\n", aTable->stats.shrinks);
-  fprintf(aFp, "       number of compresses: %u\n", aTable->stats.compresses);
-  fprintf(aFp, "number of enumerate shrinks: %u\n", aTable->stats.enumShrinks);
+  fprintf(aFp, "          number of lookups: %u\n", stats.lookups);
+  fprintf(aFp, " adds that made a new entry: %u\n", stats.addMisses);
+  fprintf(aFp, "adds that recycled removeds: %u\n", stats.addOverRemoved);
+  fprintf(aFp, "   adds that found an entry: %u\n", stats.addHits);
+  fprintf(aFp, "               add failures: %u\n", stats.addFailures);
+  fprintf(aFp, "             useful removes: %u\n", stats.removeHits);
+  fprintf(aFp, "            useless removes: %u\n", stats.removeMisses);
+  fprintf(aFp, "removes that freed an entry: %u\n", stats.removeFrees);
+  fprintf(aFp, "  removes while enumerating: %u\n", stats.removeEnums);
+  fprintf(aFp, "            number of grows: %u\n", stats.grows);
+  fprintf(aFp, "          number of shrinks: %u\n", stats.shrinks);
+  fprintf(aFp, "       number of compresses: %u\n", stats.compresses);
+  fprintf(aFp, "number of enumerate shrinks: %u\n", stats.enumShrinks);
 
   if (aDump && maxChainLen && hash2) {
     fputs("Maximum hash chain:\n", aFp);
     hash1 = maxChainHash1;
     hash2 = maxChainHash2;
-    entry = ADDRESS_ENTRY(aTable, hash1);
+    entry = ADDRESS_ENTRY(this, hash1);
     uint32_t i = 0;
     do {
-      if (aDump(aTable, entry, i++, aFp) != PL_DHASH_NEXT) {
+      if (aDump(this, entry, i++, aFp) != PL_DHASH_NEXT) {
         break;
       }
       hash1 -= hash2;
       hash1 &= sizeMask;
-      entry = ADDRESS_ENTRY(aTable, hash1);
+      entry = ADDRESS_ENTRY(this, hash1);
     } while (PL_DHASH_ENTRY_IS_BUSY(entry));
   }
 }
+
+void
+PL_DHashTableDumpMeter(PLDHashTable* aTable, PLDHashEnumerator aDump, FILE* aFp)
+{
+  aTable->DumpMeter(aDump, aFp);
+}
 #endif /* PL_DHASHMETER */
--- a/xpcom/glue/pldhash.h
+++ b/xpcom/glue/pldhash.h
@@ -70,21 +70,16 @@ typedef struct PLDHashTableOps  PLDHashT
  * Each hash table sub-type should nest the PLDHashEntryHdr structure at the
  * front of its particular entry type.  The keyHash member contains the result
  * of multiplying the hash code returned from the hashKey callback (see below)
  * by PL_DHASH_GOLDEN_RATIO, then constraining the result to avoid the magic 0
  * and 1 values.  The stored keyHash value is table size invariant, and it is
  * maintained automatically by PL_DHashTableOperate -- users should never set
  * it, and its only uses should be via the entry macros below.
  *
- * The PL_DHASH_ENTRY_IS_LIVE function tests whether entry is neither free nor
- * removed.  An entry may be either busy or free; if busy, it may be live or
- * removed.  Consumers of this API should not access members of entries that
- * are not live.
- *
  * However, use PL_DHASH_ENTRY_IS_BUSY for faster liveness testing of entries
  * returned by PL_DHashTableOperate, as PL_DHashTableOperate never returns a
  * non-live, busy (i.e., removed) entry pointer to its caller.  See below for
  * more details on PL_DHashTableOperate's calling rules.
  */
 struct PLDHashEntryHdr
 {
   PLDHashNumber keyHash;  /* every entry must begin like this */
@@ -97,21 +92,76 @@ PL_DHASH_ENTRY_IS_FREE(PLDHashEntryHdr* 
 }
 
 MOZ_ALWAYS_INLINE bool
 PL_DHASH_ENTRY_IS_BUSY(PLDHashEntryHdr* aEntry)
 {
   return !PL_DHASH_ENTRY_IS_FREE(aEntry);
 }
 
-MOZ_ALWAYS_INLINE bool
-PL_DHASH_ENTRY_IS_LIVE(PLDHashEntryHdr* aEntry)
+/*
+ * To consolidate keyHash computation and table grow/shrink code, we use a
+ * single entry point for lookup, add, and remove operations.  The operation
+ * codes are declared here, along with codes returned by PLDHashEnumerator
+ * functions, which control PL_DHashTableEnumerate's behavior.
+ */
+typedef enum PLDHashOperator
 {
-  return aEntry->keyHash >= 2;
-}
+  PL_DHASH_LOOKUP = 0,        /* lookup entry */
+  PL_DHASH_ADD = 1,           /* add entry */
+  PL_DHASH_REMOVE = 2,        /* remove entry, or enumerator says remove */
+  PL_DHASH_NEXT = 0,          /* enumerator says continue */
+  PL_DHASH_STOP = 1           /* enumerator says stop */
+} PLDHashOperator;
+
+/*
+ * Enumerate entries in table using etor:
+ *
+ *   count = PL_DHashTableEnumerate(table, etor, arg);
+ *
+ * PL_DHashTableEnumerate calls etor like so:
+ *
+ *   op = etor(table, entry, number, arg);
+ *
+ * where number is a zero-based ordinal assigned to live entries according to
+ * their order in aTable->entryStore.
+ *
+ * The return value, op, is treated as a set of flags.  If op is PL_DHASH_NEXT,
+ * then continue enumerating.  If op contains PL_DHASH_REMOVE, then clear (via
+ * aTable->ops->clearEntry) and free entry.  Then we check whether op contains
+ * PL_DHASH_STOP; if so, stop enumerating and return the number of live entries
+ * that were enumerated so far.  Return the total number of live entries when
+ * enumeration completes normally.
+ *
+ * If etor calls PL_DHashTableOperate on table with op != PL_DHASH_LOOKUP, it
+ * must return PL_DHASH_STOP; otherwise undefined behavior results.
+ *
+ * If any enumerator returns PL_DHASH_REMOVE, aTable->entryStore may be shrunk
+ * or compressed after enumeration, but before PL_DHashTableEnumerate returns.
+ * Such an enumerator therefore can't safely set aside entry pointers, but an
+ * enumerator that never returns PL_DHASH_REMOVE can set pointers to entries
+ * aside, e.g., to avoid copying live entries into an array of the entry type.
+ * Copying entry pointers is cheaper, and safe so long as the caller of such a
+ * "stable" Enumerate doesn't use the set-aside pointers after any call either
+ * to PL_DHashTableOperate, or to an "unstable" form of Enumerate, which might
+ * grow or shrink entryStore.
+ *
+ * If your enumerator wants to remove certain entries, but set aside pointers
+ * to other entries that it retains, it can use PL_DHashTableRawRemove on the
+ * entries to be removed, returning PL_DHASH_NEXT to skip them.  Likewise, if
+ * you want to remove entries, but for some reason you do not want entryStore
+ * to be shrunk or compressed, you can call PL_DHashTableRawRemove safely on
+ * the entry being enumerated, rather than returning PL_DHASH_REMOVE.
+ */
+typedef PLDHashOperator (*PLDHashEnumerator)(PLDHashTable* aTable,
+                                             PLDHashEntryHdr* aHdr,
+                                             uint32_t aNumber, void* aArg);
+
+typedef size_t (*PLDHashSizeOfEntryExcludingThisFun)(
+  PLDHashEntryHdr* aHdr, mozilla::MallocSizeOf aMallocSizeOf, void* aArg);
 
 /*
  * A PLDHashTable is currently 8 words (without the PL_DHASHMETER overhead)
  * on most architectures, and may be allocated on the stack or within another
  * structure or class (see below for the Init and Finish functions to use).
  *
  * To decide whether to use double hashing vs. chaining, we need to develop a
  * trade-off relation, as follows:
@@ -177,31 +227,39 @@ PL_DHASH_ENTRY_IS_LIVE(PLDHashEntryHdr* 
  * only if aTable->generation has not changed.
  *
  * The moral of this story: there is no one-size-fits-all hash table scheme,
  * but for small table entry size, and assuming entry address stability is not
  * required, double hashing wins.
  */
 struct PLDHashTable
 {
-  const PLDHashTableOps* ops;         /* virtual operations, see below */
+  /*
+   * Virtual operations; see below. This field is public because it's commonly
+   * zeroed to indicate that a table is no longer live.
+   */
+  const PLDHashTableOps* ops;
+
   void*               data;           /* ops- and instance-specific data */
+
+private:
   int16_t             hashShift;      /* multiplicative hash shift */
   /*
    * |recursionLevel| is only used in debug builds, but is present in opt
    * builds to avoid binary compatibility problems when mixing DEBUG and
    * non-DEBUG components.  (Actually, even if it were removed,
    * sizeof(PLDHashTable) wouldn't change, due to struct padding.)
    */
   uint16_t            recursionLevel; /* used to detect unsafe re-entry */
   uint32_t            entrySize;      /* number of bytes in an entry */
   uint32_t            entryCount;     /* number of entries in table */
   uint32_t            removedCount;   /* removed entry sentinels in table */
   uint32_t            generation;     /* entry storage generation number */
   char*               entryStore;     /* entry storage */
+
 #ifdef PL_DHASHMETER
   struct PLDHashStats
   {
     uint32_t        searches;       /* total number of table searches */
     uint32_t        steps;          /* hash chain links traversed */
     uint32_t        hits;           /* searches that found key */
     uint32_t        misses;         /* searches that didn't find key */
     uint32_t        lookups;        /* number of PL_DHASH_LOOKUPs */
@@ -214,25 +272,73 @@ struct PLDHashTable
     uint32_t        removeFrees;    /* removes that freed entry directly */
     uint32_t        removeEnums;    /* removes done by Enumerate */
     uint32_t        grows;          /* table expansions */
     uint32_t        shrinks;        /* table contractions */
     uint32_t        compresses;     /* table compressions */
     uint32_t        enumShrinks;    /* contractions after Enumerate */
   } stats;
 #endif
-};
+
+public:
+  /*
+   * Size in entries (gross, not net of free and removed sentinels) for table.
+   * We store hashShift rather than sizeLog2 to optimize the collision-free case
+   * in SearchTable.
+   */
+  uint32_t Capacity() const
+  {
+    return ((uint32_t)1 << (PL_DHASH_BITS - hashShift));
+  }
+
+  uint32_t EntrySize()  const { return entrySize; }
+  uint32_t EntryCount() const { return entryCount; }
+  uint32_t Generation() const { return generation; }
+
+  bool Init(const PLDHashTableOps* aOps, void* aData, uint32_t aEntrySize,
+            const mozilla::fallible_t&, uint32_t aLength);
+
+  void Finish();
+
+  PLDHashEntryHdr* Operate(const void* aKey, PLDHashOperator aOp);
+
+  void RawRemove(PLDHashEntryHdr* aEntry);
+
+  uint32_t Enumerate(PLDHashEnumerator aEtor, void* aArg);
 
-/*
- * Size in entries (gross, not net of free and removed sentinels) for table.
- * We store hashShift rather than sizeLog2 to optimize the collision-free case
- * in SearchTable.
- */
-#define PL_DHASH_TABLE_CAPACITY(table) \
-    ((uint32_t)1 << (PL_DHASH_BITS - (table)->hashShift))
+  size_t SizeOfIncludingThis(
+    PLDHashSizeOfEntryExcludingThisFun aSizeOfEntryExcludingThis,
+    mozilla::MallocSizeOf aMallocSizeOf, void* aArg = nullptr) const;
+
+  size_t SizeOfExcludingThis(
+    PLDHashSizeOfEntryExcludingThisFun aSizeOfEntryExcludingThis,
+    mozilla::MallocSizeOf aMallocSizeOf, void* aArg = nullptr) const;
+
+#ifdef DEBUG
+  void MarkImmutable();
+#endif
+
+  void MoveEntryStub(const PLDHashEntryHdr* aFrom, PLDHashEntryHdr* aTo);
+
+  void ClearEntryStub(PLDHashEntryHdr* aEntry);
+
+  void FreeStringKey(PLDHashEntryHdr* aEntry);
+
+#ifdef PL_DHASHMETER
+  void DumpMeter(PLDHashEnumerator aDump, FILE* aFp);
+#endif
+
+private:
+  PLDHashEntryHdr* PL_DHASH_FASTCALL
+    SearchTable(const void* aKey, PLDHashNumber aKeyHash, PLDHashOperator aOp);
+
+  PLDHashEntryHdr* PL_DHASH_FASTCALL FindFreeEntry(PLDHashNumber aKeyHash);
+
+  bool ChangeTable(int aDeltaLog2);
+};
 
 /*
  * Table space at entryStore is allocated and freed using these callbacks.
  * The allocator should return null on error only (not if called with aNBytes
  * equal to 0; but note that pldhash.c code will never call with 0 aNBytes).
  */
 typedef void* (*PLDHashAllocTable)(PLDHashTable* aTable, uint32_t aNBytes);
 
@@ -420,31 +526,16 @@ MOZ_WARN_UNUSED_RESULT NS_COM_GLUE bool 
  * Finalize aTable's data, free its entry storage using aTable->ops->freeTable,
  * and leave its members unchanged from their last live values (which leaves
  * pointers dangling).  If you want to burn cycles clearing aTable, it's up to
  * your code to call memset.
  */
 NS_COM_GLUE void PL_DHashTableFinish(PLDHashTable* aTable);
 
 /*
- * To consolidate keyHash computation and table grow/shrink code, we use a
- * single entry point for lookup, add, and remove operations.  The operation
- * codes are declared here, along with codes returned by PLDHashEnumerator
- * functions, which control PL_DHashTableEnumerate's behavior.
- */
-typedef enum PLDHashOperator
-{
-  PL_DHASH_LOOKUP = 0,        /* lookup entry */
-  PL_DHASH_ADD = 1,           /* add entry */
-  PL_DHASH_REMOVE = 2,        /* remove entry, or enumerator says remove */
-  PL_DHASH_NEXT = 0,          /* enumerator says continue */
-  PL_DHASH_STOP = 1           /* enumerator says stop */
-} PLDHashOperator;
-
-/*
  * To lookup a key in table, call:
  *
  *  entry = PL_DHashTableOperate(table, key, PL_DHASH_LOOKUP);
  *
  * If PL_DHASH_ENTRY_IS_BUSY(entry) is true, key was found and it identifies
  * entry.  If PL_DHASH_ENTRY_IS_FREE(entry) is true, key was not found.
  *
  * To add an entry identified by key to table, call:
@@ -480,66 +571,20 @@ PL_DHashTableOperate(PLDHashTable* aTabl
  * the inefficiency of a full PL_DHashTableOperate (which rehashes in order
  * to find the entry given its key) is not tolerable.  This function does not
  * shrink the table if it is underloaded.  It does not update stats #ifdef
  * PL_DHASHMETER, either.
  */
 NS_COM_GLUE void PL_DHashTableRawRemove(PLDHashTable* aTable,
                                         PLDHashEntryHdr* aEntry);
 
-/*
- * Enumerate entries in table using etor:
- *
- *   count = PL_DHashTableEnumerate(table, etor, arg);
- *
- * PL_DHashTableEnumerate calls etor like so:
- *
- *   op = etor(table, entry, number, arg);
- *
- * where number is a zero-based ordinal assigned to live entries according to
- * their order in aTable->entryStore.
- *
- * The return value, op, is treated as a set of flags.  If op is PL_DHASH_NEXT,
- * then continue enumerating.  If op contains PL_DHASH_REMOVE, then clear (via
- * aTable->ops->clearEntry) and free entry.  Then we check whether op contains
- * PL_DHASH_STOP; if so, stop enumerating and return the number of live entries
- * that were enumerated so far.  Return the total number of live entries when
- * enumeration completes normally.
- *
- * If etor calls PL_DHashTableOperate on table with op != PL_DHASH_LOOKUP, it
- * must return PL_DHASH_STOP; otherwise undefined behavior results.
- *
- * If any enumerator returns PL_DHASH_REMOVE, aTable->entryStore may be shrunk
- * or compressed after enumeration, but before PL_DHashTableEnumerate returns.
- * Such an enumerator therefore can't safely set aside entry pointers, but an
- * enumerator that never returns PL_DHASH_REMOVE can set pointers to entries
- * aside, e.g., to avoid copying live entries into an array of the entry type.
- * Copying entry pointers is cheaper, and safe so long as the caller of such a
- * "stable" Enumerate doesn't use the set-aside pointers after any call either
- * to PL_DHashTableOperate, or to an "unstable" form of Enumerate, which might
- * grow or shrink entryStore.
- *
- * If your enumerator wants to remove certain entries, but set aside pointers
- * to other entries that it retains, it can use PL_DHashTableRawRemove on the
- * entries to be removed, returning PL_DHASH_NEXT to skip them.  Likewise, if
- * you want to remove entries, but for some reason you do not want entryStore
- * to be shrunk or compressed, you can call PL_DHashTableRawRemove safely on
- * the entry being enumerated, rather than returning PL_DHASH_REMOVE.
- */
-typedef PLDHashOperator (*PLDHashEnumerator)(PLDHashTable* aTable,
-                                             PLDHashEntryHdr* aHdr,
-                                             uint32_t aNumber, void* aArg);
-
 NS_COM_GLUE uint32_t
 PL_DHashTableEnumerate(PLDHashTable* aTable, PLDHashEnumerator aEtor,
                        void* aArg);
 
-typedef size_t (*PLDHashSizeOfEntryExcludingThisFun)(
-  PLDHashEntryHdr* aHdr, mozilla::MallocSizeOf aMallocSizeOf, void* aArg);
-
 /**
  * Measure the size of the table's entry storage, and if
  * |aSizeOfEntryExcludingThis| is non-nullptr, measure the size of things
  * pointed to by entries.  Doesn't measure |ops| because it's often shared
  * between tables, nor |data| because it's opaque.
  */
 NS_COM_GLUE size_t PL_DHashTableSizeOfExcludingThis(
   const PLDHashTable* aTable,