Bug 1508873 - part 2 - convert HashTable to work primarily in terms of slots; r=luke
author: Nathan Froyd <froydnj@mozilla.com>
Thu, 13 Dec 2018 12:21:19 -0500
changeset 450392 20d92a98e3a37f3fd5a58717f28567f214429b3e
parent 450391 ff944ef6b8a4bf09a8a8083ee7bf7dfa964e75b0
child 450393 95e3de1d66432b6029b10d5a653e176c65462543
push id: 110492
push user: nfroyd@mozilla.com
push date: Thu, 13 Dec 2018 17:21:43 +0000
treeherder: mozilla-inbound@95e3de1d6643 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: luke
bugs: 1508873
milestone: 66.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1508873 - part 2 - convert HashTable to work primarily in terms of slots; r=luke HashTableEntry's data layout currently wastes a fair amount of space due to ABI-mandated padding. For instance, HashTableEntry<T*> on a 64-bit platform looks like: class HashTableEntry { HashNumber mKeyHash; // Four bytes of wasted space here to pad mValueData to the correct place. unsigned char mValueData[sizeof(T*)]; }; This wasted space means that sets of pointers backed by mozilla::HashTable waste a quarter of their entry storage space. Maps of pointers to pointers waste a sixth of their entry storage space. We'd like to fix this by packing all the cached hashes together, followed by all the hash table entries. As a first step to laying out the hash table storage differently, we have to make HashTable not access entries directly, but go through an abstraction that represents the key and the entry. We call this abstraction "slots". This commit is similar to the change done for PLDHashTable previously. Parts of HashTable still work in terms of Entry; the creation and destruction of tables was not worth changing here. We'll address that in the next commit.
mfbt/HashTable.h
--- a/mfbt/HashTable.h
+++ b/mfbt/HashTable.h
@@ -75,16 +75,17 @@
 #define mozilla_HashTable_h
 
 #include "mozilla/AllocPolicy.h"
 #include "mozilla/Assertions.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/Casting.h"
 #include "mozilla/HashFunctions.h"
 #include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
 #include "mozilla/MemoryChecking.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/Move.h"
 #include "mozilla/Opaque.h"
 #include "mozilla/OperatorNewExtensions.h"
 #include "mozilla/PodOperations.h"
 #include "mozilla/ReentrancyGuard.h"
 #include "mozilla/TypeTraits.h"
@@ -1073,137 +1074,212 @@ class HashTableEntry {
   void setLive(HashNumber aHashNumber, Args&&... aArgs) {
     MOZ_ASSERT(!isLive());
     mKeyHash = aHashNumber;
     new (KnownNotNull, valuePtr()) T(std::forward<Args>(aArgs)...);
     MOZ_ASSERT(isLive());
   }
 };
 
+// A slot represents a cached hash value and its associated entry stored
+// in the hash table. While these two things currently belong to the same
+// object, HashTableEntry, above, they do not necessarily need to be
+// contiguous in memory and this abstraction helps enforce the separation
+// between the two.
+template <class T>
+class EntrySlot {
+  using NonConstT = typename RemoveConst<T>::Type;
+
+  using Entry = HashTableEntry<T>;
+
+  Entry* mEntry;
+
+ public:
+  EntrySlot(Entry* aEntry) : mEntry(aEntry) {}
+
+  EntrySlot(const EntrySlot&) = default;
+  EntrySlot(EntrySlot&& aOther) = default;
+
+  EntrySlot& operator=(const EntrySlot&) = default;
+  EntrySlot& operator=(EntrySlot&&) = default;
+
+  bool operator==(const EntrySlot& aRhs) const { return mEntry == aRhs.mEntry; }
+
+  bool operator<(const EntrySlot& aRhs) const { return mEntry < aRhs.mEntry; }
+
+  EntrySlot& operator++() {
+    ++mEntry;
+    return *this;
+  }
+
+  void destroy() { mEntry->destroy(); }
+
+  void swap(EntrySlot& aOther) { mEntry->swap(aOther.mEntry); }
+
+  T& get() const { return mEntry->get(); }
+
+  NonConstT& getMutable() { return mEntry->getMutable(); }
+
+  bool isFree() const { return mEntry->isFree(); }
+
+  void clearLive() { mEntry->clearLive(); }
+
+  void clear() { mEntry->clear(); }
+
+  bool isRemoved() const { return mEntry->isRemoved(); }
+
+  void removeLive() { mEntry->removeLive(); }
+
+  bool isLive() const { return mEntry->isLive(); }
+
+  void setCollision() { mEntry->setCollision(); }
+  void unsetCollision() { mEntry->unsetCollision(); }
+  bool hasCollision() const { return mEntry->hasCollision(); }
+  bool matchHash(HashNumber hn) { return mEntry->matchHash(hn); }
+  HashNumber getKeyHash() const { return mEntry->getKeyHash(); }
+
+  template <typename... Args>
+  void setLive(HashNumber aHashNumber, Args&&... aArgs) {
+    mEntry->setLive(aHashNumber, std::forward<Args>(aArgs)...);
+  }
+
+  Entry* toEntry() const { return mEntry; }
+};
+
 template <class T, class HashPolicy, class AllocPolicy>
 class HashTable : private AllocPolicy {
   friend class mozilla::ReentrancyGuard;
 
   using NonConstT = typename RemoveConst<T>::Type;
   using Key = typename HashPolicy::KeyType;
   using Lookup = typename HashPolicy::Lookup;
 
  public:
   using Entry = HashTableEntry<T>;
+  using Slot = EntrySlot<T>;
+
+  template <typename F>
+  static void forEachSlot(Entry* aEntries, uint32_t aCapacity, F&& f) {
+    Entry* end = aEntries + aCapacity;
+    for (Entry* start = aEntries; start < end; ++start) {
+      Slot slot(start);
+      f(slot);
+    }
+  }
 
   // A nullable pointer to a hash table element. A Ptr |p| can be tested
   // either explicitly |if (p.found()) p->...| or using boolean conversion
   // |if (p) p->...|. Ptr objects must not be used after any mutating hash
   // table operations unless |generation()| is tested.
   class Ptr {
     friend class HashTable;
 
-    Entry* mEntry;
+    Slot mSlot;
 #ifdef DEBUG
     const HashTable* mTable;
     Generation mGeneration;
 #endif
 
    protected:
-    Ptr(Entry& aEntry, const HashTable& aTable)
-        : mEntry(&aEntry)
+    Ptr(Slot aSlot, const HashTable& aTable)
+        : mSlot(aSlot)
 #ifdef DEBUG
           ,
           mTable(&aTable),
           mGeneration(aTable.generation())
 #endif
     {
     }
 
     // This constructor is used only by AddPtr() within lookupForAdd().
     explicit Ptr(const HashTable& aTable)
-        : mEntry(nullptr)
+        : mSlot(nullptr)
 #ifdef DEBUG
           ,
           mTable(&aTable),
           mGeneration(aTable.generation())
 #endif
     {
     }
 
-    bool isValid() const { return !!mEntry; }
+    bool isValid() const { return !!mSlot.toEntry(); }
 
    public:
     Ptr()
-        : mEntry(nullptr)
+        : mSlot(nullptr)
 #ifdef DEBUG
           ,
           mTable(nullptr),
           mGeneration(0)
 #endif
     {
     }
 
     bool found() const {
       if (!isValid()) {
         return false;
       }
 #ifdef DEBUG
       MOZ_ASSERT(mGeneration == mTable->generation());
 #endif
-      return mEntry->isLive();
+      return mSlot.isLive();
     }
 
     explicit operator bool() const { return found(); }
 
     bool operator==(const Ptr& aRhs) const {
       MOZ_ASSERT(found() && aRhs.found());
-      return mEntry == aRhs.mEntry;
+      return mSlot == aRhs.mSlot;
     }
 
     bool operator!=(const Ptr& aRhs) const {
 #ifdef DEBUG
       MOZ_ASSERT(mGeneration == mTable->generation());
 #endif
       return !(*this == aRhs);
     }
 
     T& operator*() const {
 #ifdef DEBUG
       MOZ_ASSERT(found());
       MOZ_ASSERT(mGeneration == mTable->generation());
 #endif
-      return mEntry->get();
+      return mSlot.get();
     }
 
     T* operator->() const {
 #ifdef DEBUG
       MOZ_ASSERT(found());
       MOZ_ASSERT(mGeneration == mTable->generation());
 #endif
-      return &mEntry->get();
+      return &mSlot.get();
     }
   };
 
   // A Ptr that can be used to add a key after a failed lookup.
   class AddPtr : public Ptr {
     friend class HashTable;
 
     HashNumber mKeyHash;
 #ifdef DEBUG
     uint64_t mMutationCount;
 #endif
 
-    AddPtr(Entry& aEntry, const HashTable& aTable, HashNumber aHashNumber)
-        : Ptr(aEntry, aTable),
+    AddPtr(Slot aSlot, const HashTable& aTable, HashNumber aHashNumber)
+        : Ptr(aSlot, aTable),
           mKeyHash(aHashNumber)
 #ifdef DEBUG
           ,
           mMutationCount(aTable.mMutationCount)
 #endif
     {
     }
 
     // This constructor is used when lookupForAdd() is performed on a table
-    // lacking entry storage; it leaves mEntry null but initializes everything
+    // lacking entry storage; it leaves mSlot null but initializes everything
     // else.
     AddPtr(const HashTable& aTable, HashNumber aHashNumber)
         : Ptr(aTable),
           mKeyHash(aHashNumber)
 #ifdef DEBUG
           ,
           mMutationCount(aTable.mMutationCount)
 #endif
@@ -1220,33 +1296,33 @@ class HashTable : private AllocPolicy {
   // A hash table iterator that (mostly) doesn't allow table modifications.
   // As with Ptr/AddPtr, Iterator objects must not be used after any mutating
   // hash table operation unless the |generation()| is tested.
   class Iterator {
    protected:
     friend class HashTable;
 
     explicit Iterator(const HashTable& aTable)
-        : mCur(aTable.mTable),
-          mEnd(aTable.mTable + aTable.capacity())
+        : mCur(aTable.slotForIndex(0)),
+          mEnd(aTable.slotForIndex(aTable.capacity()))
 #ifdef DEBUG
           ,
           mTable(aTable),
           mMutationCount(aTable.mMutationCount),
           mGeneration(aTable.generation()),
           mValidEntry(true)
 #endif
     {
-      while (mCur < mEnd && !mCur->isLive()) {
+      while (mCur < mEnd && !mCur.isLive()) {
         ++mCur;
       }
     }
 
-    Entry* mCur;
-    Entry* mEnd;
+    Slot mCur;
+    Slot mEnd;
 #ifdef DEBUG
     const HashTable& mTable;
     uint64_t mMutationCount;
     Generation mGeneration;
     bool mValidEntry;
 #endif
 
    public:
@@ -1260,26 +1336,26 @@ class HashTable : private AllocPolicy {
 
     T& get() const {
       MOZ_ASSERT(!done());
 #ifdef DEBUG
       MOZ_ASSERT(mValidEntry);
       MOZ_ASSERT(mGeneration == mTable.generation());
       MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
 #endif
-      return mCur->get();
+      return mCur.get();
     }
 
     void next() {
       MOZ_ASSERT(!done());
 #ifdef DEBUG
       MOZ_ASSERT(mGeneration == mTable.generation());
       MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
 #endif
-      while (++mCur < mEnd && !mCur->isLive()) {
+      while (++mCur < mEnd && !mCur.isLive()) {
         continue;
       }
 #ifdef DEBUG
       mValidEntry = true;
 #endif
     }
   };
 
@@ -1311,40 +1387,40 @@ class HashTable : private AllocPolicy {
           mRemoved(aOther.mRemoved) {
       aOther.mRekeyed = false;
       aOther.mRemoved = false;
     }
 
     // Removes the current element from the table, leaving |get()|
     // invalid until the next call to |next()|.
     void remove() {
-      mTable.remove(*this->mCur);
+      mTable.remove(this->mCur);
       mRemoved = true;
 #ifdef DEBUG
       this->mValidEntry = false;
       this->mMutationCount = mTable.mMutationCount;
 #endif
     }
 
     NonConstT& getMutable() {
       MOZ_ASSERT(!this->done());
 #ifdef DEBUG
       MOZ_ASSERT(this->mValidEntry);
       MOZ_ASSERT(this->mGeneration == this->Iterator::mTable.generation());
       MOZ_ASSERT(this->mMutationCount == this->Iterator::mTable.mMutationCount);
 #endif
-      return this->mCur->getMutable();
+      return this->mCur.getMutable();
     }
 
     // Removes the current element and re-inserts it into the table with
     // a new key at the new Lookup position.  |get()| is invalid after
     // this operation until the next call to |next()|.
     void rekey(const Lookup& l, const Key& k) {
-      MOZ_ASSERT(&k != &HashPolicy::getKey(this->mCur->get()));
-      Ptr p(*this->mCur, mTable);
+      MOZ_ASSERT(&k != &HashPolicy::getKey(this->mCur.get()));
+      Ptr p(this->mCur, mTable);
       mTable.rekeyWithoutRehash(p, l, k);
       mRekeyed = true;
 #ifdef DEBUG
       this->mValidEntry = false;
       this->mMutationCount = mTable.mMutationCount;
 #endif
     }
 
@@ -1509,29 +1585,27 @@ class HashTable : private AllocPolicy {
 
   static Entry* createTable(AllocPolicy& aAllocPolicy, uint32_t aCapacity,
                             FailureBehavior aReportFailure = ReportFailure) {
     Entry* table =
         aReportFailure
             ? aAllocPolicy.template pod_malloc<Entry>(aCapacity)
             : aAllocPolicy.template maybe_pod_malloc<Entry>(aCapacity);
     if (table) {
-      for (uint32_t i = 0; i < aCapacity; i++) {
-        new (KnownNotNull, &table[i]) Entry();
-      }
+      forEachSlot(table, aCapacity, [&](const Slot& slot) {
+        new (KnownNotNull, slot.toEntry()) Entry();
+      });
     }
     return table;
   }
 
   static void destroyTable(AllocPolicy& aAllocPolicy, Entry* aOldTable,
                            uint32_t aCapacity) {
-    Entry* end = aOldTable + aCapacity;
-    for (Entry* e = aOldTable; e < end; ++e) {
-      e->~Entry();
-    }
+    forEachSlot(aOldTable, aCapacity,
+                [&](const Slot& slot) { slot.toEntry()->~Entry(); });
     aAllocPolicy.free_(aOldTable, aCapacity);
   }
 
  public:
   HashTable(AllocPolicy aAllocPolicy, uint32_t aLen)
       : AllocPolicy(aAllocPolicy),
         mGen(0),
         mHashShift(hashShift(aLen)),
@@ -1570,102 +1644,104 @@ class HashTable : private AllocPolicy {
     return dh;
   }
 
   static HashNumber applyDoubleHash(HashNumber aHash1,
                                     const DoubleHash& aDoubleHash) {
     return (aHash1 - aDoubleHash.mHash2) & aDoubleHash.mSizeMask;
   }
 
-  static MOZ_ALWAYS_INLINE bool match(Entry& aEntry, const Lookup& aLookup) {
-    return HashPolicy::match(HashPolicy::getKey(aEntry.get()), aLookup);
+  static MOZ_ALWAYS_INLINE bool match(T& aEntry, const Lookup& aLookup) {
+    return HashPolicy::match(HashPolicy::getKey(aEntry), aLookup);
   }
 
   enum LookupReason { ForNonAdd, ForAdd };
 
+  Slot slotForIndex(HashNumber aIndex) const { return Slot(&mTable[aIndex]); }
+
   // Warning: in order for readonlyThreadsafeLookup() to be safe this
   // function must not modify the table in any way when Reason==ForNonAdd.
   template <LookupReason Reason>
-  MOZ_ALWAYS_INLINE Entry& lookup(const Lookup& aLookup,
-                                  HashNumber aKeyHash) const {
+  MOZ_ALWAYS_INLINE Slot lookup(const Lookup& aLookup,
+                                HashNumber aKeyHash) const {
     MOZ_ASSERT(isLiveHash(aKeyHash));
     MOZ_ASSERT(!(aKeyHash & sCollisionBit));
     MOZ_ASSERT(mTable);
 
     // Compute the primary hash address.
     HashNumber h1 = hash1(aKeyHash);
-    Entry* entry = &mTable[h1];
+    Slot slot = slotForIndex(h1);
 
     // Miss: return space for a new entry.
-    if (entry->isFree()) {
-      return *entry;
+    if (slot.isFree()) {
+      return slot;
     }
 
     // Hit: return entry.
-    if (entry->matchHash(aKeyHash) && match(*entry, aLookup)) {
-      return *entry;
+    if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
+      return slot;
     }
 
     // Collision: double hash.
     DoubleHash dh = hash2(aKeyHash);
 
     // Save the first removed entry pointer so we can recycle later.
-    Entry* firstRemoved = nullptr;
+    Maybe<Slot> firstRemoved;
 
     while (true) {
       if (Reason == ForAdd && !firstRemoved) {
-        if (MOZ_UNLIKELY(entry->isRemoved())) {
-          firstRemoved = entry;
+        if (MOZ_UNLIKELY(slot.isRemoved())) {
+          firstRemoved.emplace(slot);
         } else {
-          entry->setCollision();
+          slot.setCollision();
         }
       }
 
       h1 = applyDoubleHash(h1, dh);
 
-      entry = &mTable[h1];
-      if (entry->isFree()) {
-        return firstRemoved ? *firstRemoved : *entry;
+      slot = slotForIndex(h1);
+      if (slot.isFree()) {
+        return firstRemoved.refOr(slot);
       }
 
-      if (entry->matchHash(aKeyHash) && match(*entry, aLookup)) {
-        return *entry;
+      if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
+        return slot;
       }
     }
   }
 
   // This is a copy of lookup() hardcoded to the assumptions:
   //   1. the lookup is for an add;
   //   2. the key, whose |keyHash| has been passed, is not in the table.
-  Entry& findNonLiveEntry(HashNumber aKeyHash) {
+  Slot findNonLiveSlot(HashNumber aKeyHash) {
     MOZ_ASSERT(!(aKeyHash & sCollisionBit));
     MOZ_ASSERT(mTable);
 
     // We assume 'aKeyHash' has already been distributed.
 
     // Compute the primary hash address.
     HashNumber h1 = hash1(aKeyHash);
-    Entry* entry = &mTable[h1];
+    Slot slot = slotForIndex(h1);
 
     // Miss: return space for a new entry.
-    if (!entry->isLive()) {
-      return *entry;
+    if (!slot.isLive()) {
+      return slot;
     }
 
     // Collision: double hash.
     DoubleHash dh = hash2(aKeyHash);
 
     while (true) {
-      entry->setCollision();
+      slot.setCollision();
 
       h1 = applyDoubleHash(h1, dh);
 
-      entry = &mTable[h1];
-      if (!entry->isLive()) {
-        return *entry;
+      slot = slotForIndex(h1);
+      if (!slot.isLive()) {
+        return slot;
       }
     }
   }
 
   enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
 
   RebuildStatus changeTableSize(
       uint32_t newCapacity, FailureBehavior aReportFailure = ReportFailure) {
@@ -1691,26 +1767,25 @@ class HashTable : private AllocPolicy {
 
     // We can't fail from here on, so update table parameters.
     mHashShift = kHashNumberBits - newLog2;
     mRemovedCount = 0;
     mGen++;
     mTable = newTable;
 
     // Copy only live entries, leaving removed ones behind.
-    Entry* end = oldTable + oldCapacity;
-    for (Entry* src = oldTable; src < end; ++src) {
-      if (src->isLive()) {
-        HashNumber hn = src->getKeyHash();
-        findNonLiveEntry(hn).setLive(
-            hn, std::move(const_cast<typename Entry::NonConstT&>(src->get())));
+    forEachSlot(oldTable, oldCapacity, [&](Slot& slot) {
+      if (slot.isLive()) {
+        HashNumber hn = slot.getKeyHash();
+        findNonLiveSlot(hn).setLive(
+            hn, std::move(const_cast<typename Entry::NonConstT&>(slot.get())));
       }
 
-      src->~Entry();
-    }
+      slot.clear();
+    });
 
     // All entries have been destroyed, no need to destroyTable.
     this->free_(oldTable, oldCapacity);
     return Rehashed;
   }
 
   RebuildStatus rehashIfOverloaded(
       FailureBehavior aReportFailure = ReportFailure) {
@@ -1736,31 +1811,36 @@ class HashTable : private AllocPolicy {
   }
 
   void infallibleRehashIfOverloaded() {
     if (rehashIfOverloaded(DontReportFailure) == RehashFailed) {
       rehashTableInPlace();
     }
   }
 
-  void remove(Entry& aEntry) {
+  void remove(Slot& aSlot) {
     MOZ_ASSERT(mTable);
 
-    if (aEntry.hasCollision()) {
-      aEntry.removeLive();
+    if (aSlot.hasCollision()) {
+      aSlot.removeLive();
       mRemovedCount++;
     } else {
-      aEntry.clearLive();
+      aSlot.clearLive();
     }
     mEntryCount--;
 #ifdef DEBUG
     mMutationCount++;
 #endif
   }
 
+  void remove(Entry& aEntry) {
+    Slot slot(&aEntry);
+    remove(slot);
+  }
+
   void shrinkIfUnderloaded() {
     static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
                   "multiplication below could overflow");
     bool underloaded =
         capacity() > sMinCapacity &&
         mEntryCount <= capacity() * sMinAlphaNumerator / sAlphaDenominator;
 
     if (underloaded) {
@@ -1771,40 +1851,38 @@ class HashTable : private AllocPolicy {
   // This is identical to changeTableSize(currentSize), but without requiring
   // a second table.  We do this by recycling the collision bits to tell us if
   // the element is already inserted or still waiting to be inserted.  Since
   // already-inserted elements win any conflicts, we get the same table as we
   // would have gotten through random insertion order.
   void rehashTableInPlace() {
     mRemovedCount = 0;
     mGen++;
-    for (uint32_t i = 0; i < capacity(); ++i) {
-      mTable[i].unsetCollision();
-    }
+    forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.unsetCollision(); });
     for (uint32_t i = 0; i < capacity();) {
-      Entry* src = &mTable[i];
+      Slot src = slotForIndex(i);
 
-      if (!src->isLive() || src->hasCollision()) {
+      if (!src.isLive() || src.hasCollision()) {
         ++i;
         continue;
       }
 
-      HashNumber keyHash = src->getKeyHash();
+      HashNumber keyHash = src.getKeyHash();
       HashNumber h1 = hash1(keyHash);
       DoubleHash dh = hash2(keyHash);
-      Entry* tgt = &mTable[h1];
+      Slot tgt = slotForIndex(h1);
       while (true) {
-        if (!tgt->hasCollision()) {
-          src->swap(tgt);
-          tgt->setCollision();
+        if (!tgt.hasCollision()) {
+          src.swap(tgt);
+          tgt.setCollision();
           break;
         }
 
         h1 = applyDoubleHash(h1, dh);
-        tgt = &mTable[h1];
+        tgt = slotForIndex(h1);
       }
     }
 
     // TODO: this algorithm leaves collision bits on *all* elements, even if
     // they are on no collision path. We have the option of setting the
     // collision bits correctly on a subsequent pass or skipping the rehash
     // unless we are totally filled with tombstones: benchmark to find out
     // which approach is best.
@@ -1815,37 +1893,33 @@ class HashTable : private AllocPolicy {
   //
   // Prefer to use putNewInfallible; this function does not check
   // invariants.
   template <typename... Args>
   void putNewInfallibleInternal(const Lookup& aLookup, Args&&... aArgs) {
     MOZ_ASSERT(mTable);
 
     HashNumber keyHash = prepareHash(aLookup);
-    Entry* entry = &findNonLiveEntry(keyHash);
-    MOZ_ASSERT(entry);
+    Slot slot = findNonLiveSlot(keyHash);
 
-    if (entry->isRemoved()) {
+    if (slot.isRemoved()) {
       mRemovedCount--;
       keyHash |= sCollisionBit;
     }
 
-    entry->setLive(keyHash, std::forward<Args>(aArgs)...);
+    slot.setLive(keyHash, std::forward<Args>(aArgs)...);
     mEntryCount++;
 #ifdef DEBUG
     mMutationCount++;
 #endif
   }
 
  public:
   void clear() {
-    Entry* end = mTable + capacity();
-    for (Entry* e = mTable; e < end; ++e) {
-      e->clear();
-    }
+    forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.clear(); });
     mRemovedCount = 0;
     mEntryCount = 0;
 #ifdef DEBUG
     mMutationCount++;
 #endif
   }
 
   // Resize the table down to the smallest capacity that doesn't overload the
@@ -1966,42 +2040,42 @@ class HashTable : private AllocPolicy {
     if (!aPtr.isValid()) {
       MOZ_ASSERT(!mTable && mEntryCount == 0);
       uint32_t newCapacity = rawCapacity();
       RebuildStatus status = changeTableSize(newCapacity, ReportFailure);
       MOZ_ASSERT(status != NotOverloaded);
       if (status == RehashFailed) {
         return false;
       }
-      aPtr.mEntry = &findNonLiveEntry(aPtr.mKeyHash);
+      aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);
 
-    } else if (aPtr.mEntry->isRemoved()) {
+    } else if (aPtr.mSlot.isRemoved()) {
       // Changing an entry from removed to live does not affect whether we are
       // overloaded and can be handled separately.
       if (!this->checkSimulatedOOM()) {
         return false;
       }
       mRemovedCount--;
       aPtr.mKeyHash |= sCollisionBit;
 
     } else {
-      // Preserve the validity of |aPtr.mEntry|.
+      // Preserve the validity of |aPtr.mSlot|.
       RebuildStatus status = rehashIfOverloaded();
       if (status == RehashFailed) {
         return false;
       }
       if (status == NotOverloaded && !this->checkSimulatedOOM()) {
         return false;
       }
       if (status == Rehashed) {
-        aPtr.mEntry = &findNonLiveEntry(aPtr.mKeyHash);
+        aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);
       }
     }
 
-    aPtr.mEntry->setLive(aPtr.mKeyHash, std::forward<Args>(aArgs)...);
+    aPtr.mSlot.setLive(aPtr.mKeyHash, std::forward<Args>(aArgs)...);
     mEntryCount++;
 #ifdef DEBUG
     mMutationCount++;
     aPtr.mGeneration = generation();
     aPtr.mMutationCount = mMutationCount;
 #endif
     return true;
   }
@@ -2044,45 +2118,45 @@ class HashTable : private AllocPolicy {
 #ifdef DEBUG
     aPtr.mGeneration = generation();
     aPtr.mMutationCount = mMutationCount;
 #endif
     if (mTable) {
       ReentrancyGuard g(*this);
       // Check that aLookup has not been destroyed.
       MOZ_ASSERT(prepareHash(aLookup) == aPtr.mKeyHash);
-      aPtr.mEntry = &lookup<ForAdd>(aLookup, aPtr.mKeyHash);
+      aPtr.mSlot = lookup<ForAdd>(aLookup, aPtr.mKeyHash);
       if (aPtr.found()) {
         return true;
       }
     } else {
       // Clear aPtr so it's invalid; add() will allocate storage and redo the
       // lookup.
-      aPtr.mEntry = nullptr;
+      aPtr.mSlot = Slot(nullptr);
     }
     return add(aPtr, std::forward<Args>(aArgs)...);
   }
 
   void remove(Ptr aPtr) {
     MOZ_ASSERT(mTable);
     ReentrancyGuard g(*this);
     MOZ_ASSERT(aPtr.found());
     MOZ_ASSERT(aPtr.mGeneration == generation());
-    remove(*aPtr.mEntry);
+    remove(aPtr.mSlot);
     shrinkIfUnderloaded();
   }
 
   void rekeyWithoutRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
     MOZ_ASSERT(mTable);
     ReentrancyGuard g(*this);
     MOZ_ASSERT(aPtr.found());
     MOZ_ASSERT(aPtr.mGeneration == generation());
     typename HashTableEntry<T>::NonConstT t(std::move(*aPtr));
     HashPolicy::setKey(t, const_cast<Key&>(aKey));
-    remove(*aPtr.mEntry);
+    remove(aPtr.mSlot);
     putNewInfallibleInternal(aLookup, std::move(t));
   }
 
   void rekeyAndMaybeRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
     rekeyWithoutRehash(aPtr, aLookup, aKey);
     infallibleRehashIfOverloaded();
   }
 };