Bug 1422575 - Give hash tables a minimum capacity based on the parameter passed to init() r=sfink
author      Jon Coppeard <jcoppeard@mozilla.com>
date        Thu, 07 Dec 2017 18:28:42 +0000
changeset   395639 115d70e6c818c34624e7b9a0a2a1579ad92f1b40
parent      395638 82f6a0139402245a07f1e05e4bebd2861b78f4dd
child       395640 1938afc341937d701acfb196279193f4cc640696
push id     33049
push user   csabou@mozilla.com
push date   Fri, 08 Dec 2017 09:57:17 +0000
treeherder  mozilla-central@437bfd403b76
reviewers   sfink
bugs        1422575
milestone   59.0a1
Bug 1422575 - Give hash tables a minimum capacity based on the parameter passed to init() r=sfink
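
The hunks below move the generation counter under JS_DEBUG and make init() record the rounded-up capacity it allocates as a per-table minimum (minCapacity), below which later compaction will not shrink the table. A minimal standalone sketch of the rounding step, with illustrative names only; the real init() rounds up a newCapacity value derived from the requested length (accounting for the maximum load factor), which this sketch simplifies to the length itself:

#include <cstdint>
#include <cstdio>

static const uint32_t kMinCapacity = 4;       // assumed global sMinCapacity

// Round a requested length up to the next power of two, starting at the
// global minimum; init() stores the result in the new minCapacity field.
static uint32_t computeMinCapacity(uint32_t length) {
    uint32_t capacity = kMinCapacity;
    while (capacity < length)
        capacity <<= 1;
    return capacity;
}

int main() {
    printf("%u\n", computeMinCapacity(100));  // prints 128
    printf("%u\n", computeMinCapacity(3));    // prints 4, never below the minimum
    return 0;
}
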
js/public/HashTable.h
--- a/js/public/HashTable.h
+++ b/js/public/HashTable.h
@@ -221,19 +221,21 @@ class HashMap
     // guarantee that |impl| is the first field in HashMap.
     size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         return impl.sizeOfExcludingThis(mallocSizeOf);
     }
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
     }
 
+#ifdef JS_DEBUG
     Generation generation() const {
         return impl.generation();
     }
+#endif
 
     /************************************************** Shorthand operations */
 
     bool has(const Lookup& l) const {
         return impl.lookup(l).found();
     }
 
     // Overwrite existing value with v. Return false on oom.
@@ -468,19 +470,21 @@ class HashSet
     // guarantee that |impl| is the first field in HashSet.
     size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         return impl.sizeOfExcludingThis(mallocSizeOf);
     }
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
     }
 
+#ifdef JS_DEBUG
     Generation generation() const {
         return impl.generation();
     }
+#endif
 
     /************************************************** Shorthand operations */
 
     bool has(const Lookup& l) const {
         return impl.lookup(l).found();
     }
 
     // Add |u| if it is not present already. Return false on oom.
@@ -1120,17 +1124,19 @@ class HashTable : private AllocPolicy
 
         void rekeyFront(const Key& k) {
             rekeyFront(k, k);
         }
 
         // Potentially rehashes the table.
         ~Enum() {
             if (rekeyed) {
+#ifdef JS_DEBUG
                 table_.gen++;
+#endif
                 table_.checkOverRemoved();
             }
 
             if (removed)
                 table_.compactIfUnderloaded();
         }
     };
 
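
For context on the #ifdef JS_DEBUG changes in the hunk above: the generation counter only feeds debug assertions that catch a Range or Enum being used after the table's entry storage has changed, so release builds can drop it entirely. A hypothetical standalone illustration of that pattern (not the real Range/Enum code), using the standard NDEBUG convention in place of JS_DEBUG:

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical container showing the generation-number pattern: every
// operation that may invalidate iterators bumps |gen|, and debug-only
// checks compare the value captured at iterator creation with the
// current one.
struct Table {
    std::vector<int> storage;
#ifndef NDEBUG
    uint64_t gen = 0;                  // bumped whenever storage may move
#endif

    void add(int v) {
        storage.push_back(v);          // may reallocate
#ifndef NDEBUG
        gen++;
#endif
    }

    struct Range {
        const Table& table;
        size_t index;
#ifndef NDEBUG
        uint64_t gen;                  // generation at creation time
#endif
        explicit Range(const Table& t)
          : table(t), index(0)
#ifndef NDEBUG
          , gen(t.gen)
#endif
        {}
        bool empty() const {
#ifndef NDEBUG
            assert(gen == table.gen && "table mutated during iteration");
#endif
            return index >= table.storage.size();
        }
    };
};

int main() {
    Table t;
    t.add(1);
    Table::Range r(t);
    // t.add(2);                       // would trip the assertion in debug builds
    while (!r.empty())
        ++r.index;
    return 0;
}
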
@@ -1153,23 +1159,24 @@ class HashTable : private AllocPolicy
     // HashTable is not copyable or assignable
     HashTable(const HashTable&) = delete;
     void operator=(const HashTable&) = delete;
 
   private:
     static const size_t CAP_BITS = 30;
 
   public:
-    uint64_t    gen:56;                 // entry storage generation number
-    uint64_t    hashShift:8;            // multiplicative hash shift
     Entry*      table;                  // entry storage
+    uint32_t    hashShift;              // multiplicative hash shift
+    uint32_t    minCapacity;            // minimum table capacity
     uint32_t    entryCount;             // number of entries in table
     uint32_t    removedCount;           // removed entry sentinels in table
 
 #ifdef JS_DEBUG
+    uint64_t     gen;                   // entry storage generation number
     uint64_t     mutationCount;
     mutable bool mEntered;
     // Note that some updates to these stats are not thread-safe. See the
     // comment on the three-argument overloading of HashTable::lookup().
     mutable struct Stats
     {
         uint32_t        searches;       // total number of table searches
         uint32_t        steps;          // hash chain links traversed
@@ -1252,22 +1259,22 @@ class HashTable : private AllocPolicy
         for (Entry* e = oldTable; e < end; ++e)
             e->destroyIfLive();
         alloc.free_(oldTable);
     }
 
   public:
     explicit HashTable(AllocPolicy ap)
       : AllocPolicy(ap)
-      , gen(0)
+      , table(nullptr)
       , hashShift(sHashBits)
-      , table(nullptr)
       , entryCount(0)
       , removedCount(0)
 #ifdef JS_DEBUG
+      , gen(0)
       , mutationCount(0)
       , mEntered(false)
 #endif
     {}
 
     MOZ_MUST_USE bool init(uint32_t length)
     {
         MOZ_ASSERT(!initialized());
@@ -1295,21 +1302,21 @@ class HashTable : private AllocPolicy
 
         // FIXME: use JS_CEILING_LOG2 when PGO stops crashing (bug 543034).
         uint32_t roundUp = sMinCapacity, roundUpLog2 = sMinCapacityLog2;
         while (roundUp < newCapacity) {
             roundUp <<= 1;
             ++roundUpLog2;
         }
 
-        newCapacity = roundUp;
-        MOZ_ASSERT(newCapacity >= length);
-        MOZ_ASSERT(newCapacity <= sMaxCapacity);
+        minCapacity = roundUp;
+        MOZ_ASSERT(minCapacity >= length);
+        MOZ_ASSERT(minCapacity <= sMaxCapacity);
 
-        table = createTable(*this, newCapacity);
+        table = createTable(*this, minCapacity);
         if (!table)
             return false;
 
         setTableSizeLog2(roundUpLog2);
         METER(memset(&stats, 0, sizeof(stats)));
         return true;
     }
 
@@ -1354,28 +1361,29 @@ class HashTable : private AllocPolicy
     bool overloaded()
     {
         static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
                       "multiplication below could overflow");
         return entryCount + removedCount >=
                capacity() * sMaxAlphaNumerator / sAlphaDenominator;
     }
 
-    // Would the table be underloaded if it had the given capacity and entryCount?
-    static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
+    // Considering its current entryCount, would the table be underloaded if it
+    // had the given capacity?
+    bool wouldBeUnderloaded(uint32_t capacity)
     {
         static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
                       "multiplication below could overflow");
-        return capacity > sMinCapacity &&
+        return capacity > minCapacity &&
                entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
     }
 
     bool underloaded()
     {
-        return wouldBeUnderloaded(capacity(), entryCount);
+        return wouldBeUnderloaded(capacity());
     }
 
     static MOZ_ALWAYS_INLINE bool match(Entry& e, const Lookup& l)
     {
         return HashPolicy::match(HashPolicy::getKey(e.get()), l);
     }
 
     // Warning: in order for readonlyThreadsafeLookup() to be safe this
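
A worked example of the thresholds in the hunk above, assuming the usual 3/4 maximum and 1/4 minimum load factors (the actual sMaxAlphaNumerator, sMinAlphaNumerator and sAlphaDenominator values are defined elsewhere in this header):

#include <cstdint>
#include <cstdio>

// Assumed load-factor bounds; adjust if the header's constants differ.
static const uint32_t kMaxAlphaNumerator = 3;   // grow above 3/4 full
static const uint32_t kMinAlphaNumerator = 1;   // shrink below 1/4 full
static const uint32_t kAlphaDenominator = 4;

int main() {
    uint32_t capacity = 128;
    uint32_t growAt   = capacity * kMaxAlphaNumerator / kAlphaDenominator;
    uint32_t shrinkAt = capacity * kMinAlphaNumerator / kAlphaDenominator;
    // With these bounds a 128-entry table grows once live plus removed
    // entries reach 96, and becomes a shrink candidate at 32 or fewer live
    // entries, unless 128 is already the table's minCapacity, in which case
    // the capacity > minCapacity test above keeps it at full size.
    printf("grow at >= %u, shrink candidate at <= %u\n", growAt, shrinkAt);
    return 0;
}
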
@@ -1498,18 +1506,21 @@ class HashTable : private AllocPolicy
 
         Entry* newTable = createTable(*this, newCapacity, reportFailure);
         if (!newTable)
             return RehashFailed;
 
         // We can't fail from here on, so update table parameters.
         setTableSizeLog2(newLog2);
         removedCount = 0;
+        table = newTable;
+
+#ifdef JS_DEBUG
         gen++;
-        table = newTable;
+#endif
 
         // Copy only live entries, leaving removed ones behind.
         Entry* end = oldTable + oldCap;
         for (Entry* src = oldTable; src < end; ++src) {
             if (src->isLive()) {
                 HashNumber hn = src->getKeyHash();
                 findFreeEntry(hn).setLive(
                     hn, mozilla::Move(const_cast<typename Entry::NonConstT&>(src->get())));
@@ -1582,17 +1593,17 @@ class HashTable : private AllocPolicy
 
     // Resize the table down to the largest capacity which doesn't underload the
     // table.  Since we call checkUnderloaded() on every remove, you only need
     // to call this after a bulk removal of items done without calling remove().
     void compactIfUnderloaded()
     {
         int32_t resizeLog2 = 0;
         uint32_t newCapacity = capacity();
-        while (wouldBeUnderloaded(newCapacity, entryCount)) {
+        while (wouldBeUnderloaded(newCapacity)) {
             newCapacity = newCapacity >> 1;
             resizeLog2--;
         }
 
         if (resizeLog2 != 0)
             (void) changeTableSize(resizeLog2, DontReportFailure);
     }
 
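
The compaction loop above now bottoms out at the capacity recorded by init(). A rough standalone sketch of that behaviour, with illustrative helpers; the real code accumulates a resizeLog2 delta and applies it through changeTableSize():

#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for the member fields and load-factor constants.
static const uint32_t kMinAlphaNumerator = 1;
static const uint32_t kAlphaDenominator = 4;

static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount,
                               uint32_t minCapacity) {
    return capacity > minCapacity &&
           entryCount <= capacity * kMinAlphaNumerator / kAlphaDenominator;
}

// Halve the capacity until the table would no longer be underloaded; the
// minCapacity floor guarantees the loop stops at the size init() promised.
static uint32_t compactedCapacity(uint32_t capacity, uint32_t entryCount,
                                  uint32_t minCapacity) {
    while (wouldBeUnderloaded(capacity, entryCount, minCapacity))
        capacity >>= 1;
    return capacity;
}

int main() {
    // A table with minCapacity 128 that grew to 4096 entries of storage and
    // was then bulk-cleared compacts back to 128, not to the global minimum.
    printf("%u\n", compactedCapacity(4096, 10, 128));   // prints 128
    return 0;
}
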
@@ -1600,17 +1611,19 @@ class HashTable : private AllocPolicy
     // a second table.  We do this by recycling the collision bits to tell us if
     // the element is already inserted or still waiting to be inserted.  Since
     // already-inserted elements win any conflicts, we get the same table as we
     // would have gotten through random insertion order.
     void rehashTableInPlace()
     {
         METER(stats.rehashes++);
         removedCount = 0;
+#ifdef JS_DEBUG
         gen++;
+#endif
         for (size_t i = 0; i < capacity(); ++i)
             table[i].unsetCollision();
 
         for (size_t i = 0; i < capacity();) {
             Entry* src = &table[i];
 
             if (!src->isLive() || src->hasCollision()) {
                 ++i;
@@ -1696,20 +1709,20 @@ class HashTable : private AllocPolicy
 #ifdef JS_DEBUG
         MOZ_ASSERT(!mEntered);
 #endif
         if (!table)
             return;
 
         destroyTable(*this, table, capacity());
         table = nullptr;
-        gen++;
         entryCount = 0;
         removedCount = 0;
 #ifdef JS_DEBUG
+        gen++;
         mutationCount++;
 #endif
     }
 
     Range all() const
     {
         MOZ_ASSERT(table);
         return Range(*this, table, table + capacity());
@@ -1728,21 +1741,23 @@ class HashTable : private AllocPolicy
     }
 
     uint32_t capacity() const
     {
         MOZ_ASSERT(table);
         return JS_BIT(sHashBits - hashShift);
     }
 
+#ifdef JS_DEBUG
     Generation generation() const
     {
         MOZ_ASSERT(table);
         return Generation(gen);
     }
+#endif
 
     size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
     {
         return mallocSizeOf(table);
     }
 
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
     {