Backed out changesets 115d70e6c818 and 1938afc34193 for memory and performance regressions (bug 1422575)
author Jon Coppeard <jcoppeard@mozilla.com>
Tue, 12 Dec 2017 16:29:19 -0600
changeset 396124 12a50b0af80f3c82a63dc408736ea17192d63f3d
parent 396123 d1fd7e71f31009e2f0ec6baed04b3d7862993914
child 396125 e66e48637c6578350a43e157988b2a9787a3ab4a
push id 98248
push user jcoppeard@mozilla.com
push date Tue, 12 Dec 2017 22:29:49 +0000
treeherder mozilla-inbound@12a50b0af80f [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
bugs 1422575
milestone 59.0a1
backs out 115d70e6c818c34624e7b9a0a2a1579ad92f1b40
1938afc341937d701acfb196279193f4cc640696
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out changesets 115d70e6c818 and 1938afc34193 for memory and performance regressions (bug 1422575)
js/public/HashTable.h
js/src/gc/StoreBuffer.h
--- a/js/public/HashTable.h
+++ b/js/public/HashTable.h
@@ -221,21 +221,19 @@ class HashMap
     // guarantee that |impl| is the first field in HashMap.
     size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         return impl.sizeOfExcludingThis(mallocSizeOf);
     }
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
     }
 
-#ifdef JS_DEBUG
     Generation generation() const {
         return impl.generation();
     }
-#endif
 
     /************************************************** Shorthand operations */
 
     bool has(const Lookup& l) const {
         return impl.lookup(l).found();
     }
 
     // Overwrite existing value with v. Return false on oom.
@@ -470,21 +468,19 @@ class HashSet
     // guarantee that |impl| is the first field in HashSet.
     size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         return impl.sizeOfExcludingThis(mallocSizeOf);
     }
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
     }
 
-#ifdef JS_DEBUG
     Generation generation() const {
         return impl.generation();
     }
-#endif
 
     /************************************************** Shorthand operations */
 
     bool has(const Lookup& l) const {
         return impl.lookup(l).found();
     }
 
     // Add |u| if it is not present already. Return false on oom.
@@ -1124,19 +1120,17 @@ class HashTable : private AllocPolicy
 
         void rekeyFront(const Key& k) {
             rekeyFront(k, k);
         }
 
         // Potentially rehashes the table.
         ~Enum() {
             if (rekeyed) {
-#ifdef JS_DEBUG
                 table_.gen++;
-#endif
                 table_.checkOverRemoved();
             }
 
             if (removed)
                 table_.compactIfUnderloaded();
         }
     };
 
@@ -1159,24 +1153,23 @@ class HashTable : private AllocPolicy
     // HashTable is not copyable or assignable
     HashTable(const HashTable&) = delete;
     void operator=(const HashTable&) = delete;
 
   private:
     static const size_t CAP_BITS = 30;
 
   public:
+    uint64_t    gen:56;                 // entry storage generation number
+    uint64_t    hashShift:8;            // multiplicative hash shift
     Entry*      table;                  // entry storage
-    uint32_t    hashShift;              // multiplicative hash shift
-    uint32_t    minCapacity;            // minimum table capacity
     uint32_t    entryCount;             // number of entries in table
     uint32_t    removedCount;           // removed entry sentinels in table
 
 #ifdef JS_DEBUG
-    uint64_t     gen;                   // entry storage generation number
     uint64_t     mutationCount;
     mutable bool mEntered;
     // Note that some updates to these stats are not thread-safe. See the
     // comment on the three-argument overloading of HashTable::lookup().
     mutable struct Stats
     {
         uint32_t        searches;       // total number of table searches
         uint32_t        steps;          // hash chain links traversed
@@ -1259,22 +1252,22 @@ class HashTable : private AllocPolicy
         for (Entry* e = oldTable; e < end; ++e)
             e->destroyIfLive();
         alloc.free_(oldTable);
     }
 
   public:
     explicit HashTable(AllocPolicy ap)
       : AllocPolicy(ap)
+      , gen(0)
+      , hashShift(sHashBits)
       , table(nullptr)
-      , hashShift(sHashBits)
       , entryCount(0)
       , removedCount(0)
 #ifdef JS_DEBUG
-      , gen(0)
       , mutationCount(0)
       , mEntered(false)
 #endif
     {}
 
     MOZ_MUST_USE bool init(uint32_t length)
     {
         MOZ_ASSERT(!initialized());
@@ -1302,21 +1295,21 @@ class HashTable : private AllocPolicy
 
         // FIXME: use JS_CEILING_LOG2 when PGO stops crashing (bug 543034).
         uint32_t roundUp = sMinCapacity, roundUpLog2 = sMinCapacityLog2;
         while (roundUp < newCapacity) {
             roundUp <<= 1;
             ++roundUpLog2;
         }
 
-        minCapacity = roundUp;
-        MOZ_ASSERT(minCapacity >= length);
-        MOZ_ASSERT(minCapacity <= sMaxCapacity);
+        newCapacity = roundUp;
+        MOZ_ASSERT(newCapacity >= length);
+        MOZ_ASSERT(newCapacity <= sMaxCapacity);
 
-        table = createTable(*this, minCapacity);
+        table = createTable(*this, newCapacity);
         if (!table)
             return false;
 
         setTableSizeLog2(roundUpLog2);
         METER(memset(&stats, 0, sizeof(stats)));
         return true;
     }
 
@@ -1361,29 +1354,28 @@ class HashTable : private AllocPolicy
     bool overloaded()
     {
         static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
                       "multiplication below could overflow");
         return entryCount + removedCount >=
                capacity() * sMaxAlphaNumerator / sAlphaDenominator;
     }
 
-    // Considering its current entryCount, would the table be underloaded if it
-    // had the given capacity?
-    bool wouldBeUnderloaded(uint32_t capacity)
+    // Would the table be underloaded if it had the given capacity and entryCount?
+    static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
     {
         static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
                       "multiplication below could overflow");
-        return capacity > minCapacity &&
+        return capacity > sMinCapacity &&
                entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
     }
 
     bool underloaded()
     {
-        return wouldBeUnderloaded(capacity());
+        return wouldBeUnderloaded(capacity(), entryCount);
     }
 
     static MOZ_ALWAYS_INLINE bool match(Entry& e, const Lookup& l)
     {
         return HashPolicy::match(HashPolicy::getKey(e.get()), l);
     }
 
     // Warning: in order for readonlyThreadsafeLookup() to be safe this
@@ -1506,22 +1498,19 @@ class HashTable : private AllocPolicy
 
         Entry* newTable = createTable(*this, newCapacity, reportFailure);
         if (!newTable)
             return RehashFailed;
 
         // We can't fail from here on, so update table parameters.
         setTableSizeLog2(newLog2);
         removedCount = 0;
+        gen++;
         table = newTable;
 
-#ifdef JS_DEBUG
-        gen++;
-#endif
-
         // Copy only live entries, leaving removed ones behind.
         Entry* end = oldTable + oldCap;
         for (Entry* src = oldTable; src < end; ++src) {
             if (src->isLive()) {
                 HashNumber hn = src->getKeyHash();
                 findFreeEntry(hn).setLive(
                     hn, mozilla::Move(const_cast<typename Entry::NonConstT&>(src->get())));
                 src->destroy();
@@ -1593,17 +1582,17 @@ class HashTable : private AllocPolicy
 
     // Resize the table down to the largest capacity which doesn't underload the
     // table.  Since we call checkUnderloaded() on every remove, you only need
     // to call this after a bulk removal of items done without calling remove().
     void compactIfUnderloaded()
     {
         int32_t resizeLog2 = 0;
         uint32_t newCapacity = capacity();
-        while (wouldBeUnderloaded(newCapacity)) {
+        while (wouldBeUnderloaded(newCapacity, entryCount)) {
             newCapacity = newCapacity >> 1;
             resizeLog2--;
         }
 
         if (resizeLog2 != 0)
             (void) changeTableSize(resizeLog2, DontReportFailure);
     }
 
@@ -1611,19 +1600,17 @@ class HashTable : private AllocPolicy
     // a second table.  We do this by recycling the collision bits to tell us if
     // the element is already inserted or still waiting to be inserted.  Since
     // already-inserted elements win any conflicts, we get the same table as we
     // would have gotten through random insertion order.
     void rehashTableInPlace()
     {
         METER(stats.rehashes++);
         removedCount = 0;
-#ifdef JS_DEBUG
         gen++;
-#endif
         for (size_t i = 0; i < capacity(); ++i)
             table[i].unsetCollision();
 
         for (size_t i = 0; i < capacity();) {
             Entry* src = &table[i];
 
             if (!src->isLive() || src->hasCollision()) {
                 ++i;
@@ -1709,20 +1696,20 @@ class HashTable : private AllocPolicy
 #ifdef JS_DEBUG
         MOZ_ASSERT(!mEntered);
 #endif
         if (!table)
             return;
 
         destroyTable(*this, table, capacity());
         table = nullptr;
+        gen++;
         entryCount = 0;
         removedCount = 0;
 #ifdef JS_DEBUG
-        gen++;
         mutationCount++;
 #endif
     }
 
     Range all() const
     {
         MOZ_ASSERT(table);
         return Range(*this, table, table + capacity());
@@ -1741,23 +1728,21 @@ class HashTable : private AllocPolicy
     }
 
     uint32_t capacity() const
     {
         MOZ_ASSERT(table);
         return JS_BIT(sHashBits - hashShift);
     }
 
-#ifdef JS_DEBUG
     Generation generation() const
     {
         MOZ_ASSERT(table);
         return Generation(gen);
     }
-#endif
 
     size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
     {
         return mallocSizeOf(table);
     }
 
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
     {
--- a/js/src/gc/StoreBuffer.h
+++ b/js/src/gc/StoreBuffer.h
@@ -72,40 +72,33 @@ class StoreBuffer
         StoreSet stores_;
 
         /*
          * A one element cache in front of the canonical set to speed up
          * temporary instances of HeapPtr.
          */
         T last_;
 
-        /*
-         * Maximum number of entries before we request a minor GC.
-         *
-         * This is passed to HashSet::init(), which will pre-allocate a hash
-         * table capable of holding this many entries before rehashing, which is
-         * currently 4/3 this value.
-         */
-        const static size_t MaxEntries = 8192;
+        /* Maximum number of entries before we request a minor GC. */
+        const static size_t MaxEntries = 48 * 1024 / sizeof(T);
 
         explicit MonoTypeBuffer() : last_(T()) {}
         ~MonoTypeBuffer() { stores_.finish(); }
 
         MOZ_MUST_USE bool init() {
-            if (!stores_.initialized() && !stores_.init(MaxEntries))
+            if (!stores_.initialized() && !stores_.init())
                 return false;
             clear();
             return true;
         }
 
         void clear() {
             last_ = T();
             if (stores_.initialized())
                 stores_.clear();
-            MOZ_ASSERT(stores_.capacity() > MaxEntries);
         }
 
         /* Add one item to the buffer. */
         void put(StoreBuffer* owner, const T& t) {
             MOZ_ASSERT(stores_.initialized());
             sinkStore(owner);
             last_ = t;
         }