Bug 1489572 - Split LifoAlloc chunks in 2 lists. r=tcampbell
☠☠ backed out by 58bcea5ede75 ☠☠
author: Nicolas B. Pierron <nicolas.b.pierron@nbp.name>
date: Wed, 17 Oct 2018 14:40:09 +0200
changeset 490859:683e3e0eaee15b22fc997ed6cfd61630ec9f73b4
parent 490858:f5973de3f6e8d55dfd22fe60df34d8f96c3c13e0
child 490860:cbfcb4b2b5c93b1d7330c6869fa197be79ea37f4
push id: 247
push user: fmarier@mozilla.com
push date: Sat, 27 Oct 2018 01:06:44 +0000
reviewers: tcampbell
bugs: 1489572
milestone: 65.0a1
Bug 1489572 - Split LifoAlloc chunks in 2 lists. r=tcampbell LifoAlloc bump allocations are now stored in 2 different lists instead of 1. The first list contains small allocations, allocated in small chunks, while the second list holds large allocations, each stored in its own chunk. Splitting the allocations across 2 lists should prevent BumpChunks from being composed mostly of wasted space when a large allocation happens.
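
In outline, the patch routes each allocation by size: requests above oversizeThreshold_ (initialized to the default chunk size) get a dedicated chunk on the new oversize_ list, while everything else keeps bump-allocating from the last chunk of chunks_. A minimal sketch of the dispatch, condensed from the allocImpl() change below:

    void* allocImpl(size_t n) {
        void* result;
        if (MOZ_UNLIKELY(n > oversizeThreshold_)) {
            // Large request: one dedicated chunk, appended to oversize_.
            return allocImplOversize(n);
        }
        // Small request: bump-allocate from the last small chunk, falling back
        // to the cold path to pull a chunk from unused_ or malloc a new one.
        if (MOZ_LIKELY(!chunks_.empty() && (result = chunks_.last()->tryAlloc(n)))) {
            return result;
        }
        return allocImplColdPath(n);
    }
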
js/src/ds/LifoAlloc.cpp
js/src/ds/LifoAlloc.h
js/src/gc/StoreBuffer.h
--- a/js/src/ds/LifoAlloc.cpp
+++ b/js/src/ds/LifoAlloc.cpp
@@ -22,17 +22,16 @@ using mozilla::tl::BitSize;
 
 namespace js {
 namespace detail {
 
 /* static */
 UniquePtr<BumpChunk>
 BumpChunk::newWithCapacity(size_t size)
 {
-    MOZ_DIAGNOSTIC_ASSERT(RoundUpPow2(size) == size);
     MOZ_DIAGNOSTIC_ASSERT(size >= sizeof(BumpChunk));
     void* mem = js_malloc(size);
     if (!mem) {
         return nullptr;
     }
 
     UniquePtr<BumpChunk> result(new (mem) BumpChunk(size));
 
@@ -109,187 +108,257 @@ BumpChunk::setReadWrite()
 void
 LifoAlloc::reset(size_t defaultChunkSize)
 {
     MOZ_ASSERT(mozilla::IsPowerOfTwo(defaultChunkSize));
 
     while (!chunks_.empty()) {
         chunks_.popFirst();
     }
+    while (!oversize_.empty()) {
+        oversize_.popFirst();
+    }
     while (!unused_.empty()) {
         unused_.popFirst();
     }
     defaultChunkSize_ = defaultChunkSize;
+    oversizeThreshold_ = defaultChunkSize;
     markCount = 0;
     curSize_ = 0;
 }
 
 void
 LifoAlloc::freeAll()
 {
     while (!chunks_.empty()) {
         UniqueBumpChunk bc = chunks_.popFirst();
         decrementCurSize(bc->computedSizeOfIncludingThis());
     }
+    while (!oversize_.empty()) {
+        UniqueBumpChunk bc = oversize_.popFirst();
+        decrementCurSize(bc->computedSizeOfIncludingThis());
+    }
     while (!unused_.empty()) {
         UniqueBumpChunk bc = unused_.popFirst();
         decrementCurSize(bc->computedSizeOfIncludingThis());
     }
 
     // Nb: maintaining curSize_ correctly isn't easy.  Fortunately, this is an
     // excellent sanity check.
     MOZ_ASSERT(curSize_ == 0);
 }
 
+static size_t
+MallocGoodSize(size_t aSize)
+{
+# if defined(MOZ_MEMORY)
+    return malloc_good_size(aSize);
+# else
+    return aSize;
+# endif
+}
+
 LifoAlloc::UniqueBumpChunk
-LifoAlloc::newChunkWithCapacity(size_t n)
+LifoAlloc::newChunkWithCapacity(size_t n, bool oversize)
 {
     MOZ_ASSERT(fallibleScope_, "[OOM] Cannot allocate a new chunk in an infallible scope.");
 
     // Compute the size which should be requested in order to be able to fit |n|
     // bytes in a newly allocated chunk, or default to |defaultChunkSize_|.
 
     size_t minSize;
     if (MOZ_UNLIKELY(!detail::BumpChunk::allocSizeWithRedZone(n, &minSize) ||
                      (minSize & (size_t(1) << (BitSize<size_t>::value - 1)))))
     {
         return nullptr;
     }
 
-    const size_t chunkSize = minSize > defaultChunkSize_
-                             ?  RoundUpPow2(minSize)
+    const size_t chunkSize = (oversize || minSize > defaultChunkSize_)
+                             ? MallocGoodSize(minSize)
                              : defaultChunkSize_;
 
     // Create a new BumpChunk, and allocate space for it.
     UniqueBumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize);
     if (!result) {
         return nullptr;
     }
     MOZ_ASSERT(result->computedSizeOfIncludingThis() == chunkSize);
     return result;
 }
 
-bool
+LifoAlloc::UniqueBumpChunk
 LifoAlloc::getOrCreateChunk(size_t n)
 {
     // Look for existing unused BumpChunks to satisfy the request, and pick the
     // first one which is large enough, and move it into the list of used
     // chunks.
     if (!unused_.empty()) {
         if (unused_.begin()->canAlloc(n)) {
-            chunks_.append(unused_.popFirst());
-            return true;
+            return unused_.popFirst();
         }
 
         BumpChunkList::Iterator e(unused_.end());
         for (BumpChunkList::Iterator i(unused_.begin()); i->next() != e.get(); ++i) {
             detail::BumpChunk* elem = i->next();
             MOZ_ASSERT(elem->empty());
             if (elem->canAlloc(n)) {
                 BumpChunkList temp = unused_.splitAfter(i.get());
-                chunks_.append(temp.popFirst());
+                UniqueBumpChunk newChunk = temp.popFirst();
                 unused_.appendAll(std::move(temp));
-                return true;
+                return newChunk;
             }
         }
     }
 
     // Allocate a new BumpChunk with enough space for the next allocation.
-    UniqueBumpChunk newChunk = newChunkWithCapacity(n);
+    UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
     if (!newChunk) {
-        return false;
+        return newChunk;
     }
     size_t size = newChunk->computedSizeOfIncludingThis();
-    chunks_.append(std::move(newChunk));
     incrementCurSize(size);
-    return true;
+    return newChunk;
 }
 
 void*
 LifoAlloc::allocImplColdPath(size_t n)
 {
     void* result;
-    if (!getOrCreateChunk(n)) {
+    UniqueBumpChunk newChunk = getOrCreateChunk(n);
+    if (!newChunk) {
         return nullptr;
     }
 
     // Since we just created a large enough chunk, this can't fail.
+    chunks_.append(std::move(newChunk));
     result = chunks_.last()->tryAlloc(n);
     MOZ_ASSERT(result);
     return result;
 }
 
+void*
+LifoAlloc::allocImplOversize(size_t n)
+{
+    void* result;
+    UniqueBumpChunk newChunk = newChunkWithCapacity(n, true);
+    if (!newChunk) {
+        return nullptr;
+    }
+    incrementCurSize(newChunk->computedSizeOfIncludingThis());
+
+    // Since we just created a large enough chunk, this can't fail.
+    oversize_.append(std::move(newChunk));
+    result = oversize_.last()->tryAlloc(n);
+    MOZ_ASSERT(result);
+    return result;
+}
+
 bool
 LifoAlloc::ensureUnusedApproximateColdPath(size_t n, size_t total)
 {
     for (detail::BumpChunk& bc : unused_) {
         total += bc.unused();
         if (total >= n) {
             return true;
         }
     }
 
-    UniqueBumpChunk newChunk = newChunkWithCapacity(n);
+    UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
     if (!newChunk) {
         return false;
     }
     size_t size = newChunk->computedSizeOfIncludingThis();
     unused_.pushFront(std::move(newChunk));
     incrementCurSize(size);
     return true;
 }
 
 LifoAlloc::Mark
 LifoAlloc::mark()
 {
     markCount++;
-    if (chunks_.empty()) {
-        return Mark();
+    Mark res;
+    if (!chunks_.empty()) {
+        res.chunk = chunks_.last()->mark();
     }
-    return chunks_.last()->mark();
+    if (!oversize_.empty()) {
+        res.oversize = oversize_.last()->mark();
+    }
+    return res;
 }
 
 void
 LifoAlloc::release(Mark mark)
 {
     markCount--;
+#ifdef DEBUG
+    auto assertIsContained = [](const detail::BumpChunk::Mark& m, BumpChunkList& list) {
+        if (m.markedChunk()) {
+            bool contained = false;
+            for (const detail::BumpChunk& chunk : list) {
+                if (&chunk == m.markedChunk() && chunk.contains(m)) {
+                    contained = true;
+                    break;
+                }
+            }
+            MOZ_ASSERT(contained);
+        }
+    };
+    assertIsContained(mark.chunk, chunks_);
+    assertIsContained(mark.oversize, oversize_);
+#endif
 
-    // Move the blocks which are after the mark to the set of unused chunks.
     BumpChunkList released;
-    if (!mark.markedChunk()) {
-        released = std::move(chunks_);
-    } else {
-        released = chunks_.splitAfter(mark.markedChunk());
-    }
+    auto cutAtMark = [&released](const detail::BumpChunk::Mark& m, BumpChunkList& list) {
+        // Move the blocks which are after the mark to the set of released
+        // chunks.
+        if (!m.markedChunk()) {
+            released = std::move(list);
+        } else {
+            released = list.splitAfter(m.markedChunk());
+        }
 
-    // Release the content of all the blocks which are after the marks.
+        // Release everything which follows the mark in the last chunk.
+        if (!list.empty()) {
+            list.last()->release(m);
+        }
+    };
+
+    // Release the content of all the blocks which are after the mark, and
+    // keep the released blocks as unused.
+    cutAtMark(mark.chunk, chunks_);
     for (detail::BumpChunk& bc : released) {
         bc.release();
     }
     unused_.appendAll(std::move(released));
 
-    // Release everything which follows the mark in the last chunk.
-    if (!chunks_.empty()) {
-        chunks_.last()->release(mark);
+    // Free the content of all the blocks which are after the marks.
+    cutAtMark(mark.oversize, oversize_);
+    while (!released.empty()) {
+        UniqueBumpChunk bc = released.popFirst();
+        decrementCurSize(bc->computedSizeOfIncludingThis());
     }
 }
 
 void
 LifoAlloc::steal(LifoAlloc* other)
 {
     MOZ_ASSERT(!other->markCount);
     MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
     MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
+    MOZ_DIAGNOSTIC_ASSERT(oversize_.empty());
 
     // Copy everything from |other| to |this| except for |peakSize_|, which
     // requires some care.
     chunks_ = std::move(other->chunks_);
+    oversize_ = std::move(other->oversize_);
     unused_ = std::move(other->unused_);
     markCount = other->markCount;
     defaultChunkSize_ = other->defaultChunkSize_;
+    oversizeThreshold_ = other->oversizeThreshold_;
     curSize_ = other->curSize_;
     peakSize_ = Max(peakSize_, other->peakSize_);
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
     fallibleScope_ = other->fallibleScope_;
 #endif
 
     other->reset(defaultChunkSize_);
 }
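
The two lists also behave differently on release(): chunks cut from chunks_ are recycled onto unused_, while chunks cut from oversize_ are freed immediately. A hedged usage sketch (sizes are illustrative; after reset(), oversizeThreshold_ defaults to the chunk size):

    LifoAlloc alloc(4096);
    LifoAlloc::Mark m = alloc.mark();
    void* small = alloc.alloc(64);          // bump-allocated in chunks_
    void* large = alloc.alloc(64 * 1024);   // above the threshold: own chunk in oversize_
    alloc.release(m);   // the small chunk moves to unused_ for reuse;
                        // the oversize chunk is freed and curSize_ drops
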
@@ -298,16 +367,17 @@ void
 LifoAlloc::transferFrom(LifoAlloc* other)
 {
     MOZ_ASSERT(!markCount);
     MOZ_ASSERT(!other->markCount);
 
     incrementCurSize(other->curSize_);
     appendUnused(std::move(other->unused_));
     appendUsed(std::move(other->chunks_));
+    oversize_.appendAll(std::move(other->oversize_));
     other->curSize_ = 0;
 }
 
 void
 LifoAlloc::transferUnusedFrom(LifoAlloc* other)
 {
     MOZ_ASSERT(!markCount);
 
@@ -323,24 +393,30 @@ LifoAlloc::transferUnusedFrom(LifoAlloc*
 
 #ifdef LIFO_CHUNK_PROTECT
 void
 LifoAlloc::setReadOnly()
 {
     for (detail::BumpChunk& bc : chunks_) {
         bc.setReadOnly();
     }
+    for (detail::BumpChunk& bc : oversize_) {
+        bc.setReadOnly();
+    }
     for (detail::BumpChunk& bc : unused_) {
         bc.setReadOnly();
     }
 }
 
 void
 LifoAlloc::setReadWrite()
 {
     for (detail::BumpChunk& bc : chunks_) {
         bc.setReadWrite();
     }
+    for (detail::BumpChunk& bc : oversize_) {
+        bc.setReadWrite();
+    }
     for (detail::BumpChunk& bc : unused_) {
         bc.setReadWrite();
     }
 }
 #endif
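
A note on the sizing change above: newChunkWithCapacity() now rounds odd-sized requests with MallocGoodSize() rather than RoundUpPow2(), so an oversize chunk only pays the allocator's own bucket rounding instead of up to nearly 2x. Illustrative numbers only, assuming mozjemalloc's page-granular classes for large allocations:

    size_t minSize = 40000;                   // hypothetical request plus red zone
    size_t pow2    = RoundUpPow2(minSize);    // 65536: ~25 KiB of slop
    size_t good    = MallocGoodSize(minSize); // e.g. 40960: under 1 KiB of slop
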
--- a/js/src/ds/LifoAlloc.h
+++ b/js/src/ds/LifoAlloc.h
@@ -374,17 +374,17 @@ class BumpChunk : public SingleLinkedLis
     class Mark
     {
         // Chunk which owns the pointer.
         BumpChunk* chunk_;
         // Record of the bump_ pointer of the BumpChunk.
         uint8_t* bump_;
 
         friend class BumpChunk;
-        explicit Mark(BumpChunk* chunk, uint8_t* bump)
+        Mark(BumpChunk* chunk, uint8_t* bump)
           : chunk_(chunk), bump_(bump)
         {}
 
       public:
         Mark() : chunk_(nullptr), bump_(nullptr) {}
 
         BumpChunk* markedChunk() const { return chunk_; }
     };
@@ -522,42 +522,51 @@ BumpChunk::begin()
 //
 // Note: We leave BumpChunks latent in the set of unused chunks after they've
 // been released to avoid thrashing before a GC.
 class LifoAlloc
 {
     using UniqueBumpChunk = js::UniquePtr<detail::BumpChunk>;
     using BumpChunkList = detail::SingleLinkedList<detail::BumpChunk>;
 
-    // List of chunks containing allocated data. In the common case, the last
-    // chunk of this list is always used to perform the allocations. When the
-    // allocation cannot be performed, we move a Chunk from the unused set to
-    // the list of used chunks.
+    // List of chunks containing allocated data of size smaller than the default
+    // chunk size. In the common case, the last chunk of this list is always
+    // used to perform the allocations. When the allocation cannot be performed,
+    // we move a Chunk from the unused set to the list of used chunks.
     BumpChunkList chunks_;
 
+    // List of chunks containing allocated data where each allocation is larger
+    // than the oversize threshold. Each chunk contains exactly one allocation.
+    // This reduces wasted space in the normal chunk list.
+    //
+    // Oversize chunks are allocated on demand and freed as soon as they are
+    // released, instead of being pushed to the unused list.
+    BumpChunkList oversize_;
+
     // Set of unused chunks, which can be reused for future allocations.
     BumpChunkList unused_;
 
     size_t      markCount;
     size_t      defaultChunkSize_;
+    size_t      oversizeThreshold_;
     size_t      curSize_;
     size_t      peakSize_;
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
     bool        fallibleScope_;
 #endif
 
     void operator=(const LifoAlloc&) = delete;
     LifoAlloc(const LifoAlloc&) = delete;
 
     // Return a BumpChunk that can perform an allocation of at least size |n|.
-    UniqueBumpChunk newChunkWithCapacity(size_t n);
+    UniqueBumpChunk newChunkWithCapacity(size_t n, bool oversize);
 
     // Reuse or allocate a BumpChunk that can perform an allocation of at least
-    // size |n|, if successful it is placed at the end the list of |chunks_|.
+    // size |n|. The caller appends the returned chunk to the appropriate list.
-    MOZ_MUST_USE bool getOrCreateChunk(size_t n);
+    UniqueBumpChunk getOrCreateChunk(size_t n);
 
     void reset(size_t defaultChunkSize);
 
     // Append unused chunks to the end of this LifoAlloc.
     void appendUnused(BumpChunkList&& otherUnused) {
 #ifdef DEBUG
         for (detail::BumpChunk& bc: otherUnused) {
             MOZ_ASSERT(bc.empty());
@@ -580,21 +589,27 @@ class LifoAlloc
         }
     }
     void decrementCurSize(size_t size) {
         MOZ_ASSERT(curSize_ >= size);
         curSize_ -= size;
     }
 
     void* allocImplColdPath(size_t n);
+    void* allocImplOversize(size_t n);
 
     MOZ_ALWAYS_INLINE
     void* allocImpl(size_t n) {
         void* result;
-        if (!chunks_.empty() && (result = chunks_.last()->tryAlloc(n))) {
+        // Give oversized allocations their own chunk instead of wasting space
+        // due to fragmentation at the end of a normal chunk.
+        if (MOZ_UNLIKELY(n > oversizeThreshold_)) {
+            return allocImplOversize(n);
+        }
+        if (MOZ_LIKELY(!chunks_.empty() && (result = chunks_.last()->tryAlloc(n)))) {
             return result;
         }
         return allocImplColdPath(n);
     }
 
     // Check for space in unused chunks or allocate a new unused chunk.
     MOZ_MUST_USE bool ensureUnusedApproximateColdPath(size_t n, size_t total);
 
@@ -603,16 +618,26 @@ class LifoAlloc
       : peakSize_(0)
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
       , fallibleScope_(true)
 #endif
     {
         reset(defaultChunkSize);
     }
 
+    // Set the threshold above which an allocation is given its own chunk,
+    // outside the space reserved for small allocations.
+    void disableOversize() {
+        oversizeThreshold_ = SIZE_MAX;
+    }
+    void setOversizeThreshold(size_t oversizeThreshold) {
+        MOZ_ASSERT(oversizeThreshold <= defaultChunkSize_);
+        oversizeThreshold_ = oversizeThreshold;
+    }
+
     // Steal allocated chunks from |other|.
     void steal(LifoAlloc* other);
 
     // Append all chunks from |other|. They are removed from |other|.
     void transferFrom(LifoAlloc* other);
 
     // Append unused chunks from |other|. They are removed from |other|.
     void transferUnusedFrom(LifoAlloc* other);
@@ -729,26 +754,36 @@ class LifoAlloc
     T* newArrayUninitialized(size_t count) {
         size_t bytes;
         if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes))) {
             return nullptr;
         }
         return static_cast<T*>(alloc(bytes));
     }
 
-    using Mark = detail::BumpChunk::Mark;
+    class Mark {
+        friend class LifoAlloc;
+        detail::BumpChunk::Mark chunk;
+        detail::BumpChunk::Mark oversize;
+    };
     Mark mark();
     void release(Mark mark);
 
     void releaseAll() {
         MOZ_ASSERT(!markCount);
         for (detail::BumpChunk& bc : chunks_) {
             bc.release();
         }
         unused_.appendAll(std::move(chunks_));
+        // On release, we free any oversize allocations instead of keeping them
+        // in unused chunks.
+        while (!oversize_.empty()) {
+            UniqueBumpChunk bc = oversize_.popFirst();
+            decrementCurSize(bc->computedSizeOfIncludingThis());
+        }
     }
 
     // Protect the content of the LifoAlloc chunks.
 #ifdef LIFO_CHUNK_PROTECT
     void setReadOnly();
     void setReadWrite();
 #else
     void setReadOnly() const {}
@@ -761,18 +796,20 @@ class LifoAlloc
         for (const detail::BumpChunk& chunk : chunks_) {
             accum += chunk.used();
         }
         return accum;
     }
 
     // Return true if the LifoAlloc does not currently contain any allocations.
     bool isEmpty() const {
-        return chunks_.empty() ||
-               (chunks_.begin() == chunks_.last() && chunks_.last()->empty());
+        bool empty = chunks_.empty() ||
+            (chunks_.begin() == chunks_.last() && chunks_.last()->empty());
+        MOZ_ASSERT_IF(!oversize_.empty(), !oversize_.last()->empty());
+        return empty && oversize_.empty();
     }
 
     // Return the number of bytes remaining to allocate in the current chunk.
     // e.g. How many bytes we can allocate before needing a new block.
     size_t availableInCurrentChunk() const {
         if (chunks_.empty()) {
             return 0;
         }
@@ -780,28 +817,34 @@ class LifoAlloc
     }
 
     // Get the total size of the arena chunks (including unused space).
     size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         size_t n = 0;
         for (const detail::BumpChunk& chunk : chunks_) {
             n += chunk.sizeOfIncludingThis(mallocSizeOf);
         }
+        for (const detail::BumpChunk& chunk : oversize_) {
+            n += chunk.sizeOfIncludingThis(mallocSizeOf);
+        }
         for (const detail::BumpChunk& chunk : unused_) {
             n += chunk.sizeOfIncludingThis(mallocSizeOf);
         }
         return n;
     }
 
     // Get the total size of the arena chunks (including unused space).
     size_t computedSizeOfExcludingThis() const {
         size_t n = 0;
         for (const detail::BumpChunk& chunk : chunks_) {
             n += chunk.computedSizeOfIncludingThis();
         }
+        for (const detail::BumpChunk& chunk : oversize_) {
+            n += chunk.computedSizeOfIncludingThis();
+        }
         for (const detail::BumpChunk& chunk : unused_) {
             n += chunk.computedSizeOfIncludingThis();
         }
         return n;
     }
 
     // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
@@ -824,16 +867,21 @@ class LifoAlloc
 
 #ifdef DEBUG
     bool contains(void* ptr) const {
         for (const detail::BumpChunk& chunk : chunks_) {
             if (chunk.contains(ptr)) {
                 return true;
             }
         }
+        for (const detail::BumpChunk& chunk : oversize_) {
+            if (chunk.contains(ptr)) {
+                return true;
+            }
+        }
         return false;
     }
 #endif
 
     // Iterate over the data allocated in a LifoAlloc, and interpret it.
     class Enum
     {
         friend class LifoAlloc;
@@ -864,16 +912,17 @@ class LifoAlloc
         }
 
       public:
         explicit Enum(LifoAlloc& alloc)
           : chunkIt_(alloc.chunks_.begin()),
             chunkEnd_(alloc.chunks_.end()),
             head_(nullptr)
         {
+            MOZ_RELEASE_ASSERT(alloc.oversize_.empty());
             if (chunkIt_ != chunkEnd_) {
                 head_ = chunkIt_->begin();
             }
         }
 
         // Return true if there are no more bytes to enumerate.
         bool empty() {
             return chunkIt_ == chunkEnd_ ||
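
LifoAlloc::Enum only walks chunks_, so enumeration would silently skip anything placed on oversize_; the new MOZ_RELEASE_ASSERT above turns that into a crash instead. The StoreBuffer change below opts out accordingly; a sketch of the pattern for any enumerating consumer, using the store buffer's LifoAllocBlockSize as the example size:

    LifoAlloc* storage = js_new<LifoAlloc>(LifoAllocBlockSize);
    if (storage) {
        storage->disableOversize();  // keep every entry reachable by Enum
    }
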
--- a/js/src/gc/StoreBuffer.h
+++ b/js/src/gc/StoreBuffer.h
@@ -148,16 +148,22 @@ class StoreBuffer
 
         WholeCellBuffer() : storage_(nullptr), head_(nullptr) {}
         ~WholeCellBuffer() { js_delete(storage_); }
 
         MOZ_MUST_USE bool init() {
             MOZ_ASSERT(!head_);
             if (!storage_) {
                 storage_ = js_new<LifoAlloc>(LifoAllocBlockSize);
+                // This prevents LifoAlloc::Enum from crashing with a release
+                // assertion if we ever allocate one entry larger than
+                // LifoAllocBlockSize.
+                if (storage_) {
+                    storage_->disableOversize();
+                }
             }
             clear();
             return bool(storage_);
         }
 
         void clear();
 
         bool isAboutToOverflow() const {