Backed out 4 changesets (bug 1489572) for valgrind failures at js::jit::LIRGenerator::visitBlock.
author Cosmin Sabou <csabou@mozilla.com>
Tue, 23 Oct 2018 21:48:50 +0300
changeset 442658 58bcea5ede75c573fc8b8b20380b17d19b6f50ca
parent 442657 d7bb4168f88486e1058b5546ce0512c600f9893e
child 442660 c9f6d6242851d25e230bc6a81fb45bfc8f93e6c1
child 442690 ef78b85760ee45897b6add17b0860cd096168d3b
push id 71590
push user aiakab@mozilla.com
push date Tue, 23 Oct 2018 22:22:43 +0000
treeherder autoland@d20fd7486fde
bugs 1489572
milestone 65.0a1
backs out f3aa68c506e01055a7d7d20eb4fb672c511b9caf
cbfcb4b2b5c93b1d7330c6869fa197be79ea37f4
683e3e0eaee15b22fc997ed6cfd61630ec9f73b4
f5973de3f6e8d55dfd22fe60df34d8f96c3c13e0
Backed out 4 changesets (bug 1489572) for valgrind failures at js::jit::LIRGenerator::visitBlock.

Backed out changeset f3aa68c506e0 (bug 1489572)
Backed out changeset cbfcb4b2b5c9 (bug 1489572)
Backed out changeset 683e3e0eaee1 (bug 1489572)
Backed out changeset f5973de3f6e8 (bug 1489572)
js/src/ds/LifoAlloc.cpp
js/src/ds/LifoAlloc.h
js/src/frontend/Parser.cpp
js/src/gc/StoreBuffer.h
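
For context: LifoAlloc is a LIFO bump allocator. Each detail::BumpChunk hands out memory by advancing a bump pointer, and used chunks are chained in chunks_; the backed-out patches had additionally diverted allocations above a threshold into a separate oversize_ list, which this backout removes again. A minimal sketch of the bump-pointer fast path that allocImpl() runs on chunks_.last() (illustrative only; the real BumpChunk adds a header, red zones, alignment, and optional page protection):

    #include <cstddef>
    #include <cstdint>

    // Illustrative stand-in for detail::BumpChunk, not the real layout.
    struct MiniChunk {
        uint8_t* bump_;      // next free byte
        uint8_t* capacity_;  // one past the last usable byte

        void* tryAlloc(size_t n) {
            if (size_t(capacity_ - bump_) < n) {
                return nullptr;  // caller falls back to getOrCreateChunk(n)
            }
            void* result = bump_;
            bump_ += n;
            return result;
        }
    };

    int main() {
        uint8_t storage[4096];
        MiniChunk chunk{storage, storage + sizeof(storage)};
        void* a = chunk.tryAlloc(64);    // succeeds: bump_ advances by 64 bytes
        void* b = chunk.tryAlloc(8192);  // fails: larger than what remains
        return (a && !b) ? 0 : 1;
    }
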
--- a/js/src/ds/LifoAlloc.cpp
+++ b/js/src/ds/LifoAlloc.cpp
@@ -22,16 +22,17 @@ using mozilla::tl::BitSize;
 
 namespace js {
 namespace detail {
 
 /* static */
 UniquePtr<BumpChunk>
 BumpChunk::newWithCapacity(size_t size)
 {
+    MOZ_DIAGNOSTIC_ASSERT(RoundUpPow2(size) == size);
     MOZ_DIAGNOSTIC_ASSERT(size >= sizeof(BumpChunk));
     void* mem = js_malloc(size);
     if (!mem) {
         return nullptr;
     }
 
     UniquePtr<BumpChunk> result(new (mem) BumpChunk(size));
 
@@ -108,342 +109,123 @@ BumpChunk::setReadWrite()
 void
 LifoAlloc::reset(size_t defaultChunkSize)
 {
     MOZ_ASSERT(mozilla::IsPowerOfTwo(defaultChunkSize));
 
     while (!chunks_.empty()) {
         chunks_.popFirst();
     }
-    while (!oversize_.empty()) {
-        chunks_.popFirst();
-    }
     while (!unused_.empty()) {
         unused_.popFirst();
     }
     defaultChunkSize_ = defaultChunkSize;
-    oversizeThreshold_ = defaultChunkSize;
     markCount = 0;
     curSize_ = 0;
-    oversizeSize_ = 0;
 }
 
 void
 LifoAlloc::freeAll()
 {
     while (!chunks_.empty()) {
         UniqueBumpChunk bc = chunks_.popFirst();
         decrementCurSize(bc->computedSizeOfIncludingThis());
     }
-    while (!oversize_.empty()) {
-        UniqueBumpChunk bc = oversize_.popFirst();
-        decrementCurSize(bc->computedSizeOfIncludingThis());
-        oversizeSize_ -= bc->computedSizeOfIncludingThis();
-    }
     while (!unused_.empty()) {
         UniqueBumpChunk bc = unused_.popFirst();
         decrementCurSize(bc->computedSizeOfIncludingThis());
     }
 
     // Nb: maintaining curSize_ correctly isn't easy.  Fortunately, this is an
     // excellent sanity check.
     MOZ_ASSERT(curSize_ == 0);
-    MOZ_ASSERT(oversizeSize_ == 0);
-}
-
-// Round at the same page granularity used by malloc.
-static size_t
-MallocGoodSize(size_t aSize)
-{
-# if defined(MOZ_MEMORY)
-    return malloc_good_size(aSize);
-# else
-    return aSize;
-# endif
-}
-
-// Heuristic to choose the size of the next BumpChunk for small allocations.
-// `start` is the size of the first chunk. `used` is the total size of all
-// BumpChunks in this LifoAlloc so far.
-static size_t
-NextSize(size_t start, size_t used)
-{
-    // Double the size, up to 1 MB.
-    const size_t mb = 1 * 1024 * 1024;
-    if (used < mb) {
-        return Max(start, used);
-    }
-
-    // After 1 MB, grow more gradually, to waste less memory.
-    // The sequence (in megabytes) begins:
-    // 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, ...
-    return JS_ROUNDUP(used / 8, mb);
 }
 
 LifoAlloc::UniqueBumpChunk
-LifoAlloc::newChunkWithCapacity(size_t n, bool oversize)
+LifoAlloc::newChunkWithCapacity(size_t n)
 {
     MOZ_ASSERT(fallibleScope_, "[OOM] Cannot allocate a new chunk in an infallible scope.");
 
     // Compute the size which should be requested in order to be able to fit |n|
     // bytes in a newly allocated chunk, or default to |defaultChunkSize_|.
 
     size_t minSize;
     if (MOZ_UNLIKELY(!detail::BumpChunk::allocSizeWithRedZone(n, &minSize) ||
                      (minSize & (size_t(1) << (BitSize<size_t>::value - 1)))))
     {
         return nullptr;
     }
 
-    MOZ_ASSERT(curSize_ >= oversizeSize_);
-    const size_t chunkSize = (oversize || minSize > defaultChunkSize_)
-                             ? MallocGoodSize(minSize)
-                             : NextSize(defaultChunkSize_, curSize_ - oversizeSize_);
+    const size_t chunkSize = minSize > defaultChunkSize_
+                             ? RoundUpPow2(minSize)
+                             : defaultChunkSize_;
 
     // Create a new BumpChunk, and allocate space for it.
     UniqueBumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize);
     if (!result) {
         return nullptr;
     }
     MOZ_ASSERT(result->computedSizeOfIncludingThis() == chunkSize);
     return result;
 }
 
-LifoAlloc::UniqueBumpChunk
+bool
 LifoAlloc::getOrCreateChunk(size_t n)
 {
     // Look for existing unused BumpChunks to satisfy the request, and pick the
     // first one which is large enough, and move it into the list of used
     // chunks.
     if (!unused_.empty()) {
         if (unused_.begin()->canAlloc(n)) {
-            return unused_.popFirst();
+            chunks_.append(unused_.popFirst());
+            return true;
         }
 
         BumpChunkList::Iterator e(unused_.end());
         for (BumpChunkList::Iterator i(unused_.begin()); i->next() != e.get(); ++i) {
             detail::BumpChunk* elem = i->next();
             MOZ_ASSERT(elem->empty());
             if (elem->canAlloc(n)) {
                 BumpChunkList temp = unused_.splitAfter(i.get());
-                UniqueBumpChunk newChunk = temp.popFirst();
+                chunks_.append(temp.popFirst());
                 unused_.appendAll(std::move(temp));
-                return newChunk;
+                return true;
             }
         }
     }
 
     // Allocate a new BumpChunk with enough space for the next allocation.
-    UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
-    if (!newChunk) {
-        return newChunk;
-    }
-    size_t size = newChunk->computedSizeOfIncludingThis();
-    incrementCurSize(size);
-    return newChunk;
-}
-
-void*
-LifoAlloc::allocImplColdPath(size_t n)
-{
-    void* result;
-    UniqueBumpChunk newChunk = getOrCreateChunk(n);
-    if (!newChunk) {
-        return nullptr;
-    }
-
-    // Since we just created a large enough chunk, this can't fail.
-    chunks_.append(std::move(newChunk));
-    result = chunks_.last()->tryAlloc(n);
-    MOZ_ASSERT(result);
-    return result;
-}
-
-void*
-LifoAlloc::allocImplOversize(size_t n)
-{
-    void* result;
-    UniqueBumpChunk newChunk = newChunkWithCapacity(n, true);
-    if (!newChunk) {
-        return nullptr;
-    }
-    incrementCurSize(newChunk->computedSizeOfIncludingThis());
-    oversizeSize_ += newChunk->computedSizeOfIncludingThis();
-
-    // Since we just created a large enough chunk, this can't fail.
-    oversize_.append(std::move(newChunk));
-    result = oversize_.last()->tryAlloc(n);
-    MOZ_ASSERT(result);
-    return result;
-}
-
-bool
-LifoAlloc::ensureUnusedApproximateColdPath(size_t n, size_t total)
-{
-    for (detail::BumpChunk& bc : unused_) {
-        total += bc.unused();
-        if (total >= n) {
-            return true;
-        }
-    }
-
-    UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
+    UniqueBumpChunk newChunk = newChunkWithCapacity(n);
     if (!newChunk) {
         return false;
     }
     size_t size = newChunk->computedSizeOfIncludingThis();
-    unused_.pushFront(std::move(newChunk));
+    chunks_.append(std::move(newChunk));
     incrementCurSize(size);
     return true;
 }
 
-LifoAlloc::Mark
-LifoAlloc::mark()
-{
-    markCount++;
-    Mark res;
-    if (!chunks_.empty()) {
-        res.chunk = chunks_.last()->mark();
-    }
-    if (!oversize_.empty()) {
-        res.oversize = oversize_.last()->mark();
-    }
-    return res;
-}
-
-void
-LifoAlloc::release(Mark mark)
-{
-    markCount--;
-#ifdef DEBUG
-    auto assertIsContained = [](const detail::BumpChunk::Mark& m, BumpChunkList& list) {
-        if (m.markedChunk()) {
-            bool contained = false;
-            for (const detail::BumpChunk& chunk : list) {
-                if (&chunk == m.markedChunk() && chunk.contains(m)) {
-                    contained = true;
-                    break;
-                }
-            }
-            MOZ_ASSERT(contained);
-        }
-    };
-    assertIsContained(mark.chunk, chunks_);
-    assertIsContained(mark.oversize, oversize_);
-#endif
-
-    BumpChunkList released;
-    auto cutAtMark = [&released](const detail::BumpChunk::Mark& m, BumpChunkList& list) {
-        // Move the blocks which are after the mark to the set of released chunks.
-        if (!m.markedChunk()) {
-            released = std::move(list);
-        } else {
-            released = list.splitAfter(m.markedChunk());
-        }
-
-        // Release everything which follows the mark in the last chunk.
-        if (!list.empty()) {
-            list.last()->release(m);
-        }
-    };
-
-    // Release the content of all the blocks which are after the marks, and keep
-    // blocks as unused.
-    cutAtMark(mark.chunk, chunks_);
-    for (detail::BumpChunk& bc : released) {
-        bc.release();
-    }
-    unused_.appendAll(std::move(released));
-
-    // Free the content of all the blocks which are after the marks.
-    cutAtMark(mark.oversize, oversize_);
-    while (!released.empty()) {
-        UniqueBumpChunk bc = released.popFirst();
-        decrementCurSize(bc->computedSizeOfIncludingThis());
-        oversizeSize_ -= bc->computedSizeOfIncludingThis();
-    }
-}
-
-void
-LifoAlloc::steal(LifoAlloc* other)
-{
-    MOZ_ASSERT(!other->markCount);
-    MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
-    MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
-    MOZ_DIAGNOSTIC_ASSERT(oversize_.empty());
-
-    // Copy everything from |other| to |this| except for |peakSize_|, which
-    // requires some care.
-    chunks_ = std::move(other->chunks_);
-    oversize_ = std::move(other->oversize_);
-    unused_ = std::move(other->unused_);
-    markCount = other->markCount;
-    defaultChunkSize_ = other->defaultChunkSize_;
-    oversizeThreshold_ = other->oversizeThreshold_;
-    curSize_ = other->curSize_;
-    peakSize_ = Max(peakSize_, other->peakSize_);
-    oversizeSize_ = other->oversizeSize_;
-#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
-    fallibleScope_ = other->fallibleScope_;
-#endif
-
-    other->reset(defaultChunkSize_);
-}
-
 void
 LifoAlloc::transferFrom(LifoAlloc* other)
 {
     MOZ_ASSERT(!markCount);
     MOZ_ASSERT(!other->markCount);
 
     incrementCurSize(other->curSize_);
-    oversizeSize_ += other->oversizeSize_;
     appendUnused(std::move(other->unused_));
     appendUsed(std::move(other->chunks_));
-    oversize_.appendAll(std::move(other->oversize_));
     other->curSize_ = 0;
-    other->oversizeSize_ = 0;
 }
 
 void
 LifoAlloc::transferUnusedFrom(LifoAlloc* other)
 {
     MOZ_ASSERT(!markCount);
 
     size_t size = 0;
     for (detail::BumpChunk& bc : other->unused_) {
         size += bc.computedSizeOfIncludingThis();
     }
 
     appendUnused(std::move(other->unused_));
     incrementCurSize(size);
     other->decrementCurSize(size);
 }
-
-#ifdef LIFO_CHUNK_PROTECT
-void
-LifoAlloc::setReadOnly()
-{
-    for (detail::BumpChunk& bc : chunks_) {
-        bc.setReadOnly();
-    }
-    for (detail::BumpChunk& bc : oversize_) {
-        bc.setReadOnly();
-    }
-    for (detail::BumpChunk& bc : unused_) {
-        bc.setReadOnly();
-    }
-}
-
-void
-LifoAlloc::setReadWrite()
-{
-    for (detail::BumpChunk& bc : chunks_) {
-        bc.setReadWrite();
-    }
-    for (detail::BumpChunk& bc : oversize_) {
-        bc.setReadWrite();
-    }
-    for (detail::BumpChunk& bc : unused_) {
-        bc.setReadWrite();
-    }
-}
-#endif
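
The NextSize() heuristic removed above sized each new small chunk from the total already allocated: doubling up to 1 MB, then growing by roughly an eighth of the used size, rounded up to a whole megabyte. A standalone restatement that prints the resulting schedule (a sketch: Max is rendered as std::max, and JS_ROUNDUP(x, mb), which rounds x up to a multiple of mb, is expanded by hand):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    static size_t NextSize(size_t start, size_t used) {
        const size_t mb = 1 * 1024 * 1024;
        if (used < mb) {
            return std::max(start, used);        // doubles the total, up to 1 MB
        }
        return ((used / 8) + mb - 1) / mb * mb;  // then ~used/8, MB-rounded
    }

    int main() {
        size_t used = 0;                         // total size of all chunks so far
        for (int i = 0; i < 16; i++) {
            size_t next = NextSize(4096, used);  // 4 KB first chunk, assumed
            std::printf("chunk %2d: %zu bytes\n", i, next);
            used += next;
        }
        return 0;
    }
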
--- a/js/src/ds/LifoAlloc.h
+++ b/js/src/ds/LifoAlloc.h
@@ -374,17 +374,17 @@ class BumpChunk : public SingleLinkedLis
     class Mark
     {
         // Chunk which owns the pointer.
         BumpChunk* chunk_;
         // Record of the bump_ pointer of the BumpChunk.
         uint8_t* bump_;
 
         friend class BumpChunk;
-        Mark(BumpChunk* chunk, uint8_t* bump)
+        explicit Mark(BumpChunk* chunk, uint8_t* bump)
           : chunk_(chunk), bump_(bump)
         {}
 
       public:
         Mark() : chunk_(nullptr), bump_(nullptr) {}
 
         BumpChunk* markedChunk() const { return chunk_; }
     };
@@ -522,52 +522,42 @@ BumpChunk::begin()
 //
 // Note: We leave BumpChunks latent in the set of unused chunks after they've
 // been released to avoid thrashing before a GC.
 class LifoAlloc
 {
     using UniqueBumpChunk = js::UniquePtr<detail::BumpChunk>;
     using BumpChunkList = detail::SingleLinkedList<detail::BumpChunk>;
 
-    // List of chunks containing allocated data of size smaller than the default
-    // chunk size. In the common case, the last chunk of this list is always
-    // used to perform the allocations. When the allocation cannot be performed,
-    // we move a Chunk from the unused set to the list of used chunks.
+    // List of chunks containing allocated data. In the common case, the last
+    // chunk of this list is always used to perform the allocations. When the
+    // allocation cannot be performed, we move a Chunk from the unused set to
+    // the list of used chunks.
     BumpChunkList chunks_;
 
-    // List of chunks containing allocated data where each allocation is larger
-    // than the oversize threshold. Each chunk contains exactly one allocation.
-    // This reduces wasted space in the normal chunk list.
-    //
-    // Oversize chunks are allocated on demand and freed as soon as they are
-    // released, instead of being pushed to the unused list.
-    BumpChunkList oversize_;
-
     // Set of unused chunks, which can be reused for future allocations.
     BumpChunkList unused_;
 
     size_t      markCount;
     size_t      defaultChunkSize_;
-    size_t      oversizeThreshold_;
     size_t      curSize_;
     size_t      peakSize_;
-    size_t      oversizeSize_;
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
     bool        fallibleScope_;
 #endif
 
     void operator=(const LifoAlloc&) = delete;
     LifoAlloc(const LifoAlloc&) = delete;
 
     // Return a BumpChunk that can perform an allocation of at least size |n|.
-    UniqueBumpChunk newChunkWithCapacity(size_t n, bool oversize);
+    UniqueBumpChunk newChunkWithCapacity(size_t n);
 
     // Reuse or allocate a BumpChunk that can perform an allocation of at least
     // size |n|; if successful, it is placed at the end of the list of |chunks_|.
-    UniqueBumpChunk getOrCreateChunk(size_t n);
+    MOZ_MUST_USE bool getOrCreateChunk(size_t n);
 
     void reset(size_t defaultChunkSize);
 
     // Append unused chunks to the end of this LifoAlloc.
     void appendUnused(BumpChunkList&& otherUnused) {
 #ifdef DEBUG
         for (detail::BumpChunk& bc: otherUnused) {
             MOZ_ASSERT(bc.empty());
@@ -589,58 +579,63 @@ class LifoAlloc
             peakSize_ = curSize_;
         }
     }
     void decrementCurSize(size_t size) {
         MOZ_ASSERT(curSize_ >= size);
         curSize_ -= size;
     }
 
-    void* allocImplColdPath(size_t n);
-    void* allocImplOversize(size_t n);
-
     MOZ_ALWAYS_INLINE
     void* allocImpl(size_t n) {
         void* result;
-        // Give oversized allocations their own chunk instead of wasting space
-        // due to fragmentation at the end of normal chunk.
-        if (MOZ_UNLIKELY(n > oversizeThreshold_)) {
-            return allocImplOversize(n);
-        }
-        if (MOZ_LIKELY(!chunks_.empty() && (result = chunks_.last()->tryAlloc(n)))) {
+        if (!chunks_.empty() && (result = chunks_.last()->tryAlloc(n))) {
             return result;
         }
-        return allocImplColdPath(n);
-    }
+
+        if (!getOrCreateChunk(n)) {
+            return nullptr;
+        }
 
-    // Check for space in unused chunks or allocate a new unused chunk.
-    MOZ_MUST_USE bool ensureUnusedApproximateColdPath(size_t n, size_t total);
+        // Since we just created a large enough chunk, this can't fail.
+        result = chunks_.last()->tryAlloc(n);
+        MOZ_ASSERT(result);
+        return result;
+    }
 
   public:
     explicit LifoAlloc(size_t defaultChunkSize)
       : peakSize_(0)
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
       , fallibleScope_(true)
 #endif
     {
         reset(defaultChunkSize);
     }
 
-    // Set the threshold to allocate data in its own chunk outside the space for
-    // small allocations.
-    void disableOversize() {
-        oversizeThreshold_ = SIZE_MAX;
+    // Steal allocated chunks from |other|.
+    void steal(LifoAlloc* other) {
+        MOZ_ASSERT(!other->markCount);
+        MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
+        MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
+
+        // Copy everything from |other| to |this| except for |peakSize_|, which
+        // requires some care.
+        chunks_ = std::move(other->chunks_);
+        unused_ = std::move(other->unused_);
+        markCount = other->markCount;
+        defaultChunkSize_ = other->defaultChunkSize_;
+        curSize_ = other->curSize_;
+        peakSize_ = Max(peakSize_, other->peakSize_);
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+        fallibleScope_ = other->fallibleScope_;
+#endif
+
+        other->reset(defaultChunkSize_);
     }
-    void setOversizeThreshold(size_t oversizeThreshold) {
-        MOZ_ASSERT(oversizeThreshold <= defaultChunkSize_);
-        oversizeThreshold_ = oversizeThreshold;
-    }
-
-    // Steal allocated chunks from |other|.
-    void steal(LifoAlloc* other);
 
     // Append all chunks from |other|. They are removed from |other|.
     void transferFrom(LifoAlloc* other);
 
     // Append unused chunks from |other|. They are removed from |other|.
     void transferUnusedFrom(LifoAlloc* other);
 
     ~LifoAlloc() { freeAll(); }
@@ -666,17 +661,17 @@ class LifoAlloc
             JS_OOM_POSSIBLY_FAIL();
         }
 #endif
         return allocImpl(n);
     }
 
     template<typename T, typename... Args>
     MOZ_ALWAYS_INLINE T*
-    newWithSize(size_t n, Args&&... args)
+    allocInSize(size_t n, Args&&... args)
     {
         MOZ_ASSERT(n >= sizeof(T), "must request enough space to store a T");
         static_assert(alignof(T) <= detail::LIFO_ALLOC_ALIGN,
                       "LifoAlloc must provide enough alignment to store T");
         void* ptr = alloc(n);
         if (!ptr) {
             return nullptr;
         }
@@ -703,17 +698,31 @@ class LifoAlloc
         size_t total = 0;
         if (!chunks_.empty()) {
             total += chunks_.last()->unused();
             if (total >= n) {
                 return true;
             }
         }
 
-        return ensureUnusedApproximateColdPath(n, total);
+        for (detail::BumpChunk& bc : unused_) {
+            total += bc.unused();
+            if (total >= n) {
+                return true;
+            }
+        }
+
+        UniqueBumpChunk newChunk = newChunkWithCapacity(n);
+        if (!newChunk) {
+            return false;
+        }
+        size_t size = newChunk->computedSizeOfIncludingThis();
+        unused_.pushFront(std::move(newChunk));
+        incrementCurSize(size);
+        return true;
     }
 
     MOZ_ALWAYS_INLINE
     void setAsInfallibleByDefault() {
 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
         fallibleScope_ = false;
 #endif
     }
@@ -755,63 +764,93 @@ class LifoAlloc
     T* newArrayUninitialized(size_t count) {
         size_t bytes;
         if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes))) {
             return nullptr;
         }
         return static_cast<T*>(alloc(bytes));
     }
 
-    class Mark {
-        friend class LifoAlloc;
-        detail::BumpChunk::Mark chunk;
-        detail::BumpChunk::Mark oversize;
-    };
-    Mark mark();
-    void release(Mark mark);
+    using Mark = detail::BumpChunk::Mark;
+
+    Mark mark() {
+        markCount++;
+        if (chunks_.empty()) {
+            return Mark();
+        }
+        return chunks_.last()->mark();
+    }
+
+    void release(Mark mark) {
+        markCount--;
+
+        // Move the blocks which are after the mark to the set of unused chunks.
+        BumpChunkList released;
+        if (!mark.markedChunk()) {
+            released = std::move(chunks_);
+        } else {
+            released = chunks_.splitAfter(mark.markedChunk());
+        }
+
+        // Release the content of all the blocks which are after the marks.
+        for (detail::BumpChunk& bc : released) {
+            bc.release();
+        }
+        unused_.appendAll(std::move(released));
+
+        // Release everything which follows the mark in the last chunk.
+        if (!chunks_.empty()) {
+            chunks_.last()->release(mark);
+        }
+    }
 
     void releaseAll() {
         MOZ_ASSERT(!markCount);
         for (detail::BumpChunk& bc : chunks_) {
             bc.release();
         }
         unused_.appendAll(std::move(chunks_));
-        // On release, we free any oversize allocations instead of keeping them
-        // in unused chunks.
-        while (!oversize_.empty()) {
-            UniqueBumpChunk bc = oversize_.popFirst();
-            decrementCurSize(bc->computedSizeOfIncludingThis());
-            oversizeSize_ -= bc->computedSizeOfIncludingThis();
-        }
     }
 
     // Protect the content of the LifoAlloc chunks.
 #ifdef LIFO_CHUNK_PROTECT
-    void setReadOnly();
-    void setReadWrite();
+    void setReadOnly() {
+        for (detail::BumpChunk& bc : chunks_) {
+            bc.setReadOnly();
+        }
+        for (detail::BumpChunk& bc : unused_) {
+            bc.setReadOnly();
+        }
+    }
+    void setReadWrite() {
+        for (detail::BumpChunk& bc : chunks_) {
+            bc.setReadWrite();
+        }
+        for (detail::BumpChunk& bc : unused_) {
+            bc.setReadWrite();
+        }
+    }
 #else
     void setReadOnly() const {}
     void setReadWrite() const {}
 #endif
 
     // Get the total "used" (occupied bytes) count for the arena chunks.
     size_t used() const {
         size_t accum = 0;
         for (const detail::BumpChunk& chunk : chunks_) {
             accum += chunk.used();
         }
         return accum;
     }
 
     // Return true if the LifoAlloc does not currently contain any allocations.
     bool isEmpty() const {
-        bool empty = chunks_.empty() ||
-            (chunks_.begin() == chunks_.last() && chunks_.last()->empty());
-        MOZ_ASSERT_IF(!oversize_.empty(), !oversize_.last()->empty());
-        return empty && oversize_.empty();
+        return chunks_.empty() ||
+               (chunks_.begin() == chunks_.last() && chunks_.last()->empty());
     }
 
     // Return the number of bytes remaining to allocate in the current chunk.
     // e.g. How many bytes we can allocate before needing a new block.
     size_t availableInCurrentChunk() const {
         if (chunks_.empty()) {
             return 0;
         }
@@ -819,34 +858,28 @@ class LifoAlloc
     }
 
     // Get the total size of the arena chunks (including unused space).
     size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         size_t n = 0;
         for (const detail::BumpChunk& chunk : chunks_) {
             n += chunk.sizeOfIncludingThis(mallocSizeOf);
         }
-        for (const detail::BumpChunk& chunk : oversize_) {
-            n += chunk.sizeOfIncludingThis(mallocSizeOf);
-        }
         for (const detail::BumpChunk& chunk : unused_) {
             n += chunk.sizeOfIncludingThis(mallocSizeOf);
         }
         return n;
     }
 
     // Get the total size of the arena chunks (including unused space).
     size_t computedSizeOfExcludingThis() const {
         size_t n = 0;
         for (const detail::BumpChunk& chunk : chunks_) {
             n += chunk.computedSizeOfIncludingThis();
         }
-        for (const detail::BumpChunk& chunk : oversize_) {
-            n += chunk.computedSizeOfIncludingThis();
-        }
         for (const detail::BumpChunk& chunk : unused_) {
             n += chunk.computedSizeOfIncludingThis();
         }
         return n;
     }
 
     // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
@@ -869,21 +902,16 @@ class LifoAlloc
 
 #ifdef DEBUG
     bool contains(void* ptr) const {
         for (const detail::BumpChunk& chunk : chunks_) {
             if (chunk.contains(ptr)) {
                 return true;
             }
         }
-        for (const detail::BumpChunk& chunk : oversize_) {
-            if (chunk.contains(ptr)) {
-                return true;
-            }
-        }
         return false;
     }
 #endif
 
     // Iterate over the data allocated in a LifoAlloc, and interpret it.
     class Enum
     {
         friend class LifoAlloc;
@@ -914,17 +942,16 @@ class LifoAlloc
         }
 
       public:
         explicit Enum(LifoAlloc& alloc)
           : chunkIt_(alloc.chunks_.begin()),
             chunkEnd_(alloc.chunks_.end()),
             head_(nullptr)
         {
-            MOZ_RELEASE_ASSERT(alloc.oversize_.empty());
             if (chunkIt_ != chunkEnd_) {
                 head_ = chunkIt_->begin();
             }
         }
 
         // Return true if there are no more bytes to enumerate.
         bool empty() {
             return chunkIt_ == chunkEnd_ ||
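
The mark()/release() pair restored inline above provides LIFO checkpoint semantics: release() moves every chunk appended after the mark onto unused_ and rewinds the marked chunk's bump pointer. A usage sketch, assuming LifoAlloc.h is included and with illustrative sizes:

    LifoAlloc alloc(4096);             // 4 KB default chunks (illustrative)

    void* a = alloc.alloc(64);         // survives the checkpoint below
    LifoAlloc::Mark m = alloc.mark();  // records the current bump pointer

    void* b = alloc.alloc(512);        // scratch allocations...
    void* c = alloc.alloc(128);        // ...needed only temporarily

    alloc.release(m);                  // b and c are gone: whole chunks after
                                       // the mark move to unused_ for reuse,
                                       // while a stays valid
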
--- a/js/src/frontend/Parser.cpp
+++ b/js/src/frontend/Parser.cpp
@@ -1933,17 +1933,17 @@ Parser<FullParseHandler, Unit>::checkSta
 }
 
 template <typename Scope>
 typename Scope::Data*
 NewEmptyBindingData(JSContext* cx, LifoAlloc& alloc, uint32_t numBindings)
 {
     using Data = typename Scope::Data;
     size_t allocSize = SizeOfData<typename Scope::Data>(numBindings);
-    auto* bindings = alloc.newWithSize<Data>(allocSize, numBindings);
+    auto* bindings = alloc.allocInSize<Data>(allocSize, numBindings);
     if (!bindings) {
         ReportOutOfMemory(cx);
     }
     return bindings;
 }
 
 /**
  * Copy-construct |BindingName|s from |bindings| into |cursor|, then return
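
allocInSize<T>(n, args...), restored above in place of the renamed newWithSize, allocates n bytes from the LifoAlloc and placement-constructs a T at the front; that is how Scope::Data with its trailing array of bindings is built here. A self-contained toy of the same pattern, with malloc standing in for the LifoAlloc and a layout and names that are illustrative rather than the SpiderMonkey definitions:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <new>

    struct BindingName { const char* name; };

    struct Data {
        uint32_t length;
        BindingName names[1];  // trailing array, really sized by the allocation
        explicit Data(uint32_t n) : length(n) {}
    };

    int main() {
        uint32_t numBindings = 8;
        size_t allocSize = sizeof(Data) + (numBindings - 1) * sizeof(BindingName);
        void* raw = std::malloc(allocSize);    // stands in for alloc(allocSize)
        if (!raw) {
            return 1;                          // the parser reports OOM here
        }
        Data* bindings = new (raw) Data(numBindings);
        std::free(bindings);                   // Data is trivially destructible
        return 0;
    }
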
--- a/js/src/gc/StoreBuffer.h
+++ b/js/src/gc/StoreBuffer.h
@@ -148,22 +148,16 @@ class StoreBuffer
 
         WholeCellBuffer() : storage_(nullptr), head_(nullptr) {}
         ~WholeCellBuffer() { js_delete(storage_); }
 
         MOZ_MUST_USE bool init() {
             MOZ_ASSERT(!head_);
             if (!storage_) {
                 storage_ = js_new<LifoAlloc>(LifoAllocBlockSize);
-                // This prevents LifoAlloc::Enum from crashing with a release
-                // assertion if we ever allocate one entry larger than
-                // LifoAllocBlockSize.
-                if (storage_) {
-                    storage_->disableOversize();
-                }
             }
             clear();
             return bool(storage_);
         }
 
         void clear();
 
         bool isAboutToOverflow() const {
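
The StoreBuffer.h hunk follows from the LifoAlloc one: with the oversize_ list gone, LifoAlloc::Enum no longer release-asserts on oversize chunks, so WholeCellBuffer::init() does not need the opt-out. Reconstructed from the removed lines, the pre-backout interaction was:

    storage_ = js_new<LifoAlloc>(LifoAllocBlockSize);
    if (storage_) {
        // Without this call, an entry larger than LifoAllocBlockSize would
        // land in oversize_, and the MOZ_RELEASE_ASSERT(alloc.oversize_.empty())
        // in LifoAlloc::Enum's constructor would crash when tracing the buffer.
        storage_->disableOversize();  // sets oversizeThreshold_ = SIZE_MAX
    }
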