Bug 1291292 - Use dynamic chunk allocation for the nursery r=terrence
author Jon Coppeard <jcoppeard@mozilla.com>
Thu, 11 Aug 2016 17:14:56 +0100
changeset 309073 1a4509a3e2ce583753eda01ba911b34631bf207e
parent 309072 0987e46667b2e9fdcb130fe3eb7f51fa36c19958
child 309074 80af899d3462e26fa758ab1c734ac0148c9d96cc
push id 20299
push user kwierso@gmail.com
push date Fri, 12 Aug 2016 23:39:35 +0000
treeherder fx-team@1ba6215e84c3
reviewers terrence
bugs 1291292
milestone 51.0a1
Bug 1291292 - Use dynamic chunk allocation for the nursery r=terrence
js/public/MemoryMetrics.h
js/src/gc/Allocator.cpp
js/src/gc/GCRuntime.h
js/src/gc/Heap.h
js/src/gc/Nursery.cpp
js/src/gc/Nursery.h
js/src/gc/StoreBuffer.cpp
js/src/gc/StoreBuffer.h
js/src/jsgc.cpp
js/src/vm/Runtime.cpp
js/src/vm/TypedArrayObject.cpp
js/xpconnect/src/XPCJSRuntime.cpp
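
(For orientation before the per-file diffs: the nursery switches from one contiguous mapping of numNurseryChunks_ chunks to a growable vector of individually allocated chunks taken from the GC's chunk pools. A minimal standalone sketch of the new layout follows; ChunkSize, the trailer fields and std::vector here are simplified stand-ins for the real SpiderMonkey definitions, not the actual ones in the tree.)

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stand-in constants and trailer; the real ones live in js/src/gc/Heap.h.
    static const size_t ChunkSize = size_t(1) << 20;
    struct ChunkTrailer { uintptr_t location; void* storeBuffer; void* runtime; };

    static const size_t NurseryChunkUsableSize = ChunkSize - sizeof(ChunkTrailer);

    // Each nursery chunk is exactly one GC chunk: usable data plus a trailer.
    struct NurseryChunk {
        char data[NurseryChunkUsableSize];
        ChunkTrailer trailer;
        uintptr_t start() const { return uintptr_t(&data); }
        uintptr_t end() const { return uintptr_t(&trailer); }
    };
    static_assert(sizeof(NurseryChunk) == ChunkSize,
                  "nursery chunk layout must match the GC chunk size");

    // The nursery now tracks these chunks in a vector and grows or shrinks it
    // by taking chunks from, or returning them to, the GC's chunk pools.
    std::vector<NurseryChunk*> chunks_;
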
--- a/js/public/MemoryMetrics.h
+++ b/js/public/MemoryMetrics.h
@@ -278,17 +278,16 @@ struct CodeSizes
 /** Data for tracking GC memory usage. */
 struct GCSizes
 {
     // |nurseryDecommitted| is marked as NonHeap rather than GCHeapDecommitted
     // because we don't consider the nursery to be part of the GC heap.
 #define FOR_EACH_SIZE(macro) \
     macro(_, MallocHeap, marker) \
     macro(_, NonHeap,    nurseryCommitted) \
-    macro(_, NonHeap,    nurseryDecommitted) \
     macro(_, MallocHeap, nurseryMallocedBuffers) \
     macro(_, MallocHeap, storeBufferVals) \
     macro(_, MallocHeap, storeBufferCells) \
     macro(_, MallocHeap, storeBufferSlots) \
     macro(_, MallocHeap, storeBufferWholeCells) \
     macro(_, MallocHeap, storeBufferGenerics)
 
     GCSizes()
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -255,40 +255,16 @@ GCRuntime::checkIncrementalZoneState(Exc
     MOZ_ASSERT_IF(t && zone->wasGCStarted() && (zone->isGCMarking() || zone->isGCSweeping()),
                   t->asTenured().arena()->allocatedDuringIncremental);
 #endif
 }
 
 
 // ///////////  Arena -> Thing Allocator  //////////////////////////////////////
 
-// After pulling a Chunk out of the empty chunks pool, we want to run the
-// background allocator to refill it. The code that takes Chunks does so under
-// the GC lock. We need to start the background allocation under the helper
-// threads lock. To avoid lock inversion we have to delay the start until after
-// we are outside the GC lock. This class handles that delay automatically.
-class MOZ_RAII js::gc::AutoMaybeStartBackgroundAllocation
-{
-    JSRuntime* runtime;
-
-  public:
-    AutoMaybeStartBackgroundAllocation()
-      : runtime(nullptr)
-    {}
-
-    void tryToStartBackgroundAllocation(JSRuntime* rt) {
-        runtime = rt;
-    }
-
-    ~AutoMaybeStartBackgroundAllocation() {
-        if (runtime)
-            runtime->gc.startBackgroundAllocTaskIfIdle();
-    }
-};
-
 void
 GCRuntime::startBackgroundAllocTaskIfIdle()
 {
     AutoLockHelperThreadState helperLock;
     if (allocTask.isRunningWithLockHeld(helperLock))
         return;
 
     // Join the previous invocation of the task. This will return immediately
@@ -542,17 +518,17 @@ GCRuntime::getOrAllocChunk(const AutoLoc
     if (!chunk) {
         chunk = Chunk::allocate(rt);
         if (!chunk)
             return nullptr;
         MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
     }
 
     if (wantBackgroundAllocation(lock))
-        maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt);
+        maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt->gc);
 
     return chunk;
 }
 
 void
 GCRuntime::recycleChunk(Chunk* chunk, const AutoLockGC& lock)
 {
     emptyChunks(lock).push(chunk);
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -1382,16 +1382,40 @@ class MOZ_RAII AutoEnterIteration {
     }
 
     ~AutoEnterIteration() {
         MOZ_ASSERT(gc->numActiveZoneIters);
         --gc->numActiveZoneIters;
     }
 };
 
+// After pulling a Chunk out of the empty chunks pool, we want to run the
+// background allocator to refill it. The code that takes Chunks does so under
+// the GC lock. We need to start the background allocation under the helper
+// threads lock. To avoid lock inversion we have to delay the start until after
+// we are outside the GC lock. This class handles that delay automatically.
+class MOZ_RAII AutoMaybeStartBackgroundAllocation
+{
+    GCRuntime* gc;
+
+  public:
+    AutoMaybeStartBackgroundAllocation()
+      : gc(nullptr)
+    {}
+
+    void tryToStartBackgroundAllocation(GCRuntime& gc) {
+        this->gc = &gc;
+    }
+
+    ~AutoMaybeStartBackgroundAllocation() {
+        if (gc)
+            gc->startBackgroundAllocTaskIfIdle();
+    }
+};
+
 #ifdef JS_GC_ZEAL
 
 inline bool
 GCRuntime::hasZealMode(ZealMode mode)
 {
     static_assert(size_t(ZealMode::Limit) < sizeof(zealModeBits) * 8,
                   "Zeal modes must fit in zealModeBits");
     return zealModeBits & (1 << uint32_t(mode));
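
(The comment above describes a deferred-action RAII pattern: the request is recorded while the GC lock is held, and the helper-threads lock is only taken from the destructor once the GC lock has been released. A self-contained sketch of the same shape, with stand-in types rather than the real GCRuntime and lock classes:)

    #include <cstdio>

    struct FakeGCRuntime {
        // Stand-in for GCRuntime::startBackgroundAllocTaskIfIdle(), which takes
        // the helper-threads lock internally.
        void startBackgroundAllocTaskIfIdle() { std::puts("background alloc started"); }
    };

    class AutoMaybeStartBackgroundAllocation {
        FakeGCRuntime* gc = nullptr;
      public:
        void tryToStartBackgroundAllocation(FakeGCRuntime& rt) { gc = &rt; }
        ~AutoMaybeStartBackgroundAllocation() {
            if (gc)
                gc->startBackgroundAllocTaskIfIdle();
        }
    };

    int main() {
        FakeGCRuntime gc;
        AutoMaybeStartBackgroundAllocation maybeStart;
        {
            // Imagine the GC lock is held in this scope: only the request is
            // recorded here, no other lock is taken.
            maybeStart.tryToStartBackgroundAllocation(gc);
        }
        // The GC lock would be released here; maybeStart's destructor then runs
        // at the end of main, where taking the helper-threads lock is safe.
        return 0;
    }
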
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -997,17 +997,17 @@ struct Chunk
 
     void releaseArena(JSRuntime* rt, Arena* arena, const AutoLockGC& lock);
     void recycleArena(Arena* arena, SortedArenaList& dest, size_t thingsPerArena);
 
     MOZ_MUST_USE bool decommitOneFreeArena(JSRuntime* rt, AutoLockGC& lock);
     void decommitAllArenasWithoutUnlocking(const AutoLockGC& lock);
 
     static Chunk* allocate(JSRuntime* rt);
-    inline void init(JSRuntime* rt);
+    void init(JSRuntime* rt);
 
   private:
     void decommitAllArenas(JSRuntime* rt);
 
     /* Search for a decommitted arena to allocate. */
     unsigned findDecommittedArenaOffset();
     Arena* fetchNextDecommittedArena();
 
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -77,145 +77,178 @@ struct js::Nursery::SweepAction
 #ifdef JS_GC_ZEAL
 struct js::Nursery::Canary
 {
     uintptr_t magicValue;
     Canary* next;
 };
 #endif
 
+inline void
+js::Nursery::NurseryChunk::poisonAndInit(JSRuntime* rt, uint8_t poison)
+{
+    JS_POISON(this, poison, ChunkSize);
+    init(rt);
+}
+
+inline void
+js::Nursery::NurseryChunk::init(JSRuntime* rt)
+{
+    new (&trailer) gc::ChunkTrailer(rt, &rt->gc.storeBuffer);
+}
+
+/* static */ inline js::Nursery::NurseryChunk*
+js::Nursery::NurseryChunk::fromChunk(Chunk* chunk)
+{
+    return reinterpret_cast<NurseryChunk*>(chunk);
+}
+
+inline Chunk*
+js::Nursery::NurseryChunk::toChunk(JSRuntime* rt)
+{
+    auto chunk = reinterpret_cast<Chunk*>(this);
+    chunk->init(rt);
+    return chunk;
+}
+
 js::Nursery::Nursery(JSRuntime* rt)
   : runtime_(rt)
   , position_(0)
-  , currentStart_(0)
+  , currentStartChunk_(0)
+  , currentStartPosition_(0)
   , currentEnd_(0)
-  , heapStart_(0)
-  , heapEnd_(0)
   , currentChunk_(0)
-  , numActiveChunks_(0)
-  , numNurseryChunks_(0)
+  , maxNurseryChunks_(0)
   , previousPromotionRate_(0)
   , profileThreshold_(0)
   , enableProfiling_(false)
   , minorGcCount_(0)
   , freeMallocedBuffersTask(nullptr)
   , sweepActions_(nullptr)
 #ifdef JS_GC_ZEAL
   , lastCanary_(nullptr)
 #endif
 {}
 
 bool
-js::Nursery::init(uint32_t maxNurseryBytes)
+js::Nursery::init(uint32_t maxNurseryBytes, AutoLockGC& lock)
 {
     /* maxNurseryBytes parameter is rounded down to a multiple of chunk size. */
-    numNurseryChunks_ = maxNurseryBytes >> ChunkShift;
+    maxNurseryChunks_ = maxNurseryBytes >> ChunkShift;
 
     /* If no chunks are specified then the nursery is permanently disabled. */
-    if (numNurseryChunks_ == 0)
+    if (maxNurseryChunks_ == 0)
         return true;
 
     if (!mallocedBuffers.init())
         return false;
 
     if (!cellsWithUid_.init())
         return false;
 
-    void* heap = MapAlignedPages(nurserySize(), Alignment);
-    if (!heap)
-        return false;
-
     freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(runtime()->defaultFreeOp());
     if (!freeMallocedBuffersTask || !freeMallocedBuffersTask->init())
         return false;
 
-    heapStart_ = uintptr_t(heap);
-    heapEnd_ = heapStart_ + nurserySize();
-    currentStart_ = start();
-    numActiveChunks_ = numNurseryChunks_;
-    JS_POISON(heap, JS_FRESH_NURSERY_PATTERN, nurserySize());
-    updateNumActiveChunks(1);
+    updateNumChunksLocked(1, lock);
+    if (numChunks() == 0)
+        return false;
+
     setCurrentChunk(0);
+    setStartPosition();
 
     char* env = getenv("JS_GC_PROFILE_NURSERY");
     if (env) {
         if (0 == strcmp(env, "help")) {
             fprintf(stderr, "JS_GC_PROFILE_NURSERY=N\n\n"
                     "\tReport minor GC's taking more than N microseconds.");
             exit(0);
         }
         enableProfiling_ = true;
         profileThreshold_ = atoi(env);
     }
 
     PodZero(&startTimes_);
     PodZero(&profileTimes_);
     PodZero(&totalTimes_);
 
+    if (!runtime()->gc.storeBuffer.enable())
+        return false;
+
     MOZ_ASSERT(isEnabled());
     return true;
 }
 
 js::Nursery::~Nursery()
 {
-    if (start())
-        UnmapPages((void*)start(), nurserySize());
-
+    disable();
     js_delete(freeMallocedBuffersTask);
 }
 
 void
 js::Nursery::enable()
 {
     MOZ_ASSERT(isEmpty());
     MOZ_ASSERT(!runtime()->gc.isVerifyPreBarriersEnabled());
     if (isEnabled())
         return;
-    updateNumActiveChunks(1);
+
+    updateNumChunks(1);
+    if (numChunks() == 0)
+        return;
+
     setCurrentChunk(0);
-    currentStart_ = position();
+    setStartPosition();
 #ifdef JS_GC_ZEAL
     if (runtime()->hasZealMode(ZealMode::GenerationalGC))
         enterZealMode();
 #endif
+
+    MOZ_ALWAYS_TRUE(runtime()->gc.storeBuffer.enable());
+    return;
 }
 
 void
 js::Nursery::disable()
 {
     MOZ_ASSERT(isEmpty());
     if (!isEnabled())
         return;
-    updateNumActiveChunks(0);
+    updateNumChunks(0);
     currentEnd_ = 0;
+    runtime()->gc.storeBuffer.disable();
 }
 
 bool
 js::Nursery::isEmpty() const
 {
     MOZ_ASSERT(runtime_);
     if (!isEnabled())
         return true;
-    MOZ_ASSERT_IF(!runtime_->hasZealMode(ZealMode::GenerationalGC), currentStart_ == start());
-    return position() == currentStart_;
+
+    if (!runtime_->hasZealMode(ZealMode::GenerationalGC)) {
+        MOZ_ASSERT(currentStartChunk_ == 0);
+        MOZ_ASSERT(currentStartPosition_ == chunk(0).start());
+    }
+    return position() == currentStartPosition_;
 }
 
 #ifdef JS_GC_ZEAL
 void
 js::Nursery::enterZealMode() {
     if (isEnabled())
-        numActiveChunks_ = numNurseryChunks_;
+        updateNumChunks(maxNurseryChunks_);
 }
 
 void
 js::Nursery::leaveZealMode() {
     if (isEnabled()) {
         MOZ_ASSERT(isEmpty());
         setCurrentChunk(0);
-        currentStart_ = start();
+        setStartPosition();
     }
 }
 #endif // JS_GC_ZEAL
 
 JSObject*
 js::Nursery::allocateObject(JSContext* cx, size_t size, size_t numDynamic, const js::Class* clasp)
 {
     /* Ensure there's enough space to replace the contents with a RelocationOverlay. */
@@ -255,28 +288,28 @@ js::Nursery::allocateObject(JSContext* c
     return obj;
 }
 
 void*
 js::Nursery::allocate(size_t size)
 {
     MOZ_ASSERT(isEnabled());
     MOZ_ASSERT(!runtime()->isHeapBusy());
-    MOZ_ASSERT(position() >= currentStart_);
+    MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_, position() >= currentStartPosition_);
     MOZ_ASSERT(position() % gc::CellSize == 0);
     MOZ_ASSERT(size % gc::CellSize == 0);
 
 #ifdef JS_GC_ZEAL
     static const size_t CanarySize = (sizeof(Nursery::Canary) + CellSize - 1) & ~CellMask;
     if (runtime()->gc.hasZealMode(ZealMode::CheckNursery))
         size += CanarySize;
 #endif
 
     if (currentEnd() < position() + size) {
-        if (currentChunk_ + 1 == numActiveChunks_)
+        if (currentChunk_ + 1 == numChunks())
             return nullptr;
         setCurrentChunk(currentChunk_ + 1);
     }
 
     void* thing = (void*)position();
     position_ = position() + size;
 
     JS_EXTRA_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size);
@@ -361,20 +394,20 @@ js::Nursery::freeBuffer(void* buffer)
     }
 }
 
 void
 Nursery::setForwardingPointer(void* oldData, void* newData, bool direct)
 {
     MOZ_ASSERT(isInside(oldData));
 
-    // Bug 1196210: If a zero-capacity header lands in the last 2 words of the
-    // jemalloc chunk abutting the start of the nursery, the (invalid) newData
-    // pointer will appear to be "inside" the nursery.
-    MOZ_ASSERT(!isInside(newData) || uintptr_t(newData) == heapStart_);
+    // Bug 1196210: If a zero-capacity header lands in the last 2 words of a
+    // jemalloc chunk abutting the start of a nursery chunk, the (invalid)
+    // newData pointer will appear to be "inside" the nursery.
+    MOZ_ASSERT(!isInside(newData) || (uintptr_t(newData) & ChunkMask) == 0);
 
     if (direct) {
         *reinterpret_cast<void**>(oldData) = newData;
     } else {
         AutoEnterOOMUnsafeRegion oomUnsafe;
         if (!forwardedBuffers.initialized() && !forwardedBuffers.init())
             oomUnsafe.crash("Nursery::setForwardingPointer");
 #ifdef DEBUG
@@ -534,17 +567,17 @@ js::Nursery::collect(JSRuntime* rt, JS::
 
     startProfile(ProfileKey::Total);
 
     AutoTraceSession session(rt, JS::HeapState::MinorCollecting);
     AutoStopVerifyingBarriers av(rt, false);
     AutoDisableProxyCheck disableStrictProxyChecking(rt);
     mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;
 
-    size_t initialUsedSpace = position() - start();
+    size_t initialUsedSpace = usedSpace();
 
     // Move objects pointed to by roots from the nursery to the major heap.
     TenuringTracer mover(rt, this);
 
     // Mark the store buffer. This must happen first.
 
     maybeStartProfile(ProfileKey::CancelIonCompilations);
     if (sb.cancelIonCompilations()) {
@@ -692,20 +725,20 @@ js::Nursery::collect(JSRuntime* rt, JS::
 
     if (enableProfiling_ && totalTime >= profileThreshold_) {
         static int printedHeader = 0;
         if ((printedHeader++ % 200) == 0) {
             fprintf(stderr, "MinorGC:               Reason  PRate Size ");
             printProfileHeader();
         }
 
-        fprintf(stderr, "MinorGC: %20s %5.1f%% %4d ",
+        fprintf(stderr, "MinorGC: %20s %5.1f%% %4u ",
                 JS::gcreason::ExplainReason(reason),
                 promotionRate * 100,
-                numActiveChunks_);
+                numChunks());
         printProfileTimes(profileTimes_);
     }
 }
 
 void
 js::Nursery::FreeMallocedBuffersTask::transferBuffersToFree(MallocedBuffersSet& buffersToFree,
                                                             const AutoLockHelperThreadState& lock)
 {
@@ -763,83 +796,133 @@ js::Nursery::sweep()
             MOZ_ASSERT(Forwarded(obj)->zone()->hasUniqueId(Forwarded(obj)));
     }
     cellsWithUid_.clear();
 
     runSweepActions();
 
 #ifdef JS_GC_ZEAL
     /* Poison the nursery contents so touching a freed object will crash. */
-    JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, nurserySize());
-    for (int i = 0; i < numNurseryChunks_; ++i)
-        initChunk(i);
+    for (unsigned i = 0; i < numChunks(); i++)
+        chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
 
     if (runtime()->hasZealMode(ZealMode::GenerationalGC)) {
-        MOZ_ASSERT(numActiveChunks_ == numNurseryChunks_);
-
         /* Only reset the alloc point when we are close to the end. */
-        if (currentChunk_ + 1 == numNurseryChunks_)
+        if (currentChunk_ + 1 == numChunks())
             setCurrentChunk(0);
     } else
 #endif
     {
 #ifdef JS_CRASH_DIAGNOSTICS
-        JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, allocationEnd() - start());
-        for (int i = 0; i < numActiveChunks_; ++i)
-            initChunk(i);
+        for (unsigned i = 0; i < numChunks(); ++i)
+            chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
 #endif
         setCurrentChunk(0);
     }
 
     /* Set current start position for isEmpty checks. */
-    currentStart_ = position();
+    setStartPosition();
     MemProfiler::SweepNursery(runtime());
 }
 
+size_t
+js::Nursery::usedSpace() const
+{
+    MOZ_ASSERT(currentChunk_ >= currentStartChunk_);
+    MOZ_ASSERT(currentStartPosition_ - chunk(currentStartChunk_).start() <= NurseryChunkUsableSize);
+    MOZ_ASSERT(position_ - chunk(currentChunk_).start() <= NurseryChunkUsableSize);
+
+    if (currentChunk_ == currentStartChunk_)
+        return position_ - currentStartPosition_;
+
+    size_t bytes = (chunk(currentStartChunk_).end() - currentStartPosition_) +
+                   ((currentChunk_ - currentStartChunk_ - 1) * NurseryChunkUsableSize) +
+                   position_ - chunk(currentChunk_).start();
+
+    MOZ_ASSERT(bytes <= numChunks() * NurseryChunkUsableSize);
+
+    return bytes;
+}
+
+MOZ_ALWAYS_INLINE void
+js::Nursery::setCurrentChunk(unsigned chunkno)
+{
+    MOZ_ASSERT(chunkno < maxChunks());
+    MOZ_ASSERT(chunkno < numChunks());
+    currentChunk_ = chunkno;
+    position_ = chunk(chunkno).start();
+    currentEnd_ = chunk(chunkno).end();
+    chunk(chunkno).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
+}
+
+MOZ_ALWAYS_INLINE void
+js::Nursery::setStartPosition()
+{
+    currentStartChunk_ = currentChunk_;
+    currentStartPosition_ = position();
+}
+
 void
 js::Nursery::growAllocableSpace()
 {
-#ifdef JS_GC_ZEAL
-    MOZ_ASSERT_IF(runtime()->hasZealMode(ZealMode::GenerationalGC),
-                  numActiveChunks_ == numNurseryChunks_);
-#endif
-    updateNumActiveChunks(Min(numActiveChunks_ * 2, numNurseryChunks_));
+    updateNumChunks(Min(numChunks() * 2, maxNurseryChunks_));
 }
 
 void
 js::Nursery::shrinkAllocableSpace()
 {
 #ifdef JS_GC_ZEAL
     if (runtime()->hasZealMode(ZealMode::GenerationalGC))
         return;
 #endif
-    updateNumActiveChunks(Max(numActiveChunks_ - 1, 1));
+    updateNumChunks(Max(numChunks() - 1, 1u));
+}
+
+void
+js::Nursery::updateNumChunks(unsigned newCount)
+{
+    if (numChunks() != newCount) {
+        AutoLockGC lock(runtime());
+        updateNumChunksLocked(newCount, lock);
+    }
 }
 
 void
-js::Nursery::updateNumActiveChunks(int newCount)
+js::Nursery::updateNumChunksLocked(unsigned newCount, AutoLockGC& lock)
 {
-#ifndef JS_GC_ZEAL
-    int priorChunks = numActiveChunks_;
-#endif
-    numActiveChunks_ = newCount;
+    // The GC nursery is an optimization and so if we fail to allocate nursery
+    // chunks we do not report an error.
+
+    unsigned priorCount = numChunks();
+    MOZ_ASSERT(priorCount != newCount);
+
+    AutoMaybeStartBackgroundAllocation maybeBgAlloc;
 
-    // In zeal mode, we want to keep the unused memory poisoned so that we
-    // will crash sooner. Avoid decommit in that case to avoid having the
-    // system zero the pages.
-#ifndef JS_GC_ZEAL
-    if (numActiveChunks_ < priorChunks) {
-        uintptr_t decommitStart = chunk(numActiveChunks_).start();
-        uintptr_t decommitSize = chunk(priorChunks - 1).start() + ChunkSize - decommitStart;
-        MOZ_ASSERT(decommitSize != 0);
-        MOZ_ASSERT(decommitStart == AlignBytes(decommitStart, Alignment));
-        MOZ_ASSERT(decommitSize == AlignBytes(decommitSize, Alignment));
-        MarkPagesUnused((void*)decommitStart, decommitSize);
+    if (newCount < priorCount) {
+        // Shrink the nursery and free unused chunks.
+        for (unsigned i = newCount; i < priorCount; i++)
+            runtime()->gc.recycleChunk(chunk(i).toChunk(runtime()), lock);
+        chunks_.shrinkTo(newCount);
+        return;
     }
-#endif // !defined(JS_GC_ZEAL)
+
+    // Grow the nursery and allocate new chunks.
+    if (!chunks_.resize(newCount))
+        return;
+
+    for (unsigned i = priorCount; i < newCount; i++) {
+        auto newChunk = runtime()->gc.getOrAllocChunk(lock, maybeBgAlloc);
+        if (!newChunk) {
+            chunks_.shrinkTo(i);
+            return;
+        }
+
+        chunks_[i] = NurseryChunk::fromChunk(newChunk);
+        chunk(i).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
+    }
 }
 
 void
 js::Nursery::queueSweepAction(SweepThunk thunk, void* data)
 {
     static_assert(sizeof(SweepAction) % CellSize == 0,
                   "SweepAction size must be a multiple of cell size");
     MOZ_ASSERT(!runtime()->mainThread.suppressGC);
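
(The new usedSpace() above sums three pieces: the tail of the chunk the current collection started in, any whole chunks in between, and the head of the chunk currently being allocated from. A self-contained restatement of that arithmetic with made-up addresses and a toy usable size, purely for illustration:)

    #include <cassert>
    #include <cstdint>

    // Toy layout: chunk i's usable data occupies [i*1024, i*1024 + 1000).
    static const uintptr_t UsableSize = 1000;
    static uintptr_t chunkStart(unsigned i) { return uintptr_t(i) * 1024; }
    static uintptr_t chunkEnd(unsigned i)   { return chunkStart(i) + UsableSize; }

    // Bytes allocated since the collection started at (startChunk, startPos),
    // with allocation currently at (curChunk, curPos).
    static uintptr_t usedSpace(unsigned startChunk, uintptr_t startPos,
                               unsigned curChunk, uintptr_t curPos) {
        if (curChunk == startChunk)
            return curPos - startPos;
        return (chunkEnd(startChunk) - startPos) +            // tail of start chunk
               (curChunk - startChunk - 1) * UsableSize +     // whole chunks in between
               (curPos - chunkStart(curChunk));               // head of current chunk
    }

    int main() {
        // Started 300 bytes into chunk 0, now 250 bytes into chunk 2:
        // 700 + 1000 + 250 = 1950 bytes of nursery space used.
        assert(usedSpace(0, chunkStart(0) + 300, 2, chunkStart(2) + 250) == 1950);
        // Same chunk: just the pointer difference.
        assert(usedSpace(1, chunkStart(1) + 100, 1, chunkStart(1) + 160) == 60);
        return 0;
    }
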
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -116,36 +116,42 @@ class Nursery
 {
   public:
     static const size_t Alignment = gc::ChunkSize;
     static const size_t ChunkShift = gc::ChunkShift;
 
     explicit Nursery(JSRuntime* rt);
     ~Nursery();
 
-    MOZ_MUST_USE bool init(uint32_t maxNurseryBytes);
+    MOZ_MUST_USE bool init(uint32_t maxNurseryBytes, AutoLockGC& lock);
 
-    bool exists() const { return numNurseryChunks_ != 0; }
-    size_t numChunks() const { return numNurseryChunks_; }
-    size_t nurserySize() const { return numNurseryChunks_ << ChunkShift; }
+    unsigned maxChunks() const { return maxNurseryChunks_; }
+    unsigned numChunks() const { return chunks_.length(); }
+
+    bool exists() const { return maxChunks() != 0; }
+    size_t nurserySize() const { return maxChunks() << ChunkShift; }
 
     void enable();
     void disable();
-    bool isEnabled() const { return numActiveChunks_ != 0; }
+    bool isEnabled() const { return numChunks() != 0; }
 
     /* Return true if no allocations have been made since the last collection. */
     bool isEmpty() const;
 
     /*
      * Check whether an arbitrary pointer is within the nursery. This is
      * slower than IsInsideNursery(Cell*), but works on all types of pointers.
      */
     MOZ_ALWAYS_INLINE bool isInside(gc::Cell* cellp) const = delete;
     MOZ_ALWAYS_INLINE bool isInside(const void* p) const {
-        return uintptr_t(p) >= heapStart_ && uintptr_t(p) < heapEnd_;
+        for (auto chunk : chunks_) {
+            if (uintptr_t(p) - chunk->start() < gc::ChunkSize)
+                return true;
+        }
+        return false;
     }
     template<typename T>
     bool isInside(const SharedMem<T>& p) const {
         return isInside(p.unwrap(/*safe - used for value in comparison above*/));
     }
 
     /*
      * Allocate and return a pointer to a new GC object with its |slots|
@@ -207,79 +213,85 @@ class Nursery
         MOZ_ASSERT(!cellsWithUid_.has(cell));
         return cellsWithUid_.put(cell);
     }
 
     using SweepThunk = void (*)(void *data);
     void queueSweepAction(SweepThunk thunk, void* data);
 
     size_t sizeOfHeapCommitted() const {
-        return numActiveChunks_ * gc::ChunkSize;
-    }
-    size_t sizeOfHeapDecommitted() const {
-        return (numNurseryChunks_ - numActiveChunks_) * gc::ChunkSize;
+        return numChunks() * gc::ChunkSize;
     }
     size_t sizeOfMallocedBuffers(mozilla::MallocSizeOf mallocSizeOf) const {
         size_t total = 0;
         for (MallocedBuffersSet::Range r = mallocedBuffers.all(); !r.empty(); r.popFront())
             total += mallocSizeOf(r.front());
         total += mallocedBuffers.sizeOfExcludingThis(mallocSizeOf);
         return total;
     }
 
-    MOZ_ALWAYS_INLINE uintptr_t start() const {
-        return heapStart_;
-    }
-
-    MOZ_ALWAYS_INLINE uintptr_t heapEnd() const {
-        return heapEnd_;
-    }
+    size_t usedSpace() const;
 
     // Free space remaining, not counting chunk trailers.
-    MOZ_ALWAYS_INLINE size_t approxFreeSpace() const {
-        return heapEnd_ - position_;
+    MOZ_ALWAYS_INLINE size_t freeSpace() const {
+        MOZ_ASSERT(currentEnd_ - position_ <= NurseryChunkUsableSize);
+        return (currentEnd_ - position_) +
+               (numChunks() - currentChunk_ - 1) * NurseryChunkUsableSize;
     }
 
 #ifdef JS_GC_ZEAL
     void enterZealMode();
     void leaveZealMode();
 #endif
 
     /* Print total profile times on shutdown. */
     void printTotalProfileTimes();
 
   private:
+    /* The amount of space in the mapped nursery available to allocations. */
+    static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);
+
+    struct NurseryChunk {
+        char data[NurseryChunkUsableSize];
+        gc::ChunkTrailer trailer;
+        static NurseryChunk* fromChunk(gc::Chunk* chunk);
+        void init(JSRuntime* rt);
+        void poisonAndInit(JSRuntime* rt, uint8_t poison);
+        uintptr_t start() const { return uintptr_t(&data); }
+        uintptr_t end() const { return uintptr_t(&trailer); }
+        gc::Chunk* toChunk(JSRuntime* rt);
+    };
+    static_assert(sizeof(NurseryChunk) == gc::ChunkSize,
+                  "Nursery chunk size must match gc::Chunk size.");
+
     /*
      * The start and end pointers are stored under the runtime so that we can
      * inline the isInsideNursery check into embedder code. Use the start()
      * and heapEnd() functions to access these values.
      */
     JSRuntime* runtime_;
 
+    /* Vector of allocated chunks to allocate from. */
+    Vector<NurseryChunk*, 0, SystemAllocPolicy> chunks_;
+
     /* Pointer to the first unallocated byte in the nursery. */
     uintptr_t position_;
 
     /* Pointer to the logical start of the Nursery. */
-    uintptr_t currentStart_;
+    unsigned currentStartChunk_;
+    uintptr_t currentStartPosition_;
 
     /* Pointer to the last byte of space in the current chunk. */
     uintptr_t currentEnd_;
 
-    /* Pointer to first and last address of the total nursery allocation. */
-    uintptr_t heapStart_;
-    uintptr_t heapEnd_;
+    /* The index of the chunk that is currently being allocated from. */
+    unsigned currentChunk_;
 
-    /* The index of the chunk that is currently being allocated from. */
-    int currentChunk_;
-
-    /* The index after the last chunk that we will allocate from. */
-    int numActiveChunks_;
-
-    /* Number of chunks allocated for the nursery. */
-    int numNurseryChunks_;
+    /* Maximum number of chunks to allocate for the nursery. */
+    unsigned maxNurseryChunks_;
 
     /* Promotion rate for the previous minor collection. */
     double previousPromotionRate_;
 
     /* Report minor collections taking more than this many us, if enabled. */
     int64_t profileThreshold_;
     bool enableProfiling_;
 
@@ -341,52 +353,31 @@ class Nursery
     struct SweepAction;
     SweepAction* sweepActions_;
 
 #ifdef JS_GC_ZEAL
     struct Canary;
     Canary* lastCanary_;
 #endif
 
-    /* The amount of space in the mapped nursery available to allocations. */
-    static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);
+    NurseryChunk* allocChunk();
 
-    struct NurseryChunkLayout {
-        char data[NurseryChunkUsableSize];
-        gc::ChunkTrailer trailer;
-        uintptr_t start() const { return uintptr_t(&data); }
-        uintptr_t end() const { return uintptr_t(&trailer); }
-    };
-    static_assert(sizeof(NurseryChunkLayout) == gc::ChunkSize,
-                  "Nursery chunk size must match gc::Chunk size.");
-    NurseryChunkLayout& chunk(int index) const {
-        MOZ_ASSERT(index < numNurseryChunks_);
-        MOZ_ASSERT(start());
-        return reinterpret_cast<NurseryChunkLayout*>(start())[index];
+    NurseryChunk& chunk(unsigned index) const {
+        return *chunks_[index];
     }
 
-    MOZ_ALWAYS_INLINE void initChunk(int chunkno) {
-        gc::StoreBuffer* sb = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
-        new (&chunk(chunkno).trailer) gc::ChunkTrailer(runtime(), sb);
-    }
+    void setCurrentChunk(unsigned chunkno);
+    void setStartPosition();
 
-    MOZ_ALWAYS_INLINE void setCurrentChunk(int chunkno) {
-        MOZ_ASSERT(chunkno < numNurseryChunks_);
-        MOZ_ASSERT(chunkno < numActiveChunks_);
-        currentChunk_ = chunkno;
-        position_ = chunk(chunkno).start();
-        currentEnd_ = chunk(chunkno).end();
-        initChunk(chunkno);
-    }
-
-    void updateNumActiveChunks(int newCount);
+    void updateNumChunks(unsigned newCount);
+    void updateNumChunksLocked(unsigned newCount, AutoLockGC& lock);
 
     MOZ_ALWAYS_INLINE uintptr_t allocationEnd() const {
-        MOZ_ASSERT(numActiveChunks_ > 0);
-        return chunk(numActiveChunks_ - 1).end();
+        MOZ_ASSERT(numChunks() > 0);
+        return chunks_.back()->end();
     }
 
     MOZ_ALWAYS_INLINE uintptr_t currentEnd() const {
         MOZ_ASSERT(runtime_);
         MOZ_ASSERT(currentEnd_ == chunk(currentChunk_).end());
         return currentEnd_;
     }
     void* addressOfCurrentEnd() const {
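
(With multiple discontiguous chunks, isInside() above walks chunks_ and uses a single unsigned comparison per chunk, uintptr_t(p) - chunk->start() < gc::ChunkSize. A minimal sketch of why that one comparison covers both bounds, using made-up addresses:)

    #include <cassert>
    #include <cstdint>

    // A pointer p lies in [start, start + chunkSize) exactly when the unsigned
    // difference p - start is below chunkSize: if p < start, the subtraction
    // wraps around to a huge value and the comparison fails.
    static bool inChunk(uintptr_t p, uintptr_t start, uintptr_t chunkSize) {
        return p - start < chunkSize;
    }

    int main() {
        const uintptr_t start = 0x100000, size = 0x1000;
        assert(inChunk(start, start, size));            // first byte: inside
        assert(inChunk(start + size - 1, start, size)); // last byte: inside
        assert(!inChunk(start + size, start, size));    // one past the end: outside
        assert(!inChunk(start - 1, start, size));       // below: wraps, outside
        return 0;
    }
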
--- a/js/src/gc/StoreBuffer.cpp
+++ b/js/src/gc/StoreBuffer.cpp
@@ -133,17 +133,17 @@ js::gc::AllocateWholeCellSet(Arena* aren
     AutoEnterOOMUnsafeRegion oomUnsafe;
     Nursery& nursery = rt->gc.nursery;
     void* data = nursery.allocateBuffer(zone, sizeof(ArenaCellSet));
     if (!data) {
         oomUnsafe.crash("Failed to allocate WholeCellSet");
         return nullptr;
     }
 
-    if (nursery.approxFreeSpace() < ArenaCellSet::NurseryFreeThresholdBytes)
+    if (nursery.freeSpace() < ArenaCellSet::NurseryFreeThresholdBytes)
         rt->gc.storeBuffer.setAboutToOverflow();
 
     auto cells = static_cast<ArenaCellSet*>(data);
     new (cells) ArenaCellSet(arena);
     arena->bufferedCells = cells;
     rt->gc.storeBuffer.addToWholeCellBuffer(cells);
     return cells;
 }
--- a/js/src/gc/StoreBuffer.h
+++ b/js/src/gc/StoreBuffer.h
@@ -379,17 +379,17 @@ class StoreBuffer
         cancelIonCompilations_(false), runtime_(rt), nursery_(nursery), aboutToOverflow_(false),
         enabled_(false)
 #ifdef DEBUG
         , mEntered(false)
 #endif
     {
     }
 
-    bool enable();
+    MOZ_MUST_USE bool enable();
     void disable();
     bool isEnabled() const { return enabled_; }
 
     void clear();
 
     /* Get the overflowed status. */
     bool isAboutToOverflow() const { return aboutToOverflow_; }
 
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -1026,40 +1026,41 @@ static const uint64_t JIT_SCRIPT_RELEASE
 bool
 GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
 {
     InitMemorySubsystem();
 
     if (!rootsHash.init(256))
         return false;
 
-    /*
-     * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
-     * for default backward API compatibility.
-     */
-    AutoLockGC lock(rt);
-    MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock));
-    setMaxMallocBytes(maxbytes);
-
-    const char* size = getenv("JSGC_MARK_STACK_LIMIT");
-    if (size)
-        setMarkStackLimit(atoi(size), lock);
-
-    jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
-
-    if (!nursery.init(maxNurseryBytes))
-        return false;
-
-    if (!nursery.isEnabled()) {
-        MOZ_ASSERT(nursery.nurserySize() == 0);
-        ++rt->gc.generationalDisabled;
-    } else {
-        MOZ_ASSERT(nursery.nurserySize() > 0);
-        if (!storeBuffer.enable())
+    {
+        AutoLockGC lock(rt);
+
+        /*
+         * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
+         * for default backward API compatibility.
+         */
+        MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock));
+        setMaxMallocBytes(maxbytes);
+
+        const char* size = getenv("JSGC_MARK_STACK_LIMIT");
+        if (size)
+            setMarkStackLimit(atoi(size), lock);
+
+        jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
+
+        if (!nursery.init(maxNurseryBytes, lock))
             return false;
+
+        if (!nursery.isEnabled()) {
+            MOZ_ASSERT(nursery.nurserySize() == 0);
+            ++rt->gc.generationalDisabled;
+        } else {
+            MOZ_ASSERT(nursery.nurserySize() > 0);
+        }
     }
 
 #ifdef JS_GC_ZEAL
     const char* zealSpec = getenv("JS_GC_ZEAL");
     if (zealSpec && zealSpec[0] && !parseAndSetZeal(zealSpec))
         return false;
 #endif
 
@@ -6489,30 +6490,27 @@ GCRuntime::minorGC(JS::gcreason::Reason 
 }
 
 void
 GCRuntime::disableGenerationalGC()
 {
     if (isGenerationalGCEnabled()) {
         evictNursery(JS::gcreason::API);
         nursery.disable();
-        storeBuffer.disable();
     }
     ++rt->gc.generationalDisabled;
 }
 
 void
 GCRuntime::enableGenerationalGC()
 {
     MOZ_ASSERT(generationalDisabled > 0);
     --generationalDisabled;
-    if (generationalDisabled == 0) {
+    if (generationalDisabled == 0)
         nursery.enable();
-        storeBuffer.enable();
-    }
 }
 
 bool
 GCRuntime::gcIfRequested()
 {
     // This method returns whether a major GC was performed.
 
     if (minorGCRequested())
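
(Nursery::init() and updateNumChunksLocked() now take an AutoLockGC& parameter, following the existing convention of passing the lock guard by reference as evidence that the GC lock is held at the call site. A self-contained sketch of that convention with stand-in types, not the real classes:)

    #include <mutex>

    // Stand-in for AutoLockGC: an RAII guard whose reference also serves as a
    // token proving that the GC lock is held by the caller.
    class AutoLockGC {
        std::lock_guard<std::mutex> guard_;
      public:
        explicit AutoLockGC(std::mutex& m) : guard_(m) {}
    };

    static std::mutex gcLock;
    static unsigned chunkCount = 0;

    // May only be called with the lock held; the parameter documents and
    // enforces that requirement at compile time.
    static void updateNumChunksLocked(unsigned newCount, AutoLockGC&) {
        chunkCount = newCount;
    }

    static void updateNumChunks(unsigned newCount) {
        AutoLockGC lock(gcLock);
        updateNumChunksLocked(newCount, lock);
    }

    int main() {
        updateNumChunks(4);
        return chunkCount == 4 ? 0 : 1;
    }
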
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -527,17 +527,16 @@ JSRuntime::addSizeOfIncludingThis(mozill
 
     if (jitRuntime_) {
         jitRuntime_->execAlloc().addSizeOfCode(&rtSizes->code);
         jitRuntime_->backedgeExecAlloc().addSizeOfCode(&rtSizes->code);
     }
 
     rtSizes->gc.marker += gc.marker.sizeOfExcludingThis(mallocSizeOf);
     rtSizes->gc.nurseryCommitted += gc.nursery.sizeOfHeapCommitted();
-    rtSizes->gc.nurseryDecommitted += gc.nursery.sizeOfHeapDecommitted();
     rtSizes->gc.nurseryMallocedBuffers += gc.nursery.sizeOfMallocedBuffers(mallocSizeOf);
     gc.storeBuffer.addSizeOfExcludingThis(mallocSizeOf, &rtSizes->gc);
 }
 
 static bool
 InvokeInterruptCallback(JSContext* cx)
 {
     MOZ_ASSERT(cx->runtime()->requestDepth >= 1);
--- a/js/src/vm/TypedArrayObject.cpp
+++ b/js/src/vm/TypedArrayObject.cpp
@@ -486,26 +486,26 @@ class TypedArrayObjectTemplate : public 
             obj->setIsSharedMemory();
 
         if (buffer) {
             obj->initViewData(buffer->dataPointerEither() + byteOffset);
 
             // If the buffer is for an inline typed object, the data pointer
             // may be in the nursery, so include a barrier to make sure this
             // object is updated if that typed object moves.
-            if (!IsInsideNursery(obj) && cx->runtime()->gc.nursery.isInside(buffer->dataPointerEither())) {
-                // Shared buffer data should never be nursery-allocated, so
-                // we need to fail here if isSharedMemory.  However, mmap()
-                // can place a SharedArrayRawBuffer up against the bottom end
-                // of the nursery, and a zero-length buffer will erroneously be
+            auto ptr = buffer->dataPointerEither();
+            if (!IsInsideNursery(obj) && cx->runtime()->gc.nursery.isInside(ptr)) {
+                // Shared buffer data should never be nursery-allocated, so we
+                // need to fail here if isSharedMemory.  However, mmap() can
+                // place a SharedArrayRawBuffer up against the bottom end of a
+                // nursery chunk, and a zero-length buffer will erroneously be
                 // perceived as being inside the nursery; sidestep that.
                 if (isSharedMemory) {
                     MOZ_ASSERT(buffer->byteLength() == 0 &&
-                               cx->runtime()->gc.nursery.start() ==
-                                   buffer->dataPointerEither().unwrapValue());
+                               (uintptr_t(ptr.unwrapValue()) & gc::ChunkMask) == 0);
                 } else {
                     cx->runtime()->gc.storeBuffer.putWholeCell(obj);
                 }
             }
         } else {
             void* data = obj->fixedData(FIXED_DATA_START);
             obj->initPrivate(data);
             memset(data, 0, len * sizeof(NativeType));
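
(Because the nursery no longer has a single start address, the zero-length-buffer assertion above is relaxed to checking that the pointer is chunk-aligned, i.e. it could be the first byte of some nursery chunk. A small sketch of the mask test; the shift value is an assumption for illustration, not taken from the tree:)

    #include <cassert>
    #include <cstdint>

    // Stand-ins: chunks are 2^20 bytes and allocated on 2^20-byte boundaries,
    // so a chunk-aligned address has its low 20 bits clear.
    static const uintptr_t ChunkShift = 20;
    static const uintptr_t ChunkSize  = uintptr_t(1) << ChunkShift;
    static const uintptr_t ChunkMask  = ChunkSize - 1;

    static bool isChunkAligned(uintptr_t p) { return (p & ChunkMask) == 0; }

    int main() {
        assert(isChunkAligned(3 * ChunkSize));       // start of some chunk
        assert(!isChunkAligned(3 * ChunkSize + 8));  // interior pointer
        return 0;
    }
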
--- a/js/xpconnect/src/XPCJSRuntime.cpp
+++ b/js/xpconnect/src/XPCJSRuntime.cpp
@@ -2626,21 +2626,16 @@ ReportJSRuntimeExplicitTreeStats(const J
     // change the leading "explicit/" to "decommitted/".
     nsCString rtPath2(rtPath);
     rtPath2.Replace(0, strlen("explicit"), NS_LITERAL_CSTRING("decommitted"));
     REPORT_GC_BYTES(rtPath2 + NS_LITERAL_CSTRING("gc-heap/decommitted-arenas"),
         rtStats.gcHeapDecommittedArenas,
         "GC arenas in non-empty chunks that is decommitted, i.e. it takes up "
         "address space but no physical memory or swap space.");
 
-    REPORT_BYTES(rtPath2 + NS_LITERAL_CSTRING("runtime/gc/nursery-decommitted"),
-        KIND_NONHEAP, rtStats.runtime.gc.nurseryDecommitted,
-        "Memory allocated to the GC's nursery that is decommitted, i.e. it takes up "
-        "address space but no physical memory or swap space.");
-
     REPORT_GC_BYTES(rtPath + NS_LITERAL_CSTRING("gc-heap/unused-chunks"),
         rtStats.gcHeapUnusedChunks,
         "Empty GC chunks which will soon be released unless claimed for new "
         "allocations.");
 
     REPORT_GC_BYTES(rtPath + NS_LITERAL_CSTRING("gc-heap/unused-arenas"),
         rtStats.gcHeapUnusedArenas,
         "Empty GC arenas within non-empty chunks.");