Bug 1272449 - Finish refactoring allocator code; r=jonco
author Terrence Cole <terrence@mozilla.com>
date Wed, 18 May 2016 10:57:22 -0700
changeset 300256 00d30d448554e802975a76a9572911b902a1cb7f
parent 300255 b4555556428ee4ded0b94eaa43997f20098f9e63
child 300257 7916d8bb58d701fa5b315d2f4794a7b243eebeef
push id 19512
push user cbook@mozilla.com
push date Fri, 03 Jun 2016 10:40:46 +0000
reviewers jonco
bugs 1272449
milestone 49.0a1
description Bug 1272449 - Finish refactoring allocator code; r=jonco
js/src/gc/Allocator.cpp
js/src/gc/GCInternals.h
js/src/gc/GCRuntime.h
js/src/gc/Heap.h
js/src/gc/Marking.cpp
js/src/jsgc.cpp
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -12,95 +12,18 @@
 #include "gc/GCTrace.h"
 #include "gc/Nursery.h"
 #include "jit/JitCompartment.h"
 #include "vm/Runtime.h"
 #include "vm/String.h"
 
 #include "jsobjinlines.h"
 
-using namespace js;
 using namespace gc;
 
-bool
-GCRuntime::gcIfNeededPerAllocation(JSContext* cx)
-{
-#ifdef JS_GC_ZEAL
-    if (needZealousGC())
-        runDebugGC();
-#endif
-
-    // Invoking the interrupt callback can fail and we can't usefully
-    // handle that here. Just check in case we need to collect instead.
-    if (rt->hasPendingInterrupt())
-        gcIfRequested(cx);
-
-    // If we have grown past our GC heap threshold while in the middle of
-    // an incremental GC, we're growing faster than we're GCing, so stop
-    // the world and do a full, non-incremental GC right now, if possible.
-    if (isIncrementalGCInProgress() &&
-        cx->zone()->usage.gcBytes() > cx->zone()->threshold.gcTriggerBytes())
-    {
-        PrepareZoneForGC(cx->zone());
-        AutoKeepAtoms keepAtoms(cx->perThreadData);
-        gc(GC_NORMAL, JS::gcreason::INCREMENTAL_TOO_SLOW);
-    }
-
-    return true;
-}
-
-template <AllowGC allowGC>
-bool
-GCRuntime::checkAllocatorState(JSContext* cx, AllocKind kind)
-{
-    if (allowGC) {
-        if (!gcIfNeededPerAllocation(cx))
-            return false;
-    }
-
-#if defined(JS_GC_ZEAL) || defined(DEBUG)
-    MOZ_ASSERT_IF(rt->isAtomsCompartment(cx->compartment()),
-                  kind == AllocKind::STRING ||
-                  kind == AllocKind::FAT_INLINE_STRING ||
-                  kind == AllocKind::SYMBOL ||
-                  kind == AllocKind::JITCODE);
-    MOZ_ASSERT(!rt->isHeapBusy());
-    MOZ_ASSERT(isAllocAllowed());
-#endif
-
-    // Crash if we perform a GC action when it is not safe.
-    if (allowGC && !rt->mainThread.suppressGC)
-        JS::AutoAssertOnGC::VerifyIsSafeToGC(rt);
-
-    // For testing out of memory conditions
-    if (js::oom::ShouldFailWithOOM()) {
-        // If we are doing a fallible allocation, percolate up the OOM
-        // instead of reporting it.
-        if (allowGC)
-            ReportOutOfMemory(cx);
-        return false;
-    }
-
-    return true;
-}
-
-template <typename T>
-/* static */ void
-GCRuntime::checkIncrementalZoneState(ExclusiveContext* cx, T* t)
-{
-#ifdef DEBUG
-    if (!cx->isJSContext())
-        return;
-
-    Zone* zone = cx->asJSContext()->zone();
-    MOZ_ASSERT_IF(t && zone->wasGCStarted() && (zone->isGCMarking() || zone->isGCSweeping()),
-                  t->asTenured().arena()->allocatedDuringIncremental);
-#endif
-}
-
 template <typename T, AllowGC allowGC /* = CanGC */>
 JSObject*
 js::Allocate(ExclusiveContext* cx, AllocKind kind, size_t nDynamicSlots, InitialHeap heap,
              const Class* clasp)
 {
     static_assert(mozilla::IsConvertible<T*, JSObject*>::value, "must be JSObject derived");
     MOZ_ASSERT(IsObjectAllocKind(kind));
     size_t thingSize = Arena::thingSize(kind);
@@ -251,40 +174,156 @@ GCRuntime::tryNewTenuredThing(ExclusiveC
         }
     }
 
     checkIncrementalZoneState(cx, t);
     TraceTenuredAlloc(t, kind);
     return t;
 }
 
-/* static */ void*
+template <AllowGC allowGC>
+bool
+GCRuntime::checkAllocatorState(JSContext* cx, AllocKind kind)
+{
+    if (allowGC) {
+        if (!gcIfNeededPerAllocation(cx))
+            return false;
+    }
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+    MOZ_ASSERT_IF(rt->isAtomsCompartment(cx->compartment()),
+                  kind == AllocKind::STRING ||
+                  kind == AllocKind::FAT_INLINE_STRING ||
+                  kind == AllocKind::SYMBOL ||
+                  kind == AllocKind::JITCODE);
+    MOZ_ASSERT(!rt->isHeapBusy());
+    MOZ_ASSERT(isAllocAllowed());
+#endif
+
+    // Crash if we perform a GC action when it is not safe.
+    if (allowGC && !rt->mainThread.suppressGC)
+        JS::AutoAssertOnGC::VerifyIsSafeToGC(rt);
+
+    // For testing out-of-memory conditions.
+    if (js::oom::ShouldFailWithOOM()) {
+        // If we are doing a fallible allocation, percolate up the OOM
+        // instead of reporting it.
+        if (allowGC)
+            ReportOutOfMemory(cx);
+        return false;
+    }
+
+    return true;
+}
+
+bool
+GCRuntime::gcIfNeededPerAllocation(JSContext* cx)
+{
+#ifdef JS_GC_ZEAL
+    if (needZealousGC())
+        runDebugGC();
+#endif
+
+    // Invoking the interrupt callback can fail and we can't usefully
+    // handle that here. Just check in case we need to collect instead.
+    if (rt->hasPendingInterrupt())
+        gcIfRequested(cx);
+
+    // If we have grown past our GC heap threshold while in the middle of
+    // an incremental GC, we're growing faster than we're GCing, so stop
+    // the world and do a full, non-incremental GC right now, if possible.
+    if (isIncrementalGCInProgress() &&
+        cx->zone()->usage.gcBytes() > cx->zone()->threshold.gcTriggerBytes())
+    {
+        PrepareZoneForGC(cx->zone());
+        AutoKeepAtoms keepAtoms(cx->perThreadData);
+        gc(GC_NORMAL, JS::gcreason::INCREMENTAL_TOO_SLOW);
+    }
+
+    return true;
+}
+
+template <typename T>
+/* static */ void
+GCRuntime::checkIncrementalZoneState(ExclusiveContext* cx, T* t)
+{
+#ifdef DEBUG
+    if (!cx->isJSContext())
+        return;
+
+    Zone* zone = cx->asJSContext()->zone();
+    MOZ_ASSERT_IF(t && zone->wasGCStarted() && (zone->isGCMarking() || zone->isGCSweeping()),
+                  t->asTenured().arena()->allocatedDuringIncremental);
+#endif
+}
+
+
+// ///////////  Arena -> Thing Allocator  //////////////////////////////////////
+
+// After pulling a Chunk out of the empty chunks pool, we want to run the
+// background allocator to refill it. The code that takes Chunks does so under
+// the GC lock. We need to start the background allocation under the helper
+// threads lock. To avoid lock inversion we have to delay the start until after
+// we are outside the GC lock. This class handles that delay automatically.
+class MOZ_RAII js::gc::AutoMaybeStartBackgroundAllocation
+{
+    JSRuntime* runtime;
+
+  public:
+    AutoMaybeStartBackgroundAllocation()
+      : runtime(nullptr)
+    {}
+
+    void tryToStartBackgroundAllocation(JSRuntime* rt) {
+        runtime = rt;
+    }
+
+    ~AutoMaybeStartBackgroundAllocation() {
+        if (runtime)
+            runtime->gc.startBackgroundAllocTaskIfIdle();
+    }
+};
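+
+// A minimal usage sketch (hypothetical caller, for illustration only):
+//
+//     AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
+//     {
+//         AutoLockGC lock(rt);
+//         // ... take a Chunk out of the empty pool ...
+//         maybeStartBGAlloc.tryToStartBackgroundAllocation(rt);
+//     }
+//     // The destructor runs here, after the GC lock has been released, so
+//     // taking the helper threads lock cannot invert the lock order.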
+
+void
+GCRuntime::startBackgroundAllocTaskIfIdle()
+{
+    AutoLockHelperThreadState helperLock;
+    if (allocTask.isRunning())
+        return;
+
+    // Join the previous invocation of the task. This will return immediately
+    // if the thread has never been started.
+    allocTask.joinWithLockHeld();
+    allocTask.startWithLockHeld();
+}
+
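+// Refill the free list for thingKind, dispatching to the main-thread or
+// off-main-thread path depending on the kind of context we were given.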
+/* static */ TenuredCell*
 GCRuntime::refillFreeListFromAnyThread(ExclusiveContext* cx, AllocKind thingKind, size_t thingSize)
 {
     cx->arenas()->checkEmptyFreeList(thingKind);
 
     if (cx->isJSContext())
         return refillFreeListFromMainThread(cx->asJSContext(), thingKind, thingSize);
 
     return refillFreeListOffMainThread(cx, thingKind);
 }
 
-/* static */ void*
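+// On the main thread we can allocate immediately; the RAII guard may start
+// the background chunk allocator as it goes out of scope.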
+/* static */ TenuredCell*
 GCRuntime::refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind, size_t thingSize)
 {
     ArenaLists *arenas = cx->arenas();
     Zone *zone = cx->zone();
     MOZ_ASSERT(!cx->runtime()->isHeapBusy(), "allocating while under GC");
 
     AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
 
     return arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
 }
 
-/* static */ void*
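+// Off the main thread we must wait for any in-progress GC session to finish
+// before touching the arena lists (see the wait loop below).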
+/* static */ TenuredCell*
 GCRuntime::refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind)
 {
     ArenaLists* arenas = cx->arenas();
     Zone* zone = cx->zone();
     JSRuntime* rt = zone->runtimeFromAnyThread();
 
     AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
 
@@ -293,16 +332,32 @@ GCRuntime::refillFreeListOffMainThread(E
     // a GC session.
     AutoLockHelperThreadState lock;
     while (rt->isHeapBusy())
         HelperThreadState().wait(GlobalHelperThreadState::PRODUCER);
 
     return arenas->allocateFromArena(zone, thingKind, maybeStartBGAlloc);
 }
 
+/* static */ TenuredCell*
+GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind)
+{
+    /*
+     * Called by the compacting GC, and by the tenuring tracer during minor
+     * GC, to refill a free list while we are inside a GC session.
+     */
+    zone->arenas.checkEmptyFreeList(thingKind);
+    mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
+    MOZ_ASSERT(rt->isHeapCollecting());
+    MOZ_ASSERT_IF(!rt->isHeapMinorCollecting(), !rt->gc.isBackgroundSweeping());
+
+    AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
+    return zone->arenas.allocateFromArena(zone, thingKind, maybeStartBackgroundAllocation);
+}
+
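+// Allocate a cell from a new or partially-full arena, taking the GC lock
+// only on the paths that actually need it.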
 TenuredCell*
 ArenaLists::allocateFromArena(JS::Zone* zone, AllocKind thingKind,
                               AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc)
 {
     JSRuntime* rt = zone->runtimeFromAnyThread();
     mozilla::Maybe<AutoLockGC> maybeLock;
 
     // See if we can proceed without taking the GC lock.
@@ -360,8 +415,220 @@ GCRuntime::arenaAllocatedDuringGC(JS::Zo
     if (zone->needsIncrementalBarrier()) {
         arena->allocatedDuringIncremental = true;
         marker.delayMarkingArena(arena);
     } else if (zone->isGCSweeping()) {
         arena->setNextAllocDuringSweep(arenasAllocatedDuringSweep);
         arenasAllocatedDuringSweep = arena;
     }
 }
+
+
+// ///////////  Chunk -> Arena Allocator  //////////////////////////////////////
+
+bool
+GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const
+{
+    // To minimize memory waste, we do not want to run the background chunk
+    // allocation if we already have some empty chunks or when the runtime has
+    // a small heap size (and therefore likely has a small growth rate).
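+    // For example, under a hypothetical tuning where minEmptyChunkCount(lock)
+    // is 1, background allocation starts only once the heap spans at least
+    // four chunks and the empty-chunk pool has been drained.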
+    return allocTask.enabled() &&
+           emptyChunks(lock).count() < tunables.minEmptyChunkCount(lock) &&
+           (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
+}
+
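+// Allocate an arena from the given chunk, charging it to the zone. Returns
+// nullptr if the allocation would take the heap past gcMaxBytes.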
+Arena*
+GCRuntime::allocateArena(Chunk* chunk, Zone* zone, AllocKind thingKind, const AutoLockGC& lock)
+{
+    MOZ_ASSERT(chunk->hasAvailableArenas());
+
+    // Fail the allocation if we are over our heap size limits.
+    if (!rt->isHeapMinorCollecting() &&
+        !isHeapCompacting() &&
+        usage.gcBytes() >= tunables.gcMaxBytes())
+    {
+        return nullptr;
+    }
+
+    Arena* arena = chunk->allocateArena(rt, zone, thingKind, lock);
+    zone->usage.addGCArena();
+
+    // Trigger an incremental slice if needed.
+    if (!rt->isHeapMinorCollecting() && !isHeapCompacting())
+        maybeAllocTriggerZoneGC(zone, lock);
+
+    return arena;
+}
+
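+// Hand out the next arena in this chunk, preferring an already-committed
+// free arena over recommitting a decommitted one.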
+Arena*
+Chunk::allocateArena(JSRuntime* rt, Zone* zone, AllocKind thingKind, const AutoLockGC& lock)
+{
+    Arena* arena = info.numArenasFreeCommitted > 0
+                   ? fetchNextFreeArena(rt)
+                   : fetchNextDecommittedArena();
+    arena->init(zone, thingKind);
+    updateChunkListAfterAlloc(rt, lock);
+    return arena;
+}
+
+inline void
+GCRuntime::updateOnFreeArenaAlloc(const ChunkInfo& info)
+{
+    MOZ_ASSERT(info.numArenasFreeCommitted <= numArenasFreeCommitted);
+    --numArenasFreeCommitted;
+}
+
+Arena*
+Chunk::fetchNextFreeArena(JSRuntime* rt)
+{
+    MOZ_ASSERT(info.numArenasFreeCommitted > 0);
+    MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
+
+    Arena* arena = info.freeArenasHead;
+    info.freeArenasHead = arena->next;
+    --info.numArenasFreeCommitted;
+    --info.numArenasFree;
+    rt->gc.updateOnFreeArenaAlloc(info);
+
+    return arena;
+}
+
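+// Recommit the pages of a decommitted arena and return it, reset and ready
+// for init().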
+Arena*
+Chunk::fetchNextDecommittedArena()
+{
+    MOZ_ASSERT(info.numArenasFreeCommitted == 0);
+    MOZ_ASSERT(info.numArenasFree > 0);
+
+    unsigned offset = findDecommittedArenaOffset();
+    info.lastDecommittedArenaOffset = offset + 1;
+    --info.numArenasFree;
+    decommittedArenas.unset(offset);
+
+    Arena* arena = &arenas[offset];
+    MarkPagesInUse(arena, ArenaSize);
+    arena->setAsNotAllocated();
+
+    return arena;
+}
+
+/*
+ * Search for and return the next decommitted Arena. Our goal is to keep
+ * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
+ * it to the most recently freed arena when we free, and forcing it to
+ * the last alloc + 1 when we allocate.
+ */
+uint32_t
+Chunk::findDecommittedArenaOffset()
+{
+    /* Note: lastDecommittedArenaOffset can be past the end of the list. */
+    for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++) {
+        if (decommittedArenas.get(i))
+            return i;
+    }
+    for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++) {
+        if (decommittedArenas.get(i))
+            return i;
+    }
+    MOZ_CRASH("No decommitted arenas found.");
+}
+
+
+// ///////////  System -> Chunk Allocator  /////////////////////////////////////
+
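+// Return a chunk with at least one free arena: prefer an available chunk,
+// then an empty one, and only then map a fresh chunk from the system.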
+Chunk*
+GCRuntime::pickChunk(const AutoLockGC& lock,
+                     AutoMaybeStartBackgroundAllocation& maybeStartBackgroundAllocation)
+{
+    if (availableChunks(lock).count())
+        return availableChunks(lock).head();
+
+    Chunk* chunk = emptyChunks(lock).pop();
+    if (!chunk) {
+        chunk = Chunk::allocate(rt);
+        if (!chunk)
+            return nullptr;
+        MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
+    }
+
+    MOZ_ASSERT(chunk->unused());
+    MOZ_ASSERT(!fullChunks(lock).contains(chunk));
+
+    if (wantBackgroundAllocation(lock))
+        maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt);
+
+    chunkAllocationSinceLastGC = true;
+
+    availableChunks(lock).push(chunk);
+
+    return chunk;
+}
+
+BackgroundAllocTask::BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool)
+  : runtime(rt),
+    chunkPool_(pool),
+    enabled_(CanUseExtraThreads() && GetCPUCount() >= 2)
+{
+}
+
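+// Runs on a helper thread: allocate chunks into the pool, dropping the GC
+// lock around each system allocation, until cancelled or until enough empty
+// chunks have been cached.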
+/* virtual */ void
+BackgroundAllocTask::run()
+{
+    TraceLoggerThread* logger = TraceLoggerForCurrentThread();
+    AutoTraceLog logAllocation(logger, TraceLogger_GCAllocation);
+
+    AutoLockGC lock(runtime);
+    while (!cancel_ && runtime->gc.wantBackgroundAllocation(lock)) {
+        Chunk* chunk;
+        {
+            AutoUnlockGC unlock(lock);
+            chunk = Chunk::allocate(runtime);
+            if (!chunk)
+                break;
+        }
+        chunkPool_.push(chunk);
+    }
+}
+
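+// Map a new ChunkSize-aligned chunk from the system and initialize it.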
+/* static */ Chunk*
+Chunk::allocate(JSRuntime* rt)
+{
+    Chunk* chunk = static_cast<Chunk*>(MapAlignedPages(ChunkSize, ChunkSize));
+    if (!chunk)
+        return nullptr;
+    chunk->init(rt);
+    rt->gc.stats.count(gcstats::STAT_NEW_CHUNK);
+    return chunk;
+}
+
+void
+Chunk::init(JSRuntime* rt)
+{
+    JS_POISON(this, JS_FRESH_TENURED_PATTERN, ChunkSize);
+
+    /*
+     * We clear the bitmap to guard against JS::GCThingIsMarkedGray being called
+     * on uninitialized data, which would happen before the first GC cycle.
+     */
+    bitmap.clear();
+
+    /*
+     * Decommit the arenas. We do this after poisoning so that if the OS does
+     * not have to recycle the pages, we still get the benefit of poisoning.
+     */
+    decommitAllArenas(rt);
+
+    /* Initialize the chunk info. */
+    info.init();
+    new (&info.trailer) ChunkTrailer(rt);
+
+    /* The rest of the info fields are initialized in pickChunk. */
+}
+
+void
+Chunk::decommitAllArenas(JSRuntime* rt)
+{
+    decommittedArenas.clear(true);
+    MarkPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
+
+    info.freeArenasHead = nullptr;
+    info.lastDecommittedArenaOffset = 0;
+    info.numArenasFree = ArenasPerChunk;
+    info.numArenasFreeCommitted = 0;
+}
--- a/js/src/gc/GCInternals.h
+++ b/js/src/gc/GCInternals.h
@@ -141,36 +141,16 @@ struct MovingTracer : JS::CallbackTracer
         MOZ_ASSERT(!RelocationOverlay::isCellForwarded(thing.asCell()));
     }
 
 #ifdef DEBUG
     TracerKind getTracerKind() const override { return TracerKind::Moving; }
 #endif
 };
 
-class MOZ_RAII AutoMaybeStartBackgroundAllocation
-{
-  private:
-    JSRuntime* runtime;
-
-  public:
-    AutoMaybeStartBackgroundAllocation()
-      : runtime(nullptr)
-    {}
-
-    void tryToStartBackgroundAllocation(JSRuntime* rt) {
-        runtime = rt;
-    }
-
-    ~AutoMaybeStartBackgroundAllocation() {
-        if (runtime)
-            runtime->gc.startBackgroundAllocTaskIfIdle();
-    }
-};
-
 // In debug builds, set/unset the GC sweeping flag for the current thread.
 struct MOZ_RAII AutoSetThreadIsSweeping
 {
 #ifdef DEBUG
     AutoSetThreadIsSweeping()
       : threadData_(js::TlsPerThreadData.get())
     {
         MOZ_ASSERT(!threadData_->gcSweeping);
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -24,16 +24,17 @@ namespace js {
 
 class AutoLockGC;
 class VerifyPreTracer;
 
 namespace gc {
 
 typedef Vector<JS::Zone*, 4, SystemAllocPolicy> ZoneVector;
 
+class AutoMaybeStartBackgroundAllocation;
 class MarkingValidator;
 class AutoTraceSession;
 struct MovingTracer;
 
 class ChunkPool
 {
     Chunk* head_;
     size_t count_;
@@ -870,17 +871,17 @@ class GCRuntime
     template <AllowGC allowGC>
     JSObject* tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicSlots,
                                   const Class* clasp);
     template <AllowGC allowGC>
     static JSObject* tryNewTenuredObject(ExclusiveContext* cx, AllocKind kind, size_t thingSize,
                                          size_t nDynamicSlots);
     template <typename T, AllowGC allowGC>
     static T* tryNewTenuredThing(ExclusiveContext* cx, AllocKind kind, size_t thingSize);
-    static void* refillFreeListInGC(Zone* zone, AllocKind thingKind);
+    static TenuredCell* refillFreeListInGC(Zone* zone, AllocKind thingKind);
 
   private:
     enum IncrementalProgress
     {
         NotFinished = 0,
         Finished
     };
 
@@ -892,33 +893,33 @@ class GCRuntime
                      AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc);
     Arena* allocateArena(Chunk* chunk, Zone* zone, AllocKind kind, const AutoLockGC& lock);
     void arenaAllocatedDuringGC(JS::Zone* zone, Arena* arena);
 
     // Allocator internals
     MOZ_MUST_USE bool gcIfNeededPerAllocation(JSContext* cx);
     template <typename T>
     static void checkIncrementalZoneState(ExclusiveContext* cx, T* t);
-    static void* refillFreeListFromAnyThread(ExclusiveContext* cx, AllocKind thingKind,
-                                             size_t thingSize);
-    static void* refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind,
-                                              size_t thingSize);
-    static void* refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind);
+    static TenuredCell* refillFreeListFromAnyThread(ExclusiveContext* cx, AllocKind thingKind,
+                                                    size_t thingSize);
+    static TenuredCell* refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind,
+                                                     size_t thingSize);
+    static TenuredCell* refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind);
 
     /*
      * Return the list of chunks that can be released outside the GC lock.
      * Must be called either during the GC or with the GC lock taken.
      */
     ChunkPool expireEmptyChunkPool(bool shrinkBuffers, const AutoLockGC& lock);
     void freeEmptyChunks(JSRuntime* rt, const AutoLockGC& lock);
     void prepareToFreeChunk(ChunkInfo& info);
 
     friend class BackgroundAllocTask;
     friend class AutoMaybeStartBackgroundAllocation;
-    inline bool wantBackgroundAllocation(const AutoLockGC& lock) const;
+    bool wantBackgroundAllocation(const AutoLockGC& lock) const;
     void startBackgroundAllocTaskIfIdle();
 
     void requestMajorGC(JS::gcreason::Reason reason);
     SliceBudget defaultBudget(JS::gcreason::Reason reason, int64_t millis);
     void budgetIncrementalGC(SliceBudget& budget, AutoLockForExclusiveAccess& lock);
     void resetIncrementalGC(const char* reason, AutoLockForExclusiveAccess& lock);
 
     // Assert if the system state is such that we should never
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -1007,17 +1007,17 @@ struct Chunk
     void addArenaToFreeList(JSRuntime* rt, Arena* arena);
     void addArenaToDecommittedList(JSRuntime* rt, const Arena* arena);
 
     void updateChunkListAfterAlloc(JSRuntime* rt, const AutoLockGC& lock);
     void updateChunkListAfterFree(JSRuntime* rt, const AutoLockGC& lock);
 
   public:
     /* Unlink and return the freeArenasHead. */
-    inline Arena* fetchNextFreeArena(JSRuntime* rt);
+    Arena* fetchNextFreeArena(JSRuntime* rt);
 };
 
 static_assert(sizeof(Chunk) == ChunkSize,
               "Ensure the hardcoded chunk size definition actually matches the struct.");
 static_assert(js::gc::ChunkMarkBitmapOffset == offsetof(Chunk, bitmap),
               "The hardcoded API bitmap offset must match the actual offset.");
 static_assert(js::gc::ChunkRuntimeOffset == offsetof(Chunk, info) +
                                             offsetof(ChunkInfo, trailer) +
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -2151,20 +2151,18 @@ js::TenuringTracer::moveToTenured(JSObje
     MOZ_ASSERT(IsInsideNursery(src));
     MOZ_ASSERT(!src->zone()->usedByExclusiveThread);
 
     AllocKind dstKind = src->allocKindForTenure(nursery());
     Zone* zone = src->zone();
 
     TenuredCell* t = zone->arenas.allocateFromFreeList(dstKind, Arena::thingSize(dstKind));
     if (!t) {
-        zone->arenas.checkEmptyFreeList(dstKind);
-        AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
         AutoEnterOOMUnsafeRegion oomUnsafe;
-        t = zone->arenas.allocateFromArena(zone, dstKind, maybeStartBackgroundAllocation);
+        t = runtime()->gc.refillFreeListInGC(zone, dstKind);
         if (!t)
             oomUnsafe.crash(ChunkSize, "Failed to allocate object while tenuring.");
     }
     JSObject* dst = reinterpret_cast<JSObject*>(t);
     tenuredSize += moveObjectToTenured(dst, src, dstKind);
 
     RelocationOverlay* overlay = RelocationOverlay::fromCell(src);
     overlay->forwardTo(dst);
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -674,147 +674,31 @@ FreeChunkPool(JSRuntime* rt, ChunkPool& 
 }
 
 void
 GCRuntime::freeEmptyChunks(JSRuntime* rt, const AutoLockGC& lock)
 {
     FreeChunkPool(rt, emptyChunks(lock));
 }
 
-/* static */ Chunk*
-Chunk::allocate(JSRuntime* rt)
-{
-    Chunk* chunk = static_cast<Chunk*>(MapAlignedPages(ChunkSize, ChunkSize));
-    if (!chunk)
-        return nullptr;
-    chunk->init(rt);
-    rt->gc.stats.count(gcstats::STAT_NEW_CHUNK);
-    return chunk;
-}
-
 inline void
 GCRuntime::prepareToFreeChunk(ChunkInfo& info)
 {
     MOZ_ASSERT(numArenasFreeCommitted >= info.numArenasFreeCommitted);
     numArenasFreeCommitted -= info.numArenasFreeCommitted;
     stats.count(gcstats::STAT_DESTROY_CHUNK);
 #ifdef DEBUG
     /*
      * Let FreeChunkPool detect a missing prepareToFreeChunk call before it
      * frees chunk.
      */
     info.numArenasFreeCommitted = 0;
 #endif
 }
 
-void Chunk::decommitAllArenas(JSRuntime* rt)
-{
-    decommittedArenas.clear(true);
-    MarkPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
-
-    info.freeArenasHead = nullptr;
-    info.lastDecommittedArenaOffset = 0;
-    info.numArenasFree = ArenasPerChunk;
-    info.numArenasFreeCommitted = 0;
-}
-
-void
-Chunk::init(JSRuntime* rt)
-{
-    JS_POISON(this, JS_FRESH_TENURED_PATTERN, ChunkSize);
-
-    /*
-     * We clear the bitmap to guard against JS::GCThingIsMarkedGray being called
-     * on uninitialized data, which would happen before the first GC cycle.
-     */
-    bitmap.clear();
-
-    /*
-     * Decommit the arenas. We do this after poisoning so that if the OS does
-     * not have to recycle the pages, we still get the benefit of poisoning.
-     */
-    decommitAllArenas(rt);
-
-    /* Initialize the chunk info. */
-    info.init();
-    new (&info.trailer) ChunkTrailer(rt);
-
-    /* The rest of info fields are initialized in pickChunk. */
-}
-
-/*
- * Search for and return the next decommitted Arena. Our goal is to keep
- * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
- * it to the most recently freed arena when we free, and forcing it to
- * the last alloc + 1 when we allocate.
- */
-uint32_t
-Chunk::findDecommittedArenaOffset()
-{
-    /* Note: lastFreeArenaOffset can be past the end of the list. */
-    for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++)
-        if (decommittedArenas.get(i))
-            return i;
-    for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++)
-        if (decommittedArenas.get(i))
-            return i;
-    MOZ_CRASH("No decommitted arenas found.");
-}
-
-Arena*
-Chunk::fetchNextDecommittedArena()
-{
-    MOZ_ASSERT(info.numArenasFreeCommitted == 0);
-    MOZ_ASSERT(info.numArenasFree > 0);
-
-    unsigned offset = findDecommittedArenaOffset();
-    info.lastDecommittedArenaOffset = offset + 1;
-    --info.numArenasFree;
-    decommittedArenas.unset(offset);
-
-    Arena* arena = &arenas[offset];
-    MarkPagesInUse(arena, ArenaSize);
-    arena->setAsNotAllocated();
-
-    return arena;
-}
-
-inline void
-GCRuntime::updateOnFreeArenaAlloc(const ChunkInfo& info)
-{
-    MOZ_ASSERT(info.numArenasFreeCommitted <= numArenasFreeCommitted);
-    --numArenasFreeCommitted;
-}
-
-inline Arena*
-Chunk::fetchNextFreeArena(JSRuntime* rt)
-{
-    MOZ_ASSERT(info.numArenasFreeCommitted > 0);
-    MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
-
-    Arena* arena = info.freeArenasHead;
-    info.freeArenasHead = arena->next;
-    --info.numArenasFreeCommitted;
-    --info.numArenasFree;
-    rt->gc.updateOnFreeArenaAlloc(info);
-
-    return arena;
-}
-
-Arena*
-Chunk::allocateArena(JSRuntime* rt, Zone* zone, AllocKind thingKind, const AutoLockGC& lock)
-{
-    Arena* arena = info.numArenasFreeCommitted > 0
-                   ? fetchNextFreeArena(rt)
-                   : fetchNextDecommittedArena();
-    arena->init(zone, thingKind);
-    updateChunkListAfterAlloc(rt, lock);
-    return arena;
-}
-
 inline void
 GCRuntime::updateOnArenaFree(const ChunkInfo& info)
 {
     ++numArenasFreeCommitted;
 }
 
 void
 Chunk::addArenaToFreeList(JSRuntime* rt, Arena* arena)
@@ -910,91 +794,16 @@ Chunk::updateChunkListAfterFree(JSRuntim
     } else {
         MOZ_ASSERT(unused());
         rt->gc.availableChunks(lock).remove(this);
         decommitAllArenas(rt);
         rt->gc.emptyChunks(lock).push(this);
     }
 }
 
-inline bool
-GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const
-{
-    // To minimize memory waste, we do not want to run the background chunk
-    // allocation if we already have some empty chunks or when the runtime has
-    // a small heap size (and therefore likely has a small growth rate).
-    return allocTask.enabled() &&
-           emptyChunks(lock).count() < tunables.minEmptyChunkCount(lock) &&
-           (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
-}
-
-void
-GCRuntime::startBackgroundAllocTaskIfIdle()
-{
-    AutoLockHelperThreadState helperLock;
-    if (allocTask.isRunning())
-        return;
-
-    // Join the previous invocation of the task. This will return immediately
-    // if the thread has never been started.
-    allocTask.joinWithLockHeld();
-    allocTask.startWithLockHeld();
-}
-
-Chunk*
-GCRuntime::pickChunk(const AutoLockGC& lock,
-                     AutoMaybeStartBackgroundAllocation& maybeStartBackgroundAllocation)
-{
-    if (availableChunks(lock).count())
-        return availableChunks(lock).head();
-
-    Chunk* chunk = emptyChunks(lock).pop();
-    if (!chunk) {
-        chunk = Chunk::allocate(rt);
-        if (!chunk)
-            return nullptr;
-        MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
-    }
-
-    MOZ_ASSERT(chunk->unused());
-    MOZ_ASSERT(!fullChunks(lock).contains(chunk));
-
-    if (wantBackgroundAllocation(lock))
-        maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt);
-
-    chunkAllocationSinceLastGC = true;
-
-    availableChunks(lock).push(chunk);
-
-    return chunk;
-}
-
-Arena*
-GCRuntime::allocateArena(Chunk* chunk, Zone* zone, AllocKind thingKind, const AutoLockGC& lock)
-{
-    MOZ_ASSERT(chunk->hasAvailableArenas());
-
-    // Fail the allocation if we are over our heap size limits.
-    if (!rt->isHeapMinorCollecting() &&
-        !isHeapCompacting() &&
-        usage.gcBytes() >= tunables.gcMaxBytes())
-    {
-        return nullptr;
-    }
-
-    Arena* arena = chunk->allocateArena(rt, zone, thingKind, lock);
-    zone->usage.addGCArena();
-
-    // Trigger an incremental slice if needed.
-    if (!rt->isHeapMinorCollecting() && !isHeapCompacting())
-        maybeAllocTriggerZoneGC(zone, lock);
-
-    return arena;
-}
-
 void
 GCRuntime::releaseArena(Arena* arena, const AutoLockGC& lock)
 {
     arena->zone->usage.removeGCArena();
     if (isBackgroundSweeping())
         arena->zone->threshold.updateForRemovedArena(tunables);
     return arena->chunk()->releaseArena(rt, arena, lock);
 }
@@ -3073,32 +2882,16 @@ void
 ArenaLists::queueForegroundThingsForSweep(FreeOp* fop)
 {
     gcShapeArenasToUpdate = arenaListsToSweep[AllocKind::SHAPE];
     gcAccessorShapeArenasToUpdate = arenaListsToSweep[AllocKind::ACCESSOR_SHAPE];
     gcObjectGroupArenasToUpdate = arenaListsToSweep[AllocKind::OBJECT_GROUP];
     gcScriptArenasToUpdate = arenaListsToSweep[AllocKind::SCRIPT];
 }
 
-/* static */ void*
-GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind)
-{
-    /*
-     * Called by compacting GC to refill a free list while we are in a GC.
-     */
-
-    zone->arenas.checkEmptyFreeList(thingKind);
-    mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
-    MOZ_ASSERT(rt->isHeapMajorCollecting());
-    MOZ_ASSERT(!rt->gc.isBackgroundSweeping());
-
-    AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
-    return zone->arenas.allocateFromArena(zone, thingKind, maybeStartBackgroundAllocation);
-}
-
 SliceBudget::SliceBudget()
   : timeBudget(UnlimitedTimeBudget), workBudget(UnlimitedWorkBudget)
 {
     makeUnlimited();
 }
 
 SliceBudget::SliceBudget(TimeBudget time)
   : timeBudget(time), workBudget(UnlimitedWorkBudget)
@@ -3554,42 +3347,16 @@ GCHelperState::work()
     }
 
     setState(IDLE);
     thread = nullptr;
 
     PR_NotifyAllCondVar(done);
 }
 
-BackgroundAllocTask::BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool)
-  : runtime(rt),
-    chunkPool_(pool),
-    enabled_(CanUseExtraThreads() && GetCPUCount() >= 2)
-{
-}
-
-/* virtual */ void
-BackgroundAllocTask::run()
-{
-    TraceLoggerThread* logger = TraceLoggerForCurrentThread();
-    AutoTraceLog logAllocation(logger, TraceLogger_GCAllocation);
-
-    AutoLockGC lock(runtime);
-    while (!cancel_ && runtime->gc.wantBackgroundAllocation(lock)) {
-        Chunk* chunk;
-        {
-            AutoUnlockGC unlock(lock);
-            chunk = Chunk::allocate(runtime);
-            if (!chunk)
-                break;
-        }
-        chunkPool_.push(chunk);
-    }
-}
-
 void
 GCRuntime::queueZonesForBackgroundSweep(ZoneList& zones)
 {
     AutoLockHelperThreadState helperLock;
     AutoLockGC lock(rt);
     backgroundSweepZones.transferFrom(zones);
     helperState.maybeStartBackgroundSweep(lock);
 }