Bug 988486 - Make more GCRuntime members private and add necessary accessors r=terrence
authorJon Coppeard <jcoppeard@mozilla.com>
Tue, 08 Jul 2014 14:05:24 +0100
changeset 192848 baec82f2baebe1c39db932fd68fa8802ea999045
parent 192847 047082ad146405b19aa566b1ecd5f47e7d1fa823
child 192849 ff2bf4960a4bf5cb4fd224e994b8feaf20f09d61
push id: 7663
push user: kwierso@gmail.com
push date: Wed, 09 Jul 2014 03:08:08 +0000
treeherder: fx-team@48de6f4f82af [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: terrence
bugs: 988486
milestone: 33.0a1
Bug 988486 - Make more GCRuntime members private and add necessary accessors r=terrence
js/src/builtin/TestingFunctions.cpp
js/src/gc/GCRuntime.h
js/src/gc/Heap.h
js/src/gc/Iteration.cpp
js/src/gc/Nursery.cpp
js/src/gc/RootMarking.cpp
js/src/gc/Statistics.cpp
js/src/gc/Zone.cpp
js/src/jsgc.cpp
js/src/jsgc.h
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -242,29 +242,29 @@ GC(JSContext *cx, unsigned argc, jsval *
                 return false;
         } else if (arg.isObject()) {
             PrepareZoneForGC(UncheckedUnwrap(&arg.toObject())->zone());
             compartment = true;
         }
     }
 
 #ifndef JS_MORE_DETERMINISTIC
-    size_t preBytes = cx->runtime()->gc.bytes;
+    size_t preBytes = cx->runtime()->gc.bytesAllocated();
 #endif
 
     if (compartment)
         PrepareForDebugGC(cx->runtime());
     else
         PrepareForFullGC(cx->runtime());
     GCForReason(cx->runtime(), gcreason::API);
 
     char buf[256] = { '\0' };
 #ifndef JS_MORE_DETERMINISTIC
     JS_snprintf(buf, sizeof(buf), "before %lu, after %lu\n",
-                (unsigned long)preBytes, (unsigned long)cx->runtime()->gc.bytes);
+                (unsigned long)preBytes, (unsigned long)cx->runtime()->gc.bytesAllocated());
 #endif
     JSString *str = JS_NewStringCopyZ(cx, buf);
     if (!str)
         return false;
     args.rval().setString(str);
     return true;
 }
 
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -29,16 +29,53 @@ namespace js {
 namespace gc {
 
 typedef Vector<JS::Zone *, 4, SystemAllocPolicy> ZoneVector;
 
 class MarkingValidator;
 struct AutoPrepareForTracing;
 class AutoTraceSession;
 
+class ChunkPool
+{
+    Chunk   *emptyChunkListHead;
+    size_t  emptyCount;
+
+  public:
+    ChunkPool()
+      : emptyChunkListHead(nullptr),
+        emptyCount(0)
+    {}
+
+    size_t getEmptyCount() const {
+        return emptyCount;
+    }
+
+    /* Must be called with the GC lock taken. */
+    inline Chunk *get(JSRuntime *rt);
+
+    /* Must be called either during the GC or with the GC lock taken. */
+    inline void put(Chunk *chunk);
+
+    /* Must be called with the GC lock taken. */
+    void expireAndFree(JSRuntime *rt, bool releaseAll);
+
+    class Enum {
+      public:
+        Enum(ChunkPool &pool) : pool(pool), chunkp(&pool.emptyChunkListHead) {}
+        bool empty() { return !*chunkp; }
+        Chunk *front();
+        inline void popFront();
+        inline void removeAndPopFront();
+      private:
+        ChunkPool &pool;
+        Chunk **chunkp;
+    };
+};
+
 struct ConservativeGCData
 {
     /*
      * The GC scans conservatively between ThreadData::nativeStackBase and
      * nativeStackTop unless the latter is nullptr.
      */
     uintptr_t           *nativeStackTop;
 
@@ -147,16 +184,20 @@ class GCRuntime
     void verifyPostBarriers();
     void maybeVerifyPreBarriers(bool always);
     void maybeVerifyPostBarriers(bool always);
     bool selectForMarking(JSObject *object);
     void clearSelectedForMarking();
     void setDeterministic(bool enable);
 #endif
 
+    // Accessors for the (now private) allocation counters.
+    size_t bytesAllocated() { return bytes; }
+    size_t maxBytesAllocated() { return maxBytes; }
+    // Bug fix: must return maxMallocBytes, not maxBytes — the call site this
+    // patch rewrites (Zone::Zone, formerly `rt->gc.maxMallocBytes * 0.9`) reads
+    // the malloc limit, so returning maxBytes would silently change behavior.
+    size_t maxMallocBytesAllocated() { return maxMallocBytes; }
+
   public:
     // Internal public interface
     js::gc::State state() { return incrementalState; }
     void recordNativeStackTop();
 #ifdef JS_THREADSAFE
     void notifyRequestEnd() { conservativeGC.updateForRequestEnd(); }
 #endif
     bool isBackgroundSweeping() { return helperState.isBackgroundSweeping(); }
@@ -286,34 +327,49 @@ class GCRuntime
     size_t allocationThreshold() { return allocThreshold; }
 
     JSGCMode gcMode() const { return mode; }
     void setGCMode(JSGCMode m) {
         mode = m;
         marker.setGCMode(mode);
     }
 
-    inline void updateOnChunkFree(const ChunkInfo &info);
     inline void updateOnFreeArenaAlloc(const ChunkInfo &info);
     inline void updateOnArenaFree(const ChunkInfo &info);
+    inline void updateBytesAllocated(ptrdiff_t size);
+
+    GCChunkSet::Range allChunks() { return chunkSet.all(); }
+    inline Chunk **getAvailableChunkList(Zone *zone);
+    void moveChunkToFreePool(Chunk *chunk);
+    bool hasChunk(Chunk *chunk) { return chunkSet.has(chunk); }
 
 #ifdef JS_GC_ZEAL
     void startVerifyPreBarriers();
     bool endVerifyPreBarriers();
     void startVerifyPostBarriers();
     bool endVerifyPostBarriers();
     void finishVerifier();
 #endif
 
   private:
     // For ArenaLists::allocateFromArenaInline()
     friend class ArenaLists;
     Chunk *pickChunk(Zone *zone, AutoMaybeStartBackgroundAllocation &maybeStartBackgroundAllocation);
     inline void arenaAllocatedDuringGC(JS::Zone *zone, ArenaHeader *arena);
 
+    /*
+     * Return the list of chunks that can be released outside the GC lock.
+     * Must be called either during the GC or with the GC lock taken.
+     */
+    Chunk *expireChunkPool(bool releaseAll);
+    void expireAndFreeChunkPool(bool releaseAll);
+    void freeChunkList(Chunk *chunkListHead);
+    void prepareToFreeChunk(ChunkInfo &info);
+    void releaseChunk(Chunk *chunk);
+
     inline bool wantBackgroundAllocation() const;
 
     bool initZeal();
     void requestInterrupt(JS::gcreason::Reason reason);
     bool gcCycle(bool incremental, int64_t budget, JSGCInvocationKind gckind,
                  JS::gcreason::Reason reason);
     void budgetIncrementalGC(int64_t *budget);
     void resetIncrementalGC(const char *reason);
@@ -353,27 +409,37 @@ class GCRuntime
     void markConservativeStackRoots(JSTracer *trc, bool useSavedRoots);
 
 #ifdef DEBUG
     void checkForCompartmentMismatches();
     void markAllWeakReferences(gcstats::Phase phase);
     void markAllGrayReferences();
 #endif
 
-  public:  // Internal state, public for now
+  public:
     JSRuntime             *rt;
 
     /* Embedders can use this zone however they wish. */
     JS::Zone              *systemZone;
 
     /* List of compartments and zones (protected by the GC lock). */
     js::gc::ZoneVector    zones;
 
     js::gc::SystemPageAllocator pageAllocator;
 
+#ifdef JSGC_GENERATIONAL
+    js::Nursery           nursery;
+    js::gc::StoreBuffer   storeBuffer;
+#endif
+
+    js::gcstats::Statistics stats;
+
+    js::GCMarker          marker;
+
+  private:
     /*
      * Set of all GC chunks with at least one allocated thing. The
      * conservative GC uses it to quickly check if a possible GC thing points
      * into an allocated chunk.
      */
     js::GCChunkSet        chunkSet;
 
     /*
@@ -382,34 +448,24 @@ class GCRuntime
      * in the list head are taken, then the chunk is removed from the list.
      * During the GC when all arenas in a chunk become free, that chunk is
      * removed from the list and scheduled for release.
      */
     js::gc::Chunk         *systemAvailableChunkListHead;
     js::gc::Chunk         *userAvailableChunkListHead;
     js::gc::ChunkPool     chunkPool;
 
-#ifdef JSGC_GENERATIONAL
-    js::Nursery           nursery;
-    js::gc::StoreBuffer   storeBuffer;
-#endif
-
-    js::gcstats::Statistics stats;
-
-    js::GCMarker          marker;
-
     js::RootedValueMap    rootsHash;
 
     /* This is updated by both the main and GC helper threads. */
     mozilla::Atomic<size_t, mozilla::ReleaseAcquire>   bytes;
 
     size_t                maxBytes;
     size_t                maxMallocBytes;
 
-  private:
     /*
      * Number of the committed arenas in all GC chunks including empty chunks.
      */
     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire>   numArenasFreeCommitted;
     void                  *verifyPreData;
     void                  *verifyPostData;
     bool                  chunkAllocationSinceLastGC;
     int64_t               nextFullGCTime;
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -829,23 +829,16 @@ struct Chunk
 
     void releaseArena(ArenaHeader *aheader);
     void recycleArena(ArenaHeader *aheader, ArenaList &dest, AllocKind thingKind);
 
     static Chunk *allocate(JSRuntime *rt);
 
     void decommitAllArenas(JSRuntime *rt);
 
-    /* Must be called with the GC lock taken. */
-    static inline void release(JSRuntime *rt, Chunk *chunk);
-    static inline void releaseList(JSRuntime *rt, Chunk *chunkListHead);
-
-    /* Must be called with the GC lock taken. */
-    inline void prepareToBeFreed(JSRuntime *rt);
-
     /*
      * Assuming that the info.prevp points to the next field of the previous
      * chunk in a doubly-linked list, get that chunk.
      */
     Chunk *getPrevious() {
         JS_ASSERT(info.prevp);
         return fromPointerToNext(info.prevp);
     }
--- a/js/src/gc/Iteration.cpp
+++ b/js/src/gc/Iteration.cpp
@@ -80,17 +80,17 @@ js::IterateZoneCompartmentsArenasCells(J
                                    compartmentCallback, arenaCallback, cellCallback);
 }
 
 void
 js::IterateChunks(JSRuntime *rt, void *data, IterateChunkCallback chunkCallback)
 {
     AutoPrepareForTracing prep(rt, SkipAtoms);
 
-    for (js::GCChunkSet::Range r = rt->gc.chunkSet.all(); !r.empty(); r.popFront())
+    for (js::GCChunkSet::Range r = rt->gc.allChunks(); !r.empty(); r.popFront())
         chunkCallback(rt, data, r.front());
 }
 
 void
 js::IterateScripts(JSRuntime *rt, JSCompartment *compartment,
                    void *data, IterateScriptCallback scriptCallback)
 {
     MinorGC(rt, JS::gcreason::EVICT_NURSERY);
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -879,17 +879,17 @@ js::Nursery::collect(JSRuntime *rt, JS::
 
     TIME_START(clearStoreBuffer);
     rt->gc.storeBuffer.clear();
     TIME_END(clearStoreBuffer);
 
     // We ignore gcMaxBytes when allocating for minor collection. However, if we
     // overflowed, we disable the nursery. The next time we allocate, we'll fail
     // because gcBytes >= gcMaxBytes.
-    if (rt->gc.bytes >= rt->gc.maxBytes)
+    if (rt->gc.bytesAllocated() >= rt->gc.maxBytesAllocated())
         disable();
 
     TIME_END(total);
 
     TraceMinorGCEnd();
 
 #ifdef PROFILE_NURSERY
     int64_t totalTime = TIME_TOTAL(total);
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -182,17 +182,17 @@ IsAddressableGCThing(JSRuntime *rt, uint
 #if JS_BITS_PER_WORD == 32
     uintptr_t addr = w & JSID_PAYLOAD_MASK;
 #elif JS_BITS_PER_WORD == 64
     uintptr_t addr = w & JSID_PAYLOAD_MASK & JSVAL_PAYLOAD_MASK;
 #endif
 
     Chunk *chunk = Chunk::fromAddress(addr);
 
-    if (!rt->gc.chunkSet.has(chunk))
+    if (!rt->gc.hasChunk(chunk))
         return CGCT_NOTCHUNK;
 
     /*
      * We query for pointers outside the arena array after checking for an
      * allocated chunk. Such pointers are rare and we want to reject them
      * after doing more likely rejections.
      */
     if (!Chunk::withinArenasRange(addr))
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -526,17 +526,17 @@ Statistics::beginGC()
 {
     PodArrayZero(phaseStartTimes);
     PodArrayZero(phaseTimes);
 
     slices.clearAndFree();
     sccTimes.clearAndFree();
     nonincrementalReason = nullptr;
 
-    preBytes = runtime->gc.bytes;
+    preBytes = runtime->gc.bytesAllocated();
 }
 
 void
 Statistics::endGC()
 {
     crash::SnapshotGCStack();
 
     for (int i = 0; i < PHASE_LIMIT; i++)
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -44,17 +44,17 @@ JS::Zone::Zone(JSRuntime *rt)
     gcScheduled_(false),
     gcPreserveCode_(false),
     ionUsingBarriers_(false)
 {
     /* Ensure that there are no vtables to mess us up here. */
     JS_ASSERT(reinterpret_cast<JS::shadow::Zone *>(this) ==
               static_cast<JS::shadow::Zone *>(this));
 
-    setGCMaxMallocBytes(rt->gc.maxMallocBytes * 0.9);
+    setGCMaxMallocBytes(rt->gc.maxMallocBytesAllocated() * 0.9);
 }
 
 Zone::~Zone()
 {
     JSRuntime *rt = runtimeFromMainThread();
     if (this == rt->gc.systemZone)
         rt->gc.systemZone = nullptr;
 
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -651,105 +651,118 @@ inline void
 ChunkPool::put(Chunk *chunk)
 {
     chunk->info.age = 0;
     chunk->info.next = emptyChunkListHead;
     emptyChunkListHead = chunk;
     emptyCount++;
 }
 
+inline Chunk *
+ChunkPool::Enum::front()
+{
+    Chunk *chunk = *chunkp;
+    JS_ASSERT_IF(chunk, pool.getEmptyCount() != 0);
+    return chunk;
+}
+
+inline void
+ChunkPool::Enum::popFront()
+{
+    JS_ASSERT(!empty());
+    chunkp = &front()->info.next;
+}
+
+inline void
+ChunkPool::Enum::removeAndPopFront()
+{
+    JS_ASSERT(!empty());
+    *chunkp = front()->info.next;
+    --pool.emptyCount;
+}
+
 /* Must be called either during the GC or with the GC lock taken. */
 Chunk *
-ChunkPool::expire(JSRuntime *rt, bool releaseAll)
-{
-    JS_ASSERT(this == &rt->gc.chunkPool);
-
+GCRuntime::expireChunkPool(bool releaseAll)
+{
     /*
      * Return old empty chunks to the system while preserving the order of
      * other chunks in the list. This way, if the GC runs several times
      * without emptying the list, the older chunks will stay at the tail
      * and are more likely to reach the max age.
      */
     Chunk *freeList = nullptr;
     int freeChunkCount = 0;
-    for (Chunk **chunkp = &emptyChunkListHead; *chunkp; ) {
-        JS_ASSERT(emptyCount);
-        Chunk *chunk = *chunkp;
+    for (ChunkPool::Enum e(chunkPool); !e.empty(); ) {
+        Chunk *chunk = e.front();
         JS_ASSERT(chunk->unused());
-        JS_ASSERT(!rt->gc.chunkSet.has(chunk));
+        JS_ASSERT(!chunkSet.has(chunk));
         JS_ASSERT(chunk->info.age <= MAX_EMPTY_CHUNK_AGE);
         if (releaseAll || chunk->info.age == MAX_EMPTY_CHUNK_AGE ||
             freeChunkCount++ > MAX_EMPTY_CHUNK_COUNT)
         {
-            *chunkp = chunk->info.next;
-            --emptyCount;
-            chunk->prepareToBeFreed(rt);
+            e.removeAndPopFront();
+            prepareToFreeChunk(chunk->info);
             chunk->info.next = freeList;
             freeList = chunk;
         } else {
             /* Keep the chunk but increase its age. */
             ++chunk->info.age;
-            chunkp = &chunk->info.next;
+            e.popFront();
         }
     }
-    JS_ASSERT_IF(releaseAll, !emptyCount);
+    JS_ASSERT_IF(releaseAll, chunkPool.getEmptyCount() == 0);
     return freeList;
 }
 
-static void
-FreeChunkList(JSRuntime *rt, Chunk *chunkListHead)
+void
+GCRuntime::freeChunkList(Chunk *chunkListHead)
 {
     while (Chunk *chunk = chunkListHead) {
         JS_ASSERT(!chunk->info.numArenasFreeCommitted);
         chunkListHead = chunk->info.next;
         FreeChunk(rt, chunk);
     }
 }
 
 void
-ChunkPool::expireAndFree(JSRuntime *rt, bool releaseAll)
-{
-    FreeChunkList(rt, expire(rt, releaseAll));
+GCRuntime::expireAndFreeChunkPool(bool releaseAll)
+{
+    freeChunkList(expireChunkPool(releaseAll));
 }
 
 /* static */ Chunk *
 Chunk::allocate(JSRuntime *rt)
 {
     Chunk *chunk = AllocChunk(rt);
     if (!chunk)
         return nullptr;
     chunk->init(rt);
     rt->gc.stats.count(gcstats::STAT_NEW_CHUNK);
     return chunk;
 }
 
 /* Must be called with the GC lock taken. */
-/* static */ inline void
-Chunk::release(JSRuntime *rt, Chunk *chunk)
+inline void
+GCRuntime::releaseChunk(Chunk *chunk)
 {
     JS_ASSERT(chunk);
-    chunk->prepareToBeFreed(rt);
+    prepareToFreeChunk(chunk->info);
     FreeChunk(rt, chunk);
 }
 
 inline void
-GCRuntime::updateOnChunkFree(const ChunkInfo &info)
+GCRuntime::prepareToFreeChunk(ChunkInfo &info)
 {
     JS_ASSERT(numArenasFreeCommitted >= info.numArenasFreeCommitted);
     numArenasFreeCommitted -= info.numArenasFreeCommitted;
     stats.count(gcstats::STAT_DESTROY_CHUNK);
-}
-
-inline void
-Chunk::prepareToBeFreed(JSRuntime *rt)
-{
-    rt->gc.updateOnChunkFree(info);
 #ifdef DEBUG
     /*
-     * Let FreeChunkList detect a missing prepareToBeFreed call before it
+     * Let FreeChunkList detect a missing prepareToFreeChunk call before it
      * frees chunk.
      */
     info.numArenasFreeCommitted = 0;
 #endif
 }
 
 void Chunk::decommitAllArenas(JSRuntime *rt)
 {
@@ -783,29 +796,29 @@ Chunk::init(JSRuntime *rt)
     info.age = 0;
     info.trailer.storeBuffer = nullptr;
     info.trailer.location = ChunkLocationBitTenuredHeap;
     info.trailer.runtime = rt;
 
     /* The rest of info fields are initialized in pickChunk. */
 }
 
-static inline Chunk **
-GetAvailableChunkList(Zone *zone)
-{
-    JSRuntime *rt = zone->runtimeFromAnyThread();
+inline Chunk **
+GCRuntime::getAvailableChunkList(Zone *zone)
+{
     return zone->isSystem
-           ? &rt->gc.systemAvailableChunkListHead
-           : &rt->gc.userAvailableChunkListHead;
+           ? &systemAvailableChunkListHead
+           : &userAvailableChunkListHead;
 }
 
 inline void
 Chunk::addToAvailableList(Zone *zone)
 {
-    insertToAvailableList(GetAvailableChunkList(zone));
+    JSRuntime *rt = zone->runtimeFromAnyThread();
+    insertToAvailableList(rt->gc.getAvailableChunkList(zone));
 }
 
 inline void
 Chunk::insertToAvailableList(Chunk **insertPoint)
 {
     JS_ASSERT(hasAvailableArenas());
     JS_ASSERT(!info.prevp);
     JS_ASSERT(!info.next);
@@ -886,23 +899,30 @@ Chunk::fetchNextFreeArena(JSRuntime *rt)
     info.freeArenasHead = aheader->next;
     --info.numArenasFreeCommitted;
     --info.numArenasFree;
     rt->gc.updateOnFreeArenaAlloc(info);
 
     return aheader;
 }
 
+void
+GCRuntime::updateBytesAllocated(ptrdiff_t size)
+{
+    JS_ASSERT_IF(size < 0, bytes >= size_t(-size));
+    bytes += size;
+}
+
 ArenaHeader *
 Chunk::allocateArena(Zone *zone, AllocKind thingKind)
 {
     JS_ASSERT(hasAvailableArenas());
 
     JSRuntime *rt = zone->runtimeFromAnyThread();
-    if (!rt->isHeapMinorCollecting() && rt->gc.bytes >= rt->gc.maxBytes) {
+    if (!rt->isHeapMinorCollecting() && rt->gc.bytesAllocated() >= rt->gc.maxBytesAllocated()) {
 #ifdef JSGC_FJGENERATIONAL
         // This is an approximation to the best test, which would check that
         // this thread is currently promoting into the tenured area.  I doubt
         // the better test would make much difference.
         if (!rt->isFJMinorCollecting())
             return nullptr;
 #else
         return nullptr;
@@ -911,17 +931,17 @@ Chunk::allocateArena(Zone *zone, AllocKi
 
     ArenaHeader *aheader = MOZ_LIKELY(info.numArenasFreeCommitted > 0)
                            ? fetchNextFreeArena(rt)
                            : fetchNextDecommittedArena();
     aheader->init(zone, thingKind);
     if (MOZ_UNLIKELY(!hasAvailableArenas()))
         removeFromAvailableList();
 
-    rt->gc.bytes += ArenaSize;
+    rt->gc.updateBytesAllocated(ArenaSize);
     zone->gcBytes += ArenaSize;
 
     if (zone->gcBytes >= zone->gcTriggerBytes) {
         AutoUnlockGC unlock(rt);
         TriggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
     }
 
     return aheader;
@@ -957,39 +977,47 @@ Chunk::releaseArena(ArenaHeader *aheader
     JS_ASSERT(aheader->allocated());
     JS_ASSERT(!aheader->hasDelayedMarking);
     Zone *zone = aheader->zone;
     JSRuntime *rt = zone->runtimeFromAnyThread();
     AutoLockGC maybeLock;
     if (rt->gc.isBackgroundSweeping())
         maybeLock.lock(rt);
 
-    JS_ASSERT(rt->gc.bytes >= ArenaSize);
+    JS_ASSERT(rt->gc.bytesAllocated() >= ArenaSize);
     JS_ASSERT(zone->gcBytes >= ArenaSize);
     if (rt->gc.isBackgroundSweeping())
         zone->gcBytesAfterGC -= ArenaSize;
-    rt->gc.bytes -= ArenaSize;
+    rt->gc.updateBytesAllocated(-ArenaSize);
     zone->gcBytes -= ArenaSize;
 
     aheader->setAsNotAllocated();
     addArenaToFreeList(rt, aheader);
 
     if (info.numArenasFree == 1) {
         JS_ASSERT(!info.prevp);
         JS_ASSERT(!info.next);
         addToAvailableList(zone);
     } else if (!unused()) {
         JS_ASSERT(info.prevp);
     } else {
-        rt->gc.chunkSet.remove(this);
+        JS_ASSERT(unused());
         removeFromAvailableList();
-        JS_ASSERT(info.numArenasFree == ArenasPerChunk);
         decommitAllArenas(rt);
-        rt->gc.chunkPool.put(this);
-    }
+        rt->gc.moveChunkToFreePool(this);
+    }
+}
+
+void
+GCRuntime::moveChunkToFreePool(Chunk *chunk)
+{
+    JS_ASSERT(chunk->unused());
+    JS_ASSERT(chunkSet.has(chunk));
+    chunkSet.remove(chunk);
+    chunkPool.put(chunk);
 }
 
 inline bool
 GCRuntime::wantBackgroundAllocation() const
 {
     /*
      * To minimize memory waste we do not want to run the background chunk
      * allocation if we have empty chunks or when the runtime needs just few
@@ -1025,17 +1053,17 @@ class js::gc::AutoMaybeStartBackgroundAl
         }
     }
 };
 
 /* The caller must hold the GC lock. */
 Chunk *
 GCRuntime::pickChunk(Zone *zone, AutoMaybeStartBackgroundAllocation &maybeStartBackgroundAllocation)
 {
-    Chunk **listHeadp = GetAvailableChunkList(zone);
+    Chunk **listHeadp = getAvailableChunkList(zone);
     Chunk *chunk = *listHeadp;
     if (chunk)
         return chunk;
 
     chunk = chunkPool.get(rt);
     if (!chunk) {
         chunk = Chunk::allocate(rt);
         if (!chunk)
@@ -1053,38 +1081,38 @@ GCRuntime::pickChunk(Zone *zone, AutoMay
 
     /*
      * FIXME bug 583732 - chunk is newly allocated and cannot be present in
      * the table so using ordinary lookupForAdd is suboptimal here.
      */
     GCChunkSet::AddPtr p = chunkSet.lookupForAdd(chunk);
     JS_ASSERT(!p);
     if (!chunkSet.add(p, chunk)) {
-        Chunk::release(rt, chunk);
+        releaseChunk(chunk);
         return nullptr;
     }
 
     chunk->info.prevp = nullptr;
     chunk->info.next = nullptr;
     chunk->addToAvailableList(zone);
 
     return chunk;
 }
 
 GCRuntime::GCRuntime(JSRuntime *rt) :
     rt(rt),
     systemZone(nullptr),
-    systemAvailableChunkListHead(nullptr),
-    userAvailableChunkListHead(nullptr),
 #ifdef JSGC_GENERATIONAL
     nursery(rt),
     storeBuffer(rt, nursery),
 #endif
     stats(rt),
     marker(rt),
+    systemAvailableChunkListHead(nullptr),
+    userAvailableChunkListHead(nullptr),
     bytes(0),
     maxBytes(0),
     maxMallocBytes(0),
     numArenasFreeCommitted(0),
     verifyPreData(nullptr),
     verifyPostData(nullptr),
     chunkAllocationSinceLastGC(false),
     nextFullGCTime(0),
@@ -1323,21 +1351,21 @@ GCRuntime::finish()
     }
 
     zones.clear();
 
     systemAvailableChunkListHead = nullptr;
     userAvailableChunkListHead = nullptr;
     if (chunkSet.initialized()) {
         for (GCChunkSet::Range r(chunkSet.all()); !r.empty(); r.popFront())
-            Chunk::release(rt, r.front());
+            releaseChunk(r.front());
         chunkSet.clear();
     }
 
-    chunkPool.expireAndFree(rt, true);
+    expireAndFreeChunkPool(true);
 
     if (rootsHash.initialized())
         rootsHash.clear();
 
     FinishPersistentRootedChains(rt);
 
 #ifdef JS_THREADSAFE
     if (lock) {
@@ -2519,19 +2547,19 @@ GCRuntime::decommitArenas()
 /* Must be called with the GC lock taken. */
 void
 GCRuntime::expireChunksAndArenas(bool shouldShrink)
 {
 #ifdef JSGC_FJGENERATIONAL
     rt->threadPool.pruneChunkCache();
 #endif
 
-    if (Chunk *toFree = chunkPool.expire(rt, shouldShrink)) {
+    if (Chunk *toFree = expireChunkPool(shouldShrink)) {
         AutoUnlockGC unlock(rt);
-        FreeChunkList(rt, toFree);
+        freeChunkList(toFree);
     }
 
     if (shouldShrink)
         decommitArenas();
 }
 
 void
 GCRuntime::sweepBackgroundThings(bool onBackgroundThread)
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -68,45 +68,16 @@ namespace gc {
 enum State {
     NO_INCREMENTAL,
     MARK_ROOTS,
     MARK,
     SWEEP,
     INVALID
 };
 
-class ChunkPool {
-    Chunk   *emptyChunkListHead;
-    size_t  emptyCount;
-
-  public:
-    ChunkPool()
-      : emptyChunkListHead(nullptr),
-        emptyCount(0) { }
-
-    size_t getEmptyCount() const {
-        return emptyCount;
-    }
-
-    /* Must be called with the GC lock taken. */
-    inline Chunk *get(JSRuntime *rt);
-
-    /* Must be called either during the GC or with the GC lock taken. */
-    inline void put(Chunk *chunk);
-
-    /*
-     * Return the list of chunks that can be released outside the GC lock.
-     * Must be called either during the GC or with the GC lock taken.
-     */
-    Chunk *expire(JSRuntime *rt, bool releaseAll);
-
-    /* Must be called with the GC lock taken. */
-    void expireAndFree(JSRuntime *rt, bool releaseAll);
-};
-
 static inline JSGCTraceKind
 MapAllocToTraceKind(AllocKind kind)
 {
     static const JSGCTraceKind map[] = {
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT0 */
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT0_BACKGROUND */
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT2 */
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT2_BACKGROUND */