Bug 988486 - Convert static functions to methods on GCRuntime r=terrence
☠☠ backed out by 6a8fcbc59766 ☠☠
author Jon Coppeard <jcoppeard@mozilla.com>
date Fri, 09 May 2014 11:33:01 +0100
changeset 182364 e5edc899d2b2896ea68007eca3659382d2b95ee5
parent 182363 edd0cd3d8750d92753b4ff2471574b0f9fb1c988
child 182365 e0cd88e8e6367b923d7dfc0c93369a074aeb95bd
push id 26759
push user ryanvm@gmail.com
push date Fri, 09 May 2014 19:51:49 +0000
reviewers terrence
bugs 988486
milestone 32.0a1
js/src/gc/GCRuntime.h
js/src/gc/Nursery.h
js/src/gc/Zone.h
js/src/jsapi.cpp
js/src/jsgc.cpp
js/src/jsgc.h
js/src/jspubtd.h
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -38,16 +38,17 @@ struct ScriptAndCounts
 
 typedef Vector<ScriptAndCounts, 0, SystemAllocPolicy> ScriptAndCountsVector;
 
 namespace gc {
 
 typedef Vector<JS::Zone *, 4, SystemAllocPolicy> ZoneVector;
 
 class MarkingValidator;
+class AutoPrepareForTracing;
 
 struct ConservativeGCData
 {
     /*
      * The GC scans conservatively between ThreadData::nativeStackBase and
      * nativeStackTop unless the latter is nullptr.
      */
     uintptr_t           *nativeStackTop;
@@ -83,18 +84,87 @@ struct ConservativeGCData
         return !!nativeStackTop;
     }
 };
 
 class GCRuntime
 {
   public:
     GCRuntime(JSRuntime *rt);
+    bool init(uint32_t maxbytes);
+    void finish();
+
+    void setGCZeal(uint8_t zeal, uint32_t frequency);
+    template <typename T> bool addRoot(T *rp, const char *name, JSGCRootType rootType);
+    void removeRoot(void *rp);
+    void setMarkStackLimit(size_t limit);
+
+    bool isHeapBusy() { return heapState != js::Idle; }
+    bool isHeapMajorCollecting() { return heapState == js::MajorCollecting; }
+    bool isHeapMinorCollecting() { return heapState == js::MinorCollecting; }
+    bool isHeapCollecting() { return isHeapMajorCollecting() || isHeapMinorCollecting(); }
+
+    bool triggerGC(JS::gcreason::Reason reason);
+    bool triggerZoneGC(Zone *zone, JS::gcreason::Reason reason);
+    void maybeGC(Zone *zone);
+    void minorGC(JS::gcreason::Reason reason);
+    void minorGC(JSContext *cx, JS::gcreason::Reason reason);
+    void gcIfNeeded(JSContext *cx);
+    void collect(bool incremental, int64_t budget, JSGCInvocationKind gckind,
+                 JS::gcreason::Reason reason);
+    void gcSlice(JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis);
+    void runDebugGC();
+
+  private:
+    // For ArenaLists::allocateFromArenaInline()
+    friend class ArenaLists;
+    Chunk *pickChunk(Zone *zone);
+
+    inline bool wantBackgroundAllocation() const;
+
+    bool initGCZeal();
+    void recordNativeStackTopForGC();
+    void requestInterrupt(JS::gcreason::Reason reason);
+    bool gcCycle(bool incremental, int64_t budget, JSGCInvocationKind gckind,
+                 JS::gcreason::Reason reason);
+    void budgetIncrementalGC(int64_t *budget);
+    void resetIncrementalGC(const char *reason);
+    void incrementalCollectSlice(int64_t budget, JS::gcreason::Reason reason,
+                                 JSGCInvocationKind gckind);
+    void pushZealSelectedObjects();
+    bool beginMarkPhase();
+    bool shouldPreserveJITCode(JSCompartment *comp, int64_t currentTime);
+    bool drainMarkStack(SliceBudget &sliceBudget, gcstats::Phase phase);
+    template <class CompartmentIterT> void markWeakReferences(gcstats::Phase phase);
+    void markWeakReferencesInCurrentGroup(gcstats::Phase phase);
+    template <class ZoneIterT, class CompartmentIterT> void markGrayReferences();
+    void markGrayReferencesInCurrentGroup();
+    void beginSweepPhase(bool lastGC);
+    void findZoneGroups();
+    void getNextZoneGroup();
+    void endMarkingZoneGroup();
+    void beginSweepingZoneGroup();
+    bool releaseObservedTypes();
+    void endSweepingZoneGroup();
+    bool sweepPhase(SliceBudget &sliceBudget);
+    void endSweepPhase(JSGCInvocationKind gckind, bool lastGC);
+    void sweepZones(FreeOp *fop, bool lastGC);
+
+    void computeNonIncrementalMarkingForValidation();
+    void validateIncrementalMarking();
+    void finishMarkingValidation();
+
+#ifdef DEBUG
+    void checkForCompartmentMismatches();
+    void markAllWeakReferences(gcstats::Phase phase);
+    void markAllGrayReferences();
+#endif
 
   public:  // Internal state, public for now
+    JSRuntime             *rt;
 
     /* Embedders can use this zone however they wish. */
     JS::Zone              *systemZone;
 
     /* List of compartments and zones (protected by the GC lock). */
     js::gc::ZoneVector    zones;
 
     js::gc::SystemPageAllocator pageAllocator;
@@ -223,17 +293,17 @@ class GCRuntime
     /* Index of current zone group (for stats). */
     unsigned              zoneGroupIndex;
 
     /*
      * Incremental sweep state.
      */
     JS::Zone              *zoneGroups;
     JS::Zone              *currentZoneGroup;
-    int                   sweepPhase;
+    int                   finalizePhase;
     JS::Zone              *sweepZone;
     int                   sweepKindIndex;
     bool                  abortSweepAfterCurrentGroup;
 
     /*
      * List head of arenas allocated during the sweep phase.
      */
     js::gc::ArenaHeader   *arenasAllocatedDuringSweep;
@@ -321,21 +391,21 @@ class GCRuntime
     int                   incrementalLimit;
 
     js::Vector<JSObject *, 0, js::SystemAllocPolicy>   selectedForMarking;
 #endif
 
     bool                  validate;
     bool                  fullCompartmentChecks;
 
-    JSGCCallback          callback;
+    JSGCCallback          gcCallback;
     JS::GCSliceCallback   sliceCallback;
     JSFinalizeCallback    finalizeCallback;
 
-    void                  *callbackData;
+    void                  *gcCallbackData;
 
     /*
      * Malloc counter to measure memory pressure for GC scheduling. It runs
      * from   maxMallocBytes down to zero.
      */
     mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire>   mallocBytes;
 
     /*
@@ -372,19 +442,21 @@ class GCRuntime
 #ifdef DEBUG
     size_t                noGCOrAllocationCheck;
 #endif
 
     /* Synchronize GC heap access between main thread and GCHelperThread. */
     PRLock   *lock;
     mozilla::DebugOnly<PRThread *>   lockOwner;
 
-    friend class js::GCHelperThread;
+    js::GCHelperThread helperThread;
+
+    ConservativeGCData conservativeGC;
 
-    js::GCHelperThread    helperThread;
-
-    ConservativeGCData    conservativeGC;
+    friend class js::GCHelperThread;
+    friend class js::gc::AutoPrepareForTracing; /* For recordNativeStackTopForGC(). */
+    friend class js::gc::MarkingValidator;
 };
 
 } /* namespace gc */
 } /* namespace js */
 
 #endif
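
A minimal sketch (not part of the patch) of the embedder-side effect of the
new public entry points: js_InitGC and js_FinishGC become GCRuntime::init and
GCRuntime::finish (see jsgc.cpp below). The wrapper names here are
illustrative only.

    bool InitRuntimeGC(JSRuntime *rt, uint32_t maxbytes)
    {
        // Previously: js_InitGC(rt, maxbytes)
        return rt->gc.init(maxbytes);
    }

    void FinishRuntimeGC(JSRuntime *rt)
    {
        // Previously: js_FinishGC(rt)
        rt->gc.finish();
    }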
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -29,16 +29,17 @@ struct Zone;
 namespace js {
 
 class ObjectElements;
 class HeapSlot;
 void SetGCZeal(JSRuntime *, uint8_t, uint32_t);
 
 namespace gc {
 class Cell;
+class Collector;
 class MinorCollectionTracer;
 } /* namespace gc */
 
 namespace types {
 struct TypeObject;
 }
 
 namespace jit {
@@ -145,16 +146,37 @@ class Nursery
         return ((JS::shadow::Runtime *)runtime_)->gcNurseryStart_;
     }
 
     MOZ_ALWAYS_INLINE uintptr_t heapEnd() const {
         JS_ASSERT(runtime_);
         return ((JS::shadow::Runtime *)runtime_)->gcNurseryEnd_;
     }
 
+#ifdef JS_GC_ZEAL
+    /*
+     * In debug and zeal builds, these bytes indicate the state of an unused
+     * segment of nursery-allocated memory.
+     */
+    void enterZealMode() {
+        if (isEnabled())
+            numActiveChunks_ = NumNurseryChunks;
+    }
+    void leaveZealMode() {
+        if (isEnabled()) {
+            JS_ASSERT(isEmpty());
+            setCurrentChunk(0);
+            currentStart_ = start();
+        }
+    }
+#else
+    void enterZealMode() {}
+    void leaveZealMode() {}
+#endif
+
   private:
     /*
      * The start and end pointers are stored under the runtime so that we can
      * inline the isInsideNursery check into embedder code. Use the start()
      * and heapEnd() functions to access these values.
      */
     JSRuntime *runtime_;
 
@@ -284,38 +306,16 @@ class Nursery
     void sweep(JSRuntime *rt);
 
     /* Change the allocable space provided by the nursery. */
     void growAllocableSpace();
     void shrinkAllocableSpace();
 
     static void MinorGCCallback(JSTracer *trc, void **thingp, JSGCTraceKind kind);
 
-#ifdef JS_GC_ZEAL
-    /*
-     * In debug and zeal builds, these bytes indicate the state of an unused
-     * segment of nursery-allocated memory.
-     */
-    void enterZealMode() {
-        if (isEnabled())
-            numActiveChunks_ = NumNurseryChunks;
-    }
-    void leaveZealMode() {
-        if (isEnabled()) {
-            JS_ASSERT(isEmpty());
-            setCurrentChunk(0);
-            currentStart_ = start();
-        }
-    }
-#else
-    void enterZealMode() {}
-    void leaveZealMode() {}
-#endif
-
     friend class gc::MinorCollectionTracer;
     friend class jit::MacroAssembler;
-    friend void SetGCZeal(JSRuntime *, uint8_t, uint32_t);
 };
 
 } /* namespace js */
 
 #endif /* JSGC_GENERATIONAL */
 #endif /* gc_Nursery_h */
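
The zeal hooks move from the private section (previously reachable only via
the removed friend declaration for SetGCZeal) into the public interface, so
the new GCRuntime::setGCZeal method can call them as an ordinary client. A
hedged sketch of the calling side; the helper name is hypothetical:

    void ToggleGenerationalZeal(js::gc::GCRuntime &gc, uint8_t zeal)
    {
        if (zeal == js::gc::ZealGenerationalGCValue)
            gc.nursery.enterZealMode();   // now public; no friend needed
        else
            gc.nursery.leaveZealMode();
    }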
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -97,16 +97,17 @@ namespace JS {
  */
 
 struct Zone : public JS::shadow::Zone,
               public js::gc::GraphNodeBase<JS::Zone>,
               public js::MallocProvider<JS::Zone>
 {
   private:
     friend bool js::CurrentThreadCanAccessZone(Zone *zone);
+    friend class js::gc::GCRuntime;
 
   public:
     js::Allocator                allocator;
 
     js::CompartmentVector        compartments;
 
   private:
     bool                         ionUsingBarriers_;
@@ -316,16 +317,17 @@ struct Zone : public JS::shadow::Zone,
     js::types::TypeZone types;
 
     void sweep(js::FreeOp *fop, bool releaseTypes, bool *oom);
 
     bool hasMarkedCompartments();
 
   private:
     void sweepBreakpoints(js::FreeOp *fop);
+    void sweepCompartments(js::FreeOp *fop, bool keepAtleastOne, bool lastGC);
 
 #ifdef JS_ION
     js::jit::JitZone *jitZone_;
     js::jit::JitZone *createJitZone(JSContext *cx);
 
   public:
     js::jit::JitZone *getJitZone(JSContext *cx) {
         return jitZone_ ? jitZone_ : createJitZone(cx);
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -1902,18 +1902,18 @@ JS_MaybeGC(JSContext *cx)
 {
     MaybeGC(cx);
 }
 
 JS_PUBLIC_API(void)
 JS_SetGCCallback(JSRuntime *rt, JSGCCallback cb, void *data)
 {
     AssertHeapIsIdle(rt);
-    rt->gc.callback = cb;
-    rt->gc.callbackData = data;
+    rt->gc.gcCallback = cb;
+    rt->gc.gcCallbackData = data;
 }
 
 JS_PUBLIC_API(void)
 JS_SetFinalizeCallback(JSRuntime *rt, JSFinalizeCallback cb)
 {
     AssertHeapIsIdle(rt);
     rt->gc.finalizeCallback = cb;
 }
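
The field rename (callback to gcCallback, callbackData to gcCallbackData) is
internal only; the embedder-facing API is unchanged. Illustrative use, with
MyGCCallback and myData as placeholder names:

    JS_SetGCCallback(rt, MyGCCallback, myData);  // still stores into
                                                 // rt->gc.gcCallback{,Data}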
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -601,53 +601,29 @@ AllocChunk(JSRuntime *rt)
 }
 
 static inline void
 FreeChunk(JSRuntime *rt, Chunk *p)
 {
     rt->gc.pageAllocator.unmapPages(static_cast<void *>(p), ChunkSize);
 }
 
-inline bool
-ChunkPool::wantBackgroundAllocation(JSRuntime *rt) const
-{
-    /*
-     * To minimize memory waste we do not want to run the background chunk
-     * allocation if we have empty chunks or when the runtime needs just a few
-     * of them.
-     */
-    return rt->gc.helperThread.canBackgroundAllocate() &&
-           emptyCount == 0 &&
-           rt->gc.chunkSet.count() >= 4;
-}
-
 /* Must be called with the GC lock taken. */
 inline Chunk *
 ChunkPool::get(JSRuntime *rt)
 {
-    JS_ASSERT(this == &rt->gc.chunkPool);
-
     Chunk *chunk = emptyChunkListHead;
-    if (chunk) {
-        JS_ASSERT(emptyCount);
-        emptyChunkListHead = chunk->info.next;
-        --emptyCount;
-    } else {
+    if (!chunk) {
         JS_ASSERT(!emptyCount);
-        chunk = Chunk::allocate(rt);
-        if (!chunk)
-            return nullptr;
-        JS_ASSERT(chunk->info.numArenasFreeCommitted == 0);
+        return nullptr;
     }
-    JS_ASSERT(chunk->unused());
-    JS_ASSERT(!rt->gc.chunkSet.has(chunk));
-
-    if (wantBackgroundAllocation(rt))
-        rt->gc.helperThread.startBackgroundAllocationIfIdle();
-
+
+    JS_ASSERT(emptyCount);
+    emptyChunkListHead = chunk->info.next;
+    --emptyCount;
     return chunk;
 }
 
 /* Must be called either during the GC or with the GC lock taken. */
 inline void
 ChunkPool::put(Chunk *chunk)
 {
     chunk->info.age = 0;
@@ -956,51 +932,74 @@ Chunk::releaseArena(ArenaHeader *aheader
         rt->gc.chunkSet.remove(this);
         removeFromAvailableList();
         JS_ASSERT(info.numArenasFree == ArenasPerChunk);
         decommitAllArenas(rt);
         rt->gc.chunkPool.put(this);
     }
 }
 
+inline bool
+GCRuntime::wantBackgroundAllocation() const
+{
+    /*
+     * To minimize memory waste we do not want to run the background chunk
+     * allocation if we have empty chunks or when the runtime needs just a few
+     * of them.
+     */
+    return helperThread.canBackgroundAllocate() &&
+           chunkPool.getEmptyCount() == 0 &&
+           chunkSet.count() >= 4;
+}
+
 /* The caller must hold the GC lock. */
-static Chunk *
-PickChunk(Zone *zone)
-{
-    JSRuntime *rt = zone->runtimeFromAnyThread();
+Chunk *
+GCRuntime::pickChunk(Zone *zone)
+{
     Chunk **listHeadp = GetAvailableChunkList(zone);
     Chunk *chunk = *listHeadp;
     if (chunk)
         return chunk;
 
-    chunk = rt->gc.chunkPool.get(rt);
-    if (!chunk)
-        return nullptr;
-
-    rt->gc.chunkAllocationSinceLastGC = true;
+    chunk = chunkPool.get(rt);
+    if (!chunk) {
+        chunk = Chunk::allocate(rt);
+        if (!chunk)
+            return nullptr;
+        JS_ASSERT(chunk->info.numArenasFreeCommitted == 0);
+    }
+
+    JS_ASSERT(chunk->unused());
+    JS_ASSERT(!chunkSet.has(chunk));
+
+    if (wantBackgroundAllocation())
+        helperThread.startBackgroundAllocationIfIdle();
+
+    chunkAllocationSinceLastGC = true;
 
     /*
      * FIXME bug 583732 - chunk is newly allocated and cannot be present in
      * the table so using ordinary lookupForAdd is suboptimal here.
      */
-    GCChunkSet::AddPtr p = rt->gc.chunkSet.lookupForAdd(chunk);
+    GCChunkSet::AddPtr p = chunkSet.lookupForAdd(chunk);
     JS_ASSERT(!p);
-    if (!rt->gc.chunkSet.add(p, chunk)) {
+    if (!chunkSet.add(p, chunk)) {
         Chunk::release(rt, chunk);
         return nullptr;
     }
 
     chunk->info.prevp = nullptr;
     chunk->info.next = nullptr;
     chunk->addToAvailableList(zone);
 
     return chunk;
 }
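/*
 * Summary sketch (editorial, not in the patch): after this change the
 * responsibility is split so that ChunkPool::get only pops the free list,
 * while pickChunk owns the Chunk::allocate fallback, the chunkSet
 * bookkeeping, and the decision to kick off background allocation via
 * wantBackgroundAllocation().
 */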
 
-js::gc::GCRuntime::GCRuntime(JSRuntime *rt) :
+GCRuntime::GCRuntime(JSRuntime *rt) :
+    rt(rt),
     systemZone(nullptr),
     systemAvailableChunkListHead(nullptr),
     userAvailableChunkListHead(nullptr),
     bytes(0),
     maxBytes(0),
     maxMallocBytes(0),
     numArenasFreeCommitted(0),
     marker(rt),
@@ -1036,17 +1035,16 @@ js::gc::GCRuntime::GCRuntime(JSRuntime *
     incrementalState(gc::NO_INCREMENTAL),
     lastMarkSlice(false),
     sweepOnBackgroundThread(false),
     foundBlackGrayEdges(false),
     sweepingZones(nullptr),
     zoneGroupIndex(0),
     zoneGroups(nullptr),
     currentZoneGroup(nullptr),
-    sweepPhase(0),
     sweepZone(nullptr),
     sweepKindIndex(0),
     abortSweepAfterCurrentGroup(false),
     arenasAllocatedDuringSweep(nullptr),
 #ifdef DEBUG
     markingValidator(nullptr),
 #endif
     interFrameGC(0),
@@ -1065,17 +1063,17 @@ js::gc::GCRuntime::GCRuntime(JSRuntime *
     zealMode(0),
     zealFrequency(0),
     nextScheduled(0),
     deterministicOnly(false),
     incrementalLimit(0),
 #endif
     validate(true),
     fullCompartmentChecks(false),
-    callback(nullptr),
+    gcCallback(nullptr),
     sliceCallback(nullptr),
     finalizeCallback(nullptr),
     mallocBytes(0),
     mallocGCTriggered(false),
     scriptAndCountsVector(nullptr),
     alwaysPreserveCode(false),
 #ifdef DEBUG
     noGCOrAllocationCheck(0),
@@ -1086,39 +1084,45 @@ js::gc::GCRuntime::GCRuntime(JSRuntime *
 {
 }
 
 #ifdef JS_GC_ZEAL
 
 extern void
 js::SetGCZeal(JSRuntime *rt, uint8_t zeal, uint32_t frequency)
 {
-    if (rt->gc.verifyPreData)
+    rt->gc.setGCZeal(zeal, frequency);
+}
+
+void
+GCRuntime::setGCZeal(uint8_t zeal, uint32_t frequency)
+{
+    if (verifyPreData)
         VerifyBarriers(rt, PreBarrierVerifier);
-    if (rt->gc.verifyPostData)
+    if (verifyPostData)
         VerifyBarriers(rt, PostBarrierVerifier);
 
 #ifdef JSGC_GENERATIONAL
-    if (rt->gc.zealMode == ZealGenerationalGCValue) {
-        MinorGC(rt, JS::gcreason::DEBUG_GC);
-        rt->gc.nursery.leaveZealMode();
+    if (zealMode == ZealGenerationalGCValue) {
+        minorGC(JS::gcreason::DEBUG_GC);
+        nursery.leaveZealMode();
     }
 
     if (zeal == ZealGenerationalGCValue)
-        rt->gc.nursery.enterZealMode();
+        nursery.enterZealMode();
 #endif
 
     bool schedule = zeal >= js::gc::ZealAllocValue;
-    rt->gc.zealMode = zeal;
-    rt->gc.zealFrequency = frequency;
-    rt->gc.nextScheduled = schedule ? frequency : 0;
-}
-
-static bool
-InitGCZeal(JSRuntime *rt)
+    zealMode = zeal;
+    zealFrequency = frequency;
+    nextScheduled = schedule ? frequency : 0;
+}
+
+bool
+GCRuntime::initGCZeal()
 {
     const char *env = getenv("JS_GC_ZEAL");
     if (!env)
         return true;
 
     int zeal = -1;
     int frequency = JS_DEFAULT_ZEAL_FREQ;
     if (strcmp(env, "help") != 0) {
@@ -1144,148 +1148,170 @@ InitGCZeal(JSRuntime *rt)
                 "  9: Incremental GC in two slices: 1) mark all 2) new marking and finish\n"
                 " 10: Incremental GC in multiple slices\n"
                 " 11: Verify post write barriers between instructions\n"
                 " 12: Verify post write barriers between paints\n"
                 " 13: Purge analysis state every F allocations (default: 100)\n");
         return false;
     }
 
-    SetGCZeal(rt, zeal, frequency);
+    setGCZeal(zeal, frequency);
     return true;
 }
 
 #endif
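/*
 * Example (hedged): with initGCZeal reading JS_GC_ZEAL at startup, setting
 * JS_GC_ZEAL=10,100 in the environment is roughly equivalent to calling
 *
 *     rt->gc.setGCZeal(10, 100);
 *
 * i.e. zeal mode 10 (incremental GC in multiple slices, per the help text
 * above) scheduled every 100 allocations.
 */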
 
 /* Lifetime for type sets attached to scripts containing observed types. */
 static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
 
 bool
-js_InitGC(JSRuntime *rt, uint32_t maxbytes)
-{
-    if (!rt->gc.chunkSet.init(INITIAL_CHUNK_CAPACITY))
+GCRuntime::init(uint32_t maxbytes)
+{
+    if (!chunkSet.init(INITIAL_CHUNK_CAPACITY))
         return false;
 
-    if (!rt->gc.rootsHash.init(256))
+    if (!rootsHash.init(256))
         return false;
 
-    if (!rt->gc.helperThread.init())
+    if (!helperThread.init())
         return false;
 
     /*
      * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
      * for default backward API compatibility.
      */
-    rt->gc.maxBytes = maxbytes;
+    maxBytes = maxbytes;
     rt->setGCMaxMallocBytes(maxbytes);
 
 #ifndef JS_MORE_DETERMINISTIC
-    rt->gc.jitReleaseTime = PRMJ_Now() + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
+    jitReleaseTime = PRMJ_Now() + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
 #endif
 
 #ifdef JSGC_GENERATIONAL
-    if (!rt->gc.nursery.init())
+    if (!nursery.init())
         return false;
 
-    if (!rt->gc.storeBuffer.enable())
+    if (!storeBuffer.enable())
         return false;
 #endif
 
 #ifdef JS_GC_ZEAL
-    if (!InitGCZeal(rt))
+    if (!initGCZeal())
         return false;
 #endif
 
+    if (!marker.init(mode))
+        return false;
+
     return true;
 }
 
-static void
-RecordNativeStackTopForGC(JSRuntime *rt)
-{
-    ConservativeGCData *cgcd = &rt->gc.conservativeGC;
-
+void
+GCRuntime::recordNativeStackTopForGC()
+{
 #ifdef JS_THREADSAFE
     /* Record the stack top here only if we are called from a request. */
     if (!rt->requestDepth)
         return;
 #endif
-    cgcd->recordStackTop();
+    conservativeGC.recordStackTop();
 }
 
 void
-js_FinishGC(JSRuntime *rt)
+GCRuntime::finish()
 {
     /*
      * Wait until the background finalization stops and the helper thread
      * shuts down before we forcefully release any remaining GC memory.
      */
-    rt->gc.helperThread.finish();
+    helperThread.finish();
 
 #ifdef JS_GC_ZEAL
     /* Free memory associated with GC verification. */
     FinishVerifier(rt);
 #endif
 
     /* Delete all remaining zones. */
     if (rt->gcInitialized) {
         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
             for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
                 js_delete(comp.get());
             js_delete(zone.get());
         }
     }
 
-    rt->gc.zones.clear();
-
-    rt->gc.systemAvailableChunkListHead = nullptr;
-    rt->gc.userAvailableChunkListHead = nullptr;
-    if (rt->gc.chunkSet.initialized()) {
-        for (GCChunkSet::Range r(rt->gc.chunkSet.all()); !r.empty(); r.popFront())
+    zones.clear();
+
+    systemAvailableChunkListHead = nullptr;
+    userAvailableChunkListHead = nullptr;
+    if (chunkSet.initialized()) {
+        for (GCChunkSet::Range r(chunkSet.all()); !r.empty(); r.popFront())
             Chunk::release(rt, r.front());
-        rt->gc.chunkSet.clear();
+        chunkSet.clear();
     }
 
-    rt->gc.chunkPool.expireAndFree(rt, true);
-
-    if (rt->gc.rootsHash.initialized())
-        rt->gc.rootsHash.clear();
-
+    chunkPool.expireAndFree(rt, true);
+
+    if (rootsHash.initialized())
+        rootsHash.clear();
+
+    FinishPersistentRootedChains(rt);
+}
+
+void
+js::gc::FinishPersistentRootedChains(JSRuntime *rt)
+{
+    /* The lists of persistent roots are stored on the shadow runtime. */
     rt->functionPersistentRooteds.clear();
     rt->idPersistentRooteds.clear();
     rt->objectPersistentRooteds.clear();
     rt->scriptPersistentRooteds.clear();
     rt->stringPersistentRooteds.clear();
     rt->valuePersistentRooteds.clear();
 }
 
 template <typename T> struct BarrierOwner {};
 template <typename T> struct BarrierOwner<T *> { typedef T result; };
 template <> struct BarrierOwner<Value> { typedef HeapValue result; };
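/*
 * Illustration (editorial): BarrierOwner maps a root's payload type to the
 * type whose pre-barrier must fire, letting addRoot below write
 * BarrierOwner<T>::result::writeBarrierPre(*rp) uniformly:
 *
 *     BarrierOwner<JSObject *>::result  ==  JSObject   (T * case)
 *     BarrierOwner<Value>::result       ==  HeapValue  (specialization)
 */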
 
 template <typename T>
-static bool
-AddRoot(JSRuntime *rt, T *rp, const char *name, JSGCRootType rootType)
+bool
+GCRuntime::addRoot(T *rp, const char *name, JSGCRootType rootType)
 {
     /*
      * Sometimes Firefox will hold weak references to objects and then convert
      * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
      * or ModifyBusyCount in workers). We need a read barrier to cover these
      * cases.
      */
     if (rt->gc.incrementalState != NO_INCREMENTAL)
         BarrierOwner<T>::result::writeBarrierPre(*rp);
 
     return rt->gc.rootsHash.put((void *)rp, RootInfo(name, rootType));
 }
 
+void
+GCRuntime::removeRoot(void *rp)
+{
+    rootsHash.remove(rp);
+    poke = true;
+}
+
+template <typename T>
+static bool
+AddRoot(JSRuntime *rt, T *rp, const char *name, JSGCRootType rootType)
+{
+    return rt->gc.addRoot(rp, name, rootType);
+}
+
 template <typename T>
 static bool
 AddRoot(JSContext *cx, T *rp, const char *name, JSGCRootType rootType)
 {
-    bool ok = AddRoot(cx->runtime(), rp, name, rootType);
+    bool ok = cx->runtime()->gc.addRoot(rp, name, rootType);
     if (!ok)
         JS_ReportOutOfMemory(cx);
     return ok;
 }
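/*
 * Illustrative call chain after this change:
 *
 *     js::AddValueRoot(cx, vp, name)
 *       -> AddRoot(cx, vp, name, rootType)               // wrapper above
 *       -> cx->runtime()->gc.addRoot(vp, name, rootType)
 *       -> rootsHash.put((void *)vp, RootInfo(name, rootType))
 */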
 
 bool
 js::AddValueRoot(JSContext *cx, Value *vp, const char *name)
 {
@@ -1332,18 +1358,17 @@ extern JS_FRIEND_API(void)
 js::RemoveRawValueRoot(JSContext *cx, Value *vp)
 {
     RemoveRoot(cx->runtime(), vp);
 }
 
 void
 js::RemoveRoot(JSRuntime *rt, void *rp)
 {
-    rt->gc.rootsHash.remove(rp);
-    rt->gc.poke = true;
+    rt->gc.removeRoot(rp);
 }
 
 typedef RootedValueMap::Range RootRange;
 typedef RootedValueMap::Entry RootEntry;
 typedef RootedValueMap::Enum RootEnum;
 
 static size_t
 ComputeTriggerBytes(Zone *zone, size_t lastBytes, size_t maxBytes, JSGCInvocationKind gckind)
@@ -1453,37 +1478,38 @@ inline void *
 ArenaLists::allocateFromArenaInline(Zone *zone, AllocKind thingKind)
 {
     /*
      * Parallel JS Note:
      *
      * This function can be called from parallel threads all of which
      * are associated with the same compartment. In that case, each
      * thread will have a distinct ArenaLists.  Therefore, whenever we
-     * fall through to PickChunk() we must be sure that we are holding
+     * fall through to pickChunk() we must be sure that we are holding
      * a lock.
      */
 
     Chunk *chunk = nullptr;
 
     ArenaList *al = &arenaLists[thingKind];
     AutoLockGC maybeLock;
 
 #ifdef JS_THREADSAFE
     volatile uintptr_t *bfs = &backgroundFinalizeState[thingKind];
     if (*bfs != BFS_DONE) {
         /*
          * We cannot search the arena list for free things while background
          * finalization runs and can modify it at any moment. So we always
          * allocate a new arena in that case.
          */
-        maybeLock.lock(zone->runtimeFromAnyThread());
+        JSRuntime *rt = zone->runtimeFromAnyThread();
+        maybeLock.lock(rt);
         if (*bfs == BFS_RUN) {
             JS_ASSERT(al->isCursorAtEnd());
-            chunk = PickChunk(zone);
+            chunk = rt->gc.pickChunk(zone);
             if (!chunk) {
                 /*
                 * Let the caller wait for the background allocation to
                  * finish and restart the allocation attempt.
                  */
                 return nullptr;
             }
         } else if (*bfs == BFS_JUST_FINISHED) {
@@ -1521,19 +1547,20 @@ ArenaLists::allocateFromArenaInline(Zone
                 } else if (zone->isGCSweeping()) {
                     PushArenaAllocatedDuringSweep(zone->runtimeFromMainThread(), aheader);
                 }
             }
             return freeLists[thingKind].infallibleAllocate(Arena::thingSize(thingKind));
         }
 
         /* Make sure we hold the GC lock before we call PickChunk. */
+        JSRuntime *rt = zone->runtimeFromAnyThread();
         if (!maybeLock.locked())
-            maybeLock.lock(zone->runtimeFromAnyThread());
-        chunk = PickChunk(zone);
+            maybeLock.lock(rt);
+        chunk = rt->gc.pickChunk(zone);
         if (!chunk)
             return nullptr;
     }
 
     /*
      * While we still hold the GC lock get an arena from some chunk, mark it
      * as full as its single free span is moved to the free lists, and insert
      * it into the list as a fully allocated arena.
@@ -1930,30 +1957,36 @@ SliceBudget::checkOverBudget()
 }
 
 void
 js::MarkCompartmentActive(InterpreterFrame *fp)
 {
     fp->script()->compartment()->zone()->active = true;
 }
 
-static void
-RequestInterrupt(JSRuntime *rt, JS::gcreason::Reason reason)
-{
-    if (rt->gc.isNeeded)
+void
+GCRuntime::requestInterrupt(JS::gcreason::Reason reason)
+{
+    if (isNeeded)
         return;
 
-    rt->gc.isNeeded = true;
-    rt->gc.triggerReason = reason;
+    isNeeded = true;
+    triggerReason = reason;
     rt->requestInterrupt(JSRuntime::RequestInterruptMainThread);
 }
 
 bool
 js::TriggerGC(JSRuntime *rt, JS::gcreason::Reason reason)
 {
+    return rt->gc.triggerGC(reason);
+}
+
+bool
+GCRuntime::triggerGC(JS::gcreason::Reason reason)
+{
     /* Wait till end of parallel section to trigger GC. */
     if (InParallelSection()) {
         ForkJoinContext::current()->requestGC(reason);
         return true;
     }
 
     /* Don't trigger GCs when allocating under the interrupt callback lock. */
     if (rt->currentThreadOwnsInterruptLock())
@@ -1961,106 +1994,118 @@ js::TriggerGC(JSRuntime *rt, JS::gcreaso
 
     JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
     /* GC is already running. */
     if (rt->isHeapCollecting())
         return false;
 
     JS::PrepareForFullGC(rt);
-    RequestInterrupt(rt, reason);
+    requestInterrupt(reason);
     return true;
 }
 
 bool
 js::TriggerZoneGC(Zone *zone, JS::gcreason::Reason reason)
 {
+    return zone->runtimeFromMainThread()->gc.triggerZoneGC(zone, reason);
+}
+
+bool
+GCRuntime::triggerZoneGC(Zone *zone, JS::gcreason::Reason reason)
+{
     /*
      * If parallel threads are running, wait till they
      * are stopped to trigger GC.
      */
     if (InParallelSection()) {
         ForkJoinContext::current()->requestZoneGC(zone, reason);
         return true;
     }
 
     /* Zones in use by a thread with an exclusive context can't be collected. */
     if (zone->usedByExclusiveThread)
         return false;
 
-    JSRuntime *rt = zone->runtimeFromMainThread();
-
     /* Don't trigger GCs when allocating under the interrupt callback lock. */
     if (rt->currentThreadOwnsInterruptLock())
         return false;
 
     /* GC is already running. */
     if (rt->isHeapCollecting())
         return false;
 
-    if (rt->gcZeal() == ZealAllocValue) {
+#ifdef JS_GC_ZEAL
+    if (zealMode == ZealAllocValue) {
         TriggerGC(rt, reason);
         return true;
     }
+#endif
 
     if (rt->isAtomsZone(zone)) {
         /* We can't do a zone GC of the atoms compartment. */
         TriggerGC(rt, reason);
         return true;
     }
 
     PrepareZoneForGC(zone);
-    RequestInterrupt(rt, reason);
+    requestInterrupt(reason);
     return true;
 }
 
 void
 js::MaybeGC(JSContext *cx)
 {
-    JSRuntime *rt = cx->runtime();
+    cx->runtime()->gc.maybeGC(cx->zone());
+}
+
+void
+GCRuntime::maybeGC(Zone *zone)
+{
     JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
-    if (rt->gcZeal() == ZealAllocValue || rt->gcZeal() == ZealPokeValue) {
+#ifdef JS_GC_ZEAL
+    if (zealMode == ZealAllocValue || zealMode == ZealPokeValue) {
         JS::PrepareForFullGC(rt);
         GC(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
         return;
     }
-
-    if (rt->gc.isNeeded) {
+#endif
+
+    if (isNeeded) {
         GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
         return;
     }
 
-    double factor = rt->gc.highFrequencyGC ? 0.85 : 0.9;
-    Zone *zone = cx->zone();
+    double factor = highFrequencyGC ? 0.85 : 0.9;
     if (zone->gcBytes > 1024 * 1024 &&
         zone->gcBytes >= factor * zone->gcTriggerBytes &&
-        rt->gc.incrementalState == NO_INCREMENTAL &&
-        !rt->gc.helperThread.sweeping())
+        incrementalState == NO_INCREMENTAL &&
+        !helperThread.sweeping())
     {
         PrepareZoneForGC(zone);
         GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
         return;
     }
 
 #ifndef JS_MORE_DETERMINISTIC
     /*
      * Access to the counters and, on 32 bit, setting gcNextFullGCTime below
      * is not atomic and a race condition could trigger or suppress the GC. We
      * tolerate this.
      */
     int64_t now = PRMJ_Now();
-    if (rt->gc.nextFullGCTime && rt->gc.nextFullGCTime <= now) {
-        if (rt->gc.chunkAllocationSinceLastGC ||
-            rt->gc.numArenasFreeCommitted > rt->gc.decommitThreshold)
+    if (nextFullGCTime && nextFullGCTime <= now) {
+        if (chunkAllocationSinceLastGC ||
+            numArenasFreeCommitted > decommitThreshold)
         {
             JS::PrepareForFullGC(rt);
             GCSlice(rt, GC_SHRINK, JS::gcreason::MAYBEGC);
         } else {
-            rt->gc.nextFullGCTime = now + GC_IDLE_FULL_SPAN;
+            nextFullGCTime = now + GC_IDLE_FULL_SPAN;
         }
     }
 #endif
 }
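/*
 * Worked example of the trigger check above (illustrative numbers): with
 * highFrequencyGC set, factor = 0.85, so a zone with gcTriggerBytes = 20 MB
 * passes once gcBytes reaches 17 MB (and exceeds the 1 MB floor), starting
 * an incremental GC_NORMAL slice for that zone.
 */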
 
 static void
 DecommitArenasFromAvailableList(JSRuntime *rt, Chunk **availableListHeadp)
 {
@@ -2372,17 +2417,17 @@ GCHelperThread::threadLoop()
                     chunk = Chunk::allocate(rt);
                 }
 
                 /* OOM stops the background allocation. */
                 if (!chunk)
                     break;
                 JS_ASSERT(chunk->info.numArenasFreeCommitted == 0);
                 rt->gc.chunkPool.put(chunk);
-            } while (state == ALLOCATING && rt->gc.chunkPool.wantBackgroundAllocation(rt));
+            } while (state == ALLOCATING && rt->gc.wantBackgroundAllocation());
             if (state == ALLOCATING)
                 state = IDLE;
             break;
           }
           case CANCEL_ALLOCATION:
             state = IDLE;
             PR_NotifyAllCondVar(done);
             break;
@@ -2553,49 +2598,54 @@ GCHelperThread::onBackgroundThread()
 {
 #ifdef JS_THREADSAFE
     return PR_GetCurrentThread() == getThread();
 #else
     return false;
 #endif
 }
 
-static bool
-ReleaseObservedTypes(JSRuntime *rt)
-{
-    bool releaseTypes = rt->gcZeal() != 0;
+bool
+GCRuntime::releaseObservedTypes()
+{
+    bool releaseTypes = false;
+
+#ifdef JS_GC_ZEAL
+    if (zealMode != 0)
+        releaseTypes = true;
+#endif
 
 #ifndef JS_MORE_DETERMINISTIC
     int64_t now = PRMJ_Now();
-    if (now >= rt->gc.jitReleaseTime)
+    if (now >= jitReleaseTime)
         releaseTypes = true;
     if (releaseTypes)
-        rt->gc.jitReleaseTime = now + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
+        jitReleaseTime = now + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
 #endif
 
     return releaseTypes;
 }
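/*
 * Note (editorial): JIT_SCRIPT_RELEASE_TYPES_INTERVAL is expressed in
 * microseconds to match PRMJ_Now(); 60 * 1000 * 1000 us = 60 seconds, so
 * outside zeal builds observed type sets are released at most once a minute.
 */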
 
 /*
  * It's simpler if we preserve the invariant that every zone has at least one
  * compartment. If we know we're deleting the entire zone, then
  * SweepCompartments is allowed to delete all compartments. In this case,
  * |keepAtleastOne| is false. If some objects remain in the zone so that it
  * cannot be deleted, then we set |keepAtleastOne| to true, which prohibits
  * SweepCompartments from deleting every compartment. Instead, it preserves an
  * arbitrary compartment in the zone.
  */
-static void
-SweepCompartments(FreeOp *fop, Zone *zone, bool keepAtleastOne, bool lastGC)
-{
-    JSRuntime *rt = zone->runtimeFromMainThread();
+void
+Zone::sweepCompartments(FreeOp *fop, bool keepAtleastOne, bool lastGC)
+{
+    JSRuntime *rt = runtimeFromMainThread();
     JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback;
 
-    JSCompartment **read = zone->compartments.begin();
-    JSCompartment **end = zone->compartments.end();
+    JSCompartment **read = compartments.begin();
+    JSCompartment **end = compartments.end();
     JSCompartment **write = read;
     bool foundOne = false;
     while (read < end) {
         JSCompartment *comp = *read++;
         JS_ASSERT(!rt->isAtomsCompartment(comp));
 
         /*
          * Don't delete the last compartment if all the ones before it were
@@ -2608,53 +2658,52 @@ SweepCompartments(FreeOp *fop, Zone *zon
             if (comp->principals)
                 JS_DropPrincipals(rt, comp->principals);
             js_delete(comp);
         } else {
             *write++ = comp;
             foundOne = true;
         }
     }
-    zone->compartments.resize(write - zone->compartments.begin());
-    JS_ASSERT_IF(keepAtleastOne, !zone->compartments.empty());
-}
-
-static void
-SweepZones(FreeOp *fop, bool lastGC)
-{
-    JSRuntime *rt = fop->runtime();
+    compartments.resize(write - compartments.begin());
+    JS_ASSERT_IF(keepAtleastOne, !compartments.empty());
+}
+
+void
+GCRuntime::sweepZones(FreeOp *fop, bool lastGC)
+{
     JSZoneCallback callback = rt->destroyZoneCallback;
 
     /* Skip the atomsCompartment zone. */
-    Zone **read = rt->gc.zones.begin() + 1;
-    Zone **end = rt->gc.zones.end();
+    Zone **read = zones.begin() + 1;
+    Zone **end = zones.end();
     Zone **write = read;
-    JS_ASSERT(rt->gc.zones.length() >= 1);
-    JS_ASSERT(rt->isAtomsZone(rt->gc.zones[0]));
+    JS_ASSERT(zones.length() >= 1);
+    JS_ASSERT(rt->isAtomsZone(zones[0]));
 
     while (read < end) {
         Zone *zone = *read++;
 
         if (zone->wasGCStarted()) {
             if ((zone->allocator.arenas.arenaListsAreEmpty() && !zone->hasMarkedCompartments()) ||
                 lastGC)
             {
                 zone->allocator.arenas.checkEmptyFreeLists();
                 if (callback)
                     callback(zone);
-                SweepCompartments(fop, zone, false, lastGC);
+                zone->sweepCompartments(fop, false, lastGC);
                 JS_ASSERT(zone->compartments.empty());
                 fop->delete_(zone);
                 continue;
             }
-            SweepCompartments(fop, zone, true, lastGC);
+            zone->sweepCompartments(fop, true, lastGC);
         }
         *write++ = zone;
     }
-    rt->gc.zones.resize(write - rt->gc.zones.begin());
+    zones.resize(write - zones.begin());
 }
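/*
 * Both sweepCompartments and sweepZones above use the same in-place
 * filtering idiom; a generic sketch of it (illustrative only):
 *
 *     T **read = vec.begin(), **end = vec.end(), **write = read;
 *     while (read < end) {
 *         T *elem = *read++;
 *         if (keep(elem))
 *             *write++ = elem;   // survivors compact to the front
 *         else
 *             destroy(elem);     // e.g. js_delete / fop->delete_
 *     }
 *     vec.resize(write - vec.begin());
 */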
 
 static void
 PurgeRuntime(JSRuntime *rt)
 {
     for (GCCompartmentsIter comp(rt); !comp.done(); comp.next())
         comp->purge();
 
@@ -2667,24 +2716,23 @@ PurgeRuntime(JSRuntime *rt)
     rt->nativeIterCache.purge();
     rt->sourceDataCache.purge();
     rt->evalCache.clear();
 
     if (!rt->hasActiveCompilations())
         rt->parseMapPool().purgeAll();
 }
 
-static bool
-ShouldPreserveJITCode(JSCompartment *comp, int64_t currentTime)
-{
-    JSRuntime *rt = comp->runtimeFromMainThread();
-    if (rt->gc.shouldCleanUpEverything)
+bool
+GCRuntime::shouldPreserveJITCode(JSCompartment *comp, int64_t currentTime)
+{
+    if (shouldCleanUpEverything)
         return false;
 
-    if (rt->gc.alwaysPreserveCode)
+    if (alwaysPreserveCode)
         return true;
     if (comp->lastAnimationTime + PRMJ_USEC_PER_SEC >= currentTime)
         return true;
 
     return false;
 }
 
 #ifdef DEBUG
@@ -2761,20 +2809,20 @@ CheckCompartmentCallback(JSTracer *trcAr
     if (comp && trc->compartment) {
         CheckCompartment(trc, comp, thing, kind);
     } else {
         JS_ASSERT(thing->tenuredZone() == trc->zone ||
                   trc->runtime()->isAtomsZone(thing->tenuredZone()));
     }
 }
 
-static void
-CheckForCompartmentMismatches(JSRuntime *rt)
-{
-    if (rt->gc.disableStrictProxyCheckingCount)
+void
+GCRuntime::checkForCompartmentMismatches()
+{
+    if (disableStrictProxyCheckingCount)
         return;
 
     CompartmentCheckTracer trc(rt, CheckCompartmentCallback);
     for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
         trc.zone = zone;
         for (size_t thingKind = 0; thingKind < FINALIZE_LAST; thingKind++) {
             for (ZoneCellIterUnderGC i(zone, AllocKind(thingKind)); !i.done(); i.next()) {
                 trc.src = i.getCell();
@@ -2782,55 +2830,55 @@ CheckForCompartmentMismatches(JSRuntime 
                 trc.compartment = CompartmentOfCell(trc.src, trc.srcKind);
                 JS_TraceChildren(&trc, trc.src, trc.srcKind);
             }
         }
     }
 }
 #endif
 
-static bool
-BeginMarkPhase(JSRuntime *rt)
+bool
+GCRuntime::beginMarkPhase()
 {
     int64_t currentTime = PRMJ_Now();
 
 #ifdef DEBUG
-    if (rt->gc.fullCompartmentChecks)
-        CheckForCompartmentMismatches(rt);
+    if (fullCompartmentChecks)
+        checkForCompartmentMismatches();
 #endif
 
-    rt->gc.isFull = true;
+    isFull = true;
     bool any = false;
 
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         /* Assert that zone state is as we expect */
         JS_ASSERT(!zone->isCollecting());
         JS_ASSERT(!zone->compartments.empty());
         for (unsigned i = 0; i < FINALIZE_LIMIT; ++i)
             JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]);
 
         /* Set up which zones will be collected. */
         if (zone->isGCScheduled()) {
             if (!rt->isAtomsZone(zone)) {
                 any = true;
                 zone->setGCState(Zone::Mark);
             }
         } else {
-            rt->gc.isFull = false;
+            isFull = false;
         }
 
         zone->scheduledForDestruction = false;
         zone->maybeAlive = false;
         zone->setPreservingCode(false);
     }
 
     for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next()) {
         JS_ASSERT(c->gcLiveArrayBuffers.empty());
         c->marked = false;
-        if (ShouldPreserveJITCode(c, currentTime))
+        if (shouldPreserveJITCode(c, currentTime))
             c->zone()->setPreservingCode(true);
     }
 
     if (!rt->gc.shouldCleanUpEverything) {
 #ifdef JS_ION
         if (JSCompartment *comp = jit::TopmostIonActivationCompartment(rt))
             comp->zone()->setPreservingCode(true);
 #endif
@@ -2841,17 +2889,17 @@ BeginMarkPhase(JSRuntime *rt)
      * zones that are not being collected, we are not allowed to collect
      * atoms. Otherwise, the non-collected zones could contain pointers
      * to atoms that we would miss.
      *
      * keepAtoms() will only change on the main thread, which we are currently
      * on. If the value of keepAtoms() changes between GC slices, then we'll
      * cancel the incremental GC. See IsIncrementalGCSafe.
      */
-    if (rt->gc.isFull && !rt->keepAtoms()) {
+    if (isFull && !rt->keepAtoms()) {
         Zone *atomsZone = rt->atomsCompartment()->zone();
         if (atomsZone->isGCScheduled()) {
             JS_ASSERT(!atomsZone->isCollecting());
             atomsZone->setGCState(Zone::Mark);
             any = true;
         }
     }
 
@@ -2861,72 +2909,72 @@ BeginMarkPhase(JSRuntime *rt)
 
     /*
      * At the end of each incremental slice, we call prepareForIncrementalGC,
      * which marks objects in all arenas that we're currently allocating
      * into. This can cause leaks if unreachable objects are in these
      * arenas. This purge call ensures that we only mark arenas that have had
      * allocations after the incremental GC started.
      */
-    if (rt->gc.isIncremental) {
+    if (isIncremental) {
         for (GCZonesIter zone(rt); !zone.done(); zone.next())
             zone->allocator.arenas.purge();
     }
 
-    rt->gc.marker.start();
-    JS_ASSERT(!rt->gc.marker.callback);
-    JS_ASSERT(IS_GC_MARKING_TRACER(&rt->gc.marker));
+    marker.start();
+    JS_ASSERT(!marker.callback);
+    JS_ASSERT(IS_GC_MARKING_TRACER(&marker));
 
     /* For non-incremental GC the following sweep discards the jit code. */
-    if (rt->gc.isIncremental) {
+    if (isIncremental) {
         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
-            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_MARK_DISCARD_CODE);
+            gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_DISCARD_CODE);
             zone->discardJitCode(rt->defaultFreeOp());
         }
     }
 
-    GCMarker *gcmarker = &rt->gc.marker;
-
-    rt->gc.startNumber = rt->gc.number;
+    GCMarker *gcmarker = &marker;
+
+    startNumber = number;
 
     /*
      * We must purge the runtime at the beginning of an incremental GC. The
      * danger if we purge later is that the snapshot invariant of incremental
      * GC will be broken, as follows. If some object is reachable only through
      * some cache (say the dtoaCache) then it will not be part of the snapshot.
      * If we purge after root marking, then the mutator could obtain a pointer
      * to the object and start using it. This object might never be marked, so
      * a GC hazard would exist.
      */
     {
-        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_PURGE);
+        gcstats::AutoPhase ap(stats, gcstats::PHASE_PURGE);
         PurgeRuntime(rt);
     }
 
     /*
      * Mark phase.
      */
-    gcstats::AutoPhase ap1(rt->gc.stats, gcstats::PHASE_MARK);
-    gcstats::AutoPhase ap2(rt->gc.stats, gcstats::PHASE_MARK_ROOTS);
+    gcstats::AutoPhase ap1(stats, gcstats::PHASE_MARK);
+    gcstats::AutoPhase ap2(stats, gcstats::PHASE_MARK_ROOTS);
 
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
         /* Unmark everything in the zones being collected. */
         zone->allocator.arenas.unmarkAll();
     }
 
     for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
         /* Reset weak map list for the compartments being collected. */
         WeakMapBase::resetCompartmentWeakMapList(c);
     }
 
-    if (rt->gc.isFull)
+    if (isFull)
         UnmarkScriptData(rt);
 
     MarkRuntime(gcmarker);
-    if (rt->gc.isIncremental)
+    if (isIncremental)
         BufferGrayRoots(gcmarker);
 
     /*
      * This code ensures that if a zone is "dead", then it will be
      * collected in this GC. A zone is considered dead if its maybeAlive
      * flag is false. The maybeAlive flag is set if:
      *   (1) the zone has incoming cross-compartment edges, or
      *   (2) an object in the zone was marked during root marking, either
@@ -2966,121 +3014,118 @@ BeginMarkPhase(JSRuntime *rt)
      * For black roots, code in gc/Marking.cpp will already have set maybeAlive
      * during MarkRuntime.
      */
 
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
         if (!zone->maybeAlive && !rt->isAtomsZone(zone))
             zone->scheduledForDestruction = true;
     }
-    rt->gc.foundBlackGrayEdges = false;
+    foundBlackGrayEdges = false;
 
     return true;
 }
 
 template <class CompartmentIterT>
-static void
-MarkWeakReferences(JSRuntime *rt, gcstats::Phase phase)
-{
-    GCMarker *gcmarker = &rt->gc.marker;
-    JS_ASSERT(gcmarker->isDrained());
-
-    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_MARK);
-    gcstats::AutoPhase ap1(rt->gc.stats, phase);
+void
+GCRuntime::markWeakReferences(gcstats::Phase phase)
+{
+    JS_ASSERT(marker.isDrained());
+
+    gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_MARK);
+    gcstats::AutoPhase ap1(stats, phase);
 
     for (;;) {
         bool markedAny = false;
         for (CompartmentIterT c(rt); !c.done(); c.next()) {
-            markedAny |= WatchpointMap::markCompartmentIteratively(c, gcmarker);
-            markedAny |= WeakMapBase::markCompartmentIteratively(c, gcmarker);
+            markedAny |= WatchpointMap::markCompartmentIteratively(c, &marker);
+            markedAny |= WeakMapBase::markCompartmentIteratively(c, &marker);
         }
-        markedAny |= Debugger::markAllIteratively(gcmarker);
+        markedAny |= Debugger::markAllIteratively(&marker);
 
         if (!markedAny)
             break;
 
         SliceBudget budget;
-        gcmarker->drainMarkStack(budget);
+        marker.drainMarkStack(budget);
     }
-    JS_ASSERT(gcmarker->isDrained());
-}
-
-static void
-MarkWeakReferencesInCurrentGroup(JSRuntime *rt, gcstats::Phase phase)
-{
-    MarkWeakReferences<GCCompartmentGroupIter>(rt, phase);
+    JS_ASSERT(marker.isDrained());
+}
+
+void
+GCRuntime::markWeakReferencesInCurrentGroup(gcstats::Phase phase)
+{
+    markWeakReferences<GCCompartmentGroupIter>(phase);
 }
 
 template <class ZoneIterT, class CompartmentIterT>
-static void
-MarkGrayReferences(JSRuntime *rt)
-{
-    GCMarker *gcmarker = &rt->gc.marker;
-
+void
+GCRuntime::markGrayReferences()
+{
     {
-        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_MARK);
-        gcstats::AutoPhase ap1(rt->gc.stats, gcstats::PHASE_SWEEP_MARK_GRAY);
-        gcmarker->setMarkColorGray();
-        if (gcmarker->hasBufferedGrayRoots()) {
+        gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_MARK);
+        gcstats::AutoPhase ap1(stats, gcstats::PHASE_SWEEP_MARK_GRAY);
+        marker.setMarkColorGray();
+        if (marker.hasBufferedGrayRoots()) {
             for (ZoneIterT zone(rt); !zone.done(); zone.next())
-                gcmarker->markBufferedGrayRoots(zone);
+                marker.markBufferedGrayRoots(zone);
         } else {
-            JS_ASSERT(!rt->gc.isIncremental);
-            if (JSTraceDataOp op = rt->gc.grayRootTracer.op)
-                (*op)(gcmarker, rt->gc.grayRootTracer.data);
+            JS_ASSERT(!isIncremental);
+            if (JSTraceDataOp op = grayRootTracer.op)
+                (*op)(&marker, grayRootTracer.data);
         }
         SliceBudget budget;
-        gcmarker->drainMarkStack(budget);
+        marker.drainMarkStack(budget);
     }
 
-    MarkWeakReferences<CompartmentIterT>(rt, gcstats::PHASE_SWEEP_MARK_GRAY_WEAK);
-
-    JS_ASSERT(gcmarker->isDrained());
-
-    gcmarker->setMarkColorBlack();
-}
-
-static void
-MarkGrayReferencesInCurrentGroup(JSRuntime *rt)
-{
-    MarkGrayReferences<GCZoneGroupIter, GCCompartmentGroupIter>(rt);
+    markWeakReferences<CompartmentIterT>(gcstats::PHASE_SWEEP_MARK_GRAY_WEAK);
+
+    JS_ASSERT(marker.isDrained());
+
+    marker.setMarkColorBlack();
+}
+
+void
+GCRuntime::markGrayReferencesInCurrentGroup()
+{
+    markGrayReferences<GCZoneGroupIter, GCCompartmentGroupIter>();
 }
 
 #ifdef DEBUG
 
-static void
-MarkAllWeakReferences(JSRuntime *rt, gcstats::Phase phase)
-{
-    MarkWeakReferences<GCCompartmentsIter>(rt, phase);
-}
-
-static void
-MarkAllGrayReferences(JSRuntime *rt)
-{
-    MarkGrayReferences<GCZonesIter, GCCompartmentsIter>(rt);
+void
+GCRuntime::markAllWeakReferences(gcstats::Phase phase)
+{
+    markWeakReferences<GCCompartmentsIter>(phase);
+}
+
+void
+GCRuntime::markAllGrayReferences()
+{
+    markGrayReferences<GCZonesIter, GCCompartmentsIter>();
 }
 
 class js::gc::MarkingValidator
 {
   public:
-    MarkingValidator(JSRuntime *rt);
+    MarkingValidator(GCRuntime *gc);
     ~MarkingValidator();
     void nonIncrementalMark();
     void validate();
 
   private:
-    JSRuntime *runtime;
+    GCRuntime *gc;
     bool initialized;
 
     typedef HashMap<Chunk *, ChunkBitmap *, GCChunkHasher, SystemAllocPolicy> BitmapMap;
     BitmapMap map;
 };
 
-js::gc::MarkingValidator::MarkingValidator(JSRuntime *rt)
-  : runtime(rt),
+js::gc::MarkingValidator::MarkingValidator(GCRuntime *gc)
+  : gc(gc),
     initialized(false)
 {}
 
 js::gc::MarkingValidator::~MarkingValidator()
 {
     if (!map.initialized())
         return;
 
@@ -3096,20 +3141,21 @@ js::gc::MarkingValidator::nonIncremental
      * the results for later comparison.
      *
      * Currently this does not validate gray marking.
      */
 
     if (!map.init())
         return;
 
-    GCMarker *gcmarker = &runtime->gc.marker;
+    JSRuntime *runtime = gc->rt;
+    GCMarker *gcmarker = &gc->marker;
 
     /* Save existing mark bits. */
-    for (GCChunkSet::Range r(runtime->gc.chunkSet.all()); !r.empty(); r.popFront()) {
+    for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront()) {
         ChunkBitmap *bitmap = &r.front()->bitmap;
         ChunkBitmap *entry = js_new<ChunkBitmap>();
         if (!entry)
             return;
 
         memcpy((void *)entry->bitmap, (void *)bitmap->bitmap, sizeof(bitmap->bitmap));
         if (!map.putNew(r.front(), entry))
             return;
@@ -3137,88 +3183,88 @@ js::gc::MarkingValidator::nonIncremental
     initialized = true;
 
     for (GCCompartmentsIter c(runtime); !c.done(); c.next()) {
         WeakMapBase::resetCompartmentWeakMapList(c);
         ArrayBufferObject::resetArrayBufferList(c);
     }
 
     /* Re-do all the marking, but non-incrementally. */
-    js::gc::State state = runtime->gc.incrementalState;
-    runtime->gc.incrementalState = MARK_ROOTS;
+    js::gc::State state = gc->incrementalState;
+    gc->incrementalState = MARK_ROOTS;
 
     JS_ASSERT(gcmarker->isDrained());
     gcmarker->reset();
 
-    for (GCChunkSet::Range r(runtime->gc.chunkSet.all()); !r.empty(); r.popFront())
+    for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront())
         r.front()->bitmap.clear();
 
     {
-        gcstats::AutoPhase ap1(runtime->gc.stats, gcstats::PHASE_MARK);
-        gcstats::AutoPhase ap2(runtime->gc.stats, gcstats::PHASE_MARK_ROOTS);
+        gcstats::AutoPhase ap1(gc->stats, gcstats::PHASE_MARK);
+        gcstats::AutoPhase ap2(gc->stats, gcstats::PHASE_MARK_ROOTS);
         MarkRuntime(gcmarker, true);
     }
 
     {
-        gcstats::AutoPhase ap1(runtime->gc.stats, gcstats::PHASE_MARK);
+        gcstats::AutoPhase ap1(gc->stats, gcstats::PHASE_MARK);
         SliceBudget budget;
-        runtime->gc.incrementalState = MARK;
-        runtime->gc.marker.drainMarkStack(budget);
+        gc->incrementalState = MARK;
+        gc->marker.drainMarkStack(budget);
     }
 
-    runtime->gc.incrementalState = SWEEP;
+    gc->incrementalState = SWEEP;
     {
-        gcstats::AutoPhase ap(runtime->gc.stats, gcstats::PHASE_SWEEP);
-        MarkAllWeakReferences(runtime, gcstats::PHASE_SWEEP_MARK_WEAK);
+        gcstats::AutoPhase ap(gc->stats, gcstats::PHASE_SWEEP);
+        gc->markAllWeakReferences(gcstats::PHASE_SWEEP_MARK_WEAK);
 
         /* Update zone state for gray marking. */
         for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
             JS_ASSERT(zone->isGCMarkingBlack());
             zone->setGCState(Zone::MarkGray);
         }
 
-        MarkAllGrayReferences(runtime);
+        gc->markAllGrayReferences();
 
         /* Restore zone state. */
         for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
             JS_ASSERT(zone->isGCMarkingGray());
             zone->setGCState(Zone::Mark);
         }
     }
 
     /* Take a copy of the non-incremental mark state and restore the original. */
-    for (GCChunkSet::Range r(runtime->gc.chunkSet.all()); !r.empty(); r.popFront()) {
+    for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront()) {
         Chunk *chunk = r.front();
         ChunkBitmap *bitmap = &chunk->bitmap;
         ChunkBitmap *entry = map.lookup(chunk)->value();
         Swap(*entry, *bitmap);
     }
 
     for (GCCompartmentsIter c(runtime); !c.done(); c.next()) {
         WeakMapBase::resetCompartmentWeakMapList(c);
         ArrayBufferObject::resetArrayBufferList(c);
     }
     WeakMapBase::restoreCompartmentWeakMapLists(weakmaps);
     ArrayBufferObject::restoreArrayBufferLists(arrayBuffers);
 
-    runtime->gc.incrementalState = state;
+    gc->incrementalState = state;
 }
 
 void
 js::gc::MarkingValidator::validate()
 {
     /*
      * Validates the incremental marking for a single compartment by comparing
      * the mark bits to those previously recorded for a non-incremental mark.
      */
 
     if (!initialized)
         return;
 
-    for (GCChunkSet::Range r(runtime->gc.chunkSet.all()); !r.empty(); r.popFront()) {
+    for (GCChunkSet::Range r(gc->chunkSet.all()); !r.empty(); r.popFront()) {
         Chunk *chunk = r.front();
         BitmapMap::Ptr ptr = map.lookup(chunk);
         if (!ptr)
             continue;  /* Allocated after we did the non-incremental mark. */
 
         ChunkBitmap *bitmap = ptr->value();
         ChunkBitmap *incBitmap = &chunk->bitmap;
 
@@ -3255,43 +3301,43 @@ js::gc::MarkingValidator::validate()
                 thing += Arena::thingSize(kind);
             }
         }
     }
 }
 
 #endif
 
-static void
-ComputeNonIncrementalMarkingForValidation(JSRuntime *rt)
+void
+GCRuntime::computeNonIncrementalMarkingForValidation()
 {
 #ifdef DEBUG
-    JS_ASSERT(!rt->gc.markingValidator);
-    if (rt->gc.isIncremental && rt->gc.validate)
-        rt->gc.markingValidator = js_new<MarkingValidator>(rt);
-    if (rt->gc.markingValidator)
-        rt->gc.markingValidator->nonIncrementalMark();
+    JS_ASSERT(!markingValidator);
+    if (isIncremental && validate)
+        markingValidator = js_new<MarkingValidator>(this);
+    if (markingValidator)
+        markingValidator->nonIncrementalMark();
 #endif
 }
 
-static void
-ValidateIncrementalMarking(JSRuntime *rt)
+void
+GCRuntime::validateIncrementalMarking()
 {
 #ifdef DEBUG
-    if (rt->gc.markingValidator)
-        rt->gc.markingValidator->validate();
+    if (markingValidator)
+        markingValidator->validate();
 #endif
 }
 
-static void
-FinishMarkingValidation(JSRuntime *rt)
+void
+GCRuntime::finishMarkingValidation()
 {
 #ifdef DEBUG
-    js_delete(rt->gc.markingValidator);
-    rt->gc.markingValidator = nullptr;
+    js_delete(markingValidator);
+    markingValidator = nullptr;
 #endif
 }
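/*
 * How the three hooks above compose during a debug collection (call sites
 * are elsewhere in jsgc.cpp; sequence shown for orientation only):
 *
 *     computeNonIncrementalMarkingForValidation();  // snapshot + full re-mark
 *     ... incremental marking and sweeping proceed ...
 *     validateIncrementalMarking();                 // compare mark bitmaps
 *     finishMarkingValidation();                    // delete the validator
 */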
 
 static void
 AssertNeedsBarrierFlagsConsistent(JSRuntime *rt)
 {
 #ifdef DEBUG
     bool anyNeedsBarrier = false;
@@ -3379,68 +3425,68 @@ Zone::findOutgoingEdges(ComponentFinder<
     JSRuntime *rt = runtimeFromMainThread();
     if (rt->atomsCompartment()->zone()->isGCMarking())
         finder.addEdgeTo(rt->atomsCompartment()->zone());
 
     for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next())
         comp->findOutgoingEdges(finder);
 }
 
-static void
-FindZoneGroups(JSRuntime *rt)
+void
+GCRuntime::findZoneGroups()
 {
     ComponentFinder<Zone> finder(rt->mainThread.nativeStackLimit[StackForSystemCode]);
-    if (!rt->gc.isIncremental)
+    if (!isIncremental)
         finder.useOneComponent();
 
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
         JS_ASSERT(zone->isGCMarking());
         finder.addNode(zone);
     }
-    rt->gc.zoneGroups = finder.getResultsList();
-    rt->gc.currentZoneGroup = rt->gc.zoneGroups;
-    rt->gc.zoneGroupIndex = 0;
-    JS_ASSERT_IF(!rt->gc.isIncremental, !rt->gc.currentZoneGroup->nextGroup());
+    zoneGroups = finder.getResultsList();
+    currentZoneGroup = zoneGroups;
+    zoneGroupIndex = 0;
+    JS_ASSERT_IF(!isIncremental, !currentZoneGroup->nextGroup());
 }
 
 static void
 ResetGrayList(JSCompartment* comp);
 
-static void
-GetNextZoneGroup(JSRuntime *rt)
-{
-    rt->gc.currentZoneGroup = rt->gc.currentZoneGroup->nextGroup();
-    ++rt->gc.zoneGroupIndex;
-    if (!rt->gc.currentZoneGroup) {
-        rt->gc.abortSweepAfterCurrentGroup = false;
+void
+GCRuntime::getNextZoneGroup()
+{
+    currentZoneGroup = currentZoneGroup->nextGroup();
+    ++zoneGroupIndex;
+    if (!currentZoneGroup) {
+        abortSweepAfterCurrentGroup = false;
         return;
     }
 
-    if (!rt->gc.isIncremental)
-        ComponentFinder<Zone>::mergeGroups(rt->gc.currentZoneGroup);
-
-    if (rt->gc.abortSweepAfterCurrentGroup) {
-        JS_ASSERT(!rt->gc.isIncremental);
+    if (!isIncremental)
+        ComponentFinder<Zone>::mergeGroups(currentZoneGroup);
+
+    if (abortSweepAfterCurrentGroup) {
+        JS_ASSERT(!isIncremental);
         for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
             JS_ASSERT(!zone->gcNextGraphComponent);
             JS_ASSERT(zone->isGCMarking());
             zone->setNeedsBarrier(false, Zone::UpdateIon);
             zone->setGCState(Zone::NoGC);
             zone->gcGrayRoots.clearAndFree();
         }
         rt->setNeedsBarrier(false);
         AssertNeedsBarrierFlagsConsistent(rt);
 
         for (GCCompartmentGroupIter comp(rt); !comp.done(); comp.next()) {
             ArrayBufferObject::resetArrayBufferList(comp);
             ResetGrayList(comp);
         }
 
-        rt->gc.abortSweepAfterCurrentGroup = false;
-        rt->gc.currentZoneGroup = nullptr;
+        abortSweepAfterCurrentGroup = false;
+        currentZoneGroup = nullptr;
     }
 }
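
For context on findZoneGroups()/getNextZoneGroup(): zones that can reach each
other must be swept together, so ComponentFinder partitions the zone graph
into strongly connected components and the sweep then visits one component
("zone group") at a time. A standalone sketch of that grouping using textbook
Tarjan SCC (illustrative only; not the patch's ComponentFinder):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Node {
        std::vector<int> edges;            // outgoing edges between zones
        int index = -1, lowlink = -1;
        bool onStack = false;
    };

    // Each component popped here models one zone group.
    static void strongConnect(std::vector<Node> &g, int v, int &counter,
                              std::vector<int> &stack,
                              std::vector<std::vector<int>> &groups)
    {
        g[v].index = g[v].lowlink = counter++;
        stack.push_back(v);
        g[v].onStack = true;
        for (int w : g[v].edges) {
            if (g[w].index == -1) {
                strongConnect(g, w, counter, stack, groups);
                g[v].lowlink = std::min(g[v].lowlink, g[w].lowlink);
            } else if (g[w].onStack) {
                g[v].lowlink = std::min(g[v].lowlink, g[w].index);
            }
        }
        if (g[v].lowlink == g[v].index) {  // v is the root of a component
            groups.emplace_back();
            int w;
            do {
                w = stack.back();
                stack.pop_back();
                g[w].onStack = false;
                groups.back().push_back(w);
            } while (w != v);
        }
    }

    int main()
    {
        std::vector<Node> g(4);
        g[0].edges = {1};
        g[1].edges = {0};                  // 0 <-> 1: must be swept together
        g[2].edges = {3};                  // 2 -> 3: two separate groups
        std::vector<int> stack;
        std::vector<std::vector<int>> groups;
        int counter = 0;
        for (int v = 0; v < (int)g.size(); v++) {
            if (g[v].index == -1)
                strongConnect(g, v, counter, stack, groups);
        }
        for (const auto &group : groups) {
            std::printf("zone group:");
            for (int v : group)
                std::printf(" %d", v);
            std::printf("\n");
        }
    }

Non-incremental collections do not need the partition at all, which is why the
code above collapses everything into one component via useOneComponent() and
mergeGroups().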
 
 /*
  * Gray marking:
  *
  * At the end of collection, anything reachable from a gray root that has not
  * otherwise been marked black must be marked gray.
@@ -3663,58 +3709,58 @@ js::NotifyGCPostSwap(JSObject *a, JSObje
      * either of them were in our gray pointer list, we re-add them again.
      */
     if (removedFlags & JS_GC_SWAP_OBJECT_A_REMOVED)
         DelayCrossCompartmentGrayMarking(b);
     if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED)
         DelayCrossCompartmentGrayMarking(a);
 }
 
-static void
-EndMarkingZoneGroup(JSRuntime *rt)
+void
+GCRuntime::endMarkingZoneGroup()
 {
     /*
      * Mark any incoming black pointers from previously swept compartments
      * whose referents are not marked. This can occur when gray cells become
      * black by the action of UnmarkGray.
      */
     MarkIncomingCrossCompartmentPointers(rt, BLACK);
 
-    MarkWeakReferencesInCurrentGroup(rt, gcstats::PHASE_SWEEP_MARK_WEAK);
+    markWeakReferencesInCurrentGroup(gcstats::PHASE_SWEEP_MARK_WEAK);
 
     /*
      * Change state of current group to MarkGray to restrict marking to this
      * group.  Note that there may be pointers to the atoms compartment, and
      * these will be marked through, as they are not marked with
      * MarkCrossCompartmentXXX.
      */
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
         JS_ASSERT(zone->isGCMarkingBlack());
         zone->setGCState(Zone::MarkGray);
     }
 
     /* Mark incoming gray pointers from previously swept compartments. */
-    rt->gc.marker.setMarkColorGray();
+    marker.setMarkColorGray();
     MarkIncomingCrossCompartmentPointers(rt, GRAY);
-    rt->gc.marker.setMarkColorBlack();
+    marker.setMarkColorBlack();
 
     /* Mark gray roots and mark transitively inside the current compartment group. */
-    MarkGrayReferencesInCurrentGroup(rt);
+    markGrayReferencesInCurrentGroup();
 
     /* Restore marking state. */
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
         JS_ASSERT(zone->isGCMarkingGray());
         zone->setGCState(Zone::Mark);
     }
 
-    JS_ASSERT(rt->gc.marker.isDrained());
-}
-
-static void
-BeginSweepingZoneGroup(JSRuntime *rt)
+    JS_ASSERT(marker.isDrained());
+}
+
+void
+GCRuntime::beginSweepingZoneGroup()
 {
     /*
      * Begin sweeping the group of zones in currentZoneGroup,
      * performing actions that must be done before yielding to caller.
      */
 
     bool sweepingAtoms = false;
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
@@ -3727,57 +3773,57 @@ BeginSweepingZoneGroup(JSRuntime *rt)
 
         if (rt->isAtomsZone(zone))
             sweepingAtoms = true;
 
         if (rt->sweepZoneCallback)
             rt->sweepZoneCallback(zone);
     }
 
-    ValidateIncrementalMarking(rt);
-
-    FreeOp fop(rt, rt->gc.sweepOnBackgroundThread);
+    validateIncrementalMarking();
+
+    FreeOp fop(rt, sweepOnBackgroundThread);
 
     {
-        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_FINALIZE_START);
-        if (rt->gc.finalizeCallback)
-            rt->gc.finalizeCallback(&fop, JSFINALIZE_GROUP_START, !rt->gc.isFull /* unused */);
+        gcstats::AutoPhase ap(stats, gcstats::PHASE_FINALIZE_START);
+        if (finalizeCallback)
+            finalizeCallback(&fop, JSFINALIZE_GROUP_START, !isFull /* unused */);
     }
 
     if (sweepingAtoms) {
-        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_ATOMS);
+        gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_ATOMS);
         rt->sweepAtoms();
     }
 
     /* Prune out dead views from ArrayBuffer's view lists. */
     for (GCCompartmentGroupIter c(rt); !c.done(); c.next())
         ArrayBufferObject::sweep(c);
 
     /* Collect watch points associated with unreachable objects. */
     WatchpointMap::sweepAll(rt);
 
     /* Detach unreachable debuggers and global objects from each other. */
     Debugger::sweepAll(&fop);
 
     {
-        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_COMPARTMENTS);
+        gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_COMPARTMENTS);
 
         for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_DISCARD_CODE);
+            gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_DISCARD_CODE);
             zone->discardJitCode(&fop);
         }
 
-        bool releaseTypes = ReleaseObservedTypes(rt);
+        bool releaseTypes = releaseObservedTypes();
         for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
-            gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
+            gcstats::AutoSCC scc(stats, zoneGroupIndex);
             c->sweep(&fop, releaseTypes && !c->zone()->isPreservingCode());
         }
 
         for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-            gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
+            gcstats::AutoSCC scc(stats, zoneGroupIndex);
 
             // If there is an OOM while sweeping types, the type information
             // will be deoptimized so that it is still correct (i.e.
             // overapproximates the possible types in the zone), but the
             // constraints might not have been triggered on the deoptimization
             // or even copied over completely. In this case, destroy all JIT
             // code and new script addendums in the zone, the only things whose
             // correctness depends on the type constraints.
@@ -3796,230 +3842,230 @@ BeginSweepingZoneGroup(JSRuntime *rt)
      * Queue all GC things in all zones for sweeping, either in the
      * foreground or on the background thread.
      *
      * Note that order is important here for the background case.
      *
      * Objects are finalized immediately but this may change in the future.
      */
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-        gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
+        gcstats::AutoSCC scc(stats, zoneGroupIndex);
         zone->allocator.arenas.queueObjectsForSweep(&fop);
     }
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-        gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
+        gcstats::AutoSCC scc(stats, zoneGroupIndex);
         zone->allocator.arenas.queueStringsForSweep(&fop);
     }
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-        gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
+        gcstats::AutoSCC scc(stats, zoneGroupIndex);
         zone->allocator.arenas.queueScriptsForSweep(&fop);
     }
 #ifdef JS_ION
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-        gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
+        gcstats::AutoSCC scc(stats, zoneGroupIndex);
         zone->allocator.arenas.queueJitCodeForSweep(&fop);
     }
 #endif
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-        gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
+        gcstats::AutoSCC scc(stats, zoneGroupIndex);
         zone->allocator.arenas.queueShapesForSweep(&fop);
         zone->allocator.arenas.gcShapeArenasToSweep =
             zone->allocator.arenas.arenaListsToSweep[FINALIZE_SHAPE];
     }
 
-    rt->gc.sweepPhase = 0;
-    rt->gc.sweepZone = rt->gc.currentZoneGroup;
-    rt->gc.sweepKindIndex = 0;
+    finalizePhase = 0;
+    sweepZone = currentZoneGroup;
+    sweepKindIndex = 0;
 
     {
-        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_FINALIZE_END);
-        if (rt->gc.finalizeCallback)
-            rt->gc.finalizeCallback(&fop, JSFINALIZE_GROUP_END, !rt->gc.isFull /* unused */);
+        gcstats::AutoPhase ap(stats, gcstats::PHASE_FINALIZE_END);
+        if (finalizeCallback)
+            finalizeCallback(&fop, JSFINALIZE_GROUP_END, !isFull /* unused */);
     }
 }
 
-static void
-EndSweepingZoneGroup(JSRuntime *rt)
+void
+GCRuntime::endSweepingZoneGroup()
 {
     /* Update the GC state for zones we have swept and unlink the list. */
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
         JS_ASSERT(zone->isGCSweeping());
         zone->setGCState(Zone::Finished);
     }
 
     /* Reset the list of arenas marked as being allocated during the sweep phase. */
-    while (ArenaHeader *arena = rt->gc.arenasAllocatedDuringSweep) {
-        rt->gc.arenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
+    while (ArenaHeader *arena = arenasAllocatedDuringSweep) {
+        arenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
         arena->unsetAllocDuringSweep();
     }
 }
 
-static void
-BeginSweepPhase(JSRuntime *rt, bool lastGC)
+void
+GCRuntime::beginSweepPhase(bool lastGC)
 {
     /*
      * Sweep phase.
      *
-     * Finalize as we sweep, outside of rt->gc.lock but with rt->isHeapBusy()
+     * Finalize as we sweep, outside of the GC lock but with rt->isHeapBusy()
      * true so that any attempt to allocate a GC-thing from a finalizer will
      * fail, rather than nest badly and leave the unmarked newborn to be swept.
      */
 
-    JS_ASSERT(!rt->gc.abortSweepAfterCurrentGroup);
-
-    ComputeNonIncrementalMarkingForValidation(rt);
-
-    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP);
+    JS_ASSERT(!abortSweepAfterCurrentGroup);
+
+    computeNonIncrementalMarkingForValidation();
+
+    gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
 
 #ifdef JS_THREADSAFE
-    rt->gc.sweepOnBackgroundThread = !lastGC && rt->useHelperThreads();
+    sweepOnBackgroundThread = !lastGC && rt->useHelperThreads();
 #endif
 
 #ifdef DEBUG
     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
         JS_ASSERT(!c->gcIncomingGrayPointers);
         for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
             if (e.front().key().kind != CrossCompartmentKey::StringWrapper)
                 AssertNotOnGrayList(&e.front().value().get().toObject());
         }
     }
 #endif
 
     DropStringWrappers(rt);
-    FindZoneGroups(rt);
-    EndMarkingZoneGroup(rt);
-    BeginSweepingZoneGroup(rt);
+    findZoneGroups();
+    endMarkingZoneGroup();
+    beginSweepingZoneGroup();
 }
 
 bool
 ArenaLists::foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget)
 {
     if (!arenaListsToSweep[thingKind])
         return true;
 
     ArenaList &dest = arenaLists[thingKind];
     return FinalizeArenas(fop, &arenaListsToSweep[thingKind], dest, thingKind, sliceBudget);
 }
 
-static bool
-DrainMarkStack(JSRuntime *rt, SliceBudget &sliceBudget, gcstats::Phase phase)
+bool
+GCRuntime::drainMarkStack(SliceBudget &sliceBudget, gcstats::Phase phase)
 {
     /* Run a marking slice and return whether the stack is now empty. */
-    gcstats::AutoPhase ap(rt->gc.stats, phase);
-    return rt->gc.marker.drainMarkStack(sliceBudget);
-}
-
-static bool
-SweepPhase(JSRuntime *rt, SliceBudget &sliceBudget)
-{
-    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP);
-    FreeOp fop(rt, rt->gc.sweepOnBackgroundThread);
-
-    bool finished = DrainMarkStack(rt, sliceBudget, gcstats::PHASE_SWEEP_MARK);
+    gcstats::AutoPhase ap(stats, phase);
+    return marker.drainMarkStack(sliceBudget);
+}
+
+bool
+GCRuntime::sweepPhase(SliceBudget &sliceBudget)
+{
+    gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
+    FreeOp fop(rt, sweepOnBackgroundThread);
+
+    bool finished = drainMarkStack(sliceBudget, gcstats::PHASE_SWEEP_MARK);
     if (!finished)
         return false;
 
     for (;;) {
         /* Finalize foreground finalized things. */
-        for (; rt->gc.sweepPhase < FinalizePhaseCount ; ++rt->gc.sweepPhase) {
-            gcstats::AutoPhase ap(rt->gc.stats, FinalizePhaseStatsPhase[rt->gc.sweepPhase]);
-
-            for (; rt->gc.sweepZone; rt->gc.sweepZone = rt->gc.sweepZone->nextNodeInGroup()) {
-                Zone *zone = rt->gc.sweepZone;
-
-                while (rt->gc.sweepKindIndex < FinalizePhaseLength[rt->gc.sweepPhase]) {
-                    AllocKind kind = FinalizePhases[rt->gc.sweepPhase][rt->gc.sweepKindIndex];
+        for (; finalizePhase < FinalizePhaseCount ; ++finalizePhase) {
+            gcstats::AutoPhase ap(stats, FinalizePhaseStatsPhase[finalizePhase]);
+
+            for (; sweepZone; sweepZone = sweepZone->nextNodeInGroup()) {
+                Zone *zone = sweepZone;
+
+                while (sweepKindIndex < FinalizePhaseLength[finalizePhase]) {
+                    AllocKind kind = FinalizePhases[finalizePhase][sweepKindIndex];
 
                     if (!zone->allocator.arenas.foregroundFinalize(&fop, kind, sliceBudget))
                         return false;  /* Yield to the mutator. */
 
-                    ++rt->gc.sweepKindIndex;
+                    ++sweepKindIndex;
                 }
-                rt->gc.sweepKindIndex = 0;
+                sweepKindIndex = 0;
             }
-            rt->gc.sweepZone = rt->gc.currentZoneGroup;
+            sweepZone = currentZoneGroup;
         }
 
         /* Remove dead shapes from the shape tree, but don't finalize them yet. */
         {
-            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_SHAPE);
-
-            for (; rt->gc.sweepZone; rt->gc.sweepZone = rt->gc.sweepZone->nextNodeInGroup()) {
-                Zone *zone = rt->gc.sweepZone;
+            gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_SHAPE);
+
+            for (; sweepZone; sweepZone = sweepZone->nextNodeInGroup()) {
+                Zone *zone = sweepZone;
                 while (ArenaHeader *arena = zone->allocator.arenas.gcShapeArenasToSweep) {
                     for (ArenaCellIterUnderGC i(arena); !i.done(); i.next()) {
                         Shape *shape = i.get<Shape>();
                         if (!shape->isMarked())
                             shape->sweep();
                     }
 
                     zone->allocator.arenas.gcShapeArenasToSweep = arena->next;
                     sliceBudget.step(Arena::thingsPerArena(Arena::thingSize(FINALIZE_SHAPE)));
                     if (sliceBudget.isOverBudget())
                         return false;  /* Yield to the mutator. */
                 }
             }
         }
 
-        EndSweepingZoneGroup(rt);
-        GetNextZoneGroup(rt);
-        if (!rt->gc.currentZoneGroup)
+        endSweepingZoneGroup();
+        getNextZoneGroup();
+        if (!currentZoneGroup)
             return true;  /* We're finished. */
-        EndMarkingZoneGroup(rt);
-        BeginSweepingZoneGroup(rt);
+        endMarkingZoneGroup();
+        beginSweepingZoneGroup();
     }
 }
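
The bookkeeping above (finalizePhase, sweepZone, sweepKindIndex) is what lets
sweepPhase() yield mid-loop and resume in a later slice: the loop counters
live on GCRuntime rather than on the stack. The same pattern in a standalone
miniature, with a plain integer budget standing in for SliceBudget:

    #include <cstdio>

    struct ResumableSweep {
        int phase = 0, zone = 0, kind = 0;  // persisted loop cursors
        static const int Phases = 2, Zones = 3, Kinds = 2;

        // Returns true when done, false when the budget ran out mid-loop.
        bool run(int budget) {
            for (; phase < Phases; ++phase) {
                for (; zone < Zones; ++zone) {
                    for (; kind < Kinds; ++kind) {
                        if (budget-- == 0)
                            return false;   // yield; cursors keep our place
                        std::printf("sweep phase %d zone %d kind %d\n",
                                    phase, zone, kind);
                    }
                    kind = 0;
                }
                zone = 0;
            }
            return true;
        }
    };

    int main()
    {
        ResumableSweep s;
        int slice = 0;
        while (!s.run(5))                   // 5 work units per slice
            std::printf("-- end of slice %d --\n", slice++);
    }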
 
-static void
-EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool lastGC)
-{
-    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP);
-    FreeOp fop(rt, rt->gc.sweepOnBackgroundThread);
-
-    JS_ASSERT_IF(lastGC, !rt->gc.sweepOnBackgroundThread);
-
-    JS_ASSERT(rt->gc.marker.isDrained());
-    rt->gc.marker.stop();
+void
+GCRuntime::endSweepPhase(JSGCInvocationKind gckind, bool lastGC)
+{
+    gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
+    FreeOp fop(rt, sweepOnBackgroundThread);
+
+    JS_ASSERT_IF(lastGC, !sweepOnBackgroundThread);
+
+    JS_ASSERT(marker.isDrained());
+    marker.stop();
 
     /*
      * Recalculate whether GC was full or not as this may have changed due to
      * newly created zones.  Can only change from full to not full.
      */
-    if (rt->gc.isFull) {
+    if (isFull) {
         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
             if (!zone->isCollecting()) {
-                rt->gc.isFull = false;
+                isFull = false;
                 break;
             }
         }
     }
 
     /*
      * If we found any black->gray edges during marking, we completely clear
      * the mark bits of all uncollected zones or, if a reset has occurred, of
      * zones that will no longer be collected. This is safe, although it may
      * prevent the cycle collector from collecting some dead objects.
      */
-    if (rt->gc.foundBlackGrayEdges) {
+    if (foundBlackGrayEdges) {
         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
             if (!zone->isCollecting())
                 zone->allocator.arenas.unmarkAll();
         }
     }
 
     {
-        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_DESTROY);
+        gcstats::AutoPhase ap(stats, gcstats::PHASE_DESTROY);
 
         /*
          * Sweep script filenames after sweeping functions in the generic loop
          * above. In this way when a scripted function's finalizer destroys the
          * script and calls rt->destroyScriptHook, the hook can still access the
          * script's filename. See bug 323267.
          */
-        if (rt->gc.isFull)
+        if (isFull)
             SweepScriptData(rt);
 
         /* Clear out any small pools that we're hanging on to. */
         if (JSC::ExecutableAllocator *execAlloc = rt->maybeExecAlloc())
             execAlloc->purge();
 #ifdef JS_ION
         if (rt->jitRuntime() && rt->jitRuntime()->hasIonAlloc()) {
             JSRuntime::AutoLockForInterrupt lock(rt);
@@ -4027,75 +4073,75 @@ EndSweepPhase(JSRuntime *rt, JSGCInvocat
         }
 #endif
 
         /*
          * This removes compartments from rt->compartments, so we do it last to make
          * sure we don't miss sweeping any compartments.
          */
         if (!lastGC)
-            SweepZones(&fop, lastGC);
-
-        if (!rt->gc.sweepOnBackgroundThread) {
+            sweepZones(&fop, lastGC);
+
+        if (!sweepOnBackgroundThread) {
             /*
              * Destroy arenas after we finished the sweeping so finalizers can
              * safely use IsAboutToBeFinalized(). This is done on the
              * GCHelperThread if possible. We acquire the lock only because
              * Expire needs to unlock it for other callers.
              */
             AutoLockGC lock(rt);
             ExpireChunksAndArenas(rt, gckind == GC_SHRINK);
         }
     }
 
     {
-        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_FINALIZE_END);
-
-        if (rt->gc.finalizeCallback)
-            rt->gc.finalizeCallback(&fop, JSFINALIZE_COLLECTION_END, !rt->gc.isFull);
+        gcstats::AutoPhase ap(stats, gcstats::PHASE_FINALIZE_END);
+
+        if (finalizeCallback)
+            finalizeCallback(&fop, JSFINALIZE_COLLECTION_END, !isFull);
 
         /* If we finished a full GC, then the gray bits are correct. */
-        if (rt->gc.isFull)
-            rt->gc.grayBitsValid = true;
+        if (isFull)
+            grayBitsValid = true;
     }
 
     /* Set up list of zones for sweeping of background things. */
-    JS_ASSERT(!rt->gc.sweepingZones);
+    JS_ASSERT(!sweepingZones);
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
-        zone->gcNextGraphNode = rt->gc.sweepingZones;
-        rt->gc.sweepingZones = zone;
+        zone->gcNextGraphNode = sweepingZones;
+        sweepingZones = zone;
     }
 
     /* If not sweeping on background thread then we must do it here. */
-    if (!rt->gc.sweepOnBackgroundThread) {
-        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_DESTROY);
+    if (!sweepOnBackgroundThread) {
+        gcstats::AutoPhase ap(stats, gcstats::PHASE_DESTROY);
 
         SweepBackgroundThings(rt, false);
 
         rt->freeLifoAlloc.freeAll();
 
         /* Ensure the compartments get swept if it's the last GC. */
         if (lastGC)
-            SweepZones(&fop, lastGC);
+            sweepZones(&fop, lastGC);
     }
 
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         zone->setGCLastBytes(zone->gcBytes, gckind);
         if (zone->isCollecting()) {
             JS_ASSERT(zone->isGCFinished());
             zone->setGCState(Zone::NoGC);
         }
 
 #ifdef DEBUG
         JS_ASSERT(!zone->isCollecting());
         JS_ASSERT(!zone->wasGCStarted());
 
         for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i) {
             JS_ASSERT_IF(!IsBackgroundFinalized(AllocKind(i)) ||
-                         !rt->gc.sweepOnBackgroundThread,
+                         !sweepOnBackgroundThread,
                          !zone->allocator.arenas.arenaListsToSweep[i]);
         }
 #endif
     }
 
 #ifdef DEBUG
     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
         JS_ASSERT(!c->gcIncomingGrayPointers);
@@ -4103,47 +4149,47 @@ EndSweepPhase(JSRuntime *rt, JSGCInvocat
 
         for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
             if (e.front().key().kind != CrossCompartmentKey::StringWrapper)
                 AssertNotOnGrayList(&e.front().value().get().toObject());
         }
     }
 #endif
 
-    FinishMarkingValidation(rt);
-
-    rt->gc.lastGCTime = PRMJ_Now();
+    finishMarkingValidation();
+
+    lastGCTime = PRMJ_Now();
 }
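
One detail worth noting in endSweepPhase(): the list of zones for background
sweeping is built by intrusive head insertion (zone->gcNextGraphNode =
sweepingZones; sweepingZones = zone), which is O(1) per zone and needs no
allocation. The pattern in isolation (this Zone type is a hypothetical
stand-in):

    #include <cstdio>
    #include <initializer_list>

    struct Zone {
        const char *name;
        Zone *next = nullptr;              // plays the role of gcNextGraphNode
    };

    int main()
    {
        Zone a{"a"}, b{"b"}, c{"c"};
        Zone *sweepingZones = nullptr;

        // Head insertion reverses iteration order relative to insertion.
        for (Zone *z : {&a, &b, &c}) {
            z->next = sweepingZones;
            sweepingZones = z;
        }

        for (Zone *z = sweepingZones; z; z = z->next)
            std::printf("sweep zone %s\n", z->name);   // prints c, b, a
    }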
 
 namespace {
 
 /* ...while this class is to be used only for garbage collection. */
 class AutoGCSession
 {
-    JSRuntime *runtime;
+    GCRuntime *gc;
     AutoTraceSession session;
     bool canceled;
 
   public:
-    explicit AutoGCSession(JSRuntime *rt);
+    explicit AutoGCSession(GCRuntime *gc);
     ~AutoGCSession();
 
     void cancel() { canceled = true; }
 };
 
 } /* anonymous namespace */
 
 /* Start a new heap session. */
 AutoTraceSession::AutoTraceSession(JSRuntime *rt, js::HeapState heapState)
   : lock(rt),
     runtime(rt),
     prevState(rt->gc.heapState)
 {
     JS_ASSERT(!rt->gc.noGCOrAllocationCheck);
-    JS_ASSERT(!rt->isHeapBusy());
+    JS_ASSERT(rt->gc.heapState == Idle);
     JS_ASSERT(heapState != Idle);
 #ifdef JSGC_GENERATIONAL
     JS_ASSERT_IF(heapState == MajorCollecting, rt->gc.nursery.isEmpty());
 #endif
 
     // Threads with an exclusive context can hit refillFreeList while holding
     // the exclusive access lock. To avoid deadlocking when we try to acquire
     // this lock during GC and the other thread is waiting, make sure we hold
@@ -4178,54 +4224,54 @@ AutoTraceSession::~AutoTraceSession()
 #else
         MOZ_CRASH();
 #endif
     } else {
         runtime->gc.heapState = prevState;
     }
 }
 
-AutoGCSession::AutoGCSession(JSRuntime *rt)
-  : runtime(rt),
-    session(rt, MajorCollecting),
+AutoGCSession::AutoGCSession(GCRuntime *gc)
+  : gc(gc),
+    session(gc->rt, MajorCollecting),
     canceled(false)
 {
-    runtime->gc.isNeeded = false;
-    runtime->gc.interFrameGC = true;
-
-    runtime->gc.number++;
+    gc->isNeeded = false;
+    gc->interFrameGC = true;
+
+    gc->number++;
 
     // It's ok if threads other than the main thread have suppressGC set, as
     // they are operating on zones which will not be collected from here.
-    JS_ASSERT(!runtime->mainThread.suppressGC);
+    JS_ASSERT(!gc->rt->mainThread.suppressGC);
 }
 
 AutoGCSession::~AutoGCSession()
 {
     if (canceled)
         return;
 
 #ifndef JS_MORE_DETERMINISTIC
-    runtime->gc.nextFullGCTime = PRMJ_Now() + GC_IDLE_FULL_SPAN;
+    gc->nextFullGCTime = PRMJ_Now() + GC_IDLE_FULL_SPAN;
 #endif
 
-    runtime->gc.chunkAllocationSinceLastGC = false;
+    gc->chunkAllocationSinceLastGC = false;
 
 #ifdef JS_GC_ZEAL
     /* Keeping these around after a GC is dangerous. */
-    runtime->gc.selectedForMarking.clearAndFree();
+    gc->selectedForMarking.clearAndFree();
 #endif
 
     /* Clear gcMallocBytes for all compartments */
-    for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
+    for (ZonesIter zone(gc->rt, WithAtoms); !zone.done(); zone.next()) {
         zone->resetGCMallocBytes();
         zone->unscheduleGC();
     }
 
-    runtime->resetGCMallocBytes();
+    gc->rt->resetGCMallocBytes();
 }
 
 AutoCopyFreeListToArenas::AutoCopyFreeListToArenas(JSRuntime *rt, ZoneSelector selector)
   : runtime(rt),
     selector(selector)
 {
     for (ZonesIter zone(rt, selector); !zone.done(); zone.next())
         zone->allocator.arenas.copyFreeListsToArenas();
@@ -4248,77 +4294,71 @@ class AutoCopyFreeListToArenasForGC
             zone->allocator.arenas.copyFreeListsToArenas();
     }
     ~AutoCopyFreeListToArenasForGC() {
         for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next())
             zone->allocator.arenas.clearFreeListsInArenas();
     }
 };
 
-static void
-IncrementalCollectSlice(JSRuntime *rt,
-                        int64_t budget,
-                        JS::gcreason::Reason gcReason,
-                        JSGCInvocationKind gcKind);
-
-static void
-ResetIncrementalGC(JSRuntime *rt, const char *reason)
-{
-    switch (rt->gc.incrementalState) {
+void
+GCRuntime::resetIncrementalGC(const char *reason)
+{
+    switch (incrementalState) {
       case NO_INCREMENTAL:
         return;
 
       case MARK: {
         /* Cancel any ongoing marking. */
         AutoCopyFreeListToArenasForGC copy(rt);
 
-        rt->gc.marker.reset();
-        rt->gc.marker.stop();
+        marker.reset();
+        marker.stop();
 
         for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
             ArrayBufferObject::resetArrayBufferList(c);
             ResetGrayList(c);
         }
 
         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
             JS_ASSERT(zone->isGCMarking());
             zone->setNeedsBarrier(false, Zone::UpdateIon);
             zone->setGCState(Zone::NoGC);
         }
         rt->setNeedsBarrier(false);
         AssertNeedsBarrierFlagsConsistent(rt);
 
-        rt->gc.incrementalState = NO_INCREMENTAL;
-
-        JS_ASSERT(!rt->gc.strictCompartmentChecking);
+        incrementalState = NO_INCREMENTAL;
+
+        JS_ASSERT(!strictCompartmentChecking);
 
         break;
       }
 
       case SWEEP:
-        rt->gc.marker.reset();
+        marker.reset();
 
         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
             zone->scheduledForDestruction = false;
 
         /* Finish sweeping the current zone group, then abort. */
-        rt->gc.abortSweepAfterCurrentGroup = true;
-        IncrementalCollectSlice(rt, SliceBudget::Unlimited, JS::gcreason::RESET, GC_NORMAL);
+        abortSweepAfterCurrentGroup = true;
+        incrementalCollectSlice(SliceBudget::Unlimited, JS::gcreason::RESET, GC_NORMAL);
 
         {
-            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
-            rt->gc.helperThread.waitBackgroundSweepOrAllocEnd();
+            gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
+            helperThread.waitBackgroundSweepOrAllocEnd();
         }
         break;
 
       default:
         MOZ_ASSUME_UNREACHABLE("Invalid incremental GC state");
     }
 
-    rt->gc.stats.reset(reason);
+    stats.reset(reason);
 
 #ifdef DEBUG
     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
         JS_ASSERT(c->gcLiveArrayBuffers.empty());
 
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         JS_ASSERT(!zone->needsBarrier());
         for (unsigned i = 0; i < FINALIZE_LIMIT; ++i)
@@ -4382,152 +4422,148 @@ AutoGCSlice::~AutoGCSlice()
         } else {
             zone->setNeedsBarrier(false, Zone::UpdateIon);
         }
     }
     runtime->setNeedsBarrier(haveBarriers);
     AssertNeedsBarrierFlagsConsistent(runtime);
 }
 
-static void
-PushZealSelectedObjects(JSRuntime *rt)
+void
+GCRuntime::pushZealSelectedObjects()
 {
 #ifdef JS_GC_ZEAL
     /* Push selected objects onto the mark stack and clear the list. */
-    for (JSObject **obj = rt->gc.selectedForMarking.begin();
-         obj != rt->gc.selectedForMarking.end(); obj++)
-    {
-        MarkObjectUnbarriered(&rt->gc.marker, obj, "selected obj");
-    }
+    for (JSObject **obj = selectedForMarking.begin(); obj != selectedForMarking.end(); obj++)
+        MarkObjectUnbarriered(&marker, obj, "selected obj");
 #endif
 }
 
-static void
-IncrementalCollectSlice(JSRuntime *rt,
-                        int64_t budget,
-                        JS::gcreason::Reason reason,
-                        JSGCInvocationKind gckind)
+void
+GCRuntime::incrementalCollectSlice(int64_t budget,
+                                   JS::gcreason::Reason reason,
+                                   JSGCInvocationKind gckind)
 {
     JS_ASSERT(rt->currentThreadHasExclusiveAccess());
 
     AutoCopyFreeListToArenasForGC copy(rt);
     AutoGCSlice slice(rt);
 
     bool lastGC = (reason == JS::gcreason::DESTROY_RUNTIME);
 
-    gc::State initialState = rt->gc.incrementalState;
+    gc::State initialState = incrementalState;
 
     int zeal = 0;
 #ifdef JS_GC_ZEAL
     if (reason == JS::gcreason::DEBUG_GC && budget != SliceBudget::Unlimited) {
         /*
          * Do the incremental collection type specified by zeal mode if the
          * collection was triggered by RunDebugGC() and incremental GC has not
-         * been cancelled by ResetIncrementalGC.
+         * been cancelled by resetIncrementalGC().
          */
-        zeal = rt->gcZeal();
+        zeal = zealMode;
     }
 #endif
 
-    JS_ASSERT_IF(rt->gc.incrementalState != NO_INCREMENTAL, rt->gc.isIncremental);
-    rt->gc.isIncremental = budget != SliceBudget::Unlimited;
+    JS_ASSERT_IF(incrementalState != NO_INCREMENTAL, isIncremental);
+    isIncremental = budget != SliceBudget::Unlimited;
 
     if (zeal == ZealIncrementalRootsThenFinish || zeal == ZealIncrementalMarkAllThenFinish) {
         /*
          * Yields between slices occur at predetermined points in these modes;
          * the budget is not used.
          */
         budget = SliceBudget::Unlimited;
     }
 
     SliceBudget sliceBudget(budget);
 
-    if (rt->gc.incrementalState == NO_INCREMENTAL) {
-        rt->gc.incrementalState = MARK_ROOTS;
-        rt->gc.lastMarkSlice = false;
+    if (incrementalState == NO_INCREMENTAL) {
+        incrementalState = MARK_ROOTS;
+        lastMarkSlice = false;
     }
 
-    if (rt->gc.incrementalState == MARK)
-        AutoGCRooter::traceAllWrappers(&rt->gc.marker);
-
-    switch (rt->gc.incrementalState) {
+    if (incrementalState == MARK)
+        AutoGCRooter::traceAllWrappers(&marker);
+
+    switch (incrementalState) {
 
       case MARK_ROOTS:
-        if (!BeginMarkPhase(rt)) {
-            rt->gc.incrementalState = NO_INCREMENTAL;
+        if (!beginMarkPhase()) {
+            incrementalState = NO_INCREMENTAL;
             return;
         }
 
         if (!lastGC)
-            PushZealSelectedObjects(rt);
-
-        rt->gc.incrementalState = MARK;
-
-        if (rt->gc.isIncremental && zeal == ZealIncrementalRootsThenFinish)
+            pushZealSelectedObjects();
+
+        incrementalState = MARK;
+
+        if (isIncremental && zeal == ZealIncrementalRootsThenFinish)
             break;
 
         /* fall through */
 
       case MARK: {
         /* If we needed delayed marking for gray roots, then collect until done. */
-        if (!rt->gc.marker.hasBufferedGrayRoots()) {
+        if (!marker.hasBufferedGrayRoots()) {
             sliceBudget.reset();
-            rt->gc.isIncremental = false;
+            isIncremental = false;
         }
 
-        bool finished = DrainMarkStack(rt, sliceBudget, gcstats::PHASE_MARK);
+        bool finished = drainMarkStack(sliceBudget, gcstats::PHASE_MARK);
         if (!finished)
             break;
 
-        JS_ASSERT(rt->gc.marker.isDrained());
-
-        if (!rt->gc.lastMarkSlice && rt->gc.isIncremental &&
+        JS_ASSERT(marker.isDrained());
+
+        if (!lastMarkSlice && isIncremental &&
             ((initialState == MARK && zeal != ZealIncrementalRootsThenFinish) ||
              zeal == ZealIncrementalMarkAllThenFinish))
         {
             /*
              * Yield with the aim of starting the sweep in the next
              * slice.  We will need to mark anything new on the stack
              * when we resume, so we stay in MARK state.
              */
-            rt->gc.lastMarkSlice = true;
+            lastMarkSlice = true;
             break;
         }
 
-        rt->gc.incrementalState = SWEEP;
+        incrementalState = SWEEP;
 
         /*
          * This runs to completion, but we don't continue if the budget is
          * now exhausted.
          */
-        BeginSweepPhase(rt, lastGC);
+        beginSweepPhase(lastGC);
         if (sliceBudget.isOverBudget())
             break;
 
         /*
          * Always yield here when running in incremental multi-slice zeal
          * mode, so RunDebugGC can reset the slice budget.
          */
-        if (rt->gc.isIncremental && zeal == ZealIncrementalMultipleSlices)
+        if (isIncremental && zeal == ZealIncrementalMultipleSlices)
             break;
 
         /* fall through */
       }
 
       case SWEEP: {
-        bool finished = SweepPhase(rt, sliceBudget);
+        bool finished = sweepPhase(sliceBudget);
         if (!finished)
             break;
 
-        EndSweepPhase(rt, gckind, lastGC);
-
-        if (rt->gc.sweepOnBackgroundThread)
-            rt->gc.helperThread.startBackgroundSweep(gckind == GC_SHRINK);
-
-        rt->gc.incrementalState = NO_INCREMENTAL;
+        endSweepPhase(gckind, lastGC);
+
+        if (sweepOnBackgroundThread)
+            helperThread.startBackgroundSweep(gckind == GC_SHRINK);
+
+        incrementalState = NO_INCREMENTAL;
         break;
       }
 
       default:
         JS_ASSERT(false);
     }
 }
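
incrementalCollectSlice() is the heart of the incremental collector: a
resumable state machine that re-enters the switch at the saved
incrementalState on each slice and falls through to later phases when the
budget allows. A standalone sketch of that control flow (MiniGC and its
workloads are hypothetical; the real phases are as above):

    #include <cstdio>

    enum State { NO_INCREMENTAL, MARK_ROOTS, MARK, SWEEP };

    struct MiniGC {
        State state = NO_INCREMENTAL;
        int markWork = 3, sweepWork = 3;   // pretend workloads

        void slice(int budget) {
            if (state == NO_INCREMENTAL)
                state = MARK_ROOTS;
            switch (state) {
              case MARK_ROOTS:
                std::printf("mark roots\n");
                state = MARK;
                /* fall through */
              case MARK:
                while (markWork && budget) {
                    markWork--; budget--;
                    std::printf("mark\n");
                }
                if (markWork)
                    break;                 // yield; resume in MARK
                state = SWEEP;
                /* fall through */
              case SWEEP:
                while (sweepWork && budget) {
                    sweepWork--; budget--;
                    std::printf("sweep\n");
                }
                if (sweepWork)
                    break;                 // yield; resume in SWEEP
                state = NO_INCREMENTAL;    // collection finished
                break;
              default:
                break;
            }
        }
    };

    int main()
    {
        MiniGC gc;
        int n = 0;
        do {
            std::printf("-- slice %d --\n", n++);
            gc.slice(2);                   // two work units per slice
        } while (gc.state != NO_INCREMENTAL);
    }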
 
@@ -4540,106 +4576,106 @@ gc::IsIncrementalGCSafe(JSRuntime *rt)
         return IncrementalSafety::Unsafe("keepAtoms set");
 
     if (!rt->gc.incrementalEnabled)
         return IncrementalSafety::Unsafe("incremental permanently disabled");
 
     return IncrementalSafety::Safe();
 }
 
-static void
-BudgetIncrementalGC(JSRuntime *rt, int64_t *budget)
+void
+GCRuntime::budgetIncrementalGC(int64_t *budget)
 {
     IncrementalSafety safe = IsIncrementalGCSafe(rt);
     if (!safe) {
-        ResetIncrementalGC(rt, safe.reason());
+        resetIncrementalGC(safe.reason());
         *budget = SliceBudget::Unlimited;
-        rt->gc.stats.nonincremental(safe.reason());
+        stats.nonincremental(safe.reason());
         return;
     }
 
-    if (rt->gcMode() != JSGC_MODE_INCREMENTAL) {
-        ResetIncrementalGC(rt, "GC mode change");
+    if (mode != JSGC_MODE_INCREMENTAL) {
+        resetIncrementalGC("GC mode change");
         *budget = SliceBudget::Unlimited;
-        rt->gc.stats.nonincremental("GC mode");
+        stats.nonincremental("GC mode");
         return;
     }
 
     if (rt->isTooMuchMalloc()) {
         *budget = SliceBudget::Unlimited;
-        rt->gc.stats.nonincremental("malloc bytes trigger");
+        stats.nonincremental("malloc bytes trigger");
     }
 
     bool reset = false;
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         if (zone->gcBytes >= zone->gcTriggerBytes) {
             *budget = SliceBudget::Unlimited;
-            rt->gc.stats.nonincremental("allocation trigger");
+            stats.nonincremental("allocation trigger");
         }
 
-        if (rt->gc.incrementalState != NO_INCREMENTAL &&
+        if (incrementalState != NO_INCREMENTAL &&
             zone->isGCScheduled() != zone->wasGCStarted())
         {
             reset = true;
         }
 
         if (zone->isTooMuchMalloc()) {
             *budget = SliceBudget::Unlimited;
-            rt->gc.stats.nonincremental("malloc bytes trigger");
+            stats.nonincremental("malloc bytes trigger");
         }
     }
 
     if (reset)
-        ResetIncrementalGC(rt, "zone change");
+        resetIncrementalGC("zone change");
 }
 
 /*
  * Run one GC "cycle" (either a slice of incremental GC or an entire
  * non-incremental GC). We disable inlining to ensure that the bottom of the
  * stack with possible GC roots recorded in MarkRuntime excludes any pointers we
  * use during the marking implementation.
  *
  * Returns true if we "reset" an existing incremental GC, which would force us
  * to run another cycle.
  */
-static MOZ_NEVER_INLINE bool
-GCCycle(JSRuntime *rt, bool incremental, int64_t budget,
-        JSGCInvocationKind gckind, JS::gcreason::Reason reason)
-{
-    AutoGCSession gcsession(rt);
+MOZ_NEVER_INLINE bool
+GCRuntime::gcCycle(bool incremental, int64_t budget, JSGCInvocationKind gckind,
+                   JS::gcreason::Reason reason)
+{
+    AutoGCSession gcsession(this);
 
     /*
      * As we are about to purge caches and clear the mark bits, we must wait for
      * any background finalization to finish. We must also wait for the
      * background allocation to finish so we can avoid taking the GC lock
      * when manipulating the chunks during the GC.
      */
     {
-        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
-        rt->gc.helperThread.waitBackgroundSweepOrAllocEnd();
+        gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
+        helperThread.waitBackgroundSweepOrAllocEnd();
     }
 
-    State prevState = rt->gc.incrementalState;
+    State prevState = incrementalState;
 
     if (!incremental) {
         /* If non-incremental GC was requested, reset incremental GC. */
-        ResetIncrementalGC(rt, "requested");
-        rt->gc.stats.nonincremental("requested");
+        resetIncrementalGC("requested");
+        stats.nonincremental("requested");
         budget = SliceBudget::Unlimited;
     } else {
-        BudgetIncrementalGC(rt, &budget);
+        budgetIncrementalGC(&budget);
     }
 
     /* The GC was reset, so we need a do-over. */
-    if (prevState != NO_INCREMENTAL && rt->gc.incrementalState == NO_INCREMENTAL) {
+    if (prevState != NO_INCREMENTAL && incrementalState == NO_INCREMENTAL) {
         gcsession.cancel();
         return true;
     }
 
-    IncrementalCollectSlice(rt, budget, reason, gckind);
+    incrementalCollectSlice(budget, reason, gckind);
     return false;
 }
 
 #ifdef JS_GC_ZEAL
 static bool
 IsDeterministicGCReason(JS::gcreason::Reason reason)
 {
     if (reason > JS::gcreason::DEBUG_GC &&
@@ -4651,179 +4687,185 @@ IsDeterministicGCReason(JS::gcreason::Re
     if (reason == JS::gcreason::MAYBEGC)
         return false;
 
     return true;
 }
 #endif
 
 static bool
-ShouldCleanUpEverything(JSRuntime *rt, JS::gcreason::Reason reason, JSGCInvocationKind gckind)
+ShouldCleanUpEverything(JS::gcreason::Reason reason, JSGCInvocationKind gckind)
 {
     // During shutdown, we must clean everything up, for the sake of leak
     // detection. When a runtime has no contexts, or we're doing a GC before a
     // shutdown CC, those are strong indications that we're shutting down.
     return reason == JS::gcreason::DESTROY_RUNTIME ||
            reason == JS::gcreason::SHUTDOWN_CC ||
            gckind == GC_SHRINK;
 }
 
 namespace {
 
 #ifdef JSGC_GENERATIONAL
 class AutoDisableStoreBuffer
 {
-    JSRuntime *runtime;
+    StoreBuffer &sb;
     bool prior;
 
   public:
-    AutoDisableStoreBuffer(JSRuntime *rt) : runtime(rt) {
-        prior = rt->gc.storeBuffer.isEnabled();
-        rt->gc.storeBuffer.disable();
+    AutoDisableStoreBuffer(GCRuntime *gc) : sb(gc->storeBuffer) {
+        prior = sb.isEnabled();
+        sb.disable();
     }
     ~AutoDisableStoreBuffer() {
         if (prior)
-            runtime->gc.storeBuffer.enable();
+            sb.enable();
     }
 };
 #else
 struct AutoDisableStoreBuffer
 {
-    AutoDisableStoreBuffer(JSRuntime *) {}
+    AutoDisableStoreBuffer(GCRuntime *) {}
 };
 #endif
 
 } /* anonymous namespace */
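
AutoDisableStoreBuffer above follows the usual save/disable/restore RAII
shape: capture the prior state in the constructor and restore it in the
destructor, so the guard also nests correctly. The pattern in isolation, with
a plain bool standing in for the store buffer's enabled flag:

    #include <cstdio>

    struct Feature {
        bool enabled = true;
    };

    class AutoDisable {
        Feature &f;
        bool prior;                        // state at construction

      public:
        explicit AutoDisable(Feature &f) : f(f), prior(f.enabled) {
            f.enabled = false;
        }
        ~AutoDisable() {
            if (prior)                     // re-enable only if it was on
                f.enabled = true;
        }
    };

    int main()
    {
        Feature feature;
        {
            AutoDisable guard(feature);
            std::printf("inside: %s\n", feature.enabled ? "on" : "off");
        }
        std::printf("after: %s\n", feature.enabled ? "on" : "off");
    }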
 
-static void
-Collect(JSRuntime *rt, bool incremental, int64_t budget,
-        JSGCInvocationKind gckind, JS::gcreason::Reason reason)
+void
+GCRuntime::collect(bool incremental, int64_t budget, JSGCInvocationKind gckind,
+                   JS::gcreason::Reason reason)
 {
     /* GC shouldn't be running in parallel execution mode */
     JS_ASSERT(!InParallelSection());
 
     JS_AbortIfWrongThread(rt);
 
     /* If we attempt to invoke the GC while we are running in the GC, assert. */
     JS_ASSERT(!rt->isHeapBusy());
 
     if (rt->mainThread.suppressGC)
         return;
 
     TraceLogger *logger = TraceLoggerForMainThread(rt);
     AutoTraceLog logGC(logger, TraceLogger::GC);
 
 #ifdef JS_GC_ZEAL
-    if (rt->gc.deterministicOnly && !IsDeterministicGCReason(reason))
+    if (deterministicOnly && !IsDeterministicGCReason(reason))
         return;
 #endif
 
     JS_ASSERT_IF(!incremental || budget != SliceBudget::Unlimited, JSGC_INCREMENTAL);
 
     AutoStopVerifyingBarriers av(rt, reason == JS::gcreason::SHUTDOWN_CC ||
                                      reason == JS::gcreason::DESTROY_RUNTIME);
 
-    RecordNativeStackTopForGC(rt);
+    recordNativeStackTopForGC();
 
     int zoneCount = 0;
     int compartmentCount = 0;
     int collectedCount = 0;
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
-        if (rt->gcMode() == JSGC_MODE_GLOBAL)
+        if (mode == JSGC_MODE_GLOBAL)
             zone->scheduleGC();
 
         /* This is a heuristic to avoid resets. */
-        if (rt->gc.incrementalState != NO_INCREMENTAL && zone->needsBarrier())
+        if (incrementalState != NO_INCREMENTAL && zone->needsBarrier())
             zone->scheduleGC();
 
         zoneCount++;
         if (zone->isGCScheduled())
             collectedCount++;
     }
 
     for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next())
         compartmentCount++;
 
-    rt->gc.shouldCleanUpEverything = ShouldCleanUpEverything(rt, reason, gckind);
+    shouldCleanUpEverything = ShouldCleanUpEverything(reason, gckind);
 
     bool repeat = false;
     do {
-        MinorGC(rt, reason);
+        minorGC(reason);
 
         /*
          * Marking can trigger many incidental post barriers, some of them for
          * objects which are not going to be live after the GC.
          */
-        AutoDisableStoreBuffer adsb(rt);
-
-        gcstats::AutoGCSlice agc(rt->gc.stats, collectedCount, zoneCount, compartmentCount, reason);
+        AutoDisableStoreBuffer adsb(this);
+
+        gcstats::AutoGCSlice agc(stats, collectedCount, zoneCount, compartmentCount, reason);
 
         /*
          * Let the API user decide to defer a GC if it wants to (unless this
          * is the last context). Invoke the callback regardless.
          */
-        if (rt->gc.incrementalState == NO_INCREMENTAL) {
-            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_GC_BEGIN);
-            if (JSGCCallback callback = rt->gc.callback)
-                callback(rt, JSGC_BEGIN, rt->gc.callbackData);
+        if (incrementalState == NO_INCREMENTAL) {
+            gcstats::AutoPhase ap(stats, gcstats::PHASE_GC_BEGIN);
+            if (gcCallback)
+                gcCallback(rt, JSGC_BEGIN, gcCallbackData);
         }
 
-        rt->gc.poke = false;
-        bool wasReset = GCCycle(rt, incremental, budget, gckind, reason);
-
-        if (rt->gc.incrementalState == NO_INCREMENTAL) {
-            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_GC_END);
-            if (JSGCCallback callback = rt->gc.callback)
-                callback(rt, JSGC_END, rt->gc.callbackData);
+        poke = false;
+        bool wasReset = gcCycle(incremental, budget, gckind, reason);
+
+        if (incrementalState == NO_INCREMENTAL) {
+            gcstats::AutoPhase ap(stats, gcstats::PHASE_GC_END);
+            if (gcCallback)
+                gcCallback(rt, JSGC_END, gcCallbackData);
         }
 
         /* Need to re-schedule all zones for GC. */
-        if (rt->gc.poke && rt->gc.shouldCleanUpEverything)
+        if (poke && shouldCleanUpEverything)
             JS::PrepareForFullGC(rt);
 
         /*
          * If we reset an existing GC, we need to start a new one. Also, we
          * repeat GCs that happen during shutdown (the shouldCleanUpEverything
          * case) until we can be sure that no additional garbage is created
          * (which typically happens if roots are dropped during finalizers).
          */
-        repeat = (rt->gc.poke && rt->gc.shouldCleanUpEverything) || wasReset;
+        repeat = (poke && shouldCleanUpEverything) || wasReset;
     } while (repeat);
 
-    if (rt->gc.incrementalState == NO_INCREMENTAL) {
+    if (incrementalState == NO_INCREMENTAL) {
 #ifdef JS_THREADSAFE
         EnqueuePendingParseTasksAfterGC(rt);
 #endif
     }
 }
 
 void
 js::GC(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason)
 {
-    Collect(rt, false, SliceBudget::Unlimited, gckind, reason);
+    rt->gc.collect(false, SliceBudget::Unlimited, gckind, reason);
 }
 
 void
 js::GCSlice(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis)
 {
-    int64_t sliceBudget;
+    rt->gc.gcSlice(gckind, reason, millis);
+}
+
+void
+GCRuntime::gcSlice(JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis)
+{
+    int64_t budget;
     if (millis)
-        sliceBudget = SliceBudget::TimeBudget(millis);
-    else if (rt->gc.highFrequencyGC && rt->gc.dynamicMarkSlice)
-        sliceBudget = rt->gc.sliceBudget * IGC_MARK_SLICE_MULTIPLIER;
+        budget = SliceBudget::TimeBudget(millis);
+    else if (highFrequencyGC && dynamicMarkSlice)
+        budget = sliceBudget * IGC_MARK_SLICE_MULTIPLIER;
     else
-        sliceBudget = rt->gc.sliceBudget;
-
-    Collect(rt, true, sliceBudget, gckind, reason);
+        budget = sliceBudget;
+
+    collect(true, budget, gckind, reason);
 }
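
For reference, the budget computed above is dual-mode: a time budget in
milliseconds when the caller supplies one, otherwise a work budget (or
SliceBudget::Unlimited). A minimal standalone analogue (MiniSliceBudget is
hypothetical; the real SliceBudget also amortizes its deadline checks):

    #include <chrono>
    #include <cstdint>
    #include <cstdio>

    class MiniSliceBudget {
        std::chrono::steady_clock::time_point deadline;
        int64_t workLeft;   // negative means "time-budgeted, no work limit"

      public:
        static MiniSliceBudget TimeBudget(int64_t ms) {
            MiniSliceBudget b;
            b.deadline = std::chrono::steady_clock::now() +
                         std::chrono::milliseconds(ms);
            b.workLeft = -1;
            return b;
        }
        static MiniSliceBudget WorkBudget(int64_t items) {
            MiniSliceBudget b;
            b.deadline = std::chrono::steady_clock::time_point::max();
            b.workLeft = items;
            return b;
        }
        void step(int64_t items = 1) {
            if (workLeft > 0)
                workLeft = workLeft > items ? workLeft - items : 0;
        }
        bool isOverBudget() const {
            return workLeft == 0 ||
                   std::chrono::steady_clock::now() >= deadline;
        }
    };

    int main()
    {
        MiniSliceBudget b = MiniSliceBudget::WorkBudget(3);
        int processed = 0;
        while (!b.isOverBudget()) {
            processed++;
            b.step();
        }
        std::printf("processed %d items before yielding\n", processed);
    }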
 
 void
 js::GCFinalSlice(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason)
 {
-    Collect(rt, true, SliceBudget::Unlimited, gckind, reason);
+    rt->gc.collect(true, SliceBudget::Unlimited, gckind, reason);
 }
 
 static bool
 ZonesSelected(JSRuntime *rt)
 {
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         if (zone->isGCScheduled())
             return true;
@@ -4836,17 +4878,17 @@ js::GCDebugSlice(JSRuntime *rt, bool lim
 {
     int64_t budget = limit ? SliceBudget::WorkBudget(objCount) : SliceBudget::Unlimited;
     if (!ZonesSelected(rt)) {
         if (JS::IsIncrementalGCInProgress(rt))
             JS::PrepareForIncrementalGC(rt);
         else
             JS::PrepareForFullGC(rt);
     }
-    Collect(rt, true, budget, GC_NORMAL, JS::gcreason::DEBUG_GC);
+    rt->gc.collect(true, budget, GC_NORMAL, JS::gcreason::DEBUG_GC);
 }
 
 /* Schedule a full GC unless a zone will already be collected. */
 void
 js::PrepareForDebugGC(JSRuntime *rt)
 {
     if (!ZonesSelected(rt))
         JS::PrepareForFullGC(rt);
@@ -4862,60 +4904,74 @@ JS::ShrinkGCBuffers(JSRuntime *rt)
         ExpireChunksAndArenas(rt, true);
     else
         rt->gc.helperThread.startBackgroundShrink();
 }
 
 void
 js::MinorGC(JSRuntime *rt, JS::gcreason::Reason reason)
 {
+    rt->gc.minorGC(reason);
+}
+
+void
+GCRuntime::minorGC(JS::gcreason::Reason reason)
+{
 #ifdef JSGC_GENERATIONAL
     TraceLogger *logger = TraceLoggerForMainThread(rt);
     AutoTraceLog logMinorGC(logger, TraceLogger::MinorGC);
-    rt->gc.nursery.collect(rt, reason, nullptr);
-    JS_ASSERT_IF(!rt->mainThread.suppressGC, rt->gc.nursery.isEmpty());
+    nursery.collect(rt, reason, nullptr);
+    JS_ASSERT_IF(!rt->mainThread.suppressGC, nursery.isEmpty());
 #endif
 }
 
 void
 js::MinorGC(JSContext *cx, JS::gcreason::Reason reason)
 {
     // Alternative to the runtime-taking form above; this form also allows
     // marking type objects as needing pretenuring.
+    cx->runtime()->gc.minorGC(cx, reason);
+}
+
+void
+GCRuntime::minorGC(JSContext *cx, JS::gcreason::Reason reason)
+{
 #ifdef JSGC_GENERATIONAL
-    TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
+    TraceLogger *logger = TraceLoggerForMainThread(rt);
     AutoTraceLog logMinorGC(logger, TraceLogger::MinorGC);
-
     Nursery::TypeObjectList pretenureTypes;
-    JSRuntime *rt = cx->runtime();
-    rt->gc.nursery.collect(cx->runtime(), reason, &pretenureTypes);
+    nursery.collect(rt, reason, &pretenureTypes);
     for (size_t i = 0; i < pretenureTypes.length(); i++) {
         if (pretenureTypes[i]->canPreTenure())
             pretenureTypes[i]->setShouldPreTenure(cx);
     }
-    JS_ASSERT_IF(!rt->mainThread.suppressGC, rt->gc.nursery.isEmpty());
+    JS_ASSERT_IF(!rt->mainThread.suppressGC, nursery.isEmpty());
 #endif
 }
 
 void
 js::gc::GCIfNeeded(JSContext *cx)
 {
-    JSRuntime *rt = cx->runtime();
-
+    cx->runtime()->gc.gcIfNeeded(cx);
+}
+
+void
+GCRuntime::gcIfNeeded(JSContext *cx)
+{
 #ifdef JSGC_GENERATIONAL
     /*
      * In case of store buffer overflow, perform a minor GC first so that the
      * correct reason is seen in the logs.
      */
-    if (rt->gc.storeBuffer.isAboutToOverflow())
-        MinorGC(cx, JS::gcreason::FULL_STORE_BUFFER);
+    if (storeBuffer.isAboutToOverflow())
+        minorGC(cx, JS::gcreason::FULL_STORE_BUFFER);
 #endif
 
-    if (rt->gc.isNeeded)
-        GCSlice(rt, GC_NORMAL, rt->gc.triggerReason);
+    if (isNeeded)
+        gcSlice(GC_NORMAL, triggerReason, 0);
 }
 
 void
 js::gc::FinishBackgroundFinalize(JSRuntime *rt)
 {
     rt->gc.helperThread.waitBackgroundSweepEnd();
 }
 
@@ -4929,17 +4985,17 @@ AutoFinishGC::AutoFinishGC(JSRuntime *rt
     gc::FinishBackgroundFinalize(rt);
 }
 
 AutoPrepareForTracing::AutoPrepareForTracing(JSRuntime *rt, ZoneSelector selector)
   : finish(rt),
     session(rt),
     copy(rt, selector)
 {
-    RecordNativeStackTopForGC(rt);
+    rt->gc.recordNativeStackTopForGC();
 }
 
 JSCompartment *
 js::NewCompartment(JSContext *cx, Zone *zone, JSPrincipals *principals,
                    const JS::CompartmentOptions &options)
 {
     JSRuntime *rt = cx->runtime();
     JS_AbortIfWrongThread(rt);
@@ -5031,63 +5087,68 @@ gc::MergeCompartments(JSCompartment *sou
 
     // Merge other info in source's zone into target's zone.
     target->zone()->types.typeLifoAlloc.transferFrom(&source->zone()->types.typeLifoAlloc);
 }
 
 void
 gc::RunDebugGC(JSContext *cx)
 {
+    cx->runtime()->gc.runDebugGC();
+}
+
+void
+GCRuntime::runDebugGC()
+{
 #ifdef JS_GC_ZEAL
-    JSRuntime *rt = cx->runtime();
-    int type = rt->gcZeal();
+    int type = zealMode;
 
     if (rt->mainThread.suppressGC)
         return;
 
     if (type == js::gc::ZealGenerationalGCValue)
         return MinorGC(rt, JS::gcreason::DEBUG_GC);
 
-    PrepareForDebugGC(cx->runtime());
+    PrepareForDebugGC(rt);
 
     if (type == ZealIncrementalRootsThenFinish ||
         type == ZealIncrementalMarkAllThenFinish ||
         type == ZealIncrementalMultipleSlices)
     {
-        js::gc::State initialState = rt->gc.incrementalState;
+        js::gc::State initialState = incrementalState;
         int64_t budget;
         if (type == ZealIncrementalMultipleSlices) {
             /*
              * Start with a small slice limit and double it every slice. This
              * ensures that we get multiple slices and that the collection
              * runs to completion.
              */
             if (initialState == NO_INCREMENTAL)
-                rt->gc.incrementalLimit = rt->gc.zealFrequency / 2;
+                incrementalLimit = zealFrequency / 2;
             else
-                rt->gc.incrementalLimit *= 2;
-            budget = SliceBudget::WorkBudget(rt->gc.incrementalLimit);
+                incrementalLimit *= 2;
+            budget = SliceBudget::WorkBudget(incrementalLimit);
         } else {
             // This triggers incremental GC but the budget is actually
             // ignored by incrementalCollectSlice.
             budget = SliceBudget::WorkBudget(1);
         }
 
-        Collect(rt, true, budget, GC_NORMAL, JS::gcreason::DEBUG_GC);
+        collect(true, budget, GC_NORMAL, JS::gcreason::DEBUG_GC);
 
         /*
          * For multi-slice zeal, reset the slice size when we get to the sweep
          * phase.
          */
         if (type == ZealIncrementalMultipleSlices &&
-            initialState == MARK && rt->gc.incrementalState == SWEEP)
+            initialState == MARK && incrementalState == SWEEP)
         {
-            rt->gc.incrementalLimit = rt->gc.zealFrequency / 2;
+            incrementalLimit = zealFrequency / 2;
         }
     } else {
-        Collect(rt, false, SliceBudget::Unlimited, GC_NORMAL, JS::gcreason::DEBUG_GC);
+        collect(false, SliceBudget::Unlimited, GC_NORMAL, JS::gcreason::DEBUG_GC);
     }
 
 #endif
 }
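
The ZealIncrementalMultipleSlices branch above doubles incrementalLimit on
every slice, which both forces multiple slices and guarantees termination:
the doubling budget eventually covers any fixed amount of remaining work.
The growth in isolation (totalWork is an arbitrary stand-in):

    #include <cstdio>

    int main()
    {
        const int totalWork = 1000;            // pretend amount of GC work
        int done = 0, limit = 10, slices = 0;  // cf. incrementalLimit

        while (done < totalWork) {
            int work = totalWork - done < limit ? totalWork - done : limit;
            done += work;
            slices++;
            std::printf("slice %d: %d units (limit %d)\n",
                        slices, work, limit);
            limit *= 2;                        // double the budget each slice
        }
    }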
 
 void
 gc::SetDeterministicGC(JSContext *cx, bool enabled)
 {
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -84,18 +84,16 @@ class ChunkPool {
     ChunkPool()
       : emptyChunkListHead(nullptr),
         emptyCount(0) { }
 
     size_t getEmptyCount() const {
         return emptyCount;
     }
 
-    inline bool wantBackgroundAllocation(JSRuntime *rt) const;
-
     /* Must be called with the GC lock taken. */
     inline Chunk *get(JSRuntime *rt);
 
     /* Must be called either during the GC or with the GC lock taken. */
     inline void put(Chunk *chunk);
 
     /*
      * Return the list of chunks that can be released outside the GC lock.
--- a/js/src/jspubtd.h
+++ b/js/src/jspubtd.h
@@ -154,16 +154,17 @@ typedef void
 (* JSTraceDataOp)(JSTracer *trc, void *data);
 
 void js_FinishGC(JSRuntime *rt);
 
 namespace js {
 namespace gc {
 class StoreBuffer;
 void MarkPersistentRootedChains(JSTracer *);
+void FinishPersistentRootedChains(JSRuntime *);
 }
 }
 
 namespace JS {
 
 typedef void (*OffThreadCompileCallback)(void *token, void *callbackData);
 
 namespace shadow {
@@ -207,17 +208,17 @@ struct Runtime
     static JS::shadow::Runtime *asShadowRuntime(JSRuntime *rt) {
         return reinterpret_cast<JS::shadow::Runtime*>(rt);
     }
 
     /* Allow inlining of PersistentRooted constructors and destructors. */
   private:
     template <typename Referent> friend class JS::PersistentRooted;
     friend void js::gc::MarkPersistentRootedChains(JSTracer *);
-    friend void ::js_FinishGC(JSRuntime *rt);
+    friend void js::gc::FinishPersistentRootedChains(JSRuntime *rt);
 
     mozilla::LinkedList<PersistentRootedFunction> functionPersistentRooteds;
     mozilla::LinkedList<PersistentRootedId>       idPersistentRooteds;
     mozilla::LinkedList<PersistentRootedObject>   objectPersistentRooteds;
     mozilla::LinkedList<PersistentRootedScript>   scriptPersistentRooteds;
     mozilla::LinkedList<PersistentRootedString>   stringPersistentRooteds;
     mozilla::LinkedList<PersistentRootedValue>    valuePersistentRooteds;
 
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -274,20 +274,17 @@ JSRuntime::init(uint32_t maxbytes)
     if (!mainThread.init())
         return false;
 
     js::TlsPerThreadData.set(&mainThread);
 
     if (!threadPool.init())
         return false;
 
-    if (!js_InitGC(this, maxbytes))
-        return false;
-
-    if (!gc.marker.init(gcMode()))
+    if (!gc.init(maxbytes))
         return false;
 
     const char *size = getenv("JSGC_MARK_STACK_LIMIT");
     if (size)
         SetMarkStackLimit(this, atoi(size));
 
     ScopedJSDeletePtr<Zone> atomsZone(new_<Zone>(this));
     if (!atomsZone)
@@ -424,17 +421,17 @@ JSRuntime::~JSRuntime()
                 cxcount, (cxcount == 1) ? "" : "s");
     }
 #endif
 
 #if !EXPOSE_INTL_API
     FinishRuntimeNumberState(this);
 #endif
 
-    js_FinishGC(this);
+    gc.finish();
     atomsCompartment_ = nullptr;
 
 #ifdef JS_THREADSAFE
     if (gc.lock)
         PR_DestroyLock(gc.lock);
 #endif
 
     js_free(defaultLocale);
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -920,20 +920,20 @@ struct JSRuntime : public JS::shadow::Ru
     bool                gcInitialized;
 
     JSGCMode gcMode() const { return gc.mode; }
     void setGCMode(JSGCMode mode) {
         gc.mode = mode;
         gc.marker.setGCMode(mode);
     }
 
-    bool isHeapBusy() { return gc.heapState != js::Idle; }
-    bool isHeapMajorCollecting() { return gc.heapState == js::MajorCollecting; }
-    bool isHeapMinorCollecting() { return gc.heapState == js::MinorCollecting; }
-    bool isHeapCollecting() { return isHeapMajorCollecting() || isHeapMinorCollecting(); }
+    bool isHeapBusy() { return gc.isHeapBusy(); }
+    bool isHeapMajorCollecting() { return gc.isHeapMajorCollecting(); }
+    bool isHeapMinorCollecting() { return gc.isHeapMinorCollecting(); }
+    bool isHeapCollecting() { return gc.isHeapCollecting(); }
 
 #ifdef JS_GC_ZEAL
     int gcZeal() { return gc.zealMode; }
 
     bool upcomingZealousGC() {
         return gc.nextScheduled == 1;
     }