Bug 988486 - Split out GC state from JSRuntime into new class GCRuntime r=terrence
☠☠ backed out by 85d37fc36546 ☠☠
author Jon Coppeard <jcoppeard@mozilla.com>
Wed, 30 Apr 2014 12:13:55 +0100
changeset 181058 3e6abdf3b4b4fbab07b722205f2d0b478b29f1e4
parent 181057 602ddd08eab90a2714f05b144b86339c47243aa5
child 181059 85d37fc36546aa776336ddb88261c2b6f7c8f523
push id 26693
push user emorley@mozilla.com
push date Thu, 01 May 2014 14:50:08 +0000
treeherder mozilla-central@51bc58066ac9
reviewers terrence
bugs 988486
milestone 32.0a1
Bug 988486 - Split out GC state from JSRuntime into new class GCRuntime r=terrence
js/src/builtin/MapObject.cpp
js/src/builtin/TestingFunctions.cpp
js/src/gc/GCInternals.h
js/src/gc/GCRuntime.h
js/src/gc/Iteration.cpp
js/src/gc/Marking.cpp
js/src/gc/Nursery.cpp
js/src/gc/RootMarking.cpp
js/src/gc/Statistics.cpp
js/src/gc/StoreBuffer.cpp
js/src/gc/Tracer.cpp
js/src/gc/Verifier.cpp
js/src/gc/Zone.cpp
js/src/gc/Zone.h
js/src/jit/BaselineIC.cpp
js/src/jit/BaselineJIT.cpp
js/src/jit/CompileWrappers.cpp
js/src/jit/Ion.cpp
js/src/jit/IonFrames.cpp
js/src/jit/IonLinker.h
js/src/jit/JitCompartment.h
js/src/jit/PcScriptCache.h
js/src/jit/VMFunctions.cpp
js/src/jsapi-tests/testGCFinalizeCallback.cpp
js/src/jsapi.cpp
js/src/jscntxt.h
js/src/jscompartment.cpp
js/src/jsfriendapi.cpp
js/src/jsgc.cpp
js/src/jsgc.h
js/src/jsgcinlines.h
js/src/jsinfer.cpp
js/src/jsiter.cpp
js/src/jsobj.cpp
js/src/jsobjinlines.h
js/src/jsopcode.cpp
js/src/jsproxy.cpp
js/src/jsscript.cpp
js/src/jsweakmap.cpp
js/src/vm/ArrayBufferObject.h
js/src/vm/ForkJoin.cpp
js/src/vm/MemoryMetrics.cpp
js/src/vm/RegExpObject.cpp
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
js/src/vm/ScopeObject.cpp
js/src/vm/Shape.cpp
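
The change is almost entirely mechanical: fields that previously lived directly on JSRuntime under gc-prefixed names (gcStoreBuffer, gcBytes, gcIncrementalState, ...) move into the new js::gc::GCRuntime class and are reached through the runtime's gc member, so call sites are rewritten from rt->gcFoo to rt->gc.foo. A minimal self-contained sketch of the before/after shape (the stand-in types below are illustrative only, not the real SpiderMonkey declarations):

    // Illustrative sketch of the refactoring pattern in this patch.
    #include <cstddef>

    struct StoreBuffer { /* post-barrier buffer, details omitted */ };

    // Before: GC state as individual, gc-prefixed members of the runtime.
    struct JSRuntimeBefore {
        StoreBuffer gcStoreBuffer;
        size_t      gcBytes;
    };

    // After: the same state grouped into one class, owned by the runtime.
    struct GCRuntime {
        StoreBuffer storeBuffer;
        size_t      bytes;
    };
    struct JSRuntimeAfter {
        GCRuntime gc;
    };

    // Call sites change accordingly, e.g. rt->gcBytes becomes rt->gc.bytes.
    size_t heapBytes(JSRuntimeAfter *rt) { return rt->gc.bytes; }
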
--- a/js/src/builtin/MapObject.cpp
+++ b/js/src/builtin/MapObject.cpp
@@ -1131,27 +1131,27 @@ class OrderedHashTableRef : public gc::B
 };
 #endif
 
 static void
 WriteBarrierPost(JSRuntime *rt, ValueMap *map, const HashableValue &key)
 {
 #ifdef JSGC_GENERATIONAL
     typedef OrderedHashMap<Value, Value, UnbarrieredHashPolicy, RuntimeAllocPolicy> UnbarrieredMap;
-    rt->gcStoreBuffer.putGeneric(OrderedHashTableRef<UnbarrieredMap>(
+    rt->gc.storeBuffer.putGeneric(OrderedHashTableRef<UnbarrieredMap>(
                 reinterpret_cast<UnbarrieredMap *>(map), key.get()));
 #endif
 }
 
 static void
 WriteBarrierPost(JSRuntime *rt, ValueSet *set, const HashableValue &key)
 {
 #ifdef JSGC_GENERATIONAL
     typedef OrderedHashSet<Value, UnbarrieredHashPolicy, RuntimeAllocPolicy> UnbarrieredSet;
-    rt->gcStoreBuffer.putGeneric(OrderedHashTableRef<UnbarrieredSet>(
+    rt->gc.storeBuffer.putGeneric(OrderedHashTableRef<UnbarrieredSet>(
                 reinterpret_cast<UnbarrieredSet *>(set), key.get()));
 #endif
 }
 
 void
 MapObject::finalize(FreeOp *fop, JSObject *obj)
 {
     if (ValueMap *map = obj->as<MapObject>().getData())
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -233,44 +233,44 @@ GC(JSContext *cx, unsigned argc, jsval *
                 return false;
         } else if (arg.isObject()) {
             PrepareZoneForGC(UncheckedUnwrap(&arg.toObject())->zone());
             compartment = true;
         }
     }
 
 #ifndef JS_MORE_DETERMINISTIC
-    size_t preBytes = cx->runtime()->gcBytes;
+    size_t preBytes = cx->runtime()->gc.bytes;
 #endif
 
     if (compartment)
         PrepareForDebugGC(cx->runtime());
     else
         PrepareForFullGC(cx->runtime());
     GCForReason(cx->runtime(), gcreason::API);
 
     char buf[256] = { '\0' };
 #ifndef JS_MORE_DETERMINISTIC
     JS_snprintf(buf, sizeof(buf), "before %lu, after %lu\n",
-                (unsigned long)preBytes, (unsigned long)cx->runtime()->gcBytes);
+                (unsigned long)preBytes, (unsigned long)cx->runtime()->gc.bytes);
 #endif
     JSString *str = JS_NewStringCopyZ(cx, buf);
     if (!str)
         return false;
     args.rval().setString(str);
     return true;
 }
 
 static bool
 MinorGC(JSContext *cx, unsigned argc, jsval *vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 #ifdef JSGC_GENERATIONAL
     if (args.get(0) == BooleanValue(true))
-        cx->runtime()->gcStoreBuffer.setAboutToOverflow();
+        cx->runtime()->gc.storeBuffer.setAboutToOverflow();
 
     MinorGC(cx, gcreason::API);
 #endif
     args.rval().setUndefined();
     return true;
 }
 
 static const struct ParamPair {
@@ -440,17 +440,17 @@ GCPreserveCode(JSContext *cx, unsigned a
     CallArgs args = CallArgsFromVp(argc, vp);
 
     if (args.length() != 0) {
         RootedObject callee(cx, &args.callee());
         ReportUsageError(cx, callee, "Wrong number of arguments");
         return false;
     }
 
-    cx->runtime()->alwaysPreserveCode = true;
+    cx->runtime()->gc.alwaysPreserveCode = true;
 
     args.rval().setUndefined();
     return true;
 }
 
 #ifdef JS_GC_ZEAL
 static bool
 GCZeal(JSContext *cx, unsigned argc, Value *vp)
@@ -508,17 +508,17 @@ ScheduleGC(JSContext *cx, unsigned argc,
 static bool
 SelectForGC(JSContext *cx, unsigned argc, Value *vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 
     JSRuntime *rt = cx->runtime();
     for (unsigned i = 0; i < args.length(); i++) {
         if (args[i].isObject()) {
-            if (!rt->gcSelectedForMarking.append(&args[i].toObject()))
+            if (!rt->gc.selectedForMarking.append(&args[i].toObject()))
                 return false;
         }
     }
 
     args.rval().setUndefined();
     return true;
 }
 
@@ -559,17 +559,17 @@ GCState(JSContext *cx, unsigned argc, js
 
     if (args.length() != 0) {
         RootedObject callee(cx, &args.callee());
         ReportUsageError(cx, callee, "Too many arguments");
         return false;
     }
 
     const char *state;
-    gc::State globalState = cx->runtime()->gcIncrementalState;
+    gc::State globalState = cx->runtime()->gc.incrementalState;
     if (globalState == gc::NO_INCREMENTAL)
         state = "none";
     else if (globalState == gc::MARK)
         state = "mark";
     else if (globalState == gc::SWEEP)
         state = "sweep";
     else
         MOZ_ASSUME_UNREACHABLE("Unobserveable global GC state");
--- a/js/src/gc/GCInternals.h
+++ b/js/src/gc/GCInternals.h
@@ -119,21 +119,21 @@ class AutoStopVerifyingBarriers
     bool restartPostVerifier;
     MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
 
   public:
     AutoStopVerifyingBarriers(JSRuntime *rt, bool isShutdown
                               MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
       : runtime(rt)
     {
-        restartPreVerifier = !isShutdown && rt->gcVerifyPreData;
-        restartPostVerifier = !isShutdown && rt->gcVerifyPostData && JS::IsGenerationalGCEnabled(rt);
-        if (rt->gcVerifyPreData)
+        restartPreVerifier = !isShutdown && rt->gc.verifyPreData;
+        restartPostVerifier = !isShutdown && rt->gc.verifyPostData && JS::IsGenerationalGCEnabled(rt);
+        if (rt->gc.verifyPreData)
             EndVerifyPreBarriers(rt);
-        if (rt->gcVerifyPostData)
+        if (rt->gc.verifyPostData)
             EndVerifyPostBarriers(rt);
         MOZ_GUARD_OBJECT_NOTIFIER_INIT;
     }
 
     ~AutoStopVerifyingBarriers() {
         if (restartPreVerifier)
             StartVerifyPreBarriers(runtime);
         if (restartPostVerifier)
new file mode 100644
--- /dev/null
+++ b/js/src/gc/GCRuntime.h
@@ -0,0 +1,371 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCRuntime_h
+#define gc_GCRuntime_h
+
+#include "jsgc.h"
+
+#include "gc/Heap.h"
+#ifdef JSGC_GENERATIONAL
+# include "gc/Nursery.h"
+#endif
+#include "gc/Statistics.h"
+#ifdef JSGC_GENERATIONAL
+# include "gc/StoreBuffer.h"
+#endif
+
+namespace js {
+namespace gc {
+
+typedef Vector<JS::Zone *, 4, SystemAllocPolicy> ZoneVector;
+
+class MarkingValidator;
+
+struct ConservativeGCData
+{
+    /*
+     * The GC scans conservatively between ThreadData::nativeStackBase and
+     * nativeStackTop unless the latter is nullptr.
+     */
+    uintptr_t           *nativeStackTop;
+
+    union {
+        jmp_buf         jmpbuf;
+        uintptr_t       words[JS_HOWMANY(sizeof(jmp_buf), sizeof(uintptr_t))];
+    } registerSnapshot;
+
+    ConservativeGCData() {
+        mozilla::PodZero(this);
+    }
+
+    ~ConservativeGCData() {
+#ifdef JS_THREADSAFE
+        /*
+         * The conservative GC scanner should be disabled when the thread leaves
+         * the last request.
+         */
+        JS_ASSERT(!hasStackToScan());
+#endif
+    }
+
+    MOZ_NEVER_INLINE void recordStackTop();
+
+#ifdef JS_THREADSAFE
+    void updateForRequestEnd() {
+        nativeStackTop = nullptr;
+    }
+#endif
+
+    bool hasStackToScan() const {
+        return !!nativeStackTop;
+    }
+};
+
+class GCRuntime
+{
+  public:
+    GCRuntime(JSRuntime *rt);
+
+  public:  // Internal state, public for now
+
+    /* Embedders can use this zone however they wish. */
+    JS::Zone              *systemZone;
+
+    /* List of compartments and zones (protected by the GC lock). */
+    js::gc::ZoneVector    zones;
+
+    js::gc::SystemPageAllocator pageAllocator;
+
+    /*
+     * Set of all GC chunks with at least one allocated thing. The
+     * conservative GC uses it to quickly check if a possible GC thing points
+     * into an allocated chunk.
+     */
+    js::GCChunkSet        chunkSet;
+
+    /*
+     * Doubly-linked lists of chunks from user and system compartments. The GC
+     * allocates its arenas from the corresponding list and when all arenas
+     * in the list head are taken, then the chunk is removed from the list.
+     * During the GC when all arenas in a chunk become free, that chunk is
+     * removed from the list and scheduled for release.
+     */
+    js::gc::Chunk         *systemAvailableChunkListHead;
+    js::gc::Chunk         *userAvailableChunkListHead;
+    js::gc::ChunkPool     chunkPool;
+
+    js::RootedValueMap    rootsHash;
+
+    /* This is updated by both the main and GC helper threads. */
+    mozilla::Atomic<size_t, mozilla::ReleaseAcquire>   bytes;
+
+    size_t                maxBytes;
+    size_t                maxMallocBytes;
+
+    /*
+     * Number of the committed arenas in all GC chunks including empty chunks.
+     */
+    mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire>   numArenasFreeCommitted;
+    js::GCMarker          marker;
+    void                  *verifyPreData;
+    void                  *verifyPostData;
+    bool                  chunkAllocationSinceLastGC;
+    int64_t               nextFullGCTime;
+    int64_t               lastGCTime;
+    int64_t               jitReleaseTime;
+
+    JSGCMode              mode;
+
+    size_t                allocationThreshold;
+    bool                  highFrequencyGC;
+    uint64_t              highFrequencyTimeThreshold;
+    uint64_t              highFrequencyLowLimitBytes;
+    uint64_t              highFrequencyHighLimitBytes;
+    double                highFrequencyHeapGrowthMax;
+    double                highFrequencyHeapGrowthMin;
+    double                lowFrequencyHeapGrowth;
+    bool                  dynamicHeapGrowth;
+    bool                  dynamicMarkSlice;
+    uint64_t              decommitThreshold;
+
+    /* During shutdown, the GC needs to clean up every possible object. */
+    bool                  shouldCleanUpEverything;
+
+    /*
+     * The gray bits can become invalid if UnmarkGray overflows the stack. A
+     * full GC will reset this bit, since it fills in all the gray bits.
+     */
+    bool                  grayBitsValid;
+
+    /*
+     * These flags must be kept separate so that a thread requesting a
+     * compartment GC doesn't cancel another thread's concurrent request for a
+     * full GC.
+     */
+    volatile uintptr_t    isNeeded;
+
+    js::gcstats::Statistics stats;
+
+    /* Incremented on every GC slice. */
+    uint64_t              number;
+
+    /* The number at the time of the most recent GC's first slice. */
+    uint64_t              startNumber;
+
+    /* Whether the currently running GC can finish in multiple slices. */
+    bool                  isIncremental;
+
+    /* Whether all compartments are being collected in the first GC slice. */
+    bool                  isFull;
+
+    /* The reason that an interrupt-triggered GC should be called. */
+    JS::gcreason::Reason  triggerReason;
+
+    /*
+     * If this is true, all marked objects must belong to a compartment being
+     * GCed. This is used to look for compartment bugs.
+     */
+    bool                  strictCompartmentChecking;
+
+#ifdef DEBUG
+    /*
+     * If this is 0, all cross-compartment proxies must be registered in the
+     * wrapper map. This checking must be disabled temporarily while creating
+     * new wrappers. When non-zero, this records the recursion depth of wrapper
+     * creation.
+     */
+    uintptr_t             disableStrictProxyCheckingCount;
+#else
+    uintptr_t             unused1;
+#endif
+
+    /*
+     * The current incremental GC phase. This is also used internally in
+     * non-incremental GC.
+     */
+    js::gc::State         incrementalState;
+
+    /* Indicates that the last incremental slice exhausted the mark stack. */
+    bool                  lastMarkSlice;
+
+    /* Whether any sweeping will take place in the separate GC helper thread. */
+    bool                  sweepOnBackgroundThread;
+
+    /* Whether any black->gray edges were found during marking. */
+    bool                  foundBlackGrayEdges;
+
+    /* List head of zones to be swept in the background. */
+    JS::Zone              *sweepingZones;
+
+    /* Index of current zone group (for stats). */
+    unsigned              zoneGroupIndex;
+
+    /*
+     * Incremental sweep state.
+     */
+    JS::Zone              *zoneGroups;
+    JS::Zone              *currentZoneGroup;
+    int                   sweepPhase;
+    JS::Zone              *sweepZone;
+    int                   sweepKindIndex;
+    bool                  abortSweepAfterCurrentGroup;
+
+    /*
+     * List head of arenas allocated during the sweep phase.
+     */
+    js::gc::ArenaHeader   *arenasAllocatedDuringSweep;
+
+#ifdef DEBUG
+    js::gc::MarkingValidator *markingValidator;
+#endif
+
+    /*
+     * Indicates that a GC slice has taken place in the middle of an animation
+     * frame, rather than at the beginning. In this case, the next slice will be
+     * delayed so that we don't get back-to-back slices.
+     */
+    volatile uintptr_t    interFrameGC;
+
+    /* Default budget for incremental GC slice. See SliceBudget in jsgc.h. */
+    int64_t               sliceBudget;
+
+    /*
+     * We disable incremental GC if we encounter a js::Class with a trace hook
+     * that does not implement write barriers.
+     */
+    bool                  incrementalEnabled;
+
+    /*
+     * GGC can be enabled from the command line while testing.
+     */
+    unsigned              generationalDisabled;
+
+    /*
+     * This is true if we are in the middle of a brain transplant (e.g.,
+     * JS_TransplantObject) or some other operation that can manipulate
+     * dead zones.
+     */
+    bool                  manipulatingDeadZones;
+
+    /*
+     * This field is incremented each time we mark an object inside a
+     * zone with no incoming cross-compartment pointers. Typically if
+     * this happens it signals that an incremental GC is marking too much
+     * stuff. At various times we check this counter and, if it has changed, we
+     * run an immediate, non-incremental GC to clean up the dead
+     * zones. This should happen very rarely.
+     */
+    unsigned              objectsMarkedInDeadZones;
+
+    bool                  poke;
+
+    volatile js::HeapState heapState;
+
+#ifdef JSGC_GENERATIONAL
+    js::Nursery           nursery;
+    js::gc::StoreBuffer   storeBuffer;
+#endif
+
+    /*
+     * These options control the zealousness of the GC. The fundamental values
+     * are nextScheduled and gcDebugCompartmentGC. At every allocation,
+     * nextScheduled is decremented. When it reaches zero, we do either a
+     * full or a compartmental GC, based on debugCompartmentGC.
+     *
+     * At this point, if zeal_ is one of the types that trigger periodic
+     * collection, then nextScheduled is reset to the value of zealFrequency.
+     * Otherwise, no additional GCs take place.
+     *
+     * You can control these values in several ways:
+     *   - Pass the -Z flag to the shell (see the usage info for details)
+     *   - Call zeal() or schedulegc() from inside shell-executed JS code
+     *     (see the help for details)
+     *
+     * If zeal_ == 1 then we perform GCs in select places (during MaybeGC and
+     * whenever a GC poke happens). This option is mainly useful to embedders.
+     *
+     * We use zeal_ == 4 to enable write barrier verification. See the comment
+     * in jsgc.cpp for more information about this.
+     *
+     * zeal_ values from 8 to 10 periodically run different types of
+     * incremental GC.
+     */
+#ifdef JS_GC_ZEAL
+    int                   zealMode;
+    int                   zealFrequency;
+    int                   nextScheduled;
+    bool                  deterministicOnly;
+    int                   incrementalLimit;
+
+    js::Vector<JSObject *, 0, js::SystemAllocPolicy>   selectedForMarking;
+#endif
+
+    bool                  validate;
+    bool                  fullCompartmentChecks;
+
+    JSGCCallback          callback;
+    JS::GCSliceCallback   sliceCallback;
+    JSFinalizeCallback    finalizeCallback;
+
+    void                  *callbackData;
+
+    /*
+     * Malloc counter to measure memory pressure for GC scheduling. It runs
+     * from maxMallocBytes down to zero.
+     */
+    mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire>   mallocBytes;
+
+    /*
+     * Whether a GC has been triggered as a result of mallocBytes falling
+     * below zero.
+     */
+    mozilla::Atomic<bool, mozilla::ReleaseAcquire>   mallocGCTriggered;
+
+    /*
+     * The trace operations to trace embedding-specific GC roots. One is for
+     * tracing through black roots and the other is for tracing through gray
+     * roots. The black/gray distinction is only relevant to the cycle
+     * collector.
+     */
+    typedef js::Vector<ExtraTracer, 4, js::SystemAllocPolicy> ExtraTracerVector;
+    ExtraTracerVector     blackRootTracers;
+    ExtraTracer           grayRootTracer;
+
+    /*
+     * The GC can only safely decommit memory when the page size of the
+     * running process matches the compiled arena size.
+     */
+    size_t                systemPageSize;
+
+    /* The OS allocation granularity may not match the page size. */
+    size_t                systemAllocGranularity;
+
+    /* Strong references on scripts held for PCCount profiling API. */
+    js::ScriptAndCountsVector *scriptAndCountsVector;
+
+    /* Always preserve JIT code during GCs, for testing. */
+    bool                  alwaysPreserveCode;
+
+#ifdef DEBUG
+    size_t                noGCOrAllocationCheck;
+#endif
+
+    /* Synchronize GC heap access between main thread and GCHelperThread. */
+    PRLock   *lock;
+    mozilla::DebugOnly<PRThread *>   lockOwner;
+
+    friend class js::GCHelperThread;
+
+    js::GCHelperThread    helperThread;
+
+    ConservativeGCData    conservativeGC;
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif
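
vm/Runtime.h and vm/Runtime.cpp appear in the file list but their hunks are not shown in this excerpt; presumably JSRuntime gains a js::gc::GCRuntime member named gc, constructed with a back pointer via the GCRuntime(JSRuntime *) constructor declared above, which is what the rewritten rt->gc.* accesses in the remaining files dereference. A hedged, self-contained sketch of that ownership shape, using illustrative stand-in members:

    // Sketch only: a runtime owning its GC state by value while the GC state
    // keeps a back pointer to the runtime, mirroring GCRuntime(JSRuntime *rt).
    #include <cstddef>
    #include <cstdint>

    struct JSRuntime;  // forward declaration for the back pointer

    struct GCRuntime {
        explicit GCRuntime(JSRuntime *rt) : runtime(rt), bytes(0), number(0) {}
        JSRuntime *runtime;   // owning runtime, used for callbacks and assertions
        size_t     bytes;     // formerly JSRuntime::gcBytes
        uint64_t   number;    // formerly JSRuntime::gcNumber
    };

    struct JSRuntime {
        JSRuntime() : gc(this) {}
        GCRuntime gc;         // all GC state now lives behind this member
    };
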
--- a/js/src/gc/Iteration.cpp
+++ b/js/src/gc/Iteration.cpp
@@ -79,17 +79,17 @@ js::IterateZoneCompartmentsArenasCells(J
                                    compartmentCallback, arenaCallback, cellCallback);
 }
 
 void
 js::IterateChunks(JSRuntime *rt, void *data, IterateChunkCallback chunkCallback)
 {
     AutoPrepareForTracing prep(rt, SkipAtoms);
 
-    for (js::GCChunkSet::Range r = rt->gcChunkSet.all(); !r.empty(); r.popFront())
+    for (js::GCChunkSet::Range r = rt->gc.chunkSet.all(); !r.empty(); r.popFront())
         chunkCallback(rt, data, r.front());
 }
 
 void
 js::IterateScripts(JSRuntime *rt, JSCompartment *compartment,
                    void *data, IterateScriptCallback scriptCallback)
 {
     MinorGC(rt, JS::gcreason::EVICT_NURSERY);
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -164,29 +164,29 @@ CheckMarkedThing(JSTracer *trc, T *thing
         return;
 
     JS_ASSERT(thing->zone());
     JS_ASSERT(thing->zone()->runtimeFromMainThread() == trc->runtime());
     JS_ASSERT(trc->hasTracingDetails());
 
     DebugOnly<JSRuntime *> rt = trc->runtime();
 
-    JS_ASSERT_IF(IS_GC_MARKING_TRACER(trc) && rt->gcManipulatingDeadZones,
+    JS_ASSERT_IF(IS_GC_MARKING_TRACER(trc) && rt->gc.manipulatingDeadZones,
                  !thing->zone()->scheduledForDestruction);
 
     JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
     JS_ASSERT_IF(thing->zone()->requireGCTracer(),
                  IS_GC_MARKING_TRACER(trc));
 
     JS_ASSERT(thing->isAligned());
 
     JS_ASSERT(MapTypeToTraceKind<T>::kind == GetGCThingTraceKind(thing));
 
-    JS_ASSERT_IF(rt->gcStrictCompartmentChecking,
+    JS_ASSERT_IF(rt->gc.strictCompartmentChecking,
                  thing->zone()->isCollecting() || rt->isAtomsZone(thing->zone()));
 
     JS_ASSERT_IF(IS_GC_MARKING_TRACER(trc) && AsGCMarker(trc)->getMarkColor() == GRAY,
                  !thing->zone()->isGCMarkingBlack() || rt->isAtomsZone(thing->zone()));
 
     JS_ASSERT_IF(IS_GC_MARKING_TRACER(trc),
                  !(thing->zone()->isGCSweeping() || thing->zone()->isGCFinished()));
 
@@ -242,18 +242,18 @@ MarkInternal(JSTracer *trc, T **thingp)
         trc->unsetTracingLocation();
     }
 
     trc->clearTracingDetails();
 }
 
 #define JS_ROOT_MARKING_ASSERT(trc)                                     \
     JS_ASSERT_IF(IS_GC_MARKING_TRACER(trc),                             \
-                 trc->runtime()->gcIncrementalState == NO_INCREMENTAL ||  \
-                 trc->runtime()->gcIncrementalState == MARK_ROOTS);
+                 trc->runtime()->gc.incrementalState == NO_INCREMENTAL ||       \
+                 trc->runtime()->gc.incrementalState == MARK_ROOTS);
 
 namespace js {
 namespace gc {
 
 template <typename T>
 void
 MarkUnbarriered(JSTracer *trc, T **thingp, const char *name)
 {
@@ -334,17 +334,17 @@ namespace gc {
 
 template <typename T>
 static bool
 IsMarked(T **thingp)
 {
     JS_ASSERT(thingp);
     JS_ASSERT(*thingp);
 #ifdef JSGC_GENERATIONAL
-    Nursery &nursery = (*thingp)->runtimeFromMainThread()->gcNursery;
+    Nursery &nursery = (*thingp)->runtimeFromMainThread()->gc.nursery;
     if (nursery.isInside(*thingp))
         return nursery.getForwardedPointer(thingp);
 #endif
     Zone *zone = (*thingp)->tenuredZone();
     if (!zone->isCollecting() || zone->isGCFinished())
         return true;
     return (*thingp)->isMarked();
 }
@@ -359,17 +359,17 @@ IsAboutToBeFinalized(T **thingp)
     T *thing = *thingp;
     JSRuntime *rt = thing->runtimeFromAnyThread();
 
     /* Permanent atoms are never finalized by non-owning runtimes. */
     if (ThingIsPermanentAtom(thing) && !TlsPerThreadData.get()->associatedWith(rt))
         return false;
 
 #ifdef JSGC_GENERATIONAL
-    Nursery &nursery = rt->gcNursery;
+    Nursery &nursery = rt->gc.nursery;
     JS_ASSERT_IF(!rt->isHeapMinorCollecting(), !nursery.isInside(thing));
     if (rt->isHeapMinorCollecting()) {
         if (nursery.isInside(thing))
             return !nursery.getForwardedPointer(thingp);
         return false;
     }
 #endif
 
@@ -389,18 +389,18 @@ IsAboutToBeFinalized(T **thingp)
 }
 
 template <typename T>
 T *
 UpdateIfRelocated(JSRuntime *rt, T **thingp)
 {
     JS_ASSERT(thingp);
 #ifdef JSGC_GENERATIONAL
-    if (*thingp && rt->isHeapMinorCollecting() && rt->gcNursery.isInside(*thingp))
-        rt->gcNursery.getForwardedPointer(thingp);
+    if (*thingp && rt->isHeapMinorCollecting() && rt->gc.nursery.isInside(*thingp))
+        rt->gc.nursery.getForwardedPointer(thingp);
 #endif
     return *thingp;
 }
 
 #define DeclMarkerImpl(base, type)                                                                \
 void                                                                                              \
 Mark##base(JSTracer *trc, BarrieredBase<type*> *thing, const char *name)                          \
 {                                                                                                 \
@@ -779,17 +779,17 @@ ShouldMarkCrossCompartment(JSTracer *trc
          * Having black->gray edges violates our promise to the cycle
          * collector. This can happen if we're collecting a compartment and it
          * has an edge to an uncollected compartment: it's possible that the
          * source and destination of the cross-compartment edge should be gray,
          * but the source was marked black by the conservative scanner.
          */
         if (cell->isMarked(GRAY)) {
             JS_ASSERT(!zone->isCollecting());
-            trc->runtime()->gcFoundBlackGrayEdges = true;
+            trc->runtime()->gc.foundBlackGrayEdges = true;
         }
         return zone->isGCMarking();
     } else {
         if (zone->isGCMarkingBlack()) {
             /*
              * The destination compartment is not being marked gray now,
              * but it will be later, so record the cell so it can be marked gray
              * at the appropriate time.
@@ -1535,17 +1535,17 @@ GCMarker::processMarkStackTop(SliceBudge
 
         /* Call the trace hook if necessary. */
         const Class *clasp = type->clasp();
         if (clasp->trace) {
             // Global objects all have the same trace hook. That hook is safe without barriers
             // if the global has no custom trace hook of its own, or has been moved to a different
             // compartment, and so can't have one.
             JS_ASSERT_IF(runtime()->gcMode() == JSGC_MODE_INCREMENTAL &&
-                         runtime()->gcIncrementalEnabled &&
+                         runtime()->gc.incrementalEnabled &&
                          !(clasp->trace == JS_GlobalObjectTraceHook &&
                            (!obj->compartment()->options().getTrace() ||
                             !obj->isOwnGlobal())),
                          clasp->flags & JSCLASS_IMPLEMENTS_BARRIERS);
             clasp->trace(this, obj);
         }
 
         if (!shape->isNative())
@@ -1581,20 +1581,20 @@ bool
 GCMarker::drainMarkStack(SliceBudget &budget)
 {
 #ifdef DEBUG
     JSRuntime *rt = runtime();
 
     struct AutoCheckCompartment {
         JSRuntime *runtime;
         AutoCheckCompartment(JSRuntime *rt) : runtime(rt) {
-            JS_ASSERT(!rt->gcStrictCompartmentChecking);
-            runtime->gcStrictCompartmentChecking = true;
+            JS_ASSERT(!rt->gc.strictCompartmentChecking);
+            runtime->gc.strictCompartmentChecking = true;
         }
-        ~AutoCheckCompartment() { runtime->gcStrictCompartmentChecking = false; }
+        ~AutoCheckCompartment() { runtime->gc.strictCompartmentChecking = false; }
     } acc(rt);
 #endif
 
     if (budget.isOverBudget())
         return false;
 
     for (;;) {
         while (!stack.isEmpty()) {
@@ -1734,17 +1734,17 @@ UnmarkGrayChildren(JSTracer *trc, void *
 {
     void *thing = *thingp;
     int stackDummy;
     if (!JS_CHECK_STACK_SIZE(trc->runtime()->mainThread.nativeStackLimit[StackForSystemCode], &stackDummy)) {
         /*
          * If we run out of stack, we take a more drastic measure: require that
          * we GC again before the next CC.
          */
-        trc->runtime()->gcGrayBitsValid = false;
+        trc->runtime()->gc.grayBitsValid = false;
         return;
     }
 
     UnmarkGrayTracer *tracer = static_cast<UnmarkGrayTracer *>(trc);
     if (!IsInsideNursery(trc->runtime(), thing)) {
         if (!JS::GCThingIsMarkedGray(thing))
             return;
 
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -48,17 +48,17 @@ static int64_t GCReportThreshold = INT64
 bool
 js::Nursery::init()
 {
     JS_ASSERT(start() == 0);
 
     if (!hugeSlots.init())
         return false;
 
-    void *heap = runtime()->pageAllocator.mapAlignedPages(NurserySize, Alignment);
+    void *heap = runtime()->gc.pageAllocator.mapAlignedPages(NurserySize, Alignment);
     if (!heap)
         return false;
 
     JSRuntime *rt = runtime();
     rt->gcNurseryStart_ = uintptr_t(heap);
     currentStart_ = start();
     rt->gcNurseryEnd_ = chunk(LastNurseryChunk).end();
     numActiveChunks_ = 1;
@@ -74,17 +74,17 @@ js::Nursery::init()
 
     JS_ASSERT(isEnabled());
     return true;
 }
 
 js::Nursery::~Nursery()
 {
     if (start())
-        runtime()->pageAllocator.unmapPages((void *)start(), NurserySize);
+        runtime()->gc.pageAllocator.unmapPages((void *)start(), NurserySize);
 }
 
 void
 js::Nursery::updateDecommittedRegion()
 {
 #ifndef JS_GC_ZEAL
     if (numActiveChunks_ < NumNurseryChunks) {
         // Bug 994054: madvise on MacOS is too slow to make this
@@ -103,17 +103,17 @@ js::Nursery::enable()
 {
     JS_ASSERT(isEmpty());
     if (isEnabled())
         return;
     numActiveChunks_ = 1;
     setCurrentChunk(0);
     currentStart_ = position();
 #ifdef JS_GC_ZEAL
-    if (runtime()->gcZeal_ == ZealGenerationalGCValue)
+    if (runtime()->gc.zealMode == ZealGenerationalGCValue)
         enterZealMode();
 #endif
 }
 
 void
 js::Nursery::disable()
 {
     JS_ASSERT(isEmpty());
@@ -125,17 +125,17 @@ js::Nursery::disable()
 }
 
 bool
 js::Nursery::isEmpty() const
 {
     JS_ASSERT(runtime_);
     if (!isEnabled())
         return true;
-    JS_ASSERT_IF(runtime_->gcZeal_ != ZealGenerationalGCValue, currentStart_ == start());
+    JS_ASSERT_IF(runtime_->gc.zealMode != ZealGenerationalGCValue, currentStart_ == start());
     return position() == currentStart_;
 }
 
 JSObject *
 js::Nursery::allocateObject(JSContext *cx, size_t size, size_t numDynamic)
 {
     /* Ensure there's enough space to replace the contents with a RelocationOverlay. */
     JS_ASSERT(size >= sizeof(RelocationOverlay));
@@ -319,45 +319,45 @@ class MinorCollectionTracer : public JST
         nursery(nursery),
         session(rt, MinorCollecting),
         tenuredSize(0),
         head(nullptr),
         tail(&head),
         savedRuntimeNeedBarrier(rt->needsBarrier()),
         disableStrictProxyChecking(rt)
     {
-        rt->gcNumber++;
+        rt->gc.number++;
 
         /*
          * We disable the runtime needsBarrier() check so that pre-barriers do
          * not fire on objects that have been relocated. The pre-barrier's
          * call to obj->zone() will try to look through shape_, which is now
          * the relocation magic and will crash. However, zone->needsBarrier()
          * must still be set correctly so that allocations we make in minor
          * GCs between incremental slices will allocate their objects marked.
          */
         rt->setNeedsBarrier(false);
 
         /*
          * We use the live array buffer lists to track traced buffers so we can
          * sweep their dead views. Incremental collection also uses these lists,
          * so we may need to save and restore their contents here.
          */
-        if (rt->gcIncrementalState != NO_INCREMENTAL) {
+        if (rt->gc.incrementalState != NO_INCREMENTAL) {
             for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
                 if (!ArrayBufferObject::saveArrayBufferList(c, liveArrayBuffers))
                     CrashAtUnhandlableOOM("OOM while saving live array buffers");
                 ArrayBufferObject::resetArrayBufferList(c);
             }
         }
     }
 
     ~MinorCollectionTracer() {
         runtime()->setNeedsBarrier(savedRuntimeNeedBarrier);
-        if (runtime()->gcIncrementalState != NO_INCREMENTAL)
+        if (runtime()->gc.incrementalState != NO_INCREMENTAL)
             ArrayBufferObject::restoreArrayBufferLists(liveArrayBuffers);
     }
 };
 
 } /* namespace gc */
 } /* namespace js */
 
 static AllocKind
@@ -735,27 +735,27 @@ js::Nursery::collect(JSRuntime *rt, JS::
         return;
 
     if (!isEnabled())
         return;
 
     if (isEmpty())
         return;
 
-    rt->gcStats.count(gcstats::STAT_MINOR_GC);
+    rt->gc.stats.count(gcstats::STAT_MINOR_GC);
 
     TIME_START(total);
 
     AutoStopVerifyingBarriers av(rt, false);
 
     // Move objects pointed to by roots from the nursery to the major heap.
     MinorCollectionTracer trc(rt, this);
 
     // Mark the store buffer. This must happen first.
-    StoreBuffer &sb = rt->gcStoreBuffer;
+    StoreBuffer &sb = rt->gc.storeBuffer;
     TIME_START(markValues);
     sb.markValues(&trc);
     TIME_END(markValues);
 
     TIME_START(markCells);
     sb.markCells(&trc);
     TIME_END(markCells);
 
@@ -847,23 +847,23 @@ js::Nursery::collect(JSRuntime *rt, JS::
     freeHugeSlots(rt);
     TIME_END(freeHugeSlots);
 
     TIME_START(sweep);
     sweep(rt);
     TIME_END(sweep);
 
     TIME_START(clearStoreBuffer);
-    rt->gcStoreBuffer.clear();
+    rt->gc.storeBuffer.clear();
     TIME_END(clearStoreBuffer);
 
     // We ignore gcMaxBytes when allocating for minor collection. However, if we
     // overflowed, we disable the nursery. The next time we allocate, we'll fail
     // because gcBytes >= gcMaxBytes.
-    if (rt->gcBytes >= rt->gcMaxBytes)
+    if (rt->gc.bytes >= rt->gc.maxBytes)
         disable();
 
     TIME_END(total);
 
 #ifdef PROFILE_NURSERY
     int64_t totalTime = TIME_TOTAL(total);
 
     if (totalTime >= GCReportThreshold) {
@@ -917,17 +917,17 @@ void
 js::Nursery::sweep(JSRuntime *rt)
 {
 #ifdef JS_GC_ZEAL
     /* Poison the nursery contents so touching a freed object will crash. */
     JS_POISON((void *)start(), JS_SWEPT_NURSERY_PATTERN, NurserySize);
     for (int i = 0; i < NumNurseryChunks; ++i)
         initChunk(i);
 
-    if (rt->gcZeal_ == ZealGenerationalGCValue) {
+    if (rt->gc.zealMode == ZealGenerationalGCValue) {
         MOZ_ASSERT(numActiveChunks_ == NumNurseryChunks);
 
         /* Only reset the alloc point when we are close to the end. */
         if (currentChunk_ + 1 == NumNurseryChunks)
             setCurrentChunk(0);
     } else
 #endif
     {
@@ -942,25 +942,26 @@ js::Nursery::sweep(JSRuntime *rt)
     /* Set current start position for isEmpty checks. */
     currentStart_ = position();
 }
 
 void
 js::Nursery::growAllocableSpace()
 {
 #ifdef JS_GC_ZEAL
-    MOZ_ASSERT_IF(runtime()->gcZeal_ == ZealGenerationalGCValue, numActiveChunks_ == NumNurseryChunks);
+    MOZ_ASSERT_IF(runtime()->gc.zealMode == ZealGenerationalGCValue,
+                  numActiveChunks_ == NumNurseryChunks);
 #endif
     numActiveChunks_ = Min(numActiveChunks_ * 2, NumNurseryChunks);
 }
 
 void
 js::Nursery::shrinkAllocableSpace()
 {
 #ifdef JS_GC_ZEAL
-    if (runtime()->gcZeal_ == ZealGenerationalGCValue)
+    if (runtime()->gc.zealMode == ZealGenerationalGCValue)
         return;
 #endif
     numActiveChunks_ = Max(numActiveChunks_ - 1, 1);
     updateDecommittedRegion();
 }
 
 #endif /* JSGC_GENERATIONAL */
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -138,17 +138,17 @@ IsAddressableGCThing(JSRuntime *rt, uint
 #if JS_BITS_PER_WORD == 32
     uintptr_t addr = w & JSID_PAYLOAD_MASK;
 #elif JS_BITS_PER_WORD == 64
     uintptr_t addr = w & JSID_PAYLOAD_MASK & JSVAL_PAYLOAD_MASK;
 #endif
 
     Chunk *chunk = Chunk::fromAddress(addr);
 
-    if (!rt->gcChunkSet.has(chunk))
+    if (!rt->gc.chunkSet.has(chunk))
         return CGCT_NOTCHUNK;
 
     /*
      * We query for pointers outside the arena array after checking for an
      * allocated chunk. Such pointers are rare and we want to reject them
      * after doing more likely rejections.
      */
     if (!Chunk::withinArenasRange(addr))
@@ -218,17 +218,17 @@ MarkIfGCThingWord(JSTracer *trc, uintptr
     trc->setTracingName(nameBuf);
 #endif
     trc->setTracingLocation((void *)w);
     void *tmp = thing;
     MarkKind(trc, &tmp, traceKind);
     JS_ASSERT(tmp == thing);
 
 #ifdef DEBUG
-    if (trc->runtime()->gcIncrementalState == MARK_ROOTS)
+    if (trc->runtime()->gc.incrementalState == MARK_ROOTS)
         trc->runtime()->mainThread.gcSavedRoots.append(
             PerThreadData::SavedGCRoot(thing, traceKind));
 #endif
 
     return CGCT_VALID;
 }
 
 #ifndef JSGC_USE_EXACT_ROOTING
@@ -686,17 +686,17 @@ js::gc::MarkRuntime(JSTracer *trc, bool 
 #ifdef JSGC_USE_EXACT_ROOTING
         MarkExactStackRoots(trc);
 #else
         MarkConservativeStackRoots(trc, useSavedRoots);
 #endif
         rt->markSelfHostingGlobal(trc);
     }
 
-    for (RootRange r = rt->gcRootsHash.all(); !r.empty(); r.popFront()) {
+    for (RootRange r = rt->gc.rootsHash.all(); !r.empty(); r.popFront()) {
         const RootEntry &entry = r.front();
         const char *name = entry.value().name ? entry.value().name : "root";
         JSGCRootType type = entry.value().type;
         void *key = entry.key();
         if (type == JS_GC_ROOT_VALUE_PTR) {
             MarkValueRoot(trc, reinterpret_cast<Value *>(key), name);
         } else if (*reinterpret_cast<void **>(key)){
             if (type == JS_GC_ROOT_STRING_PTR)
@@ -707,18 +707,18 @@ js::gc::MarkRuntime(JSTracer *trc, bool 
                 MarkScriptRoot(trc, reinterpret_cast<JSScript **>(key), name);
             else
                 MOZ_ASSUME_UNREACHABLE("unexpected js::RootInfo::type value");
         }
     }
 
     MarkPersistentRootedChains(trc);
 
-    if (rt->scriptAndCountsVector) {
-        ScriptAndCountsVector &vec = *rt->scriptAndCountsVector;
+    if (rt->gc.scriptAndCountsVector) {
+        ScriptAndCountsVector &vec = *rt->gc.scriptAndCountsVector;
         for (size_t i = 0; i < vec.length(); i++)
             MarkScriptRoot(trc, &vec[i].script, "scriptAndCountsVector");
     }
 
     if (!rt->isBeingDestroyed() && !trc->runtime()->isHeapMinorCollecting()) {
         if (!IS_GC_MARKING_TRACER(trc) || rt->atomsCompartment()->zone()->isCollecting()) {
             MarkPermanentAtoms(trc);
             MarkAtoms(trc);
@@ -783,30 +783,30 @@ js::gc::MarkRuntime(JSTracer *trc, bool 
 
         /*
          * The embedding can register additional roots here.
          *
          * We don't need to trace these in a minor GC because all pointers into
          * the nursery should be in the store buffer, and we want to avoid the
          * time taken to trace all these roots.
          */
-        for (size_t i = 0; i < rt->gcBlackRootTracers.length(); i++) {
-            const JSRuntime::ExtraTracer &e = rt->gcBlackRootTracers[i];
+        for (size_t i = 0; i < rt->gc.blackRootTracers.length(); i++) {
+            const ExtraTracer &e = rt->gc.blackRootTracers[i];
             (*e.op)(trc, e.data);
         }
 
         /* During GC, we don't mark gray roots at this stage. */
-        if (JSTraceDataOp op = rt->gcGrayRootTracer.op) {
+        if (JSTraceDataOp op = rt->gc.grayRootTracer.op) {
             if (!IS_GC_MARKING_TRACER(trc))
-                (*op)(trc, rt->gcGrayRootTracer.data);
+                (*op)(trc, rt->gc.grayRootTracer.data);
         }
     }
 }
 
 void
 js::gc::BufferGrayRoots(GCMarker *gcmarker)
 {
     JSRuntime *rt = gcmarker->runtime();
     gcmarker->startBufferingGrayRoots();
-    if (JSTraceDataOp op = rt->gcGrayRootTracer.op)
-        (*op)(gcmarker, rt->gcGrayRootTracer.data);
+    if (JSTraceDataOp op = rt->gc.grayRootTracer.op)
+        (*op)(gcmarker, rt->gc.grayRootTracer.data);
     gcmarker->endBufferingGrayRoots();
 }
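
The loop over rt->gc.blackRootTracers and the rt->gc.grayRootTracer call above are where embedder-registered root tracers are invoked from MarkRuntime. A hedged sketch of how such a tracer is typically registered (assuming the public JS_AddExtraGCRootsTracer API of this era; the tracing calls inside the callback are elided):

    // Sketch: registering an extra black-root tracer that, after this patch,
    // is stored in rt->gc.blackRootTracers and called from js::gc::MarkRuntime.
    #include "jsapi.h"

    static void
    TraceEmbedderRoots(JSTracer *trc, void *data)
    {
        // Mark embedder-held GC things here (e.g. with the JS_Call*Tracer helpers).
    }

    static bool
    RegisterEmbedderRoots(JSRuntime *rt, void *data)
    {
        return JS_AddExtraGCRootsTracer(rt, TraceEmbedderRoots, data);
    }
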
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -516,17 +516,17 @@ Statistics::beginGC()
 {
     PodArrayZero(phaseStartTimes);
     PodArrayZero(phaseTimes);
 
     slices.clearAndFree();
     sccTimes.clearAndFree();
     nonincrementalReason = nullptr;
 
-    preBytes = runtime->gcBytes;
+    preBytes = runtime->gc.bytes;
 }
 
 void
 Statistics::endGC()
 {
     crash::SnapshotGCStack();
 
     for (int i = 0; i < PHASE_LIMIT; i++)
@@ -542,17 +542,17 @@ Statistics::endGC()
         (*cb)(JS_TELEMETRY_GC_IS_COMPARTMENTAL, collectedCount == zoneCount ? 0 : 1);
         (*cb)(JS_TELEMETRY_GC_MS, t(total));
         (*cb)(JS_TELEMETRY_GC_MAX_PAUSE_MS, t(longest));
         (*cb)(JS_TELEMETRY_GC_MARK_MS, t(phaseTimes[PHASE_MARK]));
         (*cb)(JS_TELEMETRY_GC_SWEEP_MS, t(phaseTimes[PHASE_SWEEP]));
         (*cb)(JS_TELEMETRY_GC_MARK_ROOTS_MS, t(phaseTimes[PHASE_MARK_ROOTS]));
         (*cb)(JS_TELEMETRY_GC_MARK_GRAY_MS, t(phaseTimes[PHASE_SWEEP_MARK_GRAY]));
         (*cb)(JS_TELEMETRY_GC_NON_INCREMENTAL, !!nonincrementalReason);
-        (*cb)(JS_TELEMETRY_GC_INCREMENTAL_DISABLED, !runtime->gcIncrementalEnabled);
+        (*cb)(JS_TELEMETRY_GC_INCREMENTAL_DISABLED, !runtime->gc.incrementalEnabled);
         (*cb)(JS_TELEMETRY_GC_SCC_SWEEP_TOTAL_MS, t(sccTotal));
         (*cb)(JS_TELEMETRY_GC_SCC_SWEEP_MAX_PAUSE_MS, t(sccLongest));
 
         double mmu50 = computeMMU(50 * PRMJ_USEC_PER_MSEC);
         (*cb)(JS_TELEMETRY_GC_MMU_50, mmu50 * 100);
     }
 
     if (fp)
@@ -562,54 +562,54 @@ Statistics::endGC()
 void
 Statistics::beginSlice(int collectedCount, int zoneCount, int compartmentCount,
                        JS::gcreason::Reason reason)
 {
     this->collectedCount = collectedCount;
     this->zoneCount = zoneCount;
     this->compartmentCount = compartmentCount;
 
-    bool first = runtime->gcIncrementalState == gc::NO_INCREMENTAL;
+    bool first = runtime->gc.incrementalState == gc::NO_INCREMENTAL;
     if (first)
         beginGC();
 
     SliceData data(reason, PRMJ_Now(), SystemPageAllocator::GetPageFaultCount());
     (void) slices.append(data); /* Ignore any OOMs here. */
 
     if (JSAccumulateTelemetryDataCallback cb = runtime->telemetryCallback)
         (*cb)(JS_TELEMETRY_GC_REASON, reason);
 
     // Slice callbacks should only fire for the outermost level
     if (++gcDepth == 1) {
         bool wasFullGC = collectedCount == zoneCount;
-        if (JS::GCSliceCallback cb = runtime->gcSliceCallback)
+        if (JS::GCSliceCallback cb = runtime->gc.sliceCallback)
             (*cb)(runtime, first ? JS::GC_CYCLE_BEGIN : JS::GC_SLICE_BEGIN,
                   JS::GCDescription(!wasFullGC));
     }
 }
 
 void
 Statistics::endSlice()
 {
     slices.back().end = PRMJ_Now();
     slices.back().endFaults = SystemPageAllocator::GetPageFaultCount();
 
     if (JSAccumulateTelemetryDataCallback cb = runtime->telemetryCallback) {
         (*cb)(JS_TELEMETRY_GC_SLICE_MS, t(slices.back().end - slices.back().start));
         (*cb)(JS_TELEMETRY_GC_RESET, !!slices.back().resetReason);
     }
 
-    bool last = runtime->gcIncrementalState == gc::NO_INCREMENTAL;
+    bool last = runtime->gc.incrementalState == gc::NO_INCREMENTAL;
     if (last)
         endGC();
 
     // Slice callbacks should only fire for the outermost level
     if (--gcDepth == 0) {
         bool wasFullGC = collectedCount == zoneCount;
-        if (JS::GCSliceCallback cb = runtime->gcSliceCallback)
+        if (JS::GCSliceCallback cb = runtime->gc.sliceCallback)
             (*cb)(runtime, last ? JS::GC_CYCLE_END : JS::GC_SLICE_END,
                   JS::GCDescription(!wasFullGC));
     }
 
     /* Do this after the slice callback since it uses these values. */
     if (last)
         PodArrayZero(counts);
 }
--- a/js/src/gc/StoreBuffer.cpp
+++ b/js/src/gc/StoreBuffer.cpp
@@ -21,17 +21,17 @@ using mozilla::ReentrancyGuard;
 
 /*** Edges ***/
 
 void
 StoreBuffer::SlotsEdge::mark(JSTracer *trc)
 {
     JSObject *obj = object();
 
-    if (trc->runtime()->gcNursery.isInside(obj))
+    if (trc->runtime()->gc.nursery.isInside(obj))
         return;
 
     if (!obj->isNative()) {
         const Class *clasp = obj->getClass();
         if (clasp)
             clasp->trace(trc, obj);
         return;
     }
@@ -332,47 +332,47 @@ StoreBuffer::addSizeOfExcludingThis(mozi
     sizes->storeBufferGenerics   += bufferGeneric.sizeOfExcludingThis(mallocSizeOf);
 }
 
 JS_PUBLIC_API(void)
 JS::HeapCellPostBarrier(js::gc::Cell **cellp)
 {
     JS_ASSERT(*cellp);
     JSRuntime *runtime = (*cellp)->runtimeFromMainThread();
-    runtime->gcStoreBuffer.putRelocatableCell(cellp);
+    runtime->gc.storeBuffer.putRelocatableCell(cellp);
 }
 
 JS_PUBLIC_API(void)
 JS::HeapCellRelocate(js::gc::Cell **cellp)
 {
     /* Called with old contents of *pp before overwriting. */
     JS_ASSERT(*cellp);
     JSRuntime *runtime = (*cellp)->runtimeFromMainThread();
-    runtime->gcStoreBuffer.removeRelocatableCell(cellp);
+    runtime->gc.storeBuffer.removeRelocatableCell(cellp);
 }
 
 JS_PUBLIC_API(void)
 JS::HeapValuePostBarrier(JS::Value *valuep)
 {
     JS_ASSERT(valuep->isMarkable());
     if (valuep->isString() && StringIsPermanentAtom(valuep->toString()))
         return;
     JSRuntime *runtime = static_cast<js::gc::Cell *>(valuep->toGCThing())->runtimeFromMainThread();
-    runtime->gcStoreBuffer.putRelocatableValue(valuep);
+    runtime->gc.storeBuffer.putRelocatableValue(valuep);
 }
 
 JS_PUBLIC_API(void)
 JS::HeapValueRelocate(JS::Value *valuep)
 {
     /* Called with old contents of *valuep before overwriting. */
     JS_ASSERT(valuep->isMarkable());
     if (valuep->isString() && StringIsPermanentAtom(valuep->toString()))
         return;
     JSRuntime *runtime = static_cast<js::gc::Cell *>(valuep->toGCThing())->runtimeFromMainThread();
-    runtime->gcStoreBuffer.removeRelocatableValue(valuep);
+    runtime->gc.storeBuffer.removeRelocatableValue(valuep);
 }
 
 template class StoreBuffer::MonoTypeBuffer<StoreBuffer::ValueEdge>;
 template class StoreBuffer::MonoTypeBuffer<StoreBuffer::CellPtrEdge>;
 template class StoreBuffer::MonoTypeBuffer<StoreBuffer::SlotsEdge>;
 template class StoreBuffer::MonoTypeBuffer<StoreBuffer::WholeCellEdges>;
 template class StoreBuffer::RelocatableMonoTypeBuffer<StoreBuffer::ValueEdge>;
 template class StoreBuffer::RelocatableMonoTypeBuffer<StoreBuffer::CellPtrEdge>;
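
The JS::Heap* barrier entry points above are the public hooks behind JS::Heap<T>, which embedders use for GC pointers stored outside the engine's own heap; after this patch those hooks write into rt->gc.storeBuffer instead of rt->gcStoreBuffer. A hedged usage sketch (assuming the JS::Heap<T> wrapper from js/RootingAPI.h; MyCache is a hypothetical embedder type):

    // Sketch: an embedder-side struct holding a post-barriered object pointer.
    #include "js/RootingAPI.h"

    struct MyCache {
        JS::Heap<JSObject *> obj;   // assignment runs the post barrier

        void set(JSObject *newObj) {
            // This store reaches JS::HeapCellPostBarrier, which now records
            // the location in rt->gc.storeBuffer.
            obj = newObj;
        }
    };
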
--- a/js/src/gc/Tracer.cpp
+++ b/js/src/gc/Tracer.cpp
@@ -526,18 +526,18 @@ GCMarker::markDelayedChildren(ArenaHeade
      * allocatedDuringIncremental flag if we continue marking.
      */
 }
 
 bool
 GCMarker::markDelayedChildren(SliceBudget &budget)
 {
     gcstats::MaybeAutoPhase ap;
-    if (runtime()->gcIncrementalState == MARK)
-        ap.construct(runtime()->gcStats, gcstats::PHASE_MARK_DELAYED);
+    if (runtime()->gc.incrementalState == MARK)
+        ap.construct(runtime()->gc.stats, gcstats::PHASE_MARK_DELAYED);
 
     JS_ASSERT(unmarkedArenaStackTop);
     do {
         /*
          * If marking gets delayed at the same arena again, we must repeat
          * marking of its things. For that we pop arena from the stack and
          * clear its hasDelayedMarking flag before we begin the marking.
          */
@@ -664,11 +664,11 @@ GCMarker::sizeOfExcludingThis(mozilla::M
     return size;
 }
 
 void
 js::SetMarkStackLimit(JSRuntime *rt, size_t limit)
 {
     JS_ASSERT(!rt->isHeapBusy());
     AutoStopVerifyingBarriers pauseVerification(rt, false);
-    rt->gcMarker.setMaxCapacity(limit);
+    rt->gc.marker.setMaxCapacity(limit);
 }
 
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -98,17 +98,17 @@ struct VerifyPreTracer : JSTracer
     /* This graph represents the initial GC "snapshot". */
     VerifyNode *curnode;
     VerifyNode *root;
     char *edgeptr;
     char *term;
     NodeMap nodemap;
 
     VerifyPreTracer(JSRuntime *rt, JSTraceCallback callback)
-      : JSTracer(rt, callback), noggc(rt), number(rt->gcNumber), count(0), root(nullptr)
+      : JSTracer(rt, callback), noggc(rt), number(rt->gc.number), count(0), root(nullptr)
     {}
 
     ~VerifyPreTracer() {
         js_free(root);
     }
 };
 
 /*
@@ -166,39 +166,39 @@ NextNode(VerifyNode *node)
     else
         return (VerifyNode *)((char *)node + sizeof(VerifyNode) +
                              sizeof(EdgeValue)*(node->count - 1));
 }
 
 void
 gc::StartVerifyPreBarriers(JSRuntime *rt)
 {
-    if (rt->gcVerifyPreData || rt->gcIncrementalState != NO_INCREMENTAL)
+    if (rt->gc.verifyPreData || rt->gc.incrementalState != NO_INCREMENTAL)
         return;
 
     /*
      * The post barrier verifier requires the storebuffer to be enabled, but the
      * pre barrier verifier disables it as part of disabling GGC.  Don't allow
      * starting the pre barrier verifier if the post barrier verifier is already
      * running.
      */
-    if (rt->gcVerifyPostData)
+    if (rt->gc.verifyPostData)
         return;
 
     MinorGC(rt, JS::gcreason::EVICT_NURSERY);
 
     AutoPrepareForTracing prep(rt, WithAtoms);
 
     if (!IsIncrementalGCSafe(rt))
         return;
 
-    for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
+    for (GCChunkSet::Range r(rt->gc.chunkSet.all()); !r.empty(); r.popFront())
         r.front()->bitmap.clear();
 
-    rt->gcNumber++;
+    rt->gc.number++;
 
     VerifyPreTracer *trc = js_new<VerifyPreTracer>(rt, JSTraceCallback(nullptr));
     if (!trc)
         return;
 
     /*
      * Passing a function pointer directly to js_new trips a compiler bug in
      * MSVC. Work around by filling the pointer after allocating with nullptr.
@@ -214,17 +214,17 @@ gc::StartVerifyPreBarriers(JSRuntime *rt
 
     if (!trc->nodemap.init())
         goto oom;
 
     /* Create the root node. */
     trc->curnode = MakeNode(trc, nullptr, JSGCTraceKind(0));
 
     /* We want MarkRuntime to save the roots to gcSavedRoots. */
-    rt->gcIncrementalState = MARK_ROOTS;
+    rt->gc.incrementalState = MARK_ROOTS;
 
     /* Make all the roots be edges emanating from the root node. */
     MarkRuntime(trc);
 
     VerifyNode *node;
     node = trc->curnode;
     if (trc->edgeptr == trc->term)
         goto oom;
@@ -240,33 +240,33 @@ gc::StartVerifyPreBarriers(JSRuntime *rt
             }
             if (trc->edgeptr == trc->term)
                 goto oom;
         }
 
         node = NextNode(node);
     }
 
-    rt->gcVerifyPreData = trc;
-    rt->gcIncrementalState = MARK;
-    rt->gcMarker.start();
+    rt->gc.verifyPreData = trc;
+    rt->gc.incrementalState = MARK;
+    rt->gc.marker.start();
 
     rt->setNeedsBarrier(true);
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         PurgeJITCaches(zone);
         zone->setNeedsBarrier(true, Zone::UpdateIon);
         zone->allocator.arenas.purge();
     }
 
     return;
 
 oom:
-    rt->gcIncrementalState = NO_INCREMENTAL;
+    rt->gc.incrementalState = NO_INCREMENTAL;
     js_delete(trc);
-    rt->gcVerifyPreData = nullptr;
+    rt->gc.verifyPreData = nullptr;
 }
 
 static bool
 IsMarkedOrAllocated(Cell *cell)
 {
     return cell->isMarked() || cell->arenaHeader()->allocatedDuringIncremental;
 }
 
@@ -318,17 +318,17 @@ AssertMarkedOrAllocated(const EdgeValue 
 
 void
 gc::EndVerifyPreBarriers(JSRuntime *rt)
 {
     JS_ASSERT(!JS::IsGenerationalGCEnabled(rt));
 
     AutoPrepareForTracing prep(rt, SkipAtoms);
 
-    VerifyPreTracer *trc = (VerifyPreTracer *)rt->gcVerifyPreData;
+    VerifyPreTracer *trc = (VerifyPreTracer *)rt->gc.verifyPreData;
 
     if (!trc)
         return;
 
     bool compartmentCreated = false;
 
     /* We need to disable barriers before tracing, which may invoke barriers. */
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
@@ -339,21 +339,21 @@ gc::EndVerifyPreBarriers(JSRuntime *rt)
         PurgeJITCaches(zone);
     }
     rt->setNeedsBarrier(false);
 
     /*
      * We need to bump gcNumber so that the methodjit knows that jitcode has
      * been discarded.
      */
-    JS_ASSERT(trc->number == rt->gcNumber);
-    rt->gcNumber++;
+    JS_ASSERT(trc->number == rt->gc.number);
+    rt->gc.number++;
 
-    rt->gcVerifyPreData = nullptr;
-    rt->gcIncrementalState = NO_INCREMENTAL;
+    rt->gc.verifyPreData = nullptr;
+    rt->gc.incrementalState = NO_INCREMENTAL;
 
     if (!compartmentCreated && IsIncrementalGCSafe(rt)) {
         trc->setTraceCallback(CheckEdge);
 
         /* Start after the roots. */
         VerifyNode *node = NextNode(trc->root);
         while ((char *)node < trc->edgeptr) {
             trc->curnode = node;
@@ -363,18 +363,18 @@ gc::EndVerifyPreBarriers(JSRuntime *rt)
                 for (uint32_t i = 0; i < node->count; i++)
                     AssertMarkedOrAllocated(node->edges[i]);
             }
 
             node = NextNode(node);
         }
     }
 
-    rt->gcMarker.reset();
-    rt->gcMarker.stop();
+    rt->gc.marker.reset();
+    rt->gc.marker.stop();
 
     js_delete(trc);
 }
 
 /*** Post-Barrier Verifier ***/
 
 struct VerifyPostTracer : JSTracer
 {
@@ -384,60 +384,60 @@ struct VerifyPostTracer : JSTracer
     /* This counts up to gcZealFrequency to decide whether to verify. */
     int count;
 
     /* The set of edges in the StoreBuffer at the end of verification. */
     typedef HashSet<void **, PointerHasher<void **, 3>, SystemAllocPolicy> EdgeSet;
     EdgeSet *edges;
 
     VerifyPostTracer(JSRuntime *rt, JSTraceCallback callback)
-      : JSTracer(rt, callback), number(rt->gcNumber), count(0)
+      : JSTracer(rt, callback), number(rt->gc.number), count(0)
     {}
 };
 
 /*
  * The post-barrier verifier runs the full store buffer and a fake nursery when
  * running and when it stops, walks the full heap to ensure that all the
  * important edges were inserted into the storebuffer.
  */
 void
 gc::StartVerifyPostBarriers(JSRuntime *rt)
 {
 #ifdef JSGC_GENERATIONAL
-    if (rt->gcVerifyPostData ||
-        rt->gcIncrementalState != NO_INCREMENTAL)
+    if (rt->gc.verifyPostData ||
+        rt->gc.incrementalState != NO_INCREMENTAL)
     {
         return;
     }
 
     MinorGC(rt, JS::gcreason::EVICT_NURSERY);
 
-    rt->gcNumber++;
+    rt->gc.number++;
 
     VerifyPostTracer *trc = js_new<VerifyPostTracer>(rt, JSTraceCallback(nullptr));
     if (!trc)
         return;
 
-    rt->gcVerifyPostData = trc;
+    rt->gc.verifyPostData = trc;
 #endif
 }
 
 #ifdef JSGC_GENERATIONAL
 void
 PostVerifierCollectStoreBufferEdges(JSTracer *jstrc, void **thingp, JSGCTraceKind kind)
 {
     VerifyPostTracer *trc = (VerifyPostTracer *)jstrc;
 
     /* The nursery only stores objects. */
     if (kind != JSTRACE_OBJECT)
         return;
 
     /* The store buffer may store extra, non-cross-generational edges. */
     JSObject *dst = *reinterpret_cast<JSObject **>(thingp);
-    if (trc->runtime()->gcNursery.isInside(thingp) || !trc->runtime()->gcNursery.isInside(dst))
+    if (trc->runtime()->gc.nursery.isInside(thingp) || !trc->runtime()->gc.nursery.isInside(dst))
         return;
 
     /*
      * Values will be unpacked to the stack before getting here. However, the
      * only things that enter this callback are marked by the store buffer. The
      * store buffer ensures that the real tracing location is set correctly.
      */
     void **loc = trc->tracingLocation(thingp);
@@ -463,19 +463,19 @@ PostVerifierVisitEdge(JSTracer *jstrc, v
 {
     VerifyPostTracer *trc = (VerifyPostTracer *)jstrc;
 
     /* The nursery only stores objects. */
     if (kind != JSTRACE_OBJECT)
         return;
 
     /* Filter out non cross-generational edges. */
-    JS_ASSERT(!trc->runtime()->gcNursery.isInside(thingp));
+    JS_ASSERT(!trc->runtime()->gc.nursery.isInside(thingp));
     JSObject *dst = *reinterpret_cast<JSObject **>(thingp);
-    if (!trc->runtime()->gcNursery.isInside(dst))
+    if (!trc->runtime()->gc.nursery.isInside(dst))
         return;
 
     /*
      * Values will be unpacked to the stack before getting here. However, the
      * only things that enter this callback are marked by the JS_TraceChildren
      * below. Since ObjectImpl::markChildren handles this, the real trace
      * location will be set correctly in these cases.
      */
@@ -487,57 +487,57 @@ PostVerifierVisitEdge(JSTracer *jstrc, v
 
 void
 js::gc::EndVerifyPostBarriers(JSRuntime *rt)
 {
 #ifdef JSGC_GENERATIONAL
     VerifyPostTracer::EdgeSet edges;
     AutoPrepareForTracing prep(rt, SkipAtoms);
 
-    VerifyPostTracer *trc = (VerifyPostTracer *)rt->gcVerifyPostData;
+    VerifyPostTracer *trc = (VerifyPostTracer *)rt->gc.verifyPostData;
 
     /* Visit every entry in the store buffer and put the edges in a hash set. */
     trc->setTraceCallback(PostVerifierCollectStoreBufferEdges);
     if (!edges.init())
         goto oom;
     trc->edges = &edges;
-    rt->gcStoreBuffer.markAll(trc);
+    rt->gc.storeBuffer.markAll(trc);
 
     /* Walk the heap to find any edges not in the |edges| set. */
     trc->setTraceCallback(PostVerifierVisitEdge);
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
         for (size_t kind = 0; kind < FINALIZE_LIMIT; ++kind) {
             for (CellIterUnderGC cells(zone, AllocKind(kind)); !cells.done(); cells.next()) {
                 Cell *src = cells.getCell();
                 JS_TraceChildren(trc, src, MapAllocToTraceKind(AllocKind(kind)));
             }
         }
     }
 
 oom:
     js_delete(trc);
-    rt->gcVerifyPostData = nullptr;
+    rt->gc.verifyPostData = nullptr;
 #endif
 }
 
 /*** Barrier Verifier Scheduling ***/
 
 static void
 VerifyPreBarriers(JSRuntime *rt)
 {
-    if (rt->gcVerifyPreData)
+    if (rt->gc.verifyPreData)
         EndVerifyPreBarriers(rt);
     else
         StartVerifyPreBarriers(rt);
 }
 
 static void
 VerifyPostBarriers(JSRuntime *rt)
 {
-    if (rt->gcVerifyPostData)
+    if (rt->gc.verifyPostData)
         EndVerifyPostBarriers(rt);
     else
         StartVerifyPostBarriers(rt);
 }
 
 void
 gc::VerifyBarriers(JSRuntime *rt, VerifierType type)
 {
@@ -551,38 +551,38 @@ static void
 MaybeVerifyPreBarriers(JSRuntime *rt, bool always)
 {
     if (rt->gcZeal() != ZealVerifierPreValue)
         return;
 
     if (rt->mainThread.suppressGC)
         return;
 
-    if (VerifyPreTracer *trc = (VerifyPreTracer *)rt->gcVerifyPreData) {
-        if (++trc->count < rt->gcZealFrequency && !always)
+    if (VerifyPreTracer *trc = (VerifyPreTracer *)rt->gc.verifyPreData) {
+        if (++trc->count < rt->gc.zealFrequency && !always)
             return;
 
         EndVerifyPreBarriers(rt);
     }
 
     StartVerifyPreBarriers(rt);
 }
 
 static void
 MaybeVerifyPostBarriers(JSRuntime *rt, bool always)
 {
 #ifdef JSGC_GENERATIONAL
     if (rt->gcZeal() != ZealVerifierPostValue)
         return;
 
-    if (rt->mainThread.suppressGC || !rt->gcStoreBuffer.isEnabled())
+    if (rt->mainThread.suppressGC || !rt->gc.storeBuffer.isEnabled())
         return;
 
-    if (VerifyPostTracer *trc = (VerifyPostTracer *)rt->gcVerifyPostData) {
-        if (++trc->count < rt->gcZealFrequency && !always)
+    if (VerifyPostTracer *trc = (VerifyPostTracer *)rt->gc.verifyPostData) {
+        if (++trc->count < rt->gc.zealFrequency && !always)
             return;
 
         EndVerifyPostBarriers(rt);
     }
     StartVerifyPostBarriers(rt);
 #endif
 }
 
@@ -591,21 +591,21 @@ js::gc::MaybeVerifyBarriers(JSContext *c
 {
     MaybeVerifyPreBarriers(cx->runtime(), always);
     MaybeVerifyPostBarriers(cx->runtime(), always);
 }
 
 void
 js::gc::FinishVerifier(JSRuntime *rt)
 {
-    if (VerifyPreTracer *trc = (VerifyPreTracer *)rt->gcVerifyPreData) {
+    if (VerifyPreTracer *trc = (VerifyPreTracer *)rt->gc.verifyPreData) {
         js_delete(trc);
-        rt->gcVerifyPreData = nullptr;
+        rt->gc.verifyPreData = nullptr;
     }
 #ifdef JSGC_GENERATIONAL
-    if (VerifyPostTracer *trc = (VerifyPostTracer *)rt->gcVerifyPostData) {
+    if (VerifyPostTracer *trc = (VerifyPostTracer *)rt->gc.verifyPostData) {
         js_delete(trc);
-        rt->gcVerifyPostData = nullptr;
+        rt->gc.verifyPostData = nullptr;
     }
 #endif
 }
 
 #endif /* JS_GC_ZEAL */
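
The check performed by EndVerifyPostBarriers above boils down to a single invariant: every tenured-to-nursery edge discovered by the heap walk must already be present in the set of edges collected from the store buffer. A minimal sketch of that per-edge check, using hypothetical EdgeSet, IsNurseryLocation and IsNurseryThing helpers in place of the real structures:

    // Minimal sketch of the invariant EndVerifyPostBarriers enforces; the
    // names below are stand-ins for the edges hash set and the nursery range
    // checks used in the verifier above.
    static void
    CheckPostBarrieredEdge(const EdgeSet &edges, void **location, JSObject *target)
    {
        // Only tenured-to-nursery edges must have been recorded: a slot that
        // itself lives in the nursery, or a target that is already tenured,
        // needs no store buffer entry.
        if (IsNurseryLocation(location) || !IsNurseryThing(target))
            return;

        // A cross-generational edge the store buffer never saw means a post
        // write barrier was missed somewhere.
        MOZ_ASSERT(edges.has(location), "edge missing from the store buffer");
    }
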
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -17,17 +17,17 @@
 #include "vm/Runtime.h"
 
 #include "jsgcinlines.h"
 
 using namespace js;
 using namespace js::gc;
 
 JS::Zone::Zone(JSRuntime *rt)
-  : JS::shadow::Zone(rt, &rt->gcMarker),
+  : JS::shadow::Zone(rt, &rt->gc.marker),
     allocator(this),
     ionUsingBarriers_(false),
     active(false),
     gcScheduled(false),
     gcState(NoGC),
     gcPreserveCode(false),
     gcBytes(0),
     gcTriggerBytes(0),
@@ -44,23 +44,24 @@ JS::Zone::Zone(JSRuntime *rt)
 #ifdef JS_ION
     , jitZone_(nullptr)
 #endif
 {
     /* Ensure that there are no vtables to mess us up here. */
     JS_ASSERT(reinterpret_cast<JS::shadow::Zone *>(this) ==
               static_cast<JS::shadow::Zone *>(this));
 
-    setGCMaxMallocBytes(rt->gcMaxMallocBytes * 0.9);
+    setGCMaxMallocBytes(rt->gc.maxMallocBytes * 0.9);
 }
 
 Zone::~Zone()
 {
-    if (this == runtimeFromMainThread()->systemZone)
-        runtimeFromMainThread()->systemZone = nullptr;
+    JSRuntime *rt = runtimeFromMainThread();
+    if (this == rt->gc.systemZone)
+        rt->gc.systemZone = nullptr;
 
 #ifdef JS_ION
     js_delete(jitZone_);
 #endif
 }
 
 void
 Zone::setNeedsBarrier(bool needs, ShouldUpdateIon updateIon)
@@ -110,17 +111,17 @@ Zone::sweep(FreeOp *fop, bool releaseTyp
     /*
      * Periodically release observed types for all scripts. This is safe to
      * do when there are no frames for the zone on the stack.
      */
     if (active)
         releaseTypes = false;
 
     {
-        gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_DISCARD_ANALYSIS);
+        gcstats::AutoPhase ap(fop->runtime()->gc.stats, gcstats::PHASE_DISCARD_ANALYSIS);
         types.sweep(fop, releaseTypes, oom);
     }
 
     if (!fop->runtime()->debuggerList.isEmpty())
         sweepBreakpoints(fop);
 
     active = false;
 }
@@ -128,18 +129,18 @@ Zone::sweep(FreeOp *fop, bool releaseTyp
 void
 Zone::sweepBreakpoints(FreeOp *fop)
 {
     /*
      * Sweep all compartments in a zone at the same time, since there is no way
      * to iterate over the scripts belonging to a single compartment in a zone.
      */
 
-    gcstats::AutoPhase ap1(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_TABLES);
-    gcstats::AutoPhase ap2(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_TABLES_BREAKPOINT);
+    gcstats::AutoPhase ap1(fop->runtime()->gc.stats, gcstats::PHASE_SWEEP_TABLES);
+    gcstats::AutoPhase ap2(fop->runtime()->gc.stats, gcstats::PHASE_SWEEP_TABLES_BREAKPOINT);
 
     JS_ASSERT(isGCSweeping());
     for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
         JSScript *script = i.get<JSScript>();
         JS_ASSERT(script->zone()->isGCSweeping());
         if (!script->hasAnyBreakpointsOrStepMode())
             continue;
 
@@ -224,17 +225,17 @@ Zone::discardJitCode(FreeOp *fop)
 #endif
 }
 
 uint64_t
 Zone::gcNumber()
 {
     // Zones in use by exclusive threads are not collected, and threads using
     // them cannot access the main runtime's gcNumber without racing.
-    return usedByExclusiveThread ? 0 : runtimeFromMainThread()->gcNumber;
+    return usedByExclusiveThread ? 0 : runtimeFromMainThread()->gc.number;
 }
 
 #ifdef JS_ION
 js::jit::JitZone *
 Zone::createJitZone(JSContext *cx)
 {
     MOZ_ASSERT(!jitZone_);
 
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -352,18 +352,18 @@ enum ZoneSelector {
 };
 
 class ZonesIter {
   private:
     JS::Zone **it, **end;
 
   public:
     ZonesIter(JSRuntime *rt, ZoneSelector selector) {
-        it = rt->zones.begin();
-        end = rt->zones.end();
+        it = rt->gc.zones.begin();
+        end = rt->gc.zones.end();
 
         if (selector == SkipAtoms) {
             JS_ASSERT(rt->isAtomsZone(*it));
             it++;
         }
     }
 
     bool done() const { return it == end; }
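
The zone list itself now lives on GCRuntime (rt->gc.zones), but callers still go through ZonesIter rather than touching the vector directly. A small usage sketch, assuming a JSRuntime *rt and treating the loop body as a placeholder:

    // Visit every zone; pass js::SkipAtoms instead to omit the atoms zone.
    for (js::ZonesIter zone(rt, js::WithAtoms); !zone.done(); zone.next()) {
        // Placeholder per-zone work, e.g. scheduling the zone for collection.
        zone->scheduleGC();
    }
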
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -729,17 +729,17 @@ ICStubCompiler::emitProfilingUpdate(Macr
     emitProfilingUpdate(masm, regs.takeAny(), regs.takeAny(), stubPcOffset);
 }
 
 #ifdef JSGC_GENERATIONAL
 inline bool
 ICStubCompiler::emitPostWriteBarrierSlot(MacroAssembler &masm, Register obj, ValueOperand val,
                                          Register scratch, GeneralRegisterSet saveRegs)
 {
-    Nursery &nursery = cx->runtime()->gcNursery;
+    Nursery &nursery = cx->runtime()->gc.nursery;
 
     Label skipBarrier;
     masm.branchTestObject(Assembler::NotEqual, val, &skipBarrier);
 
     masm.branchPtrInNurseryRange(obj, scratch, &skipBarrier);
 
     Register valReg = masm.extractObject(val, scratch);
     masm.branchPtr(Assembler::Below, valReg, ImmWord(nursery.start()), &skipBarrier);
@@ -3428,17 +3428,17 @@ IsCacheableGetPropCall(JSContext *cx, JS
     if (!shape->getterValue().isObject() || !shape->getterObject()->is<JSFunction>())
         return false;
 
     JSFunction *func = &shape->getterObject()->as<JSFunction>();
 
 #ifdef JSGC_GENERATIONAL
     // Information from get prop call ICs may be used directly from Ion code,
     // and should not be nursery allocated.
-    if (cx->runtime()->gcNursery.isInside(holder) || cx->runtime()->gcNursery.isInside(func))
+    if (cx->runtime()->gc.nursery.isInside(holder) || cx->runtime()->gc.nursery.isInside(func))
         return false;
 #endif
 
     if (func->isNative()) {
         *isScripted = false;
         return true;
     }
 
@@ -3547,17 +3547,17 @@ IsCacheableSetPropCall(JSContext *cx, JS
     if (!shape->setterValue().isObject() || !shape->setterObject()->is<JSFunction>())
         return false;
 
     JSFunction *func = &shape->setterObject()->as<JSFunction>();
 
 #ifdef JSGC_GENERATIONAL
     // Information from set prop call ICs may be used directly from Ion code,
     // and should not be nursery allocated.
-    if (cx->runtime()->gcNursery.isInside(holder) || cx->runtime()->gcNursery.isInside(func))
+    if (cx->runtime()->gc.nursery.isInside(holder) || cx->runtime()->gc.nursery.isInside(func))
         return false;
 #endif
 
     if (func->isNative()) {
         *isScripted = false;
         return true;
     }
 
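The inline guard emitted by emitPostWriteBarrierSlot above is the JIT counterpart of the usual generational post-barrier rule: only a nursery thing stored into a tenured cell needs a store buffer entry. A rough C++ sketch of the same filter, expressed with the runtime helpers this patch uses elsewhere:

    // Sketch of the filter the baseline stub code implements inline.
    static void
    MaybePostWriteBarrier(JSRuntime *rt, JSObject *obj, const JS::Value &val)
    {
        // Non-object values never live in the nursery.
        if (!val.isObject())
            return;

        // An object that is itself in the nursery needs no remembered edge.
        if (IsInsideNursery(rt, obj))
            return;

        // Only tenured-to-nursery edges have to be recorded.
        if (!IsInsideNursery(rt, &val.toObject()))
            return;

        rt->gc.storeBuffer.putWholeCell(obj);
    }
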
--- a/js/src/jit/BaselineJIT.cpp
+++ b/js/src/jit/BaselineJIT.cpp
@@ -451,17 +451,17 @@ BaselineScript::Destroy(FreeOp *fop, Bas
 #ifdef JSGC_GENERATIONAL
     /*
      * When the script contains pointers to nursery things, the store buffer
      * will contain entries referring to the referenced things. Since we can
      * destroy scripts outside the context of a GC, this situation can result
      * in invalid store buffer entries. Assert that if we do destroy scripts
      * outside of a GC, we at least emptied the nursery first.
      */
-    JS_ASSERT(fop->runtime()->gcNursery.isEmpty());
+    JS_ASSERT(fop->runtime()->gc.nursery.isEmpty());
 #endif
     fop->delete_(script);
 }
 
 ICEntry &
 BaselineScript::icEntry(size_t index)
 {
     JS_ASSERT(index < numICEntries());
--- a/js/src/jit/CompileWrappers.cpp
+++ b/js/src/jit/CompileWrappers.cpp
@@ -63,17 +63,17 @@ CompileRuntime::addressOfLastCachedNativ
 {
     return &runtime()->nativeIterCache.last;
 }
 
 #ifdef JS_GC_ZEAL
 const void *
 CompileRuntime::addressOfGCZeal()
 {
-    return &runtime()->gcZeal_;
+    return &runtime()->gc.zealMode;
 }
 #endif
 
 const void *
 CompileRuntime::addressOfInterrupt()
 {
     return &runtime()->interrupt;
 }
@@ -165,17 +165,17 @@ CompileRuntime::maybeGetMathCache()
 {
     return runtime()->maybeGetMathCache();
 }
 
 #ifdef JSGC_GENERATIONAL
 const Nursery &
 CompileRuntime::gcNursery()
 {
-    return runtime()->gcNursery;
+    return runtime()->gc.nursery;
 }
 #endif
 
 Zone *
 CompileZone::zone()
 {
     return reinterpret_cast<Zone *>(this);
 }
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -1725,17 +1725,17 @@ OffThreadCompilationAvailable(JSContext 
     // Require cpuCount > 1 so that Ion compilation jobs and main-thread
     // execution are not competing for the same resources.
     //
     // Skip off thread compilation if PC count profiling is enabled, as
     // CodeGenerator::maybeCreateScriptCounts will not attach script profiles
     // when running off thread.
     return cx->runtime()->canUseParallelIonCompilation()
         && WorkerThreadState().cpuCount > 1
-        && cx->runtime()->gcIncrementalState == gc::NO_INCREMENTAL
+        && cx->runtime()->gc.incrementalState == gc::NO_INCREMENTAL
         && !cx->runtime()->profilingScripts;
 #else
     return false;
 #endif
 }
 
 static void
 TrackAllProperties(JSContext *cx, JSObject *obj)
@@ -2840,23 +2840,23 @@ template void
 jit::FinishInvalidation<SequentialExecution>(FreeOp *fop, JSScript *script);
 
 template void
 jit::FinishInvalidation<ParallelExecution>(FreeOp *fop, JSScript *script);
 
 void
 jit::MarkValueFromIon(JSRuntime *rt, Value *vp)
 {
-    gc::MarkValueUnbarriered(&rt->gcMarker, vp, "write barrier");
+    gc::MarkValueUnbarriered(&rt->gc.marker, vp, "write barrier");
 }
 
 void
 jit::MarkShapeFromIon(JSRuntime *rt, Shape **shapep)
 {
-    gc::MarkShapeUnbarriered(&rt->gcMarker, shapep, "write barrier");
+    gc::MarkShapeUnbarriered(&rt->gc.marker, shapep, "write barrier");
 }
 
 void
 jit::ForbidCompilation(JSContext *cx, JSScript *script)
 {
     ForbidCompilation(cx, script, SequentialExecution);
 }
 
--- a/js/src/jit/IonFrames.cpp
+++ b/js/src/jit/IonFrames.cpp
@@ -960,31 +960,31 @@ UpdateIonJSFrameForMinorGC(JSTracer *trc
     const SafepointIndex *si = ionScript->getSafepointIndex(frame.returnAddressToFp());
     SafepointReader safepoint(ionScript, si);
 
     GeneralRegisterSet slotsRegs = safepoint.slotsOrElementsSpills();
     uintptr_t *spill = frame.spillBase();
     for (GeneralRegisterBackwardIterator iter(safepoint.allGprSpills()); iter.more(); iter++) {
         --spill;
         if (slotsRegs.has(*iter))
-            trc->runtime()->gcNursery.forwardBufferPointer(reinterpret_cast<HeapSlot **>(spill));
+            trc->runtime()->gc.nursery.forwardBufferPointer(reinterpret_cast<HeapSlot **>(spill));
     }
 
     // Skip to the right place in the safepoint
     uint32_t slot;
     while (safepoint.getGcSlot(&slot));
     while (safepoint.getValueSlot(&slot));
 #ifdef JS_NUNBOX32
     LAllocation type, payload;
     while (safepoint.getNunboxSlot(&type, &payload));
 #endif
 
     while (safepoint.getSlotsOrElementsSlot(&slot)) {
         HeapSlot **slots = reinterpret_cast<HeapSlot **>(layout->slotRef(slot));
-        trc->runtime()->gcNursery.forwardBufferPointer(slots);
+        trc->runtime()->gc.nursery.forwardBufferPointer(slots);
     }
 }
 #endif
 
 static void
 MarkBaselineStubFrame(JSTracer *trc, const JitFrameIterator &frame)
 {
     // Mark the ICStub pointer stored in the stub frame. This is necessary
@@ -1297,17 +1297,17 @@ GetPcScript(JSContext *cx, JSScript **sc
     uint8_t *retAddr = it.returnAddress();
     uint32_t hash = PcScriptCache::Hash(retAddr);
     JS_ASSERT(retAddr != nullptr);
 
     // Lazily initialize the cache. The allocation may safely fail and will not GC.
     if (MOZ_UNLIKELY(rt->ionPcScriptCache == nullptr)) {
         rt->ionPcScriptCache = (PcScriptCache *)js_malloc(sizeof(struct PcScriptCache));
         if (rt->ionPcScriptCache)
-            rt->ionPcScriptCache->clear(rt->gcNumber);
+            rt->ionPcScriptCache->clear(rt->gc.number);
     }
 
     // Attempt to lookup address in cache.
     if (rt->ionPcScriptCache && rt->ionPcScriptCache->get(rt, hash, retAddr, scriptRes, pcRes))
         return;
 
     // Lookup failed: undertake expensive process to recover the innermost inlined frame.
     ++it; // Skip exit frame.
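
UpdateIonJSFrameForMinorGC above patches raw interior pointers that Ion spilled to the stack: after the nursery moves an object, any HeapSlot* pointing at its slots or elements buffer must be forwarded via nursery.forwardBufferPointer, which rewrites the pointer in place when its target was evacuated. The calling pattern in isolation, with the safepoint walking elided and slotAddresses as a hypothetical stand-in for the recovered stack locations:

    // Sketch: forward every known raw pointer into a possibly-moved buffer.
    static void
    ForwardSpilledSlotPointers(JSRuntime *rt, HeapSlot ***slotAddresses, size_t count)
    {
        for (size_t i = 0; i < count; i++) {
            // No-op if the buffer this location points at was not moved.
            rt->gc.nursery.forwardBufferPointer(slotAddresses[i]);
        }
    }
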
--- a/js/src/jit/IonLinker.h
+++ b/js/src/jit/IonLinker.h
@@ -62,17 +62,17 @@ class Linker
         if (!code)
             return nullptr;
         if (masm.oom())
             return fail(cx);
         code->copyFrom(masm);
         masm.link(code);
 #ifdef JSGC_GENERATIONAL
         if (masm.embedsNurseryPointers())
-            cx->runtime()->gcStoreBuffer.putWholeCell(code);
+            cx->runtime()->gc.storeBuffer.putWholeCell(code);
 #endif
         return code;
     }
 
   public:
     Linker(MacroAssembler &masm)
       : masm(masm)
     {
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -455,17 +455,17 @@ void InvalidateAll(FreeOp *fop, JS::Zone
 template <ExecutionMode mode>
 void FinishInvalidation(FreeOp *fop, JSScript *script);
 
 inline bool
 ShouldPreserveParallelJITCode(JSRuntime *rt, JSScript *script, bool increase = false)
 {
     IonScript *parallelIon = script->parallelIonScript();
     uint32_t age = increase ? parallelIon->increaseParallelAge() : parallelIon->parallelAge();
-    return age < jit::IonScript::MAX_PARALLEL_AGE && !rt->gcShouldCleanUpEverything;
+    return age < jit::IonScript::MAX_PARALLEL_AGE && !rt->gc.shouldCleanUpEverything;
 }
 
 // On windows systems, really large frames need to be incrementally touched.
 // The following constant defines the minimum increment of the touch.
 #ifdef XP_WIN
 const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
 #endif
 
--- a/js/src/jit/PcScriptCache.h
+++ b/js/src/jit/PcScriptCache.h
@@ -40,18 +40,18 @@ struct PcScriptCache
         this->gcNumber = gcNumber;
     }
 
     // Get a value from the cache. May perform lazy allocation.
     bool get(JSRuntime *rt, uint32_t hash, uint8_t *addr,
              JSScript **scriptRes, jsbytecode **pcRes)
     {
         // If a GC occurred, lazily clear the cache now.
-        if (gcNumber != rt->gcNumber) {
-            clear(rt->gcNumber);
+        if (gcNumber != rt->gc.number) {
+            clear(rt->gc.number);
             return false;
         }
 
         if (entries[hash].returnAddress != addr)
             return false;
 
         *scriptRes = entries[hash].script;
         if (pcRes)
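
PcScriptCache above uses the runtime-wide GC counter, now rt->gc.number, for lazy invalidation: instead of being flushed on every collection it records the counter it last saw, and a lookup that observes a different value clears the cache before failing. The same pattern in isolation, with the actual entries elided and GCVersionedLookup as a purely illustrative helper:

    // Sketch of gc.number-based lazy invalidation; 'lastGCNumber' is the
    // counter value recorded when the cache was last cleared.
    static bool
    GCVersionedLookup(JSRuntime *rt, uint64_t *lastGCNumber)
    {
        // Any intervening GC may have moved or freed the cached pointers.
        if (*lastGCNumber != rt->gc.number) {
            // ... drop all cached entries here ...
            *lastGCNumber = rt->gc.number;
            return false;
        }

        // ... a normal cache probe would go here; returning false keeps the
        // sketch self-contained ...
        return false;
    }
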
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -549,17 +549,17 @@ NewCallObject(JSContext *cx, HandleShape
     if (!obj)
         return nullptr;
 
 #ifdef JSGC_GENERATIONAL
     // The JIT creates call objects in the nursery and so elides barriers for
     // the initializing writes. The interpreter, however, may have allocated
     // the call object tenured, so barrier as needed before re-entering.
     if (!IsInsideNursery(cx->runtime(), obj))
-        cx->runtime()->gcStoreBuffer.putWholeCell(obj);
+        cx->runtime()->gc.storeBuffer.putWholeCell(obj);
 #endif
 
     return obj;
 }
 
 JSObject *
 NewSingletonCallObject(JSContext *cx, HandleShape shape, HeapSlot *slots)
 {
@@ -568,17 +568,17 @@ NewSingletonCallObject(JSContext *cx, Ha
         return nullptr;
 
 #ifdef JSGC_GENERATIONAL
     // The JIT creates call objects in the nursery and so elides barriers for
     // the initializing writes. The interpreter, however, may have allocated
     // the call object tenured, so barrier as needed before re-entering.
     MOZ_ASSERT(!IsInsideNursery(cx->runtime(), obj),
                "singletons are created in the tenured heap");
-    cx->runtime()->gcStoreBuffer.putWholeCell(obj);
+    cx->runtime()->gc.storeBuffer.putWholeCell(obj);
 #endif
 
     return obj;
 }
 
 JSObject *
 NewStringObject(JSContext *cx, HandleString str)
 {
@@ -709,17 +709,17 @@ FilterArgumentsOrEval(JSContext *cx, JSS
         !StringHasPattern(chars, str->length(), eval, mozilla::ArrayLength(eval));
 }
 
 #ifdef JSGC_GENERATIONAL
 void
 PostWriteBarrier(JSRuntime *rt, JSObject *obj)
 {
     JS_ASSERT(!IsInsideNursery(rt, obj));
-    rt->gcStoreBuffer.putWholeCell(obj);
+    rt->gc.storeBuffer.putWholeCell(obj);
 }
 
 void
 PostGlobalWriteBarrier(JSRuntime *rt, JSObject *obj)
 {
     JS_ASSERT(obj->is<GlobalObject>());
     if (!obj->compartment()->globalWriteBarriered) {
         PostWriteBarrier(rt, obj);
--- a/js/src/jsapi-tests/testGCFinalizeCallback.cpp
+++ b/js/src/jsapi-tests/testGCFinalizeCallback.cpp
@@ -12,95 +12,95 @@ static bool IsCompartmentGCBuffer[Buffer
 BEGIN_TEST(testGCFinalizeCallback)
 {
     JS_SetGCParameter(rt, JSGC_MODE, JSGC_MODE_INCREMENTAL);
     JS_SetFinalizeCallback(rt, FinalizeCallback);
 
     /* Full GC, non-incremental. */
     FinalizeCalls = 0;
     JS_GC(rt);
-    CHECK(rt->gcIsFull);
+    CHECK(rt->gc.isFull);
     CHECK(checkSingleGroup());
     CHECK(checkFinalizeStatus());
     CHECK(checkFinalizeIsCompartmentGC(false));
 
     /* Full GC, incremental. */
     FinalizeCalls = 0;
     JS::PrepareForFullGC(rt);
     JS::IncrementalGC(rt, JS::gcreason::API, 1000000);
-    CHECK(rt->gcIncrementalState == js::gc::NO_INCREMENTAL);
-    CHECK(rt->gcIsFull);
+    CHECK(rt->gc.incrementalState == js::gc::NO_INCREMENTAL);
+    CHECK(rt->gc.isFull);
     CHECK(checkMultipleGroups());
     CHECK(checkFinalizeStatus());
     CHECK(checkFinalizeIsCompartmentGC(false));
 
     JS::RootedObject global1(cx, createGlobal());
     JS::RootedObject global2(cx, createGlobal());
     JS::RootedObject global3(cx, createGlobal());
     CHECK(global1);
     CHECK(global2);
     CHECK(global3);
 
     /* Compartment GC, non-incremental, single compartment. */
     FinalizeCalls = 0;
     JS::PrepareZoneForGC(global1->zone());
     JS::GCForReason(rt, JS::gcreason::API);
-    CHECK(!rt->gcIsFull);
+    CHECK(!rt->gc.isFull);
     CHECK(checkSingleGroup());
     CHECK(checkFinalizeStatus());
     CHECK(checkFinalizeIsCompartmentGC(true));
 
     /* Compartment GC, non-incremental, multiple compartments. */
     FinalizeCalls = 0;
     JS::PrepareZoneForGC(global1->zone());
     JS::PrepareZoneForGC(global2->zone());
     JS::PrepareZoneForGC(global3->zone());
     JS::GCForReason(rt, JS::gcreason::API);
-    CHECK(!rt->gcIsFull);
+    CHECK(!rt->gc.isFull);
     CHECK(checkSingleGroup());
     CHECK(checkFinalizeStatus());
     CHECK(checkFinalizeIsCompartmentGC(true));
 
     /* Compartment GC, incremental, single compartment. */
     FinalizeCalls = 0;
     JS::PrepareZoneForGC(global1->zone());
     JS::IncrementalGC(rt, JS::gcreason::API, 1000000);
-    CHECK(rt->gcIncrementalState == js::gc::NO_INCREMENTAL);
-    CHECK(!rt->gcIsFull);
+    CHECK(rt->gc.incrementalState == js::gc::NO_INCREMENTAL);
+    CHECK(!rt->gc.isFull);
     CHECK(checkSingleGroup());
     CHECK(checkFinalizeStatus());
     CHECK(checkFinalizeIsCompartmentGC(true));
 
     /* Compartment GC, incremental, multiple compartments. */
     FinalizeCalls = 0;
     JS::PrepareZoneForGC(global1->zone());
     JS::PrepareZoneForGC(global2->zone());
     JS::PrepareZoneForGC(global3->zone());
     JS::IncrementalGC(rt, JS::gcreason::API, 1000000);
-    CHECK(rt->gcIncrementalState == js::gc::NO_INCREMENTAL);
-    CHECK(!rt->gcIsFull);
+    CHECK(rt->gc.incrementalState == js::gc::NO_INCREMENTAL);
+    CHECK(!rt->gc.isFull);
     CHECK(checkMultipleGroups());
     CHECK(checkFinalizeStatus());
     CHECK(checkFinalizeIsCompartmentGC(true));
 
 #ifdef JS_GC_ZEAL
 
     /* Full GC with reset due to new compartment, becoming compartment GC. */
 
     FinalizeCalls = 0;
     JS_SetGCZeal(cx, 9, 1000000);
     JS::PrepareForFullGC(rt);
     js::GCDebugSlice(rt, true, 1);
-    CHECK(rt->gcIncrementalState == js::gc::MARK);
-    CHECK(rt->gcIsFull);
+    CHECK(rt->gc.incrementalState == js::gc::MARK);
+    CHECK(rt->gc.isFull);
 
     JS::RootedObject global4(cx, createGlobal());
     js::GCDebugSlice(rt, true, 1);
-    CHECK(rt->gcIncrementalState == js::gc::NO_INCREMENTAL);
-    CHECK(!rt->gcIsFull);
+    CHECK(rt->gc.incrementalState == js::gc::NO_INCREMENTAL);
+    CHECK(!rt->gc.isFull);
     CHECK(checkMultipleGroups());
     CHECK(checkFinalizeStatus());
 
     for (unsigned i = 0; i < FinalizeCalls - 1; ++i)
         CHECK(!IsCompartmentGCBuffer[i]);
     CHECK(IsCompartmentGCBuffer[FinalizeCalls - 1]);
 
     JS_SetGCZeal(cx, 0, 0);
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -153,17 +153,17 @@ JS_GetEmptyString(JSRuntime *rt)
     return rt->emptyString;
 }
 
 namespace js {
 
 void
 AssertHeapIsIdle(JSRuntime *rt)
 {
-    JS_ASSERT(rt->heapState == js::Idle);
+    JS_ASSERT(rt->gc.heapState == js::Idle);
 }
 
 void
 AssertHeapIsIdle(JSContext *cx)
 {
     AssertHeapIsIdle(cx->runtime());
 }
 
@@ -713,17 +713,17 @@ StopRequest(JSContext *cx)
 {
     JSRuntime *rt = cx->runtime();
     JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
     JS_ASSERT(rt->requestDepth != 0);
     if (rt->requestDepth != 1) {
         rt->requestDepth--;
     } else {
-        rt->conservativeGC.updateForRequestEnd();
+        rt->gc.conservativeGC.updateForRequestEnd();
         rt->requestDepth = 0;
         rt->triggerActivityCallback(false);
     }
 }
 #endif /* JS_THREADSAFE */
 
 JS_PUBLIC_API(void)
 JS_BeginRequest(JSContext *cx)
@@ -1608,27 +1608,27 @@ JS::RemoveScriptRootRT(JSRuntime *rt, JS
     RemoveRoot(rt, (void *)rp);
     *rp = nullptr;
 }
 
 JS_PUBLIC_API(bool)
 JS_AddExtraGCRootsTracer(JSRuntime *rt, JSTraceDataOp traceOp, void *data)
 {
     AssertHeapIsIdle(rt);
-    return !!rt->gcBlackRootTracers.append(JSRuntime::ExtraTracer(traceOp, data));
+    return !!rt->gc.blackRootTracers.append(ExtraTracer(traceOp, data));
 }
 
 JS_PUBLIC_API(void)
 JS_RemoveExtraGCRootsTracer(JSRuntime *rt, JSTraceDataOp traceOp, void *data)
 {
     AssertHeapIsIdle(rt);
-    for (size_t i = 0; i < rt->gcBlackRootTracers.length(); i++) {
-        JSRuntime::ExtraTracer *e = &rt->gcBlackRootTracers[i];
+    for (size_t i = 0; i < rt->gc.blackRootTracers.length(); i++) {
+        ExtraTracer *e = &rt->gc.blackRootTracers[i];
         if (e->op == traceOp && e->data == data) {
-            rt->gcBlackRootTracers.erase(e);
+            rt->gc.blackRootTracers.erase(e);
             break;
         }
     }
 }
 
 #ifdef DEBUG
 
 typedef struct JSHeapDumpNode JSHeapDumpNode;
@@ -1892,25 +1892,25 @@ JS_MaybeGC(JSContext *cx)
 {
     MaybeGC(cx);
 }
 
 JS_PUBLIC_API(void)
 JS_SetGCCallback(JSRuntime *rt, JSGCCallback cb, void *data)
 {
     AssertHeapIsIdle(rt);
-    rt->gcCallback = cb;
-    rt->gcCallbackData = data;
+    rt->gc.callback = cb;
+    rt->gc.callbackData = data;
 }
 
 JS_PUBLIC_API(void)
 JS_SetFinalizeCallback(JSRuntime *rt, JSFinalizeCallback cb)
 {
     AssertHeapIsIdle(rt);
-    rt->gcFinalizeCallback = cb;
+    rt->gc.finalizeCallback = cb;
 }
 
 JS_PUBLIC_API(bool)
 JS_IsAboutToBeFinalized(JS::Heap<JSObject *> *objp)
 {
     return IsObjectAboutToBeFinalized(objp->unsafeGet());
 }
 
@@ -1920,113 +1920,113 @@ JS_IsAboutToBeFinalizedUnbarriered(JSObj
     return IsObjectAboutToBeFinalized(objp);
 }
 
 JS_PUBLIC_API(void)
 JS_SetGCParameter(JSRuntime *rt, JSGCParamKey key, uint32_t value)
 {
     switch (key) {
       case JSGC_MAX_BYTES: {
-        JS_ASSERT(value >= rt->gcBytes);
-        rt->gcMaxBytes = value;
+        JS_ASSERT(value >= rt->gc.bytes);
+        rt->gc.maxBytes = value;
         break;
       }
       case JSGC_MAX_MALLOC_BYTES:
         rt->setGCMaxMallocBytes(value);
         break;
       case JSGC_SLICE_TIME_BUDGET:
-        rt->gcSliceBudget = SliceBudget::TimeBudget(value);
+        rt->gc.sliceBudget = SliceBudget::TimeBudget(value);
         break;
       case JSGC_MARK_STACK_LIMIT:
         js::SetMarkStackLimit(rt, value);
         break;
       case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
-        rt->gcHighFrequencyTimeThreshold = value;
+        rt->gc.highFrequencyTimeThreshold = value;
         break;
       case JSGC_HIGH_FREQUENCY_LOW_LIMIT:
-        rt->gcHighFrequencyLowLimitBytes = value * 1024 * 1024;
+        rt->gc.highFrequencyLowLimitBytes = value * 1024 * 1024;
         break;
       case JSGC_HIGH_FREQUENCY_HIGH_LIMIT:
-        rt->gcHighFrequencyHighLimitBytes = value * 1024 * 1024;
+        rt->gc.highFrequencyHighLimitBytes = value * 1024 * 1024;
         break;
       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX:
-        rt->gcHighFrequencyHeapGrowthMax = value / 100.0;
-        MOZ_ASSERT(rt->gcHighFrequencyHeapGrowthMax / 0.85 > 1.0);
+        rt->gc.highFrequencyHeapGrowthMax = value / 100.0;
+        MOZ_ASSERT(rt->gc.highFrequencyHeapGrowthMax / 0.85 > 1.0);
         break;
       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MIN:
-        rt->gcHighFrequencyHeapGrowthMin = value / 100.0;
-        MOZ_ASSERT(rt->gcHighFrequencyHeapGrowthMin / 0.85 > 1.0);
+        rt->gc.highFrequencyHeapGrowthMin = value / 100.0;
+        MOZ_ASSERT(rt->gc.highFrequencyHeapGrowthMin / 0.85 > 1.0);
         break;
       case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
-        rt->gcLowFrequencyHeapGrowth = value / 100.0;
-        MOZ_ASSERT(rt->gcLowFrequencyHeapGrowth / 0.9 > 1.0);
+        rt->gc.lowFrequencyHeapGrowth = value / 100.0;
+        MOZ_ASSERT(rt->gc.lowFrequencyHeapGrowth / 0.9 > 1.0);
         break;
       case JSGC_DYNAMIC_HEAP_GROWTH:
-        rt->gcDynamicHeapGrowth = value;
+        rt->gc.dynamicHeapGrowth = value;
         break;
       case JSGC_DYNAMIC_MARK_SLICE:
-        rt->gcDynamicMarkSlice = value;
+        rt->gc.dynamicMarkSlice = value;
         break;
       case JSGC_ALLOCATION_THRESHOLD:
-        rt->gcAllocationThreshold = value * 1024 * 1024;
+        rt->gc.allocationThreshold = value * 1024 * 1024;
         break;
       case JSGC_DECOMMIT_THRESHOLD:
-        rt->gcDecommitThreshold = value * 1024 * 1024;
+        rt->gc.decommitThreshold = value * 1024 * 1024;
         break;
       default:
         JS_ASSERT(key == JSGC_MODE);
         rt->setGCMode(JSGCMode(value));
         JS_ASSERT(rt->gcMode() == JSGC_MODE_GLOBAL ||
                   rt->gcMode() == JSGC_MODE_COMPARTMENT ||
                   rt->gcMode() == JSGC_MODE_INCREMENTAL);
         return;
     }
 }
 
 JS_PUBLIC_API(uint32_t)
 JS_GetGCParameter(JSRuntime *rt, JSGCParamKey key)
 {
     switch (key) {
       case JSGC_MAX_BYTES:
-        return uint32_t(rt->gcMaxBytes);
+        return uint32_t(rt->gc.maxBytes);
       case JSGC_MAX_MALLOC_BYTES:
-        return rt->gcMaxMallocBytes;
+        return rt->gc.maxMallocBytes;
       case JSGC_BYTES:
-        return uint32_t(rt->gcBytes);
+        return uint32_t(rt->gc.bytes);
       case JSGC_MODE:
         return uint32_t(rt->gcMode());
       case JSGC_UNUSED_CHUNKS:
-        return uint32_t(rt->gcChunkPool.getEmptyCount());
+        return uint32_t(rt->gc.chunkPool.getEmptyCount());
       case JSGC_TOTAL_CHUNKS:
-        return uint32_t(rt->gcChunkSet.count() + rt->gcChunkPool.getEmptyCount());
+        return uint32_t(rt->gc.chunkSet.count() + rt->gc.chunkPool.getEmptyCount());
       case JSGC_SLICE_TIME_BUDGET:
-        return uint32_t(rt->gcSliceBudget > 0 ? rt->gcSliceBudget / PRMJ_USEC_PER_MSEC : 0);
+        return uint32_t(rt->gc.sliceBudget > 0 ? rt->gc.sliceBudget / PRMJ_USEC_PER_MSEC : 0);
       case JSGC_MARK_STACK_LIMIT:
-        return rt->gcMarker.maxCapacity();
+        return rt->gc.marker.maxCapacity();
       case JSGC_HIGH_FREQUENCY_TIME_LIMIT:
-        return rt->gcHighFrequencyTimeThreshold;
+        return rt->gc.highFrequencyTimeThreshold;
       case JSGC_HIGH_FREQUENCY_LOW_LIMIT:
-        return rt->gcHighFrequencyLowLimitBytes / 1024 / 1024;
+        return rt->gc.highFrequencyLowLimitBytes / 1024 / 1024;
       case JSGC_HIGH_FREQUENCY_HIGH_LIMIT:
-        return rt->gcHighFrequencyHighLimitBytes / 1024 / 1024;
+        return rt->gc.highFrequencyHighLimitBytes / 1024 / 1024;
       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX:
-        return uint32_t(rt->gcHighFrequencyHeapGrowthMax * 100);
+        return uint32_t(rt->gc.highFrequencyHeapGrowthMax * 100);
       case JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MIN:
-        return uint32_t(rt->gcHighFrequencyHeapGrowthMin * 100);
+        return uint32_t(rt->gc.highFrequencyHeapGrowthMin * 100);
       case JSGC_LOW_FREQUENCY_HEAP_GROWTH:
-        return uint32_t(rt->gcLowFrequencyHeapGrowth * 100);
+        return uint32_t(rt->gc.lowFrequencyHeapGrowth * 100);
       case JSGC_DYNAMIC_HEAP_GROWTH:
-        return rt->gcDynamicHeapGrowth;
+        return rt->gc.dynamicHeapGrowth;
       case JSGC_DYNAMIC_MARK_SLICE:
-        return rt->gcDynamicMarkSlice;
+        return rt->gc.dynamicMarkSlice;
       case JSGC_ALLOCATION_THRESHOLD:
-        return rt->gcAllocationThreshold / 1024 / 1024;
+        return rt->gc.allocationThreshold / 1024 / 1024;
       default:
         JS_ASSERT(key == JSGC_NUMBER);
-        return uint32_t(rt->gcNumber);
+        return uint32_t(rt->gc.number);
     }
 }
 
 JS_PUBLIC_API(void)
 JS_SetGCParameterForThread(JSContext *cx, JSGCParamKey key, uint32_t value)
 {
     JS_ASSERT(key == JSGC_MAX_CODE_CACHE_BYTES);
 }
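
JS_SetGCParameter and JS_GetGCParameter above keep the embedder-facing units stable while the storage moves onto rt->gc: megabyte-valued keys are multiplied out to bytes, and the heap-growth keys are passed as percentages but stored as floating-point factors, with the getter converting back. A small usage sketch (assuming an initialized JSRuntime *rt):

    // Growth limits are given as percentages; 300 is stored as the factor 3.0.
    JS_SetGCParameter(rt, JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX, 300);

    // Size limits are given in MiB; 100 is stored as 100 * 1024 * 1024 bytes.
    JS_SetGCParameter(rt, JSGC_HIGH_FREQUENCY_LOW_LIMIT, 100);

    // Reading the parameters back converts from the internal representation,
    // so both calls return the values passed in above (300 and 100).
    uint32_t growthMax = JS_GetGCParameter(rt, JSGC_HIGH_FREQUENCY_HEAP_GROWTH_MAX);
    uint32_t lowLimitMB = JS_GetGCParameter(rt, JSGC_HIGH_FREQUENCY_LOW_LIMIT);
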
@@ -2496,30 +2496,30 @@ JS_NewGlobalObject(JSContext *cx, const 
     CHECK_REQUEST(cx);
     JS_ASSERT(!cx->runtime()->isAtomsCompartment(cx->compartment()));
     JS_ASSERT(!cx->isExceptionPending());
 
     JSRuntime *rt = cx->runtime();
 
     Zone *zone;
     if (options.zoneSpecifier() == JS::SystemZone)
-        zone = rt->systemZone;
+        zone = rt->gc.systemZone;
     else if (options.zoneSpecifier() == JS::FreshZone)
         zone = nullptr;
     else
         zone = static_cast<Zone *>(options.zonePointer());
 
     AutoCompartmentRooter compartment(cx, NewCompartment(cx, zone, principals, options));
     if (!compartment)
         return nullptr;
 
     // Lazily create the system zone.
-    if (!rt->systemZone && options.zoneSpecifier() == JS::SystemZone) {
-        rt->systemZone = compartment->zone();
-        rt->systemZone->isSystem = true;
+    if (!rt->gc.systemZone && options.zoneSpecifier() == JS::SystemZone) {
+        rt->gc.systemZone = compartment->zone();
+        rt->gc.systemZone->isSystem = true;
     }
 
     Rooted<GlobalObject *> global(cx);
     {
         AutoCompartment ac(cx, compartment);
         global = GlobalObject::create(cx, Valueify(clasp));
     }
 
@@ -6198,17 +6198,17 @@ JS_PUBLIC_API(void)
 JS_SetGCZeal(JSContext *cx, uint8_t zeal, uint32_t frequency)
 {
     SetGCZeal(cx->runtime(), zeal, frequency);
 }
 
 JS_PUBLIC_API(void)
 JS_ScheduleGC(JSContext *cx, uint32_t count)
 {
-    cx->runtime()->gcNextScheduled = count;
+    cx->runtime()->gc.nextScheduled = count;
 }
 #endif
 
 JS_PUBLIC_API(void)
 JS_SetParallelParsingEnabled(JSRuntime *rt, bool enabled)
 {
 #ifdef JS_ION
     rt->setParallelParsingEnabled(enabled);
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -218,17 +218,17 @@ struct ThreadSafeContext : ContextFriend
     // The generational GC nursery may only be used on the main thread.
 #ifdef JSGC_GENERATIONAL
     inline bool hasNursery() const {
         return isJSContext();
     }
 
     inline js::Nursery &nursery() {
         JS_ASSERT(hasNursery());
-        return runtime_->gcNursery;
+        return runtime_->gc.nursery;
     }
 #endif
 
     /*
      * Allocator used when allocating GCThings on this context. If we are a
      * JSContext, this is the Zone allocator of the JSContext's zone.
      * Otherwise, this is a per-thread allocator.
      *
@@ -284,17 +284,17 @@ struct ThreadSafeContext : ContextFriend
     AtomSet &permanentAtoms() { return *runtime_->permanentAtoms; }
     const JS::AsmJSCacheOps &asmJSCacheOps() { return runtime_->asmJSCacheOps; }
     PropertyName *emptyString() { return runtime_->emptyString; }
     FreeOp *defaultFreeOp() { return runtime_->defaultFreeOp(); }
     bool useHelperThreads() { return runtime_->useHelperThreads(); }
     void *runtimeAddressForJit() { return runtime_; }
     void *stackLimitAddress(StackKind kind) { return &runtime_->mainThread.nativeStackLimit[kind]; }
     void *stackLimitAddressForJitCode(StackKind kind);
-    size_t gcSystemPageSize() { return runtime_->pageAllocator.systemPageSize(); }
+    size_t gcSystemPageSize() { return runtime_->gc.pageAllocator.systemPageSize(); }
     bool signalHandlersInstalled() const { return runtime_->signalHandlersInstalled(); }
     bool jitSupportsFloatingPoint() const { return runtime_->jitSupportsFloatingPoint; }
 
     // Thread local data that may be accessed freely.
     DtoaState *dtoaState() {
         return perThreadData->dtoaState;
     }
 };
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -253,17 +253,17 @@ JSCompartment::putWrapper(JSContext *cx,
 
 #ifdef JSGC_GENERATIONAL
     /* There's no point allocating wrappers in the nursery since we will tenure them anyway. */
     Nursery &nursery = cx->nursery();
     JS_ASSERT(!nursery.isInside(wrapper.toGCThing()));
 
     if (success && (nursery.isInside(wrapped.wrapped) || nursery.isInside(wrapped.debugger))) {
         WrapperMapRef ref(&crossCompartmentWrappers, wrapped);
-        cx->runtime()->gcStoreBuffer.putGeneric(ref);
+        cx->runtime()->gc.storeBuffer.putGeneric(ref);
     }
 #endif
 
     return success;
 }
 
 bool
 JSCompartment::wrap(JSContext *cx, JSString **strp)
@@ -552,17 +552,17 @@ JSCompartment::sweep(FreeOp *fop, bool r
     JS_ASSERT(!activeAnalysis);
 
     /* This function includes itself in PHASE_SWEEP_TABLES. */
     sweepCrossCompartmentWrappers();
 
     JSRuntime *rt = runtimeFromMainThread();
 
     {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_TABLES);
+        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_TABLES);
 
         /* Remove dead references held weakly by the compartment. */
 
         sweepBaseShapeTable();
         sweepInitialShapeTable();
         sweepNewTypeObjectTable(newTypeObjects);
         sweepNewTypeObjectTable(lazyTypeObjects);
         sweepCallsiteClones();
@@ -611,18 +611,18 @@ JSCompartment::sweep(FreeOp *fop, bool r
  * string entries in the crossCompartmentWrappers table are not marked during
  * markCrossCompartmentWrappers.
  */
 void
 JSCompartment::sweepCrossCompartmentWrappers()
 {
     JSRuntime *rt = runtimeFromMainThread();
 
-    gcstats::AutoPhase ap1(rt->gcStats, gcstats::PHASE_SWEEP_TABLES);
-    gcstats::AutoPhase ap2(rt->gcStats, gcstats::PHASE_SWEEP_TABLES_WRAPPER);
+    gcstats::AutoPhase ap1(rt->gc.stats, gcstats::PHASE_SWEEP_TABLES);
+    gcstats::AutoPhase ap2(rt->gc.stats, gcstats::PHASE_SWEEP_TABLES_WRAPPER);
 
     /* Remove dead wrappers from the table. */
     for (WrapperMap::Enum e(crossCompartmentWrappers); !e.empty(); e.popFront()) {
         CrossCompartmentKey key = e.front().key();
         bool keyDying = IsCellAboutToBeFinalized(&key.wrapped);
         bool valDying = IsValueAboutToBeFinalized(e.front().value().unsafeGet());
         bool dbgDying = key.debugger && IsObjectAboutToBeFinalized(&key.debugger);
         if (keyDying || valDying || dbgDying) {
--- a/js/src/jsfriendapi.cpp
+++ b/js/src/jsfriendapi.cpp
@@ -54,18 +54,18 @@ JS_FRIEND_API(SourceHook *)
 js::ForgetSourceHook(JSRuntime *rt)
 {
     return rt->sourceHook.forget();
 }
 
 JS_FRIEND_API(void)
 JS_SetGrayGCRootsTracer(JSRuntime *rt, JSTraceDataOp traceOp, void *data)
 {
-    rt->gcGrayRootTracer.op = traceOp;
-    rt->gcGrayRootTracer.data = data;
+    rt->gc.grayRootTracer.op = traceOp;
+    rt->gc.grayRootTracer.data = data;
 }
 
 JS_FRIEND_API(JSString *)
 JS_GetAnonymousString(JSRuntime *rt)
 {
     JS_ASSERT(rt->hasContexts());
     return rt->commonNames->anonymous;
 }
@@ -627,17 +627,17 @@ js::TraceWeakMaps(WeakMapTracer *trc)
 {
     WeakMapBase::traceAllMappings(trc);
     WatchpointMap::traceAll(trc);
 }
 
 extern JS_FRIEND_API(bool)
 js::AreGCGrayBitsValid(JSRuntime *rt)
 {
-    return rt->gcGrayBitsValid;
+    return rt->gc.grayBitsValid;
 }
 
 JS_FRIEND_API(bool)
 js::ZoneGlobalsAreAllGray(JS::Zone *zone)
 {
     for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
         JSObject *obj = comp->maybeGlobal();
         if (!obj || !JS::GCThingIsMarkedGray(obj))
@@ -852,37 +852,37 @@ JS_FRIEND_API(bool)
 js::IsContextRunningJS(JSContext *cx)
 {
     return cx->currentlyRunning();
 }
 
 JS_FRIEND_API(JS::GCSliceCallback)
 JS::SetGCSliceCallback(JSRuntime *rt, GCSliceCallback callback)
 {
-    JS::GCSliceCallback old = rt->gcSliceCallback;
-    rt->gcSliceCallback = callback;
+    JS::GCSliceCallback old = rt->gc.sliceCallback;
+    rt->gc.sliceCallback = callback;
     return old;
 }
 
 JS_FRIEND_API(bool)
 JS::WasIncrementalGC(JSRuntime *rt)
 {
-    return rt->gcIsIncremental;
+    return rt->gc.isIncremental;
 }
 
 jschar *
 GCDescription::formatMessage(JSRuntime *rt) const
 {
-    return rt->gcStats.formatMessage();
+    return rt->gc.stats.formatMessage();
 }
 
 jschar *
 GCDescription::formatJSON(JSRuntime *rt, uint64_t timestamp) const
 {
-    return rt->gcStats.formatJSON(timestamp);
+    return rt->gc.stats.formatJSON(timestamp);
 }
 
 JS_FRIEND_API(void)
 JS::NotifyDidPaint(JSRuntime *rt)
 {
     if (rt->gcZeal() == gc::ZealFrameVerifierPreValue) {
         gc::VerifyBarriers(rt, gc::PreBarrierVerifier);
         return;
@@ -894,88 +894,88 @@ JS::NotifyDidPaint(JSRuntime *rt)
     }
 
     if (rt->gcZeal() == gc::ZealFrameGCValue) {
         PrepareForFullGC(rt);
         GCSlice(rt, GC_NORMAL, gcreason::REFRESH_FRAME);
         return;
     }
 
-    if (JS::IsIncrementalGCInProgress(rt) && !rt->gcInterFrameGC) {
+    if (JS::IsIncrementalGCInProgress(rt) && !rt->gc.interFrameGC) {
         JS::PrepareForIncrementalGC(rt);
         GCSlice(rt, GC_NORMAL, gcreason::REFRESH_FRAME);
     }
 
-    rt->gcInterFrameGC = false;
+    rt->gc.interFrameGC = false;
 }
 
 JS_FRIEND_API(bool)
 JS::IsIncrementalGCEnabled(JSRuntime *rt)
 {
-    return rt->gcIncrementalEnabled && rt->gcMode() == JSGC_MODE_INCREMENTAL;
+    return rt->gc.incrementalEnabled && rt->gcMode() == JSGC_MODE_INCREMENTAL;
 }
 
 JS_FRIEND_API(bool)
 JS::IsIncrementalGCInProgress(JSRuntime *rt)
 {
-    return rt->gcIncrementalState != gc::NO_INCREMENTAL && !rt->gcVerifyPreData;
+    return rt->gc.incrementalState != gc::NO_INCREMENTAL && !rt->gc.verifyPreData;
 }
 
 JS_FRIEND_API(void)
 JS::DisableIncrementalGC(JSRuntime *rt)
 {
-    rt->gcIncrementalEnabled = false;
+    rt->gc.incrementalEnabled = false;
 }
 
 JS::AutoDisableGenerationalGC::AutoDisableGenerationalGC(JSRuntime *rt)
   : runtime(rt)
 #if defined(JSGC_GENERATIONAL) && defined(JS_GC_ZEAL)
-  , restartVerifier(rt->gcVerifyPostData)
+  , restartVerifier(rt->gc.verifyPostData)
 #endif
 {
 #ifdef JSGC_GENERATIONAL
     if (IsGenerationalGCEnabled(rt)) {
 #ifdef JS_GC_ZEAL
         if (restartVerifier)
             gc::EndVerifyPostBarriers(rt);
 #endif
         MinorGC(rt, JS::gcreason::API);
-        rt->gcNursery.disable();
-        rt->gcStoreBuffer.disable();
+        rt->gc.nursery.disable();
+        rt->gc.storeBuffer.disable();
     }
 #endif
-    ++rt->gcGenerationalDisabled;
+    ++rt->gc.generationalDisabled;
 }
 
 JS::AutoDisableGenerationalGC::~AutoDisableGenerationalGC()
 {
-    JS_ASSERT(runtime->gcGenerationalDisabled > 0);
-    --runtime->gcGenerationalDisabled;
+    JS_ASSERT(runtime->gc.generationalDisabled > 0);
+    --runtime->gc.generationalDisabled;
 #ifdef JSGC_GENERATIONAL
-    if (runtime->gcGenerationalDisabled == 0) {
-        runtime->gcNursery.enable();
-        runtime->gcStoreBuffer.enable();
+    if (runtime->gc.generationalDisabled == 0) {
+        runtime->gc.nursery.enable();
+        runtime->gc.storeBuffer.enable();
 #ifdef JS_GC_ZEAL
         if (restartVerifier)
             gc::StartVerifyPostBarriers(runtime);
 #endif
     }
 #endif
 }
 
 extern JS_FRIEND_API(bool)
 JS::IsGenerationalGCEnabled(JSRuntime *rt)
 {
-    return rt->gcGenerationalDisabled == 0;
+    return rt->gc.generationalDisabled == 0;
 }
 
 JS_FRIEND_API(bool)
 JS::IsIncrementalBarrierNeeded(JSRuntime *rt)
 {
-    return rt->gcIncrementalState == gc::MARK && !rt->isHeapBusy();
+    return rt->gc.incrementalState == gc::MARK && !rt->isHeapBusy();
 }
 
 JS_FRIEND_API(bool)
 JS::IsIncrementalBarrierNeeded(JSContext *cx)
 {
     return IsIncrementalBarrierNeeded(cx->runtime());
 }
 
@@ -1029,17 +1029,17 @@ JS_FRIEND_API(void)
 JS::IncrementalValueBarrier(const Value &v)
 {
     js::HeapValue::writeBarrierPre(v);
 }
 
 JS_FRIEND_API(void)
 JS::PokeGC(JSRuntime *rt)
 {
-    rt->gcPoke = true;
+    rt->gc.poke = true;
 }
 
 JS_FRIEND_API(JSCompartment *)
 js::GetAnyCompartmentInZone(JS::Zone *zone)
 {
     CompartmentsInZoneIter comp(zone);
     JS_ASSERT(!comp.done());
     return comp.get();
@@ -1198,17 +1198,17 @@ js::UnsafeDefineElement(JSContext *cx, J
 }
 
 JS_FRIEND_API(bool)
 js_DefineOwnProperty(JSContext *cx, JSObject *objArg, jsid idArg,
                      JS::Handle<js::PropertyDescriptor> descriptor, bool *bp)
 {
     RootedObject obj(cx, objArg);
     RootedId id(cx, idArg);
-    JS_ASSERT(cx->runtime()->heapState == js::Idle);
+    JS_ASSERT(cx->runtime()->gc.heapState == js::Idle);
     CHECK_REQUEST(cx);
     assertSameCompartment(cx, obj, id, descriptor.value());
     if (descriptor.hasGetterObject())
         assertSameCompartment(cx, descriptor.getterObject());
     if (descriptor.hasSetterObject())
         assertSameCompartment(cx, descriptor.setterObject());
 
     return DefineOwnProperty(cx, HandleObject(obj), id, descriptor, bp);
@@ -1235,21 +1235,21 @@ js::IsInRequest(JSContext *cx)
 #ifdef JSGC_GENERATIONAL
 JS_FRIEND_API(void)
 JS_StoreObjectPostBarrierCallback(JSContext* cx,
                                   void (*callback)(JSTracer *trc, JSObject *key, void *data),
                                   JSObject *key, void *data)
 {
     JSRuntime *rt = cx->runtime();
     if (IsInsideNursery(rt, key))
-        rt->gcStoreBuffer.putCallback(callback, key, data);
+        rt->gc.storeBuffer.putCallback(callback, key, data);
 }
 
 extern JS_FRIEND_API(void)
 JS_StoreStringPostBarrierCallback(JSContext* cx,
                                   void (*callback)(JSTracer *trc, JSString *key, void *data),
                                   JSString *key, void *data)
 {
     JSRuntime *rt = cx->runtime();
     if (IsInsideNursery(rt, key))
-        rt->gcStoreBuffer.putCallback(callback, key, data);
+        rt->gc.storeBuffer.putCallback(callback, key, data);
 }
 #endif /* JSGC_GENERATIONAL */
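
AutoDisableGenerationalGC above is a counted RAII guard: the constructor evicts the nursery, disables it along with the store buffer, and bumps rt->gc.generationalDisabled, while the destructor decrements the counter and re-enables generational GC only when the last guard is destroyed, so the guards nest safely. Typical usage (a sketch, assuming a scope that must not see nursery-allocated objects):

    {
        // Generational GC stays off for the whole scope, even if callees
        // create nested guards of their own.
        JS::AutoDisableGenerationalGC noGGC(rt);

        // ... work that must not observe objects moving out of the nursery ...

    }   // The destructor re-enables the nursery once the count drops to zero.
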
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -388,17 +388,17 @@ ArenaHeader::checkSynchronizedWithFreeLi
      */
     JS_ASSERT(allocated());
 
     /*
      * We can be called from the background finalization thread when the free
      * list in the zone can mutate at any moment. We cannot do any
      * checks in this case.
      */
-    if (IsBackgroundFinalized(getAllocKind()) && zone->runtimeFromAnyThread()->gcHelperThread.onBackgroundThread())
+    if (IsBackgroundFinalized(getAllocKind()) && zone->runtimeFromAnyThread()->gc.helperThread.onBackgroundThread())
         return;
 
     FreeSpan firstSpan = FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
     if (firstSpan.isEmpty())
         return;
     const FreeSpan *list = zone->allocator.arenas.getFreeList(getAllocKind());
     if (list->isEmpty() || firstSpan.arenaAddress() != list->arenaAddress())
         return;
@@ -623,61 +623,61 @@ FinalizeArenas(FreeOp *fop,
       default:
         MOZ_ASSUME_UNREACHABLE("Invalid alloc kind");
     }
 }
 
 static inline Chunk *
 AllocChunk(JSRuntime *rt)
 {
-    return static_cast<Chunk *>(rt->pageAllocator.mapAlignedPages(ChunkSize, ChunkSize));
+    return static_cast<Chunk *>(rt->gc.pageAllocator.mapAlignedPages(ChunkSize, ChunkSize));
 }
 
 static inline void
 FreeChunk(JSRuntime *rt, Chunk *p)
 {
-    rt->pageAllocator.unmapPages(static_cast<void *>(p), ChunkSize);
+    rt->gc.pageAllocator.unmapPages(static_cast<void *>(p), ChunkSize);
 }
 
 inline bool
 ChunkPool::wantBackgroundAllocation(JSRuntime *rt) const
 {
     /*
      * To minimize memory waste we do not want to run the background chunk
      * allocation if we have empty chunks or when the runtime needs just a few
      * of them.
      */
-    return rt->gcHelperThread.canBackgroundAllocate() &&
+    return rt->gc.helperThread.canBackgroundAllocate() &&
            emptyCount == 0 &&
-           rt->gcChunkSet.count() >= 4;
+           rt->gc.chunkSet.count() >= 4;
 }
 
 /* Must be called with the GC lock taken. */
 inline Chunk *
 ChunkPool::get(JSRuntime *rt)
 {
-    JS_ASSERT(this == &rt->gcChunkPool);
+    JS_ASSERT(this == &rt->gc.chunkPool);
 
     Chunk *chunk = emptyChunkListHead;
     if (chunk) {
         JS_ASSERT(emptyCount);
         emptyChunkListHead = chunk->info.next;
         --emptyCount;
     } else {
         JS_ASSERT(!emptyCount);
         chunk = Chunk::allocate(rt);
         if (!chunk)
             return nullptr;
         JS_ASSERT(chunk->info.numArenasFreeCommitted == 0);
     }
     JS_ASSERT(chunk->unused());
-    JS_ASSERT(!rt->gcChunkSet.has(chunk));
+    JS_ASSERT(!rt->gc.chunkSet.has(chunk));
 
     if (wantBackgroundAllocation(rt))
-        rt->gcHelperThread.startBackgroundAllocationIfIdle();
+        rt->gc.helperThread.startBackgroundAllocationIfIdle();
 
     return chunk;
 }
 
 /* Must be called either during the GC or with the GC lock taken. */
 inline void
 ChunkPool::put(Chunk *chunk)
 {
@@ -686,31 +686,31 @@ ChunkPool::put(Chunk *chunk)
     emptyChunkListHead = chunk;
     emptyCount++;
 }
 
 /* Must be called either during the GC or with the GC lock taken. */
 Chunk *
 ChunkPool::expire(JSRuntime *rt, bool releaseAll)
 {
-    JS_ASSERT(this == &rt->gcChunkPool);
+    JS_ASSERT(this == &rt->gc.chunkPool);
 
     /*
      * Return old empty chunks to the system while preserving the order of
      * other chunks in the list. This way, if the GC runs several times
      * without emptying the list, the older chunks will stay at the tail
      * and are more likely to reach the max age.
      */
     Chunk *freeList = nullptr;
     int freeChunkCount = 0;
     for (Chunk **chunkp = &emptyChunkListHead; *chunkp; ) {
         JS_ASSERT(emptyCount);
         Chunk *chunk = *chunkp;
         JS_ASSERT(chunk->unused());
-        JS_ASSERT(!rt->gcChunkSet.has(chunk));
+        JS_ASSERT(!rt->gc.chunkSet.has(chunk));
         JS_ASSERT(chunk->info.age <= MAX_EMPTY_CHUNK_AGE);
         if (releaseAll || chunk->info.age == MAX_EMPTY_CHUNK_AGE ||
             freeChunkCount++ > MAX_EMPTY_CHUNK_COUNT)
         {
             *chunkp = chunk->info.next;
             --emptyCount;
             chunk->prepareToBeFreed(rt);
             chunk->info.next = freeList;
@@ -743,49 +743,49 @@ ChunkPool::expireAndFree(JSRuntime *rt, 
 
 /* static */ Chunk *
 Chunk::allocate(JSRuntime *rt)
 {
     Chunk *chunk = AllocChunk(rt);
     if (!chunk)
         return nullptr;
     chunk->init(rt);
-    rt->gcStats.count(gcstats::STAT_NEW_CHUNK);
+    rt->gc.stats.count(gcstats::STAT_NEW_CHUNK);
     return chunk;
 }
 
 /* Must be called with the GC lock taken. */
 /* static */ inline void
 Chunk::release(JSRuntime *rt, Chunk *chunk)
 {
     JS_ASSERT(chunk);
     chunk->prepareToBeFreed(rt);
     FreeChunk(rt, chunk);
 }
 
 inline void
 Chunk::prepareToBeFreed(JSRuntime *rt)
 {
-    JS_ASSERT(rt->gcNumArenasFreeCommitted >= info.numArenasFreeCommitted);
-    rt->gcNumArenasFreeCommitted -= info.numArenasFreeCommitted;
-    rt->gcStats.count(gcstats::STAT_DESTROY_CHUNK);
+    JS_ASSERT(rt->gc.numArenasFreeCommitted >= info.numArenasFreeCommitted);
+    rt->gc.numArenasFreeCommitted -= info.numArenasFreeCommitted;
+    rt->gc.stats.count(gcstats::STAT_DESTROY_CHUNK);
 
 #ifdef DEBUG
     /*
      * Let FreeChunkList detect a missing prepareToBeFreed call before it
      * frees chunk.
      */
     info.numArenasFreeCommitted = 0;
 #endif
 }
 
 void Chunk::decommitAllArenas(JSRuntime *rt)
 {
     decommittedArenas.clear(true);
-    rt->pageAllocator.markPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
+    rt->gc.pageAllocator.markPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
 
     info.freeArenasHead = nullptr;
     info.lastDecommittedArenaOffset = 0;
     info.numArenasFree = ArenasPerChunk;
     info.numArenasFreeCommitted = 0;
 }
 
 void
@@ -813,18 +813,18 @@ Chunk::init(JSRuntime *rt)
     /* The rest of info fields are initialized in PickChunk. */
 }
 
 static inline Chunk **
 GetAvailableChunkList(Zone *zone)
 {
     JSRuntime *rt = zone->runtimeFromAnyThread();
     return zone->isSystem
-           ? &rt->gcSystemAvailableChunkListHead
-           : &rt->gcUserAvailableChunkListHead;
+           ? &rt->gc.systemAvailableChunkListHead
+           : &rt->gc.userAvailableChunkListHead;
 }
 
 inline void
 Chunk::addToAvailableList(Zone *zone)
 {
     insertToAvailableList(GetAvailableChunkList(zone));
 }
 
@@ -883,55 +883,55 @@ Chunk::fetchNextDecommittedArena()
     JS_ASSERT(info.numArenasFree > 0);
 
     unsigned offset = findDecommittedArenaOffset();
     info.lastDecommittedArenaOffset = offset + 1;
     --info.numArenasFree;
     decommittedArenas.unset(offset);
 
     Arena *arena = &arenas[offset];
-    info.trailer.runtime->pageAllocator.markPagesInUse(arena, ArenaSize);
+    info.trailer.runtime->gc.pageAllocator.markPagesInUse(arena, ArenaSize);
     arena->aheader.setAsNotAllocated();
 
     return &arena->aheader;
 }
 
 inline ArenaHeader *
 Chunk::fetchNextFreeArena(JSRuntime *rt)
 {
     JS_ASSERT(info.numArenasFreeCommitted > 0);
     JS_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
-    JS_ASSERT(info.numArenasFreeCommitted <= rt->gcNumArenasFreeCommitted);
+    JS_ASSERT(info.numArenasFreeCommitted <= rt->gc.numArenasFreeCommitted);
 
     ArenaHeader *aheader = info.freeArenasHead;
     info.freeArenasHead = aheader->next;
     --info.numArenasFreeCommitted;
     --info.numArenasFree;
-    --rt->gcNumArenasFreeCommitted;
+    --rt->gc.numArenasFreeCommitted;
 
     return aheader;
 }
 
 ArenaHeader *
 Chunk::allocateArena(Zone *zone, AllocKind thingKind)
 {
     JS_ASSERT(hasAvailableArenas());
 
     JSRuntime *rt = zone->runtimeFromAnyThread();
-    if (!rt->isHeapMinorCollecting() && rt->gcBytes >= rt->gcMaxBytes)
+    if (!rt->isHeapMinorCollecting() && rt->gc.bytes >= rt->gc.maxBytes)
         return nullptr;
 
     ArenaHeader *aheader = MOZ_LIKELY(info.numArenasFreeCommitted > 0)
                            ? fetchNextFreeArena(rt)
                            : fetchNextDecommittedArena();
     aheader->init(zone, thingKind);
     if (MOZ_UNLIKELY(!hasAvailableArenas()))
         removeFromAvailableList();
 
-    rt->gcBytes += ArenaSize;
+    rt->gc.bytes += ArenaSize;
     zone->gcBytes += ArenaSize;
 
     if (zone->gcBytes >= zone->gcTriggerBytes) {
         AutoUnlockGC unlock(rt);
         TriggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
     }
 
     return aheader;
@@ -940,17 +940,17 @@ Chunk::allocateArena(Zone *zone, AllocKi
 inline void
 Chunk::addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader)
 {
     JS_ASSERT(!aheader->allocated());
     aheader->next = info.freeArenasHead;
     info.freeArenasHead = aheader;
     ++info.numArenasFreeCommitted;
     ++info.numArenasFree;
-    ++rt->gcNumArenasFreeCommitted;
+    ++rt->gc.numArenasFreeCommitted;
 }
 
 void
 Chunk::recycleArena(ArenaHeader *aheader, ArenaList &dest, AllocKind thingKind)
 {
     aheader->getArena()->setAsFullyUnused(thingKind);
     dest.insert(aheader);
 }
@@ -958,102 +958,193 @@ Chunk::recycleArena(ArenaHeader *aheader
 void
 Chunk::releaseArena(ArenaHeader *aheader)
 {
     JS_ASSERT(aheader->allocated());
     JS_ASSERT(!aheader->hasDelayedMarking);
     Zone *zone = aheader->zone;
     JSRuntime *rt = zone->runtimeFromAnyThread();
     AutoLockGC maybeLock;
-    if (rt->gcHelperThread.sweeping())
+    if (rt->gc.helperThread.sweeping())
         maybeLock.lock(rt);
 
-    JS_ASSERT(rt->gcBytes >= ArenaSize);
+    JS_ASSERT(rt->gc.bytes >= ArenaSize);
     JS_ASSERT(zone->gcBytes >= ArenaSize);
-    if (rt->gcHelperThread.sweeping())
+    if (rt->gc.helperThread.sweeping())
         zone->reduceGCTriggerBytes(zone->gcHeapGrowthFactor * ArenaSize);
-    rt->gcBytes -= ArenaSize;
+    rt->gc.bytes -= ArenaSize;
     zone->gcBytes -= ArenaSize;
 
     aheader->setAsNotAllocated();
     addArenaToFreeList(rt, aheader);
 
     if (info.numArenasFree == 1) {
         JS_ASSERT(!info.prevp);
         JS_ASSERT(!info.next);
         addToAvailableList(zone);
     } else if (!unused()) {
         JS_ASSERT(info.prevp);
     } else {
-        rt->gcChunkSet.remove(this);
+        rt->gc.chunkSet.remove(this);
         removeFromAvailableList();
         JS_ASSERT(info.numArenasFree == ArenasPerChunk);
         decommitAllArenas(rt);
-        rt->gcChunkPool.put(this);
+        rt->gc.chunkPool.put(this);
     }
 }
 
 /* The caller must hold the GC lock. */
 static Chunk *
 PickChunk(Zone *zone)
 {
     JSRuntime *rt = zone->runtimeFromAnyThread();
     Chunk **listHeadp = GetAvailableChunkList(zone);
     Chunk *chunk = *listHeadp;
     if (chunk)
         return chunk;
 
-    chunk = rt->gcChunkPool.get(rt);
+    chunk = rt->gc.chunkPool.get(rt);
     if (!chunk)
         return nullptr;
 
-    rt->gcChunkAllocationSinceLastGC = true;
+    rt->gc.chunkAllocationSinceLastGC = true;
 
     /*
      * FIXME bug 583732 - chunk is newly allocated and cannot be present in
      * the table so using ordinary lookupForAdd is suboptimal here.
      */
-    GCChunkSet::AddPtr p = rt->gcChunkSet.lookupForAdd(chunk);
+    GCChunkSet::AddPtr p = rt->gc.chunkSet.lookupForAdd(chunk);
     JS_ASSERT(!p);
-    if (!rt->gcChunkSet.add(p, chunk)) {
+    if (!rt->gc.chunkSet.add(p, chunk)) {
         Chunk::release(rt, chunk);
         return nullptr;
     }
 
     chunk->info.prevp = nullptr;
     chunk->info.next = nullptr;
     chunk->addToAvailableList(zone);
 
     return chunk;
 }
 
+js::gc::GCRuntime::GCRuntime(JSRuntime *rt) :
+    systemZone(nullptr),
+    systemAvailableChunkListHead(nullptr),
+    userAvailableChunkListHead(nullptr),
+    bytes(0),
+    maxBytes(0),
+    maxMallocBytes(0),
+    numArenasFreeCommitted(0),
+    marker(rt),
+    verifyPreData(nullptr),
+    verifyPostData(nullptr),
+    chunkAllocationSinceLastGC(false),
+    nextFullGCTime(0),
+    lastGCTime(0),
+    jitReleaseTime(0),
+    allocationThreshold(30 * 1024 * 1024),
+    highFrequencyGC(false),
+    highFrequencyTimeThreshold(1000),
+    highFrequencyLowLimitBytes(100 * 1024 * 1024),
+    highFrequencyHighLimitBytes(500 * 1024 * 1024),
+    highFrequencyHeapGrowthMax(3.0),
+    highFrequencyHeapGrowthMin(1.5),
+    lowFrequencyHeapGrowth(1.5),
+    dynamicHeapGrowth(false),
+    dynamicMarkSlice(false),
+    decommitThreshold(32 * 1024 * 1024),
+    shouldCleanUpEverything(false),
+    grayBitsValid(false),
+    isNeeded(0),
+    stats(rt),
+    number(0),
+    startNumber(0),
+    isFull(false),
+    triggerReason(JS::gcreason::NO_REASON),
+    strictCompartmentChecking(false),
+#ifdef DEBUG
+    disableStrictProxyCheckingCount(0),
+#endif
+    incrementalState(gc::NO_INCREMENTAL),
+    lastMarkSlice(false),
+    sweepOnBackgroundThread(false),
+    foundBlackGrayEdges(false),
+    sweepingZones(nullptr),
+    zoneGroupIndex(0),
+    zoneGroups(nullptr),
+    currentZoneGroup(nullptr),
+    sweepPhase(0),
+    sweepZone(nullptr),
+    sweepKindIndex(0),
+    abortSweepAfterCurrentGroup(false),
+    arenasAllocatedDuringSweep(nullptr),
+#ifdef DEBUG
+    markingValidator(nullptr),
+#endif
+    interFrameGC(0),
+    sliceBudget(SliceBudget::Unlimited),
+    incrementalEnabled(true),
+    generationalDisabled(0),
+    manipulatingDeadZones(false),
+    objectsMarkedInDeadZones(0),
+    poke(false),
+    heapState(Idle),
+#ifdef JSGC_GENERATIONAL
+    nursery(rt),
+    storeBuffer(rt, nursery),
+#endif
+#ifdef JS_GC_ZEAL
+    zealMode(0),
+    zealFrequency(0),
+    nextScheduled(0),
+    deterministicOnly(false),
+    incrementalLimit(0),
+#endif
+    validate(true),
+    fullCompartmentChecks(false),
+    callback(nullptr),
+    sliceCallback(nullptr),
+    finalizeCallback(nullptr),
+    mallocBytes(0),
+    mallocGCTriggered(false),
+    scriptAndCountsVector(nullptr),
+    alwaysPreserveCode(false),
+#ifdef DEBUG
+    noGCOrAllocationCheck(0),
+#endif
+    lock(nullptr),
+    lockOwner(nullptr),
+    helperThread(rt)
+{
+}
+
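
The constructor above is the heart of the patch: every formerly free-standing gc* member of JSRuntime (bytes, maxBytes, marker, chunkSet, helperThread, and so on) now has a single owner and a single initialization site, and callers throughout the tree switch from rt->gcFoo to rt->gc.foo. A minimal, self-contained sketch of that pattern follows; it is illustrative only, and Runtime/GCState and their members are hypothetical stand-ins, not the real JSRuntime/GCRuntime.

#include <cstddef>

// Hypothetical model of the refactoring: loose per-runtime GC fields become
// members of one state object reached through a single 'gc' member.
struct GCState
{
    explicit GCState(std::size_t maxBytesArg)
      : bytes(0),              // was: a loose runtime->gcBytes field
        maxBytes(maxBytesArg), // was: runtime->gcMaxBytes
        isNeeded(false)        // was: runtime->gcIsNeeded
    {}

    std::size_t bytes;
    std::size_t maxBytes;
    bool isNeeded;
};

struct Runtime
{
    explicit Runtime(std::size_t maxBytes) : gc(maxBytes) {}
    GCState gc;                // all GC state now lives behind one member
};

int main()
{
    Runtime rt(32 * 1024 * 1024);
    rt.gc.bytes += 4096;       // was: rt.gcBytes += 4096;
    return rt.gc.bytes <= rt.gc.maxBytes ? 0 : 1;
}

One payoff of grouping the fields this way is that later cleanups can make them private to GCRuntime instead of leaving them exposed as public JSRuntime members.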
 #ifdef JS_GC_ZEAL
 
 extern void
 js::SetGCZeal(JSRuntime *rt, uint8_t zeal, uint32_t frequency)
 {
-    if (rt->gcVerifyPreData)
+    if (rt->gc.verifyPreData)
         VerifyBarriers(rt, PreBarrierVerifier);
-    if (rt->gcVerifyPostData)
+    if (rt->gc.verifyPostData)
         VerifyBarriers(rt, PostBarrierVerifier);
 
 #ifdef JSGC_GENERATIONAL
-    if (rt->gcZeal_ == ZealGenerationalGCValue) {
+    if (rt->gc.zealMode == ZealGenerationalGCValue) {
         MinorGC(rt, JS::gcreason::DEBUG_GC);
-        rt->gcNursery.leaveZealMode();
+        rt->gc.nursery.leaveZealMode();
     }
 
     if (zeal == ZealGenerationalGCValue)
-        rt->gcNursery.enterZealMode();
+        rt->gc.nursery.enterZealMode();
 #endif
 
     bool schedule = zeal >= js::gc::ZealAllocValue;
-    rt->gcZeal_ = zeal;
-    rt->gcZealFrequency = frequency;
-    rt->gcNextScheduled = schedule ? frequency : 0;
+    rt->gc.zealMode = zeal;
+    rt->gc.zealFrequency = frequency;
+    rt->gc.nextScheduled = schedule ? frequency : 0;
 }
 
 static bool
 InitGCZeal(JSRuntime *rt)
 {
     const char *env = getenv("JS_GC_ZEAL");
     if (!env)
         return true;
@@ -1095,102 +1186,102 @@ InitGCZeal(JSRuntime *rt)
 #endif
 
 /* Lifetime for type sets attached to scripts containing observed types. */
 static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
 
 bool
 js_InitGC(JSRuntime *rt, uint32_t maxbytes)
 {
-    if (!rt->gcChunkSet.init(INITIAL_CHUNK_CAPACITY))
+    if (!rt->gc.chunkSet.init(INITIAL_CHUNK_CAPACITY))
         return false;
 
-    if (!rt->gcRootsHash.init(256))
+    if (!rt->gc.rootsHash.init(256))
         return false;
 
-    if (!rt->gcHelperThread.init())
+    if (!rt->gc.helperThread.init())
         return false;
 
     /*
      * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
      * for default backward API compatibility.
      */
-    rt->gcMaxBytes = maxbytes;
+    rt->gc.maxBytes = maxbytes;
     rt->setGCMaxMallocBytes(maxbytes);
 
 #ifndef JS_MORE_DETERMINISTIC
-    rt->gcJitReleaseTime = PRMJ_Now() + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
+    rt->gc.jitReleaseTime = PRMJ_Now() + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
 #endif
 
 #ifdef JSGC_GENERATIONAL
-    if (!rt->gcNursery.init())
+    if (!rt->gc.nursery.init())
         return false;
 
-    if (!rt->gcStoreBuffer.enable())
+    if (!rt->gc.storeBuffer.enable())
         return false;
 #endif
 
 #ifdef JS_GC_ZEAL
     if (!InitGCZeal(rt))
         return false;
 #endif
 
     return true;
 }
 
 static void
 RecordNativeStackTopForGC(JSRuntime *rt)
 {
-    ConservativeGCData *cgcd = &rt->conservativeGC;
+    ConservativeGCData *cgcd = &rt->gc.conservativeGC;
 
 #ifdef JS_THREADSAFE
     /* Record the stack top here only if we are called from a request. */
     if (!rt->requestDepth)
         return;
 #endif
     cgcd->recordStackTop();
 }
 
 void
 js_FinishGC(JSRuntime *rt)
 {
     /*
      * Wait until the background finalization stops and the helper thread
      * shuts down before we forcefully release any remaining GC memory.
      */
-    rt->gcHelperThread.finish();
+    rt->gc.helperThread.finish();
 
 #ifdef JS_GC_ZEAL
     /* Free memory associated with GC verification. */
     FinishVerifier(rt);
 #endif
 
     /* Delete all remaining zones. */
     if (rt->gcInitialized) {
         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
             for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
                 js_delete(comp.get());
             js_delete(zone.get());
         }
     }
 
-    rt->zones.clear();
-
-    rt->gcSystemAvailableChunkListHead = nullptr;
-    rt->gcUserAvailableChunkListHead = nullptr;
-    if (rt->gcChunkSet.initialized()) {
-        for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
+    rt->gc.zones.clear();
+
+    rt->gc.systemAvailableChunkListHead = nullptr;
+    rt->gc.userAvailableChunkListHead = nullptr;
+    if (rt->gc.chunkSet.initialized()) {
+        for (GCChunkSet::Range r(rt->gc.chunkSet.all()); !r.empty(); r.popFront())
             Chunk::release(rt, r.front());
-        rt->gcChunkSet.clear();
+        rt->gc.chunkSet.clear();
     }
 
-    rt->gcChunkPool.expireAndFree(rt, true);
-
-    if (rt->gcRootsHash.initialized())
-        rt->gcRootsHash.clear();
+    rt->gc.chunkPool.expireAndFree(rt, true);
+
+    if (rt->gc.rootsHash.initialized())
+        rt->gc.rootsHash.clear();
 
     rt->functionPersistentRooteds.clear();
     rt->idPersistentRooteds.clear();
     rt->objectPersistentRooteds.clear();
     rt->scriptPersistentRooteds.clear();
     rt->stringPersistentRooteds.clear();
     rt->valuePersistentRooteds.clear();
 }
@@ -1204,20 +1295,20 @@ static bool
 AddRoot(JSRuntime *rt, T *rp, const char *name, JSGCRootType rootType)
 {
     /*
      * Sometimes Firefox will hold weak references to objects and then convert
      * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
      * or ModifyBusyCount in workers). We need a read barrier to cover these
      * cases.
      */
-    if (rt->gcIncrementalState != NO_INCREMENTAL)
+    if (rt->gc.incrementalState != NO_INCREMENTAL)
         BarrierOwner<T>::result::writeBarrierPre(*rp);
 
-    return rt->gcRootsHash.put((void *)rp, RootInfo(name, rootType));
+    return rt->gc.rootsHash.put((void *)rp, RootInfo(name, rootType));
 }
 
 template <typename T>
 static bool
 AddRoot(JSContext *cx, T *rp, const char *name, JSGCRootType rootType)
 {
     bool ok = AddRoot(cx->runtime(), rp, name, rootType);
     if (!ok)
@@ -1271,28 +1362,28 @@ extern JS_FRIEND_API(void)
 js::RemoveRawValueRoot(JSContext *cx, Value *vp)
 {
     RemoveRoot(cx->runtime(), vp);
 }
 
 void
 js::RemoveRoot(JSRuntime *rt, void *rp)
 {
-    rt->gcRootsHash.remove(rp);
-    rt->gcPoke = true;
+    rt->gc.rootsHash.remove(rp);
+    rt->gc.poke = true;
 }
 
 typedef RootedValueMap::Range RootRange;
 typedef RootedValueMap::Entry RootEntry;
 typedef RootedValueMap::Enum RootEnum;
 
 static size_t
 ComputeTriggerBytes(Zone *zone, size_t lastBytes, size_t maxBytes, JSGCInvocationKind gckind)
 {
-    size_t base = gckind == GC_SHRINK ? lastBytes : Max(lastBytes, zone->runtimeFromMainThread()->gcAllocationThreshold);
+    size_t base = gckind == GC_SHRINK ? lastBytes : Max(lastBytes, zone->runtimeFromMainThread()->gc.allocationThreshold);
     double trigger = double(base) * zone->gcHeapGrowthFactor;
     return size_t(Min(double(maxBytes), trigger));
 }
 
 void
 Zone::setGCLastBytes(size_t lastBytes, JSGCInvocationKind gckind)
 {
     /*
@@ -1300,51 +1391,51 @@ Zone::setGCLastBytes(size_t lastBytes, J
      * For low frequency GCs (more than 1sec between GCs) we let the heap grow to 150%.
      * For high frequency GCs we let the heap grow depending on the heap size:
      *   lastBytes < highFrequencyLowLimit: 300%
      *   lastBytes > highFrequencyHighLimit: 150%
      *   otherwise: linear interpolation between 150% and 300% based on lastBytes
      */
     JSRuntime *rt = runtimeFromMainThread();
 
-    if (!rt->gcDynamicHeapGrowth) {
+    if (!rt->gc.dynamicHeapGrowth) {
         gcHeapGrowthFactor = 3.0;
     } else if (lastBytes < 1 * 1024 * 1024) {
-        gcHeapGrowthFactor = rt->gcLowFrequencyHeapGrowth;
+        gcHeapGrowthFactor = rt->gc.lowFrequencyHeapGrowth;
     } else {
-        JS_ASSERT(rt->gcHighFrequencyHighLimitBytes > rt->gcHighFrequencyLowLimitBytes);
+        JS_ASSERT(rt->gc.highFrequencyHighLimitBytes > rt->gc.highFrequencyLowLimitBytes);
         uint64_t now = PRMJ_Now();
-        if (rt->gcLastGCTime && rt->gcLastGCTime + rt->gcHighFrequencyTimeThreshold * PRMJ_USEC_PER_MSEC > now) {
-            if (lastBytes <= rt->gcHighFrequencyLowLimitBytes) {
-                gcHeapGrowthFactor = rt->gcHighFrequencyHeapGrowthMax;
-            } else if (lastBytes >= rt->gcHighFrequencyHighLimitBytes) {
-                gcHeapGrowthFactor = rt->gcHighFrequencyHeapGrowthMin;
+        if (rt->gc.lastGCTime && rt->gc.lastGCTime + rt->gc.highFrequencyTimeThreshold * PRMJ_USEC_PER_MSEC > now) {
+            if (lastBytes <= rt->gc.highFrequencyLowLimitBytes) {
+                gcHeapGrowthFactor = rt->gc.highFrequencyHeapGrowthMax;
+            } else if (lastBytes >= rt->gc.highFrequencyHighLimitBytes) {
+                gcHeapGrowthFactor = rt->gc.highFrequencyHeapGrowthMin;
             } else {
-                double k = (rt->gcHighFrequencyHeapGrowthMin - rt->gcHighFrequencyHeapGrowthMax)
-                           / (double)(rt->gcHighFrequencyHighLimitBytes - rt->gcHighFrequencyLowLimitBytes);
-                gcHeapGrowthFactor = (k * (lastBytes - rt->gcHighFrequencyLowLimitBytes)
-                                     + rt->gcHighFrequencyHeapGrowthMax);
-                JS_ASSERT(gcHeapGrowthFactor <= rt->gcHighFrequencyHeapGrowthMax
-                          && gcHeapGrowthFactor >= rt->gcHighFrequencyHeapGrowthMin);
+                double k = (rt->gc.highFrequencyHeapGrowthMin - rt->gc.highFrequencyHeapGrowthMax)
+                           / (double)(rt->gc.highFrequencyHighLimitBytes - rt->gc.highFrequencyLowLimitBytes);
+                gcHeapGrowthFactor = (k * (lastBytes - rt->gc.highFrequencyLowLimitBytes)
+                                     + rt->gc.highFrequencyHeapGrowthMax);
+                JS_ASSERT(gcHeapGrowthFactor <= rt->gc.highFrequencyHeapGrowthMax
+                          && gcHeapGrowthFactor >= rt->gc.highFrequencyHeapGrowthMin);
             }
-            rt->gcHighFrequencyGC = true;
+            rt->gc.highFrequencyGC = true;
         } else {
-            gcHeapGrowthFactor = rt->gcLowFrequencyHeapGrowth;
-            rt->gcHighFrequencyGC = false;
+            gcHeapGrowthFactor = rt->gc.lowFrequencyHeapGrowth;
+            rt->gc.highFrequencyGC = false;
         }
     }
-    gcTriggerBytes = ComputeTriggerBytes(this, lastBytes, rt->gcMaxBytes, gckind);
+    gcTriggerBytes = ComputeTriggerBytes(this, lastBytes, rt->gc.maxBytes, gckind);
 }
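
Taken together, the code above computes a per-zone heap growth factor and, via ComputeTriggerBytes, a trigger threshold of min(gc.maxBytes, max(lastBytes, gc.allocationThreshold) * factor) for non-shrinking GCs. The factor is 3.0 when dynamic growth is disabled, lowFrequencyHeapGrowth (1.5) for heaps under 1 MiB or when the last GC was more than highFrequencyTimeThreshold (1000 ms) ago, and otherwise a linear interpolation from highFrequencyHeapGrowthMax (3.0) down to highFrequencyHeapGrowthMin (1.5) as lastBytes moves from highFrequencyLowLimitBytes (100 MiB) to highFrequencyHighLimitBytes (500 MiB). Below is a standalone sketch of just that interpolation, using the constructor defaults from this patch; the function is illustrative, not the real Zone::setGCLastBytes.

#include <cstdio>

double HeapGrowthFactor(double lastBytes)
{
    const double lowLimit  = 100.0 * 1024 * 1024;  // highFrequencyLowLimitBytes
    const double highLimit = 500.0 * 1024 * 1024;  // highFrequencyHighLimitBytes
    const double growthMax = 3.0;                  // highFrequencyHeapGrowthMax
    const double growthMin = 1.5;                  // highFrequencyHeapGrowthMin

    if (lastBytes <= lowLimit)
        return growthMax;
    if (lastBytes >= highLimit)
        return growthMin;

    // Linear interpolation: growthMax at lowLimit falling to growthMin at highLimit.
    double k = (growthMin - growthMax) / (highLimit - lowLimit);
    return k * (lastBytes - lowLimit) + growthMax;
}

int main()
{
    // 300 MiB sits halfway between the limits, so the factor comes out at 2.25.
    std::printf("%.2f\n", HeapGrowthFactor(300.0 * 1024 * 1024));
    return 0;
}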
 
 void
 Zone::reduceGCTriggerBytes(size_t amount)
 {
     JS_ASSERT(amount > 0);
     JS_ASSERT(gcTriggerBytes >= amount);
-    if (gcTriggerBytes - amount < runtimeFromAnyThread()->gcAllocationThreshold * gcHeapGrowthFactor)
+    if (gcTriggerBytes - amount < runtimeFromAnyThread()->gc.allocationThreshold * gcHeapGrowthFactor)
         return;
     gcTriggerBytes -= amount;
 }
 
 Allocator::Allocator(Zone *zone)
   : zone_(zone)
 {}
 
@@ -1371,26 +1462,26 @@ GCMarker::delayMarkingChildren(const voi
 inline void
 ArenaLists::prepareForIncrementalGC(JSRuntime *rt)
 {
     for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
         FreeSpan *headSpan = &freeLists[i];
         if (!headSpan->isEmpty()) {
             ArenaHeader *aheader = headSpan->arenaHeader();
             aheader->allocatedDuringIncremental = true;
-            rt->gcMarker.delayMarkingArena(aheader);
+            rt->gc.marker.delayMarkingArena(aheader);
         }
     }
 }
 
 static inline void
 PushArenaAllocatedDuringSweep(JSRuntime *runtime, ArenaHeader *arena)
 {
-    arena->setNextAllocDuringSweep(runtime->gcArenasAllocatedDuringSweep);
-    runtime->gcArenasAllocatedDuringSweep = arena;
+    arena->setNextAllocDuringSweep(runtime->gc.arenasAllocatedDuringSweep);
+    runtime->gc.arenasAllocatedDuringSweep = arena;
 }
 
 inline void *
 ArenaLists::allocateFromArenaInline(Zone *zone, AllocKind thingKind)
 {
     /*
      * Parallel JS Note:
      *
@@ -1451,17 +1542,17 @@ ArenaLists::allocateFromArenaInline(Zone
              * Move the free span stored in the arena to the free list and
              * allocate from it.
              */
             freeLists[thingKind] = aheader->getFirstFreeSpan();
             aheader->setAsFullyUsed();
             if (MOZ_UNLIKELY(zone->wasGCStarted())) {
                 if (zone->needsBarrier()) {
                     aheader->allocatedDuringIncremental = true;
-                    zone->runtimeFromMainThread()->gcMarker.delayMarkingArena(aheader);
+                    zone->runtimeFromMainThread()->gc.marker.delayMarkingArena(aheader);
                 } else if (zone->isGCSweeping()) {
                     PushArenaAllocatedDuringSweep(zone->runtimeFromMainThread(), aheader);
                 }
             }
             return freeLists[thingKind].infallibleAllocate(Arena::thingSize(thingKind));
         }
 
         /* Make sure we hold the GC lock before we call PickChunk. */
@@ -1484,17 +1575,17 @@ ArenaLists::allocateFromArenaInline(Zone
     JS_ASSERT(!*al->cursor);
     ArenaHeader *aheader = chunk->allocateArena(zone, thingKind);
     if (!aheader)
         return nullptr;
 
     if (MOZ_UNLIKELY(zone->wasGCStarted())) {
         if (zone->needsBarrier()) {
             aheader->allocatedDuringIncremental = true;
-            zone->runtimeFromMainThread()->gcMarker.delayMarkingArena(aheader);
+            zone->runtimeFromMainThread()->gc.marker.delayMarkingArena(aheader);
         } else if (zone->isGCSweeping()) {
             PushArenaAllocatedDuringSweep(zone->runtimeFromMainThread(), aheader);
         }
     }
     aheader->next = al->head;
     if (!al->head) {
         JS_ASSERT(al->cursor == &al->head);
         al->cursor = &aheader->next;
@@ -1582,17 +1673,17 @@ ArenaLists::queueForForegroundSweep(Free
 }
 
 inline void
 ArenaLists::queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind)
 {
     JS_ASSERT(IsBackgroundFinalized(thingKind));
 
 #ifdef JS_THREADSAFE
-    JS_ASSERT(!fop->runtime()->gcHelperThread.sweeping());
+    JS_ASSERT(!fop->runtime()->gc.helperThread.sweeping());
 #endif
 
     ArenaList *al = &arenaLists[thingKind];
     if (!al->head) {
         JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
         JS_ASSERT(al->cursor == &al->head);
         return;
     }
@@ -1654,17 +1745,17 @@ ArenaLists::backgroundFinalize(FreeOp *f
         lists->backgroundFinalizeState[thingKind] = BFS_DONE;
 
     lists->arenaListsToSweep[thingKind] = nullptr;
 }
 
 void
 ArenaLists::queueObjectsForSweep(FreeOp *fop)
 {
-    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_OBJECT);
+    gcstats::AutoPhase ap(fop->runtime()->gc.stats, gcstats::PHASE_SWEEP_OBJECT);
 
     finalizeNow(fop, FINALIZE_OBJECT0);
     finalizeNow(fop, FINALIZE_OBJECT2);
     finalizeNow(fop, FINALIZE_OBJECT4);
     finalizeNow(fop, FINALIZE_OBJECT8);
     finalizeNow(fop, FINALIZE_OBJECT12);
     finalizeNow(fop, FINALIZE_OBJECT16);
 
@@ -1674,43 +1765,43 @@ ArenaLists::queueObjectsForSweep(FreeOp 
     queueForBackgroundSweep(fop, FINALIZE_OBJECT8_BACKGROUND);
     queueForBackgroundSweep(fop, FINALIZE_OBJECT12_BACKGROUND);
     queueForBackgroundSweep(fop, FINALIZE_OBJECT16_BACKGROUND);
 }
 
 void
 ArenaLists::queueStringsForSweep(FreeOp *fop)
 {
-    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_STRING);
+    gcstats::AutoPhase ap(fop->runtime()->gc.stats, gcstats::PHASE_SWEEP_STRING);
 
     queueForBackgroundSweep(fop, FINALIZE_FAT_INLINE_STRING);
     queueForBackgroundSweep(fop, FINALIZE_STRING);
 
     queueForForegroundSweep(fop, FINALIZE_EXTERNAL_STRING);
 }
 
 void
 ArenaLists::queueScriptsForSweep(FreeOp *fop)
 {
-    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_SCRIPT);
+    gcstats::AutoPhase ap(fop->runtime()->gc.stats, gcstats::PHASE_SWEEP_SCRIPT);
     queueForForegroundSweep(fop, FINALIZE_SCRIPT);
     queueForForegroundSweep(fop, FINALIZE_LAZY_SCRIPT);
 }
 
 void
 ArenaLists::queueJitCodeForSweep(FreeOp *fop)
 {
-    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_JITCODE);
+    gcstats::AutoPhase ap(fop->runtime()->gc.stats, gcstats::PHASE_SWEEP_JITCODE);
     queueForForegroundSweep(fop, FINALIZE_JITCODE);
 }
 
 void
 ArenaLists::queueShapesForSweep(FreeOp *fop)
 {
-    gcstats::AutoPhase ap(fop->runtime()->gcStats, gcstats::PHASE_SWEEP_SHAPE);
+    gcstats::AutoPhase ap(fop->runtime()->gc.stats, gcstats::PHASE_SWEEP_SHAPE);
 
     queueForBackgroundSweep(fop, FINALIZE_SHAPE);
     queueForBackgroundSweep(fop, FINALIZE_BASE_SHAPE);
     queueForBackgroundSweep(fop, FINALIZE_TYPE_OBJECT);
 }
 
 static void *
 RunLastDitchGC(JSContext *cx, JS::Zone *zone, AllocKind thingKind)
@@ -1746,17 +1837,17 @@ template <AllowGC allowGC>
 ArenaLists::refillFreeList(ThreadSafeContext *cx, AllocKind thingKind)
 {
     JS_ASSERT(cx->allocator()->arenas.freeLists[thingKind].isEmpty());
     JS_ASSERT_IF(cx->isJSContext(), !cx->asJSContext()->runtime()->isHeapBusy());
 
     Zone *zone = cx->allocator()->zone_;
 
     bool runGC = cx->allowGC() && allowGC &&
-                 cx->asJSContext()->runtime()->gcIncrementalState != NO_INCREMENTAL &&
+                 cx->asJSContext()->runtime()->gc.incrementalState != NO_INCREMENTAL &&
                  zone->gcBytes > zone->gcTriggerBytes;
 
 #ifdef JS_THREADSAFE
     JS_ASSERT_IF(cx->isJSContext() && allowGC,
                  !cx->asJSContext()->runtime()->currentThreadHasExclusiveAccess());
 #endif
 
     for (;;) {
@@ -1776,17 +1867,17 @@ ArenaLists::refillFreeList(ThreadSafeCon
              */
             for (bool secondAttempt = false; ; secondAttempt = true) {
                 void *thing = cx->allocator()->arenas.allocateFromArenaInline(zone, thingKind);
                 if (MOZ_LIKELY(!!thing))
                     return thing;
                 if (secondAttempt)
                     break;
 
-                cx->asJSContext()->runtime()->gcHelperThread.waitBackgroundSweepEnd();
+                cx->asJSContext()->runtime()->gc.helperThread.waitBackgroundSweepEnd();
             }
         } else {
 #ifdef JS_THREADSAFE
             /*
              * If we're off the main thread, we try to allocate once and
              * return whatever value we get. If we aren't in a ForkJoin
              * session (i.e. we are in a worker thread async with the main
              * thread), we need to first ensure the main thread is not in a GC
@@ -1883,21 +1974,21 @@ void
 js::MarkCompartmentActive(InterpreterFrame *fp)
 {
     fp->script()->compartment()->zone()->active = true;
 }
 
 static void
 RequestInterrupt(JSRuntime *rt, JS::gcreason::Reason reason)
 {
-    if (rt->gcIsNeeded)
+    if (rt->gc.isNeeded)
         return;
 
-    rt->gcIsNeeded = true;
-    rt->gcTriggerReason = reason;
+    rt->gc.isNeeded = true;
+    rt->gc.triggerReason = reason;
     rt->requestInterrupt(JSRuntime::RequestInterruptMainThread);
 }
 
 bool
 js::TriggerGC(JSRuntime *rt, JS::gcreason::Reason reason)
 {
     /* Wait till end of parallel section to trigger GC. */
     if (InParallelSection()) {
@@ -1969,48 +2060,48 @@ js::MaybeGC(JSContext *cx)
     JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
     if (rt->gcZeal() == ZealAllocValue || rt->gcZeal() == ZealPokeValue) {
         JS::PrepareForFullGC(rt);
         GC(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
         return;
     }
 
-    if (rt->gcIsNeeded) {
+    if (rt->gc.isNeeded) {
         GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
         return;
     }
 
-    double factor = rt->gcHighFrequencyGC ? 0.85 : 0.9;
+    double factor = rt->gc.highFrequencyGC ? 0.85 : 0.9;
     Zone *zone = cx->zone();
     if (zone->gcBytes > 1024 * 1024 &&
         zone->gcBytes >= factor * zone->gcTriggerBytes &&
-        rt->gcIncrementalState == NO_INCREMENTAL &&
-        !rt->gcHelperThread.sweeping())
+        rt->gc.incrementalState == NO_INCREMENTAL &&
+        !rt->gc.helperThread.sweeping())
     {
         PrepareZoneForGC(zone);
         GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
         return;
     }
 
 #ifndef JS_MORE_DETERMINISTIC
     /*
      * Access to the counters and, on 32 bit, setting gcNextFullGCTime below
      * is not atomic and a race condition could trigger or suppress the GC. We
      * tolerate this.
      */
     int64_t now = PRMJ_Now();
-    if (rt->gcNextFullGCTime && rt->gcNextFullGCTime <= now) {
-        if (rt->gcChunkAllocationSinceLastGC ||
-            rt->gcNumArenasFreeCommitted > rt->gcDecommitThreshold)
+    if (rt->gc.nextFullGCTime && rt->gc.nextFullGCTime <= now) {
+        if (rt->gc.chunkAllocationSinceLastGC ||
+            rt->gc.numArenasFreeCommitted > rt->gc.decommitThreshold)
         {
             JS::PrepareForFullGC(rt);
             GCSlice(rt, GC_SHRINK, JS::gcreason::MAYBEGC);
         } else {
-            rt->gcNextFullGCTime = now + GC_IDLE_FULL_SPAN;
+            rt->gc.nextFullGCTime = now + GC_IDLE_FULL_SPAN;
         }
     }
 #endif
 }
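
The check above is MaybeGC's main heuristic: start an incremental slice once a zone's heap passes 85% of its trigger threshold in high-frequency mode (90% otherwise), but only when no incremental GC is already in progress and the helper thread is not sweeping; heaps under 1 MiB are ignored, and a periodic shrinking GC is scheduled separately through gc.nextFullGCTime. A compact sketch of that predicate, with hypothetical names, not the real js::MaybeGC:

#include <cstddef>

bool ShouldStartSlice(std::size_t zoneBytes, std::size_t triggerBytes,
                      bool highFrequencyGC, bool incrementalInProgress,
                      bool backgroundSweeping)
{
    // React earlier (85% of the trigger) when GCs have been frequent,
    // later (90%) otherwise.
    double factor = highFrequencyGC ? 0.85 : 0.9;

    return zoneBytes > 1024 * 1024 &&            // ignore very small heaps
           zoneBytes >= factor * triggerBytes &&
           !incrementalInProgress &&             // gc.incrementalState == NO_INCREMENTAL
           !backgroundSweeping;                  // gc.helperThread is idle
}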
 
 static void
 DecommitArenasFromAvailableList(JSRuntime *rt, Chunk **availableListHeadp)
 {
@@ -2060,17 +2151,17 @@ DecommitArenasFromAvailableList(JSRuntim
                 /*
                  * If the main thread waits for the decommit to finish, skip
                  * potentially expensive unlock/lock pair on the contested
                  * lock.
                  */
                 Maybe<AutoUnlockGC> maybeUnlock;
                 if (!rt->isHeapBusy())
                     maybeUnlock.construct(rt);
-                ok = rt->pageAllocator.markPagesUnused(aheader->getArena(), ArenaSize);
+                ok = rt->gc.pageAllocator.markPagesUnused(aheader->getArena(), ArenaSize);
             }
 
             if (ok) {
                 ++chunk->info.numArenasFree;
                 chunk->decommittedArenas.set(arenaIndex);
             } else {
                 chunk->addArenaToFreeList(rt, aheader);
             }
@@ -2090,17 +2181,17 @@ DecommitArenasFromAvailableList(JSRuntim
                     if (!prev->hasAvailableArenas())
                         insertPoint = availableListHeadp;
                 }
                 chunk->insertToAvailableList(insertPoint);
             } else {
                 JS_ASSERT(chunk->info.prevp);
             }
 
-            if (rt->gcChunkAllocationSinceLastGC || !ok) {
+            if (rt->gc.chunkAllocationSinceLastGC || !ok) {
                 /*
                  * The allocator thread has started to get new chunks. We should stop
                  * to avoid decommitting arenas in just allocated chunks.
                  */
                 return;
             }
         }
 
@@ -2118,25 +2209,25 @@ DecommitArenasFromAvailableList(JSRuntim
          */
         chunk = chunk->getPrevious();
     }
 }
 
 static void
 DecommitArenas(JSRuntime *rt)
 {
-    DecommitArenasFromAvailableList(rt, &rt->gcSystemAvailableChunkListHead);
-    DecommitArenasFromAvailableList(rt, &rt->gcUserAvailableChunkListHead);
+    DecommitArenasFromAvailableList(rt, &rt->gc.systemAvailableChunkListHead);
+    DecommitArenasFromAvailableList(rt, &rt->gc.userAvailableChunkListHead);
 }
 
 /* Must be called with the GC lock taken. */
 static void
 ExpireChunksAndArenas(JSRuntime *rt, bool shouldShrink)
 {
-    if (Chunk *toFree = rt->gcChunkPool.expire(rt, shouldShrink)) {
+    if (Chunk *toFree = rt->gc.chunkPool.expire(rt, shouldShrink)) {
         AutoUnlockGC unlock(rt);
         FreeChunkList(rt, toFree);
     }
 
     if (shouldShrink)
         DecommitArenas(rt);
 }
 
@@ -2144,34 +2235,34 @@ static void
 SweepBackgroundThings(JSRuntime* rt, bool onBackgroundThread)
 {
     /*
      * We must finalize in the correct order, see comments in
      * finalizeObjects.
      */
     FreeOp fop(rt, false);
     for (int phase = 0 ; phase < BackgroundPhaseCount ; ++phase) {
-        for (Zone *zone = rt->gcSweepingZones; zone; zone = zone->gcNextGraphNode) {
+        for (Zone *zone = rt->gc.sweepingZones; zone; zone = zone->gcNextGraphNode) {
             for (int index = 0 ; index < BackgroundPhaseLength[phase] ; ++index) {
                 AllocKind kind = BackgroundPhases[phase][index];
                 ArenaHeader *arenas = zone->allocator.arenas.arenaListsToSweep[kind];
                 if (arenas)
                     ArenaLists::backgroundFinalize(&fop, arenas, onBackgroundThread);
             }
         }
     }
 
-    rt->gcSweepingZones = nullptr;
+    rt->gc.sweepingZones = nullptr;
 }
 
 #ifdef JS_THREADSAFE
 static void
 AssertBackgroundSweepingFinished(JSRuntime *rt)
 {
-    JS_ASSERT(!rt->gcSweepingZones);
+    JS_ASSERT(!rt->gc.sweepingZones);
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         for (unsigned i = 0; i < FINALIZE_LIMIT; ++i) {
             JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]);
             JS_ASSERT(zone->allocator.arenas.doneBackgroundFinalize(AllocKind(i)));
         }
     }
 }
 
@@ -2197,35 +2288,35 @@ bool
 GCHelperThread::init()
 {
     if (!rt->useHelperThreads()) {
         backgroundAllocation = false;
         return true;
     }
 
 #ifdef JS_THREADSAFE
-    if (!(wakeup = PR_NewCondVar(rt->gcLock)))
+    if (!(wakeup = PR_NewCondVar(rt->gc.lock)))
         return false;
-    if (!(done = PR_NewCondVar(rt->gcLock)))
+    if (!(done = PR_NewCondVar(rt->gc.lock)))
         return false;
 
     thread = PR_CreateThread(PR_USER_THREAD, threadMain, this, PR_PRIORITY_NORMAL,
                              PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0);
     if (!thread)
         return false;
 
     backgroundAllocation = (GetCPUCount() >= 2);
 #endif /* JS_THREADSAFE */
     return true;
 }
 
 void
 GCHelperThread::finish()
 {
-    if (!rt->useHelperThreads() || !rt->gcLock) {
+    if (!rt->useHelperThreads() || !rt->gc.lock) {
         JS_ASSERT(state == IDLE);
         return;
     }
 
 #ifdef JS_THREADSAFE
     PRThread *join = nullptr;
     {
         AutoLockGC lock(rt);
@@ -2274,20 +2365,20 @@ GCHelperThread::threadMain(void *arg)
 #endif
 
     static_cast<GCHelperThread *>(arg)->threadLoop();
 }
 
 void
 GCHelperThread::wait(PRCondVar *which)
 {
-    rt->gcLockOwner = nullptr;
+    rt->gc.lockOwner = nullptr;
     PR_WaitCondVar(which, PR_INTERVAL_NO_TIMEOUT);
 #ifdef DEBUG
-    rt->gcLockOwner = PR_GetCurrentThread();
+    rt->gc.lockOwner = PR_GetCurrentThread();
 #endif
 }
 
 void
 GCHelperThread::threadLoop()
 {
     AutoLockGC lock(rt);
 
@@ -2321,18 +2412,18 @@ GCHelperThread::threadLoop()
                     AutoUnlockGC unlock(rt);
                     chunk = Chunk::allocate(rt);
                 }
 
                 /* OOM stops the background allocation. */
                 if (!chunk)
                     break;
                 JS_ASSERT(chunk->info.numArenasFreeCommitted == 0);
-                rt->gcChunkPool.put(chunk);
-            } while (state == ALLOCATING && rt->gcChunkPool.wantBackgroundAllocation(rt));
+                rt->gc.chunkPool.put(chunk);
+            } while (state == ALLOCATING && rt->gc.chunkPool.wantBackgroundAllocation(rt));
             if (state == ALLOCATING)
                 state = IDLE;
             break;
           }
           case CANCEL_ALLOCATION:
             state = IDLE;
             PR_NotifyAllCondVar(done);
             break;
@@ -2394,17 +2485,17 @@ GCHelperThread::waitBackgroundSweepEnd()
         JS_ASSERT(state == IDLE);
         return;
     }
 
 #ifdef JS_THREADSAFE
     AutoLockGC lock(rt);
     while (state == SWEEPING)
         wait(done);
-    if (rt->gcIncrementalState == NO_INCREMENTAL)
+    if (rt->gc.incrementalState == NO_INCREMENTAL)
         AssertBackgroundSweepingFinished(rt);
 #endif /* JS_THREADSAFE */
 }
 
 void
 GCHelperThread::waitBackgroundSweepOrAllocEnd()
 {
     if (!rt->useHelperThreads()) {
@@ -2413,17 +2504,17 @@ GCHelperThread::waitBackgroundSweepOrAll
     }
 
 #ifdef JS_THREADSAFE
     AutoLockGC lock(rt);
     if (state == ALLOCATING)
         state = CANCEL_ALLOCATION;
     while (state == SWEEPING || state == CANCEL_ALLOCATION)
         wait(done);
-    if (rt->gcIncrementalState == NO_INCREMENTAL)
+    if (rt->gc.incrementalState == NO_INCREMENTAL)
         AssertBackgroundSweepingFinished(rt);
 #endif /* JS_THREADSAFE */
 }
 
 /* Must be called with the GC lock taken. */
 inline void
 GCHelperThread::startBackgroundAllocationIfIdle()
 {
@@ -2510,20 +2601,20 @@ GCHelperThread::onBackgroundThread()
 
 static bool
 ReleaseObservedTypes(JSRuntime *rt)
 {
     bool releaseTypes = rt->gcZeal() != 0;
 
 #ifndef JS_MORE_DETERMINISTIC
     int64_t now = PRMJ_Now();
-    if (now >= rt->gcJitReleaseTime)
+    if (now >= rt->gc.jitReleaseTime)
         releaseTypes = true;
     if (releaseTypes)
-        rt->gcJitReleaseTime = now + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
+        rt->gc.jitReleaseTime = now + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
 #endif
 
     return releaseTypes;
 }
 
 /*
  * It's simpler if we preserve the invariant that every zone has at least one
  * compartment. If we know we're deleting the entire zone, then
@@ -2569,21 +2660,21 @@ SweepCompartments(FreeOp *fop, Zone *zon
 
 static void
 SweepZones(FreeOp *fop, bool lastGC)
 {
     JSRuntime *rt = fop->runtime();
     JSZoneCallback callback = rt->destroyZoneCallback;
 
     /* Skip the atomsCompartment zone. */
-    Zone **read = rt->zones.begin() + 1;
-    Zone **end = rt->zones.end();
+    Zone **read = rt->gc.zones.begin() + 1;
+    Zone **end = rt->gc.zones.end();
     Zone **write = read;
-    JS_ASSERT(rt->zones.length() >= 1);
-    JS_ASSERT(rt->isAtomsZone(rt->zones[0]));
+    JS_ASSERT(rt->gc.zones.length() >= 1);
+    JS_ASSERT(rt->isAtomsZone(rt->gc.zones[0]));
 
     while (read < end) {
         Zone *zone = *read++;
 
         if (zone->wasGCStarted()) {
             if ((zone->allocator.arenas.arenaListsAreEmpty() && !zone->hasMarkedCompartments()) ||
                 lastGC)
             {
@@ -2594,17 +2685,17 @@ SweepZones(FreeOp *fop, bool lastGC)
                 JS_ASSERT(zone->compartments.empty());
                 fop->delete_(zone);
                 continue;
             }
             SweepCompartments(fop, zone, true, lastGC);
         }
         *write++ = zone;
     }
-    rt->zones.resize(write - rt->zones.begin());
+    rt->gc.zones.resize(write - rt->gc.zones.begin());
 }
 
 static void
 PurgeRuntime(JSRuntime *rt)
 {
     for (GCCompartmentsIter comp(rt); !comp.done(); comp.next())
         comp->purge();
 
@@ -2621,20 +2712,20 @@ PurgeRuntime(JSRuntime *rt)
     if (!rt->hasActiveCompilations())
         rt->parseMapPool().purgeAll();
 }
 
 static bool
 ShouldPreserveJITCode(JSCompartment *comp, int64_t currentTime)
 {
     JSRuntime *rt = comp->runtimeFromMainThread();
-    if (rt->gcShouldCleanUpEverything)
+    if (rt->gc.shouldCleanUpEverything)
         return false;
 
-    if (rt->alwaysPreserveCode)
+    if (rt->gc.alwaysPreserveCode)
         return true;
     if (comp->lastAnimationTime + PRMJ_USEC_PER_SEC >= currentTime)
         return true;
 
     return false;
 }
 
 #ifdef DEBUG
@@ -2714,17 +2805,17 @@ CheckCompartmentCallback(JSTracer *trcAr
         JS_ASSERT(thing->tenuredZone() == trc->zone ||
                   trc->runtime()->isAtomsZone(thing->tenuredZone()));
     }
 }
 
 static void
 CheckForCompartmentMismatches(JSRuntime *rt)
 {
-    if (rt->gcDisableStrictProxyCheckingCount)
+    if (rt->gc.disableStrictProxyCheckingCount)
         return;
 
     CompartmentCheckTracer trc(rt, CheckCompartmentCallback);
     for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
         trc.zone = zone;
         for (size_t thingKind = 0; thingKind < FINALIZE_LAST; thingKind++) {
             for (CellIterUnderGC i(zone, AllocKind(thingKind)); !i.done(); i.next()) {
                 trc.src = i.getCell();
@@ -2738,70 +2829,70 @@ CheckForCompartmentMismatches(JSRuntime 
 #endif
 
 static bool
 BeginMarkPhase(JSRuntime *rt)
 {
     int64_t currentTime = PRMJ_Now();
 
 #ifdef DEBUG
-    if (rt->gcFullCompartmentChecks)
+    if (rt->gc.fullCompartmentChecks)
         CheckForCompartmentMismatches(rt);
 #endif
 
-    rt->gcIsFull = true;
+    rt->gc.isFull = true;
     bool any = false;
 
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         /* Assert that zone state is as we expect */
         JS_ASSERT(!zone->isCollecting());
         JS_ASSERT(!zone->compartments.empty());
         for (unsigned i = 0; i < FINALIZE_LIMIT; ++i)
             JS_ASSERT(!zone->allocator.arenas.arenaListsToSweep[i]);
 
         /* Set up which zones will be collected. */
         if (zone->isGCScheduled()) {
             if (!rt->isAtomsZone(zone)) {
                 any = true;
                 zone->setGCState(Zone::Mark);
             }
         } else {
-            rt->gcIsFull = false;
+            rt->gc.isFull = false;
         }
 
         zone->scheduledForDestruction = false;
         zone->maybeAlive = false;
         zone->setPreservingCode(false);
     }
 
     for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next()) {
         JS_ASSERT(c->gcLiveArrayBuffers.empty());
         c->marked = false;
         if (ShouldPreserveJITCode(c, currentTime))
             c->zone()->setPreservingCode(true);
     }
 
-    if (!rt->gcShouldCleanUpEverything) {
+    if (!rt->gc.shouldCleanUpEverything) {
 #ifdef JS_ION
         if (JSCompartment *comp = jit::TopmostJitActivationCompartment(rt))
             comp->zone()->setPreservingCode(true);
 #endif
     }
 
     /*
      * Atoms are not in the cross-compartment map. So if there are any
      * zones that are not being collected, we are not allowed to collect
      * atoms. Otherwise, the non-collected zones could contain pointers
      * to atoms that we would miss.
      *
      * keepAtoms() will only change on the main thread, which we are currently
      * on. If the value of keepAtoms() changes between GC slices, then we'll
      * cancel the incremental GC. See IsIncrementalGCSafe.
      */
-    if (rt->gcIsFull && !rt->keepAtoms()) {
+    if (rt->gc.isFull && !rt->keepAtoms()) {
         Zone *atomsZone = rt->atomsCompartment()->zone();
         if (atomsZone->isGCScheduled()) {
             JS_ASSERT(!atomsZone->isCollecting());
             atomsZone->setGCState(Zone::Mark);
             any = true;
         }
     }
 
@@ -2811,72 +2902,72 @@ BeginMarkPhase(JSRuntime *rt)
 
     /*
      * At the end of each incremental slice, we call prepareForIncrementalGC,
      * which marks objects in all arenas that we're currently allocating
      * into. This can cause leaks if unreachable objects are in these
      * arenas. This purge call ensures that we only mark arenas that have had
      * allocations after the incremental GC started.
      */
-    if (rt->gcIsIncremental) {
+    if (rt->gc.isIncremental) {
         for (GCZonesIter zone(rt); !zone.done(); zone.next())
             zone->allocator.arenas.purge();
     }
 
-    rt->gcMarker.start();
-    JS_ASSERT(!rt->gcMarker.callback);
-    JS_ASSERT(IS_GC_MARKING_TRACER(&rt->gcMarker));
+    rt->gc.marker.start();
+    JS_ASSERT(!rt->gc.marker.callback);
+    JS_ASSERT(IS_GC_MARKING_TRACER(&rt->gc.marker));
 
     /* For non-incremental GC the following sweep discards the jit code. */
-    if (rt->gcIsIncremental) {
+    if (rt->gc.isIncremental) {
         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
-            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK_DISCARD_CODE);
+            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_MARK_DISCARD_CODE);
             zone->discardJitCode(rt->defaultFreeOp());
         }
     }
 
-    GCMarker *gcmarker = &rt->gcMarker;
-
-    rt->gcStartNumber = rt->gcNumber;
+    GCMarker *gcmarker = &rt->gc.marker;
+
+    rt->gc.startNumber = rt->gc.number;
 
     /*
      * We must purge the runtime at the beginning of an incremental GC. The
      * danger if we purge later is that the snapshot invariant of incremental
      * GC will be broken, as follows. If some object is reachable only through
      * some cache (say the dtoaCache) then it will not be part of the snapshot.
      * If we purge after root marking, then the mutator could obtain a pointer
      * to the object and start using it. This object might never be marked, so
      * a GC hazard would exist.
      */
     {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_PURGE);
+        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_PURGE);
         PurgeRuntime(rt);
     }
 
     /*
      * Mark phase.
      */
-    gcstats::AutoPhase ap1(rt->gcStats, gcstats::PHASE_MARK);
-    gcstats::AutoPhase ap2(rt->gcStats, gcstats::PHASE_MARK_ROOTS);
+    gcstats::AutoPhase ap1(rt->gc.stats, gcstats::PHASE_MARK);
+    gcstats::AutoPhase ap2(rt->gc.stats, gcstats::PHASE_MARK_ROOTS);
 
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
         /* Unmark everything in the zones being collected. */
         zone->allocator.arenas.unmarkAll();
     }
 
     for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
         /* Reset weak map list for the compartments being collected. */
         WeakMapBase::resetCompartmentWeakMapList(c);
     }
 
-    if (rt->gcIsFull)
+    if (rt->gc.isFull)
         UnmarkScriptData(rt);
 
     MarkRuntime(gcmarker);
-    if (rt->gcIsIncremental)
+    if (rt->gc.isIncremental)
         BufferGrayRoots(gcmarker);
 
     /*
      * This code ensures that if a zone is "dead", then it will be
      * collected in this GC. A zone is considered dead if its maybeAlive
      * flag is false. The maybeAlive flag is set if:
      *   (1) the zone has incoming cross-compartment edges, or
      *   (2) an object in the zone was marked during root marking, either
@@ -2916,30 +3007,30 @@ BeginMarkPhase(JSRuntime *rt)
      * For black roots, code in gc/Marking.cpp will already have set maybeAlive
      * during MarkRuntime.
      */
 
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
         if (!zone->maybeAlive && !rt->isAtomsZone(zone))
             zone->scheduledForDestruction = true;
     }
-    rt->gcFoundBlackGrayEdges = false;
+    rt->gc.foundBlackGrayEdges = false;
 
     return true;
 }
 
 template <class CompartmentIterT>
 static void
 MarkWeakReferences(JSRuntime *rt, gcstats::Phase phase)
 {
-    GCMarker *gcmarker = &rt->gcMarker;
+    GCMarker *gcmarker = &rt->gc.marker;
     JS_ASSERT(gcmarker->isDrained());
 
-    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK);
-    gcstats::AutoPhase ap1(rt->gcStats, phase);
+    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_MARK);
+    gcstats::AutoPhase ap1(rt->gc.stats, phase);
 
     for (;;) {
         bool markedAny = false;
         for (CompartmentIterT c(rt); !c.done(); c.next()) {
             markedAny |= WatchpointMap::markCompartmentIteratively(c, gcmarker);
             markedAny |= WeakMapBase::markCompartmentIteratively(c, gcmarker);
         }
         markedAny |= Debugger::markAllIteratively(gcmarker);
@@ -2958,29 +3049,29 @@ MarkWeakReferencesInCurrentGroup(JSRunti
 {
     MarkWeakReferences<GCCompartmentGroupIter>(rt, phase);
 }
 
 template <class ZoneIterT, class CompartmentIterT>
 static void
 MarkGrayReferences(JSRuntime *rt)
 {
-    GCMarker *gcmarker = &rt->gcMarker;
+    GCMarker *gcmarker = &rt->gc.marker;
 
     {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK);
-        gcstats::AutoPhase ap1(rt->gcStats, gcstats::PHASE_SWEEP_MARK_GRAY);
+        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_MARK);
+        gcstats::AutoPhase ap1(rt->gc.stats, gcstats::PHASE_SWEEP_MARK_GRAY);
         gcmarker->setMarkColorGray();
         if (gcmarker->hasBufferedGrayRoots()) {
             for (ZoneIterT zone(rt); !zone.done(); zone.next())
                 gcmarker->markBufferedGrayRoots(zone);
         } else {
-            JS_ASSERT(!rt->gcIsIncremental);
-            if (JSTraceDataOp op = rt->gcGrayRootTracer.op)
-                (*op)(gcmarker, rt->gcGrayRootTracer.data);
+            JS_ASSERT(!rt->gc.isIncremental);
+            if (JSTraceDataOp op = rt->gc.grayRootTracer.op)
+                (*op)(gcmarker, rt->gc.grayRootTracer.data);
         }
         SliceBudget budget;
         gcmarker->drainMarkStack(budget);
     }
 
     MarkWeakReferences<CompartmentIterT>(rt, gcstats::PHASE_SWEEP_MARK_GRAY_WEAK);
 
     JS_ASSERT(gcmarker->isDrained());
@@ -3046,20 +3137,20 @@ js::gc::MarkingValidator::nonIncremental
      * the results for later comparison.
      *
      * Currently this does not validate gray marking.
      */
 
     if (!map.init())
         return;
 
-    GCMarker *gcmarker = &runtime->gcMarker;
+    GCMarker *gcmarker = &runtime->gc.marker;
 
     /* Save existing mark bits. */
-    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) {
+    for (GCChunkSet::Range r(runtime->gc.chunkSet.all()); !r.empty(); r.popFront()) {
         ChunkBitmap *bitmap = &r.front()->bitmap;
         ChunkBitmap *entry = js_new<ChunkBitmap>();

         if (!entry)
             return;
 
         memcpy((void *)entry->bitmap, (void *)bitmap->bitmap, sizeof(bitmap->bitmap));
         if (!map.putNew(r.front(), entry))
             return;
@@ -3087,41 +3178,41 @@ js::gc::MarkingValidator::nonIncremental
     initialized = true;
 
     for (GCCompartmentsIter c(runtime); !c.done(); c.next()) {
         WeakMapBase::resetCompartmentWeakMapList(c);
         ArrayBufferObject::resetArrayBufferList(c);
     }
 
     /* Re-do all the marking, but non-incrementally. */
-    js::gc::State state = runtime->gcIncrementalState;
-    runtime->gcIncrementalState = MARK_ROOTS;
+    js::gc::State state = runtime->gc.incrementalState;
+    runtime->gc.incrementalState = MARK_ROOTS;
 
     JS_ASSERT(gcmarker->isDrained());
     gcmarker->reset();
 
-    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront())
+    for (GCChunkSet::Range r(runtime->gc.chunkSet.all()); !r.empty(); r.popFront())
         r.front()->bitmap.clear();
 
     {
-        gcstats::AutoPhase ap1(runtime->gcStats, gcstats::PHASE_MARK);
-        gcstats::AutoPhase ap2(runtime->gcStats, gcstats::PHASE_MARK_ROOTS);
+        gcstats::AutoPhase ap1(runtime->gc.stats, gcstats::PHASE_MARK);
+        gcstats::AutoPhase ap2(runtime->gc.stats, gcstats::PHASE_MARK_ROOTS);
         MarkRuntime(gcmarker, true);
     }
 
     {
-        gcstats::AutoPhase ap1(runtime->gcStats, gcstats::PHASE_MARK);
+        gcstats::AutoPhase ap1(runtime->gc.stats, gcstats::PHASE_MARK);
         SliceBudget budget;
-        runtime->gcIncrementalState = MARK;
-        runtime->gcMarker.drainMarkStack(budget);
+        runtime->gc.incrementalState = MARK;
+        runtime->gc.marker.drainMarkStack(budget);
     }
 
-    runtime->gcIncrementalState = SWEEP;
+    runtime->gc.incrementalState = SWEEP;
     {
-        gcstats::AutoPhase ap(runtime->gcStats, gcstats::PHASE_SWEEP);
+        gcstats::AutoPhase ap(runtime->gc.stats, gcstats::PHASE_SWEEP);
         MarkAllWeakReferences(runtime, gcstats::PHASE_SWEEP_MARK_WEAK);
 
         /* Update zone state for gray marking. */
         for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
             JS_ASSERT(zone->isGCMarkingBlack());
             zone->setGCState(Zone::MarkGray);
         }
 
@@ -3130,45 +3221,45 @@ js::gc::MarkingValidator::nonIncremental
         /* Restore zone state. */
         for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
             JS_ASSERT(zone->isGCMarkingGray());
             zone->setGCState(Zone::Mark);
         }
     }
 
     /* Take a copy of the non-incremental mark state and restore the original. */
-    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) {
+    for (GCChunkSet::Range r(runtime->gc.chunkSet.all()); !r.empty(); r.popFront()) {
         Chunk *chunk = r.front();
         ChunkBitmap *bitmap = &chunk->bitmap;
         ChunkBitmap *entry = map.lookup(chunk)->value();
         Swap(*entry, *bitmap);
     }
 
     for (GCCompartmentsIter c(runtime); !c.done(); c.next()) {
         WeakMapBase::resetCompartmentWeakMapList(c);
         ArrayBufferObject::resetArrayBufferList(c);
     }
     WeakMapBase::restoreCompartmentWeakMapLists(weakmaps);
     ArrayBufferObject::restoreArrayBufferLists(arrayBuffers);
 
-    runtime->gcIncrementalState = state;
+    runtime->gc.incrementalState = state;
 }
 
 void
 js::gc::MarkingValidator::validate()
 {
     /*
      * Validates the incremental marking for a single compartment by comparing
      * the mark bits to those previously recorded for a non-incremental mark.
      */
 
     if (!initialized)
         return;
 
-    for (GCChunkSet::Range r(runtime->gcChunkSet.all()); !r.empty(); r.popFront()) {
+    for (GCChunkSet::Range r(runtime->gc.chunkSet.all()); !r.empty(); r.popFront()) {
         Chunk *chunk = r.front();
         BitmapMap::Ptr ptr = map.lookup(chunk);
         if (!ptr)
             continue;  /* Allocated after we did the non-incremental mark. */
 
         ChunkBitmap *bitmap = ptr->value();
         ChunkBitmap *incBitmap = &chunk->bitmap;
 
@@ -3209,39 +3300,39 @@ js::gc::MarkingValidator::validate()
 }
 
 #endif
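
The MarkingValidator above (DEBUG-only, enabled by gc.validate) snapshots the mark bitmaps of every chunk, redoes the whole mark phase non-incrementally, swaps the results back, and validate() then compares the two sets of mark bits. Below is a schematic of that comparison using ordinary containers; it is illustrative only, Chunk/Bitmap are stand-ins, and the assumed invariant is that anything the non-incremental mark found must also have been marked by the incremental run (which may mark strictly more).

#include <cassert>
#include <bitset>
#include <map>

using Bitmap = std::bitset<1024>;   // stand-in for a chunk's mark bitmap
using Chunk  = int;                 // stand-in for a chunk identity

void ValidateMarking(const std::map<Chunk, Bitmap> &incremental,
                     const std::map<Chunk, Bitmap> &nonIncremental)
{
    for (const auto &entry : nonIncremental) {
        auto it = incremental.find(entry.first);
        if (it == incremental.end())
            continue;               // chunk allocated after the snapshot was taken
        // Every bit set by the non-incremental mark must also be set by the
        // incremental mark; the incremental run is allowed to mark extra cells.
        assert((entry.second & ~it->second).none());
    }
}

int main()
{
    std::map<Chunk, Bitmap> inc, nonInc;
    inc[0].set(3);
    inc[0].set(7);                  // incremental marking may mark extra cells...
    nonInc[0].set(3);               // ...but must cover everything the full mark found.
    ValidateMarking(inc, nonInc);
    return 0;
}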
 
 static void
 ComputeNonIncrementalMarkingForValidation(JSRuntime *rt)
 {
 #ifdef DEBUG
-    JS_ASSERT(!rt->gcMarkingValidator);
-    if (rt->gcIsIncremental && rt->gcValidate)
-        rt->gcMarkingValidator = js_new<MarkingValidator>(rt);
-    if (rt->gcMarkingValidator)
-        rt->gcMarkingValidator->nonIncrementalMark();
+    JS_ASSERT(!rt->gc.markingValidator);
+    if (rt->gc.isIncremental && rt->gc.validate)
+        rt->gc.markingValidator = js_new<MarkingValidator>(rt);
+    if (rt->gc.markingValidator)
+        rt->gc.markingValidator->nonIncrementalMark();
 #endif
 }
 
 static void
 ValidateIncrementalMarking(JSRuntime *rt)
 {
 #ifdef DEBUG
-    if (rt->gcMarkingValidator)
-        rt->gcMarkingValidator->validate();
+    if (rt->gc.markingValidator)
+        rt->gc.markingValidator->validate();
 #endif
 }
 
 static void
 FinishMarkingValidation(JSRuntime *rt)
 {
 #ifdef DEBUG
-    js_delete(rt->gcMarkingValidator);
-    rt->gcMarkingValidator = nullptr;
+    js_delete(rt->gc.markingValidator);
+    rt->gc.markingValidator = nullptr;
 #endif
 }
 
 static void
 AssertNeedsBarrierFlagsConsistent(JSRuntime *rt)
 {
 #ifdef DEBUG
     bool anyNeedsBarrier = false;
@@ -3333,64 +3424,64 @@ Zone::findOutgoingEdges(ComponentFinder<
     for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next())
         comp->findOutgoingEdges(finder);
 }
 
 static void
 FindZoneGroups(JSRuntime *rt)
 {
     ComponentFinder<Zone> finder(rt->mainThread.nativeStackLimit[StackForSystemCode]);
-    if (!rt->gcIsIncremental)
+    if (!rt->gc.isIncremental)
         finder.useOneComponent();
 
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
         JS_ASSERT(zone->isGCMarking());
         finder.addNode(zone);
     }
-    rt->gcZoneGroups = finder.getResultsList();
-    rt->gcCurrentZoneGroup = rt->gcZoneGroups;
-    rt->gcZoneGroupIndex = 0;
-    JS_ASSERT_IF(!rt->gcIsIncremental, !rt->gcCurrentZoneGroup->nextGroup());
+    rt->gc.zoneGroups = finder.getResultsList();
+    rt->gc.currentZoneGroup = rt->gc.zoneGroups;
+    rt->gc.zoneGroupIndex = 0;
+    JS_ASSERT_IF(!rt->gc.isIncremental, !rt->gc.currentZoneGroup->nextGroup());
 }
 
 static void
 ResetGrayList(JSCompartment* comp);
 
 static void
 GetNextZoneGroup(JSRuntime *rt)
 {
-    rt->gcCurrentZoneGroup = rt->gcCurrentZoneGroup->nextGroup();
-    ++rt->gcZoneGroupIndex;
-    if (!rt->gcCurrentZoneGroup) {
-        rt->gcAbortSweepAfterCurrentGroup = false;
+    rt->gc.currentZoneGroup = rt->gc.currentZoneGroup->nextGroup();
+    ++rt->gc.zoneGroupIndex;
+    if (!rt->gc.currentZoneGroup) {
+        rt->gc.abortSweepAfterCurrentGroup = false;
         return;
     }
 
-    if (!rt->gcIsIncremental)
-        ComponentFinder<Zone>::mergeGroups(rt->gcCurrentZoneGroup);
-
-    if (rt->gcAbortSweepAfterCurrentGroup) {
-        JS_ASSERT(!rt->gcIsIncremental);
+    if (!rt->gc.isIncremental)
+        ComponentFinder<Zone>::mergeGroups(rt->gc.currentZoneGroup);
+
+    if (rt->gc.abortSweepAfterCurrentGroup) {
+        JS_ASSERT(!rt->gc.isIncremental);
         for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
             JS_ASSERT(!zone->gcNextGraphComponent);
             JS_ASSERT(zone->isGCMarking());
             zone->setNeedsBarrier(false, Zone::UpdateIon);
             zone->setGCState(Zone::NoGC);
             zone->gcGrayRoots.clearAndFree();
         }
         rt->setNeedsBarrier(false);
         AssertNeedsBarrierFlagsConsistent(rt);
 
         for (GCCompartmentGroupIter comp(rt); !comp.done(); comp.next()) {
             ArrayBufferObject::resetArrayBufferList(comp);
             ResetGrayList(comp);
         }
 
-        rt->gcAbortSweepAfterCurrentGroup = false;
-        rt->gcCurrentZoneGroup = nullptr;
+        rt->gc.abortSweepAfterCurrentGroup = false;
+        rt->gc.currentZoneGroup = nullptr;
     }
 }
 
 /*
  * Gray marking:
  *
  * At the end of collection, anything reachable from a gray root that has not
  * otherwise been marked black must be marked gray.
@@ -3491,22 +3582,22 @@ js::DelayCrossCompartmentGrayMarking(JSO
 #endif
 }
 
 static void
 MarkIncomingCrossCompartmentPointers(JSRuntime *rt, const uint32_t color)
 {
     JS_ASSERT(color == BLACK || color == GRAY);
 
-    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_MARK);
+    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_MARK);
     static const gcstats::Phase statsPhases[] = {
         gcstats::PHASE_SWEEP_MARK_INCOMING_BLACK,
         gcstats::PHASE_SWEEP_MARK_INCOMING_GRAY
     };
-    gcstats::AutoPhase ap1(rt->gcStats, statsPhases[color]);
+    gcstats::AutoPhase ap1(rt->gc.stats, statsPhases[color]);
 
     bool unlinkList = color == GRAY;
 
     for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
         JS_ASSERT_IF(color == GRAY, c->zone()->isGCMarkingGray());
         JS_ASSERT_IF(color == BLACK, c->zone()->isGCMarkingBlack());
         JS_ASSERT_IF(c->gcIncomingGrayPointers, IsGrayListObject(c->gcIncomingGrayPointers));
 
@@ -3514,31 +3605,31 @@ MarkIncomingCrossCompartmentPointers(JSR
              src;
              src = NextIncomingCrossCompartmentPointer(src, unlinkList))
         {
             JSObject *dst = CrossCompartmentPointerReferent(src);
             JS_ASSERT(dst->compartment() == c);
 
             if (color == GRAY) {
                 if (IsObjectMarked(&src) && src->isMarked(GRAY))
-                    MarkGCThingUnbarriered(&rt->gcMarker, (void**)&dst,
+                    MarkGCThingUnbarriered(&rt->gc.marker, (void**)&dst,
                                            "cross-compartment gray pointer");
             } else {
                 if (IsObjectMarked(&src) && !src->isMarked(GRAY))
-                    MarkGCThingUnbarriered(&rt->gcMarker, (void**)&dst,
+                    MarkGCThingUnbarriered(&rt->gc.marker, (void**)&dst,
                                            "cross-compartment black pointer");
             }
         }
 
         if (unlinkList)
             c->gcIncomingGrayPointers = nullptr;
     }
 
     SliceBudget budget;
-    rt->gcMarker.drainMarkStack(budget);
+    rt->gc.marker.drainMarkStack(budget);
 }
 
 static bool
 RemoveFromGrayList(JSObject *wrapper)
 {
     if (!IsGrayListObject(wrapper))
         return false;
 
@@ -3637,30 +3728,30 @@ EndMarkingZoneGroup(JSRuntime *rt)
      * MarkCrossCompartmentXXX.
      */
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
         JS_ASSERT(zone->isGCMarkingBlack());
         zone->setGCState(Zone::MarkGray);
     }
 
     /* Mark incoming gray pointers from previously swept compartments. */
-    rt->gcMarker.setMarkColorGray();
+    rt->gc.marker.setMarkColorGray();
     MarkIncomingCrossCompartmentPointers(rt, GRAY);
-    rt->gcMarker.setMarkColorBlack();
+    rt->gc.marker.setMarkColorBlack();
 
     /* Mark gray roots and mark transitively inside the current compartment group. */
     MarkGrayReferencesInCurrentGroup(rt);
 
     /* Restore marking state. */
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
         JS_ASSERT(zone->isGCMarkingGray());
         zone->setGCState(Zone::Mark);
     }
 
-    JS_ASSERT(rt->gcMarker.isDrained());
+    JS_ASSERT(rt->gc.marker.isDrained());
 }
 
 static void
 BeginSweepingZoneGroup(JSRuntime *rt)
 {
     /*
      * Begin sweeping the group of zones in gcCurrentZoneGroup,
      * performing actions that must be done before yielding to caller.
@@ -3679,55 +3770,55 @@ BeginSweepingZoneGroup(JSRuntime *rt)
             sweepingAtoms = true;
 
         if (rt->sweepZoneCallback)
             rt->sweepZoneCallback(zone);
     }
 
     ValidateIncrementalMarking(rt);
 
-    FreeOp fop(rt, rt->gcSweepOnBackgroundThread);
+    FreeOp fop(rt, rt->gc.sweepOnBackgroundThread);
 
     {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_START);
-        if (rt->gcFinalizeCallback)
-            rt->gcFinalizeCallback(&fop, JSFINALIZE_GROUP_START, !rt->gcIsFull /* unused */);
+        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_FINALIZE_START);
+        if (rt->gc.finalizeCallback)
+            rt->gc.finalizeCallback(&fop, JSFINALIZE_GROUP_START, !rt->gc.isFull /* unused */);
     }
 
     if (sweepingAtoms) {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_ATOMS);
+        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_ATOMS);
         rt->sweepAtoms();
     }
 
     /* Prune out dead views from ArrayBuffer's view lists. */
     for (GCCompartmentGroupIter c(rt); !c.done(); c.next())
         ArrayBufferObject::sweep(c);
 
     /* Collect watch points associated with unreachable objects. */
     WatchpointMap::sweepAll(rt);
 
     /* Detach unreachable debuggers and global objects from each other. */
     Debugger::sweepAll(&fop);
 
     {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_COMPARTMENTS);
+        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_COMPARTMENTS);
 
         for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_DISCARD_CODE);
+            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_DISCARD_CODE);
             zone->discardJitCode(&fop);
         }
 
         bool releaseTypes = ReleaseObservedTypes(rt);
         for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
-            gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
+            gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
             c->sweep(&fop, releaseTypes && !c->zone()->isPreservingCode());
         }
 
         for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-            gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
+            gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
 
             // If there is an OOM while sweeping types, the type information
             // will be deoptimized so that it is still correct (i.e.
             // overapproximates the possible types in the zone), but the
             // constraints might not have been triggered on the deoptimization
             // or even copied over completely. In this case, destroy all JIT
             // code and new script addendums in the zone, the only things whose
             // correctness depends on the type constraints.
@@ -3746,86 +3837,86 @@ BeginSweepingZoneGroup(JSRuntime *rt)
      * Queue all GC things in all zones for sweeping, either in the
      * foreground or on the background thread.
      *
      * Note that order is important here for the background case.
      *
      * Objects are finalized immediately but this may change in the future.
      */
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
+        gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
         zone->allocator.arenas.queueObjectsForSweep(&fop);
     }
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
+        gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
         zone->allocator.arenas.queueStringsForSweep(&fop);
     }
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
+        gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
         zone->allocator.arenas.queueScriptsForSweep(&fop);
     }
 #ifdef JS_ION
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
+        gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
         zone->allocator.arenas.queueJitCodeForSweep(&fop);
     }
 #endif
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
-        gcstats::AutoSCC scc(rt->gcStats, rt->gcZoneGroupIndex);
+        gcstats::AutoSCC scc(rt->gc.stats, rt->gc.zoneGroupIndex);
         zone->allocator.arenas.queueShapesForSweep(&fop);
         zone->allocator.arenas.gcShapeArenasToSweep =
             zone->allocator.arenas.arenaListsToSweep[FINALIZE_SHAPE];
     }
 
-    rt->gcSweepPhase = 0;
-    rt->gcSweepZone = rt->gcCurrentZoneGroup;
-    rt->gcSweepKindIndex = 0;
+    rt->gc.sweepPhase = 0;
+    rt->gc.sweepZone = rt->gc.currentZoneGroup;
+    rt->gc.sweepKindIndex = 0;
 
     {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
-        if (rt->gcFinalizeCallback)
-            rt->gcFinalizeCallback(&fop, JSFINALIZE_GROUP_END, !rt->gcIsFull /* unused */);
+        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_FINALIZE_END);
+        if (rt->gc.finalizeCallback)
+            rt->gc.finalizeCallback(&fop, JSFINALIZE_GROUP_END, !rt->gc.isFull /* unused */);
     }
 }
 
 static void
 EndSweepingZoneGroup(JSRuntime *rt)
 {
     /* Update the GC state for zones we have swept and unlink the list. */
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
         JS_ASSERT(zone->isGCSweeping());
         zone->setGCState(Zone::Finished);
     }
 
     /* Reset the list of arenas marked as being allocated during sweep phase. */
-    while (ArenaHeader *arena = rt->gcArenasAllocatedDuringSweep) {
-        rt->gcArenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
+    while (ArenaHeader *arena = rt->gc.arenasAllocatedDuringSweep) {
+        rt->gc.arenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
         arena->unsetAllocDuringSweep();
     }
 }
 
 static void
 BeginSweepPhase(JSRuntime *rt, bool lastGC)
 {
     /*
      * Sweep phase.
      *
-     * Finalize as we sweep, outside of rt->gcLock but with rt->isHeapBusy()
+     * Finalize as we sweep, outside of rt->gc.lock but with rt->isHeapBusy()
      * true so that any attempt to allocate a GC-thing from a finalizer will
      * fail, rather than nest badly and leave the unmarked newborn to be swept.
      */
 
-    JS_ASSERT(!rt->gcAbortSweepAfterCurrentGroup);
+    JS_ASSERT(!rt->gc.abortSweepAfterCurrentGroup);
 
     ComputeNonIncrementalMarkingForValidation(rt);
 
-    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
+    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP);
 
 #ifdef JS_THREADSAFE
-    rt->gcSweepOnBackgroundThread = !lastGC && rt->useHelperThreads();
+    rt->gc.sweepOnBackgroundThread = !lastGC && rt->useHelperThreads();
 #endif
 
 #ifdef DEBUG
     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
         JS_ASSERT(!c->gcIncomingGrayPointers);
         for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
             if (e.front().key().kind != CrossCompartmentKey::StringWrapper)
                 AssertNotOnGrayList(&e.front().value().get().toObject());
@@ -3848,57 +3939,57 @@ ArenaLists::foregroundFinalize(FreeOp *f
     ArenaList &dest = arenaLists[thingKind];
     return FinalizeArenas(fop, &arenaListsToSweep[thingKind], dest, thingKind, sliceBudget);
 }
 
 static bool
 DrainMarkStack(JSRuntime *rt, SliceBudget &sliceBudget, gcstats::Phase phase)
 {
     /* Run a marking slice and return whether the stack is now empty. */
-    gcstats::AutoPhase ap(rt->gcStats, phase);
-    return rt->gcMarker.drainMarkStack(sliceBudget);
+    gcstats::AutoPhase ap(rt->gc.stats, phase);
+    return rt->gc.marker.drainMarkStack(sliceBudget);
 }
 
 static bool
 SweepPhase(JSRuntime *rt, SliceBudget &sliceBudget)
 {
-    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
-    FreeOp fop(rt, rt->gcSweepOnBackgroundThread);
+    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP);
+    FreeOp fop(rt, rt->gc.sweepOnBackgroundThread);
 
     bool finished = DrainMarkStack(rt, sliceBudget, gcstats::PHASE_SWEEP_MARK);
     if (!finished)
         return false;
 
     for (;;) {
         /* Finalize foreground finalized things. */
-        for (; rt->gcSweepPhase < FinalizePhaseCount ; ++rt->gcSweepPhase) {
-            gcstats::AutoPhase ap(rt->gcStats, FinalizePhaseStatsPhase[rt->gcSweepPhase]);
-
-            for (; rt->gcSweepZone; rt->gcSweepZone = rt->gcSweepZone->nextNodeInGroup()) {
-                Zone *zone = rt->gcSweepZone;
-
-                while (rt->gcSweepKindIndex < FinalizePhaseLength[rt->gcSweepPhase]) {
-                    AllocKind kind = FinalizePhases[rt->gcSweepPhase][rt->gcSweepKindIndex];
+        for (; rt->gc.sweepPhase < FinalizePhaseCount ; ++rt->gc.sweepPhase) {
+            gcstats::AutoPhase ap(rt->gc.stats, FinalizePhaseStatsPhase[rt->gc.sweepPhase]);
+
+            for (; rt->gc.sweepZone; rt->gc.sweepZone = rt->gc.sweepZone->nextNodeInGroup()) {
+                Zone *zone = rt->gc.sweepZone;
+
+                while (rt->gc.sweepKindIndex < FinalizePhaseLength[rt->gc.sweepPhase]) {
+                    AllocKind kind = FinalizePhases[rt->gc.sweepPhase][rt->gc.sweepKindIndex];
 
                     if (!zone->allocator.arenas.foregroundFinalize(&fop, kind, sliceBudget))
                         return false;  /* Yield to the mutator. */
 
-                    ++rt->gcSweepKindIndex;
+                    ++rt->gc.sweepKindIndex;
                 }
-                rt->gcSweepKindIndex = 0;
+                rt->gc.sweepKindIndex = 0;
             }
-            rt->gcSweepZone = rt->gcCurrentZoneGroup;
+            rt->gc.sweepZone = rt->gc.currentZoneGroup;
         }
 
         /* Remove dead shapes from the shape tree, but don't finalize them yet. */
         {
-            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SHAPE);
-
-            for (; rt->gcSweepZone; rt->gcSweepZone = rt->gcSweepZone->nextNodeInGroup()) {
-                Zone *zone = rt->gcSweepZone;
+            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP_SHAPE);
+
+            for (; rt->gc.sweepZone; rt->gc.sweepZone = rt->gc.sweepZone->nextNodeInGroup()) {
+                Zone *zone = rt->gc.sweepZone;
                 while (ArenaHeader *arena = zone->allocator.arenas.gcShapeArenasToSweep) {
                     for (CellIterUnderGC i(arena); !i.done(); i.next()) {
                         Shape *shape = i.get<Shape>();
                         if (!shape->isMarked())
                             shape->sweep();
                     }
 
                     zone->allocator.arenas.gcShapeArenasToSweep = arena->next;
@@ -3906,116 +3997,116 @@ SweepPhase(JSRuntime *rt, SliceBudget &s
                     if (sliceBudget.isOverBudget())
                         return false;  /* Yield to the mutator. */
                 }
             }
         }
 
         EndSweepingZoneGroup(rt);
         GetNextZoneGroup(rt);
-        if (!rt->gcCurrentZoneGroup)
+        if (!rt->gc.currentZoneGroup)
             return true;  /* We're finished. */
         EndMarkingZoneGroup(rt);
         BeginSweepingZoneGroup(rt);
     }
 }
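
SweepPhase is written so it can stop at any point and pick up again on the next slice: the cursors gc.sweepPhase, gc.sweepZone and gc.sweepKindIndex live on the runtime rather than on the C++ stack, so returning early when the budget runs out loses no progress. A minimal, self-contained sketch of that resumable-loop pattern follows; all names and types here are hypothetical, not SpiderMonkey's.

// Illustrative only, not part of this patch.
#include <cstddef>
#include <vector>

struct SweepCursor {
    size_t phase = 0;   // plays the role of gc.sweepPhase
    size_t zone  = 0;   // plays the role of gc.sweepZone
    size_t kind  = 0;   // plays the role of gc.sweepKindIndex
};

// Returns true when all work is done, false when the budget ran out and the
// caller should yield; the cursor remembers where to resume on the next slice.
bool SweepSlice(SweepCursor &c, const std::vector<size_t> &kindsPerPhase,
                size_t numZones, size_t &budget)
{
    for (; c.phase < kindsPerPhase.size(); ++c.phase, c.zone = 0) {
        for (; c.zone < numZones; ++c.zone, c.kind = 0) {
            for (; c.kind < kindsPerPhase[c.phase]; ++c.kind) {
                if (budget == 0)
                    return false;          // yield to the mutator; state is saved
                --budget;                  // "finalize" one unit of work
            }
        }
    }
    return true;
}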
 
 static void
 EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool lastGC)
 {
-    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
-    FreeOp fop(rt, rt->gcSweepOnBackgroundThread);
-
-    JS_ASSERT_IF(lastGC, !rt->gcSweepOnBackgroundThread);
-
-    JS_ASSERT(rt->gcMarker.isDrained());
-    rt->gcMarker.stop();
+    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_SWEEP);
+    FreeOp fop(rt, rt->gc.sweepOnBackgroundThread);
+
+    JS_ASSERT_IF(lastGC, !rt->gc.sweepOnBackgroundThread);
+
+    JS_ASSERT(rt->gc.marker.isDrained());
+    rt->gc.marker.stop();
 
     /*
      * Recalculate whether GC was full or not as this may have changed due to
      * newly created zones.  Can only change from full to not full.
      */
-    if (rt->gcIsFull) {
+    if (rt->gc.isFull) {
         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
             if (!zone->isCollecting()) {
-                rt->gcIsFull = false;
+                rt->gc.isFull = false;
                 break;
             }
         }
     }
 
     /*
      * If we found any black->gray edges during marking, we completely clear the
      * mark bits of all uncollected zones, or if a reset has occurred, zones that
      * will no longer be collected. This is safe, although it may
      * prevent the cycle collector from collecting some dead objects.
      */
-    if (rt->gcFoundBlackGrayEdges) {
+    if (rt->gc.foundBlackGrayEdges) {
         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
             if (!zone->isCollecting())
                 zone->allocator.arenas.unmarkAll();
         }
     }
 
     {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_DESTROY);
+        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_DESTROY);
 
         /*
          * Sweep script filenames after sweeping functions in the generic loop
          * above. In this way when a scripted function's finalizer destroys the
          * script and calls rt->destroyScriptHook, the hook can still access the
          * script's filename. See bug 323267.
          */
-        if (rt->gcIsFull)
+        if (rt->gc.isFull)
             SweepScriptData(rt);
 
         /* Clear out any small pools that we're hanging on to. */
         if (JSC::ExecutableAllocator *execAlloc = rt->maybeExecAlloc())
             execAlloc->purge();
 
         /*
          * This removes compartments from rt->compartment, so we do it last to make
          * sure we don't miss sweeping any compartments.
          */
         if (!lastGC)
             SweepZones(&fop, lastGC);
 
-        if (!rt->gcSweepOnBackgroundThread) {
+        if (!rt->gc.sweepOnBackgroundThread) {
             /*
              * Destroy arenas after we finished the sweeping so finalizers can
              * safely use IsAboutToBeFinalized(). This is done on the
              * GCHelperThread if possible. We acquire the lock only because
              * Expire needs to unlock it for other callers.
              */
             AutoLockGC lock(rt);
             ExpireChunksAndArenas(rt, gckind == GC_SHRINK);
         }
     }
 
     {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
-
-        if (rt->gcFinalizeCallback)
-            rt->gcFinalizeCallback(&fop, JSFINALIZE_COLLECTION_END, !rt->gcIsFull);
+        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_FINALIZE_END);
+
+        if (rt->gc.finalizeCallback)
+            rt->gc.finalizeCallback(&fop, JSFINALIZE_COLLECTION_END, !rt->gc.isFull);
 
         /* If we finished a full GC, then the gray bits are correct. */
-        if (rt->gcIsFull)
-            rt->gcGrayBitsValid = true;
+        if (rt->gc.isFull)
+            rt->gc.grayBitsValid = true;
     }
 
     /* Set up list of zones for sweeping of background things. */
-    JS_ASSERT(!rt->gcSweepingZones);
+    JS_ASSERT(!rt->gc.sweepingZones);
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
-        zone->gcNextGraphNode = rt->gcSweepingZones;
-        rt->gcSweepingZones = zone;
+        zone->gcNextGraphNode = rt->gc.sweepingZones;
+        rt->gc.sweepingZones = zone;
     }
 
     /* If not sweeping on background thread then we must do it here. */
-    if (!rt->gcSweepOnBackgroundThread) {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_DESTROY);
+    if (!rt->gc.sweepOnBackgroundThread) {
+        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_DESTROY);
 
         SweepBackgroundThings(rt, false);
 
         rt->freeLifoAlloc.freeAll();
 
         /* Ensure the compartments get swept if it's the last GC. */
         if (lastGC)
             SweepZones(&fop, lastGC);
@@ -4029,17 +4120,17 @@ EndSweepPhase(JSRuntime *rt, JSGCInvocat
         }
 
 #ifdef DEBUG
         JS_ASSERT(!zone->isCollecting());
         JS_ASSERT(!zone->wasGCStarted());
 
         for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i) {
             JS_ASSERT_IF(!IsBackgroundFinalized(AllocKind(i)) ||
-                         !rt->gcSweepOnBackgroundThread,
+                         !rt->gc.sweepOnBackgroundThread,
                          !zone->allocator.arenas.arenaListsToSweep[i]);
         }
 #endif
     }
 
 #ifdef DEBUG
     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
         JS_ASSERT(!c->gcIncomingGrayPointers);
@@ -4049,17 +4140,17 @@ EndSweepPhase(JSRuntime *rt, JSGCInvocat
             if (e.front().key().kind != CrossCompartmentKey::StringWrapper)
                 AssertNotOnGrayList(&e.front().value().get().toObject());
         }
     }
 #endif
 
     FinishMarkingValidation(rt);
 
-    rt->gcLastGCTime = PRMJ_Now();
+    rt->gc.lastGCTime = PRMJ_Now();
 }
 
 namespace {
 
 /* ...while this class is to be used only for garbage collection. */
 class AutoGCSession
 {
     JSRuntime *runtime;
@@ -4074,93 +4165,93 @@ class AutoGCSession
 };
 
 } /* anonymous namespace */
 
 /* Start a new heap session. */
 AutoTraceSession::AutoTraceSession(JSRuntime *rt, js::HeapState heapState)
   : lock(rt),
     runtime(rt),
-    prevState(rt->heapState)
-{
-    JS_ASSERT(!rt->noGCOrAllocationCheck);
+    prevState(rt->gc.heapState)
+{
+    JS_ASSERT(!rt->gc.noGCOrAllocationCheck);
     JS_ASSERT(!rt->isHeapBusy());
     JS_ASSERT(heapState != Idle);
 #ifdef JSGC_GENERATIONAL
-    JS_ASSERT_IF(heapState == MajorCollecting, rt->gcNursery.isEmpty());
+    JS_ASSERT_IF(heapState == MajorCollecting, rt->gc.nursery.isEmpty());
 #endif
 
     // Threads with an exclusive context can hit refillFreeList while holding
     // the exclusive access lock. To avoid deadlocking when we try to acquire
     // this lock during GC and the other thread is waiting, make sure we hold
     // the exclusive access lock during GC sessions.
     JS_ASSERT(rt->currentThreadHasExclusiveAccess());
 
     if (rt->exclusiveThreadsPresent()) {
         // Lock the worker thread state when changing the heap state in the
         // presence of exclusive threads, to avoid racing with refillFreeList.
 #ifdef JS_THREADSAFE
         AutoLockWorkerThreadState lock;
-        rt->heapState = heapState;
+        rt->gc.heapState = heapState;
 #else
         MOZ_CRASH();
 #endif
     } else {
-        rt->heapState = heapState;
+        rt->gc.heapState = heapState;
     }
 }
 
 AutoTraceSession::~AutoTraceSession()
 {
     JS_ASSERT(runtime->isHeapBusy());
 
     if (runtime->exclusiveThreadsPresent()) {
 #ifdef JS_THREADSAFE
         AutoLockWorkerThreadState lock;
-        runtime->heapState = prevState;
+        runtime->gc.heapState = prevState;
 
         // Notify any worker threads waiting for the trace session to end.
         WorkerThreadState().notifyAll(GlobalWorkerThreadState::PRODUCER);
 #else
         MOZ_CRASH();
 #endif
     } else {
-        runtime->heapState = prevState;
+        runtime->gc.heapState = prevState;
     }
 }
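
AutoTraceSession saves the previous heap state on entry and restores it on exit, taking the worker-thread lock around the store when exclusive threads may race on it and notifying waiters once the session ends. A minimal stand-alone sketch of that save/restore-under-lock pattern, using hypothetical names and standard-library primitives rather than the SpiderMonkey ones:

// Illustrative only, not part of this patch.
#include <condition_variable>
#include <mutex>

enum class HeapState { Idle, MajorCollecting };

struct SharedHeapState {
    std::mutex lock;
    std::condition_variable cv;
    HeapState state = HeapState::Idle;
};

class AutoHeapSession {
    SharedHeapState &shared;
    HeapState prev;
  public:
    AutoHeapSession(SharedHeapState &s, HeapState next) : shared(s) {
        std::lock_guard<std::mutex> guard(shared.lock);
        prev = shared.state;          // remember what to restore
        shared.state = next;
    }
    ~AutoHeapSession() {
        {
            std::lock_guard<std::mutex> guard(shared.lock);
            shared.state = prev;      // restore the previous heap state
        }
        shared.cv.notify_all();       // wake threads waiting for the session to end
    }
};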
 
 AutoGCSession::AutoGCSession(JSRuntime *rt)
   : runtime(rt),
     session(rt, MajorCollecting),
     canceled(false)
 {
-    runtime->gcIsNeeded = false;
-    runtime->gcInterFrameGC = true;
-
-    runtime->gcNumber++;
+    runtime->gc.isNeeded = false;
+    runtime->gc.interFrameGC = true;
+
+    runtime->gc.number++;
 
     // It's ok if threads other than the main thread have suppressGC set, as
     // they are operating on zones which will not be collected from here.
     JS_ASSERT(!runtime->mainThread.suppressGC);
 }
 
 AutoGCSession::~AutoGCSession()
 {
     if (canceled)
         return;
 
 #ifndef JS_MORE_DETERMINISTIC
-    runtime->gcNextFullGCTime = PRMJ_Now() + GC_IDLE_FULL_SPAN;
+    runtime->gc.nextFullGCTime = PRMJ_Now() + GC_IDLE_FULL_SPAN;
 #endif
 
-    runtime->gcChunkAllocationSinceLastGC = false;
+    runtime->gc.chunkAllocationSinceLastGC = false;
 
 #ifdef JS_GC_ZEAL
     /* Keeping these around after a GC is dangerous. */
-    runtime->gcSelectedForMarking.clearAndFree();
+    runtime->gc.selectedForMarking.clearAndFree();
 #endif
 
     /* Clear gcMallocBytes for all compartments */
     for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
         zone->resetGCMallocBytes();
         zone->unscheduleGC();
     }
 
@@ -4201,68 +4292,68 @@ static void
 IncrementalCollectSlice(JSRuntime *rt,
                         int64_t budget,
                         JS::gcreason::Reason gcReason,
                         JSGCInvocationKind gcKind);
 
 static void
 ResetIncrementalGC(JSRuntime *rt, const char *reason)
 {
-    switch (rt->gcIncrementalState) {
+    switch (rt->gc.incrementalState) {
       case NO_INCREMENTAL:
         return;
 
       case MARK: {
         /* Cancel any ongoing marking. */
         AutoCopyFreeListToArenasForGC copy(rt);
 
-        rt->gcMarker.reset();
-        rt->gcMarker.stop();
+        rt->gc.marker.reset();
+        rt->gc.marker.stop();
 
         for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
             ArrayBufferObject::resetArrayBufferList(c);
             ResetGrayList(c);
         }
 
         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
             JS_ASSERT(zone->isGCMarking());
             zone->setNeedsBarrier(false, Zone::UpdateIon);
             zone->setGCState(Zone::NoGC);
         }
         rt->setNeedsBarrier(false);
         AssertNeedsBarrierFlagsConsistent(rt);
 
-        rt->gcIncrementalState = NO_INCREMENTAL;
-
-        JS_ASSERT(!rt->gcStrictCompartmentChecking);
+        rt->gc.incrementalState = NO_INCREMENTAL;
+
+        JS_ASSERT(!rt->gc.strictCompartmentChecking);
 
         break;
       }
 
       case SWEEP:
-        rt->gcMarker.reset();
+        rt->gc.marker.reset();
 
         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next())
             zone->scheduledForDestruction = false;
 
         /* Finish sweeping the current zone group, then abort. */
-        rt->gcAbortSweepAfterCurrentGroup = true;
+        rt->gc.abortSweepAfterCurrentGroup = true;
         IncrementalCollectSlice(rt, SliceBudget::Unlimited, JS::gcreason::RESET, GC_NORMAL);
 
         {
-            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
-            rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
+            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
+            rt->gc.helperThread.waitBackgroundSweepOrAllocEnd();
         }
         break;
 
       default:
         MOZ_ASSUME_UNREACHABLE("Invalid incremental GC state");
     }
 
-    rt->gcStats.reset(reason);
+    rt->gc.stats.reset(reason);
 
 #ifdef DEBUG
     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
         JS_ASSERT(c->gcLiveArrayBuffers.empty());
 
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         JS_ASSERT(!zone->needsBarrier());
         for (unsigned i = 0; i < FINALIZE_LIMIT; ++i)
@@ -4331,20 +4422,20 @@ AutoGCSlice::~AutoGCSlice()
     AssertNeedsBarrierFlagsConsistent(runtime);
 }
 
 static void
 PushZealSelectedObjects(JSRuntime *rt)
 {
 #ifdef JS_GC_ZEAL
     /* Push selected objects onto the mark stack and clear the list. */
-    for (JSObject **obj = rt->gcSelectedForMarking.begin();
-         obj != rt->gcSelectedForMarking.end(); obj++)
+    for (JSObject **obj = rt->gc.selectedForMarking.begin();
+         obj != rt->gc.selectedForMarking.end(); obj++)
     {
-        MarkObjectUnbarriered(&rt->gcMarker, obj, "selected obj");
+        MarkObjectUnbarriered(&rt->gc.marker, obj, "selected obj");
     }
 #endif
 }
 
 static void
 IncrementalCollectSlice(JSRuntime *rt,
                         int64_t budget,
                         JS::gcreason::Reason reason,
@@ -4352,187 +4443,187 @@ IncrementalCollectSlice(JSRuntime *rt,
 {
     JS_ASSERT(rt->currentThreadHasExclusiveAccess());
 
     AutoCopyFreeListToArenasForGC copy(rt);
     AutoGCSlice slice(rt);
 
     bool lastGC = (reason == JS::gcreason::DESTROY_RUNTIME);
 
-    gc::State initialState = rt->gcIncrementalState;
+    gc::State initialState = rt->gc.incrementalState;
 
     int zeal = 0;
 #ifdef JS_GC_ZEAL
     if (reason == JS::gcreason::DEBUG_GC && budget != SliceBudget::Unlimited) {
         /*
          * Do the incremental collection type specified by zeal mode if the
          * collection was triggered by RunDebugGC() and incremental GC has not
          * been cancelled by ResetIncrementalGC.
          */
         zeal = rt->gcZeal();
     }
 #endif
 
-    JS_ASSERT_IF(rt->gcIncrementalState != NO_INCREMENTAL, rt->gcIsIncremental);
-    rt->gcIsIncremental = budget != SliceBudget::Unlimited;
+    JS_ASSERT_IF(rt->gc.incrementalState != NO_INCREMENTAL, rt->gc.isIncremental);
+    rt->gc.isIncremental = budget != SliceBudget::Unlimited;
 
     if (zeal == ZealIncrementalRootsThenFinish || zeal == ZealIncrementalMarkAllThenFinish) {
         /*
          * Yielding between slices occurs at predetermined points in these modes;
          * the budget is not used.
          */
         budget = SliceBudget::Unlimited;
     }
 
     SliceBudget sliceBudget(budget);
 
-    if (rt->gcIncrementalState == NO_INCREMENTAL) {
-        rt->gcIncrementalState = MARK_ROOTS;
-        rt->gcLastMarkSlice = false;
+    if (rt->gc.incrementalState == NO_INCREMENTAL) {
+        rt->gc.incrementalState = MARK_ROOTS;
+        rt->gc.lastMarkSlice = false;
     }
 
-    if (rt->gcIncrementalState == MARK)
-        AutoGCRooter::traceAllWrappers(&rt->gcMarker);
-
-    switch (rt->gcIncrementalState) {
+    if (rt->gc.incrementalState == MARK)
+        AutoGCRooter::traceAllWrappers(&rt->gc.marker);
+
+    switch (rt->gc.incrementalState) {
 
       case MARK_ROOTS:
         if (!BeginMarkPhase(rt)) {
-            rt->gcIncrementalState = NO_INCREMENTAL;
+            rt->gc.incrementalState = NO_INCREMENTAL;
             return;
         }
 
         if (!lastGC)
             PushZealSelectedObjects(rt);
 
-        rt->gcIncrementalState = MARK;
-
-        if (rt->gcIsIncremental && zeal == ZealIncrementalRootsThenFinish)
+        rt->gc.incrementalState = MARK;
+
+        if (rt->gc.isIncremental && zeal == ZealIncrementalRootsThenFinish)
             break;
 
         /* fall through */
 
       case MARK: {
         /* If we needed delayed marking for gray roots, then collect until done. */
-        if (!rt->gcMarker.hasBufferedGrayRoots()) {
+        if (!rt->gc.marker.hasBufferedGrayRoots()) {
             sliceBudget.reset();
-            rt->gcIsIncremental = false;
+            rt->gc.isIncremental = false;
         }
 
         bool finished = DrainMarkStack(rt, sliceBudget, gcstats::PHASE_MARK);
         if (!finished)
             break;
 
-        JS_ASSERT(rt->gcMarker.isDrained());
-
-        if (!rt->gcLastMarkSlice && rt->gcIsIncremental &&
+        JS_ASSERT(rt->gc.marker.isDrained());
+
+        if (!rt->gc.lastMarkSlice && rt->gc.isIncremental &&
             ((initialState == MARK && zeal != ZealIncrementalRootsThenFinish) ||
              zeal == ZealIncrementalMarkAllThenFinish))
         {
             /*
              * Yield with the aim of starting the sweep in the next
              * slice.  We will need to mark anything new on the stack
              * when we resume, so we stay in MARK state.
              */
-            rt->gcLastMarkSlice = true;
+            rt->gc.lastMarkSlice = true;
             break;
         }
 
-        rt->gcIncrementalState = SWEEP;
+        rt->gc.incrementalState = SWEEP;
 
         /*
          * This runs to completion, but we don't continue if the budget is
          * now exhausted.
          */
         BeginSweepPhase(rt, lastGC);
         if (sliceBudget.isOverBudget())
             break;
 
         /*
          * Always yield here when running in incremental multi-slice zeal
          * mode, so RunDebugGC can reset the slice budget.
          */
-        if (rt->gcIsIncremental && zeal == ZealIncrementalMultipleSlices)
+        if (rt->gc.isIncremental && zeal == ZealIncrementalMultipleSlices)
             break;
 
         /* fall through */
       }
 
       case SWEEP: {
         bool finished = SweepPhase(rt, sliceBudget);
         if (!finished)
             break;
 
         EndSweepPhase(rt, gckind, lastGC);
 
-        if (rt->gcSweepOnBackgroundThread)
-            rt->gcHelperThread.startBackgroundSweep(gckind == GC_SHRINK);
-
-        rt->gcIncrementalState = NO_INCREMENTAL;
+        if (rt->gc.sweepOnBackgroundThread)
+            rt->gc.helperThread.startBackgroundSweep(gckind == GC_SHRINK);
+
+        rt->gc.incrementalState = NO_INCREMENTAL;
         break;
       }
 
       default:
         JS_ASSERT(false);
     }
 }
 
 IncrementalSafety
 gc::IsIncrementalGCSafe(JSRuntime *rt)
 {
     JS_ASSERT(!rt->mainThread.suppressGC);
 
     if (rt->keepAtoms())
         return IncrementalSafety::Unsafe("keepAtoms set");
 
-    if (!rt->gcIncrementalEnabled)
+    if (!rt->gc.incrementalEnabled)
         return IncrementalSafety::Unsafe("incremental permanently disabled");
 
     return IncrementalSafety::Safe();
 }
 
 static void
 BudgetIncrementalGC(JSRuntime *rt, int64_t *budget)
 {
     IncrementalSafety safe = IsIncrementalGCSafe(rt);
     if (!safe) {
         ResetIncrementalGC(rt, safe.reason());
         *budget = SliceBudget::Unlimited;
-        rt->gcStats.nonincremental(safe.reason());
+        rt->gc.stats.nonincremental(safe.reason());
         return;
     }
 
     if (rt->gcMode() != JSGC_MODE_INCREMENTAL) {
         ResetIncrementalGC(rt, "GC mode change");
         *budget = SliceBudget::Unlimited;
-        rt->gcStats.nonincremental("GC mode");
+        rt->gc.stats.nonincremental("GC mode");
         return;
     }
 
     if (rt->isTooMuchMalloc()) {
         *budget = SliceBudget::Unlimited;
-        rt->gcStats.nonincremental("malloc bytes trigger");
+        rt->gc.stats.nonincremental("malloc bytes trigger");
     }
 
     bool reset = false;
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         if (zone->gcBytes >= zone->gcTriggerBytes) {
             *budget = SliceBudget::Unlimited;
-            rt->gcStats.nonincremental("allocation trigger");
+            rt->gc.stats.nonincremental("allocation trigger");
         }
 
-        if (rt->gcIncrementalState != NO_INCREMENTAL &&
+        if (rt->gc.incrementalState != NO_INCREMENTAL &&
             zone->isGCScheduled() != zone->wasGCStarted())
         {
             reset = true;
         }
 
         if (zone->isTooMuchMalloc()) {
             *budget = SliceBudget::Unlimited;
-            rt->gcStats.nonincremental("malloc bytes trigger");
+            rt->gc.stats.nonincremental("malloc bytes trigger");
         }
     }
 
     if (reset)
         ResetIncrementalGC(rt, "zone change");
 }
 
 /*
@@ -4552,33 +4643,33 @@ GCCycle(JSRuntime *rt, bool incremental,
 
     /*
      * As we are about to purge caches and clear the mark bits we must wait for
      * any background finalization to finish. We must also wait for the
      * background allocation to finish so we can avoid taking the GC lock
      * when manipulating the chunks during the GC.
      */
     {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
-        rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
+        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
+        rt->gc.helperThread.waitBackgroundSweepOrAllocEnd();
     }
 
-    State prevState = rt->gcIncrementalState;
+    State prevState = rt->gc.incrementalState;
 
     if (!incremental) {
         /* If non-incremental GC was requested, reset incremental GC. */
         ResetIncrementalGC(rt, "requested");
-        rt->gcStats.nonincremental("requested");
+        rt->gc.stats.nonincremental("requested");
         budget = SliceBudget::Unlimited;
     } else {
         BudgetIncrementalGC(rt, &budget);
     }
 
     /* The GC was reset, so we need a do-over. */
-    if (prevState != NO_INCREMENTAL && rt->gcIncrementalState == NO_INCREMENTAL) {
+    if (prevState != NO_INCREMENTAL && rt->gc.incrementalState == NO_INCREMENTAL) {
         gcsession.cancel();
         return true;
     }
 
     IncrementalCollectSlice(rt, budget, reason, gckind);
     return false;
 }
 
@@ -4615,22 +4706,22 @@ namespace {
 #ifdef JSGC_GENERATIONAL
 class AutoDisableStoreBuffer
 {
     JSRuntime *runtime;
     bool prior;
 
   public:
     AutoDisableStoreBuffer(JSRuntime *rt) : runtime(rt) {
-        prior = rt->gcStoreBuffer.isEnabled();
-        rt->gcStoreBuffer.disable();
+        prior = rt->gc.storeBuffer.isEnabled();
+        rt->gc.storeBuffer.disable();
     }
     ~AutoDisableStoreBuffer() {
         if (prior)
-            runtime->gcStoreBuffer.enable();
+            runtime->gc.storeBuffer.enable();
     }
 };
 #else
 struct AutoDisableStoreBuffer
 {
     AutoDisableStoreBuffer(JSRuntime *) {}
 };
 #endif
@@ -4651,17 +4742,17 @@ Collect(JSRuntime *rt, bool incremental,
 
     if (rt->mainThread.suppressGC)
         return;
 
     TraceLogger *logger = TraceLoggerForMainThread(rt);
     AutoTraceLog logGC(logger, TraceLogger::GC);
 
 #ifdef JS_GC_ZEAL
-    if (rt->gcDeterministicOnly && !IsDeterministicGCReason(reason))
+    if (rt->gc.deterministicOnly && !IsDeterministicGCReason(reason))
         return;
 #endif
 
     JS_ASSERT_IF(!incremental || budget != SliceBudget::Unlimited, JSGC_INCREMENTAL);
 
     AutoStopVerifyingBarriers av(rt, reason == JS::gcreason::SHUTDOWN_CC ||
                                      reason == JS::gcreason::DESTROY_RUNTIME);
 
@@ -4670,74 +4761,74 @@ Collect(JSRuntime *rt, bool incremental,
     int zoneCount = 0;
     int compartmentCount = 0;
     int collectedCount = 0;
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         if (rt->gcMode() == JSGC_MODE_GLOBAL)
             zone->scheduleGC();
 
         /* This is a heuristic to avoid resets. */
-        if (rt->gcIncrementalState != NO_INCREMENTAL && zone->needsBarrier())
+        if (rt->gc.incrementalState != NO_INCREMENTAL && zone->needsBarrier())
             zone->scheduleGC();
 
         zoneCount++;
         if (zone->isGCScheduled())
             collectedCount++;
     }
 
     for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next())
         compartmentCount++;
 
-    rt->gcShouldCleanUpEverything = ShouldCleanUpEverything(rt, reason, gckind);
+    rt->gc.shouldCleanUpEverything = ShouldCleanUpEverything(rt, reason, gckind);
 
     bool repeat = false;
     do {
         MinorGC(rt, reason);
 
         /*
          * Marking can trigger many incidental post barriers, some of them for
          * objects which are not going to be live after the GC.
          */
         AutoDisableStoreBuffer adsb(rt);
 
-        gcstats::AutoGCSlice agc(rt->gcStats, collectedCount, zoneCount, compartmentCount, reason);
+        gcstats::AutoGCSlice agc(rt->gc.stats, collectedCount, zoneCount, compartmentCount, reason);
 
         /*
          * Let the API user decide to defer a GC if it wants to (unless this
          * is the last context). Invoke the callback regardless.
          */
-        if (rt->gcIncrementalState == NO_INCREMENTAL) {
-            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_GC_BEGIN);
-            if (JSGCCallback callback = rt->gcCallback)
-                callback(rt, JSGC_BEGIN, rt->gcCallbackData);
+        if (rt->gc.incrementalState == NO_INCREMENTAL) {
+            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_GC_BEGIN);
+            if (JSGCCallback callback = rt->gc.callback)
+                callback(rt, JSGC_BEGIN, rt->gc.callbackData);
         }
 
-        rt->gcPoke = false;
+        rt->gc.poke = false;
         bool wasReset = GCCycle(rt, incremental, budget, gckind, reason);
 
-        if (rt->gcIncrementalState == NO_INCREMENTAL) {
-            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_GC_END);
-            if (JSGCCallback callback = rt->gcCallback)
-                callback(rt, JSGC_END, rt->gcCallbackData);
+        if (rt->gc.incrementalState == NO_INCREMENTAL) {
+            gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_GC_END);
+            if (JSGCCallback callback = rt->gc.callback)
+                callback(rt, JSGC_END, rt->gc.callbackData);
         }
 
         /* Need to re-schedule all zones for GC. */
-        if (rt->gcPoke && rt->gcShouldCleanUpEverything)
+        if (rt->gc.poke && rt->gc.shouldCleanUpEverything)
             JS::PrepareForFullGC(rt);
 
         /*
          * If we reset an existing GC, we need to start a new one. Also, we
          * repeat GCs that happen during shutdown (the gc.shouldCleanUpEverything
          * case) until we can be sure that no additional garbage is created
          * (which typically happens if roots are dropped during finalizers).
          */
-        repeat = (rt->gcPoke && rt->gcShouldCleanUpEverything) || wasReset;
+        repeat = (rt->gc.poke && rt->gc.shouldCleanUpEverything) || wasReset;
     } while (repeat);
 
-    if (rt->gcIncrementalState == NO_INCREMENTAL) {
+    if (rt->gc.incrementalState == NO_INCREMENTAL) {
 #ifdef JS_THREADSAFE
         EnqueuePendingParseTasksAfterGC(rt);
 #endif
     }
 }
 
 void
 js::GC(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason)
@@ -4746,20 +4837,20 @@ js::GC(JSRuntime *rt, JSGCInvocationKind
 }
 
 void
 js::GCSlice(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis)
 {
     int64_t sliceBudget;
     if (millis)
         sliceBudget = SliceBudget::TimeBudget(millis);
-    else if (rt->gcHighFrequencyGC && rt->gcDynamicMarkSlice)
-        sliceBudget = rt->gcSliceBudget * IGC_MARK_SLICE_MULTIPLIER;
+    else if (rt->gc.highFrequencyGC && rt->gc.dynamicMarkSlice)
+        sliceBudget = rt->gc.sliceBudget * IGC_MARK_SLICE_MULTIPLIER;
     else
-        sliceBudget = rt->gcSliceBudget;
+        sliceBudget = rt->gc.sliceBudget;
 
     Collect(rt, true, sliceBudget, gckind, reason);
 }
 
 void
 js::GCFinalSlice(JSRuntime *rt, JSGCInvocationKind gckind, JS::gcreason::Reason reason)
 {
     Collect(rt, true, SliceBudget::Unlimited, gckind, reason);
@@ -4800,72 +4891,72 @@ JS_FRIEND_API(void)
 JS::ShrinkGCBuffers(JSRuntime *rt)
 {
     AutoLockGC lock(rt);
     JS_ASSERT(!rt->isHeapBusy());
 
     if (!rt->useHelperThreads())
         ExpireChunksAndArenas(rt, true);
     else
-        rt->gcHelperThread.startBackgroundShrink();
+        rt->gc.helperThread.startBackgroundShrink();
 }
 
 void
 js::MinorGC(JSRuntime *rt, JS::gcreason::Reason reason)
 {
 #ifdef JSGC_GENERATIONAL
     TraceLogger *logger = TraceLoggerForMainThread(rt);
     AutoTraceLog logMinorGC(logger, TraceLogger::MinorGC);
-    rt->gcNursery.collect(rt, reason, nullptr);
-    JS_ASSERT_IF(!rt->mainThread.suppressGC, rt->gcNursery.isEmpty());
+    rt->gc.nursery.collect(rt, reason, nullptr);
+    JS_ASSERT_IF(!rt->mainThread.suppressGC, rt->gc.nursery.isEmpty());
 #endif
 }
 
 void
 js::MinorGC(JSContext *cx, JS::gcreason::Reason reason)
 {
     // An alternative to the runtime-taking form above that allows marking type
     // objects as needing pretenuring.
 #ifdef JSGC_GENERATIONAL
     TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
     AutoTraceLog logMinorGC(logger, TraceLogger::MinorGC);
 
     Nursery::TypeObjectList pretenureTypes;
     JSRuntime *rt = cx->runtime();
-    rt->gcNursery.collect(cx->runtime(), reason, &pretenureTypes);
+    rt->gc.nursery.collect(cx->runtime(), reason, &pretenureTypes);
     for (size_t i = 0; i < pretenureTypes.length(); i++) {
         if (pretenureTypes[i]->canPreTenure())
             pretenureTypes[i]->setShouldPreTenure(cx);
     }
-    JS_ASSERT_IF(!rt->mainThread.suppressGC, rt->gcNursery.isEmpty());
+    JS_ASSERT_IF(!rt->mainThread.suppressGC, rt->gc.nursery.isEmpty());
 #endif
 }
 
 void
 js::gc::GCIfNeeded(JSContext *cx)
 {
     JSRuntime *rt = cx->runtime();
 
 #ifdef JSGC_GENERATIONAL
     /*
      * In case of store buffer overflow perform minor GC first so that the
      * correct reason is seen in the logs.
      */
-    if (rt->gcStoreBuffer.isAboutToOverflow())
+    if (rt->gc.storeBuffer.isAboutToOverflow())
         MinorGC(cx, JS::gcreason::FULL_STORE_BUFFER);
 #endif
 
-    if (rt->gcIsNeeded)
-        GCSlice(rt, GC_NORMAL, rt->gcTriggerReason);
+    if (rt->gc.isNeeded)
+        GCSlice(rt, GC_NORMAL, rt->gc.triggerReason);
 }
 
 void
 js::gc::FinishBackgroundFinalize(JSRuntime *rt)
 {
-    rt->gcHelperThread.waitBackgroundSweepEnd();
+    rt->gc.helperThread.waitBackgroundSweepEnd();
 }
 
 AutoFinishGC::AutoFinishGC(JSRuntime *rt)
 {
     if (JS::IsIncrementalGCInProgress(rt)) {
         JS::PrepareForIncrementalGC(rt);
         JS::FinishIncrementalGC(rt, JS::gcreason::API);
     }
@@ -4911,17 +5002,17 @@ js::NewCompartment(JSContext *cx, Zone *
 
     AutoLockGC lock(rt);
 
     if (!zone->compartments.append(compartment.get())) {
         js_ReportOutOfMemory(cx);
         return nullptr;
     }
 
-    if (zoneHolder && !rt->zones.append(zone)) {
+    if (zoneHolder && !rt->gc.zones.append(zone)) {
         js_ReportOutOfMemory(cx);
         return nullptr;
     }
 
     zoneHolder.forget();
     return compartment.forget();
 }
 
@@ -4991,73 +5082,73 @@ gc::RunDebugGC(JSContext *cx)
         return MinorGC(rt, JS::gcreason::DEBUG_GC);
 
     PrepareForDebugGC(cx->runtime());
 
     if (type == ZealIncrementalRootsThenFinish ||
         type == ZealIncrementalMarkAllThenFinish ||
         type == ZealIncrementalMultipleSlices)
     {
-        js::gc::State initialState = rt->gcIncrementalState;
+        js::gc::State initialState = rt->gc.incrementalState;
         int64_t budget;
         if (type == ZealIncrementalMultipleSlices) {
             /*
              * Start with a small slice limit and double it every slice. This
              * ensures that we get multiple slices, and collection runs to
              * completion.
              */
             if (initialState == NO_INCREMENTAL)
-                rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
+                rt->gc.incrementalLimit = rt->gc.zealFrequency / 2;
             else
-                rt->gcIncrementalLimit *= 2;
-            budget = SliceBudget::WorkBudget(rt->gcIncrementalLimit);
+                rt->gc.incrementalLimit *= 2;
+            budget = SliceBudget::WorkBudget(rt->gc.incrementalLimit);
         } else {
             // This triggers incremental GC but is actually ignored by IncrementalCollectSlice.
             budget = SliceBudget::WorkBudget(1);
         }
 
         Collect(rt, true, budget, GC_NORMAL, JS::gcreason::DEBUG_GC);
 
         /*
          * For multi-slice zeal, reset the slice size when we get to the sweep
          * phase.
          */
         if (type == ZealIncrementalMultipleSlices &&
-            initialState == MARK && rt->gcIncrementalState == SWEEP)
+            initialState == MARK && rt->gc.incrementalState == SWEEP)
         {
-            rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
+            rt->gc.incrementalLimit = rt->gc.zealFrequency / 2;
         }
     } else {
         Collect(rt, false, SliceBudget::Unlimited, GC_NORMAL, JS::gcreason::DEBUG_GC);
     }
 
 #endif
 }
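
With ZealIncrementalMultipleSlices the work budget starts at half the zeal frequency and doubles on every slice, which guarantees both that several slices happen and that the collection eventually finishes. A small stand-alone illustration of the resulting budget sequence, assuming a hypothetical zeal frequency of 100 work units:

// Illustrative only, not part of this patch.
#include <cstdio>

int main()
{
    int limit = 100 / 2;                                 // initialState == NO_INCREMENTAL
    for (int slice = 1; slice <= 5; slice++) {
        printf("slice %d: budget %d\n", slice, limit);   // 50, 100, 200, 400, 800
        limit *= 2;                                      // doubled for the next slice
    }
    return 0;
}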
 
 void
 gc::SetDeterministicGC(JSContext *cx, bool enabled)
 {
 #ifdef JS_GC_ZEAL
     JSRuntime *rt = cx->runtime();
-    rt->gcDeterministicOnly = enabled;
+    rt->gc.deterministicOnly = enabled;
 #endif
 }
 
 void
 gc::SetValidateGC(JSContext *cx, bool enabled)
 {
     JSRuntime *rt = cx->runtime();
-    rt->gcValidate = enabled;
+    rt->gc.validate = enabled;
 }
 
 void
 gc::SetFullCompartmentChecks(JSContext *cx, bool enabled)
 {
     JSRuntime *rt = cx->runtime();
-    rt->gcFullCompartmentChecks = enabled;
+    rt->gc.fullCompartmentChecks = enabled;
 }
 
 #ifdef DEBUG
 
 /* Should only be called manually under gdb */
 void PreventGCDuringInteractiveDebug()
 {
     TlsPerThreadData.get()->suppressGC++;
@@ -5135,51 +5226,51 @@ js::ReleaseAllJITCode(FreeOp *fop)
  * StopPCCountProfiling     None      Query     Query
  * PurgePCCounts            None      None      None
  */
 
 static void
 ReleaseScriptCounts(FreeOp *fop)
 {
     JSRuntime *rt = fop->runtime();
-    JS_ASSERT(rt->scriptAndCountsVector);
-
-    ScriptAndCountsVector &vec = *rt->scriptAndCountsVector;
+    JS_ASSERT(rt->gc.scriptAndCountsVector);
+
+    ScriptAndCountsVector &vec = *rt->gc.scriptAndCountsVector;
 
     for (size_t i = 0; i < vec.length(); i++)
         vec[i].scriptCounts.destroy(fop);
 
-    fop->delete_(rt->scriptAndCountsVector);
-    rt->scriptAndCountsVector = nullptr;
+    fop->delete_(rt->gc.scriptAndCountsVector);
+    rt->gc.scriptAndCountsVector = nullptr;
 }
 
 JS_FRIEND_API(void)
 js::StartPCCountProfiling(JSContext *cx)
 {
     JSRuntime *rt = cx->runtime();
 
     if (rt->profilingScripts)
         return;
 
-    if (rt->scriptAndCountsVector)
+    if (rt->gc.scriptAndCountsVector)
         ReleaseScriptCounts(rt->defaultFreeOp());
 
     ReleaseAllJITCode(rt->defaultFreeOp());
 
     rt->profilingScripts = true;
 }
 
 JS_FRIEND_API(void)
 js::StopPCCountProfiling(JSContext *cx)
 {
     JSRuntime *rt = cx->runtime();
 
     if (!rt->profilingScripts)
         return;
-    JS_ASSERT(!rt->scriptAndCountsVector);
+    JS_ASSERT(!rt->gc.scriptAndCountsVector);
 
     ReleaseAllJITCode(rt->defaultFreeOp());
 
     ScriptAndCountsVector *vec = cx->new_<ScriptAndCountsVector>(SystemAllocPolicy());
     if (!vec)
         return;
 
     for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
@@ -5191,25 +5282,25 @@ js::StopPCCountProfiling(JSContext *cx)
                 sac.scriptCounts.set(script->releaseScriptCounts());
                 if (!vec->append(sac))
                     sac.scriptCounts.destroy(rt->defaultFreeOp());
             }
         }
     }
 
     rt->profilingScripts = false;
-    rt->scriptAndCountsVector = vec;
+    rt->gc.scriptAndCountsVector = vec;
 }
 
 JS_FRIEND_API(void)
 js::PurgePCCounts(JSContext *cx)
 {
     JSRuntime *rt = cx->runtime();
 
-    if (!rt->scriptAndCountsVector)
+    if (!rt->gc.scriptAndCountsVector)
         return;
     JS_ASSERT(!rt->profilingScripts);
 
     ReleaseScriptCounts(rt->defaultFreeOp());
 }
 
 void
 js::PurgeJITCaches(Zone *zone)
@@ -5294,37 +5385,37 @@ ArenaLists::containsArena(JSRuntime *rt,
             return true;
     }
     return false;
 }
 
 
 AutoMaybeTouchDeadZones::AutoMaybeTouchDeadZones(JSContext *cx)
   : runtime(cx->runtime()),
-    markCount(runtime->gcObjectsMarkedInDeadZones),
+    markCount(runtime->gc.objectsMarkedInDeadZones),
     inIncremental(JS::IsIncrementalGCInProgress(runtime)),
-    manipulatingDeadZones(runtime->gcManipulatingDeadZones)
-{
-    runtime->gcManipulatingDeadZones = true;
+    manipulatingDeadZones(runtime->gc.manipulatingDeadZones)
+{
+    runtime->gc.manipulatingDeadZones = true;
 }
 
 AutoMaybeTouchDeadZones::AutoMaybeTouchDeadZones(JSObject *obj)
   : runtime(obj->compartment()->runtimeFromMainThread()),
-    markCount(runtime->gcObjectsMarkedInDeadZones),
+    markCount(runtime->gc.objectsMarkedInDeadZones),
     inIncremental(JS::IsIncrementalGCInProgress(runtime)),
-    manipulatingDeadZones(runtime->gcManipulatingDeadZones)
-{
-    runtime->gcManipulatingDeadZones = true;
+    manipulatingDeadZones(runtime->gc.manipulatingDeadZones)
+{
+    runtime->gc.manipulatingDeadZones = true;
 }
 
 AutoMaybeTouchDeadZones::~AutoMaybeTouchDeadZones()
 {
-    runtime->gcManipulatingDeadZones = manipulatingDeadZones;
-
-    if (inIncremental && runtime->gcObjectsMarkedInDeadZones != markCount) {
+    runtime->gc.manipulatingDeadZones = manipulatingDeadZones;
+
+    if (inIncremental && runtime->gc.objectsMarkedInDeadZones != markCount) {
         JS::PrepareForFullGC(runtime);
         js::GC(runtime, GC_NORMAL, JS::gcreason::TRANSPLANT);
     }
 }
 
 AutoSuppressGC::AutoSuppressGC(ExclusiveContext *cx)
   : suppressGC_(cx->perThreadData->suppressGC)
 {
@@ -5347,17 +5438,17 @@ bool
 js::UninlinedIsInsideNursery(JSRuntime *rt, const void *thing)
 {
     return IsInsideNursery(rt, thing);
 }
 
 #ifdef DEBUG
 AutoDisableProxyCheck::AutoDisableProxyCheck(JSRuntime *rt
                                              MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
-  : count(rt->gcDisableStrictProxyCheckingCount)
+  : count(rt->gc.disableStrictProxyCheckingCount)
 {
     MOZ_GUARD_OBJECT_NOTIFIER_INIT;
     count++;
 }
 
 JS_FRIEND_API(void)
 JS::AssertGCThingMustBeTenured(JSObject *obj)
 {
@@ -5366,39 +5457,39 @@ JS::AssertGCThingMustBeTenured(JSObject 
 }
 
 JS_FRIEND_API(size_t)
 JS::GetGCNumber()
 {
     JSRuntime *rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
     if (!rt)
         return 0;
-    return rt->gcNumber;
+    return rt->gc.number;
 }
 
 JS::AutoAssertNoGC::AutoAssertNoGC()
   : runtime(nullptr), gcNumber(0)
 {
     js::PerThreadData *data = js::TlsPerThreadData.get();
     if (data) {
         /*
          * GCs from off-thread will always assert, so off-thread is implicitly
          * AutoAssertNoGC. We still need to allow AutoAssertNoGC to be used in
          * code that works from both threads, however. We also use this to
          * annotate the off thread run loops.
          */
         runtime = data->runtimeIfOnOwnerThread();
         if (runtime)
-            gcNumber = runtime->gcNumber;
+            gcNumber = runtime->gc.number;
     }
 }
 
 JS::AutoAssertNoGC::AutoAssertNoGC(JSRuntime *rt)
-  : runtime(rt), gcNumber(rt->gcNumber)
+  : runtime(rt), gcNumber(rt->gc.number)
 {
 }
 
 JS::AutoAssertNoGC::~AutoAssertNoGC()
 {
     if (runtime)
-        MOZ_ASSERT(gcNumber == runtime->gcNumber, "GC ran inside an AutoAssertNoGC scope.");
+        MOZ_ASSERT(gcNumber == runtime->gc.number, "GC ran inside an AutoAssertNoGC scope.");
 }
 #endif
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -45,16 +45,30 @@ unsigned GetCPUCount();
 
 enum HeapState {
     Idle,             // doing nothing with the GC heap
     Tracing,          // tracing the GC heap without collecting, e.g. IterateCompartments()
     MajorCollecting,  // doing a GC of the major heap
     MinorCollecting   // doing a GC of the minor heap (nursery)
 };
 
+struct ExtraTracer {
+    JSTraceDataOp op;
+    void *data;
+
+    ExtraTracer()
+      : op(nullptr), data(nullptr)
+        {}
+    ExtraTracer(JSTraceDataOp op, void *data)
+      : op(op), data(data)
+        {}
+};
+
+typedef Vector<ScriptAndCounts, 0, SystemAllocPolicy> ScriptAndCountsVector;
+
 namespace jit {
     class JitCode;
 }
 
 namespace gc {
 
 enum State {
     NO_INCREMENTAL,
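
The header change above is the visible tip of the refactoring: fields that used to sit directly on JSRuntime (gcNumber, gcPoke, gcIsNeeded and the rest) now live on a GC-state object reached as rt->gc. A minimal, self-contained model of that pattern, with all types and names hypothetical rather than taken from gc/GCRuntime.h:

// Illustrative only, not part of this patch.
#include <cstdint>

struct GCState {
    uint64_t number   = 0;     // stands in for gc.number   (was rt->gcNumber)
    bool     isNeeded = false; // stands in for gc.isNeeded (was rt->gcIsNeeded)
    bool     poke     = false; // stands in for gc.poke     (was rt->gcPoke)
};

struct Runtime {
    GCState gc;                // callers now write rt->gc.number, rt->gc.poke, ...
};

int main()
{
    Runtime rt;
    Runtime *prt = &rt;
    prt->gc.number++;          // previously: prt->gcNumber++
    prt->gc.poke = true;       // previously: prt->gcPoke = true
    return 0;
}

The real GC-state class carries far more (the marker, nursery, store buffer, statistics, sweep cursors and so on, as the renames throughout this patch show), but every call site changes in the same mechanical way.
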
--- a/js/src/jsgcinlines.h
+++ b/js/src/jsgcinlines.h
@@ -22,18 +22,18 @@ class Shape;
  */
 struct AutoMarkInDeadZone
 {
     AutoMarkInDeadZone(JS::Zone *zone)
       : zone(zone),
         scheduled(zone->scheduledForDestruction)
     {
         JSRuntime *rt = zone->runtimeFromMainThread();
-        if (rt->gcManipulatingDeadZones && zone->scheduledForDestruction) {
-            rt->gcObjectsMarkedInDeadZones++;
+        if (rt->gc.manipulatingDeadZones && zone->scheduledForDestruction) {
+            rt->gc.objectsMarkedInDeadZones++;
             zone->scheduledForDestruction = false;
         }
     }
 
     ~AutoMarkInDeadZone() {
         zone->scheduledForDestruction = scheduled;
     }
 
@@ -101,22 +101,22 @@ GetGCThingTraceKind(const void *thing)
         return JSTRACE_OBJECT;
 #endif
     return MapAllocToTraceKind(cell->tenuredGetAllocKind());
 }
 
 static inline void
 GCPoke(JSRuntime *rt)
 {
-    rt->gcPoke = true;
+    rt->gc.poke = true;
 
 #ifdef JS_GC_ZEAL
     /* Schedule a GC to happen "soon" after a GC poke. */
     if (rt->gcZeal() == js::gc::ZealPokeValue)
-        rt->gcNextScheduled = 1;
+        rt->gc.nextScheduled = 1;
 #endif
 }
 
 class ArenaIter
 {
     ArenaHeader *aheader;
     ArenaHeader *remainingHeader;
 
@@ -240,17 +240,17 @@ class CellIterImpl
     }
 };
 
 class CellIterUnderGC : public CellIterImpl
 {
   public:
     CellIterUnderGC(JS::Zone *zone, AllocKind kind) {
 #ifdef JSGC_GENERATIONAL
-        JS_ASSERT(zone->runtimeFromAnyThread()->gcNursery.isEmpty());
+        JS_ASSERT(zone->runtimeFromAnyThread()->gc.nursery.isEmpty());
 #endif
         JS_ASSERT(zone->runtimeFromAnyThread()->isHeapBusy());
         init(zone, kind);
     }
 
     CellIterUnderGC(ArenaHeader *aheader) {
         JS_ASSERT(aheader->zone->runtimeFromAnyThread()->isHeapBusy());
         init(aheader);
@@ -279,30 +279,30 @@ class CellIter : public CellIterImpl
             zone->allocator.arenas.needBackgroundFinalizeWait(kind))
         {
             gc::FinishBackgroundFinalize(zone->runtimeFromMainThread());
         }
 
 #ifdef JSGC_GENERATIONAL
         /* Evict the nursery before iterating so we can see all things. */
         JSRuntime *rt = zone->runtimeFromMainThread();
-        if (!rt->gcNursery.isEmpty())
+        if (!rt->gc.nursery.isEmpty())
             MinorGC(rt, JS::gcreason::EVICT_NURSERY);
 #endif
 
         if (lists->isSynchronizedFreeList(kind)) {
             lists = nullptr;
         } else {
             JS_ASSERT(!zone->runtimeFromMainThread()->isHeapBusy());
             lists->copyFreeListToArena(kind);
         }
 
 #ifdef DEBUG
         /* Assert that no GCs can occur while a CellIter is live. */
-        counter = &zone->runtimeFromAnyThread()->noGCOrAllocationCheck;
+        counter = &zone->runtimeFromAnyThread()->gc.noGCOrAllocationCheck;
         ++*counter;
 #endif
 
         init(zone, kind);
     }
 
     ~CellIter() {
 #ifdef DEBUG
@@ -348,17 +348,17 @@ typedef CompartmentsIterT<GCZonesIter> G
 /* Iterates over all zones in the current zone group. */
 class GCZoneGroupIter {
   private:
     JS::Zone *current;
 
   public:
     GCZoneGroupIter(JSRuntime *rt) {
         JS_ASSERT(rt->isHeapBusy());
-        current = rt->gcCurrentZoneGroup;
+        current = rt->gc.currentZoneGroup;
     }
 
     bool done() const { return !current; }
 
     void next() {
         JS_ASSERT(!done());
         current = current->nextNodeInGroup();
     }
@@ -382,17 +382,17 @@ typedef CompartmentsIterT<GCZoneGroupIte
 template <AllowGC allowGC>
 inline JSObject *
 TryNewNurseryObject(ThreadSafeContext *cxArg, size_t thingSize, size_t nDynamicSlots)
 {
     JSContext *cx = cxArg->asJSContext();
 
     JS_ASSERT(!IsAtomsCompartment(cx->compartment()));
     JSRuntime *rt = cx->runtime();
-    Nursery &nursery = rt->gcNursery;
+    Nursery &nursery = rt->gc.nursery;
     JSObject *obj = nursery.allocateObject(cx, thingSize, nDynamicSlots);
     if (obj)
         return obj;
     if (allowGC && !rt->mainThread.suppressGC) {
         MinorGC(cx, JS::gcreason::OUT_OF_NURSERY);
 
         /* Exceeding gcMaxBytes while tenuring can disable the Nursery. */
         if (nursery.isEnabled()) {
@@ -422,17 +422,17 @@ CheckAllocatorState(ThreadSafeContext *c
     JSContext *ncx = cx->asJSContext();
     JSRuntime *rt = ncx->runtime();
 #if defined(JS_GC_ZEAL) || defined(DEBUG)
     JS_ASSERT_IF(rt->isAtomsCompartment(ncx->compartment()),
                  kind == FINALIZE_STRING ||
                  kind == FINALIZE_FAT_INLINE_STRING ||
                  kind == FINALIZE_JITCODE);
     JS_ASSERT(!rt->isHeapBusy());
-    JS_ASSERT(!rt->noGCOrAllocationCheck);
+    JS_ASSERT(!rt->gc.noGCOrAllocationCheck);
 #endif
 
     // For testing out of memory conditions
     if (!PossiblyFail()) {
         js_ReportOutOfMemory(cx);
         return false;
     }
 
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -3912,17 +3912,17 @@ ExclusiveContext::getNewType(const Class
     if (!type)
         return nullptr;
 
     if (!newTypeObjects.add(p, TypeObjectWithNewScriptEntry(type, fun)))
         return nullptr;
 
 #ifdef JSGC_GENERATIONAL
     if (proto.isObject() && hasNursery() && nursery().isInside(proto.toObject())) {
-        asJSContext()->runtime()->gcStoreBuffer.putGeneric(
+        asJSContext()->runtime()->gc.storeBuffer.putGeneric(
             NewTypeObjectsSetRef(&newTypeObjects, clasp, proto.toObject(), fun));
     }
 #endif
 
     if (proto.isObject()) {
         RootedObject obj(this, proto.toObject());
 
         if (fun)
@@ -4250,17 +4250,17 @@ TypeCompartment::sweep(FreeOp *fop)
                 e.rekeyFront(key);
         }
     }
 }
 
 void
 JSCompartment::sweepNewTypeObjectTable(TypeObjectWithNewScriptSet &table)
 {
-    gcstats::AutoPhase ap(runtimeFromMainThread()->gcStats,
+    gcstats::AutoPhase ap(runtimeFromMainThread()->gc.stats,
                           gcstats::PHASE_SWEEP_TABLES_TYPE_OBJECT);
 
     JS_ASSERT(zone()->isGCSweeping());
     if (table.initialized()) {
         for (TypeObjectWithNewScriptSet::Enum e(table); !e.empty(); e.popFront()) {
             TypeObjectWithNewScriptEntry entry = e.front();
             if (IsTypeObjectAboutToBeFinalized(entry.object.unsafeGet())) {
                 e.removeFront();
@@ -4395,17 +4395,17 @@ TypeZone::sweep(FreeOp *fop, bool releas
                     output.setSweepIndex(newCompilerOutputCount++);
                 }
             }
         }
     }
 #endif
 
     {
-        gcstats::AutoPhase ap2(rt->gcStats, gcstats::PHASE_DISCARD_TI);
+        gcstats::AutoPhase ap2(rt->gc.stats, gcstats::PHASE_DISCARD_TI);
 
         for (CellIterUnderGC i(zone(), FINALIZE_SCRIPT); !i.done(); i.next()) {
             JSScript *script = i.get<JSScript>();
             if (script->types) {
                 types::TypeScript::Sweep(fop, script, oom);
 
                 if (releaseTypes) {
                     if (script->hasParallelIonScript()) {
@@ -4437,17 +4437,17 @@ TypeZone::sweep(FreeOp *fop, bool releas
                     if (script->hasParallelIonScript())
                         script->parallelIonScript()->recompileInfoRef().shouldSweep(*this);
                 }
             }
         }
     }
 
     {
-        gcstats::AutoPhase ap2(rt->gcStats, gcstats::PHASE_SWEEP_TYPES);
+        gcstats::AutoPhase ap2(rt->gc.stats, gcstats::PHASE_SWEEP_TYPES);
 
         for (gc::CellIterUnderGC iter(zone(), gc::FINALIZE_TYPE_OBJECT);
              !iter.done(); iter.next())
         {
             TypeObject *object = iter.get<TypeObject>();
             object->sweep(fop, oom);
         }
 
@@ -4465,17 +4465,17 @@ TypeZone::sweep(FreeOp *fop, bool releas
                 (*compilerOutputs)[sweepIndex++] = output;
             }
         }
         JS_ASSERT(sweepIndex == newCompilerOutputCount);
         JS_ALWAYS_TRUE(compilerOutputs->resize(newCompilerOutputCount));
     }
 
     {
-        gcstats::AutoPhase ap2(rt->gcStats, gcstats::PHASE_FREE_TI_ARENA);
+        gcstats::AutoPhase ap2(rt->gc.stats, gcstats::PHASE_FREE_TI_ARENA);
         rt->freeLifoAlloc.transferFrom(&oldAlloc);
     }
 }
 
 void
 TypeZone::clearAllNewScriptAddendumsOnOOM()
 {
     for (gc::CellIterUnderGC iter(zone(), gc::FINALIZE_TYPE_OBJECT);
--- a/js/src/jsiter.cpp
+++ b/js/src/jsiter.cpp
@@ -1498,17 +1498,17 @@ GeneratorWriteBarrierPre(JSContext *cx, 
     if (zone->needsBarrier())
         MarkGeneratorFrame(zone->barrierTracer(), gen);
 }
 
 static void
 GeneratorWriteBarrierPost(JSContext *cx, JSGenerator *gen)
 {
 #ifdef JSGC_GENERATIONAL
-    cx->runtime()->gcStoreBuffer.putWholeCell(gen->obj);
+    cx->runtime()->gc.storeBuffer.putWholeCell(gen->obj);
 #endif
 }
 
 /*
  * Only mark generator frames/slots when the generator is not active on the
  * stack or closed. Barriers when copying onto the stack or closing preserve
  * gc invariants.
  */
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -1285,17 +1285,17 @@ NewObject(ExclusiveContext *cx, types::T
                                     !cx->compartment()->options().getTrace();
     if (clasp->trace &&
         !globalWithoutCustomTrace &&
         !(clasp->flags & JSCLASS_IMPLEMENTS_BARRIERS))
     {
         if (!cx->shouldBeJSContext())
             return nullptr;
         JSRuntime *rt = cx->asJSContext()->runtime();
-        rt->gcIncrementalEnabled = false;
+        rt->gc.incrementalEnabled = false;
 
 #ifdef DEBUG
         if (rt->gcMode() == JSGC_MODE_INCREMENTAL) {
             fprintf(stderr,
                     "The class %s has a trace hook but does not declare the\n"
                     "JSCLASS_IMPLEMENTS_BARRIERS flag. Please ensure that it correctly\n"
                     "implements write barriers and then set the flag.\n",
                     clasp->name);
@@ -2705,30 +2705,30 @@ JSObject::setSlotSpan(ThreadSafeContext 
     return true;
 }
 
 static HeapSlot *
 AllocateSlots(ThreadSafeContext *cx, JSObject *obj, uint32_t nslots)
 {
 #ifdef JSGC_GENERATIONAL
     if (cx->isJSContext())
-        return cx->asJSContext()->runtime()->gcNursery.allocateSlots(cx->asJSContext(), obj, nslots);
+        return cx->asJSContext()->runtime()->gc.nursery.allocateSlots(cx->asJSContext(), obj, nslots);
 #endif
     return cx->pod_malloc<HeapSlot>(nslots);
 }
 
 static HeapSlot *
 ReallocateSlots(ThreadSafeContext *cx, JSObject *obj, HeapSlot *oldSlots,
                 uint32_t oldCount, uint32_t newCount)
 {
 #ifdef JSGC_GENERATIONAL
     if (cx->isJSContext()) {
-        return cx->asJSContext()->runtime()->gcNursery.reallocateSlots(cx->asJSContext(),
-                                                                       obj, oldSlots,
-                                                                       oldCount, newCount);
+        return cx->asJSContext()->runtime()->gc.nursery.reallocateSlots(cx->asJSContext(),
+                                                                          obj, oldSlots,
+                                                                          oldCount, newCount);
     }
 #endif
     return (HeapSlot *)cx->realloc_(oldSlots, oldCount * sizeof(HeapSlot),
                                     newCount * sizeof(HeapSlot));
 }
 
 /* static */ bool
 JSObject::growSlots(ThreadSafeContext *cx, HandleObject obj, uint32_t oldCount, uint32_t newCount)
@@ -2793,17 +2793,17 @@ JSObject::growSlots(ThreadSafeContext *c
 }
 
 static void
 FreeSlots(ThreadSafeContext *cx, HeapSlot *slots)
 {
     // Note: threads without a JSContext do not have access to nursery allocated things.
 #ifdef JSGC_GENERATIONAL
     if (cx->isJSContext())
-        return cx->asJSContext()->runtime()->gcNursery.freeSlots(cx->asJSContext(), slots);
+        return cx->asJSContext()->runtime()->gc.nursery.freeSlots(cx->asJSContext(), slots);
 #endif
     js_free(slots);
 }
 
 /* static */ void
 JSObject::shrinkSlots(ThreadSafeContext *cx, HandleObject obj, uint32_t oldCount, uint32_t newCount)
 {
     JS_ASSERT(cx->isThreadLocal(obj));
@@ -3015,31 +3015,31 @@ JSObject::maybeDensifySparseElements(js:
     return ED_OK;
 }
 
 static ObjectElements *
 AllocateElements(ThreadSafeContext *cx, JSObject *obj, uint32_t nelems)
 {
 #ifdef JSGC_GENERATIONAL
     if (cx->isJSContext())
-        return cx->asJSContext()->runtime()->gcNursery.allocateElements(cx->asJSContext(), obj, nelems);
+        return cx->asJSContext()->runtime()->gc.nursery.allocateElements(cx->asJSContext(), obj, nelems);
 #endif
 
     return static_cast<js::ObjectElements *>(cx->malloc_(nelems * sizeof(HeapValue)));
 }
 
 static ObjectElements *
 ReallocateElements(ThreadSafeContext *cx, JSObject *obj, ObjectElements *oldHeader,
                    uint32_t oldCount, uint32_t newCount)
 {
 #ifdef JSGC_GENERATIONAL
     if (cx->isJSContext()) {
-        return cx->asJSContext()->runtime()->gcNursery.reallocateElements(cx->asJSContext(), obj,
-                                                                          oldHeader, oldCount,
-                                                                          newCount);
+        return cx->asJSContext()->runtime()->gc.nursery.reallocateElements(cx->asJSContext(), obj,
+                                                                             oldHeader, oldCount,
+                                                                             newCount);
     }
 #endif
 
     return static_cast<js::ObjectElements *>(cx->realloc_(oldHeader, oldCount * sizeof(HeapSlot),
                                                           newCount * sizeof(HeapSlot)));
 }
 
 bool
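
The slot and element allocators above all follow the same dispatch: when the caller holds a main-thread JSContext the storage comes out of the generational nursery (now reached through rt->gc.nursery), and any other thread falls back to the malloc heap. A minimal standalone sketch of that dispatch, using toy types (ToyNursery, AllocateSlotsSketch and FreeSlotsSketch are illustrative names, not SpiderMonkey API):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    struct ToyNursery {
        uint8_t buffer[4096];   // fixed bump-allocation region
        size_t used = 0;

        void *allocate(size_t nbytes) {
            if (used + nbytes > sizeof(buffer))
                return nullptr;             // nursery full: caller falls back to malloc
            void *p = buffer + used;
            used += nbytes;
            return p;
        }
        bool isInside(const void *p) const {
            const uint8_t *q = static_cast<const uint8_t *>(p);
            return q >= buffer && q < buffer + sizeof(buffer);
        }
    };

    // Mirrors the cx->isJSContext() check above: only a main-thread context
    // has a nursery; helper threads always take the malloc path.
    void *AllocateSlotsSketch(ToyNursery *nursery, size_t nbytes) {
        if (nursery) {
            if (void *p = nursery->allocate(nbytes))
                return p;
        }
        return malloc(nbytes);
    }

    void FreeSlotsSketch(ToyNursery *nursery, void *p) {
        // Nursery storage is reclaimed wholesale by the next minor GC, so
        // only heap-allocated slots are freed explicitly.
        if (nursery && nursery->isInside(p))
            return;
        free(p);
    }

    int main() {
        ToyNursery nursery;
        void *a = AllocateSlotsSketch(&nursery, 64);   // comes from the nursery
        void *b = AllocateSlotsSketch(nullptr, 64);    // no JSContext: from the heap
        printf("a in nursery: %d, b in nursery: %d\n",
               nursery.isInside(a), nursery.isInside(b));
        FreeSlotsSketch(&nursery, a);
        FreeSlotsSketch(&nursery, b);
        return 0;
    }
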
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -512,17 +512,17 @@ JSObject::create(js::ExclusiveContext *c
     if (!obj)
         return nullptr;
 
     obj->shape_.init(shape);
     obj->type_.init(type);
     if (extantSlots) {
 #ifdef JSGC_GENERATIONAL
         if (cx->isJSContext())
-            cx->asJSContext()->runtime()->gcNursery.notifyInitialSlots(obj, extantSlots);
+            cx->asJSContext()->runtime()->gc.nursery.notifyInitialSlots(obj, extantSlots);
 #endif
         obj->slots = extantSlots;
     }
     obj->elements = js::emptyObjectElements;
 
     if (clasp->hasPrivate())
         obj->privateRef(shape->numFixedSlots()) = nullptr;
 
--- a/js/src/jsopcode.cpp
+++ b/js/src/jsopcode.cpp
@@ -809,17 +809,17 @@ ToDisassemblySource(JSContext *cx, Handl
             return false;
         nbytes = JS_sprintf_append(nullptr, "%s", nbytes);
         if (!nbytes)
             return false;
         bytes->initBytes(nbytes);
         return true;
     }
 
-    if (cx->runtime()->isHeapBusy() || cx->runtime()->noGCOrAllocationCheck) {
+    if (cx->runtime()->isHeapBusy() || cx->runtime()->gc.noGCOrAllocationCheck) {
         char *source = JS_sprintf_append(nullptr, "<value>");
         if (!source)
             return false;
         bytes->initBytes(source);
         return true;
     }
 
     if (v.isObject()) {
@@ -1931,20 +1931,20 @@ js::IsValidBytecodeOffset(JSContext *cx,
     return false;
 }
 
 JS_FRIEND_API(size_t)
 js::GetPCCountScriptCount(JSContext *cx)
 {
     JSRuntime *rt = cx->runtime();
 
-    if (!rt->scriptAndCountsVector)
+    if (!rt->gc.scriptAndCountsVector)
         return 0;
 
-    return rt->scriptAndCountsVector->length();
+    return rt->gc.scriptAndCountsVector->length();
 }
 
 enum MaybeComma {NO_COMMA, COMMA};
 
 static void
 AppendJSONProperty(StringBuffer &buf, const char *name, MaybeComma comma = COMMA)
 {
     if (comma)
@@ -1969,22 +1969,22 @@ AppendArrayJSONProperties(JSContext *cx,
     }
 }
 
 JS_FRIEND_API(JSString *)
 js::GetPCCountScriptSummary(JSContext *cx, size_t index)
 {
     JSRuntime *rt = cx->runtime();
 
-    if (!rt->scriptAndCountsVector || index >= rt->scriptAndCountsVector->length()) {
+    if (!rt->gc.scriptAndCountsVector || index >= rt->gc.scriptAndCountsVector->length()) {
         JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_BUFFER_TOO_SMALL);
         return nullptr;
     }
 
-    const ScriptAndCounts &sac = (*rt->scriptAndCountsVector)[index];
+    const ScriptAndCounts &sac = (*rt->gc.scriptAndCountsVector)[index];
     RootedScript script(cx, sac.script);
 
     /*
      * OOM on buffer appends here will not be caught immediately, but since
      * StringBuffer uses a ContextAllocPolicy will trigger an exception on the
      * context if they occur, which we'll catch before returning.
      */
     StringBuffer buf(cx);
@@ -2229,22 +2229,22 @@ GetPCCountJSON(JSContext *cx, const Scri
     return !cx->isExceptionPending();
 }
 
 JS_FRIEND_API(JSString *)
 js::GetPCCountScriptContents(JSContext *cx, size_t index)
 {
     JSRuntime *rt = cx->runtime();
 
-    if (!rt->scriptAndCountsVector || index >= rt->scriptAndCountsVector->length()) {
+    if (!rt->gc.scriptAndCountsVector || index >= rt->gc.scriptAndCountsVector->length()) {
         JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_BUFFER_TOO_SMALL);
         return nullptr;
     }
 
-    const ScriptAndCounts &sac = (*rt->scriptAndCountsVector)[index];
+    const ScriptAndCounts &sac = (*rt->gc.scriptAndCountsVector)[index];
     JSScript *script = sac.script;
 
     StringBuffer buf(cx);
 
     if (!script->functionNonDelazifying() && !script->compileAndGo())
         return buf.finishString();
 
     {
--- a/js/src/jsproxy.cpp
+++ b/js/src/jsproxy.cpp
@@ -2982,17 +2982,17 @@ js::proxy_Trace(JSTracer *trc, JSObject 
 }
 
 /* static */ void
 ProxyObject::trace(JSTracer *trc, JSObject *obj)
 {
     ProxyObject *proxy = &obj->as<ProxyObject>();
 
 #ifdef DEBUG
-    if (!trc->runtime()->gcDisableStrictProxyCheckingCount && proxy->is<WrapperObject>()) {
+    if (!trc->runtime()->gc.disableStrictProxyCheckingCount && proxy->is<WrapperObject>()) {
         JSObject *referent = &proxy->private_().toObject();
         if (referent->compartment() != proxy->compartment()) {
             /*
              * Assert that this proxy is tracked in the wrapper map. We maintain
              * the invariant that the wrapped object is the key in the wrapper map.
              */
             Value key = ObjectValue(*referent);
             WrapperMap::Ptr p = proxy->compartment()->lookupWrapper(key);
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -2058,17 +2058,17 @@ SaveSharedScriptData(ExclusiveContext *c
     /*
      * During the IGC we need to ensure that bytecode is marked whenever it is
      * accessed even if the bytecode was already in the table: at this point
      * old scripts or exceptions pointing to the bytecode may no longer be
      * reachable. This is effectively a read barrier.
      */
     if (cx->isJSContext()) {
         JSRuntime *rt = cx->asJSContext()->runtime();
-        if (JS::IsIncrementalGCInProgress(rt) && rt->gcIsFull)
+        if (JS::IsIncrementalGCInProgress(rt) && rt->gc.isFull)
             ssd->marked = true;
     }
 #endif
 
     script->setCode(ssd->data);
     script->atoms = ssd->atoms();
     return true;
 }
@@ -2076,35 +2076,35 @@ SaveSharedScriptData(ExclusiveContext *c
 static inline void
 MarkScriptData(JSRuntime *rt, const jsbytecode *bytecode)
 {
     /*
      * As an invariant, a ScriptBytecodeEntry should not be 'marked' outside of
      * a GC. Since SweepScriptBytecodes is only called during a full gc,
      * to preserve this invariant, only mark during a full gc.
      */
-    if (rt->gcIsFull)
+    if (rt->gc.isFull)
         SharedScriptData::fromBytecode(bytecode)->marked = true;
 }
 
 void
 js::UnmarkScriptData(JSRuntime *rt)
 {
-    JS_ASSERT(rt->gcIsFull);
+    JS_ASSERT(rt->gc.isFull);
     ScriptDataTable &table = rt->scriptDataTable();
     for (ScriptDataTable::Enum e(table); !e.empty(); e.popFront()) {
         SharedScriptData *entry = e.front();
         entry->marked = false;
     }
 }
 
 void
 js::SweepScriptData(JSRuntime *rt)
 {
-    JS_ASSERT(rt->gcIsFull);
+    JS_ASSERT(rt->gc.isFull);
     ScriptDataTable &table = rt->scriptDataTable();
 
     if (rt->keepAtoms())
         return;
 
     for (ScriptDataTable::Enum e(table); !e.empty(); e.popFront()) {
         SharedScriptData *entry = e.front();
         if (!entry->marked) {
@@ -3299,17 +3299,17 @@ JSScript::clearTraps(FreeOp *fop)
 void
 JSScript::markChildren(JSTracer *trc)
 {
     // NOTE: this JSScript may be partially initialized at this point.  E.g. we
     // may have created it and partially initialized it with
     // JSScript::Create(), but not yet finished initializing it with
     // fullyInitFromEmitter() or fullyInitTrivial().
 
-    JS_ASSERT_IF(trc->runtime()->gcStrictCompartmentChecking, zone()->isCollecting());
+    JS_ASSERT_IF(trc->runtime()->gc.strictCompartmentChecking, zone()->isCollecting());
 
     for (uint32_t i = 0; i < natoms(); ++i) {
         if (atoms[i])
             MarkString(trc, &atoms[i], "atom");
     }
 
     if (hasObjects()) {
         ObjectArray *objarray = objects();
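
The gc.isFull checks above gate a simple mark/sweep cycle over the shared bytecode table: marks are cleared at the start of a full GC, set again whenever an entry is saved or touched while that GC runs, and unmarked entries are dropped by SweepScriptData. A rough standalone model of that lifecycle, with simplified stand-in types (ToyScriptDataTable is illustrative, not the real ScriptDataTable):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct SharedDataEntry {
        std::string bytecode;
        bool marked;
    };

    struct ToyScriptDataTable {
        std::vector<SharedDataEntry> entries;
        bool fullGCInProgress = false;

        void unmarkAll() {                       // UnmarkScriptData analogue
            for (auto &e : entries)
                e.marked = false;
        }
        void markOnAccess(SharedDataEntry &e) {  // MarkScriptData analogue
            if (fullGCInProgress)                // only meaningful during a full GC
                e.marked = true;
        }
        void sweep() {                           // SweepScriptData analogue
            std::vector<SharedDataEntry> live;
            for (auto &e : entries) {
                if (e.marked)
                    live.push_back(e);
            }
            entries.swap(live);
        }
    };

    int main() {
        ToyScriptDataTable table;
        table.entries = {{"f1", false}, {"f2", false}, {"f3", false}};

        table.fullGCInProgress = true;
        table.unmarkAll();
        table.markOnAccess(table.entries[1]);    // only "f2" is reached this GC
        table.sweep();
        table.fullGCInProgress = false;

        printf("surviving entries: %zu\n", table.entries.size());  // prints 1
        return 0;
    }
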
--- a/js/src/jsweakmap.cpp
+++ b/js/src/jsweakmap.cpp
@@ -283,17 +283,17 @@ WeakMapPostWriteBarrier(JSRuntime *rt, O
      */
     ObjectValueMap::Base *baseHashMap = static_cast<ObjectValueMap::Base *>(weakMap);
 
     typedef HashMap<JSObject *, Value> UnbarrieredMap;
     UnbarrieredMap *unbarrieredMap = reinterpret_cast<UnbarrieredMap *>(baseHashMap);
 
     typedef gc::HashKeyRef<UnbarrieredMap, JSObject *> Ref;
     if (key && IsInsideNursery(rt, key))
-        rt->gcStoreBuffer.putGeneric(Ref((unbarrieredMap), key));
+        rt->gc.storeBuffer.putGeneric(Ref((unbarrieredMap), key));
 #endif
 }
 
 MOZ_ALWAYS_INLINE bool
 SetWeakMapEntryInternal(JSContext *cx, Handle<WeakMapObject*> mapObj,
                         HandleObject key, HandleValue value)
 {
     ObjectValueMap *map = mapObj->getMap();
--- a/js/src/vm/ArrayBufferObject.h
+++ b/js/src/vm/ArrayBufferObject.h
@@ -271,17 +271,17 @@ ToClampedIndex(JSContext *cx, HandleValu
 
 inline void
 PostBarrierTypedArrayObject(JSObject *obj)
 {
 #ifdef JSGC_GENERATIONAL
     JS_ASSERT(obj);
     JSRuntime *rt = obj->runtimeFromMainThread();
     if (!rt->isHeapBusy() && !IsInsideNursery(rt, obj))
-        rt->gcStoreBuffer.putWholeCell(obj);
+        rt->gc.storeBuffer.putWholeCell(obj);
 #endif
 }
 
 inline void
 InitArrayBufferViewDataPointer(ArrayBufferViewObject *obj, ArrayBufferObject *buffer, size_t byteOffset)
 {
     /*
      * N.B. The base of the array's data is stored in the object's
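
PostBarrierTypedArrayObject above shows the generational post-write-barrier pattern that recurs throughout this patch: after a store that may create an edge from a tenured cell to a nursery cell, the cell is recorded in the runtime's store buffer (now rt->gc.storeBuffer) so the next minor GC can find the edge without scanning the whole tenured heap; stores made while the heap is busy, or into cells that are themselves in the nursery, are skipped. A small self-contained sketch of the idea, using toy types rather than js::gc::StoreBuffer:

    #include <cstdio>
    #include <set>

    struct ToyCell {
        int payload = 0;
        bool inNursery = false;
    };

    struct ToyStoreBuffer {
        std::set<ToyCell *> wholeCells;
        void putWholeCell(ToyCell *cell) { wholeCells.insert(cell); }
    };

    struct ToyGCRuntime {
        ToyStoreBuffer storeBuffer;   // mirrors rt->gc.storeBuffer in the patch
    };

    struct ToyRuntime {
        ToyGCRuntime gc;
        bool heapBusy = false;
    };

    // The barrier: skip nursery cells (a minor GC scans them anyway) and skip
    // stores made while the collector itself is running.
    void PostWriteBarrier(ToyRuntime *rt, ToyCell *cell) {
        if (rt->heapBusy || cell->inNursery)
            return;
        rt->gc.storeBuffer.putWholeCell(cell);
    }

    int main() {
        ToyRuntime rt;
        ToyCell tenured;            // lives outside the nursery
        ToyCell young;
        young.inNursery = true;

        PostWriteBarrier(&rt, &tenured);   // recorded: may now point at young data
        PostWriteBarrier(&rt, &young);     // skipped: nursery cells are scanned anyway

        printf("buffered cells: %zu\n", rt.gc.storeBuffer.wholeCells.size()); // 1
        return 0;
    }
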
--- a/js/src/vm/ForkJoin.cpp
+++ b/js/src/vm/ForkJoin.cpp
@@ -474,17 +474,17 @@ ForkJoinActivation::ForkJoinActivation(J
 
     if (JS::IsIncrementalGCInProgress(cx->runtime())) {
         JS::PrepareForIncrementalGC(cx->runtime());
         JS::FinishIncrementalGC(cx->runtime(), JS::gcreason::API);
     }
 
     MinorGC(cx->runtime(), JS::gcreason::API);
 
-    cx->runtime()->gcHelperThread.waitBackgroundSweepEnd();
+    cx->runtime()->gc.helperThread.waitBackgroundSweepEnd();
 
     JS_ASSERT(!cx->runtime()->needsBarrier());
     JS_ASSERT(!cx->zone()->needsBarrier());
 }
 
 ForkJoinActivation::~ForkJoinActivation()
 {
     cx_->mainThread().ionTop = prevIonTop_;
@@ -1552,17 +1552,17 @@ ForkJoinShared::executePortion(PerThread
 
 void
 ForkJoinShared::setAbortFlagDueToInterrupt(ForkJoinContext &cx)
 {
     JS_ASSERT(cx_->runtime()->interruptPar);
     // The GC Needed flag should not be set during parallel
     // execution.  Instead, one of the requestGC() or
     // requestZoneGC() methods should be invoked.
-    JS_ASSERT(!cx_->runtime()->gcIsNeeded);
+    JS_ASSERT(!cx_->runtime()->gc.isNeeded);
 
     if (!abort_) {
         cx.bailoutRecord->setCause(ParallelBailoutInterrupt);
         setAbortFlagAndRequestInterrupt(false);
     }
 }
 
 void
--- a/js/src/vm/MemoryMetrics.cpp
+++ b/js/src/vm/MemoryMetrics.cpp
@@ -532,17 +532,17 @@ FindNotableScriptSources(JS::RuntimeSize
 }
 
 JS_PUBLIC_API(bool)
 JS::CollectRuntimeStats(JSRuntime *rt, RuntimeStats *rtStats, ObjectPrivateVisitor *opv)
 {
     if (!rtStats->compartmentStatsVector.reserve(rt->numCompartments))
         return false;
 
-    if (!rtStats->zoneStatsVector.reserve(rt->zones.length()))
+    if (!rtStats->zoneStatsVector.reserve(rt->gc.zones.length()))
         return false;
 
     rtStats->gcHeapChunkTotal =
         size_t(JS_GetGCParameter(rt, JSGC_TOTAL_CHUNKS)) * gc::ChunkSize;
 
     rtStats->gcHeapUnusedChunks =
         size_t(JS_GetGCParameter(rt, JSGC_UNUSED_CHUNKS)) * gc::ChunkSize;
 
--- a/js/src/vm/RegExpObject.cpp
+++ b/js/src/vm/RegExpObject.cpp
@@ -726,17 +726,17 @@ RegExpCompartment::sweep(JSRuntime *rt)
     for (Map::Range r = map_.all(); !r.empty(); r.popFront())
         JS_ASSERT(inUse_.has(r.front().value()));
 #endif
 
     map_.clear();
 
     for (PendingSet::Enum e(inUse_); !e.empty(); e.popFront()) {
         RegExpShared *shared = e.front();
-        if (shared->activeUseCount == 0 && shared->gcNumberWhenUsed < rt->gcStartNumber) {
+        if (shared->activeUseCount == 0 && shared->gcNumberWhenUsed < rt->gc.startNumber) {
             js_delete(shared);
             e.removeFront();
         }
     }
 
     if (matchResultTemplateObject_ &&
         IsObjectAboutToBeFinalized(matchResultTemplateObject_.unsafeGet()))
     {
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -109,17 +109,17 @@ PerThreadData::init()
 static const JSWrapObjectCallbacks DefaultWrapObjectCallbacks = {
     TransparentObjectWrapper,
     nullptr
 };
 
 JSRuntime::JSRuntime(JSRuntime *parentRuntime, JSUseHelperThreads useHelperThreads)
   : JS::shadow::Runtime(
 #ifdef JSGC_GENERATIONAL
-        &gcStoreBuffer
+        &gc.storeBuffer
 #endif
     ),
     mainThread(this),
     parentRuntime(parentRuntime),
     interrupt(false),
 #if defined(JS_THREADSAFE) && defined(JS_ION)
     interruptPar(false),
 #endif
@@ -130,17 +130,16 @@ JSRuntime::JSRuntime(JSRuntime *parentRu
     interruptLockOwner(nullptr),
     exclusiveAccessLock(nullptr),
     exclusiveAccessOwner(nullptr),
     mainThreadHasExclusiveAccess(false),
     numExclusiveThreads(0),
 #else
     interruptLockTaken(false),
 #endif
-    systemZone(nullptr),
     numCompartments(0),
     localeCallbacks(nullptr),
     defaultLocale(nullptr),
     defaultVersion_(JSVERSION_DEFAULT),
 #ifdef JS_THREADSAFE
     ownerThread_(nullptr),
 #endif
     tempLifoAlloc(TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
@@ -161,113 +160,31 @@ JSRuntime::JSRuntime(JSRuntime *parentRu
     requestDepth(0),
 # ifdef DEBUG
     checkRequestDepth(0),
 # endif
 #endif
 #ifdef DEBUG
     activeContext(nullptr),
 #endif
+    gc(thisFromCtor()),
     gcInitialized(false),
-    gcSystemAvailableChunkListHead(nullptr),
-    gcUserAvailableChunkListHead(nullptr),
-    gcBytes(0),
-    gcMaxBytes(0),
-    gcMaxMallocBytes(0),
-    gcNumArenasFreeCommitted(0),
-    gcMarker(this),
-    gcVerifyPreData(nullptr),
-    gcVerifyPostData(nullptr),
-    gcChunkAllocationSinceLastGC(false),
-    gcNextFullGCTime(0),
-    gcLastGCTime(0),
-    gcJitReleaseTime(0),
-    gcAllocationThreshold(30 * 1024 * 1024),
-    gcHighFrequencyGC(false),
-    gcHighFrequencyTimeThreshold(1000),
-    gcHighFrequencyLowLimitBytes(100 * 1024 * 1024),
-    gcHighFrequencyHighLimitBytes(500 * 1024 * 1024),
-    gcHighFrequencyHeapGrowthMax(3.0),
-    gcHighFrequencyHeapGrowthMin(1.5),
-    gcLowFrequencyHeapGrowth(1.5),
-    gcDynamicHeapGrowth(false),
-    gcDynamicMarkSlice(false),
-    gcDecommitThreshold(32 * 1024 * 1024),
-    gcShouldCleanUpEverything(false),
-    gcGrayBitsValid(false),
-    gcIsNeeded(0),
-    gcStats(thisFromCtor()),
-    gcNumber(0),
-    gcStartNumber(0),
-    gcIsFull(false),
-    gcTriggerReason(JS::gcreason::NO_REASON),
-    gcStrictCompartmentChecking(false),
-#ifdef DEBUG
-    gcDisableStrictProxyCheckingCount(0),
-#endif
-    gcIncrementalState(gc::NO_INCREMENTAL),
-    gcLastMarkSlice(false),
-    gcSweepOnBackgroundThread(false),
-    gcFoundBlackGrayEdges(false),
-    gcSweepingZones(nullptr),
-    gcZoneGroupIndex(0),
-    gcZoneGroups(nullptr),
-    gcCurrentZoneGroup(nullptr),
-    gcSweepPhase(0),
-    gcSweepZone(nullptr),
-    gcSweepKindIndex(0),
-    gcAbortSweepAfterCurrentGroup(false),
-    gcArenasAllocatedDuringSweep(nullptr),
-#ifdef DEBUG
-    gcMarkingValidator(nullptr),
-#endif
-    gcInterFrameGC(0),
-    gcSliceBudget(SliceBudget::Unlimited),
-    gcIncrementalEnabled(true),
-    gcGenerationalDisabled(0),
-    gcManipulatingDeadZones(false),
-    gcObjectsMarkedInDeadZones(0),
-    gcPoke(false),
-    heapState(Idle),
-#ifdef JSGC_GENERATIONAL
-    gcNursery(thisFromCtor()),
-    gcStoreBuffer(thisFromCtor(), gcNursery),
-#endif
-#ifdef JS_GC_ZEAL
-    gcZeal_(0),
-    gcZealFrequency(0),
-    gcNextScheduled(0),
-    gcDeterministicOnly(false),
-    gcIncrementalLimit(0),
-#endif
-    gcValidate(true),
-    gcFullCompartmentChecks(false),
-    gcCallback(nullptr),
-    gcSliceCallback(nullptr),
-    gcFinalizeCallback(nullptr),
-    gcMallocBytes(0),
-    gcMallocGCTriggered(false),
 #ifdef JS_ARM_SIMULATOR
     simulatorRuntime_(nullptr),
 #endif
-    scriptAndCountsVector(nullptr),
     NaNValue(DoubleNaNValue()),
     negativeInfinityValue(DoubleValue(NegativeInfinity<double>())),
     positiveInfinityValue(DoubleValue(PositiveInfinity<double>())),
     emptyString(nullptr),
     debugMode(false),
     spsProfiler(thisFromCtor()),
     profilingScripts(false),
-    alwaysPreserveCode(false),
     hadOutOfMemory(false),
     haveCreatedContext(false),
     data(nullptr),
-    gcLock(nullptr),
-    gcLockOwner(nullptr),
-    gcHelperThread(thisFromCtor()),
     signalHandlersInstalled_(false),
     defaultFreeOp_(thisFromCtor(), false),
     debuggerMutations(0),
     securityCallbacks(const_cast<JSSecurityCallbacks *>(&NullSecurityCallbacks)),
     DOMcallbacks(nullptr),
     destroyPrincipals(nullptr),
     structuredCloneCallbacks(nullptr),
     telemetryCallback(nullptr),
@@ -284,19 +201,16 @@ JSRuntime::JSRuntime(JSRuntime *parentRu
     beingDestroyed_(false),
     atoms_(nullptr),
     atomsCompartment_(nullptr),
     staticStrings(nullptr),
     commonNames(nullptr),
     permanentAtoms(nullptr),
     wrapObjectCallbacks(&DefaultWrapObjectCallbacks),
     preserveWrapperCallback(nullptr),
-#ifdef DEBUG
-    noGCOrAllocationCheck(0),
-#endif
     jitSupportsFloatingPoint(false),
     ionPcScriptCache(nullptr),
     threadPool(this),
     defaultJSContextCallback(nullptr),
     ctypesActivityCallback(nullptr),
     forkJoinWarmup(0),
     ionReturnOverride_(MagicValue(JS_ARG_POISON)),
     useHelperThreads_(useHelperThreads),
@@ -348,18 +262,18 @@ JSRuntime::init(uint32_t maxbytes)
 {
 #ifdef JS_THREADSAFE
     ownerThread_ = PR_GetCurrentThread();
 
     interruptLock = PR_NewLock();
     if (!interruptLock)
         return false;
 
-    gcLock = PR_NewLock();
-    if (!gcLock)
+    gc.lock = PR_NewLock();
+    if (!gc.lock)
         return false;
 
     exclusiveAccessLock = PR_NewLock();
     if (!exclusiveAccessLock)
         return false;
 #endif
 
     if (!mainThread.init())
@@ -368,33 +282,33 @@ JSRuntime::init(uint32_t maxbytes)
     js::TlsPerThreadData.set(&mainThread);
 
     if (!threadPool.init())
         return false;
 
     if (!js_InitGC(this, maxbytes))
         return false;
 
-    if (!gcMarker.init(gcMode()))
+    if (!gc.marker.init(gcMode()))
         return false;
 
     const char *size = getenv("JSGC_MARK_STACK_LIMIT");
     if (size)
         SetMarkStackLimit(this, atoi(size));
 
     ScopedJSDeletePtr<Zone> atomsZone(new_<Zone>(this));
     if (!atomsZone)
         return false;
 
     JS::CompartmentOptions options;
     ScopedJSDeletePtr<JSCompartment> atomsCompartment(new_<JSCompartment>(atomsZone.get(), options));
     if (!atomsCompartment || !atomsCompartment->init(nullptr))
         return false;
 
-    zones.append(atomsZone.get());
+    gc.zones.append(atomsZone.get());
     atomsZone->compartments.append(atomsCompartment.get());
 
     atomsCompartment->isSystem = true;
     atomsZone->isSystem = true;
     atomsZone->setGCLastBytes(8192, GC_NORMAL);
 
     atomsZone.forget();
     this->atomsCompartment_ = atomsCompartment.forget();
@@ -519,33 +433,33 @@ JSRuntime::~JSRuntime()
 #if !EXPOSE_INTL_API
     FinishRuntimeNumberState(this);
 #endif
 
     js_FinishGC(this);
     atomsCompartment_ = nullptr;
 
 #ifdef JS_THREADSAFE
-    if (gcLock)
-        PR_DestroyLock(gcLock);
+    if (gc.lock)
+        PR_DestroyLock(gc.lock);
 #endif
 
     js_free(defaultLocale);
     js_delete(bumpAlloc_);
     js_delete(mathCache_);
 #ifdef JS_ION
     js_delete(jitRuntime_);
 #endif
     js_delete(execAlloc_);  /* Delete after jitRuntime_. */
 
     js_delete(ionPcScriptCache);
 
 #ifdef JSGC_GENERATIONAL
-    gcStoreBuffer.disable();
-    gcNursery.disable();
+    gc.storeBuffer.disable();
+    gc.nursery.disable();
 #endif
 
 #ifdef JS_ARM_SIMULATOR
     js::jit::DestroySimulatorRuntime(simulatorRuntime_);
 #endif
 
     DebugOnly<size_t> oldCount = liveRuntimesCount--;
     JS_ASSERT(oldCount > 0);
@@ -623,22 +537,22 @@ JSRuntime::addSizeOfIncludingThis(mozill
         AutoLockForInterrupt lock(this);
         if (jitRuntime()) {
             if (JSC::ExecutableAllocator *ionAlloc = jitRuntime()->ionAlloc(this))
                 ionAlloc->addSizeOfCode(&rtSizes->code);
         }
     }
 #endif
 
-    rtSizes->gc.marker += gcMarker.sizeOfExcludingThis(mallocSizeOf);
+    rtSizes->gc.marker += gc.marker.sizeOfExcludingThis(mallocSizeOf);
 #ifdef JSGC_GENERATIONAL
-    rtSizes->gc.nurseryCommitted += gcNursery.sizeOfHeapCommitted();
-    rtSizes->gc.nurseryDecommitted += gcNursery.sizeOfHeapDecommitted();
-    rtSizes->gc.nurseryHugeSlots += gcNursery.sizeOfHugeSlots(mallocSizeOf);
-    gcStoreBuffer.addSizeOfExcludingThis(mallocSizeOf, &rtSizes->gc);
+    rtSizes->gc.nurseryCommitted += gc.nursery.sizeOfHeapCommitted();
+    rtSizes->gc.nurseryDecommitted += gc.nursery.sizeOfHeapDecommitted();
+    rtSizes->gc.nurseryHugeSlots += gc.nursery.sizeOfHugeSlots(mallocSizeOf);
+    gc.storeBuffer.addSizeOfExcludingThis(mallocSizeOf, &rtSizes->gc);
 #endif
 }
 
 static bool
 SignalBasedTriggersDisabled()
 {
   // Don't bother trying to cache the getenv lookup; this should be called
   // infrequently.
@@ -780,47 +694,47 @@ JSRuntime::triggerActivityCallback(bool 
 
 void
 JSRuntime::setGCMaxMallocBytes(size_t value)
 {
     /*
      * For compatibility treat any value that exceeds PTRDIFF_T_MAX to
      * mean that value.
      */
-    gcMaxMallocBytes = (ptrdiff_t(value) >= 0) ? value : size_t(-1) >> 1;
+    gc.maxMallocBytes = (ptrdiff_t(value) >= 0) ? value : size_t(-1) >> 1;
     resetGCMallocBytes();
     for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next())
         zone->setGCMaxMallocBytes(value);
 }
 
 void
 JSRuntime::updateMallocCounter(size_t nbytes)
 {
     updateMallocCounter(nullptr, nbytes);
 }
 
 void
 JSRuntime::updateMallocCounter(JS::Zone *zone, size_t nbytes)
 {
     /* We tolerate any thread races when updating gc.mallocBytes. */
-    gcMallocBytes -= ptrdiff_t(nbytes);
-    if (MOZ_UNLIKELY(gcMallocBytes <= 0))
+    gc.mallocBytes -= ptrdiff_t(nbytes);
+    if (MOZ_UNLIKELY(gc.mallocBytes <= 0))
         onTooMuchMalloc();
     else if (zone)
         zone->updateMallocCounter(nbytes);
 }
 
 JS_FRIEND_API(void)
 JSRuntime::onTooMuchMalloc()
 {
     if (!CurrentThreadCanAccessRuntime(this))
         return;
 
-    if (!gcMallocGCTriggered)
-        gcMallocGCTriggered = TriggerGC(this, JS::gcreason::TOO_MUCH_MALLOC);
+    if (!gc.mallocGCTriggered)
+        gc.mallocGCTriggered = TriggerGC(this, JS::gcreason::TOO_MUCH_MALLOC);
 }
 
 JS_FRIEND_API(void *)
 JSRuntime::onOutOfMemory(void *p, size_t nbytes)
 {
     return onOutOfMemory(p, nbytes, nullptr);
 }
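
updateMallocCounter and onTooMuchMalloc above implement a malloc-pressure trigger: gc.mallocBytes counts down from gc.maxMallocBytes as memory is allocated, the first crossing below zero requests a GC, and the gc.mallocGCTriggered latch prevents repeated requests until resetGCMallocBytes reloads the counter. A self-contained sketch of that mechanism with simplified stand-in types (ToyRuntime and ToyGCState are illustrative, not the real classes):

    #include <cstddef>
    #include <cstdio>

    struct ToyGCState {
        ptrdiff_t mallocBytes = 0;
        size_t maxMallocBytes = 0;
        bool mallocGCTriggered = false;
    };

    struct ToyRuntime {
        ToyGCState gc;
        int gcRequests = 0;

        void resetGCMallocBytes() {
            gc.mallocBytes = ptrdiff_t(gc.maxMallocBytes);
            gc.mallocGCTriggered = false;
        }
        void setGCMaxMallocBytes(size_t value) {
            gc.maxMallocBytes = value;
            resetGCMallocBytes();
        }
        void onTooMuchMalloc() {
            // Request a GC at most once per counter reset.
            if (!gc.mallocGCTriggered) {
                gc.mallocGCTriggered = true;
                gcRequests++;
            }
        }
        void updateMallocCounter(size_t nbytes) {
            gc.mallocBytes -= ptrdiff_t(nbytes);
            if (gc.mallocBytes <= 0)
                onTooMuchMalloc();
        }
    };

    int main() {
        ToyRuntime rt;
        rt.setGCMaxMallocBytes(1024);
        for (int i = 0; i < 10; i++)
            rt.updateMallocCounter(256);          // crosses zero on the fourth call
        printf("GC requests: %d\n", rt.gcRequests);   // prints 1, not 7
        rt.resetGCMallocBytes();
        rt.updateMallocCounter(2048);
        printf("GC requests: %d\n", rt.gcRequests);   // prints 2
        return 0;
    }
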
 
@@ -830,17 +744,17 @@ JSRuntime::onOutOfMemory(void *p, size_t
     if (isHeapBusy())
         return nullptr;
 
     /*
      * Retry when we are done with the background sweeping and have stopped
      * all the allocations and released the empty GC chunks.
      */
     JS::ShrinkGCBuffers(this);
-    gcHelperThread.waitBackgroundSweepOrAllocEnd();
+    gc.helperThread.waitBackgroundSweepOrAllocEnd();
     if (!p)
         p = js_malloc(nbytes);
     else if (p == reinterpret_cast<void *>(1))
         p = js_calloc(nbytes);
     else
       p = js_realloc(p, nbytes);
     if (p)
         return p;
@@ -925,17 +839,17 @@ JSRuntime::assertCanLock(RuntimeLock whi
     switch (which) {
       case ExclusiveAccessLock:
         JS_ASSERT(exclusiveAccessOwner != PR_GetCurrentThread());
       case WorkerThreadStateLock:
         JS_ASSERT(!WorkerThreadState().isLocked());
       case InterruptLock:
         JS_ASSERT(!currentThreadOwnsInterruptLock());
       case GCLock:
-        JS_ASSERT(gcLockOwner != PR_GetCurrentThread());
+        JS_ASSERT(gc.lockOwner != PR_GetCurrentThread());
         break;
       default:
         MOZ_CRASH();
     }
 #endif // JS_THREADSAFE
 }
 
 void
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -14,31 +14,24 @@
 #include "mozilla/PodOperations.h"
 #include "mozilla/Scoped.h"
 #include "mozilla/ThreadLocal.h"
 
 #include <setjmp.h>
 
 #include "jsatom.h"
 #include "jsclist.h"
-#include "jsgc.h"
 #ifdef DEBUG
 # include "jsproxy.h"
 #endif
 #include "jsscript.h"
 
 #include "ds/FixedSizeHash.h"
 #include "frontend/ParseMaps.h"
-#ifdef JSGC_GENERATIONAL
-# include "gc/Nursery.h"
-#endif
-#include "gc/Statistics.h"
-#ifdef JSGC_GENERATIONAL
-# include "gc/StoreBuffer.h"
-#endif
+#include "gc/GCRuntime.h"
 #include "gc/Tracer.h"
 #ifdef XP_MACOSX
 # include "jit/AsmJSSignalHandlers.h"
 #endif
 #include "js/HashTable.h"
 #include "js/Vector.h"
 #include "vm/CommonPropertyNames.h"
 #include "vm/DateTime.h"
@@ -130,58 +123,16 @@ struct ScopeCoordinateNameCache {
 
     Shape *shape;
     Map map;
 
     ScopeCoordinateNameCache() : shape(nullptr) {}
     void purge();
 };
 
-typedef Vector<ScriptAndCounts, 0, SystemAllocPolicy> ScriptAndCountsVector;
-
-struct ConservativeGCData
-{
-    /*
-     * The GC scans conservatively between ThreadData::nativeStackBase and
-     * nativeStackTop unless the latter is nullptr.
-     */
-    uintptr_t           *nativeStackTop;
-
-    union {
-        jmp_buf         jmpbuf;
-        uintptr_t       words[JS_HOWMANY(sizeof(jmp_buf), sizeof(uintptr_t))];
-    } registerSnapshot;
-
-    ConservativeGCData() {
-        mozilla::PodZero(this);
-    }
-
-    ~ConservativeGCData() {
-#ifdef JS_THREADSAFE
-        /*
-         * The conservative GC scanner should be disabled when the thread leaves
-         * the last request.
-         */
-        JS_ASSERT(!hasStackToScan());
-#endif
-    }
-
-    MOZ_NEVER_INLINE void recordStackTop();
-
-#ifdef JS_THREADSAFE
-    void updateForRequestEnd() {
-        nativeStackTop = nullptr;
-    }
-#endif
-
-    bool hasStackToScan() const {
-        return !!nativeStackTop;
-    }
-};
-
 struct EvalCacheEntry
 {
     JSScript *script;
     JSScript *callerScript;
     jsbytecode *pc;
 };
 
 struct EvalCacheLookup
@@ -650,22 +601,16 @@ class PerThreadData : public PerThreadDa
 #ifdef JS_ARM_SIMULATOR
     js::jit::Simulator *simulator() const;
     void setSimulator(js::jit::Simulator *sim);
     js::jit::SimulatorRuntime *simulatorRuntime() const;
     uintptr_t *addressOfSimulatorStackLimit();
 #endif
 };
 
-namespace gc {
-class MarkingValidator;
-} // namespace gc
-
-typedef Vector<JS::Zone *, 4, SystemAllocPolicy> ZoneVector;
-
 class AutoLockForExclusiveAccess;
 
 void RecomputeStackLimit(JSRuntime *rt, StackKind kind);
 
 } // namespace js
 
 struct JSRuntime : public JS::shadow::Runtime,
                    public js::MallocProvider<JSRuntime>
@@ -801,22 +746,16 @@ struct JSRuntime : public JS::shadow::Ru
     bool exclusiveThreadsPresent() const {
 #ifdef JS_THREADSAFE
         return numExclusiveThreads > 0;
 #else
         return false;
 #endif
     }
 
-    /* Embedders can use this zone however they wish. */
-    JS::Zone            *systemZone;
-
-    /* List of compartments and zones (protected by the GC lock). */
-    js::ZoneVector      zones;
-
     /* How many compartments there are across all zones. */
     size_t              numCompartments;
 
     /* Locale-specific callbacks for string conversion. */
     JSLocaleCallbacks *localeCallbacks;
 
     /* Default locale for Internationalization API */
     char *defaultLocale;
@@ -970,343 +909,91 @@ struct JSRuntime : public JS::shadow::Ru
      * advance which JSContext should be passed to JSAPI calls. If this is set
      * to a non-null value, the assertSameCompartment machinery does double-
      * duty (in debug builds) to verify that it matches the cx being used.
      */
     JSContext          *activeContext;
 #endif
 
     /* Garbage collector state, used by jsgc.c. */
+    js::gc::GCRuntime   gc;
 
     /* Garbage collector state has been successfully initialized. */
     bool                gcInitialized;
 
-    /*
-     * Set of all GC chunks with at least one allocated thing. The
-     * conservative GC uses it to quickly check if a possible GC thing points
-     * into an allocated chunk.
-     */
-    js::GCChunkSet      gcChunkSet;
-
-    /*
-     * Doubly-linked lists of chunks from user and system compartments. The GC
-     * allocates its arenas from the corresponding list and when all arenas
-     * in the list head are taken, then the chunk is removed from the list.
-     * During the GC when all arenas in a chunk become free, that chunk is
-     * removed from the list and scheduled for release.
-     */
-    js::gc::Chunk       *gcSystemAvailableChunkListHead;
-    js::gc::Chunk       *gcUserAvailableChunkListHead;
-    js::gc::ChunkPool   gcChunkPool;
-
-    js::RootedValueMap  gcRootsHash;
-
-    /* This is updated by both the main and GC helper threads. */
-    mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes;
-
-    size_t              gcMaxBytes;
-    size_t              gcMaxMallocBytes;
-
-    /*
-     * Number of the committed arenas in all GC chunks including empty chunks.
-     */
-    mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcNumArenasFreeCommitted;
-    js::GCMarker        gcMarker;
-    void                *gcVerifyPreData;
-    void                *gcVerifyPostData;
-    bool                gcChunkAllocationSinceLastGC;
-    int64_t             gcNextFullGCTime;
-    int64_t             gcLastGCTime;
-    int64_t             gcJitReleaseTime;
-  private:
-    JSGCMode            gcMode_;
-
-  public:
-    JSGCMode gcMode() const { return gcMode_; }
+    JSGCMode gcMode() const { return gc.mode; }
     void setGCMode(JSGCMode mode) {
-        gcMode_ = mode;
-        gcMarker.setGCMode(mode);
+        gc.mode = mode;
+        gc.marker.setGCMode(mode);
     }
 
-    size_t              gcAllocationThreshold;
-    bool                gcHighFrequencyGC;
-    uint64_t            gcHighFrequencyTimeThreshold;
-    uint64_t            gcHighFrequencyLowLimitBytes;
-    uint64_t            gcHighFrequencyHighLimitBytes;
-    double              gcHighFrequencyHeapGrowthMax;
-    double              gcHighFrequencyHeapGrowthMin;
-    double              gcLowFrequencyHeapGrowth;
-    bool                gcDynamicHeapGrowth;
-    bool                gcDynamicMarkSlice;
-    uint64_t            gcDecommitThreshold;
-
-    /* During shutdown, the GC needs to clean up every possible object. */
-    bool                gcShouldCleanUpEverything;
-
-    /*
-     * The gray bits can become invalid if UnmarkGray overflows the stack. A
-     * full GC will reset this bit, since it fills in all the gray bits.
-     */
-    bool                gcGrayBitsValid;
-
-    /*
-     * These flags must be kept separate so that a thread requesting a
-     * compartment GC doesn't cancel another thread's concurrent request for a
-     * full GC.
-     */
-    volatile uintptr_t  gcIsNeeded;
-
-    js::gcstats::Statistics gcStats;
-
-    /* Incremented on every GC slice. */
-    uint64_t            gcNumber;
-
-    /* The gcNumber at the time of the most recent GC's first slice. */
-    uint64_t            gcStartNumber;
-
-    /* Whether the currently running GC can finish in multiple slices. */
-    bool                gcIsIncremental;
-
-    /* Whether all compartments are being collected in first GC slice. */
-    bool                gcIsFull;
-
-    /* The reason that an interrupt-triggered GC should be called. */
-    JS::gcreason::Reason gcTriggerReason;
-
-    /*
-     * If this is true, all marked objects must belong to a compartment being
-     * GCed. This is used to look for compartment bugs.
-     */
-    bool                gcStrictCompartmentChecking;
-
-#ifdef DEBUG
-    /*
-     * If this is 0, all cross-compartment proxies must be registered in the
-     * wrapper map. This checking must be disabled temporarily while creating
-     * new wrappers. When non-zero, this records the recursion depth of wrapper
-     * creation.
-     */
-    uintptr_t           gcDisableStrictProxyCheckingCount;
-#else
-    uintptr_t           unused1;
-#endif
-
-    /*
-     * The current incremental GC phase. This is also used internally in
-     * non-incremental GC.
-     */
-    js::gc::State       gcIncrementalState;
-
-    /* Indicates that the last incremental slice exhausted the mark stack. */
-    bool                gcLastMarkSlice;
-
-    /* Whether any sweeping will take place in the separate GC helper thread. */
-    bool                gcSweepOnBackgroundThread;
-
-    /* Whether any black->gray edges were found during marking. */
-    bool                gcFoundBlackGrayEdges;
-
-    /* List head of zones to be swept in the background. */
-    JS::Zone            *gcSweepingZones;
-
-    /* Index of current zone group (for stats). */
-    unsigned            gcZoneGroupIndex;
-
-    /*
-     * Incremental sweep state.
-     */
-    JS::Zone            *gcZoneGroups;
-    JS::Zone            *gcCurrentZoneGroup;
-    int                 gcSweepPhase;
-    JS::Zone            *gcSweepZone;
-    int                 gcSweepKindIndex;
-    bool                gcAbortSweepAfterCurrentGroup;
-
-    /*
-     * List head of arenas allocated during the sweep phase.
-     */
-    js::gc::ArenaHeader *gcArenasAllocatedDuringSweep;
-
-#ifdef DEBUG
-    js::gc::MarkingValidator *gcMarkingValidator;
-#endif
-
-    /*
-     * Indicates that a GC slice has taken place in the middle of an animation
-     * frame, rather than at the beginning. In this case, the next slice will be
-     * delayed so that we don't get back-to-back slices.
-     */
-    volatile uintptr_t  gcInterFrameGC;
-
-    /* Default budget for incremental GC slice. See SliceBudget in jsgc.h. */
-    int64_t             gcSliceBudget;
-
-    /*
-     * We disable incremental GC if we encounter a js::Class with a trace hook
-     * that does not implement write barriers.
-     */
-    bool                gcIncrementalEnabled;
-
-    /*
-     * GGC can be enabled from the command line while testing.
-     */
-    unsigned            gcGenerationalDisabled;
-
-    /*
-     * This is true if we are in the middle of a brain transplant (e.g.,
-     * JS_TransplantObject) or some other operation that can manipulate
-     * dead zones.
-     */
-    bool                gcManipulatingDeadZones;
-
-    /*
-     * This field is incremented each time we mark an object inside a
-     * zone with no incoming cross-compartment pointers. Typically if
-     * this happens it signals that an incremental GC is marking too much
-     * stuff. At various times we check this counter and, if it has changed, we
-     * run an immediate, non-incremental GC to clean up the dead
-     * zones. This should happen very rarely.
-     */
-    unsigned            gcObjectsMarkedInDeadZones;
-
-    bool                gcPoke;
-
-    volatile js::HeapState heapState;
-
-    bool isHeapBusy() { return heapState != js::Idle; }
-    bool isHeapMajorCollecting() { return heapState == js::MajorCollecting; }
-    bool isHeapMinorCollecting() { return heapState == js::MinorCollecting; }
+    bool isHeapBusy() { return gc.heapState != js::Idle; }
+    bool isHeapMajorCollecting() { return gc.heapState == js::MajorCollecting; }
+    bool isHeapMinorCollecting() { return gc.heapState == js::MinorCollecting; }
     bool isHeapCollecting() { return isHeapMajorCollecting() || isHeapMinorCollecting(); }
 
-#ifdef JSGC_GENERATIONAL
-    js::Nursery                  gcNursery;
-    js::gc::StoreBuffer          gcStoreBuffer;
-#endif
-
-    /*
-     * These options control the zealousness of the GC. The fundamental values
-     * are gcNextScheduled and gcDebugCompartmentGC. At every allocation,
-     * gcNextScheduled is decremented. When it reaches zero, we do either a
-     * full or a compartmental GC, based on gcDebugCompartmentGC.
-     *
-     * At this point, if gcZeal_ is one of the types that trigger periodic
-     * collection, then gcNextScheduled is reset to the value of
-     * gcZealFrequency. Otherwise, no additional GCs take place.
-     *
-     * You can control these values in several ways:
-     *   - Pass the -Z flag to the shell (see the usage info for details)
-     *   - Call gczeal() or schedulegc() from inside shell-executed JS code
-     *     (see the help for details)
-     *
-     * If gzZeal_ == 1 then we perform GCs in select places (during MaybeGC and
-     * whenever a GC poke happens). This option is mainly useful to embedders.
-     *
-     * We use gcZeal_ == 4 to enable write barrier verification. See the comment
-     * in jsgc.cpp for more information about this.
-     *
-     * gcZeal_ values from 8 to 10 periodically run different types of
-     * incremental GC.
-     */
 #ifdef JS_GC_ZEAL
-    int                 gcZeal_;
-    int                 gcZealFrequency;
-    int                 gcNextScheduled;
-    bool                gcDeterministicOnly;
-    int                 gcIncrementalLimit;
-
-    js::Vector<JSObject *, 0, js::SystemAllocPolicy> gcSelectedForMarking;
-
-    int gcZeal() { return gcZeal_; }
+    int gcZeal() { return gc.zealMode; }
 
     bool upcomingZealousGC() {
-        return gcNextScheduled == 1;
+        return gc.nextScheduled == 1;
     }
 
     bool needZealousGC() {
-        if (gcNextScheduled > 0 && --gcNextScheduled == 0) {
+        if (gc.nextScheduled > 0 && --gc.nextScheduled == 0) {
             if (gcZeal() == js::gc::ZealAllocValue ||
                 gcZeal() == js::gc::ZealGenerationalGCValue ||
                 (gcZeal() >= js::gc::ZealIncrementalRootsThenFinish &&
                  gcZeal() <= js::gc::ZealIncrementalMultipleSlices))
             {
-                gcNextScheduled = gcZealFrequency;
+                gc.nextScheduled = gc.zealFrequency;
             }
             return true;
         }
         return false;
     }
 #else
     int gcZeal() { return 0; }
     bool upcomingZealousGC() { return false; }
     bool needZealousGC() { return false; }
 #endif
 
-    bool                gcValidate;
-    bool                gcFullCompartmentChecks;
-
-    JSGCCallback        gcCallback;
-    JS::GCSliceCallback gcSliceCallback;
-    JSFinalizeCallback  gcFinalizeCallback;
-
-    void                *gcCallbackData;
+    void lockGC() {
+#ifdef JS_THREADSAFE
+        assertCanLock(js::GCLock);
+        PR_Lock(gc.lock);
+        JS_ASSERT(!gc.lockOwner);
+#ifdef DEBUG
+        gc.lockOwner = PR_GetCurrentThread();
+#endif
+#endif
+    }
 
-  private:
-    /*
-     * Malloc counter to measure memory pressure for GC scheduling. It runs
-     * from gcMaxMallocBytes down to zero.
-     */
-    mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes;
-
-    /*
-     * Whether a GC has been triggered as a result of gcMallocBytes falling
-     * below zero.
-     */
-    mozilla::Atomic<bool, mozilla::ReleaseAcquire> gcMallocGCTriggered;
+    void unlockGC() {
+#ifdef JS_THREADSAFE
+        JS_ASSERT(gc.lockOwner == PR_GetCurrentThread());
+        gc.lockOwner = nullptr;
+        PR_Unlock(gc.lock);
+#endif
+    }
 
 #ifdef JS_ARM_SIMULATOR
     js::jit::SimulatorRuntime *simulatorRuntime_;
 #endif
 
   public:
     void setNeedsBarrier(bool needs) {
         needsBarrier_ = needs;
     }
 
-    struct ExtraTracer {
-        JSTraceDataOp op;
-        void *data;
-
-        ExtraTracer()
-          : op(nullptr), data(nullptr)
-        {}
-        ExtraTracer(JSTraceDataOp op, void *data)
-          : op(op), data(data)
-        {}
-    };
-
 #ifdef JS_ARM_SIMULATOR
     js::jit::SimulatorRuntime *simulatorRuntime() const;
     void setSimulatorRuntime(js::jit::SimulatorRuntime *srt);
 #endif
 
-    /*
-     * The trace operations to trace embedding-specific GC roots. One is for
-     * tracing through black roots and the other is for tracing through gray
-     * roots. The black/gray distinction is only relevant to the cycle
-     * collector.
-     */
-    typedef js::Vector<ExtraTracer, 4, js::SystemAllocPolicy> ExtraTracerVector;
-    ExtraTracerVector   gcBlackRootTracers;
-    ExtraTracer         gcGrayRootTracer;
-
-    js::gc::SystemPageAllocator pageAllocator;
-
-    /* Strong references on scripts held for PCCount profiling API. */
-    js::ScriptAndCountsVector *scriptAndCountsVector;
-
     /* Well-known numbers held for use by this runtime's contexts. */
     const js::Value     NaNValue;
     const js::Value     negativeInfinityValue;
     const js::Value     positiveInfinityValue;
 
     js::PropertyName    *emptyString;
 
     /* List of active contexts sharing this runtime. */
@@ -1325,19 +1012,16 @@ struct JSRuntime : public JS::shadow::Ru
     bool                debugMode;
 
     /* SPS profiling metadata */
     js::SPSProfiler     spsProfiler;
 
     /* If true, new scripts must be created with PC counter information. */
     bool                profilingScripts;
 
-    /* Always preserve JIT code during GCs, for testing. */
-    bool                alwaysPreserveCode;
-
     /* Had an out-of-memory error which did not populate an exception. */
     bool                hadOutOfMemory;
 
     /* A context has been created on this runtime. */
     bool                haveCreatedContext;
 
     /* Linked list of all Debugger objects in the runtime. */
     mozilla::LinkedList<js::Debugger> debuggerList;
@@ -1347,43 +1031,16 @@ struct JSRuntime : public JS::shadow::Ru
      * onNewGlobalObject handler methods established.
      */
     JSCList             onNewGlobalObjectWatchers;
 
     /* Client opaque pointers */
     void                *data;
 
   private:
-    /* Synchronize GC heap access between main thread and GCHelperThread. */
-    PRLock *gcLock;
-    mozilla::DebugOnly<PRThread *> gcLockOwner;
-
-    friend class js::GCHelperThread;
-  public:
-
-    void lockGC() {
-#ifdef JS_THREADSAFE
-        assertCanLock(js::GCLock);
-        PR_Lock(gcLock);
-        JS_ASSERT(!gcLockOwner);
-#ifdef DEBUG
-        gcLockOwner = PR_GetCurrentThread();
-#endif
-#endif
-    }
-
-    void unlockGC() {
-#ifdef JS_THREADSAFE
-        JS_ASSERT(gcLockOwner == PR_GetCurrentThread());
-        gcLockOwner = nullptr;
-        PR_Unlock(gcLock);
-#endif
-    }
-
-    js::GCHelperThread  gcHelperThread;
 
 #if defined(XP_MACOSX) && defined(JS_ION)
     js::AsmJSMachExceptionHandler asmJSMachExceptionHandler;
 #endif
 
     // Whether asm.js signal handlers have been installed and can be used for
     // performing interrupt checks in loops.
   private:
@@ -1446,18 +1103,16 @@ struct JSRuntime : public JS::shadow::Ru
     js::NewObjectCache  newObjectCache;
     js::NativeIterCache nativeIterCache;
     js::SourceDataCache sourceDataCache;
     js::EvalCache       evalCache;
     js::LazyScriptCache lazyScriptCache;
 
     js::DateTimeInfo    dateTimeInfo;
 
-    js::ConservativeGCData conservativeGC;
-
     // Pool of maps used during parse/emit. This may be modified by threads
     // with an ExclusiveContext and requires a lock. Active compilations
     // prevent the pool from being purged during GCs.
   private:
     js::frontend::ParseMapPool parseMapPool_;
     unsigned activeCompilations_;
   public:
     js::frontend::ParseMapPool &parseMapPool() {
@@ -1567,20 +1222,16 @@ struct JSRuntime : public JS::shadow::Ru
   private:
     js::ScriptDataTable scriptDataTable_;
   public:
     js::ScriptDataTable &scriptDataTable() {
         JS_ASSERT(currentThreadHasExclusiveAccess());
         return scriptDataTable_;
     }
 
-#ifdef DEBUG
-    size_t              noGCOrAllocationCheck;
-#endif
-
     bool                jitSupportsFloatingPoint;
 
     // Used to reset stack limit after a signaled interrupt (i.e. jitStackLimit_ = -1)
     // has been noticed by Ion/Baseline.
     void resetJitStackLimit();
 
     // Cache for jit::GetPcScript().
     js::jit::PcScriptCache *ionPcScriptCache;
@@ -1639,35 +1290,35 @@ struct JSRuntime : public JS::shadow::Ru
 
     bool init(uint32_t maxbytes);
 
     JSRuntime *thisFromCtor() { return this; }
 
     void setGCMaxMallocBytes(size_t value);
 
     void resetGCMallocBytes() {
-        gcMallocBytes = ptrdiff_t(gcMaxMallocBytes);
-        gcMallocGCTriggered = false;
+        gc.mallocBytes = ptrdiff_t(gc.maxMallocBytes);
+        gc.mallocGCTriggered = false;
     }
 
     /*
      * Call this after allocating memory held by GC things, to update memory
      * pressure counters or report the OOM error if necessary. If oomError and
      * cx is not null the function also reports OOM error.
      *
      * The function must be called outside the GC lock and in case of OOM error
      * the caller must ensure that no deadlock possible during OOM reporting.
      */
     void updateMallocCounter(size_t nbytes);
     void updateMallocCounter(JS::Zone *zone, size_t nbytes);
 
     void reportAllocationOverflow() { js_ReportAllocationOverflow(nullptr); }
 
     bool isTooMuchMalloc() const {
-        return gcMallocBytes <= 0;
+        return gc.mallocBytes <= 0;
     }
 
     /*
      * The function must be called outside the GC lock.
      */
     JS_FRIEND_API(void) onTooMuchMalloc();
 
     /*
@@ -1844,17 +1495,17 @@ VersionIsKnown(JSVersion version)
 {
     return VersionNumber(version) != JSVERSION_UNKNOWN;
 }
 
 inline void
 FreeOp::free_(void *p)
 {
     if (shouldFreeLater()) {
-        runtime()->gcHelperThread.freeLater(p);
+        runtime()->gc.helperThread.freeLater(p);
         return;
     }
     js_free(p);
 }
 
 class AutoLockGC
 {
   public:
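
The needZealousGC() countdown above works the same way before and after the move into GCRuntime: each allocation decrements gc.nextScheduled, a zealous GC fires when it reaches zero, and for the periodic zeal modes the counter is reloaded from gc.zealFrequency so the collections keep recurring. A rough standalone sketch under those assumptions (ToyZealState and the ZealAllocValue constant here are illustrative placeholders):

    #include <cstdio>

    struct ToyZealState {
        int zealMode = 0;        // 0 means zeal disabled
        int zealFrequency = 0;   // allocations between zealous GCs
        int nextScheduled = 0;   // countdown to the next zealous GC
    };

    const int ZealAllocValue = 2;    // stands in for one of the periodic modes

    bool NeedZealousGC(ToyZealState &gc) {
        if (gc.nextScheduled > 0 && --gc.nextScheduled == 0) {
            if (gc.zealMode == ZealAllocValue)
                gc.nextScheduled = gc.zealFrequency;   // periodic: rearm the countdown
            return true;
        }
        return false;
    }

    int main() {
        ToyZealState gc;
        gc.zealMode = ZealAllocValue;
        gc.zealFrequency = 3;
        gc.nextScheduled = 3;

        int collections = 0;
        for (int alloc = 1; alloc <= 10; alloc++) {
            if (NeedZealousGC(gc))
                collections++;
        }
        printf("zealous GCs in 10 allocations: %d\n", collections);  // prints 3
        return 0;
    }
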
--- a/js/src/vm/ScopeObject.cpp
+++ b/js/src/vm/ScopeObject.cpp
@@ -1576,17 +1576,17 @@ DebugScopes::proxiedScopesPostWriteBarri
      */
     ObjectWeakMap::Base *baseHashMap = static_cast<ObjectWeakMap::Base *>(map);
 
     typedef HashMap<JSObject *, JSObject *> UnbarrieredMap;
     UnbarrieredMap *unbarrieredMap = reinterpret_cast<UnbarrieredMap *>(baseHashMap);
 
     typedef gc::HashKeyRef<UnbarrieredMap, JSObject *> Ref;
     if (key && IsInsideNursery(rt, key))
-        rt->gcStoreBuffer.putGeneric(Ref(unbarrieredMap, key.get()));
+        rt->gc.storeBuffer.putGeneric(Ref(unbarrieredMap, key.get()));
 #endif
 }
 
 #ifdef JSGC_GENERATIONAL
 class DebugScopes::MissingScopesRef : public gc::BufferableRef
 {
     MissingScopeMap *map;
     ScopeIterKey key;
@@ -1607,33 +1607,33 @@ class DebugScopes::MissingScopesRef : pu
 #endif
 
 /* static */ MOZ_ALWAYS_INLINE void
 DebugScopes::missingScopesPostWriteBarrier(JSRuntime *rt, MissingScopeMap *map,
                                            const ScopeIterKey &key)
 {
 #ifdef JSGC_GENERATIONAL
     if (key.enclosingScope() && IsInsideNursery(rt, key.enclosingScope()))
-        rt->gcStoreBuffer.putGeneric(MissingScopesRef(map, key));
+        rt->gc.storeBuffer.putGeneric(MissingScopesRef(map, key));
 #endif
 }
 
 /* static */ MOZ_ALWAYS_INLINE void
 DebugScopes::liveScopesPostWriteBarrier(JSRuntime *rt, LiveScopeMap *map, ScopeObject *key)
 {
 #ifdef JSGC_GENERATIONAL
     // As above.  Otherwise, barriers could fire during GC when moving the
     // value.
     typedef HashMap<ScopeObject *,
                     ScopeIterKey,
                     DefaultHasher<ScopeObject *>,
                     RuntimeAllocPolicy> UnbarrieredLiveScopeMap;
     typedef gc::HashKeyRef<UnbarrieredLiveScopeMap, ScopeObject *> Ref;
     if (key && IsInsideNursery(rt, key))
-        rt->gcStoreBuffer.putGeneric(Ref(reinterpret_cast<UnbarrieredLiveScopeMap *>(map), key));
+        rt->gc.storeBuffer.putGeneric(Ref(reinterpret_cast<UnbarrieredLiveScopeMap *>(map), key));
 #endif
 }
 
 DebugScopes::DebugScopes(JSContext *cx)
  : proxiedScopes(cx),
    missingScopes(cx->runtime()),
    liveScopes(cx->runtime())
 {}
--- a/js/src/vm/Shape.cpp
+++ b/js/src/vm/Shape.cpp
@@ -1521,17 +1521,17 @@ BaseShape::assertConsistency()
         JS_ASSERT(getObjectFlags() == unowned->getObjectFlags());
     }
 #endif
 }
 
 void
 JSCompartment::sweepBaseShapeTable()
 {
-    gcstats::AutoPhase ap(runtimeFromMainThread()->gcStats,
+    gcstats::AutoPhase ap(runtimeFromMainThread()->gc.stats,
                           gcstats::PHASE_SWEEP_TABLES_BASE_SHAPE);
 
     if (baseShapes.initialized()) {
         for (BaseShapeSet::Enum e(baseShapes); !e.empty(); e.popFront()) {
             UnownedBaseShape *base = e.front();
             if (IsBaseShapeAboutToBeFinalized(&base))
                 e.removeFront();
         }
@@ -1731,17 +1731,17 @@ EmptyShape::getInitialShape(ExclusiveCon
 #ifdef JSGC_GENERATIONAL
     if (cx->hasNursery()) {
         if ((protoRoot.isObject() && cx->nursery().isInside(protoRoot.toObject())) ||
             cx->nursery().isInside(parentRoot.get()) ||
             cx->nursery().isInside(metadataRoot.get()))
         {
             InitialShapeSetRef ref(
                 &table, clasp, protoRoot, parentRoot, metadataRoot, nfixed, objectFlags);
-            cx->asJSContext()->runtime()->gcStoreBuffer.putGeneric(ref);
+            cx->asJSContext()->runtime()->gc.storeBuffer.putGeneric(ref);
         }
     }
 #endif
 
     return shape;
 }
 
 /* static */ Shape *
@@ -1809,17 +1809,17 @@ EmptyShape::insertInitialShape(Exclusive
         JSContext *ncx = cx->asJSContext();
         ncx->runtime()->newObjectCache.invalidateEntriesForShape(ncx, shape, proto);
     }
 }
 
 void
 JSCompartment::sweepInitialShapeTable()
 {
-    gcstats::AutoPhase ap(runtimeFromMainThread()->gcStats,
+    gcstats::AutoPhase ap(runtimeFromMainThread()->gc.stats,
                           gcstats::PHASE_SWEEP_TABLES_INITIAL_SHAPE);
 
     if (initialShapes.initialized()) {
         for (InitialShapeSet::Enum e(initialShapes); !e.empty(); e.popFront()) {
             const InitialShapeEntry &entry = e.front();
             Shape *shape = entry.shape;
             JSObject *proto = entry.proto.raw();
             if (IsShapeAboutToBeFinalized(&shape) || (entry.proto.isObject() && IsObjectAboutToBeFinalized(&proto))) {