Bug 1325050 - Structure reorganization for multithreaded runtimes, r=jandem,jonco,h4writer,luke,lhansen,nbp.
author Brian Hackett <bhackett1024@gmail.com>
Thu, 02 Feb 2017 12:12:43 -0700
changeset 381337 d2758f635f72f779f712bf9c6e838868ed53c9f7
parent 381336 042e3d9aeca2c8c1e6b0c82fbd9042a59c69abcb
child 381338 4d40791b6d0ae318d961bbc02f028d72aa21c90c
push id 1468
push user asasaki@mozilla.com
push date Mon, 05 Jun 2017 19:31:07 +0000
treeherder mozilla-release@0641fc6ee9d1
reviewers jandem, jonco, h4writer, luke, lhansen, nbp
bugs 1325050
milestone 54.0a1
js/public/CharacterEncoding.h
js/public/Debug.h
js/public/GCAPI.h
js/public/HeapAPI.h
js/public/RootingAPI.h
js/src/builtin/AtomicsObject.cpp
js/src/builtin/AtomicsObject.h
js/src/builtin/DataViewObject.cpp
js/src/builtin/Eval.cpp
js/src/builtin/Intl.cpp
js/src/builtin/MapObject.cpp
js/src/builtin/ModuleObject.cpp
js/src/builtin/ModuleObject.h
js/src/builtin/Object.cpp
js/src/builtin/Profilers.cpp
js/src/builtin/Promise.cpp
js/src/builtin/TestingFunctions.cpp
js/src/builtin/TypedObject.cpp
js/src/frontend/BytecodeCompiler.cpp
js/src/frontend/BytecodeCompiler.h
js/src/frontend/BytecodeEmitter.cpp
js/src/frontend/BytecodeEmitter.h
js/src/frontend/FoldConstants.cpp
js/src/frontend/FoldConstants.h
js/src/frontend/FullParseHandler.h
js/src/frontend/NameCollections.h
js/src/frontend/NameFunctions.cpp
js/src/frontend/NameFunctions.h
js/src/frontend/ParseNode.h
js/src/frontend/Parser.cpp
js/src/frontend/Parser.h
js/src/frontend/SharedContext.h
js/src/frontend/SyntaxParseHandler.h
js/src/frontend/TokenStream.cpp
js/src/frontend/TokenStream.h
js/src/gc/Allocator.cpp
js/src/gc/Allocator.h
js/src/gc/AtomMarking.cpp
js/src/gc/AtomMarking.h
js/src/gc/Barrier.cpp
js/src/gc/GCInternals.h
js/src/gc/GCRuntime.h
js/src/gc/Heap.h
js/src/gc/Iteration.cpp
js/src/gc/Marking.cpp
js/src/gc/Marking.h
js/src/gc/MemoryProfiler.cpp
js/src/gc/Nursery-inl.h
js/src/gc/Nursery.cpp
js/src/gc/Nursery.h
js/src/gc/RootMarking.cpp
js/src/gc/Statistics.cpp
js/src/gc/StoreBuffer.cpp
js/src/gc/StoreBuffer.h
js/src/gc/Verifier.cpp
js/src/gc/Zone.cpp
js/src/gc/Zone.h
js/src/gc/ZoneGroup.cpp
js/src/gc/ZoneGroup.h
js/src/gdb/mozilla/unwind.py
js/src/irregexp/NativeRegExpMacroAssembler.cpp
js/src/irregexp/NativeRegExpMacroAssembler.h
js/src/irregexp/RegExpEngine.cpp
js/src/irregexp/RegExpInterpreter.cpp
js/src/irregexp/RegExpStack.cpp
js/src/irregexp/RegExpStack.h
js/src/jit/Bailouts.cpp
js/src/jit/BaselineBailouts.cpp
js/src/jit/BaselineCacheIRCompiler.cpp
js/src/jit/BaselineCompiler.cpp
js/src/jit/BaselineDebugModeOSR.cpp
js/src/jit/BaselineDebugModeOSR.h
js/src/jit/BaselineFrame.cpp
js/src/jit/BaselineIC.cpp
js/src/jit/BaselineJIT.cpp
js/src/jit/CacheIR.cpp
js/src/jit/CacheIRCompiler.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/CompileWrappers.cpp
js/src/jit/CompileWrappers.h
js/src/jit/ExecutableAllocator.cpp
js/src/jit/Ion.cpp
js/src/jit/Ion.h
js/src/jit/IonBuilder.cpp
js/src/jit/IonBuilder.h
js/src/jit/IonCacheIRCompiler.cpp
js/src/jit/JitCompartment.h
js/src/jit/JitFrames.cpp
js/src/jit/JitcodeMap.cpp
js/src/jit/Linker.cpp
js/src/jit/MCallOptimize.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/SharedIC.cpp
js/src/jit/SharedIC.h
js/src/jit/VMFunctions.cpp
js/src/jit/VMFunctions.h
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/Simulator-arm.cpp
js/src/jit/arm/Simulator-arm.h
js/src/jit/arm/Trampoline-arm.cpp
js/src/jit/arm64/Assembler-arm64.cpp
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
js/src/jit/arm64/vixl/Simulator-vixl.h
js/src/jit/mips-shared/Assembler-mips-shared.h
js/src/jit/mips32/Assembler-mips32.cpp
js/src/jit/mips32/Simulator-mips32.cpp
js/src/jit/mips32/Simulator-mips32.h
js/src/jit/mips32/Trampoline-mips32.cpp
js/src/jit/mips64/Assembler-mips64.cpp
js/src/jit/mips64/Simulator-mips64.cpp
js/src/jit/mips64/Simulator-mips64.h
js/src/jit/mips64/Trampoline-mips64.cpp
js/src/jit/none/MacroAssembler-none.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/Trampoline-x64.cpp
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/Trampoline-x86.cpp
js/src/jsalloc.cpp
js/src/jsalloc.h
js/src/jsapi-tests/testAssemblerBuffer.cpp
js/src/jsapi-tests/testFindSCCs.cpp
js/src/jsapi-tests/testGCCellPtr.cpp
js/src/jsapi-tests/testGCChunkPool.cpp
js/src/jsapi-tests/testGCExactRooting.cpp
js/src/jsapi-tests/testGCFinalizeCallback.cpp
js/src/jsapi-tests/testGCGrayMarking.cpp
js/src/jsapi-tests/testGCHeapPostBarriers.cpp
js/src/jsapi-tests/testGCMarking.cpp
js/src/jsapi-tests/testGCWeakRef.cpp
js/src/jsapi-tests/testIsInsideNursery.cpp
js/src/jsapi-tests/testJitMoveEmitterCycles.cpp
js/src/jsapi-tests/testPreserveJitCode.cpp
js/src/jsapi-tests/testProfileStrings.cpp
js/src/jsapi-tests/testWeakMap.cpp
js/src/jsapi.cpp
js/src/jsapi.h
js/src/jsarray.cpp
js/src/jsarray.h
js/src/jsatom.cpp
js/src/jsatom.h
js/src/jsatominlines.h
js/src/jsbool.cpp
js/src/jsbool.h
js/src/jscntxt.cpp
js/src/jscntxt.h
js/src/jscntxtinlines.h
js/src/jscompartment.cpp
js/src/jscompartment.h
js/src/jscompartmentinlines.h
js/src/jsexn.cpp
js/src/jsfriendapi.cpp
js/src/jsfriendapi.h
js/src/jsfun.cpp
js/src/jsfun.h
js/src/jsgc.cpp
js/src/jsgc.h
js/src/jsgcinlines.h
js/src/jshashutil.h
js/src/jsiter.cpp
js/src/jsmath.cpp
js/src/jsnum.cpp
js/src/jsnum.h
js/src/jsobj.cpp
js/src/jsobj.h
js/src/jsobjinlines.h
js/src/jsopcode.cpp
js/src/jspropertytree.cpp
js/src/jspropertytree.h
js/src/jspubtd.h
js/src/jsscript.cpp
js/src/jsscript.h
js/src/jsstr.cpp
js/src/jsstr.h
js/src/jswatchpoint.cpp
js/src/jsweakmap.cpp
js/src/jsweakmap.h
js/src/moz.build
js/src/proxy/CrossCompartmentWrapper.cpp
js/src/proxy/Proxy.cpp
js/src/proxy/Wrapper.cpp
js/src/shell/js.cpp
js/src/threading/ProtectedData.cpp
js/src/threading/ProtectedData.h
js/src/vm/ArgumentsObject.cpp
js/src/vm/ArrayBufferObject.cpp
js/src/vm/ArrayObject-inl.h
js/src/vm/ArrayObject.h
js/src/vm/Caches.cpp
js/src/vm/Caches.h
js/src/vm/CharacterEncoding.cpp
js/src/vm/CodeCoverage.cpp
js/src/vm/Debugger-inl.h
js/src/vm/Debugger.cpp
js/src/vm/Debugger.h
js/src/vm/DebuggerMemory.cpp
js/src/vm/EnvironmentObject.cpp
js/src/vm/EnvironmentObject.h
js/src/vm/ErrorObject.cpp
js/src/vm/ErrorObject.h
js/src/vm/GeckoProfiler.cpp
js/src/vm/GeneratorObject.h
js/src/vm/GlobalObject.cpp
js/src/vm/GlobalObject.h
js/src/vm/HelperThreads.cpp
js/src/vm/HelperThreads.h
js/src/vm/Initialization.cpp
js/src/vm/Interpreter-inl.h
js/src/vm/Interpreter.cpp
js/src/vm/MallocProvider.h
js/src/vm/MemoryMetrics.cpp
js/src/vm/MutexIDs.h
js/src/vm/NativeObject-inl.h
js/src/vm/NativeObject.cpp
js/src/vm/NativeObject.h
js/src/vm/ObjectGroup-inl.h
js/src/vm/ObjectGroup.cpp
js/src/vm/ObjectGroup.h
js/src/vm/Printer.cpp
js/src/vm/Printer.h
js/src/vm/Probes-inl.h
js/src/vm/Probes.h
js/src/vm/ProxyObject.cpp
js/src/vm/Realm.cpp
js/src/vm/RegExpObject.cpp
js/src/vm/RegExpObject.h
js/src/vm/RegExpStatics.cpp
js/src/vm/RegExpStatics.h
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
js/src/vm/SavedStacks.cpp
js/src/vm/Scope.cpp
js/src/vm/Scope.h
js/src/vm/SelfHosting.cpp
js/src/vm/Shape-inl.h
js/src/vm/Shape.cpp
js/src/vm/Shape.h
js/src/vm/Stack-inl.h
js/src/vm/Stack.cpp
js/src/vm/Stopwatch.cpp
js/src/vm/String-inl.h
js/src/vm/String.cpp
js/src/vm/String.h
js/src/vm/StringBuffer.cpp
js/src/vm/StringBuffer.h
js/src/vm/StringObject.h
js/src/vm/StructuredClone.cpp
js/src/vm/Symbol.cpp
js/src/vm/Symbol.h
js/src/vm/TraceLogging.cpp
js/src/vm/TraceLogging.h
js/src/vm/TypeInference-inl.h
js/src/vm/TypeInference.cpp
js/src/vm/TypeInference.h
js/src/vm/TypedArrayObject.cpp
js/src/vm/UbiNode.cpp
js/src/vm/UnboxedObject-inl.h
js/src/vm/UnboxedObject.cpp
js/src/vm/UnboxedObject.h
js/src/vm/Xdr.cpp
js/src/vm/Xdr.h
js/src/wasm/AsmJS.cpp
js/src/wasm/AsmJS.h
js/src/wasm/WasmCode.cpp
js/src/wasm/WasmCompartment.cpp
js/src/wasm/WasmCompile.cpp
js/src/wasm/WasmCompile.h
js/src/wasm/WasmInstance.cpp
js/src/wasm/WasmInstance.h
js/src/wasm/WasmJS.cpp
js/src/wasm/WasmJS.h
js/src/wasm/WasmSignalHandlers.cpp
js/src/wasm/WasmSignalHandlers.h
js/src/wasm/WasmTable.cpp
js/src/wasm/WasmTypes.cpp
js/src/wasm/WasmTypes.h
--- a/js/public/CharacterEncoding.h
+++ b/js/public/CharacterEncoding.h
@@ -7,20 +7,16 @@
 #ifndef js_CharacterEncoding_h
 #define js_CharacterEncoding_h
 
 #include "mozilla/Range.h"
 
 #include "js/TypeDecls.h"
 #include "js/Utility.h"
 
-namespace js {
-class ExclusiveContext;
-} // namespace js
-
 class JSFlatString;
 
 namespace JS {
 
 /*
  * By default, all C/C++ 1-byte-per-character strings passed into the JSAPI
  * are treated as ISO/IEC 8859-1, also known as Latin-1. That is, each
  * byte is treated as a 2-byte character, and there is no way to pass in a
@@ -217,29 +213,29 @@ class ConstTwoByteChars : public mozilla
  * contains any UTF-16 extension characters, then this may give invalid Latin1
  * output. The returned string is zero terminated. The returned string or the
  * returned string's |start()| must be freed with JS_free or js_free,
  * respectively. If allocation fails, an OOM error will be set and the method
  * will return a nullptr chars (which can be tested for with the ! operator).
  * This method cannot trigger GC.
  */
 extern Latin1CharsZ
-LossyTwoByteCharsToNewLatin1CharsZ(js::ExclusiveContext* cx,
+LossyTwoByteCharsToNewLatin1CharsZ(JSContext* cx,
                                    const mozilla::Range<const char16_t> tbchars);
 
 inline Latin1CharsZ
-LossyTwoByteCharsToNewLatin1CharsZ(js::ExclusiveContext* cx, const char16_t* begin, size_t length)
+LossyTwoByteCharsToNewLatin1CharsZ(JSContext* cx, const char16_t* begin, size_t length)
 {
     const mozilla::Range<const char16_t> tbchars(begin, length);
     return JS::LossyTwoByteCharsToNewLatin1CharsZ(cx, tbchars);
 }
 
 template <typename CharT>
 extern UTF8CharsZ
-CharsToNewUTF8CharsZ(js::ExclusiveContext* maybeCx, const mozilla::Range<CharT> chars);
+CharsToNewUTF8CharsZ(JSContext* maybeCx, const mozilla::Range<CharT> chars);
 
 uint32_t
 Utf8ToOneUcs4Char(const uint8_t* utf8Buffer, int utf8Length);
 
 /*
  * Inflate bytes in UTF-8 encoding to char16_t.
  * - On error, returns an empty TwoByteCharsZ.
  * - On success, returns a malloc'd TwoByteCharsZ, and updates |outlen| to hold
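
The net effect of the CharacterEncoding.h hunk above is that the public conversion entry points lose their js::ExclusiveContext overloads and take a plain JSContext*. A minimal sketch of a caller against the new signatures (the helper name NarrowGreeting is hypothetical; OOM handling elided, result freed by the caller with JS_free):

    #include "js/CharacterEncoding.h"

    // Hypothetical caller; both conversion entry points now take JSContext*.
    JS::Latin1CharsZ
    NarrowGreeting(JSContext* cx)
    {
        static const char16_t greeting[] = u"hello";
        return JS::LossyTwoByteCharsToNewLatin1CharsZ(cx, greeting, 5);
    }
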
--- a/js/public/Debug.h
+++ b/js/public/Debug.h
@@ -336,17 +336,17 @@ GetDebuggeeGlobals(JSContext* cx, JSObje
 // Construct an instance of this class on the stack, providing a JSContext
 // belonging to the runtime in which execution will occur. Each time we enter
 // JavaScript --- specifically, each time we push a JavaScript stack frame that
 // has no older JS frames younger than this AutoEntryMonitor --- we will
 // call the appropriate |Entry| member function to indicate where we've begun
 // execution.
 
 class MOZ_STACK_CLASS AutoEntryMonitor {
-    JSRuntime* runtime_;
+    JSContext* cx_;
     AutoEntryMonitor* savedMonitor_;
 
   public:
     explicit AutoEntryMonitor(JSContext* cx);
     ~AutoEntryMonitor();
 
     // SpiderMonkey reports the JavaScript entry points occuring within this
     // AutoEntryMonitor's scope to the following member functions, which the
--- a/js/public/GCAPI.h
+++ b/js/public/GCAPI.h
@@ -457,20 +457,20 @@ WasIncrementalGC(JSContext* cx);
  * Note: Generational GC is not yet enabled by default. The following class
  *       is non-functional unless SpiderMonkey was configured with
  *       --enable-gcgenerational.
  */
 
 /** Ensure that generational GC is disabled within some scope. */
 class JS_PUBLIC_API(AutoDisableGenerationalGC)
 {
-    js::gc::GCRuntime* gc;
+    JSContext* cx;
 
   public:
-    explicit AutoDisableGenerationalGC(JSRuntime* rt);
+    explicit AutoDisableGenerationalGC(JSContext* cx);
     ~AutoDisableGenerationalGC();
 };
 
 /**
  * Returns true if generational allocation and collection is currently enabled
  * on the given runtime.
  */
 extern JS_PUBLIC_API(bool)
@@ -501,23 +501,20 @@ class JS_PUBLIC_API(AutoRequireNoGC)
  * class is live. This class does not disable the static rooting hazard
  * analysis.
  *
  * This works by entering a GC unsafe region, which is checked on allocation and
  * on GC.
  */
 class JS_PUBLIC_API(AutoAssertNoGC) : public AutoRequireNoGC
 {
-    js::gc::GCRuntime* gc;
-    size_t gcNumber;
+    JSContext* cx_;
 
   public:
-    AutoAssertNoGC();
-    explicit AutoAssertNoGC(JSRuntime* rt);
-    explicit AutoAssertNoGC(JSContext* cx);
+    explicit AutoAssertNoGC(JSContext* cx = nullptr);
     ~AutoAssertNoGC();
 };
 
 /**
  * Assert if an allocation of a GC thing occurs while this class is live. This
  * class does not disable the static rooting hazard analysis.
  */
 class JS_PUBLIC_API(AutoAssertNoAlloc)
@@ -598,61 +595,61 @@ class JS_PUBLIC_API(AutoAssertGCCallback
  * heap.
  *
  * We only do the assertion checking in DEBUG builds.
  */
 #ifdef DEBUG
 class JS_PUBLIC_API(AutoCheckCannotGC) : public AutoAssertNoGC
 {
   public:
-    AutoCheckCannotGC() : AutoAssertNoGC() {}
-    explicit AutoCheckCannotGC(JSContext* cx) : AutoAssertNoGC(cx) {}
+    explicit AutoCheckCannotGC(JSContext* cx = nullptr) : AutoAssertNoGC(cx) {}
 } JS_HAZ_GC_INVALIDATED;
 #else
 class JS_PUBLIC_API(AutoCheckCannotGC) : public AutoRequireNoGC
 {
   public:
-    AutoCheckCannotGC() {}
-    explicit AutoCheckCannotGC(JSContext* cx) {}
+    explicit AutoCheckCannotGC(JSContext* cx = nullptr) {}
 } JS_HAZ_GC_INVALIDATED;
 #endif
 
 /**
  * Unsets the gray bit for anything reachable from |thing|. |kind| should not be
  * JS::TraceKind::Shape. |thing| should be non-null. The return value indicates
  * if anything was unmarked.
  */
 extern JS_FRIEND_API(bool)
 UnmarkGrayGCThingRecursively(GCCellPtr thing);
 
 } /* namespace JS */
 
 namespace js {
 namespace gc {
 
+extern JS_FRIEND_API(bool)
+BarriersAreAllowedOnCurrentThread();
+
 static MOZ_ALWAYS_INLINE void
 ExposeGCThingToActiveJS(JS::GCCellPtr thing)
 {
     MOZ_ASSERT(thing.kind() != JS::TraceKind::Shape);
 
     // GC things residing in the nursery cannot be gray: they have no mark bits.
     // All live objects in the nursery are moved to tenured at the beginning of
     // each GC slice, so the gray marker never sees nursery things.
     if (IsInsideNursery(thing.asCell()))
         return;
 
     // There's nothing to do for permanent GC things that might be owned by
     // another runtime.
     if (thing.mayBeOwnedByOtherRuntime())
         return;
 
-    JS::shadow::Runtime* rt = detail::GetCellRuntime(thing.asCell());
-    MOZ_DIAGNOSTIC_ASSERT(rt->allowGCBarriers());
+    MOZ_DIAGNOSTIC_ASSERT(BarriersAreAllowedOnCurrentThread());
 
-    if (IsIncrementalBarrierNeededOnTenuredGCThing(rt, thing))
+    if (IsIncrementalBarrierNeededOnTenuredGCThing(thing))
         JS::IncrementalReferenceBarrier(thing);
     else if (!thing.mayBeOwnedByOtherRuntime() && js::gc::detail::CellIsMarkedGray(thing.asCell()))
         JS::UnmarkGrayGCThingRecursively(thing);
 }
 
 static MOZ_ALWAYS_INLINE void
 MarkGCThingAsLive(JSRuntime* aRt, JS::GCCellPtr thing)
 {
@@ -661,20 +658,19 @@ MarkGCThingAsLive(JSRuntime* aRt, JS::GC
     if (IsInsideNursery(thing.asCell()))
         return;
 
     // There's nothing to do for permanent GC things that might be owned by
     // another runtime.
     if (thing.mayBeOwnedByOtherRuntime())
         return;
 
-    JS::shadow::Runtime* rt = JS::shadow::Runtime::asShadowRuntime(aRt);
-    MOZ_DIAGNOSTIC_ASSERT(rt->allowGCBarriers());
+    MOZ_DIAGNOSTIC_ASSERT(BarriersAreAllowedOnCurrentThread());
 
-    if (IsIncrementalBarrierNeededOnTenuredGCThing(rt, thing))
+    if (IsIncrementalBarrierNeededOnTenuredGCThing(thing))
         JS::IncrementalReferenceBarrier(thing);
 }
 
 } /* namespace gc */
 } /* namespace js */
 
 namespace JS {
 
--- a/js/public/HeapAPI.h
+++ b/js/public/HeapAPI.h
@@ -101,30 +101,23 @@ namespace shadow {
 
 struct Zone
 {
   protected:
     JSRuntime* const runtime_;
     JSTracer* const barrierTracer_;     // A pointer to the JSRuntime's |gcMarker|.
 
   public:
-    // Stack GC roots for Rooted GC pointers.
-    js::RootedListHeads stackRoots_;
-    template <typename T> friend class JS::Rooted;
-
     bool needsIncrementalBarrier_;
 
     Zone(JSRuntime* runtime, JSTracer* barrierTracerArg)
       : runtime_(runtime),
         barrierTracer_(barrierTracerArg),
         needsIncrementalBarrier_(false)
-    {
-        for (auto& stackRootPtr : stackRoots_)
-            stackRootPtr = nullptr;
-    }
+    {}
 
     bool needsIncrementalBarrier() const {
         return needsIncrementalBarrier_;
     }
 
     JSTracer* barrierTracer() {
         MOZ_ASSERT(needsIncrementalBarrier_);
         MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(runtime_));
@@ -297,25 +290,16 @@ static MOZ_ALWAYS_INLINE JS::Zone*
 GetGCThingZone(const uintptr_t addr)
 {
     MOZ_ASSERT(addr);
     const uintptr_t zone_addr = (addr & ~ArenaMask) | ArenaZoneOffset;
     return *reinterpret_cast<JS::Zone**>(zone_addr);
 
 }
 
-static MOZ_ALWAYS_INLINE JS::shadow::Runtime*
-GetCellRuntime(const Cell* cell)
-{
-    MOZ_ASSERT(cell);
-    const uintptr_t addr = uintptr_t(cell);
-    const uintptr_t rt_addr = (addr & ~ChunkMask) | ChunkRuntimeOffset;
-    return *reinterpret_cast<JS::shadow::Runtime**>(rt_addr);
-}
-
 static MOZ_ALWAYS_INLINE bool
 CellIsMarkedGray(const Cell* cell)
 {
     MOZ_ASSERT(cell);
     if (js::gc::IsInsideNursery(cell))
         return false;
 
     uintptr_t* word, mask;
@@ -374,24 +358,25 @@ extern JS_PUBLIC_API(JS::TraceKind)
 GCThingTraceKind(void* thing);
 
 } /* namespace JS */
 
 namespace js {
 namespace gc {
 
 static MOZ_ALWAYS_INLINE bool
-IsIncrementalBarrierNeededOnTenuredGCThing(JS::shadow::Runtime* rt, const JS::GCCellPtr thing)
+IsIncrementalBarrierNeededOnTenuredGCThing(const JS::GCCellPtr thing)
 {
     MOZ_ASSERT(thing);
     MOZ_ASSERT(!js::gc::IsInsideNursery(thing.asCell()));
 
-    // TODO: I'd like to assert !isHeapBusy() here but this gets called while we
-    // are tracing the heap, e.g. during memory reporting (see bug 1313318).
-    MOZ_ASSERT(!rt->isHeapCollecting());
+    // TODO: I'd like to assert !CurrentThreadIsHeapBusy() here but this gets
+    // called while we are tracing the heap, e.g. during memory reporting
+    // (see bug 1313318).
+    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
 
     JS::Zone* zone = JS::GetTenuredGCThingZone(thing);
     return JS::shadow::Zone::asShadowZone(zone)->needsIncrementalBarrier();
 }
 
 /**
  * Create an object providing access to the garbage collector's internal notion
  * of the current state of memory (both GC heap memory and GCthing-controlled
--- a/js/public/RootingAPI.h
+++ b/js/public/RootingAPI.h
@@ -750,33 +750,27 @@ namespace JS {
  * function that requires a handle, e.g. Foo(Root<T>(cx, x)).
  *
  * If you want to add additional methods to Rooted for a specific
  * specialization, define a RootedBase<T> specialization containing them.
  */
 template <typename T>
 class MOZ_RAII Rooted : public js::RootedBase<T, Rooted<T>>
 {
-    inline void registerWithRootLists(js::RootedListHeads& roots) {
+    inline void registerWithRootLists(RootedListHeads& roots) {
         this->stack = &roots[JS::MapTypeToRootKind<T>::kind];
         this->prev = *stack;
         *stack = reinterpret_cast<Rooted<void*>*>(this);
     }
 
-    inline js::RootedListHeads& rootLists(JS::RootingContext* cx) {
-        return rootLists(static_cast<js::ContextFriendFields*>(cx));
+    inline RootedListHeads& rootLists(RootingContext* cx) {
+        return cx->stackRoots_;
     }
-    inline js::RootedListHeads& rootLists(js::ContextFriendFields* cx) {
-        if (JS::Zone* zone = cx->zone_)
-            return JS::shadow::Zone::asShadowZone(zone)->stackRoots_;
-        MOZ_ASSERT(cx->isJSContext);
-        return cx->roots.stackRoots_;
-    }
-    inline js::RootedListHeads& rootLists(JSContext* cx) {
-        return rootLists(js::ContextFriendFields::get(cx));
+    inline RootedListHeads& rootLists(JSContext* cx) {
+        return rootLists(RootingContext::get(cx));
     }
 
   public:
     using ElementType = T;
 
     template <typename RootingContext>
     explicit Rooted(const RootingContext& cx)
       : ptr(GCPolicy<T>::initial())
@@ -1036,16 +1030,19 @@ template <typename T>
 inline
 MutableHandle<T>::MutableHandle(PersistentRooted<T>* root)
 {
     static_assert(sizeof(MutableHandle<T>) == sizeof(T*),
                   "MutableHandle must be binary compatible with T*.");
     ptr = root->address();
 }
 
+JS_PUBLIC_API(void)
+AddPersistentRoot(RootingContext* cx, RootKind kind, PersistentRooted<void*>* root);
+
 /**
  * A copyable, assignable global GC root type with arbitrary lifetime, an
  * infallible constructor, and automatic unrooting on destruction.
  *
  * These roots can be used in heap-allocated data structures, so they are not
  * associated with any particular JSContext or stack. They are registered with
  * the JSRuntime itself, without locking, so they require a full JSContext to be
  * initialized, not one of its more restricted superclasses. Initialization may
@@ -1079,50 +1076,51 @@ template<typename T>
 class PersistentRooted : public js::RootedBase<T, PersistentRooted<T>>,
                          private mozilla::LinkedListElement<PersistentRooted<T>>
 {
     using ListBase = mozilla::LinkedListElement<PersistentRooted<T>>;
 
     friend class mozilla::LinkedList<PersistentRooted>;
     friend class mozilla::LinkedListElement<PersistentRooted>;
 
-    void registerWithRootLists(js::RootLists& roots) {
+    void registerWithRootLists(RootingContext* cx) {
         MOZ_ASSERT(!initialized());
         JS::RootKind kind = JS::MapTypeToRootKind<T>::kind;
-        roots.heapRoots_[kind].insertBack(reinterpret_cast<JS::PersistentRooted<void*>*>(this));
-    }
-
-    js::RootLists& rootLists(JSContext* cx) {
-        return rootLists(JS::RootingContext::get(cx));
+        AddPersistentRoot(cx, kind, reinterpret_cast<JS::PersistentRooted<void*>*>(this));
     }
-    js::RootLists& rootLists(JS::RootingContext* cx) {
-        MOZ_ASSERT(cx->isJSContext);
-        return cx->roots;
-    }
-
-    // Disallow ExclusiveContext*.
-    js::RootLists& rootLists(js::ContextFriendFields* cx) = delete;
 
   public:
     using ElementType = T;
 
     PersistentRooted() : ptr(GCPolicy<T>::initial()) {}
 
-    template <typename RootingContext>
-    explicit PersistentRooted(const RootingContext& cx)
+    explicit PersistentRooted(RootingContext* cx)
+      : ptr(GCPolicy<T>::initial())
+    {
+        registerWithRootLists(cx);
+    }
+
+    explicit PersistentRooted(JSContext* cx)
       : ptr(GCPolicy<T>::initial())
     {
-        registerWithRootLists(rootLists(cx));
+        registerWithRootLists(RootingContext::get(cx));
     }
 
-    template <typename RootingContext, typename U>
-    PersistentRooted(const RootingContext& cx, U&& initial)
+    template <typename U>
+    PersistentRooted(RootingContext* cx, U&& initial)
       : ptr(mozilla::Forward<U>(initial))
     {
-        registerWithRootLists(rootLists(cx));
+        registerWithRootLists(cx);
+    }
+
+    template <typename U>
+    PersistentRooted(JSContext* cx, U&& initial)
+      : ptr(mozilla::Forward<U>(initial))
+    {
+        registerWithRootLists(RootingContext::get(cx));
     }
 
     PersistentRooted(const PersistentRooted& rhs)
       : mozilla::LinkedListElement<PersistentRooted<T>>(),
         ptr(rhs.ptr)
     {
         /*
          * Copy construction takes advantage of the fact that the original
@@ -1134,25 +1132,24 @@ class PersistentRooted : public js::Root
          */
         const_cast<PersistentRooted&>(rhs).setNext(this);
     }
 
     bool initialized() {
         return ListBase::isInList();
     }
 
-    template <typename RootingContext>
-    void init(const RootingContext& cx) {
+    void init(JSContext* cx) {
         init(cx, GCPolicy<T>::initial());
     }
 
-    template <typename RootingContext, typename U>
-    void init(const RootingContext& cx, U&& initial) {
+    template <typename U>
+    void init(JSContext* cx, U&& initial) {
         ptr = mozilla::Forward<U>(initial);
-        registerWithRootLists(rootLists(cx));
+        registerWithRootLists(RootingContext::get(cx));
     }
 
     void reset() {
         if (initialized()) {
             set(GCPolicy<T>::initial());
             ListBase::remove();
         }
     }
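
The RootingAPI.h hunks above replace the templated PersistentRooted constructors with explicit JSContext*/RootingContext* overloads that register through the new AddPersistentRoot(). A minimal usage sketch under those signatures (ObjectCache is a hypothetical type):

    #include "js/RootingAPI.h"

    // Hypothetical heap-allocated holder; registration happens in the constructor.
    struct ObjectCache
    {
        JS::PersistentRooted<JSObject*> obj;

        ObjectCache(JSContext* cx, JSObject* initial)
          : obj(cx, initial)   // registers via AddPersistentRoot(RootingContext::get(cx), ...)
        {}
    };

    // Deferred initialization still works:
    //     JS::PersistentRooted<JSObject*> root;
    //     root.init(cx, someObject);
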
--- a/js/src/builtin/AtomicsObject.cpp
+++ b/js/src/builtin/AtomicsObject.cpp
@@ -694,51 +694,51 @@ js::atomics_cmpxchg_asm_callout(wasm::In
 }
 
 namespace js {
 
 // Represents one waiting worker.
 //
 // The type is declared opaque in SharedArrayObject.h.  Instances of
 // js::FutexWaiter are stack-allocated and linked onto a list across a
-// call to FutexRuntime::wait().
+// call to FutexThread::wait().
 //
 // The 'waiters' field of the SharedArrayRawBuffer points to the highest
 // priority waiter in the list, and lower priority nodes are linked through
 // the 'lower_pri' field.  The 'back' field goes the other direction.
 // The list is circular, so the 'lower_pri' field of the lowest priority
 // node points to the first node in the list.  The list has no dedicated
 // header node.
 
 class FutexWaiter
 {
   public:
-    FutexWaiter(uint32_t offset, JSRuntime* rt)
+    FutexWaiter(uint32_t offset, JSContext* cx)
       : offset(offset),
-        rt(rt),
+        cx(cx),
         lower_pri(nullptr),
         back(nullptr)
     {
     }
 
     uint32_t    offset;                 // int32 element index within the SharedArrayBuffer
-    JSRuntime*  rt;                    // The runtime of the waiter
+    JSContext* cx;                      // The waiting thread
     FutexWaiter* lower_pri;             // Lower priority nodes in circular doubly-linked list of waiters
     FutexWaiter* back;                  // Other direction
 };
 
 class AutoLockFutexAPI
 {
     // We have to wrap this in a Maybe because of the way loading
     // mozilla::Atomic pointers works.
     mozilla::Maybe<js::UniqueLock<js::Mutex>> unique_;
 
   public:
     AutoLockFutexAPI() {
-        js::Mutex* lock = FutexRuntime::lock_;
+        js::Mutex* lock = FutexThread::lock_;
         unique_.emplace(*lock);
     }
 
     ~AutoLockFutexAPI() {
         unique_.reset();
     }
 
     js::UniqueLock<js::Mutex>& unique() { return *unique_; }
@@ -751,18 +751,16 @@ js::atomics_wait(JSContext* cx, unsigned
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     HandleValue objv = args.get(0);
     HandleValue idxv = args.get(1);
     HandleValue valv = args.get(2);
     HandleValue timeoutv = args.get(3);
     MutableHandleValue r = args.rval();
 
-    JSRuntime* rt = cx->runtime();
-
     Rooted<TypedArrayObject*> view(cx, nullptr);
     if (!GetSharedTypedArray(cx, objv, &view))
         return false;
     if (view->type() != Scalar::Int32)
         return ReportBadArrayType(cx);
     uint32_t offset;
     if (!GetTypedArrayIndex(cx, idxv, view, &offset))
         return false;
@@ -777,51 +775,51 @@ js::atomics_wait(JSContext* cx, unsigned
         if (!mozilla::IsNaN(timeout_ms)) {
             if (timeout_ms < 0)
                 timeout = mozilla::Some(mozilla::TimeDuration::FromSeconds(0.0));
             else if (!mozilla::IsInfinite(timeout_ms))
                 timeout = mozilla::Some(mozilla::TimeDuration::FromMilliseconds(timeout_ms));
         }
     }
 
-    if (!rt->fx.canWait())
+    if (!cx->fx.canWait())
         return ReportCannotWait(cx);
 
     // This lock also protects the "waiters" field on SharedArrayRawBuffer,
     // and it provides the necessary memory fence.
     AutoLockFutexAPI lock;
 
     SharedMem<int32_t*> addr = view->viewDataShared().cast<int32_t*>() + offset;
     if (jit::AtomicOperations::loadSafeWhenRacy(addr) != value) {
         r.setString(cx->names().futexNotEqual);
         return true;
     }
 
     Rooted<SharedArrayBufferObject*> sab(cx, view->bufferShared());
     SharedArrayRawBuffer* sarb = sab->rawBufferObject();
 
-    FutexWaiter w(offset, rt);
+    FutexWaiter w(offset, cx);
     if (FutexWaiter* waiters = sarb->waiters()) {
         w.lower_pri = waiters;
         w.back = waiters->back;
         waiters->back->lower_pri = &w;
         waiters->back = &w;
     } else {
         w.lower_pri = w.back = &w;
         sarb->setWaiters(&w);
     }
 
-    FutexRuntime::WaitResult result = FutexRuntime::FutexOK;
-    bool retval = rt->fx.wait(cx, lock.unique(), timeout, &result);
+    FutexThread::WaitResult result = FutexThread::FutexOK;
+    bool retval = cx->fx.wait(cx, lock.unique(), timeout, &result);
     if (retval) {
         switch (result) {
-          case FutexRuntime::FutexOK:
+          case FutexThread::FutexOK:
             r.setString(cx->names().futexOK);
             break;
-          case FutexRuntime::FutexTimedOut:
+          case FutexThread::FutexTimedOut:
             r.setString(cx->names().futexTimedOut);
             break;
         }
     }
 
     if (w.lower_pri == &w) {
         sarb->setWaiters(nullptr);
     } else {
@@ -867,106 +865,106 @@ js::atomics_wake(JSContext* cx, unsigned
     int32_t woken = 0;
 
     FutexWaiter* waiters = sarb->waiters();
     if (waiters && count > 0) {
         FutexWaiter* iter = waiters;
         do {
             FutexWaiter* c = iter;
             iter = iter->lower_pri;
-            if (c->offset != offset || !c->rt->fx.isWaiting())
+            if (c->offset != offset || !c->cx->fx.isWaiting())
                 continue;
-            c->rt->fx.wake(FutexRuntime::WakeExplicit);
+            c->cx->fx.wake(FutexThread::WakeExplicit);
             ++woken;
             --count;
         } while (count > 0 && iter != waiters);
     }
 
     r.setInt32(woken);
     return true;
 }
 
 /* static */ bool
-js::FutexRuntime::initialize()
+js::FutexThread::initialize()
 {
     MOZ_ASSERT(!lock_);
-    lock_ = js_new<js::Mutex>(mutexid::FutexRuntime);
+    lock_ = js_new<js::Mutex>(mutexid::FutexThread);
     return lock_ != nullptr;
 }
 
 /* static */ void
-js::FutexRuntime::destroy()
+js::FutexThread::destroy()
 {
     if (lock_) {
         js::Mutex* lock = lock_;
         js_delete(lock);
         lock_ = nullptr;
     }
 }
 
 /* static */ void
-js::FutexRuntime::lock()
+js::FutexThread::lock()
 {
     // Load the atomic pointer.
     js::Mutex* lock = lock_;
 
     lock->lock();
 }
 
-/* static */ mozilla::Atomic<js::Mutex*> FutexRuntime::lock_;
+/* static */ mozilla::Atomic<js::Mutex*> FutexThread::lock_;
 
 /* static */ void
-js::FutexRuntime::unlock()
+js::FutexThread::unlock()
 {
     // Load the atomic pointer.
     js::Mutex* lock = lock_;
 
     lock->unlock();
 }
 
-js::FutexRuntime::FutexRuntime()
+js::FutexThread::FutexThread()
   : cond_(nullptr),
     state_(Idle),
     canWait_(false)
 {
 }
 
 bool
-js::FutexRuntime::initInstance()
+js::FutexThread::initInstance()
 {
     MOZ_ASSERT(lock_);
     cond_ = js_new<js::ConditionVariable>();
     return cond_ != nullptr;
 }
 
 void
-js::FutexRuntime::destroyInstance()
+js::FutexThread::destroyInstance()
 {
     if (cond_)
         js_delete(cond_);
 }
 
 bool
-js::FutexRuntime::isWaiting()
+js::FutexThread::isWaiting()
 {
     // When a worker is awoken for an interrupt it goes into state
     // WaitingNotifiedForInterrupt for a short time before it actually
     // wakes up and goes into WaitingInterrupted.  In those states the
     // worker is still waiting, and if an explicit wake arrives the
     // worker transitions to Woken.  See further comments in
-    // FutexRuntime::wait().
+    // FutexThread::wait().
     return state_ == Waiting || state_ == WaitingInterrupted || state_ == WaitingNotifiedForInterrupt;
 }
 
 bool
-js::FutexRuntime::wait(JSContext* cx, js::UniqueLock<js::Mutex>& locked,
+js::FutexThread::wait(JSContext* cx, js::UniqueLock<js::Mutex>& locked,
                        mozilla::Maybe<mozilla::TimeDuration>& timeout, WaitResult* result)
 {
-    MOZ_ASSERT(&cx->runtime()->fx == this);
-    MOZ_ASSERT(cx->runtime()->fx.canWait());
+    MOZ_ASSERT(&cx->fx == this);
+    MOZ_ASSERT(cx->fx.canWait());
     MOZ_ASSERT(state_ == Idle || state_ == WaitingInterrupted);
 
     // Disallow waiting when a runtime is processing an interrupt.
     // See explanation below.
 
     if (state_ == WaitingInterrupted) {
         UnlockGuard<Mutex> unlock(locked);
         JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_ATOMICS_WAIT_NOT_ALLOWED);
@@ -1003,32 +1001,32 @@ js::FutexRuntime::wait(JSContext* cx, js
 
         if (isTimed) {
             mozilla::Unused << cond_->wait_until(locked, *sliceEnd);
         } else {
             cond_->wait(locked);
         }
 
         switch (state_) {
-          case FutexRuntime::Waiting:
+          case FutexThread::Waiting:
             // Timeout or spurious wakeup.
             if (isTimed) {
                 auto now = mozilla::TimeStamp::Now();
                 if (now >= *finalEnd) {
                     *result = FutexTimedOut;
                     return true;
                 }
             }
             break;
 
-          case FutexRuntime::Woken:
+          case FutexThread::Woken:
             *result = FutexOK;
             return true;
 
-          case FutexRuntime::WaitingNotifiedForInterrupt:
+          case FutexThread::WaitingNotifiedForInterrupt:
             // The interrupt handler may reenter the engine.  In that case
             // there are two complications:
             //
             // - The waiting thread is not actually waiting on the
             //   condition variable so we have to record that it
             //   should be woken when the interrupt handler returns.
             //   To that end, we flag the thread as interrupted around
             //   the interrupt and check state_ when the interrupt
@@ -1051,33 +1049,33 @@ js::FutexRuntime::wait(JSContext* cx, js
             //   occasionally (very rarely) be surprising but is
             //   expedient.  Other solutions exist, see bug #1131943.  The
             //   code that performs the check is above, at the head of
             //   this function.
 
             state_ = WaitingInterrupted;
             {
                 UnlockGuard<Mutex> unlock(locked);
-                if (!cx->runtime()->handleInterrupt(cx))
+                if (!cx->handleInterrupt())
                     return false;
             }
             if (state_ == Woken) {
                 *result = FutexOK;
                 return true;
             }
             break;
 
           default:
             MOZ_CRASH("Bad FutexState in wait()");
         }
     }
 }
 
 void
-js::FutexRuntime::wake(WakeReason reason)
+js::FutexThread::wake(WakeReason reason)
 {
     MOZ_ASSERT(isWaiting());
 
     if ((state_ == WaitingInterrupted || state_ == WaitingNotifiedForInterrupt) && reason == WakeExplicit) {
         state_ = Woken;
         return;
     }
     switch (reason) {
@@ -1085,17 +1083,17 @@ js::FutexRuntime::wake(WakeReason reason
         state_ = Woken;
         break;
       case WakeForJSInterrupt:
         if (state_ == WaitingNotifiedForInterrupt)
             return;
         state_ = WaitingNotifiedForInterrupt;
         break;
       default:
-        MOZ_CRASH("bad WakeReason in FutexRuntime::wake()");
+        MOZ_CRASH("bad WakeReason in FutexThread::wake()");
     }
     cond_->notify_all();
 }
 
 const JSFunctionSpec AtomicsMethods[] = {
     JS_INLINABLE_FN("compareExchange",    atomics_compareExchange,    4,0, AtomicsCompareExchange),
     JS_INLINABLE_FN("load",               atomics_load,               2,0, AtomicsLoad),
     JS_INLINABLE_FN("store",              atomics_store,              3,0, AtomicsStore),
--- a/js/src/builtin/AtomicsObject.h
+++ b/js/src/builtin/AtomicsObject.h
@@ -43,28 +43,28 @@ namespace wasm { class Instance; }
 int32_t atomics_add_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
 int32_t atomics_sub_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
 int32_t atomics_and_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
 int32_t atomics_or_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
 int32_t atomics_xor_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
 int32_t atomics_cmpxchg_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t oldval, int32_t newval);
 int32_t atomics_xchg_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
 
-class FutexRuntime
+class FutexThread
 {
     friend class AutoLockFutexAPI;
 
 public:
     static MOZ_MUST_USE bool initialize();
     static void destroy();
 
     static void lock();
     static void unlock();
 
-    FutexRuntime();
+    FutexThread();
     MOZ_MUST_USE bool initInstance();
     void destroyInstance();
 
     // Parameters to wake().
     enum WakeReason {
         WakeExplicit,           // Being asked to wake up by another thread
         WakeForJSInterrupt      // Interrupt requested
     };
@@ -84,17 +84,17 @@ public:
     // wait.
     //
     // wait() will not wake up spuriously.  It will return true and
     // set *result to a return code appropriate for
     // Atomics.wait() on success, and return false on error.
     MOZ_MUST_USE bool wait(JSContext* cx, js::UniqueLock<js::Mutex>& locked,
                            mozilla::Maybe<mozilla::TimeDuration>& timeout, WaitResult* result);
 
-    // Wake the thread represented by this Runtime.
+    // Wake the thread this is associated with.
     //
     // The futex lock must be held around this call.  (The sleeping
     // thread will not wake up until the caller of Atomics.wake()
     // releases the lock.)
     //
     // If the thread is not waiting then this method does nothing.
     //
     // If the thread is waiting in a call to wait() and the
@@ -105,17 +105,17 @@ public:
     // reason is WakeForJSInterrupt then the wait() will return
     // with WaitingNotifiedForInterrupt; in the latter case the caller
     // of wait() must handle the interrupt.
     void wake(WakeReason reason);
 
     bool isWaiting();
 
     // If canWait() returns false (the default) then wait() is disabled
-    // on the runtime to which the FutexRuntime belongs.
+    // on the thread to which the FutexThread belongs.
     bool canWait() {
         return canWait_;
     }
 
     void setCanWait(bool flag) {
         canWait_ = flag;
     }
 
@@ -140,17 +140,17 @@ public:
     FutexState state_;
 
     // Shared futex lock for all runtimes.  We can perhaps do better,
     // but any lock will need to be per-domain (consider SharedWorker)
     // or coarser.
     static mozilla::Atomic<js::Mutex*> lock_;
 
     // A flag that controls whether waiting is allowed.
-    bool canWait_;
+    ThreadLocalData<bool> canWait_;
 };
 
 JSObject*
 InitAtomicsClass(JSContext* cx, HandleObject obj);
 
 }  /* namespace js */
 
 #endif /* builtin_AtomicsObject_h */
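
As the comments in this header note, waiting is disabled on a thread unless canWait() returns true, and js::atomics_wait reports an error otherwise. A hypothetical sketch, for SpiderMonkey-internal code, of enabling it through the per-context FutexThread reached as cx->fx in the .cpp changes above:

    // Hypothetical snippet: enable Atomics.wait on this thread's context.
    void
    AllowAtomicsWaitHere(JSContext* cx)
    {
        cx->fx.setCanWait(true);     // default is false, so wait() would otherwise fail
        MOZ_ASSERT(cx->fx.canWait());
    }
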
--- a/js/src/builtin/DataViewObject.cpp
+++ b/js/src/builtin/DataViewObject.cpp
@@ -109,27 +109,27 @@ DataViewObject::create(JSContext* cx, ui
     SharedMem<uint8_t*> ptr = arrayBuffer->dataPointerEither();
     // A pointer to raw shared memory is exposed through the private slot.  This
     // is safe so long as getPrivate() is not used willy-nilly.  It is wrapped in
     // other accessors in TypedArrayObject.h.
     dvobj.initPrivate(ptr.unwrap(/*safe - see above*/) + byteOffset);
 
     // Include a barrier if the data view's data pointer is in the nursery, as
     // is done for typed arrays.
-    if (!IsInsideNursery(obj) && cx->runtime()->gc.nursery.isInside(ptr)) {
+    if (!IsInsideNursery(obj) && cx->nursery().isInside(ptr)) {
         // Shared buffer data should never be nursery-allocated, so we
         // need to fail here if isSharedMemory.  However, mmap() can
         // place a SharedArrayRawBuffer up against the bottom end of a
         // nursery chunk, and a zero-length buffer will erroneously be
         // perceived as being inside the nursery; sidestep that.
         if (isSharedMemory) {
             MOZ_ASSERT(arrayBuffer->byteLength() == 0 &&
                        (uintptr_t(ptr.unwrapValue()) & gc::ChunkMask) == 0);
         } else {
-            cx->runtime()->gc.storeBuffer.putWholeCell(obj);
+            cx->zone()->group()->storeBuffer().putWholeCell(obj);
         }
     }
 
     // Verify that the private slot is at the expected place
     MOZ_ASSERT(dvobj.numFixedSlots() == TypedArrayObject::DATA_SLOT);
 
     if (arrayBuffer->is<ArrayBufferObject>()) {
         if (!arrayBuffer->as<ArrayBufferObject>().addView(cx, &dvobj))
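
The DataViewObject.cpp hunk above illustrates the general relocation of generational-GC state: the nursery is reached via cx->nursery() and the store buffer via the zone group rather than the runtime. A minimal sketch of that post-barrier pattern, assuming a hypothetical helper name:

    // Hypothetical helper mirroring the pattern above; previously this went through
    // cx->runtime()->gc.nursery and cx->runtime()->gc.storeBuffer.
    static void
    PostBarrierIfNurseryData(JSContext* cx, JSObject* obj, const void* dataPtr)
    {
        if (!IsInsideNursery(obj) && cx->nursery().isInside(dataPtr))
            cx->zone()->group()->storeBuffer().putWholeCell(obj);
    }
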
--- a/js/src/builtin/Eval.cpp
+++ b/js/src/builtin/Eval.cpp
@@ -89,33 +89,33 @@ class EvalScriptGuard
 
     ~EvalScriptGuard() {
         if (script_ && !cx_->isExceptionPending()) {
             script_->cacheForEval();
             EvalCacheEntry cacheEntry = {lookupStr_, script_, lookup_.callerScript, lookup_.pc};
             lookup_.str = lookupStr_;
             if (lookup_.str && IsEvalCacheCandidate(script_)) {
                 // Ignore failure to add cache entry.
-                if (!p_->add(cx_, cx_->caches.evalCache, lookup_, cacheEntry))
+                if (!p_->add(cx_, cx_->caches().evalCache, lookup_, cacheEntry))
                     cx_->recoverFromOutOfMemory();
             }
         }
     }
 
     void lookupInEvalCache(JSLinearString* str, JSScript* callerScript, jsbytecode* pc)
     {
         lookupStr_ = str;
         lookup_.str = str;
         lookup_.callerScript = callerScript;
         lookup_.version = cx_->findVersion();
         lookup_.pc = pc;
-        p_.emplace(cx_, cx_->caches.evalCache, lookup_);
+        p_.emplace(cx_, cx_->caches().evalCache, lookup_);
         if (*p_) {
             script_ = (*p_)->script;
-            p_->remove(cx_, cx_->caches.evalCache, lookup_);
+            p_->remove(cx_, cx_->caches().evalCache, lookup_);
             script_->uncacheForEval();
         }
     }
 
     void setNewScript(JSScript* script) {
         // JSScript::initFromEmitter has already called js_CallNewScriptHook.
         MOZ_ASSERT(!script_ && script);
         script_ = script;
--- a/js/src/builtin/Intl.cpp
+++ b/js/src/builtin/Intl.cpp
@@ -2816,17 +2816,17 @@ js::SharedIntlData::destroyInstance()
     ianaZonesTreatedAsLinksByICU.finish();
     ianaLinksCanonicalizedDifferentlyByICU.finish();
 }
 
 void
 js::SharedIntlData::trace(JSTracer* trc)
 {
     // Atoms are always tenured.
-    if (!trc->runtime()->isHeapMinorCollecting()) {
+    if (!JS::CurrentThreadIsHeapMinorCollecting()) {
         availableTimeZones.trace(trc);
         ianaZonesTreatedAsLinksByICU.trace(trc);
         ianaLinksCanonicalizedDifferentlyByICU.trace(trc);
     }
 }
 
 size_t
 js::SharedIntlData::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
@@ -2838,17 +2838,17 @@ js::SharedIntlData::sizeOfExcludingThis(
 
 bool
 js::intl_IsValidTimeZoneName(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     MOZ_ASSERT(args.length() == 1);
     MOZ_ASSERT(args[0].isString());
 
-    SharedIntlData& sharedIntlData = cx->sharedIntlData;
+    SharedIntlData& sharedIntlData = cx->runtime()->sharedIntlData.ref();
 
     RootedString timeZone(cx, args[0].toString());
     RootedString validatedTimeZone(cx);
     if (!sharedIntlData.validateTimeZoneName(cx, timeZone, &validatedTimeZone))
         return false;
 
     if (validatedTimeZone) {
         cx->markAtom(validatedTimeZone);
@@ -2862,17 +2862,17 @@ js::intl_IsValidTimeZoneName(JSContext* 
 
 bool
 js::intl_canonicalizeTimeZone(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     MOZ_ASSERT(args.length() == 1);
     MOZ_ASSERT(args[0].isString());
 
-    SharedIntlData& sharedIntlData = cx->sharedIntlData;
+    SharedIntlData& sharedIntlData = cx->runtime()->sharedIntlData.ref();
 
     // Some time zone names are canonicalized differently by ICU -- handle
     // those first:
     RootedString timeZone(cx, args[0].toString());
     RootedString ianaTimeZone(cx);
     if (!sharedIntlData.tryCanonicalizeTimeZoneConsistentWithIANA(cx, timeZone, &ianaTimeZone))
         return false;
 
--- a/js/src/builtin/MapObject.cpp
+++ b/js/src/builtin/MapObject.cpp
@@ -97,19 +97,19 @@ HashableValue::hash(const mozilla::HashC
 bool
 HashableValue::operator==(const HashableValue& other) const
 {
     // Two HashableValues are equal if they have equal bits.
     bool b = (value.asRawBits() == other.value.asRawBits());
 
 #ifdef DEBUG
     bool same;
-    JS::RootingContext* rcx = GetJSContextFromMainThread();
-    RootedValue valueRoot(rcx, value);
-    RootedValue otherRoot(rcx, other.value);
+    JSContext* cx = TlsContext.get();
+    RootedValue valueRoot(cx, value);
+    RootedValue otherRoot(cx, other.value);
     MOZ_ASSERT(SameValue(nullptr, valueRoot, otherRoot, &same));
     MOZ_ASSERT(same == b);
 #endif
     return b;
 }
 
 HashableValue
 HashableValue::trace(JSTracer* trc) const
@@ -459,17 +459,17 @@ WriteBarrierPostImpl(JSRuntime* rt, Obje
         return true;
 
     NurseryKeysVector* keys = GetNurseryKeys(obj);
     if (!keys) {
         keys = AllocNurseryKeys(obj);
         if (!keys)
             return false;
 
-        rt->gc.storeBuffer.putGeneric(OrderedHashTableRef<ObjectT>(obj));
+        key->zone()->group()->storeBuffer().putGeneric(OrderedHashTableRef<ObjectT>(obj));
     }
 
     if (!keys->append(key))
         return false;
 
     return true;
 }
 
--- a/js/src/builtin/ModuleObject.cpp
+++ b/js/src/builtin/ModuleObject.cpp
@@ -110,17 +110,17 @@ GlobalObject::initImportEntryProto(JSCon
     if (!DefinePropertiesAndFunctions(cx, proto, protoAccessors, nullptr))
         return false;
 
     global->setReservedSlot(IMPORT_ENTRY_PROTO, ObjectValue(*proto));
     return true;
 }
 
 /* static */ ImportEntryObject*
-ImportEntryObject::create(ExclusiveContext* cx,
+ImportEntryObject::create(JSContext* cx,
                           HandleAtom moduleRequest,
                           HandleAtom importName,
                           HandleAtom localName)
 {
     RootedObject proto(cx, cx->global()->getImportEntryPrototype());
     RootedObject obj(cx, NewObjectWithGivenProto(cx, &class_, proto));
     if (!obj)
         return nullptr;
@@ -182,17 +182,17 @@ GlobalObject::initExportEntryProto(JSCon
 
 static Value
 StringOrNullValue(JSString* maybeString)
 {
     return maybeString ? StringValue(maybeString) : NullValue();
 }
 
 /* static */ ExportEntryObject*
-ExportEntryObject::create(ExclusiveContext* cx,
+ExportEntryObject::create(JSContext* cx,
                           HandleAtom maybeExportName,
                           HandleAtom maybeModuleRequest,
                           HandleAtom maybeImportName,
                           HandleAtom maybeLocalName)
 {
     RootedObject proto(cx, cx->global()->getExportEntryPrototype());
     RootedObject obj(cx, NewObjectWithGivenProto(cx, &class_, proto));
     if (!obj)
@@ -571,17 +571,17 @@ DEFINE_ARRAY_SLOT_ACCESSOR(ModuleObject,
 
 /* static */ bool
 ModuleObject::isInstance(HandleValue value)
 {
     return value.isObject() && value.toObject().is<ModuleObject>();
 }
 
 /* static */ ModuleObject*
-ModuleObject::create(ExclusiveContext* cx)
+ModuleObject::create(JSContext* cx)
 {
     RootedObject proto(cx, cx->global()->getModulePrototype());
     RootedObject obj(cx, NewObjectWithGivenProto(cx, &class_, proto));
     if (!obj)
         return nullptr;
 
     RootedModuleObject self(cx, &obj->as<ModuleObject>());
 
@@ -848,17 +848,17 @@ ModuleObject::createEnvironment()
     // The environment has already been created, we just neet to set it in the
     // right slot.
     MOZ_ASSERT(!getReservedSlot(InitialEnvironmentSlot).isUndefined());
     MOZ_ASSERT(getReservedSlot(EnvironmentSlot).isUndefined());
     setReservedSlot(EnvironmentSlot, getReservedSlot(InitialEnvironmentSlot));
 }
 
 bool
-ModuleObject::noteFunctionDeclaration(ExclusiveContext* cx, HandleAtom name, HandleFunction fun)
+ModuleObject::noteFunctionDeclaration(JSContext* cx, HandleAtom name, HandleFunction fun)
 {
     FunctionDeclarationVector* funDecls = functionDeclarations();
     if (!funDecls->emplaceBack(name, fun)) {
         ReportOutOfMemory(cx);
         return false;
     }
 
     return true;
@@ -1009,17 +1009,17 @@ GlobalObject::initModuleProto(JSContext*
 
 #undef DEFINE_GETTER_FUNCTIONS
 #undef DEFINE_STRING_ACCESSOR_METHOD
 #undef DEFINE_ARRAY_SLOT_ACCESSOR
 
 ///////////////////////////////////////////////////////////////////////////
 // ModuleBuilder
 
-ModuleBuilder::ModuleBuilder(ExclusiveContext* cx, HandleModuleObject module)
+ModuleBuilder::ModuleBuilder(JSContext* cx, HandleModuleObject module)
   : cx_(cx),
     module_(cx, module),
     requestedModules_(cx, AtomVector(cx)),
     importedBoundNames_(cx, AtomVector(cx)),
     importEntries_(cx, ImportEntryVector(cx)),
     exportEntries_(cx, ExportEntryVector(cx)),
     localExportEntries_(cx, ExportEntryVector(cx)),
     indirectExportEntries_(cx, ExportEntryVector(cx)),
--- a/js/src/builtin/ModuleObject.h
+++ b/js/src/builtin/ModuleObject.h
@@ -38,19 +38,19 @@ class ImportEntryObject : public NativeO
     {
         ModuleRequestSlot = 0,
         ImportNameSlot,
         LocalNameSlot,
         SlotCount
     };
 
     static const Class class_;
-    static JSObject* initClass(ExclusiveContext* cx, HandleObject obj);
+    static JSObject* initClass(JSContext* cx, HandleObject obj);
     static bool isInstance(HandleValue value);
-    static ImportEntryObject* create(ExclusiveContext* cx,
+    static ImportEntryObject* create(JSContext* cx,
                                      HandleAtom moduleRequest,
                                      HandleAtom importName,
                                      HandleAtom localName);
     JSAtom* moduleRequest() const;
     JSAtom* importName() const;
     JSAtom* localName() const;
 };
 
@@ -65,19 +65,19 @@ class ExportEntryObject : public NativeO
         ExportNameSlot = 0,
         ModuleRequestSlot,
         ImportNameSlot,
         LocalNameSlot,
         SlotCount
     };
 
     static const Class class_;
-    static JSObject* initClass(ExclusiveContext* cx, HandleObject obj);
+    static JSObject* initClass(JSContext* cx, HandleObject obj);
     static bool isInstance(HandleValue value);
-    static ExportEntryObject* create(ExclusiveContext* cx,
+    static ExportEntryObject* create(JSContext* cx,
                                      HandleAtom maybeExportName,
                                      HandleAtom maybeModuleRequest,
                                      HandleAtom maybeImportName,
                                      HandleAtom maybeLocalName);
     JSAtom* exportName() const;
     JSAtom* moduleRequest() const;
     JSAtom* importName() const;
     JSAtom* localName() const;
@@ -220,17 +220,17 @@ class ModuleObject : public NativeObject
 
     static_assert(EnvironmentSlot == MODULE_OBJECT_ENVIRONMENT_SLOT,
                   "EnvironmentSlot must match self-hosting define");
 
     static const Class class_;
 
     static bool isInstance(HandleValue value);
 
-    static ModuleObject* create(ExclusiveContext* cx);
+    static ModuleObject* create(JSContext* cx);
     void init(HandleScript script);
     void setInitialEnvironment(Handle<ModuleEnvironmentObject*> initialEnvironment);
     void initImportExportData(HandleArrayObject requestedModules,
                               HandleArrayObject importEntries,
                               HandleArrayObject localExportEntries,
                               HandleArrayObject indiretExportEntries,
                               HandleArrayObject starExportEntries);
     static bool Freeze(JSContext* cx, HandleModuleObject self);
@@ -259,17 +259,17 @@ class ModuleObject : public NativeObject
     static bool Evaluation(JSContext* cx, HandleModuleObject self);
 
     void setHostDefinedField(const JS::Value& value);
 
     // For intrinsic_CreateModuleEnvironment.
     void createEnvironment();
 
     // For BytecodeEmitter.
-    bool noteFunctionDeclaration(ExclusiveContext* cx, HandleAtom name, HandleFunction fun);
+    bool noteFunctionDeclaration(JSContext* cx, HandleAtom name, HandleFunction fun);
 
     // For intrinsic_InstantiateModuleFunctionDeclarations.
     static bool instantiateFunctionDeclarations(JSContext* cx, HandleModuleObject self);
 
     void setState(ModuleState newState);
 
     // For intrinsic_EvaluateModule.
     static bool evaluate(JSContext* cx, HandleModuleObject self, MutableHandleValue rval);
@@ -289,17 +289,17 @@ class ModuleObject : public NativeObject
     FunctionDeclarationVector* functionDeclarations();
 };
 
 // Process a module's parse tree to collate the import and export data used when
 // creating a ModuleObject.
 class MOZ_STACK_CLASS ModuleBuilder
 {
   public:
-    explicit ModuleBuilder(ExclusiveContext* cx, HandleModuleObject module);
+    explicit ModuleBuilder(JSContext* cx, HandleModuleObject module);
 
     bool processImport(frontend::ParseNode* pn);
     bool processExport(frontend::ParseNode* pn);
     bool processExportFrom(frontend::ParseNode* pn);
 
     bool hasExportedName(JSAtom* name) const;
 
     using ExportEntryVector = GCVector<ExportEntryObject*>;
@@ -312,17 +312,17 @@ class MOZ_STACK_CLASS ModuleBuilder
 
   private:
     using AtomVector = GCVector<JSAtom*>;
     using RootedAtomVector = JS::Rooted<AtomVector>;
     using ImportEntryVector = GCVector<ImportEntryObject*>;
     using RootedImportEntryVector = JS::Rooted<ImportEntryVector>;
     using RootedExportEntryVector = JS::Rooted<ExportEntryVector>;
 
-    ExclusiveContext* cx_;
+    JSContext* cx_;
     RootedModuleObject module_;
     RootedAtomVector requestedModules_;
     RootedAtomVector importedBoundNames_;
     RootedImportEntryVector importEntries_;
     RootedExportEntryVector exportEntries_;
     RootedExportEntryVector localExportEntries_;
     RootedExportEntryVector indirectExportEntries_;
     RootedExportEntryVector starExportEntries_;
--- a/js/src/builtin/Object.cpp
+++ b/js/src/builtin/Object.cpp
@@ -167,17 +167,17 @@ ArgsAndBodySubstring(mozilla::Range<cons
     MOZ_ASSERT(*outOffset + *outLen <= chars.length());
     return true;
 }
 
 JSString*
 js::ObjectToSource(JSContext* cx, HandleObject obj)
 {
     /* If outermost, we need parentheses to be an expression, not a block. */
-    bool outermost = (cx->cycleDetectorSet.count() == 0);
+    bool outermost = (cx->cycleDetectorSet().count() == 0);
 
     AutoCycleDetector detector(cx, obj);
     if (!detector.init())
         return nullptr;
     if (detector.foundCycle())
         return NewStringCopyZ<CanGC>(cx, "{}");
 
     StringBuffer buf(cx);
--- a/js/src/builtin/Profilers.cpp
+++ b/js/src/builtin/Profilers.cpp
@@ -311,25 +311,25 @@ DumpProfile(JSContext* cx, unsigned argc
     args.rval().setBoolean(ret);
     return true;
 }
 
 static bool
 GetMaxGCPauseSinceClear(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
-    args.rval().setNumber(cx->runtime()->gc.stats.getMaxGCPauseSinceClear().ToMicroseconds());
+    args.rval().setNumber(cx->runtime()->gc.stats().getMaxGCPauseSinceClear().ToMicroseconds());
     return true;
 }
 
 static bool
 ClearMaxGCPauseAccumulator(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
-    args.rval().setNumber(cx->runtime()->gc.stats.clearMaxGCPauseAccumulator().ToMicroseconds());
+    args.rval().setNumber(cx->runtime()->gc.stats().clearMaxGCPauseAccumulator().ToMicroseconds());
     return true;
 }
 
 #if defined(MOZ_INSTRUMENTS)
 
 static bool
 IgnoreAndReturnTrue(JSContext* cx, unsigned argc, Value* vp)
 {
--- a/js/src/builtin/Promise.cpp
+++ b/js/src/builtin/Promise.cpp
@@ -2643,44 +2643,44 @@ js::EnqueuePromiseReactions(JSContext* c
                             HandleValue onFulfilled, HandleValue onRejected)
 {
     MOZ_ASSERT_IF(dependentPromise, dependentPromise->is<PromiseObject>());
     return PerformPromiseThen(cx, promise, onFulfilled, onRejected, dependentPromise,
                               nullptr, nullptr);
 }
 
 PromiseTask::PromiseTask(JSContext* cx, Handle<PromiseObject*> promise)
-  : runtime_(cx),
+  : runtime_(cx->runtime()),
     promise_(cx, promise)
 {}
 
 PromiseTask::~PromiseTask()
 {
     MOZ_ASSERT(CurrentThreadCanAccessZone(promise_->zone()));
 }
 
 void
 PromiseTask::finish(JSContext* cx)
 {
-    MOZ_ASSERT(cx == runtime_);
+    MOZ_ASSERT(cx->runtime() == runtime_);
     {
         // We can't leave a pending exception when returning to the caller so do
         // the same thing as Gecko, which is to ignore the error. This should
         // only happen due to OOM or interruption.
         AutoCompartment ac(cx, promise_);
         if (!finishPromise(cx, promise_))
             cx->clearPendingException();
     }
     js_delete(this);
 }
 
 void
 PromiseTask::cancel(JSContext* cx)
 {
-    MOZ_ASSERT(cx == runtime_);
+    MOZ_ASSERT(cx->runtime() == runtime_);
     js_delete(this);
 }
 
 bool
 PromiseTask::executeAndFinish(JSContext* cx)
 {
     MOZ_ASSERT(!CanUseExtraThreads());
     execute();
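// Note (not part of the patch): PromiseTask now records the owning JSRuntime* instead of
// the JSContext* it was created on, so finish() and cancel() assert
// cx->runtime() == runtime_ rather than comparing context pointers, apparently so the
// context that finishes the task need not be the one that created it.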
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -332,17 +332,17 @@ GC(JSContext* cx, unsigned argc, Value* 
     return true;
 }
 
 static bool
 MinorGC(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     if (args.get(0) == BooleanValue(true))
-        cx->runtime()->gc.storeBuffer.setAboutToOverflow();
+        cx->zone()->group()->storeBuffer().setAboutToOverflow();
 
     cx->minorGC(JS::gcreason::API);
     args.rval().setUndefined();
     return true;
 }
 
 #define FOR_EACH_GC_PARAM(_)                                                    \
     _("maxBytes",                   JSGC_MAX_BYTES,                      true)  \
@@ -812,22 +812,21 @@ SelectForGC(JSContext* cx, unsigned argc
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 
     /*
      * The selectedForMarking set is intended to be manually marked at slice
      * start to detect missing pre-barriers. It is invalid for nursery things
      * to be in the set, so evict the nursery before adding items.
      */
-    JSRuntime* rt = cx->runtime();
-    rt->gc.evictNursery();
+    cx->zone()->group()->evictNursery();
 
     for (unsigned i = 0; i < args.length(); i++) {
         if (args[i].isObject()) {
-            if (!rt->gc.selectForMarking(&args[i].toObject()))
+            if (!cx->runtime()->gc.selectForMarking(&args[i].toObject()))
                 return false;
         }
     }
 
     args.rval().setUndefined();
     return true;
 }
 
@@ -1420,25 +1419,24 @@ OOMTest(JSContext* cx, unsigned argc, Va
             JS_ReportErrorASCII(cx, "OOM_THREAD value out of range.");
             return false;
         }
 
         threadStart = threadOption;
         threadEnd = threadOption + 1;
     }
 
-    JSRuntime* rt = cx->runtime();
-    if (rt->runningOOMTest) {
+    if (cx->runningOOMTest) {
         JS_ReportErrorASCII(cx, "Nested call to oomTest() is not allowed.");
         return false;
     }
-    rt->runningOOMTest = true;
+    cx->runningOOMTest = true;
 
     MOZ_ASSERT(!cx->isExceptionPending());
-    rt->hadOutOfMemory = false;
+    cx->runtime()->hadOutOfMemory = false;
 
     JS_SetGCZeal(cx, 0, JS_DEFAULT_ZEAL_FREQ);
 
     for (unsigned thread = threadStart; thread < threadEnd; thread++) {
         if (verbose)
             fprintf(stderr, "thread %d\n", thread);
 
         HelperThreadState().waitForAllThreads();
@@ -1495,17 +1493,17 @@ OOMTest(JSContext* cx, unsigned argc, Va
             allocation++;
         } while (handledOOM);
 
         if (verbose) {
             fprintf(stderr, "  finished after %d allocations\n", allocation - 2);
         }
     }
 
-    rt->runningOOMTest = false;
+    cx->runningOOMTest = false;
     args.rval().setUndefined();
     return true;
 }
 #endif
 
 static bool
 SettlePromiseNow(JSContext* cx, unsigned argc, Value* vp)
 {
@@ -1746,29 +1744,29 @@ Terminate(JSContext* cx, unsigned arg, V
 
 static bool
 ReadGeckoProfilingStack(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     args.rval().setUndefined();
 
     // Return boolean 'false' if profiler is not enabled.
-    if (!cx->runtime()->geckoProfiler.enabled()) {
+    if (!cx->runtime()->geckoProfiler().enabled()) {
         args.rval().setBoolean(false);
         return true;
     }
 
     // Array holding physical jit stack frames.
     RootedObject stack(cx, NewDenseEmptyArray(cx));
     if (!stack)
         return false;
 
     // If profiler sampling has been suppressed, return an empty
     // stack.
-    if (!cx->runtime()->isProfilerSamplingEnabled()) {
+    if (!cx->isProfilerSamplingEnabled()) {
       args.rval().setObject(*stack);
       return true;
     }
 
     struct InlineFrameInfo
     {
         InlineFrameInfo(const char* kind, char* label)
           : kind(kind), label(label) {}
@@ -1888,17 +1886,17 @@ DisplayName(JSContext* cx, unsigned argc
     if (!args.get(0).isObject() || !args[0].toObject().is<JSFunction>()) {
         RootedObject arg(cx, &args.callee());
         ReportUsageErrorASCII(cx, arg, "Must have one function argument");
         return false;
     }
 
     JSFunction* fun = &args[0].toObject().as<JSFunction>();
     JSString* str = fun->displayAtom();
-    args.rval().setString(str ? str : cx->runtime()->emptyString);
+    args.rval().setString(str ? str : cx->runtime()->emptyString.ref());
     return true;
 }
 
 class ShellAllocationMetadataBuilder : public AllocationMetadataBuilder {
   public:
     ShellAllocationMetadataBuilder() : AllocationMetadataBuilder() { }
 
     virtual JSObject* build(JSContext *cx, HandleObject,
@@ -1994,17 +1992,17 @@ testingFunc_bailAfter(JSContext* cx, uns
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     if (args.length() != 1 || !args[0].isInt32() || args[0].toInt32() < 0) {
         JS_ReportErrorASCII(cx, "Argument must be a positive number that fits in an int32");
         return false;
     }
 
 #ifdef DEBUG
-    cx->runtime()->setIonBailAfter(args[0].toInt32());
+    cx->zone()->group()->setIonBailAfter(args[0].toInt32());
 #endif
 
     args.rval().setUndefined();
     return true;
 }
 
 static bool
 testingFunc_inJit(JSContext* cx, unsigned argc, Value* vp)
@@ -3531,17 +3529,18 @@ static void
 minorGC(JSContext* cx, JSGCStatus status, void* data)
 {
     auto info = static_cast<MinorGC*>(data);
     if (!(info->phases & (1 << status)))
         return;
 
     if (info->active) {
         info->active = false;
-        cx->gc.evictNursery(JS::gcreason::DEBUG_GC);
+        if (cx->zone() && !cx->zone()->isAtomsZone())
+            cx->zone()->group()->evictNursery(JS::gcreason::DEBUG_GC);
         info->active = true;
     }
 }
 
 // Process global, should really be runtime-local. Also, the final one of these
 // is currently leaked, since they are only deleted when changing.
 MajorGC* prevMajorGC = nullptr;
 MinorGC* prevMinorGC = nullptr;
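// Sketch (not part of the patch): the testing hooks above now reach nursery and
// store-buffer state through the ZoneGroup that owns the current zone instead of through
// JSRuntime::gc. A hypothetical helper showing the access pattern, using only calls that
// appear in the hunks above:
//
//     static void ForceEarlyMinorGC(JSContext* cx)
//     {
//         // Store buffer and nursery are per-ZoneGroup after this reorganization.
//         cx->zone()->group()->storeBuffer().setAboutToOverflow();
//         cx->minorGC(JS::gcreason::API);
//     }
//
// Per-context test state (runningOOMTest) moves onto JSContext, while genuinely
// runtime-wide flags (hadOutOfMemory, gc.selectForMarking) stay on cx->runtime().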
--- a/js/src/builtin/TypedObject.cpp
+++ b/js/src/builtin/TypedObject.cpp
@@ -1434,17 +1434,17 @@ OutlineTypedObject::setOwnerAndData(JSOb
     // Typed objects cannot move from one owner to another, so don't worry
     // about pre barriers during this initialization.
     owner_ = owner;
     data_ = data;
 
     // Trigger a post barrier when attaching an object outside the nursery to
     // one that is inside it.
     if (owner && !IsInsideNursery(this) && IsInsideNursery(owner))
-        runtimeFromMainThread()->gc.storeBuffer.putWholeCell(this);
+        zone()->group()->storeBuffer().putWholeCell(this);
 }
 
 /*static*/ OutlineTypedObject*
 OutlineTypedObject::createUnattachedWithClass(JSContext* cx,
                                               const Class* clasp,
                                               HandleTypeDescr descr,
                                               int32_t length,
                                               gc::InitialHeap heap)
@@ -1631,17 +1631,18 @@ OutlineTypedObject::obj_trace(JSTracer* 
                   !owner->as<ArrayBufferObject>().forInlineTypedObject());
     if (owner != oldOwner &&
         (owner->is<InlineTypedObject>() ||
          owner->as<ArrayBufferObject>().hasInlineData()))
     {
         newData += reinterpret_cast<uint8_t*>(owner) - reinterpret_cast<uint8_t*>(oldOwner);
         typedObj.setData(newData);
 
-        trc->runtime()->gc.nursery.maybeSetForwardingPointer(trc, oldData, newData, /* direct = */ false);
+        Nursery& nursery = typedObj.zoneFromAnyThread()->group()->nursery();
+        nursery.maybeSetForwardingPointer(trc, oldData, newData, /* direct = */ false);
     }
 
     if (!descr.opaque() || !typedObj.isAttached())
         return;
 
     descr.traceInstances(trc, newData, 1);
 }
 
@@ -2136,18 +2137,18 @@ InlineTypedObject::objectMovedDuringMino
     // whether this object moved and where it was moved from.
     TypeDescr& descr = dst->as<InlineTypedObject>().typeDescr();
     if (descr.kind() == type::Array) {
         // The forwarding pointer can be direct as long as there is enough
         // space for it. Other objects might point into the object's buffer,
         // but they will not set any direct forwarding pointers.
         uint8_t* oldData = reinterpret_cast<uint8_t*>(src) + offsetOfDataStart();
         uint8_t* newData = dst->as<InlineTypedObject>().inlineTypedMem();
-        trc->runtime()->gc.nursery.maybeSetForwardingPointer(trc, oldData, newData,
-                                                             descr.size() >= sizeof(uintptr_t));
+        dst->zone()->group()->nursery().maybeSetForwardingPointer(trc, oldData, newData,
+                                                                  descr.size() >= sizeof(uintptr_t));
     }
 }
 
 ArrayBufferObject*
 InlineTransparentTypedObject::getOrCreateBuffer(JSContext* cx)
 {
     ObjectWeakMap*& table = cx->compartment()->lazyArrayBuffers;
     if (!table) {
@@ -2184,17 +2185,17 @@ InlineTransparentTypedObject::getOrCreat
     buffer->setHasTypedObjectViews();
 
     if (!table->add(cx, this, buffer))
         return nullptr;
 
     if (IsInsideNursery(this)) {
         // Make sure the buffer is traced by the next generational collection,
         // so that its data pointer is updated after this typed object moves.
-        cx->runtime()->gc.storeBuffer.putWholeCell(buffer);
+        zone()->group()->storeBuffer().putWholeCell(buffer);
     }
 
     return buffer;
 }
 
 ArrayBufferObject*
 OutlineTransparentTypedObject::getOrCreateBuffer(JSContext* cx)
 {
@@ -2674,18 +2675,18 @@ js::LoadReference##_name::Func(JSContext
 bool
 StoreReferenceAny::store(JSContext* cx, GCPtrValue* heap, const Value& v,
                          TypedObject* obj, jsid id)
 {
     // Undefined values are not included in type inference information for
     // value properties of typed objects, as these properties are always
     // considered to contain undefined.
     if (!v.isUndefined()) {
-        if (cx->isJSContext())
-            AddTypePropertyId(cx->asJSContext(), obj, id, v);
+        if (!cx->helperThread())
+            AddTypePropertyId(cx, obj, id, v);
         else if (!HasTypePropertyId(obj, id, v))
             return false;
     }
 
     *heap = v;
     return true;
 }
 
@@ -2694,18 +2695,18 @@ StoreReferenceObject::store(JSContext* c
                             TypedObject* obj, jsid id)
 {
     MOZ_ASSERT(v.isObjectOrNull()); // or else Store_object is being misused
 
     // Null pointers are not included in type inference information for
     // object properties of typed objects, as these properties are always
     // considered to contain null.
     if (v.isObject()) {
-        if (cx->isJSContext())
-            AddTypePropertyId(cx->asJSContext(), obj, id, v);
+        if (!cx->helperThread())
+            AddTypePropertyId(cx, obj, id, v);
         else if (!HasTypePropertyId(obj, id, v))
             return false;
     }
 
     *heap = v.toObjectOrNull();
     return true;
 }
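// Sketch (not part of the patch): with ExclusiveContext folded into JSContext, the
// "main thread?" test becomes !cx->helperThread(), as in the stores above:
//
//     if (!cx->helperThread())
//         AddTypePropertyId(cx, obj, id, v);        // main thread: may mutate type info
//     else if (!HasTypePropertyId(obj, id, v))      // helper thread: read-only check
//         return false;
//
// and post-barrier/nursery traffic goes through the owning object's zone group
// (zone()->group()->storeBuffer(), zone()->group()->nursery()) rather than
// runtimeFromMainThread()->gc.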
 
--- a/js/src/frontend/BytecodeCompiler.cpp
+++ b/js/src/frontend/BytecodeCompiler.cpp
@@ -29,33 +29,33 @@
 using namespace js;
 using namespace js::frontend;
 using mozilla::Maybe;
 using mozilla::Nothing;
 
 class MOZ_STACK_CLASS AutoCompilationTraceLogger
 {
   public:
-    AutoCompilationTraceLogger(ExclusiveContext* cx, const TraceLoggerTextId id,
+    AutoCompilationTraceLogger(JSContext* cx, const TraceLoggerTextId id,
                                const ReadOnlyCompileOptions& options);
 
   private:
     TraceLoggerThread* logger;
     TraceLoggerEvent event;
     AutoTraceLog scriptLogger;
     AutoTraceLog typeLogger;
 };
 
 // The BytecodeCompiler class contains resources common to compiling scripts and
 // function bodies.
 class MOZ_STACK_CLASS BytecodeCompiler
 {
   public:
     // Construct an object passing mandatory arguments.
-    BytecodeCompiler(ExclusiveContext* cx,
+    BytecodeCompiler(JSContext* cx,
                      LifoAlloc& alloc,
                      const ReadOnlyCompileOptions& options,
                      SourceBufferHolder& sourceBuffer,
                      HandleScope enclosingScope,
                      TraceLoggerTextId logId);
 
     // Call setters for optional arguments.
     void maybeSetSourceCompressor(SourceCompressionTask* sourceCompressor);
@@ -81,17 +81,17 @@ class MOZ_STACK_CLASS BytecodeCompiler
     bool emplaceEmitter(Maybe<BytecodeEmitter>& emitter, SharedContext* sharedContext);
     bool handleParseFailure(const Directives& newDirectives);
     bool deoptimizeArgumentsInEnclosingScripts(JSContext* cx, HandleObject environment);
     bool maybeCompleteCompressSource();
 
     AutoCompilationTraceLogger traceLogger;
     AutoKeepAtoms keepAtoms;
 
-    ExclusiveContext* cx;
+    JSContext* cx;
     LifoAlloc& alloc;
     const ReadOnlyCompileOptions& options;
     SourceBufferHolder& sourceBuffer;
 
     RootedScope enclosingScope;
 
     RootedScriptSource sourceObject;
     ScriptSource* scriptSource;
@@ -104,33 +104,33 @@ class MOZ_STACK_CLASS BytecodeCompiler
     Maybe<Parser<FullParseHandler>> parser;
 
     Directives directives;
     TokenStream::Position startPosition;
 
     RootedScript script;
 };
 
-AutoCompilationTraceLogger::AutoCompilationTraceLogger(ExclusiveContext* cx,
+AutoCompilationTraceLogger::AutoCompilationTraceLogger(JSContext* cx,
         const TraceLoggerTextId id, const ReadOnlyCompileOptions& options)
-  : logger(cx->isJSContext() ? TraceLoggerForMainThread(cx->asJSContext()->runtime())
-                             : TraceLoggerForCurrentThread()),
+  : logger(!cx->helperThread() ? TraceLoggerForMainThread(cx->runtime())
+                               : TraceLoggerForCurrentThread()),
     event(logger, TraceLogger_AnnotateScripts, options),
     scriptLogger(logger, event),
     typeLogger(logger, id)
 {}
 
-BytecodeCompiler::BytecodeCompiler(ExclusiveContext* cx,
+BytecodeCompiler::BytecodeCompiler(JSContext* cx,
                                    LifoAlloc& alloc,
                                    const ReadOnlyCompileOptions& options,
                                    SourceBufferHolder& sourceBuffer,
                                    HandleScope enclosingScope,
                                    TraceLoggerTextId logId)
   : traceLogger(cx, logId, options),
-    keepAtoms(cx->perThreadData),
+    keepAtoms(cx),
     cx(cx),
     alloc(alloc),
     options(options),
     sourceBuffer(sourceBuffer),
     enclosingScope(cx, enclosingScope),
     sourceObject(cx),
     scriptSource(nullptr),
     sourceCompressor(nullptr),
@@ -149,18 +149,18 @@ BytecodeCompiler::maybeSetSourceCompress
 
 bool
 BytecodeCompiler::checkLength()
 {
     // Note this limit is simply so we can store sourceStart and sourceEnd in
     // JSScript as 32-bits. It could be lifted fairly easily, since the compiler
     // is using size_t internally already.
     if (sourceBuffer.length() > UINT32_MAX) {
-        if (cx->isJSContext())
-            JS_ReportErrorNumberASCII(cx->asJSContext(), GetErrorMessage, nullptr,
+        if (!cx->helperThread())
+            JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                                       JSMSG_SOURCE_TOO_LONG);
         return false;
     }
     return true;
 }
 
 bool
 BytecodeCompiler::createScriptSource(Maybe<uint32_t> parameterListEnd)
@@ -324,22 +324,22 @@ BytecodeCompiler::compileScript(HandleOb
         ParseNode* pn;
         if (sc->isEvalContext())
             pn = parser->evalBody(sc->asEvalContext());
         else
             pn = parser->globalBody(sc->asGlobalContext());
 
         // Successfully parsed. Emit the script.
         if (pn) {
-            if (sc->isEvalContext() && sc->hasDebuggerStatement() && cx->isJSContext()) {
+            if (sc->isEvalContext() && sc->hasDebuggerStatement() && !cx->helperThread()) {
                 // If the eval'ed script contains any debugger statement, force construction
                 // of arguments objects for the caller script and any other scripts it is
                 // transitively nested inside. The debugger can access any variable on the
                 // scope chain.
-                if (!deoptimizeArgumentsInEnclosingScripts(cx->asJSContext(), environment))
+                if (!deoptimizeArgumentsInEnclosingScripts(cx, environment))
                     return nullptr;
             }
             if (!emitter->emitScript(pn))
                 return nullptr;
             if (!NameFunctions(cx, pn))
                 return nullptr;
             parser->handler.freeTree(pn);
 
@@ -352,17 +352,17 @@ BytecodeCompiler::compileScript(HandleOb
 
         // Reset UsedNameTracker state before trying again.
         usedNames->reset();
     }
 
     if (!maybeCompleteCompressSource())
         return nullptr;
 
-    MOZ_ASSERT_IF(cx->isJSContext(), !cx->asJSContext()->isExceptionPending());
+    MOZ_ASSERT_IF(!cx->helperThread(), !cx->isExceptionPending());
 
     return script;
 }
 
 JSScript*
 BytecodeCompiler::compileGlobalScript(ScopeKind scopeKind)
 {
     GlobalSharedContext globalsc(cx, scopeKind, directives, options.extraWarningsOption);
@@ -416,17 +416,17 @@ BytecodeCompiler::compileModule()
     if (!env)
         return nullptr;
 
     module->setInitialEnvironment(env);
 
     if (!maybeCompleteCompressSource())
         return nullptr;
 
-    MOZ_ASSERT_IF(cx->isJSContext(), !cx->asJSContext()->isExceptionPending());
+    MOZ_ASSERT_IF(!cx->helperThread(), !cx->isExceptionPending());
     return module;
 }
 
 // Compile a standalone JS function, which might appear as the value of an
 // event handler attribute in an HTML <INPUT> tag, or in a Function()
 // constructor.
 bool
 BytecodeCompiler::compileStandaloneFunction(MutableHandleFunction fun,
@@ -481,17 +481,17 @@ BytecodeCompiler::compileStandaloneFunct
 
 ScriptSourceObject*
 BytecodeCompiler::sourceObjectPtr() const
 {
     return sourceObject.get();
 }
 
 ScriptSourceObject*
-frontend::CreateScriptSourceObject(ExclusiveContext* cx, const ReadOnlyCompileOptions& options,
+frontend::CreateScriptSourceObject(JSContext* cx, const ReadOnlyCompileOptions& options,
                                    Maybe<uint32_t> parameterListEnd /* = Nothing() */)
 {
     ScriptSource* ss = cx->new_<ScriptSource>();
     if (!ss)
         return nullptr;
     ScriptSourceHolder ssHolder(ss);
 
     if (!ss->initFromOptions(cx, options, parameterListEnd))
@@ -505,18 +505,18 @@ frontend::CreateScriptSourceObject(Exclu
     // SSO, in a temporary compartment. Hence, for the SSO to refer to the
     // gc-heap-allocated values in |options|, it would need cross-compartment
     // wrappers from the temporary compartment to the real compartment --- which
     // would then be inappropriate once we merged the temporary and real
     // compartments.
     //
     // Instead, we put off populating those SSO slots in off-thread compilations
     // until after we've merged compartments.
-    if (cx->isJSContext()) {
-        if (!ScriptSourceObject::initFromOptions(cx->asJSContext(), sso, options))
+    if (!cx->helperThread()) {
+        if (!ScriptSourceObject::initFromOptions(cx, sso, options))
             return nullptr;
     }
 
     return sso;
 }
 
 // CompileScript independently returns the ScriptSourceObject (SSO) for the
 // compile.  This is used by off-main-thread script compilation (OMT-SC).
@@ -545,47 +545,47 @@ class MOZ_STACK_CLASS AutoInitializeSour
 
     ~AutoInitializeSourceObject() {
         if (sourceObjectOut_)
             *sourceObjectOut_ = compiler_.sourceObjectPtr();
     }
 };
 
 JSScript*
-frontend::CompileGlobalScript(ExclusiveContext* cx, LifoAlloc& alloc, ScopeKind scopeKind,
+frontend::CompileGlobalScript(JSContext* cx, LifoAlloc& alloc, ScopeKind scopeKind,
                               const ReadOnlyCompileOptions& options,
                               SourceBufferHolder& srcBuf,
                               SourceCompressionTask* extraSct,
                               ScriptSourceObject** sourceObjectOut)
 {
     MOZ_ASSERT(scopeKind == ScopeKind::Global || scopeKind == ScopeKind::NonSyntactic);
     BytecodeCompiler compiler(cx, alloc, options, srcBuf, /* enclosingScope = */ nullptr,
                               TraceLogger_ParserCompileScript);
     AutoInitializeSourceObject autoSSO(compiler, sourceObjectOut);
     compiler.maybeSetSourceCompressor(extraSct);
     return compiler.compileGlobalScript(scopeKind);
 }
 
 JSScript*
-frontend::CompileEvalScript(ExclusiveContext* cx, LifoAlloc& alloc,
+frontend::CompileEvalScript(JSContext* cx, LifoAlloc& alloc,
                             HandleObject environment, HandleScope enclosingScope,
                             const ReadOnlyCompileOptions& options,
                             SourceBufferHolder& srcBuf,
                             SourceCompressionTask* extraSct,
                             ScriptSourceObject** sourceObjectOut)
 {
     BytecodeCompiler compiler(cx, alloc, options, srcBuf, enclosingScope,
                               TraceLogger_ParserCompileScript);
     AutoInitializeSourceObject autoSSO(compiler, sourceObjectOut);
     compiler.maybeSetSourceCompressor(extraSct);
     return compiler.compileEvalScript(environment, enclosingScope);
 }
 
 ModuleObject*
-frontend::CompileModule(ExclusiveContext* cx, const ReadOnlyCompileOptions& optionsInput,
+frontend::CompileModule(JSContext* cx, const ReadOnlyCompileOptions& optionsInput,
                         SourceBufferHolder& srcBuf, LifoAlloc& alloc,
                         ScriptSourceObject** sourceObjectOut /* = nullptr */)
 {
     MOZ_ASSERT(srcBuf.get());
     MOZ_ASSERT_IF(sourceObjectOut, *sourceObjectOut == nullptr);
 
     CompileOptions options(cx, optionsInput);
     options.maybeMakeStrictMode(true); // ES6 10.2.1 Module code is always strict mode code.
@@ -600,24 +600,24 @@ frontend::CompileModule(ExclusiveContext
 
 ModuleObject*
 frontend::CompileModule(JSContext* cx, const ReadOnlyCompileOptions& options,
                         SourceBufferHolder& srcBuf)
 {
     if (!GlobalObject::ensureModulePrototypesCreated(cx, cx->global()))
         return nullptr;
 
-    LifoAlloc& alloc = cx->asJSContext()->tempLifoAlloc();
+    LifoAlloc& alloc = cx->tempLifoAlloc();
     RootedModuleObject module(cx, CompileModule(cx, options, srcBuf, alloc));
     if (!module)
         return nullptr;
 
     // This happens in GlobalHelperThreadState::finishModuleParseTask() when a
     // module is compiled off main thread.
-    if (!ModuleObject::Freeze(cx->asJSContext(), module))
+    if (!ModuleObject::Freeze(cx, module))
         return nullptr;
 
     return module;
 }
 
 bool
 frontend::CompileLazyFunction(JSContext* cx, Handle<LazyScript*> lazy, const char16_t* chars, size_t length)
 {
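// Sketch (not part of the patch): the bytecode compiler now takes JSContext* throughout;
// guards that used to test cx->isJSContext() become !cx->helperThread(), since a
// helper-thread parse cannot report errors or fire debugger hooks directly:
//
//     if (!cx->helperThread())
//         JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_SOURCE_TOO_LONG);
//
// Off-thread compilations keep deferring such work (e.g. ScriptSourceObject slot
// initialization, Debugger::onNewScript) until the parse task merges back on the main
// thread.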
--- a/js/src/frontend/BytecodeCompiler.h
+++ b/js/src/frontend/BytecodeCompiler.h
@@ -22,36 +22,36 @@ class LazyScript;
 class LifoAlloc;
 class ModuleObject;
 class ScriptSourceObject;
 struct SourceCompressionTask;
 
 namespace frontend {
 
 JSScript*
-CompileGlobalScript(ExclusiveContext* cx, LifoAlloc& alloc, ScopeKind scopeKind,
+CompileGlobalScript(JSContext* cx, LifoAlloc& alloc, ScopeKind scopeKind,
                     const ReadOnlyCompileOptions& options,
                     SourceBufferHolder& srcBuf,
                     SourceCompressionTask* extraSct = nullptr,
                     ScriptSourceObject** sourceObjectOut = nullptr);
 
 JSScript*
-CompileEvalScript(ExclusiveContext* cx, LifoAlloc& alloc,
+CompileEvalScript(JSContext* cx, LifoAlloc& alloc,
                   HandleObject scopeChain, HandleScope enclosingScope,
                   const ReadOnlyCompileOptions& options,
                   SourceBufferHolder& srcBuf,
                   SourceCompressionTask* extraSct = nullptr,
                   ScriptSourceObject** sourceObjectOut = nullptr);
 
 ModuleObject*
 CompileModule(JSContext* cx, const ReadOnlyCompileOptions& options,
               SourceBufferHolder& srcBuf);
 
 ModuleObject*
-CompileModule(ExclusiveContext* cx, const ReadOnlyCompileOptions& options,
+CompileModule(JSContext* cx, const ReadOnlyCompileOptions& options,
               SourceBufferHolder& srcBuf, LifoAlloc& alloc,
               ScriptSourceObject** sourceObjectOut = nullptr);
 
 MOZ_MUST_USE bool
 CompileLazyFunction(JSContext* cx, Handle<LazyScript*> lazy, const char16_t* chars, size_t length);
 
 //
 // Compile a single function. The source in srcBuf must match the ECMA-262
@@ -85,17 +85,17 @@ CompileStandaloneAsyncFunction(JSContext
                                mozilla::Maybe<uint32_t> parameterListEnd);
 
 MOZ_MUST_USE bool
 CompileAsyncFunctionBody(JSContext* cx, MutableHandleFunction fun,
                          const ReadOnlyCompileOptions& options,
                          Handle<PropertyNameVector> formals, JS::SourceBufferHolder& srcBuf);
 
 ScriptSourceObject*
-CreateScriptSourceObject(ExclusiveContext* cx, const ReadOnlyCompileOptions& options,
+CreateScriptSourceObject(JSContext* cx, const ReadOnlyCompileOptions& options,
                          mozilla::Maybe<uint32_t> parameterListEnd = mozilla::Nothing());
 
 /*
  * True if str consists of an IdentifierStart character, followed by one or
  * more IdentifierPart characters, i.e. it matches the IdentifierName production
  * in the language spec.
  *
  * This returns true even if str is a keyword like "if".
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -956,17 +956,17 @@ BytecodeEmitter::EmitterScope::enterLexi
 
         if (!tdzCache->noteTDZCheck(bce, bi.name(), CheckTDZ))
             return false;
     }
 
     updateFrameFixedSlots(bce, bi);
 
     // Create and intern the VM scope.
-    auto createScope = [kind, bindings, firstFrameSlot](ExclusiveContext* cx,
+    auto createScope = [kind, bindings, firstFrameSlot](JSContext* cx,
                                                         HandleScope enclosing)
     {
         return LexicalScope::create(cx, kind, bindings, firstFrameSlot, enclosing);
     };
     if (!internScope(bce, createScope))
         return false;
 
     if (ScopeKindIsInBody(kind) && hasEnvironment()) {
@@ -1010,17 +1010,17 @@ BytecodeEmitter::EmitterScope::enterName
     // not a frame slot. Do not update frame slot information.
     NameLocation loc = NameLocation::fromBinding(bi.kind(), bi.location());
     if (!putNameInCache(bce, bi.name(), loc))
         return false;
 
     bi++;
     MOZ_ASSERT(!bi, "There should be exactly one binding in a NamedLambda scope");
 
-    auto createScope = [funbox](ExclusiveContext* cx, HandleScope enclosing) {
+    auto createScope = [funbox](JSContext* cx, HandleScope enclosing) {
         ScopeKind scopeKind =
             funbox->strict() ? ScopeKind::StrictNamedLambda : ScopeKind::NamedLambda;
         return LexicalScope::create(cx, scopeKind, funbox->namedLambdaBindings(),
                                     LOCALNO_LIMIT, enclosing);
     };
     if (!internScope(bce, createScope))
         return false;
 
@@ -1066,17 +1066,17 @@ BytecodeEmitter::EmitterScope::enterPara
         return false;
 
     // Parameter expressions var scopes have no pre-set bindings and are
     // always extensible, as they are needed for eval.
     fallbackFreeNameLocation_ = Some(NameLocation::Dynamic());
 
     // Create and intern the VM scope.
     uint32_t firstFrameSlot = frameSlotStart();
-    auto createScope = [firstFrameSlot](ExclusiveContext* cx, HandleScope enclosing) {
+    auto createScope = [firstFrameSlot](JSContext* cx, HandleScope enclosing) {
         return VarScope::create(cx, ScopeKind::ParameterExpressionVar,
                                 /* data = */ nullptr, firstFrameSlot,
                                 /* needsEnvironment = */ true, enclosing);
     };
     if (!internScope(bce, createScope))
         return false;
 
     MOZ_ASSERT(hasEnvironment());
@@ -1160,17 +1160,17 @@ BytecodeEmitter::EmitterScope::enterFunc
             }
         }
 
         if (!deadZoneFrameSlotRange(bce, 0, paramFrameSlotEnd))
             return false;
     }
 
     // Create and intern the VM scope.
-    auto createScope = [funbox](ExclusiveContext* cx, HandleScope enclosing) {
+    auto createScope = [funbox](JSContext* cx, HandleScope enclosing) {
         RootedFunction fun(cx, funbox->function());
         return FunctionScope::create(cx, funbox->functionScopeBindings(),
                                      funbox->hasParameterExprs,
                                      funbox->needsCallObjectRegardlessOfBindings(),
                                      fun, enclosing);
     };
     if (!internBodyScope(bce, createScope))
         return false;
@@ -1214,17 +1214,17 @@ BytecodeEmitter::EmitterScope::enterFunc
     // If the extra var scope may be extended at runtime due to sloppy
     // direct eval, any names beyond the var scope must be accessed
     // dynamically as we don't know if the name will become a 'var' binding
     // due to direct eval.
     if (funbox->hasExtensibleScope())
         fallbackFreeNameLocation_ = Some(NameLocation::Dynamic());
 
     // Create and intern the VM scope.
-    auto createScope = [funbox, firstFrameSlot](ExclusiveContext* cx, HandleScope enclosing) {
+    auto createScope = [funbox, firstFrameSlot](JSContext* cx, HandleScope enclosing) {
         return VarScope::create(cx, ScopeKind::FunctionBodyVar,
                                 funbox->extraVarScopeBindings(), firstFrameSlot,
                                 funbox->needsExtraBodyVarEnvironmentRegardlessOfBindings(),
                                 enclosing);
     };
     if (!internScope(bce, createScope))
         return false;
 
@@ -1282,17 +1282,17 @@ BytecodeEmitter::EmitterScope::enterGlob
         // self-hosted scripts are cloned into their target compartments before
         // they are run. Instead of Global, Intrinsic is used for all names.
         //
         // Intrinsic lookups are redirected to the special intrinsics holder
         // in the global object, into which any missing values are cloned
         // lazily upon first access.
         fallbackFreeNameLocation_ = Some(NameLocation::Intrinsic());
 
-        auto createScope = [](ExclusiveContext* cx, HandleScope enclosing) {
+        auto createScope = [](JSContext* cx, HandleScope enclosing) {
             MOZ_ASSERT(!enclosing);
             return &cx->global()->emptyGlobalScope();
         };
         return internBodyScope(bce, createScope);
     }
 
     // Resolve binding names and emit DEF{VAR,LET,CONST} prologue ops.
     if (globalsc->bindings) {
@@ -1315,17 +1315,17 @@ BytecodeEmitter::EmitterScope::enterGlob
     // Note that to save space, we don't add free names to the cache for
     // global scopes. They are assumed to be global vars in the syntactic
     // global scope, dynamic accesses under non-syntactic global scope.
     if (globalsc->scopeKind() == ScopeKind::Global)
         fallbackFreeNameLocation_ = Some(NameLocation::Global(BindingKind::Var));
     else
         fallbackFreeNameLocation_ = Some(NameLocation::Dynamic());
 
-    auto createScope = [globalsc](ExclusiveContext* cx, HandleScope enclosing) {
+    auto createScope = [globalsc](JSContext* cx, HandleScope enclosing) {
         MOZ_ASSERT(!enclosing);
         return GlobalScope::create(cx, globalsc->scopeKind(), globalsc->bindings);
     };
     return internBodyScope(bce, createScope);
 }
 
 bool
 BytecodeEmitter::EmitterScope::enterEval(BytecodeEmitter* bce, EvalSharedContext* evalsc)
@@ -1337,17 +1337,17 @@ BytecodeEmitter::EmitterScope::enterEval
     if (!ensureCache(bce))
         return false;
 
     // For simplicity, treat all free name lookups in eval scripts as dynamic.
     fallbackFreeNameLocation_ = Some(NameLocation::Dynamic());
 
     // Create the `var` scope. Note that there is also a lexical scope, created
     // separately in emitScript().
-    auto createScope = [evalsc](ExclusiveContext* cx, HandleScope enclosing) {
+    auto createScope = [evalsc](JSContext* cx, HandleScope enclosing) {
         ScopeKind scopeKind = evalsc->strict() ? ScopeKind::StrictEval : ScopeKind::Eval;
         return EvalScope::create(cx, scopeKind, evalsc->bindings, enclosing);
     };
     if (!internBodyScope(bce, createScope))
         return false;
 
     if (hasEnvironment()) {
         if (!bce->emitInternedScopeOp(index(), JSOP_PUSHVARENV))
@@ -1425,17 +1425,17 @@ BytecodeEmitter::EmitterScope::enterModu
     // Put lexical frame slots in TDZ. Environment slots are poisoned during
     // environment creation.
     if (firstLexicalFrameSlot) {
         if (!deadZoneFrameSlotRange(bce, *firstLexicalFrameSlot, frameSlotEnd()))
             return false;
     }
 
     // Create and intern the VM scope.
-    auto createScope = [modulesc](ExclusiveContext* cx, HandleScope enclosing) {
+    auto createScope = [modulesc](JSContext* cx, HandleScope enclosing) {
         return ModuleScope::create(cx, modulesc->bindings, modulesc->module(), enclosing);
     };
     if (!internBodyScope(bce, createScope))
         return false;
 
     return checkEnvironmentChainLength(bce);
 }
 
@@ -1445,17 +1445,17 @@ BytecodeEmitter::EmitterScope::enterWith
     MOZ_ASSERT(this == bce->innermostEmitterScope);
 
     if (!ensureCache(bce))
         return false;
 
     // 'with' makes all accesses dynamic and unanalyzable.
     fallbackFreeNameLocation_ = Some(NameLocation::Dynamic());
 
-    auto createScope = [](ExclusiveContext* cx, HandleScope enclosing) {
+    auto createScope = [](JSContext* cx, HandleScope enclosing) {
         return WithScope::create(cx, enclosing);
     };
     if (!internScope(bce, createScope))
         return false;
 
     if (!bce->emitInternedScopeOp(index(), JSOP_ENTERWITH))
         return false;
 
@@ -3481,28 +3481,27 @@ BytecodeEmitter::maybeSetSourceMap()
         if (!parser->ss->setSourceMapURL(cx, parser->options().sourceMapURL()))
             return false;
     }
 
     return true;
 }
 
 void
-BytecodeEmitter::tellDebuggerAboutCompiledScript(ExclusiveContext* cx)
+BytecodeEmitter::tellDebuggerAboutCompiledScript(JSContext* cx)
 {
     // Note: when parsing off thread the resulting scripts need to be handed to
     // the debugger after rejoining to the main thread.
-    if (!cx->isJSContext())
+    if (cx->helperThread())
         return;
 
     // Lazy scripts are never top level (despite always being invoked with a
     // nullptr parent), and so the hook should never be fired.
-    if (emitterMode != LazyFunction && !parent) {
-        Debugger::onNewScript(cx->asJSContext(), script);
-    }
+    if (emitterMode != LazyFunction && !parent)
+        Debugger::onNewScript(cx, script);
 }
 
 inline TokenStream*
 BytecodeEmitter::tokenStream()
 {
     return &parser->tokenStream;
 }
 
@@ -6116,17 +6115,17 @@ BytecodeEmitter::emitAssignment(ParseNod
         break;
       default:
         MOZ_ASSERT(0);
     }
     return true;
 }
 
 bool
-ParseNode::getConstantValue(ExclusiveContext* cx, AllowConstantObjects allowObjects,
+ParseNode::getConstantValue(JSContext* cx, AllowConstantObjects allowObjects,
                             MutableHandleValue vp, Value* compare, size_t ncompare,
                             NewObjectKind newKind)
 {
     MOZ_ASSERT(newKind == TenuredObject || newKind == SingletonObject);
 
     switch (getKind()) {
       case PNK_NUMBER:
         vp.setNumber(pn_dval);
@@ -8563,17 +8562,17 @@ BytecodeEmitter::emitDeleteExpression(Pa
         if (!emit1(JSOP_POP))
             return false;
     }
 
     return emit1(JSOP_TRUE);
 }
 
 static const char *
-SelfHostedCallFunctionName(JSAtom* name, ExclusiveContext* cx)
+SelfHostedCallFunctionName(JSAtom* name, JSContext* cx)
 {
     if (name == cx->names().callFunction)
         return "callFunction";
     if (name == cx->names().callContentFunction)
         return "callContentFunction";
     if (name == cx->names().constructContentFunction)
         return "constructContentFunction";
 
@@ -10448,17 +10447,17 @@ BytecodeEmitter::emitTreeInBranch(ParseN
 {
     // Code that may be conditionally executed always needs its own TDZ
     // cache.
     TDZCheckCache tdzCache(this);
     return emitTree(pn);
 }
 
 static bool
-AllocSrcNote(ExclusiveContext* cx, SrcNotesVector& notes, unsigned* index)
+AllocSrcNote(JSContext* cx, SrcNotesVector& notes, unsigned* index)
 {
     // Start it off moderately large to avoid repeated resizings early on.
     // ~99% of cases fit within 256 bytes.
     if (notes.capacity() == 0 && !notes.reserve(256))
         return false;
 
     if (!notes.growBy(1)) {
         ReportOutOfMemory(cx);
--- a/js/src/frontend/BytecodeEmitter.h
+++ b/js/src/frontend/BytecodeEmitter.h
@@ -27,17 +27,17 @@ class ObjectBox;
 class ParseNode;
 template <typename ParseHandler> class Parser;
 class SharedContext;
 class TokenStream;
 
 class CGConstList {
     Vector<Value> list;
   public:
-    explicit CGConstList(ExclusiveContext* cx) : list(cx) {}
+    explicit CGConstList(JSContext* cx) : list(cx) {}
     MOZ_MUST_USE bool append(const Value& v) {
         MOZ_ASSERT_IF(v.isString(), v.toString()->isAtom());
         return list.append(v);
     }
     size_t length() const { return list.length(); }
     void finish(ConstArray* array);
 };
 
@@ -51,28 +51,28 @@ struct CGObjectList {
     unsigned indexOf(JSObject* obj);
     void finish(ObjectArray* array);
     ObjectBox* find(uint32_t index);
 };
 
 struct MOZ_STACK_CLASS CGScopeList {
     Rooted<GCVector<Scope*>> vector;
 
-    explicit CGScopeList(ExclusiveContext* cx)
+    explicit CGScopeList(JSContext* cx)
       : vector(cx, GCVector<Scope*>(cx))
     { }
 
     bool append(Scope* scope) { return vector.append(scope); }
     uint32_t length() const { return vector.length(); }
     void finish(ScopeArray* array);
 };
 
 struct CGTryNoteList {
     Vector<JSTryNote> list;
-    explicit CGTryNoteList(ExclusiveContext* cx) : list(cx) {}
+    explicit CGTryNoteList(JSContext* cx) : list(cx) {}
 
     MOZ_MUST_USE bool append(JSTryNoteKind kind, uint32_t stackDepth, size_t start, size_t end);
     size_t length() const { return list.length(); }
     void finish(TryNoteArray* array);
 };
 
 struct CGScopeNote : public ScopeNote
 {
@@ -84,28 +84,28 @@ struct CGScopeNote : public ScopeNote
     bool startInPrologue;
 
     // Is the end offset in the prologue?
     bool endInPrologue;
 };
 
 struct CGScopeNoteList {
     Vector<CGScopeNote> list;
-    explicit CGScopeNoteList(ExclusiveContext* cx) : list(cx) {}
+    explicit CGScopeNoteList(JSContext* cx) : list(cx) {}
 
     MOZ_MUST_USE bool append(uint32_t scopeIndex, uint32_t offset, bool inPrologue,
                              uint32_t parent);
     void recordEnd(uint32_t index, uint32_t offset, bool inPrologue);
     size_t length() const { return list.length(); }
     void finish(ScopeNoteArray* array, uint32_t prologueLength);
 };
 
 struct CGYieldOffsetList {
     Vector<uint32_t> list;
-    explicit CGYieldOffsetList(ExclusiveContext* cx) : list(cx) {}
+    explicit CGYieldOffsetList(JSContext* cx) : list(cx) {}
 
     MOZ_MUST_USE bool append(uint32_t offset) { return list.append(offset); }
     size_t length() const { return list.length(); }
     void finish(YieldOffsetArray& array, uint32_t prologueLength);
 };
 
 // Use zero inline elements because these go on the stack and affect how many
 // nested functions are possible.
@@ -170,17 +170,17 @@ struct JumpList {
 struct MOZ_STACK_CLASS BytecodeEmitter
 {
     class TDZCheckCache;
     class NestableControl;
     class EmitterScope;
 
     SharedContext* const sc;      /* context shared between parsing and bytecode generation */
 
-    ExclusiveContext* const cx;
+    JSContext* const cx;
 
     BytecodeEmitter* const parent;  /* enclosing function or global context */
 
     Rooted<JSScript*> script;       /* the JSScript we're ultimately producing */
 
     Rooted<LazyScript*> lazyScript; /* the lazy script if mode is LazyFunction,
                                         nullptr otherwise. */
 
@@ -188,17 +188,17 @@ struct MOZ_STACK_CLASS BytecodeEmitter
         BytecodeVector code;        /* bytecode */
         SrcNotesVector notes;       /* source notes, see below */
         ptrdiff_t   lastNoteOffset; /* code offset for last source note */
         uint32_t    currentLine;    /* line number for tree-based srcnote gen */
         uint32_t    lastColumn;     /* zero-based column index on currentLine of
                                        last SRC_COLSPAN-annotated opcode */
         JumpTarget lastTarget;      // Last jump target emitted.
 
-        EmitSection(ExclusiveContext* cx, uint32_t lineNum)
+        EmitSection(JSContext* cx, uint32_t lineNum)
           : code(cx), notes(cx), lastNoteOffset(0), currentLine(lineNum), lastColumn(0),
             lastTarget{ -1 - ptrdiff_t(JSOP_JUMPTARGET_LENGTH) }
         {}
     };
     EmitSection prologue, main, *current;
 
     Parser<FullParseHandler>* const parser;
 
@@ -345,17 +345,17 @@ struct MOZ_STACK_CLASS BytecodeEmitter
     // Check whether our function is in a run-once context (a toplevel
     // run-once script or a run-once lambda).
     MOZ_MUST_USE bool checkRunOnceContext();
 
     bool needsImplicitThis();
 
     MOZ_MUST_USE bool maybeSetDisplayURL();
     MOZ_MUST_USE bool maybeSetSourceMap();
-    void tellDebuggerAboutCompiledScript(ExclusiveContext* cx);
+    void tellDebuggerAboutCompiledScript(JSContext* cx);
 
     inline TokenStream* tokenStream();
 
     BytecodeVector& code() const { return current->code; }
     jsbytecode* code(ptrdiff_t offset) const { return current->code.begin() + offset; }
     ptrdiff_t offset() const { return current->code.end() - current->code.begin(); }
     ptrdiff_t prologueOffset() const { return prologue.code.end() - prologue.code.begin(); }
     void switchToMain() { current = &main; }
--- a/js/src/frontend/FoldConstants.cpp
+++ b/js/src/frontend/FoldConstants.cpp
@@ -24,20 +24,20 @@ using mozilla::IsNaN;
 using mozilla::IsNegative;
 using mozilla::NegativeInfinity;
 using mozilla::PositiveInfinity;
 using JS::GenericNaN;
 using JS::ToInt32;
 using JS::ToUint32;
 
 static bool
-ContainsHoistedDeclaration(ExclusiveContext* cx, ParseNode* node, bool* result);
+ContainsHoistedDeclaration(JSContext* cx, ParseNode* node, bool* result);
 
 static bool
-ListContainsHoistedDeclaration(ExclusiveContext* cx, ListNode* list, bool* result)
+ListContainsHoistedDeclaration(JSContext* cx, ListNode* list, bool* result)
 {
     for (ParseNode* node = list->pn_head; node; node = node->pn_next) {
         if (!ContainsHoistedDeclaration(cx, node, result))
             return false;
         if (*result)
             return true;
     }
 
@@ -49,17 +49,17 @@ ListContainsHoistedDeclaration(Exclusive
 // visibility will extend outside the node itself -- that is, whether the
 // ParseNode contains any var statements.
 //
 // THIS IS NOT A GENERAL-PURPOSE FUNCTION.  It is only written to work in the
 // specific context of deciding that |node|, as one arm of a PNK_IF controlled
 // by a constant condition, contains a declaration that forbids |node| being
 // completely eliminated as dead.
 static bool
-ContainsHoistedDeclaration(ExclusiveContext* cx, ParseNode* node, bool* result)
+ContainsHoistedDeclaration(JSContext* cx, ParseNode* node, bool* result)
 {
     JS_CHECK_RECURSION(cx, return false);
 
   restart:
 
     // With a better-typed AST, we would have distinct parse node classes for
     // expressions and for statements and would characterize expressions with
     // ExpressionKind and statements with StatementKind.  Perhaps someday.  In
@@ -410,17 +410,17 @@ ContainsHoistedDeclaration(ExclusiveCont
     MOZ_CRASH("invalid node kind");
 }
 
 /*
  * Fold from one constant type to another.
  * XXX handles only strings and numbers for now
  */
 static bool
-FoldType(ExclusiveContext* cx, ParseNode* pn, ParseNodeKind kind)
+FoldType(JSContext* cx, ParseNode* pn, ParseNodeKind kind)
 {
     if (!pn->isKind(kind)) {
         switch (kind) {
           case PNK_NUMBER:
             if (pn->isKind(PNK_STRING)) {
                 double d;
                 if (!StringToNumber(cx, pn->pn_atom, &d))
                     return false;
@@ -512,20 +512,20 @@ Boolish(ParseNode* pn)
       }
 
       default:
         return Unknown;
     }
 }
 
 static bool
-Fold(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bool inGenexpLambda);
+Fold(JSContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bool inGenexpLambda);
 
 static bool
-FoldCondition(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
+FoldCondition(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
               bool inGenexpLambda)
 {
     // Conditions fold like any other expression...
     if (!Fold(cx, nodePtr, parser, inGenexpLambda))
         return false;
 
     // ...but then they sometimes can be further folded to constants.
     ParseNode* node = *nodePtr;
@@ -546,17 +546,17 @@ FoldCondition(ExclusiveContext* cx, Pars
         }
         node->setArity(PN_NULLARY);
     }
 
     return true;
 }
 
 static bool
-FoldTypeOfExpr(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldTypeOfExpr(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
                bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_TYPEOFEXPR));
     MOZ_ASSERT(node->isArity(PN_UNARY));
 
     ParseNode*& expr = node->pn_kid;
     if (!Fold(cx, &expr, parser, inGenexpLambda))
         return false;
@@ -582,17 +582,17 @@ FoldTypeOfExpr(ExclusiveContext* cx, Par
         node->setOp(JSOP_NOP);
         node->pn_atom = result;
     }
 
     return true;
 }
 
 static bool
-FoldDeleteExpr(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldDeleteExpr(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
                bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_DELETEEXPR));
     MOZ_ASSERT(node->isArity(PN_UNARY));
 
     ParseNode*& expr = node->pn_kid;
     if (!Fold(cx, &expr, parser, inGenexpLambda))
         return false;
@@ -605,17 +605,17 @@ FoldDeleteExpr(ExclusiveContext* cx, Par
         node->setArity(PN_NULLARY);
         node->setOp(JSOP_TRUE);
     }
 
     return true;
 }
 
 static bool
-FoldDeleteElement(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldDeleteElement(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
                   bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_DELETEELEM));
     MOZ_ASSERT(node->isArity(PN_UNARY));
     MOZ_ASSERT(node->pn_kid->isKind(PNK_ELEM));
 
     ParseNode*& expr = node->pn_kid;
     if (!Fold(cx, &expr, parser, inGenexpLambda))
@@ -630,17 +630,17 @@ FoldDeleteElement(ExclusiveContext* cx, 
     MOZ_ASSERT(expr->isKind(PNK_ELEM) || expr->isKind(PNK_DOT));
     if (expr->isKind(PNK_DOT))
         node->setKind(PNK_DELETEPROP);
 
     return true;
 }
 
 static bool
-FoldDeleteProperty(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldDeleteProperty(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
                    bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_DELETEPROP));
     MOZ_ASSERT(node->isArity(PN_UNARY));
     MOZ_ASSERT(node->pn_kid->isKind(PNK_DOT));
 
     ParseNode*& expr = node->pn_kid;
 #ifdef DEBUG
@@ -652,17 +652,17 @@ FoldDeleteProperty(ExclusiveContext* cx,
 
     MOZ_ASSERT(expr->isKind(oldKind),
                "kind should have remained invariant under folding");
 
     return true;
 }
 
 static bool
-FoldNot(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldNot(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
         bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_NOT));
     MOZ_ASSERT(node->isArity(PN_UNARY));
 
     ParseNode*& expr = node->pn_kid;
     if (!FoldCondition(cx, &expr, parser, inGenexpLambda))
         return false;
@@ -687,17 +687,17 @@ FoldNot(ExclusiveContext* cx, ParseNode*
         node->setArity(PN_NULLARY);
         node->setOp(newval ? JSOP_TRUE : JSOP_FALSE);
     }
 
     return true;
 }
 
 static bool
-FoldUnaryArithmetic(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldUnaryArithmetic(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
                     bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_BITNOT) || node->isKind(PNK_POS) || node->isKind(PNK_NEG),
                "need a different method for this node kind");
     MOZ_ASSERT(node->isArity(PN_UNARY));
 
     ParseNode*& expr = node->pn_kid;
     if (!Fold(cx, &expr, parser, inGenexpLambda))
@@ -721,17 +721,17 @@ FoldUnaryArithmetic(ExclusiveContext* cx
         node->setArity(PN_NULLARY);
         node->pn_dval = d;
     }
 
     return true;
 }
 
 static bool
-FoldIncrementDecrement(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldIncrementDecrement(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
                        bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_PREINCREMENT) ||
                node->isKind(PNK_POSTINCREMENT) ||
                node->isKind(PNK_PREDECREMENT) ||
                node->isKind(PNK_POSTDECREMENT));
     MOZ_ASSERT(node->isArity(PN_UNARY));
 
@@ -742,17 +742,17 @@ FoldIncrementDecrement(ExclusiveContext*
         return false;
 
     MOZ_ASSERT(parser.isValidSimpleAssignmentTarget(target, Parser<FullParseHandler>::PermitAssignmentToFunctionCalls));
 
     return true;
 }
 
 static bool
-FoldAndOr(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
+FoldAndOr(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
           bool inGenexpLambda)
 {
     ParseNode* node = *nodePtr;
 
     MOZ_ASSERT(node->isKind(PNK_AND) || node->isKind(PNK_OR));
     MOZ_ASSERT(node->isArity(PN_LIST));
 
     bool isOrNode = node->isKind(PNK_OR);
@@ -825,17 +825,17 @@ FoldAndOr(ExclusiveContext* cx, ParseNod
         node->setArity(PN_NULLARY);
         parser.freeTree(node);
     }
 
     return true;
 }
 
 static bool
-FoldConditional(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
+FoldConditional(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
                 bool inGenexpLambda)
 {
     ParseNode** nextNode = nodePtr;
 
     do {
         // |nextNode| on entry points to the C?T:F expression to be folded.
         // Reset it to exit the loop in the common case where F isn't another
         // ?: expression.
@@ -895,17 +895,17 @@ FoldConditional(ExclusiveContext* cx, Pa
 
         parser.freeTree(discarded);
     } while (nextNode);
 
     return true;
 }
 
 static bool
-FoldIf(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
+FoldIf(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
        bool inGenexpLambda)
 {
     ParseNode** nextNode = nodePtr;
 
     do {
         // |nextNode| on entry points to the initial |if| to be folded.  Reset
         // it to exit the loop when the |else| arm isn't another |if|.
         nodePtr = nextNode;
@@ -999,17 +999,17 @@ FoldIf(ExclusiveContext* cx, ParseNode**
             parser.freeTree(node);
         }
     } while (nextNode);
 
     return true;
 }
 
 static bool
-FoldFunction(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldFunction(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
              bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_FUNCTION));
     MOZ_ASSERT(node->isArity(PN_CODE));
 
     // Don't constant-fold inside "use asm" code, as this could create a parse
     // tree that doesn't type-check as asm.js.
     if (node->pn_funbox->useAsmOrInsideUseAsm())
@@ -1062,28 +1062,28 @@ ComputeBinary(ParseNodeKind kind, double
     MOZ_ASSERT(kind == PNK_LSH || kind == PNK_RSH);
 
     int32_t i = ToInt32(left);
     uint32_t j = ToUint32(right) & 31;
     return int32_t((kind == PNK_LSH) ? uint32_t(i) << j : i >> j);
 }
 
 static bool
-FoldModule(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser)
+FoldModule(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser)
 {
     MOZ_ASSERT(node->isKind(PNK_MODULE));
     MOZ_ASSERT(node->isArity(PN_CODE));
 
     ParseNode*& moduleBody = node->pn_body;
     MOZ_ASSERT(moduleBody);
     return Fold(cx, &moduleBody, parser, false);
 }
 
 static bool
-FoldBinaryArithmetic(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldBinaryArithmetic(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
                      bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_SUB) ||
                node->isKind(PNK_STAR) ||
                node->isKind(PNK_LSH) ||
                node->isKind(PNK_RSH) ||
                node->isKind(PNK_URSH) ||
                node->isKind(PNK_DIV) ||
@@ -1145,17 +1145,17 @@ FoldBinaryArithmetic(ExclusiveContext* c
             parser.freeTree(elem);
         }
     }
 
     return true;
 }
 
 static bool
-FoldExponentiation(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldExponentiation(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
                    bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_POW));
     MOZ_ASSERT(node->isArity(PN_LIST));
     MOZ_ASSERT(node->pn_count >= 2);
 
     // Fold each operand, ideally into a number.
     ParseNode** listp = &node->pn_head;
@@ -1189,17 +1189,17 @@ FoldExponentiation(ExclusiveContext* cx,
     node->setKind(PNK_NUMBER);
     node->setArity(PN_NULLARY);
     node->setOp(JSOP_DOUBLE);
     node->pn_dval = ecmaPow(d1, d2);
     return true;
 }
 
 static bool
-FoldList(ExclusiveContext* cx, ParseNode* list, Parser<FullParseHandler>& parser,
+FoldList(JSContext* cx, ParseNode* list, Parser<FullParseHandler>& parser,
          bool inGenexpLambda)
 {
     MOZ_ASSERT(list->isArity(PN_LIST));
 
     ParseNode** elem = &list->pn_head;
     for (; *elem; elem = &(*elem)->pn_next) {
         if (!Fold(cx, elem, parser, inGenexpLambda))
             return false;
@@ -1209,32 +1209,32 @@ FoldList(ExclusiveContext* cx, ParseNode
     list->pn_tail = elem;
 
     list->checkListConsistency();
 
     return true;
 }
 
 static bool
-FoldReturn(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldReturn(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
            bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_RETURN));
     MOZ_ASSERT(node->isArity(PN_UNARY));
 
     if (ParseNode*& expr = node->pn_kid) {
         if (!Fold(cx, &expr, parser, inGenexpLambda))
             return false;
     }
 
     return true;
 }
 
 static bool
-FoldTry(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldTry(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
         bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_TRY));
     MOZ_ASSERT(node->isArity(PN_TERNARY));
 
     ParseNode*& statements = node->pn_kid1;
     if (!Fold(cx, &statements, parser, inGenexpLambda))
         return false;
@@ -1248,17 +1248,17 @@ FoldTry(ExclusiveContext* cx, ParseNode*
         if (!Fold(cx, &finally, parser, inGenexpLambda))
             return false;
     }
 
     return true;
 }
 
 static bool
-FoldCatch(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldCatch(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
           bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_CATCH));
     MOZ_ASSERT(node->isArity(PN_TERNARY));
 
     ParseNode*& declPattern = node->pn_kid1;
     if (!Fold(cx, &declPattern, parser, inGenexpLambda))
         return false;
@@ -1272,17 +1272,17 @@ FoldCatch(ExclusiveContext* cx, ParseNod
         if (!Fold(cx, &statements, parser, inGenexpLambda))
             return false;
     }
 
     return true;
 }
 
 static bool
-FoldClass(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldClass(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
           bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_CLASS));
     MOZ_ASSERT(node->isArity(PN_TERNARY));
 
     if (ParseNode*& classNames = node->pn_kid1) {
         if (!Fold(cx, &classNames, parser, inGenexpLambda))
             return false;
@@ -1293,17 +1293,17 @@ FoldClass(ExclusiveContext* cx, ParseNod
             return false;
     }
 
     ParseNode*& body = node->pn_kid3;
     return Fold(cx, &body, parser, inGenexpLambda);
 }
 
 static bool
-FoldElement(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
+FoldElement(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
             bool inGenexpLambda)
 {
     ParseNode* node = *nodePtr;
 
     MOZ_ASSERT(node->isKind(PNK_ELEM));
     MOZ_ASSERT(node->isArity(PN_BINARY));
 
     ParseNode*& expr = node->pn_left;
@@ -1369,17 +1369,17 @@ FoldElement(ExclusiveContext* cx, ParseN
     node->setArity(PN_UNARY);
     node->pn_kid = key;
     parser.freeTree(node);
 
     return true;
 }
 
 static bool
-FoldAdd(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
+FoldAdd(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
         bool inGenexpLambda)
 {
     ParseNode* node = *nodePtr;
 
     MOZ_ASSERT(node->isKind(PNK_ADD));
     MOZ_ASSERT(node->isArity(PN_LIST));
     MOZ_ASSERT(node->pn_count >= 2);
 
@@ -1520,17 +1520,17 @@ FoldAdd(ExclusiveContext* cx, ParseNode*
         node->setOp(JSOP_TRUE);
         parser.freeTree(node);
     }
 
     return true;
 }
 
 static bool
-FoldCall(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldCall(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
          bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_CALL) || node->isKind(PNK_SUPERCALL) ||
                node->isKind(PNK_TAGGED_TEMPLATE));
     MOZ_ASSERT(node->isArity(PN_LIST));
 
     // Don't fold a parenthesized callable component in an invocation, as this
     // might cause a different |this| value to be used, changing semantics:
@@ -1555,29 +1555,29 @@ FoldCall(ExclusiveContext* cx, ParseNode
     // If the last node in the list was replaced, pn_tail points into the wrong node.
     node->pn_tail = listp;
 
     node->checkListConsistency();
     return true;
 }
 
 static bool
-FoldForInOrOf(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldForInOrOf(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
               bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_FORIN) || node->isKind(PNK_FOROF));
     MOZ_ASSERT(node->isArity(PN_TERNARY));
     MOZ_ASSERT(!node->pn_kid2);
 
     return Fold(cx, &node->pn_kid1, parser, inGenexpLambda) &&
            Fold(cx, &node->pn_kid3, parser, inGenexpLambda);
 }
 
 static bool
-FoldForHead(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldForHead(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
             bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_FORHEAD));
     MOZ_ASSERT(node->isArity(PN_TERNARY));
 
     if (ParseNode*& init = node->pn_kid1) {
         if (!Fold(cx, &init, parser, inGenexpLambda))
             return false;
@@ -1597,17 +1597,17 @@ FoldForHead(ExclusiveContext* cx, ParseN
         if (!Fold(cx, &update, parser, inGenexpLambda))
             return false;
     }
 
     return true;
 }
 
 static bool
-FoldDottedProperty(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldDottedProperty(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
                    bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_DOT));
     MOZ_ASSERT(node->isArity(PN_NAME));
 
     // Iterate through a long chain of dotted property accesses to find the
     // most-nested non-dotted property node, then fold that.
     ParseNode** nested = &node->pn_expr;
@@ -1615,30 +1615,30 @@ FoldDottedProperty(ExclusiveContext* cx,
         MOZ_ASSERT((*nested)->isArity(PN_NAME));
         nested = &(*nested)->pn_expr;
     }
 
     return Fold(cx, nested, parser, inGenexpLambda);
 }
 
 static bool
-FoldName(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
+FoldName(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
          bool inGenexpLambda)
 {
     MOZ_ASSERT(node->isKind(PNK_NAME));
     MOZ_ASSERT(node->isArity(PN_NAME));
 
     if (!node->pn_expr)
         return true;
 
     return Fold(cx, &node->pn_expr, parser, inGenexpLambda);
 }
 
 bool
-Fold(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bool inGenexpLambda)
+Fold(JSContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bool inGenexpLambda)
 {
     JS_CHECK_RECURSION(cx, return false);
 
     ParseNode* pn = *pnp;
 
     switch (pn->getKind()) {
       case PNK_NOP:
       case PNK_REGEXP:
@@ -1916,17 +1916,17 @@ Fold(ExclusiveContext* cx, ParseNode** p
         MOZ_CRASH("invalid node kind");
     }
 
     MOZ_CRASH("shouldn't reach here");
     return false;
 }
 
 bool
-frontend::FoldConstants(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>* parser)
+frontend::FoldConstants(JSContext* cx, ParseNode** pnp, Parser<FullParseHandler>* parser)
 {
     // Don't constant-fold inside "use asm" code, as this could create a parse
     // tree that doesn't type-check as asm.js.
     if (parser->pc->useAsmOrInsideUseAsm())
         return true;
 
     return Fold(cx, pnp, *parser, false);
 }
--- a/js/src/frontend/FoldConstants.h
+++ b/js/src/frontend/FoldConstants.h
@@ -21,20 +21,20 @@ namespace frontend {
 //
 // Usage:
 //    pn = parser->statement();
 //    if (!pn)
 //        return false;
 //    if (!FoldConstants(cx, &pn, parser))
 //        return false;
 MOZ_MUST_USE bool
-FoldConstants(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>* parser);
+FoldConstants(JSContext* cx, ParseNode** pnp, Parser<FullParseHandler>* parser);
 
 inline MOZ_MUST_USE bool
-FoldConstants(ExclusiveContext* cx, SyntaxParseHandler::Node* pnp,
+FoldConstants(JSContext* cx, SyntaxParseHandler::Node* pnp,
               Parser<SyntaxParseHandler>* parser)
 {
     return true;
 }
 
 } /* namespace frontend */
 } /* namespace js */
 
--- a/js/src/frontend/FullParseHandler.h
+++ b/js/src/frontend/FullParseHandler.h
@@ -92,34 +92,34 @@ class FullParseHandler
         return node->isInParens() && (node->isKind(PNK_OBJECT) || node->isKind(PNK_ARRAY));
     }
 
     static bool isDestructuringPatternAnyParentheses(ParseNode* node) {
         return isUnparenthesizedDestructuringPattern(node) ||
                isParenthesizedDestructuringPattern(node);
     }
 
-    FullParseHandler(ExclusiveContext* cx, LifoAlloc& alloc,
+    FullParseHandler(JSContext* cx, LifoAlloc& alloc,
                      TokenStream& tokenStream, Parser<SyntaxParseHandler>* syntaxParser,
                      LazyScript* lazyOuterFunction)
       : allocator(cx, alloc),
         tokenStream(tokenStream),
         lazyOuterFunction_(cx, lazyOuterFunction),
         lazyInnerFunctionIndex(0),
         lazyClosedOverBindingIndex(0),
         syntaxParser(syntaxParser)
     {}
 
     static ParseNode* null() { return nullptr; }
 
     ParseNode* freeTree(ParseNode* pn) { return allocator.freeTree(pn); }
     void prepareNodeForMutation(ParseNode* pn) { return allocator.prepareNodeForMutation(pn); }
     const Token& currentToken() { return tokenStream.currentToken(); }
 
-    ParseNode* newName(PropertyName* name, const TokenPos& pos, ExclusiveContext* cx)
+    ParseNode* newName(PropertyName* name, const TokenPos& pos, JSContext* cx)
     {
         return new_<NameNode>(PNK_NAME, JSOP_GETNAME, name, pos);
     }
 
     ParseNode* newComputedName(ParseNode* expr, uint32_t begin, uint32_t end) {
         TokenPos pos(begin, end);
         return new_<UnaryNode>(PNK_COMPUTED_NAME, JSOP_NOP, pos, expr);
     }
@@ -871,32 +871,32 @@ class FullParseHandler
     bool isUnparenthesizedName(ParseNode* node) {
         return node->isKind(PNK_NAME) && !node->isInParens();
     }
 
     bool isNameAnyParentheses(ParseNode* node) {
         return node->isKind(PNK_NAME);
     }
 
-    bool isEvalAnyParentheses(ParseNode* node, ExclusiveContext* cx) {
+    bool isEvalAnyParentheses(ParseNode* node, JSContext* cx) {
         return node->isKind(PNK_NAME) && node->pn_atom == cx->names().eval;
     }
 
-    const char* nameIsArgumentsEvalAnyParentheses(ParseNode* node, ExclusiveContext* cx) {
+    const char* nameIsArgumentsEvalAnyParentheses(ParseNode* node, JSContext* cx) {
         MOZ_ASSERT(isNameAnyParentheses(node),
                    "must only call this function on known names");
 
         if (isEvalAnyParentheses(node, cx))
             return js_eval_str;
         if (node->pn_atom == cx->names().arguments)
             return js_arguments_str;
         return nullptr;
     }
 
-    bool isAsyncKeyword(ParseNode* node, ExclusiveContext* cx) {
+    bool isAsyncKeyword(ParseNode* node, JSContext* cx) {
         return node->isKind(PNK_NAME) &&
                node->pn_pos.begin + strlen("async") == node->pn_pos.end &&
                node->pn_atom == cx->names().async;
     }
 
     bool isCall(ParseNode* pn) {
         return pn->isKind(PNK_CALL);
     }
--- a/js/src/frontend/NameCollections.h
+++ b/js/src/frontend/NameCollections.h
@@ -57,17 +57,17 @@ class CollectionPool
             js_delete(asRepresentative(*it));
 
         all_.clearAndFree();
         recyclable_.clearAndFree();
     }
 
     // Fallibly acquire one of the supported collection types from the pool.
     template <typename Collection>
-    Collection* acquire(ExclusiveContext* cx) {
+    Collection* acquire(JSContext* cx) {
         ConcreteCollectionPool::template assertInvariants<Collection>();
 
         RepresentativeCollection* collection;
         if (recyclable_.empty()) {
             collection = allocate();
             if (!collection)
                 ReportOutOfMemory(cx);
         } else {
@@ -210,31 +210,31 @@ class NameCollectionPool
     }
 
     void removeActiveCompilation() {
         MOZ_ASSERT(hasActiveCompilation());
         activeCompilations_--;
     }
 
     template <typename Map>
-    Map* acquireMap(ExclusiveContext* cx) {
+    Map* acquireMap(JSContext* cx) {
         MOZ_ASSERT(hasActiveCompilation());
         return mapPool_.acquire<Map>(cx);
     }
 
     template <typename Map>
     void releaseMap(Map** map) {
         MOZ_ASSERT(hasActiveCompilation());
         MOZ_ASSERT(map);
         if (*map)
             mapPool_.release(map);
     }
 
     template <typename Vector>
-    Vector* acquireVector(ExclusiveContext* cx) {
+    Vector* acquireVector(JSContext* cx) {
         MOZ_ASSERT(hasActiveCompilation());
         return vectorPool_.acquire<Vector>(cx);
     }
 
     template <typename Vector>
     void releaseVector(Vector** vec) {
         MOZ_ASSERT(hasActiveCompilation());
         MOZ_ASSERT(vec);
@@ -269,17 +269,17 @@ class NameCollectionPool
       : pool_(pool),                                              \
         collection_(nullptr)                                      \
     { }                                                           \
                                                                   \
     ~N() {                                                        \
         pool_.release##T(&collection_);                           \
     }                                                             \
                                                                   \
-    bool acquire(ExclusiveContext* cx) {                          \
+    bool acquire(JSContext* cx) {                                 \
         MOZ_ASSERT(!collection_);                                 \
         collection_ = pool_.acquire##T<T>(cx);                    \
         return !!collection_;                                     \
     }                                                             \
                                                                   \
     explicit operator bool() const {                              \
         return !!collection_;                                     \
     }                                                             \
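
The NameCollections.h hunks above convert the name-collection pool to hand out collections against a JSContext. As an illustrative sketch only (the helper function is hypothetical; acquireMap/releaseMap are the pool methods shown above, and the pool asserts that an active compilation has been registered beforehand), the acquire/release discipline looks like:

    template <typename Map>
    static bool
    UseMapFromPool(JSContext* cx, NameCollectionPool& pool)
    {
        // Requires a prior pool.addActiveCompilation(); acquireMap reports
        // OOM on failure.
        Map* map = pool.acquireMap<Map>(cx);
        if (!map)
            return false;
        // ... populate and consult |map| while compiling ...
        pool.releaseMap(&map);   // hand the collection back for reuse
        return true;
    }
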
--- a/js/src/frontend/NameFunctions.cpp
+++ b/js/src/frontend/NameFunctions.cpp
@@ -20,17 +20,17 @@ using namespace js;
 using namespace js::frontend;
 
 namespace {
 
 class NameResolver
 {
     static const size_t MaxParents = 100;
 
-    ExclusiveContext* cx;
+    JSContext* cx;
     size_t nparents;                /* number of parents in the parents array */
     ParseNode* parents[MaxParents]; /* history of ParseNodes we've been looking at */
     StringBuffer* buf;              /* when resolving, buffer to append to */
 
     /* Test whether a ParseNode represents a function invocation */
     bool call(ParseNode* pn) {
         return pn && pn->isKind(PNK_CALL);
     }
@@ -337,17 +337,17 @@ class NameResolver
             if (!resolve(interpolated, prefix))
                 return false;
         }
 
         return true;
     }
 
   public:
-    explicit NameResolver(ExclusiveContext* cx) : cx(cx), nparents(0), buf(nullptr) {}
+    explicit NameResolver(JSContext* cx) : cx(cx), nparents(0), buf(nullptr) {}
 
     /*
      * Resolve all names for anonymous functions recursively within the
      * ParseNode instance given. The prefix is for each subsequent name, and
      * should initially be nullptr.
      */
     bool resolve(ParseNode* cur, HandleAtom prefixArg = nullptr) {
         RootedAtom prefix(cx, prefixArg);
@@ -828,13 +828,13 @@ class NameResolver
         nparents--;
         return true;
     }
 };
 
 } /* anonymous namespace */
 
 bool
-frontend::NameFunctions(ExclusiveContext* cx, ParseNode* pn)
+frontend::NameFunctions(JSContext* cx, ParseNode* pn)
 {
     NameResolver nr(cx);
     return nr.resolve(pn);
 }
--- a/js/src/frontend/NameFunctions.h
+++ b/js/src/frontend/NameFunctions.h
@@ -7,22 +7,19 @@
 #ifndef frontend_NameFunctions_h
 #define frontend_NameFunctions_h
 
 #include "mozilla/Attributes.h"
 
 #include "js/TypeDecls.h"
 
 namespace js {
-
-class ExclusiveContext;
-
 namespace frontend {
 
 class ParseNode;
 
 MOZ_MUST_USE bool
-NameFunctions(ExclusiveContext* cx, ParseNode* pn);
+NameFunctions(JSContext* cx, ParseNode* pn);
 
 } /* namespace frontend */
 } /* namespace js */
 
 #endif /* frontend_NameFunctions_h */
--- a/js/src/frontend/ParseNode.h
+++ b/js/src/frontend/ParseNode.h
@@ -783,17 +783,17 @@ class ParseNode
     ;
 
     enum AllowConstantObjects {
         DontAllowObjects = 0,
         AllowObjects,
         ForCopyOnWriteArray
     };
 
-    MOZ_MUST_USE bool getConstantValue(ExclusiveContext* cx, AllowConstantObjects allowObjects,
+    MOZ_MUST_USE bool getConstantValue(JSContext* cx, AllowConstantObjects allowObjects,
                                        MutableHandleValue vp, Value* compare = nullptr,
                                        size_t ncompare = 0, NewObjectKind newKind = TenuredObject);
     inline bool isConstant();
 
     template <class NodeType>
     inline bool is() const {
         return NodeType::test(*this);
     }
@@ -1239,17 +1239,17 @@ class PropertyByValue : public ParseNode
  */
 struct CallSiteNode : public ListNode {
     explicit CallSiteNode(uint32_t begin): ListNode(PNK_CALLSITEOBJ, TokenPos(begin, begin + 1)) {}
 
     static bool test(const ParseNode& node) {
         return node.isKind(PNK_CALLSITEOBJ);
     }
 
-    MOZ_MUST_USE bool getRawArrayValue(ExclusiveContext* cx, MutableHandleValue vp) {
+    MOZ_MUST_USE bool getRawArrayValue(JSContext* cx, MutableHandleValue vp) {
         return pn_head->getConstantValue(cx, AllowObjects, vp);
     }
 };
 
 struct ClassMethod : public BinaryNode {
     /*
      * Method definitions often keep a name and function body that overlap,
      * so explicitly define the beginning and end here.
@@ -1346,27 +1346,27 @@ struct ClassNode : public TernaryNode {
 
 #ifdef DEBUG
 void DumpParseTree(ParseNode* pn, int indent = 0);
 #endif
 
 class ParseNodeAllocator
 {
   public:
-    explicit ParseNodeAllocator(ExclusiveContext* cx, LifoAlloc& alloc)
+    explicit ParseNodeAllocator(JSContext* cx, LifoAlloc& alloc)
       : cx(cx), alloc(alloc), freelist(nullptr)
     {}
 
     void* allocNode();
     void freeNode(ParseNode* pn);
     ParseNode* freeTree(ParseNode* pn);
     void prepareNodeForMutation(ParseNode* pn);
 
   private:
-    ExclusiveContext* cx;
+    JSContext* cx;
     LifoAlloc& alloc;
     ParseNode* freelist;
 };
 
 inline bool
 ParseNode::isConstant()
 {
     switch (pn_type) {
--- a/js/src/frontend/Parser.cpp
+++ b/js/src/frontend/Parser.cpp
@@ -126,17 +126,17 @@ StatementKindIsBraced(StatementKind kind
            kind == StatementKind::Try ||
            kind == StatementKind::Catch ||
            kind == StatementKind::Finally;
 }
 
 void
 ParseContext::Scope::dump(ParseContext* pc)
 {
-    ExclusiveContext* cx = pc->sc()->context;
+    JSContext* cx = pc->sc()->context;
 
     fprintf(stdout, "ParseScope %p", this);
 
     fprintf(stdout, "\n  decls:\n");
     for (DeclaredNameMap::Range r = declared_->all(); !r.empty(); r.popFront()) {
         JSAutoByteString bytes;
         if (!AtomToPrintableString(cx, r.front().key(), &bytes))
             return;
@@ -267,17 +267,17 @@ SharedContext::computeInWith(Scope* scop
     for (ScopeIter si(scope); si; si++) {
         if (si.kind() == ScopeKind::With) {
             inWith_ = true;
             break;
         }
     }
 }
 
-EvalSharedContext::EvalSharedContext(ExclusiveContext* cx, JSObject* enclosingEnv,
+EvalSharedContext::EvalSharedContext(JSContext* cx, JSObject* enclosingEnv,
                                      Scope* enclosingScope, Directives directives,
                                      bool extraWarnings)
   : SharedContext(cx, Kind::Eval, directives, extraWarnings),
     enclosingScope_(cx, enclosingScope),
     bindings(cx)
 {
     computeAllowSyntax(enclosingScope);
     computeInWith(enclosingScope);
@@ -310,17 +310,17 @@ EvalSharedContext::EvalSharedContext(Exc
 bool
 ParseContext::init()
 {
     if (scriptId_ == UINT32_MAX) {
         tokenStream_.reportError(JSMSG_NEED_DIET, js_script_str);
         return false;
     }
 
-    ExclusiveContext* cx = sc()->context;
+    JSContext* cx = sc()->context;
 
     if (isFunctionBox()) {
         // Named lambdas always need a binding for their own name. If this
         // binding is closed over when we finish parsing the function in
         // finishExtraFunctionScopes, the function box needs to be marked as
         // needing a dynamic DeclEnv object.
         RootedFunction fun(cx, functionBox()->function());
         if (fun->isNamedLambda()) {
@@ -395,17 +395,17 @@ ParseContext::~ParseContext()
 {
     // Any funboxes still in the list at the end of parsing mean no early
     // error would have occurred for declaring a binding in the nearest var
     // scope. Mark them as needing extra assignments to this var binding.
     finishInnerFunctionBoxesForAnnexB();
 }
 
 bool
-UsedNameTracker::noteUse(ExclusiveContext* cx, JSAtom* name, uint32_t scriptId, uint32_t scopeId)
+UsedNameTracker::noteUse(JSContext* cx, JSAtom* name, uint32_t scriptId, uint32_t scopeId)
 {
     if (UsedNameMap::AddPtr p = map_.lookupForAdd(name)) {
         if (!p->value().noteUsedInScope(scriptId, scopeId))
             return false;
     } else {
         UsedNameInfo info(cx);
         if (!info.noteUsedInScope(scriptId, scopeId))
             return false;
@@ -433,17 +433,17 @@ UsedNameTracker::rewind(RewindToken toke
 {
     scriptCounter_ = token.scriptId;
     scopeCounter_ = token.scopeId;
 
     for (UsedNameMap::Range r = map_.all(); !r.empty(); r.popFront())
         r.front().value().resetToScope(token.scriptId, token.scopeId);
 }
 
-FunctionBox::FunctionBox(ExclusiveContext* cx, LifoAlloc& alloc, ObjectBox* traceListHead,
+FunctionBox::FunctionBox(JSContext* cx, LifoAlloc& alloc, ObjectBox* traceListHead,
                          JSFunction* fun, Directives directives, bool extraWarnings,
                          GeneratorKind generatorKind, FunctionAsyncKind asyncKind)
   : ObjectBox(fun, traceListHead),
     SharedContext(cx, Kind::ObjectBox, directives, extraWarnings),
     enclosingScope_(nullptr),
     namedLambdaBindings_(nullptr),
     functionScopeBindings_(nullptr),
     extraVarScopeBindings_(nullptr),
@@ -685,59 +685,59 @@ Parser<FullParseHandler>::abortIfSyntaxP
 template <>
 bool
 Parser<SyntaxParseHandler>::abortIfSyntaxParser()
 {
     abortedSyntaxParse = true;
     return false;
 }
 
-ParserBase::ParserBase(ExclusiveContext* cx, LifoAlloc& alloc,
+ParserBase::ParserBase(JSContext* cx, LifoAlloc& alloc,
                        const ReadOnlyCompileOptions& options,
                        const char16_t* chars, size_t length,
                        bool foldConstants,
                        UsedNameTracker& usedNames,
                        Parser<SyntaxParseHandler>* syntaxParser,
                        LazyScript* lazyOuterFunction)
   : context(cx),
     alloc(alloc),
     tokenStream(cx, options, chars, length, thisForCtor()),
     traceListHead(nullptr),
     pc(nullptr),
     usedNames(usedNames),
     sct(nullptr),
     ss(nullptr),
-    keepAtoms(cx->perThreadData),
+    keepAtoms(cx),
     foldConstants(foldConstants),
 #ifdef DEBUG
     checkOptionsCalled(false),
 #endif
     abortedSyntaxParse(false),
     isUnexpectedEOF_(false)
 {
-    cx->perThreadData->frontendCollectionPool.addActiveCompilation();
+    cx->frontendCollectionPool().addActiveCompilation();
     tempPoolMark = alloc.mark();
 }
 
 ParserBase::~ParserBase()
 {
     alloc.release(tempPoolMark);
 
     /*
      * The parser can allocate enormous amounts of memory for large functions.
      * Eagerly free the memory now (which otherwise won't be freed until the
      * next GC) to avoid unnecessary OOMs.
      */
     alloc.freeAllIfHugeAndUnused();
 
-    context->perThreadData->frontendCollectionPool.removeActiveCompilation();
-}
-
-template <typename ParseHandler>
-Parser<ParseHandler>::Parser(ExclusiveContext* cx, LifoAlloc& alloc,
+    context->frontendCollectionPool().removeActiveCompilation();
+}
+
+template <typename ParseHandler>
+Parser<ParseHandler>::Parser(JSContext* cx, LifoAlloc& alloc,
                              const ReadOnlyCompileOptions& options,
                              const char16_t* chars, size_t length,
                              bool foldConstants,
                              UsedNameTracker& usedNames,
                              Parser<SyntaxParseHandler>* syntaxParser,
                              LazyScript* lazyOuterFunction)
   : ParserBase(cx, alloc, options, chars, length, foldConstants, usedNames, syntaxParser,
               lazyOuterFunction),
@@ -822,17 +822,17 @@ Parser<ParseHandler>::newFunctionBox(Nod
         handler.setFunctionBox(fn, funbox);
 
     if (tryAnnexB && !pc->addInnerFunctionBoxForAnnexB(funbox))
         return nullptr;
 
     return funbox;
 }
 
-ModuleSharedContext::ModuleSharedContext(ExclusiveContext* cx, ModuleObject* module,
+ModuleSharedContext::ModuleSharedContext(JSContext* cx, ModuleObject* module,
                                          Scope* enclosingScope, ModuleBuilder& builder)
   : SharedContext(cx, Kind::Module, Directives(true), false),
     module_(cx, module),
     enclosingScope_(cx, enclosingScope),
     bindings(cx),
     builder(builder)
 {
     thisBinding_ = ThisBinding::Module;
@@ -1486,17 +1486,17 @@ Parser<FullParseHandler>::checkStatement
         error(JSMSG_UNEXPECTED_TOKEN, "expression", TokenKindToDesc(tt));
         return false;
     }
     return true;
 }
 
 template <typename Scope>
 static typename Scope::Data*
-NewEmptyBindingData(ExclusiveContext* cx, LifoAlloc& alloc, uint32_t numBindings)
+NewEmptyBindingData(JSContext* cx, LifoAlloc& alloc, uint32_t numBindings)
 {
     size_t allocSize = Scope::sizeOfData(numBindings);
     typename Scope::Data* bindings = static_cast<typename Scope::Data*>(alloc.alloc(allocSize));
     if (!bindings) {
         ReportOutOfMemory(cx);
         return nullptr;
     }
     PodZero(bindings);
@@ -1881,17 +1881,17 @@ Parser<FullParseHandler>::finishLexicalS
         return nullptr;
     Maybe<LexicalScope::Data*> bindings = newLexicalScopeData(scope);
     if (!bindings)
         return nullptr;
     return handler.newLexicalScope(*bindings, body);
 }
 
 static bool
-IsArgumentsUsedInLegacyGenerator(ExclusiveContext* cx, Scope* scope)
+IsArgumentsUsedInLegacyGenerator(JSContext* cx, Scope* scope)
 {
     JSAtom* argumentsName = cx->names().arguments;
     for (ScopeIter si(scope); si; si++) {
         if (si.scope()->is<LexicalScope>()) {
             // Using a shadowed lexical 'arguments' is okay.
             for (::BindingIter bi(si.scope()); bi; bi++) {
                 if (bi.name() == argumentsName)
                     return false;
@@ -2053,17 +2053,17 @@ Parser<FullParseHandler>::moduleBody(Mod
         MOZ_ASSERT(name);
 
         DeclaredNamePtr p = modulepc.varScope().lookupDeclaredName(name);
         if (!p) {
             JSAutoByteString str;
             if (!str.encodeLatin1(context, name))
                 return null();
 
-            JS_ReportErrorNumberLatin1(context->asJSContext(), GetErrorMessage, nullptr,
+            JS_ReportErrorNumberLatin1(context, GetErrorMessage, nullptr,
                                        JSMSG_MISSING_EXPORT, str.ptr());
             return null();
         }
 
         p->value()->setClosedOver();
     }
 
     if (!FoldConstants(context, &pn, this))
@@ -3127,17 +3127,17 @@ Parser<ParseHandler>::functionDefinition
         return pn;
     }
 
     RootedObject proto(context);
     if (generatorKind == StarGenerator) {
         // If we are off the main thread, the generator meta-objects have
         // already been created by js::StartOffThreadParseTask, so cx will not
         // be necessary.
-        JSContext* cx = context->maybeJSContext();
+        JSContext* cx = context->helperThread() ? nullptr : context;
         proto = GlobalObject::getOrCreateStarGeneratorFunctionPrototype(cx, context->global());
         if (!proto)
             return null();
     }
     RootedFunction fun(context, newFunction(funName, kind, generatorKind, asyncKind, proto));
     if (!fun)
         return null();
 
@@ -3226,18 +3226,18 @@ Parser<FullParseHandler>::trySyntaxParse
                                    yieldHandling, kind, inheritedDirectives, newDirectives))
         {
             if (parser->hadAbortedSyntaxParse()) {
                 // Try again with a full parse. UsedNameTracker needs to be
                 // rewound to just before we tried the syntax parse for
                 // correctness.
                 parser->clearAbortedSyntaxParse();
                 usedNames.rewind(token);
-                MOZ_ASSERT_IF(parser->context->isJSContext(),
-                              !parser->context->asJSContext()->isExceptionPending());
+                MOZ_ASSERT_IF(!parser->context->helperThread(),
+                              !parser->context->isExceptionPending());
                 break;
             }
             return false;
         }
 
         // Advance this parser over tokens processed by the syntax parser.
         parser->tokenStream.tell(&position);
         if (!tokenStream.seek(position, parser->tokenStream))
@@ -7977,17 +7977,17 @@ Parser<ParseHandler>::generatorComprehen
         return null();
 
     ParseContext* outerpc = pc;
 
     // If we are off the main thread, the generator meta-objects have
     // already been created by js::StartOffThreadParseScript, so cx will not
     // be necessary.
     RootedObject proto(context);
-    JSContext* cx = context->maybeJSContext();
+    JSContext* cx = context->helperThread() ? nullptr : context;
     proto = GlobalObject::getOrCreateStarGeneratorFunctionPrototype(cx, context->global());
     if (!proto)
         return null();
 
     RootedFunction fun(context, newFunction(/* atom = */ nullptr, Expression,
                                             StarGenerator, SyncFunction, proto));
     if (!fun)
         return null();
@@ -8843,17 +8843,17 @@ Parser<ParseHandler>::arrayInitializer(Y
 
         MUST_MATCH_TOKEN_MOD(TOK_RB, modifier, JSMSG_BRACKET_AFTER_LIST);
     }
     handler.setEndPosition(literal, pos().end);
     return literal;
 }
 
 static JSAtom*
-DoubleToAtom(ExclusiveContext* cx, double value)
+DoubleToAtom(JSContext* cx, double value)
 {
     // This is safe because doubles cannot be moved.
     Value tmp = DoubleValue(value);
     return ToAtom<CanGC>(cx, HandleValue::fromMarkedLocation(&tmp));
 }
 
 template <typename ParseHandler>
 typename ParseHandler::Node
@@ -9562,50 +9562,47 @@ Parser<ParseHandler>::exprInParens(InHan
 {
     MOZ_ASSERT(tokenStream.isCurrentTokenType(TOK_LP));
     return expr(inHandling, yieldHandling, tripledotHandling, possibleError, PredictInvoked);
 }
 
 void
 ParserBase::addTelemetry(JSCompartment::DeprecatedLanguageExtension e)
 {
-    JSContext* cx = context->maybeJSContext();
-    if (!cx)
+    if (context->helperThread())
         return;
-    cx->compartment()->addTelemetry(getFilename(), e);
+    context->compartment()->addTelemetry(getFilename(), e);
 }
 
 bool
 ParserBase::warnOnceAboutExprClosure()
 {
 #ifndef RELEASE_OR_BETA
-    JSContext* cx = context->maybeJSContext();
-    if (!cx)
+    if (context->helperThread())
         return true;
 
-    if (!cx->compartment()->warnedAboutExprClosure) {
+    if (!context->compartment()->warnedAboutExprClosure) {
         if (!warning(JSMSG_DEPRECATED_EXPR_CLOSURE))
             return false;
-        cx->compartment()->warnedAboutExprClosure = true;
+        context->compartment()->warnedAboutExprClosure = true;
     }
 #endif
     return true;
 }
 
 bool
 ParserBase::warnOnceAboutForEach()
 {
-    JSContext* cx = context->maybeJSContext();
-    if (!cx)
+    if (context->helperThread())
         return true;
 
-    if (!cx->compartment()->warnedAboutForEach) {
+    if (!context->compartment()->warnedAboutForEach) {
         if (!warning(JSMSG_DEPRECATED_FOR_EACH))
             return false;
-        cx->compartment()->warnedAboutForEach = true;
+        context->compartment()->warnedAboutForEach = true;
     }
     return true;
 }
 
 template class Parser<FullParseHandler>;
 template class Parser<SyntaxParseHandler>;
 
 } /* namespace frontend */
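
A recurring rewrite throughout this file (and the rest of the changeset) is visible in the hunks above: code that used to discriminate an ExclusiveContext at runtime with isJSContext()/asJSContext()/maybeJSContext() now takes a JSContext* unconditionally and asks whether it is running on a helper thread. An illustrative before/after sketch (doMainThreadOnlyWork is a hypothetical placeholder for main-thread-only operations such as telemetry or error reporting):

    // Old idiom (pre-patch), branching on the dynamic type of the context:
    if (cx->isJSContext())
        doMainThreadOnlyWork(cx->asJSContext());

    // New idiom (post-patch), branching on the thread the context serves:
    if (!cx->helperThread())
        doMainThreadOnlyWork(cx);
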
--- a/js/src/frontend/Parser.h
+++ b/js/src/frontend/Parser.h
@@ -626,17 +626,17 @@ class UsedNameTracker
     {
         friend class UsedNameTracker;
 
         Vector<Use, 6> uses_;
 
         void resetToScope(uint32_t scriptId, uint32_t scopeId);
 
       public:
-        explicit UsedNameInfo(ExclusiveContext* cx)
+        explicit UsedNameInfo(JSContext* cx)
           : uses_(cx)
         { }
 
         UsedNameInfo(UsedNameInfo&& other)
           : uses_(mozilla::Move(other.uses_))
         { }
 
         bool noteUsedInScope(uint32_t scriptId, uint32_t scopeId) {
@@ -672,17 +672,17 @@ class UsedNameTracker
 
     // Monotonically increasing id for all nested scripts.
     uint32_t scriptCounter_;
 
     // Monotonically increasing id for all nested scopes.
     uint32_t scopeCounter_;
 
   public:
-    explicit UsedNameTracker(ExclusiveContext* cx)
+    explicit UsedNameTracker(JSContext* cx)
       : map_(cx),
         scriptCounter_(0),
         scopeCounter_(0)
     { }
 
     MOZ_MUST_USE bool init() {
         return map_.init();
     }
@@ -697,17 +697,17 @@ class UsedNameTracker
         MOZ_ASSERT(scopeCounter_ != UINT32_MAX);
         return scopeCounter_++;
     }
 
     UsedNameMap::Ptr lookup(JSAtom* name) const {
         return map_.lookup(name);
     }
 
-    MOZ_MUST_USE bool noteUse(ExclusiveContext* cx, JSAtom* name,
+    MOZ_MUST_USE bool noteUse(JSContext* cx, JSAtom* name,
                               uint32_t scriptId, uint32_t scopeId);
 
     struct RewindToken
     {
       private:
         friend class UsedNameTracker;
         uint32_t scriptId;
         uint32_t scopeId;
@@ -735,17 +735,17 @@ class UsedNameTracker
 };
 
 class ParserBase : public StrictModeGetter
 {
   private:
     ParserBase* thisForCtor() { return this; }
 
   public:
-    ExclusiveContext* const context;
+    JSContext* const context;
 
     LifoAlloc& alloc;
 
     TokenStream tokenStream;
     LifoAlloc::Mark tempPoolMark;
 
     /* list of parsed objects for GC tracing */
     ObjectBox* traceListHead;
@@ -779,17 +779,17 @@ class ParserBase : public StrictModeGett
      * the parse will return false.
      */
     bool abortedSyntaxParse:1;
 
     /* Unexpected end of input, i.e. TOK_EOF not at top-level. */
     bool isUnexpectedEOF_:1;
 
   public:
-    ParserBase(ExclusiveContext* cx, LifoAlloc& alloc, const ReadOnlyCompileOptions& options,
+    ParserBase(JSContext* cx, LifoAlloc& alloc, const ReadOnlyCompileOptions& options,
                const char16_t* chars, size_t length, bool foldConstants,
                UsedNameTracker& usedNames, Parser<SyntaxParseHandler>* syntaxParser,
                LazyScript* lazyOuterFunction);
     ~ParserBase();
 
     const char* getFilename() const { return tokenStream.getFilename(); }
     JSVersion versionNumber() const { return tokenStream.versionNumber(); }
     TokenPos pos() const { return tokenStream.currentToken().pos; }
@@ -987,17 +987,17 @@ class Parser final : public ParserBase, 
   public:
     /* State specific to the kind of parse being performed. */
     ParseHandler handler;
 
     void prepareNodeForMutation(Node node) { handler.prepareNodeForMutation(node); }
     void freeTree(Node node) { handler.freeTree(node); }
 
   public:
-    Parser(ExclusiveContext* cx, LifoAlloc& alloc, const ReadOnlyCompileOptions& options,
+    Parser(JSContext* cx, LifoAlloc& alloc, const ReadOnlyCompileOptions& options,
            const char16_t* chars, size_t length, bool foldConstants, UsedNameTracker& usedNames,
            Parser<SyntaxParseHandler>* syntaxParser, LazyScript* lazyOuterFunction);
     ~Parser();
 
     bool checkOptions();
 
     // A Parser::Mark is the extension of the LifoAlloc::Mark to the entire
     // Parser's state. Note: clients must still take care that any ParseContext
--- a/js/src/frontend/SharedContext.h
+++ b/js/src/frontend/SharedContext.h
@@ -267,17 +267,17 @@ class ModuleSharedContext;
 /*
  * The struct SharedContext is part of the current parser context (see
  * ParseContext). It stores information that is reused between the parser and
  * the bytecode emitter.
  */
 class SharedContext
 {
   public:
-    ExclusiveContext* const context;
+    JSContext* const context;
     AnyContextFlags anyCxFlags;
     bool strictScript;
     bool localStrict;
     bool extraWarnings;
 
   protected:
     enum class Kind {
         ObjectBox,
@@ -296,17 +296,17 @@ class SharedContext
     bool inWith_;
     bool needsThisTDZChecks_;
 
     void computeAllowSyntax(Scope* scope);
     void computeInWith(Scope* scope);
     void computeThisBinding(Scope* scope);
 
   public:
-    SharedContext(ExclusiveContext* cx, Kind kind, Directives directives, bool extraWarnings)
+    SharedContext(JSContext* cx, Kind kind, Directives directives, bool extraWarnings)
       : context(cx),
         anyCxFlags(),
         strictScript(directives.strict()),
         localStrict(false),
         extraWarnings(extraWarnings),
         kind_(kind),
         thisBinding_(ThisBinding::Global),
         allowNewTarget_(false),
@@ -372,17 +372,17 @@ class SharedContext
 
 class MOZ_STACK_CLASS GlobalSharedContext : public SharedContext
 {
     ScopeKind scopeKind_;
 
   public:
     Rooted<GlobalScope::Data*> bindings;
 
-    GlobalSharedContext(ExclusiveContext* cx, ScopeKind scopeKind, Directives directives,
+    GlobalSharedContext(JSContext* cx, ScopeKind scopeKind, Directives directives,
                         bool extraWarnings)
       : SharedContext(cx, Kind::Global, directives, extraWarnings),
         scopeKind_(scopeKind),
         bindings(cx)
     {
         MOZ_ASSERT(scopeKind == ScopeKind::Global || scopeKind == ScopeKind::NonSyntactic);
         thisBinding_ = ThisBinding::Global;
     }
@@ -405,17 +405,17 @@ SharedContext::asGlobalContext()
 
 class MOZ_STACK_CLASS EvalSharedContext : public SharedContext
 {
     RootedScope enclosingScope_;
 
   public:
     Rooted<EvalScope::Data*> bindings;
 
-    EvalSharedContext(ExclusiveContext* cx, JSObject* enclosingEnv, Scope* enclosingScope,
+    EvalSharedContext(JSContext* cx, JSObject* enclosingEnv, Scope* enclosingScope,
                       Directives directives, bool extraWarnings);
 
     Scope* compilationEnclosingScope() const override {
         return enclosingScope_;
     }
 };
 
 inline EvalSharedContext*
@@ -473,32 +473,32 @@ class FunctionBox : public ObjectBox, pu
     bool            usesReturn:1;           /* contains a 'return' statement */
     bool            hasRest_:1;             /* has rest parameter */
     bool            isExprBody_:1;          /* arrow function with expression
                                              * body or expression closure:
                                              * function(x) x*x */
 
     FunctionContextFlags funCxFlags;
 
-    FunctionBox(ExclusiveContext* cx, LifoAlloc& alloc, ObjectBox* traceListHead, JSFunction* fun,
+    FunctionBox(JSContext* cx, LifoAlloc& alloc, ObjectBox* traceListHead, JSFunction* fun,
                 Directives directives, bool extraWarnings, GeneratorKind generatorKind,
                 FunctionAsyncKind asyncKind);
 
     MutableHandle<LexicalScope::Data*> namedLambdaBindings() {
-        MOZ_ASSERT(context->compartment()->runtimeFromAnyThread()->keepAtoms());
+        MOZ_ASSERT(context->keepAtoms);
         return MutableHandle<LexicalScope::Data*>::fromMarkedLocation(&namedLambdaBindings_);
     }
 
     MutableHandle<FunctionScope::Data*> functionScopeBindings() {
-        MOZ_ASSERT(context->compartment()->runtimeFromAnyThread()->keepAtoms());
+        MOZ_ASSERT(context->keepAtoms);
         return MutableHandle<FunctionScope::Data*>::fromMarkedLocation(&functionScopeBindings_);
     }
 
     MutableHandle<VarScope::Data*> extraVarScopeBindings() {
-        MOZ_ASSERT(context->compartment()->runtimeFromAnyThread()->keepAtoms());
+        MOZ_ASSERT(context->keepAtoms);
         return MutableHandle<VarScope::Data*>::fromMarkedLocation(&extraVarScopeBindings_);
     }
 
     void initFromLazyFunction();
     void initStandaloneFunction(Scope* enclosingScope);
     void initWithEnclosingParseContext(ParseContext* enclosing, FunctionSyntaxKind kind);
 
     ObjectBox* toObjectBox() override { return this; }
@@ -616,17 +616,17 @@ class MOZ_STACK_CLASS ModuleSharedContex
 {
     RootedModuleObject module_;
     RootedScope enclosingScope_;
 
   public:
     Rooted<ModuleScope::Data*> bindings;
     ModuleBuilder& builder;
 
-    ModuleSharedContext(ExclusiveContext* cx, ModuleObject* module, Scope* enclosingScope,
+    ModuleSharedContext(JSContext* cx, ModuleObject* module, Scope* enclosingScope,
                         ModuleBuilder& builder);
 
     HandleModuleObject module() const { return module_; }
     Scope* compilationEnclosingScope() const override { return enclosingScope_; }
 };
 
 inline ModuleSharedContext*
 SharedContext::asModuleContext()
--- a/js/src/frontend/SyntaxParseHandler.h
+++ b/js/src/frontend/SyntaxParseHandler.h
@@ -165,31 +165,31 @@ class SyntaxParseHandler
     }
 
     static bool isDestructuringPatternAnyParentheses(Node node) {
         return isUnparenthesizedDestructuringPattern(node) ||
                 isParenthesizedDestructuringPattern(node);
     }
 
   public:
-    SyntaxParseHandler(ExclusiveContext* cx, LifoAlloc& alloc,
+    SyntaxParseHandler(JSContext* cx, LifoAlloc& alloc,
                        TokenStream& tokenStream, Parser<SyntaxParseHandler>* syntaxParser,
                        LazyScript* lazyOuterFunction)
       : lastAtom(nullptr),
         tokenStream(tokenStream)
     {}
 
     static Node null() { return NodeFailure; }
 
     void prepareNodeForMutation(Node node) {}
     void freeTree(Node node) {}
 
     void trace(JSTracer* trc) {}
 
-    Node newName(PropertyName* name, const TokenPos& pos, ExclusiveContext* cx) {
+    Node newName(PropertyName* name, const TokenPos& pos, JSContext* cx) {
         lastAtom = name;
         if (name == cx->names().arguments)
             return NodeUnparenthesizedArgumentsName;
         if (pos.begin + strlen("async") == pos.end && name == cx->names().async)
             return NodePotentialAsyncKeyword;
         if (name == cx->names().eval)
             return NodeUnparenthesizedEvalName;
         return NodeUnparenthesizedName;
@@ -542,32 +542,32 @@ class SyntaxParseHandler
     bool isNameAnyParentheses(Node node) {
         if (isUnparenthesizedName(node))
             return true;
         return node == NodeParenthesizedArgumentsName ||
                node == NodeParenthesizedEvalName ||
                node == NodeParenthesizedName;
     }
 
-    bool isEvalAnyParentheses(Node node, ExclusiveContext* cx) {
+    bool isEvalAnyParentheses(Node node, JSContext* cx) {
         return node == NodeUnparenthesizedEvalName || node == NodeParenthesizedEvalName;
     }
 
-    const char* nameIsArgumentsEvalAnyParentheses(Node node, ExclusiveContext* cx) {
+    const char* nameIsArgumentsEvalAnyParentheses(Node node, JSContext* cx) {
         MOZ_ASSERT(isNameAnyParentheses(node),
                    "must only call this method on known names");
 
         if (isEvalAnyParentheses(node, cx))
             return js_eval_str;
         if (node == NodeUnparenthesizedArgumentsName || node == NodeParenthesizedArgumentsName)
             return js_arguments_str;
         return nullptr;
     }
 
-    bool isAsyncKeyword(Node node, ExclusiveContext* cx) {
+    bool isAsyncKeyword(Node node, JSContext* cx) {
         return node == NodePotentialAsyncKeyword;
     }
 
     PropertyName* maybeDottedProperty(Node node) {
         // Note: |super.apply(...)| is a special form that calls an "apply"
         // method retrieved from one value, but using a *different* value as
         // |this|.  It's not really eligible for the funapply/funcall
         // optimizations as they're currently implemented (assuming a single
--- a/js/src/frontend/TokenStream.cpp
+++ b/js/src/frontend/TokenStream.cpp
@@ -180,17 +180,17 @@ frontend::IsIdentifier(const char16_t* c
 }
 
 bool
 frontend::IsKeyword(JSLinearString* str)
 {
     return FindKeyword(str) != nullptr;
 }
 
-TokenStream::SourceCoords::SourceCoords(ExclusiveContext* cx, uint32_t ln)
+TokenStream::SourceCoords::SourceCoords(JSContext* cx, uint32_t ln)
   : lineStartOffsets_(cx), initialLineNum_(ln), lastLineIndex_(0)
 {
     // This is actually necessary!  Removing it causes compile errors on
     // GCC and clang.  You could try declaring this:
     //
     //   const uint32_t TokenStream::SourceCoords::MAX_PTR;
     //
     // which fixes the GCC/clang error, but causes bustage on Windows.  Sigh.
@@ -334,17 +334,17 @@ TokenStream::SourceCoords::lineNumAndCol
     *columnIndex = offset - lineStartOffset;
 }
 
 #ifdef _MSC_VER
 #pragma warning(push)
 #pragma warning(disable:4351)
 #endif
 
-TokenStream::TokenStream(ExclusiveContext* cx, const ReadOnlyCompileOptions& options,
+TokenStream::TokenStream(JSContext* cx, const ReadOnlyCompileOptions& options,
                          const char16_t* base, size_t length, StrictModeGetter* smg)
   : srcCoords(cx, options.lineno),
     options_(options),
     tokens(),
     cursor(),
     lookahead(),
     lineno(options.lineno),
     flags(),
@@ -647,17 +647,17 @@ TokenStream::reportCompileErrorNumberVA(
         flags &= ~JSREPORT_WARNING;
         warning = false;
     }
 
     // On the main thread, report the error immediately. When compiling off
     // thread, save the error so that the main thread can report it later.
     CompileError tempErr;
     CompileError* tempErrPtr = &tempErr;
-    if (!cx->isJSContext() && !cx->addPendingCompileError(&tempErrPtr))
+    if (cx->helperThread() && !cx->addPendingCompileError(&tempErrPtr))
         return false;
     CompileError& err = *tempErrPtr;
 
     err.flags = flags;
     err.errorNumber = errorNumber;
     err.filename = filename;
     err.isMuted = mutedErrors;
     if (offset == NoOffset) {
@@ -665,18 +665,18 @@ TokenStream::reportCompileErrorNumberVA(
         err.column = 0;
     } else {
         err.lineno = srcCoords.lineNum(offset);
         err.column = srcCoords.columnIndex(offset);
     }
 
     // If we have no location information, try to get one from the caller.
     bool callerFilename = false;
-    if (offset != NoOffset && !err.filename && cx->isJSContext()) {
-        NonBuiltinFrameIter iter(cx->asJSContext(),
+    if (offset != NoOffset && !err.filename && !cx->helperThread()) {
+        NonBuiltinFrameIter iter(cx,
                                  FrameIter::FOLLOW_DEBUGGER_EVAL_PREV_LINK,
                                  cx->compartment()->principals());
         if (!iter.done() && iter.filename()) {
             callerFilename = true;
             err.filename = iter.filename();
             err.lineno = iter.computeLine(&err.column);
         }
     }
@@ -731,18 +731,18 @@ TokenStream::reportCompileErrorNumberVA(
         // The window into the offending source line, without final \n.
         UniqueTwoByteChars linebuf(windowBuf.stealChars());
         if (!linebuf)
             return false;
 
         err.initOwnedLinebuf(linebuf.release(), windowLength, offset - windowStart);
     }
 
-    if (cx->isJSContext())
-        err.throwError(cx->asJSContext());
+    if (!cx->helperThread())
+        err.throwError(cx);
 
     return warning;
 }
 
 bool
 TokenStream::reportStrictModeError(unsigned errorNumber, ...)
 {
     va_list args;
@@ -1064,17 +1064,17 @@ TokenStream::newToken(ptrdiff_t adjust)
 
     // NOTE: tp->pos.end is not set until the very end of getTokenInternal().
     MOZ_MAKE_MEM_UNDEFINED(&tp->pos.end, sizeof(tp->pos.end));
 
     return tp;
 }
 
 MOZ_ALWAYS_INLINE JSAtom*
-TokenStream::atomize(ExclusiveContext* cx, CharBuffer& cb)
+TokenStream::atomize(JSContext* cx, CharBuffer& cb)
 {
     return AtomizeChars(cx, cb.begin(), cb.length());
 }
 
 #ifdef DEBUG
 static bool
 IsTokenSane(Token* tp)
 {
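
The reportCompileErrorNumberVA hunks above keep the pre-existing reporting policy under the new context type: helper threads queue compile errors for the main thread, while the main thread throws immediately. A condensed sketch of that control flow, with the error-field setup elided (illustrative only; |cx| is the TokenStream's JSContext):

    CompileError tempErr;
    CompileError* tempErrPtr = &tempErr;
    // Off thread: register a pending error object that the main thread will
    // report once the parse task finishes.
    if (cx->helperThread() && !cx->addPendingCompileError(&tempErrPtr))
        return false;
    CompileError& err = *tempErrPtr;
    // ... fill in err.flags, err.errorNumber, err.filename, the position, and
    //     the offending source window ...
    // Main thread: report the error right away.
    if (!cx->helperThread())
        err.throwError(cx);
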
--- a/js/src/frontend/TokenStream.h
+++ b/js/src/frontend/TokenStream.h
@@ -321,17 +321,17 @@ class MOZ_STACK_CLASS TokenStream
     static const size_t ntokens = 4;                // 1 current + 2 lookahead, rounded
                                                     // to power of 2 to avoid divmod by 3
     static const unsigned maxLookahead = 2;
     static const unsigned ntokensMask = ntokens - 1;
 
   public:
     typedef Vector<char16_t, 32> CharBuffer;
 
-    TokenStream(ExclusiveContext* cx, const ReadOnlyCompileOptions& options,
+    TokenStream(JSContext* cx, const ReadOnlyCompileOptions& options,
                 const char16_t* base, size_t length, StrictModeGetter* smg);
 
     ~TokenStream();
 
     MOZ_MUST_USE bool checkOptions();
 
     // Accessors.
     const Token& currentToken() const { return tokens[cursor]; }
@@ -475,17 +475,17 @@ class MOZ_STACK_CLASS TokenStream
                 errorAt(offset, JSMSG_UNICODE_OVERFLOW, "escape sequence");
                 return;
             case InvalidEscapeType::Octal:
                 errorAt(offset, JSMSG_DEPRECATED_OCTAL);
                 return;
         }
     }
 
-    static JSAtom* atomize(ExclusiveContext* cx, CharBuffer& cb);
+    static JSAtom* atomize(JSContext* cx, CharBuffer& cb);
     MOZ_MUST_USE bool putIdentInTokenbuf(const char16_t* identStart);
 
     struct Flags
     {
         bool isEOF:1;           // Hit end of file.
         bool isDirtyLine:1;     // Non-whitespace since start of line.
         bool sawOctalEscape:1;  // Saw an octal character escape.
         bool hadError:1;        // Hit a syntax error, at start or during a
@@ -867,17 +867,17 @@ class MOZ_STACK_CLASS TokenStream
         uint32_t lineIndexOf(uint32_t offset) const;
 
         static const uint32_t MAX_PTR = UINT32_MAX;
 
         uint32_t lineIndexToNum(uint32_t lineIndex) const { return lineIndex + initialLineNum_; }
         uint32_t lineNumToIndex(uint32_t lineNum)   const { return lineNum   - initialLineNum_; }
 
       public:
-        SourceCoords(ExclusiveContext* cx, uint32_t ln);
+        SourceCoords(JSContext* cx, uint32_t ln);
 
         MOZ_MUST_USE bool add(uint32_t lineNum, uint32_t lineStartOffset);
         MOZ_MUST_USE bool fill(const SourceCoords& other);
 
         bool isOnThisLine(uint32_t offset, uint32_t lineNum, bool* onThisLine) const {
             uint32_t lineIndex = lineNumToIndex(lineNum);
             if (lineIndex + 1 >= lineStartOffsets_.length()) // +1 due to sentinel
                 return false;
@@ -892,17 +892,17 @@ class MOZ_STACK_CLASS TokenStream
     };
 
     SourceCoords srcCoords;
 
     JSAtomState& names() const {
         return cx->names();
     }
 
-    ExclusiveContext* context() const {
+    JSContext* context() const {
         return cx;
     }
 
     const ReadOnlyCompileOptions& options() const {
         return options_;
     }
 
   private:
@@ -913,17 +913,17 @@ class MOZ_STACK_CLASS TokenStream
     // chars" refers to the lack of EOL sequence normalization.)
     //
     // buf[0..length-1] often represents a substring of some larger source,
     // where we have only the substring in memory. The |startOffset| argument
     // indicates the offset within this larger string at which our string
     // begins, the offset of |buf[0]|.
     class TokenBuf {
       public:
-        TokenBuf(ExclusiveContext* cx, const char16_t* buf, size_t length, size_t startOffset)
+        TokenBuf(JSContext* cx, const char16_t* buf, size_t length, size_t startOffset)
           : base_(buf),
             startOffset_(startOffset),
             limit_(buf + length),
             ptr(buf)
         { }
 
         bool hasRawChars() const {
             return ptr < limit_;
@@ -1092,17 +1092,17 @@ class MOZ_STACK_CLASS TokenStream
     size_t              linebase;           // start of current line
     size_t              prevLinebase;       // start of previous line;  size_t(-1) if on the first line
     TokenBuf            userbuf;            // user input buffer
     const char*         filename;           // input filename or null
     UniqueTwoByteChars  displayURL_;        // the user's requested source URL or null
     UniqueTwoByteChars  sourceMapURL_;      // source map's filename or null
     CharBuffer          tokenbuf;           // current token string buffer
     uint8_t             isExprEnding[TOK_LIMIT];// which tokens definitely terminate exprs?
-    ExclusiveContext*   const cx;
+    JSContext* const    cx;
     bool                mutedErrors;
     StrictModeGetter*   strictModeGetter;  // used to test for strict mode
 };
 
 class MOZ_STACK_CLASS AutoAwaitIsKeyword
 {
 private:
     TokenStream* ts_;
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -20,95 +20,94 @@
 
 #include "gc/Heap-inl.h"
 
 using namespace js;
 using namespace gc;
 
 template <typename T, AllowGC allowGC /* = CanGC */>
 JSObject*
-js::Allocate(ExclusiveContext* cx, AllocKind kind, size_t nDynamicSlots, InitialHeap heap,
+js::Allocate(JSContext* cx, AllocKind kind, size_t nDynamicSlots, InitialHeap heap,
              const Class* clasp)
 {
     static_assert(mozilla::IsConvertible<T*, JSObject*>::value, "must be JSObject derived");
     MOZ_ASSERT(IsObjectAllocKind(kind));
     size_t thingSize = Arena::thingSize(kind);
 
     MOZ_ASSERT(thingSize == Arena::thingSize(kind));
     MOZ_ASSERT(thingSize >= sizeof(JSObject_Slots0));
     static_assert(sizeof(JSObject_Slots0) >= CellSize,
                   "All allocations must be at least the allocator-imposed minimum size.");
 
     MOZ_ASSERT_IF(nDynamicSlots != 0, clasp->isNative() || clasp->isProxy());
 
     // Off-main-thread alloc cannot trigger GC or make runtime assertions.
-    if (!cx->isJSContext()) {
+    if (cx->helperThread()) {
         JSObject* obj = GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize, nDynamicSlots);
         if (MOZ_UNLIKELY(allowGC && !obj))
             ReportOutOfMemory(cx);
         return obj;
     }
 
-    JSContext* ncx = cx->asJSContext();
-    JSRuntime* rt = ncx->runtime();
-    if (!rt->gc.checkAllocatorState<allowGC>(ncx, kind))
+    JSRuntime* rt = cx->runtime();
+    if (!rt->gc.checkAllocatorState<allowGC>(cx, kind))
         return nullptr;
 
-    if (ncx->nursery().isEnabled() && heap != TenuredHeap) {
-        JSObject* obj = rt->gc.tryNewNurseryObject<allowGC>(ncx, thingSize, nDynamicSlots, clasp);
+    if (cx->nursery().isEnabled() && heap != TenuredHeap) {
+        JSObject* obj = rt->gc.tryNewNurseryObject<allowGC>(cx, thingSize, nDynamicSlots, clasp);
         if (obj)
             return obj;
 
         // Our most common non-jit allocation path is NoGC; thus, if we fail the
         // alloc and cannot GC, we *must* return nullptr here so that the caller
         // will do a CanGC allocation to clear the nursery. Failing to do so will
         // cause all allocations on this path to land in Tenured, and we will not
         // get the benefit of the nursery.
         if (!allowGC)
             return nullptr;
     }
 
     return GCRuntime::tryNewTenuredObject<allowGC>(cx, kind, thingSize, nDynamicSlots);
 }
-template JSObject* js::Allocate<JSObject, NoGC>(ExclusiveContext* cx, gc::AllocKind kind,
+template JSObject* js::Allocate<JSObject, NoGC>(JSContext* cx, gc::AllocKind kind,
                                                 size_t nDynamicSlots, gc::InitialHeap heap,
                                                 const Class* clasp);
-template JSObject* js::Allocate<JSObject, CanGC>(ExclusiveContext* cx, gc::AllocKind kind,
+template JSObject* js::Allocate<JSObject, CanGC>(JSContext* cx, gc::AllocKind kind,
                                                  size_t nDynamicSlots, gc::InitialHeap heap,
                                                  const Class* clasp);
 
 // Attempt to allocate a new GC thing out of the nursery. If there is not enough
 // room in the nursery or there is an OOM, this method will return nullptr.
 template <AllowGC allowGC>
 JSObject*
 GCRuntime::tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicSlots, const Class* clasp)
 {
-    MOZ_ASSERT(isNurseryAllocAllowed());
-    MOZ_ASSERT(!cx->zone()->usedByExclusiveThread);
+    MOZ_ASSERT(cx->isNurseryAllocAllowed());
+    MOZ_ASSERT(!cx->helperThread());
     MOZ_ASSERT(!IsAtomsCompartment(cx->compartment()));
-    JSObject* obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
+    JSObject* obj = cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
     if (obj)
         return obj;
 
-    if (allowGC && !rt->mainThread.suppressGC) {
-        minorGC(JS::gcreason::OUT_OF_NURSERY);
+    if (allowGC && !cx->suppressGC) {
+        cx->zone()->group()->minorGC(JS::gcreason::OUT_OF_NURSERY);
 
         // Exceeding gcMaxBytes while tenuring can disable the Nursery.
-        if (nursery.isEnabled()) {
-            JSObject* obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
+        if (cx->nursery().isEnabled()) {
+            JSObject* obj = cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
             MOZ_ASSERT(obj);
             return obj;
         }
     }
     return nullptr;
 }
 
 template <AllowGC allowGC>
 JSObject*
-GCRuntime::tryNewTenuredObject(ExclusiveContext* cx, AllocKind kind, size_t thingSize,
+GCRuntime::tryNewTenuredObject(JSContext* cx, AllocKind kind, size_t thingSize,
                                size_t nDynamicSlots)
 {
     HeapSlot* slots = nullptr;
     if (nDynamicSlots) {
         slots = cx->zone()->pod_malloc<HeapSlot>(nDynamicSlots);
         if (MOZ_UNLIKELY(!slots)) {
             if (allowGC)
                 ReportOutOfMemory(cx);
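
Summarizing the hunk above: after this patch the object allocation path branches on the thread first and only then consults GC state. A condensed sketch of js::Allocate's post-patch flow (assertions and OOM reporting elided; illustrative only):

    if (cx->helperThread())  // helper threads never GC: tenured allocation only
        return GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize, nDynamicSlots);

    if (!cx->runtime()->gc.checkAllocatorState<allowGC>(cx, kind))
        return nullptr;

    if (cx->nursery().isEnabled() && heap != TenuredHeap) {
        // Main thread: prefer the nursery; a NoGC failure must return null so
        // that a CanGC retry can clear the nursery.
        if (JSObject* obj = cx->runtime()->gc.tryNewNurseryObject<allowGC>(cx, thingSize,
                                                                           nDynamicSlots, clasp))
            return obj;
        if (!allowGC)
            return nullptr;
    }
    return GCRuntime::tryNewTenuredObject<allowGC>(cx, kind, thingSize, nDynamicSlots);
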
@@ -124,60 +123,59 @@ GCRuntime::tryNewTenuredObject(Exclusive
     else
         js_free(slots);
 
     return obj;
 }
 
 template <typename T, AllowGC allowGC /* = CanGC */>
 T*
-js::Allocate(ExclusiveContext* cx)
+js::Allocate(JSContext* cx)
 {
     static_assert(!mozilla::IsConvertible<T*, JSObject*>::value, "must not be JSObject derived");
     static_assert(sizeof(T) >= CellSize,
                   "All allocations must be at least the allocator-imposed minimum size.");
 
     AllocKind kind = MapTypeToFinalizeKind<T>::kind;
     size_t thingSize = sizeof(T);
     MOZ_ASSERT(thingSize == Arena::thingSize(kind));
 
-    if (cx->isJSContext()) {
-        JSContext* ncx = cx->asJSContext();
-        if (!ncx->runtime()->gc.checkAllocatorState<allowGC>(ncx, kind))
+    if (!cx->helperThread()) {
+        if (!cx->runtime()->gc.checkAllocatorState<allowGC>(cx, kind))
             return nullptr;
     }
 
     return GCRuntime::tryNewTenuredThing<T, allowGC>(cx, kind, thingSize);
 }
 
 #define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType) \
-    template type* js::Allocate<type, NoGC>(ExclusiveContext* cx);\
-    template type* js::Allocate<type, CanGC>(ExclusiveContext* cx);
+    template type* js::Allocate<type, NoGC>(JSContext* cx);\
+    template type* js::Allocate<type, CanGC>(JSContext* cx);
 FOR_EACH_NONOBJECT_ALLOCKIND(DECL_ALLOCATOR_INSTANCES)
 #undef DECL_ALLOCATOR_INSTANCES
 
 template <typename T, AllowGC allowGC>
 /* static */ T*
-GCRuntime::tryNewTenuredThing(ExclusiveContext* cx, AllocKind kind, size_t thingSize)
+GCRuntime::tryNewTenuredThing(JSContext* cx, AllocKind kind, size_t thingSize)
 {
     // Bump allocate in the arena's current free-list span.
     T* t = reinterpret_cast<T*>(cx->arenas()->allocateFromFreeList(kind, thingSize));
     if (MOZ_UNLIKELY(!t)) {
         // Get the next available free list and allocate out of it. This may
         // acquire a new arena, which will lock the chunk list. If there are no
         // chunks available it may also allocate new memory directly.
         t = reinterpret_cast<T*>(refillFreeListFromAnyThread(cx, kind, thingSize));
 
-        if (MOZ_UNLIKELY(!t && allowGC && cx->isJSContext())) {
+        if (MOZ_UNLIKELY(!t && allowGC && !cx->helperThread())) {
             // We have no memory available for a new chunk; perform an
             // all-compartments, non-incremental, shrinking GC and wait for
             // sweeping to finish.
-            JS::PrepareForFullGC(cx->asJSContext());
-            cx->asJSContext()->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
-            cx->asJSContext()->gc.waitBackgroundSweepOrAllocEnd();
+            JS::PrepareForFullGC(cx);
+            cx->runtime()->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
+            cx->runtime()->gc.waitBackgroundSweepOrAllocEnd();
 
             t = tryNewTenuredThing<T, NoGC>(cx, kind, thingSize);
             if (!t)
                 ReportOutOfMemory(cx);
         }
     }
 
     checkIncrementalZoneState(cx, t);
@@ -199,23 +197,23 @@ GCRuntime::checkAllocatorState(JSContext
                   kind == AllocKind::ATOM ||
                   kind == AllocKind::FAT_INLINE_ATOM ||
                   kind == AllocKind::SYMBOL ||
                   kind == AllocKind::JITCODE ||
                   kind == AllocKind::SCOPE);
     MOZ_ASSERT_IF(!cx->compartment()->isAtomsCompartment(),
                   kind != AllocKind::ATOM &&
                   kind != AllocKind::FAT_INLINE_ATOM);
-    MOZ_ASSERT(!rt->isHeapBusy());
-    MOZ_ASSERT(isAllocAllowed());
+    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+    MOZ_ASSERT(cx->isAllocAllowed());
 #endif
 
     // Crash if we perform a GC action when it is not safe.
-    if (allowGC && !rt->mainThread.suppressGC)
-        rt->gc.verifyIsSafeToGC();
+    if (allowGC && !cx->suppressGC)
+        cx->verifyIsSafeToGC();
 
     // For testing out-of-memory conditions
     if (js::oom::ShouldFailWithOOM()) {
         // If we are doing a fallible allocation, percolate up the OOM
         // instead of reporting it.
         if (allowGC)
             ReportOutOfMemory(cx);
         return false;
@@ -229,17 +227,17 @@ GCRuntime::gcIfNeededPerAllocation(JSCon
 {
 #ifdef JS_GC_ZEAL
     if (needZealousGC())
         runDebugGC();
 #endif
 
     // Invoking the interrupt callback can fail and we can't usefully
     // handle that here. Just check in case we need to collect instead.
-    if (rt->hasPendingInterrupt())
+    if (cx->hasPendingInterrupt())
         gcIfRequested();
 
     // If we have grown past our GC heap threshold while in the middle of
     // an incremental GC, we're growing faster than we're GCing, so stop
     // the world and do a full, non-incremental GC right now, if possible.
     if (isIncrementalGCInProgress() &&
         cx->zone()->usage.gcBytes() > cx->zone()->threshold.gcTriggerBytes())
     {
@@ -247,23 +245,23 @@ GCRuntime::gcIfNeededPerAllocation(JSCon
         gc(GC_NORMAL, JS::gcreason::INCREMENTAL_TOO_SLOW);
     }
 
     return true;
 }
 
 template <typename T>
 /* static */ void
-GCRuntime::checkIncrementalZoneState(ExclusiveContext* cx, T* t)
+GCRuntime::checkIncrementalZoneState(JSContext* cx, T* t)
 {
 #ifdef DEBUG
-    if (!cx->isJSContext())
+    if (cx->helperThread())
         return;
 
-    Zone* zone = cx->asJSContext()->zone();
+    Zone* zone = cx->zone();
     MOZ_ASSERT_IF(t && zone->wasGCStarted() && (zone->isGCMarking() || zone->isGCSweeping()),
                   t->asTenured().arena()->allocatedDuringIncremental);
 #endif
 }
 
 
 // ///////////  Arena -> Thing Allocator  //////////////////////////////////////
 
@@ -276,81 +274,81 @@ GCRuntime::startBackgroundAllocTaskIfIdl
 
     // Join the previous invocation of the task. This will return immediately
     // if the thread has never been started.
     allocTask.joinWithLockHeld(helperLock);
     allocTask.startWithLockHeld(helperLock);
 }
 
 /* static */ TenuredCell*
-GCRuntime::refillFreeListFromAnyThread(ExclusiveContext* cx, AllocKind thingKind, size_t thingSize)
+GCRuntime::refillFreeListFromAnyThread(JSContext* cx, AllocKind thingKind, size_t thingSize)
 {
     cx->arenas()->checkEmptyFreeList(thingKind);
 
-    if (cx->isJSContext())
-        return refillFreeListFromMainThread(cx->asJSContext(), thingKind, thingSize);
+    if (!cx->helperThread())
+        return refillFreeListFromMainThread(cx, thingKind, thingSize);
 
     return refillFreeListOffMainThread(cx, thingKind);
 }
 
 /* static */ TenuredCell*
 GCRuntime::refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind, size_t thingSize)
 {
     // It should not be possible to allocate on the main thread while we are
     // inside a GC.
     Zone *zone = cx->zone();
-    MOZ_ASSERT(!cx->runtime()->isHeapBusy(), "allocating while under GC");
+    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy(), "allocating while under GC");
 
     AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
     return cx->arenas()->allocateFromArena(zone, thingKind, CheckThresholds, maybeStartBGAlloc);
 }
 
 /* static */ TenuredCell*
-GCRuntime::refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind)
+GCRuntime::refillFreeListOffMainThread(JSContext* cx, AllocKind thingKind)
 {
-    // A GC may be happening on the main thread, but zones used by exclusive
-    // contexts are never collected.
+    // A GC may be happening on the main thread, but zones used by off-thread
+    // tasks are never collected.
     Zone* zone = cx->zone();
     MOZ_ASSERT(!zone->wasGCStarted());
 
     AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
     return cx->arenas()->allocateFromArena(zone, thingKind, CheckThresholds, maybeStartBGAlloc);
 }
 
 /* static */ TenuredCell*
 GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind)
 {
     /*
      * Called by compacting GC to refill a free list while we are in a GC.
      */
 
     zone->arenas.checkEmptyFreeList(thingKind);
     mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
-    MOZ_ASSERT(rt->isHeapCollecting());
-    MOZ_ASSERT_IF(!rt->isHeapMinorCollecting(), !rt->gc.isBackgroundSweeping());
+    MOZ_ASSERT(JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT_IF(!JS::CurrentThreadIsHeapMinorCollecting(), !rt->gc.isBackgroundSweeping());
 
     AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
     return zone->arenas.allocateFromArena(zone, thingKind, DontCheckThresholds,
                                           maybeStartBackgroundAllocation);
 }
 
 TenuredCell*
 ArenaLists::allocateFromArena(JS::Zone* zone, AllocKind thingKind,
                               ShouldCheckThresholds checkThresholds,
                               AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc)
 {
     JSRuntime* rt = zone->runtimeFromAnyThread();
 
     mozilla::Maybe<AutoLockGC> maybeLock;
 
     // See if we can proceed without taking the GC lock.
-    if (backgroundFinalizeState[thingKind] != BFS_DONE)
+    if (backgroundFinalizeState(thingKind) != BFS_DONE)
         maybeLock.emplace(rt);
 
-    ArenaList& al = arenaLists[thingKind];
+    ArenaList& al = arenaLists(thingKind);
     Arena* arena = al.takeNextArena();
     if (arena) {
         // Empty arenas should be immediately freed.
         MOZ_ASSERT(!arena->isEmpty());
 
         return allocateFromArenaInner(zone, arena, thingKind);
     }
 
@@ -375,21 +373,21 @@ ArenaLists::allocateFromArena(JS::Zone* 
     return allocateFromArenaInner(zone, arena, thingKind);
 }
 
 inline TenuredCell*
 ArenaLists::allocateFromArenaInner(JS::Zone* zone, Arena* arena, AllocKind kind)
 {
     size_t thingSize = Arena::thingSize(kind);
 
-    freeLists[kind] = arena->getFirstFreeSpan();
+    freeLists(kind) = arena->getFirstFreeSpan();
 
     if (MOZ_UNLIKELY(zone->wasGCStarted()))
         zone->runtimeFromAnyThread()->gc.arenaAllocatedDuringGC(zone, arena);
-    TenuredCell* thing = freeLists[kind]->allocate(thingSize);
+    TenuredCell* thing = freeLists(kind)->allocate(thingSize);
     MOZ_ASSERT(thing); // This allocation is infallible.
     return thing;
 }
 
 void
 GCRuntime::arenaAllocatedDuringGC(JS::Zone* zone, Arena* arena)
 {
     if (zone->needsIncrementalBarrier()) {
@@ -554,49 +552,49 @@ GCRuntime::pickChunk(const AutoLockGC& l
     chunkAllocationSinceLastGC = true;
 
     availableChunks(lock).push(chunk);
 
     return chunk;
 }
 
 BackgroundAllocTask::BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool)
-  : runtime(rt),
+  : GCParallelTask(rt),
     chunkPool_(pool),
     enabled_(CanUseExtraThreads() && GetCPUCount() >= 2)
 {
 }
 
 /* virtual */ void
 BackgroundAllocTask::run()
 {
     TraceLoggerThread* logger = TraceLoggerForCurrentThread();
     AutoTraceLog logAllocation(logger, TraceLogger_GCAllocation);
 
-    AutoLockGC lock(runtime);
-    while (!cancel_ && runtime->gc.wantBackgroundAllocation(lock)) {
+    AutoLockGC lock(runtime());
+    while (!cancel_ && runtime()->gc.wantBackgroundAllocation(lock)) {
         Chunk* chunk;
         {
             AutoUnlockGC unlock(lock);
-            chunk = Chunk::allocate(runtime);
+            chunk = Chunk::allocate(runtime());
             if (!chunk)
                 break;
-            chunk->init(runtime);
+            chunk->init(runtime());
         }
-        chunkPool_.push(chunk);
+        chunkPool_.ref().push(chunk);
     }
 }
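
The loop above is the usual drop-the-lock-around-expensive-work shape: the GC lock is held while consulting the allocation heuristic and while publishing into the chunk pool, but released around the OS-level chunk allocation. A standalone sketch of that shape, with toy types standing in for the GC lock, Chunk, and chunk pool (none of these are the real SpiderMonkey APIs):

    #include <mutex>
    #include <vector>

    struct Chunk {};

    // Stands in for the expensive OS-level chunk allocation (mmap in the real code).
    static Chunk* allocateChunkFromOS() { return new Chunk(); }

    // Keep allocating while the heuristic wants more chunks, dropping the GC
    // lock around the expensive allocation and retaking it to publish the result.
    static void backgroundAllocLoop(std::mutex& gcLock, std::vector<Chunk*>& pool,
                                    bool (*wantMore)(const std::vector<Chunk*>&))
    {
        std::unique_lock<std::mutex> lock(gcLock);
        while (wantMore(pool)) {
            lock.unlock();                 // never hold the GC lock across the allocation
            Chunk* chunk = allocateChunkFromOS();
            lock.lock();
            if (!chunk)
                break;
            pool.push_back(chunk);         // the pool is guarded by the GC lock
        }
    }

    int main()
    {
        std::mutex gcLock;
        std::vector<Chunk*> pool;
        backgroundAllocLoop(gcLock, pool,
                            [](const std::vector<Chunk*>& p) { return p.size() < 4; });
        for (Chunk* c : pool)
            delete c;
    }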
 
 /* static */ Chunk*
 Chunk::allocate(JSRuntime* rt)
 {
     Chunk* chunk = static_cast<Chunk*>(MapAlignedPages(ChunkSize, ChunkSize));
     if (!chunk)
         return nullptr;
-    rt->gc.stats.count(gcstats::STAT_NEW_CHUNK);
+    rt->gc.stats().count(gcstats::STAT_NEW_CHUNK);
     return chunk;
 }
 
 void
 Chunk::init(JSRuntime* rt)
 {
     JS_POISON(this, JS_FRESH_TENURED_PATTERN, ChunkSize);
 
--- a/js/src/gc/Allocator.h
+++ b/js/src/gc/Allocator.h
@@ -18,18 +18,18 @@ struct Class;
 // trigger GC. This will ensure that GC tracing never sees junk values stored
 // in the partially initialized thing.
 //
 // Note that JSObject allocation must use the longer signature below that
 // includes slot, heap, and finalizer information in support of various
 // object-specific optimizations.
 template <typename T, AllowGC allowGC = CanGC>
 T*
-Allocate(ExclusiveContext* cx);
+Allocate(JSContext* cx);
 
 template <typename, AllowGC allowGC = CanGC>
 JSObject*
-Allocate(ExclusiveContext* cx, gc::AllocKind kind, size_t nDynamicSlots, gc::InitialHeap heap,
+Allocate(JSContext* cx, gc::AllocKind kind, size_t nDynamicSlots, gc::InitialHeap heap,
          const Class* clasp);
 
 } // namespace js
 
 #endif // gc_Allocator_h
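
For orientation, a minimal standalone sketch of the order the object-allocating Allocate entry point declared above follows now that everything goes through JSContext: try the nursery first, hand NoGC callers the failure so they can retry with CanGC, run a minor GC otherwise, and only then fall back to a tenured allocation. All names and types below are illustrative stand-ins, not the patched GCRuntime API:

    #include <cstddef>
    #include <cstdlib>

    enum AllowGC { NoGC, CanGC };

    struct Nursery {
        bool enabled = true;
        // Pretend the nursery is full so the fallback paths are exercised.
        void* allocate(std::size_t) { return nullptr; }
    };

    static void runMinorGC(Nursery&) { /* evicts the nursery in the real thing */ }
    static void* allocateTenured(std::size_t bytes) { return std::malloc(bytes); }

    // Mirrors the shape of the object allocation path, with the real checks
    // (helper thread, atoms compartment, zeal modes, simulated OOM) omitted.
    static void* allocateObject(Nursery& nursery, std::size_t bytes,
                                AllowGC allowGC, bool wantTenured)
    {
        if (nursery.enabled && !wantTenured) {
            if (void* p = nursery.allocate(bytes))
                return p;
            // NoGC callers must see nullptr so they can retry with CanGC,
            // which is allowed to clear the nursery.
            if (allowGC == NoGC)
                return nullptr;
            runMinorGC(nursery);
            if (nursery.enabled) {
                if (void* p = nursery.allocate(bytes))
                    return p;
            }
        }
        return allocateTenured(bytes);
    }

    int main()
    {
        Nursery nursery;
        void* p = allocateObject(nursery, 64, CanGC, /* wantTenured = */ false);
        std::free(p);
    }
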
--- a/js/src/gc/AtomMarking.cpp
+++ b/js/src/gc/AtomMarking.cpp
@@ -73,33 +73,33 @@ AtomMarkingRuntime::registerArena(Arena*
     MOZ_ASSERT(arena->getThingSize() != 0);
     MOZ_ASSERT(arena->getThingSize() % CellSize == 0);
     MOZ_ASSERT(arena->zone->isAtomsZone());
     MOZ_ASSERT(arena->zone->runtimeFromAnyThread()->currentThreadHasExclusiveAccess());
 
     // We need to find a range of bits from the atoms bitmap for this arena.
 
     // Look for a free range of bits compatible with this arena.
-    if (freeArenaIndexes.length()) {
-        arena->atomBitmapStart() = freeArenaIndexes.popCopy();
+    if (freeArenaIndexes.ref().length()) {
+        arena->atomBitmapStart() = freeArenaIndexes.ref().popCopy();
         return;
     }
 
     // Allocate a range of bits from the end for this arena.
     arena->atomBitmapStart() = allocatedWords;
     allocatedWords += ArenaBitmapWords;
 }
 
 void
 AtomMarkingRuntime::unregisterArena(Arena* arena)
 {
     MOZ_ASSERT(arena->zone->isAtomsZone());
 
     // Leak these atom bits if we run out of memory.
-    mozilla::Unused << freeArenaIndexes.emplaceBack(arena->atomBitmapStart());
+    mozilla::Unused << freeArenaIndexes.ref().emplaceBack(arena->atomBitmapStart());
 }
 
 bool
 AtomMarkingRuntime::computeBitmapFromChunkMarkBits(JSRuntime* runtime, Bitmap& bitmap)
 {
     MOZ_ASSERT(runtime->currentThreadHasExclusiveAccess());
 
     MOZ_ASSERT(bitmap.empty());
@@ -122,23 +122,23 @@ AtomMarkingRuntime::computeBitmapFromChu
 void
 AtomMarkingRuntime::updateZoneBitmap(Zone* zone, const Bitmap& bitmap)
 {
     if (zone->isAtomsZone())
         return;
 
     // |bitmap| was produced by computeBitmapFromChunkMarkBits, so it should
     // have the maximum possible size.
-    MOZ_ASSERT(zone->markedAtoms.length() <= bitmap.length());
+    MOZ_ASSERT(zone->markedAtoms().length() <= bitmap.length());
 
     // Take the bitwise AND of the two mark bitmaps to get the best new
     // overapproximation we can. |bitmap| might include bits that are not in
     // the zone's mark bitmap, if additional zones were collected by the GC.
-    for (size_t i = 0; i < zone->markedAtoms.length(); i++)
-        zone->markedAtoms[i] &= bitmap[i];
+    for (size_t i = 0; i < zone->markedAtoms().length(); i++)
+        zone->markedAtoms()[i] &= bitmap[i];
 }
 
 // Set any bits in the chunk mark bitmaps for atoms which are marked in bitmap.
 static void
 AddBitmapToChunkMarkBits(JSRuntime* runtime, AtomMarkingRuntime::Bitmap& bitmap)
 {
     // Make sure that by copying the mark bits for one arena in word sizes we
     // do not affect the mark bits for other arenas.
@@ -174,26 +174,26 @@ AtomMarkingRuntime::updateChunkMarkBits(
     // updating the chunk mark bitmaps separately for each zone.
     Bitmap markedUnion;
     if (EnsureBitmapLength(markedUnion, allocatedWords)) {
         for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
             // We only need to update the chunk mark bits for zones which were
             // not collected in the current GC. Atoms which are referenced by
             // collected zones have already been marked.
             if (!zone->isCollectingFromAnyThread()) {
-                MOZ_ASSERT(zone->markedAtoms.length() <= allocatedWords);
-                for (size_t i = 0; i < zone->markedAtoms.length(); i++)
-                    markedUnion[i] |= zone->markedAtoms[i];
+                MOZ_ASSERT(zone->markedAtoms().length() <= allocatedWords);
+                for (size_t i = 0; i < zone->markedAtoms().length(); i++)
+                    markedUnion[i] |= zone->markedAtoms()[i];
             }
         }
         AddBitmapToChunkMarkBits(runtime, markedUnion);
     } else {
         for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
             if (!zone->isCollectingFromAnyThread())
-                AddBitmapToChunkMarkBits(runtime, zone->markedAtoms);
+                AddBitmapToChunkMarkBits(runtime, zone->markedAtoms());
         }
     }
 }
 
 static inline size_t
 GetAtomBit(TenuredCell* thing)
 {
     MOZ_ASSERT(thing->zoneFromAnyThread()->isAtomsZone());
@@ -209,78 +209,78 @@ ThingIsPermanent(TenuredCell* thing)
     if (kind == JS::TraceKind::String && static_cast<JSString*>(thing)->isPermanentAtom())
         return true;
     if (kind == JS::TraceKind::Symbol && static_cast<JS::Symbol*>(thing)->isWellKnownSymbol())
         return true;
     return false;
 }
 
 void
-AtomMarkingRuntime::markAtom(ExclusiveContext* cx, TenuredCell* thing)
+AtomMarkingRuntime::markAtom(JSContext* cx, TenuredCell* thing)
 {
     // The context's zone will be null during initialization of the runtime.
     if (!thing || !cx->zone())
         return;
     MOZ_ASSERT(!cx->zone()->isAtomsZone());
 
     if (ThingIsPermanent(thing) || !thing->zoneFromAnyThread()->isAtomsZone())
         return;
 
     size_t bit = GetAtomBit(thing);
 
     {
         AutoEnterOOMUnsafeRegion oomUnsafe;
-        if (!EnsureBitmapLength(cx->zone()->markedAtoms, allocatedWords))
+        if (!EnsureBitmapLength(cx->zone()->markedAtoms(), allocatedWords))
             oomUnsafe.crash("Atom bitmap OOM");
     }
 
-    SetBit(cx->zone()->markedAtoms.begin(), bit);
+    SetBit(cx->zone()->markedAtoms().begin(), bit);
 
-    if (cx->isJSContext()) {
+    if (!cx->helperThread()) {
         // Trigger a read barrier on the atom, in case there is an incremental
         // GC in progress. This is necessary if the atom is being marked
         // because a reference to it was obtained from another zone which is
         // not being collected by the incremental GC.
         TenuredCell::readBarrier(thing);
     }
 }
 
 void
-AtomMarkingRuntime::markId(ExclusiveContext* cx, jsid id)
+AtomMarkingRuntime::markId(JSContext* cx, jsid id)
 {
     if (JSID_IS_GCTHING(id))
         markAtom(cx, &JSID_TO_GCTHING(id).asCell()->asTenured());
 }
 
 void
-AtomMarkingRuntime::markAtomValue(ExclusiveContext* cx, const Value& value)
+AtomMarkingRuntime::markAtomValue(JSContext* cx, const Value& value)
 {
     if (value.isGCThing()) {
         Cell* thing = value.toGCThing();
         if (thing && !IsInsideNursery(thing))
             markAtom(cx, &thing->asTenured());
     }
 }
 
 void
 AtomMarkingRuntime::adoptMarkedAtoms(Zone* target, Zone* source)
 {
     MOZ_ASSERT(target->runtimeFromAnyThread()->currentThreadHasExclusiveAccess());
 
-    Bitmap* targetBitmap = &target->markedAtoms;
-    Bitmap* sourceBitmap = &source->markedAtoms;
+    Bitmap* targetBitmap = &target->markedAtoms();
+    Bitmap* sourceBitmap = &source->markedAtoms();
     if (targetBitmap->length() < sourceBitmap->length())
         std::swap(targetBitmap, sourceBitmap);
     for (size_t i = 0; i < sourceBitmap->length(); i++)
         (*targetBitmap)[i] |= (*sourceBitmap)[i];
 
-    if (targetBitmap != &target->markedAtoms)
-        target->markedAtoms = Move(source->markedAtoms);
+    if (targetBitmap != &target->markedAtoms())
+        target->markedAtoms() = Move(source->markedAtoms());
     else
-        source->markedAtoms.clear();
+        source->markedAtoms().clear();
 }
 
 #ifdef DEBUG
 
 bool
 AtomMarkingRuntime::atomIsMarked(Zone* zone, Cell* thingArg)
 {
     if (!thingArg || IsInsideNursery(thingArg))
@@ -296,19 +296,19 @@ AtomMarkingRuntime::atomIsMarked(Zone* z
     JS::TraceKind kind = thing->getTraceKind();
     if (kind == JS::TraceKind::String) {
         JSAtom* atom = static_cast<JSAtom*>(thing);
         if (AtomIsPinnedInRuntime(zone->runtimeFromAnyThread(), atom))
             return true;
     }
 
     size_t bit = GetAtomBit(thing);
-    if (bit >= zone->markedAtoms.length() * JS_BITS_PER_WORD)
+    if (bit >= zone->markedAtoms().length() * JS_BITS_PER_WORD)
         return false;
-    return GetBit(zone->markedAtoms.begin(), bit);
+    return GetBit(zone->markedAtoms().begin(), bit);
 }
 
 bool
 AtomMarkingRuntime::idIsMarked(Zone* zone, jsid id)
 {
     if (JSID_IS_GCTHING(id))
         return atomIsMarked(zone, JSID_TO_GCTHING(id).asCell());
     return true;
--- a/js/src/gc/AtomMarking.h
+++ b/js/src/gc/AtomMarking.h
@@ -4,26 +4,27 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef gc_AtomMarking_h
 #define gc_AtomMarking_h
 
 #include "NamespaceImports.h"
 #include "gc/Heap.h"
+#include "threading/ProtectedData.h"
 
 namespace js {
 namespace gc {
 
 // This class manages state used for marking atoms during GCs.
 // See AtomMarking.cpp for details.
 class AtomMarkingRuntime
 {
     // Unused arena atom bitmap indexes. Protected by the GC lock.
-    Vector<size_t, 0, SystemAllocPolicy> freeArenaIndexes;
+    js::ExclusiveAccessLockOrGCTaskData<Vector<size_t, 0, SystemAllocPolicy>> freeArenaIndexes;
 
     // The extent of all allocated and free words in atom mark bitmaps.
     // This monotonically increases and may be read from without locking.
     mozilla::Atomic<size_t> allocatedWords;
 
   public:
     typedef Vector<uintptr_t, 0, SystemAllocPolicy> Bitmap;
 
@@ -46,19 +47,19 @@ class AtomMarkingRuntime
     // overapproximation of the reachable atoms in |bitmap|.
     void updateZoneBitmap(Zone* zone, const Bitmap& bitmap);
 
     // Set any bits in the chunk mark bitmaps for atoms which are marked in any
     // zone in the runtime.
     void updateChunkMarkBits(JSRuntime* runtime);
 
     // Mark an atom or id as being newly reachable by the context's zone.
-    void markAtom(ExclusiveContext* cx, TenuredCell* thing);
-    void markId(ExclusiveContext* cx, jsid id);
-    void markAtomValue(ExclusiveContext* cx, const Value& value);
+    void markAtom(JSContext* cx, TenuredCell* thing);
+    void markId(JSContext* cx, jsid id);
+    void markAtomValue(JSContext* cx, const Value& value);
 
     // Mark all atoms in |source| as being reachable within |target|.
     void adoptMarkedAtoms(Zone* target, Zone* source);
 
 #ifdef DEBUG
     // Return whether |thing/id| is in the atom marking bitmap for |zone|.
     bool atomIsMarked(Zone* zone, Cell* thing);
     bool idIsMarked(Zone* zone, jsid id);
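
Most of the runtime state this patch touches is rewrapped the same way freeArenaIndexes is above: the field keeps its type but is stored inside a ProtectedData-style wrapper (UnprotectedData, GCLockData, WriteOnceData, ExclusiveAccessLockOrGCTaskData, ...) and read through ref(). A rough standalone sketch of that check-on-access idea; the wrapper, check types, and names below are assumptions made for the illustration, not the contents of threading/ProtectedData.h:

    #include <cassert>
    #include <utility>

    // The value is stored inline; every access funnels through ref(), which
    // gives one place to assert the thread or lock discipline for the field.
    template <typename Check, typename T>
    class ProtectedDataSketch
    {
        Check check_;
        T value_;

      public:
        template <typename... Args>
        explicit ProtectedDataSketch(Args&&... args)
          : value_(std::forward<Args>(args)...) {}

        T& ref() { check_.check(); return value_; }
        const T& ref() const { check_.check(); return value_; }
    };

    struct CheckNothing { void check() const {} };                 // "UnprotectedData"
    struct CheckGCLockHeld { void check() const { /* assert the GC lock is owned */ } };

    template <typename T> using Unprotected = ProtectedDataSketch<CheckNothing, T>;
    template <typename T> using GCLocked    = ProtectedDataSketch<CheckGCLockHeld, T>;

    int main()
    {
        Unprotected<int> counter(0);
        counter.ref() = 5;          // reads and writes both go through ref()
        assert(counter.ref() == 5);
    }
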
--- a/js/src/gc/Barrier.cpp
+++ b/js/src/gc/Barrier.cpp
@@ -19,17 +19,18 @@
 #include "vm/Symbol.h"
 #include "wasm/WasmJS.h"
 
 namespace js {
 
 bool
 RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone)
 {
-    return shadowZone->runtimeFromMainThread()->isHeapMajorCollecting();
+    MOZ_ASSERT(CurrentThreadCanAccessRuntime(shadowZone->runtimeFromMainThread()));
+    return JS::CurrentThreadIsHeapMajorCollecting();
 }
 
 #ifdef DEBUG
 
 bool
 IsMarkedBlack(NativeObject* obj)
 {
     // Note: we assume conservatively that Nursery things will be live.
@@ -62,29 +63,29 @@ HeapSlot::assertPreconditionForWriteBarr
 
     MOZ_ASSERT_IF(target.isGCThing() && IsMarkedBlack(obj),
                   !JS::GCThingIsMarkedGray(JS::GCCellPtr(target)));
 }
 
 bool
 CurrentThreadIsIonCompiling()
 {
-    return TlsPerThreadData.get()->ionCompiling;
+    return TlsContext.get()->ionCompiling;
 }
 
 bool
 CurrentThreadIsIonCompilingSafeForMinorGC()
 {
-    return TlsPerThreadData.get()->ionCompilingSafeForMinorGC;
+    return TlsContext.get()->ionCompilingSafeForMinorGC;
 }
 
 bool
 CurrentThreadIsGCSweeping()
 {
-    return TlsPerThreadData.get()->gcSweeping;
+    return TlsContext.get()->gcSweeping;
 }
 
 #endif // DEBUG
 
 template <typename S>
 template <typename T>
 void
 ReadBarrierFunctor<S>::operator()(T* t)
--- a/js/src/gc/GCInternals.h
+++ b/js/src/gc/GCInternals.h
@@ -80,26 +80,26 @@ class MOZ_RAII AutoStopVerifyingBarriers
         }
     }
 
     ~AutoStopVerifyingBarriers() {
         // Nasty special case: verification runs a minor GC, which *may* nest
         // inside of an outer minor GC. This is not allowed by the
         // gc::Statistics phase tree. So we pause the "real" GC, if in fact one
         // is in progress.
-        gcstats::Phase outer = gc->stats.currentPhase();
+        gcstats::Phase outer = gc->stats().currentPhase();
         if (outer != gcstats::PHASE_NONE)
-            gc->stats.endPhase(outer);
-        MOZ_ASSERT(gc->stats.currentPhase() == gcstats::PHASE_NONE);
+            gc->stats().endPhase(outer);
+        MOZ_ASSERT(gc->stats().currentPhase() == gcstats::PHASE_NONE);
 
         if (restartPreVerifier)
             gc->startVerifyPreBarriers();
 
         if (outer != gcstats::PHASE_NONE)
-            gc->stats.beginPhase(outer);
+            gc->stats().beginPhase(outer);
     }
 };
 #else
 struct MOZ_RAII AutoStopVerifyingBarriers
 {
     AutoStopVerifyingBarriers(JSRuntime*, bool) {}
 };
 #endif /* JS_GC_ZEAL */
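
The AutoStopVerifyingBarriers destructor above keeps the gcstats phase tree well nested: if an outer phase is active it is ended before verification runs its own minor GC, then begun again afterwards. A rough standalone sketch of that suspend-and-resume idiom; PhaseStack and all names below are illustrative, not the gcstats::Statistics API:

    #include <cassert>
    #include <vector>

    enum class Phase { None, MinorGC, Verify };

    // Toy phase stack standing in for gcstats::Statistics.
    struct PhaseStack {
        std::vector<Phase> stack;
        Phase current() const { return stack.empty() ? Phase::None : stack.back(); }
        void begin(Phase p) { stack.push_back(p); }
        void end(Phase p) { assert(current() == p); stack.pop_back(); }
    };

    // Suspend whatever phase is active, run the nested work, then restore the
    // phase so the nested work's own begin/end pairs stay properly nested.
    template <typename F>
    void withOuterPhaseSuspended(PhaseStack& stats, F&& nestedWork)
    {
        Phase outer = stats.current();
        if (outer != Phase::None)
            stats.end(outer);
        assert(stats.current() == Phase::None);

        nestedWork();

        if (outer != Phase::None)
            stats.begin(outer);
    }

    int main()
    {
        PhaseStack stats;
        stats.begin(Phase::MinorGC);
        withOuterPhaseSuspended(stats, [&] {
            stats.begin(Phase::Verify);   // nested phases open and close cleanly
            stats.end(Phase::Verify);
        });
        assert(stats.current() == Phase::MinorGC);
        stats.end(Phase::MinorGC);
    }
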
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -24,17 +24,17 @@
 namespace js {
 
 class AutoLockGC;
 class AutoLockHelperThreadState;
 class VerifyPreTracer;
 
 namespace gc {
 
-typedef Vector<JS::Zone*, 4, SystemAllocPolicy> ZoneVector;
+typedef Vector<ZoneGroup*, 4, SystemAllocPolicy> ZoneGroupVector;
 using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;
 
 class AutoMaybeStartBackgroundAllocation;
 class MarkingValidator;
 class AutoTraceSession;
 struct MovingTracer;
 
 class ChunkPool
@@ -72,18 +72,17 @@ class ChunkPool
     };
 };
 
 // Performs extra allocation off the main thread so that when memory is
 // required on the main thread it will already be available and waiting.
 class BackgroundAllocTask : public GCParallelTask
 {
     // Guarded by the GC lock.
-    JSRuntime* runtime;
-    ChunkPool& chunkPool_;
+    GCLockData<ChunkPool&> chunkPool_;
 
     const bool enabled_;
 
   public:
     BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool);
     bool enabled() const { return enabled_; }
 
   protected:
@@ -91,103 +90,106 @@ class BackgroundAllocTask : public GCPar
 };
 
 // Search the provided Chunks for free arenas and decommit them.
 class BackgroundDecommitTask : public GCParallelTask
 {
   public:
     using ChunkVector = mozilla::Vector<Chunk*>;
 
-    explicit BackgroundDecommitTask(JSRuntime *rt) : runtime(rt) {}
+    explicit BackgroundDecommitTask(JSRuntime *rt) : GCParallelTask(rt) {}
     void setChunksToScan(ChunkVector &chunks);
 
   protected:
     void run() override;
 
   private:
-    JSRuntime* runtime;
-    ChunkVector toDecommit;
+    UnprotectedData<ChunkVector> toDecommit;
 };
 
 /*
  * Encapsulates all of the GC tunables. These are effectively constant and
  * should only be modified by setParameter.
  */
 class GCSchedulingTunables
 {
     /*
      * Soft limit on the number of bytes we are allowed to allocate in the GC
      * heap. Attempts to allocate gcthings over this limit will return null and
      * subsequently invoke the standard OOM machinery, independent of available
      * physical memory.
      */
-    size_t gcMaxBytes_;
+    UnprotectedData<size_t> gcMaxBytes_;
+
+    /* Maximum nursery size for each zone group. */
+    UnprotectedData<size_t> gcMaxNurseryBytes_;
 
     /*
      * The base value used to compute zone->trigger.gcBytes(). When
      * usage.gcBytes() surpasses threshold.gcBytes() for a zone, the zone may
      * be scheduled for a GC, depending on the exact circumstances.
      */
-    size_t gcZoneAllocThresholdBase_;
+    UnprotectedData<size_t> gcZoneAllocThresholdBase_;
 
     /* Fraction of threshold.gcBytes() which triggers an incremental GC. */
-    double zoneAllocThresholdFactor_;
+    UnprotectedData<double> zoneAllocThresholdFactor_;
 
     /*
      * Number of bytes to allocate between incremental slices in GCs triggered
      * by the zone allocation threshold.
      */
-    size_t zoneAllocDelayBytes_;
+    UnprotectedData<size_t> zoneAllocDelayBytes_;
 
     /*
      * Totally disables |highFrequencyGC|, the HeapGrowthFactor, and other
      * tunables that make GC non-deterministic.
      */
-    bool dynamicHeapGrowthEnabled_;
+    UnprotectedData<bool> dynamicHeapGrowthEnabled_;
 
     /*
      * We enter high-frequency mode if we GC twice within this many
      * microseconds. This value is stored directly in microseconds.
      */
-    uint64_t highFrequencyThresholdUsec_;
+    UnprotectedData<uint64_t> highFrequencyThresholdUsec_;
 
     /*
      * When in the |highFrequencyGC| mode, these parameterize the per-zone
      * "HeapGrowthFactor" computation.
      */
-    uint64_t highFrequencyLowLimitBytes_;
-    uint64_t highFrequencyHighLimitBytes_;
-    double highFrequencyHeapGrowthMax_;
-    double highFrequencyHeapGrowthMin_;
+    UnprotectedData<uint64_t> highFrequencyLowLimitBytes_;
+    UnprotectedData<uint64_t> highFrequencyHighLimitBytes_;
+    UnprotectedData<double> highFrequencyHeapGrowthMax_;
+    UnprotectedData<double> highFrequencyHeapGrowthMin_;
 
     /*
      * When not in |highFrequencyGC| mode, this is the global (stored per-zone)
      * "HeapGrowthFactor".
      */
-    double lowFrequencyHeapGrowth_;
+    UnprotectedData<double> lowFrequencyHeapGrowth_;
 
     /*
      * Doubles the length of IGC slices when in the |highFrequencyGC| mode.
      */
-    bool dynamicMarkSliceEnabled_;
+    UnprotectedData<bool> dynamicMarkSliceEnabled_;
 
     /*
      * Controls whether painting can trigger IGC slices.
      */
-    bool refreshFrameSlicesEnabled_;
+    UnprotectedData<bool> refreshFrameSlicesEnabled_;
 
     /*
      * Controls the number of empty chunks reserved for future allocation.
      */
-    uint32_t minEmptyChunkCount_;
-    uint32_t maxEmptyChunkCount_;
+    UnprotectedData<uint32_t> minEmptyChunkCount_;
+    UnprotectedData<uint32_t> maxEmptyChunkCount_;
 
   public:
     GCSchedulingTunables()
       : gcMaxBytes_(0),
+        gcMaxNurseryBytes_(0),
         gcZoneAllocThresholdBase_(30 * 1024 * 1024),
         zoneAllocThresholdFactor_(0.9),
         zoneAllocDelayBytes_(1024 * 1024),
         dynamicHeapGrowthEnabled_(false),
         highFrequencyThresholdUsec_(1000 * 1000),
         highFrequencyLowLimitBytes_(100 * 1024 * 1024),
         highFrequencyHighLimitBytes_(500 * 1024 * 1024),
         highFrequencyHeapGrowthMax_(3.0),
@@ -195,16 +197,17 @@ class GCSchedulingTunables
         lowFrequencyHeapGrowth_(1.5),
         dynamicMarkSliceEnabled_(false),
         refreshFrameSlicesEnabled_(true),
         minEmptyChunkCount_(1),
         maxEmptyChunkCount_(30)
     {}
 
     size_t gcMaxBytes() const { return gcMaxBytes_; }
+    size_t gcMaxNurseryBytes() const { return gcMaxNurseryBytes_; }
     size_t gcZoneAllocThresholdBase() const { return gcZoneAllocThresholdBase_; }
     double zoneAllocThresholdFactor() const { return zoneAllocThresholdFactor_; }
     size_t zoneAllocDelayBytes() const { return zoneAllocDelayBytes_; }
     bool isDynamicHeapGrowthEnabled() const { return dynamicHeapGrowthEnabled_; }
     uint64_t highFrequencyThresholdUsec() const { return highFrequencyThresholdUsec_; }
     uint64_t highFrequencyLowLimitBytes() const { return highFrequencyLowLimitBytes_; }
     uint64_t highFrequencyHighLimitBytes() const { return highFrequencyHighLimitBytes_; }
     double highFrequencyHeapGrowthMax() const { return highFrequencyHeapGrowthMax_; }
@@ -518,17 +521,17 @@ class GCSchedulingTunables
 class GCSchedulingState
 {
     /*
      * Influences how we schedule and run GC's in several subtle ways. The most
      * important factor is in how it controls the "HeapGrowthFactor". The
      * growth factor is a measure of how large (as a percentage of the last GC)
      * the heap is allowed to grow before we try to schedule another GC.
      */
-    bool inHighFrequencyGCMode_;
+    UnprotectedData<bool> inHighFrequencyGCMode_;
 
   public:
     GCSchedulingState()
       : inHighFrequencyGCMode_(false)
     {}
 
     bool inHighFrequencyGCMode() const { return inHighFrequencyGCMode_; }
 
@@ -537,29 +540,29 @@ class GCSchedulingState
         inHighFrequencyGCMode_ =
             tunables.isDynamicHeapGrowthEnabled() && lastGCTime &&
             lastGCTime + tunables.highFrequencyThresholdUsec() > currentTime;
     }
 };
 
 template<typename F>
 struct Callback {
-    F op;
-    void* data;
+    UnprotectedData<F> op;
+    UnprotectedData<void*> data;
 
     Callback()
       : op(nullptr), data(nullptr)
     {}
     Callback(F op, void* data)
       : op(op), data(data)
     {}
 };
 
 template<typename F>
-using CallbackVector = Vector<Callback<F>, 4, SystemAllocPolicy>;
+using CallbackVector = UnprotectedData<Vector<Callback<F>, 4, SystemAllocPolicy>>;
 
 template <typename T, typename Iter0, typename Iter1>
 class ChainedIter
 {
     Iter0 iter0_;
     Iter1 iter1_;
 
   public:
@@ -613,21 +616,16 @@ class GCRuntime
     MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock);
     uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);
 
     MOZ_MUST_USE bool triggerGC(JS::gcreason::Reason reason);
     void maybeAllocTriggerZoneGC(Zone* zone, const AutoLockGC& lock);
     // The return value indicates if we were able to do the GC.
     bool triggerZoneGC(Zone* zone, JS::gcreason::Reason reason);
     void maybeGC(Zone* zone);
-    void minorGC(JS::gcreason::Reason reason,
-                 gcstats::Phase phase = gcstats::PHASE_MINOR_GC) JS_HAZ_GC_CALL;
-    void evictNursery(JS::gcreason::Reason reason = JS::gcreason::EVICT_NURSERY) {
-        minorGC(reason, gcstats::PHASE_EVICT_NURSERY);
-    }
     // The return value indicates whether a major GC was performed.
     bool gcIfRequested();
     void gc(JSGCInvocationKind gckind, JS::gcreason::Reason reason);
     void startGC(JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis = 0);
     void gcSlice(JS::gcreason::Reason reason, int64_t millis = 0);
     void finishGC(JS::gcreason::Reason reason);
     void abortGC();
     void startDebugGC(JSGCInvocationKind gckind, SliceBudget& budget);
@@ -703,69 +701,29 @@ class GCRuntime
         lock.lock();
     }
 
     void unlockGC() {
         lock.unlock();
     }
 
 #ifdef DEBUG
-    bool isAllocAllowed() { return noGCOrAllocationCheck == 0; }
-    void disallowAlloc() { ++noGCOrAllocationCheck; }
-    void allowAlloc() {
-        MOZ_ASSERT(!isAllocAllowed());
-        --noGCOrAllocationCheck;
-    }
-
-    bool isNurseryAllocAllowed() { return noNurseryAllocationCheck == 0; }
-    void disallowNurseryAlloc() { ++noNurseryAllocationCheck; }
-    void allowNurseryAlloc() {
-        MOZ_ASSERT(!isNurseryAllocAllowed());
-        --noNurseryAllocationCheck;
-    }
-
-    bool isStrictProxyCheckingEnabled() { return disableStrictProxyCheckingCount == 0; }
-    void disableStrictProxyChecking() { ++disableStrictProxyCheckingCount; }
-    void enableStrictProxyChecking() {
-        MOZ_ASSERT(disableStrictProxyCheckingCount > 0);
-        --disableStrictProxyCheckingCount;
+    bool currentThreadHasLockedGC() const {
+        return lock.ownedByCurrentThread();
     }
 #endif // DEBUG
 
-    bool isInsideUnsafeRegion() { return inUnsafeRegion != 0; }
-    void enterUnsafeRegion() {
-        MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
-        ++inUnsafeRegion;
-    }
-    void leaveUnsafeRegion() {
-        MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
-        MOZ_ASSERT(inUnsafeRegion > 0);
-        --inUnsafeRegion;
-    }
-
-    void verifyIsSafeToGC() {
-        MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
-        MOZ_DIAGNOSTIC_ASSERT(!isInsideUnsafeRegion(),
-                              "[AutoAssertNoGC] possible GC in GC-unsafe region");
-    }
-
     void setAlwaysPreserveCode() { alwaysPreserveCode = true; }
 
     bool isIncrementalGCAllowed() const { return incrementalAllowed; }
     void disallowIncrementalGC() { incrementalAllowed = false; }
 
     bool isIncrementalGCEnabled() const { return mode == JSGC_MODE_INCREMENTAL && incrementalAllowed; }
     bool isIncrementalGCInProgress() const { return state() != State::NotActive; }
 
-    bool isGenerationalGCEnabled() const { return generationalDisabled == 0; }
-    void disableGenerationalGC();
-    void enableGenerationalGC();
-
-    void disableCompactingGC();
-    void enableCompactingGC();
     bool isCompactingGCEnabled() const;
 
     void setGrayRootsTracer(JSTraceDataOp traceOp, void* data);
     MOZ_MUST_USE bool addBlackRootsTracer(JSTraceDataOp traceOp, void* data);
     void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data);
 
     void setMaxMallocBytes(size_t value);
     int32_t getMallocBytes() const { return mallocBytesUntilGC; }
@@ -793,17 +751,17 @@ class GCRuntime
     JS::DoCycleCollectionCallback setDoCycleCollectionCallback(JS::DoCycleCollectionCallback callback);
     void callDoCycleCollectionCallback(JSContext* cx);
 
     void setFullCompartmentChecks(bool enable);
 
     JS::Zone* getCurrentZoneGroup() { return currentZoneGroup; }
     void setFoundBlackGrayEdges(TenuredCell& target) {
         AutoEnterOOMUnsafeRegion oomUnsafe;
-        if (!foundBlackGrayEdges.append(&target))
+        if (!foundBlackGrayEdges.ref().append(&target))
             oomUnsafe.crash("OOM|small: failed to insert into foundBlackGrayEdges");
     }
 
     uint64_t gcNumber() const { return number; }
 
     uint64_t minorGCCount() const { return minorGCNumber; }
     void incMinorGcNumber() { ++minorGCNumber; ++number; }
 
@@ -811,16 +769,19 @@ class GCRuntime
     void incMajorGcNumber() { ++majorGCNumber; ++number; }
 
     int64_t defaultSliceBudget() const { return defaultTimeBudget_; }
 
     bool isIncrementalGc() const { return isIncremental; }
     bool isFullGc() const { return isFull; }
     bool isCompactingGc() const { return isCompacting; }
 
+    bool areGrayBitsValid() const { return grayBitsValid; }
+    void setGrayBitsInvalid() { grayBitsValid = false; }
+
     bool minorGCRequested() const { return minorGCTriggerReason != JS::gcreason::NO_REASON; }
     bool majorGCRequested() const { return majorGCTriggerReason != JS::gcreason::NO_REASON; }
     bool isGcNeeded() { return minorGCRequested() || majorGCRequested(); }
 
     bool fullGCForAtomsRequested() const { return fullGCForAtomsRequested_; }
 
     double computeHeapGrowthFactor(size_t lastBytes);
     size_t computeTriggerBytes(double growthFactor, size_t lastBytes);
@@ -829,25 +790,25 @@ class GCRuntime
     void setGCMode(JSGCMode m) {
         mode = m;
         marker.setGCMode(mode);
     }
 
     inline void updateOnFreeArenaAlloc(const ChunkInfo& info);
     inline void updateOnArenaFree(const ChunkInfo& info);
 
-    ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_; }
-    ChunkPool& availableChunks(const AutoLockGC& lock) { return availableChunks_; }
-    ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_; }
-    const ChunkPool& fullChunks(const AutoLockGC& lock) const { return fullChunks_; }
-    const ChunkPool& availableChunks(const AutoLockGC& lock) const { return availableChunks_; }
-    const ChunkPool& emptyChunks(const AutoLockGC& lock) const { return emptyChunks_; }
+    ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_.ref(); }
+    ChunkPool& availableChunks(const AutoLockGC& lock) { return availableChunks_.ref(); }
+    ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_.ref(); }
+    const ChunkPool& fullChunks(const AutoLockGC& lock) const { return fullChunks_.ref(); }
+    const ChunkPool& availableChunks(const AutoLockGC& lock) const { return availableChunks_.ref(); }
+    const ChunkPool& emptyChunks(const AutoLockGC& lock) const { return emptyChunks_.ref(); }
     typedef ChainedIter<Chunk*, ChunkPool::Iter, ChunkPool::Iter> NonEmptyChunksIter;
     NonEmptyChunksIter allNonEmptyChunks() {
-        return NonEmptyChunksIter(ChunkPool::Iter(availableChunks_), ChunkPool::Iter(fullChunks_));
+        return NonEmptyChunksIter(ChunkPool::Iter(availableChunks_.ref()), ChunkPool::Iter(fullChunks_.ref()));
     }
 
     Chunk* getOrAllocChunk(const AutoLockGC& lock,
                            AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc);
     void recycleChunk(Chunk* chunk, const AutoLockGC& lock);
 
 #ifdef JS_GC_ZEAL
     void startVerifyPreBarriers();
@@ -856,40 +817,34 @@ class GCRuntime
     bool isVerifyPreBarriersEnabled() const { return !!verifyPreData; }
 #else
     bool isVerifyPreBarriersEnabled() const { return false; }
 #endif
 
     // Free certain LifoAlloc blocks when it is safe to do so.
     void freeUnusedLifoBlocksAfterSweeping(LifoAlloc* lifo);
     void freeAllLifoBlocksAfterSweeping(LifoAlloc* lifo);
-    void freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo);
-
-    // Queue a thunk to run after the next minor GC.
-    void callAfterMinorGC(void (*thunk)(void* data), void* data) {
-        nursery.queueSweepAction(thunk, data);
-    }
 
     // Public here for ReleaseArenaLists and FinalizeTypedArenas.
     void releaseArena(Arena* arena, const AutoLockGC& lock);
 
     void releaseHeldRelocatedArenas();
     void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);
 
     // Allocator
     template <AllowGC allowGC>
     MOZ_MUST_USE bool checkAllocatorState(JSContext* cx, AllocKind kind);
     template <AllowGC allowGC>
     JSObject* tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicSlots,
                                   const Class* clasp);
     template <AllowGC allowGC>
-    static JSObject* tryNewTenuredObject(ExclusiveContext* cx, AllocKind kind, size_t thingSize,
+    static JSObject* tryNewTenuredObject(JSContext* cx, AllocKind kind, size_t thingSize,
                                          size_t nDynamicSlots);
     template <typename T, AllowGC allowGC>
-    static T* tryNewTenuredThing(ExclusiveContext* cx, AllocKind kind, size_t thingSize);
+    static T* tryNewTenuredThing(JSContext* cx, AllocKind kind, size_t thingSize);
     static TenuredCell* refillFreeListInGC(Zone* zone, AllocKind thingKind);
 
   private:
     enum IncrementalProgress
     {
         NotFinished = 0,
         Finished
     };
@@ -900,22 +855,22 @@ class GCRuntime
                      AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc);
     Arena* allocateArena(Chunk* chunk, Zone* zone, AllocKind kind,
                          ShouldCheckThresholds checkThresholds, const AutoLockGC& lock);
     void arenaAllocatedDuringGC(JS::Zone* zone, Arena* arena);
 
     // Allocator internals
     MOZ_MUST_USE bool gcIfNeededPerAllocation(JSContext* cx);
     template <typename T>
-    static void checkIncrementalZoneState(ExclusiveContext* cx, T* t);
-    static TenuredCell* refillFreeListFromAnyThread(ExclusiveContext* cx, AllocKind thingKind,
+    static void checkIncrementalZoneState(JSContext* cx, T* t);
+    static TenuredCell* refillFreeListFromAnyThread(JSContext* cx, AllocKind thingKind,
                                                     size_t thingSize);
     static TenuredCell* refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind,
                                                      size_t thingSize);
-    static TenuredCell* refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind);
+    static TenuredCell* refillFreeListOffMainThread(JSContext* cx, AllocKind thingKind);
 
     /*
      * Return the list of chunks that can be released outside the GC lock.
      * Must be called either during the GC or with the GC lock taken.
      */
     friend class BackgroundDecommitTask;
     ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
     void freeEmptyChunks(JSRuntime* rt, const AutoLockGC& lock);
@@ -1013,28 +968,31 @@ class GCRuntime
     void checkForCompartmentMismatches();
 #endif
 
     void callFinalizeCallbacks(FreeOp* fop, JSFinalizeStatus status) const;
     void callWeakPointerZoneGroupCallbacks() const;
     void callWeakPointerCompartmentCallbacks(JSCompartment* comp) const;
 
   public:
-    JSRuntime* rt;
+    JSRuntime* const rt;
 
     /* Embedders can use this zone however they wish. */
-    JS::Zone* systemZone;
+    UnprotectedData<JS::Zone*> systemZone;
 
-    /* List of compartments and zones (protected by the GC lock). */
-    ZoneVector zones;
+    // List of all zone groups (protected by the GC lock).
+    UnprotectedData<ZoneGroupVector> groups;
 
-    Nursery nursery;
-    StoreBuffer storeBuffer;
+    // The unique atoms zone, which has no zone group.
+    WriteOnceData<Zone*> atomsZone;
 
-    gcstats::Statistics stats;
+  private:
+    UnprotectedData<gcstats::Statistics> stats_;
+  public:
+    gcstats::Statistics& stats() { return stats_.ref(); }
 
     GCMarker marker;
 
     /* Track heap usage for this runtime. */
     HeapUsage usage;
 
     /* GC scheduling state and parameters. */
     GCSchedulingTunables tunables;
@@ -1046,226 +1004,208 @@ class GCRuntime
     // exclusive access lock.
     AtomMarkingRuntime atomMarking;
 
   private:
     // When empty, chunks reside in the emptyChunks pool and are re-used as
     // needed or eventually expired if not re-used. The emptyChunks pool gets
     // refilled from the background allocation task heuristically so that empty
     // chunks should always be available for immediate allocation without syscalls.
-    ChunkPool             emptyChunks_;
+    GCLockData<ChunkPool> emptyChunks_;
 
     // Chunks which have had some, but not all, of their arenas allocated live
     // in the available chunk lists. When all available arenas in a chunk have
     // been allocated, the chunk is removed from the available list and moved
     // to the fullChunks pool. During a GC, if all arenas are free, the chunk
     // is moved back to the emptyChunks pool and scheduled for eventual
     // release.
-    ChunkPool             availableChunks_;
+    UnprotectedData<ChunkPool> availableChunks_;
 
     // When all arenas in a chunk are used, it is moved to the fullChunks pool
     // so as to reduce the cost of operations on the available lists.
-    ChunkPool             fullChunks_;
+    UnprotectedData<ChunkPool> fullChunks_;
 
-    RootedValueMap rootsHash;
+    UnprotectedData<RootedValueMap> rootsHash;
 
-    size_t maxMallocBytes;
+    UnprotectedData<size_t> maxMallocBytes;
 
     // An incrementing id used to assign unique ids to cells that require one.
     mozilla::Atomic<uint64_t, mozilla::ReleaseAcquire> nextCellUniqueId_;
 
     /*
      * Number of committed arenas in all GC chunks, including empty chunks.
      */
     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
-    VerifyPreTracer* verifyPreData;
+    UnprotectedData<VerifyPreTracer*> verifyPreData;
 
   private:
-    bool chunkAllocationSinceLastGC;
-    int64_t lastGCTime;
+    UnprotectedData<bool> chunkAllocationSinceLastGC;
+    UnprotectedData<int64_t> lastGCTime;
 
-    JSGCMode mode;
+    UnprotectedData<JSGCMode> mode;
 
     mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;
 
     /* During shutdown, the GC needs to clean up every possible object. */
-    bool cleanUpEverything;
+    UnprotectedData<bool> cleanUpEverything;
 
     // Gray marking must be done after all black marking is complete. However,
     // we do not have write barriers on XPConnect roots. Therefore, XPConnect
     // roots must be accumulated in the first slice of incremental GC. We
     // accumulate these roots in each zone's gcGrayRoots vector and then mark
     // them later, after black marking is complete for each compartment. This
     // accumulation can fail, but in that case we switch to non-incremental GC.
     enum class GrayBufferState {
         Unused,
         Okay,
         Failed
     };
-    GrayBufferState grayBufferState;
+    UnprotectedData<GrayBufferState> grayBufferState;
     bool hasBufferedGrayRoots() const { return grayBufferState == GrayBufferState::Okay; }
 
     // Clear each zone's gray buffers, but do not change the current state.
     void resetBufferedGrayRoots() const;
 
     // Reset the gray buffering state to Unused.
     void clearBufferedGrayRoots() {
         grayBufferState = GrayBufferState::Unused;
         resetBufferedGrayRoots();
     }
 
+    /*
+     * The gray bits can become invalid if UnmarkGray overflows the stack. A
+     * full GC will reset this bit, since it fills in all the gray bits.
+     */
+    UnprotectedData<bool> grayBitsValid;
+
     mozilla::Atomic<JS::gcreason::Reason, mozilla::Relaxed> majorGCTriggerReason;
 
-    JS::gcreason::Reason minorGCTriggerReason;
+  public:
+    UnprotectedData<JS::gcreason::Reason> minorGCTriggerReason;
 
+  private:
     /* Perform full GC if rt->keepAtoms() becomes false. */
-    bool fullGCForAtomsRequested_;
+    UnprotectedData<bool> fullGCForAtomsRequested_;
 
     /* Incremented at the start of every minor GC. */
-    uint64_t minorGCNumber;
+    UnprotectedData<uint64_t> minorGCNumber;
 
     /* Incremented at the start of every major GC. */
-    uint64_t majorGCNumber;
+    UnprotectedData<uint64_t> majorGCNumber;
 
     /* The major GC number at which to release observed type information. */
-    uint64_t jitReleaseNumber;
+    UnprotectedData<uint64_t> jitReleaseNumber;
 
     /* Incremented on every GC slice. */
-    uint64_t number;
+    UnprotectedData<uint64_t> number;
 
     /* The number at the time of the most recent GC's first slice. */
-    uint64_t startNumber;
+    UnprotectedData<uint64_t> startNumber;
 
     /* Whether the currently running GC can finish in multiple slices. */
-    bool isIncremental;
+    UnprotectedData<bool> isIncremental;
 
     /* Whether all zones are being collected in first GC slice. */
-    bool isFull;
+    UnprotectedData<bool> isFull;
 
     /* Whether the heap will be compacted at the end of GC. */
-    bool isCompacting;
+    UnprotectedData<bool> isCompacting;
 
     /* The invocation kind of the current GC, taken from the first slice. */
-    JSGCInvocationKind invocationKind;
+    UnprotectedData<JSGCInvocationKind> invocationKind;
 
     /* The initial GC reason, taken from the first slice. */
-    JS::gcreason::Reason initialReason;
-
-#ifdef DEBUG
-    /*
-     * If this is 0, all cross-compartment proxies must be registered in the
-     * wrapper map. This checking must be disabled temporarily while creating
-     * new wrappers. When non-zero, this records the recursion depth of wrapper
-     * creation.
-     */
-    uintptr_t disableStrictProxyCheckingCount;
-#endif
+    UnprotectedData<JS::gcreason::Reason> initialReason;
 
     /*
      * The current incremental GC phase. This is also used internally in
      * non-incremental GC.
      */
-    State incrementalState;
+    UnprotectedData<State> incrementalState;
 
     /* Indicates that the last incremental slice exhausted the mark stack. */
-    bool lastMarkSlice;
+    UnprotectedData<bool> lastMarkSlice;
 
     /* Whether any sweeping will take place in the separate GC helper thread. */
-    bool sweepOnBackgroundThread;
+    UnprotectedData<bool> sweepOnBackgroundThread;
 
     /* Whether observed type information is being released in the current GC. */
-    bool releaseObservedTypes;
+    UnprotectedData<bool> releaseObservedTypes;
 
     /* Whether any black->gray edges were found during marking. */
-    BlackGrayEdgeVector foundBlackGrayEdges;
+    UnprotectedData<BlackGrayEdgeVector> foundBlackGrayEdges;
 
-    /* Singly linekd list of zones to be swept in the background. */
-    ZoneList backgroundSweepZones;
+    /* Singly linked list of zones to be swept in the background. */
+    UnprotectedData<ZoneList> backgroundSweepZones;
 
     /*
      * Free LIFO blocks are transferred to this allocator before being freed on
      * the background GC thread after sweeping.
      */
-    LifoAlloc blocksToFreeAfterSweeping;
+    UnprotectedData<LifoAlloc> blocksToFreeAfterSweeping;
 
-    /*
-     * Free LIFO blocks are transferred to this allocator before being freed
-     * after minor GC.
-     */
-    LifoAlloc blocksToFreeAfterMinorGC;
-
+  private:
     /* Index of current zone group (for stats). */
-    unsigned zoneGroupIndex;
+    UnprotectedData<unsigned> zoneGroupIndex;
 
     /*
      * Incremental sweep state.
      */
-    JS::Zone* zoneGroups;
-    JS::Zone* currentZoneGroup;
-    bool sweepingTypes;
-    unsigned finalizePhase;
-    JS::Zone* sweepZone;
-    AllocKind sweepKind;
-    bool abortSweepAfterCurrentGroup;
+    UnprotectedData<JS::Zone*> zoneGroups;
+    UnprotectedData<JS::Zone*> currentZoneGroup;
+    UnprotectedData<bool> sweepingTypes;
+    UnprotectedData<unsigned> finalizePhase;
+    UnprotectedData<JS::Zone*> sweepZone;
+    UnprotectedData<AllocKind> sweepKind;
+    UnprotectedData<bool> abortSweepAfterCurrentGroup;
 
     /*
      * Concurrent sweep infrastructure.
      */
     void startTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked);
     void joinTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked);
 
     /*
      * List head of arenas allocated during the sweep phase.
      */
-    Arena* arenasAllocatedDuringSweep;
+    UnprotectedData<Arena*> arenasAllocatedDuringSweep;
 
     /*
      * Incremental compacting state.
      */
-    bool startedCompacting;
-    ZoneList zonesToMaybeCompact;
-    Arena* relocatedArenasToRelease;
+    UnprotectedData<bool> startedCompacting;
+    UnprotectedData<ZoneList> zonesToMaybeCompact;
+    UnprotectedData<Arena*> relocatedArenasToRelease;
 
 #ifdef JS_GC_ZEAL
-    MarkingValidator* markingValidator;
+    UnprotectedData<MarkingValidator*> markingValidator;
 #endif
 
     /*
      * Indicates that a GC slice has taken place in the middle of an animation
      * frame, rather than at the beginning. In this case, the next slice will be
      * delayed so that we don't get back-to-back slices.
      */
-    bool interFrameGC;
+    UnprotectedData<bool> interFrameGC;
 
     /* Default budget for incremental GC slice. See js/SliceBudget.h. */
-    int64_t defaultTimeBudget_;
+    UnprotectedData<int64_t> defaultTimeBudget_;
 
     /*
      * We disable incremental GC if we encounter a Class with a trace hook
      * that does not implement write barriers.
      */
-    bool incrementalAllowed;
-
-    /*
-     * GGC can be enabled from the command line while testing.
-     */
-    unsigned generationalDisabled;
+    UnprotectedData<bool> incrementalAllowed;
 
     /*
      * Whether compacting GC is enabled globally.
      */
-    bool compactingEnabled;
+    UnprotectedData<bool> compactingEnabled;
 
-    /*
-     * Some code cannot tolerate compacting GC so it can be disabled temporarily
-     * with AutoDisableCompactingGC which uses this counter.
-     */
-    unsigned compactingDisabledCount;
-
-    bool poked;
+    UnprotectedData<bool> poked;
 
     /*
      * These options control the zealousness of the GC. At every allocation,
      * nextScheduled is decremented. When it reaches zero we do a full GC.
      *
      * At this point, if zeal_ is one of the types that trigger periodic
      * collection, then nextScheduled is reset to the value of zealFrequency.
      * Otherwise, no additional GCs take place.
@@ -1282,26 +1222,26 @@ class GCRuntime
      * in jsgc.cpp for more information about this.
      *
      * zeal_ values from 8 to 10 periodically run different types of
      * incremental GC.
      *
      * zeal_ value 14 performs periodic shrinking collections.
      */
 #ifdef JS_GC_ZEAL
-    uint32_t zealModeBits;
-    int zealFrequency;
-    int nextScheduled;
-    bool deterministicOnly;
-    int incrementalLimit;
+    UnprotectedData<uint32_t> zealModeBits;
+    UnprotectedData<int> zealFrequency;
+    UnprotectedData<int> nextScheduled;
+    UnprotectedData<bool> deterministicOnly;
+    UnprotectedData<int> incrementalLimit;
 
-    Vector<JSObject*, 0, SystemAllocPolicy> selectedForMarking;
+    UnprotectedData<Vector<JSObject*, 0, SystemAllocPolicy>> selectedForMarking;
 #endif
 
-    bool fullCompartmentChecks;
+    UnprotectedData<bool> fullCompartmentChecks;
 
     Callback<JSGCCallback> gcCallback;
     Callback<JS::DoCycleCollectionCallback> gcDoCycleCollectionCallback;
     Callback<JSObjectsTenuredCallback> tenuredCallback;
     CallbackVector<JSFinalizeCallback> finalizeCallbacks;
     CallbackVector<JSWeakPointerZoneGroupCallback> updateWeakPointerZoneGroupCallbacks;
     CallbackVector<JSWeakPointerCompartmentCallback> updateWeakPointerCompartmentCallbacks;
 
@@ -1322,46 +1262,36 @@ class GCRuntime
      * tracing through black roots and the other is for tracing through gray
      * roots. The black/gray distinction is only relevant to the cycle
      * collector.
      */
     CallbackVector<JSTraceDataOp> blackRootTracers;
     Callback<JSTraceDataOp> grayRootTracer;
 
     /* Always preserve JIT code during GCs, for testing. */
-    bool alwaysPreserveCode;
-
-    /*
-     * Some regions of code are hard for the static rooting hazard analysis to
-     * understand. In those cases, we trade the static analysis for a dynamic
-     * analysis. When this is non-zero, we should assert if we trigger, or
-     * might trigger, a GC.
-     */
-    int inUnsafeRegion;
+    UnprotectedData<bool> alwaysPreserveCode;
 
 #ifdef DEBUG
-    size_t noGCOrAllocationCheck;
-    size_t noNurseryAllocationCheck;
-
-    bool arenasEmptyAtShutdown;
+    UnprotectedData<bool> arenasEmptyAtShutdown;
 #endif
 
-    /* Synchronize GC heap access between main thread and GCHelperState. */
+    /* Synchronize GC heap access among GC helper threads and main threads. */
     friend class js::AutoLockGC;
     js::Mutex lock;
 
     BackgroundAllocTask allocTask;
     BackgroundDecommitTask decommitTask;
+
     GCHelperState helperState;
 
     /*
      * During incremental sweeping, this field temporarily holds the arenas of
      * the current AllocKind being swept in order of increasing free space.
      */
-    SortedArenaList incrementalSweepList;
+    UnprotectedData<SortedArenaList> incrementalSweepList;
 
     friend class js::GCHelperState;
     friend class MarkingValidator;
     friend class AutoTraceSession;
     friend class AutoEnterIteration;
 };
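The zeal bookkeeping above (zealModeBits, zealFrequency, nextScheduled) drives an allocation countdown. A minimal standalone sketch of that behaviour, using illustrative names and none of the real GCRuntime plumbing:

struct ZealStateSketch
{
    int zealFrequency = 100;   // period used by the periodic zeal modes
    int nextScheduled = 100;   // decremented on every allocation
    bool periodic = true;      // whether the active zeal mode re-arms itself

    // Returns true when this allocation should trigger a full GC.
    bool noteAllocation()
    {
        if (nextScheduled <= 0)
            return false;                  // non-periodic modes stop once exhausted
        if (--nextScheduled > 0)
            return false;
        if (periodic)
            nextScheduled = zealFrequency; // re-arm for the next period
        return true;
    }
};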
 
 /* Prevent compartments and zones from being collected during iteration. */
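Throughout this class, raw members become UnprotectedData<T>. The real wrapper lives in threading/ProtectedData.h, added elsewhere in this patch and not shown here; as a rough, assumption-laden sketch, such a wrapper stores the value and exposes it via ref() plus implicit conversions, so existing call sites like maxCapacity_.ref() and plain reads keep compiling while checked variants can layer on per-thread access assertions.

#include <utility>

// Hypothetical stand-in, not the real js::UnprotectedData.
template <typename T>
class UnprotectedDataSketch
{
    T value_;

  public:
    template <typename... Args>
    explicit UnprotectedDataSketch(Args&&... args)
      : value_(std::forward<Args>(args)...)
    {}

    T& ref() { return value_; }                  // explicit access, as in maxCapacity_.ref()
    const T& ref() const { return value_; }
    operator T&() { return value_; }             // implicit access for existing reads
    operator const T&() const { return value_; }
    UnprotectedDataSketch& operator=(const T& v) { value_ = v; return *this; }
};

// Usage in the spirit of the fields above:
//   UnprotectedDataSketch<bool> startedCompacting{false};
//   if (!startedCompacting) startedCompacting = true;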
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -1059,17 +1059,17 @@ static_assert(js::gc::ChunkLocationOffse
  * memory usage relationship between GCRuntime and Zones.
  */
 class HeapUsage
 {
     /*
      * A heap usage that contains our parent's heap usage, or null if this is
      * the top-level usage container.
      */
-    HeapUsage* parent_;
+    HeapUsage* const parent_;
 
     /*
      * The approximate number of bytes in use on the GC heap, to the nearest
      * ArenaSize. This does not include any malloc data. It also does not
      * include not-actively-used addresses that are still reserved at the OS
      * level for GC usage. It is atomic because it is updated by both the main
      * and GC helper threads.
      */
--- a/js/src/gc/Iteration.cpp
+++ b/js/src/gc/Iteration.cpp
@@ -29,80 +29,80 @@ IterateCompartmentsArenasCells(JSContext
         (*compartmentCallback)(cx, data, comp);
 
     for (auto thingKind : AllAllocKinds()) {
         JS::TraceKind traceKind = MapAllocToTraceKind(thingKind);
         size_t thingSize = Arena::thingSize(thingKind);
 
         for (ArenaIter aiter(zone, thingKind); !aiter.done(); aiter.next()) {
             Arena* arena = aiter.get();
-            (*arenaCallback)(cx, data, arena, traceKind, thingSize);
+            (*arenaCallback)(cx->runtime(), data, arena, traceKind, thingSize);
             for (ArenaCellIter iter(arena); !iter.done(); iter.next())
-                (*cellCallback)(cx, data, iter.getCell(), traceKind, thingSize);
+                (*cellCallback)(cx->runtime(), data, iter.getCell(), traceKind, thingSize);
         }
     }
 }
 
 void
 js::IterateZonesCompartmentsArenasCells(JSContext* cx, void* data,
                                         IterateZoneCallback zoneCallback,
                                         JSIterateCompartmentCallback compartmentCallback,
                                         IterateArenaCallback arenaCallback,
                                         IterateCellCallback cellCallback)
 {
     AutoPrepareForTracing prop(cx, WithAtoms);
 
-    for (ZonesIter zone(cx, WithAtoms); !zone.done(); zone.next()) {
-        (*zoneCallback)(cx, data, zone);
+    for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
+        (*zoneCallback)(cx->runtime(), data, zone);
         IterateCompartmentsArenasCells(cx, zone, data,
                                        compartmentCallback, arenaCallback, cellCallback);
     }
 }
 
 void
 js::IterateZoneCompartmentsArenasCells(JSContext* cx, Zone* zone, void* data,
                                        IterateZoneCallback zoneCallback,
                                        JSIterateCompartmentCallback compartmentCallback,
                                        IterateArenaCallback arenaCallback,
                                        IterateCellCallback cellCallback)
 {
     AutoPrepareForTracing prop(cx, WithAtoms);
 
-    (*zoneCallback)(cx, data, zone);
+    (*zoneCallback)(cx->runtime(), data, zone);
     IterateCompartmentsArenasCells(cx, zone, data,
                                    compartmentCallback, arenaCallback, cellCallback);
 }
 
 void
 js::IterateChunks(JSContext* cx, void* data, IterateChunkCallback chunkCallback)
 {
     AutoPrepareForTracing prep(cx, SkipAtoms);
 
-    for (auto chunk = cx->gc.allNonEmptyChunks(); !chunk.done(); chunk.next())
-        chunkCallback(cx, data, chunk);
+    for (auto chunk = cx->runtime()->gc.allNonEmptyChunks(); !chunk.done(); chunk.next())
+        chunkCallback(cx->runtime(), data, chunk);
 }
 
 void
 js::IterateScripts(JSContext* cx, JSCompartment* compartment,
                    void* data, IterateScriptCallback scriptCallback)
 {
-    MOZ_ASSERT(!cx->mainThread().suppressGC);
+    MOZ_ASSERT(!cx->suppressGC);
     AutoEmptyNursery empty(cx);
     AutoPrepareForTracing prep(cx, SkipAtoms);
 
     if (compartment) {
         Zone* zone = compartment->zone();
         for (auto script = zone->cellIter<JSScript>(empty); !script.done(); script.next()) {
             if (script->compartment() == compartment)
-                scriptCallback(cx, data, script);
+                scriptCallback(cx->runtime(), data, script);
         }
     } else {
-        for (ZonesIter zone(cx, SkipAtoms); !zone.done(); zone.next()) {
+        for (ZonesIter zone(cx->runtime(), SkipAtoms); !zone.done(); zone.next()) {
             for (auto script = zone->cellIter<JSScript>(empty); !script.done(); script.next())
-                scriptCallback(cx, data, script);
+                scriptCallback(cx->runtime(), data, script);
         }
     }
 }
 
 static void
 IterateGrayObjects(Zone* zone, GCThingCallback cellCallback, void* data)
 {
     for (auto kind : ObjectAllocKinds()) {
@@ -111,32 +111,31 @@ IterateGrayObjects(Zone* zone, GCThingCa
                 cellCallback(data, JS::GCCellPtr(obj.get()));
         }
     }
 }
 
 void
 js::IterateGrayObjects(Zone* zone, GCThingCallback cellCallback, void* data)
 {
-    JSRuntime* rt = zone->runtimeFromMainThread();
-    MOZ_ASSERT(!rt->isHeapBusy());
-    AutoPrepareForTracing prep(rt->contextFromMainThread(), SkipAtoms);
+    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
+    AutoPrepareForTracing prep(TlsContext.get(), SkipAtoms);
     ::IterateGrayObjects(zone, cellCallback, data);
 }
 
 void
 js::IterateGrayObjectsUnderCC(Zone* zone, GCThingCallback cellCallback, void* data)
 {
     mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
-    MOZ_ASSERT(rt->isCycleCollecting());
+    MOZ_ASSERT(JS::CurrentThreadIsHeapCycleCollecting());
     MOZ_ASSERT(!rt->gc.isIncrementalGCInProgress());
     ::IterateGrayObjects(zone, cellCallback, data);
 }
 
 JS_PUBLIC_API(void)
 JS_IterateCompartments(JSContext* cx, void* data,
                        JSIterateCompartmentCallback compartmentCallback)
 {
-    AutoTraceSession session(cx);
+    AutoTraceSession session(cx->runtime());
 
-    for (CompartmentsIter c(cx, WithAtoms); !c.done(); c.next())
+    for (CompartmentsIter c(cx->runtime(), WithAtoms); !c.done(); c.next())
         (*compartmentCallback)(cx, data, c);
 }
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -252,17 +252,19 @@ js::CheckTracedThing(JSTracer* trc, T* t
      * if it has not then we assume it is allocated, but if it has then it is
      * either free or uninitialized in which case we check the free list.
      *
      * Further complications are that background sweeping may be running and
      * concurrently modifying the free list and that tracing is done off main
      * thread during compacting GC and reading the contents of the thing by
      * IsThingPoisoned would be racy in this case.
      */
-    MOZ_ASSERT_IF(rt->isHeapBusy() && !zone->isGCCompacting() && !rt->gc.isBackgroundSweeping(),
+    MOZ_ASSERT_IF(JS::CurrentThreadIsHeapBusy() &&
+                  !zone->isGCCompacting() &&
+                  !rt->gc.isBackgroundSweeping(),
                   !IsThingPoisoned(thing) || !InFreeList(thing->asTenured().arena(), thing));
 #endif
 }
 
 template <typename S>
 struct CheckTracedFunctor : public VoidDefaultAdaptor<S> {
     template <typename T> void operator()(T* t, JSTracer* trc) { CheckTracedThing(trc, t); }
 };
@@ -720,17 +722,17 @@ GCMarker::markImplicitEdgesHelper(T mark
 {
     if (!isWeakMarkingTracer())
         return;
 
     Zone* zone = gc::TenuredCell::fromPointer(markedThing)->zone();
     MOZ_ASSERT(zone->isGCMarking());
     MOZ_ASSERT(!zone->isGCSweeping());
 
-    auto p = zone->gcWeakKeys.get(JS::GCCellPtr(markedThing));
+    auto p = zone->gcWeakKeys().get(JS::GCCellPtr(markedThing));
     if (!p)
         return;
     WeakEntryVector& markables = p->value;
 
     markEphemeronValues(markedThing, markables);
     markables.clear(); // If key address is reused, it should do nothing
 }
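A hedged, standalone sketch of the lookup pattern used above: when a weakmap key is marked, any ephemeron values recorded for it in the weak-keys table are marked too, and the entry is cleared so a reused key address does nothing. The types are simplified stand-ins, not the real gcWeakKeys() table.

#include <unordered_map>
#include <vector>

struct CellSketch { bool marked = false; };

using WeakEntryVectorSketch = std::vector<CellSketch*>;
using WeakKeyTableSketch = std::unordered_map<CellSketch*, WeakEntryVectorSketch>;

static void
MarkImplicitEdgesSketch(WeakKeyTableSketch& weakKeys, CellSketch* markedThing)
{
    auto p = weakKeys.find(markedThing);
    if (p == weakKeys.end())
        return;
    for (CellSketch* value : p->second)
        value->marked = true;        // stands in for markEphemeronValues()
    p->second.clear();               // reused key addresses then do nothing
}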
 
@@ -839,17 +841,17 @@ js::GCMarker::noteWeakEdge(T* edge)
 {
     static_assert(IsBaseOf<Cell, typename mozilla::RemovePointer<T>::Type>::value,
                   "edge must point to a GC pointer");
     MOZ_ASSERT((*edge)->isTenured());
 
     // Note: we really want the *source* Zone here. The edge may start in a
     // non-gc heap location, however, so we use the fact that cross-zone weak
     // references are not allowed and use the *target's* zone.
-    JS::Zone::WeakEdges &weakRefs = (*edge)->asTenured().zone()->gcWeakRefs;
+    JS::Zone::WeakEdges &weakRefs = (*edge)->asTenured().zone()->gcWeakRefs();
     AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!weakRefs.append(reinterpret_cast<TenuredCell**>(edge)))
         oomUnsafe.crash("Failed to record a weak edge for sweeping.");
 }
 
 // The simplest traversal calls out to the fully generic traceChildren function
 // to visit the child edges. In the absence of other traversal mechanisms, this
 // function will rapidly grow the stack past its bounds and crash the process.
@@ -1965,17 +1967,17 @@ MarkStack::reset()
         baseCapacity_ = capacity();
     }
     setStack(newStack, 0, baseCapacity_);
 }
 
 bool
 MarkStack::enlarge(unsigned count)
 {
-    size_t newCapacity = Min(maxCapacity_, capacity() * 2);
+    size_t newCapacity = Min(maxCapacity_.ref(), capacity() * 2);
     if (newCapacity < capacity() + count)
         return false;
 
     size_t tosIndex = position();
 
     MOZ_ASSERT(newCapacity != 0);
     uintptr_t* newStack = (uintptr_t*)js_realloc(stack_, sizeof(uintptr_t) * newCapacity);
     if (!newStack)
@@ -2053,17 +2055,17 @@ GCMarker::stop()
     MOZ_ASSERT(!unmarkedArenaStackTop);
     MOZ_ASSERT(markLaterArenas == 0);
 #endif
 
     /* Free non-ballast stack memory. */
     stack.reset();
     AutoEnterOOMUnsafeRegion oomUnsafe;
     for (GCZonesIter zone(runtime()); !zone.done(); zone.next()) {
-        if (!zone->gcWeakKeys.clear())
+        if (!zone->gcWeakKeys().clear())
             oomUnsafe.crash("clearing weak keys in GCMarker::stop()");
     }
 }
 
 void
 GCMarker::reset()
 {
     color = BLACK;
@@ -2099,17 +2101,17 @@ GCMarker::enterWeakMarkingMode()
     // weakmaps -- or more precisely, the keys of marked weakmaps that are
     // mapped to not yet live values. (Once bug 1167452 implements incremental
     // weakmap marking, this initialization step will become unnecessary, as
     // the table will already hold all such keys.)
     if (weakMapAction() == ExpandWeakMaps) {
         tag_ = TracerKindTag::WeakMarking;
 
         for (GCZoneGroupIter zone(runtime()); !zone.done(); zone.next()) {
-            for (WeakMapBase* m : zone->gcWeakMapList) {
+            for (WeakMapBase* m : zone->gcWeakMapList()) {
                 if (m->marked)
                     (void) m->markIteratively(this);
             }
         }
     }
 }
 
 void
@@ -2118,17 +2120,17 @@ GCMarker::leaveWeakMarkingMode()
     MOZ_ASSERT_IF(weakMapAction() == ExpandWeakMaps && !linearWeakMarkingDisabled_,
                   tag_ == TracerKindTag::WeakMarking);
     tag_ = TracerKindTag::Marking;
 
     // Table is expensive to maintain when not in weak marking mode, so we'll
     // rebuild it upon entry rather than allow it to contain stale data.
     AutoEnterOOMUnsafeRegion oomUnsafe;
     for (GCZonesIter zone(runtime()); !zone.done(); zone.next()) {
-        if (!zone->gcWeakKeys.clear())
+        if (!zone->gcWeakKeys().clear())
             oomUnsafe.crash("clearing weak keys in GCMarker::leaveWeakMarkingMode()");
     }
 }
 
 void
 GCMarker::markDelayedChildren(Arena* arena)
 {
     if (arena->markOverflow) {
@@ -2153,17 +2155,17 @@ GCMarker::markDelayedChildren(Arena* are
      * allocatedDuringIncremental flag if we continue marking.
      */
 }
 
 bool
 GCMarker::markDelayedChildren(SliceBudget& budget)
 {
     GCRuntime& gc = runtime()->gc;
-    gcstats::AutoPhase ap(gc.stats, gc.state() == State::Mark, gcstats::PHASE_MARK_DELAYED);
+    gcstats::AutoPhase ap(gc.stats(), gc.state() == State::Mark, gcstats::PHASE_MARK_DELAYED);
 
     MOZ_ASSERT(unmarkedArenaStackTop);
     do {
         /*
          * If marking gets delayed at the same arena again, we must repeat
          * marking of its things. For that we pop arena from the stack and
          * clear its hasDelayedMarking flag before we begin the marking.
          */
@@ -2217,17 +2219,17 @@ GCMarker::checkZone(void* p)
 }
 #endif
 
 size_t
 GCMarker::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
 {
     size_t size = stack.sizeOfExcludingThis(mallocSizeOf);
     for (ZonesIter zone(runtime(), WithAtoms); !zone.done(); zone.next())
-        size += zone->gcGrayRoots.sizeOfExcludingThis(mallocSizeOf);
+        size += zone->gcGrayRoots().sizeOfExcludingThis(mallocSizeOf);
     return size;
 }
 
 
 /*** Tenuring Tracer *****************************************************************************/
 
 namespace js {
 template <typename T>
@@ -2676,17 +2678,17 @@ CheckIsMarkedThing(T* thingp)
 #undef IS_SAME_TYPE_OR
 
 #ifdef DEBUG
     MOZ_ASSERT(thingp);
     MOZ_ASSERT(*thingp);
     JSRuntime* rt = (*thingp)->runtimeFromAnyThread();
     MOZ_ASSERT_IF(!ThingIsPermanentAtomOrWellKnownSymbol(*thingp),
                   CurrentThreadCanAccessRuntime(rt) ||
-                  (rt->isHeapCollecting() && rt->gc.state() == State::Sweep));
+                  (JS::CurrentThreadIsHeapCollecting() && rt->gc.state() == State::Sweep));
 #endif
 }
 
 template <typename T>
 static bool
 IsMarkedInternalCommon(T* thingp)
 {
     CheckIsMarkedThing(thingp);
@@ -2714,17 +2716,17 @@ template <>
 /* static */ bool
 IsMarkedInternal(JSRuntime* rt, JSObject** thingp)
 {
     if (IsOwnedByOtherRuntime(rt, *thingp))
         return true;
 
     if (IsInsideNursery(*thingp)) {
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
-        return rt->gc.nursery.getForwardedPointer(thingp);
+        return rt->zoneGroupFromMainThread()->nursery().getForwardedPointer(thingp);
     }
     return IsMarkedInternalCommon(thingp);
 }
 
 template <typename S>
 struct IsMarkedFunctor : public IdentityDefaultAdaptor<S> {
     template <typename T> S operator()(T* t, JSRuntime* rt, bool* rv) {
         *rv = IsMarkedInternal(rt, &t);
@@ -2755,23 +2757,22 @@ template <typename T>
 static bool
 IsAboutToBeFinalizedInternal(T** thingp)
 {
     CheckIsMarkedThing(thingp);
     T* thing = *thingp;
     JSRuntime* rt = thing->runtimeFromAnyThread();
 
     /* Permanent atoms are never finalized by non-owning runtimes. */
-    if (ThingIsPermanentAtomOrWellKnownSymbol(thing) && !TlsPerThreadData.get()->associatedWith(rt))
+    if (ThingIsPermanentAtomOrWellKnownSymbol(thing) && TlsContext.get()->runtime() != rt)
         return false;
 
-    Nursery& nursery = rt->gc.nursery;
     if (IsInsideNursery(thing)) {
-        MOZ_ASSERT(rt->isHeapMinorCollecting());
-        return !nursery.getForwardedPointer(reinterpret_cast<JSObject**>(thingp));
+        MOZ_ASSERT(JS::CurrentThreadIsHeapMinorCollecting());
+        return !Nursery::getForwardedPointer(reinterpret_cast<JSObject**>(thingp));
     }
 
     Zone* zone = thing->asTenured().zoneFromAnyThread();
     if (zone->isGCSweeping()) {
         return IsAboutToBeFinalizedDuringSweep(thing->asTenured());
     } else if (zone->isGCCompacting() && IsForwarded(thing)) {
         *thingp = Forwarded(thing);
         return false;
@@ -2928,23 +2929,23 @@ struct UnmarkGrayTracer : public JS::Cal
  * - To prevent any incorrectly gray objects from escaping to live JS outside
  *   of the containers, we must add unmark-graying read barriers to these
  *   containers.
  */
 void
 UnmarkGrayTracer::onChild(const JS::GCCellPtr& thing)
 {
     int stackDummy;
-    JSContext* cx = runtime()->contextFromMainThread();
-    if (!JS_CHECK_STACK_SIZE(cx->nativeStackLimit[StackForSystemCode], &stackDummy)) {
+    JSContext* cx = TlsContext.get();
+    if (!JS_CHECK_STACK_SIZE(cx->nativeStackLimit[JS::StackForSystemCode], &stackDummy)) {
         /*
          * If we run out of stack, we take a more drastic measure: require that
          * we GC again before the next CC.
          */
-        runtime()->setGCGrayBitsValid(false);
+        runtime()->gc.setGrayBitsInvalid();
         return;
     }
 
     Cell* cell = thing.asCell();
 
     // Cells in the nursery cannot be gray, and therefore must necessarily point
     // to only black edges.
     if (!cell->isTenured()) {
@@ -2995,31 +2996,31 @@ UnmarkGrayTracer::onChild(const JS::GCCe
 
 template <typename T>
 static bool
 TypedUnmarkGrayCellRecursively(T* t)
 {
     MOZ_ASSERT(t);
 
     JSRuntime* rt = t->runtimeFromMainThread();
-    MOZ_ASSERT(!rt->isHeapCollecting());
-    MOZ_ASSERT(!rt->isCycleCollecting());
+    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(!JS::CurrentThreadIsHeapCycleCollecting());
 
     bool unmarkedArg = false;
     if (t->isTenured()) {
         if (!t->asTenured().isMarked(GRAY))
             return false;
 
         t->asTenured().unmark(GRAY);
         unmarkedArg = true;
     }
 
     UnmarkGrayTracer trc(rt);
-    gcstats::AutoPhase outerPhase(rt->gc.stats, gcstats::PHASE_BARRIER);
-    gcstats::AutoPhase innerPhase(rt->gc.stats, gcstats::PHASE_UNMARK_GRAY);
+    gcstats::AutoPhase outerPhase(rt->gc.stats(), gcstats::PHASE_BARRIER);
+    gcstats::AutoPhase innerPhase(rt->gc.stats(), gcstats::PHASE_UNMARK_GRAY);
     t->traceChildren(&trc);
 
     return unmarkedArg || trc.unmarkedAny;
 }
 
 struct UnmarkGrayCellRecursivelyFunctor {
     template <typename T> bool operator()(T* t) { return TypedUnmarkGrayCellRecursively(t); }
 };
--- a/js/src/gc/Marking.h
+++ b/js/src/gc/Marking.h
@@ -14,16 +14,17 @@
 
 #include "ds/OrderedHashTable.h"
 #include "gc/Heap.h"
 #include "gc/Tracer.h"
 #include "js/GCAPI.h"
 #include "js/HeapAPI.h"
 #include "js/SliceBudget.h"
 #include "js/TracingAPI.h"
+#include "threading/ProtectedData.h"
 #include "vm/TaggedProto.h"
 
 class JSLinearString;
 class JSRope;
 namespace js {
 class BaseShape;
 class GCMarker;
 class LazyScript;
@@ -51,23 +52,23 @@ static const size_t INCREMENTAL_MARK_STA
  * stack list with the pointer to stack top in GCMarker::unmarkedArenaStackTop.
  * GCMarker::delayMarkingChildren adds arenas to the stack as necessary while
  * markDelayedChildren pops the arenas from the stack until it empties.
  */
 class MarkStack
 {
     friend class GCMarker;
 
-    uintptr_t* stack_;
-    uintptr_t* tos_;
-    uintptr_t* end_;
+    UnprotectedData<uintptr_t*> stack_;
+    UnprotectedData<uintptr_t*> tos_;
+    UnprotectedData<uintptr_t*> end_;
 
     // The capacity we start with and reset() to.
-    size_t baseCapacity_;
-    size_t maxCapacity_;
+    UnprotectedData<size_t> baseCapacity_;
+    UnprotectedData<size_t> maxCapacity_;
 
   public:
     explicit MarkStack(size_t maxCapacity)
       : stack_(nullptr),
         tos_(nullptr),
         end_(nullptr),
         baseCapacity_(0),
         maxCapacity_(maxCapacity)
@@ -330,39 +331,39 @@ class GCMarker : public JSTracer
     MOZ_MUST_USE bool restoreValueArray(JSObject* obj, void** vpp, void** endp);
     void saveValueRanges();
     inline void processMarkStackTop(SliceBudget& budget);
 
     /* The mark stack. Pointers in this stack are "gray" in the GC sense. */
     MarkStack stack;
 
     /* The color is only applied to objects and functions. */
-    uint32_t color;
+    UnprotectedData<uint32_t> color;
 
     /* Pointer to the top of the stack of arenas we are delaying marking on. */
-    js::gc::Arena* unmarkedArenaStackTop;
+    UnprotectedData<js::gc::Arena*> unmarkedArenaStackTop;
 
     /*
      * If the weakKeys table OOMs, disable the linear algorithm and fall back
      * to iterating until the next GC.
      */
-    bool linearWeakMarkingDisabled_;
+    UnprotectedData<bool> linearWeakMarkingDisabled_;
 
 #ifdef DEBUG
     /* Count of arenas that are currently in the stack. */
-    size_t markLaterArenas;
+    UnprotectedData<size_t> markLaterArenas;
 
     /* Assert that start and stop are called with correct ordering. */
-    bool started;
+    UnprotectedData<bool> started;
 
     /*
      * If this is true, all marked objects must belong to a compartment being
      * GCed. This is used to look for compartment bugs.
      */
-    bool strictCompartmentChecking;
+    UnprotectedData<bool> strictCompartmentChecking;
 #endif // DEBUG
 };
 
 #ifdef DEBUG
 // Return true if this trace is happening on behalf of gray buffering during
 // the marking phase of incremental GC.
 bool
 IsBufferGrayRootsTracer(JSTracer* trc);
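The header comment above describes arenas whose marking is delayed being chained into a stack that delayMarkingChildren pushes onto and markDelayedChildren drains. A minimal illustration of that intrusive singly-linked stack, with hypothetical field names:

struct ArenaSketch
{
    ArenaSketch* nextDelayedMarking = nullptr;
    bool hasDelayedMarking = false;
};

struct DelayedMarkingStackSketch
{
    ArenaSketch* top = nullptr;

    void push(ArenaSketch* arena)
    {
        if (arena->hasDelayedMarking)
            return;                       // already queued
        arena->hasDelayedMarking = true;
        arena->nextDelayedMarking = top;
        top = arena;
    }

    ArenaSketch* pop()
    {
        ArenaSketch* arena = top;
        if (!arena)
            return nullptr;
        top = arena->nextDelayedMarking;
        arena->hasDelayedMarking = false;
        arena->nextDelayedMarking = nullptr;
        return arena;
    }
};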
--- a/js/src/gc/MemoryProfiler.cpp
+++ b/js/src/gc/MemoryProfiler.cpp
@@ -25,17 +25,17 @@ GCHeapProfiler*
 MemProfiler::GetGCHeapProfiler(JSRuntime* runtime)
 {
     return runtime->gc.mMemProfiler.mGCHeapProfiler;
 }
 
 MemProfiler*
 MemProfiler::GetMemProfiler(JSContext* context)
 {
-    return &context->gc.mMemProfiler;
+    return &context->runtime()->gc.mMemProfiler;
 }
 
 void
 MemProfiler::start(GCHeapProfiler* aGCHeapProfiler)
 {
     ReleaseAllJITCode(mRuntime->defaultFreeOp());
     mGCHeapProfiler = aGCHeapProfiler;
     sActiveProfilerCount++;
--- a/js/src/gc/Nursery-inl.h
+++ b/js/src/gc/Nursery-inl.h
@@ -12,75 +12,72 @@
 
 #include "jscntxt.h"
 
 #include "gc/Heap.h"
 #include "gc/Zone.h"
 #include "js/TracingAPI.h"
 #include "vm/Runtime.h"
 
-MOZ_ALWAYS_INLINE bool
-js::Nursery::getForwardedPointer(JSObject** ref) const
+MOZ_ALWAYS_INLINE /* static */ bool
+js::Nursery::getForwardedPointer(JSObject** ref)
 {
     MOZ_ASSERT(ref);
-    MOZ_ASSERT(isInside((void*)*ref));
+    MOZ_ASSERT(IsInsideNursery(*ref));
     const gc::RelocationOverlay* overlay = reinterpret_cast<const gc::RelocationOverlay*>(*ref);
     if (!overlay->isForwarded())
         return false;
     *ref = static_cast<JSObject*>(overlay->forwardingAddress());
     return true;
 }
 
 namespace js {
 
 // The allocation methods below will not run the garbage collector. If the
 // nursery cannot accommodate the allocation, the malloc heap will be used
 // instead.
 
 template <typename T>
 static inline T*
-AllocateObjectBuffer(ExclusiveContext* cx, uint32_t count)
+AllocateObjectBuffer(JSContext* cx, uint32_t count)
 {
-    if (cx->isJSContext()) {
-        Nursery& nursery = cx->asJSContext()->runtime()->gc.nursery;
+    if (!cx->helperThread()) {
         size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
-        T* buffer = static_cast<T*>(nursery.allocateBuffer(cx->zone(), nbytes));
+        T* buffer = static_cast<T*>(cx->nursery().allocateBuffer(cx->zone(), nbytes));
         if (!buffer)
             ReportOutOfMemory(cx);
         return buffer;
     }
     return cx->zone()->pod_malloc<T>(count);
 }
 
 template <typename T>
 static inline T*
-AllocateObjectBuffer(ExclusiveContext* cx, JSObject* obj, uint32_t count)
+AllocateObjectBuffer(JSContext* cx, JSObject* obj, uint32_t count)
 {
-    if (cx->isJSContext()) {
-        Nursery& nursery = cx->asJSContext()->runtime()->gc.nursery;
+    if (!cx->helperThread()) {
         size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
-        T* buffer = static_cast<T*>(nursery.allocateBuffer(obj, nbytes));
+        T* buffer = static_cast<T*>(cx->nursery().allocateBuffer(obj, nbytes));
         if (!buffer)
             ReportOutOfMemory(cx);
         return buffer;
     }
     return obj->zone()->pod_malloc<T>(count);
 }
 
 // If this returns null then the old buffer will be left alone.
 template <typename T>
 static inline T*
-ReallocateObjectBuffer(ExclusiveContext* cx, JSObject* obj, T* oldBuffer,
+ReallocateObjectBuffer(JSContext* cx, JSObject* obj, T* oldBuffer,
                        uint32_t oldCount, uint32_t newCount)
 {
-    if (cx->isJSContext()) {
-        Nursery& nursery = cx->asJSContext()->runtime()->gc.nursery;
-        T* buffer =  static_cast<T*>(nursery.reallocateBuffer(obj, oldBuffer,
-                                                              oldCount * sizeof(T),
-                                                              newCount * sizeof(T)));
+    if (!cx->helperThread()) {
+        T* buffer =  static_cast<T*>(cx->nursery().reallocateBuffer(obj, oldBuffer,
+                                                                    oldCount * sizeof(T),
+                                                                    newCount * sizeof(T)));
         if (!buffer)
             ReportOutOfMemory(cx);
         return buffer;
     }
     return obj->zone()->pod_realloc<T>(oldBuffer, oldCount, newCount);
 }
 
 } // namespace js
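As a rough illustration of the comment above, allocations the nursery cannot accommodate fall back to the malloc heap, and helper threads never touch the nursery at all. A reduced, self-contained version of that pattern, with a plain bump allocator standing in for Nursery::allocateBuffer:

#include <cstddef>
#include <cstdint>
#include <cstdlib>

struct BumpNurserySketch
{
    uint8_t* position;
    uint8_t* end;

    void* tryAllocate(size_t nbytes)
    {
        if (static_cast<size_t>(end - position) < nbytes)
            return nullptr;          // nursery chunk is full
        void* result = position;
        position += nbytes;
        return result;
    }
};

static void*
AllocateBufferSketch(BumpNurserySketch& nursery, bool onHelperThread, size_t nbytes)
{
    if (!onHelperThread) {
        if (void* buffer = nursery.tryAllocate(nbytes))
            return buffer;
    }
    return std::malloc(nbytes);      // helper threads and overflow use the malloc heap
}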
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -41,17 +41,17 @@ using mozilla::DebugOnly;
 using mozilla::PodCopy;
 using mozilla::TimeDuration;
 using mozilla::TimeStamp;
 
 static const uintptr_t CanaryMagicValue = 0xDEADB15D;
 
 struct js::Nursery::FreeMallocedBuffersTask : public GCParallelTask
 {
-    explicit FreeMallocedBuffersTask(FreeOp* fop) : fop_(fop) {}
+    explicit FreeMallocedBuffersTask(FreeOp* fop) : GCParallelTask(fop->runtime()), fop_(fop) {}
     bool init() { return buffers_.init(); }
     void transferBuffersToFree(MallocedBuffersSet& buffersToFree,
                                const AutoLockHelperThreadState& lock);
     ~FreeMallocedBuffersTask() override { join(); }
 
   private:
     FreeOp* fop_;
     MallocedBuffersSet buffers_;
@@ -88,35 +88,35 @@ js::Nursery::NurseryChunk::poisonAndInit
 {
     JS_POISON(this, poison, ChunkSize);
     init(rt);
 }
 
 inline void
 js::Nursery::NurseryChunk::init(JSRuntime* rt)
 {
-    new (&trailer) gc::ChunkTrailer(rt, &rt->gc.storeBuffer);
+    new (&trailer) gc::ChunkTrailer(rt, &rt->zoneGroupFromMainThread()->storeBuffer());
 }
 
 /* static */ inline js::Nursery::NurseryChunk*
 js::Nursery::NurseryChunk::fromChunk(Chunk* chunk)
 {
     return reinterpret_cast<NurseryChunk*>(chunk);
 }
 
 inline Chunk*
 js::Nursery::NurseryChunk::toChunk(JSRuntime* rt)
 {
     auto chunk = reinterpret_cast<Chunk*>(this);
     chunk->init(rt);
     return chunk;
 }
 
-js::Nursery::Nursery(JSRuntime* rt)
-  : runtime_(rt)
+js::Nursery::Nursery(ZoneGroup* group)
+  : zoneGroup_(group)
   , position_(0)
   , currentStartChunk_(0)
   , currentStartPosition_(0)
   , currentEnd_(0)
   , currentChunk_(0)
   , maxNurseryChunks_(0)
   , previousPromotionRate_(0)
   , profileThreshold_(0)
@@ -131,27 +131,27 @@ js::Nursery::Nursery(JSRuntime* rt)
 {}
 
 bool
 js::Nursery::init(uint32_t maxNurseryBytes, AutoLockGC& lock)
 {
     /* maxNurseryBytes parameter is rounded down to a multiple of chunk size. */
     maxNurseryChunks_ = maxNurseryBytes >> ChunkShift;
 
-    /* If no chunks are specified then the nursery is permenantly disabled. */
+    /* If no chunks are specified then the nursery is permanently disabled. */
     if (maxNurseryChunks_ == 0)
         return true;
 
     if (!mallocedBuffers.init())
         return false;
 
     if (!cellsWithUid_.init())
         return false;
 
-    freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(runtime()->defaultFreeOp());
+    freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(zoneGroup()->runtime->defaultFreeOp());
     if (!freeMallocedBuffersTask || !freeMallocedBuffersTask->init())
         return false;
 
     AutoMaybeStartBackgroundAllocation maybeBgAlloc;
     updateNumChunksLocked(1, maybeBgAlloc, lock);
     if (numChunks() == 0)
         return false;
 
@@ -174,71 +174,70 @@ js::Nursery::init(uint32_t maxNurseryByt
         if (0 == strcmp(env, "help")) {
             fprintf(stderr, "JS_GC_REPORT_TENURING=N\n"
                     "\tAfter a minor GC, report any ObjectGroups with at least N instances tenured.\n");
             exit(0);
         }
         reportTenurings_ = atoi(env);
     }
 
-    if (!runtime()->gc.storeBuffer.enable())
+    if (!zoneGroup()->storeBuffer().enable())
         return false;
 
     MOZ_ASSERT(isEnabled());
     return true;
 }
 
 js::Nursery::~Nursery()
 {
     disable();
     js_delete(freeMallocedBuffersTask);
 }
 
 void
 js::Nursery::enable()
 {
     MOZ_ASSERT(isEmpty());
-    MOZ_ASSERT(!runtime()->gc.isVerifyPreBarriersEnabled());
-    if (isEnabled())
+    MOZ_ASSERT(!zoneGroup()->runtime->gc.isVerifyPreBarriersEnabled());
+    if (isEnabled() || !maxChunks())
         return;
 
     updateNumChunks(1);
     if (numChunks() == 0)
         return;
 
     setCurrentChunk(0);
     setStartPosition();
 #ifdef JS_GC_ZEAL
-    if (runtime()->hasZealMode(ZealMode::GenerationalGC))
+    if (zoneGroup()->runtime->hasZealMode(ZealMode::GenerationalGC))
         enterZealMode();
 #endif
 
-    MOZ_ALWAYS_TRUE(runtime()->gc.storeBuffer.enable());
+    MOZ_ALWAYS_TRUE(zoneGroup()->storeBuffer().enable());
     return;
 }
 
 void
 js::Nursery::disable()
 {
     MOZ_ASSERT(isEmpty());
     if (!isEnabled())
         return;
     updateNumChunks(0);
     currentEnd_ = 0;
-    runtime()->gc.storeBuffer.disable();
+    zoneGroup()->storeBuffer().disable();
 }
 
 bool
 js::Nursery::isEmpty() const
 {
-    MOZ_ASSERT(runtime_);
     if (!isEnabled())
         return true;
 
-    if (!runtime_->hasZealMode(ZealMode::GenerationalGC)) {
+    if (!zoneGroup()->runtime->hasZealMode(ZealMode::GenerationalGC)) {
         MOZ_ASSERT(currentStartChunk_ == 0);
         MOZ_ASSERT(currentStartPosition_ == chunk(0).start());
     }
     return position() == currentStartPosition_;
 }
 
 #ifdef JS_GC_ZEAL
 void
@@ -292,40 +291,40 @@ js::Nursery::allocateObject(JSContext* c
     TraceNurseryAlloc(obj, size);
     return obj;
 }
 
 void*
 js::Nursery::allocate(size_t size)
 {
     MOZ_ASSERT(isEnabled());
-    MOZ_ASSERT(!runtime()->isHeapBusy());
+    MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
     MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_, position() >= currentStartPosition_);
     MOZ_ASSERT(position() % gc::CellSize == 0);
     MOZ_ASSERT(size % gc::CellSize == 0);
 
 #ifdef JS_GC_ZEAL
     static const size_t CanarySize = (sizeof(Nursery::Canary) + CellSize - 1) & ~CellMask;
-    if (runtime()->gc.hasZealMode(ZealMode::CheckNursery))
+    if (zoneGroup()->runtime->gc.hasZealMode(ZealMode::CheckNursery))
         size += CanarySize;
 #endif
 
     if (currentEnd() < position() + size) {
         if (currentChunk_ + 1 == numChunks())
             return nullptr;
         setCurrentChunk(currentChunk_ + 1);
     }
 
     void* thing = (void*)position();
     position_ = position() + size;
 
     JS_EXTRA_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size);
 
 #ifdef JS_GC_ZEAL
-    if (runtime()->gc.hasZealMode(ZealMode::CheckNursery)) {
+    if (zoneGroup()->runtime->gc.hasZealMode(ZealMode::CheckNursery)) {
         auto canary = reinterpret_cast<Canary*>(position() - CanarySize);
         canary->magicValue = CanaryMagicValue;
         canary->next = nullptr;
         if (lastCanary_) {
             MOZ_ASSERT(!lastCanary_->next);
             lastCanary_->next = canary;
         }
         lastCanary_ = canary;
@@ -539,69 +538,70 @@ js::Nursery::maybeStartProfile(ProfileKe
 inline void
 js::Nursery::maybeEndProfile(ProfileKey key)
 {
     if (enableProfiling_)
         endProfile(key);
 }
 
 void
-js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
+js::Nursery::collect(JS::gcreason::Reason reason)
 {
-    MOZ_ASSERT(!rt->mainThread.suppressGC);
-    MOZ_RELEASE_ASSERT(CurrentThreadCanAccessRuntime(rt));
+    MOZ_ASSERT(!TlsContext.get()->suppressGC);
+    MOZ_RELEASE_ASSERT(TlsContext.get() == zoneGroup()->context);
 
     if (!isEnabled() || isEmpty()) {
         // Our barriers are not always exact, and there may be entries in the
         // storebuffer even when the nursery is disabled or empty. It's not safe
         // to keep these entries as they may refer to tenured cells which may be
         // freed after this point.
-        rt->gc.storeBuffer.clear();
+        zoneGroup()->storeBuffer().clear();
     }
 
     if (!isEnabled())
         return;
 
+    JSRuntime* rt = zoneGroup()->runtime;
     rt->gc.incMinorGcNumber();
 
 #ifdef JS_GC_ZEAL
     if (rt->gc.hasZealMode(ZealMode::CheckNursery)) {
         for (auto canary = lastCanary_; canary; canary = canary->next)
             MOZ_ASSERT(canary->magicValue == CanaryMagicValue);
     }
     lastCanary_ = nullptr;
 #endif
 
-    rt->gc.stats.beginNurseryCollection(reason);
+    rt->gc.stats().beginNurseryCollection(reason);
     TraceMinorGCStart();
 
     startProfile(ProfileKey::Total);
 
     // The hazard analysis thinks doCollection can invalidate pointers in
     // tenureCounts below.
     JS::AutoSuppressGCAnalysis nogc;
 
     TenureCountCache tenureCounts;
     double promotionRate = 0;
     if (!isEmpty())
-        promotionRate = doCollection(rt, reason, tenureCounts);
+        promotionRate = doCollection(reason, tenureCounts);
 
     // Resize the nursery.
     maybeStartProfile(ProfileKey::Resize);
     maybeResizeNursery(reason, promotionRate);
     maybeEndProfile(ProfileKey::Resize);
 
     // If we are promoting the nursery, or exhausted the store buffer with
     // pointers to nursery things, which will force a collection well before
     // the nursery is full, look for object groups that are getting promoted
     // excessively and try to pretenure them.
     maybeStartProfile(ProfileKey::Pretenure);
     uint32_t pretenureCount = 0;
     if (promotionRate > 0.8 || reason == JS::gcreason::FULL_STORE_BUFFER) {
-        JSContext* cx = rt->contextFromMainThread();
+        JSContext* cx = TlsContext.get();
         for (auto& entry : tenureCounts.entries) {
             if (entry.count >= 3000) {
                 ObjectGroup* group = entry.group;
                 if (group->canPreTenure()) {
                     AutoCompartment ac(cx, group->compartment());
                     group->setShouldPreTenure(cx);
                     pretenureCount++;
                 }
@@ -622,21 +622,21 @@ js::Nursery::collect(JSRuntime* rt, JS::
     TimeDuration totalTime = profileDurations_[ProfileKey::Total];
     rt->addTelemetry(JS_TELEMETRY_GC_MINOR_US, totalTime.ToMicroseconds());
     rt->addTelemetry(JS_TELEMETRY_GC_MINOR_REASON, reason);
     if (totalTime.ToMilliseconds() > 1.0)
         rt->addTelemetry(JS_TELEMETRY_GC_MINOR_REASON_LONG, reason);
     rt->addTelemetry(JS_TELEMETRY_GC_NURSERY_BYTES, sizeOfHeapCommitted());
     rt->addTelemetry(JS_TELEMETRY_GC_PRETENURE_COUNT, pretenureCount);
 
-    rt->gc.stats.endNurseryCollection(reason);
+    rt->gc.stats().endNurseryCollection(reason);
     TraceMinorGCEnd();
 
     if (enableProfiling_ && totalTime >= profileThreshold_) {
-        rt->gc.stats.maybePrintProfileHeaders();
+        rt->gc.stats().maybePrintProfileHeaders();
 
         fprintf(stderr, "MinorGC: %20s %5.1f%% %4u ",
                 JS::gcreason::ExplainReason(reason),
                 promotionRate * 100,
                 numChunks());
         printProfileDurations(profileDurations_);
 
         if (reportTenurings_) {
@@ -646,32 +646,33 @@ js::Nursery::collect(JSRuntime* rt, JS::
                     entry.group->print();
                 }
             }
         }
     }
 }
 
 double
-js::Nursery::doCollection(JSRuntime* rt, JS::gcreason::Reason reason,
+js::Nursery::doCollection(JS::gcreason::Reason reason,
                           TenureCountCache& tenureCounts)
 {
+    JSRuntime* rt = zoneGroup()->runtime;
     AutoTraceSession session(rt, JS::HeapState::MinorCollecting);
     AutoSetThreadIsPerformingGC performingGC;
     AutoStopVerifyingBarriers av(rt, false);
-    AutoDisableProxyCheck disableStrictProxyChecking(rt);
+    AutoDisableProxyCheck disableStrictProxyChecking;
     mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;
 
     size_t initialNurserySize = spaceToEnd();
 
     // Move objects pointed to by roots from the nursery to the major heap.
     TenuringTracer mover(rt, this);
 
     // Mark the store buffer. This must happen first.
-    StoreBuffer& sb = rt->gc.storeBuffer;
+    StoreBuffer& sb = zoneGroup()->storeBuffer();
 
     // The MIR graph only contains nursery pointers if cancelIonCompilations()
     // is set on the store buffer, in which case we cancel all compilations.
     maybeStartProfile(ProfileKey::CancelIonCompilations);
     if (sb.cancelIonCompilations())
         js::CancelOffThreadIonCompile(rt);
     maybeEndProfile(ProfileKey::CancelIonCompilations);
 
@@ -696,23 +697,23 @@ js::Nursery::doCollection(JSRuntime* rt,
     maybeEndProfile(ProfileKey::TraceGenericEntries);
 
     maybeStartProfile(ProfileKey::MarkRuntime);
     rt->gc.traceRuntimeForMinorGC(&mover, session.lock);
     maybeEndProfile(ProfileKey::MarkRuntime);
 
     maybeStartProfile(ProfileKey::MarkDebugger);
     {
-        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_MARK_ROOTS);
+        gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_MARK_ROOTS);
         Debugger::traceAll(&mover);
     }
     maybeEndProfile(ProfileKey::MarkDebugger);
 
     maybeStartProfile(ProfileKey::ClearNewObjectCache);
-    rt->contextFromMainThread()->caches.newObjectCache.clearNurseryObjects(rt);
+    zoneGroup()->caches().newObjectCache.clearNurseryObjects(zoneGroup());
     maybeEndProfile(ProfileKey::ClearNewObjectCache);
 
     // Most of the work is done here. This loop iterates over objects that have
     // been moved to the major heap. If these objects have any outgoing pointers
     // to the nursery, then those nursery objects get moved as well, until no
     // objects are left to move. That is, we iterate to a fixed point.
     maybeStartProfile(ProfileKey::CollectToFP);
     collectToFixedPoint(mover, tenureCounts);
@@ -739,17 +740,17 @@ js::Nursery::doCollection(JSRuntime* rt,
     freeMallocedBuffers();
     maybeEndProfile(ProfileKey::FreeMallocedBuffers);
 
     maybeStartProfile(ProfileKey::Sweep);
     sweep();
     maybeEndProfile(ProfileKey::Sweep);
 
     maybeStartProfile(ProfileKey::ClearStoreBuffer);
-    rt->gc.storeBuffer.clear();
+    zoneGroup()->storeBuffer().clear();
     maybeEndProfile(ProfileKey::ClearStoreBuffer);
 
     // Make sure hashtables have been updated after the collection.
     maybeStartProfile(ProfileKey::CheckHashTables);
 #ifdef JS_GC_ZEAL
     if (rt->hasZealMode(ZealMode::CheckHashTablesOnMinorGC))
         CheckHashTablesAfterMovingGC(rt);
 #endif
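For reference, the "iterate to a fixed point" step noted above can be pictured as a worklist: each object promoted out of the nursery is queued, tracing it may promote (and queue) further nursery objects, and collection finishes when the queue drains. A self-contained sketch with illustrative stand-in types:

#include <deque>
#include <vector>

struct PromotedObjSketch
{
    bool inNursery = true;
    bool promoted = false;
    std::vector<PromotedObjSketch*> edges;   // outgoing pointers
};

static void
CollectToFixedPointSketch(std::deque<PromotedObjSketch*>& worklist)
{
    while (!worklist.empty()) {
        PromotedObjSketch* obj = worklist.front();
        worklist.pop_front();
        for (PromotedObjSketch* child : obj->edges) {
            if (child->inNursery && !child->promoted) {
                child->promoted = true;      // stands in for moving the cell to the tenured heap
                worklist.push_back(child);
            }
        }
    }
}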
@@ -788,17 +789,17 @@ js::Nursery::freeMallocedBuffers()
     {
         AutoLockHelperThreadState lock;
         freeMallocedBuffersTask->joinWithLockHeld(lock);
         freeMallocedBuffersTask->transferBuffersToFree(mallocedBuffers, lock);
         started = freeMallocedBuffersTask->startWithLockHeld(lock);
     }
 
     if (!started)
-        freeMallocedBuffersTask->runFromMainThread(runtime());
+        freeMallocedBuffersTask->runFromMainThread(zoneGroup()->runtime);
 
     MOZ_ASSERT(mallocedBuffers.empty());
 }
 
 void
 js::Nursery::waitBackgroundFreeEnd()
 {
     // We may finishRoots before nursery init if runtime init fails.
@@ -823,35 +824,35 @@ js::Nursery::sweep()
     cellsWithUid_.clear();
 
     runSweepActions();
     sweepDictionaryModeObjects();
 
 #ifdef JS_GC_ZEAL
     /* Poison the nursery contents so touching a freed object will crash. */
     for (unsigned i = 0; i < numChunks(); i++)
-        chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
+        chunk(i).poisonAndInit(zoneGroup()->runtime, JS_SWEPT_NURSERY_PATTERN);
 
-    if (runtime()->hasZealMode(ZealMode::GenerationalGC)) {
+    if (zoneGroup()->runtime->hasZealMode(ZealMode::GenerationalGC)) {
         /* Only reset the alloc point when we are close to the end. */
         if (currentChunk_ + 1 == numChunks())
             setCurrentChunk(0);
     } else
 #endif
     {
 #ifdef JS_CRASH_DIAGNOSTICS
         for (unsigned i = 0; i < numChunks(); ++i)
-            chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
+            chunk(i).poisonAndInit(zoneGroup()->runtime, JS_SWEPT_NURSERY_PATTERN);
 #endif
         setCurrentChunk(0);
     }
 
     /* Set current start position for isEmpty checks. */
     setStartPosition();
-    MemProfiler::SweepNursery(runtime());
+    MemProfiler::SweepNursery(zoneGroup()->runtime);
 }
 
 size_t
 js::Nursery::spaceToEnd() const
 {
     unsigned lastChunk = numChunks() - 1;
 
     MOZ_ASSERT(lastChunk >= currentStartChunk_);
@@ -868,17 +869,17 @@ js::Nursery::spaceToEnd() const
 MOZ_ALWAYS_INLINE void
 js::Nursery::setCurrentChunk(unsigned chunkno)
 {
     MOZ_ASSERT(chunkno < maxChunks());
     MOZ_ASSERT(chunkno < numChunks());
     currentChunk_ = chunkno;
     position_ = chunk(chunkno).start();
     currentEnd_ = chunk(chunkno).end();
-    chunk(chunkno).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
+    chunk(chunkno).poisonAndInit(zoneGroup()->runtime, JS_FRESH_NURSERY_PATTERN);
 }
 
 MOZ_ALWAYS_INLINE void
 js::Nursery::setStartPosition()
 {
     currentStartChunk_ = currentChunk_;
     currentStartPosition_ = position();
 }
@@ -909,74 +910,76 @@ js::Nursery::growAllocableSpace()
 {
     updateNumChunks(Min(numChunks() * 2, maxNurseryChunks_));
 }
 
 void
 js::Nursery::shrinkAllocableSpace()
 {
 #ifdef JS_GC_ZEAL
-    if (runtime()->hasZealMode(ZealMode::GenerationalGC))
+    if (zoneGroup()->runtime->hasZealMode(ZealMode::GenerationalGC))
         return;
 #endif
     updateNumChunks(Max(numChunks() - 1, 1u));
 }
 
 void
 js::Nursery::minimizeAllocableSpace()
 {
 #ifdef JS_GC_ZEAL
-    if (runtime()->hasZealMode(ZealMode::GenerationalGC))
+    if (zoneGroup()->runtime->hasZealMode(ZealMode::GenerationalGC))
         return;
 #endif
     updateNumChunks(1);
 }
 
 void
 js::Nursery::updateNumChunks(unsigned newCount)
 {
     if (numChunks() != newCount) {
         AutoMaybeStartBackgroundAllocation maybeBgAlloc;
-        AutoLockGC lock(runtime());
+        AutoLockGC lock(zoneGroup()->runtime);
         updateNumChunksLocked(newCount, maybeBgAlloc, lock);
     }
 }
 
 void
 js::Nursery::updateNumChunksLocked(unsigned newCount,
                                    AutoMaybeStartBackgroundAllocation& maybeBgAlloc,
                                    AutoLockGC& lock)
 {
     // The GC nursery is an optimization and so if we fail to allocate nursery
     // chunks we do not report an error.
 
+    MOZ_ASSERT(newCount <= maxChunks());
+
     unsigned priorCount = numChunks();
     MOZ_ASSERT(priorCount != newCount);
 
     if (newCount < priorCount) {
         // Shrink the nursery and free unused chunks.
         for (unsigned i = newCount; i < priorCount; i++)
-            runtime()->gc.recycleChunk(chunk(i).toChunk(runtime()), lock);
+            zoneGroup()->runtime->gc.recycleChunk(chunk(i).toChunk(zoneGroup()->runtime), lock);
         chunks_.shrinkTo(newCount);
         return;
     }
 
     // Grow the nursery and allocate new chunks.
     if (!chunks_.resize(newCount))
         return;
 
     for (unsigned i = priorCount; i < newCount; i++) {
-        auto newChunk = runtime()->gc.getOrAllocChunk(lock, maybeBgAlloc);
+        auto newChunk = zoneGroup()->runtime->gc.getOrAllocChunk(lock, maybeBgAlloc);
         if (!newChunk) {
             chunks_.shrinkTo(i);
             return;
         }
 
         chunks_[i] = NurseryChunk::fromChunk(newChunk);
-        chunk(i).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
+        chunk(i).poisonAndInit(zoneGroup()->runtime, JS_FRESH_NURSERY_PATTERN);
     }
 }
 
 void
 js::Nursery::queueSweepAction(SweepThunk thunk, void* data)
 {
     static_assert(sizeof(SweepAction) % CellSize == 0,
                   "SweepAction size must be a multiple of cell size");
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -53,16 +53,17 @@ struct Zone;
 } // namespace JS
 
 namespace js {
 
 class ObjectElements;
 class NativeObject;
 class Nursery;
 class HeapSlot;
+class ZoneGroup;
 
 void SetGCZeal(JSRuntime*, uint8_t, uint32_t);
 
 namespace gc {
 class AutoMaybeStartBackgroundAllocation;
 struct Cell;
 class MinorCollectionTracer;
 class RelocationOverlay;
@@ -129,17 +130,17 @@ CanNurseryAllocateFinalizedClass(const j
 }
 
 class Nursery
 {
   public:
     static const size_t Alignment = gc::ChunkSize;
     static const size_t ChunkShift = gc::ChunkShift;
 
-    explicit Nursery(JSRuntime* rt);
+    explicit Nursery(ZoneGroup* group);
     ~Nursery();
 
     MOZ_MUST_USE bool init(uint32_t maxNurseryBytes, AutoLockGC& lock);
 
     unsigned maxChunks() const { return maxNurseryChunks_; }
     unsigned numChunks() const { return chunks_.length(); }
 
     bool exists() const { return maxChunks() != 0; }
@@ -190,24 +191,24 @@ class Nursery
 
     /* Free an object buffer. */
     void freeBuffer(void* buffer);
 
     /* The maximum number of bytes allowed to reside in nursery buffers. */
     static const size_t MaxNurseryBufferSize = 1024;
 
     /* Do a minor collection. */
-    void collect(JSRuntime* rt, JS::gcreason::Reason reason);
+    void collect(JS::gcreason::Reason reason);
 
     /*
      * Check if the thing at |*ref| in the Nursery has been forwarded. If so,
      * sets |*ref| to the new location of the object and returns true. Otherwise
      * returns false and leaves |*ref| unset.
      */
-    MOZ_ALWAYS_INLINE MOZ_MUST_USE bool getForwardedPointer(JSObject** ref) const;
+    MOZ_ALWAYS_INLINE MOZ_MUST_USE static bool getForwardedPointer(JSObject** ref);
 
     /* Forward a slots/elements pointer stored in an Ion frame. */
     void forwardBufferPointer(HeapSlot** pSlotsElems);
 
     void maybeSetForwardingPointer(JSTracer* trc, void* oldData, void* newData, bool direct) {
         if (trc->isTenuringTracer() && isInside(oldData))
             setForwardingPointer(oldData, newData, direct);
     }
@@ -259,16 +260,19 @@ class Nursery
 #endif
 
     /* Print header line for profile times. */
     void printProfileHeader();
 
     /* Print total profile times on shutdown. */
     void printTotalProfileTimes();
 
+    void* addressOfCurrentEnd() const { return (void*)&currentEnd_; }
+    void* addressOfPosition() const { return (void*)&position_; }
+
   private:
     /* The amount of space in the mapped nursery available to allocations. */
     static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);
 
     struct NurseryChunk {
         char data[NurseryChunkUsableSize];
         gc::ChunkTrailer trailer;
         static NurseryChunk* fromChunk(gc::Chunk* chunk);
@@ -276,22 +280,18 @@ class Nursery
         void poisonAndInit(JSRuntime* rt, uint8_t poison);
         uintptr_t start() const { return uintptr_t(&data); }
         uintptr_t end() const { return uintptr_t(&trailer); }
         gc::Chunk* toChunk(JSRuntime* rt);
     };
     static_assert(sizeof(NurseryChunk) == gc::ChunkSize,
                   "Nursery chunk size must match gc::Chunk size.");
 
-    /*
-     * The start and end pointers are stored under the runtime so that we can
-     * inline the isInsideNursery check into embedder code. Use the start()
-     * and heapEnd() functions to access these values.
-     */
-    JSRuntime* runtime_;
+    // The set of zones for which this is the nursery.
+    ZoneGroup* zoneGroup_;
 
     /* Vector of allocated chunks to allocate from. */
     Vector<NurseryChunk*, 0, SystemAllocPolicy> chunks_;
 
     /* Pointer to the first unallocated byte in the nursery. */
     uintptr_t position_;
 
     /* Pointer to the logical start of the Nursery. */
@@ -401,37 +401,31 @@ class Nursery
                                AutoLockGC& lock);
 
     MOZ_ALWAYS_INLINE uintptr_t allocationEnd() const {
         MOZ_ASSERT(numChunks() > 0);
         return chunks_.back()->end();
     }
 
     MOZ_ALWAYS_INLINE uintptr_t currentEnd() const {
-        MOZ_ASSERT(runtime_);
         MOZ_ASSERT(currentEnd_ == chunk(currentChunk_).end());
         return currentEnd_;
     }
-    void* addressOfCurrentEnd() const {
-        MOZ_ASSERT(runtime_);
-        return (void*)&currentEnd_;
-    }
 
     uintptr_t position() const { return position_; }
-    void* addressOfPosition() const { return (void*)&position_; }
 
-    JSRuntime* runtime() const { return runtime_; }
+    ZoneGroup* zoneGroup() const { return zoneGroup_; }
 
     /* Allocates a new GC thing from the tenured generation during minor GC. */
     gc::TenuredCell* allocateFromTenured(JS::Zone* zone, gc::AllocKind thingKind);
 
     /* Common internal allocator function. */
     void* allocate(size_t size);
 
-    double doCollection(JSRuntime* rt, JS::gcreason::Reason reason,
+    double doCollection(JS::gcreason::Reason reason,
                         gc::TenureCountCache& tenureCounts);
 
     /*
      * Move the object at |src| in the Nursery to an already-allocated cell
      * |dst| in Tenured.
      */
     void collectToFixedPoint(TenuringTracer& trc, gc::TenureCountCache& tenureCounts);
 
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -61,90 +61,88 @@ TraceExactStackRootList(JSTracer* trc, J
     while (rooter) {
         T* addr = reinterpret_cast<JS::Rooted<T>*>(rooter)->address();
         TraceFn(trc, addr, name);
         rooter = rooter->previous();
     }
 }
 
 static inline void
-TraceStackRoots(JSTracer* trc, RootedListHeads& stackRoots)
+TraceStackRoots(JSTracer* trc, JS::RootedListHeads& stackRoots)
 {
 #define TRACE_ROOTS(name, type, _) \
     TraceExactStackRootList<type*>(trc, stackRoots[JS::RootKind::name], "exact-" #name);
 JS_FOR_EACH_TRACEKIND(TRACE_ROOTS)
 #undef TRACE_ROOTS
     TraceExactStackRootList<jsid>(trc, stackRoots[JS::RootKind::Id], "exact-id");
     TraceExactStackRootList<Value>(trc, stackRoots[JS::RootKind::Value], "exact-value");
     TraceExactStackRootList<ConcreteTraceable,
                            js::DispatchWrapper<ConcreteTraceable>::TraceWrapped>(
         trc, stackRoots[JS::RootKind::Traceable], "Traceable");
 }
 
 void
-js::RootLists::traceStackRoots(JSTracer* trc)
+JS::RootingContext::traceStackRoots(JSTracer* trc)
 {
     TraceStackRoots(trc, stackRoots_);
 }
 
 static void
 TraceExactStackRoots(JSRuntime* rt, JSTracer* trc)
 {
-    for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next())
-        TraceStackRoots(trc, zone->stackRoots_);
-    rt->contextFromMainThread()->roots.traceStackRoots(trc);
+    TlsContext.get()->traceStackRoots(trc);
 }
 
 template <typename T, TraceFunction<T> TraceFn = TraceNullableRoot>
 static inline void
 TracePersistentRootedList(JSTracer* trc, mozilla::LinkedList<PersistentRooted<void*>>& list,
                          const char* name)
 {
     for (PersistentRooted<void*>* r : list)
         TraceFn(trc, reinterpret_cast<PersistentRooted<T>*>(r)->address(), name);
 }
 
 void
-js::RootLists::tracePersistentRoots(JSTracer* trc)
+JSRuntime::tracePersistentRoots(JSTracer* trc)
 {
 #define TRACE_ROOTS(name, type, _) \
-    TracePersistentRootedList<type*>(trc, heapRoots_[JS::RootKind::name], "persistent-" #name);
+    TracePersistentRootedList<type*>(trc, heapRoots.ref()[JS::RootKind::name], "persistent-" #name);
 JS_FOR_EACH_TRACEKIND(TRACE_ROOTS)
 #undef TRACE_ROOTS
-    TracePersistentRootedList<jsid>(trc, heapRoots_[JS::RootKind::Id], "persistent-id");
-    TracePersistentRootedList<Value>(trc, heapRoots_[JS::RootKind::Value], "persistent-value");
+    TracePersistentRootedList<jsid>(trc, heapRoots.ref()[JS::RootKind::Id], "persistent-id");
+    TracePersistentRootedList<Value>(trc, heapRoots.ref()[JS::RootKind::Value], "persistent-value");
     TracePersistentRootedList<ConcreteTraceable,
                              js::DispatchWrapper<ConcreteTraceable>::TraceWrapped>(trc,
-            heapRoots_[JS::RootKind::Traceable], "persistent-traceable");
+            heapRoots.ref()[JS::RootKind::Traceable], "persistent-traceable");
 }
 
 static void
 TracePersistentRooted(JSRuntime* rt, JSTracer* trc)
 {
-    rt->contextFromMainThread()->roots.tracePersistentRoots(trc);
+    rt->tracePersistentRoots(trc);
 }
 
 template <typename T>
 static void
 FinishPersistentRootedChain(mozilla::LinkedList<PersistentRooted<void*>>& listArg)
 {
     auto& list = reinterpret_cast<mozilla::LinkedList<PersistentRooted<T>>&>(listArg);
     while (!list.isEmpty())
         list.getFirst()->reset();
 }
 
 void
-js::RootLists::finishPersistentRoots()
+JSRuntime::finishPersistentRoots()
 {
-#define FINISH_ROOT_LIST(name, type, _) \
-    FinishPersistentRootedChain<type*>(heapRoots_[JS::RootKind::name]);
+#define FINISH_ROOT_LIST(name, type, _)                                 \
+    FinishPersistentRootedChain<type*>(heapRoots.ref()[JS::RootKind::name]);
 JS_FOR_EACH_TRACEKIND(FINISH_ROOT_LIST)
 #undef FINISH_ROOT_LIST
-    FinishPersistentRootedChain<jsid>(heapRoots_[JS::RootKind::Id]);
-    FinishPersistentRootedChain<Value>(heapRoots_[JS::RootKind::Value]);
+    FinishPersistentRootedChain<jsid>(heapRoots.ref()[JS::RootKind::Id]);
+    FinishPersistentRootedChain<Value>(heapRoots.ref()[JS::RootKind::Value]);
 
     // Note that we do not finalize the Traceable list as we do not know how to
     // safely clear members. We instead assert that none escape the RootLists.
     // See the comment on RootLists::~RootLists for details.
 }
 
 inline void
 AutoGCRooter::trace(JSTracer* trc)
@@ -200,26 +198,26 @@ AutoGCRooter::trace(JSTracer* trc)
     MOZ_ASSERT(tag_ >= 0);
     if (Value* vp = static_cast<AutoArrayRooter*>(this)->array)
         TraceRootRange(trc, tag_, vp, "JS::AutoArrayRooter.array");
 }
 
 /* static */ void
 AutoGCRooter::traceAll(JSTracer* trc)
 {
-    for (AutoGCRooter* gcr = trc->runtime()->contextFromMainThread()->roots.autoGCRooters_; gcr; gcr = gcr->down)
+    for (AutoGCRooter* gcr = TlsContext.get()->autoGCRooters_; gcr; gcr = gcr->down)
         gcr->trace(trc);
 }
 
 /* static */ void
 AutoGCRooter::traceAllWrappers(JSTracer* trc)
 {
-    JSContext* cx = trc->runtime()->contextFromMainThread();
+    JSContext* cx = TlsContext.get();
 
-    for (AutoGCRooter* gcr = cx->roots.autoGCRooters_; gcr; gcr = gcr->down) {
+    for (AutoGCRooter* gcr = cx->autoGCRooters_; gcr; gcr = gcr->down) {
         if (gcr->tag_ == WRAPVECTOR || gcr->tag_ == WRAPPER)
             gcr->trace(trc);
     }
 }
 
 void
 StackShape::trace(JSTracer* trc)
 {
@@ -256,90 +254,90 @@ PropertyDescriptor::trace(JSTracer* trc)
 void
 js::gc::GCRuntime::traceRuntimeForMajorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock)
 {
     // FinishRoots will have asserted that every root that we do not expect
     // is gone, so we can simply skip traceRuntime here.
     if (rt->isBeingDestroyed())
         return;
 
-    gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_ROOTS);
     if (rt->atomsCompartment(lock)->zone()->isCollecting())
         traceRuntimeAtoms(trc, lock);
     JSCompartment::traceIncomingCrossCompartmentEdgesForZoneGC(trc);
     traceRuntimeCommon(trc, MarkRuntime, lock);
 }
 
 void
 js::gc::GCRuntime::traceRuntimeForMinorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock)
 {
     // Note that we *must* trace the runtime during the SHUTDOWN_GC's minor GC
     // despite having called FinishRoots already. This is because FinishRoots
     // does not clear the crossCompartmentWrapper map. It cannot do this
     // because Proxy's trace for CrossCompartmentWrappers asserts presence in
     // the map. And we can reach its trace function despite having finished the
     // roots via the edges stored by the pre-barrier verifier when we finish
     // the verifier for the last time.
-    gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_ROOTS);
 
     // FIXME: As per bug 1298816 comment 12, we should be able to remove this.
     jit::JitRuntime::TraceJitcodeGlobalTable(trc);
 
     traceRuntimeCommon(trc, TraceRuntime, lock);
 }
 
 void
 js::TraceRuntime(JSTracer* trc)
 {
     MOZ_ASSERT(!trc->isMarkingTracer());
 
     JSRuntime* rt = trc->runtime();
-    rt->gc.evictNursery();
-    AutoPrepareForTracing prep(rt->contextFromMainThread(), WithAtoms);
-    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_TRACE_HEAP);
+    rt->zoneGroupFromMainThread()->evictNursery();
+    AutoPrepareForTracing prep(TlsContext.get(), WithAtoms);
+    gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_TRACE_HEAP);
     rt->gc.traceRuntime(trc, prep.session().lock);
 }
 
 void
 js::gc::GCRuntime::traceRuntime(JSTracer* trc, AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(!rt->isBeingDestroyed());
 
-    gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_ROOTS);
     traceRuntimeAtoms(trc, lock);
     traceRuntimeCommon(trc, TraceRuntime, lock);
 }
 
 void
 js::gc::GCRuntime::traceRuntimeAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock)
 {
-    gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_RUNTIME_DATA);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_RUNTIME_DATA);
     TracePermanentAtoms(trc);
     TraceAtoms(trc, lock);
     TraceWellKnownSymbols(trc);
     jit::JitRuntime::Trace(trc, lock);
 }
 
 void
 js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
                                       AutoLockForExclusiveAccess& lock)
 {
-    MOZ_ASSERT(!rt->mainThread.suppressGC);
+    MOZ_ASSERT(!TlsContext.get()->suppressGC);
 
     {
-        gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_STACK);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_STACK);
 
         // Trace active interpreter and JIT stack roots.
         TraceInterpreterActivations(rt, trc);
         jit::TraceJitActivations(rt, trc);
 
         // Trace legacy C stack roots.
         AutoGCRooter::traceAll(trc);
 
-        for (RootRange r = rootsHash.all(); !r.empty(); r.popFront()) {
+        for (RootRange r = rootsHash.ref().all(); !r.empty(); r.popFront()) {
             const RootEntry& entry = r.front();
             TraceRoot(trc, entry.key(), entry.value());
         }
 
         // Trace C stack roots.
         TraceExactStackRoots(rt, trc);
     }
 
@@ -347,44 +345,44 @@ js::gc::GCRuntime::traceRuntimeCommon(JS
     TracePersistentRooted(rt, trc);
 
     // Trace the self-hosting global compartment.
     rt->traceSelfHostingGlobal(trc);
 
     // Trace the shared Intl data.
     rt->traceSharedIntlData(trc);
 
-    // Trace anything in the single context. Note that this is actually the
-    // same struct as the JSRuntime, but is still split for historical reasons.
-    rt->contextFromMainThread()->trace(trc);
+    // Trace anything in the current thread's context. Ignore other JSContexts,
+    // as these will only refer to ZoneGroups which we are not collecting/tracing.
+    TlsContext.get()->trace(trc);
 
     // Trace all compartment roots, but not the compartment itself; it is
     // traced via the parent pointer if traceRoots actually traces anything.
     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
         c->traceRoots(trc, traceOrMark);
 
     // Trace the Gecko Profiler.
-    rt->geckoProfiler.trace(trc);
+    rt->geckoProfiler().trace(trc);
 
     // Trace helper thread roots.
     HelperThreadState().trace(trc);
 
     // Trace the embedding's black and gray roots.
-    if (!rt->isHeapMinorCollecting()) {
-        gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_EMBEDDING);
+    if (!JS::CurrentThreadIsHeapMinorCollecting()) {
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_EMBEDDING);
 
         /*
          * The embedding can register additional roots here.
          *
          * We don't need to trace these in a minor GC because all pointers into
          * the nursery should be in the store buffer, and we want to avoid the
          * time taken to trace all these roots.
          */
-        for (size_t i = 0; i < blackRootTracers.length(); i++) {
-            const Callback<JSTraceDataOp>& e = blackRootTracers[i];
+        for (size_t i = 0; i < blackRootTracers.ref().length(); i++) {
+            const Callback<JSTraceDataOp>& e = blackRootTracers.ref()[i];
             (*e.op)(trc, e.data);
         }
 
         /* During GC, we don't trace gray roots at this stage. */
         if (JSTraceDataOp op = grayRootTracer.op) {
             if (traceOrMark == TraceRuntime)
                 (*op)(trc, grayRootTracer.data);
         }
@@ -403,37 +401,39 @@ class AssertNoRootsTracer : public JS::C
       : JS::CallbackTracer(rt, weakTraceKind)
     {}
 };
 #endif // DEBUG
 
 void
 js::gc::GCRuntime::finishRoots()
 {
+    AutoNoteSingleThreadedRegion anstr;
+
     rt->finishAtoms();
 
-    if (rootsHash.initialized())
-        rootsHash.clear();
+    if (rootsHash.ref().initialized())
+        rootsHash.ref().clear();
 
-    rt->contextFromMainThread()->roots.finishPersistentRoots();
+    rt->finishPersistentRoots();
 
     rt->finishSelfHosting();
 
     for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
         c->finishRoots();
 
 #ifdef DEBUG
     // The nsWrapperCache may not be empty before our shutdown GC, so we have
     // to skip that table when verifying that we are fully unrooted.
     auto prior = grayRootTracer;
     grayRootTracer = Callback<JSTraceDataOp>(nullptr, nullptr);
 
     AssertNoRootsTracer trc(rt, TraceWeakMapKeysValues);
-    AutoPrepareForTracing prep(rt->contextFromMainThread(), WithAtoms);
-    gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_TRACE_HEAP);
+    AutoPrepareForTracing prep(TlsContext.get(), WithAtoms);
+    gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_TRACE_HEAP);
     traceRuntime(&trc, prep.session().lock);
 
     // Restore the wrapper tracing so that we leak instead of leaving dangling
     // pointers.
     grayRootTracer = prior;
 #endif // DEBUG
 }
 
@@ -471,19 +471,19 @@ js::IsBufferGrayRootsTracer(JSTracer* tr
 
 void
 js::gc::GCRuntime::bufferGrayRoots()
 {
     // Precondition: the state has been reset to "unused" after the last GC
     //               and the zone's buffers have been cleared.
     MOZ_ASSERT(grayBufferState == GrayBufferState::Unused);
     for (GCZonesIter zone(rt); !zone.done(); zone.next())
-        MOZ_ASSERT(zone->gcGrayRoots.empty());
+        MOZ_ASSERT(zone->gcGrayRoots().empty());
 
-    gcstats::AutoPhase ap(stats, gcstats::PHASE_BUFFER_GRAY_ROOTS);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_BUFFER_GRAY_ROOTS);
 
     BufferGrayRootsTracer grayBufferer(rt);
     if (JSTraceDataOp op = grayRootTracer.op)
         (*op)(&grayBufferer, grayRootTracer.data);
 
     // Propagate the failure flag from the marker to the runtime.
     if (grayBufferer.failed()) {
       grayBufferState = GrayBufferState::Failed;
@@ -495,17 +495,17 @@ js::gc::GCRuntime::bufferGrayRoots()
 
 struct SetMaybeAliveFunctor {
     template <typename T> void operator()(T* t) { SetMaybeAliveFlag(t); }
 };
 
 void
 BufferGrayRootsTracer::onChild(const JS::GCCellPtr& thing)
 {
-    MOZ_ASSERT(runtime()->isHeapBusy());
+    MOZ_ASSERT(JS::CurrentThreadIsHeapBusy());
     MOZ_RELEASE_ASSERT(thing);
     // Check if |thing| is corrupt by calling a method that touches the heap.
     MOZ_RELEASE_ASSERT(thing.asCell()->getTraceKind() <= JS::TraceKind::Null);
 
     if (bufferingGrayRootsFailed)
         return;
 
     gc::TenuredCell* tenured = gc::TenuredCell::fromPointer(thing.asCell());
@@ -513,32 +513,37 @@ BufferGrayRootsTracer::onChild(const JS:
     Zone* zone = tenured->zone();
     if (zone->isCollecting()) {
         // See the comment on SetMaybeAliveFlag to see why we only do this for
         // objects and scripts. We rely on gray root buffering for this to work,
         // but we only need to worry about uncollected dead compartments during
         // incremental GCs (when we do gray root buffering).
         DispatchTyped(SetMaybeAliveFunctor(), thing);
 
-        if (!zone->gcGrayRoots.append(tenured))
+        if (!zone->gcGrayRoots().append(tenured))
             bufferingGrayRootsFailed = true;
     }
 }
 
 void
 GCRuntime::markBufferedGrayRoots(JS::Zone* zone)
 {
     MOZ_ASSERT(grayBufferState == GrayBufferState::Okay);
     MOZ_ASSERT(zone->isGCMarkingGray() || zone->isGCCompacting());
 
-    for (auto cell : zone->gcGrayRoots)
+    for (auto cell : zone->gcGrayRoots())
         TraceManuallyBarrieredGenericPointerEdge(&marker, &cell, "buffered gray root");
 }
 
 void
 GCRuntime::resetBufferedGrayRoots() const
 {
     MOZ_ASSERT(grayBufferState != GrayBufferState::Okay,
                "Do not clear the gray buffers unless we are Failed or becoming Unused");
     for (GCZonesIter zone(rt); !zone.done(); zone.next())
-        zone->gcGrayRoots.clearAndFree();
+        zone->gcGrayRoots().clearAndFree();
 }
 
+JS_PUBLIC_API(void)
+JS::AddPersistentRoot(JS::RootingContext* cx, RootKind kind, PersistentRooted<void*>* root)
+{
+    static_cast<JSContext*>(cx)->runtime()->heapRoots.ref()[kind].insertBack(root);
+}
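
For readers following the reorganization: the RootMarking.cpp hunks above move persistent-root bookkeeping onto the runtime, keyed by root kind, and expose JS::AddPersistentRoot as the registration entry point. Below is a minimal standalone sketch of that shape, assuming invented FakeRuntime/FakePersistentRooted types; only heapRoots, RootKind and AddPersistentRoot echo identifiers from the patch.

// Standalone sketch, not SpiderMonkey code.
#include <array>
#include <cassert>
#include <cstddef>
#include <list>

enum class RootKind { Object, Script, String, Id, Value, Limit };

struct FakePersistentRooted {
    void* ptr = nullptr;
    void reset() { ptr = nullptr; }  // stand-in for PersistentRooted<T>::reset()
};

struct FakeRuntime {
    // Analogue of heapRoots indexed by JS::RootKind: one list per kind.
    std::array<std::list<FakePersistentRooted*>,
               static_cast<std::size_t>(RootKind::Limit)> heapRoots;

    // Analogue of JS::AddPersistentRoot: link the root into the per-kind list.
    void addPersistentRoot(RootKind kind, FakePersistentRooted* root) {
        heapRoots[static_cast<std::size_t>(kind)].push_back(root);
    }

    // Analogue of JSRuntime::finishPersistentRoots: reset everything left over.
    void finishPersistentRoots() {
        for (auto& list : heapRoots) {
            while (!list.empty()) {
                list.front()->reset();
                list.pop_front();  // the real reset() unlinks itself from the LinkedList
            }
        }
    }
};

int main() {
    FakeRuntime rt;
    FakePersistentRooted root;
    root.ptr = &rt;
    rt.addPersistentRoot(RootKind::Value, &root);
    rt.finishPersistentRoots();
    assert(root.ptr == nullptr);
    return 0;
}
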
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -1095,27 +1095,27 @@ Statistics::endGC()
     aborted = false;
 }
 
 void
 Statistics::beginNurseryCollection(JS::gcreason::Reason reason)
 {
     count(STAT_MINOR_GC);
     if (nurseryCollectionCallback) {
-        (*nurseryCollectionCallback)(runtime->contextFromMainThread(),
+        (*nurseryCollectionCallback)(TlsContext.get(),
                                      JS::GCNurseryProgress::GC_NURSERY_COLLECTION_START,
                                      reason);
     }
 }
 
 void
 Statistics::endNurseryCollection(JS::gcreason::Reason reason)
 {
     if (nurseryCollectionCallback) {
-        (*nurseryCollectionCallback)(runtime->contextFromMainThread(),
+        (*nurseryCollectionCallback)(TlsContext.get(),
                                      JS::GCNurseryProgress::GC_NURSERY_COLLECTION_END,
                                      reason);
     }
 }
 
 void
 Statistics::beginSlice(const ZoneGCStats& zoneStats, JSGCInvocationKind gckind,
                        SliceBudget budget, JS::gcreason::Reason reason)
@@ -1137,17 +1137,17 @@ Statistics::beginSlice(const ZoneGCStats
         return;
     }
 
     runtime->addTelemetry(JS_TELEMETRY_GC_REASON, reason);
 
     // Slice callbacks should only fire for the outermost level.
     bool wasFullGC = zoneStats.isCollectingAllZones();
     if (sliceCallback)
-        (*sliceCallback)(runtime->contextFromMainThread(),
+        (*sliceCallback)(TlsContext.get(),
                          first ? JS::GC_CYCLE_BEGIN : JS::GC_SLICE_BEGIN,
                          JS::GCDescription(!wasFullGC, gckind, reason));
 }
 
 void
 Statistics::endSlice()
 {
     if (!aborted) {
@@ -1183,17 +1183,17 @@ Statistics::endSlice()
 
     if (enableProfiling_ && !aborted && slices.back().duration() >= profileThreshold_)
         printSliceProfile();
 
     // Slice callbacks should only fire for the outermost level.
     if (!aborted) {
         bool wasFullGC = zoneStats.isCollectingAllZones();
         if (sliceCallback)
-            (*sliceCallback)(runtime->contextFromMainThread(),
+            (*sliceCallback)(TlsContext.get(),
                              last ? JS::GC_CYCLE_END : JS::GC_SLICE_END,
                              JS::GCDescription(!wasFullGC, gckind, slices.back().reason));
     }
 
     // Do this after the slice callback since it uses these values.
     if (last) {
         for (auto& count : counts)
             count = 0;
@@ -1400,17 +1400,17 @@ Statistics::computeMMU(TimeDuration wind
 }
 
 void
 Statistics::maybePrintProfileHeaders()
 {
     static int printedHeader = 0;
     if ((printedHeader++ % 200) == 0) {
         printProfileHeader();
-        runtime->gc.nursery.printProfileHeader();
+        runtime->zoneGroupFromMainThread()->nursery().printProfileHeader();
     }
 }
 
 void
 Statistics::printProfileHeader()
 {
     if (!enableProfiling_)
         return;
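
The Statistics.cpp hunks replace runtime->contextFromMainThread() with TlsContext.get(), a thread-local handle to whichever context is active on the calling thread, so the slice and nursery callbacks receive that context. A hedged sketch of the pattern follows; apart from the TlsContext idea itself, every name here is invented.

// Illustrative sketch only; the real TlsContext is more involved.
#include <cassert>

struct FakeContext { int id; };

class ThreadLocalContextSketch {
    static thread_local FakeContext* current;
  public:
    FakeContext* get() const { return current; }
    void set(FakeContext* cx) { current = cx; }
};

thread_local FakeContext* ThreadLocalContextSketch::current = nullptr;

ThreadLocalContextSketch TlsContextSketch;

int main() {
    FakeContext cx{1};
    TlsContextSketch.set(&cx);
    // Code that previously asked the runtime for "the" main-thread context can
    // instead ask for whichever context is active on the calling thread.
    assert(TlsContextSketch.get() == &cx);
    return 0;
}
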
--- a/js/src/gc/StoreBuffer.cpp
+++ b/js/src/gc/StoreBuffer.cpp
@@ -84,17 +84,17 @@ StoreBuffer::clear()
     bufferWholeCell = nullptr;
 }
 
 void
 StoreBuffer::setAboutToOverflow()
 {
     if (!aboutToOverflow_) {
         aboutToOverflow_ = true;
-        runtime_->gc.stats.count(gcstats::STAT_STOREBUFFER_OVERFLOW);
+        runtime_->gc.stats().count(gcstats::STAT_STOREBUFFER_OVERFLOW);
     }
     runtime_->gc.requestMinorGC(JS::gcreason::FULL_STORE_BUFFER);
 }
 
 void
 StoreBuffer::addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::GCSizes* sizes)
 {
@@ -121,33 +121,32 @@ ArenaCellSet::ArenaCellSet(Arena* arena)
 {
     bits.clear(false);
 }
 
 ArenaCellSet*
 js::gc::AllocateWholeCellSet(Arena* arena)
 {
     Zone* zone = arena->zone;
-    JSRuntime* rt = zone->runtimeFromMainThread();
-    if (!rt->gc.nursery.isEnabled())
+    if (!zone->group()->nursery().isEnabled())
         return nullptr;
 
     AutoEnterOOMUnsafeRegion oomUnsafe;
-    Nursery& nursery = rt->gc.nursery;
+    Nursery& nursery = zone->group()->nursery();
     void* data = nursery.allocateBuffer(zone, sizeof(ArenaCellSet));
     if (!data) {
         oomUnsafe.crash("Failed to allocate WholeCellSet");
         return nullptr;
     }
 
     if (nursery.freeSpace() < ArenaCellSet::NurseryFreeThresholdBytes)
-        rt->gc.storeBuffer.setAboutToOverflow();
+        zone->group()->storeBuffer().setAboutToOverflow();
 
     auto cells = static_cast<ArenaCellSet*>(data);
     new (cells) ArenaCellSet(arena);
     arena->bufferedCells() = cells;
-    rt->gc.storeBuffer.addToWholeCellBuffer(cells);
+    zone->group()->storeBuffer().addToWholeCellBuffer(cells);
     return cells;
 }
 
 template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::ValueEdge>;
 template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::CellPtrEdge>;
 template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::SlotsEdge>;
--- a/js/src/gc/StoreBuffer.h
+++ b/js/src/gc/StoreBuffer.h
@@ -337,27 +337,27 @@ class StoreBuffer
             typedef SlotsEdge Lookup;
             static HashNumber hash(const Lookup& l) { return l.objectAndKind_ ^ l.start_ ^ l.count_; }
             static bool match(const SlotsEdge& k, const Lookup& l) { return k == l; }
         } Hasher;
     };
 
     template <typename Buffer, typename Edge>
     void unput(Buffer& buffer, const Edge& edge) {
-        MOZ_ASSERT(!JS::shadow::Runtime::asShadowRuntime(runtime_)->isHeapBusy());
+        MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
         if (!isEnabled())
             return;
         mozilla::ReentrancyGuard g(*this);
         buffer.unput(this, edge);
     }
 
     template <typename Buffer, typename Edge>
     void put(Buffer& buffer, const Edge& edge) {
-        MOZ_ASSERT(!JS::shadow::Runtime::asShadowRuntime(runtime_)->isHeapBusy());
+        MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
         if (!isEnabled())
             return;
         mozilla::ReentrancyGuard g(*this);
         if (edge.maybeInRememberedSet(nursery_))
             buffer.put(this, edge);
     }
 
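The StoreBuffer assertions now ask JS::CurrentThreadIsHeapBusy() rather than a per-runtime isHeapBusy(), so "is a GC in progress" becomes a property of the current thread. A rough sketch of such a per-thread state flag, with all names except CurrentThreadIsHeapBusy invented:

// Sketch only, not the real heap-state machinery.
#include <cassert>

enum class HeapState { Idle, Tracing, MajorCollecting, MinorCollecting };

thread_local HeapState threadHeapState = HeapState::Idle;

bool CurrentThreadIsHeapBusySketch() {
    return threadHeapState != HeapState::Idle;
}

// RAII session a collector might enter while it owns the heap on this thread.
struct AutoHeapSessionSketch {
    HeapState prev;
    explicit AutoHeapSessionSketch(HeapState s) : prev(threadHeapState) {
        threadHeapState = s;
    }
    ~AutoHeapSessionSketch() { threadHeapState = prev; }
};

int main() {
    assert(!CurrentThreadIsHeapBusySketch());      // StoreBuffer::put() is allowed here
    {
        AutoHeapSessionSketch session(HeapState::MinorCollecting);
        assert(CurrentThreadIsHeapBusySketch());   // put()/unput() would assert here
    }
    assert(!CurrentThreadIsHeapBusySketch());
    return 0;
}
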
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -96,18 +96,18 @@ class js::VerifyPreTracer final : public
     /* This graph represents the initial GC "snapshot". */
     VerifyNode* curnode;
     VerifyNode* root;
     char* edgeptr;
     char* term;
     NodeMap nodemap;
 
     explicit VerifyPreTracer(JSRuntime* rt)
-      : JS::CallbackTracer(rt), noggc(rt), number(rt->gc.gcNumber()), count(0), curnode(nullptr),
-        root(nullptr), edgeptr(nullptr), term(nullptr)
+      : JS::CallbackTracer(rt), noggc(TlsContext.get()), number(rt->gc.gcNumber()),
+        count(0), curnode(nullptr), root(nullptr), edgeptr(nullptr), term(nullptr)
     {}
 
     ~VerifyPreTracer() {
         js_free(root);
     }
 };
 
 /*
@@ -174,31 +174,31 @@ NextNode(VerifyNode* node)
 }
 
 void
 gc::GCRuntime::startVerifyPreBarriers()
 {
     if (verifyPreData || isIncrementalGCInProgress())
         return;
 
-    if (IsIncrementalGCUnsafe(rt) != AbortReason::None || rt->keepAtoms())
+    if (IsIncrementalGCUnsafe(rt) != AbortReason::None || TlsContext.get()->keepAtoms || rt->exclusiveThreadsPresent())
         return;
 
     number++;
 
     VerifyPreTracer* trc = js_new<VerifyPreTracer>(rt);
     if (!trc)
         return;
 
-    AutoPrepareForTracing prep(rt->contextFromMainThread(), WithAtoms);
+    AutoPrepareForTracing prep(TlsContext.get(), WithAtoms);
 
     for (auto chunk = allNonEmptyChunks(); !chunk.done(); chunk.next())
         chunk->bitmap.clear();
 
-    gcstats::AutoPhase ap(stats, gcstats::PHASE_TRACE_HEAP);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_TRACE_HEAP);
 
     const size_t size = 64 * 1024 * 1024;
     trc->root = (VerifyNode*)js_malloc(size);
     if (!trc->root)
         goto oom;
     trc->edgeptr = (char*)trc->root;
     trc->term = trc->edgeptr + size;
 
@@ -345,17 +345,21 @@ gc::GCRuntime::endVerifyPreBarriers()
      * been discarded.
      */
     MOZ_ASSERT(trc->number == number);
     number++;
 
     verifyPreData = nullptr;
     incrementalState = State::NotActive;
 
-    if (!compartmentCreated && IsIncrementalGCUnsafe(rt) == AbortReason::None && !rt->keepAtoms()) {
+    if (!compartmentCreated &&
+        IsIncrementalGCUnsafe(rt) == AbortReason::None &&
+        !TlsContext.get()->keepAtoms &&
+        !rt->exclusiveThreadsPresent())
+    {
         CheckEdgeTracer cetrc(rt);
 
         /* Start after the roots. */
         VerifyNode* node = NextNode(trc->root);
         while ((char*)node < trc->edgeptr) {
             cetrc.node = node;
             js::TraceChildren(&cetrc, node->thing, node->kind);
 
@@ -404,17 +408,17 @@ gc::VerifyBarriers(JSRuntime* rt, Verifi
 }
 
 void
 gc::GCRuntime::maybeVerifyPreBarriers(bool always)
 {
     if (!hasZealMode(ZealMode::VerifierPre))
         return;
 
-    if (rt->mainThread.suppressGC)
+    if (TlsContext.get()->suppressGC)
         return;
 
     if (verifyPreData) {
         if (++verifyPreData->count < zealFrequency && !always)
             return;
 
         endVerifyPreBarriers();
     }
@@ -428,17 +432,17 @@ js::gc::MaybeVerifyBarriers(JSContext* c
     GCRuntime* gc = &cx->runtime()->gc;
     gc->maybeVerifyPreBarriers(always);
 }
 
 void
 js::gc::GCRuntime::finishVerifier()
 {
     if (verifyPreData) {
-        js_delete(verifyPreData);
+        js_delete(verifyPreData.ref());
         verifyPreData = nullptr;
     }
 }
 
 #endif /* JS_GC_ZEAL */
 
 #ifdef JSGC_HASH_TABLE_CHECKS
 
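In the Verifier.cpp hunks, GC suppression and keepAtoms are consulted on the thread-local context (TlsContext.get()->suppressGC, ->keepAtoms) instead of rt->mainThread. A small illustrative sketch of a per-context suppression counter, not the real js::AutoSuppressGC; all names below are invented.

// Rough sketch of a per-context GC-suppression counter.
#include <cassert>

struct FakeContext {
    int suppressGC = 0;  // non-zero: do not start a GC from this thread
};

thread_local FakeContext* tlsContextSketch = nullptr;

struct AutoSuppressGCSketch {
    AutoSuppressGCSketch()  { ++tlsContextSketch->suppressGC; }
    ~AutoSuppressGCSketch() { --tlsContextSketch->suppressGC; }
};

bool mayVerifyPreBarriersSketch() {
    // maybeVerifyPreBarriers() bails out when the current context suppresses
    // GC, rather than consulting a per-runtime main-thread field.
    return tlsContextSketch->suppressGC == 0;
}

int main() {
    FakeContext cx;
    tlsContextSketch = &cx;
    assert(mayVerifyPreBarriersSketch());
    {
        AutoSuppressGCSketch noGC;
        assert(!mayVerifyPreBarriersSketch());
    }
    assert(mayVerifyPreBarriersSketch());
    return 0;
}
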
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -18,76 +18,88 @@
 #include "jscompartmentinlines.h"
 #include "jsgcinlines.h"
 
 using namespace js;
 using namespace js::gc;
 
 Zone * const Zone::NotOnList = reinterpret_cast<Zone*>(1);
 
-JS::Zone::Zone(JSRuntime* rt)
+JS::Zone::Zone(JSRuntime* rt, ZoneGroup* group)
   : JS::shadow::Zone(rt, &rt->gc.marker),
-    debuggers(nullptr),
-    suppressAllocationMetadataBuilder(false),
-    arenas(rt),
+    group_(group),
+    debuggers(group, nullptr),
+    uniqueIds_(group),
+    suppressAllocationMetadataBuilder(group, false),
+    arenas(rt, group),
     types(this),
-    compartments(),
-    gcGrayRoots(),
-    gcWeakKeys(SystemAllocPolicy(), rt->randomHashCodeScrambler()),
-    typeDescrObjects(this, SystemAllocPolicy()),
+    gcWeakMapList_(group),
+    compartments_(),
+    gcGrayRoots_(group),
+    gcWeakRefs_(group),
+    weakCaches_(group),
+    gcWeakKeys_(group, SystemAllocPolicy(), rt->randomHashCodeScrambler()),
+    gcZoneGroupEdges_(group),
+    typeDescrObjects_(group, this, SystemAllocPolicy()),
     gcMallocBytes(0),
+    gcMaxMallocBytes(0),
     gcMallocGCTriggered(false),
+    markedAtoms_(group),
     usage(&rt->gc.usage),
+    threshold(),
     gcDelayBytes(0),
-    propertyTree(this),
-    baseShapes(this, BaseShapeSet()),
-    initialShapes(this, InitialShapeSet()),
-    data(nullptr),
-    isSystem(false),
+    propertyTree_(group, this),
+    baseShapes_(group, this, BaseShapeSet()),
+    initialShapes_(group, this, InitialShapeSet()),
+    data(group, nullptr),
+    isSystem(group, false),
     usedByExclusiveThread(false),
-    jitZone_(nullptr),
+#ifdef DEBUG
+    gcLastZoneGroupIndex(group, 0),
+#endif
+    jitZone_(group, nullptr),
     gcState_(NoGC),
     gcScheduled_(false),
-    gcPreserveCode_(false),
-    jitUsingBarriers_(false),
-    keepShapeTables_(false),
-    listNext_(NotOnList)
+    gcPreserveCode_(group, false),
+    jitUsingBarriers_(group, false),
+    keepShapeTables_(group, false),
+    listNext_(group, NotOnList)
 {
     /* Ensure that there are no vtables to mess us up here. */
     MOZ_ASSERT(reinterpret_cast<JS::shadow::Zone*>(this) ==
                static_cast<JS::shadow::Zone*>(this));
 
     AutoLockGC lock(rt);
     threshold.updateAfterGC(8192, GC_NORMAL, rt->gc.tunables, rt->gc.schedulingState, lock);
     setGCMaxMallocBytes(rt->gc.maxMallocBytesAllocated() * 0.9);
 }
 
 Zone::~Zone()
 {
-    JSRuntime* rt = runtimeFromMainThread();
+    JSRuntime* rt = runtimeFromAnyThread();
     if (this == rt->gc.systemZone)
         rt->gc.systemZone = nullptr;
 
-    js_delete(debuggers);
-    js_delete(jitZone_);
+    js_delete(debuggers.ref());
+    js_delete(jitZone_.ref());
 
 #ifdef DEBUG
     // Avoid assertion destroying the weak map list if the embedding leaked GC things.
     if (!rt->gc.shutdownCollectedEverything())
-        gcWeakMapList.clear();
+        gcWeakMapList().clear();
 #endif
 }
 
 bool Zone::init(bool isSystemArg)
 {
     isSystem = isSystemArg;
-    return uniqueIds_.init() &&
-           gcZoneGroupEdges.init() &&
-           gcWeakKeys.init() &&
-           typeDescrObjects.init();
+    return uniqueIds().init() &&
+           gcZoneGroupEdges().init() &&
+           gcWeakKeys().init() &&
+           typeDescrObjects().init();
 }
 
 void
 Zone::setNeedsIncrementalBarrier(bool needs, ShouldUpdateJit updateJit)
 {
     if (updateJit == UpdateJit && needs != jitUsingBarriers_) {
         jit::ToggleBarriers(this, needs);
         jitUsingBarriers_ = needs;
@@ -142,17 +154,17 @@ Zone::getOrCreateDebuggers(JSContext* cx
     if (!debuggers)
         ReportOutOfMemory(cx);
     return debuggers;
 }
 
 void
 Zone::sweepBreakpoints(FreeOp* fop)
 {
-    if (fop->runtime()->debuggerList.isEmpty())
+    if (!group() || group()->debuggerList().isEmpty())
         return;
 
     /*
      * Sweep all compartments in a zone at the same time, since there is no way
      * to iterate over the scripts belonging to a single compartment in a zone.
      */
 
     MOZ_ASSERT(isGCSweepingOrCompacting());
@@ -268,17 +280,17 @@ Zone::discardJitCode(FreeOp* fop, bool d
         jitZone()->cfgSpace()->lifoAlloc().freeAll();
     }
 }
 
 #ifdef JSGC_HASH_TABLE_CHECKS
 void
 JS::Zone::checkUniqueIdTableAfterMovingGC()
 {
-    for (UniqueIdMap::Enum e(uniqueIds_); !e.empty(); e.popFront())
+    for (UniqueIdMap::Enum e(uniqueIds()); !e.empty(); e.popFront())
         js::gc::CheckGCThingAfterMovingGC(e.front().key());
 }
 #endif
 
 uint64_t
 Zone::gcNumber()
 {
     // Zones in use by exclusive threads are not collected, and threads using
@@ -363,36 +375,36 @@ Zone::nextZone() const
 {
     MOZ_ASSERT(isOnList());
     return listNext_;
 }
 
 void
 Zone::clearTables()
 {
-    if (baseShapes.initialized())
-        baseShapes.clear();
-    if (initialShapes.initialized())
-        initialShapes.clear();
+    if (baseShapes().initialized())
+        baseShapes().clear();
+    if (initialShapes().initialized())
+        initialShapes().clear();
 }
 
 void
 Zone::fixupAfterMovingGC()
 {
     fixupInitialShapeTable();
 }
 
 bool
 Zone::addTypeDescrObject(JSContext* cx, HandleObject obj)
 {
     // Type descriptor objects are always tenured so we don't need post barriers
     // on the set.
     MOZ_ASSERT(!IsInsideNursery(obj));
 
-    if (!typeDescrObjects.put(obj)) {
+    if (!typeDescrObjects().put(obj)) {
         ReportOutOfMemory(cx);
         return false;
     }
 
     return true;
 }
 
 ZoneList::ZoneList()
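
The Zone constructor above now threads its ZoneGroup into wrappers such as ZoneGroupData<T>, and fields are read through ref() accessors. A minimal sketch of that wrapper pattern follows, assuming an invented ownership check; only the ZoneGroupData name corresponds to the patch.

// Minimal sketch of a group-protected field with a ref() access point.
#include <cassert>
#include <utility>

struct FakeZoneGroup {
    const void* ownerToken = nullptr;  // stand-in for "which thread owns this group"
};

inline const void* CurrentThreadTokenSketch() {
    static thread_local char token;
    return &token;
}

template <typename T>
class ZoneGroupDataSketch {
    FakeZoneGroup* group_;
    T value_;
  public:
    template <typename... Args>
    ZoneGroupDataSketch(FakeZoneGroup* group, Args&&... args)
      : group_(group), value_(std::forward<Args>(args)...) {}

    T& ref() {
        // A real wrapper can assert that the calling thread currently owns group_.
        assert(!group_ || group_->ownerToken == CurrentThreadTokenSketch());
        return value_;
    }
};

int main() {
    FakeZoneGroup group;
    group.ownerToken = CurrentThreadTokenSketch();
    ZoneGroupDataSketch<bool> isSystem(&group, false);  // cf. isSystem in Zone
    isSystem.ref() = true;                              // checked access via ref()
    assert(isSystem.ref());
    return 0;
}
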
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -27,17 +27,17 @@ class JitZone;
 } // namespace jit
 
 namespace gc {
 
 // This class encapsulates the data that determines when we need to do a zone GC.
 class ZoneHeapThreshold
 {
     // The "growth factor" for computing our next thresholds after a GC.
-    double gcHeapGrowthFactor_;
+    GCLockData<double> gcHeapGrowthFactor_;
 
     // GC trigger threshold for allocations on the GC heap.
     mozilla::Atomic<size_t, mozilla::Relaxed> gcTriggerBytes_;
 
   public:
     ZoneHeapThreshold()
       : gcHeapGrowthFactor_(3.0),
         gcTriggerBytes_(0)
@@ -134,20 +134,32 @@ namespace JS {
 // shapes within it are alive.
 //
 // We always guarantee that a zone has at least one live compartment by refusing
 // to delete the last compartment in a live zone.
 struct Zone : public JS::shadow::Zone,
               public js::gc::GraphNodeBase<JS::Zone>,
               public js::MallocProvider<JS::Zone>
 {
-    explicit Zone(JSRuntime* rt);
+    explicit Zone(JSRuntime* rt, js::ZoneGroup* group);
     ~Zone();
     MOZ_MUST_USE bool init(bool isSystem);
 
+  private:
+    js::ZoneGroup* const group_;
+  public:
+    js::ZoneGroup* group() const {
+        return group_;
+    }
+
+    // For JIT use.
+    static size_t offsetOfGroup() {
+        return offsetof(Zone, group_);
+    }
+
     void findOutgoingEdges(js::gc::ZoneComponentFinder& finder);
 
     void discardJitCode(js::FreeOp* fop, bool discardBaselineCode = true);
 
     void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                 size_t* typePool,
                                 size_t* baselineStubsOptimized,
                                 size_t* uniqueIdMap,
@@ -181,17 +193,17 @@ struct Zone : public JS::shadow::Zone,
         return runtimeFromMainThread()->onOutOfMemory(allocFunc, nbytes, reallocPtr);
     }
     void reportAllocationOverflow() { js::ReportAllocationOverflow(nullptr); }
 
     void beginSweepTypes(js::FreeOp* fop, bool releaseTypes);
 
     bool hasMarkedCompartments();
 
-    void scheduleGC() { MOZ_ASSERT(!runtimeFromMainThread()->isHeapBusy()); gcScheduled_ = true; }
+    void scheduleGC() { MOZ_ASSERT(!CurrentThreadIsHeapBusy()); gcScheduled_ = true; }
     void unscheduleGC() { gcScheduled_ = false; }
     bool isGCScheduled() { return gcScheduled_ && canCollect(); }
 
     void setPreservingCode(bool preserving) { gcPreserveCode_ = preserving; }
     bool isPreservingCode() const { return gcPreserveCode_; }
 
     bool canCollect();
 
@@ -201,46 +213,44 @@ struct Zone : public JS::shadow::Zone,
         NoGC,
         Mark,
         MarkGray,
         Sweep,
         Finished,
         Compact
     };
     void setGCState(GCState state) {
-        MOZ_ASSERT(runtimeFromMainThread()->isHeapBusy());
+        MOZ_ASSERT(CurrentThreadIsHeapBusy());
         MOZ_ASSERT_IF(state != NoGC, canCollect());
         gcState_ = state;
         if (state == Finished)
             notifyObservingDebuggers();
     }
 
     bool isCollecting() const {
-        if (runtimeFromMainThread()->isHeapCollecting())
-            return gcState_ != NoGC;
-        else
-            return needsIncrementalBarrier();
+        MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
+        return isCollectingFromAnyThread();
     }
 
     bool isCollectingFromAnyThread() const {
-        if (runtimeFromAnyThread()->isHeapCollecting())
+        if (CurrentThreadIsHeapCollecting())
             return gcState_ != NoGC;
         else
             return needsIncrementalBarrier();
     }
 
     // If this returns true, all object tracing must be done with a GC marking
     // tracer.
     bool requireGCTracer() const {
         JSRuntime* rt = runtimeFromAnyThread();
-        return rt->isHeapMajorCollecting() && !rt->gc.isHeapCompacting() && gcState_ != NoGC;
+        return CurrentThreadIsHeapMajorCollecting() && !rt->gc.isHeapCompacting() && gcState_ != NoGC;
     }
 
     bool isGCMarking() {
-        if (runtimeFromMainThread()->isHeapCollecting())
+        if (CurrentThreadIsHeapCollecting())
             return gcState_ == Mark || gcState_ == MarkGray;
         else
             return needsIncrementalBarrier();
     }
 
     GCState gcState() const { return gcState_; }
     bool wasGCStarted() const { return gcState_ != NoGC; }
     bool isGCMarkingBlack() { return gcState_ == Mark; }
@@ -276,31 +286,33 @@ struct Zone : public JS::shadow::Zone,
     // For testing purposes, return the index of the zone group which this zone
     // was swept in in the last GC.
     unsigned lastZoneGroupIndex() { return gcLastZoneGroupIndex; }
 #endif
 
     using DebuggerVector = js::Vector<js::Debugger*, 0, js::SystemAllocPolicy>;
 
   private:
-    DebuggerVector* debuggers;
+    js::ZoneGroupData<DebuggerVector*> debuggers;
 
     void sweepBreakpoints(js::FreeOp* fop);
     void sweepUniqueIds(js::FreeOp* fop);
     void sweepWeakMaps();
     void sweepCompartments(js::FreeOp* fop, bool keepAtleastOne, bool lastGC);
 
     js::jit::JitZone* createJitZone(JSContext* cx);
 
     bool isQueuedForBackgroundSweep() {
         return isOnList();
     }
 
     // Side map for storing a unique ids for cells, independent of address.
-    js::gc::UniqueIdMap uniqueIds_;
+    js::ZoneGroupData<js::gc::UniqueIdMap> uniqueIds_;
+
+    js::gc::UniqueIdMap& uniqueIds() { return uniqueIds_.ref(); }
 
   public:
     bool hasDebuggers() const { return debuggers && debuggers->length(); }
     DebuggerVector* getDebuggers() const { return debuggers; }
     DebuggerVector* getOrCreateDebuggers(JSContext* cx);
 
     void clearTables();
 
@@ -308,126 +320,163 @@ struct Zone : public JS::shadow::Zone,
      * When true, skip calling the metadata callback. We use this:
      * - to avoid invoking the callback recursively;
      * - to avoid observing lazy prototype setup (which confuses callbacks that
      *   want to use the types being set up!);
      * - to avoid attaching allocation stacks to allocation stack nodes, which
      *   is silly
      * And so on.
      */
-    bool suppressAllocationMetadataBuilder;
+    js::ZoneGroupData<bool> suppressAllocationMetadataBuilder;
 
     js::gc::ArenaLists arenas;
 
     js::TypeZone types;
 
+  private:
     /* Live weakmaps in this zone. */
-    mozilla::LinkedList<js::WeakMapBase> gcWeakMapList;
+    js::ZoneGroupData<mozilla::LinkedList<js::WeakMapBase>> gcWeakMapList_;
+  public:
+    mozilla::LinkedList<js::WeakMapBase>& gcWeakMapList() { return gcWeakMapList_.ref(); }
 
+    typedef js::Vector<JSCompartment*, 1, js::SystemAllocPolicy> CompartmentVector;
+
+  private:
     // The set of compartments in this zone.
-    typedef js::Vector<JSCompartment*, 1, js::SystemAllocPolicy> CompartmentVector;
-    CompartmentVector compartments;
+    js::UnprotectedData<CompartmentVector> compartments_;
+  public:
+    CompartmentVector& compartments() { return compartments_.ref(); }
 
     // This zone's gray roots.
     typedef js::Vector<js::gc::Cell*, 0, js::SystemAllocPolicy> GrayRootVector;
-    GrayRootVector gcGrayRoots;
+  private:
+    js::ZoneGroupData<GrayRootVector> gcGrayRoots_;
+  public:
+    GrayRootVector& gcGrayRoots() { return gcGrayRoots_.ref(); }
 
     // This zone's weak edges found via graph traversal during marking,
     // preserved for re-scanning during sweeping.
     using WeakEdges = js::Vector<js::gc::TenuredCell**, 0, js::SystemAllocPolicy>;
-    WeakEdges gcWeakRefs;
+  private:
+    js::ZoneGroupData<WeakEdges> gcWeakRefs_;
+  public:
+    WeakEdges& gcWeakRefs() { return gcWeakRefs_.ref(); }
 
+  private:
     // List of non-ephemeron weak containers to sweep during beginSweepingZoneGroup.
-    mozilla::LinkedList<WeakCache<void*>> weakCaches_;
+    js::ZoneGroupData<mozilla::LinkedList<WeakCache<void*>>> weakCaches_;
+  public:
+    mozilla::LinkedList<WeakCache<void*>>& weakCaches() { return weakCaches_.ref(); }
     void registerWeakCache(WeakCache<void*>* cachep) {
-        weakCaches_.insertBack(cachep);
+        weakCaches().insertBack(cachep);
     }
 
+  private:
     /*
      * Mapping from not yet marked keys to a vector of all values that the key
      * maps to in any live weak map.
      */
-    js::gc::WeakKeyTable gcWeakKeys;
+    js::ZoneGroupData<js::gc::WeakKeyTable> gcWeakKeys_;
+  public:
+    js::gc::WeakKeyTable& gcWeakKeys() { return gcWeakKeys_.ref(); }
 
+  private:
     // A set of edges from this zone to other zones.
     //
     // This is used during GC while calculating zone groups to record edges that
     // can't be determined by examining this zone by itself.
-    ZoneSet gcZoneGroupEdges;
+    js::ZoneGroupData<ZoneSet> gcZoneGroupEdges_;
+  public:
+    ZoneSet& gcZoneGroupEdges() { return gcZoneGroupEdges_.ref(); }
 
     // Keep track of all TypeDescr and related objects in this compartment.
     // This is used by the GC to trace them all first when compacting, since the
     // TypedObject trace hook may access these objects.
     //
     // There are no barriers here - the set contains only tenured objects so no
     // post-barrier is required, and these are weak references so no pre-barrier
     // is required.
     using TypeDescrObjectSet = js::GCHashSet<JSObject*,
                                              js::MovableCellHasher<JSObject*>,
                                              js::SystemAllocPolicy>;
-    JS::WeakCache<TypeDescrObjectSet> typeDescrObjects;
+  private:
+    js::ZoneGroupData<JS::WeakCache<TypeDescrObjectSet>> typeDescrObjects_;
+  public:
+    JS::WeakCache<TypeDescrObjectSet>& typeDescrObjects() { return typeDescrObjects_.ref(); }
 
     bool addTypeDescrObject(JSContext* cx, HandleObject obj);
 
     // Malloc counter to measure memory pressure for GC scheduling. It runs from
     // gcMaxMallocBytes down to zero. This counter should be used only when it's
     // not possible to know the size of a free.
     mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes;
 
     // GC trigger threshold for allocations on the C heap.
-    size_t gcMaxMallocBytes;
+    js::UnprotectedData<size_t> gcMaxMallocBytes;
 
     // Whether a GC has been triggered as a result of gcMallocBytes falling
     // below zero.
     //
     // This should be a bool, but Atomic only supports 32-bit and pointer-sized
     // types.
     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;
 
+  private:
     // Bitmap of atoms marked by this zone.
-    js::gc::AtomMarkingRuntime::Bitmap markedAtoms;
+    js::ZoneGroupOrGCTaskData<js::gc::AtomMarkingRuntime::Bitmap> markedAtoms_;
+  public:
+    js::gc::AtomMarkingRuntime::Bitmap& markedAtoms() { return markedAtoms_.ref(); }
 
     // Track heap usage under this Zone.
     js::gc::HeapUsage usage;
 
     // Thresholds used to trigger GC.
     js::gc::ZoneHeapThreshold threshold;
 
     // Amount of data to allocate before triggering a new incremental slice for
     // the current GC.
-    size_t gcDelayBytes;
+    js::UnprotectedData<size_t> gcDelayBytes;
 
+  private:
     // Shared Shape property tree.
-    js::PropertyTree propertyTree;
+    js::ZoneGroupData<js::PropertyTree> propertyTree_;
+  public:
+    js::PropertyTree& propertyTree() { return propertyTree_.ref(); }
 
+  private:
     // Set of all unowned base shapes in the Zone.
-    JS::WeakCache<js::BaseShapeSet> baseShapes;
+    js::ZoneGroupData<JS::WeakCache<js::BaseShapeSet>> baseShapes_;
+  public:
+    JS::WeakCache<js::BaseShapeSet>& baseShapes() { return baseShapes_.ref(); }
 
+  private:
     // Set of initial shapes in the Zone. For certain prototypes -- namely,
     // those of various builtin classes -- there are two entries: one for a
     // lookup via TaggedProto, and one for a lookup via JSProtoKey. See
     // InitialShapeProto.
-    JS::WeakCache<js::InitialShapeSet> initialShapes;
+    js::ZoneGroupData<JS::WeakCache<js::InitialShapeSet>> initialShapes_;
+  public:
+    JS::WeakCache<js::InitialShapeSet>& initialShapes() { return initialShapes_.ref(); }
 
 #ifdef JSGC_HASH_TABLE_CHECKS
     void checkInitialShapesTableAfterMovingGC();
     void checkBaseShapeTableAfterMovingGC();
 #endif
     void fixupInitialShapeTable();
     void fixupAfterMovingGC();
 
     // Per-zone data for use by an embedder.
-    void* data;
+    js::ZoneGroupData<void*> data;
 
-    bool isSystem;
+    js::ZoneGroupData<bool> isSystem;
 
     mozilla::Atomic<bool> usedByExclusiveThread;
 
 #ifdef DEBUG
-    unsigned gcLastZoneGroupIndex;
+    js::ZoneGroupData<unsigned> gcLastZoneGroupIndex;
 #endif
 
     static js::HashNumber UniqueIdToHash(uint64_t uid) {
         return js::HashNumber(uid >> 32) ^ js::HashNumber(uid & 0xFFFFFFFF);
     }
 
     // Creates a HashNumber based on getUniqueId. Returns false on OOM.
     MOZ_MUST_USE bool getHashCode(js::gc::Cell* cell, js::HashNumber* hashp) {
@@ -440,32 +489,32 @@ struct Zone : public JS::shadow::Zone,
 
     // Puts an existing UID in |uidp|, or creates a new UID for this Cell and
     // puts that into |uidp|. Returns false on OOM.
     MOZ_MUST_USE bool getUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
         MOZ_ASSERT(uidp);
         MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
 
         // Get an existing uid, if one has been set.
-        auto p = uniqueIds_.lookupForAdd(cell);
+        auto p = uniqueIds().lookupForAdd(cell);
         if (p) {
             *uidp = p->value();
             return true;
         }
 
         // Set a new uid on the cell.
         *uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
-        if (!uniqueIds_.add(p, cell, *uidp))
+        if (!uniqueIds().add(p, cell, *uidp))
             return false;
 
         // If the cell was in the nursery, hopefully unlikely, then we need to
         // tell the nursery about it so that it can sweep the uid if the thing
         // does not get tenured.
-        if (!runtimeFromAnyThread()->gc.nursery.addedUniqueIdToCell(cell)) {
-            uniqueIds_.remove(cell);
+        if (!group()->nursery().addedUniqueIdToCell(cell)) {
+            uniqueIds().remove(cell);
             return false;
         }
 
         return true;
     }
 
     js::HashNumber getHashCodeInfallible(js::gc::Cell* cell) {
         return UniqueIdToHash(getUniqueIdInfallible(cell));
@@ -477,45 +526,45 @@ struct Zone : public JS::shadow::Zone,
         if (!getUniqueId(cell, &uid))
             oomUnsafe.crash("failed to allocate uid");
         return uid;
     }
 
     // Return true if this cell has a UID associated with it.
     MOZ_MUST_USE bool hasUniqueId(js::gc::Cell* cell) {
         MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
-        return uniqueIds_.has(cell);
+        return uniqueIds().has(cell);
     }
 
     // Transfer an id from another cell. This must only be called on behalf of a
     // moving GC. This method is infallible.
     void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
         MOZ_ASSERT(src != tgt);
         MOZ_ASSERT(!IsInsideNursery(tgt));
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
         MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
-        uniqueIds_.rekeyIfMoved(src, tgt);
+        uniqueIds().rekeyIfMoved(src, tgt);
     }
 
     // Remove any unique id associated with this Cell.
     void removeUniqueId(js::gc::Cell* cell) {
         MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
-        uniqueIds_.remove(cell);
+        uniqueIds().remove(cell);
     }
 
     // When finished parsing off-thread, transfer any UIDs we created in the
     // off-thread zone into the target zone.
     void adoptUniqueIds(JS::Zone* source) {
         js::AutoEnterOOMUnsafeRegion oomUnsafe;
-        for (js::gc::UniqueIdMap::Enum e(source->uniqueIds_); !e.empty(); e.popFront()) {
-            MOZ_ASSERT(!uniqueIds_.has(e.front().key()));
-            if (!uniqueIds_.put(e.front().key(), e.front().value()))
+        for (js::gc::UniqueIdMap::Enum e(source->uniqueIds()); !e.empty(); e.popFront()) {
+            MOZ_ASSERT(!uniqueIds().has(e.front().key()));
+            if (!uniqueIds().put(e.front().key(), e.front().value()))
                 oomUnsafe.crash("failed to transfer unique ids from off-main-thread");
         }
-        source->uniqueIds_.clear();
+        source->uniqueIds().clear();
     }
 
     JSContext* contextFromMainThread() {
         return runtime_->contextFromMainThread();
     }
 
 #ifdef JSGC_HASH_TABLE_CHECKS
     // Assert that the UniqueId table has been redirected successfully.
@@ -525,28 +574,28 @@ struct Zone : public JS::shadow::Zone,
     bool keepShapeTables() const {
         return keepShapeTables_;
     }
     void setKeepShapeTables(bool b) {
         keepShapeTables_ = b;
     }
 
   private:
-    js::jit::JitZone* jitZone_;
+    js::ZoneGroupData<js::jit::JitZone*> jitZone_;
 
-    GCState gcState_;
-    bool gcScheduled_;
-    bool gcPreserveCode_;
-    bool jitUsingBarriers_;
-    bool keepShapeTables_;
+    js::UnprotectedData<GCState> gcState_;
+    js::UnprotectedData<bool> gcScheduled_;
+    js::ZoneGroupData<bool> gcPreserveCode_;
+    js::ZoneGroupData<bool> jitUsingBarriers_;
+    js::ZoneGroupData<bool> keepShapeTables_;
 
     // Allow zones to be linked into a list
     friend class js::gc::ZoneList;
     static Zone * const NotOnList;
-    Zone* listNext_;
+    js::ZoneGroupOrGCTaskData<Zone*> listNext_;
     bool isOnList() const;
     Zone* nextZone() const;
 
     friend bool js::CurrentThreadCanAccessZone(Zone* zone);
     friend class js::gc::GCRuntime;
 };
 
 } // namespace JS
@@ -560,60 +609,62 @@ namespace js {
 enum ZoneSelector {
     WithAtoms,
     SkipAtoms
 };
 
 class ZonesIter
 {
     gc::AutoEnterIteration iterMarker;
+    JSRuntime* rt;
     JS::Zone** it;
     JS::Zone** end;
 
   public:
-    ZonesIter(JSRuntime* rt, ZoneSelector selector) : iterMarker(&rt->gc) {
-        it = rt->gc.zones.begin();
-        end = rt->gc.zones.end();
-
-        if (selector == SkipAtoms) {
-            MOZ_ASSERT(atAtomsZone(rt));
-            it++;
-        }