Bug 1274612 - Check callers have exclusive access lock at compile time r=terrence
author: Jon Coppeard <jcoppeard@mozilla.com>
date: Mon, 23 May 2016 09:04:28 +0100
changeset 337755 99d94b060b6367c67af74d16bd9cc69afad0c05e
parent 337754 dc9495156e2baae9ced1f29177ae446cd364b2f6
child 337756 aa5838e85fc88a169fc8c9918160d422bab442d1
push id: 6249
push user: jlund@mozilla.com
push date: Mon, 01 Aug 2016 13:59:36 +0000
treeherder: mozilla-beta@bad9d4f5bf7e
reviewers: terrence
bugs: 1274612
milestone: 49.0a1
js/src/frontend/ParseMaps-inl.h
js/src/frontend/Parser.cpp
js/src/gc/FindSCCs.h
js/src/gc/GCInternals.h
js/src/gc/GCRuntime.h
js/src/gc/Iteration.cpp
js/src/gc/Nursery.cpp
js/src/gc/RootMarking.cpp
js/src/gc/Verifier.cpp
js/src/gc/Zone.h
js/src/jit/BaselineDebugModeOSR.cpp
js/src/jit/Ion.cpp
js/src/jit/JitCompartment.h
js/src/jsapi-tests/testFindSCCs.cpp
js/src/jsatom.cpp
js/src/jsatom.h
js/src/jscntxt.h
js/src/jscompartment.cpp
js/src/jscompartment.h
js/src/jsgc.cpp
js/src/jsscript.cpp
js/src/jsscript.h
js/src/vm/Debugger.cpp
js/src/vm/Debugger.h
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
js/src/vm/String.cpp
js/src/vm/Symbol.cpp
js/src/vm/Symbol.h
js/src/vm/UbiNodeCensus.cpp
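
The pattern applied throughout this patch: runtime state that must only be
touched while the exclusive access lock is held (the atoms table, the atoms
compartment, the symbol registry, the script data table, the parse map pool)
is now reachable only through accessors that take a reference to the
AutoLockForExclusiveAccess guard, so holding the lock becomes a capability
the compiler can check. A minimal sketch of the idiom, with hypothetical
names rather than code from this tree:

    #include <mutex>
    #include <vector>

    class AutoLockForExclusiveAccess;  // forward-declared lock token

    class Runtime {
        std::mutex exclusiveAccessLock_;
        std::vector<int> atoms_;  // state guarded by the lock

        friend class AutoLockForExclusiveAccess;

      public:
        // Callers prove they hold the lock by passing the guard itself.
        std::vector<int>& atoms(AutoLockForExclusiveAccess&) { return atoms_; }
    };

    class AutoLockForExclusiveAccess {
        std::lock_guard<std::mutex> guard_;

      public:
        explicit AutoLockForExclusiveAccess(Runtime& rt)
          : guard_(rt.exclusiveAccessLock_) {}
    };

    void addAtom(Runtime& rt, int atom) {
        AutoLockForExclusiveAccess lock(rt);
        rt.atoms(lock).push_back(atom);  // rt.atoms() alone will not compile
    }

The reference parameter costs nothing at runtime; it exists purely so that a
call site that never constructed the guard fails to compile, replacing the
runtime assertion currentThreadHasExclusiveAccess() that the patch deletes
below.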
--- a/js/src/frontend/ParseMaps-inl.h
+++ b/js/src/frontend/ParseMaps-inl.h
@@ -17,49 +17,49 @@ namespace frontend {
 template <class Map>
 inline bool
 AtomThingMapPtr<Map>::ensureMap(ExclusiveContext* cx)
 {
     if (map_)
         return true;
 
     AutoLockForExclusiveAccess lock(cx);
-    map_ = cx->parseMapPool().acquire<Map>();
+    map_ = cx->parseMapPool(lock).acquire<Map>();
     if (!map_)
         ReportOutOfMemory(cx);
     return !!map_;
 }
 
 template <class Map>
 inline void
 AtomThingMapPtr<Map>::releaseMap(ExclusiveContext* cx)
 {
     if (!map_)
         return;
 
     AutoLockForExclusiveAccess lock(cx);
-    cx->parseMapPool().release(map_);
+    cx->parseMapPool(lock).release(map_);
     map_ = nullptr;
 }
 
 template <typename ParseHandler>
 inline bool
 AtomDecls<ParseHandler>::init()
 {
     AutoLockForExclusiveAccess lock(cx);
-    map = cx->parseMapPool().acquire<AtomDefnListMap>();
+    map = cx->parseMapPool(lock).acquire<AtomDefnListMap>();
     return map;
 }
 
 template <typename ParseHandler>
 inline
 AtomDecls<ParseHandler>::~AtomDecls()
 {
     if (map) {
         AutoLockForExclusiveAccess lock(cx);
-        cx->parseMapPool().release(map);
+        cx->parseMapPool(lock).release(map);
     }
 }
 
 } /* namespace frontend */
 } /* namespace js */
 
 #endif /* frontend_ParseMaps_inl_h */
--- a/js/src/frontend/Parser.cpp
+++ b/js/src/frontend/Parser.cpp
@@ -696,17 +696,17 @@ Parser<ParseHandler>::Parser(ExclusiveCo
     checkOptionsCalled(false),
 #endif
     abortedSyntaxParse(false),
     isUnexpectedEOF_(false),
     handler(cx, *alloc, tokenStream, syntaxParser, lazyOuterFunction)
 {
     {
         AutoLockForExclusiveAccess lock(cx);
-        cx->perThreadData->addActiveCompilation();
+        cx->perThreadData->addActiveCompilation(lock);
     }
 
     // The Mozilla specific JSOPTION_EXTRA_WARNINGS option adds extra warnings
     // which are not generated if functions are parsed lazily. Note that the
     // standard "use strict" does not inhibit lazy parsing.
     if (options.extraWarningsOption)
         handler.disableSyntaxParser();
 
@@ -737,17 +737,17 @@ Parser<ParseHandler>::~Parser()
      * The parser can allocate enormous amounts of memory for large functions.
      * Eagerly free the memory now (which otherwise won't be freed until the
      * next GC) to avoid unnecessary OOMs.
      */
     alloc.freeAllIfHugeAndUnused();
 
     {
         AutoLockForExclusiveAccess lock(context);
-        context->perThreadData->removeActiveCompilation();
+        context->perThreadData->removeActiveCompilation(lock);
     }
 }
 
 template <typename ParseHandler>
 ObjectBox*
 Parser<ParseHandler>::newObjectBox(JSObject* obj)
 {
     MOZ_ASSERT(obj);
--- a/js/src/gc/FindSCCs.h
+++ b/js/src/gc/FindSCCs.h
@@ -2,16 +2,18 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef gc_FindSCCs_h
 #define gc_FindSCCs_h
 
+#include "mozilla/Move.h"
+
 #include "jsfriendapi.h"
 #include "jsutil.h"
 
 namespace js {
 namespace gc {
 
 template<class Node>
 struct GraphNodeBase
@@ -42,30 +44,38 @@ struct GraphNodeBase
 
 /*
  * Find the strongly connected components of a graph using Tarjan's algorithm,
  * and return them in topological order.
  *
  * Nodes derive from GraphNodeBase and implement findOutgoingEdges, which calls
  * finder.addEdgeTo to describe the outgoing edges from that node:
  *
+ * struct MyComponentFinder;
+ *
  * struct MyGraphNode : public GraphNodeBase
  * {
- *     void findOutgoingEdges(ComponentFinder<MyGraphNode>& finder)
+ *     void findOutgoingEdges(MyComponentFinder& finder)
  *     {
  *         for edge in my_outgoing_edges:
  *             if is_relevant(edge):
  *                 finder.addEdgeTo(edge.destination)
  *     }
  * }
  *
- * ComponentFinder<MyGraphNode> finder;
+ * struct MyComponentFinder : public ComponentFinder<MyGraphNode, MyComponentFinder>
+ * {
+ *     ...
+ * };
+ *
+ * MyComponentFinder finder;
  * finder.addNode(v);
  */
-template<class Node>
+
+template <typename Node, typename Derived>
 class ComponentFinder
 {
   public:
     explicit ComponentFinder(uintptr_t sl)
       : clock(1),
         stack(nullptr),
         firstComponent(nullptr),
         cur(nullptr),
@@ -151,17 +161,17 @@ class ComponentFinder
         int stackDummy;
         if (stackFull || !JS_CHECK_STACK_SIZE(stackLimit, &stackDummy)) {
             stackFull = true;
             return;
         }
 
         Node* old = cur;
         cur = v;
-        cur->findOutgoingEdges(*this);
+        cur->findOutgoingEdges(*static_cast<Derived*>(this));
         cur = old;
 
         if (stackFull)
             return;
 
         if (v->gcLowLink == v->gcDiscoveryTime) {
             Node* nextComponent = firstComponent;
             Node* w;
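
The updated comment above shows the new shape: ComponentFinder now takes the
derived finder class as a second template parameter (the curiously recurring
template pattern), and the traversal downcasts before invoking
findOutgoingEdges, so nodes receive the derived type, which may carry extra
state such as a lock reference. A compilable reduction with made-up types,
not the SpiderMonkey classes:

    template <typename Node, typename Derived>
    class ComponentFinder {
      public:
        void processNode(Node* v) {
            // Safe downcast: Derived is required to inherit from this base.
            v->findOutgoingEdges(*static_cast<Derived*>(this));
        }
    };

    struct MyFinder;

    struct MyNode {
        void findOutgoingEdges(MyFinder& finder);
    };

    struct MyFinder : public ComponentFinder<MyNode, MyFinder> {
        int edgesSeen = 0;  // state only the derived finder carries
    };

    void MyNode::findOutgoingEdges(MyFinder& finder) {
        ++finder.edgesSeen;  // nodes see the derived finder directly
    }

    int main() {
        MyFinder finder;
        MyNode node;
        finder.processNode(&node);
        return finder.edgesSeen == 1 ? 0 : 1;
    }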
--- a/js/src/gc/GCInternals.h
+++ b/js/src/gc/GCInternals.h
@@ -27,33 +27,36 @@ void FinishGC(JSRuntime* rt);
  * heap in order to trace through it...
  */
 class MOZ_RAII AutoTraceSession
 {
   public:
     explicit AutoTraceSession(JSRuntime* rt, JS::HeapState state = JS::HeapState::Tracing);
     ~AutoTraceSession();
 
+    AutoLockForExclusiveAccess lock;
+
   protected:
-    AutoLockForExclusiveAccess lock;
     JSRuntime* runtime;
 
   private:
     AutoTraceSession(const AutoTraceSession&) = delete;
     void operator=(const AutoTraceSession&) = delete;
 
     JS::HeapState prevState;
     AutoSPSEntry pseudoFrame;
 };
 
-struct MOZ_RAII AutoPrepareForTracing
+class MOZ_RAII AutoPrepareForTracing
 {
-    mozilla::Maybe<AutoTraceSession> session;
+    mozilla::Maybe<AutoTraceSession> session_;
 
+  public:
     AutoPrepareForTracing(JSRuntime* rt, ZoneSelector selector);
+    AutoTraceSession& session() { return session_.ref(); }
 };
 
 class IncrementalSafety
 {
     const char* reason_;
 
     explicit IncrementalSafety(const char* reason) : reason_(reason) {}
 
@@ -116,17 +119,17 @@ class MOZ_RAII AutoStopVerifyingBarriers
 struct MOZ_RAII AutoStopVerifyingBarriers
 {
     AutoStopVerifyingBarriers(JSRuntime*, bool) {}
 };
 #endif /* JS_GC_ZEAL */
 
 #ifdef JSGC_HASH_TABLE_CHECKS
 void CheckHashTablesAfterMovingGC(JSRuntime* rt);
-void CheckHeapAfterMovingGC(JSRuntime* rt);
+void CheckHeapAfterMovingGC(JSRuntime* rt, AutoLockForExclusiveAccess& lock);
 #endif
 
 struct MovingTracer : JS::CallbackTracer
 {
     explicit MovingTracer(JSRuntime* rt) : CallbackTracer(rt, TraceWeakMapKeysValues) {}
 
     void onObjectEdge(JSObject** objp) override;
     void onShapeEdge(Shape** shapep) override;
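
Making the session's lock public (and exposing AutoPrepareForTracing's
session through an accessor) is what lets callers write prep.session().lock,
as TraceRuntime does in the Iteration.cpp hunk below. In outline, under the
same hypothetical simplifications as above:

    struct AutoLockForExclusiveAccess {};  // stands in for the RAII guard

    struct AutoTraceSession {
        AutoLockForExclusiveAccess lock;  // now public: callers forward it
    };

    void markRuntime(AutoLockForExclusiveAccess&) { /* trace roots... */ }

    int main() {
        AutoTraceSession session;
        markRuntime(session.lock);  // the guard itself is the proof token
    }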
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -633,17 +633,18 @@ class GCRuntime
 
     void runDebugGC();
     inline void poke();
 
     enum TraceOrMarkRuntime {
         TraceRuntime,
         MarkRuntime
     };
-    void markRuntime(JSTracer* trc, TraceOrMarkRuntime traceOrMark = TraceRuntime);
+    void markRuntime(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
+                     AutoLockForExclusiveAccess& lock);
 
     void notifyDidPaint();
     void shrinkBuffers();
     void onOutOfMallocMemory();
     void onOutOfMallocMemory(const AutoLockGC& lock);
 
 #ifdef JS_GC_ZEAL
     const void* addressOfZealModeBits() { return &zealModeBits; }
@@ -911,85 +912,87 @@ class GCRuntime
 
     friend class BackgroundAllocTask;
     friend class AutoMaybeStartBackgroundAllocation;
     inline bool wantBackgroundAllocation(const AutoLockGC& lock) const;
     void startBackgroundAllocTaskIfIdle();
 
     void requestMajorGC(JS::gcreason::Reason reason);
     SliceBudget defaultBudget(JS::gcreason::Reason reason, int64_t millis);
-    void budgetIncrementalGC(SliceBudget& budget);
-    void resetIncrementalGC(const char* reason);
+    void budgetIncrementalGC(SliceBudget& budget, AutoLockForExclusiveAccess& lock);
+    void resetIncrementalGC(const char* reason, AutoLockForExclusiveAccess& lock);
 
     // Assert if the system state is such that we should never
     // receive a request to do GC work.
     void checkCanCallAPI();
 
     // Check if the system state is such that GC has been suppressed
     // or otherwise delayed.
     MOZ_MUST_USE bool checkIfGCAllowedInCurrentState(JS::gcreason::Reason reason);
 
     gcstats::ZoneGCStats scanZonesBeforeGC();
     void collect(bool nonincrementalByAPI, SliceBudget budget, JS::gcreason::Reason reason) JS_HAZ_GC_CALL;
     MOZ_MUST_USE bool gcCycle(bool nonincrementalByAPI, SliceBudget& budget,
                               JS::gcreason::Reason reason);
-    void incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason);
+    void incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason,
+                                 AutoLockForExclusiveAccess& lock);
 
     void pushZealSelectedObjects();
-    void purgeRuntime();
-    MOZ_MUST_USE bool beginMarkPhase(JS::gcreason::Reason reason);
+    void purgeRuntime(AutoLockForExclusiveAccess& lock);
+    MOZ_MUST_USE bool beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock);
     bool shouldPreserveJITCode(JSCompartment* comp, int64_t currentTime,
                                JS::gcreason::Reason reason);
     void bufferGrayRoots();
     void markCompartments();
     IncrementalProgress drainMarkStack(SliceBudget& sliceBudget, gcstats::Phase phase);
     template <class CompartmentIterT> void markWeakReferences(gcstats::Phase phase);
     void markWeakReferencesInCurrentGroup(gcstats::Phase phase);
     template <class ZoneIterT, class CompartmentIterT> void markGrayReferences(gcstats::Phase phase);
     void markBufferedGrayRoots(JS::Zone* zone);
     void markGrayReferencesInCurrentGroup(gcstats::Phase phase);
     void markAllWeakReferences(gcstats::Phase phase);
     void markAllGrayReferences(gcstats::Phase phase);
 
-    void beginSweepPhase(bool lastGC);
-    void findZoneGroups();
+    void beginSweepPhase(bool lastGC, AutoLockForExclusiveAccess& lock);
+    void findZoneGroups(AutoLockForExclusiveAccess& lock);
     MOZ_MUST_USE bool findZoneEdgesForWeakMaps();
     void getNextZoneGroup();
     void endMarkingZoneGroup();
-    void beginSweepingZoneGroup();
+    void beginSweepingZoneGroup(AutoLockForExclusiveAccess& lock);
     bool shouldReleaseObservedTypes();
     void endSweepingZoneGroup();
-    IncrementalProgress sweepPhase(SliceBudget& sliceBudget);
-    void endSweepPhase(bool lastGC);
+    IncrementalProgress sweepPhase(SliceBudget& sliceBudget, AutoLockForExclusiveAccess& lock);
+    void endSweepPhase(bool lastGC, AutoLockForExclusiveAccess& lock);
     void sweepZones(FreeOp* fop, bool lastGC);
     void decommitAllWithoutUnlocking(const AutoLockGC& lock);
     void decommitArenas(AutoLockGC& lock);
     void expireChunksAndArenas(bool shouldShrink, AutoLockGC& lock);
     void queueZonesForBackgroundSweep(ZoneList& zones);
     void sweepBackgroundThings(ZoneList& zones, LifoAlloc& freeBlocks, ThreadType threadType);
     void assertBackgroundSweepingFinished();
     bool shouldCompact();
     void beginCompactPhase();
-    IncrementalProgress compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget);
+    IncrementalProgress compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget,
+                                     AutoLockForExclusiveAccess& lock);
     void endCompactPhase(JS::gcreason::Reason reason);
     void sweepTypesAfterCompacting(Zone* zone);
     void sweepZoneAfterCompacting(Zone* zone);
     MOZ_MUST_USE bool relocateArenas(Zone* zone, JS::gcreason::Reason reason,
                                      Arena*& relocatedListOut, SliceBudget& sliceBudget);
     void updateTypeDescrObjects(MovingTracer* trc, Zone* zone);
     void updateCellPointers(MovingTracer* trc, Zone* zone, AllocKinds kinds, size_t bgTaskCount);
     void updateAllCellPointers(MovingTracer* trc, Zone* zone);
-    void updatePointersToRelocatedCells(Zone* zone);
+    void updatePointersToRelocatedCells(Zone* zone, AutoLockForExclusiveAccess& lock);
     void protectAndHoldArenas(Arena* arenaList);
     void unprotectHeldRelocatedArenas();
     void releaseRelocatedArenas(Arena* arenaList);
     void releaseRelocatedArenasWithoutUnlocking(Arena* arenaList, const AutoLockGC& lock);
     void finishCollection(JS::gcreason::Reason reason);
 
-    void computeNonIncrementalMarkingForValidation();
+    void computeNonIncrementalMarkingForValidation(AutoLockForExclusiveAccess& lock);
     void validateIncrementalMarking();
     void finishMarkingValidation();
 
 #ifdef DEBUG
     void checkForCompartmentMismatches();
 #endif
 
     void callFinalizeCallbacks(FreeOp* fop, JSFinalizeStatus status) const;
--- a/js/src/gc/Iteration.cpp
+++ b/js/src/gc/Iteration.cpp
@@ -21,17 +21,17 @@ void
 js::TraceRuntime(JSTracer* trc)
 {
     MOZ_ASSERT(!trc->isMarkingTracer());
 
     JSRuntime* rt = trc->runtime();
     rt->gc.evictNursery();
     AutoPrepareForTracing prep(rt, WithAtoms);
     gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_TRACE_HEAP);
-    rt->gc.markRuntime(trc);
+    rt->gc.markRuntime(trc, GCRuntime::TraceRuntime, prep.session().lock);
 }
 
 static void
 IterateCompartmentsArenasCells(JSRuntime* rt, Zone* zone, void* data,
                                JSIterateCompartmentCallback compartmentCallback,
                                IterateArenaCallback arenaCallback,
                                IterateCellCallback cellCallback)
 {
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -435,17 +435,17 @@ js::Nursery::collect(JSRuntime* rt, JS::
     sb.traceWholeCells(mover);
     TIME_END(traceWholeCells);
 
     TIME_START(traceGenericEntries);
     sb.traceGenericEntries(&mover);
     TIME_END(traceGenericEntries);
 
     TIME_START(markRuntime);
-    rt->gc.markRuntime(&mover);
+    rt->gc.markRuntime(&mover, GCRuntime::TraceRuntime, session.lock);
     TIME_END(markRuntime);
 
     TIME_START(markDebugger);
     {
         gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_MARK_ROOTS);
         Debugger::markAll(&mover);
     }
     TIME_END(markDebugger);
@@ -498,17 +498,17 @@ js::Nursery::collect(JSRuntime* rt, JS::
     if (rt->hasZealMode(ZealMode::CheckHashTablesOnMinorGC))
         CheckHashTablesAfterMovingGC(rt);
 #endif
     TIME_END(checkHashTables);
 
     TIME_START(checkHeap);
 #ifdef JS_GC_ZEAL
     if (rt->hasZealMode(ZealMode::CheckHeapOnMovingGC))
-        CheckHeapAfterMovingGC(rt);
+        CheckHeapAfterMovingGC(rt, session.lock);
 #endif
     TIME_END(checkHeap);
 
     // Resize the nursery.
     TIME_START(resize);
     double promotionRate = mover.tenuredSize / double(allocationEnd() - start());
     if (promotionRate > 0.05)
         growAllocableSpace();
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -267,17 +267,18 @@ PropertyDescriptor::trace(JSTracer* trc)
     if ((attrs & JSPROP_SETTER) && setter) {
         JSObject* tmp = JS_FUNC_TO_DATA_PTR(JSObject*, setter);
         TraceRoot(trc, &tmp, "Descriptor::set");
         setter = JS_DATA_TO_FUNC_PTR(JSSetterOp, tmp);
     }
 }
 
 void
-js::gc::GCRuntime::markRuntime(JSTracer* trc, TraceOrMarkRuntime traceOrMark)
+js::gc::GCRuntime::markRuntime(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
+                               AutoLockForExclusiveAccess& lock)
 {
     gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
 
     MOZ_ASSERT(traceOrMark == TraceRuntime || traceOrMark == MarkRuntime);
 
     MOZ_ASSERT(!rt->mainThread.suppressGC);
 
     if (traceOrMark == MarkRuntime) {
@@ -301,21 +302,21 @@ js::gc::GCRuntime::markRuntime(JSTracer*
         }
 
         MarkPersistentRooted(rt, trc);
     }
 
     if (!rt->isBeingDestroyed() && !rt->isHeapMinorCollecting()) {
         gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_RUNTIME_DATA);
 
-        if (traceOrMark == TraceRuntime || rt->atomsCompartment()->zone()->isCollecting()) {
+        if (traceOrMark == TraceRuntime || rt->atomsCompartment(lock)->zone()->isCollecting()) {
             MarkPermanentAtoms(trc);
-            MarkAtoms(trc);
+            MarkAtoms(trc, lock);
             MarkWellKnownSymbols(trc);
-            jit::JitRuntime::Mark(trc);
+            jit::JitRuntime::Mark(trc, lock);
         }
     }
 
     if (rt->isHeapMinorCollecting())
         jit::JitRuntime::MarkJitcodeGlobalTableUnconditionally(trc);
 
     for (ContextIter acx(rt); !acx.done(); acx.next())
         acx->mark(trc);
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -201,17 +201,17 @@ gc::GCRuntime::startVerifyPreBarriers()
         goto oom;
 
     /* Create the root node. */
     trc->curnode = MakeNode(trc, nullptr, JS::TraceKind(0));
 
     incrementalState = MARK_ROOTS;
 
     /* Make all the roots be edges emanating from the root node. */
-    markRuntime(trc);
+    markRuntime(trc, TraceRuntime, prep.session().lock);
 
     VerifyNode* node;
     node = trc->curnode;
     if (trc->edgeptr == trc->term)
         goto oom;
 
     /* For each edge, make a node for it if one doesn't already exist. */
     while ((char*)node < trc->edgeptr) {
@@ -416,17 +416,17 @@ js::gc::GCRuntime::finishVerifier()
 
 #ifdef JSGC_HASH_TABLE_CHECKS
 
 class CheckHeapTracer : public JS::CallbackTracer
 {
   public:
     explicit CheckHeapTracer(JSRuntime* rt);
     bool init();
-    bool check();
+    bool check(AutoLockForExclusiveAccess& lock);
 
   private:
     void onChild(const JS::GCCellPtr& thing) override;
 
     struct WorkItem {
         WorkItem(JS::GCCellPtr thing, const char* name, int parentIndex)
           : thing(thing), name(name), parentIndex(parentIndex), processed(false)
         {}
@@ -491,21 +491,21 @@ CheckHeapTracer::onChild(const JS::GCCel
     }
 
     WorkItem item(thing, contextName(), parentIndex);
     if (!stack.append(item))
         oom = true;
 }
 
 bool
-CheckHeapTracer::check()
+CheckHeapTracer::check(AutoLockForExclusiveAccess& lock)
 {
     // The analysis thinks that markRuntime might GC by calling a GC callback.
     JS::AutoSuppressGCAnalysis nogc(rt);
-    rt->gc.markRuntime(this, GCRuntime::TraceRuntime);
+    rt->gc.markRuntime(this, GCRuntime::TraceRuntime, lock);
 
     while (!stack.empty()) {
         WorkItem item = stack.back();
         if (item.processed) {
             stack.popBack();
         } else {
             parentIndex = stack.length() - 1;
             TraceChildren(this, item.thing);
@@ -521,17 +521,17 @@ CheckHeapTracer::check()
                 failures, visited.count());
     }
     MOZ_RELEASE_ASSERT(failures == 0);
 
     return true;
 }
 
 void
-js::gc::CheckHeapAfterMovingGC(JSRuntime* rt)
+js::gc::CheckHeapAfterMovingGC(JSRuntime* rt, AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(rt->isHeapCollecting());
     CheckHeapTracer tracer(rt);
-    if (!tracer.init() || !tracer.check())
+    if (!tracer.init() || !tracer.check(lock))
         fprintf(stderr, "OOM checking heap\n");
 }
 
 #endif /* JSGC_HASH_TABLE_CHECKS */
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -57,16 +57,25 @@ class ZoneHeapThreshold
                                                          const GCSchedulingTunables& tunables,
                                                          const GCSchedulingState& state);
     static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
                                           JSGCInvocationKind gckind,
                                           const GCSchedulingTunables& tunables,
                                           const AutoLockGC& lock);
 };
 
+struct ZoneComponentFinder : public ComponentFinder<JS::Zone, ZoneComponentFinder>
+{
+    ZoneComponentFinder(uintptr_t sl, AutoLockForExclusiveAccess& lock)
+      : ComponentFinder<JS::Zone, ZoneComponentFinder>(sl), lock(lock)
+    {}
+
+    AutoLockForExclusiveAccess& lock;
+};
+
 struct UniqueIdGCPolicy {
     static bool needsSweep(Cell** cell, uint64_t* value);
 };
 
 // Maps a Cell* to a unique, 64bit id.
 using UniqueIdMap = GCHashMap<Cell*,
                               uint64_t,
                               PointerHasher<Cell*, 3>,
@@ -124,17 +133,17 @@ namespace JS {
 struct Zone : public JS::shadow::Zone,
               public js::gc::GraphNodeBase<JS::Zone>,
               public js::MallocProvider<JS::Zone>
 {
     explicit Zone(JSRuntime* rt);
     ~Zone();
     MOZ_MUST_USE bool init(bool isSystem);
 
-    void findOutgoingEdges(js::gc::ComponentFinder<JS::Zone>& finder);
+    void findOutgoingEdges(js::gc::ZoneComponentFinder& finder);
 
     void discardJitCode(js::FreeOp* fop);
 
     void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                 size_t* typePool,
                                 size_t* baselineStubsOptimized,
                                 size_t* uniqueIdMap);
 
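
Storing the lock reference on ZoneComponentFinder is what makes the CRTP
change pay off: Tarjan's traversal hands each zone the derived finder, and
Zone::findOutgoingEdges (in the jsgc.cpp hunk below) pulls the lock back out
to call rt->atomsCompartment(finder.lock). Schematically, with hypothetical
types:

    struct AutoLockForExclusiveAccess {};

    struct ZoneComponentFinder {
        explicit ZoneComponentFinder(AutoLockForExclusiveAccess& lock)
          : lock(lock) {}
        AutoLockForExclusiveAccess& lock;  // threaded through the graph walk
    };

    struct Runtime {
        int atomsZoneState = 0;
        // Guarded accessor in the style of atomsCompartment(lock).
        int& atomsZone(AutoLockForExclusiveAccess&) { return atomsZoneState; }
    };

    void findOutgoingEdges(Runtime& rt, ZoneComponentFinder& finder) {
        rt.atomsZone(finder.lock) = 1;  // unreachable without the token
    }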
--- a/js/src/jit/BaselineDebugModeOSR.cpp
+++ b/js/src/jit/BaselineDebugModeOSR.cpp
@@ -1047,17 +1047,17 @@ BaselineFrame::deleteDebugModeOSRInfo()
     flags_ &= ~HAS_DEBUG_MODE_OSR_INFO;
 }
 
 JitCode*
 JitRuntime::getBaselineDebugModeOSRHandler(JSContext* cx)
 {
     if (!baselineDebugModeOSRHandler_) {
         AutoLockForExclusiveAccess lock(cx);
-        AutoCompartment ac(cx, cx->runtime()->atomsCompartment());
+        AutoCompartment ac(cx, cx->runtime()->atomsCompartment(lock));
         uint32_t offset;
         if (JitCode* code = generateBaselineDebugModeOSRHandler(cx, &offset)) {
             baselineDebugModeOSRHandler_ = code;
             baselineDebugModeOSRHandlerNoFrameRegPopAddr_ = code->raw() + offset;
         }
     }
 
     return baselineDebugModeOSRHandler_;
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -193,21 +193,19 @@ JitRuntime::~JitRuntime()
     freeOsrTempData();
 
     // By this point, the jitcode global table should be empty.
     MOZ_ASSERT_IF(jitcodeGlobalTable_, jitcodeGlobalTable_->empty());
     js_delete(jitcodeGlobalTable_);
 }
 
 bool
-JitRuntime::initialize(JSContext* cx)
+JitRuntime::initialize(JSContext* cx, AutoLockForExclusiveAccess& lock)
 {
-    MOZ_ASSERT(cx->runtime()->currentThreadHasExclusiveAccess());
-
-    AutoCompartment ac(cx, cx->atomsCompartment());
+    AutoCompartment ac(cx, cx->atomsCompartment(lock));
 
     JitContext jctx(cx, nullptr);
 
     if (!cx->compartment()->ensureJitCompartmentExists(cx))
         return false;
 
     functionWrappers_ = cx->new_<VMWrapperMap>(cx);
     if (!functionWrappers_ || !functionWrappers_->init())
@@ -331,17 +329,17 @@ JitRuntime::initialize(JSContext* cx)
 
 JitCode*
 JitRuntime::debugTrapHandler(JSContext* cx)
 {
     if (!debugTrapHandler_) {
         // JitRuntime code stubs are shared across compartments and have to
         // be allocated in the atoms compartment.
         AutoLockForExclusiveAccess lock(cx);
-        AutoCompartment ac(cx, cx->runtime()->atomsCompartment());
+        AutoCompartment ac(cx, cx->runtime()->atomsCompartment(lock));
         debugTrapHandler_ = generateDebugTrapHandler(cx);
     }
     return debugTrapHandler_;
 }
 
 uint8_t*
 JitRuntime::allocateOsrTempData(size_t size)
 {
@@ -582,20 +580,20 @@ jit::LazyLinkTopActivation(JSContext* cx
 
     MOZ_ASSERT(calleeScript->hasBaselineScript());
     MOZ_ASSERT(calleeScript->baselineOrIonRawPointer());
 
     return calleeScript->baselineOrIonRawPointer();
 }
 
 /* static */ void
-JitRuntime::Mark(JSTracer* trc)
+JitRuntime::Mark(JSTracer* trc, AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(!trc->runtime()->isHeapMinorCollecting());
-    Zone* zone = trc->runtime()->atomsCompartment()->zone();
+    Zone* zone = trc->runtime()->atomsCompartment(lock)->zone();
     for (gc::ZoneCellIterUnderGC i(zone, gc::AllocKind::JITCODE); !i.done(); i.next()) {
         JitCode* code = i.get<JitCode>();
         TraceRoot(trc, &code, "wrapper");
     }
 }
 
 /* static */ void
 JitRuntime::MarkJitcodeGlobalTableUnconditionally(JSTracer* trc)
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -202,22 +202,22 @@ class JitRuntime
     JitCode* generateFreeStub(JSContext* cx);
     JitCode* generateDebugTrapHandler(JSContext* cx);
     JitCode* generateBaselineDebugModeOSRHandler(JSContext* cx, uint32_t* noFrameRegPopOffsetOut);
     JitCode* generateVMWrapper(JSContext* cx, const VMFunction& f);
 
   public:
     explicit JitRuntime(JSRuntime* rt);
     ~JitRuntime();
-    bool initialize(JSContext* cx);
+    bool initialize(JSContext* cx, js::AutoLockForExclusiveAccess& lock);
 
     uint8_t* allocateOsrTempData(size_t size);
     void freeOsrTempData();
 
-    static void Mark(JSTracer* trc);
+    static void Mark(JSTracer* trc, js::AutoLockForExclusiveAccess& lock);
     static void MarkJitcodeGlobalTableUnconditionally(JSTracer* trc);
     static bool MarkJitcodeGlobalTableIteratively(JSTracer* trc);
     static void SweepJitcodeGlobalTable(JSRuntime* rt);
 
     ExecutableAllocator& execAlloc() {
         return execAlloc_;
     }
     ExecutableAllocator& backedgeExecAlloc() {
--- a/js/src/jsapi-tests/testFindSCCs.cpp
+++ b/js/src/jsapi-tests/testFindSCCs.cpp
@@ -11,28 +11,37 @@
 #include "gc/FindSCCs.h"
 #include "jsapi-tests/tests.h"
 
 static const unsigned MaxVertices = 10;
 
 using js::gc::GraphNodeBase;
 using js::gc::ComponentFinder;
 
+struct TestComponentFinder;
+
 struct TestNode : public GraphNodeBase<TestNode>
 {
     unsigned   index;
     bool       hasEdge[MaxVertices];
 
-    void findOutgoingEdges(ComponentFinder<TestNode>& finder);
+    void findOutgoingEdges(TestComponentFinder& finder);
+};
+
+struct TestComponentFinder : public ComponentFinder<TestNode, TestComponentFinder>
+{
+    TestComponentFinder(uintptr_t sl)
+      : ComponentFinder<TestNode, TestComponentFinder>(sl)
+    {}
 };
 
 static TestNode Vertex[MaxVertices];
 
 void
-TestNode::findOutgoingEdges(ComponentFinder<TestNode>& finder)
+TestNode::findOutgoingEdges(TestComponentFinder& finder)
 {
     for (unsigned i = 0; i < MaxVertices; ++i) {
         if (hasEdge[i])
             finder.addEdgeTo(&Vertex[i]);
     }
 }
 
 BEGIN_TEST(testFindSCCs)
@@ -122,17 +131,17 @@ BEGIN_TEST(testFindSCCs)
     CHECK(group(1, -1));
     CHECK(remaining(-1));
     CHECK(end());
 
     return true;
 }
 
 unsigned vertex_count;
-ComponentFinder<TestNode>* finder;
+TestComponentFinder* finder;
 TestNode* resultsList;
 
 void setup(unsigned count)
 {
     vertex_count = count;
     for (unsigned i = 0; i < MaxVertices; ++i) {
         TestNode& v = Vertex[i];
         v.gcNextGraphNode = nullptr;
@@ -143,17 +152,17 @@ void setup(unsigned count)
 
 void edge(unsigned src_index, unsigned dest_index)
 {
     Vertex[src_index].hasEdge[dest_index] = true;
 }
 
 void run()
 {
-    finder = new ComponentFinder<TestNode>(rt->mainThread.nativeStackLimit[js::StackForSystemCode]);
+    finder = new TestComponentFinder(rt->mainThread.nativeStackLimit[js::StackForSystemCode]);
     for (unsigned i = 0; i < vertex_count; ++i)
         finder->addNode(&Vertex[i]);
     resultsList = finder->getResultsList();
 }
 
 bool group(int vertex, ...)
 {
     TestNode* v = resultsList;
@@ -197,32 +206,40 @@ bool end()
     CHECK(resultsList == nullptr);
 
     delete finder;
     finder = nullptr;
     return true;
 }
 END_TEST(testFindSCCs)
 
+struct TestComponentFinder2;
+
 struct TestNode2 : public GraphNodeBase<TestNode2>
 {
     TestNode2* edge;
 
-    TestNode2() :
-        edge(nullptr)
-    {
-    }
+    TestNode2() : edge(nullptr) {}
+    void findOutgoingEdges(TestComponentFinder2& finder);
+};
 
-    void
-    findOutgoingEdges(ComponentFinder<TestNode2>& finder) {
-        if (edge)
-            finder.addEdgeTo(edge);
-    }
+struct TestComponentFinder2 : public ComponentFinder<TestNode2, TestComponentFinder2>
+{
+    TestComponentFinder2(uintptr_t sl)
+      : ComponentFinder<TestNode2, TestComponentFinder2>(sl)
+    {}
 };
 
+void
+TestNode2::findOutgoingEdges(TestComponentFinder2& finder)
+{
+    if (edge)
+        finder.addEdgeTo(edge);
+}
+
 BEGIN_TEST(testFindSCCsStackLimit)
 {
     /*
      * Test what happens if recursion causes the stack to become full while
      * traversing the graph.
      *
      * The test case is a large number of vertices, almost all of which are
      * arranged in a linear chain.  The last few are left unlinked to exercise
@@ -234,17 +251,17 @@ BEGIN_TEST(testFindSCCsStackLimit)
      */
     const unsigned max = 1000000;
     const unsigned initial = 10;
 
     TestNode2* vertices = new TestNode2[max]();
     for (unsigned i = initial; i < (max - 10); ++i)
         vertices[i].edge = &vertices[i + 1];
 
-    ComponentFinder<TestNode2> finder(rt->mainThread.nativeStackLimit[js::StackForSystemCode]);
+    TestComponentFinder2 finder(rt->mainThread.nativeStackLimit[js::StackForSystemCode]);
     for (unsigned i = 0; i < max; ++i)
         finder.addNode(&vertices[i]);
 
     TestNode2* r = finder.getResultsList();
     CHECK(r);
     TestNode2* v = r;
 
     unsigned count = 0;
--- a/js/src/jsatom.cpp
+++ b/js/src/jsatom.cpp
@@ -192,20 +192,20 @@ JSRuntime::finishAtoms()
     staticStrings = nullptr;
     commonNames = nullptr;
     permanentAtoms = nullptr;
     wellKnownSymbols = nullptr;
     emptyString = nullptr;
 }
 
 void
-js::MarkAtoms(JSTracer* trc)
+js::MarkAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock)
 {
     JSRuntime* rt = trc->runtime();
-    for (AtomSet::Enum e(rt->atoms()); !e.empty(); e.popFront()) {
+    for (AtomSet::Enum e(rt->atoms(lock)); !e.empty(); e.popFront()) {
         const AtomStateEntry& entry = e.front();
         if (!entry.isPinned())
             continue;
 
         JSAtom* atom = entry.asPtrUnbarriered();
         TraceRoot(trc, &atom, "interned_atom");
         MOZ_ASSERT(entry.asPtrUnbarriered() == atom);
     }
@@ -291,17 +291,17 @@ AtomIsPinned(JSContext* cx, JSAtom* atom
     /* Likewise, permanent strings are considered to be interned. */
     MOZ_ASSERT(cx->isPermanentAtomsInitialized());
     AtomSet::Ptr p = cx->permanentAtoms().readonlyThreadsafeLookup(lookup);
     if (p)
         return true;
 
     AutoLockForExclusiveAccess lock(cx);
 
-    p = cx->runtime()->atoms().lookup(lookup);
+    p = cx->runtime()->atoms(lock).lookup(lookup);
     if (!p)
         return false;
 
     return p->isPinned();
 }
 
 /* |tbchars| must not point into an inline or short string. */
 template <typename CharT>
@@ -322,25 +322,25 @@ AtomizeAndCopyChars(ExclusiveContext* cx
     if (cx->isPermanentAtomsInitialized()) {
         AtomSet::Ptr pp = cx->permanentAtoms().readonlyThreadsafeLookup(lookup);
         if (pp)
             return pp->asPtr();
     }
 
     AutoLockForExclusiveAccess lock(cx);
 
-    AtomSet& atoms = cx->atoms();
+    AtomSet& atoms = cx->atoms(lock);
     AtomSet::AddPtr p = atoms.lookupForAdd(lookup);
     if (p) {
         JSAtom* atom = p->asPtr();
         p->setPinned(bool(pin));
         return atom;
     }
 
-    AutoCompartment ac(cx, cx->atomsCompartment());
+    AutoCompartment ac(cx, cx->atomsCompartment(lock));
 
     JSFlatString* flat = NewStringCopyN<NoGC>(cx, tbchars, length);
     if (!flat) {
         // Grudgingly forgo last-ditch GC. The alternative would be to release
         // the lock, manually GC here, and retry from the top. If you fix this,
         // please also fix or comment the similar case in Symbol::new_.
         ReportOutOfMemory(cx);
         return nullptr;
@@ -380,17 +380,17 @@ js::AtomizeString(ExclusiveContext* cx, 
         /* Likewise, permanent atoms are always interned. */
         MOZ_ASSERT(cx->isPermanentAtomsInitialized());
         AtomSet::Ptr p = cx->permanentAtoms().readonlyThreadsafeLookup(lookup);
         if (p)
             return &atom;
 
         AutoLockForExclusiveAccess lock(cx);
 
-        p = cx->atoms().lookup(lookup);
+        p = cx->atoms(lock).lookup(lookup);
         MOZ_ASSERT(p); /* Non-static atom must exist in atom state set. */
         MOZ_ASSERT(p->asPtr() == &atom);
         MOZ_ASSERT(pin == PinAtom);
         p->setPinned(bool(pin));
         return &atom;
     }
 
     JSLinearString* linear = str->ensureLinear(cx);
--- a/js/src/jsatom.h
+++ b/js/src/jsatom.h
@@ -186,21 +186,23 @@ extern const char js_this_str[];
 extern const char js_try_str[];
 extern const char js_typeof_str[];
 extern const char js_void_str[];
 extern const char js_while_str[];
 extern const char js_with_str[];
 
 namespace js {
 
+class AutoLockForExclusiveAccess;
+
 /*
  * Atom tracing and garbage collection hooks.
  */
 void
-MarkAtoms(JSTracer* trc);
+MarkAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock);
 
 void
 MarkPermanentAtoms(JSTracer* trc);
 
 void
 MarkWellKnownSymbols(JSTracer* trc);
 
 /* N.B. must correspond to boolean tagging behavior. */
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -264,30 +264,30 @@ class ExclusiveContext : public ContextF
     // Zone local methods that can be used freely from an ExclusiveContext.
     inline js::LifoAlloc& typeLifoAlloc();
 
     // Current global. This is only safe to use within the scope of the
     // AutoCompartment from which it's called.
     inline js::Handle<js::GlobalObject*> global() const;
 
     // Methods to access runtime data that must be protected by locks.
-    frontend::ParseMapPool& parseMapPool() {
-        return runtime_->parseMapPool();
+    frontend::ParseMapPool& parseMapPool(AutoLockForExclusiveAccess& lock) {
+        return runtime_->parseMapPool(lock);
     }
-    AtomSet& atoms() {
-        return runtime_->atoms();
+    AtomSet& atoms(js::AutoLockForExclusiveAccess& lock) {
+        return runtime_->atoms(lock);
     }
-    JSCompartment* atomsCompartment() {
-        return runtime_->atomsCompartment();
+    JSCompartment* atomsCompartment(js::AutoLockForExclusiveAccess& lock) {
+        return runtime_->atomsCompartment(lock);
     }
-    SymbolRegistry& symbolRegistry() {
-        return runtime_->symbolRegistry();
+    SymbolRegistry& symbolRegistry(js::AutoLockForExclusiveAccess& lock) {
+        return runtime_->symbolRegistry(lock);
     }
-    ScriptDataTable& scriptDataTable() {
-        return runtime_->scriptDataTable();
+    ScriptDataTable& scriptDataTable(AutoLockForExclusiveAccess& lock) {
+        return runtime_->scriptDataTable(lock);
     }
 
     // Methods specific to any HelperThread for the context.
     bool addPendingCompileError(frontend::CompileError** err);
     void addPendingOverRecursed();
     void addPendingOutOfMemory();
 };
 
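
ExclusiveContext owns none of this state; each accessor simply forwards the
caller's lock token to the runtime, so the compile-time check survives the
wrapper layer. A hypothetical reduction of the forwarding pattern:

    struct AutoLockForExclusiveAccess {};

    struct Runtime {
        int pool = 0;
        int& parseMapPool(AutoLockForExclusiveAccess&) { return pool; }
    };

    struct ExclusiveContext {
        Runtime* runtime_;
        // Same signature as the runtime accessor: the token passes through.
        int& parseMapPool(AutoLockForExclusiveAccess& lock) {
            return runtime_->parseMapPool(lock);
        }
    };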
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -167,17 +167,17 @@ JSRuntime::createJitRuntime(JSContext* c
 
     // Protect jitRuntime_ from being observed (by InterruptRunningJitCode)
     // while it is being initialized. Unfortunately, initialization depends on
     // jitRuntime_ being non-null, so we can't just wait to assign jitRuntime_.
     JitRuntime::AutoPreventBackedgePatching apbp(cx->runtime(), jrt);
     jitRuntime_ = jrt;
 
     AutoEnterOOMUnsafeRegion noOOM;
-    if (!jitRuntime_->initialize(cx)) {
+    if (!jitRuntime_->initialize(cx, atomsLock)) {
         // Handling OOM here is complicated: if we delete jitRuntime_ now, we
         // will destroy the ExecutableAllocator, even though there may still be
         // JitCode instances holding references to ExecutablePools.
         noOOM.crash("OOM in createJitRuntime");
     }
 
     return jitRuntime_;
 }
--- a/js/src/jscompartment.h
+++ b/js/src/jscompartment.h
@@ -24,17 +24,17 @@
 
 namespace js {
 
 namespace jit {
 class JitCompartment;
 } // namespace jit
 
 namespace gc {
-template<class Node> class ComponentFinder;
+template <typename Node, typename Derived> class ComponentFinder;
 } // namespace gc
 
 namespace wasm {
 class Module;
 } // namespace wasm
 
 class ClonedBlockObject;
 class ScriptSourceObject;
@@ -651,17 +651,17 @@ struct JSCompartment
     void setNewObjectMetadata(JSContext* cx, JS::HandleObject obj);
     void clearObjectMetadata();
     const void* addressOfMetadataBuilder() const {
         return &allocationMetadataBuilder;
     }
 
     js::SavedStacks& savedStacks() { return savedStacks_; }
 
-    void findOutgoingEdges(js::gc::ComponentFinder<JS::Zone>& finder);
+    void findOutgoingEdges(js::gc::ZoneComponentFinder& finder);
 
     js::DtoaCache dtoaCache;
 
     // Random number generator for Math.random().
     mozilla::Maybe<mozilla::non_crypto::XorShift128PlusRNG> randomNumberGenerator;
 
     // Initialize randomNumberGenerator if needed.
     void ensureRandomNumberGenerator();
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -2710,20 +2710,19 @@ GCRuntime::updateAllCellPointers(MovingT
 
 /*
  * Update pointers to relocated cells by doing a full heap traversal and sweep.
  *
  * The latter is necessary to update weak references which are not marked as
  * part of the traversal.
  */
 void
-GCRuntime::updatePointersToRelocatedCells(Zone* zone)
+GCRuntime::updatePointersToRelocatedCells(Zone* zone, AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(zone->isGCCompacting());
-    MOZ_ASSERT(rt->currentThreadHasExclusiveAccess());
 
     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT_UPDATE);
     MovingTracer trc(rt);
 
     // Fixup compartment global pointers as these get accessed during marking.
     for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
         comp->fixupAfterMovingGC();
     JSCompartment::fixupCrossCompartmentWrappersAfterMovingGC(&trc);
@@ -2731,17 +2730,17 @@ GCRuntime::updatePointersToRelocatedCell
 
     // Iterate through all cells that can contain relocatable pointers to update
     // them. Since updating each cell is independent we try to parallelize this
     // as much as possible.
     updateAllCellPointers(&trc, zone);
 
     // Mark roots to update them.
     {
-        markRuntime(&trc, MarkRuntime);
+        markRuntime(&trc, MarkRuntime, lock);
 
         gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
         Debugger::markAll(&trc);
         Debugger::markIncomingCrossCompartmentEdges(&trc);
 
         WeakMapBase::markAll(zone, &trc);
         for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
             c->trace(&trc);
@@ -3849,33 +3848,33 @@ ArenaLists::checkEmptyArenaList(AllocKin
                         t, AllocKindToAscii(kind));
             }
         }
     }
 #endif // DEBUG
 }
 
 void
-GCRuntime::purgeRuntime()
+GCRuntime::purgeRuntime(AutoLockForExclusiveAccess& lock)
 {
     for (GCCompartmentsIter comp(rt); !comp.done(); comp.next())
         comp->purge();
 
     freeUnusedLifoBlocksAfterSweeping(&rt->tempLifoAlloc);
 
     rt->interpreterStack().purge(rt);
     rt->gsnCache.purge();
     rt->scopeCoordinateNameCache.purge();
     rt->newObjectCache.purge();
     rt->nativeIterCache.purge();
     rt->uncompressedSourceCache.purge();
     rt->evalCache.clear();
 
     if (!rt->hasActiveCompilations())
-        rt->parseMapPool().purgeAll();
+        rt->parseMapPool(lock).purgeAll();
 
     if (auto cache = rt->maybeThisRuntimeSharedImmutableStrings())
         cache->purge();
 }
 
 bool
 GCRuntime::shouldPreserveJITCode(JSCompartment* comp, int64_t currentTime,
                                  JS::gcreason::Reason reason)
@@ -4000,17 +3999,17 @@ RelazifyFunctions(Zone* zone, AllocKind 
     for (ZoneCellIterUnderGC i(zone, kind); !i.done(); i.next()) {
         JSFunction* fun = &i.get<JSObject>()->as<JSFunction>();
         if (fun->hasScript())
             fun->maybeRelazify(rt);
     }
 }
 
 bool
-GCRuntime::beginMarkPhase(JS::gcreason::Reason reason)
+GCRuntime::beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock)
 {
     int64_t currentTime = PRMJ_Now();
 
 #ifdef DEBUG
     if (fullCompartmentChecks)
         checkForCompartmentMismatches();
 #endif
 
@@ -4059,17 +4058,17 @@ GCRuntime::beginMarkPhase(JS::gcreason::
      * atoms. Otherwise, the non-collected zones could contain pointers
      * to atoms that we would miss.
      *
      * keepAtoms() will only change on the main thread, which we are currently
      * on. If the value of keepAtoms() changes between GC slices, then we'll
      * cancel the incremental GC. See IsIncrementalGCSafe.
      */
     if (isFull && !rt->keepAtoms()) {
-        Zone* atomsZone = rt->atomsCompartment()->zone();
+        Zone* atomsZone = rt->atomsCompartment(lock)->zone();
         if (atomsZone->isGCScheduled()) {
             MOZ_ASSERT(!atomsZone->isCollecting());
             atomsZone->setGCState(Zone::Mark);
             any = true;
         }
     }
 
     /* Check that at least one zone is scheduled for collection. */
@@ -4119,17 +4118,17 @@ GCRuntime::beginMarkPhase(JS::gcreason::
      * GC will be broken, as follows. If some object is reachable only through
      * some cache (say the dtoaCache) then it will not be part of the snapshot.
      * If we purge after root marking, then the mutator could obtain a pointer
      * to the object and start using it. This object might never be marked, so
      * a GC hazard would exist.
      */
     {
         gcstats::AutoPhase ap(stats, gcstats::PHASE_PURGE);
-        purgeRuntime();
+        purgeRuntime(lock);
     }
 
     /*
      * Mark phase.
      */
     gcstats::AutoPhase ap1(stats, gcstats::PHASE_MARK);
 
     {
@@ -4141,20 +4140,20 @@ GCRuntime::beginMarkPhase(JS::gcreason::
         }
 
         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
             /* Unmark all weak maps in the zones being collected. */
             WeakMapBase::unmarkZone(zone);
         }
 
         if (isFull)
-            UnmarkScriptData(rt);
-    }
-
-    markRuntime(gcmarker, MarkRuntime);
+            UnmarkScriptData(rt, lock);
+    }
+
+    markRuntime(gcmarker, MarkRuntime, lock);
 
     gcstats::AutoPhase ap2(stats, gcstats::PHASE_MARK_ROOTS);
 
     if (isIncremental) {
         gcstats::AutoPhase ap3(stats, gcstats::PHASE_BUFFER_GRAY_ROOTS);
         bufferGrayRoots();
     }
 
@@ -4317,17 +4316,17 @@ struct GCChunkHasher {
     }
 };
 
 class js::gc::MarkingValidator
 {
   public:
     explicit MarkingValidator(GCRuntime* gc);
     ~MarkingValidator();
-    void nonIncrementalMark();
+    void nonIncrementalMark(AutoLockForExclusiveAccess& lock);
     void validate();
 
   private:
     GCRuntime* gc;
     bool initialized;
 
     typedef HashMap<Chunk*, ChunkBitmap*, GCChunkHasher, SystemAllocPolicy> BitmapMap;
     BitmapMap map;
@@ -4343,17 +4342,17 @@ js::gc::MarkingValidator::~MarkingValida
     if (!map.initialized())
         return;
 
     for (BitmapMap::Range r(map.all()); !r.empty(); r.popFront())
         js_delete(r.front().value());
 }
 
 void
-js::gc::MarkingValidator::nonIncrementalMark()
+js::gc::MarkingValidator::nonIncrementalMark(AutoLockForExclusiveAccess& lock)
 {
     /*
      * Perform a non-incremental mark for all collecting zones and record
      * the results for later comparison.
      *
      * Currently this does not validate gray marking.
      */
 
@@ -4429,17 +4428,17 @@ js::gc::MarkingValidator::nonIncremental
 
             MOZ_ASSERT(gcmarker->isDrained());
             gcmarker->reset();
 
             for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next())
                 chunk->bitmap.clear();
         }
 
-        gc->markRuntime(gcmarker, GCRuntime::MarkRuntime);
+        gc->markRuntime(gcmarker, GCRuntime::MarkRuntime, lock);
 
         gc->incrementalState = MARK;
         auto unlimited = SliceBudget::unlimited();
         MOZ_RELEASE_ASSERT(gc->marker.drainMarkStack(unlimited));
     }
 
     gc->incrementalState = SWEEP;
     {
@@ -4548,24 +4547,24 @@ js::gc::MarkingValidator::validate()
             }
         }
     }
 }
 
 #endif // JS_GC_ZEAL
 
 void
-GCRuntime::computeNonIncrementalMarkingForValidation()
+GCRuntime::computeNonIncrementalMarkingForValidation(AutoLockForExclusiveAccess& lock)
 {
 #ifdef JS_GC_ZEAL
     MOZ_ASSERT(!markingValidator);
     if (isIncremental && hasZealMode(ZealMode::IncrementalMarkingValidator))
         markingValidator = js_new<MarkingValidator>(this);
     if (markingValidator)
-        markingValidator->nonIncrementalMark();
+        markingValidator->nonIncrementalMark(lock);
 #endif
 }
 
 void
 GCRuntime::validateIncrementalMarking()
 {
 #ifdef JS_GC_ZEAL
     if (markingValidator)
@@ -4610,19 +4609,19 @@ DropStringWrappers(JSRuntime* rt)
  * any strongly-connected component of this graph must be swept in the same
  * slice.
  *
  * Tarjan's algorithm is used to calculate the components.
  */
 namespace {
 struct AddOutgoingEdgeFunctor {
     bool needsEdge_;
-    ComponentFinder<JS::Zone>& finder_;
-
-    AddOutgoingEdgeFunctor(bool needsEdge, ComponentFinder<JS::Zone>& finder)
+    ZoneComponentFinder& finder_;
+
+    AddOutgoingEdgeFunctor(bool needsEdge, ZoneComponentFinder& finder)
       : needsEdge_(needsEdge), finder_(finder)
     {}
 
     using ReturnType = void;
     template <typename T>
     ReturnType operator()(T tp) {
         TenuredCell& other = (*tp)->asTenured();
 
@@ -4636,40 +4635,41 @@ struct AddOutgoingEdgeFunctor {
             if (zone->isGCMarking())
                 finder_.addEdgeTo(zone);
         }
     }
 };
 } // namespace (anonymous)
 
 void
-JSCompartment::findOutgoingEdges(ComponentFinder<JS::Zone>& finder)
+JSCompartment::findOutgoingEdges(ZoneComponentFinder& finder)
 {
     for (js::WrapperMap::Enum e(crossCompartmentWrappers); !e.empty(); e.popFront()) {
         CrossCompartmentKey& key = e.front().mutableKey();
         MOZ_ASSERT(!key.is<JSString*>());
         bool needsEdge = true;
         if (key.is<JSObject*>()) {
             TenuredCell& other = key.as<JSObject*>()->asTenured();
             needsEdge = !other.isMarked(BLACK) || other.isMarked(GRAY);
         }
         key.applyToWrapped(AddOutgoingEdgeFunctor(needsEdge, finder));
     }
 }
 
 void
-Zone::findOutgoingEdges(ComponentFinder<JS::Zone>& finder)
+Zone::findOutgoingEdges(ZoneComponentFinder& finder)
 {
     /*
      * Any compartment may have a pointer to an atom in the atoms
      * compartment, and these aren't in the cross compartment map.
      */
     JSRuntime* rt = runtimeFromMainThread();
-    if (rt->atomsCompartment()->zone()->isGCMarking())
-        finder.addEdgeTo(rt->atomsCompartment()->zone());
+    Zone* atomsZone = rt->atomsCompartment(finder.lock)->zone();
+    if (atomsZone->isGCMarking())
+        finder.addEdgeTo(atomsZone);
 
     for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next())
         comp->findOutgoingEdges(finder);
 
     for (ZoneSet::Range r = gcZoneGroupEdges.all(); !r.empty(); r.popFront()) {
         if (r.front()->isGCMarking())
             finder.addEdgeTo(r.front());
     }
@@ -4695,19 +4695,19 @@ GCRuntime::findZoneEdgesForWeakMaps()
         if (!WeakMapBase::findInterZoneEdges(zone))
             return false;
     }
 
     return true;
 }
 
 void
-GCRuntime::findZoneGroups()
-{
-    ComponentFinder<Zone> finder(rt->mainThread.nativeStackLimit[StackForSystemCode]);
+GCRuntime::findZoneGroups(AutoLockForExclusiveAccess& lock)
+{
+    ZoneComponentFinder finder(rt->mainThread.nativeStackLimit[StackForSystemCode], lock);
     if (!isIncremental || !findZoneEdgesForWeakMaps())
         finder.useOneComponent();
 
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
         MOZ_ASSERT(zone->isGCMarking());
         finder.addNode(zone);
     }
     zoneGroups = finder.getResultsList();
@@ -4736,17 +4736,17 @@ GCRuntime::getNextZoneGroup()
     }
 
     for (Zone* zone = currentZoneGroup; zone; zone = zone->nextNodeInGroup()) {
         MOZ_ASSERT(zone->isGCMarking());
         MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
     }
 
     if (!isIncremental)
-        ComponentFinder<Zone>::mergeGroups(currentZoneGroup);
+        ZoneComponentFinder::mergeGroups(currentZoneGroup);
 
     if (abortSweepAfterCurrentGroup) {
         MOZ_ASSERT(!isIncremental);
         for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
             MOZ_ASSERT(!zone->gcNextGraphComponent);
             MOZ_ASSERT(zone->isGCMarking());
             zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
             zone->setGCState(Zone::NoGC);
@@ -5155,17 +5155,17 @@ GCRuntime::startTask(GCParallelTask& tas
 void
 GCRuntime::joinTask(GCParallelTask& task, gcstats::Phase phase)
 {
     gcstats::AutoPhase ap(stats, task, phase);
     task.joinWithLockHeld();
 }
 
 void
-GCRuntime::beginSweepingZoneGroup()
+GCRuntime::beginSweepingZoneGroup(AutoLockForExclusiveAccess& lock)
 {
     /*
      * Begin sweeping the group of zones in gcCurrentZoneGroup,
      * performing actions that must be done before yielding to caller.
      */
 
     bool sweepingAtoms = false;
     for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
@@ -5306,17 +5306,17 @@ GCRuntime::beginSweepingZoneGroup()
             gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_BREAKPOINT);
             for (GCZoneGroupIter zone(rt); !zone.done(); zone.next())
                 zone->sweepUniqueIds(&fop);
         }
     }
 
     if (sweepingAtoms) {
         gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP_SYMBOL_REGISTRY);
-        rt->symbolRegistry().sweep();
+        rt->symbolRegistry(lock).sweep();
     }
 
     // Rejoin our off-main-thread tasks.
     if (sweepingAtoms) {
         AutoLockHelperThreadState helperLock;
         joinTask(sweepAtomsTask, gcstats::PHASE_SWEEP_ATOMS);
     }
 
@@ -5398,47 +5398,47 @@ GCRuntime::endSweepingZoneGroup()
     /* Reset the list of arenas marked as being allocated during sweep phase. */
     while (Arena* arena = arenasAllocatedDuringSweep) {
         arenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
         arena->unsetAllocDuringSweep();
     }
 }
 
 void
-GCRuntime::beginSweepPhase(bool destroyingRuntime)
+GCRuntime::beginSweepPhase(bool destroyingRuntime, AutoLockForExclusiveAccess& lock)
 {
     /*
      * Sweep phase.
      *
      * Finalize as we sweep, outside of lock but with rt->isHeapBusy()
      * true so that any attempt to allocate a GC-thing from a finalizer will
      * fail, rather than nest badly and leave the unmarked newborn to be swept.
      */
 
     MOZ_ASSERT(!abortSweepAfterCurrentGroup);
 
     AutoSetThreadIsSweeping threadIsSweeping;
 
     releaseHeldRelocatedArenas();
 
-    computeNonIncrementalMarkingForValidation();
+    computeNonIncrementalMarkingForValidation(lock);
 
     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
 
     sweepOnBackgroundThread =
         !destroyingRuntime && !TraceEnabled() && CanUseExtraThreads();
 
     releaseObservedTypes = shouldReleaseObservedTypes();
 
     AssertNoWrappersInGrayList(rt);
     DropStringWrappers(rt);
 
-    findZoneGroups();
+    findZoneGroups(lock);
     endMarkingZoneGroup();
-    beginSweepingZoneGroup();
+    beginSweepingZoneGroup(lock);
 }
 
 bool
 ArenaLists::foregroundFinalize(FreeOp* fop, AllocKind thingKind, SliceBudget& sliceBudget,
                                SortedArenaList& sweepList)
 {
     if (!arenaListsToSweep[thingKind] && incrementalSweptArenas.isEmpty())
         return true;
@@ -5503,17 +5503,17 @@ SweepArenaList(Arena** arenasToSweep, Sl
         if (sliceBudget.isOverBudget())
             return false;
     }
 
     return true;
 }
 
 GCRuntime::IncrementalProgress
-GCRuntime::sweepPhase(SliceBudget& sliceBudget)
+GCRuntime::sweepPhase(SliceBudget& sliceBudget, AutoLockForExclusiveAccess& lock)
 {
     AutoSetThreadIsSweeping threadIsSweeping;
 
     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
     FreeOp fop(rt);
 
     if (drainMarkStack(sliceBudget, gcstats::PHASE_SWEEP_MARK) == NotFinished)
         return NotFinished;
@@ -5607,22 +5607,22 @@ GCRuntime::sweepPhase(SliceBudget& slice
         }
 
         endSweepingZoneGroup();
         getNextZoneGroup();
         if (!currentZoneGroup)
             return Finished;
 
         endMarkingZoneGroup();
-        beginSweepingZoneGroup();
-    }
-}
-
-void
-GCRuntime::endSweepPhase(bool destroyingRuntime)
+        beginSweepingZoneGroup(lock);
+    }
+}
+
+void
+GCRuntime::endSweepPhase(bool destroyingRuntime, AutoLockForExclusiveAccess& lock)
 {
     AutoSetThreadIsSweeping threadIsSweeping;
 
     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
     FreeOp fop(rt);
 
     MOZ_ASSERT_IF(destroyingRuntime, !sweepOnBackgroundThread);
 
@@ -5657,17 +5657,17 @@ GCRuntime::endSweepPhase(bool destroying
 
         /*
          * Sweep script filenames after sweeping functions in the generic loop
          * above. In this way when a scripted function's finalizer destroys the
          * script and calls rt->destroyScriptHook, the hook can still access the
          * script's filename. See bug 323267.
          */
         if (isFull)
-            SweepScriptData(rt);
+            SweepScriptData(rt, lock);
 
         /* Clear out any small pools that we're hanging on to. */
         if (jit::JitRuntime* jitRuntime = rt->jitRuntime()) {
             jitRuntime->execAlloc().purge();
             jitRuntime->backedgeExecAlloc().purge();
         }
     }
 
@@ -5726,31 +5726,32 @@ GCRuntime::beginCompactPhase()
             zonesToMaybeCompact.append(zone);
     }
 
     MOZ_ASSERT(!relocatedArenasToRelease);
     startedCompacting = true;
 }
 
 GCRuntime::IncrementalProgress
-GCRuntime::compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget)
+GCRuntime::compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget,
+                        AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(rt->gc.nursery.isEmpty());
     assertBackgroundSweepingFinished();
     MOZ_ASSERT(startedCompacting);
 
     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT);
 
     while (!zonesToMaybeCompact.isEmpty()) {
         Zone* zone = zonesToMaybeCompact.front();
         MOZ_ASSERT(zone->isGCFinished());
         Arena* relocatedArenas = nullptr;
         if (relocateArenas(zone, reason, relocatedArenas, sliceBudget)) {
             zone->setGCState(Zone::Compact);
-            updatePointersToRelocatedCells(zone);
+            updatePointersToRelocatedCells(zone, lock);
             zone->setGCState(Zone::Finished);
         }
         if (ShouldProtectRelocatedArenas(reason))
             protectAndHoldArenas(relocatedArenas);
         else
             releaseRelocatedArenas(relocatedArenas);
         zonesToMaybeCompact.removeFront();
         if (sliceBudget.isOverBudget())
@@ -5761,17 +5762,17 @@ GCRuntime::compactPhase(JS::gcreason::Re
     rt->newObjectCache.purge();
     rt->nativeIterCache.purge();
 
 #ifdef DEBUG
     CheckHashTablesAfterMovingGC(rt);
 #endif
 #ifdef JS_GC_ZEAL
     if (rt->hasZealMode(ZealMode::CheckHeapOnMovingGC))
-        CheckHeapAfterMovingGC(rt);
+        CheckHeapAfterMovingGC(rt, lock);
 #endif
 
     return zonesToMaybeCompact.isEmpty() ? Finished : NotFinished;
 }
 
 void
 GCRuntime::endCompactPhase(JS::gcreason::Reason reason)
 {
@@ -5875,17 +5876,17 @@ AutoTraceSession::~AutoTraceSession()
         // Notify any helper threads waiting for the trace session to end.
         HelperThreadState().notifyAll(GlobalHelperThreadState::PRODUCER);
     } else {
         runtime->heapState_ = prevState;
     }
 }
 
 void
-GCRuntime::resetIncrementalGC(const char* reason)
+GCRuntime::resetIncrementalGC(const char* reason, AutoLockForExclusiveAccess& lock)
 {
     switch (incrementalState) {
       case NO_INCREMENTAL:
         return;
 
       case MARK: {
         /* Cancel any ongoing marking. */
         marker.reset();
@@ -5919,17 +5920,17 @@ GCRuntime::resetIncrementalGC(const char
         /* Finish sweeping the current zone group, then abort. */
         abortSweepAfterCurrentGroup = true;
 
         /* Don't perform any compaction after sweeping. */
         bool wasCompacting = isCompacting;
         isCompacting = false;
 
         auto unlimited = SliceBudget::unlimited();
-        incrementalCollectSlice(unlimited, JS::gcreason::RESET);
+        incrementalCollectSlice(unlimited, JS::gcreason::RESET, lock);
 
         isCompacting = wasCompacting;
 
         {
             gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
             rt->gc.waitBackgroundSweepOrAllocEnd();
         }
         break;
@@ -5940,32 +5941,32 @@ GCRuntime::resetIncrementalGC(const char
             gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
             rt->gc.waitBackgroundSweepOrAllocEnd();
         }
 
         bool wasCompacting = isCompacting;
         isCompacting = false;
 
         auto unlimited = SliceBudget::unlimited();
-        incrementalCollectSlice(unlimited, JS::gcreason::RESET);
+        incrementalCollectSlice(unlimited, JS::gcreason::RESET, lock);
 
         isCompacting = wasCompacting;
 
         break;
       }
 
       case COMPACT: {
         bool wasCompacting = isCompacting;
 
         isCompacting = true;
         startedCompacting = true;
         zonesToMaybeCompact.clear();
 
         auto unlimited = SliceBudget::unlimited();
-        incrementalCollectSlice(unlimited, JS::gcreason::RESET);
+        incrementalCollectSlice(unlimited, JS::gcreason::RESET, lock);
 
         isCompacting = wasCompacting;
         break;
       }
 
       default:
         MOZ_CRASH("Invalid incremental GC state");
     }
@@ -6059,20 +6060,19 @@ ShouldCleanUpEverything(JS::gcreason::Re
 {
     // During shutdown, we must clean everything up, for the sake of leak
     // detection. When a runtime has no contexts, or we're doing a GC before a
     // shutdown CC, those are strong indications that we're shutting down.
     return IsShutdownGC(reason) || gckind == GC_SHRINK;
 }
 
 void
-GCRuntime::incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason)
-{
-    MOZ_ASSERT(rt->currentThreadHasExclusiveAccess());
-
+GCRuntime::incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason,
+                                   AutoLockForExclusiveAccess& lock)
+{
     AutoGCSlice slice(rt);
 
     bool destroyingRuntime = (reason == JS::gcreason::DESTROY_RUNTIME);
 
     gc::State initialState = incrementalState;
 
     bool useZeal = false;
 #ifdef JS_GC_ZEAL
@@ -6106,17 +6106,17 @@ GCRuntime::incrementalCollectSlice(Slice
         isCompacting = shouldCompact();
         lastMarkSlice = false;
 
         incrementalState = MARK_ROOTS;
 
         MOZ_FALLTHROUGH;
 
       case MARK_ROOTS:
-        if (!beginMarkPhase(reason)) {
+        if (!beginMarkPhase(reason, lock)) {
             incrementalState = NO_INCREMENTAL;
             return;
         }
 
         if (!destroyingRuntime)
             pushZealSelectedObjects();
 
         incrementalState = MARK;
@@ -6154,34 +6154,34 @@ GCRuntime::incrementalCollectSlice(Slice
         }
 
         incrementalState = SWEEP;
 
         /*
          * This runs to completion, but we don't continue if the budget is
          * now exhausted.
          */
-        beginSweepPhase(destroyingRuntime);
+        beginSweepPhase(destroyingRuntime, lock);
         if (budget.isOverBudget())
             break;
 
         /*
          * Always yield here when running in incremental multi-slice zeal
          * mode, so RunDebugGC can reset the slice budget.
          */
         if (isIncremental && useZeal && hasZealMode(ZealMode::IncrementalMultipleSlices))
             break;
 
         MOZ_FALLTHROUGH;
 
       case SWEEP:
-        if (sweepPhase(budget) == NotFinished)
+        if (sweepPhase(budget, lock) == NotFinished)
             break;
 
-        endSweepPhase(destroyingRuntime);
+        endSweepPhase(destroyingRuntime, lock);
 
         incrementalState = FINALIZE;
 
         /* Yield before compacting since it is not incremental. */
         if (isCompacting && isIncremental)
             break;
 
         MOZ_FALLTHROUGH;
@@ -6216,17 +6216,17 @@ GCRuntime::incrementalCollectSlice(Slice
 
         MOZ_FALLTHROUGH;
 
       case COMPACT:
         if (isCompacting) {
             if (!startedCompacting)
                 beginCompactPhase();
 
-            if (compactPhase(reason, budget) == NotFinished)
+            if (compactPhase(reason, budget, lock) == NotFinished)
                 break;
 
             endCompactPhase(reason);
         }
 
         finishCollection(reason);
 
         incrementalState = NO_INCREMENTAL;
@@ -6247,28 +6247,28 @@ gc::IsIncrementalGCSafe(JSRuntime* rt)
 
     if (!rt->gc.isIncrementalGCAllowed())
         return IncrementalSafety::Unsafe("incremental permanently disabled");
 
     return IncrementalSafety::Safe();
 }
 
 void
-GCRuntime::budgetIncrementalGC(SliceBudget& budget)
+GCRuntime::budgetIncrementalGC(SliceBudget& budget, AutoLockForExclusiveAccess& lock)
 {
     IncrementalSafety safe = IsIncrementalGCSafe(rt);
     if (!safe) {
-        resetIncrementalGC(safe.reason());
+        resetIncrementalGC(safe.reason(), lock);
         budget.makeUnlimited();
         stats.nonincremental(safe.reason());
         return;
     }
 
     if (mode != JSGC_MODE_INCREMENTAL) {
-        resetIncrementalGC("GC mode change");
+        resetIncrementalGC("GC mode change", lock);
         budget.makeUnlimited();
         stats.nonincremental("GC mode");
         return;
     }
 
     if (isTooMuchMalloc()) {
         budget.makeUnlimited();
         stats.nonincremental("malloc bytes trigger");
@@ -6286,17 +6286,17 @@ GCRuntime::budgetIncrementalGC(SliceBudg
 
         if (zone->isTooMuchMalloc()) {
             budget.makeUnlimited();
             stats.nonincremental("malloc bytes trigger");
         }
     }
 
     if (reset)
-        resetIncrementalGC("zone change");
+        resetIncrementalGC("zone change", lock);
 }
 
 namespace {
 
 class AutoScheduleZonesForGC
 {
     JSRuntime* rt_;
 
@@ -6377,31 +6377,31 @@ GCRuntime::gcCycle(bool nonincrementalBy
     State prevState = incrementalState;
 
     if (nonincrementalByAPI) {
         // Reset any in progress incremental GC if this was triggered via the
         // API. This isn't required for correctness, but sometimes during tests
         // the caller expects this GC to collect certain objects, and we need
         // to make sure to collect everything possible.
         if (reason != JS::gcreason::ALLOC_TRIGGER)
-            resetIncrementalGC("requested");
+            resetIncrementalGC("requested", session.lock);
 
         stats.nonincremental("requested");
         budget.makeUnlimited();
     } else {
-        budgetIncrementalGC(budget);
+        budgetIncrementalGC(budget, session.lock);
     }
 
     /* The GC was reset, so we need a do-over. */
     if (prevState != NO_INCREMENTAL && !isIncrementalGCInProgress())
         return true;
 
     TraceMajorGCStart();
 
-    incrementalCollectSlice(budget, reason);
+    incrementalCollectSlice(budget, reason, session.lock);
 
 #ifndef JS_MORE_DETERMINISTIC
     nextFullGCTime = PRMJ_Now() + GC_IDLE_FULL_SPAN;
 #endif
 
     chunkAllocationSinceLastGC = false;
 
 #ifdef JS_GC_ZEAL
@@ -6612,17 +6612,17 @@ GCRuntime::abortGC()
 
     gcstats::AutoGCSlice agc(stats, scanZonesBeforeGC(), invocationKind,
                              SliceBudget::unlimited(), JS::gcreason::ABORT_GC);
 
     evictNursery(JS::gcreason::ABORT_GC);
     AutoTraceSession session(rt, JS::HeapState::MajorCollecting);
 
     number++;
-    resetIncrementalGC("abort");
+    resetIncrementalGC("abort", session.lock);
 }
 
 void
 GCRuntime::notifyDidPaint()
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
 #ifdef JS_GC_ZEAL
@@ -6816,17 +6816,17 @@ void js::gc::FinishGC(JSRuntime* rt)
     }
 
     rt->gc.nursery.waitBackgroundFreeEnd();
 }
 
 AutoPrepareForTracing::AutoPrepareForTracing(JSRuntime* rt, ZoneSelector selector)
 {
     js::gc::FinishGC(rt);
-    session.emplace(rt);
+    session_.emplace(rt);
 }
 
 JSCompartment*
 js::NewCompartment(JSContext* cx, Zone* zone, JSPrincipals* principals,
                    const JS::CompartmentOptions& options)
 {
     JSRuntime* rt = cx->runtime();
     JS_AbortIfWrongThread(rt);
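
The jsgc.cpp changes above all apply the same transformation: GC phase functions that previously asserted rt->currentThreadHasExclusiveAccess() at runtime now take an AutoLockForExclusiveAccess& parameter, threading the lock reference down from the AutoTraceSession that owns it. A caller that does not hold the lock then has no token to pass, so the call fails to compile instead of asserting. A minimal standalone sketch of the idea, using hypothetical stand-in types rather than the real SpiderMonkey classes:

    #include <mutex>

    // Hypothetical stand-in for js::AutoLockForExclusiveAccess: an RAII
    // guard whose mere existence proves the exclusive access lock is held.
    class AutoLockForExclusiveAccess
    {
        std::lock_guard<std::mutex> guard_;
      public:
        explicit AutoLockForExclusiveAccess(std::mutex& m) : guard_(m) {}
    };

    // Before: the callee asserted currentThreadHasExclusiveAccess() at
    // runtime. After: the requirement is part of the signature.
    void sweepProtectedState(AutoLockForExclusiveAccess& lock)
    {
        (void)lock;  // never used; it only proves the lock is held
        // ... mutate state that requires exclusive access ...
    }

    int main()
    {
        std::mutex exclusiveAccessMutex;
        AutoLockForExclusiveAccess lock(exclusiveAccessMutex);
        sweepProtectedState(lock);  // OK: the guard supplies the token
        // Without constructing AutoLockForExclusiveAccess there is no way
        // to produce the argument, so an unlocked call cannot be written.
    }

The lock object is passed by reference and never inspected by the callee; it exists purely as a compile-time capability token.
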
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -2469,22 +2469,22 @@ SaveSharedScriptData(ExclusiveContext* c
 {
     MOZ_ASSERT(script != nullptr);
     MOZ_ASSERT(ssd != nullptr);
 
     AutoLockForExclusiveAccess lock(cx);
 
     ScriptBytecodeHasher::Lookup l(ssd);
 
-    ScriptDataTable::AddPtr p = cx->scriptDataTable().lookupForAdd(l);
+    ScriptDataTable::AddPtr p = cx->scriptDataTable(lock).lookupForAdd(l);
     if (p) {
         js_free(ssd);
         ssd = *p;
     } else {
-        if (!cx->scriptDataTable().add(p, ssd)) {
+        if (!cx->scriptDataTable(lock).add(p, ssd)) {
             script->setCode(nullptr);
             script->atoms = nullptr;
             js_free(ssd);
             ReportOutOfMemory(cx);
             return false;
         }
     }
 
@@ -2513,48 +2513,48 @@ MarkScriptData(JSRuntime* rt, const jsby
      * a GC. Since SweepScriptData is only called during a full GC,
      * to preserve this invariant, only mark during a full GC.
      */
     if (rt->gc.isFullGc())
         SharedScriptData::fromBytecode(bytecode)->marked = true;
 }
 
 void
-js::UnmarkScriptData(JSRuntime* rt)
+js::UnmarkScriptData(JSRuntime* rt, AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(rt->gc.isFullGc());
-    ScriptDataTable& table = rt->scriptDataTable();
+    ScriptDataTable& table = rt->scriptDataTable(lock);
     for (ScriptDataTable::Enum e(table); !e.empty(); e.popFront()) {
         SharedScriptData* entry = e.front();
         entry->marked = false;
     }
 }
 
 void
-js::SweepScriptData(JSRuntime* rt)
+js::SweepScriptData(JSRuntime* rt, AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(rt->gc.isFullGc());
-    ScriptDataTable& table = rt->scriptDataTable();
+    ScriptDataTable& table = rt->scriptDataTable(lock);
 
     if (rt->keepAtoms())
         return;
 
     for (ScriptDataTable::Enum e(table); !e.empty(); e.popFront()) {
         SharedScriptData* entry = e.front();
         if (!entry->marked) {
             js_free(entry);
             e.removeFront();
         }
     }
 }
 
 void
-js::FreeScriptData(JSRuntime* rt)
+js::FreeScriptData(JSRuntime* rt, AutoLockForExclusiveAccess& lock)
 {
-    ScriptDataTable& table = rt->scriptDataTable();
+    ScriptDataTable& table = rt->scriptDataTable(lock);
     if (!table.initialized())
         return;
 
     for (ScriptDataTable::Enum e(table); !e.empty(); e.popFront())
         js_free(e.front());
 
     table.clear();
 }
--- a/js/src/jsscript.h
+++ b/js/src/jsscript.h
@@ -2454,23 +2454,23 @@ struct ScriptBytecodeHasher
     }
 };
 
 typedef HashSet<SharedScriptData*,
                 ScriptBytecodeHasher,
                 SystemAllocPolicy> ScriptDataTable;
 
 extern void
-UnmarkScriptData(JSRuntime* rt);
+UnmarkScriptData(JSRuntime* rt, AutoLockForExclusiveAccess& lock);
 
 extern void
-SweepScriptData(JSRuntime* rt);
+SweepScriptData(JSRuntime* rt, AutoLockForExclusiveAccess& lock);
 
 extern void
-FreeScriptData(JSRuntime* rt);
+FreeScriptData(JSRuntime* rt, AutoLockForExclusiveAccess& lock);
 
 struct ScriptAndCounts
 {
     /* This structure is stored and marked from the JSRuntime. */
     JSScript* script;
     ScriptCounts scriptCounts;
 
     inline explicit ScriptAndCounts(JSScript* script);
--- a/js/src/vm/Debugger.cpp
+++ b/js/src/vm/Debugger.cpp
@@ -2883,17 +2883,17 @@ Debugger::detachAllDebuggersFromGlobal(F
 {
     const GlobalObject::DebuggerVector* debuggers = global->getDebuggers();
     MOZ_ASSERT(!debuggers->empty());
     while (!debuggers->empty())
         debuggers->back()->removeDebuggeeGlobal(fop, global, nullptr);
 }
 
 /* static */ void
-Debugger::findZoneEdges(Zone* zone, js::gc::ComponentFinder<Zone>& finder)
+Debugger::findZoneEdges(Zone* zone, js::gc::ZoneComponentFinder& finder)
 {
     /*
      * For debugger cross compartment wrappers, add edges in the opposite
      * direction to those already added by JSCompartment::findOutgoingEdges.
      * This ensures that debuggers and their debuggees are finalized in the same
      * group.
      */
     for (Debugger* dbg : zone->runtimeFromMainThread()->debuggerList) {
--- a/js/src/vm/Debugger.h
+++ b/js/src/vm/Debugger.h
@@ -782,17 +782,17 @@ class Debugger : private mozilla::Linked
      * Debugger objects that are definitely live but not yet marked, it marks
      * them and returns true. If not, it returns false.
      */
     static void markIncomingCrossCompartmentEdges(JSTracer* tracer);
     static bool markAllIteratively(GCMarker* trc);
     static void markAll(JSTracer* trc);
     static void sweepAll(FreeOp* fop);
     static void detachAllDebuggersFromGlobal(FreeOp* fop, GlobalObject* global);
-    static void findZoneEdges(JS::Zone* v, gc::ComponentFinder<JS::Zone>& finder);
+    static void findZoneEdges(JS::Zone* v, gc::ZoneComponentFinder& finder);
 
     // Checks if the current compartment is allowed to execute code.
     static inline bool checkNoExecute(JSContext* cx, HandleScript script);
 
     /*
      * JSTrapStatus Overview
      * ---------------------
      *
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -429,27 +429,24 @@ JSRuntime::~JSRuntime()
     /*
      * Clear the self-hosted global and delete self-hosted classes *after*
      * GC, as finalizers for objects check for clasp->finalize during GC.
      */
     finishSelfHosting();
 
     MOZ_ASSERT(!exclusiveAccessOwner);
 
-    // Avoid bogus asserts during teardown.
     MOZ_ASSERT(!numExclusiveThreads);
-#ifdef DEBUG
-    mainThreadHasExclusiveAccess = true;
-#endif
+    AutoLockForExclusiveAccess lock(this);
 
     /*
      * Even though all objects in the compartment are dead, we may have kept
      * some filenames around because of gcKeepAtoms.
      */
-    FreeScriptData(this);
+    FreeScriptData(this, lock);
 
 #ifdef DEBUG
     /* Don't hurt everyone in leaky ol' Mozilla with a fatal MOZ_ASSERT! */
     if (hasContexts()) {
         unsigned cxcount = 0;
         for (ContextIter acx(this); !acx.done(); acx.next()) {
             fprintf(stderr,
 "JS API usage error: found live context at %p\n",
@@ -524,17 +521,17 @@ NewObjectCache::clearNurseryObjects(JSRu
 void
 JSRuntime::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::RuntimeSizes* rtSizes)
 {
     // Several tables in the runtime enumerated below can be used off thread.
     AutoLockForExclusiveAccess lock(this);
 
     rtSizes->object += mallocSizeOf(this);
 
-    rtSizes->atomsTable += atoms().sizeOfIncludingThis(mallocSizeOf);
+    rtSizes->atomsTable += atoms(lock).sizeOfIncludingThis(mallocSizeOf);
 
     if (!parentRuntime) {
         rtSizes->atomsTable += mallocSizeOf(staticStrings);
         rtSizes->atomsTable += mallocSizeOf(commonNames);
         rtSizes->atomsTable += permanentAtoms->sizeOfIncludingThis(mallocSizeOf);
     }
 
     for (ContextIter acx(this); !acx.done(); acx.next())
@@ -548,18 +545,18 @@ JSRuntime::addSizeOfIncludingThis(mozill
 
     if (sharedImmutableStrings_) {
         rtSizes->sharedImmutableStringsCache +=
             sharedImmutableStrings_->sizeOfExcludingThis(mallocSizeOf);
     }
 
     rtSizes->uncompressedSourceCache += uncompressedSourceCache.sizeOfExcludingThis(mallocSizeOf);
 
-    rtSizes->scriptData += scriptDataTable().sizeOfExcludingThis(mallocSizeOf);
-    for (ScriptDataTable::Range r = scriptDataTable().all(); !r.empty(); r.popFront())
+    rtSizes->scriptData += scriptDataTable(lock).sizeOfExcludingThis(mallocSizeOf);
+    for (ScriptDataTable::Range r = scriptDataTable(lock).all(); !r.empty(); r.popFront())
         rtSizes->scriptData += mallocSizeOf(r.front());
 
     if (jitRuntime_) {
         jitRuntime_->execAlloc().addSizeOfCode(&rtSizes->code);
         jitRuntime_->backedgeExecAlloc().addSizeOfCode(&rtSizes->code);
     }
 
     rtSizes->gc.marker += gc.marker.sizeOfExcludingThis(mallocSizeOf);
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -574,18 +574,18 @@ class PerThreadData : public PerThreadDa
 
     bool init();
 
     bool associatedWith(const JSRuntime* rt) { return runtime_ == rt; }
     inline JSRuntime* runtimeFromMainThread();
     inline JSRuntime* runtimeIfOnOwnerThread();
 
     inline bool exclusiveThreadsPresent();
-    inline void addActiveCompilation();
-    inline void removeActiveCompilation();
+    inline void addActiveCompilation(AutoLockForExclusiveAccess& lock);
+    inline void removeActiveCompilation(AutoLockForExclusiveAccess& lock);
 
     // For threads which may be associated with different runtimes, depending
     // on the work they are doing.
     class MOZ_STACK_CLASS AutoEnterRuntime
     {
         PerThreadData* pt;
 
       public:
@@ -1329,28 +1329,28 @@ struct JSRuntime : public JS::shadow::Ru
 
     // Pool of maps used during parse/emit. This may be modified by threads
     // with an ExclusiveContext and requires a lock. Active compilations
     // prevent the pool from being purged during GCs.
   private:
     js::frontend::ParseMapPool parseMapPool_;
     unsigned activeCompilations_;
   public:
-    js::frontend::ParseMapPool& parseMapPool() {
+    js::frontend::ParseMapPool& parseMapPool(js::AutoLockForExclusiveAccess& lock) {
         MOZ_ASSERT(currentThreadHasExclusiveAccess());
         return parseMapPool_;
     }
     bool hasActiveCompilations() {
         return activeCompilations_ != 0;
     }
-    void addActiveCompilation() {
+    void addActiveCompilation(js::AutoLockForExclusiveAccess& lock) {
         MOZ_ASSERT(currentThreadHasExclusiveAccess());
         activeCompilations_++;
     }
-    void removeActiveCompilation() {
+    void removeActiveCompilation(js::AutoLockForExclusiveAccess& lock) {
         MOZ_ASSERT(currentThreadHasExclusiveAccess());
         activeCompilations_--;
     }
 
     // Count of AutoKeepAtoms instances on the main thread's stack. When any
     // instances exist, atoms in the runtime will not be collected. Threads
     // with an ExclusiveContext do not increment this value, but the presence
     // of any such threads also inhibits collection of atoms. We don't scan the
@@ -1400,35 +1400,35 @@ struct JSRuntime : public JS::shadow::Ru
     js::SymbolRegistry symbolRegistry_;
 
   public:
     bool initializeAtoms(JSContext* cx);
     void finishAtoms();
 
     void sweepAtoms();
 
-    js::AtomSet& atoms() {
+    js::AtomSet& atoms(js::AutoLockForExclusiveAccess& lock) {
         MOZ_ASSERT(currentThreadHasExclusiveAccess());
         return *atoms_;
     }
-    JSCompartment* atomsCompartment() {
+    JSCompartment* atomsCompartment(js::AutoLockForExclusiveAccess& lock) {
         MOZ_ASSERT(currentThreadHasExclusiveAccess());
         return atomsCompartment_;
     }
 
     bool isAtomsCompartment(JSCompartment* comp) {
         return comp == atomsCompartment_;
     }
 
     // The atoms compartment is the only one in its zone.
     inline bool isAtomsZone(const JS::Zone* zone) const;
 
     bool activeGCInAtomsZone();
 
-    js::SymbolRegistry& symbolRegistry() {
+    js::SymbolRegistry& symbolRegistry(js::AutoLockForExclusiveAccess& lock) {
         MOZ_ASSERT(currentThreadHasExclusiveAccess());
         return symbolRegistry_;
     }
 
     // Permanent atoms are fixed during initialization of the runtime and are
     // not modified or collected until the runtime is destroyed. These may be
     // shared with another, longer living runtime through |parentRuntime| and
     // can be freely accessed with no locking necessary.
@@ -1454,17 +1454,17 @@ struct JSRuntime : public JS::shadow::Ru
     js::PreserveWrapperCallback            preserveWrapperCallback;
 
     // Table of bytecode and other data that may be shared across scripts
     // within the runtime. This may be modified by threads with an
     // ExclusiveContext and requires a lock.
   private:
     js::ScriptDataTable scriptDataTable_;
   public:
-    js::ScriptDataTable& scriptDataTable() {
+    js::ScriptDataTable& scriptDataTable(js::AutoLockForExclusiveAccess& lock) {
         MOZ_ASSERT(currentThreadHasExclusiveAccess());
         return scriptDataTable_;
     }
 
     bool                jitSupportsFloatingPoint;
     bool                jitSupportsSimd;
 
     // Cache for jit::GetPcScript().
@@ -1828,28 +1828,28 @@ PerThreadData::runtimeIfOnOwnerThread()
 
 inline bool
 PerThreadData::exclusiveThreadsPresent()
 {
     return runtime_->exclusiveThreadsPresent();
 }
 
 inline void
-PerThreadData::addActiveCompilation()
+PerThreadData::addActiveCompilation(AutoLockForExclusiveAccess& lock)
 {
     activeCompilations++;
-    runtime_->addActiveCompilation();
+    runtime_->addActiveCompilation(lock);
 }
 
 inline void
-PerThreadData::removeActiveCompilation()
+PerThreadData::removeActiveCompilation(AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(activeCompilations);
     activeCompilations--;
-    runtime_->removeActiveCompilation();
+    runtime_->removeActiveCompilation(lock);
 }
 
 /************************************************************************/
 
 static MOZ_ALWAYS_INLINE void
 MakeRangeGCSafe(Value* vec, size_t len)
 {
     mozilla::PodZero(vec, len);
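
The Runtime.h accessors above enforce the requirement twice over: statically, because the caller must name an AutoLockForExclusiveAccess to call the accessor at all, and dynamically, because the MOZ_ASSERT(currentThreadHasExclusiveAccess()) is retained. A rough sketch of the accessor shape, again with hypothetical stand-in types:

    // Only a reference to the lock type appears in the signature, so a
    // forward declaration suffices here.
    class AutoLockForExclusiveAccess;

    class Runtime
    {
        int scriptDataTable_ = 0;  // stands in for js::ScriptDataTable

      public:
        // The private member is reachable only through this accessor, and
        // the accessor is callable only with a lock token in hand. The
        // real accessor keeps MOZ_ASSERT(currentThreadHasExclusiveAccess())
        // as a dynamic second line of defense.
        int& scriptDataTable(AutoLockForExclusiveAccess& lock)
        {
            (void)lock;
            return scriptDataTable_;
        }
    };

Keeping the member private forces every access site through the accessor, so the set of callers that must hold the lock is exactly the set of callers that mention it.
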
--- a/js/src/vm/String.cpp
+++ b/js/src/vm/String.cpp
@@ -774,17 +774,17 @@ const StaticStrings::SmallChar StaticStr
 #undef R4
 #undef R6
 #undef R7
 
 bool
 StaticStrings::init(JSContext* cx)
 {
     AutoLockForExclusiveAccess lock(cx);
-    AutoCompartment ac(cx, cx->runtime()->atomsCompartment());
+    AutoCompartment ac(cx, cx->runtime()->atomsCompartment(lock));
 
     static_assert(UNIT_STATIC_LIMIT - 1 <= JSString::MAX_LATIN1_CHAR,
                   "Unit strings must fit in Latin1Char.");
 
     for (uint32_t i = 0; i < UNIT_STATIC_LIMIT; i++) {
         Latin1Char buffer[] = { Latin1Char(i), '\0' };
         JSFlatString* s = NewStringCopyN<NoGC>(cx, buffer, 1);
         if (!s)
--- a/js/src/vm/Symbol.cpp
+++ b/js/src/vm/Symbol.cpp
@@ -15,20 +15,20 @@
 #include "vm/StringBuffer.h"
 
 #include "jscompartmentinlines.h"
 
 using JS::Symbol;
 using namespace js;
 
 Symbol*
-Symbol::newInternal(ExclusiveContext* cx, JS::SymbolCode code, JSAtom* description)
+Symbol::newInternal(ExclusiveContext* cx, JS::SymbolCode code, JSAtom* description,
+                    AutoLockForExclusiveAccess& lock)
 {
-    MOZ_ASSERT(cx->compartment() == cx->atomsCompartment());
-    MOZ_ASSERT(cx->atomsCompartment()->runtimeFromAnyThread()->currentThreadHasExclusiveAccess());
+    MOZ_ASSERT(cx->compartment() == cx->atomsCompartment(lock));
 
     // Following js::AtomizeString, we grudgingly forgo last-ditch GC here.
     Symbol* p = Allocate<JS::Symbol, NoGC>(cx);
     if (!p) {
         ReportOutOfMemory(cx);
         return nullptr;
     }
     return new (p) Symbol(code, description);
@@ -42,36 +42,36 @@ Symbol::new_(ExclusiveContext* cx, JS::S
         atom = AtomizeString(cx, description);
         if (!atom)
             return nullptr;
     }
 
     // Lock to allocate. If symbol allocation becomes a bottleneck, this can
     // probably be replaced with an assertion that we're on the main thread.
     AutoLockForExclusiveAccess lock(cx);
-    AutoCompartment ac(cx, cx->atomsCompartment());
-    return newInternal(cx, code, atom);
+    AutoCompartment ac(cx, cx->atomsCompartment(lock));
+    return newInternal(cx, code, atom, lock);
 }
 
 Symbol*
 Symbol::for_(js::ExclusiveContext* cx, HandleString description)
 {
     JSAtom* atom = AtomizeString(cx, description);
     if (!atom)
         return nullptr;
 
     AutoLockForExclusiveAccess lock(cx);
 
-    SymbolRegistry& registry = cx->symbolRegistry();
+    SymbolRegistry& registry = cx->symbolRegistry(lock);
     SymbolRegistry::AddPtr p = registry.lookupForAdd(atom);
     if (p)
         return *p;
 
-    AutoCompartment ac(cx, cx->atomsCompartment());
-    Symbol* sym = newInternal(cx, SymbolCode::InSymbolRegistry, atom);
+    AutoCompartment ac(cx, cx->atomsCompartment(lock));
+    Symbol* sym = newInternal(cx, SymbolCode::InSymbolRegistry, atom, lock);
     if (!sym)
         return nullptr;
 
     // p is still valid here because we have held the lock since the
     // lookupForAdd call, and newInternal can't GC.
     if (!registry.add(p, sym)) {
         // SystemAllocPolicy does not report OOM.
         ReportOutOfMemory(cx);
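
Symbol::for_ above leans on the invariant spelled out in its comment: the AddPtr from lookupForAdd stays valid only while the registry is not mutated, which holds because the lock is taken before the lookup and kept across newInternal (which cannot GC) and the add. A rough analogue of that lookup-then-insert shape using the standard library instead of js::HashSet (whose AddPtr additionally avoids re-hashing the key):

    #include <mutex>
    #include <string>
    #include <unordered_map>

    std::mutex registryMutex;                        // hypothetical
    std::unordered_map<std::string, int> registry;   // stands in for SymbolRegistry

    // Return the existing entry for |key|, or create one, all under a
    // single lock so no other thread can invalidate the lookup between
    // the two steps.
    int getOrCreate(const std::string& key)
    {
        std::lock_guard<std::mutex> lock(registryMutex);
        auto it = registry.find(key);
        if (it != registry.end())
            return it->second;                       // found: still valid
        int fresh = static_cast<int>(registry.size());
        registry.emplace(key, fresh);                // safe: lock held since find()
        return fresh;
    }

    int main()
    {
        int a = getOrCreate("sym");
        int b = getOrCreate("sym");
        return a == b ? 0 : 1;  // second call finds the existing entry
    }

Dropping the lock between the lookup and the insert would reintroduce a check-then-act race; holding AutoLockForExclusiveAccess across both steps is what rules that out in the real code.
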
--- a/js/src/vm/Symbol.h
+++ b/js/src/vm/Symbol.h
@@ -16,16 +16,20 @@
 
 #include "gc/Barrier.h"
 #include "gc/Marking.h"
 
 #include "js/GCHashTable.h"
 #include "js/RootingAPI.h"
 #include "js/TypeDecls.h"
 
+namespace js {
+class AutoLockForExclusiveAccess;
+} // namespace js
+
 namespace JS {
 
 class Symbol : public js::gc::TenuredCell
 {
   private:
     SymbolCode code_;
     JSAtom* description_;
 
@@ -40,17 +44,18 @@ class Symbol : public js::gc::TenuredCel
         // Silence warnings about unused2 being... unused.
         (void)unused2_;
     }
 
     Symbol(const Symbol&) = delete;
     void operator=(const Symbol&) = delete;
 
     static Symbol*
-    newInternal(js::ExclusiveContext* cx, SymbolCode code, JSAtom* description);
+    newInternal(js::ExclusiveContext* cx, SymbolCode code, JSAtom* description,
+                js::AutoLockForExclusiveAccess& lock);
 
   public:
     static Symbol* new_(js::ExclusiveContext* cx, SymbolCode code, JSString* description);
     static Symbol* for_(js::ExclusiveContext* cx, js::HandleString description);
 
     JSAtom* description() const { return description_; }
     SymbolCode code() const { return code_; }
 
--- a/js/src/vm/UbiNodeCensus.cpp
+++ b/js/src/vm/UbiNodeCensus.cpp
@@ -27,17 +27,17 @@ CountDeleter::operator()(CountBase* ptr)
     // pointer.
     ptr->destruct();
     js_free(ptr);
 }
 
 bool
 Census::init() {
     AutoLockForExclusiveAccess lock(cx);
-    atomsZone = cx->runtime()->atomsCompartment()->zone();
+    atomsZone = cx->runtime()->atomsCompartment(lock)->zone();
     return targetZones.init();
 }
 
 
 /*** Count Types ***********************************************************************************/
 
 // The simplest type: just count everything.
 class SimpleCount : public CountType {