Bug 1369748 - Parallelise the start of GC marking r=sfink r=smaug
author Jon Coppeard <jcoppeard@mozilla.com>
Fri, 09 Jun 2017 11:44:15 +0100
changeset 411320 825e71dae9bf360ff0ae51bf0d4bb663189df72c
parent 411319 aafdd9bcceaf566fcfb4a259f0f90d3dc620e350
child 411321 7457b240847db66b78493ed3a6d03663173bd745
push id 7391
push user mtabara@mozilla.com
push date Mon, 12 Jun 2017 13:08:53 +0000
treeherder mozilla-beta@2191d7f87e2e
reviewers sfink, smaug
bugs 1369748
milestone 55.0a1
Bug 1369748 - Parallelise the start of GC marking r=sfink r=smaug
dom/console/Console.cpp
dom/workers/WorkerPrivate.cpp
js/src/gc/GCRuntime.h
js/src/gc/GenerateStatsPhases.py
js/src/gc/Marking.cpp
js/src/gc/RootMarking.cpp
js/src/gc/Zone.h
js/src/jsgc.cpp
--- a/dom/console/Console.cpp
+++ b/dom/console/Console.cpp
@@ -176,18 +176,16 @@ public:
     }
 
     return true;
   }
 
   void
   Trace(const TraceCallbacks& aCallbacks, void* aClosure)
   {
-    AssertIsOnOwningThread();
-
     ConsoleCallData* tmp = this;
     for (uint32_t i = 0; i < mCopiedArguments.Length(); ++i) {
       NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mCopiedArguments[i])
     }
 
     NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mGlobal);
   }
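Note: the AssertIsOnOwningThread() call is removed above (and AssertIsOnParentThread() below in WorkerPrivate.cpp) because with this patch gray-root buffering can invoke these trace callbacks from a GC helper thread while the mutator is paused, so the callbacks are no longer guaranteed to run on their owning thread. A minimal sketch, in standard C++ with hypothetical names, of why such an assertion would now fire:

    #include <cassert>
    #include <thread>

    // Hypothetical stand-in for a thread-affine object whose Trace()
    // asserted thread ownership, as ConsoleCallData did before this patch.
    struct TraceableThing {
        std::thread::id owner = std::this_thread::get_id();

        void Trace() {
            // The assertion the patch removes: it aborts the moment a GC
            // helper thread calls Trace() during gray-root buffering, even
            // though the paused mutator makes the access safe.
            assert(std::this_thread::get_id() == owner);
        }
    };

    int main() {
        TraceableThing thing;
        std::thread helper([&] { thing.Trace(); });  // asserts in a debug build
        helper.join();
    }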
 
--- a/dom/workers/WorkerPrivate.cpp
+++ b/dom/workers/WorkerPrivate.cpp
@@ -4029,17 +4029,16 @@ template <class Derived>
 NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(WorkerPrivateParent<Derived>,
                                                 DOMEventTargetHelper)
   tmp->Terminate();
 NS_IMPL_CYCLE_COLLECTION_UNLINK_END
 
 template <class Derived>
 NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN_INHERITED(WorkerPrivateParent<Derived>,
                                                DOMEventTargetHelper)
-  tmp->AssertIsOnParentThread();
 NS_IMPL_CYCLE_COLLECTION_TRACE_END
 
 #ifdef DEBUG
 
 template <class Derived>
 void
 WorkerPrivateParent<Derived>::AssertIsOnParentThread() const
 {
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -552,18 +552,18 @@ class GCSchedulingState
         inHighFrequencyGCMode_ =
             tunables.isDynamicHeapGrowthEnabled() && lastGCTime &&
             lastGCTime + tunables.highFrequencyThresholdUsec() > currentTime;
     }
 };
 
 template<typename F>
 struct Callback {
-    ActiveThreadData<F> op;
-    ActiveThreadData<void*> data;
+    ActiveThreadOrGCTaskData<F> op;
+    ActiveThreadOrGCTaskData<void*> data;
 
     Callback()
       : op(nullptr), data(nullptr)
     {}
     Callback(F op, void* data)
       : op(op), data(data)
     {}
 };
@@ -900,16 +900,18 @@ class GCRuntime
                                   const Class* clasp);
     template <AllowGC allowGC>
     static JSObject* tryNewTenuredObject(JSContext* cx, AllocKind kind, size_t thingSize,
                                          size_t nDynamicSlots);
     template <typename T, AllowGC allowGC>
     static T* tryNewTenuredThing(JSContext* cx, AllocKind kind, size_t thingSize);
     static TenuredCell* refillFreeListInGC(Zone* zone, AllocKind thingKind);
 
+    void bufferGrayRoots();
+
   private:
     enum IncrementalResult
     {
         Reset = 0,
         Ok
     };
 
     // For ArenaLists::allocateFromArena()
@@ -972,17 +974,16 @@ class GCRuntime
     bool prepareZonesForCollection(JS::gcreason::Reason reason, bool* isFullOut,
                                    AutoLockForExclusiveAccess& lock);
     bool shouldPreserveJITCode(JSCompartment* comp, int64_t currentTime,
                                JS::gcreason::Reason reason, bool canAllocateMoreCode);
     void traceRuntimeForMajorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock);
     void traceRuntimeAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock);
     void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
                             AutoLockForExclusiveAccess& lock);
-    void bufferGrayRoots();
     void maybeDoCycleCollection();
     void markCompartments();
     IncrementalProgress drainMarkStack(SliceBudget& sliceBudget, gcstats::PhaseKind phase);
     template <class CompartmentIterT> void markWeakReferences(gcstats::PhaseKind phase);
     void markWeakReferencesInCurrentGroup(gcstats::PhaseKind phase);
     template <class ZoneIterT, class CompartmentIterT> void markGrayReferences(gcstats::PhaseKind phase);
     void markBufferedGrayRoots(JS::Zone* zone);
     void markGrayReferencesInCurrentGroup(gcstats::PhaseKind phase);
@@ -1136,17 +1137,17 @@ class GCRuntime
     // accumulate these roots in each zone's gcGrayRoots vector and then mark
     // them later, after black marking is complete for each compartment. This
     // accumulation can fail, but in that case we switch to non-incremental GC.
     enum class GrayBufferState {
         Unused,
         Okay,
         Failed
     };
-    ActiveThreadData<GrayBufferState> grayBufferState;
+    ActiveThreadOrGCTaskData<GrayBufferState> grayBufferState;
     bool hasBufferedGrayRoots() const { return grayBufferState == GrayBufferState::Okay; }
 
     // Clear each zone's gray buffers, but do not change the current state.
     void resetBufferedGrayRoots() const;
 
     // Reset the gray buffering state to Unused.
     void clearBufferedGrayRoots() {
         grayBufferState = GrayBufferState::Unused;
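Note: ActiveThreadData<T> only permits access from the active (main) thread, which is why the Callback and grayBufferState members above move to ActiveThreadOrGCTaskData<T>: they are now also read from GC helper tasks that run while the mutator is paused. A minimal sketch of the idea behind such a checked wrapper, in standard C++ with hypothetical names (the real SpiderMonkey wrappers compile their checks out of release builds):

    #include <atomic>
    #include <cassert>
    #include <thread>

    // Hypothetical flag, set while the mutator is paused and GC tasks run.
    std::atomic<bool> gcTaskActive{false};

    template <typename T>
    class ActiveThreadOrGCTaskData {
        T value{};
        std::thread::id activeThread = std::this_thread::get_id();

      public:
        T& ref() {
            // Access is allowed on the active thread, or from any thread
            // while a GC task is running and the mutator cannot race us.
            assert(std::this_thread::get_id() == activeThread ||
                   gcTaskActive.load());
            return value;
        }
    };

    int main() {
        ActiveThreadOrGCTaskData<int> counter;
        counter.ref() = 1;  // OK: we are the active thread.
    }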
--- a/js/src/gc/GenerateStatsPhases.py
+++ b/js/src/gc/GenerateStatsPhases.py
@@ -57,37 +57,41 @@ class PhaseKind():
     def __init__(self, name, descr, bucket, children = []):
         self.name = name
         self.descr = descr
         self.bucket = bucket
         self.children = children
 
 # The root marking phase appears in several places in the graph.
 MarkRootsPhaseKind = PhaseKind("MARK_ROOTS", "Mark Roots", 48, [
-    PhaseKind("BUFFER_GRAY_ROOTS", "Buffer Gray Roots", 49),
     PhaseKind("MARK_CCWS", "Mark Cross Compartment Wrappers", 50),
     PhaseKind("MARK_STACK", "Mark C and JS stacks", 51),
     PhaseKind("MARK_RUNTIME_DATA", "Mark Runtime-wide Data", 52),
     PhaseKind("MARK_EMBEDDING", "Mark Embedding", 53),
-    PhaseKind("MARK_COMPARTMENTS", "Mark Compartments", 54),
+    PhaseKind("MARK_COMPARTMENTS", "Mark Compartments", 54)
 ])
 
 JoinParallelTasksPhaseKind = PhaseKind("JOIN_PARALLEL_TASKS", "Join Parallel Tasks", 67)
 
 PhaseKindGraphRoots = [
     PhaseKind("MUTATOR", "Mutator Running", 0),
     PhaseKind("GC_BEGIN", "Begin Callback", 1),
     PhaseKind("WAIT_BACKGROUND_THREAD", "Wait Background Thread", 2),
-    PhaseKind("MARK_DISCARD_CODE", "Mark Discard Code", 3),
-    PhaseKind("RELAZIFY_FUNCTIONS", "Relazify Functions", 4),
-    PhaseKind("PURGE", "Purge", 5),
+    PhaseKind("PREPARE", "Prepare For Collection", 69, [
+        PhaseKind("UNMARK", "Unmark", 7),
+        PhaseKind("BUFFER_GRAY_ROOTS", "Buffer Gray Roots", 49),
+        PhaseKind("MARK_DISCARD_CODE", "Mark Discard Code", 3),
+        PhaseKind("RELAZIFY_FUNCTIONS", "Relazify Functions", 4),
+        PhaseKind("PURGE", "Purge", 5),
+        PhaseKind("PURGE_SHAPE_TABLES", "Purge ShapeTables", 60),
+        JoinParallelTasksPhaseKind
+        ]),
     PhaseKind("MARK", "Mark", 6, [
-        PhaseKind("UNMARK", "Unmark", 7),
         MarkRootsPhaseKind,
-        PhaseKind("MARK_DELAYED", "Mark Delayed", 8),
+        PhaseKind("MARK_DELAYED", "Mark Delayed", 8)
         ]),
     PhaseKind("SWEEP", "Sweep", 9, [
         PhaseKind("SWEEP_MARK", "Mark During Sweeping", 10, [
             PhaseKind("SWEEP_MARK_TYPES", "Mark Types During Sweeping", 11),
             PhaseKind("SWEEP_MARK_INCOMING_BLACK", "Mark Incoming Black Pointers", 12),
             PhaseKind("SWEEP_MARK_WEAK", "Mark Weak", 13),
             PhaseKind("SWEEP_MARK_INCOMING_GRAY", "Mark Incoming Gray Pointers", 14),
             PhaseKind("SWEEP_MARK_GRAY", "Mark Gray", 15),
@@ -146,18 +150,17 @@ PhaseKindGraphRoots = [
     PhaseKind("EVICT_NURSERY", "Minor GCs to Evict Nursery", 46, [
         MarkRootsPhaseKind,
     ]),
     PhaseKind("TRACE_HEAP", "Trace Heap", 47, [
         MarkRootsPhaseKind,
     ]),
     PhaseKind("BARRIER", "Barriers", 55, [
         PhaseKind("UNMARK_GRAY", "Unmark gray", 56),
-    ]),
-    PhaseKind("PURGE_SHAPE_TABLES", "Purge ShapeTables", 60)
+    ])
 ]
 
 # Make a linear list of all unique phases by performing a depth first
 # search on the phase graph starting at the roots.  This will be used to
 # generate the PhaseKind enum.
 
 def findAllPhaseKinds():
     phases = []
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -212,18 +212,20 @@ js::CheckTracedThing(JSTracer* trc, T* t
      * with this runtime, but will be ignored during marking.
      */
     if (IsOwnedByOtherRuntime(trc->runtime(), thing))
         return;
 
     Zone* zone = thing->zoneFromAnyThread();
     JSRuntime* rt = trc->runtime();
 
-    MOZ_ASSERT_IF(!IsMovingTracer(trc), CurrentThreadCanAccessZone(zone));
-    MOZ_ASSERT_IF(!IsMovingTracer(trc), CurrentThreadCanAccessRuntime(rt));
+    if (!IsMovingTracer(trc) && !IsBufferGrayRootsTracer(trc)) {
+        MOZ_ASSERT(CurrentThreadCanAccessZone(zone));
+        MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+    }
 
     MOZ_ASSERT(zone->runtimeFromAnyThread() == trc->runtime());
 
     // It shouldn't be possible to trace into zones used by helper threads.
     MOZ_ASSERT(!zone->usedByHelperThread());
 
     MOZ_ASSERT(thing->isAligned());
     MOZ_ASSERT(MapTypeToTraceKind<typename mozilla::RemovePointer<T>::Type>::kind ==
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -476,18 +476,16 @@ void
 js::gc::GCRuntime::bufferGrayRoots()
 {
     // Precondition: the state has been reset to "unused" after the last GC
     //               and the zone's buffers have been cleared.
     MOZ_ASSERT(grayBufferState == GrayBufferState::Unused);
     for (GCZonesIter zone(rt); !zone.done(); zone.next())
         MOZ_ASSERT(zone->gcGrayRoots().empty());
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::BUFFER_GRAY_ROOTS);
-
     BufferGrayRootsTracer grayBufferer(rt);
     if (JSTraceDataOp op = grayRootTracer.op)
         (*op)(&grayBufferer, grayRootTracer.data);
 
     // Propagate the failure flag from the marker to the runtime.
     if (grayBufferer.failed()) {
       grayBufferState = GrayBufferState::Failed;
       resetBufferedGrayRoots();
@@ -508,18 +506,20 @@ BufferGrayRootsTracer::onChild(const JS:
     // Check if |thing| is corrupt by calling a method that touches the heap.
     MOZ_RELEASE_ASSERT(thing.asCell()->getTraceKind() <= JS::TraceKind::Null);
 
     if (bufferingGrayRootsFailed)
         return;
 
     gc::TenuredCell* tenured = gc::TenuredCell::fromPointer(thing.asCell());
 
-    Zone* zone = tenured->zone();
-    if (zone->isCollecting()) {
+    // This is run from a helper thread while the mutator is paused so we have
+    // to use *FromAnyThread methods here.
+    Zone* zone = tenured->zoneFromAnyThread();
+    if (zone->isCollectingFromAnyThread()) {
         // See the comment on SetMaybeAliveFlag to see why we only do this for
         // objects and scripts. We rely on gray root buffering for this to work,
         // but we only need to worry about uncollected dead compartments during
         // incremental GCs (when we do gray root buffering).
         DispatchTyped(SetMaybeAliveFunctor(), thing);
 
         if (!zone->gcGrayRoots().append(tenured))
             bufferingGrayRootsFailed = true;
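Note: the switch to zoneFromAnyThread() and isCollectingFromAnyThread() is needed because the plain accessors assert that the current thread may access the zone, which would now fail on the helper thread even though the paused mutator makes the read safe. A hedged sketch of the accessor-pair pattern, with hypothetical names:

    #include <cassert>
    #include <thread>

    struct Zone {};

    // Hypothetical stand-in for a cell with checked and unchecked accessors.
    class Cell {
        Zone* zone_;
        std::thread::id mutatorThread_ = std::this_thread::get_id();

      public:
        explicit Cell(Zone* zone) : zone_(zone) {}

        Zone* zone() {
            // Checked accessor: enforces the single-thread discipline and
            // would assert when called from a helper thread.
            assert(std::this_thread::get_id() == mutatorThread_);
            return zone_;
        }
        Zone* zoneFromAnyThread() {
            // Unchecked accessor: the caller promises the mutator is
            // paused, as the gray-root buffering task can.
            return zone_;
        }
    };

    int main() {
        Zone zone;
        Cell cell(&zone);
        std::thread helper([&] { cell.zoneFromAnyThread(); });  // safe here
        helper.join();
    }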
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -331,17 +331,17 @@ struct Zone : public JS::shadow::Zone,
     // The set of compartments in this zone.
     js::ActiveThreadOrGCTaskData<CompartmentVector> compartments_;
   public:
     CompartmentVector& compartments() { return compartments_.ref(); }
 
     // This zone's gray roots.
     typedef js::Vector<js::gc::Cell*, 0, js::SystemAllocPolicy> GrayRootVector;
   private:
-    js::ZoneGroupData<GrayRootVector> gcGrayRoots_;
+    js::ZoneGroupOrGCTaskData<GrayRootVector> gcGrayRoots_;
   public:
     GrayRootVector& gcGrayRoots() { return gcGrayRoots_.ref(); }
 
     // This zone's weak edges found via graph traversal during marking,
     // preserved for re-scanning during sweeping.
     using WeakEdges = js::Vector<js::gc::TenuredCell**, 0, js::SystemAllocPolicy>;
   private:
     js::ZoneGroupOrGCTaskData<WeakEdges> gcWeakRefs_;
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -3618,16 +3618,44 @@ ArenaLists::checkEmptyArenaList(AllocKin
             }
         }
         fprintf(stderr, "ERROR: GC found %" PRIuSIZE " live Cells at shutdown\n", num_live);
     }
 #endif // DEBUG
     return num_live == 0;
 }
 
+class MOZ_RAII js::gc::AutoRunParallelTask : public GCParallelTask
+{
+    using Func = void (*)(JSRuntime*);
+
+    Func func_;
+    gcstats::PhaseKind phase_;
+    AutoLockHelperThreadState& lock_;
+
+  public:
+    AutoRunParallelTask(JSRuntime* rt, Func func, gcstats::PhaseKind phase,
+                        AutoLockHelperThreadState& lock)
+      : GCParallelTask(rt),
+        func_(func),
+        phase_(phase),
+        lock_(lock)
+    {
+        runtime()->gc.startTask(*this, phase_, lock_);
+    }
+
+    ~AutoRunParallelTask() {
+        runtime()->gc.joinTask(*this, phase_, lock_);
+    }
+
+    void run() override {
+        func_(runtime());
+    }
+};
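Note: AutoRunParallelTask starts its task on a helper thread in the constructor and joins it in the destructor, so a task can never outlive the block that starts it. A rough standard-C++ analogue of the shape, ignoring the stats phase and helper-thread lock, which have no portable equivalent:

    #include <cstdio>
    #include <thread>

    // Simplified analogue: run func(arg) on another thread for the
    // lifetime of the enclosing scope, joining on destruction.
    class ScopedParallelTask {
        std::thread thread_;

      public:
        template <typename Func, typename Arg>
        ScopedParallelTask(Func func, Arg* arg)
          : thread_(func, arg)  // started immediately, like the ctor above
        {}

        ~ScopedParallelTask() { thread_.join(); }  // joined at scope exit

        ScopedParallelTask(const ScopedParallelTask&) = delete;
        ScopedParallelTask& operator=(const ScopedParallelTask&) = delete;
    };

    static void Work(int* n) { std::printf("working on %d\n", *n); }

    int main() {
        int data = 42;
        ScopedParallelTask task(Work, &data);  // Work runs until scope exit
    }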
+
 void
 GCRuntime::purgeRuntime(AutoLockForExclusiveAccess& lock)
 {
     gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE);
 
     for (GCCompartmentsIter comp(rt); !comp.done(); comp.next())
         comp->purge();
 
@@ -3921,29 +3949,33 @@ PurgeShapeTablesForShrinkingGC(JSRuntime
         for (auto baseShape = zone->cellIter<BaseShape>(); !baseShape.done(); baseShape.next())
             baseShape->maybePurgeTable();
     }
 }
 
 static void
 UnmarkCollectedZones(JSRuntime* rt)
 {
-    gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::UNMARK);
-
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
         /* Unmark everything in the zones being collected. */
         zone->arenas.unmarkAll();
     }
 
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
         /* Unmark all weak maps in the zones being collected. */
         WeakMapBase::unmarkZone(zone);
     }
 }
 
+static void
+BufferGrayRoots(JSRuntime* rt)
+{
+    rt->gc.bufferGrayRoots();
+}
+
 bool
 GCRuntime::beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock)
 {
 #ifdef DEBUG
     if (fullCompartmentChecks)
         checkForCompartmentMismatches();
 #endif
 
@@ -3958,72 +3990,96 @@ GCRuntime::beginMarkPhase(JS::gcreason::
         for (GCZonesIter zone(rt); !zone.done(); zone.next())
             zone->arenas.prepareForIncrementalGC();
     }
 
     MemProfiler::MarkTenuredStart(rt);
     marker.start();
     GCMarker* gcmarker = &marker;
 
-    /* For non-incremental GC the following sweep discards the jit code. */
-    if (isIncremental)
-        DiscardJITCodeForIncrementalGC(rt);
-
-    /*
-     * Relazify functions after discarding JIT code (we can't relazify functions
-     * with JIT code) and before the actual mark phase, so that the current GC
-     * can collect the JSScripts we're unlinking here.  We do this only when
-     * we're performing a shrinking GC, as too much relazification can cause
-     * performance issues when we have to reparse the same functions over and
-     * over.
-     */
-    if (invocationKind == GC_SHRINK) {
-        RelazifyFunctionsForShrinkingGC(rt);
-        PurgeShapeTablesForShrinkingGC(rt);
-    }
-
-    /* Process any queued source compressions during the start of a major GC. */
     {
+        gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::PREPARE);
         AutoLockHelperThreadState helperLock;
-        HelperThreadState().startHandlingCompressionTasks(helperLock);
-    }
-
-    /*
-     * We must purge the runtime at the beginning of an incremental GC. The
-     * danger if we purge later is that the snapshot invariant of incremental GC
-     * will be broken, as follows. If some object is reachable only through some
-     * cache (say the dtoaCache) then it will not be part of the snapshot.  If
-     * we purge after root marking, then the mutator could obtain a pointer to
-     * the object and start using it. This object might never be marked, so a GC
-     * hazard would exist.
-     */
-    purgeRuntime(lock);
+
+        /*
+         * Clear all mark state for the zones we are collecting. This is linear
+         * in the size of the heap we are collecting and so can be slow. Do this
+         * in parallel with the rest of this block.
+         */
+        AutoRunParallelTask
+            unmarkCollectedZones(rt, UnmarkCollectedZones, gcstats::PhaseKind::UNMARK, helperLock);
+
+        /*
+         * Buffer gray roots for incremental collections. This is linear in the
+         * number of roots which can be in the tens of thousands. Do this in
+         * parallel with the rest of this block.
+         */
+        Maybe<AutoRunParallelTask> bufferGrayRoots;
+        if (isIncremental)
+            bufferGrayRoots.emplace(rt, BufferGrayRoots, gcstats::PhaseKind::BUFFER_GRAY_ROOTS, helperLock);
+        AutoUnlockHelperThreadState unlock(helperLock);
+
+        /*
+         * Discard JIT code for incremental collections (for non-incremental
+         * collections the following sweep discards the jit code).
+         */
+        if (isIncremental)
+            DiscardJITCodeForIncrementalGC(rt);
+
+        /*
+         * Relazify functions after discarding JIT code (we can't relazify
+         * functions with JIT code) and before the actual mark phase, so that
+         * the current GC can collect the JSScripts we're unlinking here.  We do
+         * this only when we're performing a shrinking GC, as too much
+         * relazification can cause performance issues when we have to reparse
+         * the same functions over and over.
+         */
+        if (invocationKind == GC_SHRINK) {
+            RelazifyFunctionsForShrinkingGC(rt);
+            PurgeShapeTablesForShrinkingGC(rt);
+        }
+
+        /*
+         * We must purge the runtime at the beginning of an incremental GC. The
+         * danger if we purge later is that the snapshot invariant of
+         * incremental GC will be broken, as follows. If some object is
+         * reachable only through some cache (say the dtoaCache) then it will
+         * not be part of the snapshot.  If we purge after root marking, then
+         * the mutator could obtain a pointer to the object and start using
+         * it. This object might never be marked, so a GC hazard would exist.
+         */
+        purgeRuntime(lock);
+    }
 
     /*
      * Mark phase.
      */
-    gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::MARK);
-
-    UnmarkCollectedZones(rt);
+    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK);
     traceRuntimeForMajorGC(gcmarker, lock);
 
-    gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::MARK_ROOTS);
-
-    if (isIncremental) {
-        bufferGrayRoots();
+    if (isIncremental)
         markCompartments();
+
+    /*
+     * Process any queued source compressions during the start of a major
+     * GC.
+     */
+    {
+        AutoLockHelperThreadState helperLock;
+        HelperThreadState().startHandlingCompressionTasks(helperLock);
     }
 
     return true;
 }
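Note: the net effect of the restructured beginMarkPhase is that the two heap-linear preparation steps (unmarking and gray-root buffering) run on helper threads while the main thread performs the rest of the preparation, and everything joins before marking starts. A condensed, self-contained sketch of the control flow, using plain std::thread in place of AutoRunParallelTask and hypothetical stand-ins for the GC steps:

    #include <optional>
    #include <thread>

    struct Runtime { bool isIncremental; };

    static void UnmarkCollectedZones(Runtime*)  { /* linear in heap size */ }
    static void BufferGrayRoots(Runtime*)       { /* linear in root count */ }
    static void MainThreadPreparation(Runtime*) { /* discard JIT, relazify, purge */ }
    static void Mark(Runtime*)                  { /* trace roots, drain mark stack */ }

    static void BeginMarkPhase(Runtime* rt) {
        {
            // PREPARE: kick off the slow, independent steps in parallel...
            std::thread unmark(UnmarkCollectedZones, rt);
            std::optional<std::thread> grayRoots;
            if (rt->isIncremental)
                grayRoots.emplace(BufferGrayRoots, rt);

            // ...and overlap them with the main-thread preparation work.
            MainThreadPreparation(rt);

            // Join before leaving the block, as ~AutoRunParallelTask does.
            unmark.join();
            if (grayRoots)
                grayRoots->join();
        }
        // MARK: all preparation has completed before marking begins.
        Mark(rt);
    }

    int main() {
        Runtime rt{true};
        BeginMarkPhase(&rt);
    }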
 
 void
 GCRuntime::markCompartments()
 {
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_COMPARTMENTS);
+    gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::MARK_ROOTS);
+    gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::MARK_COMPARTMENTS);
 
     /*
      * This code ensures that if a compartment is "dead", then it will be
      * collected in this GC. A compartment is considered dead if its maybeAlive
      * flag is false. The maybeAlive flag is set if:
      *
      *   (1) the compartment has been entered (set in beginMarkPhase() above)
      *   (2) the compartment is not being collected (set in beginMarkPhase()
@@ -4279,30 +4335,34 @@ js::gc::MarkingValidator::nonIncremental
      */
     initialized = true;
 
     /* Re-do all the marking, but non-incrementally. */
     js::gc::State state = gc->incrementalState;
     gc->incrementalState = State::MarkRoots;
 
     {
-        gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::MARK);
+        gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::PREPARE);
 
         {
             gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::UNMARK);
 
             for (GCZonesIter zone(runtime); !zone.done(); zone.next())
                 WeakMapBase::unmarkZone(zone);
 
             MOZ_ASSERT(gcmarker->isDrained());
             gcmarker->reset();
 
             for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next())
                 chunk->bitmap.clear();
         }
+    }
+
+    {
+        gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::MARK);
 
         gc->traceRuntimeForMajorGC(gcmarker, lock);
 
         gc->incrementalState = State::Mark;
         auto unlimited = SliceBudget::unlimited();
         MOZ_RELEASE_ASSERT(gc->marker.drainMarkStack(unlimited));
     }
 
@@ -5184,44 +5244,16 @@ PrepareWeakCacheTasks(JSRuntime* rt)
             return true;
         });
         tasks.clear();
     }
 
     return tasks;
 }
 
-class MOZ_RAII js::gc::AutoRunParallelTask : public GCParallelTask
-{
-    using Func = void (*)(JSRuntime*);
-
-    Func func_;
-    gcstats::PhaseKind phase_;
-    AutoLockHelperThreadState& lock_;
-
-  public:
-    AutoRunParallelTask(JSRuntime* rt, Func func, gcstats::PhaseKind phase,
-                        AutoLockHelperThreadState& lock)
-      : GCParallelTask(rt),
-        func_(func),
-        phase_(phase),
-        lock_(lock)
-    {
-        runtime()->gc.startTask(*this, phase_, lock_);
-    }
-
-    ~AutoRunParallelTask() {
-        runtime()->gc.joinTask(*this, phase_, lock_);
-    }
-
-    void run() override {
-        func_(runtime());
-    }
-};
-
 void
 GCRuntime::beginSweepingSweepGroup()
 {
     /*
      * Begin sweeping the group of zones in currentSweepGroup, performing
      * actions that must be done before yielding to caller.
      */