Backed out changeset 36f2908f6650 (bug 1361458) for crashing [@ js::GCMarker::drainMarkStack], e.g. in devtools' devtools/client/debugger/new/test/mochitest/browser_dbg-sourcemaps.js. r=backout
author: Sebastian Hengst <archaeopteryx@coole-files.de>
Wed, 17 May 2017 12:53:58 +0200
changeset 358759 1627485da92e28dcb2513f631eaedf44826574a2
parent 358758 89f59c12ff7f7e5352d445ef60e4134d06f43370
child 358760 f1ffecc195a5edae84a363c85b5b085fc94ab9a0
push id: 90374
push user: archaeopteryx@coole-files.de
push date: Wed, 17 May 2017 10:54:16 +0000
treeherder: mozilla-inbound@1627485da92e [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: backout
bugs: 1361458
milestone55.0a1
backs out: 36f2908f6650129ababf0665d8d5be185d31f5b2
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out changeset 36f2908f6650 (bug 1361458) for crashing [@ js::GCMarker::drainMarkStack], e.g. in devtools' devtools/client/debugger/new/test/mochitest/browser_dbg-sourcemaps.js. r=backout
config/check_spidermonkey_style.py
js/src/gc/GCInternals.h
js/src/gc/GCRuntime.h
js/src/gc/GenerateStatsPhases.py
js/src/gc/Marking.cpp
js/src/gc/Nursery.cpp
js/src/gc/RootMarking.cpp
js/src/gc/Statistics.cpp
js/src/gc/Statistics.h
js/src/gc/Verifier.cpp
js/src/jscompartment.cpp
js/src/jsfriendapi.cpp
js/src/jsgc.cpp
js/src/moz.build
--- a/config/check_spidermonkey_style.py
+++ b/config/check_spidermonkey_style.py
@@ -59,18 +59,16 @@ ignored_js_src_dirs = [
 # We ignore #includes of these files, because they don't follow the usual rules.
 included_inclnames_to_ignore = set([
     'ffi.h',                    # generated in ctypes/libffi/
     'devtools/sharkctl.h',      # we ignore devtools/ in general
     'devtools/Instruments.h',   # we ignore devtools/ in general
     'double-conversion.h',      # strange MFBT case
     'javascript-trace.h',       # generated in $OBJDIR if HAVE_DTRACE is defined
     'frontend/ReservedWordsGenerated.h', # generated in $OBJDIR
-    'gc/StatsPhasesGenerated.h',         # generated in $OBJDIR
-    'gc/StatsPhasesGenerated.cpp',       # generated in $OBJDIR
     'jscustomallocator.h',      # provided by embedders;  allowed to be missing
     'js-config.h',              # generated in $OBJDIR
     'fdlibm.h',                 # fdlibm
     'pratom.h',                 # NSPR
     'prcvar.h',                 # NSPR
     'prerror.h',                # NSPR
     'prinit.h',                 # NSPR
     'prio.h',                   # NSPR
@@ -101,18 +99,16 @@ included_inclnames_to_ignore = set([
     'vtune/VTuneWrapper.h'      # VTune
 ])
 
 # These files have additional constraints on where they are #included, so we
 # ignore #includes of them when checking #include ordering.
 oddly_ordered_inclnames = set([
     'ctypes/typedefs.h',        # Included multiple times in the body of ctypes/CTypes.h
     'frontend/ReservedWordsGenerated.h', # Included in the body of frontend/TokenStream.h
-    'gc/StatsPhasesGenerated.h',         # Included in the body of gc/Statistics.h
-    'gc/StatsPhasesGenerated.cpp',       # Included in the body of gc/Statistics.cpp
     'jswin.h',                  # Must be #included before <psapi.h>
     'machine/endian.h',         # Must be included after <sys/types.h> on BSD
     'winbase.h',                # Must precede other system headers(?)
     'windef.h'                  # Must precede other system headers(?)
 ])
 
 # The files in tests/style/ contain code that fails this checking in various
 # ways.  Here is the output we expect.  If the actual output differs from
--- a/js/src/gc/GCInternals.h
+++ b/js/src/gc/GCInternals.h
@@ -80,25 +80,25 @@ class MOZ_RAII AutoStopVerifyingBarriers
         }
     }
 
     ~AutoStopVerifyingBarriers() {
         // Nasty special case: verification runs a minor GC, which *may* nest
         // inside of an outer minor GC. This is not allowed by the
         // gc::Statistics phase tree. So we pause the "real" GC, if in fact one
         // is in progress.
-        gcstats::PhaseKind outer = gc->stats().currentPhaseKind();
-        if (outer != gcstats::PhaseKind::NONE)
+        gcstats::Phase outer = gc->stats().currentPhase();
+        if (outer != gcstats::PHASE_NONE)
             gc->stats().endPhase(outer);
-        MOZ_ASSERT(gc->stats().currentPhaseKind() == gcstats::PhaseKind::NONE);
+        MOZ_ASSERT(gc->stats().currentPhase() == gcstats::PHASE_NONE);
 
         if (restartPreVerifier)
             gc->startVerifyPreBarriers();
 
-        if (outer != gcstats::PhaseKind::NONE)
+        if (outer != gcstats::PHASE_NONE)
             gc->stats().beginPhase(outer);
     }
 };
 #else
 struct MOZ_RAII AutoStopVerifyingBarriers
 {
     AutoStopVerifyingBarriers(JSRuntime*, bool) {}
 };
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -968,24 +968,24 @@ class GCRuntime
                                JS::gcreason::Reason reason, bool canAllocateMoreCode);
     void traceRuntimeForMajorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock);
     void traceRuntimeAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock);
     void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
                             AutoLockForExclusiveAccess& lock);
     void bufferGrayRoots();
     void maybeDoCycleCollection();
     void markCompartments();
-    IncrementalProgress drainMarkStack(SliceBudget& sliceBudget, gcstats::PhaseKind phase);
-    template <class CompartmentIterT> void markWeakReferences(gcstats::PhaseKind phase);
-    void markWeakReferencesInCurrentGroup(gcstats::PhaseKind phase);
-    template <class ZoneIterT, class CompartmentIterT> void markGrayReferences(gcstats::PhaseKind phase);
+    IncrementalProgress drainMarkStack(SliceBudget& sliceBudget, gcstats::Phase phase);
+    template <class CompartmentIterT> void markWeakReferences(gcstats::Phase phase);
+    void markWeakReferencesInCurrentGroup(gcstats::Phase phase);
+    template <class ZoneIterT, class CompartmentIterT> void markGrayReferences(gcstats::Phase phase);
     void markBufferedGrayRoots(JS::Zone* zone);
-    void markGrayReferencesInCurrentGroup(gcstats::PhaseKind phase);
-    void markAllWeakReferences(gcstats::PhaseKind phase);
-    void markAllGrayReferences(gcstats::PhaseKind phase);
+    void markGrayReferencesInCurrentGroup(gcstats::Phase phase);
+    void markAllWeakReferences(gcstats::Phase phase);
+    void markAllGrayReferences(gcstats::Phase phase);
 
     void beginSweepPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock);
     void groupZonesForSweeping(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock);
     MOZ_MUST_USE bool findInterZoneEdges();
     void getNextSweepGroup();
     void endMarkingSweepGroup();
     void beginSweepingSweepGroup();
     bool shouldReleaseObservedTypes();
@@ -1223,18 +1223,18 @@ class GCRuntime
     ActiveThreadData<size_t> sweepPhaseIndex;
     ActiveThreadData<JS::Zone*> sweepZone;
     ActiveThreadData<size_t> sweepActionIndex;
     ActiveThreadData<bool> abortSweepAfterCurrentGroup;
 
     /*
      * Concurrent sweep infrastructure.
      */
-    void startTask(GCParallelTask& task, gcstats::PhaseKind phase, AutoLockHelperThreadState& locked);
-    void joinTask(GCParallelTask& task, gcstats::PhaseKind phase, AutoLockHelperThreadState& locked);
+    void startTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked);
+    void joinTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked);
     friend class AutoRunParallelTask;
 
     /*
      * List head of arenas allocated during the sweep phase.
      */
     ActiveThreadData<Arena*> arenasAllocatedDuringSweep;
 
     /*
@@ -1361,19 +1361,19 @@ class GCRuntime
     const void* addressOfNurseryPosition() {
         return nursery_.refNoCheck().addressOfPosition();
     }
     const void* addressOfNurseryCurrentEnd() {
         return nursery_.refNoCheck().addressOfCurrentEnd();
     }
 
     void minorGC(JS::gcreason::Reason reason,
-                 gcstats::PhaseKind phase = gcstats::PhaseKind::MINOR_GC) JS_HAZ_GC_CALL;
+                 gcstats::Phase phase = gcstats::PHASE_MINOR_GC) JS_HAZ_GC_CALL;
     void evictNursery(JS::gcreason::Reason reason = JS::gcreason::EVICT_NURSERY) {
-        minorGC(reason, gcstats::PhaseKind::EVICT_NURSERY);
+        minorGC(reason, gcstats::PHASE_EVICT_NURSERY);
     }
     void freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo);
 
     friend class js::GCHelperState;
     friend class MarkingValidator;
     friend class AutoTraceSession;
     friend class AutoEnterIteration;
 };
deleted file mode 100644
--- a/js/src/gc/GenerateStatsPhases.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-# Generate graph structures for GC statistics recording.
-#
-# Stats phases are nested and form a directed acyclic graph starting
-# from a set of root phases. Importantly, a phase may appear under more
-# than one parent phase.
-#
-# For example, the following arrangement is possible:
-#
-#            +---+
-#            | A |
-#            +---+
-#              |
-#      +-------+-------+
-#      |       |       |
-#      v       v       v
-#    +---+   +---+   +---+
-#    | B |   | C |   | D |
-#    +---+   +---+   +---+
-#              |       |
-#              +---+---+
-#                  |
-#                  v
-#                +---+
-#                | E |
-#                +---+
-#
-# This graph is expanded into a tree (or really a forest) and phases
-# with multiple parents are duplicated.
-#
-# For example, the input example above would be expanded to:
-#
-#            +---+
-#            | A |
-#            +---+
-#              |
-#      +-------+-------+
-#      |       |       |
-#      v       v       v
-#    +---+   +---+   +---+
-#    | B |   | C |   | D |
-#    +---+   +---+   +---+
-#              |       |
-#              v       v
-#            +---+   +---+
-#            | E |   | E'|
-#            +---+   +---+
-
-import sys
-import collections
-
-class PhaseKind():
-    def __init__(self, name, descr, bucket, children = []):
-        self.name = name
-        self.descr = descr
-        self.bucket = bucket
-        self.children = children
-
-# The root marking phase appears in several places in the graph.
-MarkRootsPhaseKind = PhaseKind("MARK_ROOTS", "Mark Roots", 48, [
-    PhaseKind("BUFFER_GRAY_ROOTS", "Buffer Gray Roots", 49),
-    PhaseKind("MARK_CCWS", "Mark Cross Compartment Wrappers", 50),
-    PhaseKind("MARK_STACK", "Mark C and JS stacks", 51),
-    PhaseKind("MARK_RUNTIME_DATA", "Mark Runtime-wide Data", 52),
-    PhaseKind("MARK_EMBEDDING", "Mark Embedding", 53),
-    PhaseKind("MARK_COMPARTMENTS", "Mark Compartments", 54),
-])
-
-PhaseKindGraphRoots = [
-    PhaseKind("MUTATOR", "Mutator Running", 0),
-    PhaseKind("GC_BEGIN", "Begin Callback", 1),
-    PhaseKind("WAIT_BACKGROUND_THREAD", "Wait Background Thread", 2),
-    PhaseKind("MARK_DISCARD_CODE", "Mark Discard Code", 3),
-    PhaseKind("RELAZIFY_FUNCTIONS", "Relazify Functions", 4),
-    PhaseKind("PURGE", "Purge", 5),
-    PhaseKind("MARK", "Mark", 6, [
-        PhaseKind("UNMARK", "Unmark", 7),
-        MarkRootsPhaseKind,
-        PhaseKind("MARK_DELAYED", "Mark Delayed", 8),
-        ]),
-    PhaseKind("SWEEP", "Sweep", 9, [
-        PhaseKind("SWEEP_MARK", "Mark During Sweeping", 10, [
-            PhaseKind("SWEEP_MARK_TYPES", "Mark Types During Sweeping", 11),
-            PhaseKind("SWEEP_MARK_INCOMING_BLACK", "Mark Incoming Black Pointers", 12),
-            PhaseKind("SWEEP_MARK_WEAK", "Mark Weak", 13),
-            PhaseKind("SWEEP_MARK_INCOMING_GRAY", "Mark Incoming Gray Pointers", 14),
-            PhaseKind("SWEEP_MARK_GRAY", "Mark Gray", 15),
-            PhaseKind("SWEEP_MARK_GRAY_WEAK", "Mark Gray and Weak", 16)
-        ]),
-        PhaseKind("FINALIZE_START", "Finalize Start Callbacks", 17, [
-            PhaseKind("WEAK_ZONES_CALLBACK", "Per-Slice Weak Callback", 57),
-            PhaseKind("WEAK_COMPARTMENT_CALLBACK", "Per-Compartment Weak Callback", 58)
-        ]),
-        PhaseKind("SWEEP_ATOMS", "Sweep Atoms", 18),
-        PhaseKind("SWEEP_COMPARTMENTS", "Sweep Compartments", 20, [
-            PhaseKind("SWEEP_DISCARD_CODE", "Sweep Discard Code", 21),
-            PhaseKind("SWEEP_INNER_VIEWS", "Sweep Inner Views", 22),
-            PhaseKind("SWEEP_CC_WRAPPER", "Sweep Cross Compartment Wrappers", 23),
-            PhaseKind("SWEEP_BASE_SHAPE", "Sweep Base Shapes", 24),
-            PhaseKind("SWEEP_INITIAL_SHAPE", "Sweep Initial Shapes", 25),
-            PhaseKind("SWEEP_TYPE_OBJECT", "Sweep Type Objects", 26),
-            PhaseKind("SWEEP_BREAKPOINT", "Sweep Breakpoints", 27),
-            PhaseKind("SWEEP_REGEXP", "Sweep Regexps", 28),
-            PhaseKind("SWEEP_COMPRESSION", "Sweep Compression Tasks", 62),
-            PhaseKind("SWEEP_WEAKMAPS", "Sweep WeakMaps", 63),
-            PhaseKind("SWEEP_UNIQUEIDS", "Sweep Unique IDs", 64),
-            PhaseKind("SWEEP_JIT_DATA", "Sweep JIT Data", 65),
-            PhaseKind("SWEEP_WEAK_CACHES", "Sweep Weak Caches", 66),
-            PhaseKind("SWEEP_MISC", "Sweep Miscellaneous", 29),
-            PhaseKind("SWEEP_TYPES", "Sweep type information", 30, [
-                PhaseKind("SWEEP_TYPES_BEGIN", "Sweep type tables and compilations", 31),
-                PhaseKind("SWEEP_TYPES_END", "Free type arena", 32),
-            ]),
-        ]),
-        PhaseKind("SWEEP_OBJECT", "Sweep Object", 33),
-        PhaseKind("SWEEP_STRING", "Sweep String", 34),
-        PhaseKind("SWEEP_SCRIPT", "Sweep Script", 35),
-        PhaseKind("SWEEP_SCOPE", "Sweep Scope", 59),
-        PhaseKind("SWEEP_REGEXP_SHARED", "Sweep RegExpShared", 61),
-        PhaseKind("SWEEP_SHAPE", "Sweep Shape", 36),
-        PhaseKind("SWEEP_JITCODE", "Sweep JIT code", 37),
-        PhaseKind("FINALIZE_END", "Finalize End Callback", 38),
-        PhaseKind("DESTROY", "Deallocate", 39)
-        ]),
-    PhaseKind("COMPACT", "Compact", 40, [
-        PhaseKind("COMPACT_MOVE", "Compact Move", 41),
-        PhaseKind("COMPACT_UPDATE", "Compact Update", 42, [
-            MarkRootsPhaseKind,
-            PhaseKind("COMPACT_UPDATE_CELLS", "Compact Update Cells", 43),
-        ]),
-    ]),
-    PhaseKind("GC_END", "End Callback", 44),
-    PhaseKind("MINOR_GC", "All Minor GCs", 45, [
-        MarkRootsPhaseKind,
-    ]),
-    PhaseKind("EVICT_NURSERY", "Minor GCs to Evict Nursery", 46, [
-        MarkRootsPhaseKind,
-    ]),
-    PhaseKind("TRACE_HEAP", "Trace Heap", 47, [
-        MarkRootsPhaseKind,
-    ]),
-    PhaseKind("BARRIER", "Barriers", 55, [
-        PhaseKind("UNMARK_GRAY", "Unmark gray", 56),
-    ]),
-    PhaseKind("PURGE_SHAPE_TABLES", "Purge ShapeTables", 60)
-]
-
-# Make a linear list of all unique phases by performing a depth first
-# search on the phase graph starting at the roots.  This will be used to
-# generate the PhaseKind enum.
-
-def findAllPhaseKinds():
-    phases = []
-    seen = set()
-
-    def dfs(phase):
-        if phase in seen:
-            return
-        phases.append(phase)
-        seen.add(phase)
-        for child in phase.children:
-            dfs(child)
-
-    for phase in PhaseKindGraphRoots:
-        dfs(phase)
-    return phases
-
-AllPhaseKinds = findAllPhaseKinds()
-
-# Expand the DAG into a tree, duplicating phases which have more than
-# one parent.
-
-class Phase:
-    def __init__(self, phaseKind, parent, depth):
-        self.phaseKind = phaseKind
-        self.parent = parent
-        self.depth = depth
-        self.children = []
-        self.nextSibling = None
-        self.nextInPhaseKind = None
-
-def expandPhases():
-    phases = []
-    phasesForPhase = collections.defaultdict(list)
-
-    def traverse(phaseKind, parent, depth):
-        ep = Phase(phaseKind, parent, depth)
-        phases.append(ep)
-
-        # Update list of expanded phases for this phase kind.
-        if phasesForPhase[phaseKind]:
-            phasesForPhase[phaseKind][-1].nextInPhaseKind = ep
-        phasesForPhase[phaseKind].append(ep)
-
-        # Recurse over children.
-        for child in phaseKind.children:
-            child_ep = traverse(child, ep, depth + 1)
-            if ep.children:
-                ep.children[-1].nextSibling = child_ep
-            ep.children.append(child_ep)
-        return ep
-
-    for phaseKind in PhaseKindGraphRoots:
-        traverse(phaseKind, None, 0)
-
-    return phases, phasesForPhase
-
-AllPhases, PhasesForPhaseKind = expandPhases()
-
-# Name expanded phases based on phase kind name and index if there are
-# multiple expanded phases corresponding to a single phase kind.
-
-for phaseKind in AllPhaseKinds:
-    phases = PhasesForPhaseKind[phaseKind]
-    if len(phases) == 1:
-        phases[0].name = "%s" % phaseKind.name
-    else:
-        for index, xphase in enumerate(phases):
-            xphase.name = "%s_%d" % (phaseKind.name, index + 1)
-
-# Generate code.
-
-def writeList(out, items):
-    if items:
-        out.write(",\n".join("  " + item for item in items) + "\n")
-
-def writeEnumClass(out, name, type, items, extraItems):
-    items = [ "FIRST" ] + items + [ "LIMIT" ] + extraItems
-    items[1] += " = " + items[0]
-    out.write("enum class %s : %s {\n" % (name, type));
-    writeList(out, items)
-    out.write("};\n")
-
-def generateHeader(out):
-    #
-    # Generate PhaseKind enum.
-    #
-    phaseKindNames = map(lambda phaseKind: phaseKind.name, AllPhaseKinds)
-    extraPhaseKinds = [
-        "NONE = LIMIT",
-        "EXPLICIT_SUSPENSION = LIMIT",
-        "IMPLICIT_SUSPENSION"
-    ]
-    writeEnumClass(out, "PhaseKind", "uint8_t", phaseKindNames, extraPhaseKinds)
-    out.write("\n")
-
-    #
-    # Generate Phase enum.
-    #
-    expandedPhaseNames = map(lambda xphase: xphase.name, AllPhases)
-    extraPhases = [
-        "NONE = LIMIT",
-        "EXPLICIT_SUSPENSION = LIMIT",
-        "IMPLICIT_SUSPENSION"
-    ]
-    writeEnumClass(out, "Phase", "uint8_t", expandedPhaseNames, extraPhases)
-
-def generateCpp(out):
-    #
-    # Generate the PhaseKindInfo table.
-    #
-    out.write("static const PhaseKindTable phaseKinds = {\n")
-    for phaseKind in AllPhaseKinds:
-        xPhase = PhasesForPhaseKind[phaseKind][0]
-        out.write("    /* PhaseKind::%s */ PhaseKindInfo { Phase::%s, %d },\n" %
-                  (phaseKind.name, xPhase.name, phaseKind.bucket))
-    out.write("};\n")
-    out.write("\n")
-
-    #
-    # Generate the PhaseInfo tree.
-    #
-    def name(xphase):
-        return "Phase::" + xphase.name if xphase else "Phase::NONE"
-
-    out.write("static const PhaseTable phases = {\n")
-    for xphase in AllPhases:
-        firstChild = xphase.children[0] if xphase.children else None
-        phaseKind = xphase.phaseKind
-        out.write("    /* %s */ PhaseInfo { %s, %s, %s, %s, PhaseKind::%s, %d, \"%s\" },\n" %
-                  (name(xphase),
-                   name(xphase.parent),
-                   name(firstChild),
-                   name(xphase.nextSibling),
-                   name(xphase.nextInPhaseKind),
-                   phaseKind.name,
-                   xphase.depth,
-                   phaseKind.descr))
-    out.write("};\n")
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -2518,17 +2518,17 @@ GCMarker::markDelayedChildren(Arena* are
      * allocatedDuringIncremental flag if we continue marking.
      */
 }
 
 bool
 GCMarker::markDelayedChildren(SliceBudget& budget)
 {
     GCRuntime& gc = runtime()->gc;
-    gcstats::AutoPhase ap(gc.stats(), gc.state() == State::Mark, gcstats::PhaseKind::MARK_DELAYED);
+    gcstats::AutoPhase ap(gc.stats(), gc.state() == State::Mark, gcstats::PHASE_MARK_DELAYED);
 
     MOZ_ASSERT(unmarkedArenaStackTop);
     do {
         /*
          * If marking gets delayed at the same arena again, we must repeat
          * marking of its things. For that we pop arena from the stack and
          * clear its hasDelayedMarking flag before we begin the marking.
          */
@@ -3401,18 +3401,18 @@ TypedUnmarkGrayCellRecursively(T* t)
 {
     MOZ_ASSERT(t);
 
     JSRuntime* rt = t->runtimeFromActiveCooperatingThread();
     MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
     MOZ_ASSERT(!JS::CurrentThreadIsHeapCycleCollecting());
 
     UnmarkGrayTracer unmarker(rt);
-    gcstats::AutoPhase outerPhase(rt->gc.stats(), gcstats::PhaseKind::BARRIER);
-    gcstats::AutoPhase innerPhase(rt->gc.stats(), gcstats::PhaseKind::UNMARK_GRAY);
+    gcstats::AutoPhase outerPhase(rt->gc.stats(), gcstats::PHASE_BARRIER);
+    gcstats::AutoPhase innerPhase(rt->gc.stats(), gcstats::PHASE_UNMARK_GRAY);
     unmarker.unmark(JS::GCCellPtr(t, MapTypeToTraceKind<T>::kind));
     return unmarker.unmarkedAny;
 }
 
 struct UnmarkGrayCellRecursivelyFunctor {
     template <typename T> bool operator()(T* t) { return TypedUnmarkGrayCellRecursively(t); }
 };
 
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -733,17 +733,17 @@ js::Nursery::doCollection(JS::gcreason::
     maybeEndProfile(ProfileKey::TraceGenericEntries);
 
     maybeStartProfile(ProfileKey::MarkRuntime);
     rt->gc.traceRuntimeForMinorGC(&mover, session.lock);
     maybeEndProfile(ProfileKey::MarkRuntime);
 
     maybeStartProfile(ProfileKey::MarkDebugger);
     {
-        gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::MARK_ROOTS);
+        gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_MARK_ROOTS);
         Debugger::traceAllForMovingGC(&mover);
     }
     maybeEndProfile(ProfileKey::MarkDebugger);
 
     maybeStartProfile(ProfileKey::ClearNewObjectCache);
     rt->caches().newObjectCache.clearNurseryObjects(rt);
     maybeEndProfile(ProfileKey::ClearNewObjectCache);
 
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -254,80 +254,80 @@ PropertyDescriptor::trace(JSTracer* trc)
 void
 js::gc::GCRuntime::traceRuntimeForMajorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock)
 {
     // FinishRoots will have asserted that every root that we do not expect
     // is gone, so we can simply skip traceRuntime here.
     if (rt->isBeingDestroyed())
         return;
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_ROOTS);
     if (rt->atomsCompartment(lock)->zone()->isCollecting())
         traceRuntimeAtoms(trc, lock);
     JSCompartment::traceIncomingCrossCompartmentEdgesForZoneGC(trc);
     traceRuntimeCommon(trc, MarkRuntime, lock);
 }
 
 void
 js::gc::GCRuntime::traceRuntimeForMinorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock)
 {
     // Note that we *must* trace the runtime during the SHUTDOWN_GC's minor GC
     // despite having called FinishRoots already. This is because FinishRoots
     // does not clear the crossCompartmentWrapper map. It cannot do this
     // because Proxy's trace for CrossCompartmentWrappers asserts presence in
     // the map. And we can reach its trace function despite having finished the
     // roots via the edges stored by the pre-barrier verifier when we finish
     // the verifier for the last time.
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_ROOTS);
 
     jit::JitRuntime::TraceJitcodeGlobalTableForMinorGC(trc);
 
     traceRuntimeCommon(trc, TraceRuntime, lock);
 }
 
 void
 js::TraceRuntime(JSTracer* trc)
 {
     MOZ_ASSERT(!trc->isMarkingTracer());
 
     JSRuntime* rt = trc->runtime();
     EvictAllNurseries(rt);
     AutoPrepareForTracing prep(TlsContext.get(), WithAtoms);
-    gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
+    gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_TRACE_HEAP);
     rt->gc.traceRuntime(trc, prep.session().lock);
 }
 
 void
 js::gc::GCRuntime::traceRuntime(JSTracer* trc, AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(!rt->isBeingDestroyed());
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_ROOTS);
     traceRuntimeAtoms(trc, lock);
     traceRuntimeCommon(trc, TraceRuntime, lock);
 }
 
 void
 js::gc::GCRuntime::traceRuntimeAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock)
 {
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_RUNTIME_DATA);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_RUNTIME_DATA);
     TracePermanentAtoms(trc);
     TraceAtoms(trc, lock);
     TraceWellKnownSymbols(trc);
     jit::JitRuntime::Trace(trc, lock);
 }
 
 void
 js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
                                       AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(!TlsContext.get()->suppressGC);
 
     {
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_STACK);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_STACK);
 
         JSContext* cx = TlsContext.get();
         for (const CooperatingContext& target : rt->cooperatingContexts()) {
             // Trace active interpreter and JIT stack roots.
             TraceInterpreterActivations(cx, target, trc);
             jit::TraceJitActivations(cx, target, trc);
             wasm::TraceActivations(cx, target, trc);
 
@@ -365,17 +365,17 @@ js::gc::GCRuntime::traceRuntimeCommon(JS
     // Trace the Gecko Profiler.
     rt->geckoProfiler().trace(trc);
 
     // Trace helper thread roots.
     HelperThreadState().trace(trc);
 
     // Trace the embedding's black and gray roots.
     if (!JS::CurrentThreadIsHeapMinorCollecting()) {
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_EMBEDDING);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_EMBEDDING);
 
         /*
          * The embedding can register additional roots here.
          *
          * We don't need to trace these in a minor GC because all pointers into
          * the nursery should be in the store buffer, and we want to avoid the
          * time taken to trace all these roots.
          */
@@ -426,17 +426,17 @@ js::gc::GCRuntime::finishRoots()
 #ifdef DEBUG
     // The nsWrapperCache may not be empty before our shutdown GC, so we have
     // to skip that table when verifying that we are fully unrooted.
     auto prior = grayRootTracer;
     grayRootTracer = Callback<JSTraceDataOp>(nullptr, nullptr);
 
     AssertNoRootsTracer trc(rt, TraceWeakMapKeysValues);
     AutoPrepareForTracing prep(TlsContext.get(), WithAtoms);
-    gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
+    gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_TRACE_HEAP);
     traceRuntime(&trc, prep.session().lock);
 
     // Restore the wrapper tracing so that we leak instead of leaving dangling
     // pointers.
     grayRootTracer = prior;
 #endif // DEBUG
 }
 
@@ -476,17 +476,17 @@ void
 js::gc::GCRuntime::bufferGrayRoots()
 {
     // Precondition: the state has been reset to "unused" after the last GC
     //               and the zone's buffers have been cleared.
     MOZ_ASSERT(grayBufferState == GrayBufferState::Unused);
     for (GCZonesIter zone(rt); !zone.done(); zone.next())
         MOZ_ASSERT(zone->gcGrayRoots().empty());
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::BUFFER_GRAY_ROOTS);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_BUFFER_GRAY_ROOTS);
 
     BufferGrayRootsTracer grayBufferer(rt);
     if (JSTraceDataOp op = grayRootTracer.op)
         (*op)(&grayBufferer, grayRootTracer.data);
 
     // Propagate the failure flag from the marker to the runtime.
     if (grayBufferer.failed()) {
       grayBufferState = GrayBufferState::Failed;
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -26,34 +26,33 @@
 #include "vm/Runtime.h"
 #include "vm/Time.h"
 
 using namespace js;
 using namespace js::gc;
 using namespace js::gcstats;
 
 using mozilla::DebugOnly;
-using mozilla::EnumeratedArray;
 using mozilla::IntegerRange;
 using mozilla::PodArrayZero;
 using mozilla::PodZero;
 using mozilla::TimeStamp;
 using mozilla::TimeDuration;
 
 /*
  * If this fails, then you can either delete this assertion and allow all
  * larger-numbered reasons to pile up in the last telemetry bucket, or switch
  * to GC_REASON_3 and bump the max value.
  */
 JS_STATIC_ASSERT(JS::gcreason::NUM_TELEMETRY_REASONS >= JS::gcreason::NUM_REASONS);
 
-static inline decltype(mozilla::MakeEnumeratedRange(PhaseKind::FIRST, PhaseKind::LIMIT))
-AllPhaseKinds()
+static inline decltype(mozilla::MakeEnumeratedRange(PHASE_FIRST, PHASE_LIMIT))
+AllPhases()
 {
-    return mozilla::MakeEnumeratedRange(PhaseKind::FIRST, PhaseKind::LIMIT);
+    return mozilla::MakeEnumeratedRange(PHASE_FIRST, PHASE_LIMIT);
 }
 
 const char*
 js::gcstats::ExplainInvocationKind(JSGCInvocationKind gckind)
 {
     MOZ_ASSERT(gckind == GC_NORMAL || gckind == GC_SHRINK);
     if (gckind == GC_NORMAL)
          return "Normal";
@@ -86,102 +85,229 @@ js::gcstats::ExplainAbortReason(gc::Abor
         GC_ABORT_REASONS(SWITCH_REASON)
 
         default:
           MOZ_CRASH("bad GC abort reason");
 #undef SWITCH_REASON
     }
 }
 
-struct PhaseKindInfo
-{
-    Phase firstPhase;
-    uint8_t telemetryBucket;
-};
-
-// PhaseInfo objects form a tree.
-struct PhaseInfo
-{
-    Phase parent;
-    Phase firstChild;
-    Phase nextSibling;
-    Phase nextInPhase;
-    PhaseKind phaseKind;
-    uint8_t depth;
-    const char* name;
-};
-
-// A table of ExpandePhaseInfo indexed by Phase.
-using PhaseTable = EnumeratedArray<Phase, Phase::LIMIT, PhaseInfo>;
-
-// A table of PhaseKindInfo indexed by Phase.
-using PhaseKindTable = EnumeratedArray<PhaseKind, PhaseKind::LIMIT, PhaseKindInfo>;
-
-#include "gc/StatsPhasesGenerated.cpp"
-
 static double
 t(TimeDuration duration)
 {
     return duration.ToMilliseconds();
 }
 
-Phase
-Statistics::currentPhase() const
+struct PhaseInfo
 {
-    return phaseNestingDepth ? phaseNesting[phaseNestingDepth - 1] : Phase::NONE;
-}
+    Phase index;
+    const char* name;
+    Phase parent;
+    uint8_t telemetryBucket;
+};
+
+// The zeroth entry in the timing arrays is used for phases that have a
+// unique lineage.
+static const size_t PHASE_DAG_NONE = 0;
+
+// These are really just fields of PhaseInfo, but I have to initialize them
+// programmatically, which prevents making phases[] const. (And marking these
+// fields mutable does not work on Windows; the whole thing gets created in
+// read-only memory anyway.)
+struct ExtraPhaseInfo
+{
+    // Depth in the tree of each phase type
+    size_t depth;
 
-PhaseKind
-Statistics::currentPhaseKind() const
-{
-    // Public API to get the current phase.  Return the current phase,
-    // suppressing the synthetic PhaseKind::MUTATOR phase.
+    // Index into the set of parallel arrays of timing data, for parents with
+    // at least one multi-parented child
+    size_t dagSlot;
+
+    ExtraPhaseInfo() : depth(0), dagSlot(0) {}
+};
+
+static const Phase PHASE_NO_PARENT = PHASE_LIMIT;
 
-    Phase phase = currentPhase();
-    MOZ_ASSERT_IF(phase == Phase::MUTATOR, phaseNestingDepth == 1);
-    if (phase == Phase::NONE || phase == Phase::MUTATOR)
-        return PhaseKind::NONE;
+struct DagChildEdge {
+    Phase parent;
+    Phase child;
+} dagChildEdges[] = {
+    { PHASE_MARK, PHASE_MARK_ROOTS },
+    { PHASE_MINOR_GC, PHASE_MARK_ROOTS },
+    { PHASE_TRACE_HEAP, PHASE_MARK_ROOTS },
+    { PHASE_EVICT_NURSERY, PHASE_MARK_ROOTS },
+    { PHASE_COMPACT_UPDATE, PHASE_MARK_ROOTS }
+};
+
+/*
+ * Note that PHASE_MUTATOR never has any child phases. If beginPhase is called
+ * while PHASE_MUTATOR is active, it will automatically be suspended and
+ * resumed when the phase stack is next empty. Timings for these phases are
+ * thus exclusive of any other phase.
+ */
 
-    return phases[phase].phaseKind;
-}
+static const PhaseInfo phases[] = {
+    { PHASE_MUTATOR, "Mutator Running", PHASE_NO_PARENT, 0 },
+    { PHASE_GC_BEGIN, "Begin Callback", PHASE_NO_PARENT, 1 },
+    { PHASE_WAIT_BACKGROUND_THREAD, "Wait Background Thread", PHASE_NO_PARENT, 2 },
+    { PHASE_MARK_DISCARD_CODE, "Mark Discard Code", PHASE_NO_PARENT, 3 },
+    { PHASE_RELAZIFY_FUNCTIONS, "Relazify Functions", PHASE_NO_PARENT, 4 },
+    { PHASE_PURGE, "Purge", PHASE_NO_PARENT, 5 },
+    { PHASE_MARK, "Mark", PHASE_NO_PARENT, 6 },
+        { PHASE_UNMARK, "Unmark", PHASE_MARK, 7 },
+        /* PHASE_MARK_ROOTS */
+        { PHASE_MARK_DELAYED, "Mark Delayed", PHASE_MARK, 8 },
+    { PHASE_SWEEP, "Sweep", PHASE_NO_PARENT, 9 },
+        { PHASE_SWEEP_MARK, "Mark During Sweeping", PHASE_SWEEP, 10 },
+            { PHASE_SWEEP_MARK_TYPES, "Mark Types During Sweeping", PHASE_SWEEP_MARK, 11 },
+            { PHASE_SWEEP_MARK_INCOMING_BLACK, "Mark Incoming Black Pointers", PHASE_SWEEP_MARK, 12 },
+            { PHASE_SWEEP_MARK_WEAK, "Mark Weak", PHASE_SWEEP_MARK, 13 },
+            { PHASE_SWEEP_MARK_INCOMING_GRAY, "Mark Incoming Gray Pointers", PHASE_SWEEP_MARK, 14 },
+            { PHASE_SWEEP_MARK_GRAY, "Mark Gray", PHASE_SWEEP_MARK, 15 },
+            { PHASE_SWEEP_MARK_GRAY_WEAK, "Mark Gray and Weak", PHASE_SWEEP_MARK, 16 },
+        { PHASE_FINALIZE_START, "Finalize Start Callbacks", PHASE_SWEEP, 17 },
+            { PHASE_WEAK_ZONES_CALLBACK, "Per-Slice Weak Callback", PHASE_FINALIZE_START, 57 },
+            { PHASE_WEAK_COMPARTMENT_CALLBACK, "Per-Compartment Weak Callback", PHASE_FINALIZE_START, 58 },
+        { PHASE_SWEEP_ATOMS, "Sweep Atoms", PHASE_SWEEP, 18 },
+        { PHASE_SWEEP_COMPARTMENTS, "Sweep Compartments", PHASE_SWEEP, 20 },
+            { PHASE_SWEEP_DISCARD_CODE, "Sweep Discard Code", PHASE_SWEEP_COMPARTMENTS, 21 },
+            { PHASE_SWEEP_INNER_VIEWS, "Sweep Inner Views", PHASE_SWEEP_COMPARTMENTS, 22 },
+            { PHASE_SWEEP_CC_WRAPPER, "Sweep Cross Compartment Wrappers", PHASE_SWEEP_COMPARTMENTS, 23 },
+            { PHASE_SWEEP_BASE_SHAPE, "Sweep Base Shapes", PHASE_SWEEP_COMPARTMENTS, 24 },
+            { PHASE_SWEEP_INITIAL_SHAPE, "Sweep Initial Shapes", PHASE_SWEEP_COMPARTMENTS, 25 },
+            { PHASE_SWEEP_TYPE_OBJECT, "Sweep Type Objects", PHASE_SWEEP_COMPARTMENTS, 26 },
+            { PHASE_SWEEP_BREAKPOINT, "Sweep Breakpoints", PHASE_SWEEP_COMPARTMENTS, 27 },
+            { PHASE_SWEEP_REGEXP, "Sweep Regexps", PHASE_SWEEP_COMPARTMENTS, 28 },
+            { PHASE_SWEEP_COMPRESSION, "Sweep Compression Tasks", PHASE_SWEEP_COMPARTMENTS, 62 },
+            { PHASE_SWEEP_WEAKMAPS, "Sweep WeakMaps", PHASE_SWEEP_COMPARTMENTS, 63 },
+            { PHASE_SWEEP_UNIQUEIDS, "Sweep Unique IDs", PHASE_SWEEP_COMPARTMENTS, 64 },
+            { PHASE_SWEEP_JIT_DATA, "Sweep JIT Data", PHASE_SWEEP_COMPARTMENTS, 65 },
+            { PHASE_SWEEP_WEAK_CACHES, "Sweep Weak Caches", PHASE_SWEEP_COMPARTMENTS, 66 },
+            { PHASE_SWEEP_MISC, "Sweep Miscellaneous", PHASE_SWEEP_COMPARTMENTS, 29 },
+            { PHASE_SWEEP_TYPES, "Sweep type information", PHASE_SWEEP_COMPARTMENTS, 30 },
+                { PHASE_SWEEP_TYPES_BEGIN, "Sweep type tables and compilations", PHASE_SWEEP_TYPES, 31 },
+                { PHASE_SWEEP_TYPES_END, "Free type arena", PHASE_SWEEP_TYPES, 32 },
+        { PHASE_SWEEP_OBJECT, "Sweep Object", PHASE_SWEEP, 33 },
+        { PHASE_SWEEP_STRING, "Sweep String", PHASE_SWEEP, 34 },
+        { PHASE_SWEEP_SCRIPT, "Sweep Script", PHASE_SWEEP, 35 },
+        { PHASE_SWEEP_SCOPE, "Sweep Scope", PHASE_SWEEP, 59 },
+        { PHASE_SWEEP_REGEXP_SHARED, "Sweep RegExpShared", PHASE_SWEEP, 61 },
+        { PHASE_SWEEP_SHAPE, "Sweep Shape", PHASE_SWEEP, 36 },
+        { PHASE_SWEEP_JITCODE, "Sweep JIT code", PHASE_SWEEP, 37 },
+        { PHASE_FINALIZE_END, "Finalize End Callback", PHASE_SWEEP, 38 },
+        { PHASE_DESTROY, "Deallocate", PHASE_SWEEP, 39 },
+    { PHASE_COMPACT, "Compact", PHASE_NO_PARENT, 40 },
+        { PHASE_COMPACT_MOVE, "Compact Move", PHASE_COMPACT, 41 },
+        { PHASE_COMPACT_UPDATE, "Compact Update", PHASE_COMPACT, 42 },
+            /* PHASE_MARK_ROOTS */
+            { PHASE_COMPACT_UPDATE_CELLS, "Compact Update Cells", PHASE_COMPACT_UPDATE, 43 },
+    { PHASE_GC_END, "End Callback", PHASE_NO_PARENT, 44 },
+    { PHASE_MINOR_GC, "All Minor GCs", PHASE_NO_PARENT, 45 },
+        /* PHASE_MARK_ROOTS */
+    { PHASE_EVICT_NURSERY, "Minor GCs to Evict Nursery", PHASE_NO_PARENT, 46 },
+        /* PHASE_MARK_ROOTS */
+    { PHASE_TRACE_HEAP, "Trace Heap", PHASE_NO_PARENT, 47 },
+        /* PHASE_MARK_ROOTS */
+    { PHASE_BARRIER, "Barriers", PHASE_NO_PARENT, 55 },
+        { PHASE_UNMARK_GRAY, "Unmark gray", PHASE_BARRIER, 56 },
+    { PHASE_MARK_ROOTS, "Mark Roots", PHASE_MULTI_PARENTS, 48 },
+        { PHASE_BUFFER_GRAY_ROOTS, "Buffer Gray Roots", PHASE_MARK_ROOTS, 49 },
+        { PHASE_MARK_CCWS, "Mark Cross Compartment Wrappers", PHASE_MARK_ROOTS, 50 },
+        { PHASE_MARK_STACK, "Mark C and JS stacks", PHASE_MARK_ROOTS, 51 },
+        { PHASE_MARK_RUNTIME_DATA, "Mark Runtime-wide Data", PHASE_MARK_ROOTS, 52 },
+        { PHASE_MARK_EMBEDDING, "Mark Embedding", PHASE_MARK_ROOTS, 53 },
+        { PHASE_MARK_COMPARTMENTS, "Mark Compartments", PHASE_MARK_ROOTS, 54 },
+    { PHASE_PURGE_SHAPE_TABLES, "Purge ShapeTables", PHASE_NO_PARENT, 60 },
 
-Phase
-Statistics::lookupChildPhase(PhaseKind phaseKind) const
-{
-    if (phaseKind == PhaseKind::IMPLICIT_SUSPENSION)
-        return Phase::IMPLICIT_SUSPENSION;
-    if (phaseKind == PhaseKind::EXPLICIT_SUSPENSION)
-        return Phase::EXPLICIT_SUSPENSION;
+    { PHASE_LIMIT, nullptr, PHASE_NO_PARENT, 66 }
+
+    // The current number of telemetryBuckets is equal to the value for
+    // PHASE_LIMIT. If you insert new phases somewhere, start at that number and
+    // count up. Do not change any existing numbers.
+};
+
+static mozilla::EnumeratedArray<Phase, PHASE_LIMIT, ExtraPhaseInfo> phaseExtra;
+
+// Mapping from all nodes with a multi-parented child to a Vector of all
+// multi-parented children and their descendants. (Single-parented children will
+// not show up in this list.)
+static mozilla::Vector<Phase, 0, SystemAllocPolicy> dagDescendants[Statistics::NumTimingArrays];
 
-    MOZ_ASSERT(phaseKind < PhaseKind::LIMIT);
+// Preorder iterator over all phases in the expanded tree. Positions are
+// returned as <phase,dagSlot> pairs (dagSlot will be zero aka PHASE_DAG_NONE
+// for the top nodes with a single path from the parent, and 1 or more for
+// nodes in multiparented subtrees).
+struct AllPhaseIterator {
+    // If 'descendants' is empty, the current Phase position.
+    int current;
+
+    // The depth of the current multiparented node that we are processing, or
+    // zero if we are pointing to the top portion of the tree.
+    int baseLevel;
 
-    // Most phases only correspond to a single expanded phase so check for that
-    // first.
-    Phase phase = phaseKinds[phaseKind].firstPhase;
-    if (phases[phase].nextInPhase == Phase::NONE) {
-        MOZ_ASSERT(phases[phase].parent == currentPhase());
-        return phase;
+    // When looking at multiparented descendants, the dag slot (index into
+    // PhaseTimeTables) containing the entries for the current parent.
+    size_t activeSlot;
+
+    // When iterating over a multiparented subtree, the list of (remaining)
+    // subtree nodes.
+    mozilla::Vector<Phase, 0, SystemAllocPolicy>::Range descendants;
+
+    explicit AllPhaseIterator()
+      : current(0)
+      , baseLevel(0)
+      , activeSlot(PHASE_DAG_NONE)
+      , descendants(dagDescendants[PHASE_DAG_NONE].all()) /* empty range */
+    {
     }
 
-    // Otherwise search all expanded phases that correspond to the required
-    // phase to find the one whose parent is the current expanded phase.
-    Phase parent = currentPhase();
-    while (phases[phase].parent != parent) {
-        phase = phases[phase].nextInPhase;
-        MOZ_ASSERT(phase != Phase::NONE);
+    void get(Phase* phase, size_t* dagSlot, int* level = nullptr) {
+        MOZ_ASSERT(!done());
+        *dagSlot = activeSlot;
+        *phase = descendants.empty() ? Phase(current) : descendants.front();
+        if (level)
+            *level = phaseExtra[*phase].depth + baseLevel;
     }
 
-    return phase;
-}
+    void advance() {
+        MOZ_ASSERT(!done());
+
+        if (!descendants.empty()) {
+            // Currently iterating over a multiparented subtree.
+            descendants.popFront();
+            if (!descendants.empty())
+                return;
+
+            // Just before leaving the last child, reset the iterator to look
+            // at "main" phases (in PHASE_DAG_NONE) instead of multiparented
+            // subtree phases.
+            ++current;
+            activeSlot = PHASE_DAG_NONE;
+            baseLevel = 0;
+            return;
+        }
 
-inline decltype(mozilla::MakeEnumeratedRange(Phase::FIRST, Phase::LIMIT))
-AllPhases()
-{
-    return mozilla::MakeEnumeratedRange(Phase::FIRST, Phase::LIMIT);
-}
+        auto phase = Phase(current);
+        if (phaseExtra[phase].dagSlot != PHASE_DAG_NONE) {
+            // The current phase has a shared subtree. Load them up into
+            // 'descendants' and advance to the first child.
+            activeSlot = phaseExtra[phase].dagSlot;
+            descendants = dagDescendants[activeSlot].all();
+            MOZ_ASSERT(!descendants.empty());
+            baseLevel += phaseExtra[phase].depth + 1;
+            return;
+        }
+
+        ++current;
+    }
+
+    bool done() const {
+        return phases[current].parent == PHASE_MULTI_PARENTS;
+    }
+};
 
 void
 Statistics::gcDuration(TimeDuration* total, TimeDuration* maxPause) const
 {
     *total = *maxPause = 0;
     for (auto& slice : slices_) {
         *total += slice.duration();
         if (slice.duration() > *maxPause)
@@ -199,55 +325,62 @@ Statistics::sccDurations(TimeDuration* t
         *total += sccTimes[i];
         *maxPause = Max(*maxPause, sccTimes[i]);
     }
 }
 
 typedef Vector<UniqueChars, 8, SystemAllocPolicy> FragmentVector;
 
 static UniqueChars
-Join(const FragmentVector& fragments, const char* separator = "")
-{
+Join(const FragmentVector& fragments, const char* separator = "") {
     const size_t separatorLength = strlen(separator);
     size_t length = 0;
     for (size_t i = 0; i < fragments.length(); ++i) {
         length += fragments[i] ? strlen(fragments[i].get()) : 0;
         if (i < (fragments.length() - 1))
             length += separatorLength;
     }
 
     char* joined = js_pod_malloc<char>(length + 1);
-    if (!joined)
-        return UniqueChars();
+    joined[length] = '\0';
 
-    joined[length] = '\0';
     char* cursor = joined;
     for (size_t i = 0; i < fragments.length(); ++i) {
         if (fragments[i])
             strcpy(cursor, fragments[i].get());
         cursor += fragments[i] ? strlen(fragments[i].get()) : 0;
         if (i < (fragments.length() - 1)) {
             if (separatorLength)
                 strcpy(cursor, separator);
             cursor += separatorLength;
         }
     }
 
     return UniqueChars(joined);
 }
 
 static TimeDuration
-SumChildTimes(Phase phase, const Statistics::PhaseTimeTable& phaseTimes)
+SumChildTimes(size_t phaseSlot, Phase phase, const Statistics::PhaseTimeTable& phaseTimes)
 {
+    // Sum the contributions from single-parented children.
     TimeDuration total = 0;
-    for (phase = phases[phase].firstChild;
-         phase != Phase::NONE;
-         phase = phases[phase].nextSibling)
-    {
-        total += phaseTimes[phase];
+    size_t depth = phaseExtra[phase].depth;
+    for (unsigned i = phase + 1; i < PHASE_LIMIT && phaseExtra[Phase(i)].depth > depth; i++) {
+        if (phases[i].parent == phase)
+            total += phaseTimes[phaseSlot][Phase(i)];
+    }
+
+    // Sum the contributions from multi-parented children.
+    size_t dagSlot = phaseExtra[phase].dagSlot;
+    MOZ_ASSERT(dagSlot <= Statistics::MaxMultiparentPhases - 1);
+    if (dagSlot != PHASE_DAG_NONE) {
+        for (auto edge : dagChildEdges) {
+            if (edge.parent == phase)
+                total += phaseTimes[dagSlot][edge.child];
+        }
     }
     return total;
 }
 
 UniqueChars
 Statistics::formatCompactSliceMessage() const
 {
     // Skip if we OOM'ed.
@@ -332,22 +465,25 @@ Statistics::formatCompactSummaryMessage(
 
 UniqueChars
 Statistics::formatCompactSlicePhaseTimes(const PhaseTimeTable& phaseTimes) const
 {
     static const TimeDuration MaxUnaccountedTime = TimeDuration::FromMicroseconds(100);
 
     FragmentVector fragments;
     char buffer[128];
-    for (auto phase : AllPhases()) {
-        DebugOnly<uint8_t> level = phases[phase].depth;
+    for (AllPhaseIterator iter; !iter.done(); iter.advance()) {
+        Phase phase;
+        size_t dagSlot;
+        int level;
+        iter.get(&phase, &dagSlot, &level);
         MOZ_ASSERT(level < 4);
 
-        TimeDuration ownTime = phaseTimes[phase];
-        TimeDuration childTime = SumChildTimes(phase, phaseTimes);
+        TimeDuration ownTime = phaseTimes[dagSlot][phase];
+        TimeDuration childTime = SumChildTimes(dagSlot, phase, phaseTimes);
         if (ownTime > MaxUnaccountedTime) {
             SprintfLiteral(buffer, "%s: %.3fms", phases[phase].name, t(ownTime));
             if (!fragments.append(DuplicateString(buffer)))
                 return UniqueChars(nullptr);
 
             if (childTime && (ownTime - childTime) > MaxUnaccountedTime) {
                 MOZ_ASSERT(level < 3);
                 SprintfLiteral(buffer, "%s: %.3fms", "Other", t(ownTime - childTime));
@@ -456,20 +592,24 @@ Statistics::formatDetailedSliceDescripti
 
 UniqueChars
 Statistics::formatDetailedPhaseTimes(const PhaseTimeTable& phaseTimes) const
 {
     static const TimeDuration MaxUnaccountedChildTime = TimeDuration::FromMicroseconds(50);
 
     FragmentVector fragments;
     char buffer[128];
-    for (auto phase : AllPhases()) {
-        uint8_t level = phases[phase].depth;
-        TimeDuration ownTime = phaseTimes[phase];
-        TimeDuration childTime = SumChildTimes(phase, phaseTimes);
+    for (AllPhaseIterator iter; !iter.done(); iter.advance()) {
+        Phase phase;
+        size_t dagSlot;
+        int level;
+        iter.get(&phase, &dagSlot, &level);
+
+        TimeDuration ownTime = phaseTimes[dagSlot][phase];
+        TimeDuration childTime = SumChildTimes(dagSlot, phase, phaseTimes);
         if (!ownTime.IsZero()) {
             SprintfLiteral(buffer, "      %*s: %.3fms\n",
                            level * 2, phases[phase].name, t(ownTime));
             if (!fragments.append(DuplicateString(buffer)))
                 return UniqueChars(nullptr);
 
             if (childTime && (ownTime - childTime) > MaxUnaccountedChildTime) {
                 SprintfLiteral(buffer, "      %*s: %.3fms\n",
@@ -637,33 +777,38 @@ SanitizeJsonKey(const char* const buffer
     }
 
     return UniqueChars(mut);
 }
 
 void
 Statistics::formatJsonPhaseTimes(const PhaseTimeTable& phaseTimes, JSONPrinter& json) const
 {
-    for (auto phase : AllPhases()) {
+    for (AllPhaseIterator iter; !iter.done(); iter.advance()) {
+        Phase phase;
+        size_t dagSlot;
+        iter.get(&phase, &dagSlot);
+
         UniqueChars name = SanitizeJsonKey(phases[phase].name);
         if (!name)
             json.outOfMemory();
-        TimeDuration ownTime = phaseTimes[phase];
+        TimeDuration ownTime = phaseTimes[dagSlot][phase];
         if (!ownTime.IsZero())
             json.property(name.get(), ownTime, JSONPrinter::MILLISECONDS);
     }
 }
 
 Statistics::Statistics(JSRuntime* rt)
   : runtime(rt),
     fp(nullptr),
     nonincrementalReason_(gc::AbortReason::None),
     preBytes(0),
     maxPauseInInterval(0),
     phaseNestingDepth(0),
+    activeDagSlot(PHASE_DAG_NONE),
     suspended(0),
     sliceCallback(nullptr),
     nurseryCollectionCallback(nullptr),
     aborted(false),
     enableProfiling_(false),
     sliceCount_(0)
 {
     for (auto& count : counts)
@@ -703,47 +848,64 @@ Statistics::~Statistics()
     if (fp && fp != stdout && fp != stderr)
         fclose(fp);
 }
 
 /* static */ bool
 Statistics::initialize()
 {
 #ifdef DEBUG
-    // Sanity check generated tables.
     for (auto i : AllPhases()) {
-        auto parent = phases[i].parent;
-        if (parent != Phase::NONE) {
-            MOZ_ASSERT(phases[i].depth == phases[parent].depth + 1);
-        }
-        auto firstChild = phases[i].firstChild;
-        if (firstChild != Phase::NONE) {
-            MOZ_ASSERT(i == phases[firstChild].parent);
-            MOZ_ASSERT(phases[i].depth == phases[firstChild].depth - 1);
-        }
-        auto nextSibling = phases[i].nextSibling;
-        if (nextSibling != Phase::NONE) {
-            MOZ_ASSERT(parent == phases[nextSibling].parent);
-            MOZ_ASSERT(phases[i].depth == phases[nextSibling].depth);
-        }
-        auto nextInPhase = phases[i].nextInPhase;
-        if (nextInPhase != Phase::NONE) {
-            MOZ_ASSERT(phases[i].phaseKind == phases[nextInPhase].phaseKind);
-            MOZ_ASSERT(parent != phases[nextInPhase].parent);
-        }
-    }
-    for (auto i : AllPhaseKinds()) {
-        MOZ_ASSERT(phases[phaseKinds[i].firstPhase].phaseKind == i);
-        for (auto j : AllPhaseKinds()) {
-            MOZ_ASSERT_IF(i != j,
-                          phaseKinds[i].telemetryBucket != phaseKinds[j].telemetryBucket);
-        }
+        MOZ_ASSERT(phases[i].index == i);
+        for (auto j : AllPhases())
+            MOZ_ASSERT_IF(i != j, phases[i].telemetryBucket != phases[j].telemetryBucket);
     }
 #endif
 
+    // Create a static table of descendants for every phase with multiple
+    // children. This assumes that all descendants come linearly in the
+    // list, which is reasonable since full dags are not supported; any
+    // path from the leaf to the root must encounter at most one node with
+    // multiple parents.
+    size_t dagSlot = 0;
+    for (size_t i = 0; i < mozilla::ArrayLength(dagChildEdges); i++) {
+        Phase parent = dagChildEdges[i].parent;
+        if (!phaseExtra[parent].dagSlot)
+            phaseExtra[parent].dagSlot = ++dagSlot;
+
+        Phase child = dagChildEdges[i].child;
+        MOZ_ASSERT(phases[child].parent == PHASE_MULTI_PARENTS);
+        int j = child;
+        do {
+            if (!dagDescendants[phaseExtra[parent].dagSlot].append(Phase(j)))
+                return false;
+            j++;
+        } while (j != PHASE_LIMIT && phases[j].parent != PHASE_MULTI_PARENTS);
+    }
+    MOZ_ASSERT(dagSlot <= MaxMultiparentPhases - 1);
+
+    // Fill in the depth of each node in the tree. Multi-parented nodes
+    // have depth 0.
+    mozilla::Vector<Phase, 0, SystemAllocPolicy> stack;
+    if (!stack.append(PHASE_LIMIT)) // Dummy entry to avoid special-casing the first node
+        return false;
+    for (auto i : AllPhases()) {
+        if (phases[i].parent == PHASE_NO_PARENT ||
+            phases[i].parent == PHASE_MULTI_PARENTS)
+        {
+            stack.clear();
+        } else {
+            while (stack.back() != phases[i].parent)
+                stack.popBack();
+        }
+        phaseExtra[i].depth = stack.length();
+        if (!stack.append(i))
+            return false;
+    }
+
     return true;
 }
 
 JS::GCSliceCallback
 Statistics::setSliceCallback(JS::GCSliceCallback newCallback)
 {
     JS::GCSliceCallback oldCallback = sliceCallback;
     sliceCallback = newCallback;
@@ -770,75 +932,78 @@ TimeDuration
 Statistics::getMaxGCPauseSinceClear()
 {
     return maxPauseInInterval;
 }
 
 // Sum up the time for a phase, including instances of the phase with different
 // parents.
 static TimeDuration
-SumPhase(PhaseKind phaseKind, const Statistics::PhaseTimeTable& times)
+SumPhase(Phase phase, const Statistics::PhaseTimeTable& times)
 {
     TimeDuration sum = 0;
-    for (Phase phase = phaseKinds[phaseKind].firstPhase;
-         phase != Phase::NONE;
-         phase = phases[phase].nextInPhase)
-    {
-        sum += times[phase];
-    }
+    for (const auto& phaseTimes : times)
+        sum += phaseTimes[phase];
     return sum;
 }
 
 static void
-CheckSelfTime(Phase parent,
-              Phase child,
-              const Statistics::PhaseTimeTable& times,
-              const Statistics::PhaseTimeTable& selfTimes,
-              TimeDuration childTime)
+CheckSelfTime(Phase parent, Phase child, const Statistics::PhaseTimeTable& times, TimeDuration selfTimes[PHASE_LIMIT], TimeDuration childTime)
 {
     if (selfTimes[parent] < childTime) {
         fprintf(stderr,
-                "Parent %s time = %.3fms with %.3fms remaining, child %s time %.3fms\n",
-                phases[parent].name,
-                times[parent].ToMilliseconds(),
+                "Parent %s time = %.3fms"
+                " with %.3fms remaining, "
+                "child %s time %.3fms\n",
+                phases[parent].name, SumPhase(parent, times).ToMilliseconds(),
                 selfTimes[parent].ToMilliseconds(),
-                phases[child].name,
-                childTime.ToMilliseconds());
-        MOZ_CRASH();
+                phases[child].name, childTime.ToMilliseconds());
     }
 }
 
-static PhaseKind
+static Phase
 LongestPhaseSelfTime(const Statistics::PhaseTimeTable& times)
 {
-    // Start with total times per expanded phase, including children's times.
-    Statistics::PhaseTimeTable selfTimes(times);
+    TimeDuration selfTimes[PHASE_LIMIT];
+
+    // Start with total times, including children's times.
+    for (auto i : AllPhases())
+        selfTimes[i] = SumPhase(i, times);
 
     // We have the total time spent in each phase, including descendant times.
     // Loop over the children and subtract their times from their parent's self
     // time.
     for (auto i : AllPhases()) {
         Phase parent = phases[i].parent;
-        if (parent != Phase::NONE) {
-            CheckSelfTime(parent, i, times, selfTimes, times[i]);
-            selfTimes[parent] -= times[i];
+        if (parent == PHASE_MULTI_PARENTS) {
+            // Current phase i has multiple parents. Each "instance" of this
+            // phase is in a parallel array of times indexed by 'dagSlot', so
+            // subtract only the dagSlot-specific child's time from the parent.
+            for (auto edge : dagChildEdges) {
+                if (edge.parent == i) {
+                    size_t dagSlot = phaseExtra[edge.parent].dagSlot;
+                    MOZ_ASSERT(dagSlot <= Statistics::MaxMultiparentPhases - 1);
+                    CheckSelfTime(edge.parent, edge.child, times,
+                                  selfTimes, times[dagSlot][edge.child]);
+                    MOZ_ASSERT(selfTimes[edge.parent] >= times[dagSlot][edge.child]);
+                    selfTimes[edge.parent] -= times[dagSlot][edge.child];
+                }
+            }
+        } else if (parent != PHASE_NO_PARENT) {
+            CheckSelfTime(parent, i, times, selfTimes, selfTimes[i]);
+            MOZ_ASSERT(selfTimes[parent] >= selfTimes[i]);
+            selfTimes[parent] -= selfTimes[i];
         }
     }
 
-    // Sum expanded phases corresponding to the same phase.
-    EnumeratedArray<PhaseKind, PhaseKind::LIMIT, TimeDuration> phaseTimes;
-    for (auto i : AllPhaseKinds())
-        phaseTimes[i] = SumPhase(i, selfTimes);
-
-    // Loop over this table to find the longest phase.
     TimeDuration longestTime = 0;
-    PhaseKind longestPhase = PhaseKind::NONE;
-    for (auto i : AllPhaseKinds()) {
-        if (phaseTimes[i] > longestTime) {
-            longestTime = phaseTimes[i];
+    Phase longestPhase = PHASE_NONE;
+    for (auto i : AllPhases()) {
+        if (selfTimes[i] > longestTime) {
+            longestTime = selfTimes[i];
             longestPhase = i;
         }
     }
 
     return longestPhase;
 }
 
 void
@@ -867,30 +1032,35 @@ Statistics::beginGC(JSGCInvocationKind k
 
     preBytes = runtime->gc.usage.gcBytes();
     startingMajorGCNumber = runtime->gc.majorGCCount();
 }
 
 void
 Statistics::endGC()
 {
+    for (auto j : IntegerRange(NumTimingArrays)) {
+        for (auto i : AllPhases())
+            phaseTotals[j][i] += phaseTimes[j][i];
+    }
+
     TimeDuration sccTotal, sccLongest;
     sccDurations(&sccTotal, &sccLongest);
 
     runtime->addTelemetry(JS_TELEMETRY_GC_IS_ZONE_GC, !zoneStats.isCollectingAllZones());
-    TimeDuration markTotal = SumPhase(PhaseKind::MARK, phaseTimes);
-    TimeDuration markRootsTotal = SumPhase(PhaseKind::MARK_ROOTS, phaseTimes);
+    TimeDuration markTotal = SumPhase(PHASE_MARK, phaseTimes);
+    TimeDuration markRootsTotal = SumPhase(PHASE_MARK_ROOTS, phaseTimes);
     runtime->addTelemetry(JS_TELEMETRY_GC_MARK_MS, t(markTotal));
-    runtime->addTelemetry(JS_TELEMETRY_GC_SWEEP_MS, t(phaseTimes[Phase::SWEEP]));
+    runtime->addTelemetry(JS_TELEMETRY_GC_SWEEP_MS, t(phaseTimes[PHASE_DAG_NONE][PHASE_SWEEP]));
     if (runtime->gc.isCompactingGc()) {
         runtime->addTelemetry(JS_TELEMETRY_GC_COMPACT_MS,
-                              t(phaseTimes[Phase::COMPACT]));
+                              t(phaseTimes[PHASE_DAG_NONE][PHASE_COMPACT]));
     }
     runtime->addTelemetry(JS_TELEMETRY_GC_MARK_ROOTS_MS, t(markRootsTotal));
-    runtime->addTelemetry(JS_TELEMETRY_GC_MARK_GRAY_MS, t(phaseTimes[Phase::SWEEP_MARK_GRAY]));
+    runtime->addTelemetry(JS_TELEMETRY_GC_MARK_GRAY_MS, t(phaseTimes[PHASE_DAG_NONE][PHASE_SWEEP_MARK_GRAY]));
     runtime->addTelemetry(JS_TELEMETRY_GC_NON_INCREMENTAL, nonincremental());
     if (nonincremental())
         runtime->addTelemetry(JS_TELEMETRY_GC_NON_INCREMENTAL_REASON, uint32_t(nonincrementalReason_));
     runtime->addTelemetry(JS_TELEMETRY_GC_INCREMENTAL_DISABLED, !runtime->gc.isIncrementalGCAllowed());
     runtime->addTelemetry(JS_TELEMETRY_GC_SCC_SWEEP_TOTAL_MS, t(sccTotal));
     runtime->addTelemetry(JS_TELEMETRY_GC_SCC_SWEEP_MAX_PAUSE_MS, t(sccLongest));
 
     if (!aborted) {
@@ -981,19 +1151,18 @@ Statistics::endSlice()
         if (slices_.back().budget.isTimeBudget()) {
             int64_t budget_ms = slices_.back().budget.timeBudget.budget;
             runtime->addTelemetry(JS_TELEMETRY_GC_BUDGET_MS, budget_ms);
             if (budget_ms == runtime->gc.defaultSliceBudget())
                 runtime->addTelemetry(JS_TELEMETRY_GC_ANIMATION_MS, t(sliceTime));
 
             // Record any phase that goes more than 2x over its budget.
             if (sliceTime.ToMilliseconds() > 2 * budget_ms) {
-                PhaseKind longest = LongestPhaseSelfTime(slices_.back().phaseTimes);
-                uint8_t bucket = phaseKinds[longest].telemetryBucket;
-                runtime->addTelemetry(JS_TELEMETRY_GC_SLOW_PHASE, bucket);
+                Phase longest = LongestPhaseSelfTime(slices_.back().phaseTimes);
+                runtime->addTelemetry(JS_TELEMETRY_GC_SLOW_PHASE, phases[longest].telemetryBucket);
             }
         }
 
         sliceCount_++;
     }
 
     bool last = !runtime->gc.isIncrementalGCInProgress();
     if (last)
@@ -1011,169 +1180,160 @@ Statistics::endSlice()
                              JS::GCDescription(!wasFullGC, gckind, slices_.back().reason));
     }
 
     // Do this after the slice callback since it uses these values.
     if (last) {
         for (auto& count : counts)
             count = 0;
 
-        // Clear the timers at the end of a GC, preserving the data for PhaseKind::MUTATOR.
-        auto mutatorStartTime = phaseStartTimes[Phase::MUTATOR];
-        auto mutatorTime = phaseTimes[Phase::MUTATOR];
-        PodZero(&phaseStartTimes);
-        PodZero(&phaseTimes);
-        phaseStartTimes[Phase::MUTATOR] = mutatorStartTime;
-        phaseTimes[Phase::MUTATOR] = mutatorTime;
+        // Clear the timers at the end of a GC because we accumulate time in
+        // between GCs for some (which come before PHASE_GC_BEGIN in the list.)
+        PodZero(&phaseStartTimes[PHASE_GC_BEGIN], PHASE_LIMIT - PHASE_GC_BEGIN);
+        for (size_t d = PHASE_DAG_NONE; d < NumTimingArrays; d++)
+            PodZero(&phaseTimes[d][PHASE_GC_BEGIN], PHASE_LIMIT - PHASE_GC_BEGIN);
     }
 }
 
 bool
 Statistics::startTimingMutator()
 {
     if (phaseNestingDepth != 0) {
         // Should only be called from outside of GC.
         MOZ_ASSERT(phaseNestingDepth == 1);
-        MOZ_ASSERT(phaseNesting[0] == Phase::MUTATOR);
+        MOZ_ASSERT(phaseNesting[0] == PHASE_MUTATOR);
         return false;
     }
 
     MOZ_ASSERT(suspended == 0);
 
     timedGCTime = 0;
-    phaseStartTimes[Phase::MUTATOR] = TimeStamp();
-    phaseTimes[Phase::MUTATOR] = 0;
+    phaseStartTimes[PHASE_MUTATOR] = TimeStamp();
+    phaseTimes[PHASE_DAG_NONE][PHASE_MUTATOR] = 0;
     timedGCStart = TimeStamp();
 
-    beginPhase(PhaseKind::MUTATOR);
+    beginPhase(PHASE_MUTATOR);
     return true;
 }
 
 bool
 Statistics::stopTimingMutator(double& mutator_ms, double& gc_ms)
 {
     // This should only be called from outside of GC, while timing the mutator.
-    if (phaseNestingDepth != 1 || phaseNesting[0] != Phase::MUTATOR)
+    if (phaseNestingDepth != 1 || phaseNesting[0] != PHASE_MUTATOR)
         return false;
 
-    endPhase(PhaseKind::MUTATOR);
-    mutator_ms = t(phaseTimes[Phase::MUTATOR]);
+    endPhase(PHASE_MUTATOR);
+    mutator_ms = t(phaseTimes[PHASE_DAG_NONE][PHASE_MUTATOR]);
     gc_ms = t(timedGCTime);
 
     return true;
 }
 
 void
-Statistics::suspendPhases(PhaseKind suspension)
+Statistics::suspendPhases(Phase suspension)
 {
-    MOZ_ASSERT(suspension == PhaseKind::EXPLICIT_SUSPENSION ||
-               suspension == PhaseKind::IMPLICIT_SUSPENSION);
+    MOZ_ASSERT(suspension == PHASE_EXPLICIT_SUSPENSION || suspension == PHASE_IMPLICIT_SUSPENSION);
     while (phaseNestingDepth) {
         MOZ_ASSERT(suspended < mozilla::ArrayLength(suspendedPhases));
         Phase parent = phaseNesting[phaseNestingDepth - 1];
         suspendedPhases[suspended++] = parent;
         recordPhaseEnd(parent);
     }
-    suspendedPhases[suspended++] = lookupChildPhase(suspension);
+    suspendedPhases[suspended++] = suspension;
 }
 
 void
 Statistics::resumePhases()
 {
-#ifdef DEBUG
-    Phase popped = suspendedPhases[--suspended];
-    MOZ_ASSERT(popped == Phase::EXPLICIT_SUSPENSION ||
-               popped == Phase::IMPLICIT_SUSPENSION);
-#endif
-
+    DebugOnly<Phase> popped = suspendedPhases[--suspended];
+    MOZ_ASSERT(popped == PHASE_EXPLICIT_SUSPENSION || popped == PHASE_IMPLICIT_SUSPENSION);
     while (suspended &&
-           suspendedPhases[suspended - 1] != Phase::EXPLICIT_SUSPENSION &&
-           suspendedPhases[suspended - 1] != Phase::IMPLICIT_SUSPENSION)
+           suspendedPhases[suspended - 1] != PHASE_EXPLICIT_SUSPENSION &&
+           suspendedPhases[suspended - 1] != PHASE_IMPLICIT_SUSPENSION)
     {
         Phase resumePhase = suspendedPhases[--suspended];
-        if (resumePhase == Phase::MUTATOR)
+        if (resumePhase == PHASE_MUTATOR)
             timedGCTime += TimeStamp::Now() - timedGCStart;
-        recordPhaseBegin(resumePhase);
+        beginPhase(resumePhase);
     }
 }
 
 void
-Statistics::beginPhase(PhaseKind phaseKind)
+Statistics::beginPhase(Phase phase)
 {
     // No longer timing these phases. We should never see these.
-    MOZ_ASSERT(phaseKind != PhaseKind::GC_BEGIN && phaseKind != PhaseKind::GC_END);
+    MOZ_ASSERT(phase != PHASE_GC_BEGIN && phase != PHASE_GC_END);
+
+    Phase parent = phaseNestingDepth ? phaseNesting[phaseNestingDepth - 1] : PHASE_NO_PARENT;
 
-    // PhaseKind::MUTATOR is suspended while performing GC.
-    if (currentPhase() == Phase::MUTATOR) {
-        suspendPhases(PhaseKind::IMPLICIT_SUSPENSION);
+    // PHASE_MUTATOR is suspended while performing GC.
+    if (parent == PHASE_MUTATOR) {
+        suspendPhases(PHASE_IMPLICIT_SUSPENSION);
+        parent = phaseNestingDepth ? phaseNesting[phaseNestingDepth - 1] : PHASE_NO_PARENT;
     }
 
-    recordPhaseBegin(lookupChildPhase(phaseKind));
-}
-
-void
-Statistics::recordPhaseBegin(Phase phase)
-{
     // Guard against any other re-entry.
     MOZ_ASSERT(!phaseStartTimes[phase]);
 
+    MOZ_ASSERT(phases[phase].index == phase);
     MOZ_ASSERT(phaseNestingDepth < MAX_NESTING);
-    MOZ_ASSERT(phases[phase].parent == currentPhase());
+    MOZ_ASSERT(phases[phase].parent == parent || phases[phase].parent == PHASE_MULTI_PARENTS);
 
     phaseNesting[phaseNestingDepth] = phase;
     phaseNestingDepth++;
 
+    if (phases[phase].parent == PHASE_MULTI_PARENTS) {
+        MOZ_ASSERT(parent != PHASE_NO_PARENT);
+        activeDagSlot = phaseExtra[parent].dagSlot;
+    }
+    MOZ_ASSERT(activeDagSlot <= MaxMultiparentPhases - 1);
+
     phaseStartTimes[phase] = TimeStamp::Now();
 }
 
 void
 Statistics::recordPhaseEnd(Phase phase)
 {
     TimeStamp now = TimeStamp::Now();
 
-    if (phase == Phase::MUTATOR)
+    if (phase == PHASE_MUTATOR)
         timedGCStart = now;
 
     phaseNestingDepth--;
 
     TimeDuration t = now - phaseStartTimes[phase];
     if (!slices_.empty())
-        slices_.back().phaseTimes[phase] += t;
-    phaseTimes[phase] += t;
+        slices_.back().phaseTimes[activeDagSlot][phase] += t;
+    phaseTimes[activeDagSlot][phase] += t;
     phaseStartTimes[phase] = TimeStamp();
 }
 
 void
-Statistics::endPhase(PhaseKind phaseKind)
+Statistics::endPhase(Phase phase)
 {
-    Phase phase = currentPhase();
-    MOZ_ASSERT(phase != Phase::NONE);
-    MOZ_ASSERT(phases[phase].phaseKind == phaseKind);
-
     recordPhaseEnd(phase);
 
+    if (phases[phase].parent == PHASE_MULTI_PARENTS)
+        activeDagSlot = PHASE_DAG_NONE;
+
     // When emptying the stack, we may need to return to timing the mutator
-    // (PhaseKind::MUTATOR).
-    if (phaseNestingDepth == 0 &&
-        suspended > 0 &&
-        suspendedPhases[suspended - 1] == Phase::IMPLICIT_SUSPENSION)
-    {
+    // (PHASE_MUTATOR).
+    if (phaseNestingDepth == 0 && suspended > 0 && suspendedPhases[suspended - 1] == PHASE_IMPLICIT_SUSPENSION)
         resumePhases();
-    }
 }
 
 void
-Statistics::endParallelPhase(PhaseKind phaseKind, const GCParallelTask* task)
+Statistics::endParallelPhase(Phase phase, const GCParallelTask* task)
 {
-    Phase phase = lookupChildPhase(phaseKind);
     phaseNestingDepth--;
 
     if (!slices_.empty())
-        slices_.back().phaseTimes[phase] += task->duration();
-    phaseTimes[phase] += task->duration();
+        slices_.back().phaseTimes[PHASE_DAG_NONE][phase] += task->duration();
+    phaseTimes[PHASE_DAG_NONE][phase] += task->duration();
     phaseStartTimes[phase] = TimeStamp();
 }
 
 TimeStamp
 Statistics::beginSCC()
 {
     return TimeStamp::Now();
 }
@@ -1276,17 +1436,17 @@ Statistics::printSliceProfile()
     fprintf(stderr, "MajorGC: %20s %1d -> %1d      ",
             ExplainReason(slice.reason), int(slice.initialState), int(slice.finalState));
 
     ProfileDurations times;
     times[ProfileKey::Total] = slice.duration();
     totalTimes_[ProfileKey::Total] += times[ProfileKey::Total];
 
 #define GET_PROFILE_TIME(name, text, phase)                                   \
-    times[ProfileKey::name] = SumPhase(phase, slice.phaseTimes);              \
+    times[ProfileKey::name] = slice.phaseTimes[PHASE_DAG_NONE][phase];                     \
     totalTimes_[ProfileKey::name] += times[ProfileKey::name];
 FOR_EACH_GC_PROFILE_TIME(GET_PROFILE_TIME)
 #undef GET_PROFILE_TIME
 
     printProfileTimes(times);
 }
 
 void
--- a/js/src/gc/Statistics.h
+++ b/js/src/gc/Statistics.h
@@ -25,20 +25,92 @@
 using mozilla::Maybe;
 
 namespace js {
 
 class GCParallelTask;
 
 namespace gcstats {
 
-// Phase data is generated by a script. If you need to add phases, edit
-// js/src/gc/GenerateStatsPhases.py
+enum Phase : uint8_t {
+    PHASE_FIRST,
 
-#include "gc/StatsPhasesGenerated.h"
+    PHASE_MUTATOR = PHASE_FIRST,
+    PHASE_GC_BEGIN,
+    PHASE_WAIT_BACKGROUND_THREAD,
+    PHASE_MARK_DISCARD_CODE,
+    PHASE_RELAZIFY_FUNCTIONS,
+    PHASE_PURGE,
+    PHASE_MARK,
+    PHASE_UNMARK,
+    PHASE_MARK_DELAYED,
+    PHASE_SWEEP,
+    PHASE_SWEEP_MARK,
+    PHASE_SWEEP_MARK_TYPES,
+    PHASE_SWEEP_MARK_INCOMING_BLACK,
+    PHASE_SWEEP_MARK_WEAK,
+    PHASE_SWEEP_MARK_INCOMING_GRAY,
+    PHASE_SWEEP_MARK_GRAY,
+    PHASE_SWEEP_MARK_GRAY_WEAK,
+    PHASE_FINALIZE_START,
+    PHASE_WEAK_ZONES_CALLBACK,
+    PHASE_WEAK_COMPARTMENT_CALLBACK,
+    PHASE_SWEEP_ATOMS,
+    PHASE_SWEEP_COMPARTMENTS,
+    PHASE_SWEEP_DISCARD_CODE,
+    PHASE_SWEEP_INNER_VIEWS,
+    PHASE_SWEEP_CC_WRAPPER,
+    PHASE_SWEEP_BASE_SHAPE,
+    PHASE_SWEEP_INITIAL_SHAPE,
+    PHASE_SWEEP_TYPE_OBJECT,
+    PHASE_SWEEP_BREAKPOINT,
+    PHASE_SWEEP_REGEXP,
+    PHASE_SWEEP_COMPRESSION,
+    PHASE_SWEEP_WEAKMAPS,
+    PHASE_SWEEP_UNIQUEIDS,
+    PHASE_SWEEP_JIT_DATA,
+    PHASE_SWEEP_WEAK_CACHES,
+    PHASE_SWEEP_MISC,
+    PHASE_SWEEP_TYPES,
+    PHASE_SWEEP_TYPES_BEGIN,
+    PHASE_SWEEP_TYPES_END,
+    PHASE_SWEEP_OBJECT,
+    PHASE_SWEEP_STRING,
+    PHASE_SWEEP_SCRIPT,
+    PHASE_SWEEP_SCOPE,
+    PHASE_SWEEP_REGEXP_SHARED,
+    PHASE_SWEEP_SHAPE,
+    PHASE_SWEEP_JITCODE,
+    PHASE_FINALIZE_END,
+    PHASE_DESTROY,
+    PHASE_COMPACT,
+    PHASE_COMPACT_MOVE,
+    PHASE_COMPACT_UPDATE,
+    PHASE_COMPACT_UPDATE_CELLS,
+    PHASE_GC_END,
+    PHASE_MINOR_GC,
+    PHASE_EVICT_NURSERY,
+    PHASE_TRACE_HEAP,
+    PHASE_BARRIER,
+    PHASE_UNMARK_GRAY,
+    PHASE_MARK_ROOTS,
+    PHASE_BUFFER_GRAY_ROOTS,
+    PHASE_MARK_CCWS,
+    PHASE_MARK_STACK,
+    PHASE_MARK_RUNTIME_DATA,
+    PHASE_MARK_EMBEDDING,
+    PHASE_MARK_COMPARTMENTS,
+    PHASE_PURGE_SHAPE_TABLES,
+
+    PHASE_LIMIT,
+    PHASE_NONE = PHASE_LIMIT,
+    PHASE_EXPLICIT_SUSPENSION = PHASE_LIMIT,
+    PHASE_IMPLICIT_SUSPENSION,
+    PHASE_MULTI_PARENTS
+};
 
 enum Stat {
     STAT_NEW_CHUNK,
     STAT_DESTROY_CHUNK,
     STAT_MINOR_GC,
 
     // Number of times a 'put' into a storebuffer overflowed, triggering a
     // compaction
@@ -74,81 +146,99 @@ struct ZoneGCStats
 
     ZoneGCStats()
       : collectedZoneCount(0), zoneCount(0), sweptZoneCount(0),
         collectedCompartmentCount(0), compartmentCount(0), sweptCompartmentCount(0)
     {}
 };
 
 #define FOR_EACH_GC_PROFILE_TIME(_)                                           \
-    _(BeginCallback, "bgnCB",  PhaseKind::GC_BEGIN)                           \
-    _(WaitBgThread,  "waitBG", PhaseKind::WAIT_BACKGROUND_THREAD)             \
-    _(DiscardCode,   "discrd", PhaseKind::MARK_DISCARD_CODE)                  \
-    _(RelazifyFunc,  "relzfy", PhaseKind::RELAZIFY_FUNCTIONS)                 \
-    _(PurgeTables,   "prgTbl", PhaseKind::PURGE_SHAPE_TABLES)                 \
-    _(Purge,         "purge",  PhaseKind::PURGE)                              \
-    _(Mark,          "mark",   PhaseKind::MARK)                               \
-    _(Sweep,         "sweep",  PhaseKind::SWEEP)                              \
-    _(Compact,       "cmpct",  PhaseKind::COMPACT)                            \
-    _(EndCallback,   "endCB",  PhaseKind::GC_END)                             \
-    _(Barriers,      "brrier", PhaseKind::BARRIER)
+    _(BeginCallback, "bgnCB",  PHASE_GC_BEGIN)                                \
+    _(WaitBgThread,  "waitBG", PHASE_WAIT_BACKGROUND_THREAD)                  \
+    _(DiscardCode,   "discrd", PHASE_MARK_DISCARD_CODE)                       \
+    _(RelazifyFunc,  "relzfy", PHASE_RELAZIFY_FUNCTIONS)                      \
+    _(PurgeTables,   "prgTbl", PHASE_PURGE_SHAPE_TABLES)                      \
+    _(Purge,         "purge",  PHASE_PURGE)                                   \
+    _(Mark,          "mark",   PHASE_MARK)                                    \
+    _(Sweep,         "sweep",  PHASE_SWEEP)                                   \
+    _(Compact,       "cmpct",  PHASE_COMPACT)                                 \
+    _(EndCallback,   "endCB",  PHASE_GC_END)                                  \
+    _(Barriers,      "brrier", PHASE_BARRIER)
 
 const char* ExplainAbortReason(gc::AbortReason reason);
 const char* ExplainInvocationKind(JSGCInvocationKind gckind);
 
 /*
  * Struct for collecting timing statistics on a "phase tree". The tree is
  * specified as a limited DAG, but the timings are collected for the whole tree
  * that you would get by expanding out the DAG by duplicating subtrees rooted
  * at nodes with multiple parents.
  *
  * During execution, a child phase can be activated multiple times, and the
  * total time will be accumulated. (So for example, you can start and end
- * PhaseKind::MARK_ROOTS multiple times before completing the parent phase.)
+ * PHASE_MARK_ROOTS multiple times before completing the parent phase.)
  *
  * Incremental GC is represented by recording separate timing results for each
  * slice within the overall GC.
  */
 struct Statistics
 {
     template <typename T, size_t Length>
     using Array = mozilla::Array<T, Length>;
 
     template <typename IndexType, IndexType SizeAsEnumValue, typename ValueType>
     using EnumeratedArray = mozilla::EnumeratedArray<IndexType, SizeAsEnumValue, ValueType>;
 
     using TimeDuration = mozilla::TimeDuration;
     using TimeStamp = mozilla::TimeStamp;
 
-    // Create a convenient type for referring to tables of phase times.
-    using PhaseTimeTable = EnumeratedArray<Phase, Phase::LIMIT, TimeDuration>;
+    /*
+     * Phases are allowed to have multiple parents, though any path from root
+     * to leaf is allowed at most one multi-parented phase. We keep a full set
+     * of timings for each of the multi-parented phases, to be able to record
+     * all the timings in the expanded tree induced by our dag.
+     *
+     * Note that this wastes quite a bit of space, since we have a whole
+     * separate array of timing data containing all the phases. We could be
+     * more clever and keep an array of pointers biased by the offset of the
+     * multi-parented phase, and thereby preserve the simple
+     * timings[slot][PHASE_*] indexing. But the complexity doesn't seem worth
+     * the few hundred bytes of savings. If we want to extend things to full
+     * DAGs, this decision should be reconsidered.
+     */
+    static const size_t MaxMultiparentPhases = 6;
+    static const size_t NumTimingArrays = MaxMultiparentPhases + 1;
+
+    /* Create a convenient type for referring to tables of phase times. */
+    using PhaseTimeTable =
+        Array<EnumeratedArray<Phase, PHASE_LIMIT, TimeDuration>, NumTimingArrays>;
 
     static MOZ_MUST_USE bool initialize();
 
     explicit Statistics(JSRuntime* rt);
     ~Statistics();
 
     Statistics(const Statistics&) = delete;
     Statistics& operator=(const Statistics&) = delete;
 
-    void beginPhase(PhaseKind phaseKind);
-    void endPhase(PhaseKind phaseKind);
-    void endParallelPhase(PhaseKind phaseKind, const GCParallelTask* task);
+    void beginPhase(Phase phase);
+    void endPhase(Phase phase);
+    void endParallelPhase(Phase phase, const GCParallelTask* task);
 
     // Occasionally, we may be in the middle of something that is tracked by
     // this class, and we need to do something unusual (eg evict the nursery)
     // that doesn't normally nest within the current phase. Suspend the
     // currently tracked phase stack, at which time the caller is free to do
     // other tracked operations.
     //
-    // This also happens internally with the PhaseKind::MUTATOR "phase". While in
+    // This also happens internally with the PHASE_MUTATOR "phase". While in
     // this phase, any beginPhase will automatically suspend the non-GC phase,
     // until that inner stack is complete, at which time it will automatically
     // resume the non-GC phase. Explicit suspensions do not get auto-resumed.
-    void suspendPhases(PhaseKind suspension = PhaseKind::EXPLICIT_SUSPENSION);
+    void suspendPhases(Phase suspension = PHASE_EXPLICIT_SUSPENSION);
 
     // Resume a suspended stack of phases.
     void resumePhases();
 
     void beginSlice(const ZoneGCStats& zoneStats, JSGCInvocationKind gckind,
                     SliceBudget budget, JS::gcreason::Reason reason);
     void endSlice();
 
@@ -198,17 +288,24 @@ struct Statistics
 
     JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
     JS::GCNurseryCollectionCallback setNurseryCollectionCallback(
         JS::GCNurseryCollectionCallback callback);
 
     TimeDuration clearMaxGCPauseAccumulator();
     TimeDuration getMaxGCPauseSinceClear();
 
-    PhaseKind currentPhaseKind() const;
+    // Return the current phase, suppressing the synthetic PHASE_MUTATOR phase.
+    Phase currentPhase() {
+        if (phaseNestingDepth == 0)
+            return PHASE_NONE;
+        if (phaseNestingDepth == 1)
+            return phaseNesting[0] == PHASE_MUTATOR ? PHASE_NONE : phaseNesting[0];
+        return phaseNesting[phaseNestingDepth - 1];
+    }
 
     static const size_t MAX_NESTING = 20;
 
     struct SliceData {
         SliceData(SliceBudget budget, JS::gcreason::Reason reason,
                   TimeStamp start, size_t startFaults, gc::State initialState)
           : budget(budget), reason(reason),
             initialState(initialState),
@@ -271,25 +368,28 @@ struct Statistics
 
     JSGCInvocationKind gckind;
 
     gc::AbortReason nonincrementalReason_;
 
     SliceDataVector slices_;
 
     /* Most recent time when the given phase started. */
-    EnumeratedArray<Phase, Phase::LIMIT, TimeStamp> phaseStartTimes;
+    EnumeratedArray<Phase, PHASE_LIMIT, TimeStamp> phaseStartTimes;
 
     /* Bookkeeping for GC timings when timingMutator is true */
     TimeStamp timedGCStart;
     TimeDuration timedGCTime;
 
     /* Total time in a given phase for this GC. */
     PhaseTimeTable phaseTimes;
 
+    /* Total time in a given phase over all GCs. */
+    PhaseTimeTable phaseTotals;
+
     /* Number of events of this type for this GC. */
     EnumeratedArray<Stat,
                     STAT_LIMIT,
                     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire>> counts;
 
     /* Allocated space before the GC started. */
     size_t preBytes;
 
@@ -298,22 +398,23 @@ struct Statistics
     uint64_t startingMajorGCNumber;
 
     /* Records the maximum GC pause in an API-controlled interval (in us). */
     mutable TimeDuration maxPauseInInterval;
 
     /* Phases that are currently on stack. */
     Array<Phase, MAX_NESTING> phaseNesting;
     size_t phaseNestingDepth;
+    size_t activeDagSlot;
 
     /*
      * Certain phases can interrupt the phase stack, eg callback phases. When
      * this happens, we move the suspended phases over to a sepearate list,
-     * terminated by a dummy PhaseKind::SUSPENSION phase (so that we can nest
-     * suspensions by suspending multiple stacks with a PhaseKind::SUSPENSION in
+     * terminated by a dummy PHASE_SUSPENSION phase (so that we can nest
+     * suspensions by suspending multiple stacks with a PHASE_SUSPENSION in
      * between).
      */
     Array<Phase, MAX_NESTING * 3> suspendedPhases;
     size_t suspended;
 
     /* Sweep times for SCCs of compartments. */
     Vector<TimeDuration, 0, SystemAllocPolicy> sccTimes;
 
@@ -340,23 +441,19 @@ FOR_EACH_GC_PROFILE_TIME(DEFINE_TIME_KEY
 
     using ProfileDurations = EnumeratedArray<ProfileKey, ProfileKey::KeyCount, TimeDuration>;
 
     TimeDuration profileThreshold_;
     bool enableProfiling_;
     ProfileDurations totalTimes_;
     uint64_t sliceCount_;
 
-    Phase currentPhase() const;
-    Phase lookupChildPhase(PhaseKind phaseKind) const;
-
     void beginGC(JSGCInvocationKind kind);
     void endGC();
 
-    void recordPhaseBegin(Phase phase);
     void recordPhaseEnd(Phase phase);
 
     void gcDuration(TimeDuration* total, TimeDuration* maxPause) const;
     void sccDurations(TimeDuration* total, TimeDuration* maxPause) const;
     void printStats();
 
     UniqueChars formatCompactSlicePhaseTimes(const PhaseTimeTable& phaseTimes) const;
 
@@ -386,49 +483,49 @@ struct MOZ_RAII AutoGCSlice
     }
     ~AutoGCSlice() { stats.endSlice(); }
 
     Statistics& stats;
 };
 
 struct MOZ_RAII AutoPhase
 {
-    AutoPhase(Statistics& stats, PhaseKind phaseKind)
-      : stats(stats), task(nullptr), phaseKind(phaseKind), enabled(true)
+    AutoPhase(Statistics& stats, Phase phase)
+      : stats(stats), task(nullptr), phase(phase), enabled(true)
     {
-        stats.beginPhase(phaseKind);
+        stats.beginPhase(phase);
     }
 
-    AutoPhase(Statistics& stats, bool condition, PhaseKind phaseKind)
-      : stats(stats), task(nullptr), phaseKind(phaseKind), enabled(condition)
+    AutoPhase(Statistics& stats, bool condition, Phase phase)
+      : stats(stats), task(nullptr), phase(phase), enabled(condition)
     {
         if (enabled)
-            stats.beginPhase(phaseKind);
+            stats.beginPhase(phase);
     }
 
-    AutoPhase(Statistics& stats, const GCParallelTask& task, PhaseKind phaseKind)
-      : stats(stats), task(&task), phaseKind(phaseKind), enabled(true)
+    AutoPhase(Statistics& stats, const GCParallelTask& task, Phase phase)
+      : stats(stats), task(&task), phase(phase), enabled(true)
     {
         if (enabled)
-            stats.beginPhase(phaseKind);
+            stats.beginPhase(phase);
     }
 
     ~AutoPhase() {
         if (enabled) {
             // Bug 1309651 - we only record active thread time (including time
             // spent waiting to join with helper threads), but should start
             // recording total work on helper threads sometime by calling
             // endParallelPhase here if task is nonnull.
-            stats.endPhase(phaseKind);
+            stats.endPhase(phase);
         }
     }
 
     Statistics& stats;
     const GCParallelTask* task;
-    PhaseKind phaseKind;
+    Phase phase;
     bool enabled;
 };
 
 struct MOZ_RAII AutoSCC
 {
     AutoSCC(Statistics& stats, unsigned scc)
       : stats(stats), scc(scc)
     {
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -194,17 +194,17 @@ gc::GCRuntime::startVerifyPreBarriers()
     if (!trc)
         return;
 
     AutoPrepareForTracing prep(TlsContext.get(), WithAtoms);
 
     for (auto chunk = allNonEmptyChunks(); !chunk.done(); chunk.next())
         chunk->bitmap.clear();
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::TRACE_HEAP);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_TRACE_HEAP);
 
     const size_t size = 64 * 1024 * 1024;
     trc->root = (VerifyNode*)js_malloc(size);
     if (!trc->root)
         goto oom;
     trc->edgeptr = (char*)trc->root;
     trc->term = trc->edgeptr + size;
 
@@ -675,17 +675,17 @@ CheckGrayMarkingTracer::check(AutoLockFo
 JS_FRIEND_API(bool)
 js::CheckGrayMarkingState(JSRuntime* rt)
 {
     MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
     MOZ_ASSERT(!rt->gc.isIncrementalGCInProgress());
     if (!rt->gc.areGrayBitsValid())
         return true;
 
-    gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
+    gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_TRACE_HEAP);
     AutoTraceSession session(rt, JS::HeapState::Tracing);
     CheckGrayMarkingTracer tracer(rt);
     if (!tracer.init())
         return true; // Ignore failure
 
     return tracer.check(session.lock);
 }
 
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -693,17 +693,17 @@ JSCompartment::traceOutgoingCrossCompart
             TraceEdge(trc, wrapper->slotOfPrivate(), "cross-compartment wrapper");
         }
     }
 }
 
 /* static */ void
 JSCompartment::traceIncomingCrossCompartmentEdgesForZoneGC(JSTracer* trc)
 {
-    gcstats::AutoPhase ap(trc->runtime()->gc.stats(), gcstats::PhaseKind::MARK_CCWS);
+    gcstats::AutoPhase ap(trc->runtime()->gc.stats(), gcstats::PHASE_MARK_CCWS);
     MOZ_ASSERT(JS::CurrentThreadIsHeapMajorCollecting());
     for (CompartmentsIter c(trc->runtime(), SkipAtoms); !c.done(); c.next()) {
         if (!c->zone()->isCollecting())
             c->traceOutgoingCrossCompartmentWrappers(trc);
     }
     Debugger::traceIncomingCrossCompartmentEdges(trc);
 }
 
--- a/js/src/jsfriendapi.cpp
+++ b/js/src/jsfriendapi.cpp
@@ -1184,17 +1184,17 @@ js::DumpHeap(JSContext* cx, FILE* fp, js
         EvictAllNurseries(cx->runtime(), JS::gcreason::API);
 
     DumpHeapTracer dtrc(fp, cx);
 
     fprintf(dtrc.output, "# Roots.\n");
     {
         JSRuntime* rt = cx->runtime();
         js::gc::AutoPrepareForTracing prep(cx, WithAtoms);
-        gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
+        gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_TRACE_HEAP);
         rt->gc.traceRuntime(&dtrc, prep.session().lock);
     }
 
     fprintf(dtrc.output, "# Weak maps.\n");
     WeakMapBase::traceAllMappings(&dtrc);
 
     fprintf(dtrc.output, "==========\n");
 
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -318,97 +318,97 @@ const uint32_t Arena::ThingsPerArena[] =
 FOR_EACH_ALLOCKIND(EXPAND_THINGS_PER_ARENA)
 #undef EXPAND_THINGS_PER_ARENA
 };
 
 #undef COUNT
 
 struct js::gc::FinalizePhase
 {
-    gcstats::PhaseKind statsPhase;
+    gcstats::Phase statsPhase;
     AllocKinds kinds;
 };
 
 /*
  * Finalization order for objects swept incrementally on the active thread.
  */
 static const FinalizePhase ForegroundObjectFinalizePhase = {
-    gcstats::PhaseKind::SWEEP_OBJECT, {
+    gcstats::PHASE_SWEEP_OBJECT, {
         AllocKind::OBJECT0,
         AllocKind::OBJECT2,
         AllocKind::OBJECT4,
         AllocKind::OBJECT8,
         AllocKind::OBJECT12,
         AllocKind::OBJECT16
     }
 };
 
 /*
  * Finalization order for GC things swept incrementally on the active thread.
  */
 static const FinalizePhase IncrementalFinalizePhases[] = {
     {
-        gcstats::PhaseKind::SWEEP_STRING, {
+        gcstats::PHASE_SWEEP_STRING, {
             AllocKind::EXTERNAL_STRING
         }
     },
     {
-        gcstats::PhaseKind::SWEEP_SCRIPT, {
+        gcstats::PHASE_SWEEP_SCRIPT, {
             AllocKind::SCRIPT
         }
     },
     {
-        gcstats::PhaseKind::SWEEP_JITCODE, {
+        gcstats::PHASE_SWEEP_JITCODE, {
             AllocKind::JITCODE
         }
     }
 };
 
 /*
  * Finalization order for GC things swept on the background thread.
  */
 static const FinalizePhase BackgroundFinalizePhases[] = {
     {
-        gcstats::PhaseKind::SWEEP_SCRIPT, {
+        gcstats::PHASE_SWEEP_SCRIPT, {
             AllocKind::LAZY_SCRIPT
         }
     },
     {
-        gcstats::PhaseKind::SWEEP_OBJECT, {
+        gcstats::PHASE_SWEEP_OBJECT, {
             AllocKind::FUNCTION,
             AllocKind::FUNCTION_EXTENDED,
             AllocKind::OBJECT0_BACKGROUND,
             AllocKind::OBJECT2_BACKGROUND,
             AllocKind::OBJECT4_BACKGROUND,
             AllocKind::OBJECT8_BACKGROUND,
             AllocKind::OBJECT12_BACKGROUND,
             AllocKind::OBJECT16_BACKGROUND
         }
     },
     {
-        gcstats::PhaseKind::SWEEP_SCOPE, {
+        gcstats::PHASE_SWEEP_SCOPE, {
             AllocKind::SCOPE,
         }
     },
     {
-        gcstats::PhaseKind::SWEEP_REGEXP_SHARED, {
+        gcstats::PHASE_SWEEP_REGEXP_SHARED, {
             AllocKind::REGEXP_SHARED,
         }
     },
     {
-        gcstats::PhaseKind::SWEEP_STRING, {
+        gcstats::PHASE_SWEEP_STRING, {
             AllocKind::FAT_INLINE_STRING,
             AllocKind::STRING,
             AllocKind::FAT_INLINE_ATOM,
             AllocKind::ATOM,
             AllocKind::SYMBOL
         }
     },
     {
-        gcstats::PhaseKind::SWEEP_SHAPE, {
+        gcstats::PHASE_SWEEP_SHAPE, {
             AllocKind::SHAPE,
             AllocKind::ACCESSOR_SHAPE,
             AllocKind::BASE_SHAPE,
             AllocKind::OBJECT_GROUP
         }
     }
 };
 
@@ -2132,17 +2132,17 @@ ArenaLists::relocateArenas(Zone* zone, A
 
     return true;
 }
 
 bool
 GCRuntime::relocateArenas(Zone* zone, JS::gcreason::Reason reason, Arena*& relocatedListOut,
                           SliceBudget& sliceBudget)
 {
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT_MOVE);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_COMPACT_MOVE);
 
     MOZ_ASSERT(!zone->isPreservingCode());
     MOZ_ASSERT(CanRelocateZone(zone));
 
     js::CancelOffThreadIonCompile(rt, JS::Zone::Compact);
 
     if (!zone->arenas.relocateArenas(zone, relocatedListOut, reason, sliceBudget, stats()))
         return false;
@@ -2460,28 +2460,28 @@ GCRuntime::updateCellPointers(MovingTrac
 
     {
         AutoLockHelperThreadState lock;
 
         fgTask.emplace(rt, &fgArenas, lock);
 
         for (size_t i = 0; i < bgTaskCount && !bgArenas.done(); i++) {
             bgTasks[i].emplace(rt, &bgArenas, lock);
-            startTask(*bgTasks[i], gcstats::PhaseKind::COMPACT_UPDATE_CELLS, lock);
+            startTask(*bgTasks[i], gcstats::PHASE_COMPACT_UPDATE_CELLS, lock);
             tasksStarted = i;
         }
     }
 
     fgTask->runFromActiveCooperatingThread(rt);
 
     {
         AutoLockHelperThreadState lock;
 
         for (size_t i = 0; i < tasksStarted; i++)
-            joinTask(*bgTasks[i], gcstats::PhaseKind::COMPACT_UPDATE_CELLS, lock);
+            joinTask(*bgTasks[i], gcstats::PHASE_COMPACT_UPDATE_CELLS, lock);
     }
 }
 
 // After cells have been relocated any pointers to a cell's old locations must
 // be updated to point to the new location.  This happens by iterating through
 // all cells in heap and tracing their children (non-recursively) to update
 // them.
 //
@@ -2555,17 +2555,17 @@ GCRuntime::updateAllCellPointers(MovingT
  * part of the traversal.
  */
 void
 GCRuntime::updateZonePointersToRelocatedCells(Zone* zone, AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(!rt->isBeingDestroyed());
     MOZ_ASSERT(zone->isGCCompacting());
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT_UPDATE);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_COMPACT_UPDATE);
     MovingTracer trc(rt);
 
     zone->fixupAfterMovingGC();
 
     // Fixup compartment global pointers as these get accessed during marking.
     for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
         comp->fixupAfterMovingGC();
 
@@ -2573,17 +2573,17 @@ GCRuntime::updateZonePointersToRelocated
 
     // Iterate through all cells that can contain relocatable pointers to update
     // them. Since updating each cell is independent we try to parallelize this
     // as much as possible.
     updateAllCellPointers(&trc, zone);
 
     // Mark roots to update them.
     {
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_ROOTS);
 
         WeakMapBase::traceZone(zone, &trc);
         for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
             if (c->watchpointMap)
                 c->watchpointMap->trace(&trc);
         }
     }
 
@@ -2598,28 +2598,28 @@ GCRuntime::updateZonePointersToRelocated
 /*
  * Update runtime-wide pointers to relocated cells.
  */
 void
 GCRuntime::updateRuntimePointersToRelocatedCells(AutoLockForExclusiveAccess& lock)
 {
     MOZ_ASSERT(!rt->isBeingDestroyed());
 
-    gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::COMPACT_UPDATE);
+    gcstats::AutoPhase ap1(stats(), gcstats::PHASE_COMPACT_UPDATE);
     MovingTracer trc(rt);
 
     JSCompartment::fixupCrossCompartmentWrappersAfterMovingGC(&trc);
 
     rt->geckoProfiler().fixupStringsMapAfterMovingGC();
 
     traceRuntimeForMajorGC(&trc, lock);
 
     // Mark roots to update them.
     {
-        gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::MARK_ROOTS);
+        gcstats::AutoPhase ap2(stats(), gcstats::PHASE_MARK_ROOTS);
         Debugger::traceAllForMovingGC(&trc);
         Debugger::traceIncomingCrossCompartmentEdges(&trc);
 
         // Mark all gray roots, making sure we call the trace callback to get the
         // current set.
         if (JSTraceDataOp op = grayRootTracer.op)
             (*op)(&trc, grayRootTracer.data);
     }
@@ -3879,42 +3879,42 @@ GCRuntime::beginMarkPhase(JS::gcreason::
     MemProfiler::MarkTenuredStart(rt);
     marker.start();
     GCMarker* gcmarker = &marker;
 
     /* For non-incremental GC the following sweep discards the jit code. */
     if (isIncremental) {
         js::CancelOffThreadIonCompile(rt, JS::Zone::Mark);
         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
-            gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_DISCARD_CODE);
+            gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_DISCARD_CODE);
             zone->discardJitCode(rt->defaultFreeOp());
         }
     }
 
     /*
      * Relazify functions after discarding JIT code (we can't relazify
      * functions with JIT code) and before the actual mark phase, so that
      * the current GC can collect the JSScripts we're unlinking here.
      * We do this only when we're performing a shrinking GC, as too much
      * relazification can cause performance issues when we have to reparse
      * the same functions over and over.
      */
     if (invocationKind == GC_SHRINK) {
         {
-            gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::RELAZIFY_FUNCTIONS);
+            gcstats::AutoPhase ap(stats(), gcstats::PHASE_RELAZIFY_FUNCTIONS);
             for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
                 if (zone->isSelfHostingZone())
                     continue;
                 RelazifyFunctions(zone, AllocKind::FUNCTION);
                 RelazifyFunctions(zone, AllocKind::FUNCTION_EXTENDED);
             }
         }
 
         /* Purge ShapeTables. */
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE_SHAPE_TABLES);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_PURGE_SHAPE_TABLES);
         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
             if (zone->keepShapeTables() || zone->isSelfHostingZone())
                 continue;
             for (auto baseShape = zone->cellIter<BaseShape>(); !baseShape.done(); baseShape.next())
                 baseShape->maybePurgeTable();
         }
     }
 
@@ -3934,55 +3934,55 @@ GCRuntime::beginMarkPhase(JS::gcreason::
      * danger if we purge later is that the snapshot invariant of incremental
      * GC will be broken, as follows. If some object is reachable only through
      * some cache (say the dtoaCache) then it will not be part of the snapshot.
      * If we purge after root marking, then the mutator could obtain a pointer
      * to the object and start using it. This object might never be marked, so
      * a GC hazard would exist.
      */
     {
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::PURGE);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_PURGE);
         purgeRuntime(lock);
     }
 
     /*
      * Mark phase.
      */
-    gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::MARK);
+    gcstats::AutoPhase ap1(stats(), gcstats::PHASE_MARK);
 
     {
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::UNMARK);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_UNMARK);
 
         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
             /* Unmark everything in the zones being collected. */
             zone->arenas.unmarkAll();
         }
 
         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
             /* Unmark all weak maps in the zones being collected. */
             WeakMapBase::unmarkZone(zone);
         }
     }
 
     traceRuntimeForMajorGC(gcmarker, lock);
 
-    gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::MARK_ROOTS);
+    gcstats::AutoPhase ap2(stats(), gcstats::PHASE_MARK_ROOTS);
 
     if (isIncremental) {
         bufferGrayRoots();
         markCompartments();
     }
 
     return true;
 }
 
 void
 GCRuntime::markCompartments()
 {
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_COMPARTMENTS);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_COMPARTMENTS);
 
     /*
      * This code ensures that if a compartment is "dead", then it will be
      * collected in this GC. A compartment is considered dead if its maybeAlive
      * flag is false. The maybeAlive flag is set if:
      *
      *   (1) the compartment has been entered (set in beginMarkPhase() above)
      *   (2) the compartment is not being collected (set in beginMarkPhase()
@@ -4040,17 +4040,17 @@ GCRuntime::markCompartments()
         MOZ_ASSERT(!comp->scheduledForDestruction);
         if (!comp->maybeAlive && !rt->isAtomsCompartment(comp))
             comp->scheduledForDestruction = true;
     }
 }
 
 template <class ZoneIterT>
 void
-GCRuntime::markWeakReferences(gcstats::PhaseKind phase)
+GCRuntime::markWeakReferences(gcstats::Phase phase)
 {
     MOZ_ASSERT(marker.isDrained());
 
     gcstats::AutoPhase ap1(stats(), phase);
 
     marker.enterWeakMarkingMode();
 
     // TODO bug 1167452: Make weak marking incremental
@@ -4077,52 +4077,52 @@ GCRuntime::markWeakReferences(gcstats::P
         MOZ_RELEASE_ASSERT(marker.drainMarkStack(unlimited));
     }
     MOZ_ASSERT(marker.isDrained());
 
     marker.leaveWeakMarkingMode();
 }
 
 void
-GCRuntime::markWeakReferencesInCurrentGroup(gcstats::PhaseKind phase)
+GCRuntime::markWeakReferencesInCurrentGroup(gcstats::Phase phase)
 {
     markWeakReferences<GCSweepGroupIter>(phase);
 }
 
 template <class ZoneIterT, class CompartmentIterT>
 void
-GCRuntime::markGrayReferences(gcstats::PhaseKind phase)
+GCRuntime::markGrayReferences(gcstats::Phase phase)
 {
     gcstats::AutoPhase ap(stats(), phase);
     if (hasBufferedGrayRoots()) {
         for (ZoneIterT zone(rt); !zone.done(); zone.next())
             markBufferedGrayRoots(zone);
     } else {
         MOZ_ASSERT(!isIncremental);
         if (JSTraceDataOp op = grayRootTracer.op)
             (*op)(&marker, grayRootTracer.data);
     }
     auto unlimited = SliceBudget::unlimited();
     MOZ_RELEASE_ASSERT(marker.drainMarkStack(unlimited));
 }
 
 void
-GCRuntime::markGrayReferencesInCurrentGroup(gcstats::PhaseKind phase)
+GCRuntime::markGrayReferencesInCurrentGroup(gcstats::Phase phase)
 {
     markGrayReferences<GCSweepGroupIter, GCCompartmentGroupIter>(phase);
 }
 
 void
-GCRuntime::markAllWeakReferences(gcstats::PhaseKind phase)
+GCRuntime::markAllWeakReferences(gcstats::Phase phase)
 {
     markWeakReferences<GCZonesIter>(phase);
 }
 
 void
-GCRuntime::markAllGrayReferences(gcstats::PhaseKind phase)
+GCRuntime::markAllGrayReferences(gcstats::Phase phase)
 {
     markGrayReferences<GCZonesIter, GCCompartmentsIter>(phase);
 }
 
 #ifdef JS_GC_ZEAL
 
 struct GCChunkHasher {
     typedef gc::Chunk* Lookup;
@@ -4240,20 +4240,20 @@ js::gc::MarkingValidator::nonIncremental
      */
     initialized = true;
 
     /* Re-do all the marking, but non-incrementally. */
     js::gc::State state = gc->incrementalState;
     gc->incrementalState = State::MarkRoots;
 
     {
-        gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::MARK);
+        gcstats::AutoPhase ap(gc->stats(), gcstats::PHASE_MARK);
 
         {
-            gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::UNMARK);
+            gcstats::AutoPhase ap(gc->stats(), gcstats::PHASE_UNMARK);
 
             for (GCZonesIter zone(runtime); !zone.done(); zone.next())
                 WeakMapBase::unmarkZone(zone);
 
             MOZ_ASSERT(gcmarker->isDrained());
             gcmarker->reset();
 
             for (auto chunk = gc->allNonEmptyChunks(); !chunk.done(); chunk.next())
@@ -4264,30 +4264,30 @@ js::gc::MarkingValidator::nonIncremental
 
         gc->incrementalState = State::Mark;
         auto unlimited = SliceBudget::unlimited();
         MOZ_RELEASE_ASSERT(gc->marker.drainMarkStack(unlimited));
     }
 
     gc->incrementalState = State::Sweep;
     {
-        gcstats::AutoPhase ap1(gc->stats(), gcstats::PhaseKind::SWEEP);
-        gcstats::AutoPhase ap2(gc->stats(), gcstats::PhaseKind::SWEEP_MARK);
-
-        gc->markAllWeakReferences(gcstats::PhaseKind::SWEEP_MARK_WEAK);
+        gcstats::AutoPhase ap1(gc->stats(), gcstats::PHASE_SWEEP);
+        gcstats::AutoPhase ap2(gc->stats(), gcstats::PHASE_SWEEP_MARK);
+
+        gc->markAllWeakReferences(gcstats::PHASE_SWEEP_MARK_WEAK);
 
         /* Update zone state for gray marking. */
         for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
             MOZ_ASSERT(zone->isGCMarkingBlack());
             zone->setGCState(Zone::MarkGray);
         }
         gc->marker.setMarkColorGray();
 
-        gc->markAllGrayReferences(gcstats::PhaseKind::SWEEP_MARK_GRAY);
-        gc->markAllWeakReferences(gcstats::PhaseKind::SWEEP_MARK_GRAY_WEAK);
+        gc->markAllGrayReferences(gcstats::PHASE_SWEEP_MARK_GRAY);
+        gc->markAllWeakReferences(gcstats::PHASE_SWEEP_MARK_GRAY_WEAK);
 
         /* Restore zone state. */
         for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
             MOZ_ASSERT(zone->isGCMarkingGray());
             zone->setGCState(Zone::Mark);
         }
         MOZ_ASSERT(gc->marker.isDrained());
         gc->marker.setMarkColorBlack();
@@ -4765,19 +4765,19 @@ js::DelayCrossCompartmentGrayMarking(JSO
 #endif
 }
 
 static void
 MarkIncomingCrossCompartmentPointers(JSRuntime* rt, const uint32_t color)
 {
     MOZ_ASSERT(color == BLACK || color == GRAY);
 
-    static const gcstats::PhaseKind statsPhases[] = {
-        gcstats::PhaseKind::SWEEP_MARK_INCOMING_BLACK,
-        gcstats::PhaseKind::SWEEP_MARK_INCOMING_GRAY
+    static const gcstats::Phase statsPhases[] = {
+        gcstats::PHASE_SWEEP_MARK_INCOMING_BLACK,
+        gcstats::PHASE_SWEEP_MARK_INCOMING_GRAY
     };
     gcstats::AutoPhase ap1(rt->gc.stats(), statsPhases[color]);
 
     bool unlinkList = color == GRAY;
 
     for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
         MOZ_ASSERT_IF(color == GRAY, c->zone()->isGCMarkingGray());
         MOZ_ASSERT_IF(color == BLACK, c->zone()->isGCMarkingBlack());
@@ -4891,25 +4891,25 @@ js::NotifyGCPostSwap(JSObject* a, JSObje
         DelayCrossCompartmentGrayMarking(b);
     if (removedFlags & JS_GC_SWAP_OBJECT_B_REMOVED)
         DelayCrossCompartmentGrayMarking(a);
 }
 
 void
 GCRuntime::endMarkingSweepGroup()
 {
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_MARK);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_SWEEP_MARK);
 
     /*
      * Mark any incoming black pointers from previously swept compartments
      * whose referents are not marked. This can occur when gray cells become
      * black by the action of UnmarkGray.
      */
     MarkIncomingCrossCompartmentPointers(rt, BLACK);
-    markWeakReferencesInCurrentGroup(gcstats::PhaseKind::SWEEP_MARK_WEAK);
+    markWeakReferencesInCurrentGroup(gcstats::PHASE_SWEEP_MARK_WEAK);
 
     /*
      * Change state of current group to MarkGray to restrict marking to this
      * group.  Note that there may be pointers to the atoms compartment, and
      * these will be marked through, as they are not marked with
      * MarkCrossCompartmentXXX.
      */
     for (GCSweepGroupIter zone(rt); !zone.done(); zone.next()) {
@@ -4917,18 +4917,18 @@ GCRuntime::endMarkingSweepGroup()
         zone->setGCState(Zone::MarkGray);
     }
     marker.setMarkColorGray();
 
     /* Mark incoming gray pointers from previously swept compartments. */
     MarkIncomingCrossCompartmentPointers(rt, GRAY);
 
     /* Mark gray roots and mark transitively inside the current compartment group. */
-    markGrayReferencesInCurrentGroup(gcstats::PhaseKind::SWEEP_MARK_GRAY);
-    markWeakReferencesInCurrentGroup(gcstats::PhaseKind::SWEEP_MARK_GRAY_WEAK);
+    markGrayReferencesInCurrentGroup(gcstats::PHASE_SWEEP_MARK_GRAY);
+    markWeakReferencesInCurrentGroup(gcstats::PHASE_SWEEP_MARK_GRAY_WEAK);
 
     /* Restore marking state. */
     for (GCSweepGroupIter zone(rt); !zone.done(); zone.next()) {
         MOZ_ASSERT(zone->isGCMarkingGray());
         zone->setGCState(Zone::Mark);
     }
     MOZ_ASSERT(marker.isDrained());
     marker.setMarkColorBlack();
@@ -5058,64 +5058,64 @@ static void
 SweepUniqueIds(JSRuntime* runtime)
 {
     FreeOp fop(nullptr);
     for (GCSweepGroupIter zone(runtime); !zone.done(); zone.next())
         zone->sweepUniqueIds(&fop);
 }
 
 void
-GCRuntime::startTask(GCParallelTask& task, gcstats::PhaseKind phase, AutoLockHelperThreadState& locked)
+GCRuntime::startTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked)
 {
     if (!task.startWithLockHeld(locked)) {
         AutoUnlockHelperThreadState unlock(locked);
         gcstats::AutoPhase ap(stats(), phase);
         task.runFromActiveCooperatingThread(rt);
     }
 }
 
 void
-GCRuntime::joinTask(GCParallelTask& task, gcstats::PhaseKind phase, AutoLockHelperThreadState& locked)
+GCRuntime::joinTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked)
 {
     gcstats::AutoPhase ap(stats(), task, phase);
     task.joinWithLockHeld(locked);
 }
 
 void
 GCRuntime::sweepDebuggerOnMainThread(FreeOp* fop)
 {
     // Detach unreachable debuggers and global objects from each other.
     // This can modify weakmaps and so must happen before weakmap sweeping.
     Debugger::sweepAll(fop);
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_SWEEP_COMPARTMENTS);
 
     // Sweep debug environment information. This performs lookups in the Zone's
     // unique IDs table and so must not happen in parallel with sweeping that
     // table.
     {
-        gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::SWEEP_MISC);
+        gcstats::AutoPhase ap2(stats(), gcstats::PHASE_SWEEP_MISC);
         for (GCCompartmentGroupIter c(rt); !c.done(); c.next())
             c->sweepDebugEnvironments();
     }
 
     // Sweep breakpoints. This is done here to be with the other debug sweeping,
     // although note that it can cause JIT code to be patched.
     {
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_BREAKPOINT);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_SWEEP_BREAKPOINT);
         for (GCSweepGroupIter zone(rt); !zone.done(); zone.next())
             zone->sweepBreakpoints(fop);
     }
 }
 
 void
 GCRuntime::sweepJitDataOnMainThread(FreeOp* fop)
 {
     {
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP_JIT_DATA);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_SWEEP_JIT_DATA);
 
         // Cancel any active or pending off thread compilations.
         js::CancelOffThreadIonCompile(rt, JS::Zone::Sweep);
 
         for (GCCompartmentGroupIter c(rt); !c.done(); c.next())
             c->sweepJitCompartment(fop);
 
         for (GCSweepGroupIter zone(rt); !zone.done(); zone.next()) {
@@ -5127,24 +5127,24 @@ GCRuntime::sweepJitDataOnMainThread(Free
         // work on a single zone-group at once.
 
         // Sweep entries containing about-to-be-finalized JitCode and
         // update relocated TypeSet::Types inside the JitcodeGlobalTable.
         jit::JitRuntime::SweepJitcodeGlobalTable(rt);
     }
 
     {
-        gcstats::AutoPhase apdc(stats(), gcstats::PhaseKind::SWEEP_DISCARD_CODE);
+        gcstats::AutoPhase apdc(stats(), gcstats::PHASE_SWEEP_DISCARD_CODE);
         for (GCSweepGroupIter zone(rt); !zone.done(); zone.next())
             zone->discardJitCode(fop);
     }
 
     {
-        gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP_TYPES);
-        gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::SWEEP_TYPES_BEGIN);
+        gcstats::AutoPhase ap1(stats(), gcstats::PHASE_SWEEP_TYPES);
+        gcstats::AutoPhase ap2(stats(), gcstats::PHASE_SWEEP_TYPES_BEGIN);
         for (GCSweepGroupIter zone(rt); !zone.done(); zone.next())
             zone->beginSweepTypes(fop, releaseObservedTypes && !zone->isPreservingCode());
     }
 }
 
 using WeakCacheTaskVector = mozilla::Vector<SweepWeakCacheTask, 0, SystemAllocPolicy>;
 
 template <typename Functor>
@@ -5188,21 +5188,21 @@ PrepareWeakCacheTasks(JSRuntime* rt)
     return tasks;
 }
 
 class MOZ_RAII js::gc::AutoRunParallelTask : public GCParallelTask
 {
     using Func = void (*)(JSRuntime*);
 
     Func func_;
-    gcstats::PhaseKind phase_;
+    gcstats::Phase phase_;
     AutoLockHelperThreadState& lock_;
 
   public:
-    AutoRunParallelTask(JSRuntime* rt, Func func, gcstats::PhaseKind phase,
+    AutoRunParallelTask(JSRuntime* rt, Func func, gcstats::Phase phase,
                        AutoLockHelperThreadState& lock)
       : GCParallelTask(rt),
         func_(func),
         phase_(phase),
         lock_(lock)
     {
         runtime()->gc.startTask(*this, phase_, lock_);
     }
@@ -5243,63 +5243,63 @@ GCRuntime::beginSweepingSweepGroup()
 #endif
     }
 
     validateIncrementalMarking();
 
     FreeOp fop(rt);
 
     {
-        AutoPhase ap(stats(), PhaseKind::FINALIZE_START);
+        AutoPhase ap(stats(), PHASE_FINALIZE_START);
         callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_PREPARE);
         {
-            AutoPhase ap2(stats(), PhaseKind::WEAK_ZONES_CALLBACK);
+            AutoPhase ap2(stats(), PHASE_WEAK_ZONES_CALLBACK);
             callWeakPointerZonesCallbacks();
         }
         {
-            AutoPhase ap2(stats(), PhaseKind::WEAK_COMPARTMENT_CALLBACK);
+            AutoPhase ap2(stats(), PHASE_WEAK_COMPARTMENT_CALLBACK);
             for (GCSweepGroupIter zone(rt); !zone.done(); zone.next()) {
                 for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
                     callWeakPointerCompartmentCallbacks(comp);
             }
         }
         callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_START);
     }
 
     sweepDebuggerOnMainThread(&fop);
 
     {
         AutoLockHelperThreadState lock;
 
         Maybe<AutoRunParallelTask> sweepAtoms;
         if (sweepingAtoms)
-            sweepAtoms.emplace(rt, SweepAtoms, PhaseKind::SWEEP_ATOMS, lock);
-
-        AutoPhase ap(stats(), PhaseKind::SWEEP_COMPARTMENTS);
+            sweepAtoms.emplace(rt, SweepAtoms, PHASE_SWEEP_ATOMS, lock);
+
+        AutoPhase ap(stats(), PHASE_SWEEP_COMPARTMENTS);
         AutoSCC scc(stats(), sweepGroupIndex);
 
-        AutoRunParallelTask sweepCCWrappers(rt, SweepCCWrappers, PhaseKind::SWEEP_CC_WRAPPER, lock);
-        AutoRunParallelTask sweepObjectGroups(rt, SweepObjectGroups, PhaseKind::SWEEP_TYPE_OBJECT, lock);
-        AutoRunParallelTask sweepRegExps(rt, SweepRegExps, PhaseKind::SWEEP_REGEXP, lock);
-        AutoRunParallelTask sweepMisc(rt, SweepMisc, PhaseKind::SWEEP_MISC, lock);
-        AutoRunParallelTask sweepCompTasks(rt, SweepCompressionTasks, PhaseKind::SWEEP_COMPRESSION, lock);
-        AutoRunParallelTask sweepWeakMaps(rt, SweepWeakMaps, PhaseKind::SWEEP_WEAKMAPS, lock);
-        AutoRunParallelTask sweepUniqueIds(rt, SweepUniqueIds, PhaseKind::SWEEP_UNIQUEIDS, lock);
+        AutoRunParallelTask sweepCCWrappers(rt, SweepCCWrappers, PHASE_SWEEP_CC_WRAPPER, lock);
+        AutoRunParallelTask sweepObjectGroups(rt, SweepObjectGroups, PHASE_SWEEP_TYPE_OBJECT, lock);
+        AutoRunParallelTask sweepRegExps(rt, SweepRegExps, PHASE_SWEEP_REGEXP, lock);
+        AutoRunParallelTask sweepMisc(rt, SweepMisc, PHASE_SWEEP_MISC, lock);
+        AutoRunParallelTask sweepCompTasks(rt, SweepCompressionTasks, PHASE_SWEEP_COMPRESSION, lock);
+        AutoRunParallelTask sweepWeakMaps(rt, SweepWeakMaps, PHASE_SWEEP_WEAKMAPS, lock);
+        AutoRunParallelTask sweepUniqueIds(rt, SweepUniqueIds, PHASE_SWEEP_UNIQUEIDS, lock);
 
         WeakCacheTaskVector sweepCacheTasks = PrepareWeakCacheTasks(rt);
         for (auto& task : sweepCacheTasks)
-            startTask(task, PhaseKind::SWEEP_WEAK_CACHES, lock);
+            startTask(task, PHASE_SWEEP_WEAK_CACHES, lock);
 
         {
             AutoUnlockHelperThreadState unlock(lock);
             sweepJitDataOnMainThread(&fop);
         }
 
         for (auto& task : sweepCacheTasks)
-            joinTask(task, PhaseKind::SWEEP_WEAK_CACHES, lock);
+            joinTask(task, PHASE_SWEEP_WEAK_CACHES, lock);
     }
 
     // Queue all GC things in all zones for sweeping, either on the foreground
     // or on the background thread.
 
     for (GCSweepGroupIter zone(rt); !zone.done(); zone.next()) {
         AutoSCC scc(stats(), sweepGroupIndex);
 
@@ -5317,17 +5317,17 @@ GCRuntime::beginSweepingSweepGroup()
     sweepZone = currentSweepGroup;
     sweepActionIndex = 0;
 }
 
 void
 GCRuntime::endSweepingSweepGroup()
 {
     {
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_FINALIZE_END);
         FreeOp fop(rt);
         callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_END);
     }
 
     /* Update the GC state for zones we have swept. */
     for (GCSweepGroupIter zone(rt); !zone.done(); zone.next()) {
         MOZ_ASSERT(zone->isGCSweeping());
         AutoLockGC lock(rt);
@@ -5366,17 +5366,17 @@ GCRuntime::beginSweepPhase(JS::gcreason:
     MOZ_ASSERT(!abortSweepAfterCurrentGroup);
 
     AutoSetThreadIsSweeping threadIsSweeping;
 
     releaseHeldRelocatedArenas();
 
     computeNonIncrementalMarkingForValidation(lock);
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_SWEEP);
 
     sweepOnBackgroundThread =
         reason != JS::gcreason::DESTROY_RUNTIME && !TraceEnabled() && CanUseExtraThreads();
 
     releaseObservedTypes = shouldReleaseObservedTypes();
 
     AssertNoWrappersInGrayList(rt);
     DropStringWrappers(rt);
@@ -5417,17 +5417,17 @@ ArenaLists::foregroundFinalize(FreeOp* f
         arenaLists(thingKind) =
             finalized.insertListWithCursorAtEnd(arenaLists(thingKind));
     }
 
     return true;
 }
 
 IncrementalProgress
-GCRuntime::drainMarkStack(SliceBudget& sliceBudget, gcstats::PhaseKind phase)
+GCRuntime::drainMarkStack(SliceBudget& sliceBudget, gcstats::Phase phase)
 {
     /* Run a marking slice and return whether the stack is now empty. */
     gcstats::AutoPhase ap(stats(), phase);
     return marker.drainMarkStack(sliceBudget) ? Finished : NotFinished;
 }
 
 static void
 SweepThing(Shape* shape)
@@ -5475,32 +5475,32 @@ GCRuntime::sweepTypeInformation(GCRuntim
     // and dead scripts and object groups, so that no dead references remain in
     // them. Type inference can end up crawling these zones again, such as for
     // TypeCompartment::markSetsUnknown, and if this happens after sweeping for
     // the sweep group finishes we won't be able to determine which things in
     // the zone are live.
 
     MOZ_ASSERT(kind == AllocKind::LIMIT);
 
-    gcstats::AutoPhase ap1(gc->stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
-    gcstats::AutoPhase ap2(gc->stats(), gcstats::PhaseKind::SWEEP_TYPES);
+    gcstats::AutoPhase ap1(gc->stats(), gcstats::PHASE_SWEEP_COMPARTMENTS);
+    gcstats::AutoPhase ap2(gc->stats(), gcstats::PHASE_SWEEP_TYPES);
 
     ArenaLists& al = zone->arenas;
 
     AutoClearTypeInferenceStateOnOOM oom(zone);
 
     if (!SweepArenaList<JSScript>(&al.gcScriptArenasToUpdate.ref(), budget, &oom))
         return NotFinished;
 
     if (!SweepArenaList<ObjectGroup>(&al.gcObjectGroupArenasToUpdate.ref(), budget, &oom))
         return NotFinished;
 
     // Finish sweeping type information in the zone.
     {
-        gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::SWEEP_TYPES_END);
+        gcstats::AutoPhase ap(gc->stats(), gcstats::PHASE_SWEEP_TYPES_END);
         zone->types.endSweep(gc->rt);
     }
 
     return Finished;
 }
 
 /* static */ IncrementalProgress
 GCRuntime::mergeSweptObjectArenas(GCRuntime* gc, FreeOp* fop, Zone* zone, SliceBudget& budget,
@@ -5536,17 +5536,17 @@ GCRuntime::finalizeAllocKind(GCRuntime* 
 /* static */ IncrementalProgress
 GCRuntime::sweepShapeTree(GCRuntime* gc, FreeOp* fop, Zone* zone, SliceBudget& budget,
                           AllocKind kind)
 {
     // Remove dead shapes from the shape tree, but don't finalize them yet.
 
     MOZ_ASSERT(kind == AllocKind::LIMIT);
 
-    gcstats::AutoPhase ap(gc->stats(), gcstats::PhaseKind::SWEEP_SHAPE);
+    gcstats::AutoPhase ap(gc->stats(), gcstats::PHASE_SWEEP_SHAPE);
 
     ArenaLists& al = zone->arenas;
 
     if (!SweepArenaList<Shape>(&al.gcShapeArenasToUpdate.ref(), budget))
         return NotFinished;
 
     if (!SweepArenaList<AccessorShape>(&al.gcAccessorShapeArenasToUpdate.ref(), budget))
         return NotFinished;
@@ -5593,20 +5593,20 @@ GCRuntime::initializeSweepActions()
     return ok;
 }
 
 IncrementalProgress
 GCRuntime::performSweepActions(SliceBudget& budget, AutoLockForExclusiveAccess& lock)
 {
     AutoSetThreadIsSweeping threadIsSweeping;
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_SWEEP);
     FreeOp fop(rt);
 
-    if (drainMarkStack(budget, gcstats::PhaseKind::SWEEP_MARK) == NotFinished)
+    if (drainMarkStack(budget, gcstats::PHASE_SWEEP_MARK) == NotFinished)
         return NotFinished;
 
     for (;;) {
         for (; sweepPhaseIndex < SweepPhases.length(); sweepPhaseIndex++) {
             const auto& actions = SweepPhases[sweepPhaseIndex];
             for (; sweepZone; sweepZone = sweepZone->nextNodeInGroup()) {
                 for (; sweepActionIndex < actions.length(); sweepActionIndex++) {
                     const auto& action = actions[sweepActionIndex];
@@ -5659,17 +5659,17 @@ GCRuntime::allCCVisibleZonesWereCollecte
     return true;
 }
 
 void
 GCRuntime::endSweepPhase(bool destroyingRuntime, AutoLockForExclusiveAccess& lock)
 {
     AutoSetThreadIsSweeping threadIsSweeping;
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::SWEEP);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_SWEEP);
     FreeOp fop(rt);
 
     MOZ_ASSERT_IF(destroyingRuntime, !sweepOnBackgroundThread);
 
     /*
      * Recalculate whether GC was full or not as this may have changed due to
      * newly created zones.  Can only change from full to not full.
      */
@@ -5678,17 +5678,17 @@ GCRuntime::endSweepPhase(bool destroying
             if (!zone->isCollecting()) {
                 isFull = false;
                 break;
             }
         }
     }
 
     {
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::DESTROY);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_DESTROY);
 
         /*
          * Sweep script filenames after sweeping functions in the generic loop
          * above. In this way when a scripted function's finalizer destroys the
          * script and calls rt->destroyScriptHook, the hook can still access the
          * script's filename. See bug 323267.
          */
         SweepScriptData(rt, lock);
@@ -5696,17 +5696,17 @@ GCRuntime::endSweepPhase(bool destroying
         /* Clear out any small pools that we're hanging on to. */
         if (rt->hasJitRuntime()) {
             rt->jitRuntime()->execAlloc().purge();
             rt->jitRuntime()->backedgeExecAlloc().purge();
         }
     }
 
     {
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::FINALIZE_END);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_FINALIZE_END);
         callFinalizeCallbacks(&fop, JSFINALIZE_COLLECTION_END);
 
         if (allCCVisibleZonesWereCollected())
             grayBitsValid = true;
     }
 
     finishMarkingValidation();
 
@@ -5723,17 +5723,17 @@ GCRuntime::endSweepPhase(bool destroying
     AssertNoWrappersInGrayList(rt);
 }
 
 void
 GCRuntime::beginCompactPhase()
 {
     MOZ_ASSERT(!isBackgroundSweeping());
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_COMPACT);
 
     MOZ_ASSERT(zonesToMaybeCompact.ref().isEmpty());
     for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
         if (CanRelocateZone(zone))
             zonesToMaybeCompact.ref().append(zone);
     }
 
     MOZ_ASSERT(!relocatedArenasToRelease);
@@ -5742,17 +5742,17 @@ GCRuntime::beginCompactPhase()
 
 IncrementalProgress
 GCRuntime::compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget,
                         AutoLockForExclusiveAccess& lock)
 {
     assertBackgroundSweepingFinished();
     MOZ_ASSERT(startedCompacting);
 
-    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::COMPACT);
+    gcstats::AutoPhase ap(stats(), gcstats::PHASE_COMPACT);
 
     // TODO: JSScripts can move. If the sampler interrupts the GC in the
     // middle of relocating an arena, invalid JSScript pointers may be
     // accessed. Suppress all sampling until a finer-grained solution can be
     // found. See bug 1295775.
     AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
 
     ZoneList relocatedZones;
@@ -5960,25 +5960,25 @@ GCRuntime::resetIncrementalGC(gc::AbortR
         isCompacting = false;
 
         auto unlimited = SliceBudget::unlimited();
         incrementalCollectSlice(unlimited, JS::gcreason::RESET, lock);
 
         isCompacting = wasCompacting;
 
         {
-            gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
+            gcstats::AutoPhase ap(stats(), gcstats::PHASE_WAIT_BACKGROUND_THREAD);
             rt->gc.waitBackgroundSweepOrAllocEnd();
         }
         break;
       }
 
       case State::Finalize: {
         {
-            gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
+            gcstats::AutoPhase ap(stats(), gcstats::PHASE_WAIT_BACKGROUND_THREAD);
             rt->gc.waitBackgroundSweepOrAllocEnd();
         }
 
         bool wasCompacting = isCompacting;
         isCompacting = false;
 
         auto unlimited = SliceBudget::unlimited();
         incrementalCollectSlice(unlimited, JS::gcreason::RESET, lock);
@@ -6164,17 +6164,17 @@ GCRuntime::incrementalCollectSlice(Slice
             AutoGCRooter::traceAllWrappers(target, &marker);
 
         /* If we needed delayed marking for gray roots, then collect until done. */
         if (!hasBufferedGrayRoots()) {
             budget.makeUnlimited();
             isIncremental = false;
         }
 
-        if (drainMarkStack(budget, gcstats::PhaseKind::MARK) == NotFinished)
+        if (drainMarkStack(budget, gcstats::PHASE_MARK) == NotFinished)
             break;
 
         MOZ_ASSERT(marker.isDrained());
 
         /*
          * In incremental GCs where we have already performed more than once
          * slice we yield after marking with the aim of starting the sweep in
          * the next slice, since the first slice of sweeping can be expensive.
@@ -6225,34 +6225,34 @@ GCRuntime::incrementalCollectSlice(Slice
         endSweepPhase(destroyingRuntime, lock);
 
         incrementalState = State::Finalize;
 
         MOZ_FALLTHROUGH;
 
       case State::Finalize:
         {
-            gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
+            gcstats::AutoPhase ap(stats(), gcstats::PHASE_WAIT_BACKGROUND_THREAD);
 
             // Yield until background finalization is done.
             if (!budget.isUnlimited()) {
                 // Poll for end of background sweeping
                 AutoLockGC lock(rt);
                 if (isBackgroundSweeping())
                     break;
             } else {
                 waitBackgroundSweepEnd();
             }
         }
 
         {
             // Re-sweep the zones list, now that background finalization is
             // finished to actually remove and free dead zones.
-            gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP);
-            gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::DESTROY);
+            gcstats::AutoPhase ap1(stats(), gcstats::PHASE_SWEEP);
+            gcstats::AutoPhase ap2(stats(), gcstats::PHASE_DESTROY);
             AutoSetThreadIsSweeping threadIsSweeping;
             FreeOp fop(rt);
             sweepZoneGroups(&fop, destroyingRuntime);
         }
 
         MOZ_ASSERT(!startedCompacting);
         incrementalState = State::Compact;
 
@@ -6275,17 +6275,17 @@ GCRuntime::incrementalCollectSlice(Slice
 
         startDecommit();
         incrementalState = State::Decommit;
 
         MOZ_FALLTHROUGH;
 
       case State::Decommit:
         {
-            gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
+            gcstats::AutoPhase ap(stats(), gcstats::PHASE_WAIT_BACKGROUND_THREAD);
 
             // Yield until background decommit is done.
             if (!budget.isUnlimited() && decommitTask.isRunning())
                 break;
 
             decommitTask.join();
         }
 
@@ -6469,17 +6469,17 @@ GCRuntime::gcCycle(bool nonincrementalBy
     // It's ok if threads other than the active thread have suppressGC set, as
     // they are operating on zones which will not be collected from here.
     MOZ_ASSERT(!TlsContext.get()->suppressGC);
 
     // Assert if this is a GC unsafe region.
     TlsContext.get()->verifyIsSafeToGC();
 
     {
-        gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::WAIT_BACKGROUND_THREAD);
+        gcstats::AutoPhase ap(stats(), gcstats::PHASE_WAIT_BACKGROUND_THREAD);
 
         // Background finalization and decommit are finished by defininition
         // before we can start a new GC session.
         if (!isIncrementalGCInProgress()) {
             assertBackgroundSweepingFinished();
             MOZ_ASSERT(!decommitTask.isRunning());
         }
 
@@ -6683,17 +6683,17 @@ GCRuntime::collect(bool nonincrementalBy
         repeat = (poked && cleanUpEverything) || wasReset || repeatForDeadZone;
     } while (repeat);
 
     if (reason == JS::gcreason::COMPARTMENT_REVIVED)
         maybeDoCycleCollection();
 
 #ifdef JS_GC_ZEAL
     if (rt->hasZealMode(ZealMode::CheckHeapAfterGC)) {
-        gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PhaseKind::TRACE_HEAP);
+        gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_TRACE_HEAP);
         CheckHeapAfterGC(rt);
     }
 #endif
 }
 
 js::AutoEnqueuePendingParseTasksAfterGC::~AutoEnqueuePendingParseTasksAfterGC()
 {
     if (!OffThreadParsingMustWaitForGC(gc_.rt))
@@ -6861,17 +6861,17 @@ GCRuntime::onOutOfMallocMemory(const Aut
 
     // Immediately decommit as many arenas as possible in the hopes that this
     // might let the OS scrape together enough pages to satisfy the failing
     // malloc request.
     decommitAllWithoutUnlocking(lock);
 }
 
 void
-GCRuntime::minorGC(JS::gcreason::Reason reason, gcstats::PhaseKind phase)
+GCRuntime::minorGC(JS::gcreason::Reason reason, gcstats::Phase phase)
 {
     MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
 
     if (TlsContext.get()->suppressGC)
         return;
 
     gcstats::AutoPhase ap(rt->gc.stats(), phase);
 
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -734,16 +734,8 @@ if CONFIG['GNU_CXX']:
     CXXFLAGS += ['-Wno-shadow', '-Werror=format']
 
 # Suppress warnings in third-party code.
 if CONFIG['CLANG_CXX']:
     SOURCES['jsdtoa.cpp'].flags += ['-Wno-implicit-fallthrough']
 
 if CONFIG['OS_ARCH'] == 'WINNT':
     DEFINES['NOMINMAX'] = True
-
-# Generate GC statistics phase data.
-GENERATED_FILES += ['gc/StatsPhasesGenerated.h']
-StatsPhasesGeneratedHeader = GENERATED_FILES['gc/StatsPhasesGenerated.h']
-StatsPhasesGeneratedHeader.script = 'gc/GenerateStatsPhases.py:generateHeader'
-GENERATED_FILES += ['gc/StatsPhasesGenerated.cpp']
-StatsPhasesGeneratedCpp = GENERATED_FILES['gc/StatsPhasesGenerated.cpp']
-StatsPhasesGeneratedCpp.script = 'gc/GenerateStatsPhases.py:generateCpp'