Backed out changesets 4eab437d2b51 and f7104b435b47 (bug 1125101) for suspicion of being the cause of a spike in Linux debug mochitest-bc timeouts.
author: Ryan VanderMeulen <ryanvm@gmail.com>
date: Thu, 26 Feb 2015 14:32:46 -0500
changeset: 249275 dcb0abbfa4235ee531cddbed5a38a363cbcf1844
parent: 249274 b56e18c9dbfc4ce4785c809e7eec5a835f4d325b
child: 249276 6512200e17e21633a00ee5b27c66eb1ed2236c92
push id: 7860
push user: jlund@mozilla.com
push date: Mon, 30 Mar 2015 18:46:02 +0000
treeherder: mozilla-aurora@8ac636cd51f3
bugs: 1125101
milestone: 39.0a1
backs out: 4eab437d2b51f5f1261926511ce5cde3a1f107da
           f7104b435b475d4c4ba6b3ff215694d091df8875
js/src/gc/GCRuntime.h
js/src/gc/RootMarking.cpp
js/src/gc/Zone.cpp
js/src/jsgc.cpp
js/src/jsgc.h
js/src/vm/Debugger.cpp
js/src/vm/Debugger.h
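
The backed-out changesets had made the compacting phase of the GC incremental: compaction was split into begin/run/end steps driven by a per-slice budget, with the zones still awaiting compaction carried in zonesToMaybeCompact between slices (these are the declarations and fields removed from GCRuntime.h below). As a rough illustration of that slice-budgeted pattern only — the types and names here are hypothetical stand-ins, not SpiderMonkey's actual implementation — a phase keeps its work queue as member state and reports NotFinished when the slice budget runs out:

#include <cstddef>
#include <deque>
#include <utility>

enum IncrementalProgress { NotFinished, Finished };

// Hypothetical stand-in for SpiderMonkey's SliceBudget: counts work steps and
// reports when the current slice has used up its allowance.
class SliceBudget {
    size_t remaining_;
  public:
    explicit SliceBudget(size_t steps) : remaining_(steps) {}
    void step(size_t n = 1) { remaining_ = (n > remaining_) ? 0 : remaining_ - n; }
    bool isOverBudget() const { return remaining_ == 0; }
};

struct Zone { /* cells, arenas, ... */ };

class Compactor {
    std::deque<Zone*> zonesToMaybeCompact_;      // work carried over between slices
    bool startedCompacting_ = false;

    void relocateZone(Zone*, SliceBudget& budget) { budget.step(100); /* move cells */ }

  public:
    void begin(std::deque<Zone*> zones) {        // beginCompactPhase analogue
        zonesToMaybeCompact_ = std::move(zones);
        startedCompacting_ = true;
    }

    IncrementalProgress run(SliceBudget& budget) {   // compactPhase analogue
        while (!zonesToMaybeCompact_.empty()) {
            relocateZone(zonesToMaybeCompact_.front(), budget);
            zonesToMaybeCompact_.pop_front();
            if (budget.isOverBudget())
                break;                           // yield; resume on the next GC slice
        }
        if (!zonesToMaybeCompact_.empty())
            return NotFinished;
        startedCompacting_ = false;              // endCompactPhase analogue
        return Finished;
    }
};

int main() {
    Zone a, b;
    Compactor c;
    c.begin({&a, &b});
    SliceBudget firstSlice(100);                 // enough budget for one zone
    IncrementalProgress p = c.run(firstSlice);   // NotFinished: zone b still queued
    SliceBudget secondSlice(100);
    p = c.run(secondSlice);                      // Finished: resumes where it stopped
    return p == Finished ? 0 : 1;
}
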
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -903,30 +903,28 @@ class GCRuntime
     void sweepZones(FreeOp *fop, bool lastGC);
     void decommitAllWithoutUnlocking(const AutoLockGC &lock);
     void decommitArenas(AutoLockGC &lock);
     void expireChunksAndArenas(bool shouldShrink, AutoLockGC &lock);
     void queueZonesForBackgroundSweep(ZoneList &zones);
     void sweepBackgroundThings(ZoneList &zones, LifoAlloc &freeBlocks, ThreadType threadType);
     void assertBackgroundSweepingFinished();
     bool shouldCompact();
-    IncrementalProgress beginCompactPhase();
-    IncrementalProgress compactPhase(JS::gcreason::Reason reason, SliceBudget &sliceBudget);
-    void endCompactPhase(JS::gcreason::Reason reason);
+    IncrementalProgress compactPhase(JS::gcreason::Reason reason);
     void sweepTypesAfterCompacting(Zone *zone);
     void sweepZoneAfterCompacting(Zone *zone);
-    bool relocateArenas(Zone *zone, JS::gcreason::Reason reason, SliceBudget &sliceBudget);
-    void updateAllCellPointersParallel(MovingTracer *trc, Zone *zone);
-    void updateAllCellPointersSerial(MovingTracer *trc, Zone *zone);
-    void updatePointersToRelocatedCells(Zone *zone);
-    void releaseRelocatedArenas();
-    void releaseRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);
+    ArenaHeader *relocateArenas(JS::gcreason::Reason reason);
+    void updateAllCellPointersParallel(MovingTracer *trc);
+    void updateAllCellPointersSerial(MovingTracer *trc);
+    void updatePointersToRelocatedCells();
+    void releaseRelocatedArenas(ArenaHeader *relocatedList);
+    void releaseRelocatedArenasWithoutUnlocking(ArenaHeader *relocatedList, const AutoLockGC& lock);
 #ifdef DEBUG
-    void protectRelocatedArenas();
-    void unprotectRelocatedArenas();
+    void protectRelocatedArenas(ArenaHeader *relocatedList);
+    void unprotectRelocatedArenas(ArenaHeader *relocatedList);
 #endif
     void finishCollection(JS::gcreason::Reason reason);
 
     void computeNonIncrementalMarkingForValidation();
     void validateIncrementalMarking();
     void finishMarkingValidation();
 
 #ifdef DEBUG
@@ -1056,17 +1054,16 @@ class GCRuntime
     /* Whether observed type information is being released in the current GC. */
     bool releaseObservedTypes;
 
     /* Whether any black->gray edges were found during marking. */
     bool foundBlackGrayEdges;
 
     /* Singly linked list of zones to be swept in the background. */
     ZoneList backgroundSweepZones;
-
     /*
      * Free LIFO blocks are transferred to this allocator before being freed on
      * the background GC thread.
      */
     js::LifoAlloc freeLifoAlloc;
 
     /* Index of current zone group (for stats). */
     unsigned zoneGroupIndex;
@@ -1088,23 +1085,16 @@ class GCRuntime
     void startTask(GCParallelTask &task, gcstats::Phase phase);
     void joinTask(GCParallelTask &task, gcstats::Phase phase);
 
     /*
      * List head of arenas allocated during the sweep phase.
      */
     js::gc::ArenaHeader *arenasAllocatedDuringSweep;
 
-    /*
-     * Incremental compacting state.
-     */
-    bool startedCompacting;
-    js::gc::ZoneList zonesToMaybeCompact;
-    ArenaHeader* relocatedArenasToRelease;
-
 #ifdef JS_GC_MARKING_VALIDATION
     js::gc::MarkingValidator *markingValidator;
 #endif
 
     /*
      * Indicates that a GC slice has taken place in the middle of an animation
      * frame, rather than at the beginning. In this case, the next slice will be
      * delayed so that we don't get back-to-back slices.
@@ -1229,16 +1219,19 @@ class GCRuntime
      * Some regions of code are hard for the static rooting hazard analysis to
      * understand. In those cases, we trade the static analysis for a dynamic
      * analysis. When this is non-zero, we should assert if we trigger, or
      * might trigger, a GC.
      */
     int inUnsafeRegion;
 
     size_t noGCOrAllocationCheck;
+
+    ArenaHeader* relocatedArenasToRelease;
+
 #endif
 
     /* Synchronize GC heap access between main thread and GCHelperState. */
     PRLock *lock;
     mozilla::DebugOnly<PRThread *> lockOwner;
 
     BackgroundAllocTask allocTask;
     GCHelperState helperState;
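
The protectRelocatedArenas/unprotectRelocatedArenas declarations above support a debug-only safety net: after compaction the evacuated arenas are kept around but made inaccessible until the next GC, so any stale pointer that was not forwarded faults at the point of use instead of silently reading moved data (see the jsgc.cpp changes below). A minimal POSIX sketch of that page-protection trick in isolation — here the "arena" is just an anonymous page, not a real GC arena:

#include <sys/mman.h>
#include <cassert>
#include <cstddef>

int main() {
    const size_t ArenaSize = 4096;               // assume arenas are page-sized here
    void* arena = mmap(nullptr, ArenaSize, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(arena != MAP_FAILED);

    // "Protect": any later access through a stale, un-forwarded pointer into
    // the relocated arena now crashes immediately.
    if (mprotect(arena, ArenaSize, PROT_NONE) != 0)
        return 1;

    // "Unprotect": done just before the arena is actually released, so the
    // free path can walk its contents again.
    if (mprotect(arena, ArenaSize, PROT_READ | PROT_WRITE) != 0)
        return 1;

    return munmap(arena, ArenaSize);
}
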
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -425,17 +425,17 @@ js::gc::GCRuntime::markRuntime(JSTracer 
 
     if (traceOrMark == MarkRuntime) {
         gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_CCWS);
 
         for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
             if (!c->zone()->isCollecting())
                 c->markCrossCompartmentWrappers(trc);
         }
-        Debugger::markIncomingCrossCompartmentEdges(trc);
+        Debugger::markAllCrossCompartmentEdges(trc);
     }
 
     {
         gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTERS);
 
         AutoGCRooter::traceAll(trc);
 
         if (!rt->isBeingDestroyed()) {
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -362,15 +362,8 @@ ZoneList::removeFront()
 
     Zone *front = head;
     head = head->listNext_;
     if (!head)
         tail = nullptr;
 
     front->listNext_ = Zone::NotOnList;
 }
-
-void
-ZoneList::clear()
-{
-    while (!isEmpty())
-        removeFront();
-}
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -1101,18 +1101,16 @@ GCRuntime::GCRuntime(JSRuntime *rt) :
     freeLifoAlloc(JSRuntime::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
     zoneGroupIndex(0),
     zoneGroups(nullptr),
     currentZoneGroup(nullptr),
     sweepZone(nullptr),
     sweepKindIndex(0),
     abortSweepAfterCurrentGroup(false),
     arenasAllocatedDuringSweep(nullptr),
-    startedCompacting(false),
-    relocatedArenasToRelease(nullptr),
 #ifdef JS_GC_MARKING_VALIDATION
     markingValidator(nullptr),
 #endif
     interFrameGC(false),
     sliceBudget(SliceBudget::Unlimited),
     incrementalAllowed(true),
     generationalDisabled(0),
     compactingEnabled(true),
@@ -1131,16 +1129,17 @@ GCRuntime::GCRuntime(JSRuntime *rt) :
     validate(true),
     fullCompartmentChecks(false),
     mallocBytesUntilGC(0),
     mallocGCTriggered(false),
     alwaysPreserveCode(false),
 #ifdef DEBUG
     inUnsafeRegion(0),
     noGCOrAllocationCheck(0),
+    relocatedArenasToRelease(nullptr),
 #endif
     lock(nullptr),
     lockOwner(nullptr),
     allocTask(rt, emptyChunks_),
     helperState(rt)
 {
     setGCMode(JSGC_MODE_GLOBAL);
 }
@@ -2107,17 +2106,17 @@ RelocateCell(Zone *zone, TenuredCell *sr
     // Mark source cell as forwarded and leave a pointer to the destination.
     RelocationOverlay* overlay = RelocationOverlay::fromCell(src);
     overlay->forwardTo(dst);
 
     return true;
 }
 
 static void
-RelocateArena(ArenaHeader *aheader, SliceBudget &sliceBudget)
+RelocateArena(ArenaHeader *aheader)
 {
     MOZ_ASSERT(aheader->allocated());
     MOZ_ASSERT(!aheader->hasDelayedMarking);
     MOZ_ASSERT(!aheader->markOverflow);
     MOZ_ASSERT(!aheader->allocatedDuringIncremental);
 
     Zone *zone = aheader->zone;
 
@@ -2126,43 +2125,32 @@ RelocateArena(ArenaHeader *aheader, Slic
 
     for (ArenaCellIterUnderFinalize i(aheader); !i.done(); i.next()) {
         if (!RelocateCell(zone, i.getCell(), thingKind, thingSize)) {
             // This can only happen in zeal mode or debug builds as we don't
             // otherwise relocate more cells than we have existing free space
             // for.
             CrashAtUnhandlableOOM("Could not allocate new arena while compacting");
         }
-        sliceBudget.step();
-    }
-
-#ifdef DEBUG
-    for (ArenaCellIterUnderFinalize i(aheader); !i.done(); i.next()) {
-        TenuredCell *src = i.getCell();
-        MOZ_ASSERT(RelocationOverlay::isCellForwarded(src));
-        TenuredCell *dest = Forwarded(src);
-        MOZ_ASSERT(src->isMarked(BLACK) == dest->isMarked(BLACK));
-        MOZ_ASSERT(src->isMarked(GRAY) == dest->isMarked(GRAY));
-    }
-#endif
+    }
 }
 
 /*
  * Relocate all arenas identified by pickArenasToRelocate: for each arena,
  * relocate each cell within it, then add it to a list of relocated arenas.
  */
 ArenaHeader *
-ArenaList::relocateArenas(ArenaHeader *toRelocate, ArenaHeader *relocated, SliceBudget &sliceBudget,
+ArenaList::relocateArenas(ArenaHeader *toRelocate, ArenaHeader *relocated,
                           gcstats::Statistics& stats)
 {
     check();
 
     while (ArenaHeader *arena = toRelocate) {
         toRelocate = arena->next;
-        RelocateArena(arena, sliceBudget);
+        RelocateArena(arena);
         // Prepend to list of relocated arenas
         arena->next = relocated;
         relocated = arena;
         stats.count(gcstats::STAT_ARENA_RELOCATED);
     }
 
     check();
 
@@ -2181,17 +2169,17 @@ static bool ShouldRelocateZone(size_t ar
     if (reason == JS::gcreason::MEM_PRESSURE || reason == JS::gcreason::LAST_DITCH)
         return true;
 
     return (relocCount * 100.0) / arenaCount >= MIN_ZONE_RECLAIM_PERCENT;
 }
 
 bool
 ArenaLists::relocateArenas(ArenaHeader *&relocatedListOut, JS::gcreason::Reason reason,
-                           SliceBudget &sliceBudget, gcstats::Statistics& stats)
+                           gcstats::Statistics& stats)
 {
 
     // This is only called from the main thread while we are doing a GC, so
     // there is no need to lock.
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
     MOZ_ASSERT(runtime_->isHeapCompacting());
     MOZ_ASSERT(!runtime_->gc.isBackgroundSweeping());
 
@@ -2200,17 +2188,17 @@ ArenaLists::relocateArenas(ArenaHeader *
     checkEmptyFreeLists();
 
     if (ShouldRelocateAllArenas(runtime_)) {
         for (size_t i = 0; i < FINALIZE_LIMIT; i++) {
             if (CanRelocateAllocKind(AllocKind(i))) {
                 ArenaList &al = arenaLists[i];
                 ArenaHeader *allArenas = al.head();
                 al.clear();
-                relocatedListOut = al.relocateArenas(allArenas, relocatedListOut, sliceBudget, stats);
+                relocatedListOut = al.relocateArenas(allArenas, relocatedListOut, stats);
             }
         }
     } else {
         size_t arenaCount = 0;
         size_t relocCount = 0;
         ArenaHeader **toRelocate[FINALIZE_LIMIT] = {nullptr};
 
         for (size_t i = 0; i < FINALIZE_LIMIT; i++) {
@@ -2220,62 +2208,50 @@ ArenaLists::relocateArenas(ArenaHeader *
 
         if (!ShouldRelocateZone(arenaCount, relocCount, reason))
             return false;
 
         for (size_t i = 0; i < FINALIZE_LIMIT; i++) {
             if (toRelocate[i]) {
                 ArenaList &al = arenaLists[i];
                 ArenaHeader *arenas = al.removeRemainingArenas(toRelocate[i]);
-                relocatedListOut = al.relocateArenas(arenas, relocatedListOut, sliceBudget, stats);
+                relocatedListOut = al.relocateArenas(arenas, relocatedListOut, stats);
             }
         }
     }
 
     // When we allocate new locations for cells, we use
     // allocateFromFreeList(). Reset the free list again so that
     // AutoCopyFreeListToArenasForGC doesn't complain that the free lists are
     // different now.
     purge();
     checkEmptyFreeLists();
 
     return true;
 }
 
-bool
-GCRuntime::relocateArenas(Zone *zone, JS::gcreason::Reason reason, SliceBudget &sliceBudget)
+ArenaHeader *
+GCRuntime::relocateArenas(JS::gcreason::Reason reason)
 {
     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT_MOVE);
 
-    MOZ_ASSERT(!zone->isPreservingCode());
-    MOZ_ASSERT(CanRelocateZone(rt, zone));
-
-    jit::StopAllOffThreadCompilations(zone);
-
-    if (!zone->arenas.relocateArenas(relocatedArenasToRelease, reason, sliceBudget, stats))
-        return false;
-
-#ifdef DEBUG
-    // Check that we did as much compaction as we should have. There
-    // should always be less than one arena's worth of free cells.
-    for (size_t i = 0; i < FINALIZE_LIMIT; i++) {
-        size_t thingsPerArena = Arena::thingsPerArena(Arena::thingSize(AllocKind(i)));
-        if (CanRelocateAllocKind(AllocKind(i))) {
-            ArenaList &al = zone->arenas.arenaLists[i];
-            size_t freeCells = 0;
-            for (ArenaHeader *arena = al.arenaAfterCursor(); arena; arena = arena->next)
-                freeCells += arena->countFreeCells();
-            MOZ_ASSERT(freeCells < thingsPerArena);
+    ArenaHeader *relocatedList = nullptr;
+    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
+        MOZ_ASSERT(zone->isGCFinished());
+        MOZ_ASSERT(!zone->isPreservingCode());
+
+        if (CanRelocateZone(rt, zone)) {
+            jit::StopAllOffThreadCompilations(zone);
+            if (zone->arenas.relocateArenas(relocatedList, reason, stats))
+                zone->setGCState(Zone::Compact);
         }
     }
-#endif
-
-    return true;
-}
-
+
+    return relocatedList;
+}
 
 void
 MovingTracer::Visit(JSTracer *jstrc, void **thingp, JSGCTraceKind kind)
 {
     TenuredCell *thing = TenuredCell::fromPointer(*thingp);
 
     // Currently we only relocate objects.
     if (kind != JSTRACE_OBJECT) {
@@ -2400,25 +2376,25 @@ namespace gc {
 
 struct ArenasToUpdate
 {
     enum KindsToUpdate {
         FOREGROUND = 1,
         BACKGROUND = 2,
         ALL = FOREGROUND | BACKGROUND
     };
-    ArenasToUpdate(Zone *zone, KindsToUpdate kinds);
+    ArenasToUpdate(JSRuntime *rt, KindsToUpdate kinds);
     bool done() { return initialized && arena == nullptr; }
     ArenaHeader* next(AutoLockHelperThreadState& lock);
     ArenaHeader *getArenasToUpdate(AutoLockHelperThreadState& lock, unsigned max);
 
   private:
     bool initialized;
     KindsToUpdate kinds;
-    Zone *zone;          // Zone to process
+    GCZonesIter zone;    // Current zone to process, unless zone.done()
     unsigned kind;       // Current alloc kind to process
     ArenaHeader *arena;  // Next arena to process
 
     bool shouldProcessKind(unsigned kind);
 };
 
 bool ArenasToUpdate::shouldProcessKind(unsigned kind)
 {
@@ -2432,20 +2408,19 @@ bool ArenasToUpdate::shouldProcessKind(u
     }
 
     if (js::gc::IsBackgroundFinalized(AllocKind(kind)))
         return (kinds & BACKGROUND) != 0;
     else
         return (kinds & FOREGROUND) != 0;
 }
 
-ArenasToUpdate::ArenasToUpdate(Zone *zone, KindsToUpdate kinds)
-  : initialized(false), kinds(kinds), zone(zone)
-{
-    MOZ_ASSERT(zone->isGCCompacting());
+ArenasToUpdate::ArenasToUpdate(JSRuntime *rt, KindsToUpdate kinds)
+  : initialized(false), kinds(kinds), zone(rt, SkipAtoms)
+{
     MOZ_ASSERT(kinds && !(kinds & ~ALL));
 }
 
 ArenaHeader *
 ArenasToUpdate::next(AutoLockHelperThreadState& lock)
 {
     // Find the next arena to update.
     //
@@ -2454,40 +2429,43 @@ ArenasToUpdate::next(AutoLockHelperThrea
     // normal way, returning the first arena found. In subsequent invocations we
     // jump directly into the body of the for loops just after the previous
     // return. All state is stored in class members and so preserved between
     // invocations.
 
     if (initialized) {
         MOZ_ASSERT(arena);
         MOZ_ASSERT(shouldProcessKind(kind));
-        MOZ_ASSERT(zone);
+        MOZ_ASSERT(!zone.done());
         goto resumePoint;
     }
 
     initialized = true;
-    for (kind = 0; kind < FINALIZE_LIMIT; ++kind) {
-        if (shouldProcessKind(kind)) {
-            for (arena = zone->arenas.getFirstArena(AllocKind(kind));
-                 arena;
-                 arena = arena->next)
-            {
-                return arena;
-              resumePoint:;
+    for (; !zone.done(); zone.next()) {
+        if (zone->isGCCompacting()) {
+            for (kind = 0; kind < FINALIZE_LIMIT; ++kind) {
+                if (shouldProcessKind(kind)) {
+                    for (arena = zone.get()->arenas.getFirstArena(AllocKind(kind));
+                         arena;
+                         arena = arena->next)
+                    {
+                        return arena;
+                      resumePoint:;
+                    }
+                }
             }
         }
     }
-    zone = nullptr;
     return nullptr;
 }
 
 ArenaHeader *
 ArenasToUpdate::getArenasToUpdate(AutoLockHelperThreadState& lock, unsigned count)
 {
-    if (!zone)
+    if (zone.done())
         return nullptr;
 
     ArenaHeader *head = nullptr;
     ArenaHeader *tail = nullptr;
 
     for (unsigned i = 0; i < count; ++i) {
         ArenaHeader *arena = next(lock);
         if (!arena)
@@ -2563,29 +2541,29 @@ UpdateCellPointersTask::run()
         }
     }
 }
 
 } // namespace gc
 } // namespace js
 
 void
-GCRuntime::updateAllCellPointersParallel(MovingTracer *trc, Zone *zone)
+GCRuntime::updateAllCellPointersParallel(MovingTracer *trc)
 {
     AutoDisableProxyCheck noProxyCheck(rt); // These checks assert when run in parallel.
 
     const size_t minTasks = 2;
     const size_t maxTasks = 8;
     size_t targetTaskCount = HelperThreadState().cpuCount / 2;
     size_t taskCount = Min(Max(targetTaskCount, minTasks), maxTasks);
     UpdateCellPointersTask bgTasks[maxTasks];
     UpdateCellPointersTask fgTask;
 
-    ArenasToUpdate fgArenas(zone, ArenasToUpdate::FOREGROUND);
-    ArenasToUpdate bgArenas(zone, ArenasToUpdate::BACKGROUND);
+    ArenasToUpdate fgArenas(rt, ArenasToUpdate::FOREGROUND);
+    ArenasToUpdate bgArenas(rt, ArenasToUpdate::BACKGROUND);
 
     unsigned tasksStarted = 0;
     {
         AutoLockHelperThreadState lock;
         unsigned i;
         for (i = 0; i < taskCount && !bgArenas.done(); ++i) {
             bgTasks[i].init(rt, &bgArenas, lock);
             startTask(bgTasks[i], gcstats::PHASE_COMPACT_UPDATE_CELLS);
@@ -2600,149 +2578,148 @@ GCRuntime::updateAllCellPointersParallel
     {
         AutoLockHelperThreadState lock;
         for (unsigned i = 0; i < tasksStarted; ++i)
             joinTask(bgTasks[i], gcstats::PHASE_COMPACT_UPDATE_CELLS);
     }
 }
 
 void
-GCRuntime::updateAllCellPointersSerial(MovingTracer *trc, Zone *zone)
+GCRuntime::updateAllCellPointersSerial(MovingTracer *trc)
 {
     UpdateCellPointersTask task;
     {
         AutoLockHelperThreadState lock;
-        ArenasToUpdate allArenas(zone, ArenasToUpdate::ALL);
+        ArenasToUpdate allArenas(rt, ArenasToUpdate::ALL);
         task.init(rt, &allArenas, lock);
     }
     task.runFromMainThread(rt);
 }
 
 /*
  * Update pointers to relocated cells by doing a full heap traversal and sweep.
  *
  * The latter is necessary to update weak references which are not marked as
  * part of the traversal.
  */
 void
-GCRuntime::updatePointersToRelocatedCells(Zone *zone)
-{
-    MOZ_ASSERT(zone->isGCCompacting());
+GCRuntime::updatePointersToRelocatedCells()
+{
     MOZ_ASSERT(rt->currentThreadHasExclusiveAccess());
 
     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT_UPDATE);
     MovingTracer trc(rt);
 
     // Fixup compartment global pointers as these get accessed during marking.
-    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
+    for (GCCompartmentsIter comp(rt); !comp.done(); comp.next())
         comp->fixupAfterMovingGC();
 
     // Fixup cross compartment wrappers as we assert the existence of wrappers in the map.
     for (CompartmentsIter comp(rt, SkipAtoms); !comp.done(); comp.next()) {
-        // Sweep the wrapper map to update its pointers.
         comp->sweepCrossCompartmentWrappers();
-
-        // Mark the contents of the map to update each wrapper's cross compartment pointer.
         comp->markCrossCompartmentWrappers(&trc);
     }
 
     // Iterate through all cells that can contain JSObject pointers to update
     // them. Since updating each cell is independent we try to parallelize this
     // as much as possible.
     if (CanUseExtraThreads())
-        updateAllCellPointersParallel(&trc, zone);
+        updateAllCellPointersParallel(&trc);
     else
-        updateAllCellPointersSerial(&trc, zone);
+        updateAllCellPointersSerial(&trc);
 
     // Mark roots to update them.
     {
         markRuntime(&trc, MarkRuntime);
 
         gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
         Debugger::markAll(&trc);
         Debugger::markAllCrossCompartmentEdges(&trc);
 
-        for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
+        for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
             WeakMapBase::markAll(c, &trc);
             if (c->watchpointMap)
                 c->watchpointMap->markAll(&trc);
         }
 
         // Mark all gray roots, making sure we call the trace callback to get the
         // current set.
         if (JSTraceDataOp op = grayRootTracer.op)
             (*op)(&trc, grayRootTracer.data);
     }
 
     // Sweep everything to fix up weak pointers
     WatchpointMap::sweepAll(rt);
     Debugger::sweepAll(rt->defaultFreeOp());
-    rt->gc.sweepZoneAfterCompacting(zone);
+    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
+        if (zone->isGCCompacting())
+            rt->gc.sweepZoneAfterCompacting(zone);
+    }
 
     // Type inference may put more blocks here to free.
     freeLifoAlloc.freeAll();
 
     // Clear runtime caches that can contain cell pointers.
     // TODO: Should possibly just call purgeRuntime() here.
     rt->newObjectCache.purge();
     rt->nativeIterCache.purge();
 
     // Call callbacks to get the rest of the system to fixup other untraced pointers.
     callWeakPointerCallbacks();
 }
 
 #ifdef DEBUG
 void
-GCRuntime::protectRelocatedArenas()
-{
-    for (ArenaHeader* arena = relocatedArenasToRelease, *next; arena; arena = next) {
+GCRuntime::protectRelocatedArenas(ArenaHeader *relocatedList)
+{
+    for (ArenaHeader* arena = relocatedList, *next; arena; arena = next) {
         next = arena->next;
 #if defined(XP_WIN)
         DWORD oldProtect;
         if (!VirtualProtect(arena, ArenaSize, PAGE_NOACCESS, &oldProtect))
             MOZ_CRASH();
 #else  // assume Unix
         if (mprotect(arena, ArenaSize, PROT_NONE))
             MOZ_CRASH();
 #endif
     }
 }
 
 void
-GCRuntime::unprotectRelocatedArenas()
-{
-    for (ArenaHeader* arena = relocatedArenasToRelease; arena; arena = arena->next) {
+GCRuntime::unprotectRelocatedArenas(ArenaHeader *relocatedList)
+{
+    for (ArenaHeader* arena = relocatedList; arena; arena = arena->next) {
 #if defined(XP_WIN)
         DWORD oldProtect;
         if (!VirtualProtect(arena, ArenaSize, PAGE_READWRITE, &oldProtect))
             MOZ_CRASH();
 #else  // assume Unix
         if (mprotect(arena, ArenaSize, PROT_READ | PROT_WRITE))
             MOZ_CRASH();
 #endif
     }
 }
 #endif
 
 void
-GCRuntime::releaseRelocatedArenas()
+GCRuntime::releaseRelocatedArenas(ArenaHeader *relocatedList)
 {
     AutoLockGC lock(rt);
-    releaseRelocatedArenasWithoutUnlocking(lock);
+    releaseRelocatedArenasWithoutUnlocking(relocatedList, lock);
     expireChunksAndArenas(true, lock);
 }
 
 void
-GCRuntime::releaseRelocatedArenasWithoutUnlocking(const AutoLockGC &lock)
+GCRuntime::releaseRelocatedArenasWithoutUnlocking(ArenaHeader *relocatedList, const AutoLockGC &lock)
 {
     // Release the relocated arenas, now containing only forwarding pointers
     unsigned count = 0;
-    while (relocatedArenasToRelease) {
-        ArenaHeader *aheader = relocatedArenasToRelease;
-        relocatedArenasToRelease = relocatedArenasToRelease->next;
+    while (relocatedList) {
+        ArenaHeader *aheader = relocatedList;
+        relocatedList = relocatedList->next;
 
         // Clear the mark bits
         aheader->unmarkAll();
 
         // Mark arena as empty
         AllocKind thingKind = aheader->getAllocKind();
         size_t thingSize = aheader->getThingSize();
         Arena *arena = aheader->getArena();
@@ -2762,18 +2739,19 @@ GCRuntime::releaseRelocatedArenasWithout
 
 void
 GCRuntime::releaseHeldRelocatedArenas()
 {
 #ifdef DEBUG
     // In debug mode we don't release relocated arenas straight away.  Instead
     // we protect them and hold onto them until the next GC sweep phase to catch
     // any pointers to them that didn't get forwarded.
-    unprotectRelocatedArenas();
-    releaseRelocatedArenas();
+    unprotectRelocatedArenas(relocatedArenasToRelease);
+    releaseRelocatedArenas(relocatedArenasToRelease);
+    relocatedArenasToRelease = nullptr;
 #endif
 }
 
 void
 ReleaseArenaList(JSRuntime *rt, ArenaHeader *aheader, const AutoLockGC &lock)
 {
     ArenaHeader *next;
     for (; aheader; aheader = next) {
@@ -3424,16 +3402,17 @@ GCRuntime::sweepBackgroundThings(ZoneLis
 }
 
 void
 GCRuntime::assertBackgroundSweepingFinished()
 {
 #ifdef DEBUG
     MOZ_ASSERT(backgroundSweepZones.isEmpty());
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
+        MOZ_ASSERT(!zone->isOnList());
         for (unsigned i = 0; i < FINALIZE_LIMIT; ++i) {
             MOZ_ASSERT(!zone->arenas.arenaListsToSweep[i]);
             MOZ_ASSERT(zone->arenas.doneBackgroundFinalize(AllocKind(i)));
         }
     }
     MOZ_ASSERT(freeLifoAlloc.computedSizeOfExcludingThis() == 0);
 #endif
 }
@@ -5491,109 +5470,108 @@ GCRuntime::endSweepPhase(bool lastGC)
             if (e.front().key().kind != CrossCompartmentKey::StringWrapper)
                 AssertNotOnGrayList(&e.front().value().unbarrieredGet().toObject());
         }
     }
 #endif
 }
 
 GCRuntime::IncrementalProgress
-GCRuntime::beginCompactPhase()
+GCRuntime::compactPhase(JS::gcreason::Reason reason)
 {
     gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT);
 
     if (isIncremental) {
         // Poll for end of background sweeping
         AutoLockGC lock(rt);
         if (isBackgroundSweeping())
             return NotFinished;
     } else {
         waitBackgroundSweepEnd();
     }
 
-    MOZ_ASSERT(zonesToMaybeCompact.isEmpty());
-    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
-        if (CanRelocateZone(rt, zone))
-            zonesToMaybeCompact.append(zone);
-    }
-
-    MOZ_ASSERT(!relocatedArenasToRelease);
-    startedCompacting = true;
-    return Finished;
-}
-
-GCRuntime::IncrementalProgress
-GCRuntime::compactPhase(JS::gcreason::Reason reason, SliceBudget &sliceBudget)
-{
     MOZ_ASSERT(rt->gc.nursery.isEmpty());
     assertBackgroundSweepingFinished();
-    MOZ_ASSERT(startedCompacting);
-
-    gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT);
-
-    while (!zonesToMaybeCompact.isEmpty()) {
-        Zone *zone = zonesToMaybeCompact.front();
-        MOZ_ASSERT(zone->isGCFinished());
-        if (relocateArenas(zone, reason, sliceBudget)) {
-            zone->setGCState(Zone::Compact);
-            updatePointersToRelocatedCells(zone);
-            zone->setGCState(Zone::Finished);
+
+    ArenaHeader *relocatedList = relocateArenas(reason);
+    if (relocatedList)
+        updatePointersToRelocatedCells();
+
+#ifdef DEBUG
+    for (ArenaHeader *arena = relocatedList; arena; arena = arena->next) {
+        for (ArenaCellIterUnderFinalize i(arena); !i.done(); i.next()) {
+            TenuredCell *src = i.getCell();
+            MOZ_ASSERT(RelocationOverlay::isCellForwarded(src));
+            TenuredCell *dest = Forwarded(src);
+            MOZ_ASSERT(src->isMarked(BLACK) == dest->isMarked(BLACK));
+            MOZ_ASSERT(src->isMarked(GRAY) == dest->isMarked(GRAY));
         }
-        zonesToMaybeCompact.removeFront();
-        if (sliceBudget.isOverBudget())
-            break;
-    }
+    }
+#endif
+
+    // Release the relocated arenas, or in debug builds queue them to be
+    // released until the start of the next GC unless this is the last GC or we
+    // are doing a last ditch GC.
+#ifndef DEBUG
+    releaseRelocatedArenas(relocatedList);
+#else
+    if (reason == JS::gcreason::DESTROY_RUNTIME || reason == JS::gcreason::LAST_DITCH) {
+        releaseRelocatedArenas(relocatedList);
+    } else {
+        MOZ_ASSERT(!relocatedArenasToRelease);
+        protectRelocatedArenas(relocatedList);
+        relocatedArenasToRelease = relocatedList;
+    }
+#endif
 
 #ifdef DEBUG
     CheckHashTablesAfterMovingGC(rt);
+    for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
+        if (zone->isGCCompacting()) {
+            MOZ_ASSERT(!zone->isPreservingCode());
+            zone->arenas.checkEmptyFreeLists();
+
+            // Check that we did as much compaction as we should have. There
+            // should always be less than one arena's worth of free cells.
+            for (size_t i = 0; i < FINALIZE_LIMIT; i++) {
+                size_t thingsPerArena = Arena::thingsPerArena(Arena::thingSize(AllocKind(i)));
+                if (CanRelocateAllocKind(AllocKind(i))) {
+                    ArenaList &al = zone->arenas.arenaLists[i];
+                    size_t freeCells = 0;
+                    for (ArenaHeader *arena = al.arenaAfterCursor(); arena; arena = arena->next)
+                        freeCells += arena->countFreeCells();
+                    MOZ_ASSERT(freeCells < thingsPerArena);
+                }
+            }
+        }
+    }
 #endif
-
-    return zonesToMaybeCompact.isEmpty() ? Finished : NotFinished;
-}
-
-void
-GCRuntime::endCompactPhase(JS::gcreason::Reason reason)
-{
-    // Release the relocated arenas, or in debug builds queue them to be
-    // released at the start of the next GC unless this is the last GC or we are
-    // doing a last ditch GC.
-#ifndef DEBUG
-    releaseRelocatedArenas();
-#else
-    if (reason == JS::gcreason::DESTROY_RUNTIME || reason == JS::gcreason::LAST_DITCH)
-        releaseRelocatedArenas();
-    else
-        protectRelocatedArenas();
-#endif
-
-    startedCompacting = false;
+    return Finished;
 }
 
 void
 GCRuntime::finishCollection(JS::gcreason::Reason reason)
 {
     MOZ_ASSERT(marker.isDrained());
     marker.stop();
 
     uint64_t currentTime = PRMJ_Now();
     schedulingState.updateHighFrequencyMode(lastGCTime, currentTime, tunables);
 
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         if (zone->isCollecting()) {
-            MOZ_ASSERT(zone->isGCFinished());
+            MOZ_ASSERT(zone->isGCFinished() || zone->isGCCompacting());
             zone->setGCState(Zone::NoGC);
             zone->active = false;
         }
 
         MOZ_ASSERT(!zone->isCollecting());
         MOZ_ASSERT(!zone->wasGCStarted());
     }
 
-    MOZ_ASSERT(zonesToMaybeCompact.isEmpty());
-
     if (invocationKind == GC_SHRINK) {
         // Ensure excess chunks are returned to the system and free arenas
         // decommitted.
         shrinkBuffers();
     }
 
     lastGCTime = currentTime;
 
@@ -5738,42 +5716,37 @@ GCRuntime::resetIncrementalGC(const char
 
       case COMPACT: {
         {
             gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
             rt->gc.waitBackgroundSweepOrAllocEnd();
         }
 
         bool wasCompacting = isCompacting;
-
-        isCompacting = true;
-        startedCompacting = true;
-        zonesToMaybeCompact.clear();
+        isCompacting = false;
 
         SliceBudget budget;
         incrementalCollectSlice(budget, JS::gcreason::RESET);
 
         isCompacting = wasCompacting;
         break;
       }
 
       default:
         MOZ_CRASH("Invalid incremental GC state");
     }
 
     stats.reset(reason);
 
 #ifdef DEBUG
-    assertBackgroundSweepingFinished();
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
-        MOZ_ASSERT(!zone->isCollecting());
         MOZ_ASSERT(!zone->needsIncrementalBarrier());
-        MOZ_ASSERT(!zone->isOnList());
-    }
-    MOZ_ASSERT(zonesToMaybeCompact.isEmpty());
+        for (unsigned i = 0; i < FINALIZE_LIMIT; ++i)
+            MOZ_ASSERT(!zone->arenas.arenaListsToSweep[i]);
+    }
 #endif
 }
 
 namespace {
 
 class AutoGCSlice {
   public:
     explicit AutoGCSlice(JSRuntime *rt);
@@ -5961,32 +5934,24 @@ GCRuntime::incrementalCollectSlice(Slice
 
       case SWEEP:
         if (sweepPhase(budget) == NotFinished)
             break;
 
         endSweepPhase(lastGC);
 
         incrementalState = COMPACT;
-        MOZ_ASSERT(!startedCompacting);
 
         /* Yield before compacting since it is not incremental. */
         if (isCompacting && isIncremental)
             break;
 
       case COMPACT:
-        if (isCompacting) {
-            if (!startedCompacting && beginCompactPhase() == NotFinished)
-                break;
-
-            if (compactPhase(reason, budget) == NotFinished)
-                break;
-
-            endCompactPhase(reason);
-        }
+        if (isCompacting && compactPhase(reason) == NotFinished)
+            break;
 
         finishCollection(reason);
 
         incrementalState = NO_INCREMENTAL;
         break;
 
       default:
         MOZ_ASSERT(false);
@@ -6436,21 +6401,22 @@ GCRuntime::onOutOfMallocMemory()
 
     AutoLockGC lock(rt);
     onOutOfMallocMemory(lock);
 }
 
 void
 GCRuntime::onOutOfMallocMemory(const AutoLockGC &lock)
 {
-#ifdef DEBUG
     // Release any relocated arenas we may be holding on to, without releasing
     // the GC lock.
-    unprotectRelocatedArenas();
-    releaseRelocatedArenasWithoutUnlocking(lock);
+#ifdef DEBUG
+    unprotectRelocatedArenas(relocatedArenasToRelease);
+    releaseRelocatedArenasWithoutUnlocking(relocatedArenasToRelease, lock);
+    relocatedArenasToRelease = nullptr;
 #endif
 
     // Throw away any excess chunks we have lying around.
     freeEmptyChunks(rt, lock);
 
     // Immediately decommit as many arenas as possible in the hopes that this
     // might let the OS scrape together enough pages to satisfy the failing
     // malloc request.
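
The ArenasToUpdate::next() code restored in the hunks above resumes its iteration by jumping back into the middle of the loop nest with goto, keeping every loop index in data members so repeated calls continue exactly where the previous one returned. A stripped-down sketch of just that resumption pattern, using a hypothetical nested container in place of zones and arenas:

#include <cstdio>
#include <vector>

// Resumable iterator: each call to next() hands out one element, and the goto
// re-enters the loops right after the previous return. All loop state lives in
// members, mirroring how ArenasToUpdate keeps its kind/arena cursor between calls.
class Resumable {
    const std::vector<std::vector<int>>& groups_;
    bool initialized_ = false;
    size_t g_ = 0;   // outer position (alloc-kind analogue)
    size_t i_ = 0;   // inner position (arena analogue)

  public:
    explicit Resumable(const std::vector<std::vector<int>>& groups) : groups_(groups) {}

    const int* next() {
        if (initialized_)
            goto resumePoint;                    // jump back in mid-flight
        initialized_ = true;
        for (g_ = 0; g_ < groups_.size(); ++g_) {
            for (i_ = 0; i_ < groups_[g_].size(); ++i_) {
                return &groups_[g_][i_];
              resumePoint:;
            }
        }
        return nullptr;                          // iteration finished
    }
};

int main() {
    std::vector<std::vector<int>> data = { {1, 2}, {}, {3} };
    Resumable it(data);
    while (const int* p = it.next())
        std::printf("%d\n", *p);                 // prints 1, 2, 3
    return 0;
}
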
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -472,17 +472,17 @@ class ArenaList {
         cursorp_ = other.cursorp_;
         check();
         return *this;
     }
 
     ArenaHeader *removeRemainingArenas(ArenaHeader **arenap);
     ArenaHeader **pickArenasToRelocate(size_t &arenaTotalOut, size_t &relocTotalOut);
     ArenaHeader *relocateArenas(ArenaHeader *toRelocate, ArenaHeader *relocated,
-                                SliceBudget &sliceBudget, gcstats::Statistics& stats);
+                                gcstats::Statistics& stats);
 };
 
 /*
  * A class that holds arenas in sorted order by appending arenas to specific
  * segments. Each segment has a head and a tail, which can be linked up to
  * other segments to create a contiguous ArenaList.
  */
 class SortedArenaList
@@ -800,17 +800,17 @@ class ArenaLists
 #endif
     }
 
     void checkEmptyFreeList(AllocKind kind) {
         MOZ_ASSERT(freeLists[kind].isEmpty());
     }
 
     bool relocateArenas(ArenaHeader *&relocatedListOut, JS::gcreason::Reason reason,
-                        SliceBudget &sliceBudget, gcstats::Statistics& stats);
+                        gcstats::Statistics& stats);
 
     void queueForegroundObjectsForSweep(FreeOp *fop);
     void queueForegroundThingsForSweep(FreeOp *fop);
 
     void mergeForegroundSweptObjectArenas();
 
     bool foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget,
                             SortedArenaList &sweepList);
@@ -1387,17 +1387,16 @@ class ZoneList
     ~ZoneList();
 
     bool isEmpty() const;
     Zone *front() const;
 
     void append(Zone *zone);
     void transferFrom(ZoneList &other);
     void removeFront();
-    void clear();
 
   private:
     explicit ZoneList(Zone *singleZone);
     void check() const;
 
     ZoneList(const ZoneList &other) = delete;
     ZoneList &operator=(const ZoneList &other) = delete;
 };
--- a/js/src/vm/Debugger.cpp
+++ b/js/src/vm/Debugger.cpp
@@ -2110,34 +2110,26 @@ Debugger::markCrossCompartmentEdges(JSTr
  * We must scan all Debugger objects regardless of whether they *currently* have
  * any debuggees in a compartment being GC'd, because the WeakMap entries
  * persist even when debuggees are removed.
  *
  * This happens during the initial mark phase, not iterative marking, because
  * all the edges being reported here are strong references.
  */
 /* static */ void
-Debugger::markIncomingCrossCompartmentEdges(JSTracer *trc)
+Debugger::markAllCrossCompartmentEdges(JSTracer *trc)
 {
     JSRuntime *rt = trc->runtime();
 
     for (Debugger *dbg = rt->debuggerList.getFirst(); dbg; dbg = dbg->getNext()) {
         if (!dbg->object->zone()->isCollecting())
             dbg->markCrossCompartmentEdges(trc);
     }
 }
 
-/* static */ void
-Debugger::markAllCrossCompartmentEdges(JSTracer *trc)
-{
-    JSRuntime *rt = trc->runtime();
-    for (Debugger *dbg = rt->debuggerList.getFirst(); dbg; dbg = dbg->getNext())
-        dbg->markCrossCompartmentEdges(trc);
-}
-
 /*
  * This method has two tasks:
  *   1. Mark Debugger objects that are unreachable except for debugger hooks that
  *      may yet be called.
  *   2. Mark breakpoint handlers.
  *
  * This happens during the iterative part of the GC mark phase. This method
  * returns true if it has to mark anything; GC calls it repeatedly until it
--- a/js/src/vm/Debugger.h
+++ b/js/src/vm/Debugger.h
@@ -543,17 +543,16 @@ class Debugger : private mozilla::Linked
      *       - it has a debugger hook installed
      *       - it has a breakpoint set on a live script
      *       - it has a watchpoint set on a live object.
      *
      * Debugger::markAllIteratively handles the last case. If it finds any
      * Debugger objects that are definitely live but not yet marked, it marks
      * them and returns true. If not, it returns false.
      */
-    static void markIncomingCrossCompartmentEdges(JSTracer *tracer);
     static void markAllCrossCompartmentEdges(JSTracer *tracer);
     static bool markAllIteratively(GCMarker *trc);
     static void markAll(JSTracer *trc);
     static void sweepAll(FreeOp *fop);
     static void detachAllDebuggersFromGlobal(FreeOp *fop, GlobalObject *global);
     static void findCompartmentEdges(JS::Zone *v, gc::ComponentFinder<JS::Zone> &finder);
 
     /*