Backed out changeset 7f1da255d058 (bug 1119537) for causing frequent SM(e) Memory-drainAllocationsLog-13.js timeouts.
author: Ryan VanderMeulen <ryanvm@gmail.com>
date: Sat, 09 Apr 2016 15:02:53 -0400
changeset 292524 1bb3c8365cc4edc7552e6bd7390e6deb917b65f7
parent 292523 f9aed5af58b63e251092035cb0168f0b1f7ea561
child 292525 ee048ce0f8d5d6570b7582728b3ac540634a367c
push id: 18623
push user: ryanvm@gmail.com
push date: Sun, 10 Apr 2016 20:21:27 +0000
treeherder: fx-team@29d5a4175c8b [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 1119537
milestone: 48.0a1
backs out: 7f1da255d0584a6d6c5ff2579a8e362496934960
Backed out changeset 7f1da255d058 (bug 1119537) for causing frequent SM(e) Memory-drainAllocationsLog-13.js timeouts.
js/src/builtin/TestingFunctions.cpp
js/src/gc/GCRuntime.h
js/src/gc/Nursery.cpp
js/src/jit-test/tests/gc/incremental-state.js
js/src/jsgc.cpp
js/src/jsgc.h
js/src/vm/HelperThreads.cpp
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -812,18 +812,16 @@ GCState(JSContext* cx, unsigned argc, Va
     else if (globalState == gc::MARK)
         state = "mark";
     else if (globalState == gc::SWEEP)
         state = "sweep";
     else if (globalState == gc::FINALIZE)
         state = "finalize";
     else if (globalState == gc::COMPACT)
         state = "compact";
-    else if (globalState == gc::DECOMMIT)
-        state = "decommit";
     else
         MOZ_CRASH("Unobserveable global GC state");
 
     JSString* str = JS_NewStringCopyZ(cx, state);
     if (!str)
         return false;
     args.rval().setString(str);
     return true;
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -77,34 +77,17 @@ class BackgroundAllocTask : public GCPar
 
     const bool enabled_;
 
   public:
     BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool);
     bool enabled() const { return enabled_; }
 
   protected:
-    void run() override;
-};
-
-// Search the provided Chunks for free arenas and decommit them.
-class BackgroundDecommitTask : public GCParallelTask
-{
-  public:
-    using ChunkVector = mozilla::Vector<Chunk*>;
-
-    explicit BackgroundDecommitTask(JSRuntime *rt) : runtime(rt) {}
-    void setChunksToScan(ChunkVector &chunks);
-
-  protected:
-    void run() override;
-
-  private:
-    JSRuntime* runtime;
-    ChunkVector toDecommit;
+    virtual void run() override;
 };
 
 /*
  * Encapsulates all of the GC tunables. These are effectively constant and
  * should only be modified by setParameter.
  */
 class GCSchedulingTunables
 {
@@ -968,17 +951,17 @@ class GCRuntime
     void endMarkingZoneGroup();
     void beginSweepingZoneGroup();
     bool shouldReleaseObservedTypes();
     void endSweepingZoneGroup();
     IncrementalProgress sweepPhase(SliceBudget& sliceBudget);
     void endSweepPhase(bool lastGC);
     void sweepZones(FreeOp* fop, bool lastGC);
     void decommitAllWithoutUnlocking(const AutoLockGC& lock);
-    void startDecommit();
+    void decommitArenas(AutoLockGC& lock);
     void expireChunksAndArenas(bool shouldShrink, AutoLockGC& lock);
     void queueZonesForBackgroundSweep(ZoneList& zones);
     void sweepBackgroundThings(ZoneList& zones, LifoAlloc& freeBlocks, ThreadType threadType);
     void assertBackgroundSweepingFinished();
     bool shouldCompact();
     void beginCompactPhase();
     IncrementalProgress compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget);
     void endCompactPhase(JS::gcreason::Reason reason);
@@ -1348,17 +1331,16 @@ class GCRuntime
 
     /* Synchronize GC heap access between main thread and GCHelperState. */
     PRLock* lock;
 #ifdef DEBUG
     mozilla::Atomic<PRThread*> lockOwner;
 #endif
 
     BackgroundAllocTask allocTask;
-    BackgroundDecommitTask decommitTask;
     GCHelperState helperState;
 
     /*
      * During incremental sweeping, this field temporarily holds the arenas of
      * the current AllocKind being swept in order of increasing free space.
      */
     SortedArenaList incrementalSweepList;
 
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -588,17 +588,17 @@ js::Nursery::collect(JSRuntime* rt, JS::
 #undef TIME_END
 #undef TIME_TOTAL
 
 void
 js::Nursery::FreeMallocedBuffersTask::transferBuffersToFree(MallocedBuffersSet& buffersToFree)
 {
     // Transfer the contents of the source set to the task's buffers_ member by
     // swapping the sets, which also clears the source.
-    MOZ_ASSERT(!isRunningWithLockHeld());
+    MOZ_ASSERT(!isRunning());
     MOZ_ASSERT(buffers_.empty());
     mozilla::Swap(buffers_, buffersToFree);
 }
 
 void
 js::Nursery::FreeMallocedBuffersTask::run()
 {
     for (MallocedBuffersSet::Range r = buffers_.all(); !r.empty(); r.popFront())
--- a/js/src/jit-test/tests/gc/incremental-state.js
+++ b/js/src/jit-test/tests/gc/incremental-state.js
@@ -7,57 +7,52 @@ gczeal(0);
 // Non-incremental GC.
 gc();
 assertEq(gcstate(), "none");
 
 // Incremental GC in minimal slice. Note that finalization always uses zero-
 // sized slices while background finalization is on-going, so we need to loop.
 gcslice(1000000);
 while (gcstate() == "finalize") { gcslice(1); }
-while (gcstate() == "decommit") { gcslice(1); }
 assertEq(gcstate(), "none");
 
 // Incremental GC in multiple slices: if marking takes more than one slice,
 // we yield before we start sweeping.
 gczeal(0);
 gcslice(1);
 assertEq(gcstate(), "mark");
 gcslice(1000000);
 assertEq(gcstate(), "mark");
 gcslice(1000000);
 while (gcstate() == "finalize") { gcslice(1); }
-while (gcstate() == "decommit") { gcslice(1); }
 assertEq(gcstate(), "none");
 
 // Zeal mode 8: Incremental GC in two main slices:
 //   1) mark roots
 //   2) mark and sweep
 //   *) finalize.
 gczeal(8, 0);
 gcslice(1);
 assertEq(gcstate(), "mark");
 gcslice(1);
 while (gcstate() == "finalize") { gcslice(1); }
-while (gcstate() == "decommit") { gcslice(1); }
 assertEq(gcstate(), "none");
 
 // Zeal mode 9: Incremental GC in two main slices:
 //   1) mark roots and marking
 //   2) new marking and sweeping
 //   *) finalize.
 gczeal(9, 0);
 gcslice(1);
 assertEq(gcstate(), "mark");
 gcslice(1);
 while (gcstate() == "finalize") { gcslice(1); }
-while (gcstate() == "decommit") { gcslice(1); }
 assertEq(gcstate(), "none");
 
 // Zeal mode 10: Incremental GC in multiple slices (always yeilds before
 // sweeping). This test uses long slices to prove that this zeal mode yields
 // in sweeping, where normal IGC (above) does not.
 gczeal(10, 0);
 gcslice(1000000);
 assertEq(gcstate(), "sweep");
 gcslice(1000000);
 while (gcstate() == "finalize") { gcslice(1); }
-while (gcstate() == "decommit") { gcslice(1); }
 assertEq(gcstate(), "none");
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -1028,17 +1028,17 @@ GCRuntime::wantBackgroundAllocation(cons
            emptyChunks(lock).count() < tunables.minEmptyChunkCount(lock) &&
            (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
 }
 
 void
 GCRuntime::startBackgroundAllocTaskIfIdle()
 {
     AutoLockHelperThreadState helperLock;
-    if (allocTask.isRunningWithLockHeld())
+    if (allocTask.isRunning())
         return;
 
     // Join the previous invocation of the task. This will return immediately
     // if the thread has never been started.
     allocTask.joinWithLockHeld();
     allocTask.startWithLockHeld();
 }
 
@@ -1175,17 +1175,16 @@ GCRuntime::GCRuntime(JSRuntime* rt) :
     alwaysPreserveCode(false),
 #ifdef DEBUG
     inUnsafeRegion(0),
     noGCOrAllocationCheck(0),
     noNurseryAllocationCheck(0),
 #endif
     lock(nullptr),
     allocTask(rt, emptyChunks_),
-    decommitTask(rt),
     helperState(rt)
 {
     setGCMode(JSGC_MODE_GLOBAL);
 }
 
 #ifdef JS_GC_ZEAL
 
 void
@@ -1359,17 +1358,16 @@ GCRuntime::finish()
 
     /*
      * Wait until the background finalization and allocation stops and the
      * helper thread shuts down before we forcefully release any remaining GC
      * memory.
      */
     helperState.finish();
     allocTask.cancel(GCParallelTask::CancelAndWait);
-    decommitTask.cancel(GCParallelTask::CancelAndWait);
 
 #ifdef JS_GC_ZEAL
     /* Free memory associated with GC verification. */
     finishVerifier();
 #endif
 
     /* Delete all remaining zones. */
     if (rt->gcInitialized) {
@@ -3371,92 +3369,66 @@ GCRuntime::decommitAllWithoutUnlocking(c
 {
     MOZ_ASSERT(emptyChunks(lock).count() == 0);
     for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done(); chunk.next())
         chunk->decommitAllArenasWithoutUnlocking(lock);
     MOZ_ASSERT(availableChunks(lock).verify());
 }
 
 void
-GCRuntime::startDecommit()
-{
-    MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
-    MOZ_ASSERT(!decommitTask.isRunning());
-
-    // If we are allocating heavily enough to trigger "high freqency" GC, then
-    // skip decommit so that we do not compete with the mutator.
-    if (schedulingState.inHighFrequencyGCMode())
-        return;
-
-    BackgroundDecommitTask::ChunkVector toDecommit;
-    {
-        AutoLockGC lock(rt);
-
-        // Verify that all entries in the empty chunks pool are already decommitted.
-        for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done(); chunk.next())
-            MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
-
-        // Since we release the GC lock while doing the decommit syscall below,
-        // it is dangerous to iterate the available list directly, as the main
-        // thread could modify it concurrently. Instead, we build and pass an
-        // explicit Vector containing the Chunks we want to visit.
-        MOZ_ASSERT(availableChunks(lock).verify());
-        for (ChunkPool::Iter iter(availableChunks(lock)); !iter.done(); iter.next()) {
-            if (!toDecommit.append(iter.get())) {
-                // The OOM handler does a full, immediate decommit.
-                return onOutOfMallocMemory(lock);
-            }
+GCRuntime::decommitArenas(AutoLockGC& lock)
+{
+    // Verify that all entries in the empty chunks pool are decommitted.
+    for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done(); chunk.next())
+        MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
+
+    // Build a Vector of all current available Chunks. Since we release the
+    // gc lock while doing the decommit syscall, it is dangerous to iterate
+    // the available list directly, as concurrent operations can modify it.
+    mozilla::Vector<Chunk*> toDecommit;
+    MOZ_ASSERT(availableChunks(lock).verify());
+    for (ChunkPool::Iter iter(availableChunks(lock)); !iter.done(); iter.next()) {
+        if (!toDecommit.append(iter.get())) {
+            // The OOM handler does a full, immediate decommit, so there is
+            // nothing more to do here in any case.
+            return onOutOfMallocMemory(lock);
         }
     }
-    decommitTask.setChunksToScan(toDecommit);
-
-    if (sweepOnBackgroundThread && decommitTask.start())
-        return;
-
-    decommitTask.runFromMainThread(rt);
-}
-
-void
-js::gc::BackgroundDecommitTask::setChunksToScan(ChunkVector &chunks)
-{
-    MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime));
-    MOZ_ASSERT(!isRunning());
-    MOZ_ASSERT(toDecommit.empty());
-    Swap(toDecommit, chunks);
-}
-
-/* virtual */ void
-js::gc::BackgroundDecommitTask::run()
-{
-    AutoLockGC lock(runtime);
-
-    for (Chunk* chunk : toDecommit) {
+
+    // Start at the tail and stop before the first chunk: we allocate from the
+    // head and don't want to thrash with the mutator.
+    for (size_t i = toDecommit.length(); i > 1; --i) {
+        Chunk* chunk = toDecommit[i - 1];
+        MOZ_ASSERT(chunk);
+
         // The arena list is not doubly-linked, so we have to work in the free
         // list order and not in the natural order.
         while (chunk->info.numArenasFreeCommitted) {
-            bool ok = chunk->decommitOneFreeArena(runtime, lock);
-
-            // If we are low enough on memory that we can't update the page
-            // tables, or if we need to return for any other reason, break out
-            // of the loop.
-            if (cancel_ || !ok)
-                break;
+            bool ok = chunk->decommitOneFreeArena(rt, lock);
+
+            // FIXME Bug 1095620: add cancellation support when this becomes
+            // a ParallelTask.
+            if (/* cancel_ || */ !ok)
+                return;
         }
     }
-    toDecommit.clearAndFree();
+    MOZ_ASSERT(availableChunks(lock).verify());
 }
 
 void
 GCRuntime::expireChunksAndArenas(bool shouldShrink, AutoLockGC& lock)
 {
     ChunkPool toFree = expireEmptyChunkPool(shouldShrink, lock);
     if (toFree.count()) {
         AutoUnlockGC unlock(lock);
         FreeChunkPool(rt, toFree);
     }
+
+    if (shouldShrink)
+        decommitArenas(lock);
 }
 
 void
 GCRuntime::sweepBackgroundThings(ZoneList& zones, LifoAlloc& freeBlocks, ThreadType threadType)
 {
     freeBlocks.freeAll();
 
     if (zones.isEmpty())
@@ -5762,17 +5734,16 @@ void
 GCRuntime::endCompactPhase(JS::gcreason::Reason reason)
 {
     startedCompacting = false;
 }
 
 void
 GCRuntime::finishCollection(JS::gcreason::Reason reason)
 {
-    assertBackgroundSweepingFinished();
     MOZ_ASSERT(marker.isDrained());
     marker.stop();
     clearBufferedGrayRoots();
     MemProfiler::SweepTenured(rt);
 
     uint64_t currentTime = PRMJ_Now();
     schedulingState.updateHighFrequencyMode(lastGCTime, currentTime, tunables);
 
@@ -5791,16 +5762,25 @@ GCRuntime::finishCollection(JS::gcreason
 
     if (invocationKind == GC_SHRINK) {
         // Ensure excess chunks are returns to the system and free arenas
         // decommitted.
         shrinkBuffers();
     }
 
     lastGCTime = currentTime;
+
+    // If this is an OOM GC reason, wait on the background sweeping thread
+    // before returning to ensure that we free as much as possible. If this is
+    // a zeal-triggered GC, we want to ensure that the mutator can continue
+    // allocating on the same pages to reduce fragmentation.
+    if (IsOOMReason(reason) || reason == JS::gcreason::DEBUG_GC) {
+        gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
+        rt->gc.waitBackgroundSweepOrAllocEnd();
+    }
 }
 
 static const char*
 HeapStateToLabel(JS::HeapState heapState)
 {
     switch (heapState) {
       case JS::HeapState::MinorCollecting:
         return "js::Nursery::collect";
@@ -5939,22 +5919,16 @@ GCRuntime::resetIncrementalGC(const char
 
         auto unlimited = SliceBudget::unlimited();
         incrementalCollectSlice(unlimited, JS::gcreason::RESET);
 
         isCompacting = wasCompacting;
         break;
       }
 
-      case DECOMMIT: {
-        auto unlimited = SliceBudget::unlimited();
-        incrementalCollectSlice(unlimited, JS::gcreason::RESET);
-        break;
-      }
-
       default:
         MOZ_CRASH("Invalid incremental GC state");
     }
 
     stats.reset(reason);
 
 #ifdef DEBUG
     assertBackgroundSweepingFinished();
@@ -6206,38 +6180,23 @@ GCRuntime::incrementalCollectSlice(Slice
                 beginCompactPhase();
 
             if (compactPhase(reason, budget) == NotFinished)
                 break;
 
             endCompactPhase(reason);
         }
 
-        startDecommit();
-        incrementalState = DECOMMIT;
-
-        MOZ_FALLTHROUGH;
-
-      case DECOMMIT:
-        {
-            gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
-
-            // Yield until background decommit is done.
-            if (isIncremental && decommitTask.isRunning())
-                break;
-
-            decommitTask.join();
-        }
-
         finishCollection(reason);
+
         incrementalState = NO_INCREMENTAL;
         break;
 
       default:
-        MOZ_CRASH("unexpected GC incrementalState");
+        MOZ_ASSERT(false);
     }
 }
 
 IncrementalSafety
 gc::IsIncrementalGCSafe(JSRuntime* rt)
 {
     MOZ_ASSERT(!rt->mainThread.suppressGC);
 
@@ -6356,22 +6315,20 @@ GCRuntime::gcCycle(bool nonincrementalBy
     MOZ_ASSERT(!rt->mainThread.suppressGC);
 
     // Assert if this is a GC unsafe region.
     JS::AutoAssertOnGC::VerifyIsSafeToGC(rt);
 
     {
         gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
 
-        // Background finalization and decommit are finished by defininition
-        // before we can start a new GC session.
-        if (!isIncrementalGCInProgress()) {
-            assertBackgroundSweepingFinished();
-            MOZ_ASSERT(!decommitTask.isRunning());
-        }
+        // As we are about to clear the mark bits, wait for background
+        // finalization to finish. We only need to wait on the first slice.
+        if (!isIncrementalGCInProgress())
+            waitBackgroundSweepEnd();
 
         // We must also wait for background allocation to finish so we can
         // avoid taking the GC lock when manipulating the chunks during the GC.
         // The background alloc task can run between slices, so we must wait
         // for it at the start of every slice.
         allocTask.cancel(GCParallelTask::CancelAndWait);
     }
 
@@ -6702,19 +6659,16 @@ GCRuntime::shrinkBuffers()
 }
 
 void
 GCRuntime::onOutOfMallocMemory()
 {
     // Stop allocating new chunks.
     allocTask.cancel(GCParallelTask::CancelAndWait);
 
-    // Make sure we release anything queued for release.
-    decommitTask.join();
-
     // Wait for background free of nursery huge slots to finish.
     nursery.waitBackgroundFreeEnd();
 
     AutoLockGC lock(rt);
     onOutOfMallocMemory(lock);
 }
 
 void
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -44,17 +44,17 @@ struct FinalizePhase;
 
 enum State {
     NO_INCREMENTAL,
     MARK_ROOTS,
     MARK,
     SWEEP,
     FINALIZE,
     COMPACT,
-    DECOMMIT,
+
     NUM_STATES
 };
 
 /* Map from C++ type to alloc kind. JSObject does not have a 1:1 mapping, so must use Arena::thingSize. */
 template <typename T> struct MapTypeToFinalizeKind {};
 template <> struct MapTypeToFinalizeKind<JSScript>          { static const AllocKind kind = AllocKind::SCRIPT; };
 template <> struct MapTypeToFinalizeKind<LazyScript>        { static const AllocKind kind = AllocKind::LAZY_SCRIPT; };
 template <> struct MapTypeToFinalizeKind<Shape>             { static const AllocKind kind = AllocKind::SHAPE; };
@@ -950,17 +950,16 @@ class GCParallelTask
     enum CancelMode { CancelNoWait, CancelAndWait};
     void cancel(CancelMode mode = CancelNoWait) {
         cancel_ = true;
         if (mode == CancelAndWait)
             join();
     }
 
     // Check if a task is actively running.
-    bool isRunningWithLockHeld() const;
     bool isRunning() const;
 
     // This should be friended to HelperThread, but cannot be because it
     // would introduce several circular dependencies.
   public:
     virtual void runFromHelperThread();
 };
 
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -1087,29 +1087,22 @@ js::GCParallelTask::runFromHelperThread(
         duration_ = PRMJ_Now() - timeStart;
     }
 
     state = Finished;
     HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER);
 }
 
 bool
-js::GCParallelTask::isRunningWithLockHeld() const
+js::GCParallelTask::isRunning() const
 {
     MOZ_ASSERT(HelperThreadState().isLocked());
     return state == Dispatched;
 }
 
-bool
-js::GCParallelTask::isRunning() const
-{
-    AutoLockHelperThreadState helperLock;
-    return isRunningWithLockHeld();
-}
-
 void
 HelperThread::handleGCParallelWorkload()
 {
     MOZ_ASSERT(HelperThreadState().isLocked());
     MOZ_ASSERT(HelperThreadState().canStartGCParallelTask());
     MOZ_ASSERT(idle());
 
     currentTask.emplace(HelperThreadState().gcParallelWorklist().popCopy());