Bug 1116455 - Use isIncrementalGCInProgress() rather than checking the state explicitly r=terrence
author: Jon Coppeard <jcoppeard@mozilla.com>
date: Wed, 31 Dec 2014 10:40:34 +0000
changeset 247544 f996eb4935c61fa1be6fe610bb6d2536fbf285b8
parent 247543 e66b5fdd480c6728db85ef8ac047f47856bd631a
child 247558 be87311dbf3707cdaed3ded558dc87b096fc2868
child 247608 feb16b98a8256d9d50a7402e4a285ec21a3b3eb9
push id: 4489
push user: raliiev@mozilla.com
push date: Mon, 23 Feb 2015 15:17:55 +0000
treeherder: mozilla-beta@fd7c3dc24146
reviewers: terrence
bugs: 1116455
milestone: 37.0a1
js/src/gc/GCRuntime.h
js/src/gc/Statistics.cpp
js/src/gc/Verifier.cpp
js/src/jsapi-tests/testGCFinalizeCallback.cpp
js/src/jsfriendapi.cpp
js/src/jsgc.cpp
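
Taken together, the patch is a mechanical substitution: every explicit `state() != gc::NO_INCREMENTAL` comparison becomes a call to the named predicate, and the `!verifyPreData` special case moves out of the predicate into the one public API that still needs it. A minimal standalone sketch of the before/after shape, with illustrative names rather than the real SpiderMonkey declarations:

    enum State { NO_INCREMENTAL, MARK, SWEEP };

    struct GCRuntimeSketch {
        State incrementalState = NO_INCREMENTAL;

        State state() const { return incrementalState; }

        // After the patch this is a pure state query; the pre-barrier
        // verifier check lives in JS::IsIncrementalGCInProgress() instead.
        bool isIncrementalGCInProgress() const {
            return state() != NO_INCREMENTAL;
        }

        void caller() {
            // Before: if (incrementalState != NO_INCREMENTAL) ...
            if (isIncrementalGCInProgress()) {
                // ... per-slice work ...
            }
        }
    };
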
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -424,17 +424,17 @@ class GCRuntime
 #endif
 
     void setAlwaysPreserveCode() { alwaysPreserveCode = true; }
 
     bool isIncrementalGCAllowed() { return incrementalAllowed; }
     void disallowIncrementalGC() { incrementalAllowed = false; }
 
     bool isIncrementalGCEnabled() { return mode == JSGC_MODE_INCREMENTAL && incrementalAllowed; }
-    bool isIncrementalGCInProgress() { return state() != gc::NO_INCREMENTAL && !verifyPreData; }
+    bool isIncrementalGCInProgress() { return state() != gc::NO_INCREMENTAL; }
 
     bool isGenerationalGCEnabled() { return generationalDisabled == 0; }
     void disableGenerationalGC();
     void enableGenerationalGC();
 
 #ifdef JSGC_COMPACTING
     void disableCompactingGC();
     void enableCompactingGC();
--- a/js/src/gc/Statistics.cpp
+++ b/js/src/gc/Statistics.cpp
@@ -768,17 +768,17 @@ Statistics::endGC()
     runtime->addTelemetry(JS_TELEMETRY_GC_MARK_MS, t(phaseTimes[PHASE_MARK]));
     runtime->addTelemetry(JS_TELEMETRY_GC_SWEEP_MS, t(phaseTimes[PHASE_SWEEP]));
     runtime->addTelemetry(JS_TELEMETRY_GC_MARK_ROOTS_MS, t(phaseTimes[PHASE_MARK_ROOTS]));
     runtime->addTelemetry(JS_TELEMETRY_GC_MARK_GRAY_MS, t(phaseTimes[PHASE_SWEEP_MARK_GRAY]));
     runtime->addTelemetry(JS_TELEMETRY_GC_NON_INCREMENTAL, !!nonincrementalReason);
     runtime->addTelemetry(JS_TELEMETRY_GC_INCREMENTAL_DISABLED, !runtime->gc.isIncrementalGCAllowed());
     runtime->addTelemetry(JS_TELEMETRY_GC_SCC_SWEEP_TOTAL_MS, t(sccTotal));
     runtime->addTelemetry(JS_TELEMETRY_GC_SCC_SWEEP_MAX_PAUSE_MS, t(sccLongest));
- 
+
     double mmu50 = computeMMU(50 * PRMJ_USEC_PER_MSEC);
     runtime->addTelemetry(JS_TELEMETRY_GC_MMU_50, mmu50 * 100);
 
     if (fp)
         printStats();
 
     // Clear the timers at the end of a GC because we accumulate time in
     // between GCs for some (which come before PHASE_GC_BEGIN in the list.)
@@ -787,17 +787,17 @@ Statistics::endGC()
 }
 
 void
 Statistics::beginSlice(const ZoneGCStats &zoneStats, JSGCInvocationKind gckind,
                        JS::gcreason::Reason reason)
 {
     this->zoneStats = zoneStats;
 
-    bool first = runtime->gc.state() == gc::NO_INCREMENTAL;
+    bool first = !runtime->gc.isIncrementalGCInProgress();
     if (first)
         beginGC(gckind);
 
     SliceData data(reason, PRMJ_Now(), GetPageFaultCount());
     if (!slices.append(data))
         CrashAtUnhandlableOOM("Failed to allocate statistics slice.");
 
     runtime->addTelemetry(JS_TELEMETRY_GC_REASON, reason);
@@ -815,17 +815,17 @@ void
 Statistics::endSlice()
 {
     slices.back().end = PRMJ_Now();
     slices.back().endFaults = GetPageFaultCount();
 
     runtime->addTelemetry(JS_TELEMETRY_GC_SLICE_MS, t(slices.back().end - slices.back().start));
     runtime->addTelemetry(JS_TELEMETRY_GC_RESET, !!slices.back().resetReason);
 
-    bool last = runtime->gc.state() == gc::NO_INCREMENTAL;
+    bool last = !runtime->gc.isIncrementalGCInProgress();
     if (last)
         endGC();
 
     // Slice callbacks should only fire for the outermost level
     if (--gcDepth == 0) {
         bool wasFullGC = zoneStats.isCollectingAllZones();
         if (sliceCallback)
             (*sliceCallback)(runtime, last ? JS::GC_CYCLE_END : JS::GC_SLICE_END,
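
The Statistics changes are the clearest illustration of the pattern: beginSlice and endSlice sample the same predicate before and after each slice to decide when a whole GC cycle begins and ends. A minimal standalone sketch of that bracketing (names are illustrative, not the real declarations):

    // First/last-slice bracketing: cycle-level work keys off whether an
    // incremental GC was already in progress around the slice.
    struct GC {
        bool inProgress = false;
        bool isIncrementalGCInProgress() const { return inProgress; }
        void runOneSlice() { /* ... may finish and clear inProgress ... */ }
    };

    void slice(GC &gc) {
        bool first = !gc.isIncrementalGCInProgress();
        if (first) {
            // beginGC(): start-of-cycle bookkeeping and telemetry baseline
        }

        gc.runOneSlice();

        bool last = !gc.isIncrementalGCInProgress();
        if (last) {
            // endGC(): report telemetry, fire the GC_CYCLE_END callback
        }
    }
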
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -165,17 +165,17 @@ NextNode(VerifyNode *node)
     else
         return (VerifyNode *)((char *)node + sizeof(VerifyNode) +
                              sizeof(EdgeValue)*(node->count - 1));
 }
 
 void
 gc::GCRuntime::startVerifyPreBarriers()
 {
-    if (verifyPreData || incrementalState != NO_INCREMENTAL)
+    if (verifyPreData || isIncrementalGCInProgress())
         return;
 
     /*
      * The post barrier verifier requires the storebuffer to be enabled, but the
      * pre barrier verifier disables it as part of disabling GGC.  Don't allow
      * starting the pre barrier verifier if the post barrier verifier is already
      * running.
      */
@@ -398,21 +398,18 @@ struct VerifyPostTracer : JSTracer
 /*
  * The post-barrier verifier runs the full store buffer and a fake nursery when
  * running and when it stops, walks the full heap to ensure that all the
  * important edges were inserted into the storebuffer.
  */
 void
 gc::GCRuntime::startVerifyPostBarriers()
 {
-    if (verifyPostData ||
-        incrementalState != NO_INCREMENTAL)
-    {
+    if (verifyPostData || isIncrementalGCInProgress())
         return;
-    }
 
     evictNursery();
 
     number++;
 
     VerifyPostTracer *trc = js_new<VerifyPostTracer>(rt, JSTraceCallback(nullptr));
     if (!trc)
         return;
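
The verifier hunks show why the guard matters in both directions: the pre-barrier verifier behaves like a fake incremental GC (so that barriers fire), which means a real GC must not be in flight when a verifier session starts. A simplified model of the guard, with illustrative names:

    struct GC {
        bool incremental = false;      // stands in for state() != NO_INCREMENTAL
        void *verifyPreData = nullptr; // non-null while the verifier runs

        bool isIncrementalGCInProgress() const { return incremental; }

        void startVerifyPreBarriers() {
            // Never nest a verifier session inside a live incremental GC,
            // and never start a second session.
            if (verifyPreData || isIncrementalGCInProgress())
                return;
            // ... take a heap snapshot and enable pre barriers ...
        }
    };
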
--- a/js/src/jsapi-tests/testGCFinalizeCallback.cpp
+++ b/js/src/jsapi-tests/testGCFinalizeCallback.cpp
@@ -20,17 +20,17 @@ BEGIN_TEST(testGCFinalizeCallback)
     CHECK(checkSingleGroup());
     CHECK(checkFinalizeStatus());
     CHECK(checkFinalizeIsCompartmentGC(false));
 
     /* Full GC, incremental. */
     FinalizeCalls = 0;
     JS::PrepareForFullGC(rt);
     JS::IncrementalGC(rt, JS::gcreason::API, 1000000);
-    CHECK(rt->gc.state() == js::gc::NO_INCREMENTAL);
+    CHECK(!rt->gc.isIncrementalGCInProgress());
     CHECK(rt->gc.isFullGc());
     CHECK(checkMultipleGroups());
     CHECK(checkFinalizeStatus());
     CHECK(checkFinalizeIsCompartmentGC(false));
 
     JS::RootedObject global1(cx, createTestGlobal());
     JS::RootedObject global2(cx, createTestGlobal());
     JS::RootedObject global3(cx, createTestGlobal());
@@ -57,29 +57,29 @@ BEGIN_TEST(testGCFinalizeCallback)
     CHECK(checkSingleGroup());
     CHECK(checkFinalizeStatus());
     CHECK(checkFinalizeIsCompartmentGC(true));
 
     /* Compartment GC, incremental, single compartment. */
     FinalizeCalls = 0;
     JS::PrepareZoneForGC(global1->zone());
     JS::IncrementalGC(rt, JS::gcreason::API, 1000000);
-    CHECK(rt->gc.state() == js::gc::NO_INCREMENTAL);
+    CHECK(!rt->gc.isIncrementalGCInProgress());
     CHECK(!rt->gc.isFullGc());
     CHECK(checkSingleGroup());
     CHECK(checkFinalizeStatus());
     CHECK(checkFinalizeIsCompartmentGC(true));
 
     /* Compartment GC, incremental, multiple compartments. */
     FinalizeCalls = 0;
     JS::PrepareZoneForGC(global1->zone());
     JS::PrepareZoneForGC(global2->zone());
     JS::PrepareZoneForGC(global3->zone());
     JS::IncrementalGC(rt, JS::gcreason::API, 1000000);
-    CHECK(rt->gc.state() == js::gc::NO_INCREMENTAL);
+    CHECK(!rt->gc.isIncrementalGCInProgress());
     CHECK(!rt->gc.isFullGc());
     CHECK(checkMultipleGroups());
     CHECK(checkFinalizeStatus());
     CHECK(checkFinalizeIsCompartmentGC(true));
 
 #ifdef JS_GC_ZEAL
 
     /* Full GC with reset due to new compartment, becoming compartment GC. */
@@ -90,17 +90,17 @@ BEGIN_TEST(testGCFinalizeCallback)
     js::SliceBudget budget(js::WorkBudget(1));
     rt->gc.gcDebugSlice(budget);
     CHECK(rt->gc.state() == js::gc::MARK);
     CHECK(rt->gc.isFullGc());
 
     JS::RootedObject global4(cx, createTestGlobal());
     budget = js::SliceBudget(js::WorkBudget(1));
     rt->gc.gcDebugSlice(budget);
-    CHECK(rt->gc.state() == js::gc::NO_INCREMENTAL);
+    CHECK(!rt->gc.isIncrementalGCInProgress());
     CHECK(!rt->gc.isFullGc());
     CHECK(checkMultipleGroups());
     CHECK(checkFinalizeStatus());
 
     for (unsigned i = 0; i < FinalizeCalls - 1; ++i)
         CHECK(!IsCompartmentGCBuffer[i]);
     CHECK(IsCompartmentGCBuffer[FinalizeCalls - 1]);
 
--- a/js/src/jsfriendapi.cpp
+++ b/js/src/jsfriendapi.cpp
@@ -1109,17 +1109,17 @@ JS_FRIEND_API(bool)
 JS::IsIncrementalGCEnabled(JSRuntime *rt)
 {
     return rt->gc.isIncrementalGCEnabled();
 }
 
 JS_FRIEND_API(bool)
 JS::IsIncrementalGCInProgress(JSRuntime *rt)
 {
-    return rt->gc.isIncrementalGCInProgress();
+    return rt->gc.isIncrementalGCInProgress() && !rt->gc.isVerifyPreBarriersEnabled();
 }
 
 JS_FRIEND_API(void)
 JS::DisableIncrementalGC(JSRuntime *rt)
 {
     rt->gc.disallowIncrementalGC();
 }
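
Note the compensating change here: the `!verifyPreData` term removed from the internal predicate reappears as `!isVerifyPreBarriersEnabled()` in the friend API, so embedders still never observe the verifier's fake GC, while internal callers now do (which is what the barrier machinery wants). A hedged sketch of how an embedder of this era might drive slices off the public predicate; error handling omitted:

    #include "jsfriendapi.h"

    // Continue an in-flight incremental GC from an embedder's event loop.
    void maybeRunGCSlice(JSRuntime *rt) {
        if (JS::IsIncrementalGCInProgress(rt)) {
            // A ~10 ms budget; JS::IncrementalGC finishes the collection
            // once the remaining work fits in a single slice.
            JS::IncrementalGC(rt, JS::gcreason::API, 10);
        }
    }
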
 
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -1622,20 +1622,20 @@ bool
 GCRuntime::addRoot(T *rp, const char *name, JSGCRootType rootType)
 {
     /*
      * Sometimes Firefox will hold weak references to objects and then convert
      * them to strong references by calling AddRoot (e.g., via PreserveWrapper,
      * or ModifyBusyCount in workers). We need a read barrier to cover these
      * cases.
      */
-    if (rt->gc.incrementalState != NO_INCREMENTAL)
+    if (isIncrementalGCInProgress())
         BarrierOwner<T>::result::writeBarrierPre(*rp);
 
-    return rt->gc.rootsHash.put((void *)rp, RootInfo(name, rootType));
+    return rootsHash.put((void *)rp, RootInfo(name, rootType));
 }
 
 void
 GCRuntime::removeRoot(void *rp)
 {
     rootsHash.remove(rp);
     poke();
 }
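
The comment in addRoot describes a snapshot-at-the-beginning invariant: an object promoted from weak to strong mid-GC must be marked, or the collector may sweep it believing nothing reachable points at it. A simplified standalone model of the barrier-on-publish pattern; names are illustrative:

    struct Obj { bool marked = false; };

    struct GC {
        bool inProgress = false;
        bool isIncrementalGCInProgress() const { return inProgress; }

        // Pre-barrier: executed when a previously-invisible edge is published.
        void writeBarrierPre(Obj *obj) {
            if (inProgress && obj && !obj->marked)
                obj->marked = true;   // real code pushes onto the mark stack
        }

        void addRoot(Obj *obj) {
            if (isIncrementalGCInProgress())
                writeBarrierPre(obj); // keep the mark snapshot consistent
            // ... insert obj into the roots table ...
        }
    };
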
@@ -3011,27 +3011,27 @@ RunLastDitchGC(JSContext *cx, JS::Zone *
     size_t thingSize = Arena::thingSize(thingKind);
     return zone->allocator.arenas.allocateFromFreeList(thingKind, thingSize);
 }
 
 template <AllowGC allowGC>
 /* static */ void *
 GCRuntime::refillFreeListFromMainThread(JSContext *cx, AllocKind thingKind)
 {
-    MOZ_ASSERT(!cx->runtime()->isHeapBusy(), "allocating while under GC");
-    MOZ_ASSERT_IF(allowGC, !cx->runtime()->currentThreadHasExclusiveAccess());
+    JSRuntime *rt = cx->runtime();
+    MOZ_ASSERT(!rt->isHeapBusy(), "allocating while under GC");
+    MOZ_ASSERT_IF(allowGC, !rt->currentThreadHasExclusiveAccess());
 
     Allocator *allocator = cx->allocator();
     Zone *zone = allocator->zone_;
 
     // If we have grown past our GC heap threshold while in the middle of an
     // incremental GC, we're growing faster than we're GCing, so stop the world
     // and do a full, non-incremental GC right now, if possible.
-    const bool mustCollectNow = allowGC &&
-                                cx->runtime()->gc.incrementalState != NO_INCREMENTAL &&
+    const bool mustCollectNow = allowGC && rt->gc.isIncrementalGCInProgress() &&
                                 zone->usage.gcBytes() > zone->threshold.gcTriggerBytes();
 
     bool outOfMemory = false;  // Set true if we fail to allocate.
     bool ranGC = false;  // Once we've GC'd and still cannot allocate, report.
     do {
         if (MOZ_UNLIKELY(mustCollectNow || outOfMemory)) {
             // If we are doing a fallible allocation, percolate up the OOM
             // instead of reporting it.
@@ -3049,17 +3049,17 @@ GCRuntime::refillFreeListFromMainThread(
         void *thing = allocator->arenas.allocateFromArena(zone, thingKind, maybeStartBGAlloc);
         if (MOZ_LIKELY(thing))
             return thing;
 
         // Even if allocateFromArena failed due to OOM, a background
         // finalization task may be running (freeing more memory); wait for it
         // to finish, then try to allocate again in case it freed up the memory
         // we need.
-        cx->runtime()->gc.waitBackgroundSweepEnd();
+        rt->gc.waitBackgroundSweepEnd();
 
         thing = allocator->arenas.allocateFromArena(zone, thingKind, maybeStartBGAlloc);
         if (MOZ_LIKELY(thing))
             return thing;
 
         // Retry after a last-ditch GC, unless we've already tried that.
         outOfMemory = true;
     } while (!ranGC);
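
The refill path above encodes a three-stage escalation: try the free lists, wait for background finalization to return memory, and only then run a single stop-the-world last-ditch GC before reporting OOM. A standalone sketch of that loop shape (the helpers are hypothetical stubs):

    #include <cstddef>

    // Hypothetical stubs standing in for the real allocator and GC entry points.
    void *tryAllocate(size_t bytes);
    void waitBackgroundSweepEnd();
    void runLastDitchGC();

    void *allocateWithRetry(size_t bytes) {
        bool outOfMemory = false; // set once both fast paths have failed
        bool ranGC = false;       // at most one last-ditch GC per request
        do {
            if (outOfMemory) {
                runLastDitchGC(); // stop-the-world collection
                ranGC = true;
            }
            if (void *p = tryAllocate(bytes))
                return p;
            waitBackgroundSweepEnd(); // finalization may have freed memory
            if (void *p = tryAllocate(bytes))
                return p;
            outOfMemory = true;       // escalate on the next iteration
        } while (!ranGC);
        return nullptr;               // caller reports OOM
    }
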
@@ -3312,17 +3312,17 @@ GCRuntime::maybeGC(Zone *zone)
     }
 #endif
 
     if (gcIfNeeded())
         return true;
 
     if (zone->usage.gcBytes() > 1024 * 1024 &&
         zone->threshold.isCloseToAllocTrigger(zone->usage, schedulingState.inHighFrequencyGCMode()) &&
-        incrementalState == NO_INCREMENTAL &&
+        !isIncrementalGCInProgress() &&
         !isBackgroundSweeping())
     {
         PrepareZoneForGC(zone);
         gcSlice(GC_NORMAL, JS::gcreason::MAYBEGC);
         return true;
     }
 
     return false;
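
The maybeGC hunk shows an eagerness heuristic: collect a zone early only when it is non-trivial in size, close to its hard allocation trigger, and nothing else is already running. A sketch of that check; the constants are assumptions for illustration (the real close-to-trigger factor depends on the high-frequency GC mode):

    #include <cstddef>

    bool shouldEagerlyCollect(size_t gcBytes, size_t triggerBytes,
                              bool gcInProgress, bool backgroundSweeping) {
        const size_t kMinInterestingHeap = 1024 * 1024;   // ignore tiny zones
        const double kCloseFactor = 0.9;                  // "close to trigger" (assumed)
        return gcBytes > kMinInterestingHeap &&
               gcBytes > size_t(triggerBytes * kCloseFactor) &&
               !gcInProgress &&
               !backgroundSweeping;
    }
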
@@ -3668,17 +3668,17 @@ GCHelperState::startBackgroundShrink(con
 }
 
 void
 GCHelperState::waitBackgroundSweepEnd()
 {
     AutoLockGC lock(rt);
     while (state() == SWEEPING)
         waitForBackgroundThread();
-    if (rt->gc.incrementalState == NO_INCREMENTAL)
+    if (!rt->gc.isIncrementalGCInProgress())
         rt->gc.assertBackgroundSweepingFinished();
 }
 
 void
 GCHelperState::doSweep(AutoLockGC &lock)
 {
     // The main thread may call queueZonesForBackgroundSweep() or
     // ShrinkGCBuffers() while this is running so we must check there is no more
@@ -5882,17 +5882,17 @@ GCRuntime::incrementalCollectSlice(Slice
          * Do the incremental collection type specified by zeal mode if the
          * collection was triggered by runDebugGC() and incremental GC has not
          * been cancelled by resetIncrementalGC().
          */
         zeal = zealMode;
     }
 #endif
 
-    MOZ_ASSERT_IF(incrementalState != NO_INCREMENTAL, isIncremental);
+    MOZ_ASSERT_IF(isIncrementalGCInProgress(), isIncremental);
     isIncremental = !budget.isUnlimited();
 
     if (zeal == ZealIncrementalRootsThenFinish || zeal == ZealIncrementalMarkAllThenFinish) {
         /*
          * Yields between slices occurs at predetermined points in these modes;
          * the budget is not used.
          */
         budget.makeUnlimited();
@@ -6040,21 +6040,18 @@ GCRuntime::budgetIncrementalGC(SliceBudg
 
     bool reset = false;
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         if (zone->usage.gcBytes() >= zone->threshold.gcTriggerBytes()) {
             budget.makeUnlimited();
             stats.nonincremental("allocation trigger");
         }
 
-        if (incrementalState != NO_INCREMENTAL &&
-            zone->isGCScheduled() != zone->wasGCStarted())
-        {
+        if (isIncrementalGCInProgress() && zone->isGCScheduled() != zone->wasGCStarted())
             reset = true;
-        }
 
         if (zone->isTooMuchMalloc()) {
             budget.makeUnlimited();
             stats.nonincremental("malloc bytes trigger");
         }
     }
 
     if (reset)
@@ -6103,32 +6100,32 @@ GCRuntime::gcCycle(bool incremental, Sli
     AutoDisableStoreBuffer adsb(this);
 
     AutoTraceSession session(rt, MajorCollecting);
 
     majorGCRequested = false;
     interFrameGC = true;
 
     number++;
-    if (incrementalState == NO_INCREMENTAL)
+    if (!isIncrementalGCInProgress())
         majorGCNumber++;
 
     // It's ok if threads other than the main thread have suppressGC set, as
     // they are operating on zones which will not be collected from here.
     MOZ_ASSERT(!rt->mainThread.suppressGC);
 
     // Assert if this is a GC unsafe region.
     JS::AutoAssertOnGC::VerifyIsSafeToGC(rt);
 
     {
         gcstats::AutoPhase ap(stats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
 
         // As we are about to clear the mark bits, wait for background
         // finalization to finish. We only need to wait on the first slice.
-        if (incrementalState == NO_INCREMENTAL)
+        if (!isIncrementalGCInProgress())
             waitBackgroundSweepEnd();
 
         // We must also wait for background allocation to finish so we can
         // avoid taking the GC lock when manipulating the chunks during the GC.
         // The background alloc task can run between slices, so we must wait
         // for it at the start of every slice.
         allocTask.cancel(GCParallelTask::CancelAndWait);
     }
@@ -6145,23 +6142,23 @@ GCRuntime::gcCycle(bool incremental, Sli
 
         stats.nonincremental("requested");
         budget.makeUnlimited();
     } else {
         budgetIncrementalGC(budget);
     }
 
     /* The GC was reset, so we need a do-over. */
-    if (prevState != NO_INCREMENTAL && incrementalState == NO_INCREMENTAL)
+    if (prevState != NO_INCREMENTAL && !isIncrementalGCInProgress())
         return true;
 
     TraceMajorGCStart();
 
     /* Set the invocation kind in the first slice. */
-    if (incrementalState == NO_INCREMENTAL)
+    if (!isIncrementalGCInProgress())
         invocationKind = gckind;
 
     incrementalCollectSlice(budget, reason);
 
 #ifndef JS_MORE_DETERMINISTIC
     nextFullGCTime = PRMJ_Now() + GC_IDLE_FULL_SPAN;
 #endif
 
@@ -6217,17 +6214,17 @@ gcstats::ZoneGCStats
 GCRuntime::scanZonesBeforeGC()
 {
     gcstats::ZoneGCStats zoneStats;
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         if (mode == JSGC_MODE_GLOBAL)
             zone->scheduleGC();
 
         /* This is a heuristic to avoid resets. */
-        if (incrementalState != NO_INCREMENTAL && zone->needsIncrementalBarrier())
+        if (isIncrementalGCInProgress() && zone->needsIncrementalBarrier())
             zone->scheduleGC();
 
         /* This is a heuristic to reduce the total number of collections. */
         if (zone->threshold.isCloseToAllocTrigger(zone->usage, schedulingState.inHighFrequencyGCMode()))
             zone->scheduleGC();
 
         zoneStats.zoneCount++;
         if (zone->isGCScheduled()) {
@@ -6276,42 +6273,42 @@ GCRuntime::collect(bool incremental, Sli
     cleanUpEverything = ShouldCleanUpEverything(reason, gckind);
 
     bool repeat = false;
     do {
         /*
          * Let the API user decide to defer a GC if it wants to (unless this
          * is the last context). Invoke the callback regardless.
          */
-        if (incrementalState == NO_INCREMENTAL) {
+        if (!isIncrementalGCInProgress()) {
             gcstats::AutoPhase ap(stats, gcstats::PHASE_GC_BEGIN);
             if (gcCallback.op)
                 gcCallback.op(rt, JSGC_BEGIN, gcCallback.data);
         }
 
         poked = false;
         bool wasReset = gcCycle(incremental, budget, gckind, reason);
 
-        if (incrementalState == NO_INCREMENTAL) {
+        if (!isIncrementalGCInProgress()) {
             gcstats::AutoPhase ap(stats, gcstats::PHASE_GC_END);
             if (gcCallback.op)
                 gcCallback.op(rt, JSGC_END, gcCallback.data);
         }
 
         /* Need to re-schedule all zones for GC. */
         if (poked && cleanUpEverything)
             JS::PrepareForFullGC(rt);
 
         /*
          * This code makes an extra effort to collect compartments that we
          * thought were dead at the start of the GC. See the large comment in
          * beginMarkPhase.
          */
         bool repeatForDeadZone = false;
-        if (incremental && incrementalState == NO_INCREMENTAL) {
+        if (incremental && !isIncrementalGCInProgress()) {
             for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
                 if (c->scheduledForDestruction) {
                     incremental = false;
                     repeatForDeadZone = true;
                     reason = JS::gcreason::COMPARTMENT_REVIVED;
                     c->zone()->scheduleGC();
                 }
             }
@@ -6321,17 +6318,17 @@ GCRuntime::collect(bool incremental, Sli
          * If we reset an existing GC, we need to start a new one. Also, we
          * repeat GCs that happen during shutdown (the gcShouldCleanUpEverything
          * case) until we can be sure that no additional garbage is created
          * (which typically happens if roots are dropped during finalizers).
          */
         repeat = (poked && cleanUpEverything) || wasReset || repeatForDeadZone;
     } while (repeat);
 
-    if (incrementalState == NO_INCREMENTAL)
+    if (!isIncrementalGCInProgress())
         EnqueuePendingParseTasksAfterGC(rt);
 }
 
 void
 GCRuntime::gc(JSGCInvocationKind gckind, JS::gcreason::Reason reason)
 {
     SliceBudget budget;
     collect(false, budget, gckind, reason);
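
GCRuntime::collect wraps gcCycle in an outer loop that reruns the whole collection when the cycle was reset, when shutdown-style cleanup was poked, or when compartments found dead at the start need a follow-up non-incremental pass. A sketch of that repeat-until-stable shape; the predicates are hypothetical stand-ins:

    // Hypothetical stand-ins for the real cycle driver and its predicates.
    bool runOneCycle();  // returns true if the incremental GC was reset
    bool gcStillInProgress();
    bool anyCompartmentScheduledForDestruction();
    bool shutdownNeedsMoreGC();

    void collectUntilStable() {
        bool repeat;
        do {
            bool wasReset = runOneCycle();
            bool repeatForDeadZone = false;
            if (!gcStillInProgress()) {
                // Compartments believed dead at the start of the cycle get
                // one more, non-incremental pass so they really go away.
                repeatForDeadZone = anyCompartmentScheduledForDestruction();
            }
            repeat = wasReset || repeatForDeadZone || shutdownNeedsMoreGC();
        } while (repeat);
    }
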
@@ -6679,17 +6676,17 @@ GCRuntime::runDebugGC()
     {
         js::gc::State initialState = incrementalState;
         if (type == ZealIncrementalMultipleSlices) {
             /*
              * Start with a small slice limit and double it every slice. This
              * ensure that we get multiple slices, and collection runs to
              * completion.
              */
-            if (initialState == NO_INCREMENTAL)
+            if (!isIncrementalGCInProgress())
                 incrementalLimit = zealFrequency / 2;
             else
                 incrementalLimit *= 2;
             budget = SliceBudget(WorkBudget(incrementalLimit));
         } else {
             // This triggers incremental GC but is actually ignored by IncrementalMarkSlice.
             budget = SliceBudget(WorkBudget(1));
         }
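
The final hunk applies the predicate to the ZealIncrementalMultipleSlices budget schedule: the slice limit starts small so that several slices are guaranteed to run, then doubles each slice so the collection must terminate. A sketch of the schedule (illustrative names; the real budget type is js::SliceBudget):

    long nextSliceBudget(bool firstSlice, long &incrementalLimit,
                         long zealFrequency) {
        if (firstSlice)
            incrementalLimit = zealFrequency / 2; // small initial budget
        else
            incrementalLimit *= 2;                // double every slice
        return incrementalLimit;                  // work units for this slice
    }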