Bug 1029648 - Update zone triggers at the end of background sweeping rather than for every chunk freed r=terrence
authorJon Coppeard <jcoppeard@mozilla.com>
Wed, 02 Jul 2014 08:11:26 +0100
changeset 192178 41da9bcec12e83e9c50c606d43801ad80cfe09fc
parent 192177 c76b804566e9663ee15b8b568f02180a2dbd43dd
child 192179 cad7de31731e5a51a158201172438a418f70b834
reviewers: terrence
bugs: 1029648
milestone: 33.0a1
js/src/gc/GCRuntime.h
js/src/gc/Zone.cpp
js/src/gc/Zone.h
js/src/jsgc.cpp
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -275,18 +275,19 @@ class GCRuntime
 
     bool shouldCleanUpEverything() { return cleanUpEverything; }
 
     bool areGrayBitsValid() { return grayBitsValid; }
     void setGrayBitsInvalid() { grayBitsValid = false; }
 
     bool isGcNeeded() { return isNeeded; }
 
-    double computeHeapGrowthFactor(size_t lastBytes);
-    size_t computeTriggerBytes(double growthFactor, size_t lastBytes, JSGCInvocationKind gckind);
+    double computeHeapGrowthFactor(size_t lastBytes) const;
+    size_t computeTriggerBytes(double growthFactor, size_t lastBytes,
+                               JSGCInvocationKind gckind) const;
     size_t allocationThreshold() { return allocThreshold; }
 
     JSGCMode gcMode() const { return mode; }
     void setGCMode(JSGCMode m) {
         mode = m;
         marker.setGCMode(mode);
     }
 
@@ -332,17 +333,17 @@ class GCRuntime
     void findZoneGroups();
     bool findZoneEdgesForWeakMaps();
     void getNextZoneGroup();
     void endMarkingZoneGroup();
     void beginSweepingZoneGroup();
     bool releaseObservedTypes();
     void endSweepingZoneGroup();
     bool sweepPhase(SliceBudget &sliceBudget);
-    void endSweepPhase(JSGCInvocationKind gckind, bool lastGC);
+    void endSweepPhase(bool lastGC);
     void sweepZones(FreeOp *fop, bool lastGC);
     void decommitArenasFromAvailableList(Chunk **availableListHeadp);
     void decommitArenas();
     void expireChunksAndArenas(bool shouldShrink);
     void sweepBackgroundThings(bool onBackgroundThread);
     void assertBackgroundSweepingFinished();
 
     void computeNonIncrementalMarkingForValidation();
@@ -452,16 +453,19 @@ class GCRuntime
     uint64_t              startNumber;
 
     /* Whether the currently running GC can finish in multiple slices. */
     bool                  isIncremental;
 
     /* Whether all compartments are being collected in first GC slice. */
     bool                  isFull;
 
+    /* The kind of the last collection. */
+    JSGCInvocationKind    lastKind;
+
     /* The reason that an interrupt-triggered GC should be called. */
     JS::gcreason::Reason  triggerReason;
 
     /*
      * If this is 0, all cross-compartment proxies must be registered in the
      * wrapper map. This checking must be disabled temporarily while creating
      * new wrappers. When non-zero, this records the recursion depth of wrapper
      * creation.
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -26,16 +26,17 @@ JS::Zone::Zone(JSRuntime *rt)
     allocator(this),
     types(this),
     compartments(),
     gcGrayRoots(),
     gcHeapGrowthFactor(3.0),
     gcMallocBytes(0),
     gcMallocGCTriggered(false),
     gcBytes(0),
+    gcBytesAfterGC(0),
     gcTriggerBytes(0),
     data(nullptr),
     isSystem(false),
     usedByExclusiveThread(false),
     scheduledForDestruction(false),
     maybeAlive(true),
     active(false),
     jitZone_(nullptr),
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -101,17 +101,16 @@ struct Zone : public JS::shadow::Zone,
 
     void discardJitCode(js::FreeOp *fop);
 
     void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                 size_t *typePool,
                                 size_t *baselineStubsOptimized);
 
     void setGCLastBytes(size_t lastBytes, js::JSGCInvocationKind gckind);
-    void reduceGCTriggerBytes(size_t amount);
 
     void resetGCMallocBytes();
     void setGCMaxMallocBytes(size_t value);
     void updateMallocCounter(size_t nbytes) {
         // Note: this code may be run from worker threads. We tolerate any
         // thread races when updating gcMallocBytes.
         gcMallocBytes -= ptrdiff_t(nbytes);
         if (MOZ_UNLIKELY(isTooMuchMalloc()))
@@ -243,18 +242,22 @@ struct Zone : public JS::shadow::Zone,
     // This should be a bool, but Atomic only supports 32-bit and pointer-sized
     // types.
     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;
 
     // Counts the number of bytes allocated in the GC heap for this zone. It is
     // updated by both the main and GC helper threads.
     mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes;
 
-    // GC trigger threshold for allocations on the GC heap.
-    size_t gcTriggerBytes;
+    // The number of bytes allocated in the GC heap for this zone after the last GC.
+    size_t gcBytesAfterGC;
+
+    // GC trigger threshold for allocations on the GC heap. It is updated by
+    // both the main and GC helper threads.
+    mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcTriggerBytes;
 
     // Per-zone data for use by an embedder.
     void *data;
 
     bool isSystem;
 
     bool usedByExclusiveThread;
 
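The change from a plain size_t to mozilla::Atomic<size_t, mozilla::ReleaseAcquire> for gcTriggerBytes matters because, after this patch, the GC helper thread recomputes the trigger at the end of background sweeping while the main thread reads it on the allocation path. A minimal standalone sketch of the same pattern, using std::atomic with release/acquire ordering in place of mozilla::Atomic; the names and helpers below are illustrative, not the patch's actual code:

    #include <atomic>
    #include <cstddef>

    // Illustrative stand-ins for the zone fields touched by the patch.
    std::atomic<size_t> gcBytes{0};         // updated by main and helper threads
    std::atomic<size_t> gcTriggerBytes{0};  // now also written off-main-thread

    // Main thread: consulted on the GC-heap allocation path.
    bool shouldTriggerGC()
    {
        // The acquire loads pair with the helper thread's release store below,
        // so a trigger recomputed after background sweeping is observed here.
        return gcBytes.load(std::memory_order_acquire) >=
               gcTriggerBytes.load(std::memory_order_acquire);
    }

    // Helper thread: recompute the trigger once background sweeping finishes,
    // instead of shrinking it for every chunk freed.
    void updateTriggerAfterSweep(size_t bytesAfterGC, double growthFactor)
    {
        gcTriggerBytes.store(size_t(double(bytesAfterGC) * growthFactor),
                             std::memory_order_release);
    }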
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -963,17 +963,17 @@ Chunk::releaseArena(ArenaHeader *aheader
     JSRuntime *rt = zone->runtimeFromAnyThread();
     AutoLockGC maybeLock;
     if (rt->gc.isBackgroundSweeping())
         maybeLock.lock(rt);
 
     JS_ASSERT(rt->gc.bytes >= ArenaSize);
     JS_ASSERT(zone->gcBytes >= ArenaSize);
     if (rt->gc.isBackgroundSweeping())
-        zone->reduceGCTriggerBytes(zone->gcHeapGrowthFactor * ArenaSize);
+        zone->gcBytesAfterGC -= ArenaSize;
     rt->gc.bytes -= ArenaSize;
     zone->gcBytes -= ArenaSize;
 
     aheader->setAsNotAllocated();
     addArenaToFreeList(rt, aheader);
 
     if (info.numArenasFree == 1) {
         JS_ASSERT(!info.prevp);
@@ -1665,25 +1665,26 @@ GCRuntime::updateMallocCounter(JS::Zone 
 void
 GCRuntime::onTooMuchMalloc()
 {
     if (!mallocGCTriggered)
         mallocGCTriggered = triggerGC(JS::gcreason::TOO_MUCH_MALLOC);
 }
 
 size_t
-GCRuntime::computeTriggerBytes(double growthFactor, size_t lastBytes, JSGCInvocationKind gckind)
+GCRuntime::computeTriggerBytes(double growthFactor, size_t lastBytes,
+                               JSGCInvocationKind gckind) const
 {
     size_t base = gckind == GC_SHRINK ? lastBytes : Max(lastBytes, allocThreshold);
     double trigger = double(base) * growthFactor;
     return size_t(Min(double(maxBytes), trigger));
 }
 
 double
-GCRuntime::computeHeapGrowthFactor(size_t lastBytes)
+GCRuntime::computeHeapGrowthFactor(size_t lastBytes) const
 {
     /*
      * The heap growth factor depends on the heap size after a GC and the GC frequency.
      * For low frequency GCs (more than 1sec between GCs) we let the heap grow to 150%.
      * For high frequency GCs we let the heap grow depending on the heap size:
      *   lastBytes < highFrequencyLowLimit: 300%
      *   lastBytes > highFrequencyHighLimit: 150%
      *   otherwise: linear interpolation between 150% and 300% based on lastBytes
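Pulling the rules from the comment above into a runnable sketch: low-frequency GCs grow the heap to 150%; high-frequency GCs grow it to 300% below the low limit, 150% above the high limit, and interpolate linearly in between. computeTrigger mirrors the base selection and clamping in computeTriggerBytes. The limit and threshold constants are placeholder values for illustration, not the engine's tuned defaults:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Placeholder tuning values, for illustration only.
    const size_t highFrequencyLowLimit  = 100 * 1024 * 1024;
    const size_t highFrequencyHighLimit = 500 * 1024 * 1024;
    const size_t maxBytes               = 1024u * 1024 * 1024;
    const size_t allocThreshold         = 30 * 1024 * 1024;

    double computeHeapGrowthFactor(size_t lastBytes, bool highFrequencyGC)
    {
        if (!highFrequencyGC)
            return 1.5;                        // low frequency: grow to 150%
        if (lastBytes < highFrequencyLowLimit)
            return 3.0;                        // small heap: grow to 300%
        if (lastBytes > highFrequencyHighLimit)
            return 1.5;                        // large heap: grow to 150%
        // Linear interpolation from 300% down to 150% between the limits.
        double t = double(lastBytes - highFrequencyLowLimit) /
                   double(highFrequencyHighLimit - highFrequencyLowLimit);
        return 3.0 - t * 1.5;
    }

    size_t computeTrigger(double growthFactor, size_t lastBytes, bool shrink)
    {
        size_t base = shrink ? lastBytes : std::max(lastBytes, allocThreshold);
        double trigger = double(base) * growthFactor;
        return size_t(std::min(double(maxBytes), trigger));
    }

    int main()
    {
        size_t lastBytes = 200 * 1024 * 1024;  // between the two limits
        double factor = computeHeapGrowthFactor(lastBytes, true);
        // t = 0.25, so factor = 3.0 - 0.375 = 2.625
        printf("factor %.3f, trigger %zu\n",
               factor, computeTrigger(factor, lastBytes, false));
        return 0;
    }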
@@ -1715,32 +1716,21 @@ GCRuntime::computeHeapGrowthFactor(size_
     }
 
     return factor;
 }
 
 void
 Zone::setGCLastBytes(size_t lastBytes, JSGCInvocationKind gckind)
 {
-    GCRuntime &gc = runtimeFromMainThread()->gc;
+    const GCRuntime &gc = runtimeFromAnyThread()->gc;
     gcHeapGrowthFactor = gc.computeHeapGrowthFactor(lastBytes);
     gcTriggerBytes = gc.computeTriggerBytes(gcHeapGrowthFactor, lastBytes, gckind);
 }
 
-void
-Zone::reduceGCTriggerBytes(size_t amount)
-{
-    JS_ASSERT(amount > 0);
-    JS_ASSERT(gcTriggerBytes >= amount);
-    GCRuntime &gc = runtimeFromAnyThread()->gc;
-    if (gcTriggerBytes - amount < gc.allocationThreshold() * gcHeapGrowthFactor)
-        return;
-    gcTriggerBytes -= amount;
-}
-
 Allocator::Allocator(Zone *zone)
   : zone_(zone)
 {}
 
 inline void
 GCMarker::delayMarkingArena(ArenaHeader *aheader)
 {
     if (aheader->hasDelayedMarking) {
@@ -2560,16 +2550,25 @@ GCRuntime::sweepBackgroundThings(bool on
                 AllocKind kind = BackgroundPhases[phase][index];
                 ArenaHeader *arenas = zone->allocator.arenas.arenaListsToSweep[kind];
                 if (arenas)
                     ArenaLists::backgroundFinalize(&fop, arenas, onBackgroundThread);
             }
         }
     }
 
+    if (onBackgroundThread) {
+        /*
+         * Update the zone triggers a second time now that we have completely
+         * finished sweeping these zones.
+         */
+        for (Zone *zone = sweepingZones; zone; zone = zone->gcNextGraphNode)
+            zone->setGCLastBytes(zone->gcBytesAfterGC, lastKind);
+    }
+
     sweepingZones = nullptr;
 }
 
 void
 GCRuntime::assertBackgroundSweepingFinished()
 {
 #if defined(JS_THREADSAFE) && defined(DEBUG)
     JS_ASSERT(!sweepingZones);
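In miniature, the flow this hunk completes: the background thread keeps gcBytesAfterGC current as arenas are released (the Chunk::releaseArena hunk above), then recomputes each sweeping zone's trigger exactly once when sweeping ends, where the removed reduceGCTriggerBytes adjusted the trigger for every chunk freed. A simplified sketch under those assumptions, with the types stripped down to the relevant fields and the growth factor passed in directly:

    #include <cstddef>

    struct Zone {
        size_t gcBytes = 0;         // current GC heap bytes for this zone
        size_t gcBytesAfterGC = 0;  // heap size snapshot, maintained during sweep
        size_t gcTriggerBytes = 0;  // allocation trigger derived from the snapshot
        Zone *gcNextGraphNode = nullptr;

        void setGCLastBytes(size_t lastBytes, double growthFactor) {
            gcTriggerBytes = size_t(double(lastBytes) * growthFactor);
        }
    };

    const size_t ArenaSize = 4096;

    // Background thread, per arena freed: just keep the snapshot current.
    void releaseArena(Zone *zone)
    {
        zone->gcBytesAfterGC -= ArenaSize;
        zone->gcBytes -= ArenaSize;
    }

    // Background thread, once per sweep: recompute every zone's trigger.
    void endBackgroundSweep(Zone *sweepingZones, double growthFactor)
    {
        for (Zone *zone = sweepingZones; zone; zone = zone->gcNextGraphNode)
            zone->setGCLastBytes(zone->gcBytesAfterGC, growthFactor);
    }

One recompute at the end also removes the need for the clamping heuristic that reduceGCTriggerBytes used to guard against shrinking the trigger below the allocation threshold.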
@@ -4357,17 +4356,17 @@ GCRuntime::sweepPhase(SliceBudget &slice
         if (!currentZoneGroup)
             return true;  /* We're finished. */
         endMarkingZoneGroup();
         beginSweepingZoneGroup();
     }
 }
 
 void
-GCRuntime::endSweepPhase(JSGCInvocationKind gckind, bool lastGC)
+GCRuntime::endSweepPhase(bool lastGC)
 {
     gcstats::AutoPhase ap(stats, gcstats::PHASE_SWEEP);
     FreeOp fop(rt, sweepOnBackgroundThread);
 
     JS_ASSERT_IF(lastGC, !sweepOnBackgroundThread);
 
     JS_ASSERT(marker.isDrained());
     marker.stop();
@@ -4430,17 +4429,17 @@ GCRuntime::endSweepPhase(JSGCInvocationK
         if (!sweepOnBackgroundThread) {
             /*
              * Destroy arenas after we finished the sweeping so finalizers can
              * safely use IsAboutToBeFinalized(). This is done on the
              * GCHelperState if possible. We acquire the lock only because
              * Expire needs to unlock it for other callers.
              */
             AutoLockGC lock(rt);
-            expireChunksAndArenas(gckind == GC_SHRINK);
+            expireChunksAndArenas(lastKind == GC_SHRINK);
         }
     }
 
     {
         gcstats::AutoPhase ap(stats, gcstats::PHASE_FINALIZE_END);
 
         for (Callback<JSFinalizeCallback> *p = rt->gc.finalizeCallbacks.begin();
              p < rt->gc.finalizeCallbacks.end(); p++)
@@ -4473,19 +4472,20 @@ GCRuntime::endSweepPhase(JSGCInvocationK
             sweepZones(&fop, lastGC);
     }
 
     uint64_t currentTime = PRMJ_Now();
     highFrequencyGC = dynamicHeapGrowth && lastGCTime &&
         lastGCTime + highFrequencyTimeThreshold * PRMJ_USEC_PER_MSEC > currentTime;
 
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
-        zone->setGCLastBytes(zone->gcBytes, gckind);
+        zone->setGCLastBytes(zone->gcBytes, lastKind);
         if (zone->isCollecting()) {
             JS_ASSERT(zone->isGCFinished());
+            zone->gcBytesAfterGC = zone->gcBytes;
             zone->setGCState(Zone::NoGC);
         }
 
 #ifdef DEBUG
         JS_ASSERT(!zone->isCollecting());
         JS_ASSERT(!zone->wasGCStarted());
 
         for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i) {
@@ -4757,16 +4757,18 @@ GCRuntime::incrementalCollectSlice(int64
          */
         zeal = zealMode;
     }
 #endif
 
     JS_ASSERT_IF(incrementalState != NO_INCREMENTAL, isIncremental);
     isIncremental = budget != SliceBudget::Unlimited;
 
+    lastKind = gckind;
+
     if (zeal == ZealIncrementalRootsThenFinish || zeal == ZealIncrementalMarkAllThenFinish) {
         /*
          * Yields between slices occurs at predetermined points in these modes;
          * the budget is not used.
          */
         budget = SliceBudget::Unlimited;
     }
 
@@ -4844,17 +4846,17 @@ GCRuntime::incrementalCollectSlice(int64
         /* fall through */
       }
 
       case SWEEP: {
         bool finished = sweepPhase(sliceBudget);
         if (!finished)
             break;
 
-        endSweepPhase(gckind, lastGC);
+        endSweepPhase(lastGC);
 
         if (sweepOnBackgroundThread)
             helperState.startBackgroundSweep(gckind == GC_SHRINK);
 
         incrementalState = NO_INCREMENTAL;
         break;
       }