author | Jon Coppeard <jcoppeard@mozilla.com> |
date | Sat, 25 Apr 2020 07:02:30 +0000 |
changeset 526054 | 82dc44ab84c053ee90e4ed035bc9ba11d795942c |
parent 526052 | d09dfdc1546e8cebbd0491b5d971b91d5a32ef31 |
child 526055 | c65feea492118812556f2795f2d34728afe2d2ae |
push id | 37349 |
push user | ncsoregi@mozilla.com |
push date | Sat, 25 Apr 2020 22:00:31 +0000 |
treeherder | mozilla-central@6ba63b50d930 |
reviewers | sfink |
bugs | 1632534 |
milestone | 77.0a1 |
first release with | nightly linux32, linux64, mac, win32, win64 |
last release without | nightly linux32, linux64, mac, win32, win64 |
--- a/js/src/gc/Allocator.cpp
+++ b/js/src/gc/Allocator.cpp
@@ -666,17 +666,17 @@ Arena* GCRuntime::allocateArena(Chunk* c
       (heapSize.bytes() >= tunables.gcMaxBytes()))
     return nullptr;

   Arena* arena = chunk->allocateArena(this, zone, thingKind, lock);
   zone->gcHeapSize.addGCArena();

   // Trigger an incremental slice if needed.
   if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) {
-    maybeAllocTriggerZoneGC(zone, ArenaSize);
+    maybeAllocTriggerZoneGC(zone);
   }

   return arena;
 }

 Arena* Chunk::allocateArena(GCRuntime* gc, Zone* zone, AllocKind thingKind,
                             const AutoLockGC& lock) {
   Arena* arena = info.numArenasFreeCommitted > 0 ? fetchNextFreeArena(gc)

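With the per-allocation byte counting gone, the allocation path only asks whether the zone's active threshold has been crossed; the pacing now lives in the threshold itself. A minimal standalone sketch of that check, with illustrative names and values rather than SpiderMonkey API:

#include <cstddef>
#include <cstdio>

// Sketch: pick the active threshold for a zone. During an incremental GC the
// (lower) slice threshold applies; otherwise the start threshold does.
static size_t activeThreshold(bool collecting, size_t startBytes,
                              size_t sliceBytes) {
  return collecting ? sliceBytes : startBytes;
}

int main() {
  size_t heapBytes = 9u << 20;    // assumed current heap size
  size_t startBytes = 12u << 20;  // assumed start threshold
  size_t sliceBytes = 8u << 20;   // assumed slice threshold
  bool collecting = true;
  if (heapBytes >= activeThreshold(collecting, startBytes, sliceBytes)) {
    std::puts("would trigger an incremental slice");
  }
}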
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -1372,17 +1372,17 @@ bool GCRuntime::setParameter(JSGCParamKe
     case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
       marker.incrementalWeakMapMarkingEnabled = value != 0;
       break;
     default:
       if (!tunables.setParameter(key, value, lock)) {
         return false;
       }
       for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
-        zone->updateGCThresholds(*this, GC_NORMAL, lock);
+        zone->updateGCStartThresholds(*this, GC_NORMAL, lock);
       }
   }

   return true;
 }

 void GCRuntime::resetParameter(JSGCParamKey key) {
   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
@@ -1407,17 +1407,17 @@ void GCRuntime::resetParameter(JSGCParam
       break;
     case JSGC_INCREMENTAL_WEAKMAP_ENABLED:
       marker.incrementalWeakMapMarkingEnabled =
           TuningDefaults::IncrementalWeakMapMarkingEnabled;
       break;
     default:
       tunables.resetParameter(key, lock);
       for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
-        zone->updateGCThresholds(*this, GC_NORMAL, lock);
+        zone->updateGCStartThresholds(*this, GC_NORMAL, lock);
       }
   }
 }

 uint32_t GCRuntime::getParameter(JSGCParamKey key) {
   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
   AutoLockGC lock(this);
   return getParameter(key, lock);
@@ -2906,17 +2906,17 @@ bool GCRuntime::triggerGC(JS::GCReason r
     return false;
   }

   JS::PrepareForFullGC(rt->mainContextFromOwnThread());
   requestMajorGC(reason);
   return true;
 }

-void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes) {
+void GCRuntime::maybeAllocTriggerZoneGC(Zone* zone) {
   if (!CurrentThreadCanAccessRuntime(rt)) {
     // Zones in use by a helper thread can't be collected.
     MOZ_ASSERT(zone->usedByHelperThread() || zone->isAtomsZone());
     return;
   }

   MOZ_ASSERT(!JS::RuntimeHeapIsCollecting());

@@ -2930,36 +2930,22 @@ void GCRuntime::maybeAllocTriggerZoneGC(
   if (trigger.kind == TriggerKind::NonIncremental) {
     triggerZoneGC(zone, JS::GCReason::ALLOC_TRIGGER, trigger.usedBytes,
                   trigger.thresholdBytes);
     return;
   }

   MOZ_ASSERT(trigger.kind == TriggerKind::Incremental);

-  // During an incremental GC, reduce the delay to the start of the next
-  // incremental slice.
-  if (zone->gcDelayBytes < nbytes) {
-    zone->gcDelayBytes = 0;
-  } else {
-    zone->gcDelayBytes -= nbytes;
-  }
-
-  if (!zone->gcDelayBytes) {
-    // Start or continue an in progress incremental GC. We do this
-    // to try to avoid performing non-incremental GCs on zones
-    // which allocate a lot of data, even when incremental slices
-    // can't be triggered via scheduling in the event loop.
-    triggerZoneGC(zone, JS::GCReason::INCREMENTAL_ALLOC_TRIGGER,
-                  trigger.usedBytes, trigger.thresholdBytes);
-
-    // Delay the next slice until a certain amount of allocation
-    // has been performed.
-    zone->gcDelayBytes = tunables.zoneAllocDelayBytes();
-  }
+  // Start or continue an in progress incremental GC. We do this to try to avoid
+  // performing non-incremental GCs on zones which allocate a lot of data, even
+  // when incremental slices can't be triggered via scheduling in the event
+  // loop.
+  triggerZoneGC(zone, JS::GCReason::INCREMENTAL_ALLOC_TRIGGER,
+                trigger.usedBytes, trigger.thresholdBytes);
 }

 void js::gc::MaybeMallocTriggerZoneGC(JSRuntime* rt, ZoneAllocator* zoneAlloc,
                                       const HeapSize& heap,
                                       const HeapThreshold& threshold,
                                       JS::GCReason reason) {
   rt->gc.maybeMallocTriggerZoneGC(Zone::from(zoneAlloc), heap, threshold,
                                   reason);
@@ -2990,34 +2976,29 @@ bool GCRuntime::maybeMallocTriggerZoneGC
     return false;
   }

   TriggerResult trigger = checkHeapThreshold(zone, heap, threshold);
   if (trigger.kind == TriggerKind::None) {
     return false;
   }

-  if (trigger.kind == TriggerKind::Incremental && zone->wasGCStarted()) {
-    // Don't start subsequent incremental slices if we're already collecting
-    // this zone. This is different to our behaviour for GC allocation in
-    // maybeAllocTriggerZoneGC.
-    MOZ_ASSERT(isIncrementalGCInProgress());
-    return false;
-  }
-
   // Trigger a zone GC. budgetIncrementalGC() will work out whether to do an
   // incremental or non-incremental collection.
   triggerZoneGC(zone, reason, trigger.usedBytes, trigger.thresholdBytes);
   return true;
 }

 TriggerResult GCRuntime::checkHeapThreshold(
     Zone* zone, const HeapSize& heapSize, const HeapThreshold& heapThreshold) {
+  MOZ_ASSERT_IF(heapThreshold.hasSliceThreshold(), zone->wasGCStarted());
+
   size_t usedBytes = heapSize.bytes();
-  size_t thresholdBytes = heapThreshold.bytes();
+  size_t thresholdBytes = zone->wasGCStarted() ? heapThreshold.sliceBytes()
+                                               : heapThreshold.startBytes();
   if (usedBytes < thresholdBytes) {
     return TriggerResult{TriggerKind::None, 0, 0};
   }

   size_t niThreshold = heapThreshold.nonIncrementalBytes(zone, tunables);
   if (usedBytes >= niThreshold) {
     // We have passed the non-incremental threshold: immediately trigger a
     // non-incremental GC.
@@ -4062,17 +4043,17 @@ bool GCRuntime::beginMarkPhase(JS::GCRea
     checkForCompartmentMismatches();
   }
 #endif

   if (!prepareZonesForCollection(reason, &isFull.ref())) {
     return false;
   }

-  /* * Check it's safe to access the atoms zone if we are collecting it. */
+  /* Check it's safe to access the atoms zone if we are collecting it. */
   if (atomsZone->isCollecting()) {
     session.maybeCheckAtomsAccess.emplace(rt);
   }

   /*
    * In an incremental GC, clear the area free lists to ensure that subsequent
    * allocations refill them and end up marking new cells back. See
    * arenaAllocatedDuringGC().
@@ -4543,22 +4524,24 @@ void GCRuntime::getNextSweepGroup() {
   for (Zone* zone = currentSweepGroup; zone; zone = zone->nextNodeInGroup()) {
     MOZ_ASSERT(zone->isGCMarkingBlackOnly());
     MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
   }

   if (abortSweepAfterCurrentGroup) {
     joinTask(sweepMarkTask, gcstats::PhaseKind::SWEEP_MARK);

+    // Abort collection of subsequent sweep groups.
     for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
       MOZ_ASSERT(!zone->gcNextGraphComponent);
       zone->setNeedsIncrementalBarrier(false);
       zone->changeGCState(Zone::MarkBlackOnly, Zone::NoGC);
       zone->arenas.unmarkPreMarkedFreeCells();
       zone->gcGrayRoots().Clear();
+      zone->clearGCSliceThresholds();
     }

     for (SweepGroupCompartmentsIter comp(rt); !comp.done(); comp.next()) {
       ResetGrayList(comp);
     }

     abortSweepAfterCurrentGroup = false;
     currentSweepGroup = nullptr;
@@ -6260,19 +6243,19 @@ void GCRuntime::finishCollection() {
   auto currentTime = ReallyNow();
   schedulingState.updateHighFrequencyMode(lastGCEndTime_, currentTime,
                                           tunables);

   {
     AutoLockGC lock(this);
     for (GCZonesIter zone(this); !zone.done(); zone.next()) {
       zone->changeGCState(Zone::Finished, Zone::NoGC);
-      zone->gcDelayBytes = 0;
+      zone->clearGCSliceThresholds();
       zone->notifyObservingDebuggers();
-      zone->updateGCThresholds(*this, invocationKind, lock);
+      zone->updateGCStartThresholds(*this, invocationKind, lock);
     }
   }

   for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
     MOZ_ASSERT(!zone->wasGCStarted());
     MOZ_ASSERT(!zone->needsIncrementalBarrier());
     MOZ_ASSERT(!zone->isOnList());
   }
@@ -6355,17 +6338,17 @@ GCRuntime::IncrementalResult GCRuntime::
       for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
         ResetGrayList(c);
       }

       for (GCZonesIter zone(this); !zone.done(); zone.next()) {
         zone->setNeedsIncrementalBarrier(false);
         zone->changeGCState(Zone::MarkBlackOnly, Zone::NoGC);
-        zone->gcDelayBytes = 0;
+        zone->clearGCSliceThresholds();
         zone->arenas.unmarkPreMarkedFreeCells();
       }

       {
         AutoLockHelperThreadState lock;
         lifoBlocksToFree.ref().freeAll();
       }
@@ -6914,17 +6897,17 @@ static void ScheduleZones(GCRuntime* gc)
     }

     // This is a heuristic to reduce the total number of collections.
     bool inHighFrequencyMode = gc->schedulingState.inHighFrequencyGCMode();
     if (zone->gcHeapSize.bytes() >=
             zone->gcHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
         zone->mallocHeapSize.bytes() >=
             zone->mallocHeapThreshold.eagerAllocTrigger(inHighFrequencyMode) ||
-        zone->jitHeapSize.bytes() >= zone->jitHeapThreshold.bytes()) {
+        zone->jitHeapSize.bytes() >= zone->jitHeapThreshold.startBytes()) {
       zone->scheduleGC();
     }
   }
 }

 static void UnscheduleZones(GCRuntime* gc) {
   for (ZonesIter zone(gc->rt, WithAtoms); !zone.done(); zone.next()) {
     zone->unscheduleGC();
@@ -7184,16 +7167,43 @@ bool GCRuntime::shouldRepeatForDeadZone(
     if (c->gcState.scheduledForDestruction) {
       return true;
     }
   }

   return false;
 }

+struct MOZ_RAII AutoSetZoneSliceThresholds {
+  explicit AutoSetZoneSliceThresholds(GCRuntime* gc) : gc(gc) {
+    // On entry, zones that are already collecting should have a slice threshold
+    // set.
+    for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
+      MOZ_ASSERT(zone->wasGCStarted() ==
+                 zone->gcHeapThreshold.hasSliceThreshold());
+      MOZ_ASSERT(zone->wasGCStarted() ==
+                 zone->mallocHeapThreshold.hasSliceThreshold());
+    }
+  }
+
+  ~AutoSetZoneSliceThresholds() {
+    // On exit, update the thresholds for all collecting zones.
+    for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
+      if (zone->wasGCStarted()) {
+        zone->setGCSliceThresholds(*gc);
+      } else {
+        MOZ_ASSERT(!zone->gcHeapThreshold.hasSliceThreshold());
+        MOZ_ASSERT(!zone->mallocHeapThreshold.hasSliceThreshold());
+      }
+    }
+  }
+
+  GCRuntime* gc;
+};
+
 void GCRuntime::collect(bool nonincrementalByAPI, SliceBudget budget,
                         const MaybeInvocationKind& gckindArg,
                         JS::GCReason reason) {
   MOZ_ASSERT(reason != JS::GCReason::NO_REASON);

   MaybeInvocationKind gckind = gckindArg;
   MOZ_ASSERT_IF(!isIncrementalGCInProgress(), gckind.isSome());
@@ -7207,16 +7217,17 @@ void GCRuntime::collect(bool nonincremen
   stats().writeLogMessage("GC starting in state %s",
                           StateName(incrementalState));

   AutoTraceLog logGC(TraceLoggerForCurrentThread(), TraceLogger_GC);
   AutoStopVerifyingBarriers av(rt, IsShutdownGC(reason));
   AutoEnqueuePendingParseTasksAfterGC aept(*this);
   AutoMaybeLeaveAtomsZone leaveAtomsZone(rt->mainContextFromOwnThread());
+  AutoSetZoneSliceThresholds sliceThresholds(this);

 #ifdef DEBUG
   if (IsShutdownGC(reason)) {
     marker.markQueue.clear();
     marker.queuePos = 0;
   }
 #endif
@@ -8367,17 +8378,17 @@ static bool GCSliceCountGetter(JSContext
 static bool ZoneGCBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
   args.rval().setNumber(double(cx->zone()->gcHeapSize.bytes()));
   return true;
 }

 static bool ZoneGCTriggerBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->zone()->gcHeapThreshold.bytes()));
+  args.rval().setNumber(double(cx->zone()->gcHeapThreshold.startBytes()));
   return true;
 }

 static bool ZoneGCAllocTriggerGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
   bool highFrequency =
       cx->runtime()->gc.schedulingState.inHighFrequencyGCMode();
   args.rval().setNumber(
@@ -8389,23 +8400,17 @@ static bool ZoneMallocBytesGetter(JSCont
   CallArgs args = CallArgsFromVp(argc, vp);
   args.rval().setNumber(double(cx->zone()->mallocHeapSize.bytes()));
   return true;
 }

 static bool ZoneMallocTriggerBytesGetter(JSContext* cx, unsigned argc,
                                          Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->zone()->mallocHeapThreshold.bytes()));
-  return true;
-}
-
-static bool ZoneGCDelayBytesGetter(JSContext* cx, unsigned argc, Value* vp) {
-  CallArgs args = CallArgsFromVp(argc, vp);
-  args.rval().setNumber(double(cx->zone()->gcDelayBytes));
+  args.rval().setNumber(double(cx->zone()->mallocHeapThreshold.startBytes()));
   return true;
 }

 static bool ZoneGCNumberGetter(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
   args.rval().setNumber(double(cx->zone()->gcNumber()));
   return true;
 }
@@ -8462,17 +8467,16 @@ JSObject* NewMemoryInfoObject(JSContext*
   struct NamedZoneGetter {
     const char* name;
     JSNative getter;
   } zoneGetters[] = {{"gcBytes", ZoneGCBytesGetter},
                      {"gcTriggerBytes", ZoneGCTriggerBytesGetter},
                      {"gcAllocTrigger", ZoneGCAllocTriggerGetter},
                      {"mallocBytes", ZoneMallocBytesGetter},
                      {"mallocTriggerBytes", ZoneMallocTriggerBytesGetter},
-                     {"delayBytes", ZoneGCDelayBytesGetter},
                      {"gcNumber", ZoneGCNumberGetter}};

   for (auto pair : zoneGetters) {
 #ifdef JS_MORE_DETERMINISTIC
     JSNative getter = DummyGetter;
 #else
     JSNative getter = pair.getter;
 #endif
@@ -8742,11 +8746,11 @@ void GCRuntime::setPerformanceHint(Perfo
   bool inPageLoad = inPageLoadCount != 0;
   if (inPageLoad == wasInPageLoad) {
     return;
   }

   AutoLockGC lock(this);
   schedulingState.inPageLoad = inPageLoad;
-  atomsZone->updateGCThresholds(*this, invocationKind, lock);
+  atomsZone->updateGCStartThresholds(*this, invocationKind, lock);
   maybeAllocTriggerZoneGC(atomsZone);
 }

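AutoSetZoneSliceThresholds wraps collect() as a scope guard: whichever path the collection takes, zones that are still collecting leave the scope with a freshly computed slice threshold, and all other zones with none. A sketch of the same RAII idea using hypothetical stand-in types, not the real GCRuntime:

#include <cstdio>
#include <vector>

struct FakeZone {
  bool collecting = false;
  bool hasSliceThreshold = false;
  void setSliceThreshold() { hasSliceThreshold = true; }
};

// Sketch of the RAII idea: on destruction, refresh slice thresholds for zones
// that are still collecting so the next slice is paced from the new heap size.
class AutoSetSliceThresholds {
  std::vector<FakeZone>& zones;

 public:
  explicit AutoSetSliceThresholds(std::vector<FakeZone>& zones)
      : zones(zones) {}
  ~AutoSetSliceThresholds() {
    for (FakeZone& zone : zones) {
      if (zone.collecting) {
        zone.setSliceThreshold();
      }
    }
  }
};

int main() {
  std::vector<FakeZone> zones(2);
  zones[0].collecting = true;  // pretend the first zone is mid-collection
  {
    AutoSetSliceThresholds guard(zones);
    // ... a GC slice would run here ...
  }
  std::printf("zone0: %d, zone1: %d\n", zones[0].hasSliceThreshold,
              zones[1].hasSliceThreshold);  // prints 1, 0
}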
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -309,17 +309,17 @@ class GCRuntime {
   uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);

   void setPerformanceHint(PerformanceHint hint);

   MOZ_MUST_USE bool triggerGC(JS::GCReason reason);

   // Check whether to trigger a zone GC after allocating GC cells. During an
   // incremental GC, optionally count |nbytes| towards the threshold for
   // performing the next slice.
-  void maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes = 0);
+  void maybeAllocTriggerZoneGC(Zone* zone);

   // Check whether to trigger a zone GC after malloc memory.
   void maybeMallocTriggerZoneGC(Zone* zone);
   bool maybeMallocTriggerZoneGC(Zone* zone, const HeapSize& heap,
                                 const HeapThreshold& threshold,
                                 JS::GCReason reason);

   // The return value indicates if we were able to do the GC.
   bool triggerZoneGC(Zone* zone, JS::GCReason reason, size_t usedBytes,
                      size_t thresholdBytes);

--- a/js/src/gc/Scheduling.cpp
+++ b/js/src/gc/Scheduling.cpp
@@ -335,33 +335,40 @@ void GCSchedulingTunables::resetParamete
       break;
     default:
       MOZ_CRASH("Unknown GC parameter.");
   }
 }

 size_t HeapThreshold::nonIncrementalBytes(
     ZoneAllocator* zone, const GCSchedulingTunables& tunables) const {
-  size_t bytes = bytes_ * tunables.nonIncrementalFactor();
+  size_t bytes = startBytes_ * tunables.nonIncrementalFactor();

   // Increase the non-incremental threshold when we start background sweeping
   // for the zone. The splay latency benchmark depends on this to avoid pauses
   // due to non-incremental GC.
   if (zone->gcState() > ZoneAllocator::Sweep) {
     bytes *= tunables.lowFrequencyHeapGrowth();
   }

   return bytes;
 }

 float HeapThreshold::eagerAllocTrigger(bool highFrequencyGC) const {
   float eagerTriggerFactor = highFrequencyGC
                                  ? HighFrequencyEagerAllocTriggerFactor
                                  : LowFrequencyEagerAllocTriggerFactor;
-  return eagerTriggerFactor * bytes();
+  return eagerTriggerFactor * startBytes();
+}
+
+void HeapThreshold::setSliceThreshold(ZoneAllocator* zone,
+                                      const HeapSize& heapSize,
+                                      const GCSchedulingTunables& tunables) {
+  sliceBytes_ = std::min(heapSize.bytes() + tunables.zoneAllocDelayBytes(),
+                         nonIncrementalBytes(zone, tunables));
 }

 /* static */
 float GCHeapThreshold::computeZoneHeapGrowthFactorForHeapSize(
     size_t lastBytes, const GCSchedulingTunables& tunables,
     const GCSchedulingState& state) {
   if (!tunables.isDynamicHeapGrowthEnabled()) {
     return 3.0f;
@@ -420,45 +427,49 @@ size_t GCHeapThreshold::computeZoneTrigg
                     : tunables.gcZoneAllocThresholdBase();
   size_t base = std::max(lastBytes, baseMin);
   float trigger = float(base) * growthFactor;
   float triggerMax =
       float(tunables.gcMaxBytes()) / tunables.nonIncrementalFactor();
   return size_t(std::min(triggerMax, trigger));
 }

-void GCHeapThreshold::updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
-                                    const GCSchedulingTunables& tunables,
-                                    const GCSchedulingState& state,
-                                    bool isAtomsZone, const AutoLockGC& lock) {
+void GCHeapThreshold::updateStartThreshold(size_t lastBytes,
+                                           JSGCInvocationKind gckind,
+                                           const GCSchedulingTunables& tunables,
+                                           const GCSchedulingState& state,
+                                           bool isAtomsZone,
+                                           const AutoLockGC& lock) {
   float growthFactor =
       computeZoneHeapGrowthFactorForHeapSize(lastBytes, tunables, state);

   // Discourage collection of the atoms zone during page load as this can block
   // off-thread parsing.
   if (isAtomsZone && state.inPageLoad) {
     growthFactor *= 1.5;
   }

-  bytes_ =
+  startBytes_ =
       computeZoneTriggerBytes(growthFactor, lastBytes, gckind, tunables, lock);
 }

 /* static */
 size_t MallocHeapThreshold::computeZoneTriggerBytes(float growthFactor,
                                                     size_t lastBytes,
                                                     size_t baseBytes,
                                                     const AutoLockGC& lock) {
   return size_t(float(std::max(lastBytes, baseBytes)) * growthFactor);
 }

-void MallocHeapThreshold::updateAfterGC(size_t lastBytes, size_t baseBytes,
-                                        float growthFactor,
-                                        const AutoLockGC& lock) {
-  bytes_ = computeZoneTriggerBytes(growthFactor, lastBytes, baseBytes, lock);
+void MallocHeapThreshold::updateStartThreshold(size_t lastBytes,
+                                               size_t baseBytes,
+                                               float growthFactor,
+                                               const AutoLockGC& lock) {
+  startBytes_ =
+      computeZoneTriggerBytes(growthFactor, lastBytes, baseBytes, lock);
 }

 #ifdef DEBUG

 void MemoryTracker::adopt(MemoryTracker& other) {
   LockGuard<Mutex> lock(mutex);

   AutoEnterOOMUnsafeRegion oomUnsafe;

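setSliceThreshold is what replaces the old gcDelayBytes countdown: the next slice is due once the heap has grown by zoneAllocDelayBytes beyond its size at the point the threshold was set, capped at the non-incremental limit. A small worked example with assumed values:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Assumed values, roughly in the spirit of the tunables.
  size_t heapBytes = 40u << 20;            // heap size when the slice starts
  size_t zoneAllocDelayBytes = 1u << 20;   // pacing between slices
  size_t nonIncrementalBytes = 45u << 20;  // hard cap before a full GC

  // sliceBytes_ = min(current heap + delay, non-incremental threshold)
  size_t sliceBytes =
      std::min(heapBytes + zoneAllocDelayBytes, nonIncrementalBytes);
  std::printf("next slice at %zu MiB\n", sliceBytes >> 20);  // prints 41
}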
--- a/js/src/gc/Scheduling.h
+++ b/js/src/gc/Scheduling.h
@@ -740,74 +740,84 @@ class HeapSize {
     // Skip retainedBytes_: we never adopt zones that are currently being
     // collected.
     bytes_ += source.bytes_;
     source.retainedBytes_ = 0;
     source.bytes_ = 0;
   }
 };

-// A heap size threshold used to trigger GC. This is an abstract base class for
+// Heap size thresholds used to trigger GC. This is an abstract base class for
 // GC heap and malloc thresholds defined below.
 class HeapThreshold {
  protected:
-  HeapThreshold() = default;
+  HeapThreshold() : startBytes_(SIZE_MAX), sliceBytes_(SIZE_MAX) {}

-  // GC trigger threshold.
+  // The threshold at which to start a new collection.
   //
   // TODO: This is currently read off-thread during parsing, but at some point
   // we should be able to make this MainThreadData<>.
-  AtomicByteCount bytes_;
+  AtomicByteCount startBytes_;
+
+  // The threshold at which to trigger a slice during an ongoing incremental
+  // collection.
+  size_t sliceBytes_;

  public:
-  size_t bytes() const { return bytes_; }
+  size_t startBytes() const { return startBytes_; }
+  size_t sliceBytes() const { return sliceBytes_; }
   size_t nonIncrementalBytes(ZoneAllocator* zone,
                              const GCSchedulingTunables& tunables) const;
   float eagerAllocTrigger(bool highFrequencyGC) const;
+
+  void setSliceThreshold(ZoneAllocator* zone, const HeapSize& heapSize,
+                         const GCSchedulingTunables& tunables);
+  void clearSliceThreshold() { sliceBytes_ = SIZE_MAX; }
+  bool hasSliceThreshold() const { return sliceBytes_ != SIZE_MAX; }
 };

 // A heap threshold that is based on a multiple of the retained size after the
 // last collection adjusted based on collection frequency and retained
 // size. This is used to determine when to do a zone GC based on GC heap size.
 class GCHeapThreshold : public HeapThreshold {
  public:
-  void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
-                     const GCSchedulingTunables& tunables,
-                     const GCSchedulingState& state, bool isAtomsZone,
-                     const AutoLockGC& lock);
+  void updateStartThreshold(size_t lastBytes, JSGCInvocationKind gckind,
+                            const GCSchedulingTunables& tunables,
+                            const GCSchedulingState& state, bool isAtomsZone,
+                            const AutoLockGC& lock);

  private:
   static float computeZoneHeapGrowthFactorForHeapSize(
       size_t lastBytes, const GCSchedulingTunables& tunables,
       const GCSchedulingState& state);
   static size_t computeZoneTriggerBytes(float growthFactor, size_t lastBytes,
                                         JSGCInvocationKind gckind,
                                         const GCSchedulingTunables& tunables,
                                         const AutoLockGC& lock);
 };

 // A heap threshold that is calculated as a constant multiple of the retained
 // size after the last collection. This is used to determines when to do a zone
 // GC based on malloc data.
 class MallocHeapThreshold : public HeapThreshold {
  public:
-  void updateAfterGC(size_t lastBytes, size_t baseBytes, float growthFactor,
-                     const AutoLockGC& lock);
+  void updateStartThreshold(size_t lastBytes, size_t baseBytes,
+                            float growthFactor, const AutoLockGC& lock);

  private:
   static size_t computeZoneTriggerBytes(float growthFactor, size_t lastBytes,
                                         size_t baseBytes,
                                         const AutoLockGC& lock);
 };

 // A fixed threshold that's used to determine when we need to do a zone GC based
 // on allocated JIT code.
 class JitHeapThreshold : public HeapThreshold {
  public:
-  explicit JitHeapThreshold(size_t bytes) { bytes_ = bytes; }
+  explicit JitHeapThreshold(size_t bytes) { startBytes_ = bytes; }
 };

 struct SharedMemoryUse {
   explicit SharedMemoryUse(MemoryUse use) : count(0), nbytes(0) {
 #ifdef DEBUG
     this->use = use;
 #endif
   }

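Within HeapThreshold, SIZE_MAX serves as the "no slice threshold" sentinel, so an unset slice threshold can never be reached by a real heap size and hasSliceThreshold() doubles as a cheap "is this zone mid-collection" check. A compact sketch of the same sentinel pattern (not the real class):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Sketch of the sentinel idea: SIZE_MAX means "no slice threshold", so an
// unset threshold can never be crossed by a real heap size.
class Threshold {
  size_t startBytes_ = SIZE_MAX;
  size_t sliceBytes_ = SIZE_MAX;

 public:
  void setSliceThreshold(size_t bytes) { sliceBytes_ = bytes; }
  void clearSliceThreshold() { sliceBytes_ = SIZE_MAX; }
  bool hasSliceThreshold() const { return sliceBytes_ != SIZE_MAX; }
};

int main() {
  Threshold t;
  std::printf("%d\n", t.hasSliceThreshold());  // 0
  t.setSliceThreshold(1u << 20);
  std::printf("%d\n", t.hasSliceThreshold());  // 1
  t.clearSliceThreshold();
  std::printf("%d\n", t.hasSliceThreshold());  // 0
}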
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -54,28 +54,40 @@ void ZoneAllocator::fixupAfterMovingGC()
 #endif
 }

 void js::ZoneAllocator::updateMemoryCountersOnGCStart() {
   gcHeapSize.updateOnGCStart();
   mallocHeapSize.updateOnGCStart();
 }

-void js::ZoneAllocator::updateGCThresholds(GCRuntime& gc,
-                                           JSGCInvocationKind invocationKind,
-                                           const js::AutoLockGC& lock) {
+void js::ZoneAllocator::updateGCStartThresholds(
+    GCRuntime& gc, JSGCInvocationKind invocationKind,
+    const js::AutoLockGC& lock) {
   // This is called repeatedly during a GC to update thresholds as memory is
   // freed.
   bool isAtomsZone = JS::Zone::from(this)->isAtomsZone();
-  gcHeapThreshold.updateAfterGC(gcHeapSize.retainedBytes(), invocationKind,
-                                gc.tunables, gc.schedulingState, isAtomsZone,
-                                lock);
-  mallocHeapThreshold.updateAfterGC(mallocHeapSize.retainedBytes(),
-                                    gc.tunables.mallocThresholdBase(),
-                                    gc.tunables.mallocGrowthFactor(), lock);
+  gcHeapThreshold.updateStartThreshold(gcHeapSize.retainedBytes(),
+                                       invocationKind, gc.tunables,
+                                       gc.schedulingState, isAtomsZone, lock);
+  mallocHeapThreshold.updateStartThreshold(
+      mallocHeapSize.retainedBytes(), gc.tunables.mallocThresholdBase(),
+      gc.tunables.mallocGrowthFactor(), lock);
+}
+
+void js::ZoneAllocator::setGCSliceThresholds(GCRuntime& gc) {
+  gcHeapThreshold.setSliceThreshold(this, gcHeapSize, gc.tunables);
+  mallocHeapThreshold.setSliceThreshold(this, mallocHeapSize, gc.tunables);
+  jitHeapThreshold.setSliceThreshold(this, jitHeapSize, gc.tunables);
+}
+
+void js::ZoneAllocator::clearGCSliceThresholds() {
+  gcHeapThreshold.clearSliceThreshold();
+  mallocHeapThreshold.clearSliceThreshold();
+  jitHeapThreshold.clearSliceThreshold();
 }

 bool ZoneAllocator::addSharedMemory(void* mem, size_t nbytes, MemoryUse use) {
   // nbytes can be zero here for SharedArrayBuffers.
   MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));

   auto ptr = sharedMemoryUseCounts.lookupForAdd(mem);
@@ -174,19 +186,19 @@ JS::Zone::Zone(JSRuntime* rt)
       wasCollected_(false),
       listNext_(NotOnList),
       weakRefMap_(this, this),
       keptObjects(this, this) {
   /* Ensure that there are no vtables to mess us up here. */
   MOZ_ASSERT(reinterpret_cast<JS::shadow::Zone*>(this) ==
              static_cast<JS::shadow::Zone*>(this));

-  // We can't call updateGCThresholds until the Zone has been constructed.
+  // We can't call updateGCStartThresholds until the Zone has been constructed.
   AutoLockGC lock(rt);
-  updateGCThresholds(rt->gc, GC_NORMAL, lock);
+  updateGCStartThresholds(rt->gc, GC_NORMAL, lock);
 }

 Zone::~Zone() {
   MOZ_ASSERT(helperThreadUse_ == HelperThreadUse::None);
   MOZ_ASSERT(gcWeakMapList().isEmpty());
   MOZ_ASSERT_IF(regExps_.ref(), regExps().empty());

   JSRuntime* rt = runtimeFromAnyThread();

--- a/js/src/gc/ZoneAllocator.h
+++ b/js/src/gc/ZoneAllocator.h
@@ -60,18 +60,21 @@ class ZoneAllocator : public JS::shadow:
     mallocHeapSize.adopt(other->mallocHeapSize);
     jitHeapSize.adopt(other->jitHeapSize);
 #ifdef DEBUG
     mallocTracker.adopt(other->mallocTracker);
 #endif
   }

   void updateMemoryCountersOnGCStart();
-  void updateGCThresholds(gc::GCRuntime& gc, JSGCInvocationKind invocationKind,
-                          const js::AutoLockGC& lock);
+  void updateGCStartThresholds(gc::GCRuntime& gc,
+                               JSGCInvocationKind invocationKind,
+                               const js::AutoLockGC& lock);
+  void setGCSliceThresholds(gc::GCRuntime& gc);
+  void clearGCSliceThresholds();

   // Memory accounting APIs for malloc memory owned by GC cells.

   void addCellMemory(js::gc::Cell* cell, size_t nbytes, js::MemoryUse use) {
     MOZ_ASSERT(cell);
     MOZ_ASSERT(nbytes);

     mallocHeapSize.addBytes(nbytes);
@@ -159,33 +162,29 @@ class ZoneAllocator : public JS::shadow:
     maybeTriggerZoneGC(mallocHeapSize, mallocHeapThreshold,
                        JS::GCReason::TOO_MUCH_MALLOC);
   }

  private:
   void maybeTriggerZoneGC(const js::gc::HeapSize& heap,
                           const js::gc::HeapThreshold& threshold,
                           JS::GCReason reason) {
-    if (heap.bytes() >= threshold.bytes()) {
+    if (heap.bytes() >= threshold.startBytes()) {
       gc::MaybeMallocTriggerZoneGC(runtimeFromAnyThread(), this, heap,
                                    threshold, reason);
     }
   }

  public:
   // The size of allocated GC arenas in this zone.
   gc::HeapSize gcHeapSize;

   // Threshold used to trigger GC based on GC heap size.
   gc::GCHeapThreshold gcHeapThreshold;

-  // Amount of data to allocate before triggering a new incremental slice for
-  // the current GC.
-  MainThreadData<size_t> gcDelayBytes;
-
   // Amount of malloc data owned by tenured GC things in this zone, including
   // external allocations supplied by JS::AddAssociatedMemory.
   gc::HeapSize mallocHeapSize;

   // Threshold used to trigger GC based on malloc allocations.
   gc::MallocHeapThreshold mallocHeapThreshold;

   // Amount of exectuable JIT code owned by GC things in this zone.

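Taken together, the ZoneAllocator changes give each zone a simple threshold lifecycle: slice thresholds are set while the zone is collecting and refreshed at slice boundaries, cleared when the collection finishes, resets, or aborts, and the start threshold is recomputed from the retained size. A hedged sketch of that sequence with hypothetical stand-ins, not the real class:

#include <cstdio>

// Hypothetical stand-in for one zone's threshold state across an incremental GC.
struct ZoneThresholds {
  bool hasSlice = false;
  void setGCSliceThresholds() { hasSlice = true; }     // at slice boundaries
  void clearGCSliceThresholds() { hasSlice = false; }  // finish/reset/abort
  void updateGCStartThresholds() {
    // would recompute the start trigger from the retained size
  }
};

int main() {
  ZoneThresholds zone;
  zone.setGCSliceThresholds();    // GC started: pace the next slice
  zone.setGCSliceThresholds();    // after each slice, re-pace from the new size
  zone.clearGCSliceThresholds();  // collection finished for this zone
  zone.updateGCStartThresholds(); // and the start trigger is recomputed
  std::printf("slice threshold active: %d\n", zone.hasSlice);  // prints 0
}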