Bug 1374797 - Fix logic around triggering atoms GCs r=sfink
author Jon Coppeard <jcoppeard@mozilla.com>
Tue, 25 Jul 2017 11:28:41 +0100
changeset 419569 e33e38d61829005fe095310a1036cf90781eec26
parent 419568 4ce87710ff85998f8afcf821e961fe6ee03554bb
child 419570 2e0e609aa3ca08298870542650011104871c3425
push id 7566
push user mtabara@mozilla.com
push date Wed, 02 Aug 2017 08:25:16 +0000
treeherder mozilla-beta@86913f512c3c [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers sfink
bugs 1374797
milestone56.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1374797 - Fix logic around triggering atoms GCs r=sfink
js/public/GCAPI.h
js/src/gc/GCRuntime.h
js/src/gc/Zone.h
js/src/jit-test/tests/gc/bug-1374797.js
js/src/jscntxt.h
js/src/jsgc.cpp
js/src/vm/Runtime.cpp
--- a/js/public/GCAPI.h
+++ b/js/public/GCAPI.h
@@ -60,17 +60,17 @@ namespace JS {
     D(LAST_DITCH)                               \
     D(TOO_MUCH_MALLOC)                          \
     D(ALLOC_TRIGGER)                            \
     D(DEBUG_GC)                                 \
     D(COMPARTMENT_REVIVED)                      \
     D(RESET)                                    \
     D(OUT_OF_NURSERY)                           \
     D(EVICT_NURSERY)                            \
-    D(UNUSED0)                                  \
+    D(DELAYED_ATOMS_GC)                         \
     D(SHARED_MEMORY_LIMIT)                      \
     D(UNUSED1)                                  \
     D(INCREMENTAL_TOO_SLOW)                     \
     D(ABORT_GC)                                 \
     D(FULL_WHOLE_CELL_BUFFER)                   \
     D(FULL_GENERIC_BUFFER)                      \
     D(FULL_VALUE_BUFFER)                        \
     D(FULL_CELL_PTR_BUFFER)                     \
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -708,21 +708,17 @@ class GCRuntime
     void gcSlice(JS::gcreason::Reason reason, int64_t millis = 0);
     void finishGC(JS::gcreason::Reason reason);
     void abortGC();
     void startDebugGC(JSGCInvocationKind gckind, SliceBudget& budget);
     void debugGCSlice(SliceBudget& budget);
 
     bool canChangeActiveContext(JSContext* cx);
 
-    void triggerFullGCForAtoms() {
-        MOZ_ASSERT(fullGCForAtomsRequested_);
-        fullGCForAtomsRequested_ = false;
-        MOZ_RELEASE_ASSERT(triggerGC(JS::gcreason::ALLOC_TRIGGER));
-    }
+    void triggerFullGCForAtoms(JSContext* cx);
 
     void runDebugGC();
     void notifyRootsRemoved();
 
     enum TraceOrMarkRuntime {
         TraceRuntime,
         MarkRuntime
     };
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -205,21 +205,23 @@ struct Zone : public JS::shadow::Zone,
     void reportAllocationOverflow() { js::ReportAllocationOverflow(nullptr); }
 
     void beginSweepTypes(js::FreeOp* fop, bool releaseTypes);
 
     bool hasMarkedCompartments();
 
     void scheduleGC() { MOZ_ASSERT(!CurrentThreadIsHeapBusy()); gcScheduled_ = true; }
     void unscheduleGC() { gcScheduled_ = false; }
-    bool isGCScheduled() { return gcScheduled_ && canCollect(); }
+    bool isGCScheduled() { return gcScheduled_; }
 
     void setPreservingCode(bool preserving) { gcPreserveCode_ = preserving; }
     bool isPreservingCode() const { return gcPreserveCode_; }
 
+    // Whether this zone can currently be collected. This doesn't take account
+    // of AutoKeepAtoms for the atoms zone.
     bool canCollect();
 
     void changeGCState(GCState prev, GCState next) {
         MOZ_ASSERT(CurrentThreadIsHeapBusy());
         MOZ_ASSERT(gcState() == prev);
         MOZ_ASSERT_IF(next != NoGC, canCollect());
         gcState_ = next;
     }
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/gc/bug-1374797.js
@@ -0,0 +1,27 @@
+// Exercise triggering GC of atoms zone while off-thread parsing is happening.
+
+if (helperThreadCount() === 0)
+   quit();
+
+gczeal(0);
+
+// Reduce some GC parameters so that we can trigger a GC more easily.
+gcparam('lowFrequencyHeapGrowth', 120);
+gcparam('highFrequencyHeapGrowthMin', 120);
+gcparam('highFrequencyHeapGrowthMax', 120);
+gcparam('allocationThreshold', 1);
+gc();
+
+// Start an off-thread parse.
+offThreadCompileScript("print('Finished')");
+
+// Allocate lots of atoms, parsing occasionally.
+for (let i = 0; i < 10; i++) {
+    print(i);
+    for (let j = 0; j < 10000; j++)
+        Symbol.for(i + 10 * j);
+    eval(`${i}`);
+}
+
+// Finish the off-thread parse.
+runOffThreadScript();
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -303,16 +303,17 @@ struct JSContext : public JS::RootingCon
     }
 
     // Methods specific to any HelperThread for the context.
     bool addPendingCompileError(js::CompileError** err);
     void addPendingOverRecursed();
     void addPendingOutOfMemory();
 
     JSRuntime* runtime() { return runtime_; }
+    const JSRuntime* runtime() const { return runtime_; }
 
     static size_t offsetOfCompartment() {
         return offsetof(JSContext, compartment_);
     }
 
     friend class JS::AutoSaveExceptionState;
     friend class js::jit::DebugModeOSRVolatileJitFrameIterator;
     friend void js::ReportOverRecursed(JSContext*, unsigned errorNumber);
@@ -562,16 +563,20 @@ struct JSContext : public JS::RootingCon
     // of any such threads also inhibits collection of atoms. We don't scan the
     // stacks of exclusive threads, so we need to avoid collecting their
     // objects in another way. The only GC thing pointers they have are to
     // their exclusive compartment (which is not collected) or to the atoms
     // compartment. Therefore, we avoid collecting the atoms compartment when
     // exclusive threads are running.
     js::ThreadLocalData<unsigned> keepAtoms;
 
+    bool canCollectAtoms() const {
+        return !keepAtoms && !runtime()->hasHelperThreadZones();
+    }
+
   private:
     // Pools used for recycling name maps and vectors when parsing and
     // emitting bytecode. Purged on GC when there are no active script
     // compilations.
     js::ThreadLocalData<js::frontend::NameCollectionPool> frontendCollectionPool_;
   public:
 
     js::frontend::NameCollectionPool& frontendCollectionPool() {
@@ -1251,18 +1256,18 @@ class MOZ_RAII AutoKeepAtoms
         cx->keepAtoms++;
     }
     ~AutoKeepAtoms() {
         MOZ_ASSERT(cx->keepAtoms);
         cx->keepAtoms--;
 
         JSRuntime* rt = cx->runtime();
         if (!cx->helperThread()) {
-            if (rt->gc.fullGCForAtomsRequested() && !cx->keepAtoms)
-                rt->gc.triggerFullGCForAtoms();
+            if (rt->gc.fullGCForAtomsRequested() && cx->canCollectAtoms())
+                rt->gc.triggerFullGCForAtoms(cx);
         }
     }
 };
 
 // Debugging RAII class which marks the current thread as performing an Ion
 // compilation, for use by CurrentThreadCan{Read,Write}CompilationData
 class MOZ_RAII AutoEnterIonCompilation
 {
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -3125,16 +3125,28 @@ GCRuntime::maybeGC(Zone* zone)
         !isIncrementalGCInProgress() && !isBackgroundSweeping())
     {
         stats().recordTrigger(usedBytes, threshold);
         PrepareZoneForGC(zone);
         startGC(GC_NORMAL, JS::gcreason::EAGER_ALLOC_TRIGGER);
     }
 }
 
+void
+GCRuntime::triggerFullGCForAtoms(JSContext* cx)
+{
+    MOZ_ASSERT(fullGCForAtomsRequested_);
+    MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
+    MOZ_ASSERT(!cx->keepAtoms);
+    MOZ_ASSERT(!rt->hasHelperThreadZones());
+    fullGCForAtomsRequested_ = false;
+    MOZ_RELEASE_ASSERT(triggerGC(JS::gcreason::DELAYED_ATOMS_GC));
+}
+
 // Do all possible decommit immediately from the current thread without
 // releasing the GC lock or allocating any memory.
 void
 GCRuntime::decommitAllWithoutUnlocking(const AutoLockGC& lock)
 {
     MOZ_ASSERT(emptyChunks(lock).count() == 0);
     for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done(); chunk.next())
         chunk->decommitAllArenasWithoutUnlocking(lock);
@@ -3850,28 +3862,50 @@ RelazifyFunctions(Zone* zone, AllocKind 
         if (fun->hasScript())
             fun->maybeRelazify(rt);
     }
 }
 
 static bool
 ShouldCollectZone(Zone* zone, JS::gcreason::Reason reason)
 {
-    // Normally we collect all scheduled zones.
-    if (reason != JS::gcreason::COMPARTMENT_REVIVED)
-        return zone->isGCScheduled();
-
-    // If we are repeating a GC becuase we noticed dead compartments haven't
+    // If we are repeating a GC because we noticed dead compartments haven't
     // been collected, then only collect zones contianing those compartments.
-    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
-        if (comp->scheduledForDestruction)
-            return true;
-    }
-
-    return false;
+    if (reason == JS::gcreason::COMPARTMENT_REVIVED) {
+        for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+            if (comp->scheduledForDestruction)
+                return true;
+        }
+
+        return false;
+    }
+
+    // Otherwise we only collect scheduled zones.
+    if (!zone->isGCScheduled())
+        return false;
+
+    // If canCollectAtoms() is false then either an instance of AutoKeepAtoms is
+    // currently on the stack or parsing is currently happening on another
+    // thread. In either case we don't have information about which atoms are
+    // roots, so we must skip collecting atoms.
+    //
+    // Note that this only affects the first slice of an incremental GC since root
+    // marking is completed before we return to the mutator.
+    //
+    // Off-thread parsing is inhibited after the start of GC which prevents
+    // races between creating atoms during parsing and sweeping atoms on the
+    // active thread.
+    //
+    // Otherwise, we always schedule a GC in the atoms zone so that atoms which
+    // the other collected zones are using are marked, and we can update the
+    // set of atoms in use by the other collected zones at the end of the GC.
+    if (zone->isAtomsZone())
+        return TlsContext.get()->canCollectAtoms();
+
+    return true;
 }
 
 bool
 GCRuntime::prepareZonesForCollection(JS::gcreason::Reason reason, bool* isFullOut,
                                      AutoLockForExclusiveAccess& lock)
 {
 #ifdef DEBUG
     /* Assert that zone state is as we expect */
@@ -3886,20 +3920,19 @@ GCRuntime::prepareZonesForCollection(JS:
     *isFullOut = true;
     bool any = false;
 
     int64_t currentTime = PRMJ_Now();
 
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         /* Set up which zones will be collected. */
         if (ShouldCollectZone(zone, reason)) {
-            if (!zone->isAtomsZone()) {
-                any = true;
-                zone->changeGCState(Zone::NoGC, Zone::Mark);
-            }
+            MOZ_ASSERT(zone->canCollect());
+            any = true;
+            zone->changeGCState(Zone::NoGC, Zone::Mark);
         } else {
             *isFullOut = false;
         }
 
         zone->setPreservingCode(false);
     }
 
     // Discard JIT code more aggressively if the process is approaching its
@@ -3916,40 +3949,20 @@ GCRuntime::prepareZonesForCollection(JS:
 
     if (!cleanUpEverything && canAllocateMoreCode) {
         jit::JitActivationIterator activation(TlsContext.get());
         if (!activation.done())
             activation->compartment()->zone()->setPreservingCode(true);
     }
 
     /*
-     * If keepAtoms() is true then either an instance of AutoKeepAtoms is
-     * currently on the stack or parsing is currently happening on another
-     * thread. In either case we don't have information about which atoms are
-     * roots, so we must skip collecting atoms.
-     *
-     * Note that only affects the first slice of an incremental GC since root
-     * marking is completed before we return to the mutator.
-     *
-     * Off-thread parsing is inhibited after the start of GC which prevents
-     * races between creating atoms during parsing and sweeping atoms on the
-     * active thread.
-     *
-     * Otherwise, we always schedule a GC in the atoms zone so that atoms which
-     * the other collected zones are using are marked, and we can update the
-     * set of atoms in use by the other collected zones at the end of the GC.
+     * Check that we do collect the atoms zone if we triggered a GC for that
+     * purpose.
      */
-    if (!TlsContext.get()->keepAtoms || rt->hasHelperThreadZones()) {
-        Zone* atomsZone = rt->atomsCompartment(lock)->zone();
-        if (atomsZone->isGCScheduled()) {
-            MOZ_ASSERT(!atomsZone->isCollecting());
-            atomsZone->changeGCState(Zone::NoGC, Zone::Mark);
-            any = true;
-        }
-    }
+    MOZ_ASSERT_IF(reason == JS::gcreason::DELAYED_ATOMS_GC, atomsZone->isGCMarking());
 
     /* Check that at least one zone is scheduled for collection. */
     return any;
 }
 
 static void
 DiscardJITCodeForIncrementalGC(JSRuntime* rt)
 {
@@ -6673,27 +6686,29 @@ GCRuntime::budgetIncrementalGC(bool noni
     if (isTooMuchMalloc()) {
         budget.makeUnlimited();
         stats().nonincremental(AbortReason::MallocBytesTrigger);
     }
 
     bool reset = false;
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         if (zone->usage.gcBytes() >= zone->threshold.gcTriggerBytes()) {
+            MOZ_ASSERT(zone->isGCScheduled());
             budget.makeUnlimited();
             stats().nonincremental(AbortReason::GCBytesTrigger);
         }
 
+        if (zone->isTooMuchMalloc()) {
+            MOZ_ASSERT(zone->isGCScheduled());
+            budget.makeUnlimited();
+            stats().nonincremental(AbortReason::MallocBytesTrigger);
+        }
+
         if (isIncrementalGCInProgress() && zone->isGCScheduled() != zone->wasGCStarted())
             reset = true;
-
-        if (zone->isTooMuchMalloc()) {
-            budget.makeUnlimited();
-            stats().nonincremental(AbortReason::MallocBytesTrigger);
-        }
     }
 
     if (reset)
         return resetIncrementalGC(AbortReason::ZoneChange, lock);
 
     return IncrementalResult::Ok;
 }
 
@@ -6704,26 +6719,32 @@ class AutoScheduleZonesForGC
     JSRuntime* rt_;
 
   public:
     explicit AutoScheduleZonesForGC(JSRuntime* rt) : rt_(rt) {
         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
             if (rt->gc.gcMode() == JSGC_MODE_GLOBAL)
                 zone->scheduleGC();
 
-            /* This is a heuristic to avoid resets. */
+            // This is a heuristic to avoid resets.
             if (rt->gc.isIncrementalGCInProgress() && zone->needsIncrementalBarrier())
                 zone->scheduleGC();
 
-            /* This is a heuristic to reduce the total number of collections. */
+            // This is a heuristic to reduce the total number of collections.
             if (zone->usage.gcBytes() >=
                 zone->threshold.allocTrigger(rt->gc.schedulingState.inHighFrequencyGCMode()))
             {
                 zone->scheduleGC();
             }
+
+            // This ensures we collect zones that have reached the malloc limit.
+            // TODO: Start collecting these zones earlier like we do for the GC
+            // bytes trigger above (bug 1384049).
+            if (zone->isTooMuchMalloc())
+                zone->scheduleGC();
         }
     }
 
     ~AutoScheduleZonesForGC() {
         for (ZonesIter zone(rt_, WithAtoms); !zone.done(); zone.next())
             zone->unscheduleGC();
     }
 };
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -882,18 +882,19 @@ JSRuntime::setUsedByHelperThread(Zone* z
 }
 
 void
 JSRuntime::clearUsedByHelperThread(Zone* zone)
 {
     MOZ_ASSERT(zone->group()->usedByHelperThread);
     zone->group()->usedByHelperThread = false;
     numHelperThreadZones--;
-    if (gc.fullGCForAtomsRequested() && !TlsContext.get())
-        gc.triggerFullGCForAtoms();
+    JSContext* cx = TlsContext.get();
+    if (gc.fullGCForAtomsRequested() && cx->canCollectAtoms())
+        gc.triggerFullGCForAtoms(cx);
 }
 
 bool
 js::CurrentThreadCanAccessRuntime(const JSRuntime* rt)
 {
     return rt->activeContext() == TlsContext.get();
 }