Bug 1572006 - Replace use of JSRuntime in GCLock classes r=allstarschh
author: Jon Coppeard <jcoppeard@mozilla.com>
date: Thu, 03 Oct 2019 19:50:45 +0000
changeset 496290 1748f0da572197d52d3bb9c727776aafe9e02e94
parent 496289 e0f93751fd812702068bfeff555821d280dc7c14
child 496291 886d6128768ce9c6ac30359b51d4b6bbbd98ac57
push id: 97157
push user: jcoppeard@mozilla.com
push date: Fri, 04 Oct 2019 09:09:04 +0000
treeherder: autoland@886d6128768c
reviewers: allstarschh
bugs: 1572006
milestone: 71.0a1
Bug 1572006 - Replace use of JSRuntime in GCLock classes r=allstarschh

Depends on D48086

Differential Revision: https://phabricator.services.mozilla.com/D48087
js/src/builtin/TestingFunctions.cpp
js/src/gc/GC.cpp
js/src/gc/GCLock.h
js/src/gc/GCRuntime.h
js/src/gc/Nursery.cpp
js/src/gc/Verifier.cpp
js/src/jsapi.cpp
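
To see the shape of the change at a glance before reading the diff: the RAII GC lock class now takes the GC subsystem object (GCRuntime*) directly and keeps a delegating JSRuntime* constructor so unconverted callers keep compiling. Below is a minimal standalone sketch of that pattern; the names GCRuntimeSketch, JSRuntimeSketch and AutoLockGCSketch are hypothetical stand-ins built on std::mutex, not the in-tree js::Mutex-based classes.

#include <mutex>
#include <optional>

struct GCRuntimeSketch {
  std::mutex lock;  // stands in for the GC subsystem's lock member
};

struct JSRuntimeSketch {
  GCRuntimeSketch gc;  // the runtime owns the GC subsystem
};

class AutoLockGCSketch {
 public:
  // New style: lock via the GC object directly.
  explicit AutoLockGCSketch(GCRuntimeSketch* gc) : gc_(gc) { lockImpl(); }
  // Delegating constructor keeps the old runtime-taking signature working
  // for callers that have not been converted yet.
  explicit AutoLockGCSketch(JSRuntimeSketch* rt) : AutoLockGCSketch(&rt->gc) {}

  ~AutoLockGCSketch() { guard_.reset(); }

  AutoLockGCSketch(const AutoLockGCSketch&) = delete;
  AutoLockGCSketch& operator=(const AutoLockGCSketch&) = delete;

 protected:
  void lockImpl() { guard_.emplace(gc_->lock); }

  GCRuntimeSketch* const gc_;  // subclasses see the GC object, not the runtime

 private:
  std::optional<std::lock_guard<std::mutex>> guard_;
};

int main() {
  GCRuntimeSketch gc;
  JSRuntimeSketch rt;
  { AutoLockGCSketch lock(&gc); }  // preferred: construct from the GC object
  { AutoLockGCSketch lock(&rt); }  // still accepted: delegates to the above
  return 0;
}
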
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -625,23 +625,17 @@ static bool GCParameter(JSContext* cx, u
 
   uint32_t value = floor(d);
   if (param == JSGC_MARK_STACK_LIMIT && JS::IsIncrementalGCInProgress(cx)) {
     JS_ReportErrorASCII(
         cx, "attempt to set markStackLimit while a GC is in progress");
     return false;
   }
 
-  bool ok;
-  {
-    JSRuntime* rt = cx->runtime();
-    AutoLockGC lock(rt);
-    ok = rt->gc.setParameter(param, value, lock);
-  }
-
+  bool ok = cx->runtime()->gc.setParameter(param, value);
   if (!ok) {
     JS_ReportErrorASCII(cx, "Parameter value out of range");
     return false;
   }
 
   args.rval().setUndefined();
   return true;
 }
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -1201,17 +1201,17 @@ void js::gc::DumpArenaInfo() {
 }
 
 #endif  // JS_GC_ZEAL
 
 bool GCRuntime::init(uint32_t maxbytes) {
   MOZ_ASSERT(SystemPageSize());
 
   {
-    AutoLockGCBgAlloc lock(rt);
+    AutoLockGCBgAlloc lock(this);
 
     MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock));
 
     const char* size = getenv("JSGC_MARK_STACK_LIMIT");
     if (size) {
       setMarkStackLimit(atoi(size), lock);
     }
 
@@ -1295,20 +1295,25 @@ void GCRuntime::finish() {
   FreeChunkPool(emptyChunks_.ref());
 
   gcTracer.finishTrace();
 
   nursery().printTotalProfileTimes();
   stats().printTotalProfileTimes();
 }
 
+bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value) {
+  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+  waitBackgroundSweepEnd();
+  AutoLockGC lock(this);
+  return setParameter(key, value, lock);
+}
+
 bool GCRuntime::setParameter(JSGCParamKey key, uint32_t value,
                              AutoLockGC& lock) {
-  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
-
   switch (key) {
     case JSGC_SLICE_TIME_BUDGET_MS:
       defaultTimeBudgetMS_ = value ? value : SliceBudget::UnlimitedTimeBudget;
       break;
     case JSGC_MARK_STACK_LIMIT:
       if (value == 0) {
         return false;
       }
@@ -1331,19 +1336,24 @@ bool GCRuntime::setParameter(JSGCParamKe
       for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         zone->updateGCThresholds(*this, GC_NORMAL, lock);
       }
   }
 
   return true;
 }
 
-void GCRuntime::resetParameter(JSGCParamKey key, AutoLockGC& lock) {
+void GCRuntime::resetParameter(JSGCParamKey key) {
   MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
-
+  waitBackgroundSweepEnd();
+  AutoLockGC lock(this);
+  resetParameter(key, lock);
+}
+
+void GCRuntime::resetParameter(JSGCParamKey key, AutoLockGC& lock) {
   switch (key) {
     case JSGC_SLICE_TIME_BUDGET_MS:
       defaultTimeBudgetMS_ = TuningDefaults::DefaultTimeBudgetMS;
       break;
     case JSGC_MARK_STACK_LIMIT:
       setMarkStackLimit(MarkStack::DefaultCapacity, lock);
       break;
     case JSGC_MODE:
@@ -1355,16 +1365,22 @@ void GCRuntime::resetParameter(JSGCParam
     default:
       tunables.resetParameter(key, lock);
       for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         zone->updateGCThresholds(*this, GC_NORMAL, lock);
       }
   }
 }
 
+uint32_t GCRuntime::getParameter(JSGCParamKey key) {
+  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+  AutoLockGC lock(this);
+  return getParameter(key, lock);
+}
+
 uint32_t GCRuntime::getParameter(JSGCParamKey key, const AutoLockGC& lock) {
   switch (key) {
     case JSGC_MAX_BYTES:
       return uint32_t(tunables.gcMaxBytes());
     case JSGC_MIN_NURSERY_BYTES:
       MOZ_ASSERT(tunables.gcMinNurseryBytes() < UINT32_MAX);
       return uint32_t(tunables.gcMinNurseryBytes());
     case JSGC_MAX_NURSERY_BYTES:
@@ -2502,17 +2518,17 @@ void GCRuntime::updateRuntimePointersToR
   }
 
   // Call callbacks to get the rest of the system to fixup other untraced
   // pointers.
   callWeakPointerZonesCallbacks();
 }
 
 void GCRuntime::clearRelocatedArenas(Arena* arenaList, JS::GCReason reason) {
-  AutoLockGC lock(rt);
+  AutoLockGC lock(this);
   clearRelocatedArenasWithoutUnlocking(arenaList, reason, lock);
 }
 
 void GCRuntime::clearRelocatedArenasWithoutUnlocking(Arena* arenaList,
                                                      JS::GCReason reason,
                                                      const AutoLockGC& lock) {
   // Clear the relocated arenas, now containing only forwarding pointers
   while (arenaList) {
@@ -2557,17 +2573,17 @@ void GCRuntime::protectAndHoldArenas(Are
 void GCRuntime::unprotectHeldRelocatedArenas() {
   for (Arena* arena = relocatedArenasToRelease; arena; arena = arena->next) {
     UnprotectPages(arena, ArenaSize);
     MOZ_ASSERT(!arena->allocated());
   }
 }
 
 void GCRuntime::releaseRelocatedArenas(Arena* arenaList) {
-  AutoLockGC lock(rt);
+  AutoLockGC lock(this);
   releaseRelocatedArenasWithoutUnlocking(arenaList, lock);
 }
 
 void GCRuntime::releaseRelocatedArenasWithoutUnlocking(Arena* arenaList,
                                                        const AutoLockGC& lock) {
   // Release relocated arenas previously cleared with clearRelocatedArenas().
   while (arenaList) {
     Arena* arena = arenaList;
@@ -3132,17 +3148,17 @@ void GCRuntime::startDecommit() {
   // doing a shrinking GC we always decommit to release as much memory as
   // possible.
   if (schedulingState.inHighFrequencyGCMode() && !cleanUpEverything) {
     return;
   }
 
   BackgroundDecommitTask::ChunkVector toDecommit;
   {
-    AutoLockGC lock(rt);
+    AutoLockGC lock(this);
 
     // Verify that all entries in the empty chunks pool are already decommitted.
     for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done();
          chunk.next()) {
       MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
     }
 
     // Since we release the GC lock while doing the decommit syscall below,
@@ -3223,42 +3239,34 @@ void GCRuntime::sweepBackgroundThings(Zo
         Arena* arenas = zone->arenas.arenaListsToSweep(kind);
         MOZ_RELEASE_ASSERT(uintptr_t(arenas) != uintptr_t(-1));
         if (arenas) {
           ArenaLists::backgroundFinalize(&fop, arenas, &emptyArenas);
         }
       }
     }
 
-    AutoLockGC lock(rt);
-
     // Release any arenas that are now empty.
     //
     // Periodically drop and reaquire the GC lock every so often to avoid
     // blocking the main thread from allocating chunks.
     //
     // Also use this opportunity to periodically recalculate the GC thresholds
     // as we free more memory.
     static const size_t LockReleasePeriod = 32;
-    size_t releaseCount = 0;
-    Arena* next;
-    for (Arena* arena = emptyArenas; arena; arena = next) {
-      next = arena->next;
-
-      releaseArena(arena, lock);
-      releaseCount++;
-      if (releaseCount % LockReleasePeriod == 0) {
-        lock.unlock();
-        lock.lock();
-        zone->updateGCThresholds(*this, invocationKind, lock);
+
+    while (emptyArenas) {
+      AutoLockGC lock(this);
+      for (size_t i = 0; i < LockReleasePeriod && emptyArenas; i++) {
+        Arena* arena = emptyArenas;
+        emptyArenas = emptyArenas->next;
+        releaseArena(arena, lock);
       }
-    }
-
-    // Do a final update now we've finished.
-    zone->updateGCThresholds(*this, invocationKind, lock);
+      zone->updateGCThresholds(*this, invocationKind, lock);
+    }
   }
 }
 
 void GCRuntime::assertBackgroundSweepingFinished() {
 #ifdef DEBUG
   {
     AutoLockHelperThreadState lock;
     MOZ_ASSERT(backgroundSweepZones.ref().isEmpty());
@@ -4238,17 +4246,17 @@ void js::gc::MarkingValidator::nonIncrem
 
   gc->waitBackgroundSweepEnd();
 
   /* Wait for off-thread parsing which can allocate. */
   HelperThreadState().waitForAllThreads();
 
   /* Save existing mark bits. */
   {
-    AutoLockGC lock(runtime);
+    AutoLockGC lock(gc);
     for (auto chunk = gc->allNonEmptyChunks(lock); !chunk.done();
          chunk.next()) {
       ChunkBitmap* bitmap = &chunk->bitmap;
       auto entry = MakeUnique<ChunkBitmap>();
       if (!entry) {
         return;
       }
 
@@ -4315,17 +4323,17 @@ void js::gc::MarkingValidator::nonIncrem
 
       for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
         WeakMapBase::unmarkZone(zone);
       }
 
       MOZ_ASSERT(gcmarker->isDrained());
       gcmarker->reset();
 
-      AutoLockGC lock(runtime);
+      AutoLockGC lock(gc);
       for (auto chunk = gc->allNonEmptyChunks(lock); !chunk.done();
            chunk.next()) {
         chunk->bitmap.clear();
       }
     }
   }
 
   {
@@ -4358,17 +4366,17 @@ void js::gc::MarkingValidator::nonIncrem
     for (GCZonesIter zone(runtime); !zone.done(); zone.next()) {
       zone->changeGCState(Zone::MarkBlackAndGray, Zone::MarkBlackOnly);
     }
     MOZ_ASSERT(gc->marker.isDrained());
   }
 
   /* Take a copy of the non-incremental mark state and restore the original. */
   {
-    AutoLockGC lock(runtime);
+    AutoLockGC lock(gc);
     for (auto chunk = gc->allNonEmptyChunks(lock); !chunk.done();
          chunk.next()) {
       ChunkBitmap* bitmap = &chunk->bitmap;
       ChunkBitmap* entry = map.lookup(chunk)->value().get();
       Swap(*entry, *bitmap);
     }
   }
 
@@ -5458,17 +5466,17 @@ IncrementalProgress GCRuntime::endSweepi
     callFinalizeCallbacks(&fop, JSFINALIZE_GROUP_END);
   }
 
   /* Free LIFO blocks on a background thread if possible. */
   startBackgroundFree();
 
   /* Update the GC state for zones we have swept. */
   for (SweepGroupZonesIter zone(rt); !zone.done(); zone.next()) {
-    AutoLockGC lock(rt);
+    AutoLockGC lock(this);
     zone->changeGCState(Zone::Sweep, Zone::Finished);
     zone->updateGCThresholds(*this, invocationKind, lock);
     zone->arenas.unmarkPreMarkedFreeCells();
   }
 
   /*
    * Start background thread to sweep zones if required, sweeping the atoms
    * zone last if present.
@@ -7480,17 +7488,17 @@ void GCRuntime::onOutOfMallocMemory() {
 
   // Make sure we release anything queued for release.
   decommitTask.join();
   nursery().joinDecommitTask();
 
   // Wait for background free of nursery huge slots to finish.
   sweepTask.join();
 
-  AutoLockGC lock(rt);
+  AutoLockGC lock(this);
   onOutOfMallocMemory(lock);
 }
 
 void GCRuntime::onOutOfMallocMemory(const AutoLockGC& lock) {
   // Release any relocated arenas we may be holding on to, without releasing
   // the GC lock.
   releaseHeldRelocatedArenasWithoutUnlocking(lock);
 
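The sweepBackgroundThings hunk above replaces the single AutoLockGC plus manual unlock()/lock() calls every LockReleasePeriod arenas with a loop that re-enters the lock scope once per batch, so the main thread is not blocked from allocating chunks while empty arenas are released. A minimal sketch of that control-flow shape, with hypothetical stand-ins (ArenaSketch, releaseOne, gcLockSketch) rather than the in-tree types:

#include <cstddef>
#include <mutex>

struct ArenaSketch {
  ArenaSketch* next = nullptr;
};

static std::mutex gcLockSketch;

static void releaseOne(ArenaSketch* arena) {
  (void)arena;  // stand-in for releaseArena(arena, lock)
}

static void releaseInBatches(ArenaSketch* emptyArenas) {
  constexpr std::size_t kBatch = 32;  // mirrors LockReleasePeriod in the patch
  while (emptyArenas) {
    std::lock_guard<std::mutex> lock(gcLockSketch);  // one lock scope per batch
    for (std::size_t i = 0; i < kBatch && emptyArenas; i++) {
      ArenaSketch* arena = emptyArenas;
      emptyArenas = emptyArenas->next;
      releaseOne(arena);
    }
    // GC thresholds would be recalculated here, still under the lock,
    // before the lock is dropped between batches.
  }
}

int main() {
  ArenaSketch arenas[8];
  for (int i = 0; i < 7; i++) {
    arenas[i].next = &arenas[i + 1];
  }
  releaseInBatches(&arenas[0]);
  return 0;
}
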
--- a/js/src/gc/GCLock.h
+++ b/js/src/gc/GCLock.h
@@ -22,82 +22,87 @@ class AutoUnlockGC;
  *
  * Usually functions will pass const references of this class.  However
  * non-const references can be used to either temporarily release the lock by
  * use of AutoUnlockGC or to start background allocation when the lock is
  * released.
  */
 class MOZ_RAII AutoLockGC {
  public:
-  explicit AutoLockGC(JSRuntime* rt MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
-      : runtime_(rt) {
+  explicit AutoLockGC(gc::GCRuntime* gc MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+      : gc(gc) {
     MOZ_GUARD_OBJECT_NOTIFIER_INIT;
     lock();
   }
+  explicit AutoLockGC(JSRuntime* rt MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+      : AutoLockGC(&rt->gc) {
+    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+  }
 
   ~AutoLockGC() { lockGuard_.reset(); }
 
+ protected:
   void lock() {
     MOZ_ASSERT(lockGuard_.isNothing());
-    lockGuard_.emplace(runtime_->gc.lock);
+    lockGuard_.emplace(gc->lock);
   }
 
   void unlock() {
     MOZ_ASSERT(lockGuard_.isSome());
     lockGuard_.reset();
   }
 
- protected:
   js::LockGuard<js::Mutex>& guard() { return lockGuard_.ref(); }
 
-  JSRuntime* runtime() const { return runtime_; }
+  gc::GCRuntime* const gc;
 
  private:
-  JSRuntime* runtime_;
   mozilla::Maybe<js::LockGuard<js::Mutex>> lockGuard_;
   MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
 
   AutoLockGC(const AutoLockGC&) = delete;
   AutoLockGC& operator=(const AutoLockGC&) = delete;
+
+  friend class AutoUnlockGC;  // For lock/unlock.
 };
 
 /*
  * Same as AutoLockGC except it can optionally start a background chunk
  * allocation task when the lock is released.
  */
 class MOZ_RAII AutoLockGCBgAlloc : public AutoLockGC {
  public:
-  explicit AutoLockGCBgAlloc(JSRuntime* rt)
-      : AutoLockGC(rt), startBgAlloc(false) {}
+  explicit AutoLockGCBgAlloc(gc::GCRuntime* gc) : AutoLockGC(gc) {}
+  explicit AutoLockGCBgAlloc(JSRuntime* rt) : AutoLockGCBgAlloc(&rt->gc) {}
 
   ~AutoLockGCBgAlloc() {
     unlock();
 
     /*
      * We have to do this after releasing the lock because it may acquire
      * the helper lock which could cause lock inversion if we still held
      * the GC lock.
      */
     if (startBgAlloc) {
-      runtime()->gc.startBackgroundAllocTaskIfIdle();  // Ignore failure.
+      gc->startBackgroundAllocTaskIfIdle();  // Ignore failure.
     }
   }
 
   /*
    * This can be used to start a background allocation task (if one isn't
    * already running) that allocates chunks and makes them available in the
    * free chunks list.  This happens after the lock is released in order to
    * avoid lock inversion.
    */
   void tryToStartBackgroundAllocation() { startBgAlloc = true; }
 
  private:
   // true if we should start a background chunk allocation task after the
   // lock is released.
-  bool startBgAlloc;
+  bool startBgAlloc = false;
 };
 
 class MOZ_RAII AutoUnlockGC {
  public:
   explicit AutoUnlockGC(AutoLockGC& lock MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
       : lock(lock) {
     MOZ_GUARD_OBJECT_NOTIFIER_INIT;
     lock.unlock();
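
The GCLock.h changes above also move lock() and unlock() into the protected section and add a friend declaration for AutoUnlockGC, so only the RAII helpers can toggle the GC lock while ordinary callers hold it for their full scope. A minimal sketch of that scoped-unlock pattern, again with hypothetical stand-in types rather than the real SpiderMonkey classes:

#include <mutex>
#include <optional>

struct GCSketch {
  std::mutex lock;
};

class AutoLockSketch {
 public:
  explicit AutoLockSketch(GCSketch* gc) : gc_(gc) { lock(); }
  ~AutoLockSketch() { guard_.reset(); }

 protected:
  // Protected: ordinary callers cannot toggle the lock directly; only the
  // friend AutoUnlockSketch below can, mirroring the friend declaration above.
  void lock() { guard_.emplace(gc_->lock); }
  void unlock() { guard_.reset(); }

  GCSketch* const gc_;

 private:
  std::optional<std::lock_guard<std::mutex>> guard_;

  friend class AutoUnlockSketch;  // for lock/unlock
};

class AutoUnlockSketch {
 public:
  explicit AutoUnlockSketch(AutoLockSketch& lock) : lock_(lock) {
    lock_.unlock();  // temporarily release the lock for this scope
  }
  ~AutoUnlockSketch() { lock_.lock(); }  // re-take it on scope exit

 private:
  AutoLockSketch& lock_;
};

int main() {
  GCSketch gc;
  AutoLockSketch lock(&gc);
  {
    AutoUnlockSketch unlock(lock);  // lock released here...
  }                                 // ...and re-acquired when this scope ends
  return 0;
}

AutoLockGCBgAlloc follows the same shape: its destructor releases the lock first and only then starts the background chunk-allocation task, which avoids the lock-inversion hazard its comment describes.
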
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -249,19 +249,22 @@ class GCRuntime {
   inline bool upcomingZealousGC();
   inline bool needZealousGC();
   inline bool hasIncrementalTwoSliceZealMode();
 
   MOZ_MUST_USE bool addRoot(Value* vp, const char* name);
   void removeRoot(Value* vp);
   void setMarkStackLimit(size_t limit, AutoLockGC& lock);
 
+  MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value);
   MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value,
                                  AutoLockGC& lock);
+  void resetParameter(JSGCParamKey key);
   void resetParameter(JSGCParamKey key, AutoLockGC& lock);
+  uint32_t getParameter(JSGCParamKey key);
   uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);
 
   MOZ_MUST_USE bool triggerGC(JS::GCReason reason);
   // Check whether to trigger a zone GC after allocating GC cells. During an
   // incremental GC, optionally count |nbytes| towards the threshold for
   // performing the next slice.
   void maybeAllocTriggerZoneGC(Zone* zone, size_t nbytes = 0);
   // Check whether to trigger a zone GC after malloc memory.
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -185,18 +185,19 @@ void js::NurseryDecommitTask::run() {
 
     setFinishing(lock);
   }
 }
 
 void js::NurseryDecommitTask::decommitChunk(Chunk* chunk) {
   chunk->decommitAllArenas();
   {
-    AutoLockGC lock(runtime());
-    runtime()->gc.recycleChunk(chunk, lock);
+    GCRuntime* gc = &runtime()->gc;
+    AutoLockGC lock(gc);
+    gc->recycleChunk(chunk, lock);
   }
 }
 
 void js::NurseryDecommitTask::decommitRange(AutoLockHelperThreadState& lock) {
   // Clear this field here before releasing the lock. While the lock is
   // released the main thread may make new decommit requests or update the range
   // of the current requested chunk, but it won't attempt to use any
   // might-be-decommitted-soon memory.
@@ -291,17 +292,17 @@ js::Nursery::~Nursery() { disable(); }
 void js::Nursery::enable() {
   MOZ_ASSERT(isEmpty());
   MOZ_ASSERT(!gc->isVerifyPreBarriersEnabled());
   if (isEnabled() || mozilla::recordreplay::IsRecordingOrReplaying()) {
     return;
   }
 
   {
-    AutoLockGCBgAlloc lock(runtime());
+    AutoLockGCBgAlloc lock(gc);
     capacity_ = roundSize(tunables().gcMinNurseryBytes());
     if (!allocateNextChunk(0, lock)) {
       capacity_ = 0;
       return;
     }
   }
 
   setCurrentChunk(0);
@@ -479,17 +480,17 @@ void* js::Nursery::allocate(size_t size)
     MOZ_ASSERT(chunkno <= maxChunkCount());
     MOZ_ASSERT(chunkno <= allocatedChunkCount());
     if (chunkno == maxChunkCount()) {
       return nullptr;
     }
     if (MOZ_UNLIKELY(chunkno == allocatedChunkCount())) {
       mozilla::TimeStamp start = ReallyNow();
       {
-        AutoLockGCBgAlloc lock(runtime());
+        AutoLockGCBgAlloc lock(gc);
         if (!allocateNextChunk(chunkno, lock)) {
           return nullptr;
         }
       }
       timeInChunkAlloc_ += ReallyNow() - start;
       MOZ_ASSERT(chunkno < allocatedChunkCount());
     }
     setCurrentChunk(chunkno);
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -195,17 +195,17 @@ void gc::GCRuntime::startVerifyPreBarrie
   VerifyPreTracer* trc = js_new<VerifyPreTracer>(rt);
   if (!trc) {
     return;
   }
 
   AutoPrepareForTracing prep(cx);
 
   {
-    AutoLockGC lock(cx->runtime());
+    AutoLockGC lock(this);
     for (auto chunk = allNonEmptyChunks(lock); !chunk.done(); chunk.next()) {
       chunk->bitmap.clear();
     }
   }
 
   gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::TRACE_HEAP);
 
   const size_t size = 64 * 1024 * 1024;
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -1332,30 +1332,25 @@ JS_PUBLIC_API void JS_UpdateWeakPointerA
 JS_PUBLIC_API void JS_UpdateWeakPointerAfterGCUnbarriered(JSObject** objp) {
   if (IsAboutToBeFinalizedUnbarriered(objp)) {
     *objp = nullptr;
   }
 }
 
 JS_PUBLIC_API void JS_SetGCParameter(JSContext* cx, JSGCParamKey key,
                                      uint32_t value) {
-  cx->runtime()->gc.waitBackgroundSweepEnd();
-  AutoLockGC lock(cx->runtime());
-  MOZ_ALWAYS_TRUE(cx->runtime()->gc.setParameter(key, value, lock));
+  MOZ_ALWAYS_TRUE(cx->runtime()->gc.setParameter(key, value));
 }
 
 JS_PUBLIC_API void JS_ResetGCParameter(JSContext* cx, JSGCParamKey key) {
-  cx->runtime()->gc.waitBackgroundSweepEnd();
-  AutoLockGC lock(cx->runtime());
-  cx->runtime()->gc.resetParameter(key, lock);
+  cx->runtime()->gc.resetParameter(key);
 }
 
 JS_PUBLIC_API uint32_t JS_GetGCParameter(JSContext* cx, JSGCParamKey key) {
-  AutoLockGC lock(cx->runtime());
-  return cx->runtime()->gc.getParameter(key, lock);
+  return cx->runtime()->gc.getParameter(key);
 }
 
 JS_PUBLIC_API void JS_SetGCParametersBasedOnAvailableMemory(JSContext* cx,
                                                             uint32_t availMem) {
   struct JSGCConfig {
     JSGCParamKey key;
     uint32_t value;
   };
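
The jsapi.cpp hunks above lean on the new GCRuntime wrappers added in GC.cpp: the public overloads of setParameter, resetParameter and getParameter that take no lock argument wait for background sweeping where needed, take the GC lock themselves, and forward to the lock-taking overloads, so API entry points no longer construct an AutoLockGC at all. A minimal sketch of that wrapper-plus-locked-overload shape, with hypothetical names (GCSketch, LockTokenSketch) rather than the real classes:

#include <cstdint>
#include <mutex>

// Stand-in for the AutoLockGC& parameter that proves the lock is held.
class LockTokenSketch {
 public:
  explicit LockTokenSketch(std::mutex& m) : guard_(m) {}

 private:
  std::lock_guard<std::mutex> guard_;
};

class GCSketch {
 public:
  // Public wrapper: takes the lock itself, then calls the locked overload.
  bool setParameter(uint32_t key, uint32_t value) {
    LockTokenSketch lock(lock_);
    return setParameter(key, value, lock);
  }

  // Locked overload: callers must already hold the lock and pass the token.
  bool setParameter(uint32_t key, uint32_t value, const LockTokenSketch& lock) {
    (void)lock;  // the token is only evidence that the lock is held
    params_[key % kNumParams] = value;
    return true;
  }

 private:
  static constexpr unsigned kNumParams = 16;
  std::mutex lock_;
  uint32_t params_[kNumParams] = {};
};

int main() {
  GCSketch gc;
  return gc.setParameter(3, 128) ? 0 : 1;
}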