Bug 1097864 - Don't release the GC lock while allocating arenas r=terrence
author Jon Coppeard <jcoppeard@mozilla.com>
Thu, 20 Nov 2014 10:12:03 +0000
changeset 216624 70ffdede9e2bba10442df671fa8ae10938508162
parent 216623 62af62f4f0a4c8adef51fc3ae515f4a0dada764c
child 216625 03c6a758c9e8da0dfcad48788a8eabb6f9d899d2
reviewers terrence
bugs 1097864
milestone 36.0a1
js/src/gc/GCRuntime.h
js/src/jsgc.cpp
js/src/jsgc.h
js/src/vm/Runtime.h
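
The heart of the change is in vm/Runtime.h below: AutoUnlockGC is now constructed from the AutoLockGC guard itself rather than from the JSRuntime. Functions that may temporarily drop the GC lock must therefore take the guard by non-const reference, so the signature advertises that fact (hence the const removals on decommitArenas, expireChunksAndArenas and doSweep), and the guard records any release so that debug-only asserts such as the new wasUnlocked() check in ArenaLists::allocateFromArena can verify the lock was held throughout. A minimal standalone sketch of the same pattern, using a plain std::mutex in place of the runtime GC lock (AutoLock, AutoUnlock and example are illustrative stand-ins, not SpiderMonkey API):

    #include <mutex>

    class AutoLock
    {
      public:
        explicit AutoLock(std::mutex &mutex)
          : mutex_(mutex), wasUnlocked_(false)
        {
            mutex_.lock();
        }

        ~AutoLock() { mutex_.unlock(); }

        // Used by AutoUnlock to drop and re-take the lock. Releases are
        // recorded so asserts can detect that the lock was ever dropped.
        void lock() { mutex_.lock(); }
        void unlock() { mutex_.unlock(); wasUnlocked_ = true; }

        bool wasUnlocked() const { return wasUnlocked_; }

        AutoLock(const AutoLock &) = delete;
        AutoLock &operator=(const AutoLock &) = delete;

      private:
        std::mutex &mutex_;
        bool wasUnlocked_;
    };

    class AutoUnlock
    {
      public:
        // Taking the guard (not the mutex) by non-const reference means a
        // temporary unlock can only happen where a live guard is in scope.
        explicit AutoUnlock(AutoLock &lock) : lock_(lock) { lock_.unlock(); }
        ~AutoUnlock() { lock_.lock(); }

      private:
        AutoLock &lock_;
    };

    void example(std::mutex &gcLock)
    {
        AutoLock lock(gcLock);
        // ... work under the lock ...
        {
            AutoUnlock unlock(lock);  // drop the lock around a slow
                                      // syscall, as decommitArenas does
                                      // for MarkPagesUnused below
        }
        // The lock is held again here, and lock.wasUnlocked() is now true.
    }
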
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -563,18 +563,18 @@ class GCRuntime
     void endMarkingZoneGroup();
     void beginSweepingZoneGroup();
     bool shouldReleaseObservedTypes();
     void endSweepingZoneGroup();
     bool sweepPhase(SliceBudget &sliceBudget);
     void endSweepPhase(bool lastGC);
     void sweepZones(FreeOp *fop, bool lastGC);
     void decommitAllWithoutUnlocking(const AutoLockGC &lock);
-    void decommitArenas(const AutoLockGC &lock);
-    void expireChunksAndArenas(bool shouldShrink, const AutoLockGC &lock);
+    void decommitArenas(AutoLockGC &lock);
+    void expireChunksAndArenas(bool shouldShrink, AutoLockGC &lock);
     void sweepBackgroundThings();
     void assertBackgroundSweepingFinished();
     bool shouldCompact();
 #ifdef JSGC_COMPACTING
     void sweepTypesAfterCompacting(Zone *zone);
     void sweepZoneAfterCompacting(Zone *zone);
     void compactPhase(bool lastGC);
     ArenaHeader *relocateArenas();
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -1981,16 +1981,17 @@ ArenaLists::allocateFromArena(JS::Zone *
         return nullptr;
 
     // Although our chunk should definitely have enough space for another arena,
     // there are other valid reasons why Chunk::allocateArena() may fail.
     aheader = rt->gc.allocateArena(chunk, zone, thingKind, maybeLock.ref());
     if (!aheader)
         return nullptr;
 
+    MOZ_ASSERT(!maybeLock->wasUnlocked());
     MOZ_ASSERT(al.isCursorAtEnd());
     al.insertAtCursor(aheader);
 
     return allocateFromArenaInner<IsEmpty>(zone, aheader, thingKind);
 }
 
 template <ArenaLists::ArenaAllocMode hasFreeThings>
 inline TenuredCell *
@@ -3269,31 +3270,29 @@ GCRuntime::maybeAllocTriggerZoneGC(Zone 
 {
     size_t usedBytes = zone->usage.gcBytes();
     size_t thresholdBytes = zone->threshold.gcTriggerBytes();
     size_t igcThresholdBytes = thresholdBytes * tunables.zoneAllocThresholdFactor();
 
     if (usedBytes >= thresholdBytes) {
         // The threshold has been surpassed, immediately trigger a GC,
         // which will be done non-incrementally.
-        AutoUnlockGC unlock(rt);
         triggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
     } else if (usedBytes >= igcThresholdBytes) {
         // Reduce the delay to the start of the next incremental slice.
         if (zone->gcDelayBytes < ArenaSize)
             zone->gcDelayBytes = 0;
         else
             zone->gcDelayBytes -= ArenaSize;
 
         if (!zone->gcDelayBytes) {
             // Start or continue an in progress incremental GC. We do this
             // to try to avoid performing non-incremental GCs on zones
             // which allocate a lot of data, even when incremental slices
             // can't be triggered via scheduling in the event loop.
-            AutoUnlockGC unlock(rt);
             triggerZoneGC(zone, JS::gcreason::ALLOC_TRIGGER);
 
             // Delay the next slice until a certain amount of allocation
             // has been performed.
             zone->gcDelayBytes = tunables.zoneAllocDelayBytes();
         }
     }
 }
@@ -3409,17 +3408,17 @@ GCRuntime::decommitAllWithoutUnlocking(c
                 chunk->decommittedArenas.set(i);
             }
         }
     }
     MOZ_ASSERT(availableChunks(lock).verify());
 }
 
 void
-GCRuntime::decommitArenas(const AutoLockGC &lock)
+GCRuntime::decommitArenas(AutoLockGC &lock)
 {
     // Verify that all entries in the empty chunks pool are decommitted.
     for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done(); chunk.next())
         MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
 
     // Build a Vector of all current available Chunks. Since we release the
     // gc lock while doing the decommit syscall, it is dangerous to iterate
     // the available list directly, as concurrent operations can modify it.
@@ -3440,39 +3439,39 @@ GCRuntime::decommitArenas(const AutoLock
         MOZ_ASSERT(chunk);
 
         // The arena list is not doubly-linked, so we have to work in the free
         // list order and not in the natural order.
         while (chunk->info.numArenasFreeCommitted) {
             ArenaHeader *aheader = chunk->allocateArena(rt, nullptr, FINALIZE_OBJECT0, lock);
             bool ok;
             {
-                AutoUnlockGC unlock(rt);
+                AutoUnlockGC unlock(lock);
                 ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
             }
             chunk->releaseArena(rt, aheader, lock, Chunk::ArenaDecommitState(ok));
 
             // FIXME Bug 1095620: add cancellation support when this becomes
             // a ParallelTask.
             if (/* cancel_ || */ !ok)
                 return;
         }
     }
     MOZ_ASSERT(availableChunks(lock).verify());
 }
 
 void
-GCRuntime::expireChunksAndArenas(bool shouldShrink, const AutoLockGC &lock)
+GCRuntime::expireChunksAndArenas(bool shouldShrink, AutoLockGC &lock)
 {
 #ifdef JSGC_FJGENERATIONAL
     rt->threadPool.pruneChunkCache();
 #endif
 
     if (Chunk *toFree = expireEmptyChunkPool(shouldShrink, lock)) {
-        AutoUnlockGC unlock(rt);
+        AutoUnlockGC unlock(lock);
         freeChunkList(toFree);
     }
 
     if (shouldShrink)
         decommitArenas(lock);
 }
 
 void
@@ -3639,17 +3638,17 @@ BackgroundAllocTask::run()
 {
     TraceLogger *logger = TraceLoggerForCurrentThread();
     AutoTraceLog logAllocation(logger, TraceLogger::GCAllocation);
 
     AutoLockGC lock(runtime);
     while (!cancel_ && runtime->gc.wantBackgroundAllocation(lock)) {
         Chunk *chunk;
         {
-            AutoUnlockGC unlock(runtime);
+            AutoUnlockGC unlock(lock);
             chunk = Chunk::allocate(runtime);
             if (!chunk)
                 break;
         }
         chunkPool_.push(chunk);
     }
 }
 
@@ -3692,21 +3691,21 @@ GCHelperState::waitBackgroundSweepEnd()
     AutoLockGC lock(rt);
     while (state() == SWEEPING)
         waitForBackgroundThread();
     if (rt->gc.incrementalState == NO_INCREMENTAL)
         rt->gc.assertBackgroundSweepingFinished();
 }
 
 void
-GCHelperState::doSweep(const AutoLockGC &lock)
+GCHelperState::doSweep(AutoLockGC &lock)
 {
     if (sweepFlag) {
         sweepFlag = false;
-        AutoUnlockGC unlock(rt);
+        AutoUnlockGC unlock(lock);
 
         rt->gc.sweepBackgroundThings();
 
         rt->gc.freeLifoAlloc.freeAll();
     }
 
     bool shrinking = shrinkFlag;
     rt->gc.expireChunksAndArenas(shrinking, lock);
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -1025,18 +1025,17 @@ class GCHelperState
 
     static void freeElementsAndArray(void **array, void **end) {
         MOZ_ASSERT(array <= end);
         for (void **p = array; p != end; ++p)
             js_free(*p);
         js_free(array);
     }
 
-    /* Must be called with the GC lock taken. */
-    void doSweep(const AutoLockGC &lock);
+    void doSweep(AutoLockGC &lock);
 
   public:
     explicit GCHelperState(JSRuntime *rt)
       : rt(rt),
         done(nullptr),
         state_(IDLE),
         thread(nullptr),
         sweepFlag(false),
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -1484,52 +1484,82 @@ FreeOp::freeLater(void *p)
     // FreeOps other than the defaultFreeOp() are constructed on the stack,
     // and won't hold onto the pointers to free indefinitely.
     MOZ_ASSERT(this != runtime()->defaultFreeOp());
 
     if (!freeLaterList.append(p))
         CrashAtUnhandlableOOM("FreeOp::freeLater");
 }
 
-class AutoLockGC
+/*
+ * RAII class that takes the GC lock while it is live.
+ *
+ * Note that the lock may be temporarily released by constructing an
+ * AutoUnlockGC from a non-const reference to this class.
+ */
+class MOZ_STACK_CLASS AutoLockGC
 {
   public:
     explicit AutoLockGC(JSRuntime *rt
                         MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
-      : runtime(rt)
+      : runtime_(rt), wasUnlocked_(false)
     {
         MOZ_GUARD_OBJECT_NOTIFIER_INIT;
-        rt->lockGC();
+        lock();
+    }
+
+    ~AutoLockGC() {
+        unlock();
+    }
+
+    void lock() {
+        runtime_->lockGC();
+    }
+
+    void unlock() {
+        runtime_->unlockGC();
+        wasUnlocked_ = true;
     }
 
-    ~AutoLockGC()
+#ifdef DEBUG
+    bool wasUnlocked() const {
+        return wasUnlocked_;
+    }
+#endif
+
+  private:
+    JSRuntime *runtime_;
+    mozilla::DebugOnly<bool> wasUnlocked_;
+    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
+
+    AutoLockGC(const AutoLockGC&) MOZ_DELETE;
+    AutoLockGC& operator=(const AutoLockGC&) MOZ_DELETE;
+};
+
+class MOZ_STACK_CLASS AutoUnlockGC
+{
+  public:
+    explicit AutoUnlockGC(AutoLockGC &lock
+                          MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+      : lock(lock)
     {
-        runtime->unlockGC();
+        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+        lock.unlock();
+    }
+
+    ~AutoUnlockGC() {
+        lock.lock();
     }
 
   private:
-    JSRuntime *runtime;
-    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
-};
-
-class AutoUnlockGC
-{
-  private:
-    JSRuntime *rt;
+    AutoLockGC &lock;
     MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
 
-  public:
-    explicit AutoUnlockGC(JSRuntime *rt
-                          MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
-      : rt(rt)
-    {
-        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
-        rt->unlockGC();
-    }
-    ~AutoUnlockGC() { rt->lockGC(); }
+    AutoUnlockGC(const AutoUnlockGC&) MOZ_DELETE;
+    AutoUnlockGC& operator=(const AutoUnlockGC&) MOZ_DELETE;
 };
 
 class MOZ_STACK_CLASS AutoKeepAtoms
 {
     PerThreadData *pt;
     MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
 
   public: