Bug 1074961 - Part 13: Do not iterate the chunk list concurrent with mutation; r=sfink
☠☠ backed out by b322b316e6f2 ☠☠
author: Terrence Cole <terrence@mozilla.com>
date: Thu, 06 Nov 2014 14:03:10 -0800
changeset: 215656 ed264e327a937902f07abe524b61850d072c1843
parent: 215655 ee366f6b2d3ec274b34190cb2f37a6bdc5736433
child: 215657 c01469b24e22d378ef1bff3fd9610c18ce79bb3a
push id: 51813
push user: tcole@mozilla.com
push date: Thu, 13 Nov 2014 23:33:21 +0000
treeherder: mozilla-inbound@ed264e327a93
reviewers: sfink
bugs: 1074961
milestone: 36.0a1
js/src/gc/GCRuntime.h
js/src/gc/Heap.h
js/src/jsgc.cpp
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -297,16 +297,17 @@ class GCRuntime
     };
     void markRuntime(JSTracer *trc,
                      TraceOrMarkRuntime traceOrMark = TraceRuntime,
                      TraceRootsOrUsedSaved rootsSource = TraceRoots);
 
     void notifyDidPaint();
     void shrinkBuffers();
     void onOutOfMallocMemory();
+    void onOutOfMallocMemory(const AutoLockGC &lock);
 
 #ifdef JS_GC_ZEAL
     const void *addressOfZealMode() { return &zealMode; }
     void setZeal(uint8_t zeal, uint32_t frequency);
     void setNextScheduled(uint32_t count);
     void verifyPreBarriers();
     void verifyPostBarriers();
     void maybeVerifyPreBarriers(bool always);
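
The new declaration follows the lock-token idiom this patch series uses throughout: the public `onOutOfMallocMemory()` acquires the GC lock itself, then delegates to an overload whose `const AutoLockGC &` parameter serves as compile-time proof that the caller already holds the lock (see the jsgc.cpp hunk below). A minimal standalone sketch of the idiom, with `Runtime` and its `gcLock` as illustrative stand-ins for the real types:

    #include <mutex>

    struct Runtime { std::mutex gcLock; };

    // RAII guard; a reference to it in a signature proves the lock is held.
    class AutoLockGC {
        std::lock_guard<std::mutex> guard;
      public:
        explicit AutoLockGC(Runtime &rt) : guard(rt.gcLock) {}
    };

    void onOutOfMallocMemory(Runtime &, const AutoLockGC &);

    // Public entry point: take the lock, then call the locked overload.
    void onOutOfMallocMemory(Runtime &rt) {
        AutoLockGC lock(rt);
        onOutOfMallocMemory(rt, lock);
    }

    // The locked overload may assume the GC lock is held for its duration.
    void onOutOfMallocMemory(Runtime &, const AutoLockGC &) {
        // ... free empty chunks, decommit arenas, etc.
    }

    int main() { Runtime rt; onOutOfMallocMemory(rt); }
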
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -944,17 +944,19 @@ struct Chunk
 
     inline void addToAvailableList(JSRuntime *rt);
     inline void insertToAvailableList(Chunk **insertPoint);
     inline void removeFromAvailableList();
 
     ArenaHeader *allocateArena(JSRuntime *rt, JS::Zone *zone, AllocKind kind,
                                const AutoLockGC &lock);
 
-    void releaseArena(JSRuntime *rt, ArenaHeader *aheader, const AutoLockGC &lock);
+    enum ArenaDecommitState { IsCommitted = false, IsDecommitted = true };
+    void releaseArena(JSRuntime *rt, ArenaHeader *aheader, const AutoLockGC &lock,
+                      ArenaDecommitState state = IsCommitted);
     void recycleArena(ArenaHeader *aheader, SortedArenaList &dest, AllocKind thingKind,
                       size_t thingsPerArena);
 
     static Chunk *allocate(JSRuntime *rt);
 
     void decommitAllArenas(JSRuntime *rt);
 
     /*
@@ -975,21 +977,22 @@ struct Chunk
 
   private:
     inline void init(JSRuntime *rt);
 
     /* Search for a decommitted arena to allocate. */
     unsigned findDecommittedArenaOffset();
     ArenaHeader* fetchNextDecommittedArena();
 
+    void addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader);
+    void addArenaToDecommittedList(JSRuntime *rt, const ArenaHeader *aheader);
+
   public:
     /* Unlink and return the freeArenasHead. */
     inline ArenaHeader* fetchNextFreeArena(JSRuntime *rt);
-
-    inline void addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader);
 };
 
 static_assert(sizeof(Chunk) == ChunkSize,
               "Ensure the hardcoded chunk size definition actually matches the struct.");
 static_assert(js::gc::ChunkMarkBitmapOffset == offsetof(Chunk, bitmap),
               "The hardcoded API bitmap offset must match the actual offset.");
 static_assert(js::gc::ChunkRuntimeOffset == offsetof(Chunk, info) +
                                             offsetof(ChunkInfo, trailer) +
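
One detail worth noting in the hunk above: `ArenaDecommitState` is a plain enum whose enumerators are backed by `false`/`true`, so the bool returned by the decommit syscall converts straight into a named state; the rewritten `decommitArenas` below does exactly that with `Chunk::ArenaDecommitState(ok)`. A compilable sketch of the conversion, with a toy `releaseArena` standing in for the real method:

    #include <cstdio>

    // Enumerators carry bool values so a success flag casts directly.
    enum ArenaDecommitState { IsCommitted = false, IsDecommitted = true };

    void releaseArena(ArenaDecommitState state = IsCommitted) {
        if (state == IsCommitted)
            std::puts("arena pushed back onto the committed free list");
        else
            std::puts("arena recorded in the decommitted-arenas bitmap");
    }

    int main() {
        bool ok = true;                        // e.g. MarkPagesUnused() succeeded
        releaseArena(ArenaDecommitState(ok));  // true converts to IsDecommitted
        releaseArena();                        // default: arena stays committed
    }
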
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -960,43 +960,55 @@ Chunk::allocateArena(JSRuntime *rt, Zone
 }
 
 inline void
 GCRuntime::updateOnArenaFree(const ChunkInfo &info)
 {
     ++numArenasFreeCommitted;
 }
 
-inline void
+void
 Chunk::addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader)
 {
     MOZ_ASSERT(!aheader->allocated());
     aheader->next = info.freeArenasHead;
     info.freeArenasHead = aheader;
     ++info.numArenasFreeCommitted;
     ++info.numArenasFree;
     rt->gc.updateOnArenaFree(info);
 }
 
 void
+Chunk::addArenaToDecommittedList(JSRuntime *rt, const ArenaHeader *aheader)
+{
+    ++info.numArenasFree;
+    decommittedArenas.set(Chunk::arenaIndex(aheader->arenaAddress()));
+}
+
+void
 Chunk::recycleArena(ArenaHeader *aheader, SortedArenaList &dest, AllocKind thingKind,
                     size_t thingsPerArena)
 {
     aheader->getArena()->setAsFullyUnused(thingKind);
     dest.insertAt(aheader, thingsPerArena);
 }
 
 void
-Chunk::releaseArena(JSRuntime *rt, ArenaHeader *aheader, const AutoLockGC &lock)
+Chunk::releaseArena(JSRuntime *rt, ArenaHeader *aheader, const AutoLockGC &lock,
+                    ArenaDecommitState state /* = IsCommitted */)
 {
     MOZ_ASSERT(aheader->allocated());
     MOZ_ASSERT(!aheader->hasDelayedMarking);
 
-    aheader->setAsNotAllocated();
-    addArenaToFreeList(rt, aheader);
+    if (state == IsCommitted) {
+        aheader->setAsNotAllocated();
+        addArenaToFreeList(rt, aheader);
+    } else {
+        addArenaToDecommittedList(rt, aheader);
+    }
 
     if (info.numArenasFree == 1) {
         MOZ_ASSERT(!info.prevp);
         MOZ_ASSERT(!info.next);
         addToAvailableList(rt);
     } else if (!unused()) {
         MOZ_ASSERT(info.prevp);
     } else {
@@ -3181,120 +3193,55 @@ GCRuntime::decommitAllWithoutUnlocking(c
             }
         }
     }
 }
 
 void
 GCRuntime::decommitArenas(const AutoLockGC &lock)
 {
-    Chunk **availableListHeadp = &availableChunkListHead;
-    Chunk *chunk = *availableListHeadp;
-    if (!chunk)
-        return;
-
-    /*
-     * Decommit is expensive so we avoid holding the GC lock while calling it.
-     *
-     * We decommit from the tail of the list to minimize interference with the
-     * main thread that may start to allocate things at this point.
-     *
-     * The arena that is been decommitted outside the GC lock must not be
-     * available for allocations either via the free list or via the
-     * decommittedArenas bitmap. For that we just fetch the arena from the
-     * free list before the decommit pretending as it was allocated. If this
-     * arena also is the single free arena in the chunk, then we must remove
-     * from the available list before we release the lock so the allocation
-     * thread would not see chunks with no free arenas on the available list.
-     *
-     * After we retake the lock, we mark the arena as free and decommitted if
-     * the decommit was successful. We must also add the chunk back to the
-     * available list if we removed it previously or when the main thread
-     * have allocated all remaining free arenas in the chunk.
-     *
-     * We also must make sure that the aheader is not accessed again after we
-     * decommit the arena.
-     */
-    MOZ_ASSERT(chunk->info.prevp == availableListHeadp);
-    while (Chunk *next = chunk->info.next) {
-        MOZ_ASSERT(next->info.prevp == &chunk->info.next);
-        chunk = next;
-    }
-
-    for (;;) {
-        while (chunk->info.numArenasFreeCommitted != 0) {
-            ArenaHeader *aheader = chunk->fetchNextFreeArena(rt);
-
-            Chunk **savedPrevp = chunk->info.prevp;
-            if (!chunk->hasAvailableArenas())
-                chunk->removeFromAvailableList();
-
-            size_t arenaIndex = Chunk::arenaIndex(aheader->arenaAddress());
+    // Verify that all entries in the empty chunks pool are decommitted.
+    for (ChunkPool::Enum e(emptyChunks(lock)); !e.empty(); e.popFront())
+        MOZ_ASSERT(e.front()->info.numArenasFreeCommitted == 0);
+
+    // Build a Vector of all current available Chunks. Since we release the
+    // gc lock while doing the decommit syscall, it is dangerous to iterate
+    // the available list directly, as concurrent operations can modify it.
+    mozilla::Vector<Chunk *> toDecommit;
+    for (Chunk *chunk = availableChunkListHead; chunk; chunk = chunk->info.next) {
+        if (!toDecommit.append(chunk)) {
+            // The OOM handler does a full, immediate decommit, so there is
+            // nothing more to do here in any case.
+            return onOutOfMallocMemory(lock);
+        }
+    }
+
+    // Start at the tail and stop before the first chunk: we allocate from the
+    // head and don't want to thrash with the mutator.
+    static_assert(SIZE_MAX / ChunkSize < SSIZE_MAX, "A vector of Chunk cannot overflow ssize_t.");
+    for (ssize_t i = toDecommit.length() - 1; i > 0; --i) {
+        Chunk *chunk = toDecommit[i];
+        MOZ_ASSERT(chunk);
+
+        // The arena list is not doubly-linked, so we have to work in the free
+        // list order and not in the natural order.
+        while (chunk->info.numArenasFreeCommitted) {
+            ArenaHeader *aheader = chunk->allocateArena(rt, nullptr, FINALIZE_OBJECT0, lock);
             bool ok;
             {
-                /*
-                 * If the main thread waits for the decommit to finish, skip
-                 * potentially expensive unlock/lock pair on the contested
-                 * lock.
-                 */
-                Maybe<AutoUnlockGC> maybeUnlock;
-                if (!isHeapBusy())
-                    maybeUnlock.emplace(rt);
+                AutoUnlockGC unlock(rt);
                 ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
             }
-
-            if (ok) {
-                ++chunk->info.numArenasFree;
-                chunk->decommittedArenas.set(arenaIndex);
-            } else {
-                chunk->addArenaToFreeList(rt, aheader);
-            }
-            MOZ_ASSERT(chunk->hasAvailableArenas());
-            MOZ_ASSERT(!chunk->unused());
-            if (chunk->info.numArenasFree == 1) {
-                /*
-                 * Put the chunk back to the available list either at the
-                 * point where it was before to preserve the available list
-                 * that we enumerate, or, when the allocation thread has fully
-                 * used all the previous chunks, at the beginning of the
-                 * available list.
-                 */
-                Chunk **insertPoint = savedPrevp;
-                if (savedPrevp != availableListHeadp) {
-                    Chunk *prev = Chunk::fromPointerToNext(savedPrevp);
-                    if (!prev->hasAvailableArenas())
-                        insertPoint = availableListHeadp;
-                }
-                chunk->insertToAvailableList(insertPoint);
-            } else {
-                MOZ_ASSERT(chunk->info.prevp);
-            }
-
-            if (chunkAllocationSinceLastGC || !ok) {
-                /*
-                 * The allocator thread has started to get new chunks. We should stop
-                 * to avoid decommitting arenas in just allocated chunks.
-                 */
+            chunk->releaseArena(rt, aheader, lock, Chunk::ArenaDecommitState(ok));
+
+            // FIXME Bug 1095620: add cancellation support when this becomes
+            // a ParallelTask.
+            if (/* cancel_ || */ !ok)
                 return;
-            }
         }
-
-        /*
-         * chunk->info.prevp becomes null when the allocator thread consumed
-         * all chunks from the available list.
-         */
-        MOZ_ASSERT_IF(chunk->info.prevp, *chunk->info.prevp == chunk);
-        if (chunk->info.prevp == availableListHeadp || !chunk->info.prevp)
-            break;
-
-        /*
-         * prevp exists and is not the list head. It must point to the next
-         * field of the previous chunk.
-         */
-        chunk = chunk->getPrevious();
     }
 }
 
 void
 GCRuntime::expireChunksAndArenas(bool shouldShrink, const AutoLockGC &lock)
 {
 #ifdef JSGC_FJGENERATIONAL
     rt->threadPool.pruneChunkCache();
@@ -6263,18 +6210,24 @@ GCRuntime::shrinkBuffers()
 }
 
 void
 GCRuntime::onOutOfMallocMemory()
 {
     // Stop allocating new chunks.
     allocTask.cancel(GCParallelTask::CancelAndWait);
 
+    AutoLockGC lock(rt);
+    onOutOfMallocMemory(lock);
+}
+
+void
+GCRuntime::onOutOfMallocMemory(const AutoLockGC &lock)
+{
     // Throw away any excess chunks we have lying around.
-    AutoLockGC lock(rt);
     freeEmptyChunks(rt, lock);
 
     // Immediately decommit as many arenas as possible in the hopes that this
     // might let the OS scrape together enough pages to satisfy the failing
     // malloc request.
     decommitAllWithoutUnlocking(lock);
 }
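
The heart of the fix is the rewritten `decommitArenas` above: instead of walking `availableChunkListHead` while repeatedly dropping the GC lock (a window in which the mutator can splice chunks in and out of the list), it snapshots the list into a `Vector` under the lock and then walks the snapshot from the tail, stopping before the head chunk that the allocator is working from. Below is a minimal standalone sketch of that snapshot-then-iterate pattern; `Chunk`, `decommit()`, and the globals are toy stand-ins, the per-arena inner loop is collapsed to one decommit per chunk, and (like the real patch) it assumes snapshotted chunks stay alive across the unlocked region:

    #include <mutex>
    #include <vector>

    struct Chunk {
        Chunk *next = nullptr;
        bool decommit() { return true; }   // stands in for MarkPagesUnused()
    };

    std::mutex gcLock;                     // plays the role of the runtime GC lock
    Chunk *availableHead = nullptr;        // only mutated while gcLock is held

    void decommitArenasSketch() {
        std::unique_lock<std::mutex> lock(gcLock);

        // Snapshot under the lock: the live list may be mutated the moment
        // the lock is dropped, so it must never be iterated unlocked.
        std::vector<Chunk *> toDecommit;
        for (Chunk *c = availableHead; c; c = c->next)
            toDecommit.push_back(c);

        // Walk the snapshot tail-first and stop before index 0: allocation
        // happens at the head of the list, so this minimizes contention.
        for (size_t i = toDecommit.size(); i > 1; --i) {
            Chunk *chunk = toDecommit[i - 1];
            lock.unlock();                 // decommit is expensive; drop the lock
            bool ok = chunk->decommit();
            lock.lock();
            if (!ok)                       // mirror the early return above
                return;
        }
    }

    int main() { decommitArenasSketch(); }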