Bug 1074961 - Part 16: Use a ChunkPool instead of manual list twiddling to manage available chunks; r=sfink
author Terrence Cole <terrence@mozilla.com>
date Wed, 29 Oct 2014 14:18:25 -0700
changeset 240885 d54f4315fd89c34f1426cc15466aef322778578c
parent 240884 a4441b5f5de82eb144a4f195368ab1e1c8905c9d
child 240886 2d8d2791494525de2055022c0692935fbbe793f0
push id 4311
push user raliiev@mozilla.com
push date Mon, 12 Jan 2015 19:37:41 +0000
reviewers sfink
bugs 1074961
milestone 36.0a1
js/src/gc/GCRuntime.h
js/src/gc/Heap.h
js/src/jsgc.cpp
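
The patch retires GCRuntime::availableChunkListHead and the Chunk::addToAvailableList / insertToAvailableList / removeFromAvailableList helpers, and tracks available chunks in the same ChunkPool type already used for empty chunks. For reference, here is a minimal standalone sketch (hypothetical Node/Pool names, not the SpiderMonkey sources) of the intrusive doubly-linked list discipline the pool encapsulates; the real code threads the links through Chunk::info.next and Chunk::info.prevp exactly as the hunks below show.

// Minimal sketch of an intrusive doubly-linked pool with O(1) push/pop/remove.
// Node/Pool are hypothetical stand-ins for Chunk/ChunkPool.
#include <cassert>
#include <cstddef>

struct Node {
    Node *next = nullptr;    // next node in the pool
    Node **prevp = nullptr;  // address of the pointer that points at this node
};

class Pool {
    Node *head_ = nullptr;
    std::size_t count_ = 0;

  public:
    std::size_t count() const { return count_; }

    void push(Node *n) {
        assert(!n->next && !n->prevp);
        n->next = head_;
        n->prevp = &head_;
        if (head_)
            head_->prevp = &n->next;
        head_ = n;
        ++count_;
    }

    Node *remove(Node *n) {
        assert(count_ > 0);
        *n->prevp = n->next;              // unlink via the back-pointer; no head scan needed
        if (n->next)
            n->next->prevp = n->prevp;
        n->next = nullptr;
        n->prevp = nullptr;
        --count_;
        return n;
    }

    Node *pop() { return count_ ? remove(head_) : nullptr; }
};

int main() {
    Pool pool;
    Node a, b;
    pool.push(&a);
    pool.push(&b);
    pool.remove(&a);                      // O(1) removal from anywhere in the list
    assert(pool.count() == 1);
    assert(pool.pop() == &b);
}
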
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -47,23 +47,19 @@ class ChunkPool
     Chunk *head_;
     size_t count_;
 
   public:
     ChunkPool() : head_(nullptr), count_(0) {}
 
     size_t count() const { return count_; }
 
-    /* Must be called with the GC lock taken. */
+    Chunk *head() { MOZ_ASSERT(head_); return head_; }
     Chunk *pop();
-
-    /* Must be called either during the GC or with the GC lock taken. */
     void push(Chunk *chunk);
-
-    /* Must be called with the GC lock taken. */
     Chunk *remove(Chunk *chunk);
 
 #ifdef DEBUG
     bool contains(Chunk *chunk) const;
     bool verify() const;
 #endif
 
     // Pool mutation does not invalidate an Iter unless the mutation
@@ -474,20 +470,21 @@ class GCRuntime
         mode = m;
         marker.setGCMode(mode);
     }
 
     inline void updateOnFreeArenaAlloc(const ChunkInfo &info);
     inline void updateOnArenaFree(const ChunkInfo &info);
 
     GCChunkSet::Range allChunks() { return chunkSet.all(); }
-    Chunk **getAvailableChunkList();
     void moveChunkToFreePool(Chunk *chunk, const AutoLockGC &lock);
     bool hasChunk(Chunk *chunk) { return chunkSet.has(chunk); }
+    ChunkPool &availableChunks(const AutoLockGC &lock) { return availableChunks_; }
     ChunkPool &emptyChunks(const AutoLockGC &lock) { return emptyChunks_; }
+    const ChunkPool &availableChunks(const AutoLockGC &lock) const { return availableChunks_; }
     const ChunkPool &emptyChunks(const AutoLockGC &lock) const { return emptyChunks_; }
 
 #ifdef JS_GC_ZEAL
     void startVerifyPreBarriers();
     bool endVerifyPreBarriers();
     void startVerifyPostBarriers();
     bool endVerifyPostBarriers();
     void finishVerifier();
@@ -640,18 +637,18 @@ class GCRuntime
 
     /*
      * Doubly-linked lists of chunks from user and system compartments. The GC
      * allocates its arenas from the corresponding list and when all arenas
      * in the list head are taken, then the chunk is removed from the list.
      * During the GC when all arenas in a chunk become free, that chunk is
      * removed from the list and scheduled for release.
      */
-    js::gc::Chunk *availableChunkListHead;
-    js::gc::ChunkPool emptyChunks_;
+    ChunkPool availableChunks_;
+    ChunkPool emptyChunks_;
 
     js::RootedValueMap rootsHash;
 
     size_t maxMallocBytes;
 
     /*
      * Number of the committed arenas in all GC chunks including empty chunks.
      */
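
Note that the new availableChunks(lock) accessors above, like the existing emptyChunks(lock) ones, take an otherwise unused const AutoLockGC & so that holding the GC lock is proven at the call site rather than documented in a comment, which is why the "Must be called with the GC lock taken" comments on pop/push/remove can be dropped. A small sketch of that lock-token idiom, with hypothetical names (not the SpiderMonkey API):

// Sketch of the lock-token idiom: the accessor cannot be called without naming
// a live lock guard, so "must hold the lock" is enforced by the type system.
#include <mutex>
#include <vector>

class AutoLockGuard {
    std::lock_guard<std::mutex> guard_;
  public:
    explicit AutoLockGuard(std::mutex &m) : guard_(m) {}
};

class Runtime {
    std::mutex lock_;
    std::vector<int> available_;   // stands in for the available-chunk pool
  public:
    std::mutex &lock() { return lock_; }
    // The reference parameter is unused at runtime; it only proves the lock is held.
    std::vector<int> &availableChunks(const AutoLockGuard &) { return available_; }
};

int main() {
    Runtime rt;
    AutoLockGuard lock(rt.lock());
    rt.availableChunks(lock).push_back(1);  // compiles only with a lock token in scope
}
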
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -940,20 +940,16 @@ struct Chunk
     bool unused() const {
         return info.numArenasFree == ArenasPerChunk;
     }
 
     bool hasAvailableArenas() const {
         return info.numArenasFree != 0;
     }
 
-    inline void addToAvailableList(JSRuntime *rt);
-    inline void insertToAvailableList(Chunk **insertPoint);
-    inline void removeFromAvailableList();
-
     ArenaHeader *allocateArena(JSRuntime *rt, JS::Zone *zone, AllocKind kind,
                                const AutoLockGC &lock);
 
     enum ArenaDecommitState { IsCommitted = false, IsDecommitted = true };
     void releaseArena(JSRuntime *rt, ArenaHeader *aheader, const AutoLockGC &lock,
                       ArenaDecommitState state = IsCommitted);
     void recycleArena(ArenaHeader *aheader, SortedArenaList &dest, AllocKind thingKind,
                       size_t thingsPerArena);
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -658,27 +658,25 @@ AllocChunk(JSRuntime *rt)
 }
 
 static inline void
 FreeChunk(JSRuntime *rt, Chunk *p)
 {
     UnmapPages(static_cast<void *>(p), ChunkSize);
 }
 
-/* Must be called with the GC lock taken. */
 Chunk *
 ChunkPool::pop()
 {
     MOZ_ASSERT(bool(head_) == bool(count_));
     if (!count_)
         return nullptr;
     return remove(head_);
 }
 
-/* Must be called either during the GC or with the GC lock taken. */
 void
 ChunkPool::push(Chunk *chunk)
 {
     MOZ_ASSERT(!chunk->info.next);
     MOZ_ASSERT(!chunk->info.prevp);
     MOZ_ASSERT_IF(head_, head_->info.prevp == &head_);
 
     chunk->info.age = 0;
@@ -687,17 +685,16 @@ ChunkPool::push(Chunk *chunk)
     if (head_)
         head_->info.prevp = &chunk->info.next;
     head_ = chunk;
     ++count_;
 
     MOZ_ASSERT(verify());
 }
 
-/* Must be called with the GC lock taken. */
 Chunk *
 ChunkPool::remove(Chunk *chunk)
 {
     MOZ_ASSERT(count_ > 0);
     MOZ_ASSERT(contains(chunk));
 
     *chunk->info.prevp = chunk->info.next;
     if (chunk->info.next) {
@@ -890,57 +887,16 @@ Chunk::init(JSRuntime *rt)
     info.prevp = nullptr;
     info.trailer.storeBuffer = nullptr;
     info.trailer.location = ChunkLocationBitTenuredHeap;
     info.trailer.runtime = rt;
 
     /* The rest of info fields are initialized in pickChunk. */
 }
 
-inline Chunk **
-GCRuntime::getAvailableChunkList()
-{
-    return &availableChunkListHead;
-}
-
-inline void
-Chunk::addToAvailableList(JSRuntime *rt)
-{
-    insertToAvailableList(rt->gc.getAvailableChunkList());
-}
-
-inline void
-Chunk::insertToAvailableList(Chunk **insertPoint)
-{
-    MOZ_ASSERT(hasAvailableArenas());
-    MOZ_ASSERT(!info.prevp);
-    MOZ_ASSERT(!info.next);
-    info.prevp = insertPoint;
-    Chunk *insertBefore = *insertPoint;
-    if (insertBefore) {
-        MOZ_ASSERT(insertBefore->info.prevp == insertPoint);
-        insertBefore->info.prevp = &info.next;
-    }
-    info.next = insertBefore;
-    *insertPoint = this;
-}
-
-inline void
-Chunk::removeFromAvailableList()
-{
-    MOZ_ASSERT(info.prevp);
-    *info.prevp = info.next;
-    if (info.next) {
-        MOZ_ASSERT(info.next->info.prevp == &info.next);
-        info.next->info.prevp = info.prevp;
-    }
-    info.prevp = nullptr;
-    info.next = nullptr;
-}
-
 /*
  * Search for and return the next decommitted Arena. Our goal is to keep
  * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
  * it to the most recently freed arena when we free, and forcing it to
  * the last alloc + 1 when we allocate.
  */
 uint32_t
 Chunk::findDecommittedArenaOffset()
@@ -998,17 +954,17 @@ Chunk::fetchNextFreeArena(JSRuntime *rt)
 ArenaHeader *
 Chunk::allocateArena(JSRuntime *rt, Zone *zone, AllocKind thingKind, const AutoLockGC &lock)
 {
     ArenaHeader *aheader = info.numArenasFreeCommitted > 0
                            ? fetchNextFreeArena(rt)
                            : fetchNextDecommittedArena();
     aheader->init(zone, thingKind);
     if (MOZ_UNLIKELY(!hasAvailableArenas()))
-        removeFromAvailableList();
+        rt->gc.availableChunks(lock).remove(this);
     return aheader;
 }
 
 inline void
 GCRuntime::updateOnArenaFree(const ChunkInfo &info)
 {
     ++numArenasFreeCommitted;
 }
@@ -1051,22 +1007,22 @@ Chunk::releaseArena(JSRuntime *rt, Arena
         addArenaToFreeList(rt, aheader);
     } else {
         addArenaToDecommittedList(rt, aheader);
     }
 
     if (info.numArenasFree == 1) {
         MOZ_ASSERT(!info.prevp);
         MOZ_ASSERT(!info.next);
-        addToAvailableList(rt);
+        rt->gc.availableChunks(lock).push(this);
     } else if (!unused()) {
         MOZ_ASSERT(info.prevp);
     } else {
         MOZ_ASSERT(unused());
-        removeFromAvailableList();
+        rt->gc.availableChunks(lock).remove(this);
         decommitAllArenas(rt);
         rt->gc.moveChunkToFreePool(this, lock);
     }
 }
 
 void
 GCRuntime::moveChunkToFreePool(Chunk *chunk, const AutoLockGC &lock)
 {
@@ -1122,22 +1078,20 @@ class js::gc::AutoMaybeStartBackgroundAl
             runtime->gc.startBackgroundAllocTaskIfIdle();
     }
 };
 
 Chunk *
 GCRuntime::pickChunk(const AutoLockGC &lock,
                      AutoMaybeStartBackgroundAllocation &maybeStartBackgroundAllocation)
 {
-    Chunk **listHeadp = getAvailableChunkList();
-    Chunk *chunk = *listHeadp;
-    if (chunk)
-        return chunk;
-
-    chunk = emptyChunks(lock).pop();
+    if (availableChunks(lock).count())
+        return availableChunks(lock).head();
+
+    Chunk *chunk = emptyChunks(lock).pop();
     if (!chunk) {
         chunk = Chunk::allocate(rt);
         if (!chunk)
             return nullptr;
         MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
     }
 
     MOZ_ASSERT(chunk->unused());
@@ -1154,19 +1108,17 @@ GCRuntime::pickChunk(const AutoLockGC &l
      */
     GCChunkSet::AddPtr p = chunkSet.lookupForAdd(chunk);
     MOZ_ASSERT(!p);
     if (!chunkSet.add(p, chunk)) {
         releaseChunk(chunk);
         return nullptr;
     }
 
-    chunk->info.prevp = nullptr;
-    chunk->info.next = nullptr;
-    chunk->addToAvailableList(rt);
+    availableChunks(lock).push(chunk);
 
     return chunk;
 }
 
 ArenaHeader *
 GCRuntime::allocateArena(Chunk *chunk, Zone *zone, AllocKind thingKind, const AutoLockGC &lock)
 {
     MOZ_ASSERT(chunk->hasAvailableArenas());
@@ -1211,17 +1163,16 @@ GCRuntime::GCRuntime(JSRuntime *rt) :
     systemZone(nullptr),
 #ifdef JSGC_GENERATIONAL
     nursery(rt),
     storeBuffer(rt, nursery),
 #endif
     stats(rt),
     marker(rt),
     usage(nullptr),
-    availableChunkListHead(nullptr),
     maxMallocBytes(0),
     numArenasFreeCommitted(0),
     verifyPreData(nullptr),
     verifyPostData(nullptr),
     chunkAllocationSinceLastGC(false),
     nextFullGCTime(0),
     lastGCTime(0),
     mode(JSGC_MODE_INCREMENTAL),
@@ -1455,17 +1406,23 @@ GCRuntime::finish()
             for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
                 js_delete(comp.get());
             js_delete(zone.get());
         }
     }
 
     zones.clear();
 
-    availableChunkListHead = nullptr;
+    for (ChunkPool::Iter iter(availableChunks_); !iter.done();) {
+        Chunk *chunk = iter.get();
+        iter.next();
+        MOZ_ASSERT(chunkSet.has(chunk));
+        availableChunks_.remove(chunk);
+    }
+
     if (chunkSet.initialized()) {
         for (GCChunkSet::Range r(chunkSet.all()); !r.empty(); r.popFront())
             releaseChunk(r.front());
         chunkSet.clear();
     }
 
     FreeChunkPool(rt, emptyChunks_);
 
@@ -3450,42 +3407,44 @@ GCRuntime::maybePeriodicFullGC()
 }
 
 // Do all possible decommit immediately from the current thread without
 // releasing the GC lock or allocating any memory.
 void
 GCRuntime::decommitAllWithoutUnlocking(const AutoLockGC &lock)
 {
     MOZ_ASSERT(emptyChunks(lock).count() == 0);
-    for (Chunk *chunk = *getAvailableChunkList(); chunk; chunk = chunk->info.next) {
+    for (ChunkPool::Iter chunk(availableChunks(lock)); !chunk.done(); chunk.next()) {
         for (size_t i = 0; i < ArenasPerChunk; ++i) {
             if (chunk->decommittedArenas.get(i) || chunk->arenas[i].aheader.allocated())
                 continue;
 
             if (MarkPagesUnused(&chunk->arenas[i], ArenaSize)) {
                 chunk->info.numArenasFreeCommitted--;
                 chunk->decommittedArenas.set(i);
             }
         }
     }
+    MOZ_ASSERT(availableChunks(lock).verify());
 }
 
 void
 GCRuntime::decommitArenas(const AutoLockGC &lock)
 {
     // Verify that all entries in the empty chunks pool are decommitted.
     for (ChunkPool::Iter chunk(emptyChunks(lock)); !chunk.done(); chunk.next())
         MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
 
     // Build a Vector of all current available Chunks. Since we release the
     // gc lock while doing the decommit syscall, it is dangerous to iterate
     // the available list directly, as concurrent operations can modify it.
     mozilla::Vector<Chunk *> toDecommit;
-    for (Chunk *chunk = availableChunkListHead; chunk; chunk = chunk->info.next) {
-        if (!toDecommit.append(chunk)) {
+    MOZ_ASSERT(availableChunks(lock).verify());
+    for (ChunkPool::Iter iter(availableChunks(lock)); !iter.done(); iter.next()) {
+        if (!toDecommit.append(iter.get())) {
             // The OOM handler does a full, immediate decommit, so there is
             // nothing more to do here in any case.
             return onOutOfMallocMemory(lock);
         }
     }
 
     // Start at the tail and stop before the first chunk: we allocate from the
     // head and don't want to thrash with the mutator.
@@ -3505,16 +3464,17 @@ GCRuntime::decommitArenas(const AutoLock
             chunk->releaseArena(rt, aheader, lock, Chunk::ArenaDecommitState(ok));
 
             // FIXME Bug 1095620: add cancellation support when this becomes
             // a ParallelTask.
             if (/* cancel_ || */ !ok)
                 return;
         }
     }
+    MOZ_ASSERT(availableChunks(lock).verify());
 }
 
 void
 GCRuntime::expireChunksAndArenas(bool shouldShrink, const AutoLockGC &lock)
 {
 #ifdef JSGC_FJGENERATIONAL
     rt->threadPool.pruneChunkCache();
 #endif
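
For context on the decommit changes above: decommitArenas snapshots the available pool into a Vector because the GC lock is released around each decommit syscall, and it walks the snapshot from the tail, stopping before the head chunk, because the allocator takes chunks from the head. A hedged sketch of that snapshot-then-walk-backwards shape using standard-library types (hypothetical names; the real loop also re-validates each chunk after retaking the lock):

// Sketch only: snapshot the pool under the lock, then decommit from the tail,
// dropping the lock around the slow syscall so the mutator can keep allocating.
#include <cstddef>
#include <mutex>
#include <vector>

struct Chunk { bool decommitted = false; };

static void markPagesUnused(Chunk &c) { c.decommitted = true; }  // stands in for the real syscall

void decommitAvailable(std::vector<Chunk *> &available, std::unique_lock<std::mutex> &lock) {
    // Snapshot under the lock: once the lock is dropped, the live pool may change.
    std::vector<Chunk *> toDecommit(available);

    // Walk from the tail and stop before the head chunk, which the mutator
    // allocates from, to avoid thrashing with it.
    for (std::size_t i = toDecommit.size(); i-- > 1;) {
        Chunk *chunk = toDecommit[i];
        lock.unlock();                 // don't hold the GC lock across the slow syscall
        markPagesUnused(*chunk);
        lock.lock();                   // the real loop re-checks the chunk's state here
    }
}

int main() {
    std::mutex gcLock;
    std::unique_lock<std::mutex> lock(gcLock);
    Chunk a, b;
    std::vector<Chunk *> available{&a, &b};
    decommitAvailable(available, lock);
}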