bug 702251 - free GC chunks in background outside the GC lock. r=wmccloskey
author: Igor Bukanov <igor@mir2.org>
date: Sun, 25 Dec 2011 02:45:22 +0100
changeset: 84689 7884cb42b9de882164c575cc284916b6ff726459
parent: 84688 1b72fc52dfe19f82772772f4428dabcc2fbd3949
child: 84690 e76919e51dc36119569edfccb7b4b6b707f80052
push id: 805
push user: akeybl@mozilla.com
push date: Wed, 01 Feb 2012 18:17:35 +0000
reviewers: wmccloskey
bugs: 702251
milestone: 12.0a1
bug 702251 - free GC chunks in background outside the GC lock. r=wmccloskey
js/src/jsgc.cpp
js/src/jsgc.h
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -469,79 +469,57 @@ ChunkPool::get(JSRuntime *rt)
         rt->gcHelperThread.startBackgroundAllocationIfIdle();
 #endif
 
     return chunk;
 }
 
 /* Must be called either during the GC or with the GC lock taken. */
 inline void
-ChunkPool::put(JSRuntime *rt, Chunk *chunk)
+ChunkPool::put(Chunk *chunk)
 {
-    JS_ASSERT(this == &rt->gcChunkPool);
-
-    size_t initialAge = 0;
-#ifdef JS_THREADSAFE
-    /*
-     * When we have not yet started the background finalization, we must keep
-     * empty chunks until we are done with all the sweeping and finalization
-     * that cannot be done in the background even if shouldShrink() is true.
-     * This way we can safely call IsAboutToBeFinalized and Cell::isMarked for
-     * finalized GC things in empty chunks. So we only release the chunk if we
-     * are called from the background thread.
-     */
-    if (rt->gcHelperThread.sweeping()) {
-        if (rt->gcHelperThread.shouldShrink()) {
-            Chunk::release(rt, chunk);
-            return;
-        }
-
-        /*
-         * Set the age to one as we expire chunks early during the background
-         * sweep so this chunk already survived one GC cycle.
-         */
-        initialAge = 1;
-    }
-#endif
-
-    chunk->info.age = initialAge;
+    chunk->info.age = 0;
     chunk->info.next = emptyChunkListHead;
     emptyChunkListHead = chunk;
     emptyCount++;
 }
 
 /* Must be called either during the GC or with the GC lock taken. */
-void
+Chunk *
 ChunkPool::expire(JSRuntime *rt, bool releaseAll)
 {
     JS_ASSERT(this == &rt->gcChunkPool);
 
     /*
      * Return old empty chunks to the system while preserving the order of
      * other chunks in the list. This way, if the GC runs several times
      * without emptying the list, the older chunks will stay at the tail
      * and are more likely to reach the max age.
      */
+    Chunk *freeList = NULL;
     for (Chunk **chunkp = &emptyChunkListHead; *chunkp; ) {
         JS_ASSERT(emptyCount);
         Chunk *chunk = *chunkp;
         JS_ASSERT(chunk->unused());
         JS_ASSERT(!rt->gcChunkSet.has(chunk));
         JS_ASSERT(chunk->info.age <= MAX_EMPTY_CHUNK_AGE);
         if (releaseAll || chunk->info.age == MAX_EMPTY_CHUNK_AGE) {
             *chunkp = chunk->info.next;
             --emptyCount;
-            Chunk::release(rt, chunk);
+            chunk->prepareToBeFreed(rt);
+            chunk->info.next = freeList;
+            freeList = chunk;
         } else {
             /* Keep the chunk but increase its age. */
             ++chunk->info.age;
             chunkp = &chunk->info.next;
         }
     }
     JS_ASSERT_IF(releaseAll, !emptyCount);
+    return freeList;
 }
 
 JS_FRIEND_API(int64_t)
 ChunkPool::countCleanDecommittedArenas(JSRuntime *rt)
 {
     JS_ASSERT(this == &rt->gcChunkPool);
 
     int64_t numDecommitted = 0;
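
The rewritten expire() above illustrates the pattern this patch is built on: unlink the expired chunks while the lock is held, chain them through their existing info.next pointers into a private list, and return that list so the caller can free it after dropping the GC lock. A minimal generic sketch of the same unlink-and-collect loop, using hypothetical Node/expired names rather than the real SpiderMonkey types:

    #include <cstddef>

    struct Node { Node *next; };

    /*
     * Detach every node the predicate accepts while preserving the
     * relative order of the survivors. Runs under the lock; the returned
     * list is private to the caller and can be freed with no lock held.
     */
    static Node *
    CollectExpired(Node **headp, bool (*expired)(Node *))
    {
        Node *freeList = NULL;
        for (Node **np = headp; *np; ) {
            Node *n = *np;
            if (expired(n)) {
                *np = n->next;       /* unlink from the shared list */
                n->next = freeList;  /* push onto the private list */
                freeList = n;
            } else {
                np = &n->next;
            }
        }
        return freeList;
    }
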
@@ -566,20 +544,44 @@ Chunk::allocate(JSRuntime *rt)
     return chunk;
 }
 
 /* Must be called with the GC lock taken. */
 /* static */ inline void
 Chunk::release(JSRuntime *rt, Chunk *chunk)
 {
     JS_ASSERT(chunk);
-    JS_ASSERT(rt->gcNumArenasFreeCommitted >= chunk->info.numArenasFreeCommitted);
-    rt->gcNumArenasFreeCommitted -= chunk->info.numArenasFreeCommitted;
+    chunk->prepareToBeFreed(rt);
+    FreeChunk(chunk);
+}
+
+static void
+FreeChunkList(Chunk *chunkListHead)
+{
+    while (Chunk *chunk = chunkListHead) {
+        JS_ASSERT(!chunk->info.numArenasFreeCommitted);
+        chunkListHead = chunk->info.next;
+        FreeChunk(chunk);
+    }
+}
+
+inline void
+Chunk::prepareToBeFreed(JSRuntime *rt)
+{
+    JS_ASSERT(rt->gcNumArenasFreeCommitted >= info.numArenasFreeCommitted);
+    rt->gcNumArenasFreeCommitted -= info.numArenasFreeCommitted;
     rt->gcStats.count(gcstats::STAT_DESTROY_CHUNK);
-    FreeChunk(chunk);
+
+#ifdef DEBUG
+    /*
+     * Let FreeChunkList detect a missing prepareToBeFreed call before it
+     * frees the chunk.
+     */
+    info.numArenasFreeCommitted = 0;
+#endif
 }
 
 void
 Chunk::init()
 {
     JS_POISON(this, JS_FREE_PATTERN, ChunkSize);
 
     /*
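
Splitting Chunk::release() into prepareToBeFreed() plus FreeChunk() opens a window in which a chunk's arenas are no longer accounted for but its memory has not yet been returned. The DEBUG-only zeroing above turns a missed hand-off into an assertion failure rather than silent counter corruption. A minimal self-contained sketch of that debug hand-off check, with hypothetical Block/PrepareToBeFreed names standing in for the jsgc types:

    #include <cassert>
    #include <cstdlib>

    struct Block {
        size_t committed;   /* counted in some runtime-wide total */
        Block *next;
    };

    static void
    PrepareToBeFreed(Block *b, size_t *total)
    {
        *total -= b->committed;   /* bookkeeping, done under the lock */
    #ifdef DEBUG
        b->committed = 0;         /* poison: marks the hand-off done */
    #endif
    }

    static void
    FreeBlockList(Block *head)
    {
        while (Block *b = head) {
            assert(b->committed == 0);   /* missing PrepareToBeFreed? */
            head = b->next;
            free(b);
        }
    }
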
@@ -760,17 +762,17 @@ Chunk::releaseArena(ArenaHeader *aheader
         JS_ASSERT(!info.prevp);
         JS_ASSERT(!info.next);
         addToAvailableList(comp);
     } else if (!unused()) {
         JS_ASSERT(info.prevp);
     } else {
         rt->gcChunkSet.remove(this);
         removeFromAvailableList();
-        rt->gcChunkPool.put(rt, this);
+        rt->gcChunkPool.put(this);
     }
 }
 
 } /* namespace gc */
 } /* namespace js */
 
 /* The caller must hold the GC lock. */
 static Chunk *
@@ -1168,17 +1170,17 @@ js_FinishGC(JSRuntime *rt)
 #ifdef JS_THREADSAFE
     rt->gcHelperThread.finish();
 #endif
 
     /*
      * Finish the pool after the background thread stops in case it was doing
      * the background sweeping.
      */
-    rt->gcChunkPool.expire(rt, true);
+    FreeChunkList(rt->gcChunkPool.expire(rt, true));
 
 #ifdef DEBUG
     if (!rt->gcRootsHash.empty())
         CheckLeakedRoots(rt);
 #endif
     rt->gcRootsHash.clear();
     rt->gcLocksHash.clear();
 }
@@ -2191,22 +2193,18 @@ MaybeGC(JSContext *cx)
         {
             js_GC(cx, NULL, GC_SHRINK, gcstats::MAYBEGC);
         } else {
             rt->gcNextFullGCTime = now + GC_IDLE_FULL_SPAN;
         }
     }
 }
 
-} /* namespace js */
-
 #ifdef JS_THREADSAFE
 
-namespace js {
-
 bool
 GCHelperThread::init()
 {
     if (!(wakeup = PR_NewCondVar(rt->gcLock)))
         return false;
     if (!(done = PR_NewCondVar(rt->gcLock)))
         return false;
 
@@ -2282,17 +2280,17 @@ GCHelperThread::threadLoop()
                     chunk = Chunk::allocate(rt);
                 }
 
                 /* OOM stops the background allocation. */
                 if (!chunk)
                     break;
                 JS_ASSERT(chunk->info.numArenasFreeCommitted == ArenasPerChunk);
                 rt->gcNumArenasFreeCommitted += ArenasPerChunk;
-                rt->gcChunkPool.put(rt, chunk);
+                rt->gcChunkPool.put(chunk);
             } while (state == ALLOCATING && rt->gcChunkPool.wantBackgroundAllocation(rt));
             if (state == ALLOCATING)
                 state = IDLE;
             break;
           case CANCEL_ALLOCATION:
             state = IDLE;
             PR_NotifyAllCondVar(done);
             break;
@@ -2371,52 +2369,53 @@ GCHelperThread::replenishAndFreeLater(vo
 }
 
 /* Must be called with the GC lock taken. */
 void
 GCHelperThread::doSweep()
 {
     JS_ASSERT(context);
 
-    /*
-     * Expire the chunks released during the GC so they will be available to
-     * the rest of the system immediately.
-     */
-    rt->gcChunkPool.expire(rt, shouldShrink());
-
-    AutoUnlockGC unlock(rt);
-
-    /*
-     * We must finalize in the insert order, see comments in
-     * finalizeObjects.
-     */
-    for (ArenaHeader **i = finalizeVector.begin(); i != finalizeVector.end(); ++i)
-        ArenaLists::backgroundFinalize(context, *i);
-    finalizeVector.resize(0);
-
-    context = NULL;
-
-    if (freeCursor) {
-        void **array = freeCursorEnd - FREE_ARRAY_LENGTH;
-        freeElementsAndArray(array, freeCursor);
-        freeCursor = freeCursorEnd = NULL;
-    } else {
-        JS_ASSERT(!freeCursorEnd);
+    {
+        AutoUnlockGC unlock(rt);
+
+        /*
+         * We must finalize in the insert order, see comments in
+         * finalizeObjects.
+         */
+        for (ArenaHeader **i = finalizeVector.begin(); i != finalizeVector.end(); ++i)
+            ArenaLists::backgroundFinalize(context, *i);
+        finalizeVector.resize(0);
+
+        context = NULL;
+
+        if (freeCursor) {
+            void **array = freeCursorEnd - FREE_ARRAY_LENGTH;
+            freeElementsAndArray(array, freeCursor);
+            freeCursor = freeCursorEnd = NULL;
+        } else {
+            JS_ASSERT(!freeCursorEnd);
+        }
+        for (void ***iter = freeVector.begin(); iter != freeVector.end(); ++iter) {
+            void **array = *iter;
+            freeElementsAndArray(array, array + FREE_ARRAY_LENGTH);
+        }
+        freeVector.resize(0);
     }
-    for (void ***iter = freeVector.begin(); iter != freeVector.end(); ++iter) {
-        void **array = *iter;
-        freeElementsAndArray(array, array + FREE_ARRAY_LENGTH);
+
+    if (Chunk *toFree = rt->gcChunkPool.expire(rt, shouldShrink())) {
+        AutoUnlockGC unlock(rt);
+        FreeChunkList(toFree);
     }
-    freeVector.resize(0);
 }
 
+#endif /* JS_THREADSAFE */
+
 } /* namespace js */
 
-#endif /* JS_THREADSAFE */
-
 static bool
 ReleaseObservedTypes(JSContext *cx)
 {
     JSRuntime *rt = cx->runtime;
 
     bool releaseTypes = false;
     int64_t now = PRMJ_Now();
     if (now >= rt->gcJitReleaseTime) {
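
The doSweep() restructuring above is the heart of the patch: the method still enters with the GC lock held, but the unlocked finalization region is now an explicit scope, the lock is re-taken just long enough for expire() to detach the stale chunks, and it is dropped again while FreeChunkList() returns their pages to the system. A self-contained sketch of that lock discipline (std::mutex and all names here are illustrative, not the SpiderMonkey API):

    #include <mutex>

    struct Chunk { Chunk *next; };

    /* RAII inverse of std::lock_guard: unlocks now, relocks on exit. */
    struct UnlockGuard {
        std::mutex &m;
        explicit UnlockGuard(std::mutex &mtx) : m(mtx) { m.unlock(); }
        ~UnlockGuard() { m.lock(); }
    };

    struct Pool {
        std::mutex lock;
        Chunk *emptyHead = nullptr;

        /* Under the lock: detach the whole empty list (releaseAll). */
        Chunk *expireAll() {
            Chunk *list = emptyHead;
            emptyHead = nullptr;
            return list;
        }
    };

    static void FreeChunkList(Chunk *head) {
        while (Chunk *c = head) { head = c->next; delete c; }
    }

    /* Caller holds pool.lock, mirroring doSweep()'s contract. */
    static void Sweep(Pool &pool) {
        {
            UnlockGuard unlock(pool.lock);
            /* ... finalize arenas and free deferred things here ... */
        }
        /* Lock held again: unlinking is cheap, so do it locked. */
        if (Chunk *toFree = pool.expireAll()) {
            UnlockGuard unlock(pool.lock);   /* free outside the lock */
            FreeChunkList(toFree);
        }
    }
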
@@ -2652,17 +2651,17 @@ SweepPhase(JSContext *cx, GCMarker *gcma
             SweepCompartments(cx, gckind);
 
 #ifndef JS_THREADSAFE
         /*
          * Destroy arenas after we finished the sweeping so finalizers can safely
          * use IsAboutToBeFinalized().
          * This is done on the GCHelperThread if JS_THREADSAFE is defined.
          */
-        rt->gcChunkPool.expire(rt, gckind == GC_SHRINK);
+        FreeChunkList(rt->gcChunkPool.expire(rt, gckind == GC_SHRINK));
 #endif
 
         if (gckind == GC_SHRINK)
             DecommitFreePages(cx);
     }
 
     {
         gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_XPCONNECT);
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -593,33 +593,33 @@ struct ChunkInfo {
  *
  * In order to figure out how many Arenas will fit in a chunk, we need to know
  * how much extra space is available after we allocate the header data. This
  * is a problem because the header size depends on the number of arenas in the
  * chunk. The two dependent fields are bitmap and decommittedArenas.
  *
  * For the mark bitmap, we know that each arena will use a fixed number of full
  * bytes: ArenaBitmapBytes. The full size of the header data is this number
- * multiplied by the eventual number of arenas we have in the header. We, 
+ * multiplied by the eventual number of arenas we have in the header. We,
  * conceptually, distribute this header data among the individual arenas and do
- * not include it in the header. This way we do not have to worry about its 
+ * not include it in the header. This way we do not have to worry about its
  * variable size: it gets attached to the variable number we are computing.
  *
  * For the decommitted arena bitmap, we only have 1 bit per arena, so this
  * technique will not work. Instead, we observe that we do not have enough
- * header info to fill 8 full arenas: it is currently 4 on 64bit, less on 
+ * header info to fill 8 full arenas: it is currently 4 on 64bit, less on
  * 32bit. Thus, with current numbers, we need 64 bytes for decommittedArenas.
- * This will not become 63 bytes unless we double the data required in the 
- * header. Therefore, we just compute the number of bytes required to track 
+ * This will not become 63 bytes unless we double the data required in the
+ * header. Therefore, we just compute the number of bytes required to track
  * every possible arena and do not worry about slop bits, since there are too
  * few to usefully allocate.
  *
  * To actually compute the number of arenas we can allocate in a chunk, we
  * divide the amount of available space less the header info (not including
- * the mark bitmap which is distributed into the arena size) by the size of 
+ * the mark bitmap which is distributed into the arena size) by the size of
  * the arena (with the mark bitmap bytes it uses).
  */
 const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
 const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
 const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
 const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
 
 /* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
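
The arithmetic the comment above describes is easy to sanity-check by hand. The sketch below plugs in assumed sizes (1 MiB chunks, 4 KiB arenas, a 64-byte mark bitmap per arena, and a 64-byte ChunkInfo; the real constants live elsewhere in jsgc.h, so the resulting figures are illustrative only):

    #include <cstddef>
    #include <cstdio>

    int main() {
        const size_t ChunkSize = 1024 * 1024;  /* assumed: 1 MiB */
        const size_t ArenaSize = 4096;         /* assumed: 4 KiB */
        const size_t ArenaBitmapBytes = 64;    /* assumed: 512 cells/arena */
        const size_t ChunkInfoSize = 64;       /* assumed header size */

        /* Mark bitmap bytes are charged to each arena... */
        const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
        /* ...while the decommit bitmap (1 bit per possible arena) is not. */
        const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / 8;
        const size_t ChunkBytesAvailable =
            ChunkSize - ChunkInfoSize - ChunkDecommitBitmapBytes;
        const size_t ArenasPerChunk =
            ChunkBytesAvailable / BytesPerArenaWithHeader;

        /* With these assumptions: 32-byte decommit bitmap, 252 arenas. */
        printf("decommit bitmap: %zu bytes, arenas per chunk: %zu\n",
               ChunkDecommitBitmapBytes, ArenasPerChunk);
        return 0;
    }
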
@@ -745,16 +745,19 @@ struct Chunk {
 
     void releaseArena(ArenaHeader *aheader);
 
     static Chunk *allocate(JSRuntime *rt);
 
     /* Must be called with the GC lock taken. */
     static inline void release(JSRuntime *rt, Chunk *chunk);
 
+    /* Must be called with the GC lock taken. */
+    inline void prepareToBeFreed(JSRuntime *rt);
+
   private:
     inline void init();
 
     /* Search for a decommitted arena to allocate. */
     jsuint findDecommittedArenaOffset();
     ArenaHeader* fetchNextDecommittedArena();
 
     /* Unlink and return the freeArenasHead. */
@@ -777,20 +780,23 @@ class ChunkPool {
     }
 
     inline bool wantBackgroundAllocation(JSRuntime *rt) const;
 
     /* Must be called with the GC lock taken. */
     inline Chunk *get(JSRuntime *rt);
 
     /* Must be called either during the GC or with the GC lock taken. */
-    inline void put(JSRuntime *rt, Chunk *chunk);
+    inline void put(Chunk *chunk);
 
-    /* Must be called either during the GC or with the GC lock taken. */
-    void expire(JSRuntime *rt, bool releaseAll);
+    /*
+     * Return the list of chunks that can be released outside the GC lock.
+     * Must be called either during the GC or with the GC lock taken.
+     */
+    Chunk *expire(JSRuntime *rt, bool releaseAll);
 
     /* Must be called either during the GC or with the GC lock taken. */
     JS_FRIEND_API(int64_t) countCleanDecommittedArenas(JSRuntime *rt);
 };
 
 inline uintptr_t
 Cell::address() const
 {
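
Read together, the new declarations compose into one idiom at every call site: expire() runs under the GC lock (or during the GC) and only unlinks chunks, and FreeChunkList() then releases them with no lock requirement. A sketch of the intended call pattern, mirroring the jsgc.cpp call sites in this patch (shouldReleaseAll is a stand-in for the releaseAll argument):

    /* GC lock held (or we are inside the GC). */
    Chunk *toFree = rt->gcChunkPool.expire(rt, shouldReleaseAll);
    if (toFree) {
        AutoUnlockGC unlock(rt);   /* drop the lock around the frees */
        FreeChunkList(toFree);
    }

In this patch, releaseAll is true in js_FinishGC, shouldShrink() on the helper thread, and gckind == GC_SHRINK in the single-threaded SweepPhase.
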
@@ -1715,17 +1721,17 @@ struct GCMarker : public JSTracer {
 
     bool isMarkStackEmpty() {
         return stack.isEmpty();
     }
 
     void drainMarkStack();
 
     inline void processMarkStackTop();
-    
+
     void pushObject(JSObject *obj) {
         pushTaggedPtr(ObjectTag, obj);
     }
 
     void pushType(types::TypeObject *type) {
         pushTaggedPtr(TypeTag, type);
     }