Bug 729760 - GC: Incremental sweeping of shapes and types r=billm
author Jon Coppeard <jcoppeard@mozilla.com>
date Thu, 26 Jul 2012 09:31:52 +0100
changeset 100562 106da1cef37bc5de7afe72baa4bf5cf0b7302d23
parent 100561 a6ec0c6749bb6a8d4aff8980c1be2aa8243b731b
child 100563 82b24fab8f1d5aaf9fde0776118966cb98049dd9
push id 23185
push user mbrubeck@mozilla.com
push date Thu, 26 Jul 2012 20:58:28 +0000
treeherder mozilla-central@8a7ad0adcccf
reviewers billm
bugs 729760
milestone 17.0a1
Bug 729760 - GC: Incremental sweeping of shapes and types r=billm
js/src/gc/Heap.h
js/src/jsapi.cpp
js/src/jscntxt.h
js/src/jscompartment.cpp
js/src/jscompartment.h
js/src/jsfriendapi.cpp
js/src/jsfriendapi.h
js/src/jsgc.cpp
js/src/jsgc.h
js/src/jsgcinlines.h
js/src/jspropertytree.cpp
js/src/jsscript.cpp
--- a/js/src/gc/Heap.h
+++ b/js/src/gc/Heap.h
@@ -418,48 +418,57 @@ struct ArenaHeader
     size_t          firstFreeSpanOffsets;
 
     /*
      * One of AllocKind constants or FINALIZE_LIMIT when the arena does not
      * contain any GC things and is on the list of empty arenas in the GC
      * chunk. The latter allows to quickly check if the arena is allocated
      * during the conservative GC scanning without searching the arena in the
      * list.
+     *
+     * We use 8 bits for the allocKind so the compiler can use byte-level memory
+     * instructions to access it.
      */
     size_t       allocKind          : 8;
 
     /*
-     * When recursive marking uses too much stack the marking is delayed and
-     * the corresponding arenas are put into a stack using the following field
-     * as a linkage. To distinguish the bottom of the stack from the arenas
-     * not present in the stack we use an extra flag to tag arenas on the
-     * stack.
+     * When collecting we sometimes need to keep an auxiliary list of arenas,
+     * for which we use the following fields.  This happens for several reasons:
+     *
+     * When recursive marking uses too much stack the marking is delayed and the
+     * corresponding arenas are put into a stack. To distinguish the bottom of
+     * the stack from the arenas not present in the stack we use the
+     * markOverflow flag to tag arenas on the stack.
      *
      * Delayed marking is also used for arenas that we allocate into during an
      * incremental GC. In this case, we intend to mark all the objects in the
      * arena, and it's faster to do this marking in bulk.
      *
-     * To minimize the ArenaHeader size we record the next delayed marking
-     * linkage as arenaAddress() >> ArenaShift and pack it with the allocKind
-     * field and hasDelayedMarking flag. We use 8 bits for the allocKind, not
-     * ArenaShift - 1, so the compiler can use byte-level memory instructions
-     * to access it.
+     * When sweeping we keep track of which arenas have been allocated since the
+     * end of the mark phase.  This allows us to tell whether a pointer to an
+     * unmarked object is yet to be finalized or has already been reallocated.
+     * We set the allocatedDuringIncremental flag for this and clear it at the
+     * end of the sweep phase.
+     *
+     * To minimize the ArenaHeader size we record the next linkage as
+     * arenaAddress() >> ArenaShift and pack it with the allocKind field and the
+     * flags.
      */
   public:
     size_t       hasDelayedMarking  : 1;
     size_t       allocatedDuringIncremental : 1;
     size_t       markOverflow : 1;
-    size_t       nextDelayedMarking : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
+    size_t       auxNextLink : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
 
     static void staticAsserts() {
         /* We must be able to fit the allockind into uint8_t. */
         JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
 
         /*
-         * nextDelayedMarkingpacking assumes that ArenaShift has enough bits
+         * auxNextLink packing assumes that ArenaShift has enough bits
          * to cover allocKind and hasDelayedMarking.
          */
         JS_STATIC_ASSERT(ArenaShift >= 8 + 1 + 1 + 1);
     }
 
     inline uintptr_t address() const;
     inline Chunk *chunk() const;
 
@@ -482,17 +491,17 @@ struct ArenaHeader
         firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
     }
 
     void setAsNotAllocated() {
         allocKind = size_t(FINALIZE_LIMIT);
         markOverflow = 0;
         allocatedDuringIncremental = 0;
         hasDelayedMarking = 0;
-        nextDelayedMarking = 0;
+        auxNextLink = 0;
     }
 
     inline uintptr_t arenaAddress() const;
     inline Arena *getArena();
 
     AllocKind getAllocKind() const {
         JS_ASSERT(allocated());
         return AllocKind(allocKind);
@@ -514,16 +523,21 @@ struct ArenaHeader
     inline void setFirstFreeSpan(const FreeSpan *span);
 
 #ifdef DEBUG
     void checkSynchronizedWithFreeList() const;
 #endif
 
     inline ArenaHeader *getNextDelayedMarking() const;
     inline void setNextDelayedMarking(ArenaHeader *aheader);
+    inline void unsetDelayedMarking();
+
+    inline ArenaHeader *getNextAllocDuringSweep() const;
+    inline void setNextAllocDuringSweep(ArenaHeader *aheader);
+    inline void unsetAllocDuringSweep();
 };
 
 struct Arena
 {
     /*
      * Layout of an arena:
      * An arena is 4K in size and 4K-aligned. It starts with the ArenaHeader
      * descriptor followed by some pad bytes. The remainder of the arena is
@@ -877,25 +891,58 @@ ArenaHeader::setFirstFreeSpan(const Free
 {
     JS_ASSERT(span->isWithinArena(arenaAddress()));
     firstFreeSpanOffsets = span->encodeAsOffsets();
 }
 
 inline ArenaHeader *
 ArenaHeader::getNextDelayedMarking() const
 {
-    return &reinterpret_cast<Arena *>(nextDelayedMarking << ArenaShift)->aheader;
+    JS_ASSERT(hasDelayedMarking);
+    return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
 }
 
 inline void
 ArenaHeader::setNextDelayedMarking(ArenaHeader *aheader)
 {
     JS_ASSERT(!(uintptr_t(aheader) & ArenaMask));
+    JS_ASSERT(!auxNextLink && !hasDelayedMarking);
     hasDelayedMarking = 1;
-    nextDelayedMarking = aheader->arenaAddress() >> ArenaShift;
+    auxNextLink = aheader->arenaAddress() >> ArenaShift;
+}
+
+inline void
+ArenaHeader::unsetDelayedMarking()
+{
+    JS_ASSERT(hasDelayedMarking);
+    hasDelayedMarking = 0;
+    auxNextLink = 0;
+}
+
+inline ArenaHeader *
+ArenaHeader::getNextAllocDuringSweep() const
+{
+    JS_ASSERT(allocatedDuringIncremental);
+    return &reinterpret_cast<Arena *>(auxNextLink << ArenaShift)->aheader;
+}
+
+inline void
+ArenaHeader::setNextAllocDuringSweep(ArenaHeader *aheader)
+{
+    JS_ASSERT(!auxNextLink && !allocatedDuringIncremental);
+    allocatedDuringIncremental = 1;
+    auxNextLink = aheader->arenaAddress() >> ArenaShift;
+}
+
+inline void
+ArenaHeader::unsetAllocDuringSweep()
+{
+    JS_ASSERT(allocatedDuringIncremental);
+    allocatedDuringIncremental = 0;
+    auxNextLink = 0;
 }
 
 JS_ALWAYS_INLINE void
 ChunkBitmap::getMarkWordAndMask(const Cell *cell, uint32_t color,
                                 uintptr_t **wordp, uintptr_t *maskp)
 {
     size_t bit = (cell->address() & ChunkMask) / Cell::CellSize + color;
     JS_ASSERT(bit < ArenaBitmapBits * ArenasPerChunk);
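
The packing scheme described in the ArenaHeader comment above can be illustrated with a small standalone sketch (simplified names and constants, not the actual SpiderMonkey header): because every arena is ArenaSize-aligned, the low ArenaShift bits of its address are always zero, so the link to the next arena fits in the bits left over after allocKind and the three flags.

// Standalone sketch of the auxNextLink packing (illustrative only).
#include <assert.h>
#include <stdint.h>

static const uintptr_t ArenaShift = 12;                        /* 4K arenas */
static const uintptr_t ArenaMask  = (uintptr_t(1) << ArenaShift) - 1;

struct Header {
    uintptr_t allocKind                  : 8;
    uintptr_t hasDelayedMarking          : 1;
    uintptr_t allocatedDuringIncremental : 1;
    uintptr_t markOverflow               : 1;
    uintptr_t auxNextLink                : sizeof(uintptr_t) * 8 - 8 - 1 - 1 - 1;

    void setNextDelayedMarking(Header *h) {
        /* Arena-aligned, so the low bits carry no information. */
        assert((reinterpret_cast<uintptr_t>(h) & ArenaMask) == 0);
        hasDelayedMarking = 1;
        auxNextLink = reinterpret_cast<uintptr_t>(h) >> ArenaShift;
    }
    Header *getNextDelayedMarking() const {
        assert(hasDelayedMarking);
        return reinterpret_cast<Header *>(auxNextLink << ArenaShift);
    }
};
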
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -771,16 +771,21 @@ JSRuntime::JSRuntime()
     gcNumber(0),
     gcStartNumber(0),
     gcIsFull(false),
     gcTriggerReason(gcreason::NO_REASON),
     gcStrictCompartmentChecking(false),
     gcDisableStrictProxyCheckingCount(0),
     gcIncrementalState(gc::NO_INCREMENTAL),
     gcLastMarkSlice(false),
+    gcSweepOnBackgroundThread(false),
+    gcSweepPhase(0),
+    gcSweepCompartmentIndex(0),
+    gcSweepKindIndex(0),
+    gcArenasAllocatedDuringSweep(NULL),
     gcInterFrameGC(0),
     gcSliceBudget(SliceBudget::Unlimited),
     gcIncrementalEnabled(true),
     gcExactScanningEnabled(true),
     gcPoke(false),
     heapState(Idle),
 #ifdef JS_GC_ZEAL
     gcZeal_(0),
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -559,16 +559,31 @@ struct JSRuntime : js::RuntimeFriendFiel
      * The current incremental GC phase. During non-incremental GC, this is
      * always NO_INCREMENTAL.
      */
     js::gc::State       gcIncrementalState;
 
     /* Indicates that the last incremental slice exhausted the mark stack. */
     bool                gcLastMarkSlice;
 
+    /* Whether any sweeping will take place in the separate GC helper thread. */
+    bool                gcSweepOnBackgroundThread;
+
+    /*
+     * Incremental sweep state.
+     */
+    int                gcSweepPhase;
+    ptrdiff_t          gcSweepCompartmentIndex;
+    int                gcSweepKindIndex;
+
+    /*
+     * List head of arenas allocated during the sweep phase.
+     */
+    js::gc::ArenaHeader *gcArenasAllocatedDuringSweep;
+
     /*
      * Indicates that a GC slice has taken place in the middle of an animation
      * frame, rather than at the beginning. In this case, the next slice will be
      * delayed so that we don't get back-to-back slices.
      */
     volatile uintptr_t  gcInterFrameGC;
 
     /* Default budget for incremental GC slice. See SliceBudget in jsgc.h. */
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -42,16 +42,17 @@ JSCompartment::JSCompartment(JSRuntime *
     principals(NULL),
     global_(NULL),
 #ifdef JSGC_GENERATIONAL
     gcStoreBuffer(&gcNursery),
 #endif
     needsBarrier_(false),
     gcState(NoGCScheduled),
     gcPreserveCode(false),
+    gcStarted(false),
     gcBytes(0),
     gcTriggerBytes(0),
     gcHeapGrowthFactor(3.0),
     hold(false),
     isSystemCompartment(false),
     lastCodeRelease(0),
     typeLifoAlloc(TYPE_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
     data(NULL),
--- a/js/src/jscompartment.h
+++ b/js/src/jscompartment.h
@@ -169,16 +169,17 @@ struct JSCompartment
     enum CompartmentGCState {
         NoGCScheduled,
         GCScheduled,
         GCRunning
     };
 
     CompartmentGCState           gcState;
     bool                         gcPreserveCode;
+    bool                         gcStarted;
 
   public:
     bool isCollecting() const {
         /* Allow this if we're in the middle of an incremental GC. */
         if (rt->isHeapBusy()) {
             return gcState == GCRunning;
         } else {
             JS_ASSERT(gcState != GCRunning);
@@ -221,16 +222,29 @@ struct JSCompartment
     bool isGCScheduled() const {
         return gcState == GCScheduled;
     }
 
     void setPreservingCode(bool preserving) {
         gcPreserveCode = preserving;
     }
 
+    bool wasGCStarted() const {
+        return gcStarted;
+    }
+
+    void setGCStarted(bool started) {
+        JS_ASSERT(rt->isHeapBusy());
+        gcStarted = started;
+    }
+
+    bool isGCSweeping() {
+        return wasGCStarted() && rt->gcIncrementalState == js::gc::SWEEP;
+    }
+
     size_t                       gcBytes;
     size_t                       gcTriggerBytes;
     size_t                       gcMaxMallocBytes;
     double                       gcHeapGrowthFactor;
 
     bool                         hold;
     bool                         isSystemCompartment;
 
--- a/js/src/jsfriendapi.cpp
+++ b/js/src/jsfriendapi.cpp
@@ -122,17 +122,17 @@ js::PrepareForFullGC(JSRuntime *rt)
 
 JS_FRIEND_API(void)
 js::PrepareForIncrementalGC(JSRuntime *rt)
 {
     if (rt->gcIncrementalState == gc::NO_INCREMENTAL)
         return;
 
     for (CompartmentsIter c(rt); !c.done(); c.next()) {
-        if (c->needsBarrier())
+        if (c->wasGCStarted())
             PrepareCompartmentForGC(c);
     }
 }
 
 JS_FRIEND_API(bool)
 js::IsGCScheduled(JSRuntime *rt)
 {
     for (CompartmentsIter c(rt); !c.done(); c.next()) {
--- a/js/src/jsfriendapi.h
+++ b/js/src/jsfriendapi.h
@@ -627,16 +627,17 @@ SizeOfJSContext();
     D(LAST_CONTEXT)                             \
     D(DESTROY_CONTEXT)                          \
     D(LAST_DITCH)                               \
     D(TOO_MUCH_MALLOC)                          \
     D(ALLOC_TRIGGER)                            \
     D(DEBUG_GC)                                 \
     D(DEBUG_MODE_GC)                            \
     D(TRANSPLANT)                               \
+    D(RESET)                                    \
                                                 \
     /* Reasons from Firefox */                  \
     D(DOM_WINDOW_UTILS)                         \
     D(COMPONENT_UTILS)                          \
     D(MEM_PRESSURE)                             \
     D(CC_WAITING)                               \
     D(CC_FORCED)                                \
     D(LOAD_END)                                 \
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -195,16 +195,39 @@ const uint32_t Arena::FirstThingOffsets[
 #endif
     OFFSET(JSShortString),      /* FINALIZE_SHORT_STRING        */
     OFFSET(JSString),           /* FINALIZE_STRING              */
     OFFSET(JSExternalString),   /* FINALIZE_EXTERNAL_STRING     */
 };
 
 #undef OFFSET
 
+/*
+ * Finalization order for incrementally swept things.
+ */
+
+static const AllocKind FinalizePhaseShapes[] = {
+    FINALIZE_SHAPE,
+    FINALIZE_BASE_SHAPE,
+    FINALIZE_TYPE_OBJECT
+};
+
+static const AllocKind* FinalizePhases[] = {
+    FinalizePhaseShapes
+};
+static const int FinalizePhaseCount = sizeof(FinalizePhases) / sizeof(AllocKind*);
+
+static const int FinalizePhaseLength[] = {
+    sizeof(FinalizePhaseShapes) / sizeof(AllocKind)
+};
+
+static const gcstats::Phase FinalizePhaseStatsPhase[] = {
+    gcstats::PHASE_SWEEP_SHAPE
+};
+
 #ifdef DEBUG
 void
 ArenaHeader::checkSynchronizedWithFreeList() const
 {
     /*
      * Do not allow to access the free list when its real head is still stored
      * in FreeLists and is not synchronized with this one.
      */
@@ -322,98 +345,106 @@ Arena::finalize(FreeOp *fop, AllocKind t
     nfree += (newListTail->last + 1 - newListTail->first) / thingSize;
     JS_ASSERT(nfree + nmarked == thingsPerArena(thingSize));
 #endif
     aheader.setFirstFreeSpan(&newListHead);
 
     return false;
 }
 
+/*
+ * Insert an arena into the list in the appropriate position and update the cursor
+ * to ensure that any arena before the cursor is full.
+ */
+void ArenaList::insert(ArenaHeader *a)
+{
+    JS_ASSERT(a);
+    JS_ASSERT_IF(!head, cursor == &head);
+    a->next = *cursor;
+    *cursor = a;
+    if (!a->hasFreeThings())
+        cursor = &a->next;
+}
+
 template<typename T>
-inline void
-FinalizeTypedArenas(FreeOp *fop, ArenaLists::ArenaList *al, AllocKind thingKind)
+inline bool
+FinalizeTypedArenas(FreeOp *fop,
+                    ArenaHeader **src,
+                    ArenaList &dest,
+                    AllocKind thingKind,
+                    SliceBudget &budget)
 {
     /*
-     * Release empty arenas and move non-full arenas with some free things into
-     * a separated list that we append to al after the loop to ensure that any
-     * arena before al->cursor is full.
+     * Finalize arenas from src list, releasing empty arenas and inserting the
+     * others into dest in an appropriate position.
      */
-    JS_ASSERT_IF(!al->head, al->cursor == &al->head);
-    ArenaLists::ArenaList available;
-    ArenaHeader **ap = &al->head;
+
     size_t thingSize = Arena::thingSize(thingKind);
-    while (ArenaHeader *aheader = *ap) {
+
+    while (ArenaHeader *aheader = *src) {
+        *src = aheader->next;
         bool allClear = aheader->getArena()->finalize<T>(fop, thingKind, thingSize);
-        if (allClear) {
-            *ap = aheader->next;
+        if (allClear)
             aheader->chunk()->releaseArena(aheader);
-        } else if (aheader->hasFreeThings()) {
-            *ap = aheader->next;
-            *available.cursor = aheader;
-            available.cursor = &aheader->next;
-        } else {
-            ap = &aheader->next;
-        }
+        else
+            dest.insert(aheader);
+        budget.step(Arena::thingsPerArena(thingSize));
+        if (budget.isOverBudget())
+            return false;
     }
 
-    /* Terminate the available list and append it to al. */
-    *available.cursor = NULL;
-    *ap = available.head;
-    al->cursor = ap;
-    JS_ASSERT_IF(!al->head, al->cursor == &al->head);
+    return true;
 }
 
 /*
  * Finalize the list. On return al->cursor points to the first non-empty arena
  * after the al->head.
  */
-static void
-FinalizeArenas(FreeOp *fop, ArenaLists::ArenaList *al, AllocKind thingKind)
+static bool
+FinalizeArenas(FreeOp *fop,
+               ArenaHeader **src,
+               ArenaList &dest,
+               AllocKind thingKind,
+               SliceBudget &budget)
 {
     switch(thingKind) {
       case FINALIZE_OBJECT0:
       case FINALIZE_OBJECT0_BACKGROUND:
       case FINALIZE_OBJECT2:
       case FINALIZE_OBJECT2_BACKGROUND:
       case FINALIZE_OBJECT4:
       case FINALIZE_OBJECT4_BACKGROUND:
       case FINALIZE_OBJECT8:
       case FINALIZE_OBJECT8_BACKGROUND:
       case FINALIZE_OBJECT12:
       case FINALIZE_OBJECT12_BACKGROUND:
       case FINALIZE_OBJECT16:
       case FINALIZE_OBJECT16_BACKGROUND:
-        FinalizeTypedArenas<JSObject>(fop, al, thingKind);
-        break;
+        return FinalizeTypedArenas<JSObject>(fop, src, dest, thingKind, budget);
       case FINALIZE_SCRIPT:
-	FinalizeTypedArenas<JSScript>(fop, al, thingKind);
-        break;
+	return FinalizeTypedArenas<JSScript>(fop, src, dest, thingKind, budget);
       case FINALIZE_SHAPE:
-	FinalizeTypedArenas<Shape>(fop, al, thingKind);
-        break;
+	return FinalizeTypedArenas<Shape>(fop, src, dest, thingKind, budget);
       case FINALIZE_BASE_SHAPE:
-        FinalizeTypedArenas<BaseShape>(fop, al, thingKind);
-        break;
+        return FinalizeTypedArenas<BaseShape>(fop, src, dest, thingKind, budget);
       case FINALIZE_TYPE_OBJECT:
-	FinalizeTypedArenas<types::TypeObject>(fop, al, thingKind);
-        break;
+	return FinalizeTypedArenas<types::TypeObject>(fop, src, dest, thingKind, budget);
 #if JS_HAS_XML_SUPPORT
       case FINALIZE_XML:
-	FinalizeTypedArenas<JSXML>(fop, al, thingKind);
-        break;
+	return FinalizeTypedArenas<JSXML>(fop, src, dest, thingKind, budget);
 #endif
       case FINALIZE_STRING:
-	FinalizeTypedArenas<JSString>(fop, al, thingKind);
-        break;
+	return FinalizeTypedArenas<JSString>(fop, src, dest, thingKind, budget);
       case FINALIZE_SHORT_STRING:
-	FinalizeTypedArenas<JSShortString>(fop, al, thingKind);
-        break;
+	return FinalizeTypedArenas<JSShortString>(fop, src, dest, thingKind, budget);
       case FINALIZE_EXTERNAL_STRING:
-	FinalizeTypedArenas<JSExternalString>(fop, al, thingKind);
-        break;
+	return FinalizeTypedArenas<JSExternalString>(fop, src, dest, thingKind, budget);
+      default:
+        JS_NOT_REACHED("Invalid alloc kind");
+        return true;
     }
 }
 
 static inline Chunk *
 AllocChunk() {
     return static_cast<Chunk *>(MapAlignedPages(ChunkSize, ChunkSize));
 }
 
@@ -1431,16 +1462,23 @@ ArenaLists::prepareForIncrementalGC(JSRu
         if (!headSpan->isEmpty()) {
             ArenaHeader *aheader = headSpan->arenaHeader();
             aheader->allocatedDuringIncremental = true;
             rt->gcMarker.delayMarkingArena(aheader);
         }
     }
 }
 
+static inline void
+PushArenaAllocatedDuringSweep(JSRuntime *runtime, ArenaHeader *arena)
+{
+    arena->setNextAllocDuringSweep(runtime->gcArenasAllocatedDuringSweep);
+    runtime->gcArenasAllocatedDuringSweep = arena;
+}
+
 inline void *
 ArenaLists::allocateFromArena(JSCompartment *comp, AllocKind thingKind)
 {
     Chunk *chunk = NULL;
 
     ArenaList *al = &arenaLists[thingKind];
     AutoLockGC maybeLock;
 
@@ -1484,19 +1522,23 @@ ArenaLists::allocateFromArena(JSCompartm
             al->cursor = &aheader->next;
 
             /*
              * Move the free span stored in the arena to the free list and
              * allocate from it.
              */
             freeLists[thingKind] = aheader->getFirstFreeSpan();
             aheader->setAsFullyUsed();
-            if (JS_UNLIKELY(comp->needsBarrier())) {
-                aheader->allocatedDuringIncremental = true;
-                comp->rt->gcMarker.delayMarkingArena(aheader);
+            if (JS_UNLIKELY(comp->wasGCStarted())) {
+                if (comp->needsBarrier()) {
+                    aheader->allocatedDuringIncremental = true;
+                    comp->rt->gcMarker.delayMarkingArena(aheader);
+                } else if (comp->isGCSweeping()) {
+                    PushArenaAllocatedDuringSweep(comp->rt, aheader);
+                }
             }
             return freeLists[thingKind].infallibleAllocate(Arena::thingSize(thingKind));
         }
 
         /* Make sure we hold the GC lock before we call PickChunk. */
         if (!maybeLock.locked())
             maybeLock.lock(comp->rt);
         chunk = PickChunk(comp);
@@ -1513,19 +1555,23 @@ ArenaLists::allocateFromArena(JSCompartm
      * cursor, so after the GC the most recently added arena will be used first
      * for allocations improving cache locality.
      */
     JS_ASSERT(!*al->cursor);
     ArenaHeader *aheader = chunk->allocateArena(comp, thingKind);
     if (!aheader)
         return NULL;
 
-    if (JS_UNLIKELY(comp->needsBarrier())) {
-        aheader->allocatedDuringIncremental = true;
-        comp->rt->gcMarker.delayMarkingArena(aheader);
+    if (JS_UNLIKELY(comp->wasGCStarted())) {
+        if (comp->needsBarrier()) {
+            aheader->allocatedDuringIncremental = true;
+            comp->rt->gcMarker.delayMarkingArena(aheader);
+        } else if (comp->isGCSweeping()) {
+            PushArenaAllocatedDuringSweep(comp->rt, aheader);
+        }
     }
     aheader->next = al->head;
     if (!al->head) {
         JS_ASSERT(al->cursor == &al->head);
         al->cursor = &aheader->next;
     }
     al->head = aheader;
 
@@ -1536,22 +1582,39 @@ ArenaLists::allocateFromArena(JSCompartm
                                                      Arena::firstThingOffset(thingKind),
                                                      Arena::thingSize(thingKind));
 }
 
 void
 ArenaLists::finalizeNow(FreeOp *fop, AllocKind thingKind)
 {
     JS_ASSERT(!fop->onBackgroundThread());
+    JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE ||
+              backgroundFinalizeState[thingKind] == BFS_JUST_FINISHED);
+
+    ArenaHeader *arenas = arenaLists[thingKind].head;
+    arenaLists[thingKind].clear();
+
+    SliceBudget budget;
+    FinalizeArenas(fop, &arenas, arenaLists[thingKind], thingKind, budget);
+    JS_ASSERT(!arenas);
+}
+
+void
+ArenaLists::queueForForegroundSweep(FreeOp *fop, AllocKind thingKind)
+{
+    JS_ASSERT(!fop->onBackgroundThread());
     JS_ASSERT(backgroundFinalizeState[thingKind] == BFS_DONE);
-    FinalizeArenas(fop, &arenaLists[thingKind], thingKind);
+    JS_ASSERT(!arenaListsToSweep[thingKind]);
+    arenaListsToSweep[thingKind] = arenaLists[thingKind].head;
+    arenaLists[thingKind].clear();
 }
 
 inline void
-ArenaLists::finalizeLater(FreeOp *fop, AllocKind thingKind)
+ArenaLists::queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind)
 {
     JS_ASSERT(thingKind == FINALIZE_OBJECT0_BACKGROUND  ||
               thingKind == FINALIZE_OBJECT2_BACKGROUND  ||
               thingKind == FINALIZE_OBJECT4_BACKGROUND  ||
               thingKind == FINALIZE_OBJECT8_BACKGROUND  ||
               thingKind == FINALIZE_OBJECT12_BACKGROUND ||
               thingKind == FINALIZE_OBJECT16_BACKGROUND ||
               thingKind == FINALIZE_SHORT_STRING        ||
@@ -1580,17 +1643,17 @@ ArenaLists::finalizeLater(FreeOp *fop, A
          * To ensure the finalization order even during the background GC we
          * must use infallibleAppend so arenas scheduled for background
          * finalization would not be finalized now if the append fails.
          */
         fop->runtime()->gcHelperThread.finalizeVector.infallibleAppend(al->head);
         al->clear();
         backgroundFinalizeState[thingKind] = BFS_RUN;
     } else {
-        FinalizeArenas(fop, al, thingKind);
+        finalizeNow(fop, thingKind);
         backgroundFinalizeState[thingKind] = BFS_DONE;
     }
 
 #else /* !JS_THREADSAFE */
 
     finalizeNow(fop, thingKind);
 
 #endif
@@ -1600,19 +1663,21 @@ ArenaLists::finalizeLater(FreeOp *fop, A
 ArenaLists::backgroundFinalize(FreeOp *fop, ArenaHeader *listHead)
 {
 #ifdef JS_THREADSAFE
     JS_ASSERT(fop->onBackgroundThread());
 #endif /* JS_THREADSAFE */
     JS_ASSERT(listHead);
     AllocKind thingKind = listHead->getAllocKind();
     JSCompartment *comp = listHead->compartment;
+
     ArenaList finalized;
-    finalized.head = listHead;
-    FinalizeArenas(fop, &finalized, thingKind);
+    SliceBudget budget;
+    FinalizeArenas(fop, &listHead, finalized, thingKind, budget);
+    JS_ASSERT(!listHead);
 
     /*
      * After we finish the finalization al->cursor must point to the end of
      * the head list as we emptied the list before the background finalization
      * and the allocation adds new arenas before the cursor.
      */
     ArenaLists *lists = &comp->arenas;
     ArenaList *al = &lists->arenaLists[thingKind];
@@ -1636,58 +1701,58 @@ ArenaLists::backgroundFinalize(FreeOp *f
             al->cursor = finalized.cursor;
         lists->backgroundFinalizeState[thingKind] = BFS_JUST_FINISHED;
     } else {
         lists->backgroundFinalizeState[thingKind] = BFS_DONE;
     }
 }
 
 void
-ArenaLists::finalizeObjects(FreeOp *fop)
+ArenaLists::queueObjectsForSweep(FreeOp *fop)
 {
     finalizeNow(fop, FINALIZE_OBJECT0);
     finalizeNow(fop, FINALIZE_OBJECT2);
     finalizeNow(fop, FINALIZE_OBJECT4);
     finalizeNow(fop, FINALIZE_OBJECT8);
     finalizeNow(fop, FINALIZE_OBJECT12);
     finalizeNow(fop, FINALIZE_OBJECT16);
 
-    finalizeLater(fop, FINALIZE_OBJECT0_BACKGROUND);
-    finalizeLater(fop, FINALIZE_OBJECT2_BACKGROUND);
-    finalizeLater(fop, FINALIZE_OBJECT4_BACKGROUND);
-    finalizeLater(fop, FINALIZE_OBJECT8_BACKGROUND);
-    finalizeLater(fop, FINALIZE_OBJECT12_BACKGROUND);
-    finalizeLater(fop, FINALIZE_OBJECT16_BACKGROUND);
+    queueForBackgroundSweep(fop, FINALIZE_OBJECT0_BACKGROUND);
+    queueForBackgroundSweep(fop, FINALIZE_OBJECT2_BACKGROUND);
+    queueForBackgroundSweep(fop, FINALIZE_OBJECT4_BACKGROUND);
+    queueForBackgroundSweep(fop, FINALIZE_OBJECT8_BACKGROUND);
+    queueForBackgroundSweep(fop, FINALIZE_OBJECT12_BACKGROUND);
+    queueForBackgroundSweep(fop, FINALIZE_OBJECT16_BACKGROUND);
 
 #if JS_HAS_XML_SUPPORT
     finalizeNow(fop, FINALIZE_XML);
 #endif
 }
 
 void
-ArenaLists::finalizeStrings(FreeOp *fop)
-{
-    finalizeLater(fop, FINALIZE_SHORT_STRING);
-    finalizeLater(fop, FINALIZE_STRING);
+ArenaLists::queueStringsForSweep(FreeOp *fop)
+{
+    queueForBackgroundSweep(fop, FINALIZE_SHORT_STRING);
+    queueForBackgroundSweep(fop, FINALIZE_STRING);
 
     finalizeNow(fop, FINALIZE_EXTERNAL_STRING);
 }
 
 void
-ArenaLists::finalizeShapes(FreeOp *fop)
-{
-    finalizeNow(fop, FINALIZE_SHAPE);
-    finalizeNow(fop, FINALIZE_BASE_SHAPE);
-    finalizeNow(fop, FINALIZE_TYPE_OBJECT);
+ArenaLists::queueScriptsForSweep(FreeOp *fop)
+{
+    finalizeNow(fop, FINALIZE_SCRIPT);
 }
 
 void
-ArenaLists::finalizeScripts(FreeOp *fop)
-{
-    finalizeNow(fop, FINALIZE_SCRIPT);
+ArenaLists::queueShapesForSweep(FreeOp *fop)
+{
+    queueForForegroundSweep(fop, FINALIZE_SHAPE);
+    queueForForegroundSweep(fop, FINALIZE_BASE_SHAPE);
+    queueForForegroundSweep(fop, FINALIZE_TYPE_OBJECT);
 }
 
 static void
 RunLastDitchGC(JSContext *cx, gcreason::Reason reason)
 {
     JSRuntime *rt = cx->runtime;
 
     /* The last ditch GC preserves all atoms. */
@@ -1918,17 +1983,17 @@ GCMarker::reset()
     stack.reset();
     JS_ASSERT(isMarkStackEmpty());
 
     while (unmarkedArenaStackTop) {
         ArenaHeader *aheader = unmarkedArenaStackTop;
         JS_ASSERT(aheader->hasDelayedMarking);
         JS_ASSERT(markLaterArenas);
         unmarkedArenaStackTop = aheader->getNextDelayedMarking();
-        aheader->hasDelayedMarking = 0;
+        aheader->unsetDelayedMarking();
         aheader->markOverflow = 0;
         aheader->allocatedDuringIncremental = 0;
         markLaterArenas--;
     }
     JS_ASSERT(isDrained());
     JS_ASSERT(!markLaterArenas);
 
     grayRoots.clearAndFree();
@@ -2001,17 +2066,17 @@ GCMarker::markDelayedChildren(SliceBudge
          * If marking gets delayed at the same arena again, we must repeat
          * marking of its things. For that we pop arena from the stack and
          * clear its hasDelayedMarking flag before we begin the marking.
          */
         ArenaHeader *aheader = unmarkedArenaStackTop;
         JS_ASSERT(aheader->hasDelayedMarking);
         JS_ASSERT(markLaterArenas);
         unmarkedArenaStackTop = aheader->getNextDelayedMarking();
-        aheader->hasDelayedMarking = 0;
+        aheader->unsetDelayedMarking();
         markLaterArenas--;
         markDelayedChildren(aheader);
 
         budget.step(150);
         if (budget.isOverBudget())
             return false;
     } while (unmarkedArenaStackTop);
     JS_ASSERT(!markLaterArenas);
@@ -3037,33 +3102,35 @@ ReleaseObservedTypes(JSRuntime *rt)
     if (releaseTypes)
         rt->gcJitReleaseTime = now + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
 #endif
 
     return releaseTypes;
 }
 
 static void
-SweepCompartments(FreeOp *fop, JSGCInvocationKind gckind)
+SweepCompartments(FreeOp *fop, gcreason::Reason gcReason)
 {
     JSRuntime *rt = fop->runtime();
+    JS_ASSERT_IF(gcReason == gcreason::LAST_CONTEXT, !rt->hasContexts());
+
     JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback;
 
     /* Skip the atomsCompartment. */
     JSCompartment **read = rt->compartments.begin() + 1;
     JSCompartment **end = rt->compartments.end();
     JSCompartment **write = read;
     JS_ASSERT(rt->compartments.length() >= 1);
     JS_ASSERT(*rt->compartments.begin() == rt->atomsCompartment);
 
     while (read < end) {
         JSCompartment *compartment = *read++;
 
         if (!compartment->hold && compartment->isCollecting() &&
-            (compartment->arenas.arenaListsAreEmpty() || !rt->hasContexts()))
+            (compartment->arenas.arenaListsAreEmpty() || gcReason == gcreason::LAST_CONTEXT))
         {
             compartment->arenas.checkEmptyFreeLists();
             if (callback)
                 callback(fop, compartment);
             if (compartment->principals)
                 JS_DropPrincipals(rt, compartment->principals);
             fop->delete_(compartment);
             continue;
@@ -3128,17 +3195,23 @@ BeginMarkPhase(JSRuntime *rt, bool isInc
      */
     if (isIncremental) {
         for (GCCompartmentsIter c(rt); !c.done(); c.next())
             c->arenas.purge();
     }
 
     rt->gcIsFull = true;
     for (CompartmentsIter c(rt); !c.done(); c.next()) {
-        if (!c->isCollecting())
+        JS_ASSERT(!c->wasGCStarted());
+        for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i)
+            JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
+
+        if (c->isCollecting())
+            c->setGCStarted(true);
+        else
             rt->gcIsFull = false;
 
         c->setPreservingCode(ShouldPreserveJITCode(c, currentTime));
     }
 
     rt->gcMarker.start(rt);
     JS_ASSERT(!rt->gcMarker.callback);
     JS_ASSERT(IS_GC_MARKING_TRACER(&rt->gcMarker));
@@ -3379,17 +3452,17 @@ ValidateIncrementalMarking(JSRuntime *rt
     WeakMapBase::resetWeakMapList(rt);
     WeakMapBase::restoreWeakMapList(rt, weakmaps);
 
     rt->gcIncrementalState = state;
 }
 #endif
 
 static void
-SweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, bool *startBackgroundSweep)
+BeginSweepPhase(JSRuntime *rt)
 {
     /*
      * Sweep phase.
      *
      * Finalize as we sweep, outside of rt->gcLock but with rt->isHeapBusy()
      * true so that any attempt to allocate a GC-thing from a finalizer will
      * fail, rather than nest badly and leave the unmarked newborn to be swept.
      *
@@ -3407,23 +3480,24 @@ SweepPhase(JSRuntime *rt, JSGCInvocation
      * BeginMarkPhase. More compartments may have been created since then.
      */
     bool isFull = true;
     for (CompartmentsIter c(rt); !c.done(); c.next()) {
         if (!c->isCollecting())
             isFull = false;
     }
 
-    *startBackgroundSweep = (rt->hasContexts() && rt->gcHelperThread.prepareForBackgroundSweep());
+    rt->gcSweepOnBackgroundThread =
+        (rt->hasContexts() && rt->gcHelperThread.prepareForBackgroundSweep());
 
     /* Purge the ArenaLists before sweeping. */
     for (GCCompartmentsIter c(rt); !c.done(); c.next())
         c->arenas.purge();
 
-    FreeOp fop(rt, *startBackgroundSweep, false);
+    FreeOp fop(rt, rt->gcSweepOnBackgroundThread, false);
 
     {
         gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_START);
         if (rt->gcFinalizeCallback)
             rt->gcFinalizeCallback(&fop, JSFINALIZE_START, !isFull);
     }
 
     /* Finalize unreachable (key,value) pairs in all weak maps. */
@@ -3448,47 +3522,92 @@ SweepPhase(JSRuntime *rt, JSGCInvocation
         for (CompartmentsIter c(rt); !c.done(); c.next()) {
             if (c->isCollecting())
                 c->sweep(&fop, releaseTypes);
             else
                 c->sweepCrossCompartmentWrappers();
         }
     }
 
-    {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_OBJECT);
-
-        /*
-         * We finalize objects before other GC things to ensure that the object's
-         * finalizer can access the other things even if they will be freed.
-         */
-        for (GCCompartmentsIter c(rt); !c.done(); c.next())
-            c->arenas.finalizeObjects(&fop);
-    }
+    /*
+     * Queue all GC things in all compartments for sweeping, either in the
+     * foreground or on the background thread.
+     *
+     * Note that order is important here for the background case.
+     *
+     * Objects are finalized immediately but this may change in the future.
+     */
+    for (GCCompartmentsIter c(rt); !c.done(); c.next())
+        c->arenas.queueObjectsForSweep(&fop);
+    for (GCCompartmentsIter c(rt); !c.done(); c.next())
+        c->arenas.queueStringsForSweep(&fop);
+    for (GCCompartmentsIter c(rt); !c.done(); c.next())
+        c->arenas.queueScriptsForSweep(&fop);
+    for (GCCompartmentsIter c(rt); !c.done(); c.next())
+        c->arenas.queueShapesForSweep(&fop);
+
+    rt->gcSweepPhase = 0;
+    rt->gcSweepCompartmentIndex = 0;
+    rt->gcSweepKindIndex = 0;
 
     {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_STRING);
-        for (GCCompartmentsIter c(rt); !c.done(); c.next())
-            c->arenas.finalizeStrings(&fop);
+        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
+        if (rt->gcFinalizeCallback)
+            rt->gcFinalizeCallback(&fop, JSFINALIZE_END, !rt->gcIsFull);
     }
-
-    {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SCRIPT);
-        for (GCCompartmentsIter c(rt); !c.done(); c.next())
-            c->arenas.finalizeScripts(&fop);
+}
+
+bool
+ArenaLists::foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget)
+{
+    if (!arenaListsToSweep[thingKind])
+        return true;
+
+    ArenaList &dest = arenaLists[thingKind];
+    return FinalizeArenas(fop, &arenaListsToSweep[thingKind], dest, thingKind, sliceBudget);
+}
+
+static bool
+SweepPhase(JSRuntime *rt, SliceBudget &sliceBudget)
+{
+    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
+    FreeOp fop(rt, rt->gcSweepOnBackgroundThread, false);
+
+    for (; rt->gcSweepPhase < FinalizePhaseCount ; ++rt->gcSweepPhase) {
+        gcstats::AutoPhase ap(rt->gcStats, FinalizePhaseStatsPhase[rt->gcSweepPhase]);
+
+        ptrdiff_t len = rt->compartments.end() - rt->compartments.begin();
+        for (; rt->gcSweepCompartmentIndex < len ; ++rt->gcSweepCompartmentIndex) {
+            JSCompartment *c = rt->compartments.begin()[rt->gcSweepCompartmentIndex];
+
+            if (c->wasGCStarted()) {
+                while (rt->gcSweepKindIndex < FinalizePhaseLength[rt->gcSweepPhase]) {
+                    AllocKind kind = FinalizePhases[rt->gcSweepPhase][rt->gcSweepKindIndex];
+
+                    if (!c->arenas.foregroundFinalize(&fop, kind, sliceBudget))
+                        return false;
+                    ++rt->gcSweepKindIndex;
+                }
+            }
+            rt->gcSweepKindIndex = 0;
+        }
+        rt->gcSweepCompartmentIndex = 0;
     }
 
-    {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP_SHAPE);
-        for (GCCompartmentsIter c(rt); !c.done(); c.next())
-            c->arenas.finalizeShapes(&fop);
-    }
+    return true;
+}
+
+static void
+EndSweepPhase(JSRuntime *rt, JSGCInvocationKind gckind, gcreason::Reason gcReason)
+{
+    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_SWEEP);
+    FreeOp fop(rt, rt->gcSweepOnBackgroundThread, false);
 
 #ifdef DEBUG
-     PropertyTree::dumpShapes(rt);
+    PropertyTree::dumpShapes(rt);
 #endif
 
     {
         gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_DESTROY);
 
         /*
          * Sweep script filenames after sweeping functions in the generic loop
          * above. In this way when a scripted function's finalizer destroys the
@@ -3499,36 +3618,46 @@ SweepPhase(JSRuntime *rt, JSGCInvocation
             SweepScriptFilenames(rt);
             ScriptSource::sweep(rt);
         }
 
         /*
          * This removes compartments from rt->compartment, so we do it last to make
          * sure we don't miss sweeping any compartments.
          */
-        SweepCompartments(&fop, gckind);
+        SweepCompartments(&fop, gcReason);
 
 #ifndef JS_THREADSAFE
         /*
          * Destroy arenas after we finished the sweeping so finalizers can safely
          * use IsAboutToBeFinalized().
          * This is done on the GCHelperThread if JS_THREADSAFE is defined.
          */
         ExpireChunksAndArenas(rt, gckind == GC_SHRINK);
 #endif
     }
 
-    {
-        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_FINALIZE_END);
-        if (rt->gcFinalizeCallback)
-            rt->gcFinalizeCallback(&fop, JSFINALIZE_END, !isFull);
+    /*
+     * Reset the list of arenas marked as being allocated during sweep phase.
+     */
+    while (ArenaHeader *arena = rt->gcArenasAllocatedDuringSweep) {
+        rt->gcArenasAllocatedDuringSweep = arena->getNextAllocDuringSweep();
+        arena->unsetAllocDuringSweep();
     }
 
-    for (CompartmentsIter c(rt); !c.done(); c.next())
+    for (CompartmentsIter c(rt); !c.done(); c.next()) {
         c->setGCLastBytes(c->gcBytes, c->gcMallocAndFreeBytes, gckind);
+        if (c->wasGCStarted())
+            c->setGCStarted(false);
+
+        JS_ASSERT(!c->wasGCStarted());
+        for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i)
+            JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
+    }
+
     rt->gcLastGCTime = PRMJ_Now();
 }
 
 /*
  * This class should be used by any code that needs to exclusive access to the
  * heap in order to trace through it...
  */
 class AutoTraceSession {
@@ -3604,26 +3733,47 @@ AutoGCSession::~AutoGCSession()
     /* Clear gcMallocBytes for all compartments */
     for (CompartmentsIter c(runtime); !c.done(); c.next())
         c->resetGCMallocBytes();
 
     runtime->resetGCMallocBytes();
 }
 
 static void
+IncrementalCollectSlice(JSRuntime *rt,
+                        int64_t budget,
+                        gcreason::Reason gcReason,
+                        JSGCInvocationKind gcKind);
+
+static void
 ResetIncrementalGC(JSRuntime *rt, const char *reason)
 {
     if (rt->gcIncrementalState == NO_INCREMENTAL)
         return;
 
-    for (CompartmentsIter c(rt); !c.done(); c.next())
+    if (rt->gcIncrementalState == SWEEP) {
+        /* If we've finished marking then sweep to completion here. */
+        IncrementalCollectSlice(rt, SliceBudget::Unlimited, gcreason::RESET, GC_NORMAL);
+        gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
+        rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
+        return;
+    }
+
+    JS_ASSERT(rt->gcIncrementalState == MARK);
+
+    for (CompartmentsIter c(rt); !c.done(); c.next()) {
         c->setNeedsBarrier(false);
+        c->setGCStarted(false);
+        for (unsigned i = 0 ; i < FINALIZE_LIMIT ; ++i)
+            JS_ASSERT(!c->arenas.arenaListsToSweep[i]);
+    }
 
     rt->gcMarker.reset();
     rt->gcMarker.stop();
+
     rt->gcIncrementalState = NO_INCREMENTAL;
 
     JS_ASSERT(!rt->gcStrictCompartmentChecking);
 
     rt->gcStats.reset(reason);
 }
 
 class AutoGCSlice {
@@ -3643,31 +3793,34 @@ AutoGCSlice::AutoGCSlice(JSRuntime *rt)
      * there are stack frames active for any of its scripts. Normally this flag
      * is set at the beginning of the mark phase. During incremental GC, we also
      * set it at the start of every phase.
      */
     rt->stackSpace.markActiveCompartments();
 
     for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
         /* Clear this early so we don't do any write barriers during GC. */
-        if (rt->gcIncrementalState == MARK)
+        if (rt->gcIncrementalState == MARK) {
+            JS_ASSERT(c->needsBarrier());
             c->setNeedsBarrier(false);
-        else
+        } else {
             JS_ASSERT(!c->needsBarrier());
+        }
     }
 }
 
 AutoGCSlice::~AutoGCSlice()
 {
     for (GCCompartmentsIter c(runtime); !c.done(); c.next()) {
         if (runtime->gcIncrementalState == MARK) {
             c->setNeedsBarrier(true);
             c->arenas.prepareForIncrementalGC(runtime);
         } else {
-            JS_ASSERT(runtime->gcIncrementalState == NO_INCREMENTAL);
+            JS_ASSERT(runtime->gcIncrementalState == NO_INCREMENTAL ||
+                      runtime->gcIncrementalState == SWEEP);
             c->setNeedsBarrier(false);
         }
     }
 }
 
 class AutoCopyFreeListToArenas {
     JSRuntime *rt;
 
@@ -3680,88 +3833,154 @@ class AutoCopyFreeListToArenas {
 
     ~AutoCopyFreeListToArenas() {
         for (CompartmentsIter c(rt); !c.done(); c.next())
             c->arenas.clearFreeListsInArenas();
     }
 };
 
 static void
-IncrementalMarkSlice(JSRuntime *rt, int64_t budget, gcreason::Reason reason, bool *shouldSweep)
-{
+PushZealSelectedObjects(JSRuntime *rt)
+{
+#ifdef JS_GC_ZEAL
+    /* Push selected objects onto the mark stack and clear the list. */
+    for (JSObject **obj = rt->gcSelectedForMarking.begin();
+         obj != rt->gcSelectedForMarking.end(); obj++)
+    {
+        MarkObjectUnbarriered(&rt->gcMarker, obj, "selected obj");
+    }
+#endif
+}
+
+static bool
+DrainMarkStack(JSRuntime *rt, SliceBudget &sliceBudget)
+{
+    /* Run a marking slice and return whether the stack is now empty. */
+    gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK);
+    return rt->gcMarker.drainMarkStack(sliceBudget);
+}
+
+static void
+IncrementalCollectSlice(JSRuntime *rt,
+                        int64_t budget,
+                        gcreason::Reason reason,
+                        JSGCInvocationKind gckind)
+{
+    AutoCopyFreeListToArenas copy(rt);
     AutoGCSlice slice(rt);
 
     gc::State initialState = rt->gcIncrementalState;
-
-    *shouldSweep = false;
+    SliceBudget sliceBudget(budget);
 
     int zeal = 0;
 #ifdef JS_GC_ZEAL
     if (reason == gcreason::DEBUG_GC) {
-        // Do the collection type specified by zeal mode only if the collection
-        // was triggered by RunDebugGC().
+        /*
+         * Do the collection type specified by zeal mode only if the collection
+         * was triggered by RunDebugGC().
+         */
         zeal = rt->gcZeal();
+        JS_ASSERT_IF(zeal == ZealIncrementalMarkAllThenFinish ||
+                     zeal == ZealIncrementalRootsThenFinish,
+                     budget == SliceBudget::Unlimited);
     }
 #endif
 
     bool isIncremental = rt->gcIncrementalState != NO_INCREMENTAL ||
                          budget != SliceBudget::Unlimited ||
                          zeal == ZealIncrementalRootsThenFinish ||
                          zeal == ZealIncrementalMarkAllThenFinish;
 
     if (rt->gcIncrementalState == NO_INCREMENTAL) {
         rt->gcIncrementalState = MARK_ROOTS;
         rt->gcLastMarkSlice = false;
     }
 
-    if (rt->gcIncrementalState == MARK_ROOTS) {
+    switch (rt->gcIncrementalState) {
+
+      case MARK_ROOTS:
         BeginMarkPhase(rt, isIncremental);
+        PushZealSelectedObjects(rt);
+
         rt->gcIncrementalState = MARK;
 
         if (zeal == ZealIncrementalRootsThenFinish)
-            return;
-    }
-
-    if (rt->gcIncrementalState == MARK) {
-        SliceBudget sliceBudget(budget);
+            break;
+
+        /* fall through */
+
+      case MARK: {
 
         /* If we needed delayed marking for gray roots, then collect until done. */
         if (!rt->gcMarker.hasBufferedGrayRoots())
             sliceBudget.reset();
 
-#ifdef JS_GC_ZEAL
-        if (!rt->gcSelectedForMarking.empty()) {
-            for (JSObject **obj = rt->gcSelectedForMarking.begin();
-                 obj != rt->gcSelectedForMarking.end(); obj++)
-            {
-                MarkObjectUnbarriered(&rt->gcMarker, obj, "selected obj");
-            }
-        }
-#endif
-
-        bool finished;
+        bool finished = DrainMarkStack(rt, sliceBudget);
+        if (!finished)
+            break;
+
+        JS_ASSERT(rt->gcMarker.isDrained());
+
+        if (!rt->gcLastMarkSlice &&
+            ((initialState == MARK && budget != SliceBudget::Unlimited) ||
+             zeal == ZealIncrementalMarkAllThenFinish))
         {
-            gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_MARK);
-            finished = rt->gcMarker.drainMarkStack(sliceBudget);
+            /*
+             * Yield with the aim of starting the sweep in the next
+             * slice.  We will need to mark anything new on the stack
+             * when we resume, so we stay in MARK state.
+             */
+            rt->gcLastMarkSlice = true;
+            break;
         }
-        if (finished) {
-            JS_ASSERT(rt->gcMarker.isDrained());
-
-            if (!rt->gcLastMarkSlice &&
-                ((initialState == MARK && budget != SliceBudget::Unlimited) ||
-                 zeal == ZealIncrementalMarkAllThenFinish))
-            {
-                rt->gcLastMarkSlice = true;
-            } else {
-                EndMarkPhase(rt, isIncremental);
-                rt->gcIncrementalState = NO_INCREMENTAL;
-                *shouldSweep = true;
-            }
-        }
-    }
+
+        EndMarkPhase(rt, isIncremental);
+
+        rt->gcIncrementalState = SWEEP;
+
+        /*
+         * This runs to completion, but we don't continue if the budget is
+         * now exhausted.
+         */
+        BeginSweepPhase(rt);
+        if (sliceBudget.isOverBudget())
+            break;
+
+        /*
+         * Always yield here when running in incremental multi-slice zeal
+         * mode, so RunDebugGC can reset the slice budget.
+         */
+        if (budget != SliceBudget::Unlimited && zeal == ZealIncrementalMultipleSlices)
+            break;
+
+        /* fall through */
+      }
+
+      case SWEEP: {
+#ifdef DEBUG
+        for (CompartmentsIter c(rt); !c.done(); c.next())
+            JS_ASSERT(!c->needsBarrier());
+#endif
+
+        bool finished = SweepPhase(rt, sliceBudget);
+        if (!finished)
+            break;
+
+        EndSweepPhase(rt, gckind, reason);
+
+        if (rt->gcSweepOnBackgroundThread)
+            rt->gcHelperThread.startBackgroundSweep(gckind == GC_SHRINK);
+
+        rt->gcIncrementalState = NO_INCREMENTAL;
+        break;
+      }
+
+      default:
+        JS_ASSERT(false);
+     }
 }
 
 class IncrementalSafety
 {
     const char *reason_;
 
     IncrementalSafety(const char *reason) : reason_(reason) {}
 
@@ -3829,18 +4048,20 @@ BudgetIncrementalGC(JSRuntime *rt, int64
             rt->gcStats.nonincremental("allocation trigger");
         }
 
         if (c->isTooMuchMalloc()) {
             *budget = SliceBudget::Unlimited;
             rt->gcStats.nonincremental("malloc bytes trigger");
         }
 
-        if (c->isCollecting() != c->needsBarrier())
+        if (rt->gcIncrementalState != NO_INCREMENTAL &&
+            c->isCollecting() != c->wasGCStarted()) {
             reset = true;
+        }
     }
 
     if (reset)
         ResetIncrementalGC(rt, "compartment change");
 }
 
 /*
  * GC, repeatedly if necessary, until we think we have not created any new
@@ -3872,44 +4093,28 @@ GCCycle(JSRuntime *rt, bool incremental,
      * background allocation to finish so we can avoid taking the GC lock
      * when manipulating the chunks during the GC.
      */
     {
         gcstats::AutoPhase ap(rt->gcStats, gcstats::PHASE_WAIT_BACKGROUND_THREAD);
         rt->gcHelperThread.waitBackgroundSweepOrAllocEnd();
     }
 
-    bool startBackgroundSweep = false;
     {
         if (!incremental) {
             /* If non-incremental GC was requested, reset incremental GC. */
             ResetIncrementalGC(rt, "requested");
             rt->gcStats.nonincremental("requested");
             budget = SliceBudget::Unlimited;
         } else {
             BudgetIncrementalGC(rt, &budget);
         }
 
-        AutoCopyFreeListToArenas copy(rt);
-
-        bool shouldSweep;
-        IncrementalMarkSlice(rt, budget, reason, &shouldSweep);
-
-#ifdef DEBUG
-        if (rt->gcIncrementalState == NO_INCREMENTAL) {
-            for (CompartmentsIter c(rt); !c.done(); c.next())
-                JS_ASSERT(!c->needsBarrier());
-        }
-#endif
-        if (shouldSweep)
-            SweepPhase(rt, gckind, &startBackgroundSweep);
+        IncrementalCollectSlice(rt, budget, reason, gckind);
     }
-
-    if (startBackgroundSweep)
-        rt->gcHelperThread.startBackgroundSweep(gckind == GC_SHRINK);
 }
 
 #ifdef JS_GC_ZEAL
 static bool
 IsDeterministicGCReason(gcreason::Reason reason)
 {
     if (reason > gcreason::DEBUG_GC && reason != gcreason::CC_FORCED)
         return false;
@@ -4265,30 +4470,44 @@ RunDebugGC(JSContext *cx)
     JSRuntime *rt = cx->runtime;
     PrepareForDebugGC(cx->runtime);
 
     int type = rt->gcZeal();
     if (type == ZealIncrementalRootsThenFinish ||
         type == ZealIncrementalMarkAllThenFinish ||
         type == ZealIncrementalMultipleSlices)
     {
+        js::gc::State initialState = rt->gcIncrementalState;
         int64_t budget;
         if (type == ZealIncrementalMultipleSlices) {
-            // Start with a small slice limit and double it every slice. This ensure that we get
-            // multiple slices, and collection runs to completion.
-            if (rt->gcIncrementalState == NO_INCREMENTAL)
+            /*
+             * Start with a small slice limit and double it every slice. This
+             * ensures that we get multiple slices, and collection runs to
+             * completion.
+             */
+            if (initialState == NO_INCREMENTAL)
                 rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
             else
                 rt->gcIncrementalLimit *= 2;
             budget = SliceBudget::WorkBudget(rt->gcIncrementalLimit);
         } else {
-            // This triggers incremental GC but is actually ignored by IncrementalMarkSlice.
             budget = SliceBudget::Unlimited;
         }
+
         Collect(rt, true, budget, GC_NORMAL, gcreason::DEBUG_GC);
+
+        /*
+         * For multi-slice zeal, reset the slice size when we get to the sweep
+         * phase.
+         */
+        if (type == ZealIncrementalMultipleSlices &&
+            initialState == MARK && rt->gcIncrementalState == SWEEP)
+        {
+            rt->gcIncrementalLimit = rt->gcZealFrequency / 2;
+        }
     } else {
         Collect(rt, false, SliceBudget::Unlimited, GC_NORMAL, gcreason::DEBUG_GC);
     }
 
 #endif
 }
 
 void
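
The core of the incremental sweeping machinery is the resumable loop in SweepPhase above. The following standalone sketch (names simplified and illustrative, not the SpiderMonkey API) shows the structure: the phase, compartment and kind indices live on the runtime, so a slice that exhausts its budget can return early and the next slice resumes exactly where the previous one stopped.

// Standalone sketch of a resumable sweep slice (illustrative only).
struct SketchBudget {
    long remaining;
    explicit SketchBudget(long n) : remaining(n) {}
    void step(long n) { remaining -= n; }
    bool isOverBudget() const { return remaining <= 0; }
};

static const int PhaseCount = 1;                 /* shapes, base shapes, type objects */
static const int PhaseLength[PhaseCount] = { 3 };
static const int CompartmentCount = 4;           /* stand-in for rt->compartments */

struct SketchRuntime {
    int sweepPhase;                              /* persisted between slices */
    int sweepCompartmentIndex;
    int sweepKindIndex;
    SketchRuntime() : sweepPhase(0), sweepCompartmentIndex(0), sweepKindIndex(0) {}
};

/* Stand-in for ArenaLists::foregroundFinalize: sweep some arenas of one kind,
 * returning false if the budget ran out before the kind was finished. */
static bool finalizeSome(SketchBudget &budget) {
    budget.step(1);
    return !budget.isOverBudget();
}

static bool sweepSlice(SketchRuntime &rt, SketchBudget &budget) {
    for (; rt.sweepPhase < PhaseCount; ++rt.sweepPhase) {
        for (; rt.sweepCompartmentIndex < CompartmentCount; ++rt.sweepCompartmentIndex) {
            while (rt.sweepKindIndex < PhaseLength[rt.sweepPhase]) {
                if (!finalizeSome(budget))
                    return false;                /* over budget: resume here next slice */
                ++rt.sweepKindIndex;
            }
            rt.sweepKindIndex = 0;
        }
        rt.sweepCompartmentIndex = 0;
    }
    return true;                                 /* all kinds in all compartments swept */
}
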
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -36,23 +36,25 @@ struct JSCompartment;
 #else
 # define JS_CHECK_STACK_SIZE(limit, lval)  ((uintptr_t)(lval) > limit)
 #endif
 
 namespace js {
 
 class GCHelperThread;
 struct Shape;
+struct SliceBudget;
 
 namespace gc {
 
 enum State {
     NO_INCREMENTAL,
     MARK_ROOTS,
     MARK,
+    SWEEP,
     INVALID
 };
 
 class ChunkPool {
     Chunk   *emptyChunkListHead;
     size_t  emptyCount;
 
   public:
@@ -141,42 +143,44 @@ IsNurseryAllocable(AllocKind kind)
         false      /* FINALIZE_EXTERNAL_STRING */
     };
     return map[kind];
 }
 
 inline JSGCTraceKind
 GetGCThingTraceKind(const void *thing);
 
-struct ArenaLists {
+/*
+ * ArenaList::head points to the start of the list. Normally cursor points
+ * to the first arena in the list with some free things and all arenas
+ * before cursor are fully allocated. However, as the arena currently being
+ * allocated from is considered full while its list of free spans is moved
+ * into the freeList, during the GC or cell enumeration, when an
+ * unallocated freeList is moved back to the arena, we can see an arena
+ * with some free cells before the cursor. The cursor is an indirect
+ * pointer to allow for efficient list insertion at the cursor point and
+ * other list manipulations.
+ */
+struct ArenaList {
+    ArenaHeader     *head;
+    ArenaHeader     **cursor;
 
-    /*
-     * ArenaList::head points to the start of the list. Normally cursor points
-     * to the first arena in the list with some free things and all arenas
-     * before cursor are fully allocated. However, as the arena currently being
-     * allocated from is considered full while its list of free spans is moved
-     * into the freeList, during the GC or cell enumeration, when an
-     * unallocated freeList is moved back to the arena, we can see an arena
-     * with some free cells before the cursor. The cursor is an indirect
-     * pointer to allow for efficient list insertion at the cursor point and
-     * other list manipulations.
-     */
-    struct ArenaList {
-        ArenaHeader     *head;
-        ArenaHeader     **cursor;
+    ArenaList() {
+        clear();
+    }
 
-        ArenaList() {
-            clear();
-        }
+    void clear() {
+        head = NULL;
+        cursor = &head;
+    }
 
-        void clear() {
-            head = NULL;
-            cursor = &head;
-        }
-    };
+    void insert(ArenaHeader *arena);
+};
+
+struct ArenaLists {
 
   private:
     /*
      * For each arena kind its free list is represented as the first span with
      * free things. Initially all the spans are initialized as empty. After we
      * find a new arena with available things we move its first free span into
      * the list and set the arena as fully allocated. way we do not need to
      * update the arena header after the initial allocation. When starting the
@@ -207,21 +211,27 @@ struct ArenaLists {
         BFS_DONE,
         BFS_RUN,
         BFS_JUST_FINISHED
     };
 
     volatile uintptr_t backgroundFinalizeState[FINALIZE_LIMIT];
 
   public:
+    /* For each arena kind, a list of arenas remaining to be swept. */
+    ArenaHeader *arenaListsToSweep[FINALIZE_LIMIT];
+
+  public:
     ArenaLists() {
         for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
             freeLists[i].initAsEmpty();
         for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
             backgroundFinalizeState[i] = BFS_DONE;
+        for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
+            arenaListsToSweep[i] = NULL;
     }
 
     ~ArenaLists() {
         for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
             /*
              * We can only call this during the shutdown after the last GC when
              * the background finalization is disabled.
              */
@@ -251,16 +261,20 @@ struct ArenaLists {
             if (backgroundFinalizeState[i] != BFS_DONE)
                 return false;
             if (arenaLists[i].head)
                 return false;
         }
         return true;
     }
 
+    bool arenasAreFull(AllocKind thingKind) const {
+        return !*arenaLists[thingKind].cursor;
+    }
+
     void unmarkAll() {
         for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
             /* The background finalization must have stopped at this point. */
             JS_ASSERT(backgroundFinalizeState[i] == BFS_DONE ||
                       backgroundFinalizeState[i] == BFS_JUST_FINISHED);
             for (ArenaHeader *aheader = arenaLists[i].head; aheader; aheader = aheader->next) {
                 uintptr_t *word = aheader->chunk()->bitmap.arenaBits(aheader);
                 memset(word, 0, ArenaBitmapWords * sizeof(uintptr_t));
@@ -359,26 +373,28 @@ struct ArenaLists {
             JS_ASSERT(freeLists[i].isEmpty());
 #endif
     }
 
     void checkEmptyFreeList(AllocKind kind) {
         JS_ASSERT(freeLists[kind].isEmpty());
     }
 
-    void finalizeObjects(FreeOp *fop);
-    void finalizeStrings(FreeOp *fop);
-    void finalizeShapes(FreeOp *fop);
-    void finalizeScripts(FreeOp *fop);
+    void queueObjectsForSweep(FreeOp *fop);
+    void queueStringsForSweep(FreeOp *fop);
+    void queueShapesForSweep(FreeOp *fop);
+    void queueScriptsForSweep(FreeOp *fop);
 
+    bool foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget);
     static void backgroundFinalize(FreeOp *fop, ArenaHeader *listHead);
 
   private:
     inline void finalizeNow(FreeOp *fop, AllocKind thingKind);
-    inline void finalizeLater(FreeOp *fop, AllocKind thingKind);
+    inline void queueForForegroundSweep(FreeOp *fop, AllocKind thingKind);
+    inline void queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind);
 
     inline void *allocateFromArena(JSCompartment *comp, AllocKind thingKind);
 };
 
 /*
  * Initial allocation size for data structures holding chunks is set to hold
  * chunks with total capacity of 16MB to avoid buffer resizes during browser
  * startup.
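
ArenaList::insert is only declared above; the following is a minimal sketch of a cursor-preserving insert, consistent with the head/cursor comment on ArenaList (the body here is an assumption, not necessarily the patch's actual implementation):

    void
    ArenaList::insert(ArenaHeader *aheader)
    {
        JS_ASSERT(aheader);
        /* Splice the arena in at the cursor point... */
        aheader->next = *cursor;
        *cursor = aheader;
        /* ...and advance the cursor past it, so that every arena before the
           cursor remains fully allocated. */
        cursor = &aheader->next;
    }
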
--- a/js/src/jsgcinlines.h
+++ b/js/src/jsgcinlines.h
@@ -413,17 +413,17 @@ NewGCThing(JSContext *cx, js::gc::AllocK
 
     MaybeCheckStackRoots(cx, /* relax = */ false);
 
     JSCompartment *comp = cx->compartment;
     void *t = comp->arenas.allocateFromFreeList(kind, thingSize);
     if (!t)
         t = js::gc::ArenaLists::refillFreeList(cx, kind);
 
-    JS_ASSERT_IF(t && comp->needsBarrier(),
+    JS_ASSERT_IF(t && comp->wasGCStarted() && comp->needsBarrier(),
                  static_cast<T *>(t)->arenaHeader()->allocatedDuringIncremental);
 
 #if defined(JSGC_GENERATIONAL) && defined(JS_GC_ZEAL)
     if (cx->runtime->gcVerifyPostData && IsNurseryAllocable(kind) && !IsAtomsCompartment(comp))
         comp->gcNursery.insertPointer(t);
 #endif
     return static_cast<T *>(t);
 }
@@ -440,17 +440,17 @@ TryNewGCThing(JSContext *cx, js::gc::All
     JS_ASSERT(!cx->runtime->noGCOrAllocationCheck);
 
 #ifdef JS_GC_ZEAL
     if (cx->runtime->needZealousGC())
         return NULL;
 #endif
 
     void *t = cx->compartment->arenas.allocateFromFreeList(kind, thingSize);
-    JS_ASSERT_IF(t && cx->compartment->needsBarrier(),
+    JS_ASSERT_IF(t && cx->compartment->wasGCStarted() && cx->compartment->needsBarrier(),
                  static_cast<T *>(t)->arenaHeader()->allocatedDuringIncremental);
 
 #if defined(JSGC_GENERATIONAL) && defined(JS_GC_ZEAL)
     JSCompartment *comp = cx->compartment;
     if (cx->runtime->gcVerifyPostData && IsNurseryAllocable(kind) && !IsAtomsCompartment(comp))
         comp->gcNursery.insertPointer(t);
 #endif
     return static_cast<T *>(t);
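
The foregroundFinalize declaration above, together with arenaListsToSweep, is what makes foreground sweeping resumable: each slice finalizes queued arenas of a kind until the SliceBudget runs out and reports whether it finished. A rough sketch under those assumptions (FinalizeArenaThings is a hypothetical stand-in for the per-arena finalization loop, and the budget accounting is simplified):

    bool
    ArenaLists::foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget)
    {
        while (ArenaHeader *aheader = arenaListsToSweep[thingKind]) {
            if (sliceBudget.isOverBudget())
                return false;                    /* resume this kind next slice */
            arenaListsToSweep[thingKind] = aheader->next;
            FinalizeArenaThings(fop, aheader, thingKind);  /* hypothetical helper */
            arenaLists[thingKind].insert(aheader);         /* hand the arena back */
        }
        return true;
    }
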
--- a/js/src/jspropertytree.cpp
+++ b/js/src/jspropertytree.cpp
@@ -97,85 +97,97 @@ PropertyTree::insertChild(JSContext *cx,
     child->setParent(parent);
     return true;
 }
 
 void
 Shape::removeChild(Shape *child)
 {
     JS_ASSERT(!child->inDictionary());
+    JS_ASSERT(child->parent == this);
 
     KidsPointer *kidp = &kids;
 
     if (kidp->isShape()) {
         JS_ASSERT(kidp->toShape() == child);
         kidp->setNull();
+        child->parent = NULL;
         return;
     }
 
     KidsHash *hash = kidp->toHash();
     JS_ASSERT(hash->count() >= 2);      /* otherwise kidp->isShape() should be true */
 
     hash->remove(child);
+    child->parent = NULL;
 
     if (hash->count() == 1) {
         /* Convert from HASH form back to SHAPE form. */
         KidsHash::Range r = hash->all();
         Shape *otherChild = r.front();
         JS_ASSERT((r.popFront(), r.empty()));    /* No more elements! */
         kidp->setShape(otherChild);
         js::UnwantedForeground::delete_(hash);
     }
 }
 
-/*
- * We need a read barrier for the shape tree, since these are weak pointers.
- */
-static Shape *
-ReadBarrier(Shape *shape)
-{
-#ifdef JSGC_INCREMENTAL
-    JSCompartment *comp = shape->compartment();
-    if (comp->needsBarrier()) {
-        Shape *tmp = shape;
-        MarkShapeUnbarriered(comp->barrierTracer(), &tmp, "read barrier");
-        JS_ASSERT(tmp == shape);
-    }
-#endif
-    return shape;
-}
-
 Shape *
 PropertyTree::getChild(JSContext *cx, Shape *parent_, uint32_t nfixed, const StackShape &child)
 {
-    Shape *shape;
+    Shape *shape = NULL;
 
     JS_ASSERT(parent_);
 
     /*
      * The property tree has extremely low fan-out below its root in
      * popular embeddings with real-world workloads. Patterns such as
      * defining closures that capture a constructor's environment as
      * getters or setters on the new object that is passed in as
      * |this| can significantly increase fan-out below the property
      * tree root -- see bug 335700 for details.
      */
     KidsPointer *kidp = &parent_->kids;
     if (kidp->isShape()) {
-        shape = kidp->toShape();
-        if (shape->matches(child))
-            return ReadBarrier(shape);
+        Shape *kid = kidp->toShape();
+        if (kid->matches(child))
+            shape = kid;
     } else if (kidp->isHash()) {
         shape = *kidp->toHash()->lookup(child);
-        if (shape)
-            return ReadBarrier(shape);
     } else {
         /* If kidp->isNull(), we always insert. */
     }
 
+#ifdef JSGC_INCREMENTAL
+    if (shape) {
+        JSCompartment *comp = shape->compartment();
+        if (comp->needsBarrier()) {
+            /*
+             * We need a read barrier for the shape tree, since these are weak
+             * pointers.
+             */
+            Shape *tmp = shape;
+            MarkShapeUnbarriered(comp->barrierTracer(), &tmp, "read barrier");
+            JS_ASSERT(tmp == shape);
+        } else if (comp->isGCSweeping() && !shape->isMarked() &&
+                   !shape->arenaHeader()->allocatedDuringIncremental)
+        {
+            /*
+             * The shape we've found is unreachable and due to be finalized, so
+             * remove our weak reference to it and don't use it.
+             */
+            JS_ASSERT(parent_->isMarked());
+            parent_->removeChild(shape);
+            shape = NULL;
+        }
+    }
+#endif
+
+    if (shape)
+        return shape;
+
     StackShape::AutoRooter childRoot(cx, &child);
     RootedShape parent(cx, parent_);
 
     shape = newShape(cx);
     if (!shape)
         return NULL;
 
     new (shape) Shape(child, nfixed);
@@ -185,16 +197,21 @@ PropertyTree::getChild(JSContext *cx, Sh
 
     return shape;
 }
 
 void
 Shape::finalize(FreeOp *fop)
 {
     if (!inDictionary()) {
+        /*
+         * Note that due to incremental sweeping, if !parent->isMarked() then
+         * the parent may point to a new shape allocated in the same cell that
+         * used to hold our parent.
+         */
         if (parent && parent->isMarked())
             parent->removeChild(this);
 
         if (kids.isHash())
             fop->delete_(kids.toHash());
     }
 }
 
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -1196,17 +1196,17 @@ ScriptSource::createFromSource(JSContext
 #endif
 
 #ifdef JSGC_INCREMENTAL
     /*
      * During the IGC we need to ensure that source is marked whenever it is
      * accessed even if the name was already in the table. At this point old
      * scripts pointing to the source may no longer be reachable.
      */
-    if (cx->runtime->gcIncrementalState == MARK && cx->runtime->gcIsFull)
+    if (cx->runtime->gcIncrementalState != NO_INCREMENTAL && cx->runtime->gcIsFull)
         ss->marked = true;
 #endif
 
     JS_ASSERT_IF(ownSource, !tok);
 
 #ifdef JS_THREADSAFE
     if (tok && 0) {
         tok->ss = ss;
@@ -1336,17 +1336,17 @@ ScriptSource::performXDR(XDRState<mode> 
         ss->marked = ss->onRuntime_ = ss->argumentsNotIncluded_ = false;
 #ifdef DEBUG
         ss->ready_ = false;
 #endif
         ss->data.compressed = NULL;
         cleanup.protect(ss);
 #ifdef JSGC_INCREMENTAL
         // See comment in ScriptSource::createFromSource.
-        if (xdr->cx()->runtime->gcIncrementalState == MARK &&
+        if (xdr->cx()->runtime->gcIncrementalState != NO_INCREMENTAL &&
             xdr->cx()->runtime->gcIsFull)
             ss->marked = true;
 #endif
     }
     if (!xdr->codeUint32(&ss->length_))
         return false;
     if (!xdr->codeUint32(&ss->compressedLength))
         return false;
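
The ScriptSource sites above and the filename site below now apply the same widened test: once an incremental full GC has started, whether it is still marking or already sweeping, an entry handed out from the table must be marked explicitly, because the old scripts that kept it alive may already be unreachable. Expressed as a hypothetical helper for illustration only:

    static inline bool
    MustMarkOnAccess(JSRuntime *rt)
    {
        /* Any phase of an incremental full GC, not just MARK. */
        return rt->gcIncrementalState != NO_INCREMENTAL && rt->gcIsFull;
    }
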
@@ -1404,17 +1404,17 @@ js::SaveScriptFilename(JSContext *cx, co
     ScriptFilenameEntry *sfe = *p;
 #ifdef JSGC_INCREMENTAL
     /*
      * During the IGC we need to ensure that filename is marked whenever it is
      * accessed even if the name was already in the table. At this point old
      * scripts or exceptions pointing to the filename may no longer be
      * reachable.
      */
-    if (rt->gcIncrementalState == MARK && rt->gcIsFull)
+    if (rt->gcIncrementalState != NO_INCREMENTAL && rt->gcIsFull)
         sfe->marked = true;
 #endif
 
     return sfe->filename;
 }
 
 void
 js::SweepScriptFilenames(JSRuntime *rt)