Bug 1117753 - remove the PJS generational GC. r=terrence
authorLars T Hansen <lhansen@mozilla.com>
Wed, 07 Jan 2015 08:05:26 +0100
changeset 248188 d4cf5f8e67afb55c3ad76e3cde57ac16771cac5f
parent 248187 097840bded9c3fa91f6d86b6468b0d45803443a2
child 248189 13869ca774bb3d673ef1c40aec1bdb8241ba56b0
push id: 4489
push user: raliiev@mozilla.com
push date: Mon, 23 Feb 2015 15:17:55 +0000
treeherder: mozilla-beta@fd7c3dc24146 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: terrence
bugs: 1117753
milestone: 37.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1117753 - remove the PJS generational GC. r=terrence
js/src/gc/ForkJoinNursery-inl.h
js/src/gc/ForkJoinNursery.cpp
js/src/gc/ForkJoinNursery.h
js/src/gc/GCInternals.h
js/src/gc/GCRuntime.h
js/src/gc/Marking.cpp
js/src/gc/Nursery.h
js/src/gc/RootMarking.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/JitFrames.cpp
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/ParallelFunctions.cpp
js/src/jscntxtinlines.h
js/src/jsgc.cpp
js/src/jsgc.h
js/src/jsgcinlines.h
js/src/jsinfer.cpp
js/src/jsobj.h
js/src/jsobjinlines.h
js/src/moz.build
js/src/vm/ForkJoin.cpp
js/src/vm/ForkJoin.h
js/src/vm/NativeObject.cpp
js/src/vm/NativeObject.h
js/src/vm/Runtime.h
js/src/vm/Shape.cpp
js/src/vm/Shape.h
js/src/vm/ThreadPool.cpp
js/src/vm/ThreadPool.h
deleted file mode 100644
--- a/js/src/gc/ForkJoinNursery-inl.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef gc_ForkJoinNursery_inl_h
-#define gc_ForkJoinNursery_inl_h
-
-#ifdef JSGC_FJGENERATIONAL
-
-#include "gc/ForkJoinNursery.h"
-
-#include "jsgc.h"
-
-namespace js {
-namespace gc {
-
-// For the following two predicates we can't check the attributes on
-// the chunk trailer because it's not known whether addr points into a
-// chunk.
-//
-// A couple of optimizations are possible if performance is an issue:
-//
-//  - The loop can be unrolled, and we can arrange for all array entries
-//    to be valid for this purpose so that the bound is constant.
-//  - The per-chunk test can be reduced to testing whether the high bits
-//    of the object pointer and the high bits of the chunk pointer are
-//    the same (and the latter value is essentially space[i]).
-//    Note, experiments with that do not show an improvement yet.
-//  - Taken together, those optimizations yield code that is one LOAD,
-//    one XOR, and one AND for each chunk, with the result being true
-//    iff the resulting value is zero.
-//  - We can have multiple versions of the predicates, and those that
-//    take known-good GCThing types can go directly to the attributes;
-//    it may be possible to ensure that more calls use GCThing types.
-//    Note, this requires the worker ID to be part of the chunk
-//    attribute bit vector.
-//
-// Performance may not be an issue as there may be few survivors of a
-// collection in the ForkJoinNursery and few objects will be tested.
-// If so then the bulk of the calls may come from the code that scans
-// the roots.  Behavior will be workload-dependent however.
-
-MOZ_ALWAYS_INLINE bool
-ForkJoinNursery::isInsideNewspace(const void *addr)
-{
-    uintptr_t p = reinterpret_cast<uintptr_t>(addr);
-    for (unsigned i = 0 ; i <= currentChunk_ ; i++) {
-        if (p >= newspace[i]->start() && p < newspace[i]->end())
-            return true;
-    }
-    return false;
-}
-
-MOZ_ALWAYS_INLINE bool
-ForkJoinNursery::isInsideFromspace(const void *addr)
-{
-    uintptr_t p = reinterpret_cast<uintptr_t>(addr);
-    for (unsigned i = 0 ; i < numFromspaceChunks_ ; i++) {
-        if (p >= fromspace[i]->start() && p < fromspace[i]->end())
-            return true;
-    }
-    return false;
-}
-
-MOZ_ALWAYS_INLINE bool
-ForkJoinNursery::isForwarded(Cell *cell)
-{
-    MOZ_ASSERT(isInsideFromspace(cell));
-    const RelocationOverlay *overlay = RelocationOverlay::fromCell(cell);
-    return overlay->isForwarded();
-}
-
-template <typename T>
-MOZ_ALWAYS_INLINE bool
-ForkJoinNursery::getForwardedPointer(T **ref)
-{
-    MOZ_ASSERT(ref);
-    MOZ_ASSERT(isInsideFromspace(*ref));
-    const RelocationOverlay *overlay = RelocationOverlay::fromCell(*ref);
-    if (!overlay->isForwarded())
-        return false;
-    // This static_cast from Cell* restricts T to valid (GC thing) types.
-    *ref = static_cast<T *>(overlay->forwardingAddress());
-    return true;
-}
-
-} // namespace gc
-} // namespace js
-
-#endif // JSGC_FJGENERATIONAL
-
-#endif // gc_ForkJoinNursery_inl_h
deleted file mode 100644
--- a/js/src/gc/ForkJoinNursery.cpp
+++ /dev/null
@@ -1,932 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifdef JSGC_FJGENERATIONAL
-
-#include "gc/ForkJoinNursery-inl.h"
-
-#include "mozilla/IntegerPrintfMacros.h"
-
-#include "prmjtime.h"
-
-#include "gc/Heap.h"
-#include "jit/JitFrames.h"
-#include "jit/RematerializedFrame.h"
-#include "vm/ArrayObject.h"
-#include "vm/ForkJoin.h"
-
-#include "jsgcinlines.h"
-#include "gc/Nursery-inl.h"
-#include "vm/NativeObject-inl.h"
-
-// The ForkJoinNursery provides an object nursery for movable object
-// types for one ForkJoin worker thread.  There is a one-to-one
-// correspondence between ForkJoinNursery and ForkJoinContext.
-//
-// For a general overview of how the ForkJoinNursery fits into the
-// overall PJS system, see the comment block in vm/ForkJoin.h.
-//
-//
-// Invariants on the ForkJoinNursery:
-//
-// Let "the tenured area" from the point of view of one
-// ForkJoinNursery comprise the global tenured area and the nursery's
-// owning worker's private tenured area.  Then:
-//
-// - There can be pointers from the tenured area into a ForkJoinNursery,
-//   and from the ForkJoinNursery into the tenured area
-//
-// - There *cannot* be a pointer from one ForkJoinNursery into
-//   another, or from one private tenured area into another, or from a
-//   ForkJoinNursery into another worker's private tenured are or vice
-//   versa, or from any ForkJoinNursery or private tenured area into
-//   the normal Nursery.
-//
-// For those invariants to hold the normal Nursery must be empty before
-// a ForkJoin section.
-//
-//
-// General description:
-//
-// The nursery maintains a space into which small, movable objects
-// are allocated.  Other objects are allocated directly in the private
-// tenured area for the worker.
-//
-// If an allocation request can't be satisfied because the nursery is
-// full then a /minor collection/ is triggered without bailouts.  This
-// collection copies nursery-allocated objects reachable from the
-// worker's roots into a fresh space.  Then the old space is
-// discarded.
-//
-// Nurseries are maintained in 1MB chunks.  If the live data in a
-// nursery after a collection exceeds some set fraction (currently
-// 1/3) then the nursery is grown, independently of other nurseries.
-//
-// There is an upper limit on the number of chunks in a nursery.  If
-// the live data in a nursery after a collection exceeds the set
-// fraction and the nursery can't grow, then the next collection will
-// be an /evacuating collection/.
-//
-// An evacuating collection copies nursery-allocated objects reachable
-// from the worker's roots into the worker's private tenured area.
-//
-// If an allocation request in the tenured area - whether the request
-// comes from the mutator or from the garbage collector during
-// evacuation - can't be satisified because the tenured area is full,
-// then the worker bails out and triggers a full collection in the
-// ForkJoin worker's zone.  This is expected to happen very rarely in
-// practice.
-//
-// The roots for a collection in the ForkJoinNursery are: the frames
-// of the execution stack, any registered roots on the execution
-// stack, any objects in the private tenured area, and the ForkJoin
-// result object in the common tenured area.
-//
-// The entire private tenured area is considered to be rooted in order
-// not to have to run write barriers during the ForkJoin section.
-// During a minor or evacuating collection in a worker the GC will
-// step through the worker's tenured area, examining each object for
-// pointers into the nursery.
-//
-// The ForkJoinNursery contains its own object tracing machinery for
-// most of the types that can be allocated in the nursery.  But it
-// does not handle all types, and there are two places where the code
-// in ForkJoinNursery loses control of the tracing:
-//
-// - When calling clasp->trace() in traceObject()
-// - When calling MarkForkJoinStack() in forwardFromStack()
-//
-// In both cases:
-//
-// - We pass a ForkJoinNurseryCollectionTracer object with a callback
-//   to ForkJoinNursery::MinorGCCallback
-//
-// - We should only ever end up in MarkInternal() in Marking.cpp, in
-//   the case in that code that calls back to trc->callback.  We
-//   should /never/ end up in functions that trigger use of the mark
-//   stack internal to the general GC's marker.
-//
-// - Any function along the path to MarkInternal() that asks about
-//   whether something is in the nursery or is tenured /must/ be aware
-//   that there can be multiple nursery and tenured areas; assertions
-//   get this wrong a lot of the time and must be fixed when they do.
-//   In practice, such code either must have a case for each nursery
-//   kind or must use the IsInsideNursery(Cell*) method, which looks
-//   only at the chunk tag.
-//
-//
-// Terminological note:
-//
-// - While the mutator is running it is allocating in what's known as
-//   the nursery's "newspace".  The mutator may also allocate directly
-//   in the tenured space, but the tenured space is not part of the
-//   newspace.
-//
-// - While the gc is running, the previous "newspace" has been renamed
-//   as the gc's "fromspace", and the space that objects are copied
-//   into is known as the "tospace".  The tospace may be a nursery
-//   space (during a minor collection), or it may be a tenured space
-//   (during an evacuation collection), but it's always one or the
-//   other, never a combination.  After gc the fromspace is always
-//   discarded.
-//
-// - If the gc copies objects into a nursery tospace then this tospace
-//   becomes known as the "newspace" following gc.  Otherwise, a new
-//   newspace won't be needed (if the parallel section is finished) or
-//   can be created empty (if the gc just needed to evacuate).
-//
-
-namespace js {
-namespace gc {
-
-ForkJoinNursery::ForkJoinNursery(ForkJoinContext *cx, ForkJoinGCShared *shared, Allocator *tenured)
-  : cx_(cx)
-  , tenured_(tenured)
-  , shared_(shared)
-  , evacuationZone_(nullptr)
-  , currentStart_(0)
-  , currentEnd_(0)
-  , position_(0)
-  , currentChunk_(0)
-  , numActiveChunks_(0)
-  , numFromspaceChunks_(0)
-  , mustEvacuate_(false)
-  , isEvacuating_(false)
-  , movedSize_(0)
-  , head_(nullptr)
-  , tail_(&head_)
-  , hugeSlotsNew(0)
-  , hugeSlotsFrom(1)
-{
-    for ( size_t i=0 ; i < MaxNurseryChunks ; i++ ) {
-        newspace[i] = nullptr;
-        fromspace[i] = nullptr;
-    }
-}
-
-ForkJoinNursery::~ForkJoinNursery()
-{
-    for ( size_t i=0 ; i < numActiveChunks_ ; i++ ) {
-        if (newspace[i])
-            shared_->freeNurseryChunk(newspace[i]);
-    }
-}
-
-bool
-ForkJoinNursery::initialize()
-{
-    if (!hugeSlots[hugeSlotsNew].init() || !hugeSlots[hugeSlotsFrom].init())
-        return false;
-    if (!initNewspace())
-        return false;
-    return true;
-}
-
-void
-ForkJoinNursery::minorGC()
-{
-    if (mustEvacuate_) {
-        mustEvacuate_ = false;
-        pjsCollection(Evacuate|Recreate);
-    } else {
-        pjsCollection(Collect|Recreate);
-    }
-}
-
-void
-ForkJoinNursery::evacuatingGC()
-{
-    pjsCollection(Evacuate);
-}
-
-#define TIME_START(name) int64_t timstampStart_##name = PRMJ_Now()
-#define TIME_END(name) int64_t timstampEnd_##name = PRMJ_Now()
-#define TIME_TOTAL(name) (timstampEnd_##name - timstampStart_##name)
-
-void
-ForkJoinNursery::pjsCollection(int op)
-{
-    MOZ_ASSERT((op & Collect) != (op & Evacuate));
-
-    bool evacuate = op & Evacuate;
-    bool recreate = op & Recreate;
-
-    MOZ_ASSERT(!isEvacuating_);
-    MOZ_ASSERT(!evacuationZone_);
-    MOZ_ASSERT(!head_);
-    MOZ_ASSERT(tail_ == &head_);
-
-    JSRuntime *const rt = shared_->runtime();
-    const unsigned currentNumActiveChunks_ = numActiveChunks_;
-    const char *msg = "";
-
-    MOZ_ASSERT(!rt->needsIncrementalBarrier());
-
-    TIME_START(pjsCollection);
-
-    rt->gc.incFJMinorCollecting();
-    if (evacuate) {
-        isEvacuating_ = true;
-        evacuationZone_ = shared_->zone();
-    }
-
-    flip();
-    if (recreate) {
-        if (!initNewspace())
-            CrashAtUnhandlableOOM("Cannot expand PJS nursery during GC");
-        // newspace must be at least as large as fromSpace
-        numActiveChunks_ = currentNumActiveChunks_;
-    }
-    ForkJoinNurseryCollectionTracer trc(rt, this);
-    forwardFromRoots(&trc);
-    collectToFixedPoint(&trc);
-    jit::UpdateJitActivationsForMinorGC<ForkJoinNursery>(TlsPerThreadData.get(), &trc);
-    freeFromspace();
-
-    size_t live = movedSize_;
-    computeNurserySizeAfterGC(live, &msg);
-
-    sweepHugeSlots();
-    MOZ_ASSERT(hugeSlots[hugeSlotsFrom].empty());
-    MOZ_ASSERT_IF(isEvacuating_, hugeSlots[hugeSlotsNew].empty());
-
-    isEvacuating_ = false;
-    evacuationZone_ = nullptr;
-    head_ = nullptr;
-    tail_ = &head_;
-    movedSize_ = 0;
-
-    rt->gc.decFJMinorCollecting();
-
-    TIME_END(pjsCollection);
-
-    // Note, the spew is awk-friendly, non-underlined words serve as markers:
-    //   FJGC _tag_ us _value_ copied _value_ size _value_ _message-word_ ...
-    shared_->spewGC("FJGC %s us %5" PRId64 "  copied %7" PRIu64 "  size %" PRIu64 "  %s",
-                    (evacuate ? "evacuate " : "collect  "),
-                    TIME_TOTAL(pjsCollection),
-                    (uint64_t)live,
-                    (uint64_t)numActiveChunks_*1024*1024,
-                    msg);
-}
-
-#undef TIME_START
-#undef TIME_END
-#undef TIME_TOTAL
-
-void
-ForkJoinNursery::computeNurserySizeAfterGC(size_t live, const char **msg)
-{
-    // Grow the nursery if it is too full.  Do not bother to shrink it - lazy
-    // chunk allocation means that a too-large nursery will not really be a problem,
-    // the entire nursery will be deallocated soon anyway.
-    if (live * NurseryLoadFactor > numActiveChunks_ * ForkJoinNurseryChunk::UsableSize) {
-        if (numActiveChunks_ < MaxNurseryChunks) {
-            while (numActiveChunks_ < MaxNurseryChunks &&
-                   live * NurseryLoadFactor > numActiveChunks_ * ForkJoinNurseryChunk::UsableSize)
-            {
-                ++numActiveChunks_;
-            }
-        } else {
-            // Evacuation will tend to drive us toward the cliff of a bailout GC, which
-            // is not good, probably worse than working within the thread at a higher load
-            // than desirable.
-            //
-            // Thus it's possible to be more sophisticated than this:
-            //
-            // - evacuate only after several minor GCs in a row exceeded the set load
-            // - evacuate only if significantly less space than required is available, eg,
-            //   if only 1/2 the required free space is available
-            *msg = "  Overfull, will evacuate next";
-            mustEvacuate_ = true;
-        }
-    }
-}
-
-void
-ForkJoinNursery::flip()
-{
-    size_t i;
-    for (i=0; i < numActiveChunks_; i++) {
-        if (!newspace[i])
-            break;
-        fromspace[i] = newspace[i];
-        newspace[i] = nullptr;
-        fromspace[i]->trailer.location = gc::ChunkLocationBitPJSFromspace;
-    }
-    numFromspaceChunks_ = i;
-    numActiveChunks_ = 0;
-
-    int tmp = hugeSlotsNew;
-    hugeSlotsNew = hugeSlotsFrom;
-    hugeSlotsFrom = tmp;
-
-    MOZ_ASSERT(hugeSlots[hugeSlotsNew].empty());
-}
-
-void
-ForkJoinNursery::freeFromspace()
-{
-    for (size_t i=0; i < numFromspaceChunks_; i++) {
-        shared_->freeNurseryChunk(fromspace[i]);
-        fromspace[i] = nullptr;
-    }
-    numFromspaceChunks_ = 0;
-}
-
-bool
-ForkJoinNursery::initNewspace()
-{
-    MOZ_ASSERT(newspace[0] == nullptr);
-    MOZ_ASSERT(numActiveChunks_ == 0);
-
-    numActiveChunks_ = 1;
-    return setCurrentChunk(0);
-}
-
-MOZ_ALWAYS_INLINE bool
-ForkJoinNursery::shouldMoveObject(void **thingp)
-{
-    // Note that thingp must really be a T** where T is some GCThing,
-    // ie, something that lives in a chunk (or nullptr).  This should
-    // be the case because the MinorGCCallback is only called on exact
-    // roots on the stack or slots within in tenured objects and not
-    // on slot/element arrays that can be malloc'd; they are forwarded
-    // using the forwardBufferPointer() mechanism.
-    //
-    // The main reason for that restriction is so that we can call a
-    // method here that can check the chunk trailer for the cell (a
-    // future optimization).
-    Cell **cellp = reinterpret_cast<Cell **>(thingp);
-    return isInsideFromspace(*cellp) && !getForwardedPointer(cellp);
-}
-
-/* static */ void
-ForkJoinNursery::MinorGCCallback(JSTracer *trcArg, void **thingp, JSGCTraceKind traceKind)
-{
-    // traceKind can be all sorts of things, when we're marking from stack roots
-    ForkJoinNursery *nursery = static_cast<ForkJoinNurseryCollectionTracer *>(trcArg)->nursery_;
-    if (nursery->shouldMoveObject(thingp)) {
-        // When other types of objects become nursery-allocable then the static_cast
-        // to JSObject * will no longer be valid.
-        MOZ_ASSERT(traceKind == JSTRACE_OBJECT);
-        *thingp = nursery->moveObjectToTospace(static_cast<JSObject *>(*thingp));
-    }
-}
-
-void
-ForkJoinNursery::forwardFromRoots(ForkJoinNurseryCollectionTracer *trc)
-{
-    // There should be no other roots as a result of effect-freedom.
-    forwardFromUpdatable(trc);
-    forwardFromStack(trc);
-    forwardFromTenured(trc);
-    forwardFromRematerializedFrames(trc);
-}
-
-void
-ForkJoinNursery::forwardFromUpdatable(ForkJoinNurseryCollectionTracer *trc)
-{
-    JSObject *obj = shared_->updatable();
-    if (obj)
-        traceObject(trc, obj);
-}
-
-void
-ForkJoinNursery::forwardFromStack(ForkJoinNurseryCollectionTracer *trc)
-{
-    MarkForkJoinStack(trc);
-}
-
-void
-ForkJoinNursery::forwardFromTenured(ForkJoinNurseryCollectionTracer *trc)
-{
-    JSObject *objs[ArenaCellCount];
-    ArenaLists &lists = tenured_->arenas;
-    for (size_t k=0; k < FINALIZE_LIMIT; k++) {
-        AllocKind kind = (AllocKind)k;
-        if (!IsFJNurseryAllocable(kind))
-            continue;
-
-        // When non-JSObject types become nursery-allocable the assumptions in the
-        // loops below will no longer hold; other types than JSObject must be
-        // handled.
-        MOZ_ASSERT(kind <= FINALIZE_OBJECT_LAST);
-
-        // Clear the free list that we're currently allocating out of.
-        lists.purge(kind);
-
-        // Since we only purge once, there must not currently be any partially
-        // full arenas left to allocate out of, or we would break out early.
-        MOZ_ASSERT(!lists.getArenaAfterCursor(kind));
-
-        ArenaIter ai;
-        ai.init(const_cast<Allocator *>(tenured_), kind);
-        for (; !ai.done(); ai.next()) {
-            if (isEvacuating_ && lists.arenaIsInUse(ai.get(), kind))
-                break;
-            // Use ArenaCellIterUnderFinalize, not ...UnderGC, because that side-steps
-            // some assertions in the latter that are wrong for PJS collection.
-            size_t numObjs = 0;
-            for (ArenaCellIterUnderFinalize i(ai.get()); !i.done(); i.next())
-                objs[numObjs++] = i.get<JSObject>();
-            for (size_t i=0; i < numObjs; i++)
-                traceObject(trc, objs[i]);
-        }
-    }
-}
-
-void
-ForkJoinNursery::forwardFromRematerializedFrames(ForkJoinNurseryCollectionTracer *trc)
-{
-    if (cx_->bailoutRecord->hasFrames())
-        jit::RematerializedFrame::MarkInVector(trc, cx_->bailoutRecord->frames());
-}
-
-/*static*/ void
-ForkJoinNursery::forwardBufferPointer(JSTracer *trc, HeapSlot **pSlotsElems)
-{
-    ForkJoinNursery *nursery = static_cast<ForkJoinNurseryCollectionTracer *>(trc)->nursery_;
-    HeapSlot *old = *pSlotsElems;
-
-    if (!nursery->isInsideFromspace(old))
-        return;
-
-    // If the elements buffer is zero length, the "first" item could be inside
-    // of the next object or past the end of the allocable area.  However,
-    // since we always store the runtime as the last word in a nursery chunk,
-    // isInsideFromspace will still be true, even if this zero-size allocation
-    // abuts the end of the allocable area. Thus, it is always safe to read the
-    // first word of |old| here.
-    *pSlotsElems = *reinterpret_cast<HeapSlot **>(old);
-    MOZ_ASSERT(!nursery->isInsideFromspace(*pSlotsElems));
-}
-
-void
-ForkJoinNursery::collectToFixedPoint(ForkJoinNurseryCollectionTracer *trc)
-{
-    for (RelocationOverlay *p = head_; p; p = p->next())
-        traceObject(trc, static_cast<JSObject *>(p->forwardingAddress()));
-}
-
-inline bool
-ForkJoinNursery::setCurrentChunk(int index)
-{
-    MOZ_ASSERT((size_t)index < numActiveChunks_);
-    MOZ_ASSERT(!newspace[index]);
-
-    currentChunk_ = index;
-    ForkJoinNurseryChunk *c = shared_->allocateNurseryChunk();
-    if (!c)
-        return false;
-    c->trailer.runtime = shared_->runtime();
-    c->trailer.location = gc::ChunkLocationBitPJSNewspace;
-    c->trailer.storeBuffer = nullptr;
-    currentStart_ = c->start();
-    currentEnd_ = c->end();
-    position_ = currentStart_;
-    newspace[index] = c;
-    return true;
-}
-
-void *
-ForkJoinNursery::allocate(size_t size)
-{
-    MOZ_ASSERT(position_ >= currentStart_);
-
-    if (currentEnd_ - position_ < size) {
-        if (currentChunk_ + 1 == numActiveChunks_)
-            return nullptr;
-        // Failure to allocate on growth is treated the same as exhaustion
-        // of the nursery.  If this happens during normal execution then
-        // we'll trigger a minor collection.  That collection will likely
-        // fail to obtain a block for the new tospace, and we'll go OOM
-        // immediately; that's expected and acceptable.  If we do continue
-        // to run (because some other thread or process has freed the memory)
-        // then so much the better.
-        if (!setCurrentChunk(currentChunk_ + 1))
-            return nullptr;
-    }
-
-    void *thing = reinterpret_cast<void *>(position_);
-    position_ += size;
-
-    JS_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size);
-    return thing;
-}
-
-JSObject *
-ForkJoinNursery::allocateObject(size_t baseSize, size_t numDynamic, bool& tooLarge)
-{
-    // Ensure there's enough space to replace the contents with a RelocationOverlay.
-    MOZ_ASSERT(baseSize >= sizeof(js::gc::RelocationOverlay));
-
-    // Too-large slot arrays cannot be accomodated.
-    if (numDynamic > MaxNurserySlots) {
-        tooLarge = true;
-        return nullptr;
-    }
-
-    // Allocate slots contiguously after the object.
-    size_t totalSize = baseSize + sizeof(HeapSlot) * numDynamic;
-    JSObject *obj = static_cast<JSObject *>(allocate(totalSize));
-    if (!obj) {
-        tooLarge = false;
-        return nullptr;
-    }
-    obj->setInitialSlotsMaybeNonNative(numDynamic
-                                       ? reinterpret_cast<HeapSlot *>(size_t(obj) + baseSize)
-                                       : nullptr);
-    return obj;
-}
-
-HeapSlot *
-ForkJoinNursery::allocateSlots(JSObject *obj, uint32_t nslots)
-{
-    MOZ_ASSERT(obj);
-    MOZ_ASSERT(nslots > 0);
-
-    if (nslots & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
-        return nullptr;
-
-    if (!isInsideNewspace(obj))
-        return obj->zone()->pod_malloc<HeapSlot>(nslots);
-
-    if (nslots > MaxNurserySlots)
-        return allocateHugeSlots(obj, nslots);
-
-    size_t size = nslots * sizeof(HeapSlot);
-    HeapSlot *slots = static_cast<HeapSlot *>(allocate(size));
-    if (slots)
-        return slots;
-
-    return allocateHugeSlots(obj, nslots);
-}
-
-HeapSlot *
-ForkJoinNursery::reallocateSlots(JSObject *obj, HeapSlot *oldSlots,
-                                 uint32_t oldCount, uint32_t newCount)
-{
-    if (newCount & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
-        return nullptr;
-
-    if (!isInsideNewspace(obj)) {
-        MOZ_ASSERT_IF(oldSlots, !isInsideNewspace(oldSlots));
-        return obj->zone()->pod_realloc<HeapSlot>(oldSlots, oldCount, newCount);
-    }
-
-    if (!isInsideNewspace(oldSlots))
-        return reallocateHugeSlots(obj, oldSlots, oldCount, newCount);
-
-    // No-op if we're shrinking, we can't make use of the freed portion.
-    if (newCount < oldCount)
-        return oldSlots;
-
-    HeapSlot *newSlots = allocateSlots(obj, newCount);
-    if (!newSlots)
-        return nullptr;
-
-    size_t oldSize = oldCount * sizeof(HeapSlot);
-    js_memcpy(newSlots, oldSlots, oldSize);
-    return newSlots;
-}
-
-ObjectElements *
-ForkJoinNursery::allocateElements(JSObject *obj, uint32_t nelems)
-{
-    MOZ_ASSERT(nelems >= ObjectElements::VALUES_PER_HEADER);
-    return reinterpret_cast<ObjectElements *>(allocateSlots(obj, nelems));
-}
-
-ObjectElements *
-ForkJoinNursery::reallocateElements(JSObject *obj, ObjectElements *oldHeader,
-                                    uint32_t oldCount, uint32_t newCount)
-{
-    HeapSlot *slots = reallocateSlots(obj, reinterpret_cast<HeapSlot *>(oldHeader),
-                                      oldCount, newCount);
-    return reinterpret_cast<ObjectElements *>(slots);
-}
-
-void
-ForkJoinNursery::freeSlots(HeapSlot *slots)
-{
-    if (!isInsideNewspace(slots)) {
-        hugeSlots[hugeSlotsNew].remove(slots);
-        js_free(slots);
-    }
-}
-
-HeapSlot *
-ForkJoinNursery::allocateHugeSlots(JSObject *obj, size_t nslots)
-{
-    if (nslots & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
-        return nullptr;
-
-    HeapSlot *slots = obj->zone()->pod_malloc<HeapSlot>(nslots);
-    if (!slots)
-        return slots;
-
-    // If this put fails, we will only leak the slots.
-    (void)hugeSlots[hugeSlotsNew].put(slots);
-    return slots;
-}
-
-HeapSlot *
-ForkJoinNursery::reallocateHugeSlots(JSObject *obj, HeapSlot *oldSlots,
-                                     uint32_t oldCount, uint32_t newCount)
-{
-    HeapSlot *newSlots = obj->zone()->pod_realloc<HeapSlot>(oldSlots, oldCount, newCount);
-    if (!newSlots)
-        return newSlots;
-
-    if (oldSlots != newSlots) {
-        hugeSlots[hugeSlotsNew].remove(oldSlots);
-        // If this put fails, we will only leak the slots.
-        (void)hugeSlots[hugeSlotsNew].put(newSlots);
-    }
-    return newSlots;
-}
-
-void
-ForkJoinNursery::sweepHugeSlots()
-{
-    for (HugeSlotsSet::Range r = hugeSlots[hugeSlotsFrom].all(); !r.empty(); r.popFront())
-        js_free(r.front());
-    hugeSlots[hugeSlotsFrom].clear();
-}
-
-MOZ_ALWAYS_INLINE void
-ForkJoinNursery::traceObject(ForkJoinNurseryCollectionTracer *trc, JSObject *obj)
-{
-    const Class *clasp = obj->getClass();
-    if (clasp->trace)
-        clasp->trace(trc, obj);
-
-    if (!obj->isNative())
-        return;
-    NativeObject *nobj = &obj->as<NativeObject>();
-
-    if (!nobj->hasEmptyElements())
-        markSlots(nobj->getDenseElements(), nobj->getDenseInitializedLength());
-
-    HeapSlot *fixedStart, *fixedEnd, *dynStart, *dynEnd;
-    nobj->getSlotRange(0, nobj->slotSpan(), &fixedStart, &fixedEnd, &dynStart, &dynEnd);
-    markSlots(fixedStart, fixedEnd);
-    markSlots(dynStart, dynEnd);
-}
-
-MOZ_ALWAYS_INLINE void
-ForkJoinNursery::markSlots(HeapSlot *vp, uint32_t nslots)
-{
-    markSlots(vp, vp + nslots);
-}
-
-MOZ_ALWAYS_INLINE void
-ForkJoinNursery::markSlots(HeapSlot *vp, HeapSlot *end)
-{
-    for (; vp != end; ++vp)
-        markSlot(vp);
-}
-
-MOZ_ALWAYS_INLINE void
-ForkJoinNursery::markSlot(HeapSlot *slotp)
-{
-    if (!slotp->isObject())
-        return;
-
-    JSObject *obj = &slotp->toObject();
-    if (!isInsideFromspace(obj))
-        return;
-
-    if (getForwardedPointer(&obj)) {
-        slotp->unsafeGet()->setObject(*obj);
-        return;
-    }
-
-    JSObject *moved = static_cast<JSObject *>(moveObjectToTospace(obj));
-    slotp->unsafeGet()->setObject(*moved);
-}
-
-AllocKind
-ForkJoinNursery::getObjectAllocKind(JSObject *obj)
-{
-    if (obj->is<ArrayObject>()) {
-        ArrayObject *aobj = &obj->as<ArrayObject>();
-        MOZ_ASSERT(aobj->numFixedSlots() == 0);
-
-        // Use minimal size object if we are just going to copy the pointer.
-        if (!isInsideFromspace((void *)aobj->getElementsHeader()))
-            return FINALIZE_OBJECT0_BACKGROUND;
-
-        size_t nelements = aobj->getDenseCapacity();
-        return GetBackgroundAllocKind(GetGCArrayKind(nelements));
-    }
-
-    if (obj->is<JSFunction>())
-        return obj->as<JSFunction>().getAllocKind();
-
-    // Don't handle other objects with special allocation requirements.
-    MOZ_ASSERT(!obj->is<TypedArrayObject>());
-    MOZ_ASSERT(obj->isNative());
-
-    AllocKind kind = GetGCObjectFixedSlotsKind(obj->as<NativeObject>().numFixedSlots());
-    MOZ_ASSERT(!IsBackgroundFinalized(kind));
-    MOZ_ASSERT(CanBeFinalizedInBackground(kind, obj->getClass()));
-    return GetBackgroundAllocKind(kind);
-}
-
-// Nursery allocation will never fail during GC - apart from true OOM - since
-// newspace is at least as large as fromspace, ergo a nullptr return from the
-// allocator means true OOM, which we catch and signal here.
-void *
-ForkJoinNursery::allocateInTospaceInfallible(size_t thingSize)
-{
-    void *p = allocate(thingSize);
-    if (!p)
-        CrashAtUnhandlableOOM("Cannot expand PJS nursery during GC");
-    return p;
-}
-
-void *
-ForkJoinNursery::allocateInTospace(gc::AllocKind thingKind)
-{
-    size_t thingSize = Arena::thingSize(thingKind);
-    if (isEvacuating_) {
-        void *t = tenured_->arenas.allocateFromFreeList(thingKind, thingSize);
-        if (t)
-            return t;
-        tenured_->arenas.checkEmptyFreeList(thingKind);
-        // This call may return NULL but should do so only if memory
-        // is truly exhausted.  However, allocateFromArena() can fail
-        // either because memory is exhausted or if the allocation
-        // budget is used up.  There is a guard in
-        // Chunk::allocateArena() against the latter case.
-        return tenured_->arenas.allocateFromArena(evacuationZone_, thingKind);
-    }
-    return allocateInTospaceInfallible(thingSize);
-}
-
-template <typename T>
-T *
-ForkJoinNursery::allocateInTospace(size_t nelem)
-{
-    if (isEvacuating_)
-        return evacuationZone_->pod_malloc<T>(nelem);
-    return static_cast<T *>(allocateInTospaceInfallible(nelem * sizeof(T)));
-}
-
-MOZ_ALWAYS_INLINE void
-ForkJoinNursery::insertIntoFixupList(RelocationOverlay *entry)
-{
-    *tail_ = entry;
-    tail_ = &entry->next_;
-    *tail_ = nullptr;
-}
-
-void *
-ForkJoinNursery::moveObjectToTospace(JSObject *src)
-{
-    AllocKind dstKind = getObjectAllocKind(src);
-    JSObject *dst = static_cast<JSObject *>(allocateInTospace(dstKind));
-    if (!dst)
-        CrashAtUnhandlableOOM("Failed to allocate object while moving object.");
-
-    movedSize_ += copyObjectToTospace(dst, src, dstKind);
-
-    RelocationOverlay *overlay = RelocationOverlay::fromCell(src);
-    overlay->forwardTo(dst);
-    insertIntoFixupList(overlay);
-
-    return static_cast<void *>(dst);
-}
-
-size_t
-ForkJoinNursery::copyObjectToTospace(JSObject *dst, JSObject *src, AllocKind dstKind)
-{
-    size_t srcSize = Arena::thingSize(dstKind);
-    size_t movedSize = srcSize;
-
-    // Arrays do not necessarily have the same AllocKind between src and dst.
-    // We deal with this by copying elements manually, possibly re-inlining
-    // them if there is adequate room inline in dst.
-    if (src->is<ArrayObject>())
-        srcSize = movedSize = sizeof(NativeObject);
-
-    js_memcpy(dst, src, srcSize);
-    if (src->isNative()) {
-        NativeObject *ndst = &dst->as<NativeObject>(), *nsrc = &src->as<NativeObject>();
-        movedSize += copySlotsToTospace(ndst, nsrc, dstKind);
-        movedSize += copyElementsToTospace(ndst, nsrc, dstKind);
-    }
-
-    // The shape's list head may point into the old object.
-    if (&src->shape_ == dst->shape_->listp) {
-        MOZ_ASSERT(cx_->isThreadLocal(dst->shape_.get()));
-        dst->shape_->listp = &dst->shape_;
-    }
-
-    return movedSize;
-}
-
-size_t
-ForkJoinNursery::copySlotsToTospace(NativeObject *dst, NativeObject *src, AllocKind dstKind)
-{
-    // Fixed slots have already been copied over.
-    if (!src->hasDynamicSlots())
-        return 0;
-
-    if (!isInsideFromspace(src->slots_)) {
-        hugeSlots[hugeSlotsFrom].remove(src->slots_);
-        if (!isEvacuating_)
-            hugeSlots[hugeSlotsNew].put(src->slots_);
-        return 0;
-    }
-
-    size_t count = src->numDynamicSlots();
-    dst->slots_ = allocateInTospace<HeapSlot>(count);
-    if (!dst->slots_)
-        CrashAtUnhandlableOOM("Failed to allocate slots while moving object.");
-    js_memcpy(dst->slots_, src->slots_, count * sizeof(HeapSlot));
-    setSlotsForwardingPointer(src->slots_, dst->slots_, count);
-    return count * sizeof(HeapSlot);
-}
-
-size_t
-ForkJoinNursery::copyElementsToTospace(NativeObject *dst, NativeObject *src, AllocKind dstKind)
-{
-    if (src->hasEmptyElements() || src->denseElementsAreCopyOnWrite())
-        return 0;
-
-    ObjectElements *srcHeader = src->getElementsHeader();
-    ObjectElements *dstHeader;
-
-    // TODO Bug 874151: Prefer to put element data inline if we have space.
-    // (Note, not a correctness issue.)
-    if (!isInsideFromspace(srcHeader)) {
-        MOZ_ASSERT(src->elements_ == dst->elements_);
-        hugeSlots[hugeSlotsFrom].remove(reinterpret_cast<HeapSlot*>(srcHeader));
-        if (!isEvacuating_)
-            hugeSlots[hugeSlotsNew].put(reinterpret_cast<HeapSlot*>(srcHeader));
-        return 0;
-    }
-
-    size_t nslots = ObjectElements::VALUES_PER_HEADER + srcHeader->capacity;
-
-    // Unlike other objects, Arrays can have fixed elements.
-    if (src->is<ArrayObject>() && nslots <= GetGCKindSlots(dstKind)) {
-        dst->as<ArrayObject>().setFixedElements();
-        dstHeader = dst->as<ArrayObject>().getElementsHeader();
-        js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
-        setElementsForwardingPointer(srcHeader, dstHeader, nslots);
-        return nslots * sizeof(HeapSlot);
-    }
-
-    MOZ_ASSERT(nslots >= 2);
-    dstHeader = reinterpret_cast<ObjectElements *>(allocateInTospace<HeapSlot>(nslots));
-    if (!dstHeader)
-        CrashAtUnhandlableOOM("Failed to allocate elements while moving object.");
-    js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
-    setElementsForwardingPointer(srcHeader, dstHeader, nslots);
-    dst->elements_ = dstHeader->elements();
-    return nslots * sizeof(HeapSlot);
-}
-
-void
-ForkJoinNursery::setSlotsForwardingPointer(HeapSlot *oldSlots, HeapSlot *newSlots, uint32_t nslots)
-{
-    MOZ_ASSERT(nslots > 0);
-    MOZ_ASSERT(isInsideFromspace(oldSlots));
-    MOZ_ASSERT(!isInsideFromspace(newSlots));
-    *reinterpret_cast<HeapSlot **>(oldSlots) = newSlots;
-}
-
-void
-ForkJoinNursery::setElementsForwardingPointer(ObjectElements *oldHeader, ObjectElements *newHeader,
-                                             uint32_t nelems)
-{
-    // If the JIT has hoisted a zero length pointer, then we do not need to
-    // relocate it because reads and writes to/from this pointer are invalid.
-    if (nelems - ObjectElements::VALUES_PER_HEADER < 1)
-        return;
-    MOZ_ASSERT(isInsideFromspace(oldHeader));
-    MOZ_ASSERT(!isInsideFromspace(newHeader));
-    *reinterpret_cast<HeapSlot **>(oldHeader->elements()) = newHeader->elements();
-}
-
-ForkJoinNurseryCollectionTracer::ForkJoinNurseryCollectionTracer(JSRuntime *rt,
-                                                                 ForkJoinNursery *nursery)
-  : JSTracer(rt, ForkJoinNursery::MinorGCCallback, TraceWeakMapKeysValues)
-  , nursery_(nursery)
-{
-    MOZ_ASSERT(rt);
-    MOZ_ASSERT(nursery);
-}
-
-} // namespace gc
-} // namespace js
-
-#endif /* JSGC_FJGENERATIONAL */
deleted file mode 100644
--- a/js/src/gc/ForkJoinNursery.h
+++ /dev/null
@@ -1,297 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef gc_ForkJoinNursery_h
-#define gc_ForkJoinNursery_h
-
-#ifdef JSGC_FJGENERATIONAL
-
-#include "jsalloc.h"
-#include "jspubtd.h"
-
-#include "gc/Heap.h"
-#include "gc/Memory.h"
-#include "gc/Nursery.h"
-
-#include "js/HashTable.h"
-#include "js/TracingAPI.h"
-
-namespace js {
-class ObjectElements;
-class HeapSlot;
-class ForkJoinShared;
-}
-
-namespace js {
-namespace gc {
-
-class ForkJoinGCShared;
-class ForkJoinNursery;
-class ForkJoinNurseryCollectionTracer;
-class RelocationOverlay;
-
-// This tracer comes into play when a class has a tracer function, but
-// is otherwise unused and has no other functionality.
-//
-// It could look like this could be merged into ForkJoinNursery by
-// making the latter derive from JSTracer; I've decided to keep them
-// separate for now, since it allows for multiple instantiations of
-// this class with different parameters, for different purposes.  That
-// may change.
-
-class ForkJoinNurseryCollectionTracer : public JSTracer
-{
-    friend class ForkJoinNursery;
-
-  public:
-    ForkJoinNurseryCollectionTracer(JSRuntime *rt, ForkJoinNursery *nursery);
-
-  private:
-    ForkJoinNursery *const nursery_;
-};
-
-// The layout for a chunk used by the ForkJoinNursery.
-
-struct ForkJoinNurseryChunk
-{
-    // The amount of space in the mapped nursery available to allocations
-    static const size_t UsableSize = ChunkSize - sizeof(ChunkTrailer);
-
-    char data[UsableSize];
-    ChunkTrailer trailer;
-    uintptr_t start() { return uintptr_t(&data); }
-    uintptr_t end() { return uintptr_t(&trailer); }
-};
-
-// A GC adapter to ForkJoinShared, which is a complex class hidden
-// inside ForkJoin.cpp.
-
-class ForkJoinGCShared
-{
-  public:
-    explicit ForkJoinGCShared(ForkJoinShared *shared) : shared_(shared) {}
-
-    JSRuntime *runtime();
-    JS::Zone *zone();
-
-    // The updatable object (the ForkJoin result array), or nullptr.
-    JSObject *updatable();
-
-    // allocateNurseryChunk() returns nullptr on oom.
-    ForkJoinNurseryChunk *allocateNurseryChunk();
-
-    // p must have been obtained through allocateNurseryChunk.
-    void freeNurseryChunk(ForkJoinNurseryChunk *p);
-
-    // GC statistics output.
-    void spewGC(const char *fmt, ...);
-
-  private:
-    ForkJoinShared *const shared_;
-};
-
-// There is one ForkJoinNursery per ForkJoin worker.
-//
-// See the comment in ForkJoinNursery.cpp about how it works.
-
-class ForkJoinNursery
-{
-    friend class ForkJoinNurseryCollectionTracer;
-    friend class RelocationOverlay;
-
-    static_assert(sizeof(ForkJoinNurseryChunk) == ChunkSize,
-                  "ForkJoinNursery chunk size must match Chunk size.");
-  public:
-    ForkJoinNursery(ForkJoinContext *cx, ForkJoinGCShared *shared, Allocator *tenured);
-    ~ForkJoinNursery();
-
-    // Attempt to allocate initial storage, returns false on failure
-    bool initialize();
-
-    // Perform a collection within the nursery, and if that for some reason
-    // cannot be done then perform an evacuating collection.
-    void minorGC();
-
-    // Evacuate the live data from the nursery into the tenured area;
-    // do not recreate the nursery.
-    void evacuatingGC();
-
-    // Allocate an object with a number of dynamic slots.  Returns an
-    // object, or nullptr in one of two circumstances:
-    //
-    //  - The nursery was full, the collector must be run, and the
-    //    allocation must be retried.  tooLarge is set to 'false'.
-    //  - The number of dynamic slots requested is too large and
-    //    the object should be allocated in the tenured area.
-    //    tooLarge is set to 'true'.
-    //
-    // This method will never run the garbage collector.
-    JSObject *allocateObject(size_t size, size_t numDynamic, bool& tooLarge);
-
-    // Allocate and reallocate slot and element arrays for existing
-    // objects.  These will create or maintain the arrays within the
-    // nursery if possible and appropriate, and otherwise will fall
-    // back to allocating in the tenured area.  They will return
-    // nullptr only if memory is exhausted.  If the reallocate methods
-    // return nullptr then the old array is still live.
-    //
-    // These methods will never run the garbage collector.
-    HeapSlot *allocateSlots(JSObject *obj, uint32_t nslots);
-    HeapSlot *reallocateSlots(JSObject *obj, HeapSlot *oldSlots,
-                              uint32_t oldCount, uint32_t newCount);
-    ObjectElements *allocateElements(JSObject *obj, uint32_t nelems);
-    ObjectElements *reallocateElements(JSObject *obj, ObjectElements *oldHeader,
-                                       uint32_t oldCount, uint32_t newCount);
-
-    // Free a slots array.
-    void freeSlots(HeapSlot *slots);
-
-    // The method embedded in a ForkJoinNurseryCollectionTracer
-    static void MinorGCCallback(JSTracer *trcArg, void **thingp, JSGCTraceKind kind);
-
-    // A method called from the JIT frame updater
-    static void forwardBufferPointer(JSTracer *trc, HeapSlot **pSlotsElems);
-
-    // Return true iff obj is inside the current newspace.
-    MOZ_ALWAYS_INLINE bool isInsideNewspace(const void *obj);
-
-    // Return true iff collection is ongoing and obj is inside the current fromspace.
-    MOZ_ALWAYS_INLINE bool isInsideFromspace(const void *obj);
-
-    MOZ_ALWAYS_INLINE bool isForwarded(Cell *cell);
-
-    template <typename T>
-    MOZ_ALWAYS_INLINE bool getForwardedPointer(T **ref);
-
-    static size_t offsetOfPosition() {
-        return offsetof(ForkJoinNursery, position_);
-    }
-
-    static size_t offsetOfCurrentEnd() {
-        return offsetof(ForkJoinNursery, currentEnd_);
-    }
-
-  private:
-    // The largest slot arrays that will be allocated in the nursery.
-    // On the one hand we want this limit to be large, to avoid
-    // managing many hugeSlots.  On the other hand, slot arrays have
-    // to be copied during GC and will induce some external
-    // fragmentation in the nursery at chunk boundaries.
-    static const size_t MaxNurserySlots = 2048;
-
-    // The fixed limit on the per-worker nursery, in chunks.
-    //
-    // For production runs, 16 may be good - programs that need it,
-    // really need it, and as allocation is lazy programs that don't
-    // need it won't suck up a lot of resources.
-    //
-    // For debugging runs, 1 or 2 may sometimes be good, because it
-    // will more easily provoke bugs in the evacuation paths.
-    static const size_t MaxNurseryChunks = 16;
-
-    // The inverse load factor in the per-worker nursery.  Grow the nursery
-    // or schedule an evacuation if more than 1/NurseryLoadFactor of the
-    // current nursery size is live after minor GC.
-    static const int NurseryLoadFactor = 3;
-
-    // Allocate an object in the nursery's newspace.  Return nullptr
-    // when allocation fails (ie the object can't fit in the current
-    // chunk and the number of chunks it at its maximum).
-    void *allocate(size_t size);
-
-    // Allocate an external slot array and register it with this nursery.
-    HeapSlot *allocateHugeSlots(JSObject *obj, size_t nslots);
-
-    // Reallocate an external slot array, unregister the old array and
-    // register the new array.  If the allocation fails then leave
-    // everything unchanged.
-    HeapSlot *reallocateHugeSlots(JSObject *obj, HeapSlot *oldSlots,
-                                  uint32_t oldCount, uint32_t newCount);
-
-    // Walk the list of registered slot arrays and free them all.
-    void sweepHugeSlots();
-
-    // Set the position/end pointers to correspond to the numbered
-    // chunk.  Returns false if the chunk could not be allocated, either
-    // because we're OOM or because the nursery capacity is exhausted.
-    bool setCurrentChunk(int index);
-
-    enum PJSCollectionOp {
-        Evacuate = 1,
-        Collect = 2,
-        Recreate = 4
-    };
-
-    // Misc GC internals.
-    void pjsCollection(int op /* A combination of PJSCollectionOp bits */);
-    bool initNewspace();
-    void flip();
-    void forwardFromRoots(ForkJoinNurseryCollectionTracer *trc);
-    void forwardFromUpdatable(ForkJoinNurseryCollectionTracer *trc);
-    void forwardFromStack(ForkJoinNurseryCollectionTracer *trc);
-    void forwardFromTenured(ForkJoinNurseryCollectionTracer *trc);
-    void forwardFromRematerializedFrames(ForkJoinNurseryCollectionTracer *trc);
-    void collectToFixedPoint(ForkJoinNurseryCollectionTracer *trc);
-    void freeFromspace();
-    void computeNurserySizeAfterGC(size_t live, const char **msg);
-
-    AllocKind getObjectAllocKind(JSObject *src);
-    void *allocateInTospaceInfallible(size_t thingSize);
-    void *allocateInTospace(AllocKind thingKind);
-    template <typename T> T *allocateInTospace(size_t nelem);
-    MOZ_ALWAYS_INLINE bool shouldMoveObject(void **thingp);
-    void *moveObjectToTospace(JSObject *src);
-    size_t copyObjectToTospace(JSObject *dst, JSObject *src, gc::AllocKind dstKind);
-    size_t copyElementsToTospace(NativeObject *dst, NativeObject *src, gc::AllocKind dstKind);
-    size_t copySlotsToTospace(NativeObject *dst, NativeObject *src, gc::AllocKind dstKind);
-    MOZ_ALWAYS_INLINE void insertIntoFixupList(RelocationOverlay *entry);
-
-    void setSlotsForwardingPointer(HeapSlot *oldSlots, HeapSlot *newSlots, uint32_t nslots);
-    void setElementsForwardingPointer(ObjectElements *oldHeader, ObjectElements *newHeader,
-                                      uint32_t nelems);
-
-    MOZ_ALWAYS_INLINE void traceObject(ForkJoinNurseryCollectionTracer *trc, JSObject *obj);
-    MOZ_ALWAYS_INLINE void markSlots(HeapSlot *vp, uint32_t nslots);
-    MOZ_ALWAYS_INLINE void markSlots(HeapSlot *vp, HeapSlot *end);
-    MOZ_ALWAYS_INLINE void markSlot(HeapSlot *slotp);
-
-    ForkJoinContext *const cx_;      // The context that owns this nursery
-    Allocator *const tenured_;       // Private tenured area
-    ForkJoinGCShared *const shared_; // Common to all nurseries belonging to a ForkJoin instance
-    JS::Zone *evacuationZone_;       // During evacuating GC this is non-NULL: the Zone we
-                                     // allocate into
-
-    uintptr_t currentStart_;         // Start of current area in newspace
-    uintptr_t currentEnd_;           // End of current area in newspace (last byte + 1)
-    uintptr_t position_;             // Next free byte in current newspace chunk
-    unsigned currentChunk_;          // Index of current / highest numbered chunk in newspace
-    unsigned numActiveChunks_;       // Number of active chunks in newspace, not all may be allocated
-    unsigned numFromspaceChunks_;    // Number of active chunks in fromspace, all are allocated
-    bool mustEvacuate_;              // Set to true after GC when the /next/ minor GC must evacuate
-
-    bool isEvacuating_;              // Set to true when the current minor GC is evacuating
-    size_t movedSize_;               // Bytes copied during the current minor GC
-    RelocationOverlay *head_;        // First node of relocation list
-    RelocationOverlay **tail_;       // Pointer to 'next_' field of last node of relocation list
-
-    typedef HashSet<HeapSlot *, PointerHasher<HeapSlot *, 3>, SystemAllocPolicy> HugeSlotsSet;
-
-    HugeSlotsSet hugeSlots[2];       // Hash sets for huge slots
-
-    int hugeSlotsNew;                // Huge slot arrays in the newspace (index in hugeSlots)
-    int hugeSlotsFrom;               // Huge slot arrays in the fromspace (index in hugeSlots)
-
-    ForkJoinNurseryChunk *newspace[MaxNurseryChunks];  // All allocation happens here
-    ForkJoinNurseryChunk *fromspace[MaxNurseryChunks]; // Meaningful during GC: the previous newspace
-};
-
-} // namespace gc
-} // namespace js
-
-#endif // JSGC_FJGENERATIONAL
-
-#endif // gc_ForkJoinNursery_h
--- a/js/src/gc/GCInternals.h
+++ b/js/src/gc/GCInternals.h
@@ -14,23 +14,16 @@
 #include "vm/Runtime.h"
 
 namespace js {
 namespace gc {
 
 void
 MarkPersistentRootedChains(JSTracer *trc);
 
-#ifdef JSGC_FJGENERATIONAL
-class ForkJoinNurseryCollectionTracer;
-
-void
-MarkForkJoinStack(ForkJoinNurseryCollectionTracer *trc);
-#endif
-
 class AutoCopyFreeListToArenas
 {
     JSRuntime *runtime;
     ZoneSelector selector;
 
   public:
     AutoCopyFreeListToArenas(JSRuntime *rt, ZoneSelector selector);
     ~AutoCopyFreeListToArenas();
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -296,25 +296,16 @@ class GCRuntime
     bool isHeapMinorCollecting() { return heapState == js::MinorCollecting; }
     bool isHeapCollecting() { return isHeapMajorCollecting() || isHeapMinorCollecting(); }
 #ifdef JSGC_COMPACTING
     bool isHeapCompacting() { return isHeapMajorCollecting() && state() == COMPACT; }
 #else
     bool isHeapCompacting() { return false; }
 #endif
 
-    // Performance note: if isFJMinorCollecting turns out to be slow because
-    // reading the counter is slow then we may be able to augment the counter
-    // with a volatile flag that is set iff the counter is greater than
-    // zero. (It will require some care to make sure the two variables stay in
-    // sync.)
-    bool isFJMinorCollecting() { return fjCollectionCounter > 0; }
-    void incFJMinorCollecting() { fjCollectionCounter++; }
-    void decFJMinorCollecting() { fjCollectionCounter--; }
-
     bool triggerGC(JS::gcreason::Reason reason);
     void maybeAllocTriggerZoneGC(Zone *zone, const AutoLockGC &lock);
     bool triggerZoneGC(Zone *zone, JS::gcreason::Reason reason);
     bool maybeGC(Zone *zone);
     void maybePeriodicFullGC();
     void minorGC(JS::gcreason::Reason reason) {
         gcstats::AutoPhase ap(stats, gcstats::PHASE_MINOR_GC);
         minorGCImpl(reason, nullptr);
@@ -843,26 +834,16 @@ class GCRuntime
      */
     unsigned objectsMarkedInDeadZones;
 
     bool poked;
 
     mozilla::Atomic<js::HeapState> heapState;
 
     /*
-     * ForkJoin workers enter and leave GC independently; this counter
-     * tracks the number that are currently in GC.
-     *
-     * Technically this should be #ifdef JSGC_FJGENERATIONAL but that
-     * affects the observed size of JSRuntime in problematic ways, see
-     * note in vm/ThreadPool.h.
-     */
-    mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> fjCollectionCounter;
-
-    /*
      * These options control the zealousness of the GC. The fundamental values
      * are nextScheduled and gcDebugCompartmentGC. At every allocation,
      * nextScheduled is decremented. When it reaches zero, we do either a full
      * or a compartmental GC, based on debugCompartmentGC.
      *
      * At this point, if zeal_ is one of the types that trigger periodic
      * collection, then nextScheduled is reset to the value of zealFrequency.
      * Otherwise, no additional GCs take place.
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -159,26 +159,16 @@ CheckMarkedThing(JSTracer *trc, T **thin
 
     T *thing = *thingp;
     MOZ_ASSERT(*thingp);
 
 #ifdef JSGC_COMPACTING
     thing = MaybeForwarded(thing);
 #endif
 
-# ifdef JSGC_FJGENERATIONAL
-    /*
-     * The code below (runtimeFromMainThread(), etc) makes assumptions
-     * not valid for the ForkJoin worker threads during ForkJoin GGC,
-     * so just bail.
-     */
-    if (ForkJoinContext::current())
-        return;
-# endif
-
     /* This function uses data that's not available in the nursery. */
     if (IsInsideNursery(thing))
         return;
 
 #ifdef JSGC_COMPACTING
     MOZ_ASSERT_IF(!MovingTracer::IsMovingTracer(trc) && !Nursery::IsMinorCollectionTracer(trc),
                   !IsForwarded(*thingp));
 #endif
@@ -271,26 +261,16 @@ SetMaybeAliveFlag(JSScript *thing)
 template<typename T>
 static void
 MarkInternal(JSTracer *trc, T **thingp)
 {
     CheckMarkedThing(trc, thingp);
     T *thing = *thingp;
 
     if (!trc->callback) {
-#ifdef JSGC_FJGENERATIONAL
-        /*
-         * This case should never be reached from PJS collections as
-         * those should all be using a ForkJoinNurseryCollectionTracer
-         * that carries a callback.
-         */
-        MOZ_ASSERT(!ForkJoinContext::current());
-        MOZ_ASSERT(!trc->runtime()->isFJMinorCollecting());
-#endif
-
         /*
          * We may mark a Nursery thing outside the context of the
          * MinorCollectionTracer because of a pre-barrier. The pre-barrier is
          * not needed in this case because we perform a minor collection before
          * each incremental slice.
          */
         if (IsInsideNursery(thing))
             return;
@@ -442,32 +422,20 @@ IsMarked(T **thingp)
 
 template <typename T>
 static bool
 IsMarkedFromAnyThread(T **thingp)
 {
     MOZ_ASSERT(thingp);
     MOZ_ASSERT(*thingp);
     JSRuntime* rt = (*thingp)->runtimeFromAnyThread();
-#ifdef JSGC_FJGENERATIONAL
-    // Must precede the case for GGC because IsInsideNursery()
-    // will also be true for the ForkJoinNursery.
-    if (rt->isFJMinorCollecting()) {
-        ForkJoinContext *ctx = ForkJoinContext::current();
-        ForkJoinNursery &nursery = ctx->nursery();
-        if (nursery.isInsideFromspace(*thingp))
-            return nursery.getForwardedPointer(thingp);
-    }
-    else
-#endif
-    {
-        if (IsInsideNursery(*thingp)) {
-            Nursery &nursery = rt->gc.nursery;
-            return nursery.getForwardedPointer(thingp);
-        }
+
+    if (IsInsideNursery(*thingp)) {
+        Nursery &nursery = rt->gc.nursery;
+        return nursery.getForwardedPointer(thingp);
     }
 
     Zone *zone = (*thingp)->asTenured().zoneFromAnyThread();
     if (!zone->isCollectingFromAnyThread() || zone->isGCFinished())
         return true;
 #ifdef JSGC_COMPACTING
     if (zone->isGCCompacting() && IsForwarded(*thingp))
         *thingp = Forwarded(*thingp);
@@ -493,33 +461,22 @@ IsAboutToBeFinalizedFromAnyThread(T **th
 
     T *thing = *thingp;
     JSRuntime *rt = thing->runtimeFromAnyThread();
 
     /* Permanent atoms are never finalized by non-owning runtimes. */
     if (ThingIsPermanentAtom(thing) && !TlsPerThreadData.get()->associatedWith(rt))
         return false;
 
-#ifdef JSGC_FJGENERATIONAL
-    if (rt->isFJMinorCollecting()) {
-        ForkJoinContext *ctx = ForkJoinContext::current();
-        ForkJoinNursery &nursery = ctx->nursery();
-        if (nursery.isInsideFromspace(thing))
+    Nursery &nursery = rt->gc.nursery;
+    MOZ_ASSERT_IF(!rt->isHeapMinorCollecting(), !IsInsideNursery(thing));
+    if (rt->isHeapMinorCollecting()) {
+        if (IsInsideNursery(thing))
             return !nursery.getForwardedPointer(thingp);
-    }
-    else
-#endif
-    {
-        Nursery &nursery = rt->gc.nursery;
-        MOZ_ASSERT_IF(!rt->isHeapMinorCollecting(), !IsInsideNursery(thing));
-        if (rt->isHeapMinorCollecting()) {
-            if (IsInsideNursery(thing))
-                return !nursery.getForwardedPointer(thingp);
-            return false;
-        }
+        return false;
     }
 
     Zone *zone = thing->asTenured().zoneFromAnyThread();
     if (zone->isGCSweeping()) {
         if (thing->asTenured().arenaHeader()->allocatedDuringIncremental)
             return false;
         return !thing->asTenured().isMarked();
     }
@@ -536,26 +493,16 @@ IsAboutToBeFinalizedFromAnyThread(T **th
 template <typename T>
 T *
 UpdateIfRelocated(JSRuntime *rt, T **thingp)
 {
     MOZ_ASSERT(thingp);
     if (!*thingp)
         return nullptr;
 
-#ifdef JSGC_FJGENERATIONAL
-    if (rt->isFJMinorCollecting()) {
-        ForkJoinContext *ctx = ForkJoinContext::current();
-        ForkJoinNursery &nursery = ctx->nursery();
-        if (nursery.isInsideFromspace(*thingp))
-            nursery.getForwardedPointer(thingp);
-        return *thingp;
-    }
-#endif
-
     if (rt->isHeapMinorCollecting() && IsInsideNursery(*thingp)) {
         rt->gc.nursery.getForwardedPointer(thingp);
         return *thingp;
     }
 
 #ifdef JSGC_COMPACTING
     Zone *zone = (*thingp)->zone();
     if (zone->isGCCompacting() && IsForwarded(*thingp))
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -31,17 +31,16 @@ class ObjectElements;
 class NativeObject;
 class HeapSlot;
 void SetGCZeal(JSRuntime *, uint8_t, uint32_t);
 
 namespace gc {
 struct Cell;
 class Collector;
 class MinorCollectionTracer;
-class ForkJoinNursery;
 } /* namespace gc */
 
 namespace types {
 struct TypeObject;
 }
 
 namespace jit {
 class CodeGenerator;
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -13,17 +13,16 @@
 #include "jscntxt.h"
 #include "jsgc.h"
 #include "jsprf.h"
 #include "jstypes.h"
 #include "jswatchpoint.h"
 
 #include "builtin/MapObject.h"
 #include "frontend/BytecodeCompiler.h"
-#include "gc/ForkJoinNursery.h"
 #include "gc/GCInternals.h"
 #include "gc/Marking.h"
 #include "jit/MacroAssembler.h"
 #include "js/HashTable.h"
 #include "vm/Debugger.h"
 #include "vm/JSONParser.h"
 #include "vm/PropDesc.h"
 
@@ -115,24 +114,16 @@ MarkExactStackRootsAcrossTypes(T context
     MarkExactStackRootList<Value, MarkValueRoot>(trc, context, "exact-value");
     MarkExactStackRootList<types::Type, MarkTypeRoot>(trc, context, "types::Type");
     MarkExactStackRootList<Bindings, MarkBindingsRoot>(trc, context, "Bindings");
     MarkExactStackRootList<JSPropertyDescriptor, MarkPropertyDescriptorRoot>(
         trc, context, "JSPropertyDescriptor");
     MarkExactStackRootList<PropDesc, MarkPropDescRoot>(trc, context, "PropDesc");
 }
 
-#ifdef JSGC_FJGENERATIONAL
-static void
-MarkExactStackRoots(ThreadSafeContext* cx, JSTracer *trc)
-{
-    MarkExactStackRootsAcrossTypes<ThreadSafeContext*>(cx, trc);
-}
-#endif
-
 static void
 MarkExactStackRoots(JSRuntime* rt, JSTracer *trc)
 {
     for (ContextIter cx(rt); !cx.done(); cx.next())
         MarkExactStackRootsAcrossTypes<ThreadSafeContext*>(cx.get(), trc);
     MarkExactStackRootsAcrossTypes<PerThreadData*>(&rt->mainThread, trc);
 }
 
@@ -414,37 +405,16 @@ js::gc::MarkPersistentRootedChains(JSTra
 
     // Mark the PersistentRooted chains of types that are never null.
     PersistentRootedMarker<jsid>::markChain<MarkIdRoot>(trc, rt->idPersistentRooteds,
                                                         "PersistentRooted<jsid>");
     PersistentRootedMarker<Value>::markChain<MarkValueRoot>(trc, rt->valuePersistentRooteds,
                                                             "PersistentRooted<Value>");
 }
 
-#ifdef JSGC_FJGENERATIONAL
-void
-js::gc::MarkForkJoinStack(ForkJoinNurseryCollectionTracer *trc)
-{
-    ForkJoinContext *cx = ForkJoinContext::current();
-    PerThreadData *ptd = cx->perThreadData;
-
-    AutoGCRooter::traceAllInContext(cx, trc);
-    MarkExactStackRoots(cx, trc);
-    jit::MarkJitActivations(ptd, trc);
-
-#ifdef DEBUG
-    // There should be only JIT activations on the stack
-    for (ActivationIterator iter(ptd); !iter.done(); ++iter) {
-        Activation *act = iter.activation();
-        MOZ_ASSERT(act->isJit());
-    }
-#endif
-}
-#endif  // JSGC_FJGENERATIONAL
-
 void
 js::gc::GCRuntime::markRuntime(JSTracer *trc,
                                TraceOrMarkRuntime traceOrMark,
                                TraceRootsOrUsedSaved rootsSource)
 {
     gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
 
     MOZ_ASSERT(trc->callback != GCMarker::GrayCallback);
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -4512,17 +4512,16 @@ CodeGenerator::visitNewPar(LNewPar *lir)
     Register objReg = ToRegister(lir->output());
     Register cxReg = ToRegister(lir->forkJoinContext());
     Register tempReg1 = ToRegister(lir->getTemp0());
     Register tempReg2 = ToRegister(lir->getTemp1());
     NativeObject *templateObject = lir->mir()->templateObject();
     emitAllocateGCThingPar(lir, objReg, cxReg, tempReg1, tempReg2, templateObject);
 }
 
-#ifndef JSGC_FJGENERATIONAL
 class OutOfLineNewGCThingPar : public OutOfLineCodeBase<CodeGenerator>
 {
 public:
     LInstruction *lir;
     gc::AllocKind allocKind;
     Register objReg;
     Register cxReg;
 
@@ -4530,44 +4529,37 @@ public:
                            Register cxReg)
       : lir(lir), allocKind(allocKind), objReg(objReg), cxReg(cxReg)
     {}
 
     void accept(CodeGenerator *codegen) {
         codegen->visitOutOfLineNewGCThingPar(this);
     }
 };
-#endif // JSGC_FJGENERATIONAL
 
 typedef JSObject *(*NewGCThingParFn)(ForkJoinContext *, js::gc::AllocKind allocKind);
 static const VMFunction NewGCThingParInfo =
     FunctionInfo<NewGCThingParFn>(NewGCThingPar);
 
 void
 CodeGenerator::emitAllocateGCThingPar(LInstruction *lir, Register objReg, Register cxReg,
                                       Register tempReg1, Register tempReg2, NativeObject *templateObj)
 {
     MOZ_ASSERT(lir->mirRaw());
     MOZ_ASSERT(lir->mirRaw()->isInstruction());
 
     gc::AllocKind allocKind = templateObj->asTenured().getAllocKind();
-#ifdef JSGC_FJGENERATIONAL
-    OutOfLineCode *ool = oolCallVM(NewGCThingParInfo, lir,
-                                   (ArgList(), Imm32(allocKind)), StoreRegisterTo(objReg));
-#else
     OutOfLineNewGCThingPar *ool = new(alloc()) OutOfLineNewGCThingPar(lir, allocKind, objReg, cxReg);
     addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
-#endif
 
     masm.newGCThingPar(objReg, cxReg, tempReg1, tempReg2, templateObj, ool->entry());
     masm.bind(ool->rejoin());
     masm.initGCThing(objReg, tempReg1, templateObj);
 }
 
-#ifndef JSGC_FJGENERATIONAL
 void
 CodeGenerator::visitOutOfLineNewGCThingPar(OutOfLineNewGCThingPar *ool)
 {
     // As a fallback for allocation in par. exec. mode, we invoke the
     // C helper NewGCThingPar(), which calls into the GC code.  If it
     // returns nullptr, we bail.  If returns non-nullptr, we rejoin the
     // original instruction.
     Register out = ool->objReg;
@@ -4578,17 +4570,16 @@ CodeGenerator::visitOutOfLineNewGCThingP
     masm.move32(Imm32(ool->allocKind), out);
     masm.passABIArg(out);
     masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, NewGCThingPar));
     masm.storeCallResult(out);
     restoreVolatile(out);
 
     bailoutTestPtr(Assembler::Zero, out, out, ool->lir->snapshot());
 }
-#endif // JSGC_FJGENERATIONAL
 
 typedef bool(*InitElemFn)(JSContext *cx, HandleObject obj,
                           HandleValue id, HandleValue value);
 static const VMFunction InitElemInfo =
     FunctionInfo<InitElemFn>(InitElemOperation);
 
 void
 CodeGenerator::visitInitElem(LInitElem *lir)
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -6,17 +6,16 @@
 
 #include "jit/JitFrames-inl.h"
 
 #include "jsfun.h"
 #include "jsinfer.h"
 #include "jsobj.h"
 #include "jsscript.h"
 
-#include "gc/ForkJoinNursery.h"
 #include "gc/Marking.h"
 #include "jit/BaselineDebugModeOSR.h"
 #include "jit/BaselineFrame.h"
 #include "jit/BaselineIC.h"
 #include "jit/BaselineJIT.h"
 #include "jit/Ion.h"
 #include "jit/JitcodeMap.h"
 #include "jit/JitCompartment.h"
@@ -1153,22 +1152,16 @@ UpdateIonJSFrameForMinorGC(JSTracer *trc
     while (safepoint.getValueSlot(&slot));
 #ifdef JS_NUNBOX32
     LAllocation type, payload;
     while (safepoint.getNunboxSlot(&type, &payload));
 #endif
 
     while (safepoint.getSlotsOrElementsSlot(&slot)) {
         HeapSlot **slots = reinterpret_cast<HeapSlot **>(layout->slotRef(slot));
-#ifdef JSGC_FJGENERATIONAL
-        if (trc->callback == gc::ForkJoinNursery::MinorGCCallback) {
-            gc::ForkJoinNursery::forwardBufferPointer(trc, slots);
-            continue;
-        }
-#endif
         trc->runtime()->gc.nursery.forwardBufferPointer(slots);
     }
 }
 
 static void
 MarkBaselineStubFrame(JSTracer *trc, const JitFrameIterator &frame)
 {
     // Mark the ICStub pointer stored in the stub frame. This is necessary
@@ -1484,37 +1477,28 @@ TopmostIonActivationCompartment(JSRuntim
         }
     }
     return nullptr;
 }
 
 template <typename T>
 void UpdateJitActivationsForMinorGC(PerThreadData *ptd, JSTracer *trc)
 {
-#ifdef JSGC_FJGENERATIONAL
-    MOZ_ASSERT(trc->runtime()->isHeapMinorCollecting() || trc->runtime()->isFJMinorCollecting());
-#else
     MOZ_ASSERT(trc->runtime()->isHeapMinorCollecting());
-#endif
     for (JitActivationIterator activations(ptd); !activations.done(); ++activations) {
         for (JitFrameIterator frames(activations); !frames.done(); ++frames) {
             if (frames.type() == JitFrame_IonJS)
                 UpdateIonJSFrameForMinorGC<T>(trc, frames);
         }
     }
 }
 
 template
 void UpdateJitActivationsForMinorGC<Nursery>(PerThreadData *ptd, JSTracer *trc);
 
-#ifdef JSGC_FJGENERATIONAL
-template
-void UpdateJitActivationsForMinorGC<gc::ForkJoinNursery>(PerThreadData *ptd, JSTracer *trc);
-#endif
-
 void
 GetPcScript(JSContext *cx, JSScript **scriptRes, jsbytecode **pcRes)
 {
     JitSpew(JitSpew_IonSnapshots, "Recover PC & Script from the last frame.");
 
     JSRuntime *rt = cx->runtime();
 
     // Recover the return address.
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -834,63 +834,28 @@ MacroAssembler::newGCFatInlineString(Reg
 {
     allocateNonObject(result, temp, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
 }
 
 void
 MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                               gc::AllocKind allocKind, Label *fail)
 {
-#ifdef JSGC_FJGENERATIONAL
-    if (IsNurseryAllocable(allocKind))
-        return newGCNurseryThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
-#endif
     return newGCTenuredThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
 }
 
-#ifdef JSGC_FJGENERATIONAL
-void
-MacroAssembler::newGCNurseryThingPar(Register result, Register cx,
-                                     Register tempReg1, Register tempReg2,
-                                     gc::AllocKind allocKind, Label *fail)
-{
-    MOZ_ASSERT(IsNurseryAllocable(allocKind));
-
-    uint32_t thingSize = uint32_t(gc::Arena::thingSize(allocKind));
-
-    // Correctness depends on thingSize being smaller than a chunk
-    // (not a problem) and the last chunk of the nursery not being
-    // located at the very top of the address space.  The regular
-    // Nursery makes the same assumption, see nurseryAllocate() above.
-
-    // The ForkJoinNursery is a member variable of the ForkJoinContext.
-    size_t offsetOfPosition =
-        ForkJoinContext::offsetOfFJNursery() + gc::ForkJoinNursery::offsetOfPosition();
-    size_t offsetOfEnd =
-        ForkJoinContext::offsetOfFJNursery() + gc::ForkJoinNursery::offsetOfCurrentEnd();
-    loadPtr(Address(cx, offsetOfPosition), result);
-    loadPtr(Address(cx, offsetOfEnd), tempReg2);
-    computeEffectiveAddress(Address(result, thingSize), tempReg1);
-    branchPtr(Assembler::Below, tempReg2, tempReg1, fail);
-    storePtr(tempReg1, Address(cx, offsetOfPosition));
-}
-#endif
-
 void
 MacroAssembler::newGCTenuredThingPar(Register result, Register cx,
                                      Register tempReg1, Register tempReg2,
                                      gc::AllocKind allocKind, Label *fail)
 {
     // Similar to ::newGCThing(), except that it allocates from a custom
     // Allocator in the ForkJoinContext*, rather than being hardcoded to the
     // compartment allocator.  This requires two temporary registers.
     //
-    // When the ForkJoin generational collector is enabled this is only used
-    // for those object types that cannot be allocated in the ForkJoinNursery.
-    //
     // Subtle: I wanted to reuse `result` for one of the temporaries, but the
     // register allocator was assigning it to the same register as `cx`.
     // Then we overwrite that register which messed up the OOL code.
 
     uint32_t thingSize = (uint32_t)gc::Arena::thingSize(allocKind);
 
     // Load the allocator:
     // tempReg1 = (Allocator*) forkJoinCx->allocator()
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -827,20 +827,16 @@ class MacroAssembler : public MacroAssem
     void initGCThing(Register obj, Register temp, JSObject *templateObj,
                      bool initFixedSlots = true);
 
     void newGCString(Register result, Register temp, Label *fail);
     void newGCFatInlineString(Register result, Register temp, Label *fail);
 
     void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                        gc::AllocKind allocKind, Label *fail);
-#ifdef JSGC_FJGENERATIONAL
-    void newGCNurseryThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
-                              gc::AllocKind allocKind, Label *fail);
-#endif
     void newGCTenuredThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                               gc::AllocKind allocKind, Label *fail);
     void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                        NativeObject *templateObject, Label *fail);
     void newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                         Label *fail);
     void newGCFatInlineStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                                  Label *fail);
--- a/js/src/jit/ParallelFunctions.cpp
+++ b/js/src/jit/ParallelFunctions.cpp
@@ -37,21 +37,17 @@ jit::ForkJoinContextPar()
 
 // NewGCThingPar() is called in place of NewGCThing() when executing
 // parallel code.  It uses the ArenaLists for the current thread and
 // allocates from there.
 JSObject *
 jit::NewGCThingPar(ForkJoinContext *cx, gc::AllocKind allocKind)
 {
     MOZ_ASSERT(ForkJoinContext::current() == cx);
-#ifdef JSGC_FJGENERATIONAL
-    return js::NewGCObject<CanGC>(cx, allocKind, 0, gc::DefaultHeap);
-#else
     return js::NewGCObject<NoGC>(cx, allocKind, 0, gc::TenuredHeap);
-#endif
 }
 
 bool
 jit::ParallelWriteGuard(ForkJoinContext *cx, JSObject *object)
 {
     // Implements the most general form of the write guard, which is
     // suitable for writes to any object O. There are two cases to
     // consider and test for:
--- a/js/src/jscntxtinlines.h
+++ b/js/src/jscntxtinlines.h
@@ -15,18 +15,16 @@
 #include "builtin/Object.h"
 #include "jit/JitFrames.h"
 #include "vm/ForkJoin.h"
 #include "vm/HelperThreads.h"
 #include "vm/Interpreter.h"
 #include "vm/ProxyObject.h"
 #include "vm/Symbol.h"
 
-#include "gc/ForkJoinNursery-inl.h"
-
 namespace js {
 
 #ifdef JS_CRASH_DIAGNOSTICS
 class CompartmentChecker
 {
     JSCompartment *compartment;
 
   public:
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -1064,25 +1064,17 @@ GCRuntime::allocateArena(Chunk *chunk, Z
 {
     MOZ_ASSERT(chunk->hasAvailableArenas());
 
     // Fail the allocation if we are over our heap size limits.
     if (!isHeapMinorCollecting() &&
         !isHeapCompacting() &&
         usage.gcBytes() >= tunables.gcMaxBytes())
     {
-#ifdef JSGC_FJGENERATIONAL
-        // This is an approximation to the best test, which would check that
-        // this thread is currently promoting into the tenured area.  I doubt
-        // the better test would make much difference.
-        if (!isFJMinorCollecting())
-            return nullptr;
-#else
         return nullptr;
-#endif
     }
 
     ArenaHeader *aheader = chunk->allocateArena(rt, zone, thingKind, lock);
     zone->usage.addGCArena();
 
     // Trigger an incremental slice if needed.
     if (!isHeapMinorCollecting() && !isHeapCompacting())
         maybeAllocTriggerZoneGC(zone, lock);
@@ -3430,20 +3422,16 @@ GCRuntime::decommitArenas(AutoLockGC &lo
         }
     }
     MOZ_ASSERT(availableChunks(lock).verify());
 }
 
 void
 GCRuntime::expireChunksAndArenas(bool shouldShrink, AutoLockGC &lock)
 {
-#ifdef JSGC_FJGENERATIONAL
-    rt->threadPool.pruneChunkCache();
-#endif
-
     ChunkPool toFree = expireEmptyChunkPool(shouldShrink, lock);
     if (toFree.count()) {
         AutoUnlockGC unlock(lock);
         FreeChunkPool(rt, toFree);
     }
 
     if (shouldShrink)
         decommitArenas(lock);
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -21,20 +21,16 @@
 #include "js/Vector.h"
 
 #include "vm/NativeObject.h"
 
 namespace js {
 
 class AutoLockGC;
 
-namespace gc {
-class ForkJoinNursery;
-}
-
 unsigned GetCPUCount();
 
 enum HeapState {
     Idle,             // doing nothing with the GC heap
     Tracing,          // tracing the GC heap without collecting, e.g. IterateCompartments()
     MajorCollecting,  // doing a GC of the major heap
     MinorCollecting   // doing a GC of the minor heap (nursery)
 };
@@ -103,54 +99,16 @@ IsNurseryAllocable(AllocKind kind)
         false,     /* FINALIZE_EXTERNAL_STRING */
         false,     /* FINALIZE_SYMBOL */
         false,     /* FINALIZE_JITCODE */
     };
     JS_STATIC_ASSERT(JS_ARRAY_LENGTH(map) == FINALIZE_LIMIT);
     return map[kind];
 }
 
-#if defined(JSGC_FJGENERATIONAL)
-// This is separate from IsNurseryAllocable() so that the latter can evolve
-// without worrying about what the ForkJoinNursery's needs are, and vice
-// versa to some extent.
-static inline bool
-IsFJNurseryAllocable(AllocKind kind)
-{
-    MOZ_ASSERT(kind >= 0 && unsigned(kind) < FINALIZE_LIMIT);
-    static const bool map[] = {
-        false,     /* FINALIZE_OBJECT0 */
-        true,      /* FINALIZE_OBJECT0_BACKGROUND */
-        false,     /* FINALIZE_OBJECT2 */
-        true,      /* FINALIZE_OBJECT2_BACKGROUND */
-        false,     /* FINALIZE_OBJECT4 */
-        true,      /* FINALIZE_OBJECT4_BACKGROUND */
-        false,     /* FINALIZE_OBJECT8 */
-        true,      /* FINALIZE_OBJECT8_BACKGROUND */
-        false,     /* FINALIZE_OBJECT12 */
-        true,      /* FINALIZE_OBJECT12_BACKGROUND */
-        false,     /* FINALIZE_OBJECT16 */
-        true,      /* FINALIZE_OBJECT16_BACKGROUND */
-        false,     /* FINALIZE_SCRIPT */
-        false,     /* FINALIZE_LAZY_SCRIPT */
-        false,     /* FINALIZE_SHAPE */
-        false,     /* FINALIZE_ACCESSOR_SHAPE */
-        false,     /* FINALIZE_BASE_SHAPE */
-        false,     /* FINALIZE_TYPE_OBJECT */
-        false,     /* FINALIZE_FAT_INLINE_STRING */
-        false,     /* FINALIZE_STRING */
-        false,     /* FINALIZE_EXTERNAL_STRING */
-        false,     /* FINALIZE_SYMBOL */
-        false,     /* FINALIZE_JITCODE */
-    };
-    JS_STATIC_ASSERT(JS_ARRAY_LENGTH(map) == FINALIZE_LIMIT);
-    return map[kind];
-}
-#endif
-
 static inline bool
 IsBackgroundFinalized(AllocKind kind)
 {
     MOZ_ASSERT(kind >= 0 && unsigned(kind) < FINALIZE_LIMIT);
     static const bool map[] = {
         false,     /* FINALIZE_OBJECT0 */
         true,      /* FINALIZE_OBJECT0_BACKGROUND */
         false,     /* FINALIZE_OBJECT2 */
@@ -1218,17 +1176,16 @@ MergeCompartments(JSCompartment *source,
 
 /*
  * This structure overlays a Cell in the Nursery and re-purposes its memory
  * for managing the Nursery collection process.
  */
 class RelocationOverlay
 {
     friend class MinorCollectionTracer;
-    friend class ForkJoinNursery;
 
     /* The low bit is set so this should never equal a normal pointer. */
     static const uintptr_t Relocated = uintptr_t(0xbad0bad1);
 
     // Putting the magic value after the forwarding pointer is a terrible hack
     // to make JSObject::zone() work on forwarded objects.
 
     /* The location |this| was moved to. */
--- a/js/src/jsgcinlines.h
+++ b/js/src/jsgcinlines.h
@@ -26,22 +26,16 @@ ThreadSafeContext::allocator() const
 
 template <typename T>
 inline bool
 ThreadSafeContext::isThreadLocal(T thing) const
 {
     if (!isForkJoinContext())
         return true;
 
-#ifdef JSGC_FJGENERATIONAL
-    ForkJoinContext *cx = static_cast<ForkJoinContext*>(const_cast<ThreadSafeContext*>(this));
-    if (cx->nursery().isInsideNewspace(thing))
-        return true;
-#endif
-
     // Global invariant
     MOZ_ASSERT(!IsInsideNursery(thing));
 
     // The thing is not in the nursery, but is it in the private tenured area?
     if (allocator_->arenas.containsArena(runtime_, thing->asTenured().arenaHeader()))
     {
         // GC should be suppressed in preparation for mutating thread local
         // objects, as we don't want to trip any barriers.
@@ -68,24 +62,16 @@ GetGCObjectKind(const Class *clasp)
 }
 
 inline bool
 ShouldNurseryAllocate(const Nursery &nursery, AllocKind kind, InitialHeap heap)
 {
     return nursery.isEnabled() && IsNurseryAllocable(kind) && heap != TenuredHeap;
 }
 
-#ifdef JSGC_FJGENERATIONAL
-inline bool
-ShouldFJNurseryAllocate(const ForkJoinNursery &nursery, AllocKind kind, InitialHeap heap)
-{
-    return IsFJNurseryAllocable(kind) && heap != TenuredHeap;
-}
-#endif
-
 inline JSGCTraceKind
 GetGCThingTraceKind(const void *thing)
 {
     MOZ_ASSERT(thing);
     const Cell *cell = static_cast<const Cell *>(thing);
     if (IsInsideNursery(cell))
         return JSTRACE_OBJECT;
     return MapAllocToTraceKind(cell->asTenured().getAllocKind());
@@ -450,38 +436,16 @@ TryNewNurseryObject(JSContext *cx, size_
             JSObject *obj = nursery.allocateObject(cx, thingSize, nDynamicSlots);
             MOZ_ASSERT(obj);
             return obj;
         }
     }
     return nullptr;
 }
 
-#ifdef JSGC_FJGENERATIONAL
-template <AllowGC allowGC>
-inline JSObject *
-TryNewNurseryObject(ForkJoinContext *cx, size_t thingSize, size_t nDynamicSlots)
-{
-    ForkJoinNursery &nursery = cx->nursery();
-    bool tooLarge = false;
-    JSObject *obj = nursery.allocateObject(thingSize, nDynamicSlots, tooLarge);
-    if (obj)
-        return obj;
-
-    if (!tooLarge && allowGC) {
-        nursery.minorGC();
-        obj = nursery.allocateObject(thingSize, nDynamicSlots, tooLarge);
-        if (obj)
-            return obj;
-    }
-
-    return nullptr;
-}
-#endif /* JSGC_FJGENERATIONAL */
-
 static inline bool
 PossiblyFail()
 {
     JS_OOM_POSSIBLY_FAIL();
     return true;
 }
 
 template <AllowGC allowGC>
@@ -565,26 +529,16 @@ AllocateObject(ThreadSafeContext *cx, Al
         return nullptr;
 
     if (cx->isJSContext() &&
         ShouldNurseryAllocate(cx->asJSContext()->nursery(), kind, heap)) {
         JSObject *obj = TryNewNurseryObject<allowGC>(cx->asJSContext(), thingSize, nDynamicSlots);
         if (obj)
             return obj;
     }
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext() &&
-        ShouldFJNurseryAllocate(cx->asForkJoinContext()->nursery(), kind, heap))
-    {
-        JSObject *obj =
-            TryNewNurseryObject<allowGC>(cx->asForkJoinContext(), thingSize, nDynamicSlots);
-        if (obj)
-            return obj;
-    }
-#endif
 
     HeapSlot *slots = nullptr;
     if (nDynamicSlots) {
         if (cx->isExclusiveContext())
             slots = cx->asExclusiveContext()->zone()->pod_malloc<HeapSlot>(nDynamicSlots);
         else
             slots = js_pod_malloc<HeapSlot>(nDynamicSlots);
         if (MOZ_UNLIKELY(!slots))
@@ -631,18 +585,16 @@ AllocateNonObject(ThreadSafeContext *cx)
 }
 
 /*
  * When allocating for initialization from a cached object copy, we will
  * potentially destroy the cache entry we want to copy if we allow GC. On the
  * other hand, since these allocations are extremely common, we don't want to
  * delay GC from these allocation sites. Instead we allow the GC, but still
  * fail the allocation, forcing the non-cached path.
- *
- * Observe this won't be used for ForkJoin allocation, as it takes a JSContext*
  */
 template <AllowGC allowGC>
 inline JSObject *
 AllocateObjectForCacheHit(JSContext *cx, AllocKind kind, InitialHeap heap)
 {
     if (ShouldNurseryAllocate(cx->nursery(), kind, heap)) {
         size_t thingSize = Arena::thingSize(kind);
 
--- a/js/src/jsinfer.cpp
+++ b/js/src/jsinfer.cpp
@@ -4575,17 +4575,16 @@ ExclusiveContext::getSingletonType(const
 void
 ConstraintTypeSet::sweep(Zone *zone, AutoClearTypeInferenceStateOnOOM &oom)
 {
     MOZ_ASSERT(zone->isGCSweepingOrCompacting());
 
     // IsAboutToBeFinalized doesn't work right on tenured objects when called
     // during a minor collection.
     MOZ_ASSERT(!zone->runtimeFromMainThread()->isHeapMinorCollecting());
-    MOZ_ASSERT(!zone->runtimeFromMainThread()->isFJMinorCollecting());
 
     /*
      * Purge references to type objects that are no longer live. Type sets hold
      * only weak references. For type sets containing more than one object,
      * live entries in the object hash need to be copied to the zone's
      * new arena.
      */
     unsigned objectCount = baseObjectCount();
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -73,20 +73,16 @@ extern const Class MathClass;
 
 class GlobalObject;
 class MapObject;
 class NewObjectCache;
 class NormalArgumentsObject;
 class SetObject;
 class StrictArgumentsObject;
 
-namespace gc {
-class ForkJoinNursery;
-}
-
 }  /* namespace js */
 
 /*
  * A JavaScript object. The members common to all objects are as follows:
  *
  * - The |shape_| member stores the shape of the object, which includes the
  *   object's class and the layout of all its properties.
  *
@@ -112,17 +108,16 @@ class JSObject : public js::gc::Cell
      */
     js::HeapPtrTypeObject type_;
 
   private:
     friend class js::Shape;
     friend class js::GCMarker;
     friend class js::NewObjectCache;
     friend class js::Nursery;
-    friend class js::gc::ForkJoinNursery;
 
     /* Make the type object to use for LAZY_TYPE objects. */
     static js::types::TypeObject *makeLazyType(JSContext *cx, js::HandleObject obj);
 
   public:
     js::Shape * lastProperty() const {
         MOZ_ASSERT(shape_);
         return shape_;
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -19,18 +19,16 @@
 #include "vm/StringObject.h"
 #include "vm/TypedArrayCommon.h"
 
 #include "jsatominlines.h"
 #include "jscompartmentinlines.h"
 #include "jsgcinlines.h"
 #include "jsinferinlines.h"
 
-#include "gc/ForkJoinNursery-inl.h"
-
 /* static */ inline bool
 JSObject::setGenericAttributes(JSContext *cx, js::HandleObject obj,
                                js::HandleId id, unsigned *attrsp)
 {
     js::types::MarkTypePropertyNonData(cx, obj, id);
     js::GenericAttributesOp op = obj->getOps()->setGenericAttributes;
     if (op)
         return op(cx, obj, id, attrsp);
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -115,17 +115,16 @@ UNIFIED_SOURCES += [
     'frontend/BytecodeCompiler.cpp',
     'frontend/BytecodeEmitter.cpp',
     'frontend/FoldConstants.cpp',
     'frontend/NameFunctions.cpp',
     'frontend/ParseMaps.cpp',
     'frontend/ParseNode.cpp',
     'frontend/TokenStream.cpp',
     'gc/Barrier.cpp',
-    'gc/ForkJoinNursery.cpp',
     'gc/GCTrace.cpp',
     'gc/Iteration.cpp',
     'gc/Marking.cpp',
     'gc/Memory.cpp',
     'gc/Nursery.cpp',
     'gc/RootMarking.cpp',
     'gc/Statistics.cpp',
     'gc/StoreBuffer.cpp',
--- a/js/src/vm/ForkJoin.cpp
+++ b/js/src/vm/ForkJoin.cpp
@@ -25,17 +25,16 @@
 #ifdef FORKJOIN_SPEW
 # include "jit/Ion.h"
 # include "jit/JitCompartment.h"
 # include "jit/MIR.h"
 # include "jit/MIRGraph.h"
 #endif
 #include "vm/Monitor.h"
 
-#include "gc/ForkJoinNursery-inl.h"
 #include "vm/Interpreter-inl.h"
 
 using namespace js;
 using namespace js::parallel;
 using namespace js::jit;
 
 using mozilla::ThreadLocal;
 
@@ -210,20 +209,16 @@ class ForkJoinOperation
     TrafficLight appendCallTargetsToWorklist(uint32_t index, ExecutionStatus *status);
     TrafficLight appendCallTargetToWorklist(HandleScript script, ExecutionStatus *status);
     bool addToWorklist(HandleScript script);
     inline bool hasScript(const types::RecompileInfoVector &scripts, JSScript *script);
 }; // class ForkJoinOperation
 
 class ForkJoinShared : public ParallelJob, public Monitor
 {
-#ifdef JSGC_FJGENERATIONAL
-    friend class gc::ForkJoinGCShared;
-#endif
-
     /////////////////////////////////////////////////////////////////////////
     // Constant fields
 
     JSContext *const cx_;                    // Current context
     ThreadPool *const threadPool_;           // The thread pool
     HandleFunction fun_;                     // The JavaScript function to execute
     HandleObject updatable_;                 // Pre-existing object that might be updated
     uint16_t sliceStart_;                    // The starting slice id.
@@ -1600,46 +1595,18 @@ ForkJoinShared::executePortion(PerThread
         ParallelIonInvoke<3> fii(runtime(), fun_, 3);
 
         fii.args[0] = Int32Value(worker->id());
         fii.args[1] = Int32Value(sliceStart_);
         fii.args[2] = Int32Value(sliceEnd_);
 
         bool ok = fii.invoke(&cx);
         MOZ_ASSERT(ok == !cx.bailoutRecord->bailedOut());
-        if (!ok) {
+        if (!ok)
             setAbortFlagAndRequestInterrupt(false);
-#ifdef JSGC_FJGENERATIONAL
-            // TODO: See bugs 1010169, 993347.
-            //
-            // It is not desirable to promote here, but if we don't do
-            // this then we can't unconditionally transfer arenas to
-            // the compartment, since the arenas can contain objects
-            // that point into the nurseries.  If those objects are
-            // touched at all by the GC, eg as part of a prebarrier,
-            // then chaos ensues.
-            //
-            // The proper fix might appear to be to note the abort and
-            // not transfer, but instead clear, the arenas.  However,
-            // the result array will remain live and unless it is
-            // cleared immediately and without running barriers then
-            // it will have pointers into the now-cleared areas, which
-            // is also wrong.
-            //
-            // For the moment, until we figure out how to clear the
-            // result array properly and implement that, it may be
-            // that the best thing we can do here is to evacuate and
-            // then let the GC run its course.
-            cx.evacuateLiveData();
-#endif
-        } else {
-#ifdef JSGC_FJGENERATIONAL
-            cx.evacuateLiveData();
-#endif
-        }
     }
 
     Spew(SpewOps, "Down");
 }
 
 void
 ForkJoinShared::setAbortFlagDueToInterrupt(ForkJoinContext &cx)
 {
@@ -1690,75 +1657,28 @@ ForkJoinShared::requestZoneGC(JS::Zone *
     } else {
         // Otherwise, just GC this zone.
         gcZone_ = zone;
         gcReason_ = reason;
         gcRequested_ = true;
     }
 }
 
-#ifdef JSGC_FJGENERATIONAL
-
-JSRuntime*
-js::gc::ForkJoinGCShared::runtime()
-{
-    return shared_->runtime();
-}
-
-JS::Zone*
-js::gc::ForkJoinGCShared::zone()
-{
-    return shared_->zone();
-}
-
-JSObject*
-js::gc::ForkJoinGCShared::updatable()
-{
-    return shared_->updatable();
-}
-
-js::gc::ForkJoinNurseryChunk *
-js::gc::ForkJoinGCShared::allocateNurseryChunk()
-{
-    return shared_->threadPool_->getChunk();
-}
-
-void
-js::gc::ForkJoinGCShared::freeNurseryChunk(js::gc::ForkJoinNurseryChunk *p)
-{
-    shared_->threadPool_->putFreeChunk(p);
-}
-
-void
-js::gc::ForkJoinGCShared::spewGC(const char *fmt, ...)
-{
-    va_list ap;
-    va_start(ap, fmt);
-    SpewVA(SpewGC, fmt, ap);
-    va_end(ap);
-}
-
-#endif // JSGC_FJGENERATIONAL
-
 /////////////////////////////////////////////////////////////////////////////
 // ForkJoinContext
 //
 
 ForkJoinContext::ForkJoinContext(PerThreadData *perThreadData, ThreadPoolWorker *worker,
                                  Allocator *allocator, ForkJoinShared *shared,
                                  ParallelBailoutRecord *bailoutRecord)
   : ThreadSafeContext(shared->runtime(), perThreadData, Context_ForkJoin),
     bailoutRecord(bailoutRecord),
     targetRegionStart(nullptr),
     targetRegionEnd(nullptr),
     shared_(shared),
-#ifdef JSGC_FJGENERATIONAL
-    gcShared_(shared),
-    nursery_(const_cast<ForkJoinContext*>(this), &this->gcShared_, allocator),
-#endif
     worker_(worker),
     acquiredJSContext_(false),
     nogc_()
 {
     /*
      * Unsafely set the zone. This is used to track malloc counters and to
      * trigger GCs and is otherwise not thread-safe to access.
      */
@@ -1770,20 +1690,16 @@ ForkJoinContext::ForkJoinContext(PerThre
      */
     compartment_ = shared->compartment();
 
     allocator_ = allocator;
 }
 
 bool ForkJoinContext::initialize()
 {
-#ifdef JSGC_FJGENERATIONAL
-    if (!nursery_.initialize())
-        return false;
-#endif
     return true;
 }
 
 bool
 ForkJoinContext::isMainThread() const
 {
     return worker_->isMainThread();
 }
--- a/js/src/vm/ForkJoin.h
+++ b/js/src/vm/ForkJoin.h
@@ -8,17 +8,16 @@
 #define vm_ForkJoin_h
 
 #include "mozilla/ThreadLocal.h"
 
 #include <stdarg.h>
 
 #include "jscntxt.h"
 
-#include "gc/ForkJoinNursery.h"
 #include "gc/GCInternals.h"
 
 #include "jit/Ion.h"
 #include "jit/IonTypes.h"
 
 #ifdef DEBUG
   #define FORKJOIN_SPEW
 #endif
@@ -185,53 +184,36 @@
 // with respect to garbage collection and allocation.  The typical
 // allocation paths are UNSAFE in parallel code because they access
 // shared state (the compartment's arena lists and so forth) without
 // any synchronization.  They can also trigger GC in an ad-hoc way.
 //
 // To deal with this, the forkjoin code creates a distinct |Allocator|
 // object for each worker, which is used as follows.
 //
-// In a non-generational setting you can access the appropriate
-// allocator via the |ForkJoinContext| object that is provided to the
-// callbacks.  Once the parallel execution is complete, all the
-// objects found in these distinct |Allocator| are merged back into
-// the main compartment lists and things proceed normally.  (If it is
-// known that the result array contains no references then no merging
-// is necessary.)
-//
-// In a generational setting there is a per-thread |ForkJoinNursery|
-// in addition to the per-thread Allocator.  All "simple" objects
-// (meaning they are reasonably small, can be copied, and have no
-// complicated finalization semantics) are allocated in the nurseries;
-// other objects are allocated directly in the threads' Allocators,
-// which serve as the tenured areas for the threads.
-//
-// When a thread's nursery fills up it can be collected independently
-// of the other threads' nurseries, and does not require any of the
-// threads to bail out of the parallel section.  The nursery is
-// copy-collected, and the expectation is that the survival rate will
-// be very low and the collection will be very cheap.
+// You can access the appropriate allocator via the |ForkJoinContext|
+// object that is provided to the callbacks.  Once the parallel
+// execution is complete, all the objects found in these distinct
+// |Allocator| are merged back into the main compartment lists and
+// things proceed normally.  (If it is known that the result array
+// contains no references then no merging is necessary.)
 //
 // When the parallel execution is complete, and only if merging of the
 // Allocators into the main compartment is necessary, then the live
 // objects of the nurseries are copied into the respective Allocators,
 // in parallel, before the merging takes place.
 //
 // In Ion-generated code, we will do allocation through the
-// |ForkJoinNursery| or |Allocator| found in |ForkJoinContext| (which
-// is obtained via TLS).
+// |Allocator| found in |ForkJoinContext| (which is obtained via TLS).
 //
 // No write barriers are emitted.  We permit writes to thread-local
 // objects, and such writes can create cross-generational pointers or
-// pointers that may interact with incremental GC.  However, the
-// per-thread generational collector scans its entire tenured area on
-// each minor collection, and we block upon entering a parallel
-// section to ensure that any concurrent marking or incremental GC has
-// completed.
+// pointers that may interact with incremental GC.  However, we block
+// upon entering a parallel section to ensure that any concurrent
+// marking or incremental GC has completed.
 //
 // In the future, it should be possible to lift the restriction that
 // we must block until incremental GC has completed. But we're not
 // there yet.
 //
 // Load balancing (work stealing):
 //
 // The ForkJoin job is dynamically divided into a fixed number of slices,
@@ -470,44 +452,24 @@ class ForkJoinContext : public ThreadSaf
     // Initializes the thread-local state.
     static bool initializeTls();
 
     // Used in inlining GetForkJoinSlice.
     static size_t offsetOfWorker() {
         return offsetof(ForkJoinContext, worker_);
     }
 
-#ifdef JSGC_FJGENERATIONAL
-    // There is already a nursery() method in ThreadSafeContext.
-    gc::ForkJoinNursery &nursery() { return nursery_; }
-
-    // Evacuate live data from the per-thread nursery into the per-thread
-    // tenured area.
-    void evacuateLiveData() { nursery_.evacuatingGC(); }
-
-    // Used in inlining nursery allocation.  Note the nursery is a
-    // member of the ForkJoinContext (a substructure), not a pointer.
-    static size_t offsetOfFJNursery() {
-        return offsetof(ForkJoinContext, nursery_);
-    }
-#endif
-
   private:
     friend class AutoSetForkJoinContext;
 
     // Initialized by initialize()
     static mozilla::ThreadLocal<ForkJoinContext*> tlsForkJoinContext;
 
     ForkJoinShared *const shared_;
 
-#ifdef JSGC_FJGENERATIONAL
-    gc::ForkJoinGCShared gcShared_;
-    gc::ForkJoinNursery nursery_;
-#endif
-
     ThreadPoolWorker *worker_;
 
     bool acquiredJSContext_;
 
     // ForkJoinContext is allocated on the stack. It would be dangerous to GC
     // with it live because of the GC pointer fields stored in the context.
     JS::AutoSuppressGCAnalysis nogc_;
 };
--- a/js/src/vm/NativeObject.cpp
+++ b/js/src/vm/NativeObject.cpp
@@ -409,41 +409,31 @@ NativeObject::setSlotSpan(ThreadSafeCont
 
 // This will not run the garbage collector.  If a nursery cannot accomodate the slot array
 // an attempt will be made to place the array in the tenured area.
 static HeapSlot *
 AllocateSlots(ThreadSafeContext *cx, JSObject *obj, uint32_t nslots)
 {
     if (cx->isJSContext())
         return cx->asJSContext()->runtime()->gc.nursery.allocateSlots(obj, nslots);
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext())
-        return cx->asForkJoinContext()->nursery().allocateSlots(obj, nslots);
-#endif
     return obj->zone()->pod_malloc<HeapSlot>(nslots);
 }
 
 // This will not run the garbage collector.  If a nursery cannot accomodate the slot array
 // an attempt will be made to place the array in the tenured area.
 //
 // If this returns null then the old slots will be left alone.
 static HeapSlot *
 ReallocateSlots(ThreadSafeContext *cx, JSObject *obj, HeapSlot *oldSlots,
                 uint32_t oldCount, uint32_t newCount)
 {
     if (cx->isJSContext()) {
         return cx->asJSContext()->runtime()->gc.nursery.reallocateSlots(obj, oldSlots,
                                                                         oldCount, newCount);
     }
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext()) {
-        return cx->asForkJoinContext()->nursery().reallocateSlots(obj, oldSlots,
-                                                                  oldCount, newCount);
-    }
-#endif
     return obj->zone()->pod_realloc<HeapSlot>(oldSlots, oldCount, newCount);
 }
 
 /* static */ bool
 NativeObject::growSlots(ThreadSafeContext *cx, HandleNativeObject obj, uint32_t oldCount, uint32_t newCount)
 {
     MOZ_ASSERT(cx->isThreadLocal(obj));
     MOZ_ASSERT(newCount > oldCount);
@@ -477,20 +467,16 @@ NativeObject::growSlots(ThreadSafeContex
 }
 
 static void
 FreeSlots(ThreadSafeContext *cx, HeapSlot *slots)
 {
     // Note: threads without a JSContext do not have access to GGC nursery allocated things.
     if (cx->isJSContext())
         return cx->asJSContext()->runtime()->gc.nursery.freeSlots(slots);
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext())
-        return cx->asForkJoinContext()->nursery().freeSlots(slots);
-#endif
     js_free(slots);
 }
 
 /* static */ void
 NativeObject::shrinkSlots(ThreadSafeContext *cx, HandleNativeObject obj,
                           uint32_t oldCount, uint32_t newCount)
 {
     MOZ_ASSERT(cx->isThreadLocal(obj));
@@ -713,41 +699,29 @@ NativeObject::maybeDensifySparseElements
 
 // This will not run the garbage collector.  If a nursery cannot accomodate the element array
 // an attempt will be made to place the array in the tenured area.
 static ObjectElements *
 AllocateElements(ThreadSafeContext *cx, JSObject *obj, uint32_t nelems)
 {
     if (cx->isJSContext())
         return cx->asJSContext()->runtime()->gc.nursery.allocateElements(obj, nelems);
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext())
-        return cx->asForkJoinContext()->nursery().allocateElements(obj, nelems);
-#endif
-
     return reinterpret_cast<js::ObjectElements *>(obj->zone()->pod_malloc<HeapSlot>(nelems));
 }
 
 // This will not run the garbage collector.  If a nursery cannot accomodate the element array
 // an attempt will be made to place the array in the tenured area.
 static ObjectElements *
 ReallocateElements(ThreadSafeContext *cx, JSObject *obj, ObjectElements *oldHeader,
                    uint32_t oldCount, uint32_t newCount)
 {
     if (cx->isJSContext()) {
         return cx->asJSContext()->runtime()->gc.nursery.reallocateElements(obj, oldHeader,
                                                                            oldCount, newCount);
     }
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext()) {
-        return cx->asForkJoinContext()->nursery().reallocateElements(obj, oldHeader,
-                                                                     oldCount, newCount);
-    }
-#endif
-
     return reinterpret_cast<js::ObjectElements *>(
             obj->zone()->pod_realloc<HeapSlot>(reinterpret_cast<HeapSlot *>(oldHeader),
                                                oldCount, newCount));
 }
 
 // Round up |reqAllocated| to a good size. Up to 1 Mebi (i.e. 1,048,576) the
 // slot count is usually a power-of-two:
 //
--- a/js/src/vm/NativeObject.h
+++ b/js/src/vm/NativeObject.h
@@ -24,20 +24,16 @@
 #include "vm/Shape.h"
 #include "vm/String.h"
 
 namespace js {
 
 class Nursery;
 class Shape;
 
-namespace gc {
-class ForkJoinNursery;
-}
-
 /*
  * To really poison a set of values, using 'magic' or 'undefined' isn't good
  * enough since often these will just be ignored by buggy code (see bug 629974)
  * in debug builds and crash in release builds. Instead, we use a safe-for-crash
  * pointer.
  */
 static MOZ_ALWAYS_INLINE void
 Debug_SetValueRangeToCrashOnTouch(Value *beg, Value *end)
@@ -185,17 +181,16 @@ class ObjectElements
         COPY_ON_WRITE               = 0x4
     };
 
   private:
     friend class ::JSObject;
     friend class NativeObject;
     friend class ArrayObject;
     friend class Nursery;
-    friend class gc::ForkJoinNursery;
 
     template <ExecutionMode mode>
     friend bool
     ArraySetLength(typename ExecutionModeTraits<mode>::ContextType cx,
                    Handle<ArrayObject*> obj, HandleId id,
                    unsigned attrs, HandleValue value, bool setterIsStrict);
 
     /* See Flags enum above. */
@@ -442,17 +437,16 @@ class NativeObject : public JSObject
      * slots to cover the new span if necessary.
      */
     static bool setSlotSpan(ThreadSafeContext *cx, HandleNativeObject obj, uint32_t span);
 
     bool toDictionaryMode(ThreadSafeContext *cx);
 
   private:
     friend class Nursery;
-    friend class gc::ForkJoinNursery;
 
     /*
      * Get internal pointers to the range of values starting at start and
      * running for length.
      */
     void getSlotRangeUnchecked(uint32_t start, uint32_t length,
                                HeapSlot **fixedStart, HeapSlot **fixedEnd,
                                HeapSlot **slotsStart, HeapSlot **slotsEnd)
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -968,18 +968,16 @@ struct JSRuntime : public JS::shadow::Ru
     bool                gcInitialized;
 
     bool isHeapBusy() { return gc.isHeapBusy(); }
     bool isHeapMajorCollecting() { return gc.isHeapMajorCollecting(); }
     bool isHeapMinorCollecting() { return gc.isHeapMinorCollecting(); }
     bool isHeapCollecting() { return gc.isHeapCollecting(); }
     bool isHeapCompacting() { return gc.isHeapCompacting(); }
 
-    bool isFJMinorCollecting() { return gc.isFJMinorCollecting(); }
-
     int gcZeal() { return gc.zeal(); }
 
     void lockGC() {
         assertCanLock(js::GCLock);
         gc.lockGC();
     }
 
     void unlockGC() {
--- a/js/src/vm/Shape.cpp
+++ b/js/src/vm/Shape.cpp
@@ -17,17 +17,16 @@
 #include "jshashutil.h"
 #include "jsobj.h"
 
 #include "js/HashTable.h"
 
 #include "jscntxtinlines.h"
 #include "jsobjinlines.h"
 
-#include "gc/ForkJoinNursery-inl.h"
 #include "vm/NativeObject-inl.h"
 #include "vm/Runtime-inl.h"
 
 using namespace js;
 using namespace js::gc;
 
 using mozilla::CeilingLog2Size;
 using mozilla::DebugOnly;
--- a/js/src/vm/Shape.h
+++ b/js/src/vm/Shape.h
@@ -110,20 +110,16 @@
 
 namespace js {
 
 class Bindings;
 class Debugger;
 class Nursery;
 class StaticBlockObject;
 
-namespace gc {
-class ForkJoinNursery;
-}
-
 typedef JSPropertyOp         PropertyOp;
 typedef JSStrictPropertyOp   StrictPropertyOp;
 typedef JSPropertyDescriptor PropertyDescriptor;
 
 /* Limit on the number of slotful properties in an object. */
 static const uint32_t SHAPE_INVALID_SLOT = JS_BIT(24) - 1;
 static const uint32_t SHAPE_MAXIMUM_SLOT = JS_BIT(24) - 2;
 
@@ -565,17 +561,16 @@ typedef HashSet<ReadBarrieredUnownedBase
 
 
 class Shape : public gc::TenuredCell
 {
     friend class ::JSObject;
     friend class ::JSFunction;
     friend class Bindings;
     friend class Nursery;
-    friend class gc::ForkJoinNursery;
     friend class NativeObject;
     friend class PropertyTree;
     friend class StaticBlockObject;
     friend class ShapeGetterSetterRef;
     friend struct StackShape;
     friend struct StackBaseShape;
 
   protected:
--- a/js/src/vm/ThreadPool.cpp
+++ b/js/src/vm/ThreadPool.cpp
@@ -12,20 +12,16 @@
 #include "jsmath.h"
 #include "jsnum.h" // for FIX_FPU
 
 #include "js/Utility.h"
 #include "vm/ForkJoin.h"
 #include "vm/Monitor.h"
 #include "vm/Runtime.h"
 
-#ifdef JSGC_FJGENERATIONAL
-#include "prmjtime.h"
-#endif
-
 using namespace js;
 
 const size_t WORKER_THREAD_STACK_SIZE = 1*1024*1024;
 
 static inline uint32_t
 ComposeSliceBounds(uint16_t from, uint16_t to)
 {
     MOZ_ASSERT(from <= to);
@@ -259,44 +255,34 @@ ThreadPool::ThreadPool(JSRuntime *rt)
   : activeWorkers_(0),
     joinBarrier_(nullptr),
     job_(nullptr),
 #ifdef DEBUG
     runtime_(rt),
     stolenSlices_(0),
 #endif
     pendingSlices_(0),
-    isMainThreadActive_(false),
-    chunkLock_(nullptr),
-    timeOfLastAllocation_(0),
-    freeChunks_(nullptr)
+    isMainThreadActive_(false)
 { }
 
 ThreadPool::~ThreadPool()
 {
     terminateWorkers();
-    if (chunkLock_)
-        clearChunkCache();
-    if (chunkLock_)
-        PR_DestroyLock(chunkLock_);
     if (joinBarrier_)
         PR_DestroyCondVar(joinBarrier_);
 }
 
 bool
 ThreadPool::init()
 {
     if (!Monitor::init())
         return false;
     joinBarrier_ = PR_NewCondVar(lock_);
     if (!joinBarrier_)
         return false;
-    chunkLock_ = PR_NewLock();
-    if (!chunkLock_)
-        return false;
     return true;
 }
 
 uint32_t
 ThreadPool::numWorkers() const
 {
     return HelperThreadState().cpuCount;
 }
@@ -478,97 +464,8 @@ ThreadPool::abortJob()
     // The reason for this is that while calling discardSlices() clears all
     // workers' bounds, the pendingSlices_ cache might still be > 0 due to
     // still-executing calls to popSliceBack or popSliceFront in other
     // threads. When those finish, we will be sure that !hasWork(), which is
     // important to ensure that an aborted worker does not start again due to
     // the thread pool having more work.
     while (hasWork());
 }
-
-// We are not using the markPagesUnused() / markPagesInUse() APIs here
-// for two reasons.  One, the free list is threaded through the
-// chunks, so some pages are actually in use.  Two, the expectation is
-// that a small number of chunks will be used intensively for a short
-// while and then be abandoned at the next GC.
-//
-// It's an open question whether it's best to map the chunk directly,
-// as now, or go via the GC's chunk pool.  Either way there's a need
-// to manage a predictable chunk cache here as we don't want chunks to
-// be deallocated during a parallel section.
-
-gc::ForkJoinNurseryChunk *
-ThreadPool::getChunk()
-{
-#ifdef JSGC_FJGENERATIONAL
-    PR_Lock(chunkLock_);
-    timeOfLastAllocation_ = PRMJ_Now()/1000000;
-    ChunkFreeList *p = freeChunks_;
-    if (p)
-        freeChunks_ = p->next;
-    PR_Unlock(chunkLock_);
-
-    if (p) {
-        // Already poisoned.
-        return reinterpret_cast<gc::ForkJoinNurseryChunk *>(p);
-    }
-    gc::ForkJoinNurseryChunk *c =
-        reinterpret_cast<gc::ForkJoinNurseryChunk *>(
-            gc::MapAlignedPages(gc::ChunkSize, gc::ChunkSize));
-    if (!c)
-        return c;
-    poisonChunk(c);
-    return c;
-#else
-    return nullptr;
-#endif
-}
-
-void
-ThreadPool::putFreeChunk(gc::ForkJoinNurseryChunk *c)
-{
-#ifdef JSGC_FJGENERATIONAL
-    poisonChunk(c);
-
-    PR_Lock(chunkLock_);
-    ChunkFreeList *p = reinterpret_cast<ChunkFreeList *>(c);
-    p->next = freeChunks_;
-    freeChunks_ = p;
-    PR_Unlock(chunkLock_);
-#endif
-}
-
-void
-ThreadPool::poisonChunk(gc::ForkJoinNurseryChunk *c)
-{
-#ifdef JSGC_FJGENERATIONAL
-#ifdef DEBUG
-    memset(c, JS_POISONED_FORKJOIN_CHUNK, gc::ChunkSize);
-#endif
-    c->trailer.runtime = nullptr;
-#endif
-}
-
-void
-ThreadPool::pruneChunkCache()
-{
-#ifdef JSGC_FJGENERATIONAL
-    if (PRMJ_Now()/1000000 - timeOfLastAllocation_ >= secondsBeforePrune)
-        clearChunkCache();
-#endif
-}
-
-void
-ThreadPool::clearChunkCache()
-{
-#ifdef JSGC_FJGENERATIONAL
-    PR_Lock(chunkLock_);
-    ChunkFreeList *p = freeChunks_;
-    freeChunks_ = nullptr;
-    PR_Unlock(chunkLock_);
-
-    while (p) {
-        ChunkFreeList *victim = p;
-        p = p->next;
-        gc::UnmapPages(victim, gc::ChunkSize);
-    }
-#endif
-}
--- a/js/src/vm/ThreadPool.h
+++ b/js/src/vm/ThreadPool.h
@@ -19,20 +19,16 @@
 
 struct JSRuntime;
 struct JSCompartment;
 
 namespace js {
 
 class ThreadPool;
 
-namespace gc {
-struct ForkJoinNurseryChunk;
-}
-
 /////////////////////////////////////////////////////////////////////////////
 // ThreadPoolWorker
 //
 // Class for worker threads in the pool. All threads (i.e. helpers and main
 // thread) have a worker associted with them. By convention, the worker id of
 // the main thread is 0.
 
 class ThreadPoolWorker
@@ -248,87 +244,13 @@ class ThreadPool : public Monitor
 
     // Execute the given ParallelJob using the main thread and any available worker.
     // Blocks until the main thread has completed execution.
     ParallelResult executeJob(JSContext *cx, ParallelJob *job, uint16_t sliceStart,
                               uint16_t numSlices);
 
     // Abort the current job.
     void abortJob();
-
-    // Chunk pool for the PJS parallel nurseries.  The nurseries need
-    // to have a useful pool of cheap chunks, they cannot map/unmap
-    // chunks as needed, as that slows down collection much too much.
-    //
-    // Technically the following should be #ifdef JSGC_FJGENERATIONAL
-    // but that affects the observed size of JSRuntime, of which
-    // ThreadPool is a member.  JSGC_FJGENERATIONAL can only be set if
-    // PJS is enabled, but the latter is enabled in js/src/moz.build;
-    // meanwhile, JSGC_FJGENERATIONAL must be enabled globally if it
-    // is enabled at all, since plenty of Firefox code includes files
-    // to make JSRuntime visible.  JSGC_FJGENERATIONAL will go away
-    // soon, in the mean time the problem is resolved by not making
-    // definitions exported from SpiderMonkey dependent on it.
-
-    // Obtain chunk memory from the cache, or allocate new.  In debug
-    // mode poison the memory, see poisionChunk().
-    //
-    // Returns nullptr on OOM.
-    gc::ForkJoinNurseryChunk *getChunk();
-
-    // Free chunk memory to the cache.  In debug mode poison it, see
-    // poisionChunk().
-    void putFreeChunk(gc::ForkJoinNurseryChunk *mem);
-
-    // If enough time has passed since any allocation activity on the
-    // chunk pool then release any free chunks.  It's meaningful to
-    // call this from the main GC's chunk expiry mechanism; it has low
-    // cost if it does not do anything.
-    //
-    // This must be called with the GC lock taken.
-    void pruneChunkCache();
-
-  private:
-    // Ignore requests to prune the pool until this number of seconds
-    // has passed since the last allocation request.
-    static const int32_t secondsBeforePrune = 10;
-
-    // This lock controls access to the following variables and to the
-    // 'next' field of any ChunkFreeList object reachable from freeChunks_.
-    //
-    // You will be tempted to remove this lock and instead introduce a
-    // lock-free push/pop data structure using Atomic.compareExchange.
-    // Before you do that, consider that such a data structure
-    // implemented naively is vulnerable to the ABA problem in a way
-    // that leads to a corrupt free list; the problem occurs in
-    // practice during very heavily loaded runs where preeption
-    // windows can be long (eg, running the parallel jit_tests on all
-    // cores means having a number of runnable threads quadratic in
-    // the number of cores).  To do better some ABA-defeating scheme
-    // is needed additionally.
-    PRLock *chunkLock_;
-
-    // Timestamp of last allocation from the chunk pool, in seconds.
-    int32_t timeOfLastAllocation_;
-
-    // This structure overlays the beginning of the chunk when the
-    // chunk is on the free list; the rest of the chunk is unused.
-    struct ChunkFreeList {
-        ChunkFreeList *next;
-    };
-
-    // List of free chunks.
-    ChunkFreeList *freeChunks_;
-
-    // Poison a free chunk by filling with JS_POISONED_FORKJOIN_CHUNK
-    // and setting the runtime pointer to null.
-    void poisonChunk(gc::ForkJoinNurseryChunk *c);
-
-    // Release the memory of the chunks that are on the free list.
-    //
-    // This should be called only from the ThreadPool's destructor or
-    // from pruneChunkCache().
-    void clearChunkCache();
 };
 
 } // namespace js
 
 #endif /* vm_ThreadPool_h */