Backed out changeset d89d4281fd8d (bug 933313) for bustage on a CLOSED TREE
author: Carsten "Tomcat" Book <cbook@mozilla.com>
date: Fri, 06 Jun 2014 10:18:04 +0200
changeset: 207370:16fb5d3adef7eb27673d5453706ed1efc038ec77
parent: 207369:999ef6a46be8253b685cb86158003016897fa9a3
child: 207371:cd9ee55b7d24a7bbcedf55f775ab3266c9c1fa6a
push id: 494
push user: raliiev@mozilla.com
push date: Mon, 25 Aug 2014 18:42:16 +0000
treeherder: mozilla-release@a3cc3e46b571
bugs: 933313
milestone: 32.0a1
backs out: d89d4281fd8d5d51634491a7f100695c89071112
Backed out changeset d89d4281fd8d (bug 933313) for bustage on a CLOSED TREE
js/public/HeapAPI.h
js/public/Utility.h
js/src/builtin/Array.js
js/src/builtin/TypedObject.js
js/src/configure.in
js/src/gc/ForkJoinNursery-inl.h
js/src/gc/ForkJoinNursery.cpp
js/src/gc/ForkJoinNursery.h
js/src/gc/GCInternals.h
js/src/gc/GCRuntime.h
js/src/gc/Marking.cpp
js/src/gc/Nursery-inl.h
js/src/gc/Nursery.cpp
js/src/gc/Nursery.h
js/src/gc/RootMarking.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/CodeGenerator.h
js/src/jit/IonFrames.cpp
js/src/jit/IonFrames.h
js/src/jit/IonMacroAssembler.cpp
js/src/jit/IonMacroAssembler.h
js/src/jit/JitFrameIterator.h
js/src/jit/LIR-Common.h
js/src/jit/Lowering.cpp
js/src/jit/MIR.h
js/src/jit/ParallelFunctions.cpp
js/src/jit/ParallelSafetyAnalysis.cpp
js/src/jsapi.h
js/src/jscntxtinlines.h
js/src/jsgc.cpp
js/src/jsgc.h
js/src/jsgcinlines.h
js/src/jsobj.cpp
js/src/jsobj.h
js/src/jsobjinlines.h
js/src/moz.build
js/src/vm/ArrayBufferObject.cpp
js/src/vm/ForkJoin.cpp
js/src/vm/ForkJoin.h
js/src/vm/ObjectImpl.h
js/src/vm/Runtime.h
js/src/vm/SelfHosting.cpp
js/src/vm/Shape.cpp
js/src/vm/Shape.h
js/src/vm/Stack.cpp
js/src/vm/Stack.h
js/src/vm/ThreadPool.cpp
js/src/vm/ThreadPool.h
--- a/js/public/HeapAPI.h
+++ b/js/public/HeapAPI.h
@@ -50,33 +50,21 @@ const size_t ChunkLocationOffset = Chunk
  * Live objects are marked black. How many other additional colors are available
  * depends on the size of the GCThing. Objects marked gray are eligible for
  * cycle collection.
  */
 static const uint32_t BLACK = 0;
 static const uint32_t GRAY = 1;
 
 /*
- * The "location" field in the Chunk trailer is a bit vector indicting various
- * roles of the chunk.
- *
- * The value 0 for the "location" field is invalid, at least one bit must be
- * set.
- *
- * Some bits preclude others, for example, any "nursery" bit precludes any
- * "tenured" or "middle generation" bit.
+ * Constants used to indicate whether a chunk is part of the tenured heap or the
+ * nursery.
  */
-const uintptr_t ChunkLocationBitNursery = 1;       // Standard GGC nursery
-const uintptr_t ChunkLocationBitTenuredHeap = 2;   // Standard GGC tenured generation
-const uintptr_t ChunkLocationBitPJSNewspace = 4;   // The PJS generational GC's allocation space
-const uintptr_t ChunkLocationBitPJSFromspace = 8;  // The PJS generational GC's fromspace (during GC)
-
-const uintptr_t ChunkLocationAnyNursery = ChunkLocationBitNursery |
-                                          ChunkLocationBitPJSNewspace |
-                                          ChunkLocationBitPJSFromspace;
+const uint32_t ChunkLocationNursery = 0;
+const uint32_t ChunkLocationTenuredHeap = 1;
 
 #ifdef JS_DEBUG
 /* When downcasting, ensure we are actually the right type. */
 extern JS_FRIEND_API(void)
 AssertGCThingHasType(js::gc::Cell *cell, JSGCTraceKind kind);
 #else
 inline void
 AssertGCThingHasType(js::gc::Cell *cell, JSGCTraceKind kind) {}
@@ -232,18 +220,19 @@ IsInsideNursery(const js::gc::Cell *cell
 {
 #ifdef JSGC_GENERATIONAL
     if (!cell)
         return false;
     uintptr_t addr = uintptr_t(cell);
     addr &= ~js::gc::ChunkMask;
     addr |= js::gc::ChunkLocationOffset;
     uint32_t location = *reinterpret_cast<uint32_t *>(addr);
-    JS_ASSERT(location != 0);
-    return location & ChunkLocationAnyNursery;
+    JS_ASSERT(location == gc::ChunkLocationNursery ||
+              location == gc::ChunkLocationTenuredHeap);
+    return location == gc::ChunkLocationNursery;
 #else
     return false;
 #endif
 }
 
 } /* namespace gc */
 
 } /* namespace js */
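
The new IsInsideNursery above relies on chunk alignment: clearing the low bits of any cell pointer yields its chunk base, and the location word sits at a fixed offset in the chunk trailer, so classifying a cell costs two bit operations and one load. A minimal standalone sketch of that arithmetic, with stand-in constants (the real values come from gc/Heap.h):

```cpp
#include <cstdint>

// Stand-ins for the real constants; actual values live in gc/Heap.h.
constexpr uintptr_t ChunkSize = uintptr_t(1) << 20;            // chunks are ChunkSize-aligned
constexpr uintptr_t ChunkMask = ChunkSize - 1;
constexpr uintptr_t ChunkLocationOffset = ChunkSize - sizeof(uint32_t);

// Classify a cell by reading its chunk's trailer word.
inline uint32_t ChunkLocationOf(const void *cell) {
    uintptr_t addr = reinterpret_cast<uintptr_t>(cell);
    addr &= ~ChunkMask;           // round down to the chunk base
    addr |= ChunkLocationOffset;  // low bits are zero, so OR addresses the trailer field
    return *reinterpret_cast<const uint32_t *>(addr);
}
```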
--- a/js/public/Utility.h
+++ b/js/public/Utility.h
@@ -42,17 +42,16 @@ namespace js {}
 #define JS_FRESH_NURSERY_PATTERN 0x2F
 #define JS_SWEPT_NURSERY_PATTERN 0x2B
 #define JS_ALLOCATED_NURSERY_PATTERN 0x2D
 #define JS_FRESH_TENURED_PATTERN 0x4F
 #define JS_SWEPT_TENURED_PATTERN 0x4B
 #define JS_ALLOCATED_TENURED_PATTERN 0x4D
 #define JS_SWEPT_CODE_PATTERN 0x3b
 #define JS_SWEPT_FRAME_PATTERN 0x5b
-#define JS_POISONED_FORKJOIN_CHUNK 0xBD
 
 #define JS_ASSERT(expr)           MOZ_ASSERT(expr)
 #define JS_ASSERT_IF(cond, expr)  MOZ_ASSERT_IF(cond, expr)
 
 #define JS_STATIC_ASSERT(cond)           static_assert(cond, "JS_STATIC_ASSERT")
 #define JS_STATIC_ASSERT_IF(cond, expr)  MOZ_STATIC_ASSERT_IF(cond, expr, "JS_STATIC_ASSERT_IF")
 
 extern MOZ_NORETURN JS_PUBLIC_API(void)
--- a/js/src/builtin/Array.js
+++ b/js/src/builtin/Array.js
@@ -687,17 +687,17 @@ function ArrayMapPar(func, mode) {
     // - Breaking out of named blocks does not currently work (bug 684384);
     // - Unreachable Code Elim. can't properly handle if (a && b) (bug 669796)
     if (ShouldForceSequential())
       break parallel;
     if (!TRY_PARALLEL(mode))
       break parallel;
 
     var slicesInfo = ComputeSlicesInfo(length);
-    ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode), buffer);
+    ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode));
     return buffer;
   }
 
   // Sequential fallback:
   ASSERT_SEQUENTIAL_IS_OK(mode);
   for (var i = 0; i < length; i++)
     UnsafePutElements(buffer, i, func(self[i], i, self));
   return buffer;
@@ -736,17 +736,17 @@ function ArrayReducePar(func, mode) {
       break parallel;
     if (!TRY_PARALLEL(mode))
       break parallel;
 
     var slicesInfo = ComputeSlicesInfo(length);
     var numSlices = slicesInfo.count;
     var subreductions = NewDenseArray(numSlices);
 
-    ForkJoin(reduceThread, 0, numSlices, ForkJoinMode(mode), null);
+    ForkJoin(reduceThread, 0, numSlices, ForkJoinMode(mode));
 
     var accumulator = subreductions[0];
     for (var i = 1; i < numSlices; i++)
       accumulator = func(accumulator, subreductions[i]);
     return accumulator;
   }
 
   // Sequential fallback:
@@ -795,33 +795,33 @@ function ArrayScanPar(func, mode) {
       break parallel;
     if (!TRY_PARALLEL(mode))
       break parallel;
 
     var slicesInfo = ComputeSlicesInfo(length);
     var numSlices = slicesInfo.count;
 
     // Scan slices individually (see comment on phase1()).
-    ForkJoin(phase1, 0, numSlices, ForkJoinMode(mode), buffer);
+    ForkJoin(phase1, 0, numSlices, ForkJoinMode(mode));
 
     // Compute intermediates array (see comment on phase2()).
     var intermediates = [];
     var accumulator = buffer[finalElement(0)];
     ARRAY_PUSH(intermediates, accumulator);
     for (var i = 1; i < numSlices - 1; i++) {
       accumulator = func(accumulator, buffer[finalElement(i)]);
       ARRAY_PUSH(intermediates, accumulator);
     }
 
     // Complete each slice using intermediates array (see comment on phase2()).
     //
     // We start from slice 1 instead of 0 since there is no work to be done
     // for slice 0.
     if (numSlices > 1)
-      ForkJoin(phase2, 1, numSlices, ForkJoinMode(mode), buffer);
+      ForkJoin(phase2, 1, numSlices, ForkJoinMode(mode));
     return buffer;
   }
 
   // Sequential fallback:
   ASSERT_SEQUENTIAL_IS_OK(mode);
   scan(self[0], 0, length);
   return buffer;
 
@@ -1025,25 +1025,25 @@ function ArrayFilterPar(func, mode) {
     // the chunk survived. We also keep an array |counts| containing the total
     // number of items that are being preserved from within one slice.
     var numSlices = slicesInfo.count;
     var counts = NewDenseArray(numSlices);
     for (var i = 0; i < numSlices; i++)
       UnsafePutElements(counts, i, 0);
 
     var survivors = new Uint8Array(length);
-    ForkJoin(findSurvivorsThread, 0, numSlices, ForkJoinMode(mode), survivors);
+    ForkJoin(findSurvivorsThread, 0, numSlices, ForkJoinMode(mode));
 
     // Step 2. Compress the slices into one contiguous set.
     var count = 0;
     for (var i = 0; i < numSlices; i++)
       count += counts[i];
     var buffer = NewDenseArray(count);
     if (count > 0)
-      ForkJoin(copySurvivorsThread, 0, numSlices, ForkJoinMode(mode), buffer);
+      ForkJoin(copySurvivorsThread, 0, numSlices, ForkJoinMode(mode));
 
     return buffer;
   }
 
   // Sequential fallback:
   ASSERT_SEQUENTIAL_IS_OK(mode);
   var buffer = [];
   for (var i = 0; i < length; i++) {
@@ -1143,17 +1143,17 @@ function ArrayStaticBuildPar(length, fun
 
   parallel: for (;;) {
     if (ShouldForceSequential())
       break parallel;
     if (!TRY_PARALLEL(mode))
       break parallel;
 
     var slicesInfo = ComputeSlicesInfo(length);
-    ForkJoin(constructThread, 0, slicesInfo.count, ForkJoinMode(mode), buffer);
+    ForkJoin(constructThread, 0, slicesInfo.count, ForkJoinMode(mode));
     return buffer;
   }
 
   // Sequential fallback:
   ASSERT_SEQUENTIAL_IS_OK(mode);
   for (var i = 0; i < length; i++)
     UnsafePutElements(buffer, i, func(i));
   return buffer;
--- a/js/src/builtin/TypedObject.js
+++ b/js/src/builtin/TypedObject.js
@@ -1190,17 +1190,17 @@ function MapTypedParImplDepth1(inArray, 
                             outTypedObject: outTypedObject }));
   }
 
   // Below we will be adjusting offsets within the input to point at
   // successive entries; we'll need to know the offset of inArray
   // relative to its owner (which is often but not always 0).
   const inBaseOffset = TYPEDOBJ_BYTEOFFSET(inArray);
 
-  ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode), outArray);
+  ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode));
   return outArray;
 
   function mapThread(workerId, sliceStart, sliceEnd) {
     assert(TO_INT32(workerId) === workerId,
            "workerId not int: " + workerId);
     assert(workerId < pointers.length,
            "workerId too large: " + workerId + " >= " + pointers.length);
 
@@ -1246,27 +1246,21 @@ function MapTypedParImplDepth1(inArray, 
           if (outGrainTypeIsComplex)
             SetTypedObjectValue(outGrainType, outArray, outOffset, r);
           else
             UnsafePutElements(outArray, i, r);
         }
         inOffset += inGrainTypeSize;
         outOffset += outGrainTypeSize;
 
-#ifndef JSGC_FJGENERATIONAL
         // A transparent result type cannot contain references, and
         // hence there is no way for a pointer to a thread-local object
         // to escape.
-        //
-        // This has been disabled for the PJS generational collector
-        // as it probably has little effect in that setting and adds
-        // per-iteration cost.
         if (outGrainTypeIsTransparent)
           ClearThreadLocalArenas();
-#endif
       }
     }
 
     return sliceId;
   }
 
   return undefined;
 }
--- a/js/src/configure.in
+++ b/js/src/configure.in
@@ -3194,18 +3194,16 @@ dnl needed on a per-platform basis.
 JSGC_GENERATIONAL=1
 MOZ_ARG_DISABLE_BOOL(gcgenerational,
 [  --disable-gcgenerational Disable generational GC],
     JSGC_GENERATIONAL= ,
     JSGC_GENERATIONAL=1 )
 if test -n "$JSGC_GENERATIONAL"; then
     AC_DEFINE(JSGC_GENERATIONAL)
 fi
-JSGC_GENERATIONAL_CONFIGURED=$JSGC_GENERATIONAL
-AC_SUBST(JSGC_GENERATIONAL_CONFIGURED)
 
 dnl ========================================================
 dnl = Use exact stack rooting for GC
 dnl ========================================================
 dnl Use exact rooting by default in all shell builds. The top-level mozilla
 dnl configure.in will configure SpiderMonkey with --disable-exact-rooting as
 dnl needed on a per-platform basis.
 JSGC_USE_EXACT_ROOTING=1
deleted file mode 100644
--- a/js/src/gc/ForkJoinNursery-inl.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef gc_ForkJoinNursery_inl_h
-#define gc_ForkJoinNursery_inl_h
-
-#ifdef JSGC_FJGENERATIONAL
-
-#include "gc/ForkJoinNursery.h"
-
-namespace js {
-namespace gc {
-
-// For the following two predicates we can't check the attributes on
-// the chunk trailer because it's not known whether addr points into a
-// chunk.
-//
-// A couple of optimizations are possible if performance is an issue:
-//
-//  - The loop can be unrolled, and we can arrange for all array entries
-//    to be valid for this purpose so that the bound is constant.
-//  - The per-chunk test can be reduced to testing whether the high bits
-//    of the object pointer and the high bits of the chunk pointer are
-//    the same (and the latter value is essentially space[i]).
-//    Note, experiments with that do not show an improvement yet.
-//  - Taken together, those optimizations yield code that is one LOAD,
-//    one XOR, and one AND for each chunk, with the result being true
-//    iff the resulting value is zero.
-//  - We can have multiple versions of the predicates, and those that
-//    take known-good GCThing types can go directly to the attributes;
-//    it may be possible to ensure that more calls use GCThing types.
-//    Note, this requires the worker ID to be part of the chunk
-//    attribute bit vector.
-//
-// Performance may not be an issue as there may be few survivors of a
-// collection in the ForkJoinNursery and few objects will be tested.
-// If so then the bulk of the calls may come from the code that scans
-// the roots.  Behavior will be workload-dependent however.
-
-MOZ_ALWAYS_INLINE bool
-ForkJoinNursery::isInsideNewspace(const void *addr)
-{
-    uintptr_t p = reinterpret_cast<uintptr_t>(addr);
-    for (unsigned i = 0 ; i <= currentChunk_ ; i++) {
-        if (p >= newspace[i]->start() && p < newspace[i]->end())
-            return true;
-    }
-    return false;
-}
-
-MOZ_ALWAYS_INLINE bool
-ForkJoinNursery::isInsideFromspace(const void *addr)
-{
-    uintptr_t p = reinterpret_cast<uintptr_t>(addr);
-    for (unsigned i = 0 ; i < numFromspaceChunks_ ; i++) {
-        if (p >= fromspace[i]->start() && p < fromspace[i]->end())
-            return true;
-    }
-    return false;
-}
-
-template <typename T>
-MOZ_ALWAYS_INLINE bool
-ForkJoinNursery::getForwardedPointer(T **ref)
-{
-    JS_ASSERT(ref);
-    JS_ASSERT(isInsideFromspace(*ref));
-    const RelocationOverlay *overlay = reinterpret_cast<const RelocationOverlay *>(*ref);
-    if (!overlay->isForwarded())
-        return false;
-    // This static_cast from Cell* restricts T to valid (GC thing) types.
-    *ref = static_cast<T *>(overlay->forwardingAddress());
-    return true;
-}
-
-} // namespace gc
-} // namespace js
-
-#endif // JSGC_FJGENERATIONAL
-
-#endif // gc_ForkJoinNursery_inl_h
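
The deleted comment above sketches an optimization for isInsideNewspace/isInsideFromspace: with ChunkSize-aligned chunks, each per-chunk range test reduces to comparing the high bits of the candidate pointer with the chunk base — one load, one XOR, and one AND per chunk. A hedged sketch of that reduction (assumed constants, not the shipped code):

```cpp
#include <cstdint>

constexpr uintptr_t ChunkSize = uintptr_t(1) << 20;  // assumed chunk alignment
constexpr uintptr_t ChunkMask = ChunkSize - 1;

// p lies within the chunk iff the high bits of p and chunkBase agree,
// i.e. p is in [chunkBase, chunkBase + ChunkSize).
inline bool InChunk(uintptr_t p, uintptr_t chunkBase) {
    return ((p ^ chunkBase) & ~ChunkMask) == 0;
}
```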
deleted file mode 100644
--- a/js/src/gc/ForkJoinNursery.cpp
+++ /dev/null
@@ -1,907 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifdef JSGC_FJGENERATIONAL
-
-#include "gc/ForkJoinNursery-inl.h"
-
-#include "mozilla/IntegerPrintfMacros.h"
-
-#include "prmjtime.h"
-
-#include "gc/Heap.h"
-#include "jit/IonFrames.h"
-#include "vm/ArrayObject.h"
-#include "vm/ForkJoin.h"
-#include "vm/TypedArrayObject.h"
-
-#include "jsgcinlines.h"
-#include "gc/Nursery-inl.h"
-
-// The ForkJoinNursery provides an object nursery for movable object
-// types for one ForkJoin worker thread.  There is a one-to-one
-// correspondence between ForkJoinNursery and ForkJoinContext.
-//
-// For a general overview of how the ForkJoinNursery fits into the
-// overall PJS system, see the comment block in vm/ForkJoin.h.
-//
-//
-// Invariants on the ForkJoinNursery:
-//
-// Let "the tenured area" from the point of view of one
-// ForkJoinNursery comprise the global tenured area and the nursery's
-// owning worker's private tenured area.  Then:
-//
-// - There can be pointers from the tenured area into a ForkJoinNursery,
-//   and from the ForkJoinNursery into the tenured area
-//
-// - There *cannot* be a pointer from one ForkJoinNursery into
-//   another, or from one private tenured area into another, or from a
-//   ForkJoinNursery into another worker's private tenured area or vice
-//   versa, or from any ForkJoinNursery or private tenured area into
-//   the normal Nursery.
-//
-// For those invariants to hold the normal Nursery must be empty before
-// a ForkJoin section.
-//
-//
-// General description:
-//
-// The nursery maintains a space into which small, movable objects
-// are allocated.  Other objects are allocated directly in the private
-// tenured area for the worker.
-//
-// If an allocation request can't be satisfied because the nursery is
-// full then a /minor collection/ is triggered without bailouts.  This
-// collection copies nursery-allocated objects reachable from the
-// worker's roots into a fresh space.  Then the old space is
-// discarded.
-//
-// Nurseries are maintained in 1MB chunks.  If the live data in a
-// nursery after a collection exceeds some set fraction (currently
-// 1/3) then the nursery is grown, independently of other nurseries.
-//
-// There is an upper limit on the number of chunks in a nursery.  If
-// the live data in a nursery after a collection exceeds the set
-// fraction and the nursery can't grow, then the next collection will
-// be an /evacuating collection/.
-//
-// An evacuating collection copies nursery-allocated objects reachable
-// from the worker's roots into the worker's private tenured area.
-//
-// If an allocation request in the tenured area - whether the request
-// comes from the mutator or from the garbage collector during
-// evacuation - can't be satisfied because the tenured area is full,
-// then the worker bails out and triggers a full collection in the
-// ForkJoin worker's zone.  This is expected to happen very rarely in
-// practice.
-//
-// The roots for a collection in the ForkJoinNursery are: the frames
-// of the execution stack, any registered roots on the execution
-// stack, any objects in the private tenured area, and the ForkJoin
-// result object in the common tenured area.
-//
-// The entire private tenured area is considered to be rooted in order
-// not to have to run write barriers during the ForkJoin section.
-// During a minor or evacuating collection in a worker the GC will
-// step through the worker's tenured area, examining each object for
-// pointers into the nursery.
-//
-// The ForkJoinNursery contains its own object tracing machinery for
-// most of the types that can be allocated in the nursery.  But it
-// does not handle all types, and there are two places where the code
-// in ForkJoinNursery loses control of the tracing:
-//
-// - When calling clasp->trace() in traceObject()
-// - When calling MarkForkJoinStack() in forwardFromStack()
-//
-// In both cases:
-//
-// - We pass a ForkJoinNurseryCollectionTracer object with a callback
-//   to ForkJoinNursery::MinorGCCallback
-//
-// - We should only ever end up in MarkInternal() in Marking.cpp, in
-//   the case in that code that calls back to trc->callback.  We
-//   should /never/ end up in functions that trigger use of the mark
-//   stack internal to the general GC's marker.
-//
-// - Any function along the path to MarkInternal() that asks about
-//   whether something is in the nursery or is tenured /must/ be aware
-//   that there can be multiple nursery and tenured areas; assertions
-//   get this wrong a lot of the time and must be fixed when they do.
-//   In practice, such code either must have a case for each nursery
-//   kind or must use the IsInsideNursery(Cell*) method, which looks
-//   only at the chunk tag.
-//
-//
-// Terminological note:
-//
-// - While the mutator is running it is allocating in what's known as
-//   the nursery's "newspace".  The mutator may also allocate directly
-//   in the tenured space, but the tenured space is not part of the
-//   newspace.
-//
-// - While the gc is running, the previous "newspace" has been renamed
-//   as the gc's "fromspace", and the space that objects are copied
-//   into is known as the "tospace".  The tospace may be a nursery
-//   space (during a minor collection), or it may be a tenured space
-//   (during an evacuation collection), but it's always one or the
-//   other, never a combination.  After gc the fromspace is always
-//   discarded.
-//
-// - If the gc copies objects into a nursery tospace then this tospace
-//   becomes known as the "newspace" following gc.  Otherwise, a new
-//   newspace won't be needed (if the parallel section is finished) or
-//   can be created empty (if the gc just needed to evacuate).
-//
-//
-// Style note:
-//
-// - Use js_memcpy, malloc_, realloc_, and js_free uniformly, do not
-//   use PodCopy or pod_malloc: the type information for the latter is
-//   not always correct and surrounding code usually operates in terms
-//   of bytes, anyhow.
-//
-//   With power comes responsibility, etc: code that used pod_malloc
-//   gets safe size computation built-in; here we must handle that
-//   manually.
-
-namespace js {
-namespace gc {
-
-ForkJoinNursery::ForkJoinNursery(ForkJoinContext *cx, ForkJoinGCShared *shared, Allocator *tenured)
-  : cx_(cx)
-  , tenured_(tenured)
-  , shared_(shared)
-  , evacuationZone_(nullptr)
-  , currentStart_(0)
-  , currentEnd_(0)
-  , position_(0)
-  , currentChunk_(0)
-  , numActiveChunks_(0)
-  , numFromspaceChunks_(0)
-  , mustEvacuate_(false)
-  , isEvacuating_(false)
-  , movedSize_(0)
-  , head_(nullptr)
-  , tail_(&head_)
-  , hugeSlotsNew(0)
-  , hugeSlotsFrom(1)
-{
-    for ( size_t i=0 ; i < MaxNurseryChunks ; i++ ) {
-        newspace[i] = nullptr;
-        fromspace[i] = nullptr;
-    }
-    if (!hugeSlots[hugeSlotsNew].init() || !hugeSlots[hugeSlotsFrom].init())
-        CrashAtUnhandlableOOM("Cannot initialize PJS nursery");
-    initNewspace();             // This can fail to return
-}
-
-ForkJoinNursery::~ForkJoinNursery()
-{
-    for ( size_t i=0 ; i < numActiveChunks_ ; i++ ) {
-        if (newspace[i])
-            shared_->freeNurseryChunk(newspace[i]);
-    }
-}
-
-void
-ForkJoinNursery::minorGC()
-{
-    if (mustEvacuate_) {
-        mustEvacuate_ = false;
-        pjsCollection(Evacuate|Recreate);
-    } else {
-        pjsCollection(Collect|Recreate);
-    }
-}
-
-void
-ForkJoinNursery::evacuatingGC()
-{
-    pjsCollection(Evacuate);
-}
-
-#define TIME_START(name) int64_t timestampStart_##name = PRMJ_Now()
-#define TIME_END(name) int64_t timestampEnd_##name = PRMJ_Now()
-#define TIME_TOTAL(name) (timestampEnd_##name - timestampStart_##name)
-
-void
-ForkJoinNursery::pjsCollection(int op)
-{
-    JS_ASSERT((op & Collect) != (op & Evacuate));
-
-    bool evacuate = op & Evacuate;
-    bool recreate = op & Recreate;
-
-    JS_ASSERT(!isEvacuating_);
-    JS_ASSERT(!evacuationZone_);
-    JS_ASSERT(!head_);
-    JS_ASSERT(tail_ == &head_);
-
-    JSRuntime *const rt = shared_->runtime();
-    const unsigned currentNumActiveChunks_ = numActiveChunks_;
-    const char *msg = "";
-
-    JS_ASSERT(!rt->needsBarrier());
-
-    TIME_START(pjsCollection);
-
-    rt->incFJMinorCollecting();
-    if (evacuate) {
-        isEvacuating_ = true;
-        evacuationZone_ = shared_->zone();
-    }
-
-    flip();
-    if (recreate) {
-        initNewspace();
-        // newspace must be at least as large as fromSpace
-        numActiveChunks_ = currentNumActiveChunks_;
-    }
-    ForkJoinNurseryCollectionTracer trc(rt, this);
-    forwardFromRoots(&trc);
-    collectToFixedPoint(&trc);
-#ifdef JS_ION
-    jit::UpdateJitActivationsForMinorGC(TlsPerThreadData.get(), &trc);
-#endif
-    freeFromspace();
-
-    size_t live = movedSize_;
-    computeNurserySizeAfterGC(live, &msg);
-
-    sweepHugeSlots();
-    JS_ASSERT(hugeSlots[hugeSlotsFrom].empty());
-    JS_ASSERT_IF(isEvacuating_, hugeSlots[hugeSlotsNew].empty());
-
-    isEvacuating_ = false;
-    evacuationZone_ = nullptr;
-    head_ = nullptr;
-    tail_ = &head_;
-    movedSize_ = 0;
-
-    rt->decFJMinorCollecting();
-
-    TIME_END(pjsCollection);
-
-    // Note, the spew is awk-friendly, non-underlined words serve as markers:
-    //   FJGC _tag_ us _value_ copied _value_ size _value_ _message-word_ ...
-    shared_->spewGC("FJGC %s us %5" PRId64 "  copied %7" PRIu64 "  size %" PRIu64 "  %s",
-                    (evacuate ? "evacuate " : "collect  "),
-                    TIME_TOTAL(pjsCollection),
-                    (uint64_t)live,
-                    (uint64_t)numActiveChunks_*1024*1024,
-                    msg);
-}
-
-#undef TIME_START
-#undef TIME_END
-#undef TIME_TOTAL
-
-void
-ForkJoinNursery::computeNurserySizeAfterGC(size_t live, const char **msg)
-{
-    // Grow the nursery if it is too full.  Do not bother to shrink it - lazy
-    // chunk allocation means that a too-large nursery will not really be a problem,
-    // the entire nursery will be deallocated soon anyway.
-    if (live * NurseryLoadFactor > numActiveChunks_ * ForkJoinNurseryChunk::UsableSize) {
-        if (numActiveChunks_ < MaxNurseryChunks) {
-            while (numActiveChunks_ < MaxNurseryChunks &&
-                   live * NurseryLoadFactor > numActiveChunks_ * ForkJoinNurseryChunk::UsableSize)
-            {
-                ++numActiveChunks_;
-            }
-        } else {
-            // Evacuation will tend to drive us toward the cliff of a bailout GC, which
-            // is not good, probably worse than working within the thread at a higher load
-            // than desirable.
-            //
-            // Thus it's possible to be more sophisticated than this:
-            //
-            // - evacuate only after several minor GCs in a row exceeded the set load
-            // - evacuate only if significantly less space than required is available, eg,
-            //   if only 1/2 the required free space is available
-            *msg = "  Overfull, will evacuate next";
-            mustEvacuate_ = true;
-        }
-    }
-}
-
-void
-ForkJoinNursery::flip()
-{
-    size_t i;
-    for (i=0; i < numActiveChunks_; i++) {
-        if (!newspace[i])
-            break;
-        fromspace[i] = newspace[i];
-        newspace[i] = nullptr;
-        fromspace[i]->trailer.location = gc::ChunkLocationBitPJSFromspace;
-    }
-    numFromspaceChunks_ = i;
-    numActiveChunks_ = 0;
-
-    int tmp = hugeSlotsNew;
-    hugeSlotsNew = hugeSlotsFrom;
-    hugeSlotsFrom = tmp;
-
-    JS_ASSERT(hugeSlots[hugeSlotsNew].empty());
-}
-
-void
-ForkJoinNursery::freeFromspace()
-{
-    for (size_t i=0; i < numFromspaceChunks_; i++) {
-        shared_->freeNurseryChunk(fromspace[i]);
-        fromspace[i] = nullptr;
-    }
-    numFromspaceChunks_ = 0;
-}
-
-void
-ForkJoinNursery::initNewspace()
-{
-    JS_ASSERT(newspace[0] == nullptr);
-    JS_ASSERT(numActiveChunks_ == 0);
-
-    numActiveChunks_ = 1;
-    setCurrentChunk(0);
-}
-
-MOZ_ALWAYS_INLINE bool
-ForkJoinNursery::shouldMoveObject(void **thingp)
-{
-    // Note that thingp must really be a T** where T is some GCThing,
-    // ie, something that lives in a chunk (or nullptr).  This should
-    // be the case because the MinorGCCallback is only called on exact
-    // roots on the stack or slots within tenured objects and not
-    // on slot/element arrays that can be malloc'd; they are forwarded
-    // using the forwardBufferPointer() mechanism.
-    //
-    // The main reason for that restriction is so that we can call a
-    // method here that can check the chunk trailer for the cell (a
-    // future optimization).
-    Cell *cell = static_cast<Cell *>(*thingp);
-    return isInsideFromspace(cell) && !getForwardedPointer(thingp);
-}
-
-/* static */ void
-ForkJoinNursery::MinorGCCallback(JSTracer *trcArg, void **thingp, JSGCTraceKind traceKind)
-{
-    // traceKind can be all sorts of things, when we're marking from stack roots
-    ForkJoinNursery *nursery = static_cast<ForkJoinNurseryCollectionTracer *>(trcArg)->nursery_;
-    if (nursery->shouldMoveObject(thingp)) {
-        // When other types of objects become nursery-allocable then the static_cast
-        // to JSObject * will no longer be valid.
-        JS_ASSERT(traceKind == JSTRACE_OBJECT);
-        *thingp = nursery->moveObjectToTospace(static_cast<JSObject *>(*thingp));
-    }
-}
-
-void
-ForkJoinNursery::forwardFromRoots(ForkJoinNurseryCollectionTracer *trc)
-{
-    // There should be no other roots as a result of effect-freedom.
-    forwardFromUpdatable(trc);
-    forwardFromStack(trc);
-    forwardFromTenured(trc);
-}
-
-void
-ForkJoinNursery::forwardFromUpdatable(ForkJoinNurseryCollectionTracer *trc)
-{
-    JSObject *obj = shared_->updatable();
-    if (obj)
-        traceObject(trc, obj);
-}
-
-void
-ForkJoinNursery::forwardFromStack(ForkJoinNurseryCollectionTracer *trc)
-{
-    MarkForkJoinStack(trc);
-}
-
-void
-ForkJoinNursery::forwardFromTenured(ForkJoinNurseryCollectionTracer *trc)
-{
-    JSObject *objs[ArenaCellCount];
-    for (size_t k=0; k < FINALIZE_LIMIT; k++) {
-        AllocKind kind = (AllocKind)k;
-        if (!IsFJNurseryAllocable(kind))
-            continue;
-
-        // When non-JSObject types become nursery-allocable the assumptions in the
-        // loops below will no longer hold; other types than JSObject must be
-        // handled.
-        JS_ASSERT(kind <= FINALIZE_OBJECT_LAST);
-
-        ArenaIter ai;
-        ai.init(const_cast<Allocator *>(tenured_), kind);
-        for (; !ai.done(); ai.next()) {
-            // Do the walk in two steps to avoid problems resulting from allocating
-            // into the arena that's being walked: ArenaCellIter is not safe for that.
-            // It can happen during evacuation.
-            //
-            // ArenaCellIterUnderFinalize requires any free list to be flushed into
-            // its arena, and since we may allocate within traceObject() we must
-            // purge before each arena scan.  This is probably not very expensive,
-            // it's constant work, and inlined.
-            //
-            // Use ArenaCellIterUnderFinalize, not ...UnderGC, because that side-steps
-            // some assertions in the latter that are wrong for PJS collection.
-            size_t numObjs = 0;
-            tenured_->arenas.purge(kind);
-            for (ArenaCellIterUnderFinalize i(ai.get()); !i.done(); i.next())
-                objs[numObjs++] = i.get<JSObject>();
-            for (size_t i=0; i < numObjs; i++)
-                traceObject(trc, objs[i]);
-        }
-    }
-}
-
-/*static*/ void
-ForkJoinNursery::forwardBufferPointer(JSTracer *trc, HeapSlot **pSlotsElems)
-{
-    ForkJoinNursery *nursery = static_cast<ForkJoinNurseryCollectionTracer *>(trc)->nursery_;
-    HeapSlot *old = *pSlotsElems;
-
-    if (!nursery->isInsideFromspace(old))
-        return;
-
-    // If the elements buffer is zero length, the "first" item could be inside
-    // of the next object or past the end of the allocable area.  However,
-    // since we always store the runtime as the last word in a nursery chunk,
-    // isInsideFromspace will still be true, even if this zero-size allocation
-    // abuts the end of the allocable area. Thus, it is always safe to read the
-    // first word of |old| here.
-    *pSlotsElems = *reinterpret_cast<HeapSlot **>(old);
-    JS_ASSERT(!nursery->isInsideFromspace(*pSlotsElems));
-}
-
-void
-ForkJoinNursery::collectToFixedPoint(ForkJoinNurseryCollectionTracer *trc)
-{
-    for (RelocationOverlay *p = head_; p; p = p->next())
-        traceObject(trc, static_cast<JSObject *>(p->forwardingAddress()));
-}
-
-inline void
-ForkJoinNursery::setCurrentChunk(int index)
-{
-    JS_ASSERT((size_t)index < numActiveChunks_);
-    JS_ASSERT(!newspace[index]);
-
-    currentChunk_ = index;
-    ForkJoinNurseryChunk *c = shared_->allocateNurseryChunk();
-    if (!c)
-        CrashAtUnhandlableOOM("Cannot expand PJS nursery");
-    c->trailer.runtime = shared_->runtime();
-    c->trailer.location = gc::ChunkLocationBitPJSNewspace;
-    c->trailer.storeBuffer = nullptr;
-    currentStart_ = c->start();
-    currentEnd_ = c->end();
-    position_ = currentStart_;
-    newspace[index] = c;
-}
-
-void *
-ForkJoinNursery::allocate(size_t size)
-{
-    JS_ASSERT(position_ >= currentStart_);
-
-    if (currentEnd_ - position_ < size) {
-        if (currentChunk_ + 1 == numActiveChunks_)
-            return nullptr;
-        setCurrentChunk(currentChunk_ + 1);
-    }
-
-    void *thing = reinterpret_cast<void *>(position_);
-    position_ += size;
-
-    JS_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size);
-    return thing;
-}
-
-JSObject *
-ForkJoinNursery::allocateObject(size_t baseSize, size_t numDynamic, bool& tooLarge)
-{
-    // Ensure there's enough space to replace the contents with a RelocationOverlay.
-    JS_ASSERT(baseSize >= sizeof(js::gc::RelocationOverlay));
-
-    // Too-large slot arrays cannot be accommodated.
-    if (numDynamic > MaxNurserySlots) {
-        tooLarge = true;
-        return nullptr;
-    }
-
-    // Allocate slots contiguously after the object.
-    size_t totalSize = baseSize + sizeof(HeapSlot) * numDynamic;
-    JSObject *obj = static_cast<JSObject *>(allocate(totalSize));
-    if (!obj) {
-        tooLarge = false;
-        return nullptr;
-    }
-    obj->setInitialSlots(numDynamic
-                         ? reinterpret_cast<HeapSlot *>(size_t(obj) + baseSize)
-                         : nullptr);
-    return obj;
-}
-
-HeapSlot *
-ForkJoinNursery::allocateSlots(JSObject *obj, uint32_t nslots)
-{
-    JS_ASSERT(obj);
-    JS_ASSERT(nslots > 0);
-
-    if (nslots & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
-        return nullptr;
-    size_t size = nslots * sizeof(HeapSlot);
-
-    if (!isInsideNewspace(obj))
-        return reinterpret_cast<HeapSlot *>(cx_->malloc_(size));
-
-    if (nslots > MaxNurserySlots)
-        return allocateHugeSlots(nslots);
-
-    HeapSlot *slots = static_cast<HeapSlot *>(allocate(size));
-    if (slots)
-        return slots;
-
-    return allocateHugeSlots(nslots);
-}
-
-HeapSlot *
-ForkJoinNursery::reallocateSlots(JSObject *obj, HeapSlot *oldSlots,
-                                 uint32_t oldCount, uint32_t newCount)
-{
-    if (newCount & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
-        return nullptr;
-
-    size_t oldSize = oldCount * sizeof(HeapSlot);
-    size_t newSize = newCount * sizeof(HeapSlot);
-
-    if (!isInsideNewspace(obj)) {
-        JS_ASSERT_IF(oldSlots, !isInsideNewspace(oldSlots));
-        return static_cast<HeapSlot *>(cx_->realloc_(oldSlots, oldSize, newSize));
-    }
-
-    if (!isInsideNewspace(oldSlots))
-        return reallocateHugeSlots(oldSlots, oldSize, newSize);
-
-    // No-op if we're shrinking, we can't make use of the freed portion.
-    if (newCount < oldCount)
-        return oldSlots;
-
-    HeapSlot *newSlots = allocateSlots(obj, newCount);
-    if (!newSlots)
-        return nullptr;
-
-    js_memcpy(newSlots, oldSlots, oldSize);
-    return newSlots;
-}
-
-ObjectElements *
-ForkJoinNursery::allocateElements(JSObject *obj, uint32_t nelems)
-{
-    JS_ASSERT(nelems >= ObjectElements::VALUES_PER_HEADER);
-    return reinterpret_cast<ObjectElements *>(allocateSlots(obj, nelems));
-}
-
-ObjectElements *
-ForkJoinNursery::reallocateElements(JSObject *obj, ObjectElements *oldHeader,
-                                    uint32_t oldCount, uint32_t newCount)
-{
-    HeapSlot *slots = reallocateSlots(obj, reinterpret_cast<HeapSlot *>(oldHeader),
-                                      oldCount, newCount);
-    return reinterpret_cast<ObjectElements *>(slots);
-}
-
-void
-ForkJoinNursery::freeSlots(HeapSlot *slots)
-{
-    if (!isInsideNewspace(slots)) {
-        hugeSlots[hugeSlotsNew].remove(slots);
-        js_free(slots);
-    }
-}
-
-HeapSlot *
-ForkJoinNursery::allocateHugeSlots(size_t nslots)
-{
-    if (nslots & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
-        return nullptr;
-
-    size_t size = nslots * sizeof(HeapSlot);
-    HeapSlot *slots = reinterpret_cast<HeapSlot *>(cx_->malloc_(size));
-    if (!slots)
-        return slots;
-
-    // If this put fails, we will only leak the slots.
-    (void)hugeSlots[hugeSlotsNew].put(slots);
-    return slots;
-}
-
-HeapSlot *
-ForkJoinNursery::reallocateHugeSlots(HeapSlot *oldSlots, uint32_t oldSize, uint32_t newSize)
-{
-    HeapSlot *newSlots = static_cast<HeapSlot *>(cx_->realloc_(oldSlots, oldSize, newSize));
-    if (!newSlots)
-        return newSlots;
-
-    if (oldSlots != newSlots) {
-        hugeSlots[hugeSlotsNew].remove(oldSlots);
-        // If this put fails, we will only leak the slots.
-        (void)hugeSlots[hugeSlotsNew].put(newSlots);
-    }
-    return newSlots;
-}
-
-void
-ForkJoinNursery::sweepHugeSlots()
-{
-    for (HugeSlotsSet::Range r = hugeSlots[hugeSlotsFrom].all(); !r.empty(); r.popFront())
-        js_free(r.front());
-    hugeSlots[hugeSlotsFrom].clear();
-}
-
-MOZ_ALWAYS_INLINE void
-ForkJoinNursery::traceObject(ForkJoinNurseryCollectionTracer *trc, JSObject *obj)
-{
-    const Class *clasp = obj->getClass();
-    if (clasp->trace)
-        clasp->trace(trc, obj);
-
-    if (!obj->isNative())
-        return;
-
-    if (!obj->hasEmptyElements())
-        markSlots(obj->getDenseElements(), obj->getDenseInitializedLength());
-
-    HeapSlot *fixedStart, *fixedEnd, *dynStart, *dynEnd;
-    obj->getSlotRange(0, obj->slotSpan(), &fixedStart, &fixedEnd, &dynStart, &dynEnd);
-    markSlots(fixedStart, fixedEnd);
-    markSlots(dynStart, dynEnd);
-}
-
-MOZ_ALWAYS_INLINE void
-ForkJoinNursery::markSlots(HeapSlot *vp, uint32_t nslots)
-{
-    markSlots(vp, vp + nslots);
-}
-
-MOZ_ALWAYS_INLINE void
-ForkJoinNursery::markSlots(HeapSlot *vp, HeapSlot *end)
-{
-    for (; vp != end; ++vp)
-        markSlot(vp);
-}
-
-MOZ_ALWAYS_INLINE void
-ForkJoinNursery::markSlot(HeapSlot *slotp)
-{
-    if (!slotp->isObject())
-        return;
-
-    JSObject *obj = &slotp->toObject();
-    if (!isInsideFromspace(obj))
-        return;
-
-    if (getForwardedPointer(&obj)) {
-        slotp->unsafeGet()->setObject(*obj);
-        return;
-    }
-
-    JSObject *moved = static_cast<JSObject *>(moveObjectToTospace(obj));
-    slotp->unsafeGet()->setObject(*moved);
-}
-
-AllocKind
-ForkJoinNursery::getObjectAllocKind(JSObject *obj)
-{
-    if (obj->is<ArrayObject>()) {
-        JS_ASSERT(obj->numFixedSlots() == 0);
-
-        // Use minimal size object if we are just going to copy the pointer.
-        if (!isInsideFromspace((void *)obj->getElementsHeader()))
-            return FINALIZE_OBJECT0_BACKGROUND;
-
-        size_t nelements = obj->getDenseCapacity();
-        return GetBackgroundAllocKind(GetGCArrayKind(nelements));
-    }
-
-    if (obj->is<JSFunction>())
-        return obj->as<JSFunction>().getAllocKind();
-
-    AllocKind kind = GetGCObjectFixedSlotsKind(obj->numFixedSlots());
-    JS_ASSERT(!IsBackgroundFinalized(kind));
-    JS_ASSERT(CanBeFinalizedInBackground(kind, obj->getClass()));
-    return GetBackgroundAllocKind(kind);
-}
-
-void *
-ForkJoinNursery::allocateInTospace(gc::AllocKind thingKind)
-{
-    size_t thingSize = Arena::thingSize(thingKind);
-    if (isEvacuating_) {
-        void *t = tenured_->arenas.allocateFromFreeList(thingKind, thingSize);
-        if (t)
-            return t;
-        tenured_->arenas.checkEmptyFreeList(thingKind);
-        // This call may return NULL but should do so only if memory
-        // is truly exhausted.  However, allocateFromArena() can fail
-        // either because memory is exhausted or if the allocation
-        // budget is used up.  There is a guard in
-        // Chunk::allocateArena() against the latter case.
-        return tenured_->arenas.allocateFromArena(evacuationZone_, thingKind);
-    } else {
-        // Nursery allocation will never fail during GC - apart from
-        // true OOM - since newspace is at least as large as
-        // fromspace; true OOM is caught and signaled within
-        // ForkJoinNursery::setCurrentChunk().
-        return allocate(thingSize);
-    }
-}
-
-void *
-ForkJoinNursery::allocateInTospace(size_t nelem, size_t elemSize)
-{
-    if (isEvacuating_)
-        return evacuationZone_->malloc_(nelem * elemSize);
-    return allocate(nelem * elemSize);
-}
-
-MOZ_ALWAYS_INLINE void
-ForkJoinNursery::insertIntoFixupList(RelocationOverlay *entry)
-{
-    *tail_ = entry;
-    tail_ = &entry->next_;
-    *tail_ = nullptr;
-}
-
-void *
-ForkJoinNursery::moveObjectToTospace(JSObject *src)
-{
-    AllocKind dstKind = getObjectAllocKind(src);
-    JSObject *dst = static_cast<JSObject *>(allocateInTospace(dstKind));
-    if (!dst)
-        CrashAtUnhandlableOOM("Failed to allocate object while moving object.");
-
-    movedSize_ += copyObjectToTospace(dst, src, dstKind);
-
-    RelocationOverlay *overlay = reinterpret_cast<RelocationOverlay *>(src);
-    overlay->forwardTo(dst);
-    insertIntoFixupList(overlay);
-
-    return static_cast<void *>(dst);
-}
-
-size_t
-ForkJoinNursery::copyObjectToTospace(JSObject *dst, JSObject *src, AllocKind dstKind)
-{
-    size_t srcSize = Arena::thingSize(dstKind);
-    size_t movedSize = srcSize;
-
-    // Arrays do not necessarily have the same AllocKind between src and dst.
-    // We deal with this by copying elements manually, possibly re-inlining
-    // them if there is adequate room inline in dst.
-    if (src->is<ArrayObject>())
-        srcSize = movedSize = sizeof(ObjectImpl);
-
-    js_memcpy(dst, src, srcSize);
-    movedSize += copySlotsToTospace(dst, src, dstKind);
-    movedSize += copyElementsToTospace(dst, src, dstKind);
-
-    if (src->is<TypedArrayObject>())
-        dst->setPrivate(dst->fixedData(TypedArrayObject::FIXED_DATA_START));
-
-    // The shape's list head may point into the old object.
-    if (&src->shape_ == dst->shape_->listp) {
-        JS_ASSERT(cx_->isThreadLocal(dst->shape_.get()));
-        dst->shape_->listp = &dst->shape_;
-    }
-
-    return movedSize;
-}
-
-size_t
-ForkJoinNursery::copySlotsToTospace(JSObject *dst, JSObject *src, AllocKind dstKind)
-{
-    // Fixed slots have already been copied over.
-    if (!src->hasDynamicSlots())
-        return 0;
-
-    if (!isInsideFromspace(src->slots)) {
-        hugeSlots[hugeSlotsFrom].remove(src->slots);
-        if (!isEvacuating_)
-            hugeSlots[hugeSlotsNew].put(src->slots);
-        return 0;
-    }
-
-    size_t count = src->numDynamicSlots();
-    dst->slots = reinterpret_cast<HeapSlot *>(allocateInTospace(count, sizeof(HeapSlot)));
-    if (!dst->slots)
-        CrashAtUnhandlableOOM("Failed to allocate slots while moving object.");
-    js_memcpy(dst->slots, src->slots, count * sizeof(HeapSlot));
-    setSlotsForwardingPointer(src->slots, dst->slots, count);
-    return count * sizeof(HeapSlot);
-}
-
-size_t
-ForkJoinNursery::copyElementsToTospace(JSObject *dst, JSObject *src, AllocKind dstKind)
-{
-    if (src->hasEmptyElements())
-        return 0;
-
-    ObjectElements *srcHeader = src->getElementsHeader();
-    ObjectElements *dstHeader;
-
-    // TODO Bug 874151: Prefer to put element data inline if we have space.
-    // (Note, not a correctness issue.)
-    if (!isInsideFromspace(srcHeader)) {
-        JS_ASSERT(src->elements == dst->elements);
-        hugeSlots[hugeSlotsFrom].remove(reinterpret_cast<HeapSlot*>(srcHeader));
-        if (!isEvacuating_)
-            hugeSlots[hugeSlotsNew].put(reinterpret_cast<HeapSlot*>(srcHeader));
-        return 0;
-    }
-
-    size_t nslots = ObjectElements::VALUES_PER_HEADER + srcHeader->capacity;
-
-    // Unlike other objects, Arrays can have fixed elements.
-    if (src->is<ArrayObject>() && nslots <= GetGCKindSlots(dstKind)) {
-        dst->setFixedElements();
-        dstHeader = dst->getElementsHeader();
-        js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
-        setElementsForwardingPointer(srcHeader, dstHeader, nslots);
-        return nslots * sizeof(HeapSlot);
-    }
-
-    JS_ASSERT(nslots >= 2);
-    dstHeader = reinterpret_cast<ObjectElements *>(allocateInTospace(nslots, sizeof(HeapSlot)));
-    if (!dstHeader)
-        CrashAtUnhandlableOOM("Failed to allocate elements while moving object.");
-    js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
-    setElementsForwardingPointer(srcHeader, dstHeader, nslots);
-    dst->elements = dstHeader->elements();
-    return nslots * sizeof(HeapSlot);
-}
-
-void
-ForkJoinNursery::setSlotsForwardingPointer(HeapSlot *oldSlots, HeapSlot *newSlots, uint32_t nslots)
-{
-    JS_ASSERT(nslots > 0);
-    JS_ASSERT(isInsideFromspace(oldSlots));
-    JS_ASSERT(!isInsideFromspace(newSlots));
-    *reinterpret_cast<HeapSlot **>(oldSlots) = newSlots;
-}
-
-void
-ForkJoinNursery::setElementsForwardingPointer(ObjectElements *oldHeader, ObjectElements *newHeader,
-                                             uint32_t nelems)
-{
-    // If the JIT has hoisted a zero length pointer, then we do not need to
-    // relocate it because reads and writes to/from this pointer are invalid.
-    if (nelems - ObjectElements::VALUES_PER_HEADER < 1)
-        return;
-    JS_ASSERT(isInsideFromspace(oldHeader));
-    JS_ASSERT(!isInsideFromspace(newHeader));
-    *reinterpret_cast<HeapSlot **>(oldHeader->elements()) = newHeader->elements();
-}
-
-ForkJoinNurseryCollectionTracer::ForkJoinNurseryCollectionTracer(JSRuntime *rt,
-                                                                 ForkJoinNursery *nursery)
-  : JSTracer(rt, ForkJoinNursery::MinorGCCallback, TraceWeakMapKeysValues)
-  , nursery_(nursery)
-{
-    JS_ASSERT(rt);
-    JS_ASSERT(nursery);
-}
-
-} // namespace gc
-} // namespace js
-
-#endif /* JSGC_FJGENERATIONAL */
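
moveObjectToTospace and collectToFixedPoint above form a Cheney-style copying loop: each moved object is overwritten with a RelocationOverlay carrying the forwarding address and an intrusive next link, and the collector walks that list to a fixed point, since tracing a copy can move more objects and extend the list. A simplified sketch of the list discipline, with hypothetical types standing in for RelocationOverlay and the nursery's head_/tail_ fields:

```cpp
// Hypothetical, simplified stand-ins for gc::RelocationOverlay.
struct Overlay {
    void *forwardTo;   // tospace address of the moved object
    Overlay *next;     // intrusive link through all moved objects
};

struct FixupList {
    Overlay *head = nullptr;
    Overlay **tail = &head;

    // Mirrors insertIntoFixupList: append a freshly moved object.
    void push(Overlay *entry) {
        *tail = entry;
        tail = &entry->next;
        *tail = nullptr;
    }

    // Mirrors collectToFixedPoint: tracing a copy may push more entries,
    // and the traversal picks them up until the list is drained.
    template <typename TraceFn>
    void drain(TraceFn trace) {
        for (Overlay *p = head; p; p = p->next)
            trace(p->forwardTo);
    }
};
```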
deleted file mode 100644
--- a/js/src/gc/ForkJoinNursery.h
+++ /dev/null
@@ -1,297 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- *
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef gc_ForkJoinNursery_h
-#define gc_ForkJoinNursery_h
-
-#ifdef JSGC_FJGENERATIONAL
-
-#ifndef JSGC_GENERATIONAL
-#error "JSGC_GENERATIONAL is required for the ForkJoinNursery"
-#endif
-#ifndef JS_THREADSAFE
-#error "JS_THREADSAFE is required for the ForkJoinNursery"
-#endif
-#ifndef JS_ION
-#error "JS_ION is required for the ForkJoinNursery"
-#endif
-
-#include "jsalloc.h"
-#include "jspubtd.h"
-
-#include "gc/Heap.h"
-#include "gc/Memory.h"
-#include "gc/Nursery.h"
-
-#include "js/HashTable.h"
-#include "js/TracingAPI.h"
-
-namespace js {
-class ObjectElements;
-class HeapSlot;
-class ForkJoinShared;
-}
-
-namespace js {
-namespace gc {
-
-class ForkJoinGCShared;
-class ForkJoinNursery;
-class ForkJoinNurseryCollectionTracer;
-
-// This tracer comes into play when a class has a tracer function, but
-// is otherwise unused and has no other functionality.
-//
-// It could look like this could be merged into ForkJoinNursery by
-// making the latter derive from JSTracer; I've decided to keep them
-// separate for now, since it allows for multiple instantiations of
-// this class with different parameters, for different purposes.  That
-// may change.
-
-class ForkJoinNurseryCollectionTracer : public JSTracer
-{
-    friend class ForkJoinNursery;
-
-  public:
-    ForkJoinNurseryCollectionTracer(JSRuntime *rt, ForkJoinNursery *nursery);
-
-  private:
-    ForkJoinNursery *const nursery_;
-};
-
-// The layout for a chunk used by the ForkJoinNursery.
-
-struct ForkJoinNurseryChunk
-{
-    // The amount of space in the mapped nursery available to allocations
-    static const size_t UsableSize = ChunkSize - sizeof(ChunkTrailer);
-
-    char data[UsableSize];
-    ChunkTrailer trailer;
-    uintptr_t start() { return uintptr_t(&data); }
-    uintptr_t end() { return uintptr_t(&trailer); }
-};
-
-// A GC adapter to ForkJoinShared, which is a complex class hidden
-// inside ForkJoin.cpp.
-
-class ForkJoinGCShared
-{
-  public:
-    ForkJoinGCShared(ForkJoinShared *shared) : shared_(shared) {}
-
-    JSRuntime *runtime();
-    JS::Zone *zone();
-
-    // The updatable object (the ForkJoin result array), or nullptr.
-    JSObject *updatable();
-
-    // allocateNurseryChunk() returns nullptr on oom.
-    ForkJoinNurseryChunk *allocateNurseryChunk();
-
-    // p must have been obtained through allocateNurseryChunk.
-    void freeNurseryChunk(ForkJoinNurseryChunk *p);
-
-    // GC statistics output.
-    void spewGC(const char *fmt, ...);
-
-  private:
-    ForkJoinShared *const shared_;
-};
-
-// There is one ForkJoinNursery per ForkJoin worker.
-//
-// See the comment in ForkJoinNursery.cpp about how it works.
-
-class ForkJoinNursery
-{
-    friend class ForkJoinNurseryCollectionTracer;
-    friend class RelocationOverlay;
-
-    static_assert(sizeof(ForkJoinNurseryChunk) == ChunkSize,
-                  "ForkJoinNursery chunk size must match Chunk size.");
-  public:
-    ForkJoinNursery(ForkJoinContext *cx, ForkJoinGCShared *shared, Allocator *tenured);
-    ~ForkJoinNursery();
-
-    // Perform a collection within the nursery, and if that for some reason
-    // cannot be done then perform an evacuating collection.
-    void minorGC();
-
-    // Evacuate the live data from the nursery into the tenured area;
-    // do not recreate the nursery.
-    void evacuatingGC();
-
-    // Allocate an object with a number of dynamic slots.  Returns an
-    // object, or nullptr in one of two circumstances:
-    //
-    //  - The nursery was full, the collector must be run, and the
-    //    allocation must be retried.  tooLarge is set to 'false'.
-    //  - The number of dynamic slots requested is too large and
-    //    the object should be allocated in the tenured area.
-    //    tooLarge is set to 'true'.
-    //
-    // This method will never run the garbage collector.
-    JSObject *allocateObject(size_t size, size_t numDynamic, bool& tooLarge);
-
-    // Allocate and reallocate slot and element arrays for existing
-    // objects.  These will create or maintain the arrays within the
-    // nursery if possible and appropriate, and otherwise will fall
-    // back to allocating in the tenured area.  They will return
-    // nullptr only if memory is exhausted.  If the reallocate methods
-    // return nullptr then the old array is still live.
-    //
-    // These methods will never run the garbage collector.
-    HeapSlot *allocateSlots(JSObject *obj, uint32_t nslots);
-    HeapSlot *reallocateSlots(JSObject *obj, HeapSlot *oldSlots,
-                              uint32_t oldCount, uint32_t newCount);
-    ObjectElements *allocateElements(JSObject *obj, uint32_t nelems);
-    ObjectElements *reallocateElements(JSObject *obj, ObjectElements *oldHeader,
-                                       uint32_t oldCount, uint32_t newCount);
-
-    // Free a slots array.
-    void freeSlots(HeapSlot *slots);
-
-    // The method embedded in a ForkJoinNurseryCollectionTracer
-    static void MinorGCCallback(JSTracer *trcArg, void **thingp, JSGCTraceKind kind);
-
-    // A method called from the JIT frame updater
-    static void forwardBufferPointer(JSTracer *trc, HeapSlot **pSlotsElems);
-
-    // Return true iff obj is inside the current newspace.
-    MOZ_ALWAYS_INLINE bool isInsideNewspace(const void *obj);
-
-    // Return true iff collection is ongoing and obj is inside the current fromspace.
-    MOZ_ALWAYS_INLINE bool isInsideFromspace(const void *obj);
-
-    template <typename T>
-    MOZ_ALWAYS_INLINE bool getForwardedPointer(T **ref);
-
-    static size_t offsetOfPosition() {
-        return offsetof(ForkJoinNursery, position_);
-    }
-
-    static size_t offsetOfCurrentEnd() {
-        return offsetof(ForkJoinNursery, currentEnd_);
-    }
-
-  private:
-    // The largest slot arrays that will be allocated in the nursery.
-    // On the one hand we want this limit to be large, to avoid
-    // managing many hugeSlots.  On the other hand, slot arrays have
-    // to be copied during GC and will induce some external
-    // fragmentation in the nursery at chunk boundaries.
-    static const size_t MaxNurserySlots = 2048;
-
-    // The fixed limit on the per-worker nursery, in chunks.
-    //
-    // For production runs, 16 may be good - programs that need it,
-    // really need it, and as allocation is lazy programs that don't
-    // need it won't suck up a lot of resources.
-    //
-    // For debugging runs, 1 or 2 may sometimes be good, because it
-    // will more easily provoke bugs in the evacuation paths.
-    static const size_t MaxNurseryChunks = 16;
-
-    // The inverse load factor in the per-worker nursery.  Grow the nursery
-    // or schedule an evacuation if more than 1/NurseryLoadFactor of the
-    // current nursery size is live after minor GC.
-    static const int NurseryLoadFactor = 3;
-
-    // Allocate an object in the nursery's newspace.  Return nullptr
-    // when allocation fails (ie the object can't fit in the current
-    // chunk and the number of chunks is at its maximum).
-    void *allocate(size_t size);
-
-    // Allocate an external slot array and register it with this nursery.
-    HeapSlot *allocateHugeSlots(size_t nslots);
-
-    // Reallocate an external slot array, unregister the old array and
-    // register the new array.  If the allocation fails then leave
-    // everything unchanged.
-    HeapSlot *reallocateHugeSlots(HeapSlot *oldSlots, uint32_t oldSize, uint32_t newSize);
-
-    // Walk the list of registered slot arrays and free them all.
-    void sweepHugeSlots();
-
-    // Set the position/end pointers to correspond to the numbered
-    // chunk.
-    void setCurrentChunk(int index);
-
-    enum PJSCollectionOp {
-        Evacuate = 1,
-        Collect = 2,
-        Recreate = 4
-    };
-
-    // Misc GC internals.
-    void pjsCollection(int op /* A combination of PJSCollectionOp bits */);
-    void initNewspace();
-    void flip();
-    void forwardFromRoots(ForkJoinNurseryCollectionTracer *trc);
-    void forwardFromUpdatable(ForkJoinNurseryCollectionTracer *trc);
-    void forwardFromStack(ForkJoinNurseryCollectionTracer *trc);
-    void forwardFromTenured(ForkJoinNurseryCollectionTracer *trc);
-    void collectToFixedPoint(ForkJoinNurseryCollectionTracer *trc);
-    void freeFromspace();
-    void computeNurserySizeAfterGC(size_t live, const char **msg);
-
-    AllocKind getObjectAllocKind(JSObject *src);
-    void *allocateInTospace(AllocKind thingKind);
-    void *allocateInTospace(size_t nelem, size_t elemSize);
-    MOZ_ALWAYS_INLINE bool shouldMoveObject(void **thingp);
-    void *moveObjectToTospace(JSObject *src);
-    size_t copyObjectToTospace(JSObject *dst, JSObject *src, gc::AllocKind dstKind);
-    size_t copyElementsToTospace(JSObject *dst, JSObject *src, gc::AllocKind dstKind);
-    size_t copySlotsToTospace(JSObject *dst, JSObject *src, gc::AllocKind dstKind);
-    MOZ_ALWAYS_INLINE void insertIntoFixupList(RelocationOverlay *entry);
-
-    void setSlotsForwardingPointer(HeapSlot *oldSlots, HeapSlot *newSlots, uint32_t nslots);
-    void setElementsForwardingPointer(ObjectElements *oldHeader, ObjectElements *newHeader,
-                                      uint32_t nelems);
-
-    MOZ_ALWAYS_INLINE void traceObject(ForkJoinNurseryCollectionTracer *trc, JSObject *obj);
-    MOZ_ALWAYS_INLINE void markSlots(HeapSlot *vp, uint32_t nslots);
-    MOZ_ALWAYS_INLINE void markSlots(HeapSlot *vp, HeapSlot *end);
-    MOZ_ALWAYS_INLINE void markSlot(HeapSlot *slotp);
-
-    ForkJoinContext *const cx_;      // The context that owns this nursery
-    Allocator *const tenured_;       // Private tenured area
-    ForkJoinGCShared *const shared_; // Common to all nurseries belonging to a ForkJoin instance
-    JS::Zone *evacuationZone_;       // During evacuating GC this is non-NULL: the Zone we
-                                     // allocate into
-
-    uintptr_t currentStart_;         // Start of current area in newspace
-    uintptr_t currentEnd_;           // End of current area in newspace (last byte + 1)
-    uintptr_t position_;             // Next free byte in current newspace chunk
-    unsigned currentChunk_;          // Index of current / highest numbered chunk in newspace
-    unsigned numActiveChunks_;       // Number of active chunks in newspace; not all may be allocated
-    unsigned numFromspaceChunks_;    // Number of active chunks in fromspace, all are allocated
-    bool mustEvacuate_;              // Set to true after GC when the /next/ minor GC must evacuate
-
-    bool isEvacuating_;              // Set to true when the current minor GC is evacuating
-    size_t movedSize_;               // Bytes copied during the current minor GC
-    RelocationOverlay *head_;        // First node of relocation list
-    RelocationOverlay **tail_;       // Pointer to 'next_' field of last node of relocation list
-
-    typedef HashSet<HeapSlot *, PointerHasher<HeapSlot *, 3>, SystemAllocPolicy> HugeSlotsSet;
-
-    HugeSlotsSet hugeSlots[2];       // Hash sets for huge slots
-
-    int hugeSlotsNew;                // Huge slot arrays in the newspace (index in hugeSlots)
-    int hugeSlotsFrom;               // Huge slot arrays in the fromspace (index in hugeSlots)
-
-    ForkJoinNurseryChunk *newspace[MaxNurseryChunks];  // All allocation happens here
-    ForkJoinNurseryChunk *fromspace[MaxNurseryChunks]; // Meaningful during GC: the previous newspace
-};
-
-} // namespace gc
-} // namespace js
-
-#endif // JSGC_FJGENERATIONAL
-
-#endif // gc_ForkJoinNursery_h
--- a/js/src/gc/GCInternals.h
+++ b/js/src/gc/GCInternals.h
@@ -14,23 +14,16 @@
 #include "vm/Runtime.h"
 
 namespace js {
 namespace gc {
 
 void
 MarkPersistentRootedChains(JSTracer *trc);
 
-#ifdef JSGC_FJGENERATIONAL
-class ForkJoinNurseryCollectionTracer;
-
-void
-MarkForkJoinStack(ForkJoinNurseryCollectionTracer *trc);
-#endif
-
 class AutoCopyFreeListToArenas
 {
     JSRuntime *runtime;
     ZoneSelector selector;
 
   public:
     AutoCopyFreeListToArenas(JSRuntime *rt, ZoneSelector selector);
     ~AutoCopyFreeListToArenas();
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -462,26 +462,16 @@ class GCRuntime
     volatile js::HeapState heapState;
 
 #ifdef JSGC_GENERATIONAL
     js::Nursery           nursery;
     js::gc::StoreBuffer   storeBuffer;
 #endif
 
     /*
-     * ForkJoin workers enter and leave GC independently; this counter
-     * tracks the number that are currently in GC.
-     *
-     * Technically this should be #ifdef JSGC_FJGENERATIONAL but that
-     * affects the observed size of JSRuntime in problematic ways; see
-     * the note in vm/ThreadPool.h.
-     */
-    mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> fjCollectionCounter;
-
-    /*
      * These options control the zealousness of the GC. The fundamental values
      * are nextScheduled and gcDebugCompartmentGC. At every allocation,
      * nextScheduled is decremented. When it reaches zero, we do either a
      * full or a compartmental GC, based on debugCompartmentGC.
      *
      * At this point, if zeal_ is one of the types that trigger periodic
      * collection, then nextScheduled is reset to the value of
      * zealFrequency. Otherwise, no additional GCs take place.
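      * (For example, if zeal_ selects periodic collection and zealFrequency
      * is 100, a zeal GC is triggered roughly every 100 allocations.)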
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -166,26 +166,16 @@ CheckMarkedThing(JSTracer *trc, T **thin
                     thing, thingp, TraceKindAsAscii(MapTypeToTraceKind<T>::kind), label);
         MOZ_ReportAssertionFailure(msgbuf, __FILE__, __LINE__);
         MOZ_CRASH();
     }
 #endif
     JS_ASSERT(*thingp);
 
 #ifdef DEBUG
-#ifdef JSGC_FJGENERATIONAL
-    /*
-     * The code below (runtimeFromMainThread(), etc) makes assumptions
-     * not valid for the ForkJoin worker threads during ForkJoin GGC,
-     * so just bail.
-     */
-    if (ForkJoinContext::current())
-        return;
-#endif
-
     /* This function uses data that's not available in the nursery. */
     if (IsInsideNursery(thing))
         return;
 
     /*
      * Permanent atoms are not associated with this runtime, but will be ignored
      * during marking.
      */
@@ -258,26 +248,16 @@ SetMaybeAliveFlag(JSScript *thing)
 template<typename T>
 static void
 MarkInternal(JSTracer *trc, T **thingp)
 {
     CheckMarkedThing(trc, thingp);
     T *thing = *thingp;
 
     if (!trc->callback) {
-#ifdef JSGC_FJGENERATIONAL
-        /*
-         * This case should never be reached from PJS collections as
-         * those should all be using a ForkJoinNurseryCollectionTracer
-         * that carries a callback.
-         */
-        JS_ASSERT(!ForkJoinContext::current());
-        JS_ASSERT(!trc->runtime()->isFJMinorCollecting());
-#endif
-
         /*
          * We may mark a Nursery thing outside the context of the
          * MinorCollectionTracer because of a pre-barrier. The pre-barrier is
          * not needed in this case because we perform a minor collection before
          * each incremental slice.
          */
         if (IsInsideNursery(thing))
             return;
@@ -396,35 +376,21 @@ namespace gc {
 
 template <typename T>
 static bool
 IsMarked(T **thingp)
 {
     JS_ASSERT(thingp);
     JS_ASSERT(*thingp);
 #ifdef JSGC_GENERATIONAL
-    JSRuntime* rt = (*thingp)->runtimeFromAnyThread();
-#ifdef JSGC_FJGENERATIONAL
-    // Must precede the case for JSGC_GENERATIONAL because IsInsideNursery()
-    // will also be true for the ForkJoinNursery.
-    if (rt->isFJMinorCollecting()) {
-        ForkJoinContext *ctx = ForkJoinContext::current();
-        ForkJoinNursery &fjNursery = ctx->fjNursery();
-        if (fjNursery.isInsideFromspace(*thingp))
-            return fjNursery.getForwardedPointer(thingp);
+    if (IsInsideNursery(*thingp)) {
+        Nursery &nursery = (*thingp)->runtimeFromMainThread()->gc.nursery;
+        return nursery.getForwardedPointer(thingp);
     }
-    else
 #endif
-    {
-        if (IsInsideNursery(*thingp)) {
-            Nursery &nursery = rt->gc.nursery;
-            return nursery.getForwardedPointer(thingp);
-        }
-    }
-#endif  // JSGC_GENERATIONAL
     Zone *zone = (*thingp)->tenuredZone();
     if (!zone->isCollecting() || zone->isGCFinished())
         return true;
     return (*thingp)->isMarked();
 }
 
 template <typename T>
 static bool
@@ -436,35 +402,24 @@ IsAboutToBeFinalized(T **thingp)
     T *thing = *thingp;
     JSRuntime *rt = thing->runtimeFromAnyThread();
 
     /* Permanent atoms are never finalized by non-owning runtimes. */
     if (ThingIsPermanentAtom(thing) && !TlsPerThreadData.get()->associatedWith(rt))
         return false;
 
 #ifdef JSGC_GENERATIONAL
-#ifdef JSGC_FJGENERATIONAL
-    if (rt->isFJMinorCollecting()) {
-        ForkJoinContext *ctx = ForkJoinContext::current();
-        ForkJoinNursery &fjNursery = ctx->fjNursery();
-        if (fjNursery.isInsideFromspace(thing))
-            return !fjNursery.getForwardedPointer(thingp);
+    Nursery &nursery = rt->gc.nursery;
+    JS_ASSERT_IF(!rt->isHeapMinorCollecting(), !IsInsideNursery(thing));
+    if (rt->isHeapMinorCollecting()) {
+        if (IsInsideNursery(thing))
+            return !nursery.getForwardedPointer(thingp);
+        return false;
     }
-    else
 #endif
-    {
-        Nursery &nursery = rt->gc.nursery;
-        JS_ASSERT_IF(!rt->isHeapMinorCollecting(), !IsInsideNursery(thing));
-        if (rt->isHeapMinorCollecting()) {
-            if (IsInsideNursery(thing))
-                return !nursery.getForwardedPointer(thingp);
-            return false;
-        }
-    }
-#endif  // JSGC_GENERATIONAL
 
     if (!thing->tenuredZone()->isGCSweeping())
         return false;
 
     /*
      * We should return false for things that have been allocated during
      * incremental sweeping, but this possibility doesn't occur at the moment
      * because this function is only called at the very start of the sweeping a
@@ -477,30 +432,19 @@ IsAboutToBeFinalized(T **thingp)
 }
 
 template <typename T>
 T *
 UpdateIfRelocated(JSRuntime *rt, T **thingp)
 {
     JS_ASSERT(thingp);
 #ifdef JSGC_GENERATIONAL
-#ifdef JSGC_FJGENERATIONAL
-    if (*thingp && rt->isFJMinorCollecting()) {
-        ForkJoinContext *ctx = ForkJoinContext::current();
-        ForkJoinNursery &fjNursery = ctx->fjNursery();
-        if (fjNursery.isInsideFromspace(*thingp))
-            fjNursery.getForwardedPointer(thingp);
-    }
-    else
+    if (*thingp && rt->isHeapMinorCollecting() && IsInsideNursery(*thingp))
+        rt->gc.nursery.getForwardedPointer(thingp);
 #endif
-    {
-        if (*thingp && rt->isHeapMinorCollecting() && IsInsideNursery(*thingp))
-            rt->gc.nursery.getForwardedPointer(thingp);
-    }
-#endif  // JSGC_GENERATIONAL
     return *thingp;
 }
 
 #define DeclMarkerImpl(base, type)                                                                \
 void                                                                                              \
 Mark##base(JSTracer *trc, BarrieredBase<type*> *thing, const char *name)                          \
 {                                                                                                 \
     Mark<type>(trc, thing, name);                                                                 \
--- a/js/src/gc/Nursery-inl.h
+++ b/js/src/gc/Nursery-inl.h
@@ -12,51 +12,62 @@
 
 #include "gc/Nursery.h"
 
 #include "gc/Heap.h"
 
 namespace js {
 namespace gc {
 
-/* static */
-inline RelocationOverlay *
-RelocationOverlay::fromCell(Cell *cell)
-{
-    JS_ASSERT(!cell->isTenured());
-    return reinterpret_cast<RelocationOverlay *>(cell);
-}
-
-inline bool
-RelocationOverlay::isForwarded() const
-{
-    return magic_ == Relocated;
-}
-
-inline Cell *
-RelocationOverlay::forwardingAddress() const
+/*
+ * This structure overlays a Cell in the Nursery and re-purposes its memory
+ * for managing the Nursery collection process.
+ */
+class RelocationOverlay
 {
-    JS_ASSERT(isForwarded());
-    return newLocation_;
-}
+    friend class MinorCollectionTracer;
+
+    /* The low bit is set so this should never equal a normal pointer. */
+    static const uintptr_t Relocated = uintptr_t(0xbad0bad1);
+
+    /* Set to Relocated when moved. */
+    uintptr_t magic_;
+
+    /* The location |this| was moved to. */
+    Cell *newLocation_;
+
+    /* A list entry to track all relocated things. */
+    RelocationOverlay *next_;
+
+  public:
+    static RelocationOverlay *fromCell(Cell *cell) {
+        JS_ASSERT(!cell->isTenured());
+        return reinterpret_cast<RelocationOverlay *>(cell);
+    }
 
-inline void
-RelocationOverlay::forwardTo(Cell *cell)
-{
-    JS_ASSERT(!isForwarded());
-    magic_ = Relocated;
-    newLocation_ = cell;
-    next_ = nullptr;
-}
+    bool isForwarded() const {
+        return magic_ == Relocated;
+    }
+
+    Cell *forwardingAddress() const {
+        JS_ASSERT(isForwarded());
+        return newLocation_;
+    }
 
-inline RelocationOverlay *
-RelocationOverlay::next() const
-{
-    return next_;
-}
+    void forwardTo(Cell *cell) {
+        JS_ASSERT(!isForwarded());
+        magic_ = Relocated;
+        newLocation_ = cell;
+        next_ = nullptr;
+    }
+
+    RelocationOverlay *next() const {
+        return next_;
+    }
+};
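+
+// Illustrative use while tenuring a nursery cell during minor GC
+// (hypothetical call sites, shown only for exposition):
+//
+//   RelocationOverlay *overlay = RelocationOverlay::fromCell(src);
+//   overlay->forwardTo(dst);            // stamp the forwarding pointer
+//   ...
+//   if (overlay->isForwarded())         // later, fix up a stale pointer
+//       cellp = overlay->forwardingAddress();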
 
 } /* namespace gc */
 } /* namespace js */
 
 template <typename T>
 MOZ_ALWAYS_INLINE bool
 js::Nursery::getForwardedPointer(T **ref)
 {
--- a/js/src/gc/Nursery.cpp
+++ b/js/src/gc/Nursery.cpp
@@ -907,20 +907,16 @@ js::Nursery::collect(JSRuntime *rt, JS::
                 TIME_TOTAL(freeHugeSlots),
                 TIME_TOTAL(clearStoreBuffer),
                 TIME_TOTAL(sweep));
 #undef FMT
     }
 #endif
 }
 
-#undef TIME_START
-#undef TIME_END
-#undef TIME_TOTAL
-
 void
 js::Nursery::freeHugeSlots()
 {
     FreeOp *fop = runtime()->defaultFreeOp();
     for (HugeSlotsSet::Range r = hugeSlots.all(); !r.empty(); r.popFront())
         fop->free_(r.front());
     hugeSlots.clear();
 }
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -31,63 +31,29 @@ namespace js {
 class ObjectElements;
 class HeapSlot;
 void SetGCZeal(JSRuntime *, uint8_t, uint32_t);
 
 namespace gc {
 class Cell;
 class Collector;
 class MinorCollectionTracer;
-class ForkJoinNursery;
 } /* namespace gc */
 
 namespace types {
 struct TypeObject;
 }
 
 namespace jit {
 class CodeGenerator;
 class MacroAssembler;
 class ICStubCompiler;
 class BaselineCompiler;
 }
 
-namespace gc {
-
-/*
- * This structure overlays a Cell in the Nursery and re-purposes its memory
- * for managing the Nursery collection process.
- */
-class RelocationOverlay
-{
-    friend class MinorCollectionTracer;
-    friend class ForkJoinNursery;
-
-    /* The low bit is set so this should never equal a normal pointer. */
-    static const uintptr_t Relocated = uintptr_t(0xbad0bad1);
-
-    /* Set to Relocated when moved. */
-    uintptr_t magic_;
-
-    /* The location |this| was moved to. */
-    Cell *newLocation_;
-
-    /* A list entry to track all relocated things. */
-    RelocationOverlay *next_;
-
-  public:
-    static inline RelocationOverlay *fromCell(Cell *cell);
-    inline bool isForwarded() const;
-    inline Cell *forwardingAddress() const;
-    inline void forwardTo(Cell *cell);
-    inline RelocationOverlay *next() const;
-};
-
-} /* namespace gc */
-
 class Nursery
 {
   public:
     static const int NumNurseryChunks = 16;
     static const int LastNurseryChunk = NumNurseryChunks - 1;
     static const size_t Alignment = gc::ChunkSize;
     static const size_t ChunkShift = gc::ChunkShift;
     static const size_t NurserySize = gc::ChunkSize * NumNurseryChunks;
@@ -244,17 +210,17 @@ class Nursery
         JS_ASSERT(index < NumNurseryChunks);
         JS_ASSERT(start());
         return reinterpret_cast<NurseryChunkLayout *>(start())[index];
     }
 
     MOZ_ALWAYS_INLINE void initChunk(int chunkno) {
         NurseryChunkLayout &c = chunk(chunkno);
         c.trailer.storeBuffer = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
-        c.trailer.location = gc::ChunkLocationBitNursery;
+        c.trailer.location = gc::ChunkLocationNursery;
         c.trailer.runtime = runtime();
     }
 
     MOZ_ALWAYS_INLINE void setCurrentChunk(int chunkno) {
         JS_ASSERT(chunkno < NumNurseryChunks);
         JS_ASSERT(chunkno < numActiveChunks_);
         currentChunk_ = chunkno;
         position_ = chunk(chunkno).start();
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -14,17 +14,16 @@
 #include "jsgc.h"
 #include "jsonparser.h"
 #include "jsprf.h"
 #include "jstypes.h"
 #include "jswatchpoint.h"
 
 #include "builtin/MapObject.h"
 #include "frontend/BytecodeCompiler.h"
-#include "gc/ForkJoinNursery.h"
 #include "gc/GCInternals.h"
 #include "gc/Marking.h"
 #ifdef JS_ION
 # include "jit/IonMacroAssembler.h"
 #endif
 #include "js/HashTable.h"
 #include "vm/Debugger.h"
 #include "vm/PropDesc.h"
@@ -121,24 +120,16 @@ MarkExactStackRoots(JSTracer *trc)
     MarkExactStackRootsForType<LazyScript *, MarkLazyScriptRoot>(trc, "exact-lazy-script");
     MarkExactStackRootsForType<jsid, MarkIdRoot>(trc, "exact-id");
     MarkExactStackRootsForType<Value, MarkValueRoot>(trc, "exact-value");
     MarkExactStackRootsForType<types::Type, MarkTypeRoot>(trc, "exact-type");
     MarkExactStackRootsForType<Bindings, MarkBindingsRoot>(trc);
     MarkExactStackRootsForType<JSPropertyDescriptor, MarkPropertyDescriptorRoot>(trc);
     MarkExactStackRootsForType<PropDesc, MarkPropDescRoot>(trc);
 }
-
-static void
-MarkExactStackRoots(ThreadSafeContext *cx, JSTracer *trc)
-{
-    for (unsigned i = 0; i < THING_ROOT_LIMIT; i++)
-        MarkExactStackRootList(trc, cx->thingGCRooters[i], ThingRootKind(i));
-}
-
 #endif /* JSGC_USE_EXACT_ROOTING */
 
 enum ConservativeGCTest
 {
     CGCT_VALID,
     CGCT_LOWBITSET, /* excluded because one of the low bits was set */
     CGCT_NOTARENA,  /* not within arena range in a chunk */
     CGCT_OTHERCOMPARTMENT,  /* in another compartment */
@@ -585,25 +576,27 @@ AutoGCRooter::trace(JSTracer *trc)
     JS_ASSERT(tag_ >= 0);
     if (Value *vp = static_cast<AutoArrayRooter *>(this)->array)
         MarkValueRootRange(trc, tag_, vp, "JS::AutoArrayRooter.array");
 }
 
 /* static */ void
 AutoGCRooter::traceAll(JSTracer *trc)
 {
-    for (ContextIter cx(trc->runtime()); !cx.done(); cx.next())
-        traceAllInContext(&*cx, trc);
+    for (ContextIter cx(trc->runtime()); !cx.done(); cx.next()) {
+        for (js::AutoGCRooter *gcr = cx->autoGCRooters; gcr; gcr = gcr->down)
+            gcr->trace(trc);
+    }
 }
 
 /* static */ void
 AutoGCRooter::traceAllWrappers(JSTracer *trc)
 {
     for (ContextIter cx(trc->runtime()); !cx.done(); cx.next()) {
-        for (AutoGCRooter *gcr = cx->autoGCRooters; gcr; gcr = gcr->down) {
+        for (js::AutoGCRooter *gcr = cx->autoGCRooters; gcr; gcr = gcr->down) {
             if (gcr->tag_ == WRAPVECTOR || gcr->tag_ == WRAPPER)
                 gcr->trace(trc);
         }
     }
 }
 
 void
 AutoHashableValueRooter::trace(JSTracer *trc)
@@ -685,37 +678,16 @@ js::gc::MarkPersistentRootedChains(JSTra
 
     // Mark the PersistentRooted chains of types that are never null.
     PersistentRootedMarker<jsid>::markChain<MarkIdRoot>(trc, rt->idPersistentRooteds,
                                                         "PersistentRooted<jsid>");
     PersistentRootedMarker<Value>::markChain<MarkValueRoot>(trc, rt->valuePersistentRooteds,
                                                             "PersistentRooted<Value>");
 }
 
-#ifdef JSGC_FJGENERATIONAL
-void
-js::gc::MarkForkJoinStack(ForkJoinNurseryCollectionTracer *trc)
-{
-    ForkJoinContext *cx = ForkJoinContext::current();
-    PerThreadData *ptd = cx->perThreadData;
-
-    AutoGCRooter::traceAllInContext(cx, trc);
-    MarkExactStackRoots(cx, trc);
-    jit::MarkJitActivations(ptd, trc);
-
-#ifdef DEBUG
-    // Only JIT activations should be on the stack
-    for (ActivationIterator iter(ptd); !iter.done(); ++iter) {
-        Activation *act = iter.activation();
-        JS_ASSERT(act->isJit());
-    }
-#endif
-}
-#endif  // JSGC_FJGENERATIONAL
-
 void
 js::gc::GCRuntime::markRuntime(JSTracer *trc, bool useSavedRoots)
 {
     JS_ASSERT(trc->callback != GCMarker::GrayCallback);
 
     JS_ASSERT(!rt->mainThread.suppressGC);
 
     if (IS_GC_MARKING_TRACER(trc)) {
@@ -807,20 +779,20 @@ js::gc::GCRuntime::markRuntime(JSTracer 
                 c->watchpointMap->markAll(trc);
         }
 
         /* Mark debug scopes, if present */
         if (c->debugScopes)
             c->debugScopes->mark(trc);
     }
 
-    MarkInterpreterActivations(&rt->mainThread, trc);
+    MarkInterpreterActivations(rt, trc);
 
 #ifdef JS_ION
-    jit::MarkJitActivations(&rt->mainThread, trc);
+    jit::MarkJitActivations(rt, trc);
 #endif
 
     if (!isHeapMinorCollecting()) {
         /*
          * All JSCompartment::mark does is mark the globals for compartments
          * which have been entered. Globals aren't nursery allocated so there's
          * no need to do this for minor GCs.
          */
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -1144,18 +1144,17 @@ CodeGenerator::visitLambdaPar(LLambdaPar
     Register cxReg = ToRegister(lir->forkJoinContext());
     Register scopeChainReg = ToRegister(lir->scopeChain());
     Register tempReg1 = ToRegister(lir->getTemp0());
     Register tempReg2 = ToRegister(lir->getTemp1());
     const LambdaFunctionInfo &info = lir->mir()->info();
 
     JS_ASSERT(scopeChainReg != resultReg);
 
-    if (!emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, info.fun))
-        return false;
+    emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, info.fun);
     emitLambdaInit(resultReg, scopeChainReg, info);
     return true;
 }
 
 bool
 CodeGenerator::visitLabel(LLabel *lir)
 {
     return true;
@@ -3894,50 +3893,50 @@ bool
 CodeGenerator::visitNewCallObjectPar(LNewCallObjectPar *lir)
 {
     Register resultReg = ToRegister(lir->output());
     Register cxReg = ToRegister(lir->forkJoinContext());
     Register tempReg1 = ToRegister(lir->getTemp0());
     Register tempReg2 = ToRegister(lir->getTemp1());
     JSObject *templateObj = lir->mir()->templateObj();
 
-    return emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, templateObj);
-}
-
-typedef JSObject *(*ExtendArrayParFn)(ForkJoinContext*, JSObject*, uint32_t);
-static const VMFunction ExtendArrayParInfo =
-    FunctionInfo<ExtendArrayParFn>(ExtendArrayPar);
+    emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, templateObj);
+    return true;
+}
 
 bool
 CodeGenerator::visitNewDenseArrayPar(LNewDenseArrayPar *lir)
 {
     Register cxReg = ToRegister(lir->forkJoinContext());
     Register lengthReg = ToRegister(lir->length());
     Register tempReg0 = ToRegister(lir->getTemp0());
     Register tempReg1 = ToRegister(lir->getTemp1());
     Register tempReg2 = ToRegister(lir->getTemp2());
     JSObject *templateObj = lir->mir()->templateObject();
 
-    masm.push(lengthReg);
-    if (!emitAllocateGCThingPar(lir, tempReg2, cxReg, tempReg0, tempReg1, templateObj))
-        return false;
-    masm.pop(lengthReg);
-
-    // Invoke a C helper to allocate the elements.  The helper returns
-    // the array object, or nullptr on allocation error.
-
-    saveLive(lir);
-    pushArg(lengthReg);
-    pushArg(tempReg2);
-    if (!callVM(ExtendArrayParInfo, lir))
-        return false;
-    storeResultTo(ToRegister(lir->output()));
-    restoreLive(lir);
+    // Allocate the array into tempReg2.  Don't use resultReg because it
+    // may alias cxReg etc.
+    emitAllocateGCThingPar(lir, tempReg2, cxReg, tempReg0, tempReg1, templateObj);
+
+    // Invoke a C helper to allocate the elements.  For convenience,
+    // this helper also returns the array back to us, or nullptr, which
+    // obviates the need to preserve the register across the call.  In
+    // reality, we should probably just have the C helper also
+    // *allocate* the array, but that would require that it initialize
+    // the various fields of the object, and I didn't want to
+    // duplicate the code in initGCThing() that already does such an
+    // admirable job.
+    masm.setupUnalignedABICall(3, tempReg0);
+    masm.passABIArg(cxReg);
+    masm.passABIArg(tempReg2);
+    masm.passABIArg(lengthReg);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ExtendArrayPar));
 
     Register resultReg = ToRegister(lir->output());
+    JS_ASSERT(resultReg == ReturnReg);
     OutOfLineAbortPar *bail = oolAbortPar(ParallelBailoutOutOfMemory, lir);
     if (!bail)
         return false;
     masm.branchTestPtr(Assembler::Zero, resultReg, resultReg, bail->entry());
 
     return true;
 }
 
@@ -3972,20 +3971,20 @@ CodeGenerator::visitNewStringObject(LNew
 bool
 CodeGenerator::visitNewPar(LNewPar *lir)
 {
     Register objReg = ToRegister(lir->output());
     Register cxReg = ToRegister(lir->forkJoinContext());
     Register tempReg1 = ToRegister(lir->getTemp0());
     Register tempReg2 = ToRegister(lir->getTemp1());
     JSObject *templateObject = lir->mir()->templateObject();
-    return emitAllocateGCThingPar(lir, objReg, cxReg, tempReg1, tempReg2, templateObject);
-}
-
-#ifndef JSGC_FJGENERATIONAL
+    emitAllocateGCThingPar(lir, objReg, cxReg, tempReg1, tempReg2, templateObject);
+    return true;
+}
+
 class OutOfLineNewGCThingPar : public OutOfLineCodeBase<CodeGenerator>
 {
 public:
     LInstruction *lir;
     gc::AllocKind allocKind;
     Register objReg;
     Register cxReg;
 
@@ -3993,45 +3992,32 @@ public:
                            Register cxReg)
       : lir(lir), allocKind(allocKind), objReg(objReg), cxReg(cxReg)
     {}
 
     bool accept(CodeGenerator *codegen) {
         return codegen->visitOutOfLineNewGCThingPar(this);
     }
 };
-#endif // JSGC_FJGENERATIONAL
-
-typedef JSObject *(*NewGCThingParFn)(ForkJoinContext *, js::gc::AllocKind allocKind);
-static const VMFunction NewGCThingParInfo =
-    FunctionInfo<NewGCThingParFn>(NewGCThingPar);
 
 bool
 CodeGenerator::emitAllocateGCThingPar(LInstruction *lir, Register objReg, Register cxReg,
                                       Register tempReg1, Register tempReg2, JSObject *templateObj)
 {
     gc::AllocKind allocKind = templateObj->tenuredGetAllocKind();
-#ifdef JSGC_FJGENERATIONAL
-    OutOfLineCode *ool = oolCallVM(NewGCThingParInfo, lir,
-                                   (ArgList(), Imm32(allocKind)), StoreRegisterTo(objReg));
-    if (!ool)
-        return false;
-#else
     OutOfLineNewGCThingPar *ool = new(alloc()) OutOfLineNewGCThingPar(lir, allocKind, objReg, cxReg);
     if (!ool || !addOutOfLineCode(ool))
         return false;
-#endif
 
     masm.newGCThingPar(objReg, cxReg, tempReg1, tempReg2, templateObj, ool->entry());
     masm.bind(ool->rejoin());
     masm.initGCThing(objReg, tempReg1, templateObj);
     return true;
 }
 
-#ifndef JSGC_FJGENERATIONAL
 bool
 CodeGenerator::visitOutOfLineNewGCThingPar(OutOfLineNewGCThingPar *ool)
 {
     // As a fallback for allocation in par. exec. mode, we invoke the
     // C helper NewGCThingPar(), which calls into the GC code.  If it
     // returns nullptr, we bail.  If returns non-nullptr, we rejoin the
     // original instruction.
     Register out = ool->objReg;
@@ -4047,17 +4033,16 @@ CodeGenerator::visitOutOfLineNewGCThingP
 
     OutOfLineAbortPar *bail = oolAbortPar(ParallelBailoutOutOfMemory, ool->lir);
     if (!bail)
         return false;
     masm.branchTestPtr(Assembler::Zero, out, out, bail->entry());
     masm.jump(ool->rejoin());
     return true;
 }
-#endif // JSGC_FJGENERATIONAL
 
 bool
 CodeGenerator::visitAbortPar(LAbortPar *lir)
 {
     OutOfLineAbortPar *bail = oolAbortPar(ParallelBailoutUnsupported, lir);
     if (!bail)
         return false;
     masm.jump(bail->entry());
@@ -6501,17 +6486,17 @@ typedef JSObject *(*InitRestParameterPar
                                             HandleObject, HandleObject);
 static const VMFunctionsModal InitRestParameterInfo = VMFunctionsModal(
     FunctionInfo<InitRestParameterFn>(InitRestParameter),
     FunctionInfo<InitRestParameterParFn>(InitRestParameterPar));
 
 bool
 CodeGenerator::emitRest(LInstruction *lir, Register array, Register numActuals,
                         Register temp0, Register temp1, unsigned numFormals,
-                        JSObject *templateObject, bool saveAndRestore, Register resultreg)
+                        JSObject *templateObject)
 {
     // Compute actuals() + numFormals.
     size_t actualsOffset = frameSize() + IonJSFrameLayout::offsetOfActualArgs();
     masm.movePtr(StackPointer, temp1);
     masm.addPtr(Imm32(sizeof(Value) * numFormals + actualsOffset), temp1);
 
     // Compute numActuals - numFormals.
     Label emptyLength, joinLength;
@@ -6520,32 +6505,22 @@ CodeGenerator::emitRest(LInstruction *li
     masm.sub32(Imm32(numFormals), temp0);
     masm.jump(&joinLength);
     {
         masm.bind(&emptyLength);
         masm.move32(Imm32(0), temp0);
     }
     masm.bind(&joinLength);
 
-    if (saveAndRestore)
-        saveLive(lir);
-
     pushArg(array);
     pushArg(ImmGCPtr(templateObject));
     pushArg(temp1);
     pushArg(temp0);
 
-    bool result = callVM(InitRestParameterInfo, lir);
-
-    if (saveAndRestore) {
-        storeResultTo(resultreg);
-        restoreLive(lir);
-    }
-
-    return result;
+    return callVM(InitRestParameterInfo, lir);
 }
 
 bool
 CodeGenerator::visitRest(LRest *lir)
 {
     Register numActuals = ToRegister(lir->numActuals());
     Register temp0 = ToRegister(lir->getTemp(0));
     Register temp1 = ToRegister(lir->getTemp(1));
@@ -6557,39 +6532,34 @@ CodeGenerator::visitRest(LRest *lir)
     masm.createGCObject(temp2, temp0, templateObject, gc::DefaultHeap, &failAlloc);
     masm.jump(&joinAlloc);
     {
         masm.bind(&failAlloc);
         masm.movePtr(ImmPtr(nullptr), temp2);
     }
     masm.bind(&joinAlloc);
 
-    return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject, false, ToRegister(lir->output()));
-}
-
-// LRestPar cannot derive from LCallInstructionHelper because emitAllocateGCThingPar may
-// itself contain a VM call.  Thus there's some manual work here and in emitRest().
+    return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject);
+}
 
 bool
 CodeGenerator::visitRestPar(LRestPar *lir)
 {
     Register numActuals = ToRegister(lir->numActuals());
     Register cx = ToRegister(lir->forkJoinContext());
     Register temp0 = ToRegister(lir->getTemp(0));
     Register temp1 = ToRegister(lir->getTemp(1));
     Register temp2 = ToRegister(lir->getTemp(2));
     unsigned numFormals = lir->mir()->numFormals();
     JSObject *templateObject = lir->mir()->templateObject();
 
-    masm.push(numActuals);
     if (!emitAllocateGCThingPar(lir, temp2, cx, temp0, temp1, templateObject))
         return false;
-    masm.pop(numActuals);
-
-    return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject, true, ToRegister(lir->output()));
+
+    return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject);
 }
 
 bool
 CodeGenerator::generateAsmJS(Label *stackOverflowLabel)
 {
     IonSpew(IonSpew_Codegen, "# Emitting asm.js code");
 
     // AsmJS doesn't do profiler instrumentation.
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -263,17 +263,17 @@ class CodeGenerator : public CodeGenerat
     bool visitArgumentsLength(LArgumentsLength *lir);
     bool visitGetFrameArgument(LGetFrameArgument *lir);
     bool visitSetFrameArgumentT(LSetFrameArgumentT *lir);
     bool visitSetFrameArgumentC(LSetFrameArgumentC *lir);
     bool visitSetFrameArgumentV(LSetFrameArgumentV *lir);
     bool visitRunOncePrologue(LRunOncePrologue *lir);
     bool emitRest(LInstruction *lir, Register array, Register numActuals,
                   Register temp0, Register temp1, unsigned numFormals,
-                  JSObject *templateObject, bool saveAndRestore, Register resultreg);
+                  JSObject *templateObject);
     bool visitRest(LRest *lir);
     bool visitRestPar(LRestPar *lir);
     bool visitCallSetProperty(LCallSetProperty *ins);
     bool visitCallDeleteProperty(LCallDeleteProperty *lir);
     bool visitCallDeleteElement(LCallDeleteElement *lir);
     bool visitBitNotV(LBitNotV *lir);
     bool visitBitOpV(LBitOpV *lir);
     bool emitInstanceOf(LInstruction *ins, JSObject *prototypeObject);
--- a/js/src/jit/IonFrames.cpp
+++ b/js/src/jit/IonFrames.cpp
@@ -5,17 +5,16 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/IonFrames-inl.h"
 
 #include "jsfun.h"
 #include "jsobj.h"
 #include "jsscript.h"
 
-#include "gc/ForkJoinNursery.h"
 #include "gc/Marking.h"
 #include "jit/BaselineDebugModeOSR.h"
 #include "jit/BaselineFrame.h"
 #include "jit/BaselineIC.h"
 #include "jit/BaselineJIT.h"
 #include "jit/Ion.h"
 #include "jit/IonMacroAssembler.h"
 #include "jit/IonSpewer.h"
@@ -863,18 +862,20 @@ MarkIonJSFrame(JSTracer *trc, const JitF
     layout->replaceCalleeToken(MarkCalleeToken(trc, layout->calleeToken()));
 
     IonScript *ionScript = nullptr;
     if (frame.checkInvalidation(&ionScript)) {
         // This frame has been invalidated, meaning that its IonScript is no
         // longer reachable through the callee token (JSFunction/JSScript->ion
         // is now nullptr or recompiled). Manually trace it here.
         IonScript::Trace(trc, ionScript);
+    } else if (CalleeTokenIsFunction(layout->calleeToken())) {
+        ionScript = CalleeTokenToFunction(layout->calleeToken())->nonLazyScript()->ionScript();
     } else {
-        ionScript = frame.ionScriptFromCalleeToken();
+        ionScript = CalleeTokenToScript(layout->calleeToken())->ionScript();
     }
 
     if (CalleeTokenIsFunction(layout->calleeToken()))
         MarkActualArguments(trc, frame);
 
     const SafepointIndex *si = ionScript->getSafepointIndex(frame.returnAddressToFp());
 
     SafepointReader safepoint(ionScript, si);
@@ -931,56 +932,44 @@ UpdateIonJSFrameForMinorGC(JSTracer *trc
 
     IonJSFrameLayout *layout = (IonJSFrameLayout *)frame.fp();
 
     IonScript *ionScript = nullptr;
     if (frame.checkInvalidation(&ionScript)) {
         // This frame has been invalidated, meaning that its IonScript is no
         // longer reachable through the callee token (JSFunction/JSScript->ion
         // is now nullptr or recompiled).
+    } else if (CalleeTokenIsFunction(layout->calleeToken())) {
+        ionScript = CalleeTokenToFunction(layout->calleeToken())->nonLazyScript()->ionScript();
     } else {
-        ionScript = frame.ionScriptFromCalleeToken();
+        ionScript = CalleeTokenToScript(layout->calleeToken())->ionScript();
     }
 
     const SafepointIndex *si = ionScript->getSafepointIndex(frame.returnAddressToFp());
     SafepointReader safepoint(ionScript, si);
 
     GeneralRegisterSet slotsRegs = safepoint.slotsOrElementsSpills();
     uintptr_t *spill = frame.spillBase();
     for (GeneralRegisterBackwardIterator iter(safepoint.allGprSpills()); iter.more(); iter++) {
         --spill;
-        if (slotsRegs.has(*iter)) {
-#ifdef JSGC_FJGENERATIONAL
-            if (trc->callback == gc::ForkJoinNursery::MinorGCCallback) {
-                gc::ForkJoinNursery::forwardBufferPointer(trc,
-                                                          reinterpret_cast<HeapSlot **>(spill));
-                continue;
-            }
-#endif
+        if (slotsRegs.has(*iter))
             trc->runtime()->gc.nursery.forwardBufferPointer(reinterpret_cast<HeapSlot **>(spill));
-        }
     }
 
     // Skip to the right place in the safepoint
     uint32_t slot;
     while (safepoint.getGcSlot(&slot));
     while (safepoint.getValueSlot(&slot));
 #ifdef JS_NUNBOX32
     LAllocation type, payload;
     while (safepoint.getNunboxSlot(&type, &payload));
 #endif
 
     while (safepoint.getSlotsOrElementsSlot(&slot)) {
         HeapSlot **slots = reinterpret_cast<HeapSlot **>(layout->slotRef(slot));
-#ifdef JSGC_FJGENERATIONAL
-        if (trc->callback == gc::ForkJoinNursery::MinorGCCallback) {
-            gc::ForkJoinNursery::forwardBufferPointer(trc, slots);
-            continue;
-        }
-#endif
         trc->runtime()->gc.nursery.forwardBufferPointer(slots);
     }
 }
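 
 // (Nursery::forwardBufferPointer() repoints a HeapSlot** that still
 // refers to a slots/elements buffer the minor GC has moved.)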
 #endif
 
 static void
 MarkBaselineStubFrame(JSTracer *trc, const JitFrameIterator &frame)
 {
@@ -1232,19 +1221,19 @@ MarkJitActivation(JSTracer *trc, const J
             break;
           default:
             MOZ_ASSUME_UNREACHABLE("unexpected frame type");
         }
     }
 }
 
 void
-MarkJitActivations(PerThreadData *ptd, JSTracer *trc)
+MarkJitActivations(JSRuntime *rt, JSTracer *trc)
 {
-    for (JitActivationIterator activations(ptd); !activations.done(); ++activations)
+    for (JitActivationIterator activations(rt); !activations.done(); ++activations)
         MarkJitActivation(trc, activations);
 }
 
 JSCompartment *
 TopmostIonActivationCompartment(JSRuntime *rt)
 {
     for (JitActivationIterator activations(rt); !activations.done(); ++activations) {
         for (JitFrameIterator frames(activations); !frames.done(); ++frames) {
@@ -1262,32 +1251,16 @@ UpdateJitActivationsForMinorGC(JSRuntime
     JS_ASSERT(trc->runtime()->isHeapMinorCollecting());
     for (JitActivationIterator activations(rt); !activations.done(); ++activations) {
         for (JitFrameIterator frames(activations); !frames.done(); ++frames) {
             if (frames.type() == JitFrame_IonJS)
                 UpdateIonJSFrameForMinorGC(trc, frames);
         }
     }
 }
-
-void
-UpdateJitActivationsForMinorGC(PerThreadData *ptd, JSTracer *trc)
-{
-#ifdef JSGC_FJGENERATIONAL
-    JS_ASSERT(trc->runtime()->isHeapMinorCollecting() || trc->runtime()->isFJMinorCollecting());
-#else
-    JS_ASSERT(trc->runtime()->isHeapMinorCollecting());
-#endif
-    for (JitActivationIterator activations(ptd); !activations.done(); ++activations) {
-        for (JitFrameIterator frames(activations); !frames.done(); ++frames) {
-            if (frames.type() == JitFrame_IonJS)
-                UpdateIonJSFrameForMinorGC(trc, frames);
-        }
-    }
-}
 #endif
 
 void
 AutoTempAllocatorRooter::trace(JSTracer *trc)
 {
     for (CompilerRootNode *root = temp->rootList(); root != nullptr; root = root->next)
         gc::MarkGCThingRoot(trc, root->address(), "ion-compiler-root");
 }
@@ -1672,25 +1645,16 @@ SnapshotIterator::nextFrame()
 IonScript *
 JitFrameIterator::ionScript() const
 {
     JS_ASSERT(type() == JitFrame_IonJS);
 
     IonScript *ionScript = nullptr;
     if (checkInvalidation(&ionScript))
         return ionScript;
-    return ionScriptFromCalleeToken();
-}
-
-IonScript *
-JitFrameIterator::ionScriptFromCalleeToken() const
-{
-    JS_ASSERT(type() == JitFrame_IonJS);
-    JS_ASSERT(!checkInvalidation());
-
     switch (GetCalleeTokenTag(calleeToken())) {
       case CalleeToken_Function:
       case CalleeToken_Script:
         return mode_ == ParallelExecution ? script()->parallelIonScript() : script()->ionScript();
       default:
         MOZ_ASSUME_UNREACHABLE("unknown callee token type");
     }
 }
--- a/js/src/jit/IonFrames.h
+++ b/js/src/jit/IonFrames.h
@@ -259,25 +259,24 @@ struct ResumeFromException
     BaselineBailoutInfo *bailoutInfo;
 };
 
 void HandleException(ResumeFromException *rfe);
 void HandleParallelFailure(ResumeFromException *rfe);
 
 void EnsureExitFrame(IonCommonFrameLayout *frame);
 
-void MarkJitActivations(PerThreadData *ptd, JSTracer *trc);
+void MarkJitActivations(JSRuntime *rt, JSTracer *trc);
 void MarkIonCompilerRoots(JSTracer *trc);
 
 JSCompartment *
 TopmostIonActivationCompartment(JSRuntime *rt);
 
 #ifdef JSGC_GENERATIONAL
 void UpdateJitActivationsForMinorGC(JSRuntime *rt, JSTracer *trc);
-void UpdateJitActivationsForMinorGC(PerThreadData *ptd, JSTracer *trc);
 #endif
 
 static inline uint32_t
 MakeFrameDescriptor(uint32_t frameSize, FrameType type)
 {
     return (frameSize << FRAMESIZE_SHIFT) | type;
 }
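 
 // For example, a 0x40-byte IonJS frame is described by
 // (0x40 << FRAMESIZE_SHIFT) | JitFrame_IonJS.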
 
--- a/js/src/jit/IonMacroAssembler.cpp
+++ b/js/src/jit/IonMacroAssembler.cpp
@@ -623,63 +623,20 @@ MacroAssembler::newGCFatInlineString(Reg
 {
     allocateNonObject(result, temp, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
 }
 
 void
 MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                               gc::AllocKind allocKind, Label *fail)
 {
-#ifdef JSGC_FJGENERATIONAL
-    if (IsNurseryAllocable(allocKind))
-        return newGCNurseryThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
-#endif
-    return newGCTenuredThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
-}
-
-#ifdef JSGC_FJGENERATIONAL
-void
-MacroAssembler::newGCNurseryThingPar(Register result, Register cx,
-                                     Register tempReg1, Register tempReg2,
-                                     gc::AllocKind allocKind, Label *fail)
-{
-    JS_ASSERT(IsNurseryAllocable(allocKind));
-
-    uint32_t thingSize = uint32_t(gc::Arena::thingSize(allocKind));
-
-    // Correctness depends on thingSize being smaller than a chunk
-    // (not a problem) and the last chunk of the nursery not being
-    // located at the very top of the address space.  The regular
-    // Nursery makes the same assumption; see nurseryAllocate() above.
-
-    // The ForkJoinNursery is a member variable of the ForkJoinContext.
-    size_t offsetOfPosition =
-        ForkJoinContext::offsetOfFJNursery() + gc::ForkJoinNursery::offsetOfPosition();
-    size_t offsetOfEnd =
-        ForkJoinContext::offsetOfFJNursery() + gc::ForkJoinNursery::offsetOfCurrentEnd();
-    loadPtr(Address(cx, offsetOfPosition), result);
-    loadPtr(Address(cx, offsetOfEnd), tempReg2);
-    computeEffectiveAddress(Address(result, thingSize), tempReg1);
-    branchPtr(Assembler::Below, tempReg2, tempReg1, fail);
-    storePtr(tempReg1, Address(cx, offsetOfPosition));
-}
-#endif
-
-void
-MacroAssembler::newGCTenuredThingPar(Register result, Register cx,
-                                     Register tempReg1, Register tempReg2,
-                                     gc::AllocKind allocKind, Label *fail)
-{
     // Similar to ::newGCThing(), except that it allocates from a custom
     // Allocator in the ForkJoinContext*, rather than being hardcoded to the
     // compartment allocator.  This requires two temporary registers.
     //
-    // When the ForkJoin generational collector is enabled this is only used
-    // for those object types that cannot be allocated in the ForkJoinNursery.
-    //
     // Subtle: I wanted to reuse `result` for one of the temporaries, but the
     // register allocator was assigning it to the same register as `cx`.
     // Then we overwrote that register, which messed up the OOL code.
 
     uint32_t thingSize = (uint32_t)gc::Arena::thingSize(allocKind);
 
     // Load the allocator:
     // tempReg1 = (Allocator*) forkJoinCx->allocator()
@@ -724,24 +681,24 @@ MacroAssembler::newGCThingPar(Register r
 
     newGCThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
 }
 
 void
 MacroAssembler::newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                                Label *fail)
 {
-    newGCTenuredThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_STRING, fail);
+    newGCThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_STRING, fail);
 }
 
 void
 MacroAssembler::newGCFatInlineStringPar(Register result, Register cx, Register tempReg1,
                                         Register tempReg2, Label *fail)
 {
-    newGCTenuredThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
+    newGCThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
 }
 
 void
 MacroAssembler::copySlotsFromTemplate(Register obj, const JSObject *templateObj,
                                       uint32_t start, uint32_t end)
 {
     uint32_t nfixed = Min(templateObj->numFixedSlots(), end);
     for (unsigned i = start; i < nfixed; i++)
--- a/js/src/jit/IonMacroAssembler.h
+++ b/js/src/jit/IonMacroAssembler.h
@@ -822,22 +822,16 @@ class MacroAssembler : public MacroAssem
     void initGCThing(Register obj, Register temp, JSObject *templateObj,
                      bool initFixedSlots = true);
 
     void newGCString(Register result, Register temp, Label *fail);
     void newGCFatInlineString(Register result, Register temp, Label *fail);
 
     void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                        gc::AllocKind allocKind, Label *fail);
-#ifdef JSGC_FJGENERATIONAL
-    void newGCNurseryThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
-                              gc::AllocKind allocKind, Label *fail);
-#endif
-    void newGCTenuredThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
-                              gc::AllocKind allocKind, Label *fail);
     void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                        JSObject *templateObject, Label *fail);
     void newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                         Label *fail);
     void newGCFatInlineStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                                  Label *fail);
 
 
--- a/js/src/jit/JitFrameIterator.h
+++ b/js/src/jit/JitFrameIterator.h
@@ -196,20 +196,16 @@ class JitFrameIterator
     inline bool done() const {
         return type_ == JitFrame_Entry;
     }
     JitFrameIterator &operator++();
 
     // Returns the IonScript associated with this JS frame.
     IonScript *ionScript() const;
 
-    // Returns the IonScript associated with this JS frame; the frame must
-    // not be invalidated.
-    IonScript *ionScriptFromCalleeToken() const;
-
     // Returns the Safepoint associated with this JS frame. Incurs a lookup
     // overhead.
     const SafepointIndex *safepoint() const;
 
     // Returns the OSI index associated with this JS frame. Incurs a lookup
     // overhead.
     const OsiIndex *osiIndex() const;
 
--- a/js/src/jit/LIR-Common.h
+++ b/js/src/jit/LIR-Common.h
@@ -369,17 +369,17 @@ class LNewPar : public LInstructionHelpe
         return getTemp(0);
     }
 
     const LDefinition *getTemp1() {
         return getTemp(1);
     }
 };
 
-class LNewDenseArrayPar : public LInstructionHelper<1, 2, 3>
+class LNewDenseArrayPar : public LCallInstructionHelper<1, 2, 3>
 {
   public:
     LIR_HEADER(NewDenseArrayPar);
 
     LNewDenseArrayPar(const LAllocation &cx, const LAllocation &length,
                       const LDefinition &temp1, const LDefinition &temp2, const LDefinition &temp3)
     {
         setOperand(0, cx);
@@ -5356,17 +5356,17 @@ class LRest : public LCallInstructionHel
     const LAllocation *numActuals() {
         return getOperand(0);
     }
     MRest *mir() const {
         return mir_->toRest();
     }
 };
 
-class LRestPar : public LInstructionHelper<1, 2, 3>
+class LRestPar : public LCallInstructionHelper<1, 2, 3>
 {
   public:
     LIR_HEADER(RestPar);
 
     LRestPar(const LAllocation &cx, const LAllocation &numActuals,
              const LDefinition &temp1, const LDefinition &temp2, const LDefinition &temp3)
     {
         setOperand(0, cx);
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -127,17 +127,21 @@ LIRGenerator::visitCheckOverRecursed(MCh
     return true;
 }
 
 bool
 LIRGenerator::visitCheckOverRecursedPar(MCheckOverRecursedPar *ins)
 {
     LCheckOverRecursedPar *lir =
         new(alloc()) LCheckOverRecursedPar(useRegister(ins->forkJoinContext()), temp());
-    return add(lir, ins) && assignSafepoint(lir, ins);
+    if (!add(lir, ins))
+        return false;
+    if (!assignSafepoint(lir, ins))
+        return false;
+    return true;
 }
 
 bool
 LIRGenerator::visitDefVar(MDefVar *ins)
 {
     LDefVar *lir = new(alloc()) LDefVar(useRegisterAtStart(ins->scopeChain()));
     if (!add(lir, ins))
         return false;
@@ -220,17 +224,17 @@ LIRGenerator::visitNewDerivedTypedObject
     return defineReturn(lir, ins) && assignSafepoint(lir, ins);
 }
 
 bool
 LIRGenerator::visitNewCallObjectPar(MNewCallObjectPar *ins)
 {
     const LAllocation &parThreadContext = useRegister(ins->forkJoinContext());
     LNewCallObjectPar *lir = LNewCallObjectPar::New(alloc(), parThreadContext, temp(), temp());
-    return define(lir, ins) && assignSafepoint(lir, ins);
+    return define(lir, ins);
 }
 
 bool
 LIRGenerator::visitNewStringObject(MNewStringObject *ins)
 {
     JS_ASSERT(ins->input()->type() == MIRType_String);
 
     LNewStringObject *lir = new(alloc()) LNewStringObject(useRegister(ins->input()), temp());
@@ -2085,17 +2089,17 @@ LIRGenerator::visitLambdaArrow(MLambdaAr
 bool
 LIRGenerator::visitLambdaPar(MLambdaPar *ins)
 {
     JS_ASSERT(!ins->info().singletonType);
     JS_ASSERT(!ins->info().useNewTypeForClone);
     LLambdaPar *lir = new(alloc()) LLambdaPar(useRegister(ins->forkJoinContext()),
                                               useRegister(ins->scopeChain()),
                                               temp(), temp());
-    return define(lir, ins) && assignSafepoint(lir, ins);
+    return define(lir, ins);
 }
 
 bool
 LIRGenerator::visitSlots(MSlots *ins)
 {
     return define(new(alloc()) LSlots(useRegisterAtStart(ins->object())), ins);
 }
 
@@ -2191,40 +2195,40 @@ LIRGenerator::visitInterruptCheck(MInter
     return add(lir, ins) && assignSafepoint(lir, ins);
 }
 
 bool
 LIRGenerator::visitInterruptCheckPar(MInterruptCheckPar *ins)
 {
     LInterruptCheckPar *lir =
         new(alloc()) LInterruptCheckPar(useRegister(ins->forkJoinContext()), temp());
-    return add(lir, ins) && assignSafepoint(lir, ins);
+    if (!add(lir, ins))
+        return false;
+    if (!assignSafepoint(lir, ins))
+        return false;
+    return true;
 }
 
 bool
 LIRGenerator::visitNewPar(MNewPar *ins)
 {
     LNewPar *lir = new(alloc()) LNewPar(useRegister(ins->forkJoinContext()), temp(), temp());
-    return define(lir, ins) && assignSafepoint(lir, ins);
+    return define(lir, ins);
 }
 
 bool
 LIRGenerator::visitNewDenseArrayPar(MNewDenseArrayPar *ins)
 {
-    JS_ASSERT(ins->forkJoinContext()->type() == MIRType_ForkJoinContext);
-    JS_ASSERT(ins->length()->type() == MIRType_Int32);
-    JS_ASSERT(ins->type() == MIRType_Object);
-
     LNewDenseArrayPar *lir =
-        new(alloc()) LNewDenseArrayPar(useRegister(ins->forkJoinContext()),
-                                       useRegister(ins->length()),
-                                       temp(),
-                                       temp(),
-                                       temp());
-    return define(lir, ins) && assignSafepoint(lir, ins);
+        new(alloc()) LNewDenseArrayPar(useFixed(ins->forkJoinContext(), CallTempReg0),
+                                       useFixed(ins->length(), CallTempReg1),
+                                       tempFixed(CallTempReg2),
+                                       tempFixed(CallTempReg3),
+                                       tempFixed(CallTempReg4));
+    return defineReturn(lir, ins);
 }
 
 bool
 LIRGenerator::visitStoreSlot(MStoreSlot *ins)
 {
     LInstruction *lir;
 
     switch (ins->value()->type()) {
@@ -3310,22 +3314,22 @@ LIRGenerator::visitRest(MRest *ins)
     return defineReturn(lir, ins) && assignSafepoint(lir, ins);
 }
 
 bool
 LIRGenerator::visitRestPar(MRestPar *ins)
 {
     JS_ASSERT(ins->numActuals()->type() == MIRType_Int32);
 
-    LRestPar *lir = new(alloc()) LRestPar(useRegister(ins->forkJoinContext()),
-                                          useRegister(ins->numActuals()),
-                                          temp(),
-                                          temp(),
-                                          temp());
-    return define(lir, ins) && assignSafepoint(lir, ins);
+    LRestPar *lir = new(alloc()) LRestPar(useFixed(ins->forkJoinContext(), CallTempReg0),
+                                          useFixed(ins->numActuals(), CallTempReg1),
+                                          tempFixed(CallTempReg2),
+                                          tempFixed(CallTempReg3),
+                                          tempFixed(CallTempReg4));
+    return defineReturn(lir, ins) && assignSafepoint(lir, ins);
 }
 
 bool
 LIRGenerator::visitThrow(MThrow *ins)
 {
     MDefinition *value = ins->getOperand(0);
     JS_ASSERT(value->type() == MIRType_Value);
 
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -1687,20 +1687,16 @@ class MNewPar : public MUnaryInstruction
 
     MDefinition *forkJoinContext() const {
         return getOperand(0);
     }
 
     JSObject *templateObject() const {
         return templateObject_;
     }
-
-    AliasSet getAliasSet() const {
-        return AliasSet::None();
-    }
 };
 
 // Creates a new derived type object. At runtime, this is just a call
 // to `BinaryBlock::createDerived()`. That is, the MIR itself does not
 // compile to particularly optimized code. However, using a distinct
 // MIR for creating derived type objects allows the compiler to
 // optimize ephemeral typed objects as would be created for a
 // reference like `a.b.c` -- here, the `a.b` will create an ephemeral
@@ -9827,20 +9823,16 @@ class MNewDenseArrayPar : public MBinary
 
     JSObject *templateObject() const {
         return templateObject_;
     }
 
     bool possiblyCalls() const {
         return true;
     }
-
-    AliasSet getAliasSet() const {
-        return AliasSet::None();
-    }
 };
 
 // A resume point contains the information needed to reconstruct the Baseline
 // state from a position in the JIT. See the big comment near resumeAfter() in
 // IonBuilder.cpp.
 class MResumePoint MOZ_FINAL : public MNode, public InlineForwardListNode<MResumePoint>
 {
   public:
--- a/js/src/jit/ParallelFunctions.cpp
+++ b/js/src/jit/ParallelFunctions.cpp
@@ -31,21 +31,17 @@ jit::ForkJoinContextPar()
 
 // NewGCThingPar() is called in place of NewGCThing() when executing
 // parallel code.  It uses the ArenaLists for the current thread and
 // allocates from there.
 JSObject *
 jit::NewGCThingPar(ForkJoinContext *cx, gc::AllocKind allocKind)
 {
     JS_ASSERT(ForkJoinContext::current() == cx);
-#ifdef JSGC_FJGENERATIONAL
-    return js::NewGCObject<CanGC>(cx, allocKind, 0, gc::DefaultHeap);
-#else
     return js::NewGCObject<NoGC>(cx, allocKind, 0, gc::TenuredHeap);
-#endif
 }
 
 bool
 jit::ParallelWriteGuard(ForkJoinContext *cx, JSObject *object)
 {
     // Implements the most general form of the write guard, which is
     // suitable for writes to any object O. There are two cases to
     // consider and test for:
--- a/js/src/jit/ParallelSafetyAnalysis.cpp
+++ b/js/src/jit/ParallelSafetyAnalysis.cpp
@@ -603,21 +603,16 @@ ParallelSafetyVisitor::replaceWithNewPar
 
 bool
 ParallelSafetyVisitor::replace(MInstruction *oldInstruction,
                                MInstruction *replacementInstruction)
 {
     MBasicBlock *block = oldInstruction->block();
     block->insertBefore(oldInstruction, replacementInstruction);
     oldInstruction->replaceAllUsesWith(replacementInstruction);
-    MResumePoint *rp = oldInstruction->resumePoint();
-    if (rp && rp->instruction() == oldInstruction) {
-        rp->setInstruction(replacementInstruction);
-        replacementInstruction->setResumePoint(rp);
-    }
     block->discard(oldInstruction);
 
     // We may have replaced a specialized Float32 instruction by its
     // non-specialized version, so just retry to specialize it. This relies on
     // the fact that Phis' types don't change during the ParallelSafetyAnalysis;
     // otherwise we'd have to run the entire TypeAnalyzer Float32 analysis once
     // instructions have been replaced.
     if (replacementInstruction->isFloat32Commutative() &&
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -84,23 +84,16 @@ class JS_PUBLIC_API(AutoGCRooter) {
         *stackTop = down;
     }
 
     /* Implemented in gc/RootMarking.cpp. */
     inline void trace(JSTracer *trc);
     static void traceAll(JSTracer *trc);
     static void traceAllWrappers(JSTracer *trc);
 
-    /* T must be a context type */
-    template<typename T>
-    static void traceAllInContext(T* cx, JSTracer *trc) {
-        for (AutoGCRooter *gcr = cx->autoGCRooters; gcr; gcr = gcr->down)
-            gcr->trace(trc);
-    }
-
   protected:
     AutoGCRooter * const down;
 
     /*
      * Discriminates actual subclass of this being used.  If non-negative, the
      * subclass roots an array of values of the length stored in this field.
      * If negative, meaning is indicated by the corresponding value in the enum
      * below.  Any other negative value indicates some deeper problem such as
--- a/js/src/jscntxtinlines.h
+++ b/js/src/jscntxtinlines.h
@@ -14,18 +14,16 @@
 
 #include "builtin/Object.h"
 #include "jit/IonFrames.h"
 #include "vm/ForkJoin.h"
 #include "vm/HelperThreads.h"
 #include "vm/Interpreter.h"
 #include "vm/ProxyObject.h"
 
-#include "gc/ForkJoinNursery-inl.h"
-
 namespace js {
 
 #ifdef JS_CRASH_DIAGNOSTICS
 class CompartmentChecker
 {
     JSCompartment *compartment;
 
   public:
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -764,17 +764,17 @@ Chunk::init(JSRuntime *rt)
      * Decommit the arenas. We do this after poisoning so that if the OS does
      * not have to recycle the pages, we still get the benefit of poisoning.
      */
     decommitAllArenas(rt);
 
     /* Initialize the chunk info. */
     info.age = 0;
     info.trailer.storeBuffer = nullptr;
-    info.trailer.location = ChunkLocationBitTenuredHeap;
+    info.trailer.location = ChunkLocationTenuredHeap;
     info.trailer.runtime = rt;
 
     /* The rest of info fields are initialized in pickChunk. */
 }
 
 static inline Chunk **
 GetAvailableChunkList(Zone *zone)
 {
@@ -873,27 +873,18 @@ Chunk::fetchNextFreeArena(JSRuntime *rt)
 }
 
 ArenaHeader *
 Chunk::allocateArena(Zone *zone, AllocKind thingKind)
 {
     JS_ASSERT(hasAvailableArenas());
 
     JSRuntime *rt = zone->runtimeFromAnyThread();
-    if (!rt->isHeapMinorCollecting() && rt->gc.bytes >= rt->gc.maxBytes) {
-#ifdef JSGC_FJGENERATIONAL
-        // This is an approximation to the best test, which would check that
-        // this thread is currently promoting into the tenured area.  I doubt
-        // the better test would make much difference.
-        if (!rt->isFJMinorCollecting())
-            return nullptr;
-#else
+    if (!rt->isHeapMinorCollecting() && rt->gc.bytes >= rt->gc.maxBytes)
         return nullptr;
-#endif
-    }
 
     ArenaHeader *aheader = MOZ_LIKELY(info.numArenasFreeCommitted > 0)
                            ? fetchNextFreeArena(rt)
                            : fetchNextDecommittedArena();
     aheader->init(zone, thingKind);
     if (MOZ_UNLIKELY(!hasAvailableArenas()))
         removeFromAvailableList();
 
@@ -1616,17 +1607,17 @@ ArenaLists::allocateFromArenaInline(Zone
     if (!maybeLock.locked())
         maybeLock.lock(rt);
     Chunk *chunk = rt->gc.pickChunk(zone, maybeStartBackgroundAllocation);
     if (!chunk)
         return nullptr;
 
     /*
      * While we still hold the GC lock get an arena from some chunk, mark it
      * as full as its single free span is moved to the free lists, and insert
      * it to the list as a fully allocated arena.
      *
      * We add the arena before the head, so that after the GC the most
      * recently added arena will be used first for allocations. This improves
      * cache locality.
      */
     JS_ASSERT(al->isCursorAtEnd());
     aheader = chunk->allocateArena(zone, thingKind);
@@ -2069,17 +2060,17 @@ GCRuntime::triggerGC(JS::gcreason::Reaso
     JS::PrepareForFullGC(rt);
     requestInterrupt(reason);
     return true;
 }
 
 bool
 js::TriggerZoneGC(Zone *zone, JS::gcreason::Reason reason)
 {
     return zone->runtimeFromAnyThread()->gc.triggerZoneGC(zone, reason);
 }
 
 bool
 GCRuntime::triggerZoneGC(Zone *zone, JS::gcreason::Reason reason)
 {
     /*
      * If parallel threads are running, wait till they
      * are stopped to trigger GC.
@@ -2289,20 +2280,16 @@ DecommitArenas(JSRuntime *rt)
     DecommitArenasFromAvailableList(rt, &rt->gc.systemAvailableChunkListHead);
     DecommitArenasFromAvailableList(rt, &rt->gc.userAvailableChunkListHead);
 }
 
 /* Must be called with the GC lock taken. */
 static void
 ExpireChunksAndArenas(JSRuntime *rt, bool shouldShrink)
 {
-#ifdef JSGC_FJGENERATIONAL
-    rt->threadPool.pruneChunkCache();
-#endif
-
     if (Chunk *toFree = rt->gc.chunkPool.expire(rt, shouldShrink)) {
         AutoUnlockGC unlock(rt);
         FreeChunkList(rt, toFree);
     }
 
     if (shouldShrink)
         DecommitArenas(rt);
 }
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -36,20 +36,16 @@ class DebugScopeObject;
 class GlobalObject;
 class LazyScript;
 class Nursery;
 class PropertyName;
 class ScopeObject;
 class Shape;
 class UnownedBaseShape;
 
-namespace gc {
-class ForkJoinNursery;
-}
-
 unsigned GetCPUCount();
 
 enum HeapState {
     Idle,             // doing nothing with the GC heap
     Tracing,          // tracing the GC heap without collecting, e.g. IterateCompartments()
     MajorCollecting,  // doing a GC of the major heap
     MinorCollecting   // doing a GC of the minor heap (nursery)
 };
@@ -195,52 +191,16 @@ IsNurseryAllocable(AllocKind kind)
         false,     /* FINALIZE_EXTERNAL_STRING */
         false,     /* FINALIZE_JITCODE */
     };
     JS_STATIC_ASSERT(JS_ARRAY_LENGTH(map) == FINALIZE_LIMIT);
     return map[kind];
 }
 #endif
 
-#if defined(JSGC_FJGENERATIONAL)
-// This is separate from IsNurseryAllocable() so that the latter can evolve
-// without worrying about what the ForkJoinNursery's needs are, and vice
-// versa to some extent.
-static inline bool
-IsFJNurseryAllocable(AllocKind kind)
-{
-    JS_ASSERT(kind >= 0 && unsigned(kind) < FINALIZE_LIMIT);
-    static const bool map[] = {
-        false,     /* FINALIZE_OBJECT0 */
-        true,      /* FINALIZE_OBJECT0_BACKGROUND */
-        false,     /* FINALIZE_OBJECT2 */
-        true,      /* FINALIZE_OBJECT2_BACKGROUND */
-        false,     /* FINALIZE_OBJECT4 */
-        true,      /* FINALIZE_OBJECT4_BACKGROUND */
-        false,     /* FINALIZE_OBJECT8 */
-        true,      /* FINALIZE_OBJECT8_BACKGROUND */
-        false,     /* FINALIZE_OBJECT12 */
-        true,      /* FINALIZE_OBJECT12_BACKGROUND */
-        false,     /* FINALIZE_OBJECT16 */
-        true,      /* FINALIZE_OBJECT16_BACKGROUND */
-        false,     /* FINALIZE_SCRIPT */
-        false,     /* FINALIZE_LAZY_SCRIPT */
-        false,     /* FINALIZE_SHAPE */
-        false,     /* FINALIZE_BASE_SHAPE */
-        false,     /* FINALIZE_TYPE_OBJECT */
-        false,     /* FINALIZE_FAT_INLINE_STRING */
-        false,     /* FINALIZE_STRING */
-        false,     /* FINALIZE_EXTERNAL_STRING */
-        false,     /* FINALIZE_JITCODE */
-    };
-    JS_STATIC_ASSERT(JS_ARRAY_LENGTH(map) == FINALIZE_LIMIT);
-    return map[kind];
-}
-#endif
-
 static inline bool
 IsBackgroundFinalized(AllocKind kind)
 {
     JS_ASSERT(kind >= 0 && unsigned(kind) < FINALIZE_LIMIT);
     static const bool map[] = {
         false,     /* FINALIZE_OBJECT0 */
         true,      /* FINALIZE_OBJECT0_BACKGROUND */
         false,     /* FINALIZE_OBJECT2 */
@@ -817,17 +777,16 @@ class ArenaLists
 
     void *allocateFromArena(JS::Zone *zone, AllocKind thingKind);
     inline void *allocateFromArenaInline(JS::Zone *zone, AllocKind thingKind,
                                          AutoMaybeStartBackgroundAllocation &maybeStartBackgroundAllocation);
 
     inline void normalizeBackgroundFinalizeState(AllocKind thingKind);
 
     friend class js::Nursery;
-    friend class js::gc::ForkJoinNursery;
 };
 
 /*
  * Initial allocation size for data structures holding chunks is set to hold
  * chunks with total capacity of 16MB to avoid buffer resizes during browser
  * startup.
  */
 const size_t INITIAL_CHUNK_CAPACITY = 16 * 1024 * 1024 / ChunkSize;
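
For concreteness: with the 1 MiB chunks SpiderMonkey normally uses, the
expression above works out to 16 * 1024 * 1024 / (1024 * 1024) = 16 chunk
slots. The exact capacity scales inversely with ChunkSize, whose definition
is not part of this patch.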
--- a/js/src/jsgcinlines.h
+++ b/js/src/jsgcinlines.h
@@ -5,17 +5,16 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jsgcinlines_h
 #define jsgcinlines_h
 
 #include "jsgc.h"
 
 #include "gc/Zone.h"
-#include "vm/ForkJoin.h"
 
 namespace js {
 
 class Shape;
 
 inline Allocator *
 ThreadSafeContext::allocator() const
 {
@@ -25,27 +24,18 @@ ThreadSafeContext::allocator() const
 
 template <typename T>
 inline bool
 ThreadSafeContext::isThreadLocal(T thing) const
 {
     if (!isForkJoinContext())
         return true;
 
-#ifdef JSGC_FJGENERATIONAL
-    ForkJoinContext *cx = static_cast<ForkJoinContext*>(const_cast<ThreadSafeContext*>(this));
-    if (cx->fjNursery().isInsideNewspace(thing))
-        return true;
-#endif
-
-    // Global invariant
-    JS_ASSERT(!IsInsideNursery(thing));
-
-    // The thing is not in the nursery, but is it in the private tenured area?
-    if (allocator_->arenas.containsArena(runtime_, thing->arenaHeader()))
+    if (!IsInsideNursery(thing) &&
+        allocator_->arenas.containsArena(runtime_, thing->arenaHeader()))
     {
         // GC should be suppressed in preparation for mutating thread local
         // objects, as we don't want to trip any barriers.
         JS_ASSERT(!thing->zoneFromAnyThread()->needsBarrier());
         JS_ASSERT(!thing->runtimeFromAnyThread()->needsBarrier());
 
         return true;
     }
@@ -69,24 +59,16 @@ GetGCObjectKind(const Class *clasp)
 #ifdef JSGC_GENERATIONAL
 inline bool
 ShouldNurseryAllocate(const Nursery &nursery, AllocKind kind, InitialHeap heap)
 {
     return nursery.isEnabled() && IsNurseryAllocable(kind) && heap != TenuredHeap;
 }
 #endif
 
-#ifdef JSGC_FJGENERATIONAL
-inline bool
-ShouldFJNurseryAllocate(const ForkJoinNursery &nursery, AllocKind kind, InitialHeap heap)
-{
-    return IsFJNurseryAllocable(kind) && heap != TenuredHeap;
-}
-#endif
-
 inline JSGCTraceKind
 GetGCThingTraceKind(const void *thing)
 {
     JS_ASSERT(thing);
     const Cell *cell = static_cast<const Cell *>(thing);
 #ifdef JSGC_GENERATIONAL
     if (IsInsideNursery(cell))
         return JSTRACE_OBJECT;
@@ -116,29 +98,25 @@ class ArenaIter
         aheader = nullptr;
         remainingHeader = nullptr;
     }
 
     ArenaIter(JS::Zone *zone, AllocKind kind) {
         init(zone, kind);
     }
 
-    void init(Allocator *allocator, AllocKind kind) {
-        aheader = allocator->arenas.getFirstArena(kind);
-        remainingHeader = allocator->arenas.getFirstArenaToSweep(kind);
+    void init(JS::Zone *zone, AllocKind kind) {
+        aheader = zone->allocator.arenas.getFirstArena(kind);
+        remainingHeader = zone->allocator.arenas.getFirstArenaToSweep(kind);
         if (!aheader) {
             aheader = remainingHeader;
             remainingHeader = nullptr;
         }
     }
 
-    void init(JS::Zone *zone, AllocKind kind) {
-        init(&zone->allocator, kind);
-    }
-
     bool done() const {
         return !aheader;
     }
 
     ArenaHeader *get() const {
         return aheader;
     }
 
@@ -177,21 +155,17 @@ class ArenaCellIterImpl
         // never need to move forward.
         if (thing == span.first) {
             thing = span.last + thingSize;
             span = *span.nextSpan();
         }
     }
 
   public:
-    ArenaCellIterImpl()
-      : firstThingOffset(0)     // Squelch
-      , thingSize(0)            //   warnings
-    {
-    }
+    ArenaCellIterImpl() {}
 
     void initUnsynchronized(ArenaHeader *aheader) {
         AllocKind kind = aheader->getAllocKind();
 #ifdef DEBUG
         isInited = true;
 #endif
         firstThingOffset = Arena::firstThingOffset(kind);
         thingSize = Arena::thingSize(kind);
@@ -473,38 +447,16 @@ TryNewNurseryObject(ThreadSafeContext *c
             JS_ASSERT(obj);
             return obj;
         }
     }
     return nullptr;
 }
 #endif /* JSGC_GENERATIONAL */
 
-#ifdef JSGC_FJGENERATIONAL
-template <AllowGC allowGC>
-inline JSObject *
-TryNewFJNurseryObject(ForkJoinContext *cx, size_t thingSize, size_t nDynamicSlots)
-{
-    ForkJoinNursery &nursery = cx->fjNursery();
-    bool tooLarge = false;
-    JSObject *obj = nursery.allocateObject(thingSize, nDynamicSlots, tooLarge);
-    if (obj)
-        return obj;
-
-    if (!tooLarge && allowGC) {
-        nursery.minorGC();
-        obj = nursery.allocateObject(thingSize, nDynamicSlots, tooLarge);
-        if (obj)
-            return obj;
-    }
-
-    return nullptr;
-}
-#endif /* JSGC_FJGENERATIONAL */
-
 static inline bool
 PossiblyFail()
 {
     JS_OOM_POSSIBLY_FAIL();
     return true;
 }
 
 template <AllowGC allowGC>
@@ -584,26 +536,16 @@ AllocateObject(ThreadSafeContext *cx, Al
 
 #ifdef JSGC_GENERATIONAL
     if (cx->hasNursery() && ShouldNurseryAllocate(cx->nursery(), kind, heap)) {
         JSObject *obj = TryNewNurseryObject<allowGC>(cx, thingSize, nDynamicSlots);
         if (obj)
             return obj;
     }
 #endif
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext() &&
-        ShouldFJNurseryAllocate(cx->asForkJoinContext()->fjNursery(), kind, heap))
-    {
-        JSObject *obj =
-            TryNewFJNurseryObject<allowGC>(cx->asForkJoinContext(), thingSize, nDynamicSlots);
-        if (obj)
-            return obj;
-    }
-#endif
 
     HeapSlot *slots = nullptr;
     if (nDynamicSlots) {
         slots = cx->pod_malloc<HeapSlot>(nDynamicSlots);
         if (MOZ_UNLIKELY(!slots))
             return nullptr;
         js::Debug_SetSlotRangeToCrashOnTouch(slots, nDynamicSlots);
     }
@@ -641,18 +583,16 @@ AllocateNonObject(ThreadSafeContext *cx)
 }
 
 /*
  * When allocating for initialization from a cached object copy, we will
  * potentially destroy the cache entry we want to copy if we allow GC. On the
  * other hand, since these allocations are extremely common, we don't want to
  * delay GC from these allocation sites. Instead we allow the GC, but still
  * fail the allocation, forcing the non-cached path.
- *
- * Observe this won't be used for ForkJoin allocation, as it takes a JSContext*
  */
 template <AllowGC allowGC>
 inline JSObject *
 AllocateObjectForCacheHit(JSContext *cx, AllocKind kind, InitialHeap heap)
 {
 #ifdef JSGC_GENERATIONAL
     if (ShouldNurseryAllocate(cx->nursery(), kind, heap)) {
         size_t thingSize = Arena::thingSize(kind);
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -2832,51 +2832,35 @@ JSObject::setSlotSpan(ThreadSafeContext 
 
     if (!JSObject::updateSlotsForSpan(cx, obj, oldSpan, span))
         return false;
 
     obj->lastProperty()->base()->setSlotSpan(span);
     return true;
 }
 
-// This will not run the garbage collector.  If a nursery cannot accommodate the slot array
-// an attempt will be made to place the array in the tenured area.
 static HeapSlot *
 AllocateSlots(ThreadSafeContext *cx, JSObject *obj, uint32_t nslots)
 {
 #ifdef JSGC_GENERATIONAL
     if (cx->isJSContext())
         return cx->asJSContext()->runtime()->gc.nursery.allocateSlots(cx->asJSContext(), obj, nslots);
 #endif
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext())
-        return cx->asForkJoinContext()->fjNursery().allocateSlots(obj, nslots);
-#endif
     return cx->pod_malloc<HeapSlot>(nslots);
 }
 
-// This will not run the garbage collector.  If a nursery cannot accommodate the slot array
-// an attempt will be made to place the array in the tenured area.
-//
-// If this returns null then the old slots will be left alone.
 static HeapSlot *
 ReallocateSlots(ThreadSafeContext *cx, JSObject *obj, HeapSlot *oldSlots,
                 uint32_t oldCount, uint32_t newCount)
 {
 #ifdef JSGC_GENERATIONAL
     if (cx->isJSContext()) {
         return cx->asJSContext()->runtime()->gc.nursery.reallocateSlots(cx->asJSContext(),
-                                                                        obj, oldSlots,
-                                                                        oldCount, newCount);
-    }
-#endif
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext()) {
-        return cx->asForkJoinContext()->fjNursery().reallocateSlots(obj, oldSlots,
-                                                                    oldCount, newCount);
+                                                                        obj, oldSlots,
+                                                                        oldCount, newCount);
     }
 #endif
     return (HeapSlot *)cx->realloc_(oldSlots, oldCount * sizeof(HeapSlot),
                                     newCount * sizeof(HeapSlot));
 }
 
 /* static */ bool
 JSObject::growSlots(ThreadSafeContext *cx, HandleObject obj, uint32_t oldCount, uint32_t newCount)
@@ -2938,25 +2922,21 @@ JSObject::growSlots(ThreadSafeContext *c
     Debug_SetSlotRangeToCrashOnTouch(obj->slots + oldCount, newCount - oldCount);
 
     return true;
 }
 
 static void
 FreeSlots(ThreadSafeContext *cx, HeapSlot *slots)
 {
+    // Note: threads without a JSContext do not have access to nursery allocated things.
 #ifdef JSGC_GENERATIONAL
-    // Note: threads without a JSContext do not have access to GGC nursery allocated things.
     if (cx->isJSContext())
         return cx->asJSContext()->runtime()->gc.nursery.freeSlots(cx->asJSContext(), slots);
 #endif
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext())
-        return cx->asForkJoinContext()->fjNursery().freeSlots(slots);
-#endif
     js_free(slots);
 }
 
 /* static */ void
 JSObject::shrinkSlots(ThreadSafeContext *cx, HandleObject obj, uint32_t oldCount, uint32_t newCount)
 {
     JS_ASSERT(cx->isThreadLocal(obj));
     JS_ASSERT(newCount < oldCount);
@@ -3162,50 +3142,36 @@ JSObject::maybeDensifySparseElements(js:
      * to grow the object.
      */
     if (!obj->clearFlag(cx, BaseShape::INDEXED))
         return ED_FAILED;
 
     return ED_OK;
 }
 
-// This will not run the garbage collector.  If a nursery cannot accommodate the element array
-// an attempt will be made to place the array in the tenured area.
 static ObjectElements *
 AllocateElements(ThreadSafeContext *cx, JSObject *obj, uint32_t nelems)
 {
 #ifdef JSGC_GENERATIONAL
     if (cx->isJSContext())
         return cx->asJSContext()->runtime()->gc.nursery.allocateElements(cx->asJSContext(), obj, nelems);
 #endif
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext())
-        return cx->asForkJoinContext()->fjNursery().allocateElements(obj, nelems);
-#endif
 
     return static_cast<js::ObjectElements *>(cx->malloc_(nelems * sizeof(HeapValue)));
 }
 
-// This will not run the garbage collector.  If a nursery cannot accommodate the element array
-// an attempt will be made to place the array in the tenured area.
 static ObjectElements *
 ReallocateElements(ThreadSafeContext *cx, JSObject *obj, ObjectElements *oldHeader,
                    uint32_t oldCount, uint32_t newCount)
 {
 #ifdef JSGC_GENERATIONAL
     if (cx->isJSContext()) {
         return cx->asJSContext()->runtime()->gc.nursery.reallocateElements(cx->asJSContext(), obj,
-                                                                           oldHeader, oldCount,
-                                                                           newCount);
-    }
-#endif
-#ifdef JSGC_FJGENERATIONAL
-    if (cx->isForkJoinContext()) {
-        return cx->asForkJoinContext()->fjNursery().reallocateElements(obj, oldHeader,
-                                                                       oldCount, newCount);
+                                                                           oldHeader, oldCount,
+                                                                           newCount);
     }
 #endif
 
     return static_cast<js::ObjectElements *>(cx->realloc_(oldHeader, oldCount * sizeof(HeapSlot),
                                                           newCount * sizeof(HeapSlot)));
 }
 
 bool
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -188,20 +188,16 @@ DenseRangeWriteBarrierPost(JSRuntime *rt
 #ifdef JSGC_GENERATIONAL
     if (count > 0) {
         JS::shadow::Runtime *shadowRuntime = JS::shadow::Runtime::asShadowRuntime(rt);
         shadowRuntime->gcStoreBufferPtr()->putSlotFromAnyThread(obj, HeapSlot::Element, start, count);
     }
 #endif
 }
 
-namespace gc {
-class ForkJoinNursery;
-}
-
 }  /* namespace js */
 
 /*
  * The public interface for an object.
  *
  * Implementation of the underlying structure occurs in ObjectImpl, from which
  * this struct inherits.  This inheritance is currently public, but it will
  * eventually be made protected.  For full details, see vm/ObjectImpl.{h,cpp}.
@@ -211,17 +207,16 @@ class ForkJoinNursery;
  */
 class JSObject : public js::ObjectImpl
 {
   private:
     friend class js::Shape;
     friend struct js::GCMarker;
     friend class  js::NewObjectCache;
     friend class js::Nursery;
-    friend class js::gc::ForkJoinNursery;
 
     /* Make the type object to use for LAZY_TYPE objects. */
     static js::types::TypeObject *makeLazyType(JSContext *cx, js::HandleObject obj);
 
   public:
     static const js::Class class_;
 
     /*
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -16,17 +16,16 @@
 #include "vm/ScopeObject.h"
 #include "vm/StringObject.h"
 
 #include "jsatominlines.h"
 #include "jscompartmentinlines.h"
 #include "jsgcinlines.h"
 #include "jsinferinlines.h"
 
-#include "gc/ForkJoinNursery-inl.h"
 #include "vm/ObjectImpl-inl.h"
 
 /* static */ inline bool
 JSObject::setGenericAttributes(JSContext *cx, js::HandleObject obj,
                                js::HandleId id, unsigned *attrsp)
 {
     js::types::MarkTypePropertyNonData(cx, obj, id);
     js::GenericAttributesOp op = obj->getOps()->setGenericAttributes;
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -107,17 +107,16 @@ UNIFIED_SOURCES += [
     'frontend/BytecodeCompiler.cpp',
     'frontend/BytecodeEmitter.cpp',
     'frontend/FoldConstants.cpp',
     'frontend/NameFunctions.cpp',
     'frontend/ParseMaps.cpp',
     'frontend/ParseNode.cpp',
     'frontend/TokenStream.cpp',
     'gc/Barrier.cpp',
-    'gc/ForkJoinNursery.cpp',
     'gc/Iteration.cpp',
     'gc/Marking.cpp',
     'gc/Memory.cpp',
     'gc/Nursery.cpp',
     'gc/RootMarking.cpp',
     'gc/Statistics.cpp',
     'gc/StoreBuffer.cpp',
     'gc/Tracer.cpp',
@@ -453,18 +452,16 @@ if CONFIG['MOZ_ETW']:
     ]
     # This will get the ETW provider resources into the library mozjs.dll
     RESFILE = 'ETWProvider.res'
 
 if CONFIG['NIGHTLY_BUILD']:
     DEFINES['ENABLE_PARALLEL_JS'] = True
     DEFINES['ENABLE_BINARYDATA'] = True
     DEFINES['ENABLE_SHARED_ARRAY_BUFFER'] = True
-    if CONFIG['JSGC_GENERATIONAL_CONFIGURED']:
-        DEFINES['JSGC_FJGENERATIONAL'] = True
 
 DEFINES['EXPORT_JS_API'] = True
 
 if CONFIG['JS_THREADSAFE']:
     DEFINES['JS_THREADSAFE'] = True
 
 if CONFIG['JS_HAS_CTYPES']:
     DEFINES['JS_HAS_CTYPES'] = True
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -794,24 +794,18 @@ ArrayBufferObject::finalize(FreeOp *fop,
 
     if (buffer.ownsData())
         buffer.releaseData(fop);
 }
 
 /* static */ void
 ArrayBufferObject::obj_trace(JSTracer *trc, JSObject *obj)
 {
-    if (!IS_GC_MARKING_TRACER(trc) && !trc->runtime()->isHeapMinorCollecting()
-#ifdef JSGC_FJGENERATIONAL
-        && !trc->runtime()->isFJMinorCollecting()
-#endif
-        )
-    {
+    if (!IS_GC_MARKING_TRACER(trc) && !trc->runtime()->isHeapMinorCollecting())
         return;
-    }
 
     // ArrayBufferObjects need to maintain a list of possibly-weak pointers to
     // their views. The straightforward way to update the weak pointers would
     // be in the views' finalizers, but giving views finalizers means they
     // cannot be swept in the background. This results in a very high
     // performance cost.  Instead, ArrayBufferObjects with a single view hold a
     // strong pointer to the view. This can entrain garbage when the single
     // view becomes otherwise unreachable while the buffer is still live, but
--- a/js/src/vm/ForkJoin.cpp
+++ b/js/src/vm/ForkJoin.cpp
@@ -22,25 +22,24 @@
 
 #ifdef JS_THREADSAFE
 # include "jit/BaselineJIT.h"
 # include "vm/Monitor.h"
 #endif
 
 #if defined(JS_THREADSAFE) && defined(JS_ION)
 # include "jit/JitCommon.h"
-# ifdef FORKJOIN_SPEW
+# ifdef DEBUG
 #  include "jit/Ion.h"
 #  include "jit/JitCompartment.h"
 #  include "jit/MIR.h"
 #  include "jit/MIRGraph.h"
 # endif
 #endif // THREADSAFE && ION
 
-#include "gc/ForkJoinNursery-inl.h"
 #include "vm/Interpreter-inl.h"
 
 using namespace js;
 using namespace js::parallel;
 using namespace js::jit;
 
 using mozilla::ThreadLocal;
 
@@ -275,17 +274,17 @@ class ForkJoinOperation
     uint32_t bailouts;
 
     // Information about the bailout:
     ParallelBailoutCause bailoutCause;
     RootedScript bailoutScript;
     jsbytecode *bailoutBytecode;
 
     ForkJoinOperation(JSContext *cx, HandleFunction fun, uint16_t sliceStart,
-                      uint16_t sliceEnd, ForkJoinMode mode, HandleObject updatable);
+                      uint16_t sliceEnd, ForkJoinMode mode);
     ExecutionStatus apply();
 
   private:
     // Most of the functions involved in managing the parallel
     // compilation follow a similar control-flow. They return RedLight
     // if they have either encountered a fatal error or completed the
     // execution, such that no further work is needed. In that event,
     // they take an `ExecutionStatus*` which they use to report
@@ -314,17 +313,16 @@ class ForkJoinOperation
             calleesEnqueued = false;
             useCount = 0;
             stallCount = 0;
         }
     };
 
     JSContext *cx_;
     HandleFunction fun_;
-    HandleObject updatable_;
     uint16_t sliceStart_;
     uint16_t sliceEnd_;
     Vector<ParallelBailoutRecord, 16> bailoutRecords_;
     AutoScriptVector worklist_;
     Vector<WorklistData, 16> worklistData_;
     ForkJoinMode mode_;
 
     TrafficLight enqueueInitialScript(ExecutionStatus *status);
@@ -342,27 +340,22 @@ class ForkJoinOperation
     TrafficLight appendCallTargetsToWorklist(uint32_t index, ExecutionStatus *status);
     TrafficLight appendCallTargetToWorklist(HandleScript script, ExecutionStatus *status);
     bool addToWorklist(HandleScript script);
     inline bool hasScript(Vector<types::RecompileInfo> &scripts, JSScript *script);
 }; // class ForkJoinOperation
 
 class ForkJoinShared : public ParallelJob, public Monitor
 {
-#ifdef JSGC_FJGENERATIONAL
-    friend class gc::ForkJoinGCShared;
-#endif
-
     /////////////////////////////////////////////////////////////////////////
     // Constant fields
 
     JSContext *const cx_;                  // Current context
     ThreadPool *const threadPool_;         // The thread pool
     HandleFunction fun_;                   // The JavaScript function to execute
-    HandleObject updatable_;               // Pre-existing object that might be updated
     uint16_t sliceStart_;                  // The starting slice id.
     uint16_t sliceEnd_;                    // The ending slice id + 1.
     PRLock *cxLock_;                       // Locks cx_ for parallel VM calls
     ParallelBailoutRecord *const records_; // Bailout records for each worker
 
     /////////////////////////////////////////////////////////////////////////
     // Per-thread arenas
     //
@@ -389,17 +382,16 @@ class ForkJoinShared : public ParallelJo
 
     // Set to true when a worker bails for a fatal reason.
     mozilla::Atomic<bool, mozilla::ReleaseAcquire> fatal_;
 
   public:
     ForkJoinShared(JSContext *cx,
                    ThreadPool *threadPool,
                    HandleFunction fun,
-                   HandleObject updatable,
                    uint16_t sliceStart,
                    uint16_t sliceEnd,
                    ParallelBailoutRecord *records);
     ~ForkJoinShared();
 
     bool init();
 
     ParallelResult execute();
@@ -431,18 +423,16 @@ class ForkJoinShared : public ParallelJo
     void setPendingAbortFatal() { fatal_ = true; }
 
     JSRuntime *runtime() { return cx_->runtime(); }
     JS::Zone *zone() { return cx_->zone(); }
     JSCompartment *compartment() { return cx_->compartment(); }
 
     JSContext *acquireJSContext() { PR_Lock(cxLock_); return cx_; }
     void releaseJSContext() { PR_Unlock(cxLock_); }
-
-    HandleObject updatable() { return updatable_; }
 };
 
 class AutoEnterWarmup
 {
     JSRuntime *runtime_;
 
   public:
     explicit AutoEnterWarmup(JSRuntime *runtime) : runtime_(runtime) { runtime_->forkJoinWarmup++; }
@@ -507,36 +497,34 @@ ForkJoinActivation::~ForkJoinActivation(
 // They handle parallel compilation (if necessary), triggering
 // parallel execution, and recovering from bailouts.
 
 static const char *ForkJoinModeString(ForkJoinMode mode);
 
 bool
 js::ForkJoin(JSContext *cx, CallArgs &args)
 {
-    JS_ASSERT(args.length() == 5); // else the self-hosted code is wrong
+    JS_ASSERT(args.length() == 4); // else the self-hosted code is wrong
     JS_ASSERT(args[0].isObject());
     JS_ASSERT(args[0].toObject().is<JSFunction>());
     JS_ASSERT(args[1].isInt32());
     JS_ASSERT(args[2].isInt32());
     JS_ASSERT(args[3].isInt32());
     JS_ASSERT(args[3].toInt32() < NumForkJoinModes);
-    JS_ASSERT(args[4].isObjectOrNull());
 
     RootedFunction fun(cx, &args[0].toObject().as<JSFunction>());
     uint16_t sliceStart = (uint16_t)(args[1].toInt32());
     uint16_t sliceEnd = (uint16_t)(args[2].toInt32());
     ForkJoinMode mode = (ForkJoinMode)(args[3].toInt32());
-    RootedObject updatable(cx, args[4].toObjectOrNull());
 
     MOZ_ASSERT(sliceStart == args[1].toInt32());
     MOZ_ASSERT(sliceEnd == args[2].toInt32());
     MOZ_ASSERT(sliceStart <= sliceEnd);
 
-    ForkJoinOperation op(cx, fun, sliceStart, sliceEnd, mode, updatable);
+    ForkJoinOperation op(cx, fun, sliceStart, sliceEnd, mode);
     ExecutionStatus status = op.apply();
     if (status == ExecutionFatal)
         return false;
 
     switch (mode) {
       case ForkJoinModeNormal:
       case ForkJoinModeCompile:
         return true;
@@ -585,24 +573,23 @@ ForkJoinModeString(ForkJoinMode mode) {
       case ForkJoinModeRecover: return "recover";
       case ForkJoinModeBailout: return "bailout";
       case NumForkJoinModes: return "max";
     }
     return "???";
 }
 
 ForkJoinOperation::ForkJoinOperation(JSContext *cx, HandleFunction fun, uint16_t sliceStart,
-                                     uint16_t sliceEnd, ForkJoinMode mode, HandleObject updatable)
+                                     uint16_t sliceEnd, ForkJoinMode mode)
   : bailouts(0),
     bailoutCause(ParallelBailoutNone),
     bailoutScript(cx),
     bailoutBytecode(nullptr),
     cx_(cx),
     fun_(fun),
-    updatable_(updatable),
     sliceStart_(sliceStart),
     sliceEnd_(sliceEnd),
     bailoutRecords_(cx),
     worklist_(cx),
     worklistData_(cx),
     mode_(mode)
 { }
 
@@ -1245,18 +1232,17 @@ ForkJoinOperation::parallelExecution(Exe
     if (sliceStart_ == sliceEnd_) {
         Spew(SpewOps, "Warmup execution finished all the work.");
         *status = ExecutionWarmup;
         return RedLight;
     }
 
     ForkJoinActivation activation(cx_);
     ThreadPool *threadPool = &cx_->runtime()->threadPool;
-    ForkJoinShared shared(cx_, threadPool, fun_, updatable_, sliceStart_, sliceEnd_,
-                          &bailoutRecords_[0]);
+    ForkJoinShared shared(cx_, threadPool, fun_, sliceStart_, sliceEnd_, &bailoutRecords_[0]);
     if (!shared.init()) {
         *status = ExecutionFatal;
         return RedLight;
     }
 
     switch (shared.execute()) {
       case TP_SUCCESS:
         *status = ExecutionParallel;
@@ -1342,39 +1328,36 @@ class ParallelIonInvoke
         JitCode *code = ion->method();
         jitcode_ = code->raw();
         enter_ = rt->jitRuntime()->enterIon();
         calleeToken_ = CalleeToToken(callee);
     }
 
     bool invoke(ForkJoinContext *cx) {
         JitActivation activation(cx);
-        // In-out parameter: on input it denotes the number of values to preserve after the call.
-        Value result = Int32Value(0);
+        Value result;
         CALL_GENERATED_CODE(enter_, jitcode_, argc_ + 1, argv_ + 1, nullptr, calleeToken_,
                             nullptr, 0, &result);
         return !result.isMagic();
     }
 };
 
 /////////////////////////////////////////////////////////////////////////////
 // ForkJoinShared
 //
 
 ForkJoinShared::ForkJoinShared(JSContext *cx,
                                ThreadPool *threadPool,
                                HandleFunction fun,
-                               HandleObject updatable,
                                uint16_t sliceStart,
                                uint16_t sliceEnd,
                                ParallelBailoutRecord *records)
   : cx_(cx),
     threadPool_(threadPool),
     fun_(fun),
-    updatable_(updatable),
     sliceStart_(sliceStart),
     sliceEnd_(sliceEnd),
     cxLock_(nullptr),
     records_(records),
     allocators_(cx),
     gcRequested_(false),
     gcReason_(JS::gcreason::NUM_REASONS),
     gcZone_(nullptr),
@@ -1437,57 +1420,49 @@ ForkJoinShared::execute()
     AutoLockMonitor lock(*this);
 
     ParallelResult jobResult = TP_SUCCESS;
     {
         AutoUnlockMonitor unlock(*this);
 
         // Push parallel tasks and wait until they're all done.
         jobResult = threadPool_->executeJob(cx_, this, sliceStart_, sliceEnd_);
+        if (jobResult == TP_FATAL)
+            return TP_FATAL;
     }
 
-    // Arenas must be transfered unconditionally until we have the means
-    // to clear the ForkJoin result array, see bug 993347.
     transferArenasToCompartmentAndProcessGCRequests();
 
-    if (jobResult == TP_FATAL)
-        return TP_FATAL;
-
     // Check if any of the workers failed.
     if (abort_) {
         if (fatal_)
             return TP_FATAL;
         return TP_RETRY_SEQUENTIALLY;
     }
 
-#ifdef FORKJOIN_SPEW
+#ifdef DEBUG
     Spew(SpewOps, "Completed parallel job [slices: %d, threads: %d, stolen: %d (work stealing:%s)]",
          sliceEnd_ - sliceStart_,
          threadPool_->numWorkers(),
-#ifdef DEBUG
          threadPool_->stolenSlices(),
-#else
-         0,
-#endif
          threadPool_->workStealing() ? "ON" : "OFF");
 #endif
 
     // Everything went swimmingly. Give yourself a pat on the back.
     return jobResult;
 }
 
 void
 ForkJoinShared::transferArenasToCompartmentAndProcessGCRequests()
 {
     JSCompartment *comp = cx_->compartment();
     for (unsigned i = 0; i < threadPool_->numWorkers(); i++)
         comp->adoptWorkerAllocator(allocators_[i]);
 
     if (gcRequested_) {
-        Spew(SpewGC, "Triggering garbage collection in SpiderMonkey heap");
         if (!gcZone_)
             TriggerGC(cx_->runtime(), gcReason_);
         else
             TriggerZoneGC(gcZone_, gcReason_);
         gcRequested_ = false;
         gcZone_ = nullptr;
     }
 }
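
The function above is the consuming half of a simple deferred-request
handshake: workers may not collect inside the parallel section, so
requestZoneGC (shown below) only records what is wanted, and the main thread
services the request after the join. Condensed, using the member names of
ForkJoinShared:

    // Worker side: record only, never collect on this thread.
    gcZone_ = zone;          // nullptr means "collect the whole runtime"
    gcReason_ = reason;
    gcRequested_ = true;

    // Main-thread side, after the join (the function above):
    if (gcRequested_) {
        if (!gcZone_)
            TriggerGC(cx_->runtime(), gcReason_);
        else
            TriggerZoneGC(gcZone_, gcReason_);
        gcRequested_ = false;
        gcZone_ = nullptr;
    }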
@@ -1513,32 +1488,17 @@ ForkJoinShared::executeFromWorker(Thread
     TlsPerThreadData.set(nullptr);
 
     return !abort_;
 }
 
 bool
 ForkJoinShared::executeFromMainThread(ThreadPoolWorker *worker)
 {
-    // Note that we need new PerThreadData on the main thread as well,
-    // so that PJS GC does not walk up the old mainThread stack.
-    PerThreadData *oldData = TlsPerThreadData.get();
-    PerThreadData thisThread(cx_->runtime());
-    if (!thisThread.init()) {
-        setAbortFlagAndRequestInterrupt(true);
-        return false;
-    }
-    TlsPerThreadData.set(&thisThread);
-
-    // Don't use setIonStackLimit() because that acquires the ionStackLimitLock, and the
-    // lock has not been initialized in these cases.
-    thisThread.jitStackLimit = oldData->jitStackLimit;
-    executePortion(&thisThread, worker);
-    TlsPerThreadData.set(oldData);
-
+    executePortion(&cx_->mainThread(), worker);
     return !abort_;
 }
 
 void
 ForkJoinShared::executePortion(PerThreadData *perThread, ThreadPoolWorker *worker)
 {
     // WARNING: This code runs ON THE PARALLEL WORKER THREAD.
     // Be careful when accessing cx_.
@@ -1547,17 +1507,17 @@ ForkJoinShared::executePortion(PerThread
     ForkJoinContext cx(perThread, worker, allocator, this, &records_[worker->id()]);
     AutoSetForkJoinContext autoContext(&cx);
 
     // ForkJoinContext already contains an AutoSuppressGCAnalysis; however, the
     // analysis does not propagate this type information. We duplicate the
     // assertion here for maximum clarity.
     JS::AutoSuppressGCAnalysis nogc;
 
-#ifdef FORKJOIN_SPEW
+#ifdef DEBUG
     // Set the maximum worker and slice number for prettier spewing.
     cx.maxWorkerId = threadPool_->numWorkers();
 #endif
 
     Spew(SpewOps, "Up");
 
     // Make a new IonContext for the slice, which is needed if we need to
     // re-enter the VM.
@@ -1579,46 +1539,18 @@ ForkJoinShared::executePortion(PerThread
         ParallelIonInvoke<3> fii(runtime(), fun_, 3);
 
         fii.args[0] = Int32Value(worker->id());
         fii.args[1] = Int32Value(sliceStart_);
         fii.args[2] = Int32Value(sliceEnd_);
 
         bool ok = fii.invoke(&cx);
         JS_ASSERT(ok == !cx.bailoutRecord->topScript);
-        if (!ok) {
+        if (!ok)
             setAbortFlagAndRequestInterrupt(false);
-#ifdef JSGC_FJGENERATIONAL
-            // TODO: See bugs 1010169, 993347.
-            //
-            // It is not desirable to promote here, but if we don't do
-            // this then we can't unconditionally transfer arenas to
-            // the compartment, since the arenas can contain objects
-            // that point into the nurseries.  If those objects are
-            // touched at all by the GC, eg as part of a prebarrier,
-            // then chaos ensues.
-            //
-            // The proper fix might appear to be to note the abort and
-            // not transfer, but instead clear, the arenas.  However,
-            // the result array will remain live and unless it is
-            // cleared immediately and without running barriers then
-            // it will have pointers into the now-cleared areas, which
-            // is also wrong.
-            //
-            // For the moment, until we figure out how to clear the
-            // result array properly and implement that, it may be
-            // that the best thing we can do here is to evacuate and
-            // then let the GC run its course.
-            cx.evacuateLiveData();
-#endif
-        } else {
-#ifdef JSGC_FJGENERATIONAL
-            cx.evacuateLiveData();
-#endif
-        }
     }
 
     Spew(SpewOps, "Down");
 }
 
 void
 ForkJoinShared::setAbortFlagDueToInterrupt(ForkJoinContext &cx)
 {
@@ -1671,75 +1603,28 @@ ForkJoinShared::requestZoneGC(JS::Zone *
     } else {
         // Otherwise, just GC this zone.
         gcZone_ = zone;
         gcReason_ = reason;
         gcRequested_ = true;
     }
 }
 
-#ifdef JSGC_FJGENERATIONAL
-
-JSRuntime*
-js::gc::ForkJoinGCShared::runtime()
-{
-    return shared_->runtime();
-}
-
-JS::Zone*
-js::gc::ForkJoinGCShared::zone()
-{
-    return shared_->zone();
-}
-
-JSObject*
-js::gc::ForkJoinGCShared::updatable()
-{
-    return shared_->updatable();
-}
-
-js::gc::ForkJoinNurseryChunk *
-js::gc::ForkJoinGCShared::allocateNurseryChunk()
-{
-    return shared_->threadPool_->getChunk();
-}
-
-void
-js::gc::ForkJoinGCShared::freeNurseryChunk(js::gc::ForkJoinNurseryChunk *p)
-{
-    shared_->threadPool_->putFreeChunk(p);
-}
-
-void
-js::gc::ForkJoinGCShared::spewGC(const char *fmt, ...)
-{
-    va_list ap;
-    va_start(ap, fmt);
-    SpewVA(SpewGC, fmt, ap);
-    va_end(ap);
-}
-
-#endif // JSGC_FJGENERATIONAL
-
 /////////////////////////////////////////////////////////////////////////////
 // ForkJoinContext
 //
 
 ForkJoinContext::ForkJoinContext(PerThreadData *perThreadData, ThreadPoolWorker *worker,
                                  Allocator *allocator, ForkJoinShared *shared,
                                  ParallelBailoutRecord *bailoutRecord)
   : ThreadSafeContext(shared->runtime(), perThreadData, Context_ForkJoin),
     bailoutRecord(bailoutRecord),
     targetRegionStart(nullptr),
     targetRegionEnd(nullptr),
     shared_(shared),
-#ifdef JSGC_FJGENERATIONAL
-    gcShared_(shared),
-    fjNursery_(const_cast<ForkJoinContext*>(this), &this->gcShared_, allocator),
-#endif
     worker_(worker),
     acquiredJSContext_(false),
     nogc_()
 {
     /*
      * Unsafely set the zone. This is used to track malloc counters and to
      * trigger GCs and is otherwise not thread-safe to access.
      */
@@ -1889,17 +1774,17 @@ js::ParallelBailoutRecord::addTrace(JSSc
 }
 
 //////////////////////////////////////////////////////////////////////////////
 
 //
 // Debug spew
 //
 
-#ifdef FORKJOIN_SPEW
+#ifdef DEBUG
 
 static const char *
 ExecutionStatusToString(ExecutionStatus status)
 {
     switch (status) {
       case ExecutionFatal:
         return "fatal";
       case ExecutionSequential:
@@ -1983,18 +1868,16 @@ class ParallelSpewer
         env = getenv("PAFLAGS");
         if (env) {
             if (strstr(env, "ops"))
                 active[SpewOps] = true;
             if (strstr(env, "compile"))
                 active[SpewCompile] = true;
             if (strstr(env, "bailouts"))
                 active[SpewBailouts] = true;
-            if (strstr(env, "gc"))
-                active[SpewGC] = true;
             if (strstr(env, "full")) {
                 for (uint32_t i = 0; i < NumSpewChannels; i++)
                     active[i] = true;
             }
         }
 
         env = getenv("TERM");
         if (env && isatty(fileno(stderr))) {
@@ -2190,22 +2073,16 @@ parallel::Spew(SpewChannel channel, cons
 {
     va_list ap;
     va_start(ap, fmt);
     spewer.spewVA(channel, fmt, ap);
     va_end(ap);
 }
 
 void
-parallel::SpewVA(SpewChannel channel, const char *fmt, va_list ap)
-{
-    spewer.spewVA(channel, fmt, ap);
-}
-
-void
 parallel::SpewBeginOp(JSContext *cx, const char *name)
 {
     spewer.beginOp(cx, name);
 }
 
 ExecutionStatus
 parallel::SpewEndOp(ExecutionStatus status)
 {
@@ -2243,17 +2120,17 @@ parallel::SpewMIR(MDefinition *mir, cons
 }
 
 void
 parallel::SpewBailoutIR(IonLIRTraceData *data)
 {
     spewer.spewBailoutIR(data);
 }
 
-#endif // FORKJOIN_SPEW
+#endif // DEBUG
 
 bool
 js::InExclusiveParallelSection()
 {
     return InParallelSection() && ForkJoinContext::current()->hasAcquiredJSContext();
 }
 
 bool
--- a/js/src/vm/ForkJoin.h
+++ b/js/src/vm/ForkJoin.h
@@ -4,68 +4,54 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef vm_ForkJoin_h
 #define vm_ForkJoin_h
 
 #include "mozilla/ThreadLocal.h"
 
-#include <stdarg.h>
-
 #include "jscntxt.h"
 
-#include "gc/ForkJoinNursery.h"
 #include "gc/GCInternals.h"
 
 #include "jit/Ion.h"
 
-#ifdef DEBUG
-  #define FORKJOIN_SPEW
-#endif
-
 ///////////////////////////////////////////////////////////////////////////
 // Read Me First
 //
 // The ForkJoin abstraction:
 // -------------------------
 //
 // This is the building block for executing multi-threaded JavaScript with
 // shared memory (as distinct from Web Workers).  The idea is that you have
 // some (typically data-parallel) operation which you wish to execute in
 // parallel across as many threads as you have available.
 //
 // The ForkJoin abstraction is intended to be used by self-hosted code
 // to enable parallel execution.  At the top-level, it consists of a native
 // function (exposed as the ForkJoin intrinsic) that is used like so:
 //
-//     ForkJoin(func, sliceStart, sliceEnd, mode, updatable)
+//     ForkJoin(func, sliceStart, sliceEnd, mode)
 //
 // The intention of this statement is to start some number (usually the
 // number of hardware threads) of copies of |func()| running in parallel. Each
 // copy will then do a portion of the total work, depending on
 // workstealing-based load balancing.
 //
 // Typically, each of the N slices runs in a different worker thread, but that
 // is not something you should rely upon---if work-stealing is enabled it
 // could be that a single worker thread winds up handling multiple slices.
 //
 // The second and third arguments, |sliceStart| and |sliceEnd|, are the slice
 // boundaries. These numbers must each fit inside an uint16_t.
 //
 // The fourth argument, |mode|, is an internal mode integer giving finer
 // control over the behavior of ForkJoin. See the |ForkJoinMode| enum.
 //
-// The fifth argument, |updatable|, if not null, is an object that may
-// be updated in a race-free manner by |func()| or its callees.
-// Typically this is some sort of pre-sized array.  Only this object
-// may be updated by |func()|, and updates must not race.  (A more
-// general approach is perhaps desirable, eg passing an Array of
-// objects that may be updated, but that is not presently needed.)
-//
 // func() should expect the following arguments:
 //
 //     func(workerId, sliceStart, sliceEnd)
 //
 // The |workerId| parameter is the id of the worker executing the function. It
 // is 0 in sequential mode.
 //
 // The |sliceStart| and |sliceEnd| parameters are the current bounds that that
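
To make the calling convention concrete, here is a hedged sketch of the
sequential-mode semantics described above (illustrative only; SliceFn stands
in for the self-hosted |func| and is not an engine type):

    #include <cstdint>

    // In sequential mode a single "worker" with id 0 covers the whole
    // range; in parallel mode each worker instead receives sub-ranges of
    // [sliceStart, sliceEnd), subject to work stealing.
    template <typename SliceFn>
    void
    SequentialForkJoinEquivalent(SliceFn func, std::uint16_t sliceStart, std::uint16_t sliceEnd)
    {
        func(/* workerId = */ 0, sliceStart, sliceEnd);
    }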
@@ -173,68 +159,43 @@
 // |ParallelBailoutRecord| pre-allocated for this purpose. This
 // structure is used to record the cause of the bailout, the JSScript
 // which was executing, as well as the location in the source where
 // the bailout occurred (in principle, we can record a full stack
 // trace, but right now we only record the top-most frame). Note that
 // the error location might not be in the same JSScript as the one
 // which was executing due to inlining.
 //
-// Garbage collection, allocation, and write barriers:
+// Garbage collection and allocation:
 //
 // Code which executes on these parallel threads must be very careful
 // with respect to garbage collection and allocation.  The typical
 // allocation paths are UNSAFE in parallel code because they access
 // shared state (the compartment's arena lists and so forth) without
 // any synchronization.  They can also trigger GC in an ad-hoc way.
 //
 // To deal with this, the forkjoin code creates a distinct |Allocator|
-// object for each worker, which is used as follows.
-//
-// In a non-generational setting you can access the appropriate
-// allocator via the |ForkJoinContext| object that is provided to the
-// callbacks.  Once the parallel execution is complete, all the
-// objects found in these distinct |Allocator| are merged back into
-// the main compartment lists and things proceed normally.  (If it is
-// known that the result array contains no references then no merging
-// is necessary.)
-//
-// In a generational setting there is a per-thread |ForkJoinNursery|
-// in addition to the per-thread Allocator.  All "simple" objects
-// (meaning they are reasonably small, can be copied, and have no
-// complicated finalization semantics) are allocated in the nurseries;
-// other objects are allocated directly in the threads' Allocators,
-// which serve as the tenured areas for the threads.
-//
-// When a thread's nursery fills up it can be collected independently
-// of the other threads' nurseries, and does not require any of the
-// threads to bail out of the parallel section.  The nursery is
-// copy-collected, and the expectation is that the survival rate will
-// be very low and the collection will be very cheap.
-//
-// When the parallel execution is complete, and only if merging of the
-// Allocators into the main compartment is necessary, then the live
-// objects of the nurseries are copied into the respective Allocators,
-// in parallel, before the merging takes place.
+// object for each slice.  You can access the appropriate object via
+// the |ForkJoinContext| object that is provided to the callbacks.  Once
+// the execution is complete, all the objects found in these distinct
+// |Allocator| are merged back into the main compartment lists and
+// things proceed normally.
 //
 // In Ion-generated code, we will do allocation through the
-// |ForkJoinNursery| or |Allocator| found in |ForkJoinContext| (which
-// is obtained via TLS).
-//
-// No write barriers are emitted.  We permit writes to thread-local
-// objects, and such writes can create cross-generational pointers or
-// pointers that may interact with incremental GC.  However, the
-// per-thread generational collector scans its entire tenured area on
-// each minor collection, and we block upon entering a parallel
-// section to ensure that any concurrent marking or incremental GC has
-// completed.
+// |Allocator| found in |ForkJoinContext| (which is obtained via TLS).
+// Also, no write barriers are emitted.  Conceptually, we should never
+// need a write barrier because we only permit writes to objects that
+// are newly allocated, and such objects are always black (to use
+// incremental GC terminology).  However, to be safe, we also block
+// upon entering a parallel section to ensure that any concurrent
+// marking or incremental GC has completed.
 //
 // In the future, it should be possible to lift the restriction that
-// we must block until incremental GC has completed. But we're not
-// there yet.
+// we must block until incremental GC has completed and also to permit
+// GC during parallel execution. But we're not there yet.
 //
 // Load balancing (work stealing):
 //
 // The ForkJoin job is dynamically divided into a fixed number of slices,
 // and is submitted for parallel execution in the pool. When the number
 // of slices is big enough (typically greater than the number of workers
 // in the pool) and the workload is unbalanced, each worker thread
 // will perform load balancing through work stealing. The number
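
The stealing discipline can be sketched as follows. This is a hedged,
mutex-based illustration of the idea only; the actual ThreadPool appears to
pack a worker's slice bounds into a single word (see ComposeSliceBounds in
vm/ThreadPool.cpp below) so they can be updated atomically.

    #include <cstdint>
    #include <deque>
    #include <mutex>

    // Illustration: each worker drains its own queue from one end, while
    // idle peers steal single slices from the opposite end.
    struct WorkerQueueSketch {
        std::deque<std::uint16_t> slices;   // slice ids still to run
        std::mutex lock;

        bool popOwn(std::uint16_t *out) {
            std::lock_guard<std::mutex> g(lock);
            if (slices.empty())
                return false;
            *out = slices.back();
            slices.pop_back();
            return true;
        }

        bool stealOne(std::uint16_t *out) {
            std::lock_guard<std::mutex> g(lock);
            if (slices.empty())
                return false;
            *out = slices.front();
            slices.pop_front();
            return true;
        }
    };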
@@ -350,17 +311,17 @@ struct ParallelBailoutRecord {
 struct ForkJoinShared;
 
 class ForkJoinContext : public ThreadSafeContext
 {
   public:
     // Bailout record used to record the reason this thread stopped executing
     ParallelBailoutRecord *const bailoutRecord;
 
-#ifdef FORKJOIN_SPEW
+#ifdef DEBUG
     // Records the last instr. to execute on this thread.
     IonLIRTraceData traceData;
 
     // The maximum worker id.
     uint32_t maxWorkerId;
 #endif
 
     // When we run a par operation like mapPar, we create an out pointer
@@ -446,44 +407,24 @@ class ForkJoinContext : public ThreadSaf
     // Initializes the thread-local state.
     static bool initialize();
 
     // Used in inlining GetForkJoinSlice.
     static size_t offsetOfWorker() {
         return offsetof(ForkJoinContext, worker_);
     }
 
-#ifdef JSGC_FJGENERATIONAL
-    // There is already a nursery() method in ThreadSafeContext.
-    gc::ForkJoinNursery &fjNursery() { return fjNursery_; }
-
-    // Evacuate live data from the per-thread nursery into the per-thread
-    // tenured area.
-    void evacuateLiveData() { fjNursery_.evacuatingGC(); }
-
-    // Used in inlining nursery allocation.  Note the nursery is a
-    // member of the ForkJoinContext (a substructure), not a pointer.
-    static size_t offsetOfFJNursery() {
-        return offsetof(ForkJoinContext, fjNursery_);
-    }
-#endif
-
   private:
     friend class AutoSetForkJoinContext;
 
     // Initialized by initialize()
     static mozilla::ThreadLocal<ForkJoinContext*> tlsForkJoinContext;
 
     ForkJoinShared *const shared_;
 
-#ifdef JSGC_FJGENERATIONAL
-    gc::ForkJoinGCShared gcShared_;
-    gc::ForkJoinNursery fjNursery_;
-#endif
-
     ThreadPoolWorker *worker_;
 
     bool acquiredJSContext_;
 
     // ForkJoinContext is allocated on the stack. It would be dangerous to GC
     // with it live because of the GC pointer fields stored in the context.
     JS::AutoSuppressGCAnalysis nogc_;
 };
@@ -558,51 +499,48 @@ enum ExecutionStatus {
     // Parallel exec was successful after some number of bailouts
     ExecutionParallel
 };
 
 enum SpewChannel {
     SpewOps,
     SpewCompile,
     SpewBailouts,
-    SpewGC,
     NumSpewChannels
 };
 
-#if defined(FORKJOIN_SPEW) && defined(JS_THREADSAFE) && defined(JS_ION)
+#if defined(DEBUG) && defined(JS_THREADSAFE) && defined(JS_ION)
 
 bool SpewEnabled(SpewChannel channel);
 void Spew(SpewChannel channel, const char *fmt, ...);
-void SpewVA(SpewChannel channel, const char *fmt, va_list args);
 void SpewBeginOp(JSContext *cx, const char *name);
 void SpewBailout(uint32_t count, HandleScript script, jsbytecode *pc,
                  ParallelBailoutCause cause);
 ExecutionStatus SpewEndOp(ExecutionStatus status);
 void SpewBeginCompile(HandleScript script);
 jit::MethodStatus SpewEndCompile(jit::MethodStatus status);
 void SpewMIR(jit::MDefinition *mir, const char *fmt, ...);
 void SpewBailoutIR(IonLIRTraceData *data);
 
 #else
 
 static inline bool SpewEnabled(SpewChannel channel) { return false; }
 static inline void Spew(SpewChannel channel, const char *fmt, ...) { }
-static inline void SpewVA(SpewChannel channel, const char *fmt, va_list args) { }
 static inline void SpewBeginOp(JSContext *cx, const char *name) { }
 static inline void SpewBailout(uint32_t count, HandleScript script,
                                jsbytecode *pc, ParallelBailoutCause cause) {}
 static inline ExecutionStatus SpewEndOp(ExecutionStatus status) { return status; }
 static inline void SpewBeginCompile(HandleScript script) { }
 #ifdef JS_ION
 static inline jit::MethodStatus SpewEndCompile(jit::MethodStatus status) { return status; }
 static inline void SpewMIR(jit::MDefinition *mir, const char *fmt, ...) { }
 #endif
 static inline void SpewBailoutIR(IonLIRTraceData *data) { }
 
-#endif // FORKJOIN_SPEW && JS_THREADSAFE && JS_ION
+#endif // DEBUG && JS_THREADSAFE && JS_ION
 
 } // namespace parallel
 } // namespace js
 
 /* static */ inline js::ForkJoinContext *
 js::ForkJoinContext::current()
 {
     return tlsForkJoinContext.get();
--- a/js/src/vm/ObjectImpl.h
+++ b/js/src/vm/ObjectImpl.h
@@ -25,20 +25,16 @@
 #include "vm/String.h"
 
 namespace js {
 
 class ObjectImpl;
 class Nursery;
 class Shape;
 
-namespace gc {
-class ForkJoinNursery;
-}
-
 /*
  * To really poison a set of values, using 'magic' or 'undefined' isn't good
  * enough since often these will just be ignored by buggy code (see bug 629974)
  * in debug builds and crash in release builds. Instead, we use a safe-for-crash
  * pointer.
  */
 static MOZ_ALWAYS_INLINE void
 Debug_SetValueRangeToCrashOnTouch(Value *beg, Value *end)
@@ -176,17 +172,16 @@ class ObjectElements
         NONWRITABLE_ARRAY_LENGTH    = 0x2
     };
 
   private:
     friend class ::JSObject;
     friend class ObjectImpl;
     friend class ArrayObject;
     friend class Nursery;
-    friend class gc::ForkJoinNursery;
 
     template <ExecutionMode mode>
     friend bool
     ArraySetLength(typename ExecutionModeTraits<mode>::ContextType cx,
                    Handle<ArrayObject*> obj, HandleId id,
                    unsigned attrs, HandleValue value, bool setterIsStrict);
 
     /* See Flags enum above. */
@@ -445,17 +440,16 @@ class ObjectImpl : public gc::BarrieredC
     bool setFlag(ExclusiveContext *cx, /*BaseShape::Flag*/ uint32_t flag,
                  GenerateShape generateShape = GENERATE_NONE);
     bool clearFlag(ExclusiveContext *cx, /*BaseShape::Flag*/ uint32_t flag);
 
     bool toDictionaryMode(ThreadSafeContext *cx);
 
   private:
     friend class Nursery;
-    friend class gc::ForkJoinNursery;
 
     /*
      * Get internal pointers to the range of values starting at start and
      * running for length.
      */
     void getSlotRangeUnchecked(uint32_t start, uint32_t length,
                                HeapSlot **fixedStart, HeapSlot **fixedEnd,
                                HeapSlot **slotsStart, HeapSlot **slotsEnd)
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -948,25 +948,16 @@ struct JSRuntime : public JS::shadow::Ru
         gc.marker.setGCMode(mode);
     }
 
     bool isHeapBusy() { return gc.isHeapBusy(); }
     bool isHeapMajorCollecting() { return gc.isHeapMajorCollecting(); }
     bool isHeapMinorCollecting() { return gc.isHeapMinorCollecting(); }
     bool isHeapCollecting() { return gc.isHeapCollecting(); }
 
-    // Performance note: if isFJMinorCollecting turns out to be slow
-    // because reading the counter is slow then we may be able to
-    // augment the counter with a volatile flag that is set iff the
-    // counter is greater than zero.  (It will require some care to
-    // make sure the two variables stay in sync.)
-    bool isFJMinorCollecting() { return gc.fjCollectionCounter > 0; }
-    void incFJMinorCollecting() { gc.fjCollectionCounter++; }
-    void decFJMinorCollecting() { gc.fjCollectionCounter--; }
-
 #ifdef JS_GC_ZEAL
     int gcZeal() { return gc.zealMode; }
 
     bool upcomingZealousGC() {
         return gc.nextScheduled == 1;
     }
 
     bool needZealousGC() {
--- a/js/src/vm/SelfHosting.cpp
+++ b/js/src/vm/SelfHosting.cpp
@@ -291,21 +291,17 @@ intrinsic_ParallelSpew(ThreadSafeContext
     return true;
 }
 
 JS_JITINFO_NATIVE_PARALLEL_THREADSAFE(intrinsic_ParallelSpew_jitInfo, intrinsic_ParallelSpew_jitInfo,
                                       intrinsic_ParallelSpew);
 #endif
 
 /*
- * ForkJoin(func, sliceStart, sliceEnd, mode, updatable): Invokes |func| many times in parallel.
- *
- * If "func" will update a pre-existing object then that object /must/ be passed
- * as the object "updatable".  It is /not/ correct to pass an object that
- * references the updatable objects indirectly.
+ * ForkJoin(func, sliceStart, sliceEnd, mode): Invokes |func| many times in parallel.
  *
  * See ForkJoin.cpp for details and ParallelArray.js for examples.
  */
 static bool
 intrinsic_ForkJoin(JSContext *cx, unsigned argc, Value *vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     return ForkJoin(cx, args);
@@ -778,17 +774,17 @@ static const JSFunctionSpec intrinsic_fu
     JS_FN("GetIteratorPrototype",    intrinsic_GetIteratorPrototype,    0,0),
 
     JS_FN("NewArrayIterator",        intrinsic_NewArrayIterator,        0,0),
     JS_FN("IsArrayIterator",         intrinsic_IsArrayIterator,         1,0),
 
     JS_FN("NewStringIterator",       intrinsic_NewStringIterator,       0,0),
     JS_FN("IsStringIterator",        intrinsic_IsStringIterator,        1,0),
 
-    JS_FN("ForkJoin",                intrinsic_ForkJoin,                5,0),
+    JS_FN("ForkJoin",                intrinsic_ForkJoin,                2,0),
     JS_FN("ForkJoinNumWorkers",      intrinsic_ForkJoinNumWorkers,      0,0),
     JS_FN("NewDenseArray",           intrinsic_NewDenseArray,           1,0),
     JS_FN("ShouldForceSequential",   intrinsic_ShouldForceSequential,   0,0),
     JS_FN("ParallelTestsShouldPass", intrinsic_ParallelTestsShouldPass, 0,0),
     JS_FNINFO("ClearThreadLocalArenas",
               intrinsic_ClearThreadLocalArenas,
               &intrinsic_ClearThreadLocalArenasInfo, 0,0),
     JS_FNINFO("SetForkJoinTargetRegion",
--- a/js/src/vm/Shape.cpp
+++ b/js/src/vm/Shape.cpp
@@ -17,17 +17,16 @@
 #include "jshashutil.h"
 #include "jsobj.h"
 
 #include "js/HashTable.h"
 
 #include "jscntxtinlines.h"
 #include "jsobjinlines.h"
 
-#include "gc/ForkJoinNursery-inl.h"
 #include "vm/ObjectImpl-inl.h"
 #include "vm/Runtime-inl.h"
 
 using namespace js;
 using namespace js::gc;
 
 using mozilla::CeilingLog2Size;
 using mozilla::DebugOnly;
--- a/js/src/vm/Shape.h
+++ b/js/src/vm/Shape.h
@@ -106,20 +106,16 @@
 namespace js {
 
 class Bindings;
 class Debugger;
 class Nursery;
 class ObjectImpl;
 class StaticBlockObject;
 
-namespace gc {
-class ForkJoinNursery;
-}
-
 typedef JSPropertyOp         PropertyOp;
 typedef JSStrictPropertyOp   StrictPropertyOp;
 typedef JSPropertyDescriptor PropertyDescriptor;
 
 /* Limit on the number of slotful properties in an object. */
 static const uint32_t SHAPE_INVALID_SLOT = JS_BIT(24) - 1;
 static const uint32_t SHAPE_MAXIMUM_SLOT = JS_BIT(24) - 2;
 
@@ -611,17 +607,16 @@ typedef HashSet<ReadBarrieredUnownedBase
 
 
 class Shape : public gc::BarrieredCell<Shape>
 {
     friend class ::JSObject;
     friend class ::JSFunction;
     friend class js::Bindings;
     friend class js::Nursery;
-    friend class js::gc::ForkJoinNursery;
     friend class js::ObjectImpl;
     friend class js::PropertyTree;
     friend class js::StaticBlockObject;
     friend struct js::StackShape;
     friend struct js::StackBaseShape;
 
   protected:
     HeapPtrBaseShape    base_;
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -432,23 +432,24 @@ MarkInterpreterActivation(JSTracer *trc,
     for (InterpreterFrameIterator frames(act); !frames.done(); ++frames) {
         InterpreterFrame *fp = frames.frame();
         fp->markValues(trc, frames.sp(), frames.pc());
         fp->mark(trc);
     }
 }
 
 void
-js::MarkInterpreterActivations(PerThreadData *ptd, JSTracer *trc)
+js::MarkInterpreterActivations(JSRuntime *rt, JSTracer *trc)
 {
-    for (ActivationIterator iter(ptd); !iter.done(); ++iter) {
+    for (ActivationIterator iter(rt); !iter.done(); ++iter) {
         Activation *act = iter.activation();
         if (act->isInterpreter())
             MarkInterpreterActivation(trc, act->asInterpreter());
     }
+
 }
 
 /*****************************************************************************/
 
 // Unlike the other methods of this class, this method is defined here so that
 // we don't have to #include jsautooplen.h in vm/Stack.h.
 void
 InterpreterRegs::setToEndOfScript()
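The Stack.cpp hunk above restores the runtime-wide signature; presumably the backed-out patch had threaded activations through PerThreadData so ForkJoin worker threads could be scanned independently. A hedged caller sketch (hypothetical root-marking hook; not from the patch):

    #include "vm/Stack.h"

    // With the backout, interpreter activations are enumerated per runtime
    // again, so a GC root-marking hook needs only the JSRuntime.
    static void
    MarkInterpreterRoots(JSRuntime *rt, JSTracer *trc)
    {
        js::MarkInterpreterActivations(rt, trc);
        // ... mark other root kinds ...
    }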
--- a/js/src/vm/Stack.h
+++ b/js/src/vm/Stack.h
@@ -1088,17 +1088,17 @@ class InterpreterStack
 
     inline void purge(JSRuntime *rt);
 
     size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         return allocator_.sizeOfExcludingThis(mallocSizeOf);
     }
 };
 
-void MarkInterpreterActivations(PerThreadData *ptd, JSTracer *trc);
+void MarkInterpreterActivations(JSRuntime *rt, JSTracer *trc);
 
 /*****************************************************************************/
 
 class InvokeArgs : public JS::CallArgs
 {
     AutoValueVector v_;
 
   public:
--- a/js/src/vm/ThreadPool.cpp
+++ b/js/src/vm/ThreadPool.cpp
@@ -5,25 +5,20 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "vm/ThreadPool.h"
 
 #include "mozilla/Atomics.h"
 
 #include "jslock.h"
 
-#include "js/Utility.h"
 #include "vm/ForkJoin.h"
 #include "vm/Monitor.h"
 #include "vm/Runtime.h"
 
-#ifdef JSGC_FJGENERATIONAL
-#include "prmjtime.h"
-#endif
-
 using namespace js;
 
 const size_t WORKER_THREAD_STACK_SIZE = 1*1024*1024;
 
 static inline uint32_t
 ComposeSliceBounds(uint16_t from, uint16_t to)
 {
     MOZ_ASSERT(from <= to);
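The hunk cuts off before ComposeSliceBounds' body. Presumably it packs the two 16-bit slice bounds into a single 32-bit word so a worker can read or swap both bounds in one atomic operation; a hedged sketch of such a pack/unpack pair (an assumption, not the patched source):

    #include <stdint.h>
    #include "mozilla/Assertions.h"

    // Pack [from, to) into one word: high half is |from|, low half is |to|.
    static inline uint32_t
    ComposeSliceBounds(uint16_t from, uint16_t to)
    {
        MOZ_ASSERT(from <= to);
        return (uint32_t(from) << 16) | to;
    }

    // Inverse: split a packed word back into its two bounds.
    static inline void
    DecomposeSliceBounds(uint32_t bounds, uint16_t *from, uint16_t *to)
    {
        *from = uint16_t(bounds >> 16);
        *to = uint16_t(bounds & 0xffff);
    }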
@@ -256,53 +251,44 @@ ThreadPoolWorker::terminate(AutoLockMoni
 //
 // The |ThreadPool| starts up workers, submits work to them, and shuts
 // them down when requested.
 
 ThreadPool::ThreadPool(JSRuntime *rt)
   : activeWorkers_(0),
     joinBarrier_(nullptr),
     job_(nullptr),
+#ifdef DEBUG
     runtime_(rt),
-#ifdef DEBUG
     stolenSlices_(0),
 #endif
     pendingSlices_(0),
-    isMainThreadActive_(false),
-    chunkLock_(nullptr),
-    timeOfLastAllocation_(0),
-    freeChunks_(nullptr)
+    isMainThreadActive_(false)
 { }
 
 ThreadPool::~ThreadPool()
 {
     terminateWorkers();
-    clearChunkCache();
 #ifdef JS_THREADSAFE
-    if (chunkLock_)
-        PR_DestroyLock(chunkLock_);
     if (joinBarrier_)
         PR_DestroyCondVar(joinBarrier_);
 #endif
 }
 
 bool
 ThreadPool::init()
 {
 #ifdef JS_THREADSAFE
     if (!Monitor::init())
         return false;
     joinBarrier_ = PR_NewCondVar(lock_);
-    if (!joinBarrier_)
-        return false;
-    chunkLock_ = PR_NewLock();
-    if (!chunkLock_)
-        return false;
+    return !!joinBarrier_;
+#else
+    return true;
 #endif
-    return true;
 }
 
 uint32_t
 ThreadPool::numWorkers() const
 {
 #ifdef JS_THREADSAFE
     return HelperThreadState().cpuCount;
 #else
@@ -491,97 +477,8 @@ ThreadPool::abortJob()
     // The reason for this is that while calling discardSlices() clears all
     // workers' bounds, the pendingSlices_ cache might still be > 0 due to
     // still-executing calls to popSliceBack or popSliceFront in other
     // threads. When those finish, we will be sure that !hasWork(), which is
     // important to ensure that an aborted worker does not start again due to
     // the thread pool having more work.
     while (hasWork());
 }
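The spin above polls hasWork() until every in-flight popSliceBack/popSliceFront call has published its decrement. A hedged sketch of the predicate (the body is an assumption; pendingSlices_ is the ReleaseAcquire atomic declared in ThreadPool.h below):

    #include <stdint.h>
    #include "mozilla/Atomics.h"

    // Stand-in for ThreadPool::hasWork(): the ReleaseAcquire ordering on
    // the counter guarantees that once this read observes zero, the worker
    // that performed the final decrement has finished its pop.
    static bool
    HasPendingWork(const mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> &pendingSlices)
    {
        return pendingSlices != 0;
    }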
-
-// We are not using the markPagesUnused() / markPagesInUse() APIs here
-// for two reasons.  One, the free list is threaded through the
-// chunks, so some pages are actually in use.  Two, the expectation is
-// that a small number of chunks will be used intensively for a short
-// while and then be abandoned at the next GC.
-//
-// It's an open question whether it's best to go directly to the
-// pageAllocator, as now, or go via the GC's chunk pool.  Either way
-// there's a need to manage a predictable chunk cache here as we don't
-// want chunks to be deallocated during a parallel section.
-
-gc::ForkJoinNurseryChunk *
-ThreadPool::getChunk()
-{
-#ifdef JSGC_FJGENERATIONAL
-    PR_Lock(chunkLock_);
-    timeOfLastAllocation_ = PRMJ_Now()/1000000;
-    ChunkFreeList *p = freeChunks_;
-    if (p)
-        freeChunks_ = p->next;
-    PR_Unlock(chunkLock_);
-
-    if (p) {
-        // Already poisoned.
-        return reinterpret_cast<gc::ForkJoinNurseryChunk *>(p);
-    }
-    gc::ForkJoinNurseryChunk *c =
-        reinterpret_cast<gc::ForkJoinNurseryChunk *>(
-            runtime_->gc.pageAllocator.mapAlignedPages(gc::ChunkSize, gc::ChunkSize));
-    if (!c)
-        return c;
-    poisonChunk(c);
-    return c;
-#else
-    return nullptr;
-#endif
-}
-
-void
-ThreadPool::putFreeChunk(gc::ForkJoinNurseryChunk *c)
-{
-#ifdef JSGC_FJGENERATIONAL
-    poisonChunk(c);
-
-    PR_Lock(chunkLock_);
-    ChunkFreeList *p = reinterpret_cast<ChunkFreeList *>(c);
-    p->next = freeChunks_;
-    freeChunks_ = p;
-    PR_Unlock(chunkLock_);
-#endif
-}
-
-void
-ThreadPool::poisonChunk(gc::ForkJoinNurseryChunk *c)
-{
-#ifdef JSGC_FJGENERATIONAL
-#ifdef DEBUG
-    memset(c, JS_POISONED_FORKJOIN_CHUNK, gc::ChunkSize);
-#endif
-    c->trailer.runtime = nullptr;
-#endif
-}
-
-void
-ThreadPool::pruneChunkCache()
-{
-#ifdef JSGC_FJGENERATIONAL
-    if (PRMJ_Now()/1000000 - timeOfLastAllocation_ >= secondsBeforePrune)
-        clearChunkCache();
-#endif
-}
-
-void
-ThreadPool::clearChunkCache()
-{
-#ifdef JSGC_FJGENERATIONAL
-    PR_Lock(chunkLock_);
-    ChunkFreeList *p = freeChunks_;
-    freeChunks_ = nullptr;
-    PR_Unlock(chunkLock_);
-
-    while (p) {
-        ChunkFreeList *victim = p;
-        p = p->next;
-        runtime_->gc.pageAllocator.unmapPages(victim, gc::ChunkSize);
-    }
-#endif
-}
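Taken together, the removed methods implement a simple lock-protected free list of poisoned, chunk-aligned mappings. A hedged sketch of a consumer, such as a per-worker nursery (hypothetical function; not from the patch):

    // Acquire a chunk for bump allocation, then return it to the cache.
    static bool
    UseNurseryChunk(js::ThreadPool *pool)
    {
        js::gc::ForkJoinNurseryChunk *chunk = pool->getChunk();
        if (!chunk)
            return false;          // OOM: bail out of the parallel section
        // ... bump-allocate nursery objects into |chunk| ...
        pool->putFreeChunk(chunk); // re-poisoned and pushed on the free list
        return true;
    }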
--- a/js/src/vm/ThreadPool.h
+++ b/js/src/vm/ThreadPool.h
@@ -19,20 +19,16 @@
 
 struct JSRuntime;
 struct JSCompartment;
 
 namespace js {
 
 class ThreadPool;
 
-namespace gc {
-struct ForkJoinNurseryChunk;
-}
-
 /////////////////////////////////////////////////////////////////////////////
 // ThreadPoolWorker
 //
 // Class for worker threads in the pool. All threads (i.e. helpers and main
 // thread) have a worker associated with them. By convention, the worker id of
 // the main thread is 0.
 
 class ThreadPoolWorker
@@ -173,19 +169,20 @@ class ThreadPool : public Monitor
 
     // The number of active workers. Should only access under lock.
     uint32_t activeWorkers_;
     PRCondVar *joinBarrier_;
 
     // The current job.
     ParallelJob *job_;
 
+#ifdef DEBUG
     // Initialized at startup only.
     JSRuntime *const runtime_;
-#ifdef DEBUG
+
     // Number of stolen slices in the last parallel job.
     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> stolenSlices_;
 #endif
 
     // Number of pending slices in the current job.
     mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> pendingSlices_;
 
     // Whether the main thread is currently processing slices.
@@ -248,87 +245,13 @@ class ThreadPool : public Monitor
 
     // Execute the given ParallelJob using the main thread and any available worker.
     // Blocks until the main thread has completed execution.
     ParallelResult executeJob(JSContext *cx, ParallelJob *job, uint16_t sliceStart,
                               uint16_t numSlices);
 
     // Abort the current job.
     void abortJob();
-
-    // Chunk pool for the PJS parallel nurseries.  The nurseries need
-    // to have a useful pool of cheap chunks; they cannot map/unmap
-    // chunks on demand, as that slows down collection far too much.
-    //
-    // Technically the following should be #ifdef JSGC_FJGENERATIONAL
-    // but that affects the observed size of JSRuntime, of which
-    // ThreadPool is a member.  JSGC_FJGENERATIONAL can only be set if
-    // PJS is enabled, but the latter is enabled in js/src/moz.build;
-    // meanwhile, JSGC_FJGENERATIONAL must be enabled globally if it
-    // is enabled at all, since plenty of Firefox code includes files
-    // to make JSRuntime visible.  JSGC_FJGENERATIONAL will go away
-    // soon; in the meantime the problem is resolved by not making
-    // definitions exported from SpiderMonkey dependent on it.
-
-    // Obtain chunk memory from the cache, or allocate new.  In debug
-    // mode the memory is poisoned; see poisonChunk().
-    //
-    // Returns nullptr on OOM.
-    gc::ForkJoinNurseryChunk *getChunk();
-
-    // Free chunk memory to the cache.  In debug mode it is poisoned;
-    // see poisonChunk().
-    void putFreeChunk(gc::ForkJoinNurseryChunk *mem);
-
-    // If enough time has passed since the last allocation activity on
-    // the chunk pool, release any free chunks.  It is reasonable to call
-    // this from the main GC's chunk expiry mechanism; it has low cost
-    // when it does nothing.
-    //
-    // This must be called with the GC lock taken.
-    void pruneChunkCache();
-
-  private:
-    // Ignore requests to prune the pool until this number of seconds
-    // has passed since the last allocation request.
-    static const int32_t secondsBeforePrune = 10;
-
-    // This lock controls access to the following variables and to the
-    // 'next' field of any ChunkFreeList object reachable from freeChunks_.
-    //
-    // You will be tempted to remove this lock and instead introduce a
-    // lock-free push/pop data structure using Atomic.compareExchange.
-    // Before you do that, consider that such a data structure
-    // implemented naively is vulnerable to the ABA problem in a way
-    // that leads to a corrupt free list; the problem occurs in
-    // practice during very heavily loaded runs where preemption
-    // windows can be long (e.g., running the parallel jit_tests on all
-    // cores means having a number of runnable threads quadratic in
-    // the number of cores).  To do better some ABA-defeating scheme
-    // is needed additionally.
-    PRLock *chunkLock_;
-
-    // Timestamp of last allocation from the chunk pool, in seconds.
-    int32_t timeOfLastAllocation_;
-
-    // This structure overlays the beginning of the chunk when the
-    // chunk is on the free list; the rest of the chunk is unused.
-    struct ChunkFreeList {
-        ChunkFreeList *next;
-    };
-
-    // List of free chunks.
-    ChunkFreeList *freeChunks_;
-
-    // Poison a free chunk by filling with JS_POISONED_FORKJOIN_CHUNK
-    // and setting the runtime pointer to null.
-    void poisonChunk(gc::ForkJoinNurseryChunk *c);
-
-    // Release the memory of the chunks that are on the free list.
-    //
-    // This should be called only from the ThreadPool's destructor or
-    // from pruneChunkCache().
-    void clearChunkCache();
 };
 
 } // namespace js
 
 #endif /* vm_ThreadPool_h */
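The chunkLock_ comment in the removed block above warns against replacing the lock with a naive compareExchange-based free list. A hedged sketch of the ABA hazard it describes (simplified, hypothetical types):

    #include "mozilla/Atomics.h"

    struct ChunkFreeList { ChunkFreeList *next; };

    // Naive lock-free pop.  ABA: after we read |old| and |next|, another
    // thread may pop |old|, pop |next| (unmapping it), and push |old| back.
    // The CAS below still succeeds, installing the reclaimed |next| as the
    // new head and corrupting the list.
    static ChunkFreeList *
    NaivePop(mozilla::Atomic<ChunkFreeList *> &head)
    {
        for (;;) {
            ChunkFreeList *old = head;
            if (!old)
                return nullptr;
            ChunkFreeList *next = old->next;  // may already be stale
            if (head.compareExchange(old, next))
                return old;
        }
    }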