Bug 875661 - Part 2: Make NewGCThing inlines take ThreadSafeContext. (r=billm)
author Shu-yu Guo <shu@rfrn.org>
Thu, 20 Jun 2013 16:40:53 -0700
changeset 147414 40bbd1174f4660876277ee6143e23249f14d723a
parent 147413 76077c8abfcc24d14a3aa54b64be0518f925ec14
child 147415 191bed3002c99a623b4a2e85eaaa0ee72631e977
push id 2697
push user bbajaj@mozilla.com
push date Mon, 05 Aug 2013 18:49:53 +0000
treeherder mozilla-beta@dfec938c7b63 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers billm
bugs 875661
milestone 24.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 875661 - Part 2: Make NewGCThing inlines take ThreadSafeContext. (r=billm)
js/src/gc/Zone.h
js/src/ion/IonMacroAssembler.cpp
js/src/ion/ParallelFunctions.cpp
js/src/jscntxt.h
js/src/jscntxtinlines.h
js/src/jscompartmentinlines.h
js/src/jsgc.cpp
js/src/jsgc.h
js/src/jsgcinlines.h
js/src/vm/ForkJoin.cpp
js/src/vm/ForkJoin.h
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -21,34 +21,35 @@
 #include "gc/FindSCCs.h"
 #include "vm/GlobalObject.h"
 #include "vm/RegExpObject.h"
 #include "vm/Shape.h"
 
 namespace js {
 
 /*
- * Encapsulates the data needed to perform allocation.  Typically
- * there is precisely one of these per compartment
- * (|compartment.allocator|).  However, in parallel execution mode,
- * there will be one per worker thread.  In general, if a piece of
- * code must perform execution and should work safely either in
- * parallel or sequential mode, you should make it take an
- * |Allocator*| rather than a |JSContext*|.
+ * Encapsulates the data needed to perform allocation.  Typically there is
+ * precisely one of these per zone (|cx->zone().allocator|).  However, in
+ * parallel execution mode, there will be one per worker thread.
  */
 class Allocator
 {
+    /*
+     * Since allocators can be accessed from worker threads, the parent zone_
+     * should not be accessed in general. ArenaLists is allowed to actually do
+     * the allocation, however.
+     */
+    friend class gc::ArenaLists;
+
     JS::Zone *zone_;
 
   public:
     explicit Allocator(JS::Zone *zone);
 
     js::gc::ArenaLists arenas;
-
-    inline void *parallelNewGCThing(gc::AllocKind thingKind, size_t thingSize);
 };
 
 typedef Vector<JSCompartment *, 1, SystemAllocPolicy> CompartmentVector;
 
 } /* namespace js */
 
 namespace JS {
 
--- a/js/src/ion/IonMacroAssembler.cpp
+++ b/js/src/ion/IonMacroAssembler.cpp
@@ -512,18 +512,18 @@ MacroAssembler::parNewGCThing(const Regi
     // but the register allocator was assigning it to the same
     // register as `threadContextReg`.  Then we overwrite that
     // register which messed up the OOL code.
 
     gc::AllocKind allocKind = templateObject->tenuredGetAllocKind();
     uint32_t thingSize = (uint32_t)gc::Arena::thingSize(allocKind);
 
     // Load the allocator:
-    // tempReg1 = (Allocator*) forkJoinSlice->allocator
-    loadPtr(Address(threadContextReg, offsetof(js::ForkJoinSlice, allocator)),
+    // tempReg1 = (Allocator*) forkJoinSlice->allocator()
+    loadPtr(Address(threadContextReg, ThreadSafeContext::offsetOfAllocator()),
             tempReg1);
 
     // Get a pointer to the relevant free list:
     // tempReg1 = (FreeSpan*) &tempReg1->arenas.freeLists[(allocKind)]
     uint32_t offset = (offsetof(Allocator, arenas) +
                        js::gc::ArenaLists::getFreeListOffset(allocKind));
     addPtr(Imm32(offset), tempReg1);
 
--- a/js/src/ion/ParallelFunctions.cpp
+++ b/js/src/ion/ParallelFunctions.cpp
@@ -31,28 +31,27 @@ ion::ParForkJoinSlice()
 // ParNewGCThing() is called in place of NewGCThing() when executing
 // parallel code.  It uses the ArenaLists for the current thread and
 // allocates from there.
 JSObject *
 ion::ParNewGCThing(gc::AllocKind allocKind)
 {
     ForkJoinSlice *slice = ForkJoinSlice::Current();
     uint32_t thingSize = (uint32_t)gc::Arena::thingSize(allocKind);
-    void *t = slice->allocator->parallelNewGCThing(allocKind, thingSize);
-    return static_cast<JSObject *>(t);
+    return gc::NewGCThing<JSObject, NoGC>(slice, allocKind, thingSize, gc::DefaultHeap);
 }
 
 // Check that the object was created by the current thread
 // (and hence is writable).
 bool
 ion::ParWriteGuard(ForkJoinSlice *slice, JSObject *object)
 {
     JS_ASSERT(ForkJoinSlice::Current() == slice);
     return !IsInsideNursery(object->runtime(), object) &&
-           slice->allocator->arenas.containsArena(slice->runtime(), object->arenaHeader());
+           slice->allocator()->arenas.containsArena(slice->runtime(), object->arenaHeader());
 }
 
 #ifdef DEBUG
 static void
 printTrace(const char *prefix, struct IonLIRTraceData *cached)
 {
     fprintf(stderr, "%s / Block %3u / LIR %3u / Mode %u / LIR %s\n",
             prefix,
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -1534,16 +1534,41 @@ struct ThreadSafeContext : js::ContextFr
     explicit ThreadSafeContext(JSRuntime *rt, PerThreadData *pt, ContextKind kind);
 
     bool isJSContext() const;
     JSContext *asJSContext();
 
     bool isForkJoinSlice() const;
     ForkJoinSlice *asForkJoinSlice();
 
+#ifdef JSGC_GENERATIONAL
+    inline bool hasNursery() const;
+    inline js::Nursery &nursery();
+#endif
+
+    /*
+     * Allocator used when allocating GCThings on this context. If we are a
+     * JSContext, this is the Zone allocator of the JSContext's zone. If we
+     * are the per-thread data of a ForkJoinSlice, this is a per-thread
+     * allocator.
+     *
+     * This does not live in PerThreadData because the notion of an allocator
+     * is only per-thread in PJS. The runtime (and the main thread) can have
+     * more than one zone, each with its own allocator, and it's up to the
+     * context to specify what compartment and zone we are operating in.
+     */
+  protected:
+    Allocator *allocator_;
+
+  public:
+    static size_t offsetOfAllocator() { return offsetof(ThreadSafeContext, allocator_); }
+
+    inline Allocator *allocator();
+    inline AllowGC allowGC();
+
     void *onOutOfMemory(void *p, size_t nbytes) {
         return runtime_->onOutOfMemory(p, nbytes, isJSContext() ? asJSContext() : NULL);
     }
     inline void updateMallocCounter(size_t nbytes) {
         /* Note: this is racy. */
         runtime_->updateMallocCounter(zone_, nbytes);
     }
     void reportAllocationOverflow() {
--- a/js/src/jscntxtinlines.h
+++ b/js/src/jscntxtinlines.h
@@ -567,11 +567,40 @@ JSContext::leaveCompartment(JSCompartmen
         wrapPendingException();
 }
 
 inline void
 JSContext::setCompartment(JSCompartment *comp)
 {
     compartment_ = comp;
     zone_ = comp ? comp->zone() : NULL;
+    allocator_ = zone_ ? &zone_->allocator : NULL;
+}
+
+#ifdef JSGC_GENERATIONAL
+inline bool
+js::ThreadSafeContext::hasNursery() const
+{
+    return isJSContext();
+}
+
+inline js::Nursery &
+js::ThreadSafeContext::nursery()
+{
+    JS_ASSERT(hasNursery());
+    return runtime_->gcNursery;
+}
+#endif /* JSGC_GENERATIONAL */
+
+inline js::Allocator *
+js::ThreadSafeContext::allocator()
+{
+    JS_ASSERT_IF(isJSContext(), &asJSContext()->zone()->allocator == allocator_);
+    return allocator_;
+}
+
+inline js::AllowGC
+js::ThreadSafeContext::allowGC()
+{
+    return isJSContext() ? CanGC : NoGC;
 }
 
 #endif /* jscntxtinlines_h */
--- a/js/src/jscompartmentinlines.h
+++ b/js/src/jscompartmentinlines.h
@@ -33,22 +33,16 @@ js::AutoCompartment::AutoCompartment(JSC
     cx_->enterCompartment(target->compartment());
 }
 
 js::AutoCompartment::~AutoCompartment()
 {
     cx_->leaveCompartment(origin_);
 }
 
-inline void *
-js::Allocator::parallelNewGCThing(gc::AllocKind thingKind, size_t thingSize)
-{
-    return arenas.parallelAllocate(zone_, thingKind, thingSize);
-}
-
 namespace js {
 
 /*
  * Entering the atoms comaprtment is not possible with the AutoCompartment
  * since the atoms compartment does not have a global.
  *
  * Note: since most of the VM assumes that cx->global is non-null, only a
  * restricted set of (atom creating/destroying) operations may be used from
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -1158,33 +1158,16 @@ ArenaLists::prepareForIncrementalGC(JSRu
 
 static inline void
 PushArenaAllocatedDuringSweep(JSRuntime *runtime, ArenaHeader *arena)
 {
     arena->setNextAllocDuringSweep(runtime->gcArenasAllocatedDuringSweep);
     runtime->gcArenasAllocatedDuringSweep = arena;
 }
 
-void *
-ArenaLists::parallelAllocate(Zone *zone, AllocKind thingKind, size_t thingSize)
-{
-    /*
-     * During parallel Rivertrail sections, if no existing arena can
-     * satisfy the allocation, then a new one is allocated. If that
-     * fails, then we return NULL which will cause the parallel
-     * section to abort.
-     */
-
-    void *t = allocateFromFreeList(thingKind, thingSize);
-    if (t)
-        return t;
-
-    return allocateFromArenaInline(zone, thingKind);
-}
-
 inline void *
 ArenaLists::allocateFromArenaInline(Zone *zone, AllocKind thingKind)
 {
     /*
      * Parallel JS Note:
      *
      * This function can be called from parallel threads all of which
      * are associated with the same compartment. In that case, each
@@ -1488,44 +1471,48 @@ RunLastDitchGC(JSContext *cx, JS::Zone *
     if (void *thing = zone->allocator.arenas.allocateFromFreeList(thingKind, thingSize))
         return thing;
 
     return NULL;
 }
 
 template <AllowGC allowGC>
 /* static */ void *
-ArenaLists::refillFreeList(JSContext *cx, AllocKind thingKind)
-{
-    JS_ASSERT(cx->zone()->allocator.arenas.freeLists[thingKind].isEmpty());
-
-    Zone *zone = cx->zone();
+ArenaLists::refillFreeList(ThreadSafeContext *tcx, AllocKind thingKind)
+{
+    JS_ASSERT(tcx->allocator()->arenas.freeLists[thingKind].isEmpty());
+
+    Zone *zone = tcx->allocator()->zone_;
     JSRuntime *rt = zone->rt;
     JS_ASSERT(!rt->isHeapBusy());
 
     bool runGC = rt->gcIncrementalState != NO_INCREMENTAL &&
                  zone->gcBytes > zone->gcTriggerBytes &&
-                 allowGC;
+                 tcx->allowGC() && allowGC;
+
     for (;;) {
         if (JS_UNLIKELY(runGC)) {
-            if (void *thing = RunLastDitchGC(cx, zone, thingKind))
+            if (void *thing = RunLastDitchGC(tcx->asJSContext(), zone, thingKind))
                 return thing;
         }
 
         /*
          * allocateFromArena may fail while the background finalization still
-         * run. In that case we want to wait for it to finish and restart.
-         * However, checking for that is racy as the background finalization
-         * could free some things after allocateFromArena decided to fail but
-         * at this point it may have already stopped. To avoid this race we
-         * always try to allocate twice.
+         * run. If we aren't in a fork join, we want to wait for it to finish
+         * and restart. However, checking for that is racy as the background
+         * finalization could free some things after allocateFromArena decided
+         * to fail but at this point it may have already stopped. To avoid
+         * this race we always try to allocate twice.
+         *
+         * If we're in a fork join, we simply try it once and return whatever
+         * value we get.
          */
         for (bool secondAttempt = false; ; secondAttempt = true) {
-            void *thing = zone->allocator.arenas.allocateFromArenaInline(zone, thingKind);
-            if (JS_LIKELY(!!thing))
+            void *thing = tcx->allocator()->arenas.allocateFromArenaInline(zone, thingKind);
+            if (JS_LIKELY(!!thing) || tcx->isForkJoinSlice())
                 return thing;
             if (secondAttempt)
                 break;
 
             rt->gcHelperThread.waitBackgroundSweepEnd();
         }
 
         if (!allowGC)
@@ -1536,25 +1523,25 @@ ArenaLists::refillFreeList(JSContext *cx
          * Otherwise report OOM.
          */
         if (runGC)
             break;
         runGC = true;
     }
 
     JS_ASSERT(allowGC);
-    js_ReportOutOfMemory(cx);
+    js_ReportOutOfMemory(tcx->asJSContext());
     return NULL;
 }
 
 template void *
-ArenaLists::refillFreeList<NoGC>(JSContext *cx, AllocKind thingKind);
+ArenaLists::refillFreeList<NoGC>(ThreadSafeContext *cx, AllocKind thingKind);
 
 template void *
-ArenaLists::refillFreeList<CanGC>(JSContext *cx, AllocKind thingKind);
+ArenaLists::refillFreeList<CanGC>(ThreadSafeContext *cx, AllocKind thingKind);
 
 JSGCTraceKind
 js_GetGCThingTraceKind(void *thing)
 {
     return GetGCThingTraceKind(thing);
 }
 
 void
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -461,17 +461,17 @@ struct ArenaLists
         return false;
     }
 
     JS_ALWAYS_INLINE void *allocateFromFreeList(AllocKind thingKind, size_t thingSize) {
         return freeLists[thingKind].allocate(thingSize);
     }
 
     template <AllowGC allowGC>
-    static void *refillFreeList(JSContext *cx, AllocKind thingKind);
+    static void *refillFreeList(ThreadSafeContext *cx, AllocKind thingKind);
 
     /*
      * Moves all arenas from |fromArenaLists| into |this|.  In
      * parallel blocks, we temporarily create one ArenaLists per
      * parallel thread.  When the parallel block ends, we move
      * whatever allocations may have been performed back into the
      * compartment's main arena list using this function.
      */
@@ -495,24 +495,16 @@ struct ArenaLists
     void queueStringsForSweep(FreeOp *fop);
     void queueShapesForSweep(FreeOp *fop);
     void queueScriptsForSweep(FreeOp *fop);
     void queueIonCodeForSweep(FreeOp *fop);
 
     bool foregroundFinalize(FreeOp *fop, AllocKind thingKind, SliceBudget &sliceBudget);
     static void backgroundFinalize(FreeOp *fop, ArenaHeader *listHead, bool onBackgroundThread);
 
-    /*
-     * Invoked from IonMonkey-compiled parallel worker threads to
-     * perform an allocation.  In this case, |this| will be
-     * thread-local, but the compartment |comp| is shared between all
-     * threads.
-     */
-    void *parallelAllocate(JS::Zone *zone, AllocKind thingKind, size_t thingSize);
-
   private:
     inline void finalizeNow(FreeOp *fop, AllocKind thingKind);
     inline void queueForForegroundSweep(FreeOp *fop, AllocKind thingKind);
     inline void queueForBackgroundSweep(FreeOp *fop, AllocKind thingKind);
 
     void *allocateFromArena(JS::Zone *zone, AllocKind thingKind);
     inline void *allocateFromArenaInline(JS::Zone *zone, AllocKind thingKind);
 
--- a/js/src/jsgcinlines.h
+++ b/js/src/jsgcinlines.h
@@ -458,18 +458,21 @@ typedef CompartmentsIterT<GCZoneGroupIte
 
 #ifdef JSGC_GENERATIONAL
 /*
  * Attempt to allocate a new GC thing out of the nursery. If there is not enough
  * room in the nursery or there is an OOM, this method will return NULL.
  */
 template <typename T, AllowGC allowGC>
 inline T *
-TryNewNurseryGCThing(JSContext *cx, size_t thingSize)
+TryNewNurseryGCThing(ThreadSafeContext *tcx, size_t thingSize)
 {
+    /* TODO: Integrate PJS with generational GC. */
+    JSContext *cx = tcx->asJSContext();
+
     JS_ASSERT(!IsAtomsCompartment(cx->compartment()));
     JSRuntime *rt = cx->runtime();
     Nursery &nursery = rt->gcNursery;
     T *t = static_cast<T *>(nursery.allocate(thingSize));
     if (t)
         return t;
     if (allowGC && !rt->mainThread.suppressGC) {
         MinorGC(rt, JS::gcreason::OUT_OF_NURSERY);
@@ -488,112 +491,133 @@ TryNewNurseryGCThing(JSContext *cx, size
 /*
  * Allocates a new GC thing. After a successful allocation the caller must
  * fully initialize the thing before calling any function that can potentially
  * trigger GC. This will ensure that GC tracing never sees junk values stored
  * in the partially initialized thing.
  */
 template <typename T, AllowGC allowGC>
 inline T *
-NewGCThing(JSContext *cx, AllocKind kind, size_t thingSize, InitialHeap heap)
+NewGCThing(js::ThreadSafeContext *tcx, AllocKind kind, size_t thingSize, InitialHeap heap)
 {
     JS_ASSERT(thingSize == js::gc::Arena::thingSize(kind));
-    JS_ASSERT_IF(cx->compartment() == cx->runtime()->atomsCompartment,
-                 kind == FINALIZE_STRING ||
-                 kind == FINALIZE_SHORT_STRING ||
-                 kind == FINALIZE_IONCODE);
-    JS_ASSERT(!cx->runtime()->isHeapBusy());
-    JS_ASSERT(!cx->runtime()->noGCOrAllocationCheck);
 
-    /* For testing out of memory conditions */
-    JS_OOM_POSSIBLY_FAIL_REPORT(cx);
+    if (tcx->isJSContext()) {
+        JSContext *cx = tcx->asJSContext();
+        JS_ASSERT_IF(cx->compartment() == cx->runtime()->atomsCompartment,
+                     kind == FINALIZE_STRING ||
+                     kind == FINALIZE_SHORT_STRING ||
+                     kind == FINALIZE_IONCODE);
+        JS_ASSERT(!cx->runtime()->isHeapBusy());
+        JS_ASSERT(!cx->runtime()->noGCOrAllocationCheck);
+
+        /* For testing out of memory conditions */
+        JS_OOM_POSSIBLY_FAIL_REPORT(cx);
 
 #ifdef JS_GC_ZEAL
-    if (cx->runtime()->needZealousGC() && allowGC)
-        js::gc::RunDebugGC(cx);
+        if (cx->runtime()->needZealousGC() && allowGC)
+            js::gc::RunDebugGC(cx);
 #endif
 
-    if (allowGC)
-        MaybeCheckStackRoots(cx);
+        if (allowGC)
+            MaybeCheckStackRoots(cx);
+    }
 
 #if defined(JSGC_GENERATIONAL)
-    if (ShouldNurseryAllocate(cx->runtime()->gcNursery, kind, heap)) {
-        T *t = TryNewNurseryGCThing<T, allowGC>(cx, thingSize);
+    if (tcx->hasNursery() && ShouldNurseryAllocate(tcx->nursery(), kind, heap)) {
+        T *t = TryNewNurseryGCThing<T, allowGC>(tcx, thingSize);
         if (t)
             return t;
     }
 #endif
 
-    JS::Zone *zone = cx->zone();
-    T *t = static_cast<T *>(zone->allocator.arenas.allocateFromFreeList(kind, thingSize));
+    T *t = static_cast<T *>(tcx->allocator()->arenas.allocateFromFreeList(kind, thingSize));
     if (!t)
-        t = static_cast<T *>(js::gc::ArenaLists::refillFreeList<allowGC>(cx, kind));
+        t = static_cast<T *>(js::gc::ArenaLists::refillFreeList<allowGC>(tcx, kind));
+
+#ifdef DEBUG
+    if (tcx->isJSContext()) {
+        Zone *zone = tcx->asJSContext()->zone();
+        JS_ASSERT_IF(t && zone->wasGCStarted() && (zone->isGCMarking() || zone->isGCSweeping()),
+                     t->arenaHeader()->allocatedDuringIncremental);
+    }
+#endif
 
-    JS_ASSERT_IF(t && zone->wasGCStarted() && (zone->isGCMarking() || zone->isGCSweeping()),
-                 t->arenaHeader()->allocatedDuringIncremental);
+#if defined(JSGC_GENERATIONAL) && defined(JS_GC_ZEAL)
+    if (tcx->hasNursery()) {
+        JSContext *cx = tcx->asJSContext();
+
+        if (cx->runtime()->gcVerifyPostData &&
+            ShouldNurseryAllocate(cx->runtime()->gcVerifierNursery, kind, heap))
+        {
+            JS_ASSERT(!IsAtomsCompartment(cx->compartment()));
+            cx->runtime()->gcVerifierNursery.insertPointer(t);
+        }
+    }
+#endif
 
     return t;
 }
 
 } /* namespace gc */
 } /* namespace js */
 
 template <js::AllowGC allowGC>
 inline JSObject *
-js_NewGCObject(JSContext *cx, js::gc::AllocKind kind, js::gc::InitialHeap heap)
+js_NewGCObject(js::ThreadSafeContext *tcx, js::gc::AllocKind kind, js::gc::InitialHeap heap)
 {
     JS_ASSERT(kind >= js::gc::FINALIZE_OBJECT0 && kind <= js::gc::FINALIZE_OBJECT_LAST);
-    return js::gc::NewGCThing<JSObject, allowGC>(cx, kind, js::gc::Arena::thingSize(kind), heap);
+    return js::gc::NewGCThing<JSObject, allowGC>(tcx, kind, js::gc::Arena::thingSize(kind), heap);
 }
 
 template <js::AllowGC allowGC>
 inline JSString *
-js_NewGCString(JSContext *cx)
+js_NewGCString(js::ThreadSafeContext *tcx)
 {
-    return js::gc::NewGCThing<JSString, allowGC>(cx, js::gc::FINALIZE_STRING,
+    return js::gc::NewGCThing<JSString, allowGC>(tcx, js::gc::FINALIZE_STRING,
                                                  sizeof(JSString), js::gc::TenuredHeap);
 }
 
 template <js::AllowGC allowGC>
 inline JSShortString *
-js_NewGCShortString(JSContext *cx)
+js_NewGCShortString(js::ThreadSafeContext *tcx)
 {
-    return js::gc::NewGCThing<JSShortString, allowGC>(cx, js::gc::FINALIZE_SHORT_STRING,
+    return js::gc::NewGCThing<JSShortString, allowGC>(tcx, js::gc::FINALIZE_SHORT_STRING,
                                                       sizeof(JSShortString), js::gc::TenuredHeap);
 }
 
 inline JSExternalString *
-js_NewGCExternalString(JSContext *cx)
+js_NewGCExternalString(js::ThreadSafeContext *tcx)
 {
-    return js::gc::NewGCThing<JSExternalString, js::CanGC>(cx, js::gc::FINALIZE_EXTERNAL_STRING,
+    return js::gc::NewGCThing<JSExternalString, js::CanGC>(tcx, js::gc::FINALIZE_EXTERNAL_STRING,
                                                            sizeof(JSExternalString), js::gc::TenuredHeap);
 }
 
 inline JSScript *
-js_NewGCScript(JSContext *cx)
+js_NewGCScript(js::ThreadSafeContext *tcx)
 {
-    return js::gc::NewGCThing<JSScript, js::CanGC>(cx, js::gc::FINALIZE_SCRIPT,
+    return js::gc::NewGCThing<JSScript, js::CanGC>(tcx, js::gc::FINALIZE_SCRIPT,
                                                    sizeof(JSScript), js::gc::TenuredHeap);
 }
 
 inline js::LazyScript *
-js_NewGCLazyScript(JSContext *cx)
+js_NewGCLazyScript(js::ThreadSafeContext *tcx)
 {
-    return js::gc::NewGCThing<js::LazyScript, js::CanGC>(cx, js::gc::FINALIZE_LAZY_SCRIPT,
+    return js::gc::NewGCThing<js::LazyScript, js::CanGC>(tcx, js::gc::FINALIZE_LAZY_SCRIPT,
                                                          sizeof(js::LazyScript), js::gc::TenuredHeap);
 }
 
 inline js::Shape *
-js_NewGCShape(JSContext *cx)
+js_NewGCShape(js::ThreadSafeContext *tcx)
 {
-    return js::gc::NewGCThing<js::Shape, js::CanGC>(cx, js::gc::FINALIZE_SHAPE,
+    return js::gc::NewGCThing<js::Shape, js::CanGC>(tcx, js::gc::FINALIZE_SHAPE,
                                                     sizeof(js::Shape), js::gc::TenuredHeap);
 }
 
 template <js::AllowGC allowGC>
 inline js::BaseShape *
-js_NewGCBaseShape(JSContext *cx)
+js_NewGCBaseShape(js::ThreadSafeContext *tcx)
 {
-    return js::gc::NewGCThing<js::BaseShape, allowGC>(cx, js::gc::FINALIZE_BASE_SHAPE,
+    return js::gc::NewGCThing<js::BaseShape, allowGC>(tcx, js::gc::FINALIZE_BASE_SHAPE,
                                                       sizeof(js::BaseShape), js::gc::TenuredHeap);
 }
 
 #endif /* jsgcinlines_h */
--- a/js/src/vm/ForkJoin.cpp
+++ b/js/src/vm/ForkJoin.cpp
@@ -1645,25 +1645,25 @@ ForkJoinShared::requestZoneGC(JS::Zone *
 
 ForkJoinSlice::ForkJoinSlice(PerThreadData *perThreadData,
                              uint32_t sliceId, uint32_t numSlices,
                              Allocator *allocator, ForkJoinShared *shared,
                              ParallelBailoutRecord *bailoutRecord)
   : ThreadSafeContext(shared->runtime(), perThreadData, Context_ForkJoin),
     sliceId(sliceId),
     numSlices(numSlices),
-    allocator(allocator),
     bailoutRecord(bailoutRecord),
     shared(shared)
 {
     /*
      * Unsafely set the zone. This is used to track malloc counters and to
      * trigger GCs and is otherwise not thread-safe to access.
      */
     zone_ = shared->zone();
+    allocator_ = allocator;
 }
 
 bool
 ForkJoinSlice::isMainThread() const
 {
     return perThreadData == &shared->runtime()->mainThread;
 }
 
--- a/js/src/vm/ForkJoin.h
+++ b/js/src/vm/ForkJoin.h
@@ -294,31 +294,26 @@ struct ForkJoinSlice : ThreadSafeContext
 {
   public:
     // Which slice should you process? Ranges from 0 to |numSlices|.
     const uint32_t sliceId;
 
     // How many slices are there in total?
     const uint32_t numSlices;
 
-    // Allocator to use when allocating on this thread.  See
-    // |ion::ParFunctions::ParNewGCThing()|.  This should move into
-    // |perThreadData|.
-    Allocator *const allocator;
-
     // Bailout record used to record the reason this thread stopped executing
     ParallelBailoutRecord *const bailoutRecord;
 
 #ifdef DEBUG
     // Records the last instr. to execute on this thread.
     IonLIRTraceData traceData;
 #endif
 
     ForkJoinSlice(PerThreadData *perThreadData, uint32_t sliceId, uint32_t numSlices,
-                  Allocator *arenaLists, ForkJoinShared *shared,
+                  Allocator *allocator, ForkJoinShared *shared,
                   ParallelBailoutRecord *bailoutRecord);
 
     // True if this is the main thread, false if it is one of the parallel workers.
     bool isMainThread() const;
 
     // When the code would normally trigger a GC, we don't trigger it
     // immediately but instead record that request here.  This will
     // cause |ExecuteForkJoinOp()| to invoke |TriggerGC()| or