Bug 1119292 - disable ForkJoinContext and ThreadSafeContext. r=shu
author Lars T Hansen <lhansen@mozilla.com>
Wed, 14 Jan 2015 09:22:00 +0100
changeset 223724 572ae21637214b0c283b8be8dce86ebc5e42a61b
parent 223723 d43317c1d73f526adff9fcf10f29d38d5b089830
child 223725 116f224b77899b207ac20c1db163f1cef1369f93
push id 28101
push user cbook@mozilla.com
push date Wed, 14 Jan 2015 13:18:38 +0000
treeherder mozilla-central@d60a0e201e2c
reviewers shu
bugs 1119292
milestone 38.0a1
Bug 1119292 - disable ForkJoinContext and ThreadSafeContext. r=shu
js/src/builtin/TestingFunctions.cpp
js/src/jit/CompileWrappers.cpp
js/src/jit/CompileWrappers.h
js/src/jit/Ion.cpp
js/src/jit/IonCode.h
js/src/jit/MacroAssembler.h
js/src/jit/VMFunctions.h
js/src/jsapi.cpp
js/src/jscntxt.cpp
js/src/jscntxt.h
js/src/jspubtd.h
js/src/shell/js.cpp
js/src/vm/ForkJoin.cpp
js/src/vm/ForkJoin.h
js/src/vm/HelperThreads.cpp
js/src/vm/Interpreter-inl.h
js/src/vm/Monitor.h
js/src/vm/NativeObject-inl.h
js/src/vm/NativeObject.cpp
js/src/vm/Runtime.cpp
js/src/vm/Runtime.h
js/src/vm/Shape.cpp
js/src/vm/Stack-inl.h
js/src/vm/Stack.cpp
js/src/vm/Stack.h
js/src/vm/ThreadPool.cpp
js/src/vm/ThreadPool.h
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -1225,17 +1225,16 @@ DisplayName(JSContext *cx, unsigned argc
 
 bool
 js::testingFunc_inParallelSection(JSContext *cx, unsigned argc, jsval *vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 
     // If we were actually *in* a parallel section, then this function
     // would be inlined to TRUE in ion-generated code.
-    MOZ_ASSERT(!InParallelSection());
     args.rval().setBoolean(false);
     return true;
 }
 
 static bool
 ShellObjectMetadataCallback(JSContext *cx, JSObject **pmetadata)
 {
     RootedObject obj(cx, NewBuiltinClassInstance<PlainObject>(cx));
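With the ForkJoin machinery compiled out below (the #if 0 blocks added to vm/ForkJoin.h), the shell's inParallelSection testing hook can no longer assert on InParallelSection(), so the assertion is dropped and the function keeps reporting false. For reference, a minimal model of the predicate the assertion used to consult — in the real tree it checks a thread-local ForkJoinContext, as the ForkJoin.h hunk further down shows:

    // Illustrative model only; stand-ins for js::ForkJoinContext and its TLS slot.
    struct ForkJoinContext;

    static thread_local ForkJoinContext *tlsForkJoinContext = nullptr;

    static inline bool InParallelSection()
    {
        // Non-null only while a parallel worker has installed its context;
        // with ForkJoin disabled this is never set, so the answer is always false.
        return tlsForkJoinContext != nullptr;
    }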
--- a/js/src/jit/CompileWrappers.cpp
+++ b/js/src/jit/CompileWrappers.cpp
@@ -73,28 +73,16 @@ CompileRuntime::addressOfGCZeal()
 #endif
 
 const void *
 CompileRuntime::addressOfInterruptUint32()
 {
     return runtime()->addressOfInterruptUint32();
 }
 
-const void *
-CompileRuntime::addressOfInterruptParUint32()
-{
-    return runtime()->addressOfInterruptParUint32();
-}
-
-const void *
-CompileRuntime::addressOfThreadPool()
-{
-    return &runtime()->threadPool;
-}
-
 const JitRuntime *
 CompileRuntime::jitRuntime()
 {
     return runtime()->jitRuntime();
 }
 
 SPSProfiler &
 CompileRuntime::spsProfiler()
--- a/js/src/jit/CompileWrappers.h
+++ b/js/src/jit/CompileWrappers.h
@@ -46,19 +46,16 @@ class CompileRuntime
     // &GetJitContext()->runtime->nativeIterCache.last
     const void *addressOfLastCachedNativeIterator();
 
 #ifdef JS_GC_ZEAL
     const void *addressOfGCZeal();
 #endif
 
     const void *addressOfInterruptUint32();
-    const void *addressOfInterruptParUint32();
-
-    const void *addressOfThreadPool();
 
     const JitRuntime *jitRuntime();
 
     // Compilation does not occur off thread when the SPS profiler is enabled.
     SPSProfiler &spsProfiler();
 
     bool canUseSignalHandlers();
     bool jitSupportsFloatingPoint();
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -699,17 +699,16 @@ IonScript::IonScript()
   : method_(nullptr),
     deoptTable_(nullptr),
     osrPc_(nullptr),
     osrEntryOffset_(0),
     skipArgCheckEntryOffset_(0),
     invalidateEpilogueOffset_(0),
     invalidateEpilogueDataOffset_(0),
     numBailouts_(0),
-    hasUncompiledCallTarget_(false),
     hasSPSInstrumentation_(false),
     recompiling_(false),
     runtimeData_(0),
     runtimeSize_(0),
     cacheIndex_(0),
     cacheEntries_(0),
     safepointIndexOffset_(0),
     safepointIndexEntries_(0),
--- a/js/src/jit/IonCode.h
+++ b/js/src/jit/IonCode.h
@@ -188,21 +188,16 @@ struct IonScript
     // NOTE: technically a constant delta from
     // |invalidateEpilogueOffset_|, so we could hard-code this
     // per-platform if we want.
     uint32_t invalidateEpilogueDataOffset_;
 
     // Number of times this script bailed out without invalidation.
     uint32_t numBailouts_;
 
-    // Flag set when it is likely that one of our (transitive) call
-    // targets is not compiled.  Used in ForkJoin.cpp to decide when
-    // we should add call targets to the worklist.
-    mozilla::Atomic<bool, mozilla::Relaxed> hasUncompiledCallTarget_;
-
     // Flag set if IonScript was compiled with SPS profiling enabled.
     bool hasSPSInstrumentation_;
 
     // Flag for if this script is getting recompiled.
     uint32_t recompiling_;
 
     // Any kind of data needed by the runtime, these can be either cache
     // information or profiling info.
@@ -412,25 +407,16 @@ struct IonScript
         numBailouts_++;
     }
     uint32_t numBailouts() const {
         return numBailouts_;
     }
     bool bailoutExpected() const {
         return numBailouts_ > 0;
     }
-    void setHasUncompiledCallTarget() {
-        hasUncompiledCallTarget_ = true;
-    }
-    void clearHasUncompiledCallTarget() {
-        hasUncompiledCallTarget_ = false;
-    }
-    bool hasUncompiledCallTarget() const {
-        return hasUncompiledCallTarget_;
-    }
     void setHasSPSInstrumentation() {
         hasSPSInstrumentation_ = true;
     }
     void clearHasSPSInstrumentation() {
         hasSPSInstrumentation_ = false;
     }
     bool hasSPSInstrumentation() const {
         return hasSPSInstrumentation_;
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -387,20 +387,16 @@ class MacroAssembler : public MacroAssem
     void loadStringChar(Register str, Register index, Register output);
 
     void branchIfRope(Register str, Label *label) {
         Address flags(str, JSString::offsetOfFlags());
         static_assert(JSString::ROPE_FLAGS == 0, "Rope type flags must be 0");
         branchTest32(Assembler::Zero, flags, Imm32(JSString::TYPE_FLAGS_MASK), label);
     }
 
-    void loadSliceBounds(Register worker, Register dest) {
-        loadPtr(Address(worker, ThreadPoolWorker::offsetOfSliceBounds()), dest);
-    }
-
     void loadJSContext(Register dest) {
         loadPtr(AbsoluteAddress(GetJitContext()->runtime->addressOfJSContext()), dest);
     }
     void loadJitActivation(Register dest) {
         loadPtr(AbsoluteAddress(GetJitContext()->runtime->addressOfActivation()), dest);
     }
 
     template<typename T>
@@ -855,22 +851,16 @@ class MacroAssembler : public MacroAssem
     // The JitCode * argument here is one of the tokens defined in the various
     // exit frame layout classes, e.g. NativeExitFrameLayout::Token().
     void enterFakeExitFrame(JitCode *codeVal) {
         linkExitFrame();
         Push(ImmPtr(codeVal));
         Push(ImmPtr(nullptr));
     }
 
-    void loadThreadPool(Register pool) {
-        // JitRuntimes are tied to JSRuntimes and there is one ThreadPool per
-        // JSRuntime, so we can hardcode the ThreadPool address here.
-        movePtr(ImmPtr(GetJitContext()->runtime->addressOfThreadPool()), pool);
-    }
-
     void leaveExitFrame() {
         freeStack(ExitFooterFrame::Size());
     }
 
     bool hasEnteredExitFrame() const {
         return exitCodePatch_.offset() != 0;
     }
 
--- a/js/src/jit/VMFunctions.h
+++ b/js/src/jit/VMFunctions.h
@@ -115,17 +115,17 @@ struct VMFunction
 
     // Contains a combination of enumerated types used by the gc for marking
     // arguments of the VM wrapper.
     uint64_t argumentRootTypes;
 
     // The root type of the out param if outParam == Type_Handle.
     RootType outParamRootType;
 
-    // Does this function take a ForkJoinContext * or a JSContext *?
+    // PJS FIXME: get rid of executionMode
     ExecutionMode executionMode;
 
     // Number of Values the VM wrapper should pop from the stack when it returns.
     // Used by baseline IC stubs so that they can use tail calls to call the VM
     // wrapper.
     uint32_t extraValuesToPop;
 
     // On some architectures, called functions need to explicitly push their
@@ -463,49 +463,36 @@ template <> struct OutParamToRootType<Mu
 };
 template <> struct OutParamToRootType<MutableHandleObject> {
     static const VMFunction::RootType result = VMFunction::RootObject;
 };
 template <> struct OutParamToRootType<MutableHandleString> {
     static const VMFunction::RootType result = VMFunction::RootString;
 };
 
-template <class> struct MatchContext { };
-template <> struct MatchContext<JSContext *> {
-    static const ExecutionMode execMode = SequentialExecution;
-};
-template <> struct MatchContext<ExclusiveContext *> {
-    static const ExecutionMode execMode = SequentialExecution;
-};
-template <> struct MatchContext<ThreadSafeContext *> {
-    // ThreadSafeContext functions can be called from either mode, but for
-    // calling from parallel they should be wrapped first, so we default to
-    // SequentialExecution here.
-    static const ExecutionMode execMode = SequentialExecution;
-};
-
 #define FOR_EACH_ARGS_1(Macro, Sep, Last) Macro(1) Last(1)
 #define FOR_EACH_ARGS_2(Macro, Sep, Last) FOR_EACH_ARGS_1(Macro, Sep, Sep) Macro(2) Last(2)
 #define FOR_EACH_ARGS_3(Macro, Sep, Last) FOR_EACH_ARGS_2(Macro, Sep, Sep) Macro(3) Last(3)
 #define FOR_EACH_ARGS_4(Macro, Sep, Last) FOR_EACH_ARGS_3(Macro, Sep, Sep) Macro(4) Last(4)
 #define FOR_EACH_ARGS_5(Macro, Sep, Last) FOR_EACH_ARGS_4(Macro, Sep, Sep) Macro(5) Last(5)
 #define FOR_EACH_ARGS_6(Macro, Sep, Last) FOR_EACH_ARGS_5(Macro, Sep, Sep) Macro(6) Last(6)
 
 #define COMPUTE_INDEX(NbArg) NbArg
 #define COMPUTE_OUTPARAM_RESULT(NbArg) OutParamToDataType<A ## NbArg>::result
 #define COMPUTE_OUTPARAM_ROOT(NbArg) OutParamToRootType<A ## NbArg>::result
 #define COMPUTE_ARG_PROP(NbArg) (TypeToArgProperties<A ## NbArg>::result << (2 * (NbArg - 1)))
 #define COMPUTE_ARG_ROOT(NbArg) (uint64_t(TypeToRootType<A ## NbArg>::result) << (3 * (NbArg - 1)))
 #define COMPUTE_ARG_FLOAT(NbArg) (TypeToPassInFloatReg<A ## NbArg>::result) << (NbArg - 1)
 #define SEP_OR(_) |
 #define NOTHING(_)
 
+// PJS FIXME: get rid of executionMode()
 #define FUNCTION_INFO_STRUCT_BODY(ForEachNb)                                            \
     static inline ExecutionMode executionMode() {                                       \
-        return MatchContext<Context>::execMode;                                         \
+        return SequentialExecution;                                                     \
     }                                                                                   \
     static inline DataType returnType() {                                               \
         return TypeToDataType<R>::result;                                               \
     }                                                                                   \
     static inline DataType outParam() {                                                 \
         return ForEachNb(NOTHING, NOTHING, COMPUTE_OUTPARAM_RESULT);                    \
     }                                                                                   \
     static inline RootType outParamRootType() {                                         \
@@ -546,18 +533,19 @@ template <typename Fun>
 struct FunctionInfo {
 };
 
 // VMFunction wrapper with no explicit arguments.
 template <class R, class Context>
 struct FunctionInfo<R (*)(Context)> : public VMFunction {
     typedef R (*pf)(Context);
 
+    // PJS FIXME: get rid of executionMode()
     static inline ExecutionMode executionMode() {
-        return MatchContext<Context>::execMode;
+        return SequentialExecution;
     }
     static inline DataType returnType() {
         return TypeToDataType<R>::result;
     }
     static inline DataType outParam() {
         return Type_Void;
     }
     static inline RootType outParamRootType() {
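The deleted MatchContext<> specializations were a small trait mapping a VM wrapper's context parameter type to an ExecutionMode. With ThreadSafeContext gone, every remaining specialization yielded SequentialExecution, so the generated executionMode() methods can return that constant directly until the field itself is removed (the "PJS FIXME" notes). A self-contained sketch of the retired dispatch pattern, using stand-in types rather than the real jit/ classes:

    enum ExecutionMode { SequentialExecution, ParallelExecution };

    struct JSContext;
    struct ExclusiveContext;

    // The trait that used to pick the mode from the context parameter type.
    template <class Context> struct MatchContext { };
    template <> struct MatchContext<JSContext *> {
        static const ExecutionMode execMode = SequentialExecution;
    };
    template <> struct MatchContext<ExclusiveContext *> {
        static const ExecutionMode execMode = SequentialExecution;
    };

    // Every specialization now agrees, so the dispatch collapses to a constant.
    template <class R, class Context>
    struct FunctionInfoSketch {
        static ExecutionMode executionMode() { return SequentialExecution; }
    };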
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -559,19 +559,16 @@ JS_Init(void)
 
     using js::TlsPerThreadData;
     if (!TlsPerThreadData.initialized() && !TlsPerThreadData.init())
         return false;
 
     if (!jit::InitializeIon())
         return false;
 
-    if (!ForkJoinContext::initializeTls())
-        return false;
-
 #if EXPOSE_INTL_API
     UErrorCode err = U_ZERO_ERROR;
     u_init(&err);
     if (U_FAILURE(err))
         return false;
 #endif // EXPOSE_INTL_API
 
     if (!CreateHelperThreadsState())
--- a/js/src/jscntxt.cpp
+++ b/js/src/jscntxt.cpp
@@ -963,39 +963,28 @@ const JSErrorFormatString js_ErrorFormat
 JS_FRIEND_API(const JSErrorFormatString *)
 js_GetErrorMessage(void *userRef, const unsigned errorNumber)
 {
     if (errorNumber > 0 && errorNumber < JSErr_Limit)
         return &js_ErrorFormatString[errorNumber];
     return nullptr;
 }
 
-ThreadSafeContext::ThreadSafeContext(JSRuntime *rt, PerThreadData *pt, ContextKind kind)
+ExclusiveContext::ExclusiveContext(JSRuntime *rt, PerThreadData *pt, ContextKind kind)
   : ContextFriendFields(rt),
+    helperThread_(nullptr),
     contextKind_(kind),
     perThreadData(pt),
-    allocator_(nullptr)
+    allocator_(nullptr),
+    enterCompartmentDepth_(0)
 {
 }
 
-bool
-ThreadSafeContext::isForkJoinContext() const
-{
-    return contextKind_ == Context_ForkJoin;
-}
-
-ForkJoinContext *
-ThreadSafeContext::asForkJoinContext()
-{
-    MOZ_ASSERT(isForkJoinContext());
-    return reinterpret_cast<ForkJoinContext *>(this);
-}
-
 void
-ThreadSafeContext::recoverFromOutOfMemory()
+ExclusiveContext::recoverFromOutOfMemory()
 {
     // If this is not a JSContext, there's nothing to do.
     if (JSContext *maybecx = maybeJSContext()) {
         if (maybecx->isExceptionPending()) {
             MOZ_ASSERT(maybecx->isThrowingOutOfMemory());
             maybecx->clearPendingException();
         }
     }
@@ -1193,17 +1182,17 @@ JSContext::mark(JSTracer *trc)
 
     TraceCycleDetectionSet(trc, cycleDetectorSet);
 
     if (compartment_)
         compartment_->mark();
 }
 
 void *
-ThreadSafeContext::stackLimitAddressForJitCode(StackKind kind)
+ExclusiveContext::stackLimitAddressForJitCode(StackKind kind)
 {
 #if defined(JS_ARM_SIMULATOR) || defined(JS_MIPS_SIMULATOR)
     return runtime_->mainThread.addressOfSimulatorStackLimit();
 #endif
     return stackLimitAddress(kind);
 }
 
 JSVersion
@@ -1224,17 +1213,17 @@ JS::AutoCheckRequestDepth::AutoCheckRequ
     : cx(cx)
 {
     MOZ_ASSERT(cx->runtime()->requestDepth || cx->runtime()->isHeapBusy());
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
     cx->runtime()->checkRequestDepth++;
 }
 
 JS::AutoCheckRequestDepth::AutoCheckRequestDepth(ContextFriendFields *cxArg)
-    : cx(static_cast<ThreadSafeContext *>(cxArg)->maybeJSContext())
+    : cx(static_cast<ExclusiveContext *>(cxArg)->maybeJSContext())
 {
     if (cx) {
         MOZ_ASSERT(cx->runtime()->requestDepth || cx->runtime()->isHeapBusy());
         MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
         cx->runtime()->checkRequestDepth++;
     }
 }
 
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -107,70 +107,69 @@ class AutoCycleDetector
 };
 
 /* Updates references in the cycle detection set if the GC moves them. */
 extern void
 TraceCycleDetectionSet(JSTracer *trc, ObjectSet &set);
 
 struct AutoResolving;
 class DtoaCache;
-class ForkJoinContext;
 class RegExpStatics;
 
 namespace frontend { struct CompileError; }
 
 /*
  * Execution Context Overview:
  *
  * Several different structures may be used to provide a context for operations
  * on the VM. Each context is thread local, but varies in what data it can
  * access and what other threads may be running.
  *
- * - ThreadSafeContext is used by threads operating in one compartment which
- * may run in parallel with other threads operating on the same or other
- * compartments.
- *
  * - ExclusiveContext is used by threads operating in one compartment/zone,
  * where other threads may operate in other compartments, but *not* the same
  * compartment or zone which the ExclusiveContext is in. A thread with an
  * ExclusiveContext may enter the atoms compartment and atomize strings, in
  * which case a lock is used.
  *
  * - JSContext is used only by the runtime's main thread. The context may
- * operate in any compartment or zone which is not used by an ExclusiveContext
- * or ThreadSafeContext, and will only run in parallel with threads using such
- * contexts.
+ * operate in any compartment or zone which is not used by an ExclusiveContext,
+ * and will only run in parallel with threads using such contexts.
  *
- * An ExclusiveContext coerces to a ThreadSafeContext, and a JSContext coerces
- * to an ExclusiveContext or ThreadSafeContext.
- *
- * Contexts which are a ThreadSafeContext but not an ExclusiveContext are used
- * to represent a ForkJoinContext, the per-thread parallel context used in PJS.
+ * A JSContext coerces to an ExclusiveContext.
  */
 
-struct ThreadSafeContext : ContextFriendFields,
-                           public MallocProvider<ThreadSafeContext>
+struct HelperThread;
+
+class ExclusiveContext : public ContextFriendFields,
+                         public MallocProvider<ExclusiveContext>
 {
+    friend class gc::ArenaLists;
+    friend class AutoCompartment;
+    friend class AutoLockForExclusiveAccess;
     friend struct StackBaseShape;
+    friend void JSScript::initCompartment(ExclusiveContext *cx);
+    friend class jit::JitContext;
     friend class Activation;
 
+    // The thread on which this context is running, if this is not a JSContext.
+    HelperThread *helperThread_;
+
   public:
     enum ContextKind {
         Context_JS,
-        Context_Exclusive,
-        Context_ForkJoin
+        Context_Exclusive
     };
 
   private:
     ContextKind contextKind_;
 
   public:
     PerThreadData *perThreadData;
 
-    ThreadSafeContext(JSRuntime *rt, PerThreadData *pt, ContextKind kind);
+    ExclusiveContext(JSRuntime *rt, PerThreadData *pt, ContextKind kind);
 
     bool isJSContext() const {
         return contextKind_ == Context_JS;
     }
 
     JSContext *maybeJSContext() const {
         if (isJSContext())
             return (JSContext *) this;
@@ -193,83 +192,46 @@ struct ThreadSafeContext : ContextFriend
     // asJSContext() and crashing afterwards, this method may be used to watch
     // for such cases and produce either a soft failure in release builds or
     // an assertion failure in debug builds.
     bool shouldBeJSContext() const {
         MOZ_ASSERT(isJSContext());
         return isJSContext();
     }
 
-    bool isExclusiveContext() const {
-        return contextKind_ == Context_JS || contextKind_ == Context_Exclusive;
-    }
-
-    ExclusiveContext *maybeExclusiveContext() const {
-        if (isExclusiveContext())
-            return (ExclusiveContext *) this;
-        return nullptr;
-    }
-
-    ExclusiveContext *asExclusiveContext() const {
-        MOZ_ASSERT(isExclusiveContext());
-        return maybeExclusiveContext();
-    }
-
-    bool isForkJoinContext() const;
-    ForkJoinContext *asForkJoinContext();
-
-    /*
-     * Allocator used when allocating GCThings on this context. If we are a
-     * JSContext, this is the Zone allocator of the JSContext's zone.
-     * Otherwise, this is a per-thread allocator.
-     *
-     * This does not live in PerThreadData because the notion of an allocator
-     * is only per-thread when off the main thread. The runtime (and the main
-     * thread) can have more than one zone, each with its own allocator, and
-     * it's up to the context to specify what compartment and zone we are
-     * operating in.
-     */
   protected:
     Allocator *allocator_;
 
   public:
-    static size_t offsetOfAllocator() { return offsetof(ThreadSafeContext, allocator_); }
-
     inline Allocator *allocator() const { return allocator_; }
 
-    // Allocations can only trigger GC when running on the main thread.
-    inline AllowGC allowGC() const { return isJSContext() ? CanGC : NoGC; }
-
     template <typename T>
     bool isInsideCurrentZone(T thing) const {
         return thing->zoneFromAnyThread() == zone_;
     }
 
     template <typename T>
     inline bool isInsideCurrentCompartment(T thing) const {
         return thing->compartment() == compartment_;
     }
 
-    template <typename T>
-    inline bool isThreadLocal(T thing) const { return true; }  // obsolete
-
     void *onOutOfMemory(void *p, size_t nbytes) {
         return runtime_->onOutOfMemory(p, nbytes, maybeJSContext());
     }
 
     /* Clear the pending exception (if any) due to OOM. */
     void recoverFromOutOfMemory();
 
     inline void updateMallocCounter(size_t nbytes) {
         // Note: this is racy.
         runtime_->updateMallocCounter(zone_, nbytes);
     }
 
     void reportAllocationOverflow() {
-        js_ReportAllocationOverflow(asExclusiveContext());
+        js_ReportAllocationOverflow(this);
     }
 
     // Accessors for immutable runtime data.
     JSAtomState &names() { return *runtime_->commonNames; }
     StaticStrings &staticStrings() { return *runtime_->staticStrings; }
     AtomSet &permanentAtoms() { return *runtime_->permanentAtoms; }
     WellKnownSymbols &wellKnownSymbols() { return *runtime_->wellKnownSymbols; }
     const JS::AsmJSCacheOps &asmJSCacheOps() { return runtime_->asmJSCacheOps; }
@@ -283,39 +245,16 @@ struct ThreadSafeContext : ContextFriend
     bool canUseSignalHandlers() const { return runtime_->canUseSignalHandlers(); }
     bool jitSupportsFloatingPoint() const { return runtime_->jitSupportsFloatingPoint; }
     bool jitSupportsSimd() const { return runtime_->jitSupportsSimd; }
 
     // Thread local data that may be accessed freely.
     DtoaState *dtoaState() {
         return perThreadData->dtoaState;
     }
-};
-
-struct HelperThread;
-
-class ExclusiveContext : public ThreadSafeContext
-{
-    friend class gc::ArenaLists;
-    friend class AutoCompartment;
-    friend class AutoLockForExclusiveAccess;
-    friend struct StackBaseShape;
-    friend void JSScript::initCompartment(ExclusiveContext *cx);
-    friend class jit::JitContext;
-
-    // The thread on which this context is running, if this is not a JSContext.
-    HelperThread *helperThread_;
-
-  public:
-
-    ExclusiveContext(JSRuntime *rt, PerThreadData *pt, ContextKind kind)
-      : ThreadSafeContext(rt, pt, kind),
-        helperThread_(nullptr),
-        enterCompartmentDepth_(0)
-    {}
 
     /*
      * "Entering" a compartment changes cx->compartment (which changes
      * cx->global). Note that this does not push any InterpreterFrame which means
      * that it is possible for cx->fp()->compartment() != cx->compartment.
      * This is not a problem since, in general, most places in the VM cannot
      * know that they were called from script (e.g., they may have been called
      * through the JSAPI via JS_CallFunction) and thus cannot expect fp.
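After folding ThreadSafeContext into ExclusiveContext, the hierarchy has two layers instead of three: ExclusiveContext derives directly from ContextFriendFields, and a JSContext still coerces to an ExclusiveContext. A rough structural sketch of the result and of the maybeJSContext() downcast idiom used throughout this patch; the stand-in classes below omit MallocProvider and most members:

    struct ContextFriendFields { /* runtime_, compartment_, zone_, ... */ };
    struct HelperThread;
    struct JSContext;

    class ExclusiveContext : public ContextFriendFields
    {
      protected:
        HelperThread *helperThread_;   // null when this context is the JSContext
      public:
        enum ContextKind { Context_JS, Context_Exclusive };
      private:
        ContextKind contextKind_;
      public:
        bool isJSContext() const { return contextKind_ == Context_JS; }
        JSContext *maybeJSContext() const;   // null for helper-thread contexts
    };

    // Typical use, as in recoverFromOutOfMemory() above: main-thread-only work
    // is gated on the downcast succeeding.
    //   if (JSContext *cx = ecx->maybeJSContext())
    //       cx->clearPendingException();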
--- a/js/src/jspubtd.h
+++ b/js/src/jspubtd.h
@@ -283,18 +283,16 @@ namespace js {
 /*
  * Parallel operations in general can have one of three states. They may
  * succeed, fail, or "bail", where bail indicates that the code encountered an
  * unexpected condition and should be re-run sequentially. Different
  * subcategories of the "bail" state are encoded as variants of TP_RETRY_*.
  */
 enum ParallelResult { TP_SUCCESS, TP_RETRY_SEQUENTIALLY, TP_RETRY_AFTER_GC, TP_FATAL };
 
-struct ThreadSafeContext;
-class ForkJoinContext;
 class ExclusiveContext;
 
 class Allocator;
 
 enum ThingRootKind
 {
     THING_ROOT_OBJECT,
     THING_ROOT_SHAPE,
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -1000,17 +1000,17 @@ class AutoNewContext
 };
 
 static void
 my_LargeAllocFailCallback(void *data)
 {
     JSContext *cx = (JSContext*)data;
     JSRuntime *rt = cx->runtime();
 
-    if (InParallelSection() || !cx->allowGC())
+    if (!cx->isJSContext())
         return;
 
     MOZ_ASSERT(!rt->isHeapBusy());
     MOZ_ASSERT(!rt->currentThreadHasExclusiveAccess());
 
     JS::PrepareForFullGC(rt);
     AutoKeepAtoms keepAtoms(cx->perThreadData);
     rt->gc.gc(GC_NORMAL, JS::gcreason::SHARED_MEMORY_LIMIT);
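The old guard InParallelSection() || !cx->allowGC() collapses to !cx->isJSContext(): allowGC(), removed from the context class above, was defined as isJSContext() ? CanGC : NoGC, and with ForkJoin compiled out InParallelSection() is always false. A short sketch of that equivalence, using a stand-in context type:

    enum AllowGC { NoGC, CanGC };

    struct ContextSketch {
        bool isJSContext_;
        bool isJSContext() const { return isJSContext_; }
        // The removed helper: GC was only allowed on the main-thread JSContext.
        AllowGC allowGC() const { return isJSContext() ? CanGC : NoGC; }
    };

    static bool shouldSkipLargeAllocGC(const ContextSketch &cx)
    {
        // Old form: InParallelSection() || !cx.allowGC().  With the parallel
        // path gone, this reduces to !cx.allowGC(), i.e. !cx.isJSContext().
        return !cx.isJSContext();
    }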
--- a/js/src/vm/ForkJoin.cpp
+++ b/js/src/vm/ForkJoin.cpp
@@ -27,24 +27,24 @@
 # include "jit/JitCompartment.h"
 # include "jit/MIR.h"
 # include "jit/MIRGraph.h"
 #endif
 #include "vm/Monitor.h"
 
 #include "vm/Interpreter-inl.h"
 
+#if 0
+
 using namespace js;
 using namespace js::parallel;
 using namespace js::jit;
 
 using mozilla::ThreadLocal;
 
-#if 0
-
 ///////////////////////////////////////////////////////////////////////////
 // Degenerate configurations
 //
 // When IonMonkey is disabled, we simply run the |func| callback
 // sequentially.  We also forego the feedback altogether.
 
 static bool
 ExecuteSequentially(JSContext *cx_, HandleValue funVal, uint16_t *sliceStart,
@@ -81,32 +81,28 @@ ExecuteSequentially(JSContext *cx, Handl
     args[1].setInt32(*sliceStart);
     args[2].setInt32(sliceEnd);
     if (!fig.invoke(cx))
         return false;
     *sliceStart = (uint16_t)(args.rval().toInt32());
     return true;
 }
 
-#endif // 0
-
 ThreadLocal<ForkJoinContext*> ForkJoinContext::tlsForkJoinContext;
 
 /* static */ bool
 ForkJoinContext::initializeTls()
 {
     if (!tlsForkJoinContext.initialized()) {
         if (!tlsForkJoinContext.init())
             return false;
     }
     return true;
 }
 
-#if 0
-
 ///////////////////////////////////////////////////////////////////////////
 // Parallel configurations
 //
 // The remainder of this file is specific to cases where IonMonkey is enabled.
 
 ///////////////////////////////////////////////////////////////////////////
 // Class Declarations and Function Prototypes
 
--- a/js/src/vm/ForkJoin.h
+++ b/js/src/vm/ForkJoin.h
@@ -17,16 +17,18 @@
 
 #include "jit/Ion.h"
 #include "jit/IonTypes.h"
 
 #ifdef DEBUG
   #define FORKJOIN_SPEW
 #endif
 
+#if 0
+
 ///////////////////////////////////////////////////////////////////////////
 // Read Me First
 //
 // The ForkJoin abstraction:
 // -------------------------
 //
 // This is the building block for executing multi-threaded JavaScript with
 // shared memory (as distinct from Web Workers).  The idea is that you have
@@ -585,9 +587,11 @@ namespace js {
 static inline bool
 InParallelSection()
 {
     return ForkJoinContext::current() != nullptr;
 }
 
 } // namespace js
 
+#endif // 0
+
 #endif /* vm_ForkJoin_h */
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -356,17 +356,17 @@ js::StartOffThreadParseScript(JSContext 
             !GetBuiltinConstructor(cx, JSProto_Iterator, &obj))
         {
             return false;
         }
     }
 
     ScopedJSDeletePtr<ExclusiveContext> helpercx(
         cx->new_<ExclusiveContext>(cx->runtime(), (PerThreadData *) nullptr,
-                                   ThreadSafeContext::Context_Exclusive));
+                                   ExclusiveContext::Context_Exclusive));
     if (!helpercx)
         return false;
 
     ScopedJSDeletePtr<ParseTask> task(
         cx->new_<ParseTask>(helpercx.get(), global, cx, chars, length,
                             callback, callbackData));
     if (!task)
         return false;
--- a/js/src/vm/Interpreter-inl.h
+++ b/js/src/vm/Interpreter-inl.h
@@ -827,17 +827,16 @@ class FastInvokeGuard
 
   public:
     FastInvokeGuard(JSContext *cx, const Value &fval)
       : args_(cx)
       , fun_(cx)
       , script_(cx)
       , useIon_(jit::IsIonEnabled(cx))
     {
-        MOZ_ASSERT(!InParallelSection());
         initFunction(fval);
     }
 
     void initFunction(const Value &fval) {
         if (fval.isObject() && fval.toObject().is<JSFunction>()) {
             JSFunction *fun = &fval.toObject().as<JSFunction>();
             if (fun->isInterpreted())
                 fun_ = fun;
--- a/js/src/vm/Monitor.h
+++ b/js/src/vm/Monitor.h
@@ -13,19 +13,19 @@
 
 #include "jslock.h"
 
 #include "js/Utility.h"
 
 namespace js {
 
 // A base class used for types intended to be used in a parallel
-// fashion, such as the workers in the |ThreadPool| class.  Combines a
-// lock and a condition variable.  You can acquire the lock or signal
-// the condition variable using the |AutoLockMonitor| type.
+// fashion.  Combines a lock and a condition variable.  You can
+// acquire the lock or signal the condition variable using the
+// |AutoLockMonitor| type.
 
 class Monitor
 {
   protected:
     friend class AutoLockMonitor;
     friend class AutoUnlockMonitor;
 
     PRLock *lock_;
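The Monitor comment only loses its mention of ThreadPool workers; the class itself is unchanged. A hedged usage sketch of the lock/condition pairing it describes — AutoLockMonitor's exact interface is not shown in this patch, so the lock/wait/notify members below are assumptions for illustration, not the verified API:

    class Monitor;          // as in vm/Monitor.h: wraps a PRLock and a condition variable

    class AutoLockMonitor   // assumed RAII helper that acquires the monitor's lock
    {
      public:
        explicit AutoLockMonitor(Monitor &m);
        ~AutoLockMonitor();
        void wait();        // assumed: block until the condition variable is signaled
        void notify();      // assumed: wake one waiter
    };

    void waitForWork(Monitor &m, bool &haveWork)
    {
        AutoLockMonitor lock(m);   // lock held for the scope of |lock|
        while (!haveWork)
            lock.wait();           // releases the lock while blocked, reacquires on wake
    }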
--- a/js/src/vm/NativeObject-inl.h
+++ b/js/src/vm/NativeObject-inl.h
@@ -126,17 +126,16 @@ NativeObject::markDenseElementsNotPacked
     MOZ_ASSERT(isNative());
     MarkTypeObjectFlags(cx, this, types::OBJECT_FLAG_NON_PACKED);
 }
 
 inline void
 NativeObject::ensureDenseInitializedLengthNoPackedCheck(ExclusiveContext *cx, uint32_t index,
                                                         uint32_t extra)
 {
-    MOZ_ASSERT(cx->isThreadLocal(this));
     MOZ_ASSERT(!denseElementsAreCopyOnWrite());
 
     /*
      * Ensure that the array's contents have been initialized up to index, and
      * mark the elements through 'index + extra' as initialized in preparation
      * for a write.
      */
     MOZ_ASSERT(index + extra <= getDenseCapacity());
@@ -161,17 +160,16 @@ NativeObject::ensureDenseInitializedLeng
         markDenseElementsNotPacked(cx);
     ensureDenseInitializedLengthNoPackedCheck(cx, index, extra);
 }
 
 NativeObject::EnsureDenseResult
 NativeObject::extendDenseElements(ExclusiveContext *cx,
                                   uint32_t requiredCapacity, uint32_t extra)
 {
-    MOZ_ASSERT(cx->isThreadLocal(this));
     MOZ_ASSERT(!denseElementsAreCopyOnWrite());
 
     /*
      * Don't grow elements for non-extensible objects or watched objects. Dense
      * elements can be added/written with no extensible or watchpoint checks as
      * long as there is capacity for them.
      */
     if (!nonProxyIsExtensible() || watched()) {
--- a/js/src/vm/NativeObject.cpp
+++ b/js/src/vm/NativeObject.cpp
@@ -318,17 +318,16 @@ PropDesc::trace(JSTracer *trc)
     gc::MarkValueRoot(trc, &get_, "PropDesc get");
     gc::MarkValueRoot(trc, &set_, "PropDesc set");
 }
 
 /* static */ inline bool
 NativeObject::updateSlotsForSpan(ExclusiveContext *cx,
                                  HandleNativeObject obj, size_t oldSpan, size_t newSpan)
 {
-    MOZ_ASSERT(cx->isThreadLocal(obj));
     MOZ_ASSERT(oldSpan != newSpan);
 
     size_t oldCount = dynamicSlotsCount(obj->numFixedSlots(), oldSpan, obj->getClass());
     size_t newCount = dynamicSlotsCount(obj->numFixedSlots(), newSpan, obj->getClass());
 
     if (oldSpan < newSpan) {
         if (oldCount < newCount && !growSlots(cx, obj, oldCount, newCount))
             return false;
@@ -347,17 +346,16 @@ NativeObject::updateSlotsForSpan(Exclusi
     }
 
     return true;
 }
 
 /* static */ bool
 NativeObject::setLastProperty(ExclusiveContext *cx, HandleNativeObject obj, HandleShape shape)
 {
-    MOZ_ASSERT(cx->isThreadLocal(obj));
     MOZ_ASSERT(!obj->inDictionaryMode());
     MOZ_ASSERT(!shape->inDictionary());
     MOZ_ASSERT(shape->compartment() == obj->compartment());
     MOZ_ASSERT(shape->numFixedSlots() == obj->numFixedSlots());
 
     size_t oldSpan = obj->lastProperty()->slotSpan();
     size_t newSpan = shape->slotSpan();
 
@@ -390,17 +388,16 @@ NativeObject::setLastPropertyShrinkFixed
     MOZ_ASSERT(dynamicSlotsCount(newFixed, shape->slotSpan(), getClass()) == 0);
 
     shape_ = shape;
 }
 
 /* static */ bool
 NativeObject::setSlotSpan(ExclusiveContext *cx, HandleNativeObject obj, uint32_t span)
 {
-    MOZ_ASSERT(cx->isThreadLocal(obj));
     MOZ_ASSERT(obj->inDictionaryMode());
 
     size_t oldSpan = obj->lastProperty()->base()->slotSpan();
     if (oldSpan == span)
         return true;
 
     if (!updateSlotsForSpan(cx, obj, oldSpan, span))
         return false;
@@ -432,17 +429,16 @@ ReallocateSlots(ExclusiveContext *cx, JS
                                                                         oldCount, newCount);
     }
     return obj->zone()->pod_realloc<HeapSlot>(oldSlots, oldCount, newCount);
 }
 
 /* static */ bool
 NativeObject::growSlots(ExclusiveContext *cx, HandleNativeObject obj, uint32_t oldCount, uint32_t newCount)
 {
-    MOZ_ASSERT(cx->isThreadLocal(obj));
     MOZ_ASSERT(newCount > oldCount);
     MOZ_ASSERT_IF(!obj->is<ArrayObject>(), newCount >= SLOT_CAPACITY_MIN);
 
     /*
      * Slot capacities are determined by the span of allocated objects. Due to
      * the limited number of bits to store shape slots, object growth is
      * throttled well before the slot capacity can overflow.
      */
@@ -476,17 +472,16 @@ FreeSlots(ExclusiveContext *cx, HeapSlot
         return cx->asJSContext()->runtime()->gc.nursery.freeSlots(slots);
     js_free(slots);
 }
 
 /* static */ void
 NativeObject::shrinkSlots(ExclusiveContext *cx, HandleNativeObject obj,
                           uint32_t oldCount, uint32_t newCount)
 {
-    MOZ_ASSERT(cx->isThreadLocal(obj));
     MOZ_ASSERT(newCount < oldCount);
 
     if (newCount == 0) {
         FreeSlots(cx, obj->slots_);
         obj->slots_ = nullptr;
         return;
     }
 
@@ -871,17 +866,16 @@ NativeObject::growElements(ExclusiveCont
     Debug_SetSlotRangeToCrashOnTouch(elements_ + initlen, newCapacity - initlen);
 
     return true;
 }
 
 void
 NativeObject::shrinkElements(ExclusiveContext *cx, uint32_t reqCapacity)
 {
-    MOZ_ASSERT(cx->isThreadLocal(this));
     MOZ_ASSERT(canHaveNonEmptyElements());
     if (denseElementsAreCopyOnWrite())
         MOZ_CRASH();
 
     if (!hasDynamicElements())
         return;
 
     uint32_t oldCapacity = getDenseCapacity();
@@ -939,18 +933,16 @@ NativeObject::CopyElementsForWrite(Exclu
     Debug_SetSlotRangeToCrashOnTouch(obj->elements_ + initlen, newCapacity - initlen);
 
     return true;
 }
 
 /* static */ bool
 NativeObject::allocSlot(ExclusiveContext *cx, HandleNativeObject obj, uint32_t *slotp)
 {
-    MOZ_ASSERT(cx->isThreadLocal(obj));
-
     uint32_t slot = obj->slotSpan();
     MOZ_ASSERT(slot >= JSSLOT_FREE(obj->getClass()));
 
     /*
      * If this object is in dictionary mode, try to pull a free slot from the
      * shape table's slot-number freelist.
      */
     if (obj->inDictionaryMode()) {
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -128,17 +128,16 @@ ReturnZeroSize(const void *p)
 {
     return 0;
 }
 
 JSRuntime::JSRuntime(JSRuntime *parentRuntime)
   : mainThread(this),
     parentRuntime(parentRuntime),
     interrupt_(false),
-    interruptPar_(false),
     telemetryCallback(nullptr),
     handlingSignal(false),
     interruptCallback(nullptr),
     exclusiveAccessLock(nullptr),
     exclusiveAccessOwner(nullptr),
     mainThreadHasExclusiveAccess(false),
     numExclusiveThreads(0),
     numCompartments(0),
@@ -211,20 +210,18 @@ JSRuntime::JSRuntime(JSRuntime *parentRu
     commonNames(nullptr),
     permanentAtoms(nullptr),
     wellKnownSymbols(nullptr),
     wrapObjectCallbacks(&DefaultWrapObjectCallbacks),
     preserveWrapperCallback(nullptr),
     jitSupportsFloatingPoint(false),
     jitSupportsSimd(false),
     ionPcScriptCache(nullptr),
-    threadPool(this),
     defaultJSContextCallback(nullptr),
     ctypesActivityCallback(nullptr),
-    forkJoinWarmup(0),
     offthreadIonCompilationEnabled_(true),
     parallelParsingEnabled_(true),
 #ifdef DEBUG
     enteredPolicy(nullptr),
 #endif
     largeAllocationFailureCallback(nullptr),
     oomCallback(nullptr),
     debuggerMallocSizeOf(ReturnZeroSize)
@@ -272,19 +269,16 @@ JSRuntime::init(uint32_t maxbytes, uint3
     if (!exclusiveAccessLock)
         return false;
 
     if (!mainThread.init())
         return false;
 
     js::TlsPerThreadData.set(&mainThread);
 
-    if (!threadPool.init())
-        return false;
-
     if (CanUseExtraThreads())
         EnsureHelperThreadsInitialized();
 
     if (!gc.init(maxbytes, maxNurseryBytes))
         return false;
 
     const char *size = getenv("JSGC_MARK_STACK_LIMIT");
     if (size)
@@ -618,30 +612,28 @@ PerThreadData::initJitStackLimitPar(uint
 {
     jitStackLimit_ = limit;
 }
 
 void
 JSRuntime::requestInterrupt(InterruptMode mode)
 {
     interrupt_ = true;
-    interruptPar_ = true;
     mainThread.jitStackLimit_ = UINTPTR_MAX;
 
     if (mode == JSRuntime::RequestInterruptUrgent)
         InterruptRunningJitCode(this);
 }
 
 bool
 JSRuntime::handleInterrupt(JSContext *cx)
 {
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
     if (interrupt_ || mainThread.jitStackLimit_ == UINTPTR_MAX) {
         interrupt_ = false;
-        interruptPar_ = false;
         mainThread.resetJitStackLimit();
         return InvokeInterruptCallback(cx);
     }
     return true;
 }
 
 jit::ExecutableAllocator *
 JSRuntime::createExecutableAllocator(JSContext *cx)
@@ -810,29 +802,24 @@ JSRuntime::clearUsedByExclusiveThread(Zo
     MOZ_ASSERT(zone->usedByExclusiveThread);
     zone->usedByExclusiveThread = false;
     numExclusiveThreads--;
 }
 
 bool
 js::CurrentThreadCanAccessRuntime(JSRuntime *rt)
 {
-    return rt->ownerThread_ == PR_GetCurrentThread() && !InParallelSection();
+    return rt->ownerThread_ == PR_GetCurrentThread();
 }
 
 bool
 js::CurrentThreadCanAccessZone(Zone *zone)
 {
     if (CurrentThreadCanAccessRuntime(zone->runtime_))
         return true;
-    if (InParallelSection()) {
-        DebugOnly<PerThreadData *> pt = js::TlsPerThreadData.get();
-        MOZ_ASSERT(pt && pt->associatedWith(zone->runtime_));
-        return true;
-    }
 
     // Only zones in use by an exclusive thread can be used off the main thread
     // or outside of PJS. We don't keep track of which thread owns such zones
     // though, so this check is imperfect.
     return zone->usedByExclusiveThread;
 }
 
 #ifdef DEBUG
--- a/js/src/vm/Runtime.h
+++ b/js/src/vm/Runtime.h
@@ -698,17 +698,16 @@ struct JSRuntime : public JS::shadow::Ru
     /*
      * If non-null, another runtime guaranteed to outlive this one and whose
      * permanent data may be used by this one where possible.
      */
     JSRuntime *parentRuntime;
 
   private:
     mozilla::Atomic<uint32_t, mozilla::Relaxed> interrupt_;
-    mozilla::Atomic<uint32_t, mozilla::Relaxed> interruptPar_;
 
     /* Call this to accumulate telemetry data. */
     JSAccumulateTelemetryDataCallback telemetryCallback;
   public:
     // Accumulates data for Firefox telemetry. |id| is the ID of a JS_TELEMETRY_*
     // histogram. |key| provides an additional key to identify the histogram.
     // |sample| is the data to add to the histogram.
     void addTelemetry(int id, uint32_t sample, const char *key = nullptr);
@@ -740,29 +739,22 @@ struct JSRuntime : public JS::shadow::Ru
     // called twice in succession after a single requestInterrupt call, but
     // that's fine.
     void requestInterrupt(InterruptMode mode);
     bool handleInterrupt(JSContext *cx);
 
     MOZ_ALWAYS_INLINE bool hasPendingInterrupt() const {
         return interrupt_;
     }
-    MOZ_ALWAYS_INLINE bool hasPendingInterruptPar() const {
-        return interruptPar_;
-    }
 
     // For read-only JIT use:
     void *addressOfInterruptUint32() {
         static_assert(sizeof(interrupt_) == sizeof(uint32_t), "Assumed by JIT callers");
         return &interrupt_;
     }
-    void *addressOfInterruptParUint32() {
-        static_assert(sizeof(interruptPar_) == sizeof(uint32_t), "Assumed by JIT callers");
-        return &interruptPar_;
-    }
 
     /* Set when handling a signal for a thread associated with this runtime. */
     bool handlingSignal;
 
     JSInterruptCallback interruptCallback;
 
 #ifdef DEBUG
     void assertCanLock(js::RuntimeLock which);
@@ -1278,26 +1270,20 @@ struct JSRuntime : public JS::shadow::Ru
     }
 
     bool                jitSupportsFloatingPoint;
     bool                jitSupportsSimd;
 
     // Cache for jit::GetPcScript().
     js::jit::PcScriptCache *ionPcScriptCache;
 
-    js::ThreadPool threadPool;
-
     js::DefaultJSContextCallback defaultJSContextCallback;
 
     js::CTypesActivityCallback  ctypesActivityCallback;
 
-    // Non-zero if this is a ForkJoin warmup execution.  See
-    // js::ForkJoin() for more information.
-    uint32_t forkJoinWarmup;
-
   private:
     static mozilla::Atomic<size_t> liveRuntimesCount;
 
   public:
     static bool hasLiveRuntimes() {
         return liveRuntimesCount > 0;
     }
 
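addressOfInterruptUint32() remains so jitted code can poll the single interrupt flag; the parallel variant disappears along with interruptPar_. An illustrative model of what that polling amounts to — the runtime side mirrors the Runtime.cpp hunk above, while the check itself is really emitted by the JIT as machine code, not written in C++:

    #include <atomic>
    #include <cstdint>

    struct RuntimeSketch {
        std::atomic<uint32_t> interrupt_{0};        // stand-in for mozilla::Atomic

        void requestInterrupt() { interrupt_ = 1; } // the real code also resets jitStackLimit_

        void *addressOfInterruptUint32() { return &interrupt_; }
    };

    static bool interruptRequested(void *addr)
    {
        // A polled check: load the uint32 at the published address and branch if non-zero.
        return static_cast<std::atomic<uint32_t> *>(addr)->load() != 0;
    }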
--- a/js/src/vm/Shape.cpp
+++ b/js/src/vm/Shape.cpp
@@ -54,17 +54,16 @@ ShapeTable::init(ExclusiveContext *cx, S
     if (!entries_)
         return false;
 
     MOZ_ASSERT(sizeLog2 <= HASH_BITS);
     hashShift_ = HASH_BITS - sizeLog2;
 
     for (Shape::Range<NoGC> r(lastProp); !r.empty(); r.popFront()) {
         Shape &shape = r.front();
-        MOZ_ASSERT(cx->isThreadLocal(&shape));
         Entry &entry = search(shape.propid(), true);
 
         /*
          * Beware duplicate args and arg vs. var conflicts: the youngest shape
          * (nearest to lastProp) must win. See bug 600067.
          */
         if (!entry.shape())
             entry.setPreservingCollision(&shape);
@@ -110,17 +109,16 @@ Shape::insertIntoDictionary(HeapPtrShape
     listp = (HeapPtrShape *) dictp;
     *dictp = this;
 }
 
 bool
 Shape::makeOwnBaseShape(ExclusiveContext *cx)
 {
     MOZ_ASSERT(!base()->isOwned());
-    MOZ_ASSERT(cx->isThreadLocal(this));
     assertSameCompartmentDebugOnly(cx, compartment());
 
     BaseShape *nbase = js_NewGCBaseShape<NoGC>(cx);
     if (!nbase)
         return false;
 
     new (nbase) BaseShape(StackBaseShape(this));
     nbase->setOwned(base()->toUnowned());
@@ -294,17 +292,16 @@ ShapeTable::change(int log2Delta, Exclus
     hashShift_ = HASH_BITS - newLog2;
     removedCount_ = 0;
     Entry *oldTable = entries_;
     entries_ = newTable;
 
     /* Copy only live entries, leaving removed and free ones behind. */
     for (Entry *oldEntry = oldTable; oldSize != 0; oldEntry++) {
         if (Shape *shape = oldEntry->shape()) {
-            MOZ_ASSERT(cx->isThreadLocal(shape));
             Entry &entry = search(shape->propid(), true);
             MOZ_ASSERT(entry.isFree());
             entry.setShape(shape);
         }
         oldSize--;
     }
 
     MOZ_ASSERT(capacity() == newSize);
@@ -436,23 +433,16 @@ NativeObject::getChildProperty(Exclusive
 bool
 js::NativeObject::toDictionaryMode(ExclusiveContext *cx)
 {
     MOZ_ASSERT(!inDictionaryMode());
 
     /* We allocate the shapes from cx->compartment(), so make sure it's right. */
     MOZ_ASSERT(cx->isInsideCurrentCompartment(this));
 
-    /*
-     * This function is thread safe as long as the object is thread local. It
-     * does not modify the shared shapes, and only allocates newly allocated
-     * (and thus also thread local) shapes.
-     */
-    MOZ_ASSERT(cx->isThreadLocal(this));
-
     uint32_t span = slotSpan();
 
     Rooted<NativeObject*> self(cx, this);
 
     // Clone the shapes into a new dictionary list. Don't update the last
     // property of this object until done, otherwise a GC triggered while
     // creating the dictionary will get the wrong slot span for this object.
     RootedShape root(cx);
@@ -537,17 +527,16 @@ ShouldConvertToDictionary(JSObject *obj)
 /* static */ Shape *
 NativeObject::addPropertyInternal(ExclusiveContext *cx,
                                   HandleNativeObject obj, HandleId id,
                                   PropertyOp getter, StrictPropertyOp setter,
                                   uint32_t slot, unsigned attrs,
                                   unsigned flags, ShapeTable::Entry *entry,
                                   bool allowDictionary)
 {
-    MOZ_ASSERT(cx->isThreadLocal(obj));
     MOZ_ASSERT_IF(!allowDictionary, !obj->inDictionaryMode());
     MOZ_ASSERT(getter != JS_PropertyStub);
     MOZ_ASSERT(setter != JS_StrictPropertyStub);
 
     AutoRooterGetterSetter gsRoot(cx, attrs, &getter, &setter);
 
     /*
      * The code below deals with either converting obj to dictionary mode or
@@ -707,17 +696,16 @@ CheckCanChangeAttrs(ExclusiveContext *cx
     return true;
 }
 
 /* static */ Shape *
 NativeObject::putProperty(ExclusiveContext *cx, HandleNativeObject obj, HandleId id,
                           PropertyOp getter, StrictPropertyOp setter,
                           uint32_t slot, unsigned attrs, unsigned flags)
 {
-    MOZ_ASSERT(cx->isThreadLocal(obj));
     MOZ_ASSERT(!JSID_IS_VOID(id));
     MOZ_ASSERT(getter != JS_PropertyStub);
     MOZ_ASSERT(setter != JS_StrictPropertyStub);
 
 #ifdef DEBUG
     if (obj->is<ArrayObject>()) {
         ArrayObject *arr = &obj->as<ArrayObject>();
         uint32_t index;
@@ -898,17 +886,16 @@ NativeObject::putProperty(ExclusiveConte
     return shape;
 }
 
 /* static */ Shape *
 NativeObject::changeProperty(ExclusiveContext *cx, HandleNativeObject obj,
                              HandleShape shape, unsigned attrs,
                              unsigned mask, PropertyOp getter, StrictPropertyOp setter)
 {
-    MOZ_ASSERT(cx->isThreadLocal(obj));
     MOZ_ASSERT(obj->containsPure(shape));
     MOZ_ASSERT(getter != JS_PropertyStub);
     MOZ_ASSERT(setter != JS_StrictPropertyStub);
 
     attrs |= shape->attrs & mask;
     MOZ_ASSERT_IF(attrs & (JSPROP_GETTER | JSPROP_SETTER), attrs & JSPROP_SHARED);
 
     /* Allow only shared (slotless) => unshared (slotful) transition. */
@@ -1101,18 +1088,16 @@ NativeObject::rollbackProperties(Exclusi
 
     return true;
 }
 
 Shape *
 NativeObject::replaceWithNewEquivalentShape(ExclusiveContext *cx, Shape *oldShape, Shape *newShape,
                                             bool accessorShape)
 {
-    MOZ_ASSERT(cx->isThreadLocal(this));
-    MOZ_ASSERT(cx->isThreadLocal(oldShape));
     MOZ_ASSERT(cx->isInsideCurrentCompartment(oldShape));
     MOZ_ASSERT_IF(oldShape != lastProperty(),
                   inDictionaryMode() && lookup(cx, oldShape->propidRef()) == oldShape);
 
     NativeObject *self = this;
 
     if (!inDictionaryMode()) {
         RootedNativeObject selfRoot(cx, self);
--- a/js/src/vm/Stack-inl.h
+++ b/js/src/vm/Stack-inl.h
@@ -843,19 +843,16 @@ bool
 Activation::isProfiling() const
 {
     if (isInterpreter())
         return asInterpreter()->isProfiling();
 
     if (isJit())
         return asJit()->isProfiling();
 
-    if (isForkJoin())
-        return asForkJoin()->isProfiling();
-
     MOZ_ASSERT(isAsmJS());
     return asAsmJS()->isProfiling();
 }
 
 Activation *
 Activation::mostRecentProfiling()
 {
     if (isProfiling())
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -550,22 +550,16 @@ FrameIter::settleOnActivation()
                 ++data_.activations_;
                 continue;
             }
 
             data_.state_ = ASMJS;
             return;
         }
 
-        // ForkJoin activations don't contain iterable frames, so skip them.
-        if (activation->isForkJoin()) {
-            ++data_.activations_;
-            continue;
-        }
-
         MOZ_ASSERT(activation->isInterpreter());
 
         InterpreterActivation *interpAct = activation->asInterpreter();
         data_.interpFrames_ = InterpreterFrameIterator(interpAct);
 
         // If we OSR'ed into JIT code, skip the interpreter frame so that
         // the same frame is not reported twice.
         if (data_.interpFrames_.frame()->runningInJit()) {
--- a/js/src/vm/Stack.h
+++ b/js/src/vm/Stack.h
@@ -1030,17 +1030,16 @@ struct DefaultHasher<AbstractFramePtr> {
     static bool match(const AbstractFramePtr &k, const Lookup &l) {
         return k == l;
     }
 };
 
 /*****************************************************************************/
 
 class InterpreterActivation;
-class ForkJoinActivation;
 class AsmJSActivation;
 
 namespace jit {
     class JitActivation;
 };
 
 class Activation
 {
@@ -1058,17 +1057,17 @@ class Activation
 
     // Counter incremented by JS::HideScriptedCaller and decremented by
     // JS::UnhideScriptedCaller. If > 0 for the top activation,
     // DescribeScriptedCaller will return null instead of querying that
     // activation, which should prompt the caller to consult embedding-specific
     // data structures instead.
     size_t hideScriptedCallerCount_;
 
-    enum Kind { Interpreter, Jit, ForkJoin, AsmJS };
+    enum Kind { Interpreter, Jit, AsmJS };
     Kind kind_;
 
     inline Activation(JSContext *cx, Kind kind_);
     inline ~Activation();
 
   public:
     JSContext *cx() const {
         return cx_;
@@ -1083,39 +1082,32 @@ class Activation
     inline Activation *mostRecentProfiling();
 
     bool isInterpreter() const {
         return kind_ == Interpreter;
     }
     bool isJit() const {
         return kind_ == Jit;
     }
-    bool isForkJoin() const {
-        return kind_ == ForkJoin;
-    }
     bool isAsmJS() const {
         return kind_ == AsmJS;
     }
 
     inline bool isProfiling() const;
     void registerProfiling();
     void unregisterProfiling();
 
     InterpreterActivation *asInterpreter() const {
         MOZ_ASSERT(isInterpreter());
         return (InterpreterActivation *)this;
     }
     jit::JitActivation *asJit() const {
         MOZ_ASSERT(isJit());
         return (jit::JitActivation *)this;
     }
-    ForkJoinActivation *asForkJoin() const {
-        MOZ_ASSERT(isForkJoin());
-        return (ForkJoinActivation *)this;
-    }
     AsmJSActivation *asAsmJS() const {
         MOZ_ASSERT(isAsmJS());
         return (AsmJSActivation *)this;
     }
 
     void saveFrameChain() {
         savedFrameChain_++;
     }
--- a/js/src/vm/ThreadPool.cpp
+++ b/js/src/vm/ThreadPool.cpp
@@ -12,16 +12,18 @@
 #include "jsmath.h"
 #include "jsnum.h" // for FIX_FPU
 
 #include "js/Utility.h"
 #include "vm/ForkJoin.h"
 #include "vm/Monitor.h"
 #include "vm/Runtime.h"
 
+#if 0
+
 using namespace js;
 
 const size_t WORKER_THREAD_STACK_SIZE = 1*1024*1024;
 
 static inline uint32_t
 ComposeSliceBounds(uint16_t from, uint16_t to)
 {
     MOZ_ASSERT(from <= to);
@@ -464,8 +466,10 @@ ThreadPool::abortJob()
     // The reason for this is that while calling discardSlices() clears all
     // workers' bounds, the pendingSlices_ cache might still be > 0 due to
     // still-executing calls to popSliceBack or popSliceFront in other
     // threads. When those finish, we will be sure that !hasWork(), which is
     // important to ensure that an aborted worker does not start again due to
     // the thread pool having more work.
     while (hasWork());
 }
+
+#endif // 0
--- a/js/src/vm/ThreadPool.h
+++ b/js/src/vm/ThreadPool.h
@@ -17,16 +17,18 @@
 #include "js/Vector.h"
 #include "vm/Monitor.h"
 
 struct JSRuntime;
 struct JSCompartment;
 
 namespace js {
 
+#if 0
+
 class ThreadPool;
 
 /////////////////////////////////////////////////////////////////////////////
 // ThreadPoolWorker
 //
 // Class for worker threads in the pool. All threads (i.e. helpers and main
 // thread) have a worker associted with them. By convention, the worker id of
 // the main thread is 0.
@@ -246,11 +248,13 @@ class ThreadPool : public Monitor
     // Blocks until the main thread has completed execution.
     ParallelResult executeJob(JSContext *cx, ParallelJob *job, uint16_t sliceStart,
                               uint16_t numSlices);
 
     // Abort the current job.
     void abortJob();
 };
 
+#endif
+
 } // namespace js
 
 #endif /* vm_ThreadPool_h */