Bug 1118038 - Remove JIT parts of PJS. (r=lth)
author Shu-yu Guo <shu@rfrn.org>
date Wed, 07 Jan 2015 01:18:42 -0800
changeset 222596 7584b643e7e9e44c450c186e2631bed91fea5850
parent 222595 eab4b3520c509ec440cc648d795964b06b353d01
child 222597 2ba42b1966bc18e2911ef1552acf859dfecf8579
push id 28068
push user cbook@mozilla.com
push date Thu, 08 Jan 2015 13:16:34 +0000
treeherder mozilla-central@2880e05d5e32
reviewers lth
bugs 1118038
milestone 37.0a1
Bug 1118038 - Remove JIT parts of PJS. (r=lth)
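
Most of this patch follows one mechanical pattern, repeated across the hunks below: each VMFunctionsModal pair (a sequential VMFunction plus its *Par counterpart, selected at codegen time by ExecutionMode) collapses into a single VMFunction, and the ExecutionMode arguments threaded through the masm and JitRuntime helpers disappear with it. A minimal standalone sketch of that before/after shape, using invented stand-in types rather than the real SpiderMonkey ones:

#include <cstdio>

// Invented stand-in types; only the shape of the change is modeled here. The
// real VMFunction machinery lives in js/src/jit/VMFunctions.h.
struct JSContext {};
struct VMFunction {
    bool (*fn)(JSContext *);
};

static bool InterruptCheck(JSContext *) {
    std::puts("sequential interrupt check");
    return true;
}

// Before: a modal wrapper held one entry per ExecutionMode and callers picked
// one with gen->info().executionMode(), e.g.
//
//   static const VMFunctionsModal InterruptCheckInfo = VMFunctionsModal(
//       FunctionInfo<InterruptCheckFn>(InterruptCheck),
//       FunctionInfo<InterruptCheckParFn>(InterruptCheckPar));
//
// After: with parallel execution gone, a single VMFunction entry suffices.
static const VMFunction InterruptCheckInfo = { InterruptCheck };

int main() {
    JSContext cx;
    return InterruptCheckInfo.fn(&cx) ? 0 : 1;
}

The same collapse appears below for IntToStringInfo, DoubleToStringInfo, CheckOverRecursedInfo, the comparison infos, and ConcatStringsInfo.
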
js/src/asmjs/AsmJSValidate.cpp
js/src/gc/Zone.cpp
js/src/jit/BaselineIC.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/CodeGenerator.h
js/src/jit/CompileInfo.h
js/src/jit/Ion.cpp
js/src/jit/Ion.h
js/src/jit/IonAnalysis.cpp
js/src/jit/IonBuilder.cpp
js/src/jit/IonBuilder.h
js/src/jit/IonCaches.cpp
js/src/jit/IonCaches.h
js/src/jit/IonCode.h
js/src/jit/IonTypes.h
js/src/jit/JitCompartment.h
js/src/jit/JitFrames.cpp
js/src/jit/JitFrames.h
js/src/jit/LIR-Common.h
js/src/jit/LIR.h
js/src/jit/LOpcodes.h
js/src/jit/Lowering.cpp
js/src/jit/Lowering.h
js/src/jit/MCallOptimize.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/MIRGraph.cpp
js/src/jit/MIRGraph.h
js/src/jit/MOpcodes.h
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/ParallelFunctions.cpp
js/src/jit/ParallelFunctions.h
js/src/jit/ParallelSafetyAnalysis.cpp
js/src/jit/ParallelSafetyAnalysis.h
js/src/jit/VMFunctions.h
js/src/jit/arm/Bailouts-arm.cpp
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/CodeGenerator-arm.h
js/src/jit/arm/Lowering-arm.cpp
js/src/jit/arm/Lowering-arm.h
js/src/jit/arm/Trampoline-arm.cpp
js/src/jit/mips/Bailouts-mips.cpp
js/src/jit/mips/CodeGenerator-mips.cpp
js/src/jit/mips/CodeGenerator-mips.h
js/src/jit/mips/Lowering-mips.cpp
js/src/jit/mips/Lowering-mips.h
js/src/jit/mips/Trampoline-mips.cpp
js/src/jit/none/Lowering-none.h
js/src/jit/none/Trampoline-none.cpp
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/shared/CodeGenerator-shared.h
js/src/jit/shared/CodeGenerator-x86-shared.cpp
js/src/jit/shared/CodeGenerator-x86-shared.h
js/src/jit/shared/Lowering-shared.h
js/src/jit/shared/Lowering-x86-shared.cpp
js/src/jit/shared/Lowering-x86-shared.h
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/Bailouts-x64.cpp
js/src/jit/x64/Lowering-x64.h
js/src/jit/x64/Trampoline-x64.cpp
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/Bailouts-x86.cpp
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/Lowering-x86.cpp
js/src/jit/x86/Lowering-x86.h
js/src/jit/x86/Trampoline-x86.cpp
js/src/jsgc.cpp
js/src/moz.build
js/src/vm/ForkJoin.cpp
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -8487,17 +8487,17 @@ GenerateFFIIonExit(ModuleCompiler &m, co
     masm.loadPtr(Address(callee, offsetof(AsmJSModule::ExitDatum, fun)), callee);
 
     // 2.3. Save callee
     masm.storePtr(callee, Address(StackPointer, argOffset));
     argOffset += sizeof(size_t);
 
     // 2.4. Load callee executable entry point
     masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
-    masm.loadBaselineOrIonNoArgCheck(callee, callee, SequentialExecution, nullptr);
+    masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr);
 
     // 3. Argc
     unsigned argc = exit.sig().args().length();
     masm.storePtr(ImmWord(uintptr_t(argc)), Address(StackPointer, argOffset));
     argOffset += sizeof(size_t);
 
     // 4. |this| value
     masm.storeValue(UndefinedValue(), Address(StackPointer, argOffset));
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -180,18 +180,17 @@ Zone::discardJitCode(FreeOp *fop)
         /* Mark baseline scripts on the stack as active. */
         jit::MarkActiveBaselineScripts(this);
 
         /* Only mark OSI points if code is being discarded. */
         jit::InvalidateAll(fop, this);
 
         for (ZoneCellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
             JSScript *script = i.get<JSScript>();
-            jit::FinishInvalidation<SequentialExecution>(fop, script);
-            jit::FinishInvalidation<ParallelExecution>(fop, script);
+            jit::FinishInvalidation(fop, script);
 
             /*
              * Discard baseline script if it's not marked as active. Note that
              * this also resets the active flag.
              */
             jit::FinishDiscardBaselineScript(fop, script);
 
             /*
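
The Zone.cpp hunk above shows the same simplification at the API level: FinishInvalidation was previously instantiated once per ExecutionMode and called twice, and with ParallelExecution gone the template parameter drops out. A hedged sketch of that kind of de-templatization, with invented stand-in types:

#include <cstdio>

// Invented stand-ins for illustration; the real FreeOp, JSScript, and
// FinishInvalidation declarations are not reproduced here.
struct FreeOp {};
struct JSScript {};

// Before: one explicit instantiation per execution mode, both called from
// Zone::discardJitCode:
//
//   jit::FinishInvalidation<SequentialExecution>(fop, script);
//   jit::FinishInvalidation<ParallelExecution>(fop, script);
//
// After: a single non-template function, since only sequential Ion code remains.
void FinishInvalidation(FreeOp *, JSScript *) {
    std::puts("releasing the script's Ion code");
}

int main() {
    FreeOp fop;
    JSScript script;
    FinishInvalidation(&fop, &script);
    return 0;
}
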
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -4256,30 +4256,30 @@ ICGetElemNativeCompiler::emitCallScripte
         masm.Push(Imm32(0));  // ActualArgc is 0
         masm.Push(callee);
         masm.Push(callScratch);
         regs.add(callScratch);
     }
 
     Register code = regs.takeAnyExcluding(ArgumentsRectifierReg);
     masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
-    masm.loadBaselineOrIonRaw(code, code, SequentialExecution, nullptr);
+    masm.loadBaselineOrIonRaw(code, code, nullptr);
 
     Register scratch = regs.takeAny();
 
     // Handle arguments underflow.
     Label noUnderflow;
     masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
     masm.branch32(Assembler::Equal, scratch, Imm32(0), &noUnderflow);
     {
         // Call the arguments rectifier.
         MOZ_ASSERT(ArgumentsRectifierReg != code);
 
         JitCode *argumentsRectifier =
-            cx->runtime()->jitRuntime()->getArgumentsRectifier(SequentialExecution);
+            cx->runtime()->jitRuntime()->getArgumentsRectifier();
 
         masm.movePtr(ImmGCPtr(argumentsRectifier), code);
         masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
         masm.mov(ImmWord(0), ArgumentsRectifierReg);
     }
 
     masm.bind(&noUnderflow);
 
@@ -4489,18 +4489,17 @@ ICGetElemNativeCompiler::generateStubCod
         } else {
             MOZ_ASSERT(acctype_ == ICGetElemNativeStub::ScriptedGetter);
 
             // Load function in scratchReg and ensure that it has a jit script.
             masm.loadPtr(Address(BaselineStubReg, ICGetElemNativeGetterStub::offsetOfGetter()),
                          scratchReg);
             masm.branchIfFunctionHasNoScript(scratchReg, popR1 ? &failurePopR1 : &failure);
             masm.loadPtr(Address(scratchReg, JSFunction::offsetOfNativeOrScript()), scratchReg);
-            masm.loadBaselineOrIonRaw(scratchReg, scratchReg, SequentialExecution,
-                                      popR1 ? &failurePopR1 : &failure);
+            masm.loadBaselineOrIonRaw(scratchReg, scratchReg, popR1 ? &failurePopR1 : &failure);
 
             // At this point, we are guaranteed to successfully complete.
             if (popR1)
                 masm.addPtr(Imm32(sizeof(size_t)), BaselineStackReg);
 
             emitCallScripted(masm, objReg);
         }
     }
@@ -7416,17 +7415,17 @@ ICGetProp_CallScripted::Compiler::genera
         regs.take(callee);
     } else {
         callee = regs.takeAny();
     }
     Register code = regs.takeAny();
     masm.loadPtr(Address(BaselineStubReg, ICGetProp_CallScripted::offsetOfGetter()), callee);
     masm.branchIfFunctionHasNoScript(callee, &failureLeaveStubFrame);
     masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
-    masm.loadBaselineOrIonRaw(code, code, SequentialExecution, &failureLeaveStubFrame);
+    masm.loadBaselineOrIonRaw(code, code, &failureLeaveStubFrame);
 
     // Getter is called with 0 arguments, just |obj| as thisv.
     // Note that we use Push, not push, so that callJit will align the stack
     // properly on ARM.
     masm.Push(R0);
     EmitCreateStubFrameDescriptor(masm, scratch);
     masm.Push(Imm32(0));  // ActualArgc is 0
     masm.Push(callee);
@@ -7436,17 +7435,17 @@ ICGetProp_CallScripted::Compiler::genera
     Label noUnderflow;
     masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
     masm.branch32(Assembler::Equal, scratch, Imm32(0), &noUnderflow);
     {
         // Call the arguments rectifier.
         MOZ_ASSERT(ArgumentsRectifierReg != code);
 
         JitCode *argumentsRectifier =
-            cx->runtime()->jitRuntime()->getArgumentsRectifier(SequentialExecution);
+            cx->runtime()->jitRuntime()->getArgumentsRectifier();
 
         masm.movePtr(ImmGCPtr(argumentsRectifier), code);
         masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
         masm.mov(ImmWord(0), ArgumentsRectifierReg);
     }
 
     masm.bind(&noUnderflow);
 
@@ -8788,17 +8787,17 @@ ICSetProp_CallScripted::Compiler::genera
         regs.take(callee);
     } else {
         callee = regs.takeAny();
     }
     Register code = regs.takeAny();
     masm.loadPtr(Address(BaselineStubReg, ICSetProp_CallScripted::offsetOfSetter()), callee);
     masm.branchIfFunctionHasNoScript(callee, &failureLeaveStubFrame);
     masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
-    masm.loadBaselineOrIonRaw(code, code, SequentialExecution, &failureLeaveStubFrame);
+    masm.loadBaselineOrIonRaw(code, code, &failureLeaveStubFrame);
 
     // Setter is called with the new value as the only argument, and |obj| as thisv.
     // Note that we use Push, not push, so that callJit will align the stack
     // properly on ARM.
 
     // To Push R1, read it off of the stowed values on stack.
     // Stack: [ ..., R0, R1, ..STUBFRAME-HEADER.. ]
     masm.movePtr(BaselineStackReg, scratch);
@@ -8813,17 +8812,17 @@ ICSetProp_CallScripted::Compiler::genera
     Label noUnderflow;
     masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
     masm.branch32(Assembler::BelowOrEqual, scratch, Imm32(1), &noUnderflow);
     {
         // Call the arguments rectifier.
         MOZ_ASSERT(ArgumentsRectifierReg != code);
 
         JitCode *argumentsRectifier =
-            cx->runtime()->jitRuntime()->getArgumentsRectifier(SequentialExecution);
+            cx->runtime()->jitRuntime()->getArgumentsRectifier();
 
         masm.movePtr(ImmGCPtr(argumentsRectifier), code);
         masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
         masm.mov(ImmWord(1), ArgumentsRectifierReg);
     }
 
     masm.bind(&noUnderflow);
 
@@ -9755,17 +9754,17 @@ ICCallStubCompiler::guardFunApply(MacroA
                             failure);
 
     if (checkNative) {
         masm.branchIfInterpreted(target, failure);
     } else {
         masm.branchIfFunctionHasNoScript(target, failure);
         Register temp = regs.takeAny();
         masm.loadPtr(Address(target, JSFunction::offsetOfNativeOrScript()), temp);
-        masm.loadBaselineOrIonRaw(temp, temp, SequentialExecution, failure);
+        masm.loadBaselineOrIonRaw(temp, temp, failure);
         regs.add(temp);
     }
     return target;
 }
 
 void
 ICCallStubCompiler::pushCallerArguments(MacroAssembler &masm, GeneralRegisterSet regs)
 {
@@ -9990,17 +9989,17 @@ ICCallScriptedCompiler::generateStubCode
             masm.branchIfFunctionHasNoScript(callee, &failure);
         masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
     }
 
     // Load the start of the target JitCode.
     Register code;
     if (!isConstructing_) {
         code = regs.takeAny();
-        masm.loadBaselineOrIonRaw(callee, code, SequentialExecution, &failure);
+        masm.loadBaselineOrIonRaw(callee, code, &failure);
     } else {
         Address scriptCode(callee, JSScript::offsetOfBaselineOrIonRaw());
         masm.branchPtr(Assembler::Equal, scriptCode, ImmPtr(nullptr), &failure);
     }
 
     // We no longer need R1.
     regs.add(R1);
 
@@ -10074,17 +10073,17 @@ ICCallScriptedCompiler::generateStubCode
             masm.loadValue(calleeSlot3, R0);
         }
         callee = masm.extractObject(R0, ExtractTemp0);
         regs.add(R0);
         regs.takeUnchecked(callee);
         masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
 
         code = regs.takeAny();
-        masm.loadBaselineOrIonRaw(callee, code, SequentialExecution, &failureLeaveStubFrame);
+        masm.loadBaselineOrIonRaw(callee, code, &failureLeaveStubFrame);
 
         // Release callee register, but don't add ExtractTemp0 back into the pool
         // ExtractTemp0 is used later, and if it's allocated to some other register at that
         // point, it will get clobbered when used.
         if (callee != ExtractTemp0)
             regs.add(callee);
 
         if (canUseTailCallReg)
@@ -10118,17 +10117,17 @@ ICCallScriptedCompiler::generateStubCode
     masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), callee);
     masm.branch32(Assembler::AboveOrEqual, argcReg, callee, &noUnderflow);
     {
         // Call the arguments rectifier.
         MOZ_ASSERT(ArgumentsRectifierReg != code);
         MOZ_ASSERT(ArgumentsRectifierReg != argcReg);
 
         JitCode *argumentsRectifier =
-            cx->runtime()->jitRuntime()->getArgumentsRectifier(SequentialExecution);
+            cx->runtime()->jitRuntime()->getArgumentsRectifier();
 
         masm.movePtr(ImmGCPtr(argumentsRectifier), code);
         masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
         masm.mov(argcReg, ArgumentsRectifierReg);
     }
 
     masm.bind(&noUnderflow);
 
@@ -10595,28 +10594,28 @@ ICCall_ScriptedApplyArray::Compiler::gen
 
     masm.Push(argcReg);
     masm.Push(target);
     masm.Push(scratch);
 
     // Load nargs into scratch for underflow check, and then load jitcode pointer into target.
     masm.load16ZeroExtend(Address(target, JSFunction::offsetOfNargs()), scratch);
     masm.loadPtr(Address(target, JSFunction::offsetOfNativeOrScript()), target);
-    masm.loadBaselineOrIonRaw(target, target, SequentialExecution, nullptr);
+    masm.loadBaselineOrIonRaw(target, target, nullptr);
 
     // Handle arguments underflow.
     Label noUnderflow;
     masm.branch32(Assembler::AboveOrEqual, argcReg, scratch, &noUnderflow);
     {
         // Call the arguments rectifier.
         MOZ_ASSERT(ArgumentsRectifierReg != target);
         MOZ_ASSERT(ArgumentsRectifierReg != argcReg);
 
         JitCode *argumentsRectifier =
-            cx->runtime()->jitRuntime()->getArgumentsRectifier(SequentialExecution);
+            cx->runtime()->jitRuntime()->getArgumentsRectifier();
 
         masm.movePtr(ImmGCPtr(argumentsRectifier), target);
         masm.loadPtr(Address(target, JitCode::offsetOfCode()), target);
         masm.mov(argcReg, ArgumentsRectifierReg);
     }
     masm.bind(&noUnderflow);
     regs.add(argcReg);
 
@@ -10696,28 +10695,28 @@ ICCall_ScriptedApplyArguments::Compiler:
     masm.loadPtr(Address(argcReg, BaselineFrame::offsetOfNumActualArgs()), argcReg);
     masm.Push(argcReg);
     masm.Push(target);
     masm.Push(scratch);
 
     // Load nargs into scratch for underflow check, and then load jitcode pointer into target.
     masm.load16ZeroExtend(Address(target, JSFunction::offsetOfNargs()), scratch);
     masm.loadPtr(Address(target, JSFunction::offsetOfNativeOrScript()), target);
-    masm.loadBaselineOrIonRaw(target, target, SequentialExecution, nullptr);
+    masm.loadBaselineOrIonRaw(target, target, nullptr);
 
     // Handle arguments underflow.
     Label noUnderflow;
     masm.branch32(Assembler::AboveOrEqual, argcReg, scratch, &noUnderflow);
     {
         // Call the arguments rectifier.
         MOZ_ASSERT(ArgumentsRectifierReg != target);
         MOZ_ASSERT(ArgumentsRectifierReg != argcReg);
 
         JitCode *argumentsRectifier =
-            cx->runtime()->jitRuntime()->getArgumentsRectifier(SequentialExecution);
+            cx->runtime()->jitRuntime()->getArgumentsRectifier();
 
         masm.movePtr(ImmGCPtr(argumentsRectifier), target);
         masm.loadPtr(Address(target, JitCode::offsetOfCode()), target);
         masm.mov(argcReg, ArgumentsRectifierReg);
     }
     masm.bind(&noUnderflow);
     regs.add(argcReg);
 
@@ -10776,17 +10775,17 @@ ICCall_ScriptedFunCall::Compiler::genera
 
     masm.branchTestObjClass(Assembler::NotEqual, callee, regs.getAny(), &JSFunction::class_,
                             &failure);
     masm.branchIfFunctionHasNoScript(callee, &failure);
     masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
 
     // Load the start of the target JitCode.
     Register code = regs.takeAny();
-    masm.loadBaselineOrIonRaw(callee, code, SequentialExecution, &failure);
+    masm.loadBaselineOrIonRaw(callee, code, &failure);
 
     // We no longer need R1.
     regs.add(R1);
 
     // Push a stub frame so that we can perform a non-tail call.
     enterStubFrame(masm, regs.getAny());
     if (canUseTailCallReg)
         regs.add(BaselineTailCallReg);
@@ -10829,17 +10828,17 @@ ICCall_ScriptedFunCall::Compiler::genera
     masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), callee);
     masm.branch32(Assembler::AboveOrEqual, argcReg, callee, &noUnderflow);
     {
         // Call the arguments rectifier.
         MOZ_ASSERT(ArgumentsRectifierReg != code);
         MOZ_ASSERT(ArgumentsRectifierReg != argcReg);
 
         JitCode *argumentsRectifier =
-            cx->runtime()->jitRuntime()->getArgumentsRectifier(SequentialExecution);
+            cx->runtime()->jitRuntime()->getArgumentsRectifier();
 
         masm.movePtr(ImmGCPtr(argumentsRectifier), code);
         masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
         masm.mov(argcReg, ArgumentsRectifierReg);
     }
 
     masm.bind(&noUnderflow);
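
Each BaselineIC.cpp hunk above makes the same two calls mode-free: loadBaselineOrIonRaw loses its ExecutionMode argument, and getArgumentsRectifier() no longer takes a mode to pick a rectifier stub. A small illustrative sketch of the latter, with a hypothetical, stripped-down JitRuntime rather than the real one declared in js/src/jit/JitCompartment.h:

#include <cassert>

// Hypothetical, stripped-down JitRuntime for illustration only.
struct JitCode {};

class JitRuntime {
    // Formerly one rectifier stub per ExecutionMode; now a single pointer.
    JitCode *argumentsRectifier_ = nullptr;

  public:
    void initArgumentsRectifier(JitCode *code) { argumentsRectifier_ = code; }

    // Before: JitCode *getArgumentsRectifier(ExecutionMode mode);
    // After: no mode argument, matching the rewritten call sites above.
    JitCode *getArgumentsRectifier() const { return argumentsRectifier_; }
};

int main() {
    JitRuntime rt;
    JitCode rectifierStub;
    rt.initArgumentsRectifier(&rectifierStub);
    assert(rt.getArgumentsRectifier() == &rectifierStub);
    return 0;
}
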
 
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -26,20 +26,17 @@
 #include "jit/IonCaches.h"
 #include "jit/IonOptimizationLevels.h"
 #include "jit/JitcodeMap.h"
 #include "jit/JitSpewer.h"
 #include "jit/Linker.h"
 #include "jit/Lowering.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MoveEmitter.h"
-#include "jit/ParallelFunctions.h"
-#include "jit/ParallelSafetyAnalysis.h"
 #include "jit/RangeAnalysis.h"
-#include "vm/ForkJoin.h"
 #include "vm/MatchPairs.h"
 #include "vm/RegExpStatics.h"
 #include "vm/TraceLogging.h"
 
 #include "jsboolinlines.h"
 
 #include "jit/ExecutionMode-inl.h"
 #include "jit/shared/CodeGenerator-shared-inl.h"
@@ -162,20 +159,17 @@ CodeGenerator::CodeGenerator(MIRGenerato
 
 CodeGenerator::~CodeGenerator()
 {
     MOZ_ASSERT_IF(!gen->compilingAsmJS(), masm.numAsmJSAbsoluteLinks() == 0);
     js_delete(scriptCounts_);
 }
 
 typedef bool (*StringToNumberFn)(ThreadSafeContext *, JSString *, double *);
-typedef bool (*StringToNumberParFn)(ForkJoinContext *, JSString *, double *);
-static const VMFunctionsModal StringToNumberInfo = VMFunctionsModal(
-    FunctionInfo<StringToNumberFn>(StringToNumber),
-    FunctionInfo<StringToNumberParFn>(StringToNumberPar));
+static const VMFunction StringToNumberInfo = FunctionInfo<StringToNumberFn>(StringToNumber);
 
 void
 CodeGenerator::visitValueToInt32(LValueToInt32 *lir)
 {
     ValueOperand operand = ToValue(lir, LValueToInt32::Input);
     Register output = ToRegister(lir->output());
     FloatRegister temp = ToFloatRegister(lir->tempFloat());
 
@@ -816,40 +810,34 @@ CodeGenerator::emitIntToString(Register 
     masm.branch32(Assembler::AboveOrEqual, input, Imm32(StaticStrings::INT_STATIC_LIMIT), ool);
 
     // Fast path for small integers.
     masm.movePtr(ImmPtr(&GetJitContext()->runtime->staticStrings().intStaticTable), output);
     masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
 }
 
 typedef JSFlatString *(*IntToStringFn)(ThreadSafeContext *, int);
-typedef JSFlatString *(*IntToStringParFn)(ForkJoinContext *, int);
-static const VMFunctionsModal IntToStringInfo = VMFunctionsModal(
-    FunctionInfo<IntToStringFn>(Int32ToString<CanGC>),
-    FunctionInfo<IntToStringParFn>(IntToStringPar));
+static const VMFunction IntToStringInfo = FunctionInfo<IntToStringFn>(Int32ToString<CanGC>);
 
 void
 CodeGenerator::visitIntToString(LIntToString *lir)
 {
     Register input = ToRegister(lir->input());
     Register output = ToRegister(lir->output());
 
     OutOfLineCode *ool = oolCallVM(IntToStringInfo, lir, (ArgList(), input),
                                    StoreRegisterTo(output));
 
     emitIntToString(input, output, ool->entry());
 
     masm.bind(ool->rejoin());
 }
 
 typedef JSString *(*DoubleToStringFn)(ThreadSafeContext *, double);
-typedef JSString *(*DoubleToStringParFn)(ForkJoinContext *, double);
-static const VMFunctionsModal DoubleToStringInfo = VMFunctionsModal(
-    FunctionInfo<DoubleToStringFn>(NumberToString<CanGC>),
-    FunctionInfo<DoubleToStringParFn>(DoubleToStringPar));
+static const VMFunction DoubleToStringInfo = FunctionInfo<DoubleToStringFn>(NumberToString<CanGC>);
 
 void
 CodeGenerator::visitDoubleToString(LDoubleToString *lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
     Register temp = ToRegister(lir->tempInt());
     Register output = ToRegister(lir->output());
 
@@ -859,20 +847,17 @@ CodeGenerator::visitDoubleToString(LDoub
     // Try double to integer conversion and run integer to string code.
     masm.convertDoubleToInt32(input, temp, ool->entry(), true);
     emitIntToString(temp, output, ool->entry());
 
     masm.bind(ool->rejoin());
 }
 
 typedef JSString *(*PrimitiveToStringFn)(JSContext *, HandleValue);
-typedef JSString *(*PrimitiveToStringParFn)(ForkJoinContext *, HandleValue);
-static const VMFunctionsModal PrimitiveToStringInfo = VMFunctionsModal(
-    FunctionInfo<PrimitiveToStringFn>(ToStringSlow),
-    FunctionInfo<PrimitiveToStringParFn>(PrimitiveToStringPar));
+static const VMFunction PrimitiveToStringInfo = FunctionInfo<PrimitiveToStringFn>(ToStringSlow);
 
 void
 CodeGenerator::visitValueToString(LValueToString *lir)
 {
     ValueOperand input = ToValue(lir, LValueToString::Input);
     Register output = ToRegister(lir->output());
 
     OutOfLineCode *ool = oolCallVM(PrimitiveToStringInfo, lir, (ArgList(), input),
@@ -1761,32 +1746,16 @@ CodeGenerator::emitLambdaInit(Register o
     masm.store32(Imm32(u.word), Address(output, JSFunction::offsetOfNargs()));
     masm.storePtr(ImmGCPtr(info.scriptOrLazyScript),
                   Address(output, JSFunction::offsetOfNativeOrScript()));
     masm.storePtr(scopeChain, Address(output, JSFunction::offsetOfEnvironment()));
     masm.storePtr(ImmGCPtr(info.fun->displayAtom()), Address(output, JSFunction::offsetOfAtom()));
 }
 
 void
-CodeGenerator::visitLambdaPar(LLambdaPar *lir)
-{
-    Register resultReg = ToRegister(lir->output());
-    Register cxReg = ToRegister(lir->forkJoinContext());
-    Register scopeChainReg = ToRegister(lir->scopeChain());
-    Register tempReg1 = ToRegister(lir->getTemp0());
-    Register tempReg2 = ToRegister(lir->getTemp1());
-    const LambdaFunctionInfo &info = lir->mir()->info();
-
-    MOZ_ASSERT(scopeChainReg != resultReg);
-
-    emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, info.fun);
-    emitLambdaInit(resultReg, scopeChainReg, info);
-}
-
-void
 CodeGenerator::visitLabel(LLabel *lir)
 {
 }
 
 void
 CodeGenerator::visitNop(LNop *lir)
 {
 }
@@ -1848,20 +1817,17 @@ class OutOfLineInterruptCheckImplicit : 
     { }
 
     void accept(CodeGenerator *codegen) {
         codegen->visitOutOfLineInterruptCheckImplicit(this);
     }
 };
 
 typedef bool (*InterruptCheckFn)(JSContext *);
-typedef bool (*InterruptCheckParFn)(ForkJoinContext *);
-static const VMFunctionsModal InterruptCheckInfo = VMFunctionsModal(
-    FunctionInfo<InterruptCheckFn>(InterruptCheck),
-    FunctionInfo<InterruptCheckParFn>(InterruptCheckPar));
+static const VMFunction InterruptCheckInfo = FunctionInfo<InterruptCheckFn>(InterruptCheck);
 
 void
 CodeGenerator::visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit *ool)
 {
 #ifdef CHECK_OSIPOINT_REGISTERS
     // This path is entered from the patched back-edge of the loop. This
     // means that the JitActivation flags used for checking the validity of the
     // OSI points are not reset by the path generated by generateBody, so we
@@ -2016,20 +1982,18 @@ CodeGenerator::visitReturn(LReturn *lir)
 void
 CodeGenerator::visitOsrEntry(LOsrEntry *lir)
 {
     // Remember the OSR entry offset into the code buffer.
     masm.flushBuffer();
     setOsrEntryOffset(masm.size());
 
 #ifdef JS_TRACE_LOGGING
-    if (gen->info().executionMode() == SequentialExecution) {
-        emitTracelogStopEvent(TraceLogger_Baseline);
-        emitTracelogStartEvent(TraceLogger_IonMonkey);
-    }
+    emitTracelogStopEvent(TraceLogger_Baseline);
+    emitTracelogStartEvent(TraceLogger_IonMonkey);
 #endif
 
     // Allocate the full frame for this function
     // Note we have a new entry here. So we reset MacroAssembler::framePushed()
     // to 0, before reserving the stack.
     MOZ_ASSERT(masm.framePushed() == frameSize());
     masm.setFramePushed(0);
     masm.reserveStack(frameSize());
@@ -2448,40 +2412,16 @@ CodeGenerator::visitMaybeCopyElementsFor
 void
 CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment *lir)
 {
     Address environment(ToRegister(lir->function()), JSFunction::offsetOfEnvironment());
     masm.loadPtr(environment, ToRegister(lir->output()));
 }
 
 void
-CodeGenerator::visitForkJoinContext(LForkJoinContext *lir)
-{
-    const Register tempReg = ToRegister(lir->getTempReg());
-
-    masm.setupUnalignedABICall(0, tempReg);
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ForkJoinContextPar));
-    MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
-}
-
-void
-CodeGenerator::visitGuardThreadExclusive(LGuardThreadExclusive *lir)
-{
-    MOZ_ASSERT(gen->info().executionMode() == ParallelExecution);
-
-    const Register tempReg = ToRegister(lir->getTempReg());
-    masm.setupUnalignedABICall(2, tempReg);
-    masm.passABIArg(ToRegister(lir->forkJoinContext()));
-    masm.passABIArg(ToRegister(lir->object()));
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParallelWriteGuard));
-
-    bailoutIfFalseBool(ReturnReg, lir->snapshot());
-}
-
-void
 CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity *guard)
 {
     Register obj = ToRegister(guard->input());
 
     Assembler::Condition cond =
         guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
     bailoutCmpPtr(cond, obj, ImmGCPtr(guard->mir()->singleObject()), guard->snapshot());
 }
@@ -2664,71 +2604,51 @@ CodeGenerator::visitCallNative(LCallNati
 
     // Misc. temporary registers.
     const Register tempReg = ToRegister(call->getTempReg());
 
     DebugOnly<uint32_t> initialStack = masm.framePushed();
 
     masm.checkStackAlignment();
 
-    // Sequential native functions have the signature:
+    // Native functions have the signature:
     //  bool (*)(JSContext *, unsigned, Value *vp)
-    // and parallel native functions have the signature:
-    //  ParallelResult (*)(ForkJoinContext *, unsigned, Value *vp)
     // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
     // are the function arguments.
 
     // Allocate space for the outparam, moving the StackPointer to what will be &vp[1].
     masm.adjustStack(unusedStack);
 
     // Push a Value containing the callee object: natives are allowed to access their callee before
     // setting the return value. The StackPointer is moved to &vp[0].
     masm.Push(ObjectValue(*target));
 
     // Preload arguments into registers.
-    //
-    // Note that for parallel execution, loadContext does an ABI call, so we
-    // need to do this before we load the other argument registers, otherwise
-    // we'll hose them.
-    ExecutionMode executionMode = gen->info().executionMode();
-    masm.loadContext(argContextReg, tempReg, executionMode);
+    masm.loadJSContext(argContextReg);
     masm.move32(Imm32(call->numStackArgs()), argUintNReg);
     masm.movePtr(StackPointer, argVpReg);
 
     masm.Push(argUintNReg);
 
     // Construct native exit frame.
     uint32_t safepointOffset;
     masm.buildFakeExitFrame(tempReg, &safepointOffset);
-    masm.enterFakeExitFrame(argContextReg, tempReg, executionMode,
-                            NativeExitFrameLayout::Token());
+    masm.enterFakeExitFrame(NativeExitFrameLayout::Token());
 
     markSafepointAt(safepointOffset, call);
 
     // Construct and execute call.
     masm.setupUnalignedABICall(3, tempReg);
     masm.passABIArg(argContextReg);
     masm.passABIArg(argUintNReg);
     masm.passABIArg(argVpReg);
-
-    switch (executionMode) {
-      case SequentialExecution:
-        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, target->native()));
-        break;
-
-      case ParallelExecution:
-        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, target->parallelNative()));
-        break;
-
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, target->native()));
 
     // Test for failure.
-    masm.branchIfFalseBool(ReturnReg, masm.failureLabel(executionMode));
+    masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
 
     // Load the outparam vp[0] into output register(s).
     masm.loadValue(Address(StackPointer, NativeExitFrameLayout::offsetOfResult()), JSReturnOperand);
 
     // The next instruction is removing the footer of the exit frame, so there
     // is no need for leaveFakeExitFrame.
 
     // Move the StackPointer back to its original location, unwinding the native exit frame.
@@ -2903,24 +2823,23 @@ CodeGenerator::emitCallInvokeFunction(LI
 
 void
 CodeGenerator::visitCallGeneric(LCallGeneric *call)
 {
     Register calleereg = ToRegister(call->getFunction());
     Register objreg    = ToRegister(call->getTempObject());
     Register nargsreg  = ToRegister(call->getNargsReg());
     uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
-    ExecutionMode executionMode = gen->info().executionMode();
     Label invoke, thunk, makeCall, end;
 
     // Known-target case is handled by LCallKnown.
     MOZ_ASSERT(!call->hasSingleTarget());
 
     // Generate an ArgumentsRectifier.
-    JitCode *argumentsRectifier = gen->jitRuntime()->getArgumentsRectifier(executionMode);
+    JitCode *argumentsRectifier = gen->jitRuntime()->getArgumentsRectifier();
 
     masm.checkStackAlignment();
 
     // Guard that calleereg is actually a function object.
     masm.loadObjClass(calleereg, nargsreg);
     masm.branchPtr(Assembler::NotEqual, nargsreg, ImmPtr(&JSFunction::class_), &invoke);
 
     // Guard that calleereg is an interpreted function with a JSScript.
@@ -2929,17 +2848,17 @@ CodeGenerator::visitCallGeneric(LCallGen
         masm.branchIfNotInterpretedConstructor(calleereg, nargsreg, &invoke);
     else
         masm.branchIfFunctionHasNoScript(calleereg, &invoke);
 
     // Knowing that calleereg is a non-native function, load the JSScript.
     masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg);
 
     // Load script jitcode.
-    masm.loadBaselineOrIonRaw(objreg, objreg, executionMode, &invoke);
+    masm.loadBaselineOrIonRaw(objreg, objreg, &invoke);
 
     // Nestle the StackPointer up to the argument vector.
     masm.freeStack(unusedStack);
 
     // Construct the IonFramePrefix.
     uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS);
     masm.Push(Imm32(call->numActualArgs()));
     masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
@@ -2967,65 +2886,39 @@ CodeGenerator::visitCallGeneric(LCallGen
     // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
     // The return address has already been removed from the Ion frame.
     int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void *);
     masm.adjustStack(prefixGarbage - unusedStack);
     masm.jump(&end);
 
     // Handle uncompiled or native functions.
     masm.bind(&invoke);
-    switch (executionMode) {
-      case SequentialExecution:
-        emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack);
-        break;
-
-      case ParallelExecution:
-        emitCallToUncompiledScriptPar(call, calleereg);
-        break;
-
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
+    emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack);
 
     masm.bind(&end);
 
     // If the return value of the constructing function is Primitive,
     // replace the return value with the Object from CreateThis.
     if (call->mir()->isConstructing()) {
         Label notPrimitive;
         masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand, &notPrimitive);
         masm.loadValue(Address(StackPointer, unusedStack), JSReturnOperand);
         masm.bind(&notPrimitive);
     }
 
     dropArguments(call->numStackArgs() + 1);
 }
 
-typedef bool (*CallToUncompiledScriptParFn)(ForkJoinContext *, JSObject *);
-static const VMFunction CallToUncompiledScriptParInfo =
-    FunctionInfo<CallToUncompiledScriptParFn>(CallToUncompiledScriptPar);
-
-// Generates a call to CallToUncompiledScriptPar() and then bails out.
-// |calleeReg| should contain the JSFunction*.
-void
-CodeGenerator::emitCallToUncompiledScriptPar(LInstruction *lir, Register calleeReg)
-{
-    pushArg(calleeReg);
-    callVM(CallToUncompiledScriptParInfo, lir);
-    masm.assumeUnreachable("CallToUncompiledScriptParInfo always returns false.");
-}
-
 void
 CodeGenerator::visitCallKnown(LCallKnown *call)
 {
     Register calleereg = ToRegister(call->getFunction());
     Register objreg    = ToRegister(call->getTempObject());
     uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
     DebugOnly<JSFunction *> target = call->getSingleTarget();
-    ExecutionMode executionMode = gen->info().executionMode();
     Label end, uncompiled;
 
     // Native single targets are handled by LCallNative.
     MOZ_ASSERT(!target->isNative());
     // Missing arguments must have been explicitly appended by the IonBuilder.
     MOZ_ASSERT(target->nargs() <= call->numStackArgs());
 
     MOZ_ASSERT_IF(call->mir()->isConstructing(), target->isInterpretedConstructor());
@@ -3036,19 +2929,19 @@ CodeGenerator::visitCallKnown(LCallKnown
     // a LazyScript instead of a JSScript.
     masm.branchIfFunctionHasNoScript(calleereg, &uncompiled);
 
     // Knowing that calleereg is a non-native function, load the JSScript.
     masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg);
 
     // Load script jitcode.
     if (call->mir()->needsArgCheck())
-        masm.loadBaselineOrIonRaw(objreg, objreg, executionMode, &uncompiled);
+        masm.loadBaselineOrIonRaw(objreg, objreg, &uncompiled);
     else
-        masm.loadBaselineOrIonNoArgCheck(objreg, objreg, executionMode, &uncompiled);
+        masm.loadBaselineOrIonNoArgCheck(objreg, objreg, &uncompiled);
 
     // Nestle the StackPointer up to the argument vector.
     masm.freeStack(unusedStack);
 
     // Construct the IonFramePrefix.
     uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS);
     masm.Push(Imm32(call->numActualArgs()));
     masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
@@ -3061,28 +2954,17 @@ CodeGenerator::visitCallKnown(LCallKnown
     // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
     // The return address has already been removed from the Ion frame.
     int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void *);
     masm.adjustStack(prefixGarbage - unusedStack);
     masm.jump(&end);
 
     // Handle uncompiled functions.
     masm.bind(&uncompiled);
-    switch (executionMode) {
-      case SequentialExecution:
-        emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack);
-        break;
-
-      case ParallelExecution:
-        emitCallToUncompiledScriptPar(call, calleereg);
-        break;
-
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
+    emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack);
 
     masm.bind(&end);
 
     // If the return value of the constructing function is Primitive,
     // replace the return value with the Object from CreateThis.
     if (call->mir()->isConstructing()) {
         Label notPrimitive;
         masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand, &notPrimitive);
@@ -3195,17 +3077,16 @@ CodeGenerator::visitApplyArgsGeneric(LAp
     }
 
     // Copy the arguments of the current function.
     emitPushArguments(apply, copyreg);
 
     masm.checkStackAlignment();
 
     // If the function is known to be uncompilable, only emit the call to InvokeFunction.
-    ExecutionMode executionMode = gen->info().executionMode();
     if (apply->hasSingleTarget()) {
         JSFunction *target = apply->getSingleTarget();
         if (target->isNative()) {
             emitCallInvokeFunction(apply, copyreg);
             emitPopArguments(apply, copyreg);
             return;
         }
     }
@@ -3219,17 +3100,17 @@ CodeGenerator::visitApplyArgsGeneric(LAp
         // Native single targets are handled by LCallNative.
         MOZ_ASSERT(!apply->getSingleTarget()->isNative());
     }
 
     // Knowing that calleereg is a non-native function, load the JSScript.
     masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg);
 
     // Load script jitcode.
-    masm.loadBaselineOrIonRaw(objreg, objreg, executionMode, &invoke);
+    masm.loadBaselineOrIonRaw(objreg, objreg, &invoke);
 
     // Call with an Ion frame or a rectifier frame.
     {
         // Create the frame descriptor.
         unsigned pushed = masm.framePushed();
         masm.addPtr(Imm32(pushed), copyreg);
         masm.makeFrameDescriptor(copyreg, JitFrame_IonJS);
 
@@ -3252,17 +3133,17 @@ CodeGenerator::visitApplyArgsGeneric(LAp
         // underflow.
         masm.jump(&rejoin);
 
         // Argument fixup needed. Get ready to call the argumentsRectifier.
         {
             masm.bind(&underflow);
 
             // Hardcode the address of the argumentsRectifier code.
-            JitCode *argumentsRectifier = gen->jitRuntime()->getArgumentsRectifier(executionMode);
+            JitCode *argumentsRectifier = gen->jitRuntime()->getArgumentsRectifier();
 
             MOZ_ASSERT(ArgumentsRectifierReg != objreg);
             masm.movePtr(ImmGCPtr(argumentsRectifier), objreg); // Necessary for GC marking.
             masm.loadPtr(Address(objreg, JitCode::offsetOfCode()), objreg);
             masm.movePtr(argcreg, ArgumentsRectifierReg);
         }
 
         masm.bind(&rejoin);
@@ -3548,20 +3429,18 @@ CodeGenerator::visitDefFun(LDefFun *lir)
     pushArg(ImmGCPtr(lir->mir()->fun()));
     pushArg(scopeChain);
     pushArg(ImmGCPtr(current->mir()->info().script()));
 
     callVM(DefFunOperationInfo, lir);
 }
 
 typedef bool (*CheckOverRecursedFn)(JSContext *);
-typedef bool (*CheckOverRecursedParFn)(ForkJoinContext *);
-static const VMFunctionsModal CheckOverRecursedInfo = VMFunctionsModal(
-    FunctionInfo<CheckOverRecursedFn>(CheckOverRecursed),
-    FunctionInfo<CheckOverRecursedParFn>(CheckOverRecursedPar));
+static const VMFunction CheckOverRecursedInfo =
+    FunctionInfo<CheckOverRecursedFn>(CheckOverRecursed);
 
 void
 CodeGenerator::visitCheckOverRecursedFailure(CheckOverRecursedFailure *ool)
 {
     // The OOL path is hit if the recursion depth has been exceeded.
     // Throw an InternalError for over-recursion.
 
     // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
@@ -3570,52 +3449,16 @@ CodeGenerator::visitCheckOverRecursedFai
     saveLive(ool->lir());
 
     callVM(CheckOverRecursedInfo, ool->lir());
 
     restoreLive(ool->lir());
     masm.jump(ool->rejoin());
 }
 
-void
-CodeGenerator::visitCheckOverRecursedPar(LCheckOverRecursedPar *lir)
-{
-    // See above: unlike visitCheckOverRecursed(), this code runs in
-    // parallel mode and hence uses the jitStackLimit from the current
-    // thread state.  Also, we must check the interrupt flags because
-    // on interrupt or abort, only the stack limit for the main thread
-    // is reset, not the worker threads.  See comment in vm/ForkJoin.h
-    // for more details.
-
-    Register cxReg = ToRegister(lir->forkJoinContext());
-    Register tempReg = ToRegister(lir->getTempReg());
-
-    masm.loadPtr(Address(cxReg, offsetof(ForkJoinContext, perThreadData)), tempReg);
-    masm.loadPtr(Address(tempReg, PerThreadData::offsetOfJitStackLimit()), tempReg);
-
-    // Conditional forward (unlikely) branch to failure.
-    CheckOverRecursedFailure *ool = new(alloc()) CheckOverRecursedFailure(lir);
-    addOutOfLineCode(ool, lir->mir());
-
-    masm.branchPtr(Assembler::BelowOrEqual, StackPointer, tempReg, ool->entry());
-    masm.checkInterruptFlagPar(tempReg, ool->entry());
-    masm.bind(ool->rejoin());
-}
-
-void
-CodeGenerator::visitInterruptCheckPar(LInterruptCheckPar *lir)
-{
-    // First check for cx->shared->interrupt_.
-    OutOfLineCode *ool = oolCallVM(InterruptCheckInfo, lir, (ArgList()), StoreNothing());
-
-    Register tempReg = ToRegister(lir->getTempReg());
-    masm.checkInterruptFlagPar(tempReg, ool->entry());
-    masm.bind(ool->rejoin());
-}
-
 IonScriptCounts *
 CodeGenerator::maybeCreateScriptCounts()
 {
     // If scripts are being profiled, create a new IonScriptCounts for the
     // profiling data, which will be attached to the associated JSScript or
     // AsmJS module after code generation finishes.
     if (!GetJitContext()->runtime->profilingScripts())
         return nullptr;
@@ -3768,44 +3611,42 @@ CodeGenerator::emitObjectOrStringResultC
 
         masm.bind(&miss);
         masm.assumeUnreachable("MIR instruction returned object with unexpected type");
 
         masm.bind(&ok);
     }
 
     // Check that we have a valid GC pointer.
-    if (gen->info().executionMode() != ParallelExecution) {
-        saveVolatile();
-        masm.setupUnalignedABICall(2, temp);
-        masm.loadJSContext(temp);
-        masm.passABIArg(temp);
-        masm.passABIArg(output);
-
-        void *callee;
-        switch (mir->type()) {
-          case MIRType_Object:
-            callee = JS_FUNC_TO_DATA_PTR(void *, AssertValidObjectPtr);
-            break;
-          case MIRType_ObjectOrNull:
-            callee = JS_FUNC_TO_DATA_PTR(void *, AssertValidObjectOrNullPtr);
-            break;
-          case MIRType_String:
-            callee = JS_FUNC_TO_DATA_PTR(void *, AssertValidStringPtr);
-            break;
-          case MIRType_Symbol:
-            callee = JS_FUNC_TO_DATA_PTR(void *, AssertValidSymbolPtr);
-            break;
-          default:
-            MOZ_CRASH();
-        }
-
-        masm.callWithABINoProfiling(callee);
-        restoreVolatile();
-    }
+    saveVolatile();
+    masm.setupUnalignedABICall(2, temp);
+    masm.loadJSContext(temp);
+    masm.passABIArg(temp);
+    masm.passABIArg(output);
+
+    void *callee;
+    switch (mir->type()) {
+      case MIRType_Object:
+        callee = JS_FUNC_TO_DATA_PTR(void *, AssertValidObjectPtr);
+        break;
+      case MIRType_ObjectOrNull:
+        callee = JS_FUNC_TO_DATA_PTR(void *, AssertValidObjectOrNullPtr);
+        break;
+      case MIRType_String:
+        callee = JS_FUNC_TO_DATA_PTR(void *, AssertValidStringPtr);
+        break;
+      case MIRType_Symbol:
+        callee = JS_FUNC_TO_DATA_PTR(void *, AssertValidSymbolPtr);
+        break;
+      default:
+        MOZ_CRASH();
+    }
+
+    masm.callWithABINoProfiling(callee);
+    restoreVolatile();
 
     masm.bind(&done);
     masm.pop(temp);
 }
 
 void
 CodeGenerator::emitValueResultChecks(LInstruction *lir, MDefinition *mir)
 {
@@ -3839,30 +3680,28 @@ CodeGenerator::emitValueResultChecks(LIn
 
         masm.bind(&miss);
         masm.assumeUnreachable("MIR instruction returned value with unexpected type");
 
         masm.bind(&ok);
     }
 
     // Check that we have a valid GC pointer.
-    if (gen->info().executionMode() != ParallelExecution) {
-        saveVolatile();
-
-        masm.pushValue(output);
-        masm.movePtr(StackPointer, temp1);
-
-        masm.setupUnalignedABICall(2, temp2);
-        masm.loadJSContext(temp2);
-        masm.passABIArg(temp2);
-        masm.passABIArg(temp1);
-        masm.callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, AssertValidValue));
-        masm.popValue(output);
-        restoreVolatile();
-    }
+    saveVolatile();
+
+    masm.pushValue(output);
+    masm.movePtr(StackPointer, temp1);
+
+    masm.setupUnalignedABICall(2, temp2);
+    masm.loadJSContext(temp2);
+    masm.passABIArg(temp2);
+    masm.passABIArg(temp1);
+    masm.callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, AssertValidValue));
+    masm.popValue(output);
+    restoreVolatile();
 
     masm.bind(&done);
     masm.pop(temp2);
     masm.pop(temp1);
 }
 
 void
 CodeGenerator::emitDebugResultChecks(LInstruction *ins)
@@ -4008,18 +3847,16 @@ class OutOfLineNewArray : public OutOfLi
 
 typedef ArrayObject *(*NewDenseArrayFn)(ExclusiveContext *, uint32_t, HandleTypeObject,
                                         AllocatingBehaviour);
 static const VMFunction NewDenseArrayInfo = FunctionInfo<NewDenseArrayFn>(NewDenseArray);
 
 void
 CodeGenerator::visitNewArrayCallVM(LNewArray *lir)
 {
-    MOZ_ASSERT(gen->info().executionMode() == SequentialExecution);
-
     Register objReg = ToRegister(lir->output());
 
     MOZ_ASSERT(!lir->isCall());
     saveLive(lir);
 
     JSObject *templateObject = lir->mir()->templateObject();
     types::TypeObject *type =
         templateObject->hasSingletonType() ? nullptr : templateObject->type();
@@ -4041,19 +3878,16 @@ typedef JSObject *(*NewDerivedTypedObjec
                                              HandleObject owner,
                                              int32_t offset);
 static const VMFunction CreateDerivedTypedObjInfo =
     FunctionInfo<NewDerivedTypedObjectFn>(CreateDerivedTypedObj);
 
 void
 CodeGenerator::visitNewDerivedTypedObject(LNewDerivedTypedObject *lir)
 {
-    // Not yet made safe for par exec:
-    MOZ_ASSERT(gen->info().executionMode() == SequentialExecution);
-
     pushArg(ToRegister(lir->offset()));
     pushArg(ToRegister(lir->owner()));
     pushArg(ToRegister(lir->type()));
     callVM(CreateDerivedTypedObjInfo, lir);
 }
 
 void
 CodeGenerator::visitAtan2D(LAtan2D *lir)
@@ -4083,17 +3917,16 @@ CodeGenerator::visitHypot(LHypot *lir)
     masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ecmaHypot), MoveOp::DOUBLE);
 
     MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg);
 }
 
 void
 CodeGenerator::visitNewArray(LNewArray *lir)
 {
-    MOZ_ASSERT(gen->info().executionMode() == SequentialExecution);
     Register objReg = ToRegister(lir->output());
     Register tempReg = ToRegister(lir->temp());
     ArrayObject *templateObject = lir->mir()->templateObject();
     DebugOnly<uint32_t> count = lir->mir()->count();
 
     MOZ_ASSERT(count < NativeObject::NELEMENTS_LIMIT);
 
     if (lir->mir()->shouldUseVM()) {
@@ -4199,18 +4032,16 @@ static const VMFunction NewInitObjectInf
 
 typedef JSObject *(*NewInitObjectWithClassPrototypeFn)(JSContext *, HandlePlainObject);
 static const VMFunction NewInitObjectWithClassPrototypeInfo =
     FunctionInfo<NewInitObjectWithClassPrototypeFn>(NewInitObjectWithClassPrototype);
 
 void
 CodeGenerator::visitNewObjectVMCall(LNewObject *lir)
 {
-    MOZ_ASSERT(gen->info().executionMode() == SequentialExecution);
-
     Register objReg = ToRegister(lir->output());
 
     MOZ_ASSERT(!lir->isCall());
     saveLive(lir);
 
     pushArg(ImmGCPtr(lir->mir()->templateObject()));
 
     // If we're making a new object with a class prototype (that is, an object
@@ -4310,17 +4141,16 @@ ShouldInitFixedSlots(LInstruction *lir, 
     }
 
     MOZ_CRASH("Shouldn't get here");
 }
 
 void
 CodeGenerator::visitNewObject(LNewObject *lir)
 {
-    MOZ_ASSERT(gen->info().executionMode() == SequentialExecution);
     Register objReg = ToRegister(lir->output());
     Register tempReg = ToRegister(lir->temp());
     PlainObject *templateObject = lir->mir()->templateObject();
 
     if (lir->mir()->shouldUseVM()) {
         visitNewObjectVMCall(lir);
         return;
     }
@@ -4437,55 +4267,16 @@ CodeGenerator::visitNewSingletonCallObje
 
     // Objects can only be given singleton types in VM calls.  We make the call
     // out of line to not bloat inline code, even if (naively) this seems like
     // extra work.
     masm.jump(ool->entry());
     masm.bind(ool->rejoin());
 }
 
-void
-CodeGenerator::visitNewCallObjectPar(LNewCallObjectPar *lir)
-{
-    Register resultReg = ToRegister(lir->output());
-    Register cxReg = ToRegister(lir->forkJoinContext());
-    Register tempReg1 = ToRegister(lir->getTemp0());
-    Register tempReg2 = ToRegister(lir->getTemp1());
-    CallObject *templateObj = lir->mir()->templateObj();
-
-    emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, templateObj);
-}
-
-typedef ArrayObject *(*ExtendArrayParFn)(ForkJoinContext*, ArrayObject*, uint32_t);
-static const VMFunction ExtendArrayParInfo =
-    FunctionInfo<ExtendArrayParFn>(ExtendArrayPar);
-
-void
-CodeGenerator::visitNewDenseArrayPar(LNewDenseArrayPar *lir)
-{
-    Register cxReg = ToRegister(lir->forkJoinContext());
-    Register lengthReg = ToRegister(lir->length());
-    Register tempReg0 = ToRegister(lir->getTemp0());
-    Register tempReg1 = ToRegister(lir->getTemp1());
-    Register tempReg2 = ToRegister(lir->getTemp2());
-    ArrayObject *templateObj = lir->mir()->templateObject();
-
-    emitAllocateGCThingPar(lir, tempReg2, cxReg, tempReg0, tempReg1, templateObj);
-
-    // Invoke a C helper to allocate the elements.  The helper returns
-    // nullptr on allocation error or the array object.
-
-    saveLive(lir);
-    pushArg(lengthReg);
-    pushArg(tempReg2);
-    callVM(ExtendArrayParInfo, lir);
-    storeResultTo(ToRegister(lir->output()));
-    restoreLive(lir);
-}
-
 typedef JSObject *(*NewStringObjectFn)(JSContext *, HandleString);
 static const VMFunction NewStringObjectInfo = FunctionInfo<NewStringObjectFn>(NewStringObject);
 
 void
 CodeGenerator::visitNewStringObject(LNewStringObject *lir)
 {
     Register input = ToRegister(lir->input());
     Register output = ToRegister(lir->output());
@@ -4501,86 +4292,16 @@ CodeGenerator::visitNewStringObject(LNew
     masm.loadStringLength(input, temp);
 
     masm.storeValue(JSVAL_TYPE_STRING, input, Address(output, StringObject::offsetOfPrimitiveValue()));
     masm.storeValue(JSVAL_TYPE_INT32, temp, Address(output, StringObject::offsetOfLength()));
 
     masm.bind(ool->rejoin());
 }
 
-void
-CodeGenerator::visitNewPar(LNewPar *lir)
-{
-    Register objReg = ToRegister(lir->output());
-    Register cxReg = ToRegister(lir->forkJoinContext());
-    Register tempReg1 = ToRegister(lir->getTemp0());
-    Register tempReg2 = ToRegister(lir->getTemp1());
-    NativeObject *templateObject = lir->mir()->templateObject();
-    emitAllocateGCThingPar(lir, objReg, cxReg, tempReg1, tempReg2, templateObject);
-}
-
-class OutOfLineNewGCThingPar : public OutOfLineCodeBase<CodeGenerator>
-{
-public:
-    LInstruction *lir;
-    gc::AllocKind allocKind;
-    Register objReg;
-    Register cxReg;
-
-    OutOfLineNewGCThingPar(LInstruction *lir, gc::AllocKind allocKind, Register objReg,
-                           Register cxReg)
-      : lir(lir), allocKind(allocKind), objReg(objReg), cxReg(cxReg)
-    {}
-
-    void accept(CodeGenerator *codegen) {
-        codegen->visitOutOfLineNewGCThingPar(this);
-    }
-};
-
-typedef JSObject *(*NewGCThingParFn)(ForkJoinContext *, js::gc::AllocKind allocKind);
-static const VMFunction NewGCThingParInfo =
-    FunctionInfo<NewGCThingParFn>(NewGCThingPar);
-
-void
-CodeGenerator::emitAllocateGCThingPar(LInstruction *lir, Register objReg, Register cxReg,
-                                      Register tempReg1, Register tempReg2, NativeObject *templateObj)
-{
-    MOZ_ASSERT(lir->mirRaw());
-    MOZ_ASSERT(lir->mirRaw()->isInstruction());
-
-    gc::AllocKind allocKind = templateObj->asTenured().getAllocKind();
-    OutOfLineNewGCThingPar *ool = new(alloc()) OutOfLineNewGCThingPar(lir, allocKind, objReg, cxReg);
-    addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
-
-    masm.newGCThingPar(objReg, cxReg, tempReg1, tempReg2, templateObj, ool->entry());
-    masm.bind(ool->rejoin());
-    masm.initGCThing(objReg, tempReg1, templateObj);
-}
-
-void
-CodeGenerator::visitOutOfLineNewGCThingPar(OutOfLineNewGCThingPar *ool)
-{
-    // As a fallback for allocation in par. exec. mode, we invoke the
-    // C helper NewGCThingPar(), which calls into the GC code.  If it
-    // returns nullptr, we bail.  If returns non-nullptr, we rejoin the
-    // original instruction.
-    Register out = ool->objReg;
-
-    saveVolatile(out);
-    masm.setupUnalignedABICall(2, out);
-    masm.passABIArg(ool->cxReg);
-    masm.move32(Imm32(ool->allocKind), out);
-    masm.passABIArg(out);
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, NewGCThingPar));
-    masm.storeCallResult(out);
-    restoreVolatile(out);
-
-    bailoutTestPtr(Assembler::Zero, out, out, ool->lir->snapshot());
-}
-
 typedef bool(*InitElemFn)(JSContext *cx, HandleObject obj,
                           HandleValue id, HandleValue value);
 static const VMFunction InitElemInfo =
     FunctionInfo<InitElemFn>(InitElemOperation);
 
 void
 CodeGenerator::visitInitElem(LInitElem *lir)
 {
@@ -5182,26 +4903,23 @@ CodeGenerator::visitModD(LModD *ins)
 
     if (gen->compilingAsmJS())
         masm.callWithABI(AsmJSImm_ModD, MoveOp::DOUBLE);
     else
         masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, NumberMod), MoveOp::DOUBLE);
 }
 
 typedef bool (*BinaryFn)(JSContext *, MutableHandleValue, MutableHandleValue, MutableHandleValue);
-typedef bool (*BinaryParFn)(ForkJoinContext *, HandleValue, HandleValue, MutableHandleValue);
 
 static const VMFunction AddInfo = FunctionInfo<BinaryFn>(js::AddValues);
 static const VMFunction SubInfo = FunctionInfo<BinaryFn>(js::SubValues);
 static const VMFunction MulInfo = FunctionInfo<BinaryFn>(js::MulValues);
 static const VMFunction DivInfo = FunctionInfo<BinaryFn>(js::DivValues);
 static const VMFunction ModInfo = FunctionInfo<BinaryFn>(js::ModValues);
-static const VMFunctionsModal UrshInfo = VMFunctionsModal(
-    FunctionInfo<BinaryFn>(js::UrshValues),
-    FunctionInfo<BinaryParFn>(UrshValuesPar));
+static const VMFunction UrshInfo = FunctionInfo<BinaryFn>(js::UrshValues);
 
 void
 CodeGenerator::visitBinaryV(LBinaryV *lir)
 {
     pushArg(ToValue(lir, LBinaryV::RhsInput));
     pushArg(ToValue(lir, LBinaryV::LhsInput));
 
     switch (lir->jsop()) {
@@ -5230,23 +4948,20 @@ CodeGenerator::visitBinaryV(LBinaryV *li
         break;
 
       default:
         MOZ_CRASH("Unexpected binary op");
     }
 }
 
 typedef bool (*StringCompareFn)(JSContext *, HandleString, HandleString, bool *);
-typedef bool (*StringCompareParFn)(ForkJoinContext *, HandleString, HandleString, bool *);
-static const VMFunctionsModal StringsEqualInfo = VMFunctionsModal(
-    FunctionInfo<StringCompareFn>(jit::StringsEqual<true>),
-    FunctionInfo<StringCompareParFn>(jit::StringsEqualPar));
-static const VMFunctionsModal StringsNotEqualInfo = VMFunctionsModal(
-    FunctionInfo<StringCompareFn>(jit::StringsEqual<false>),
-    FunctionInfo<StringCompareParFn>(jit::StringsUnequalPar));
+static const VMFunction StringsEqualInfo =
+    FunctionInfo<StringCompareFn>(jit::StringsEqual<true>);
+static const VMFunction StringsNotEqualInfo =
+    FunctionInfo<StringCompareFn>(jit::StringsEqual<false>);
 
 void
 CodeGenerator::emitCompareS(LInstruction *lir, JSOp op, Register left, Register right,
                             Register output)
 {
     MOZ_ASSERT(lir->isCompareS() || lir->isCompareStrictS());
 
     OutOfLineCode *ool = nullptr;
@@ -5294,41 +5009,24 @@ CodeGenerator::visitCompareS(LCompareS *
     Register left = ToRegister(lir->left());
     Register right = ToRegister(lir->right());
     Register output = ToRegister(lir->output());
 
     emitCompareS(lir, op, left, right, output);
 }
 
 typedef bool (*CompareFn)(JSContext *, MutableHandleValue, MutableHandleValue, bool *);
-typedef bool (*CompareParFn)(ForkJoinContext *, MutableHandleValue, MutableHandleValue, bool *);
-static const VMFunctionsModal EqInfo = VMFunctionsModal(
-    FunctionInfo<CompareFn>(jit::LooselyEqual<true>),
-    FunctionInfo<CompareParFn>(jit::LooselyEqualPar));
-static const VMFunctionsModal NeInfo = VMFunctionsModal(
-    FunctionInfo<CompareFn>(jit::LooselyEqual<false>),
-    FunctionInfo<CompareParFn>(jit::LooselyUnequalPar));
-static const VMFunctionsModal StrictEqInfo = VMFunctionsModal(
-    FunctionInfo<CompareFn>(jit::StrictlyEqual<true>),
-    FunctionInfo<CompareParFn>(jit::StrictlyEqualPar));
-static const VMFunctionsModal StrictNeInfo = VMFunctionsModal(
-    FunctionInfo<CompareFn>(jit::StrictlyEqual<false>),
-    FunctionInfo<CompareParFn>(jit::StrictlyUnequalPar));
-static const VMFunctionsModal LtInfo = VMFunctionsModal(
-    FunctionInfo<CompareFn>(jit::LessThan),
-    FunctionInfo<CompareParFn>(jit::LessThanPar));
-static const VMFunctionsModal LeInfo = VMFunctionsModal(
-    FunctionInfo<CompareFn>(jit::LessThanOrEqual),
-    FunctionInfo<CompareParFn>(jit::LessThanOrEqualPar));
-static const VMFunctionsModal GtInfo = VMFunctionsModal(
-    FunctionInfo<CompareFn>(jit::GreaterThan),
-    FunctionInfo<CompareParFn>(jit::GreaterThanPar));
-static const VMFunctionsModal GeInfo = VMFunctionsModal(
-    FunctionInfo<CompareFn>(jit::GreaterThanOrEqual),
-    FunctionInfo<CompareParFn>(jit::GreaterThanOrEqualPar));
+static const VMFunction EqInfo = FunctionInfo<CompareFn>(jit::LooselyEqual<true>);
+static const VMFunction NeInfo = FunctionInfo<CompareFn>(jit::LooselyEqual<false>);
+static const VMFunction StrictEqInfo = FunctionInfo<CompareFn>(jit::StrictlyEqual<true>);
+static const VMFunction StrictNeInfo = FunctionInfo<CompareFn>(jit::StrictlyEqual<false>);
+static const VMFunction LtInfo = FunctionInfo<CompareFn>(jit::LessThan);
+static const VMFunction LeInfo = FunctionInfo<CompareFn>(jit::LessThanOrEqual);
+static const VMFunction GtInfo = FunctionInfo<CompareFn>(jit::GreaterThan);
+static const VMFunction GeInfo = FunctionInfo<CompareFn>(jit::GreaterThanOrEqual);
 
 void
 CodeGenerator::visitCompareVM(LCompareVM *lir)
 {
     pushArg(ToValue(lir, LBinaryV::RhsInput));
     pushArg(ToValue(lir, LBinaryV::LhsInput));
 
     switch (lir->mir()->jsop()) {
@@ -5579,29 +5277,25 @@ CodeGenerator::visitEmulatesUndefinedAnd
     }
 
     Register objreg = ToRegister(lir->input());
 
     testObjectEmulatesUndefined(objreg, equal, unequal, ToRegister(lir->temp()), ool);
 }
 
 typedef JSString *(*ConcatStringsFn)(ThreadSafeContext *, HandleString, HandleString);
-typedef JSString *(*ConcatStringsParFn)(ForkJoinContext *, HandleString, HandleString);
-static const VMFunctionsModal ConcatStringsInfo = VMFunctionsModal(
-    FunctionInfo<ConcatStringsFn>(ConcatStrings<CanGC>),
-    FunctionInfo<ConcatStringsParFn>(ConcatStringsPar));
+static const VMFunction ConcatStringsInfo = FunctionInfo<ConcatStringsFn>(ConcatStrings<CanGC>);
 
 void
 CodeGenerator::emitConcat(LInstruction *lir, Register lhs, Register rhs, Register output)
 {
     OutOfLineCode *ool = oolCallVM(ConcatStringsInfo, lir, (ArgList(), lhs, rhs),
                                    StoreRegisterTo(output));
 
-    ExecutionMode mode = gen->info().executionMode();
-    JitCode *stringConcatStub = gen->compartment->jitCompartment()->stringConcatStubNoBarrier(mode);
+    JitCode *stringConcatStub = gen->compartment->jitCompartment()->stringConcatStubNoBarrier();
     masm.call(stringConcatStub);
     masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());
 
     masm.bind(ool->rejoin());
 }
 
 void
 CodeGenerator::visitConcat(LConcat *lir)
@@ -5618,36 +5312,16 @@ CodeGenerator::visitConcat(LConcat *lir)
     MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg2);
     MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg3);
     MOZ_ASSERT(ToRegister(lir->temp5()) == CallTempReg4);
     MOZ_ASSERT(output == CallTempReg5);
 
     emitConcat(lir, lhs, rhs, output);
 }
 
-void
-CodeGenerator::visitConcatPar(LConcatPar *lir)
-{
-    DebugOnly<Register> cx = ToRegister(lir->forkJoinContext());
-    Register lhs = ToRegister(lir->lhs());
-    Register rhs = ToRegister(lir->rhs());
-    Register output = ToRegister(lir->output());
-
-    MOZ_ASSERT(lhs == CallTempReg0);
-    MOZ_ASSERT(rhs == CallTempReg1);
-    MOZ_ASSERT((Register)cx == CallTempReg4);
-    MOZ_ASSERT(ToRegister(lir->temp1()) == CallTempReg0);
-    MOZ_ASSERT(ToRegister(lir->temp2()) == CallTempReg1);
-    MOZ_ASSERT(ToRegister(lir->temp3()) == CallTempReg2);
-    MOZ_ASSERT(ToRegister(lir->temp4()) == CallTempReg3);
-    MOZ_ASSERT(output == CallTempReg5);
-
-    emitConcat(lir, lhs, rhs, output);
-}
-
 static void
 CopyStringChars(MacroAssembler &masm, Register to, Register from, Register len,
                 Register byteOpScratch, size_t fromWidth, size_t toWidth)
 {
     // Copy |len| char16_t code units from |from| to |to|. Assumes len > 0
     // (checked below in debug builds), and when done |to| must point to the
     // next available char.
 
@@ -5698,58 +5372,39 @@ CopyStringCharsMaybeInflate(MacroAssembl
         masm.loadStringChars(input, input);
         CopyStringChars(masm, destChars, input, temp1, temp2, sizeof(char), sizeof(char16_t));
     }
     masm.bind(&done);
 }
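For reference, the contract the two copy helpers above implement is, in scalar terms, roughly the following. This is a hedged standalone sketch with plain pointers in place of registers; widths of 1 and 2 bytes correspond to Latin1 and two-byte strings.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Copy |len| code units (len > 0) from |from| to |to|, inflating 1-byte
    // Latin1 units to 2-byte char16_t units when fromWidth < toWidth; on return
    // |to| points at the next available unit, mirroring the register-level
    // contract of the helpers above.
    static void CopyCharsSketch(uint8_t *&to, const uint8_t *from, size_t len,
                                size_t fromWidth, size_t toWidth)
    {
        for (size_t i = 0; i < len; i++) {
            uint16_t unit = uint16_t(from[i * fromWidth]);
            if (fromWidth == 2)
                std::memcpy(&unit, from + i * 2, 2);   // native-endian two-byte read
            if (toWidth == 1)
                *to = uint8_t(unit);
            else
                std::memcpy(to, &unit, 2);             // native-endian two-byte write
            to += toWidth;
        }
    }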
 
 static void
 ConcatFatInlineString(MacroAssembler &masm, Register lhs, Register rhs, Register output,
-                      Register temp1, Register temp2, Register temp3, Register forkJoinContext,
-                      ExecutionMode mode, Label *failure, Label *failurePopTemps, bool isTwoByte)
+                      Register temp1, Register temp2, Register temp3,
+                      Label *failure, Label *failurePopTemps, bool isTwoByte)
 {
     // State: result length in temp2.
 
     // Ensure both strings are linear.
     masm.branchIfRope(lhs, failure);
     masm.branchIfRope(rhs, failure);
 
     // Allocate a JSFatInlineString.
-    switch (mode) {
-      case SequentialExecution:
-        masm.newGCFatInlineString(output, temp1, failure);
-        break;
-      case ParallelExecution:
-        masm.push(temp1);
-        masm.push(temp2);
-        masm.newGCFatInlineStringPar(output, forkJoinContext, temp1, temp2, failurePopTemps);
-        masm.pop(temp2);
-        masm.pop(temp1);
-        break;
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
+    masm.newGCFatInlineString(output, temp1, failure);
 
     // Store length and flags.
     uint32_t flags = JSString::INIT_FAT_INLINE_FLAGS;
     if (!isTwoByte)
         flags |= JSString::LATIN1_CHARS_BIT;
     masm.store32(Imm32(flags), Address(output, JSString::offsetOfFlags()));
     masm.store32(temp2, Address(output, JSString::offsetOfLength()));
 
     // Load chars pointer in temp2.
     masm.computeEffectiveAddress(Address(output, JSInlineString::offsetOfInlineStorage()), temp2);
 
     {
-        // We use temp3 in this block, which in parallel execution also holds
-        // a live ForkJoinContext pointer. If we are compiling for parallel
-        // execution, be sure to save and restore the ForkJoinContext.
-        if (mode == ParallelExecution)
-            masm.push(temp3);
-
         // Copy lhs chars. Note that this advances temp2 to point to the next
         // char. This also clobbers the lhs register.
         if (isTwoByte) {
             CopyStringCharsMaybeInflate(masm, lhs, temp2, temp1, temp3);
         } else {
             masm.loadStringLength(lhs, temp3);
             masm.loadStringChars(lhs, lhs);
             CopyStringChars(masm, temp2, lhs, temp3, temp1, sizeof(char), sizeof(char));
@@ -5764,19 +5419,16 @@ ConcatFatInlineString(MacroAssembler &ma
             CopyStringChars(masm, temp2, rhs, temp3, temp1, sizeof(char), sizeof(char));
         }
 
         // Null-terminate.
         if (isTwoByte)
             masm.store16(Imm32(0), Address(temp2, 0));
         else
             masm.store8(Imm32(0), Address(temp2, 0));
-
-        if (mode == ParallelExecution)
-            masm.pop(temp3);
     }
 
     masm.ret();
 }
 
 typedef JSString *(*SubstringKernelFn)(JSContext *cx, HandleString str, int32_t begin, int32_t len);
 static const VMFunction SubstringKernelInfo =
     FunctionInfo<SubstringKernelFn>(SubstringKernel);
@@ -5891,32 +5543,27 @@ CodeGenerator::visitSubstr(LSubstr *lir)
         masm.storePtr(temp, Address(output, JSString::offsetOfNonInlineChars()));
         masm.jump(done);
     }
 
     masm.bind(done);
 }
 
 JitCode *
-JitCompartment::generateStringConcatStub(JSContext *cx, ExecutionMode mode)
+JitCompartment::generateStringConcatStub(JSContext *cx)
 {
     MacroAssembler masm(cx);
 
     Register lhs = CallTempReg0;
     Register rhs = CallTempReg1;
     Register temp1 = CallTempReg2;
     Register temp2 = CallTempReg3;
     Register temp3 = CallTempReg4;
     Register output = CallTempReg5;
 
-    // In parallel execution, we pass in the ForkJoinContext in CallTempReg4, as
-    // by the time we need to use the temp3 we no longer have need of the
-    // cx.
-    Register forkJoinContext = CallTempReg4;
-
     Label failure, failurePopTemps;
 #ifdef JS_USE_LINK_REGISTER
     masm.pushReturnAddress();
 #endif
     // If lhs is empty, return rhs.
     Label leftEmpty;
     masm.loadStringLength(lhs, temp1);
     masm.branchTest32(Assembler::Zero, temp1, temp1, &leftEmpty);
@@ -5949,30 +5596,17 @@ JitCompartment::generateStringConcatStub
     masm.bind(&notInline);
 
     // Keep AND'ed flags in temp1.
 
     // Ensure result length <= JSString::MAX_LENGTH.
     masm.branch32(Assembler::Above, temp2, Imm32(JSString::MAX_LENGTH), &failure);
 
     // Allocate a new rope.
-    switch (mode) {
-      case SequentialExecution:
-        masm.newGCString(output, temp3, &failure);
-        break;
-      case ParallelExecution:
-        masm.push(temp1);
-        masm.push(temp2);
-        masm.newGCStringPar(output, forkJoinContext, temp1, temp2, &failurePopTemps);
-        masm.pop(temp2);
-        masm.pop(temp1);
-        break;
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
+    masm.newGCString(output, temp3, &failure);
 
     // Store rope length and flags. temp1 still holds the result of AND'ing the
     // lhs and rhs flags, so we just have to clear the other flags to get our
     // rope flags (Latin1 if both lhs and rhs are Latin1).
     static_assert(JSString::ROPE_FLAGS == 0, "Rope flags must be 0");
     masm.and32(Imm32(JSString::LATIN1_CHARS_BIT), temp1);
     masm.store32(temp1, Address(output, JSString::offsetOfFlags()));
     masm.store32(temp2, Address(output, JSString::offsetOfLength()));
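Since JSString::ROPE_FLAGS is zero, the rope's entire flag word reduces to "Latin1 if and only if both children are Latin1", which is exactly what the AND-then-mask above computes. A standalone scalar sketch of the same computation (the bit value below is a stand-in, not the real vm/String.h constant):

    #include <cstdint>

    static const uint32_t LATIN1_CHARS_BIT = 1 << 6;   // stand-in value
    static const uint32_t ROPE_FLAGS = 0;              // ropes carry no other flags

    static uint32_t RopeFlags(uint32_t lhsFlags, uint32_t rhsFlags)
    {
        // Latin1 survives only if set on both children; every other bit is
        // cleared, leaving ROPE_FLAGS plus the optional Latin1 bit.
        return ROPE_FLAGS | ((lhsFlags & rhsFlags) & LATIN1_CHARS_BIT);
    }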
@@ -5986,22 +5620,22 @@ JitCompartment::generateStringConcatStub
     masm.mov(rhs, output);
     masm.ret();
 
     masm.bind(&rightEmpty);
     masm.mov(lhs, output);
     masm.ret();
 
     masm.bind(&isFatInlineTwoByte);
-    ConcatFatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3, forkJoinContext,
-                          mode, &failure, &failurePopTemps, true);
+    ConcatFatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
+                          &failure, &failurePopTemps, true);
 
     masm.bind(&isFatInlineLatin1);
-    ConcatFatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3, forkJoinContext,
-                          mode, &failure, &failurePopTemps, false);
+    ConcatFatInlineString(masm, lhs, rhs, output, temp1, temp2, temp3,
+                          &failure, &failurePopTemps, false);
 
     masm.bind(&failurePopTemps);
     masm.pop(temp2);
     masm.pop(temp1);
 
     masm.bind(&failure);
     masm.movePtr(ImmPtr(nullptr), output);
     masm.ret();
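With the parallel allocation paths gone, the stub is a single decision tree over the two inputs, and a null result is the signal for emitConcat's out-of-line VM call. A hedged standalone model of the paths it can take (the length limits below are stand-ins, not the real JSString/JSFatInlineString constants):

    #include <cstddef>

    // Stand-in string descriptor; the real stub reads length and Latin1-ness
    // from the JSString length and flag words.
    struct Str {
        size_t length;
        bool   latin1;
    };

    // Stand-in limits.
    static const size_t MAX_LENGTH        = (size_t(1) << 28) - 1;
    static const size_t FAT_INLINE_LATIN1 = 24;
    static const size_t FAT_INLINE_2BYTE  = 12;

    enum class ConcatPath { ReturnRhs, ReturnLhs, FatInline, Fallback, Rope };

    // Which path the stub takes for a given pair of inputs.
    static ConcatPath ClassifyConcat(const Str &lhs, const Str &rhs)
    {
        if (lhs.length == 0)
            return ConcatPath::ReturnRhs;                // leftEmpty
        if (rhs.length == 0)
            return ConcatPath::ReturnLhs;                // rightEmpty

        size_t len = lhs.length + rhs.length;
        bool bothLatin1 = lhs.latin1 && rhs.latin1;
        size_t inlineMax = bothLatin1 ? FAT_INLINE_LATIN1 : FAT_INLINE_2BYTE;

        if (len <= inlineMax)
            return ConcatPath::FatInline;                // ConcatFatInlineString
        if (len > MAX_LENGTH)
            return ConcatPath::Fallback;                 // null result -> ool VM call
        return ConcatPath::Rope;                         // newGCString + rope flags
    }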
@@ -6520,20 +6154,17 @@ CodeGenerator::visitStoreElementHoleV(LS
     else
         masm.storeValue(value, BaseIndex(elements, ToRegister(lir->index()), TimesEight));
 
     masm.bind(ool->rejoin());
 }
 
 typedef bool (*SetDenseElementFn)(JSContext *, HandleNativeObject, int32_t, HandleValue,
                                   bool strict);
-typedef bool (*SetDenseElementParFn)(ForkJoinContext *, HandleObject, int32_t, HandleValue, bool);
-static const VMFunctionsModal SetDenseElementInfo = VMFunctionsModal(
-    FunctionInfo<SetDenseElementFn>(SetDenseElement),
-    FunctionInfo<SetDenseElementParFn>(SetDenseElementPar));
+static const VMFunction SetDenseElementInfo = FunctionInfo<SetDenseElementFn>(SetDenseElement);
 
 void
 CodeGenerator::visitOutOfLineStoreElementHole(OutOfLineStoreElementHole *ool)
 {
     Register object, elements;
     LInstruction *ins = ool->ins();
     const LAllocation *index;
     MIRType valueType;
@@ -7151,21 +6782,18 @@ void
 CodeGenerator::visitRunOncePrologue(LRunOncePrologue *lir)
 {
     pushArg(ImmGCPtr(lir->mir()->block()->info().script()));
     callVM(RunOnceScriptPrologueInfo, lir);
 }
 
 typedef JSObject *(*InitRestParameterFn)(JSContext *, uint32_t, Value *, HandleObject,
                                          HandleObject);
-typedef JSObject *(*InitRestParameterParFn)(ForkJoinContext *, uint32_t, Value *,
-                                            HandleObject, HandleArrayObject);
-static const VMFunctionsModal InitRestParameterInfo = VMFunctionsModal(
-    FunctionInfo<InitRestParameterFn>(InitRestParameter),
-    FunctionInfo<InitRestParameterParFn>(InitRestParameterPar));
+static const VMFunction InitRestParameterInfo =
+    FunctionInfo<InitRestParameterFn>(InitRestParameter);
 
 void
 CodeGenerator::emitRest(LInstruction *lir, Register array, Register numActuals,
                         Register temp0, Register temp1, unsigned numFormals,
                         JSObject *templateObject, bool saveAndRestore, Register resultreg)
 {
     // Compute actuals() + numFormals.
     size_t actualsOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
@@ -7217,34 +6845,16 @@ CodeGenerator::visitRest(LRest *lir)
         masm.bind(&failAlloc);
         masm.movePtr(ImmPtr(nullptr), temp2);
     }
     masm.bind(&joinAlloc);
 
     emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject, false, ToRegister(lir->output()));
 }
 
-// LRestPar cannot derive from LCallInstructionHelper because emitAllocateGCThingPar may
-// itself contain a VM call.  Thus there's some manual work here and in emitRest().
-
-void
-CodeGenerator::visitRestPar(LRestPar *lir)
-{
-    Register numActuals = ToRegister(lir->numActuals());
-    Register cx = ToRegister(lir->forkJoinContext());
-    Register temp0 = ToRegister(lir->getTemp(0));
-    Register temp1 = ToRegister(lir->getTemp(1));
-    Register temp2 = ToRegister(lir->getTemp(2));
-    unsigned numFormals = lir->mir()->numFormals();
-    ArrayObject *templateObject = lir->mir()->templateObject();
-
-    emitAllocateGCThingPar(lir, temp2, cx, temp0, temp1, templateObject);
-    emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject, true, ToRegister(lir->output()));
-}
-
 bool
 CodeGenerator::generateAsmJS(AsmJSFunctionLabels *labels)
 {
     JitSpew(JitSpew_Codegen, "# Emitting asm.js code");
 
     // AsmJS doesn't do SPS instrumentation.
     sps_.disable();
 
@@ -7334,17 +6944,17 @@ CodeGenerator::generate()
     setSkipArgCheckEntryOffset(masm.size());
     masm.setFramePushed(0);
     if (!generatePrologue())
         return false;
 
     masm.bind(&skipPrologue);
 
 #ifdef JS_TRACE_LOGGING
-    if (!gen->compilingAsmJS() && gen->info().executionMode() == SequentialExecution) {
+    if (!gen->compilingAsmJS()) {
         emitTracelogScriptStart();
         emitTracelogStartEvent(TraceLogger_IonMonkey);
     }
 #endif
 
 #ifdef DEBUG
     // Assert that the argument types are correct.
     generateArgumentsChecks(/* bailout = */ false);
@@ -7418,29 +7028,24 @@ struct AutoDiscardIonCode
 
 bool
 CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
 {
     RootedScript script(cx, gen->info().script());
     ExecutionMode executionMode = gen->info().executionMode();
     OptimizationLevel optimizationLevel = gen->optimizationInfo().level();
 
-    MOZ_ASSERT_IF(HasIonScript(script, executionMode), executionMode == SequentialExecution);
-
     // We finished the new IonScript. Invalidate the current active IonScript,
     // so we can replace it with this new (probably better optimized) version.
     if (HasIonScript(script, executionMode)) {
         MOZ_ASSERT(GetIonScript(script, executionMode)->isRecompiling());
         // Do a normal invalidate, except don't cancel offThread compilations,
         // since that will cancel this compilation too.
-        if (!Invalidate(cx, script, SequentialExecution,
-                        /* resetUses */ false, /* cancelOffThread*/ false))
-        {
+        if (!Invalidate(cx, script, /* resetUses */ false, /* cancelOffThread */ false))
             return false;
-        }
     }
 
     if (scriptCounts_ && !script->hasScriptCounts() && !script->initScriptCounts(cx))
         return false;
 
     // Check to make sure we didn't have a mid-build invalidation. If so, we
     // will trickle to jit::Compile() and return Method_Skipped.
     uint32_t warmUpCount = script->getWarmUpCount();
@@ -7457,45 +7062,36 @@ CodeGenerator::link(JSContext *cx, types
 
     uint32_t scriptFrameSize = frameClass_ == FrameSizeClass::None()
                            ? frameDepth_
                            : FrameSizeClass::FromDepth(frameDepth_).frameSize();
 
     // We encode safepoints after the OSI-point offsets have been determined.
     encodeSafepoints();
 
-    // List of possible scripts that this graph may call. Currently this is
-    // only tracked when compiling for parallel execution.
-    CallTargetVector callTargets(alloc());
-    if (executionMode == ParallelExecution)
-        AddPossibleCallees(cx, graph.mir(), callTargets);
-
     AutoDiscardIonCode discardIonCode(cx, &recompileInfo);
 
     IonScript *ionScript =
       IonScript::New(cx, recompileInfo,
                      graph.totalSlotCount(), scriptFrameSize,
                      snapshots_.listSize(), snapshots_.RVATableSize(),
                      recovers_.size(), bailouts_.length(), graph.numConstants(),
                      safepointIndices_.length(), osiIndices_.length(),
                      cacheList_.length(), runtimeData_.length(),
-                     safepoints_.size(), callTargets.length(),
-                     patchableBackedges_.length(), optimizationLevel);
+                     safepoints_.size(), patchableBackedges_.length(), optimizationLevel);
     if (!ionScript)
         return false;
     discardIonCode.ionScript = ionScript;
 
     // Also, note that creating the code here during an incremental GC will
     // trace the code and mark all GC things it refers to. This captures any
     // read barriers which were skipped while compiling the script off thread.
     Linker linker(masm);
     AutoFlushICache afc("IonLink");
-    JitCode *code = (executionMode == SequentialExecution)
-                    ? linker.newCodeForIonScript(cx)
-                    : linker.newCode<CanGC>(cx, ION_CODE);
+    JitCode *code = linker.newCodeForIonScript(cx);
     if (!code)
         return false;
 
     // Encode native to bytecode map if profiling is enabled.
     if (isNativeToBytecodeMapEnabled()) {
         // Generate native-to-bytecode main table.
         if (!generateCompactNativeToBytecodeMap(cx, code))
             return false;
@@ -7545,22 +7141,16 @@ CodeGenerator::link(JSContext *cx, types
     ionScript->setSkipArgCheckEntryOffset(getSkipArgCheckEntryOffset());
 
     // If SPS is enabled, mark IonScript as having been instrumented with SPS
     if (sps_.enabled())
         ionScript->setHasSPSInstrumentation();
 
     SetIonScript(cx, script, executionMode, ionScript);
 
-    // In parallel execution mode, when we first compile a script, we
-    // don't know that its potential callees are compiled, so set a
-    // flag warning that the callees may not be fully compiled.
-    if (!callTargets.empty())
-        ionScript->setHasUncompiledCallTarget();
-
     invalidateEpilogueData_.fixup(&masm);
     Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, invalidateEpilogueData_),
                                        ImmPtr(ionScript),
                                        ImmPtr((void*)-1));
 
     JitSpew(JitSpew_Codegen, "Created IonScript %p (raw %p)",
             (void *) ionScript, (void *) code->raw());
 
@@ -7603,18 +7193,16 @@ CodeGenerator::link(JSContext *cx, types
         ionScript->copyOsiIndices(&osiIndices_[0], masm);
     if (snapshots_.listSize())
         ionScript->copySnapshots(&snapshots_);
     MOZ_ASSERT_IF(snapshots_.listSize(), recovers_.size());
     if (recovers_.size())
         ionScript->copyRecovers(&recovers_);
     if (graph.numConstants())
         ionScript->copyConstants(graph.constantPool());
-    if (callTargets.length() > 0)
-        ionScript->copyCallTargetEntries(callTargets.begin());
     if (patchableBackedges_.length() > 0)
         ionScript->copyPatchableBackedges(cx, code, patchableBackedges_.begin(), masm);
 
 #ifdef JS_TRACE_LOGGING
     TraceLoggerThread *logger = TraceLoggerForMainThread(cx->runtime());
     for (uint32_t i = 0; i < patchableTraceLoggers_.length(); i++) {
         patchableTraceLoggers_[i].fixup(&masm);
         Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTraceLoggers_[i]),
@@ -7631,31 +7219,21 @@ CodeGenerator::link(JSContext *cx, types
             patchableTLScripts_[i].fixup(&masm);
             Assembler::PatchDataWithValueCheck(CodeLocationLabel(code, patchableTLScripts_[i]),
                                                ImmPtr((void *) uintptr_t(textId)),
                                                ImmPtr((void *)0));
         }
     }
 #endif
 
-    switch (executionMode) {
-      case SequentialExecution:
-        // The correct state for prebarriers is unknown until the end of compilation,
-        // since a GC can occur during code generation. All barriers are emitted
-        // off-by-default, and are toggled on here if necessary.
-        if (cx->zone()->needsIncrementalBarrier())
-            ionScript->toggleBarriers(true);
-        break;
-      case ParallelExecution:
-        // We don't run incremental GC during parallel execution; no need to
-        // turn on barriers.
-        break;
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
+    // The correct state for prebarriers is unknown until the end of compilation,
+    // since a GC can occur during code generation. All barriers are emitted
+    // off-by-default, and are toggled on here if necessary.
+    if (cx->zone()->needsIncrementalBarrier())
+        ionScript->toggleBarriers(true);
 
     // Attach any generated script counts to the script.
     if (IonScriptCounts *counts = extractScriptCounts())
         script->addIonCounts(counts);
 
     // Make sure that AutoDiscardIonCode does not free the relevant info.
     discardIonCode.keepIonCode();
 
@@ -7748,20 +7326,17 @@ CodeGenerator::visitCallGetElement(LCall
     } else {
         MOZ_ASSERT(op == JSOP_CALLELEM);
         callVM(CallElementInfo, lir);
     }
 }
 
 typedef bool (*SetObjectElementFn)(JSContext *, HandleObject, HandleValue, HandleValue,
                                    bool strict);
-typedef bool (*SetElementParFn)(ForkJoinContext *, HandleObject, HandleValue, HandleValue, bool);
-static const VMFunctionsModal SetObjectElementInfo = VMFunctionsModal(
-    FunctionInfo<SetObjectElementFn>(SetObjectElement),
-    FunctionInfo<SetElementParFn>(SetElementPar));
+static const VMFunction SetObjectElementInfo = FunctionInfo<SetObjectElementFn>(SetObjectElement);
 
 void
 CodeGenerator::visitCallSetElement(LCallSetElement *lir)
 {
     pushArg(Imm32(lir->mir()->strict()));
     pushArg(ToValue(lir, LCallSetElement::Value));
     pushArg(ToValue(lir, LCallSetElement::Index));
     pushArg(ToRegister(lir->getOperand(0)));
@@ -7899,82 +7474,42 @@ CodeGenerator::visitNameIC(OutOfLineUpda
     masm.jump(ool->rejoin());
 }
 
 void
 CodeGenerator::addGetPropertyCache(LInstruction *ins, RegisterSet liveRegs, Register objReg,
                                    PropertyName *name, TypedOrValueRegister output,
                                    bool monitoredResult, jsbytecode *profilerLeavePc)
 {
-    switch (gen->info().executionMode()) {
-      case SequentialExecution: {
-        GetPropertyIC cache(liveRegs, objReg, name, output, monitoredResult);
-        cache.setProfilerLeavePC(profilerLeavePc);
-        addCache(ins, allocateCache(cache));
-        break;
-      }
-      case ParallelExecution: {
-        GetPropertyParIC cache(objReg, name, output);
-        cache.setProfilerLeavePC(profilerLeavePc);
-        addCache(ins, allocateCache(cache));
-        break;
-      }
-      default:
-        MOZ_CRASH("Bad execution mode");
-    }
+    GetPropertyIC cache(liveRegs, objReg, name, output, monitoredResult);
+    cache.setProfilerLeavePC(profilerLeavePc);
+    addCache(ins, allocateCache(cache));
 }
 
 void
 CodeGenerator::addSetPropertyCache(LInstruction *ins, RegisterSet liveRegs, Register objReg,
                                    PropertyName *name, ConstantOrRegister value, bool strict,
                                    bool needsTypeBarrier, jsbytecode *profilerLeavePc)
 {
-    switch (gen->info().executionMode()) {
-      case SequentialExecution: {
-          SetPropertyIC cache(liveRegs, objReg, name, value, strict, needsTypeBarrier);
-            cache.setProfilerLeavePC(profilerLeavePc);
-          addCache(ins, allocateCache(cache));
-          break;
-      }
-      case ParallelExecution: {
-          SetPropertyParIC cache(objReg, name, value, strict, needsTypeBarrier);
-            cache.setProfilerLeavePC(profilerLeavePc);
-          addCache(ins, allocateCache(cache));
-          break;
-      }
-      default:
-        MOZ_CRASH("Bad execution mode");
-    }
+    SetPropertyIC cache(liveRegs, objReg, name, value, strict, needsTypeBarrier);
+    cache.setProfilerLeavePC(profilerLeavePc);
+    addCache(ins, allocateCache(cache));
 }
 
 void
 CodeGenerator::addSetElementCache(LInstruction *ins, Register obj, Register unboxIndex,
                                   Register temp, FloatRegister tempDouble,
                                   FloatRegister tempFloat32, ValueOperand index,
                                   ConstantOrRegister value, bool strict, bool guardHoles,
                                   jsbytecode *profilerLeavePc)
 {
-    switch (gen->info().executionMode()) {
-      case SequentialExecution: {
-        SetElementIC cache(obj, unboxIndex, temp, tempDouble, tempFloat32, index, value, strict,
-                           guardHoles);
-        cache.setProfilerLeavePC(profilerLeavePc);
-        addCache(ins, allocateCache(cache));
-        break;
-      }
-      case ParallelExecution: {
-        SetElementParIC cache(obj, unboxIndex, temp, tempDouble, tempFloat32, index, value, strict,
-                              guardHoles);
-        cache.setProfilerLeavePC(profilerLeavePc);
-        addCache(ins, allocateCache(cache));
-        break;
-      }
-      default:
-        MOZ_CRASH("Bad execution mode");
-    }
+    SetElementIC cache(obj, unboxIndex, temp, tempDouble, tempFloat32, index, value, strict,
+                       guardHoles);
+    cache.setProfilerLeavePC(profilerLeavePc);
+    addCache(ins, allocateCache(cache));
 }
 
 void
 CodeGenerator::visitGetPropertyCacheV(LGetPropertyCacheV *ins)
 {
     RegisterSet liveRegs = ins->safepoint()->liveRegs();
     Register objReg = ToRegister(ins->getOperand(0));
     PropertyName *name = ins->mir()->name();
@@ -8020,57 +7555,25 @@ CodeGenerator::visitGetPropertyIC(OutOfL
     pushArg(Imm32(ool->getCacheIndex()));
     callVM(GetPropertyIC::UpdateInfo, lir);
     StoreValueTo(ic->output()).generate(this);
     restoreLiveIgnore(lir, StoreValueTo(ic->output()).clobbered());
 
     masm.jump(ool->rejoin());
 }
 
-typedef bool (*GetPropertyParICFn)(ForkJoinContext *, size_t, HandleObject, MutableHandleValue);
-const VMFunction GetPropertyParIC::UpdateInfo =
-    FunctionInfo<GetPropertyParICFn>(GetPropertyParIC::update);
-
-void
-CodeGenerator::visitGetPropertyParIC(OutOfLineUpdateCache *ool, DataPtr<GetPropertyParIC> &ic)
-{
-    LInstruction *lir = ool->lir();
-    saveLive(lir);
-
-    pushArg(ic->object());
-    pushArg(Imm32(ool->getCacheIndex()));
-    callVM(GetPropertyParIC::UpdateInfo, lir);
-    StoreValueTo(ic->output()).generate(this);
-    restoreLiveIgnore(lir, StoreValueTo(ic->output()).clobbered());
-
-    masm.jump(ool->rejoin());
-}
-
 void
 CodeGenerator::addGetElementCache(LInstruction *ins, Register obj, ConstantOrRegister index,
                                   TypedOrValueRegister output, bool monitoredResult,
                                   bool allowDoubleResult, jsbytecode *profilerLeavePc)
 {
-    switch (gen->info().executionMode()) {
-      case SequentialExecution: {
-        RegisterSet liveRegs = ins->safepoint()->liveRegs();
-        GetElementIC cache(liveRegs, obj, index, output, monitoredResult, allowDoubleResult);
-        cache.setProfilerLeavePC(profilerLeavePc);
-        addCache(ins, allocateCache(cache));
-        break;
-      }
-      case ParallelExecution: {
-        GetElementParIC cache(obj, index, output, monitoredResult, allowDoubleResult);
-        cache.setProfilerLeavePC(profilerLeavePc);
-        addCache(ins, allocateCache(cache));
-        break;
-      }
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
+    RegisterSet liveRegs = ins->safepoint()->liveRegs();
+    GetElementIC cache(liveRegs, obj, index, output, monitoredResult, allowDoubleResult);
+    cache.setProfilerLeavePC(profilerLeavePc);
+    addCache(ins, allocateCache(cache));
 }
 
 void
 CodeGenerator::visitGetElementCacheV(LGetElementCacheV *ins)
 {
     Register obj = ToRegister(ins->object());
     ConstantOrRegister index = TypedOrValueRegister(ToValue(ins, LGetElementCacheV::Index));
     TypedOrValueRegister output = TypedOrValueRegister(GetValueOutput(ins));
@@ -8164,57 +7667,16 @@ CodeGenerator::visitSetElementIC(OutOfLi
     pushArg(ic->object());
     pushArg(Imm32(ool->getCacheIndex()));
     callVM(SetElementIC::UpdateInfo, lir);
     restoreLive(lir);
 
     masm.jump(ool->rejoin());
 }
 
-typedef bool (*SetElementParICFn)(ForkJoinContext *, size_t, HandleObject, HandleValue, HandleValue);
-const VMFunction SetElementParIC::UpdateInfo =
-    FunctionInfo<SetElementParICFn>(SetElementParIC::update);
-
-void
-CodeGenerator::visitSetElementParIC(OutOfLineUpdateCache *ool, DataPtr<SetElementParIC> &ic)
-{
-    LInstruction *lir = ool->lir();
-    saveLive(lir);
-
-    pushArg(ic->value());
-    pushArg(ic->index());
-    pushArg(ic->object());
-    pushArg(Imm32(ool->getCacheIndex()));
-    callVM(SetElementParIC::UpdateInfo, lir);
-    restoreLive(lir);
-
-    masm.jump(ool->rejoin());
-}
-
-typedef bool (*GetElementParICFn)(ForkJoinContext *, size_t, HandleObject, HandleValue,
-                                  MutableHandleValue);
-const VMFunction GetElementParIC::UpdateInfo =
-    FunctionInfo<GetElementParICFn>(GetElementParIC::update);
-
-void
-CodeGenerator::visitGetElementParIC(OutOfLineUpdateCache *ool, DataPtr<GetElementParIC> &ic)
-{
-    LInstruction *lir = ool->lir();
-    saveLive(lir);
-
-    pushArg(ic->index());
-    pushArg(ic->object());
-    pushArg(Imm32(ool->getCacheIndex()));
-    callVM(GetElementParIC::UpdateInfo, lir);
-    StoreValueTo(ic->output()).generate(this);
-    restoreLiveIgnore(lir, StoreValueTo(ic->output()).clobbered());
-
-    masm.jump(ool->rejoin());
-}
-
 void
 CodeGenerator::visitBindNameCache(LBindNameCache *ins)
 {
     Register scopeChain = ToRegister(ins->scopeChain());
     Register output = ToRegister(ins->output());
     BindNameIC cache(scopeChain, ins->mir()->name(), output);
     cache.setProfilerLeavePC(ins->mir()->profilerLeavePc());
 
@@ -8237,21 +7699,17 @@ CodeGenerator::visitBindNameIC(OutOfLine
     StoreRegisterTo(ic->outputReg()).generate(this);
     restoreLiveIgnore(lir, StoreRegisterTo(ic->outputReg()).clobbered());
 
     masm.jump(ool->rejoin());
 }
 
 typedef bool (*SetPropertyFn)(JSContext *, HandleObject,
                               HandlePropertyName, const HandleValue, bool, jsbytecode *);
-typedef bool (*SetPropertyParFn)(ForkJoinContext *, HandleObject,
-                                 HandlePropertyName, const HandleValue, bool, jsbytecode *);
-static const VMFunctionsModal SetPropertyInfo = VMFunctionsModal(
-    FunctionInfo<SetPropertyFn>(SetProperty),
-    FunctionInfo<SetPropertyParFn>(SetPropertyPar));
+static const VMFunction SetPropertyInfo = FunctionInfo<SetPropertyFn>(SetProperty);
 
 void
 CodeGenerator::visitCallSetProperty(LCallSetProperty *ins)
 {
     ConstantOrRegister value = TypedOrValueRegister(ToValue(ins, LCallSetProperty::Value));
 
     const Register objReg = ToRegister(ins->getOperand(0));
 
@@ -8344,75 +7802,42 @@ CodeGenerator::visitSetPropertyIC(OutOfL
     pushArg(ic->object());
     pushArg(Imm32(ool->getCacheIndex()));
     callVM(SetPropertyIC::UpdateInfo, lir);
     restoreLive(lir);
 
     masm.jump(ool->rejoin());
 }
 
-typedef bool (*SetPropertyParICFn)(ForkJoinContext *, size_t, HandleObject, HandleValue);
-const VMFunction SetPropertyParIC::UpdateInfo =
-    FunctionInfo<SetPropertyParICFn>(SetPropertyParIC::update);
-
-void
-CodeGenerator::visitSetPropertyParIC(OutOfLineUpdateCache *ool, DataPtr<SetPropertyParIC> &ic)
-{
-    LInstruction *lir = ool->lir();
-    saveLive(lir);
-
-    pushArg(ic->value());
-    pushArg(ic->object());
-    pushArg(Imm32(ool->getCacheIndex()));
-    callVM(SetPropertyParIC::UpdateInfo, lir);
-    restoreLive(lir);
-
-    masm.jump(ool->rejoin());
-}
-
 typedef bool (*ThrowFn)(JSContext *, HandleValue);
 static const VMFunction ThrowInfoCodeGen = FunctionInfo<ThrowFn>(js::Throw);
 
 void
 CodeGenerator::visitThrow(LThrow *lir)
 {
     pushArg(ToValue(lir, LThrow::Value));
     callVM(ThrowInfoCodeGen, lir);
 }
 
 typedef bool (*BitNotFn)(JSContext *, HandleValue, int *p);
-typedef bool (*BitNotParFn)(ForkJoinContext *, HandleValue, int32_t *);
-static const VMFunctionsModal BitNotInfo = VMFunctionsModal(
-    FunctionInfo<BitNotFn>(BitNot),
-    FunctionInfo<BitNotParFn>(BitNotPar));
+static const VMFunction BitNotInfo = FunctionInfo<BitNotFn>(BitNot);
 
 void
 CodeGenerator::visitBitNotV(LBitNotV *lir)
 {
     pushArg(ToValue(lir, LBitNotV::Input));
     callVM(BitNotInfo, lir);
 }
 
 typedef bool (*BitopFn)(JSContext *, HandleValue, HandleValue, int *p);
-typedef bool (*BitopParFn)(ForkJoinContext *, HandleValue, HandleValue, int32_t *);
-static const VMFunctionsModal BitAndInfo = VMFunctionsModal(
-    FunctionInfo<BitopFn>(BitAnd),
-    FunctionInfo<BitopParFn>(BitAndPar));
-static const VMFunctionsModal BitOrInfo = VMFunctionsModal(
-    FunctionInfo<BitopFn>(BitOr),
-    FunctionInfo<BitopParFn>(BitOrPar));
-static const VMFunctionsModal BitXorInfo = VMFunctionsModal(
-    FunctionInfo<BitopFn>(BitXor),
-    FunctionInfo<BitopParFn>(BitXorPar));
-static const VMFunctionsModal BitLhsInfo = VMFunctionsModal(
-    FunctionInfo<BitopFn>(BitLsh),
-    FunctionInfo<BitopParFn>(BitLshPar));
-static const VMFunctionsModal BitRhsInfo = VMFunctionsModal(
-    FunctionInfo<BitopFn>(BitRsh),
-    FunctionInfo<BitopParFn>(BitRshPar));
+static const VMFunction BitAndInfo = FunctionInfo<BitopFn>(BitAnd);
+static const VMFunction BitOrInfo = FunctionInfo<BitopFn>(BitOr);
+static const VMFunction BitXorInfo = FunctionInfo<BitopFn>(BitXor);
+static const VMFunction BitLhsInfo = FunctionInfo<BitopFn>(BitLsh);
+static const VMFunction BitRhsInfo = FunctionInfo<BitopFn>(BitRsh);
 
 void
 CodeGenerator::visitBitOpV(LBitOpV *lir)
 {
     pushArg(ToValue(lir, LBitOpV::RhsInput));
     pushArg(ToValue(lir, LBitOpV::LhsInput));
 
     switch (lir->jsop()) {
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -28,24 +28,21 @@
 
 namespace js {
 namespace jit {
 
 class OutOfLineTestObject;
 class OutOfLineNewArray;
 class OutOfLineNewObject;
 class CheckOverRecursedFailure;
-class CheckOverRecursedFailurePar;
-class OutOfLineInterruptCheckPar;
 class OutOfLineInterruptCheckImplicit;
 class OutOfLineUnboxFloatingPoint;
 class OutOfLineStoreElementHole;
 class OutOfLineTypeOfV;
 class OutOfLineLoadTypedArray;
-class OutOfLineNewGCThingPar;
 class OutOfLineUpdateCache;
 class OutOfLineCallPostWriteBarrier;
 class OutOfLineIsCallable;
 class OutOfLineRegExpExec;
 class OutOfLineRegExpTest;
 
 class CodeGenerator : public CodeGeneratorSpecific
 {
@@ -108,17 +105,16 @@ class CodeGenerator : public CodeGenerat
     void visitOutOfLineRegExpExec(OutOfLineRegExpExec *ool);
     void visitRegExpTest(LRegExpTest *lir);
     void visitOutOfLineRegExpTest(OutOfLineRegExpTest *ool);
     void visitRegExpReplace(LRegExpReplace *lir);
     void visitStringReplace(LStringReplace *lir);
     void visitLambda(LLambda *lir);
     void visitLambdaArrow(LLambdaArrow *lir);
     void visitLambdaForSingleton(LLambdaForSingleton *lir);
-    void visitLambdaPar(LLambdaPar *lir);
     void visitPointer(LPointer *lir);
     void visitSlots(LSlots *lir);
     void visitLoadSlotT(LLoadSlotT *lir);
     void visitLoadSlotV(LLoadSlotV *lir);
     void visitStoreSlotT(LStoreSlotT *lir);
     void visitStoreSlotV(LStoreSlotV *lir);
     void visitElements(LElements *lir);
     void visitConvertElementsToDoubles(LConvertElementsToDoubles *lir);
@@ -157,20 +153,17 @@ class CodeGenerator : public CodeGenerat
     void visitNewArrayDynamicLength(LNewArrayDynamicLength *lir);
     void visitNewObjectVMCall(LNewObject *lir);
     void visitNewObject(LNewObject *lir);
     void visitOutOfLineNewObject(OutOfLineNewObject *ool);
     void visitNewTypedObject(LNewTypedObject *lir);
     void visitNewDeclEnvObject(LNewDeclEnvObject *lir);
     void visitNewCallObject(LNewCallObject *lir);
     void visitNewSingletonCallObject(LNewSingletonCallObject *lir);
-    void visitNewCallObjectPar(LNewCallObjectPar *lir);
     void visitNewStringObject(LNewStringObject *lir);
-    void visitNewPar(LNewPar *lir);
-    void visitNewDenseArrayPar(LNewDenseArrayPar *lir);
     void visitNewDerivedTypedObject(LNewDerivedTypedObject *lir);
     void visitInitElem(LInitElem *lir);
     void visitInitElemGetterSetter(LInitElemGetterSetter *lir);
     void visitMutateProto(LMutateProto *lir);
     void visitInitProp(LInitProp *lir);
     void visitInitPropGetterSetter(LInitPropGetterSetter *lir);
     void visitCreateThis(LCreateThis *lir);
     void visitCreateThisWithProto(LCreateThisWithProto *lir);
@@ -226,23 +219,20 @@ class CodeGenerator : public CodeGenerat
     void visitCompareStrictS(LCompareStrictS *lir);
     void visitCompareVM(LCompareVM *lir);
     void visitIsNullOrLikeUndefined(LIsNullOrLikeUndefined *lir);
     void visitIsNullOrLikeUndefinedAndBranch(LIsNullOrLikeUndefinedAndBranch *lir);
     void visitEmulatesUndefined(LEmulatesUndefined *lir);
     void visitEmulatesUndefinedAndBranch(LEmulatesUndefinedAndBranch *lir);
     void emitConcat(LInstruction *lir, Register lhs, Register rhs, Register output);
     void visitConcat(LConcat *lir);
-    void visitConcatPar(LConcatPar *lir);
     void visitCharCodeAt(LCharCodeAt *lir);
     void visitFromCharCode(LFromCharCode *lir);
     void visitStringSplit(LStringSplit *lir);
     void visitFunctionEnvironment(LFunctionEnvironment *lir);
-    void visitForkJoinContext(LForkJoinContext *lir);
-    void visitGuardThreadExclusive(LGuardThreadExclusive *lir);
     void visitCallGetProperty(LCallGetProperty *lir);
     void visitCallGetElement(LCallGetElement *lir);
     void visitCallSetElement(LCallSetElement *lir);
     void visitCallInitElementArray(LCallInitElementArray *lir);
     void visitThrow(LThrow *lir);
     void visitTypeOfV(LTypeOfV *lir);
     void visitOutOfLineTypeOfV(OutOfLineTypeOfV *ool);
     void visitToIdV(LToIdV *lir);
@@ -286,17 +276,16 @@ class CodeGenerator : public CodeGenerat
     void visitSetFrameArgumentT(LSetFrameArgumentT *lir);
     void visitSetFrameArgumentC(LSetFrameArgumentC *lir);
     void visitSetFrameArgumentV(LSetFrameArgumentV *lir);
     void visitRunOncePrologue(LRunOncePrologue *lir);
     void emitRest(LInstruction *lir, Register array, Register numActuals,
                   Register temp0, Register temp1, unsigned numFormals,
                   JSObject *templateObject, bool saveAndRestore, Register resultreg);
     void visitRest(LRest *lir);
-    void visitRestPar(LRestPar *lir);
     void visitCallSetProperty(LCallSetProperty *ins);
     void visitCallDeleteProperty(LCallDeleteProperty *lir);
     void visitCallDeleteElement(LCallDeleteElement *lir);
     void visitBitNotV(LBitNotV *lir);
     void visitBitOpV(LBitOpV *lir);
     void emitInstanceOf(LInstruction *ins, JSObject *prototypeObject);
     void visitIn(LIn *ins);
     void visitInArray(LInArray *ins);
@@ -320,29 +309,23 @@ class CodeGenerator : public CodeGenerat
     void visitAsmJSVoidReturn(LAsmJSVoidReturn *ret);
     void visitLexicalCheck(LLexicalCheck *ins);
     void visitThrowUninitializedLexical(LThrowUninitializedLexical *ins);
     void visitDebugger(LDebugger *ins);
 
     void visitCheckOverRecursed(LCheckOverRecursed *lir);
     void visitCheckOverRecursedFailure(CheckOverRecursedFailure *ool);
 
-    void visitCheckOverRecursedPar(LCheckOverRecursedPar *lir);
-
-    void visitInterruptCheckPar(LInterruptCheckPar *lir);
-    void visitOutOfLineInterruptCheckPar(OutOfLineInterruptCheckPar *ool);
-
     void visitInterruptCheckImplicit(LInterruptCheckImplicit *ins);
     void visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit *ins);
 
     void visitUnboxFloatingPoint(LUnboxFloatingPoint *lir);
     void visitOutOfLineUnboxFloatingPoint(OutOfLineUnboxFloatingPoint *ool);
     void visitOutOfLineStoreElementHole(OutOfLineStoreElementHole *ool);
 
-    void visitOutOfLineNewGCThingPar(OutOfLineNewGCThingPar *ool);
     void loadJSScriptForBlock(MBasicBlock *block, Register reg);
     void loadOutermostJSScript(Register reg);
 
     // Inline caches visitors.
     void visitOutOfLineCache(OutOfLineUpdateCache *ool);
 
     void visitGetPropertyCacheV(LGetPropertyCacheV *ins);
     void visitGetPropertyCacheT(LGetPropertyCacheT *ins);
@@ -353,23 +336,19 @@ class CodeGenerator : public CodeGenerat
     void visitBindNameCache(LBindNameCache *ins);
     void visitCallSetProperty(LInstruction *ins);
     void visitSetPropertyCacheV(LSetPropertyCacheV *ins);
     void visitSetPropertyCacheT(LSetPropertyCacheT *ins);
     void visitGetNameCache(LGetNameCache *ins);
     void visitCallsiteCloneCache(LCallsiteCloneCache *ins);
 
     void visitGetPropertyIC(OutOfLineUpdateCache *ool, DataPtr<GetPropertyIC> &ic);
-    void visitGetPropertyParIC(OutOfLineUpdateCache *ool, DataPtr<GetPropertyParIC> &ic);
     void visitSetPropertyIC(OutOfLineUpdateCache *ool, DataPtr<SetPropertyIC> &ic);
-    void visitSetPropertyParIC(OutOfLineUpdateCache *ool, DataPtr<SetPropertyParIC> &ic);
     void visitGetElementIC(OutOfLineUpdateCache *ool, DataPtr<GetElementIC> &ic);
-    void visitGetElementParIC(OutOfLineUpdateCache *ool, DataPtr<GetElementParIC> &ic);
     void visitSetElementIC(OutOfLineUpdateCache *ool, DataPtr<SetElementIC> &ic);
-    void visitSetElementParIC(OutOfLineUpdateCache *ool, DataPtr<SetElementParIC> &ic);
     void visitBindNameIC(OutOfLineUpdateCache *ool, DataPtr<BindNameIC> &ic);
     void visitNameIC(OutOfLineUpdateCache *ool, DataPtr<NameIC> &ic);
     void visitCallsiteCloneIC(OutOfLineUpdateCache *ool, DataPtr<CallsiteCloneIC> &ic);
 
     void visitAssertRangeI(LAssertRangeI *ins);
     void visitAssertRangeD(LAssertRangeD *ins);
     void visitAssertRangeF(LAssertRangeF *ins);
     void visitAssertRangeV(LAssertRangeV *ins);
@@ -396,22 +375,16 @@ class CodeGenerator : public CodeGenerat
                              bool needsTypeBarrier, jsbytecode *profilerLeavePc);
     void addSetElementCache(LInstruction *ins, Register obj, Register unboxIndex, Register temp,
                             FloatRegister tempDouble, FloatRegister tempFloat32,
                             ValueOperand index, ConstantOrRegister value,
                             bool strict, bool guardHoles, jsbytecode *profilerLeavePc);
 
     bool generateBranchV(const ValueOperand &value, Label *ifTrue, Label *ifFalse, FloatRegister fr);
 
-    void emitAllocateGCThingPar(LInstruction *lir, Register objReg, Register cxReg,
-                                Register tempReg1, Register tempReg2,
-                                NativeObject *templateObj);
-
-    void emitCallToUncompiledScriptPar(LInstruction *lir, Register calleeReg);
-
     void emitLambdaInit(Register resultReg, Register scopeChainReg,
                         const LambdaFunctionInfo &info);
 
     void emitFilterArgumentsOrEval(LInstruction *lir, Register string, Register temp1,
                                    Register temp2);
 
     IonScriptCounts *maybeCreateScriptCounts();
 
--- a/js/src/jit/CompileInfo.h
+++ b/js/src/jit/CompileInfo.h
@@ -426,20 +426,16 @@ class CompileInfo
     ExecutionMode executionMode() const {
         return executionMode_;
     }
 
     bool executionModeIsAnalysis() const {
         return executionMode_ == DefinitePropertiesAnalysis || executionMode_ == ArgumentsUsageAnalysis;
     }
 
-    bool isParallelExecution() const {
-        return executionMode_ == ParallelExecution;
-    }
-
     // Returns true if a slot can be observed out-side the current frame while
     // the frame is active on the stack.  This implies that these definitions
     // would have to be executed and that they cannot be removed even if they
     // are unused.
     bool isObservableSlot(uint32_t slot) const {
         if (isObservableFrameSlot(slot))
             return true;
 
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -28,24 +28,22 @@
 #include "jit/JitCommon.h"
 #include "jit/JitCompartment.h"
 #include "jit/JitSpewer.h"
 #include "jit/LICM.h"
 #include "jit/LinearScan.h"
 #include "jit/LIR.h"
 #include "jit/LoopUnroller.h"
 #include "jit/Lowering.h"
-#include "jit/ParallelSafetyAnalysis.h"
 #include "jit/PerfSpewer.h"
 #include "jit/RangeAnalysis.h"
 #include "jit/ScalarReplacement.h"
 #include "jit/Sink.h"
 #include "jit/StupidAllocator.h"
 #include "jit/ValueNumbering.h"
-#include "vm/ForkJoin.h"
 #include "vm/HelperThreads.h"
 #include "vm/TraceLogging.h"
 
 #include "jscompartmentinlines.h"
 #include "jsgcinlines.h"
 #include "jsinferinlines.h"
 #include "jsobjinlines.h"
 
@@ -149,26 +147,23 @@ jit::InitializeIon()
     CheckPerf();
     return true;
 }
 
 JitRuntime::JitRuntime()
   : execAlloc_(nullptr),
     ionAlloc_(nullptr),
     exceptionTail_(nullptr),
-    exceptionTailParallel_(nullptr),
     bailoutTail_(nullptr),
     enterJIT_(nullptr),
     bailoutHandler_(nullptr),
     argumentsRectifier_(nullptr),
     argumentsRectifierReturnAddr_(nullptr),
-    parallelArgumentsRectifier_(nullptr),
     invalidator_(nullptr),
     debugTrapHandler_(nullptr),
-    forkJoinGetSliceStub_(nullptr),
     baselineDebugModeOSRHandler_(nullptr),
     functionWrappers_(nullptr),
     osrTempData_(nullptr),
     mutatingBackedgeList_(false),
     ionReturnOverride_(MagicValue(JS_ARG_POISON)),
     jitcodeGlobalTable_(nullptr)
 {
 }
@@ -205,26 +200,21 @@ JitRuntime::initialize(JSContext *cx)
 
     functionWrappers_ = cx->new_<VMWrapperMap>(cx);
     if (!functionWrappers_ || !functionWrappers_->init())
         return false;
 
     JitSpew(JitSpew_Codegen, "# Emitting exception tail stub");
 
     void *handler = JS_FUNC_TO_DATA_PTR(void *, jit::HandleException);
-    void *handlerParallel = JS_FUNC_TO_DATA_PTR(void *, jit::HandleParallelFailure);
 
     exceptionTail_ = generateExceptionTailStub(cx, handler);
     if (!exceptionTail_)
         return false;
 
-    exceptionTailParallel_ = generateExceptionTailStub(cx, handlerParallel);
-    if (!exceptionTailParallel_)
-        return false;
-
     JitSpew(JitSpew_Codegen, "# Emitting bailout tail stub");
     bailoutTail_ = generateBailoutTailStub(cx);
     if (!bailoutTail_)
         return false;
 
     if (cx->runtime()->jitSupportsFloatingPoint) {
         JitSpew(JitSpew_Codegen, "# Emitting bailout tables");
 
@@ -238,41 +228,31 @@ JitRuntime::initialize(JSContext *cx)
                 break;
             bailoutTables_.infallibleAppend((JitCode *)nullptr);
             bailoutTables_[id] = generateBailoutTable(cx, id);
             if (!bailoutTables_[id])
                 return false;
         }
 
         JitSpew(JitSpew_Codegen, "# Emitting bailout handler");
-        bailoutHandler_ = generateBailoutHandler(cx, SequentialExecution);
+        bailoutHandler_ = generateBailoutHandler(cx);
         if (!bailoutHandler_)
             return false;
 
-        JitSpew(JitSpew_Codegen, "# Emitting parallel bailout handler");
-        parallelBailoutHandler_ = generateBailoutHandler(cx, ParallelExecution);
-        if (!parallelBailoutHandler_)
-            return false;
-
         JitSpew(JitSpew_Codegen, "# Emitting invalidator");
         invalidator_ = generateInvalidator(cx);
         if (!invalidator_)
             return false;
     }
 
     JitSpew(JitSpew_Codegen, "# Emitting sequential arguments rectifier");
-    argumentsRectifier_ = generateArgumentsRectifier(cx, SequentialExecution, &argumentsRectifierReturnAddr_);
+    argumentsRectifier_ = generateArgumentsRectifier(cx, &argumentsRectifierReturnAddr_);
     if (!argumentsRectifier_)
         return false;
 
-    JitSpew(JitSpew_Codegen, "# Emitting parallel arguments rectifier");
-    parallelArgumentsRectifier_ = generateArgumentsRectifier(cx, ParallelExecution, nullptr);
-    if (!parallelArgumentsRectifier_)
-        return false;
-
     JitSpew(JitSpew_Codegen, "# Emitting EnterJIT sequence");
     enterJIT_ = generateEnterJIT(cx, EnterJitOptimized);
     if (!enterJIT_)
         return false;
 
     JitSpew(JitSpew_Codegen, "# Emitting EnterBaselineJIT sequence");
     enterBaselineJIT_ = generateEnterJIT(cx, EnterJitBaseline);
     if (!enterBaselineJIT_)
@@ -339,28 +319,16 @@ JitRuntime::debugTrapHandler(JSContext *
         // be allocated in the atoms compartment.
         AutoLockForExclusiveAccess lock(cx);
         AutoCompartment ac(cx, cx->runtime()->atomsCompartment());
         debugTrapHandler_ = generateDebugTrapHandler(cx);
     }
     return debugTrapHandler_;
 }
 
-bool
-JitRuntime::ensureForkJoinGetSliceStubExists(JSContext *cx)
-{
-    if (!forkJoinGetSliceStub_) {
-        JitSpew(JitSpew_Codegen, "# Emitting ForkJoinGetSlice stub");
-        AutoLockForExclusiveAccess lock(cx);
-        AutoCompartment ac(cx, cx->runtime()->atomsCompartment());
-        forkJoinGetSliceStub_ = generateForkJoinGetSliceStub(cx);
-    }
-    return !!forkJoinGetSliceStub_;
-}
-
 uint8_t *
 JitRuntime::allocateOsrTempData(size_t size)
 {
     osrTempData_ = (uint8_t *)js_realloc(osrTempData_, size);
     return osrTempData_;
 }
 
 void
@@ -400,17 +368,16 @@ JitRuntime::patchIonBackedges(JSRuntime 
 }
 
 JitCompartment::JitCompartment()
   : stubCodes_(nullptr),
     baselineCallReturnAddr_(nullptr),
     baselineGetPropReturnAddr_(nullptr),
     baselineSetPropReturnAddr_(nullptr),
     stringConcatStub_(nullptr),
-    parallelStringConcatStub_(nullptr),
     regExpExecStub_(nullptr),
     regExpTestStub_(nullptr),
     activeParallelEntryScripts_(nullptr)
 {
 }
 
 JitCompartment::~JitCompartment()
 {
@@ -427,27 +394,21 @@ JitCompartment::initialize(JSContext *cx
 
     return true;
 }
 
 bool
 JitCompartment::ensureIonStubsExist(JSContext *cx)
 {
     if (!stringConcatStub_) {
-        stringConcatStub_ = generateStringConcatStub(cx, SequentialExecution);
+        stringConcatStub_ = generateStringConcatStub(cx);
         if (!stringConcatStub_)
             return false;
     }
 
-    if (!parallelStringConcatStub_) {
-        parallelStringConcatStub_ = generateStringConcatStub(cx, ParallelExecution);
-        if (!parallelStringConcatStub_)
-            return false;
-    }
-
     return true;
 }
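ensureIonStubsExist keeps the usual lazily-created, compartment-cached stub shape; only the parallel twin is gone. A minimal standalone sketch of that caching pattern (Stub and GenerateStub are placeholders for JitCode and generateStringConcatStub):

    struct Stub { };                        // placeholder for JitCode

    static Stub *GenerateStub()             // placeholder for generateStringConcatStub(cx)
    {
        return new Stub();
    }

    struct CompartmentSketch
    {
        Stub *stringConcatStub_ = nullptr;

        bool ensureStubsExist()
        {
            // Create the stub the first time it is needed and cache it; a failed
            // creation leaves the field null so a later call can retry.
            if (!stringConcatStub_) {
                stringConcatStub_ = GenerateStub();
                if (!stringConcatStub_)
                    return false;
            }
            return true;
        }
    };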
 
 bool
 JitCompartment::notifyOfActiveParallelEntryScript(JSContext *cx, HandleScript script)
 {
     // Fast path. The isParallelEntryScript bit guarantees that the script is
     // already in the set.
@@ -485,17 +446,17 @@ jit::FinishOffThreadBuilder(JSContext *c
     // Clean the references to the pending IonBuilder, if we just finished it.
     if (builder->script()->hasIonScript() && builder->script()->pendingIonBuilder() == builder)
         builder->script()->setPendingIonBuilder(cx, nullptr);
     if (builder->isInList())
         builder->remove();
 
     // Clear the recompiling flag of the old ionScript, since we continue to
     // use the old ionScript if recompiling fails.
-    if (executionMode == SequentialExecution && builder->script()->hasIonScript())
+    if (builder->script()->hasIonScript())
         builder->script()->ionScript()->clearRecompiling();
 
     // Clean up if compilation did not succeed.
     if (CompilingOffThread(builder->script(), executionMode)) {
         SetIonScript(cx, builder->script(), executionMode,
                      builder->abortReason() == AbortReason_Disable
                      ? ION_DISABLED_SCRIPT
                      : nullptr);
@@ -645,19 +606,16 @@ JitCompartment::sweep(FreeOp *fop, JSCom
     if (!stubCodes_->lookup(static_cast<uint32_t>(ICStub::GetProp_Fallback)))
         baselineGetPropReturnAddr_ = nullptr;
     if (!stubCodes_->lookup(static_cast<uint32_t>(ICStub::SetProp_Fallback)))
         baselineSetPropReturnAddr_ = nullptr;
 
     if (stringConcatStub_ && !IsJitCodeMarked(&stringConcatStub_))
         stringConcatStub_ = nullptr;
 
-    if (parallelStringConcatStub_ && !IsJitCodeMarked(&parallelStringConcatStub_))
-        parallelStringConcatStub_ = nullptr;
-
     if (regExpExecStub_ && !IsJitCodeMarked(&regExpExecStub_))
         regExpExecStub_ = nullptr;
 
     if (regExpTestStub_ && !IsJitCodeMarked(&regExpTestStub_))
         regExpTestStub_ = nullptr;
 
     if (activeParallelEntryScripts_) {
         for (ScriptSet::Enum e(*activeParallelEntryScripts_); !e.empty(); e.popFront()) {
@@ -840,18 +798,16 @@ IonScript::IonScript()
     bailoutEntries_(0),
     osiIndexOffset_(0),
     osiIndexEntries_(0),
     snapshots_(0),
     snapshotsListSize_(0),
     snapshotsRVATableSize_(0),
     constantTable_(0),
     constantEntries_(0),
-    callTargetList_(0),
-    callTargetEntries_(0),
     backedgeList_(0),
     backedgeEntries_(0),
     invalidationCount_(0),
     parallelAge_(0),
     recompileInfo_(),
     osrPcMismatchCounter_(0),
     pendingBuilder_(nullptr)
 {
@@ -860,18 +816,17 @@ IonScript::IonScript()
 IonScript *
 IonScript::New(JSContext *cx, types::RecompileInfo recompileInfo,
                uint32_t frameSlots, uint32_t frameSize,
                size_t snapshotsListSize, size_t snapshotsRVATableSize,
                size_t recoversSize, size_t bailoutEntries,
                size_t constants, size_t safepointIndices,
                size_t osiIndices, size_t cacheEntries,
                size_t runtimeSize,  size_t safepointsSize,
-               size_t callTargetEntries, size_t backedgeEntries,
-               OptimizationLevel optimizationLevel)
+               size_t backedgeEntries, OptimizationLevel optimizationLevel)
 {
     static const int DataAlignment = sizeof(void *);
 
     if (snapshotsListSize >= MAX_BUFFER_SIZE ||
         (bailoutEntries >= MAX_BUFFER_SIZE / sizeof(uint32_t)))
     {
         js_ReportOutOfMemory(cx);
         return nullptr;
@@ -884,28 +839,26 @@ IonScript::New(JSContext *cx, types::Rec
     size_t paddedRecoversSize = AlignBytes(recoversSize, DataAlignment);
     size_t paddedBailoutSize = AlignBytes(bailoutEntries * sizeof(uint32_t), DataAlignment);
     size_t paddedConstantsSize = AlignBytes(constants * sizeof(Value), DataAlignment);
     size_t paddedSafepointIndicesSize = AlignBytes(safepointIndices * sizeof(SafepointIndex), DataAlignment);
     size_t paddedOsiIndicesSize = AlignBytes(osiIndices * sizeof(OsiIndex), DataAlignment);
     size_t paddedCacheEntriesSize = AlignBytes(cacheEntries * sizeof(uint32_t), DataAlignment);
     size_t paddedRuntimeSize = AlignBytes(runtimeSize, DataAlignment);
     size_t paddedSafepointSize = AlignBytes(safepointsSize, DataAlignment);
-    size_t paddedCallTargetSize = AlignBytes(callTargetEntries * sizeof(JSScript *), DataAlignment);
     size_t paddedBackedgeSize = AlignBytes(backedgeEntries * sizeof(PatchableBackedge), DataAlignment);
     size_t bytes = paddedSnapshotsSize +
                    paddedRecoversSize +
                    paddedBailoutSize +
                    paddedConstantsSize +
                    paddedSafepointIndicesSize+
                    paddedOsiIndicesSize +
                    paddedCacheEntriesSize +
                    paddedRuntimeSize +
                    paddedSafepointSize +
-                   paddedCallTargetSize +
                    paddedBackedgeSize;
     IonScript *script = cx->zone()->pod_malloc_with_extra<IonScript, uint8_t>(bytes);
     if (!script)
         return nullptr;
     new (script) IonScript();
 
     uint32_t offsetCursor = sizeof(IonScript);
 
@@ -941,20 +894,16 @@ IonScript::New(JSContext *cx, types::Rec
     script->recovers_ = offsetCursor;
     script->recoversSize_ = recoversSize;
     offsetCursor += paddedRecoversSize;
 
     script->constantTable_ = offsetCursor;
     script->constantEntries_ = constants;
     offsetCursor += paddedConstantsSize;
 
-    script->callTargetList_ = offsetCursor;
-    script->callTargetEntries_ = callTargetEntries;
-    offsetCursor += paddedCallTargetSize;
-
     script->backedgeList_ = offsetCursor;
     script->backedgeEntries_ = backedgeEntries;
     offsetCursor += paddedBackedgeSize;
 
     script->frameSlots_ = frameSlots;
     script->frameSize_ = frameSize;
 
     script->recompileInfo_ = recompileInfo;
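The hunk above keeps all of IonScript's side tables (constants, backedges, and so on) in one contiguous allocation: each table's byte size is padded to pointer alignment, the padded sizes are summed, and an offset cursor hands each table its slice after the header. A minimal standalone sketch of that layout technique follows; Header, makeHeaderWithTables, and AlignBytes here are illustrative stand-ins, not the real IonScript API.

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>

// Round |size| up to the next multiple of |alignment| (a power of two).
static size_t AlignBytes(size_t size, size_t alignment) {
    return (size + alignment - 1) & ~(alignment - 1);
}

struct Header {
    uint32_t constantsOffset;   // byte offset of the constants table
    uint32_t constantsCount;
    uint32_t backedgesOffset;   // byte offset of the backedge table
    uint32_t backedgesCount;
};

// Allocate a Header followed by padded tables in a single malloc, mirroring
// the offset-cursor layout used above. Caller frees with std::free().
static Header *makeHeaderWithTables(size_t constants, size_t backedges) {
    const size_t DataAlignment = sizeof(void *);
    size_t paddedConstants = AlignBytes(constants * sizeof(uint64_t), DataAlignment);
    size_t paddedBackedges = AlignBytes(backedges * sizeof(void *), DataAlignment);

    void *raw = std::malloc(sizeof(Header) + paddedConstants + paddedBackedges);
    if (!raw)
        return nullptr;
    Header *h = new (raw) Header();

    uint32_t offsetCursor = sizeof(Header);
    h->constantsOffset = offsetCursor;
    h->constantsCount = uint32_t(constants);
    offsetCursor += paddedConstants;

    h->backedgesOffset = offsetCursor;
    h->backedgesCount = uint32_t(backedges);
    // offsetCursor now equals the total payload size past the header.
    return h;
}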
@@ -969,21 +918,16 @@ IonScript::trace(JSTracer *trc)
     if (method_)
         MarkJitCode(trc, &method_, "method");
 
     if (deoptTable_)
         MarkJitCode(trc, &deoptTable_, "deoptimizationTable");
 
     for (size_t i = 0; i < numConstants(); i++)
         gc::MarkValue(trc, &getConstant(i), "constant");
-
-    // No write barrier is needed for the call target list, as it's attached
-    // at compilation time and is read only.
-    for (size_t i = 0; i < callTargetEntries(); i++)
-        gc::MarkScriptUnbarriered(trc, &callTargetList()[i], "callTarget");
 }
 
 /* static */ void
 IonScript::writeBarrierPre(Zone *zone, IonScript *ionScript)
 {
     if (zone->needsIncrementalBarrier())
         ionScript->trace(zone->barrierTracer());
 }
@@ -1024,23 +968,16 @@ IonScript::copyBailoutTable(const Snapsh
 void
 IonScript::copyConstants(const Value *vp)
 {
     for (size_t i = 0; i < constantEntries_; i++)
         constants()[i].init(vp[i]);
 }
 
 void
-IonScript::copyCallTargetEntries(JSScript **callTargets)
-{
-    for (size_t i = 0; i < callTargetEntries_; i++)
-        callTargetList()[i] = callTargets[i];
-}
-
-void
 IonScript::copyPatchableBackedges(JSContext *cx, JitCode *code,
                                   PatchableBackedgeInfo *backedges,
                                   MacroAssembler &masm)
 {
     JitRuntime *jrt = cx->runtime()->jitRuntime();
     JitRuntime::AutoMutateBackedges amb(jrt);
 
     for (size_t i = 0; i < backedgeEntries_; i++) {
@@ -1375,31 +1312,16 @@ OptimizeMIR(MIRGenerator *mir)
             return false;
         IonSpewPass("Apply types");
         AssertExtendedGraphCoherency(graph);
 
         if (mir->shouldCancel("Apply types"))
             return false;
     }
 
-    // Parallel Safety Analysis. Note that this may delete blocks containing
-    // instructions pointed to by the dependency() field of instructions which
-    // are not deleted, leaving them dangling. This is ok, since we'll rerun
-    // AliasAnalysis, which recomputes them, before they're needed.
-    if (graph.entryBlock()->info().executionMode() == ParallelExecution) {
-        AutoTraceLog log(logger, TraceLogger_ParallelSafetyAnalysis);
-        ParallelSafetyAnalysis analysis(mir, graph);
-        if (!analysis.analyze())
-            return false;
-        IonSpewPass("Parallel Safety Analysis");
-        AssertExtendedGraphCoherency(graph);
-        if (mir->shouldCancel("Parallel Safety Analysis"))
-            return false;
-    }
-
     ValueNumberer gvn(mir, graph);
     if (!gvn.init())
         return false;
 
     // Alias analysis is required for LICM and GVN so that we don't move
     // loads across stores.
     if (mir->optimizationInfo().licmEnabled() ||
         mir->optimizationInfo().gvnEnabled())
@@ -1762,19 +1684,17 @@ AttachFinishedCompilations(JSContext *cx
                 break;
             }
         }
         if (!builder)
             break;
 
         // Try to defer linking if the script is on the stack, to postpone
         // invalidating them.
-        if (builder->info().executionMode() == SequentialExecution &&
-            builder->script()->hasIonScript())
-        {
+        if (builder->script()->hasIonScript()) {
             bool onStack = false;
             for (JitActivationIterator iter(cx->runtime()); !iter.done(); ++iter) {
                 for (JitFrameIterator it(iter); !it.done(); ++it) {
                     if (!it.isIonJS())
                         continue;
                     if (it.checkInvalidation())
                         continue;
 
@@ -1870,18 +1790,17 @@ TrackPropertiesForSingletonScopes(JSCont
         if (scope->is<CallObject>() && scope->hasSingletonType())
             TrackAllProperties(cx, scope);
     }
 }
 
 static AbortReason
 IonCompile(JSContext *cx, JSScript *script,
            BaselineFrame *baselineFrame, jsbytecode *osrPc, bool constructing,
-           ExecutionMode executionMode, bool recompile,
-           OptimizationLevel optimizationLevel)
+           bool recompile, OptimizationLevel optimizationLevel)
 {
     TraceLoggerThread *logger = TraceLoggerForMainThread(cx->runtime());
     TraceLoggerEvent event(logger, TraceLogger_AnnotateScripts, script);
     AutoTraceLog logScript(logger, event);
     AutoTraceLog logCompile(logger, TraceLogger_IonCompilation);
 
     MOZ_ASSERT(optimizationLevel > Optimization_DontCompile);
 
@@ -1906,33 +1825,26 @@ IonCompile(JSContext *cx, JSScript *scri
     types::AutoEnterAnalysis enter(cx);
 
     if (!cx->compartment()->ensureJitCompartmentExists(cx))
         return AbortReason_Alloc;
 
     if (!cx->compartment()->jitCompartment()->ensureIonStubsExist(cx))
         return AbortReason_Alloc;
 
-    if (executionMode == ParallelExecution &&
-        LIRGenerator::allowInlineForkJoinGetSlice() &&
-        !cx->runtime()->jitRuntime()->ensureForkJoinGetSliceStubExists(cx))
-    {
-        return AbortReason_Alloc;
-    }
-
     MIRGraph *graph = alloc->new_<MIRGraph>(temp);
     if (!graph)
         return AbortReason_Alloc;
 
     InlineScriptTree *inlineScriptTree = InlineScriptTree::New(temp, nullptr, nullptr, script);
     if (!inlineScriptTree)
         return AbortReason_Alloc;
 
     CompileInfo *info = alloc->new_<CompileInfo>(script, script->functionNonDelazifying(), osrPc,
-                                                 constructing, executionMode,
+                                                 constructing, SequentialExecution,
                                                  script->needsArgsObj(), inlineScriptTree);
     if (!info)
         return AbortReason_Alloc;
 
     BaselineInspector *inspector = alloc->new_<BaselineInspector>(script);
     if (!inspector)
         return AbortReason_Alloc;
 
@@ -1953,25 +1865,23 @@ IonCompile(JSContext *cx, JSScript *scri
     IonBuilder *builder = alloc->new_<IonBuilder>((JSContext *) nullptr,
                                                   CompileCompartment::get(cx->compartment()),
                                                   options, temp, graph, constraints,
                                                   inspector, info, optimizationInfo,
                                                   baselineFrameInspector);
     if (!builder)
         return AbortReason_Alloc;
 
-    MOZ_ASSERT(recompile == HasIonScript(builder->script(), executionMode));
-    MOZ_ASSERT(CanIonCompile(builder->script(), executionMode));
+    MOZ_ASSERT(recompile == builder->script()->hasIonScript());
+    MOZ_ASSERT(CanIonCompile(builder->script(), SequentialExecution));
 
     RootedScript builderScript(cx, builder->script());
 
-    if (recompile) {
-        MOZ_ASSERT(executionMode == SequentialExecution);
+    if (recompile)
         builderScript->ionScript()->setRecompiling();
-    }
 
 #ifdef DEBUG
     IonSpewFunction ionSpewFunction(graph, builderScript);
 #endif
 
     bool succeeded = builder->build();
     builder->clearForBackEnd();
 
@@ -1988,17 +1898,17 @@ IonCompile(JSContext *cx, JSScript *scri
             }
         }
         return reason;
     }
 
     // If possible, compile the script off thread.
     if (OffThreadCompilationAvailable(cx)) {
         if (!recompile)
-            SetIonScript(cx, builderScript, executionMode, ION_COMPILING_SCRIPT);
+            builderScript->setIonScript(cx, ION_COMPILING_SCRIPT);
 
         JitSpew(JitSpew_IonLogs, "Can't log script %s:%d. (Compiled on background thread.)",
                 builderScript->filename(), builderScript->lineno());
 
         if (!StartOffThreadIonCompile(cx, builder)) {
             JitSpew(JitSpew_IonAbort, "Unable to start off-thread ion compilation.");
             return AbortReason_Alloc;
         }
@@ -2100,35 +2010,28 @@ CanIonCompileScript(JSContext *cx, JSScr
 {
     if (!script->canIonCompile() || !CheckScript(cx, script, osr))
         return false;
 
     return CheckScriptSize(cx, script) == Method_Compiled;
 }
 
 static OptimizationLevel
-GetOptimizationLevel(HandleScript script, jsbytecode *pc, ExecutionMode executionMode)
+GetOptimizationLevel(HandleScript script, jsbytecode *pc)
 {
-    if (executionMode == ParallelExecution)
-        return Optimization_Normal;
-
-    MOZ_ASSERT(executionMode == SequentialExecution);
-
     return js_IonOptimizations.levelForScript(script, pc);
 }
 
 static MethodStatus
 Compile(JSContext *cx, HandleScript script, BaselineFrame *osrFrame, jsbytecode *osrPc,
-        bool constructing, ExecutionMode executionMode, bool forceRecompile = false)
+        bool constructing, bool forceRecompile = false)
 {
     MOZ_ASSERT(jit::IsIonEnabled(cx));
     MOZ_ASSERT(jit::IsBaselineEnabled(cx));
     MOZ_ASSERT_IF(osrPc != nullptr, LoopEntryCanIonOsr(osrPc));
-    MOZ_ASSERT_IF(executionMode == ParallelExecution, !osrFrame && !osrPc);
-    MOZ_ASSERT_IF(executionMode == ParallelExecution, !HasIonScript(script, executionMode));
 
     if (!script->hasBaselineScript())
         return Method_Skipped;
 
     if (script->isDebuggee() || (osrFrame && osrFrame->isDebuggee())) {
         JitSpew(JitSpew_IonAbort, "debugging");
         return Method_Skipped;
     }
@@ -2140,21 +2043,21 @@ Compile(JSContext *cx, HandleScript scri
 
     MethodStatus status = CheckScriptSize(cx, script);
     if (status != Method_Compiled) {
         JitSpew(JitSpew_IonAbort, "Aborted compilation of %s:%d", script->filename(), script->lineno());
         return status;
     }
 
     bool recompile = false;
-    OptimizationLevel optimizationLevel = GetOptimizationLevel(script, osrPc, executionMode);
+    OptimizationLevel optimizationLevel = GetOptimizationLevel(script, osrPc);
     if (optimizationLevel == Optimization_DontCompile)
         return Method_Skipped;
 
-    IonScript *scriptIon = GetIonScript(script, executionMode);
+    IonScript *scriptIon = script->maybeIonScript();
     if (scriptIon) {
         if (!scriptIon->method())
             return Method_CantCompile;
 
         // Don't recompile/overwrite more highly optimized code
         // with a lower optimization level.
         if (optimizationLevel <= scriptIon->optimizationLevel() && !forceRecompile)
             return Method_Compiled;
@@ -2164,31 +2067,31 @@ Compile(JSContext *cx, HandleScript scri
             return Method_Compiled;
 
         if (osrPc)
             scriptIon->resetOsrPcMismatchCounter();
 
         recompile = true;
     }
 
-    AbortReason reason = IonCompile(cx, script, osrFrame, osrPc, constructing, executionMode,
+    AbortReason reason = IonCompile(cx, script, osrFrame, osrPc, constructing,
                                     recompile, optimizationLevel);
     if (reason == AbortReason_Error)
         return Method_Error;
 
     if (reason == AbortReason_Disable)
         return Method_CantCompile;
 
     if (reason == AbortReason_Alloc) {
         js_ReportOutOfMemory(cx);
         return Method_Error;
     }
 
     // Compilation succeeded or we invalidated right away or an inlining/alloc abort
-    if (HasIonScript(script, executionMode))
+    if (script->hasIonScript())
         return Method_Compiled;
     return Method_Skipped;
 }
 
 } // namespace jit
 } // namespace js
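With ParallelExecution gone, Compile() reduces to the sequential path: pick an optimization level, bail out early if an equally or better optimized IonScript already exists, and otherwise map IonCompile()'s abort reason onto a MethodStatus. A small sketch of that final mapping; the enums below are stand-ins for the real ones in the jit/ headers.

// Illustrative stand-ins for the statuses used above.
enum MethodStatus { Method_Error, Method_CantCompile, Method_Skipped, Method_Compiled };
enum AbortReason { AbortReason_Alloc, AbortReason_Disable, AbortReason_Error, AbortReason_NoAbort };

// Sketch of how Compile() turns an IonCompile() abort reason plus the final
// "is an IonScript attached?" check into a MethodStatus.
static MethodStatus statusFromCompileResult(AbortReason reason, bool hasIonScript) {
    if (reason == AbortReason_Error)
        return Method_Error;
    if (reason == AbortReason_Disable)
        return Method_CantCompile;
    if (reason == AbortReason_Alloc)
        return Method_Error;          // out of memory is reported to the caller as an error
    // Compilation succeeded, was deferred to a background thread, or aborted
    // after invalidating: only report Method_Compiled if code is attached.
    return hasIonScript ? Method_Compiled : Method_Skipped;
}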
 
 // Decide if a transition from interpreter execution to Ion code should occur.
@@ -2234,17 +2137,17 @@ jit::CanEnterAtBranch(JSContext *cx, JSS
 
     // Attempt compilation.
     // - Returns Method_Compiled if the right ionscript is present
     //   (Meaning it was present or a sequential compile finished)
     // - Returns Method_Skipped if pc doesn't match
     //   (This means a background thread compilation with that pc could have started or not.)
     RootedScript rscript(cx, script);
     MethodStatus status = Compile(cx, rscript, osrFrame, pc, osrFrame->isConstructing(),
-                                  SequentialExecution, force);
+                                  force);
     if (status != Method_Compiled) {
         if (status == Method_CantCompile)
             ForbidCompilation(cx, script);
         return status;
     }
 
     // Report that the compilation was skipped when the osr pc wasn't adjusted.
     // This can happen when there was still an IonScript available and a
@@ -2304,18 +2207,17 @@ jit::CanEnter(JSContext *cx, RunState &s
     if (js_JitOptions.eagerCompilation && !rscript->hasBaselineScript()) {
         MethodStatus status = CanEnterBaselineMethod(cx, state);
         if (status != Method_Compiled)
             return status;
     }
 
     // Attempt compilation. Returns Method_Compiled if already compiled.
     bool constructing = state.isInvoke() && state.asInvoke()->constructing();
-    MethodStatus status =
-        Compile(cx, rscript, nullptr, nullptr, constructing, SequentialExecution);
+    MethodStatus status = Compile(cx, rscript, nullptr, nullptr, constructing);
     if (status != Method_Compiled) {
         if (status == Method_CantCompile)
             ForbidCompilation(cx, rscript);
         return status;
     }
 
     return Method_Compiled;
 }
@@ -2331,18 +2233,17 @@ jit::CompileFunctionForBaseline(JSContex
 
     // Mark as forbidden if frame can't be handled.
     if (!CheckFrame(frame)) {
         ForbidCompilation(cx, script);
         return Method_CantCompile;
     }
 
     // Attempt compilation. Returns Method_Compiled if already compiled.
-    MethodStatus status =
-        Compile(cx, script, frame, nullptr, frame->isConstructing(), SequentialExecution);
+    MethodStatus status = Compile(cx, script, frame, nullptr, frame->isConstructing());
     if (status != Method_Compiled) {
         if (status == Method_CantCompile)
             ForbidCompilation(cx, script);
         return status;
     }
 
     return Method_Compiled;
 }
@@ -2350,71 +2251,27 @@ jit::CompileFunctionForBaseline(JSContex
 MethodStatus
 jit::Recompile(JSContext *cx, HandleScript script, BaselineFrame *osrFrame, jsbytecode *osrPc,
                bool constructing, bool force)
 {
     MOZ_ASSERT(script->hasIonScript());
     if (script->ionScript()->isRecompiling())
         return Method_Compiled;
 
-    MethodStatus status =
-        Compile(cx, script, osrFrame, osrPc, constructing, SequentialExecution, force);
+    MethodStatus status = Compile(cx, script, osrFrame, osrPc, constructing, force);
     if (status != Method_Compiled) {
         if (status == Method_CantCompile)
             ForbidCompilation(cx, script);
         return status;
     }
 
     return Method_Compiled;
 }
 
 MethodStatus
-jit::CanEnterInParallel(JSContext *cx, HandleScript script)
-{
-    // Skip if the script has been disabled.
-    //
-    // Note: We return Method_Skipped in this case because the other
-    // CanEnter() methods do so. However, ForkJoin.cpp detects this
-    // condition differently treats it more like an error.
-    if (!script->canParallelIonCompile())
-        return Method_Skipped;
-
-    // Skip if the script is being compiled off thread.
-    if (script->isParallelIonCompilingOffThread())
-        return Method_Skipped;
-
-    MethodStatus status = Compile(cx, script, nullptr, nullptr, false, ParallelExecution);
-    if (status != Method_Compiled) {
-        if (status == Method_CantCompile)
-            ForbidCompilation(cx, script, ParallelExecution);
-        return status;
-    }
-
-    // This can GC, so afterward, script->parallelIon is
-    // not guaranteed to be valid.
-    if (!cx->runtime()->jitRuntime()->enterIon())
-        return Method_Error;
-
-    // Subtle: it is possible for GC to occur during
-    // compilation of one of the invoked functions, which
-    // would cause the earlier functions (such as the
-    // kernel itself) to be collected.  In this event, we
-    // give up and fallback to sequential for now.
-    if (!script->hasParallelIonScript()) {
-        parallel::Spew(
-            parallel::SpewCompile,
-            "Script %p:%s:%u was garbage-collected or invalidated",
-            script.get(), script->filename(), script->lineno());
-        return Method_Skipped;
-    }
-
-    return Method_Compiled;
-}
-
-MethodStatus
 jit::CanEnterUsingFastInvoke(JSContext *cx, HandleScript script, uint32_t numActualArgs)
 {
     MOZ_ASSERT(jit::IsIonEnabled(cx));
 
     // Skip if the code is expected to result in a bailout.
     if (!script->hasIonScript() || script->ionScript()->bailoutExpected())
         return Method_Skipped;
 
@@ -2800,27 +2657,19 @@ jit::Invalidate(types::TypeZone &types, 
             continue;
 
         SetIonScript(nullptr, script, executionMode, nullptr);
         ionScript->decrementInvalidationCount(fop);
         co->invalidate();
         numInvalidations--;
 
         // Wait for the scripts to get warm again before doing another
-        // compile, unless either:
-        // (1) we are recompiling *because* a script got hot;
-        //     (resetUses is false); or,
-        // (2) we are invalidating a parallel script.  This is because
-        //     the warmUpCounter only applies to sequential uses.  Parallel
-        //     execution *requires* ion, and so we don't limit it to
-        //     methods with a high usage count (though we do check that
-        //     the warmUpCount is at least 1 when compiling the transitive
-        //     closure of potential callees, to avoid compiling things
-        //     that are never run at all).
-        if (resetUses && executionMode != ParallelExecution)
+        // compile, unless we are recompiling *because* a script got hot
+        // (resetUses is false).
+        if (resetUses)
             script->resetWarmUpCounter();
     }
 
     // Make sure we didn't leak references by invalidating the same IonScript
     // multiple times in the above loop.
     MOZ_ASSERT(!numInvalidations);
 }
 
@@ -2839,18 +2688,17 @@ jit::IonScript::invalidate(JSContext *cx
     types::RecompileInfoVector list;
     if (!list.append(recompileInfo()))
         return false;
     Invalidate(cx, list, resetUses, true);
     return true;
 }
 
 bool
-jit::Invalidate(JSContext *cx, JSScript *script, ExecutionMode mode, bool resetUses,
-                bool cancelOffThread)
+jit::Invalidate(JSContext *cx, JSScript *script, bool resetUses, bool cancelOffThread)
 {
     MOZ_ASSERT(script->hasIonScript());
 
     if (cx->runtime()->spsProfiler.enabled()) {
         // Register invalidation with profiler.
         // Format of event payload string:
         //      "<filename>:<lineno>"
 
@@ -2866,135 +2714,70 @@ jit::Invalidate(JSContext *cx, JSScript 
 
         // Construct the descriptive string.
         JS_snprintf(buf, len, "Invalidate %s:%u", filename, (unsigned int)script->lineno());
         cx->runtime()->spsProfiler.markEvent(buf);
         js_free(buf);
     }
 
     types::RecompileInfoVector scripts;
-
-    switch (mode) {
-      case SequentialExecution:
-        MOZ_ASSERT(script->hasIonScript());
-        if (!scripts.append(script->ionScript()->recompileInfo()))
-            return false;
-        break;
-      case ParallelExecution:
-        MOZ_ASSERT(script->hasParallelIonScript());
-        if (!scripts.append(script->parallelIonScript()->recompileInfo()))
-            return false;
-        break;
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
+    MOZ_ASSERT(script->hasIonScript());
+    if (!scripts.append(script->ionScript()->recompileInfo()))
+        return false;
 
     Invalidate(cx, scripts, resetUses, cancelOffThread);
     return true;
 }
 
-bool
-jit::Invalidate(JSContext *cx, JSScript *script, bool resetUses, bool cancelOffThread)
-{
-    return Invalidate(cx, script, SequentialExecution, resetUses, cancelOffThread);
-}
-
 static void
 FinishInvalidationOf(FreeOp *fop, JSScript *script, IonScript *ionScript)
 {
     types::TypeZone &types = script->zone()->types;
 
     // Note: If the script is about to be swept, the compiler output may have
     // already been destroyed.
     if (types::CompilerOutput *output = ionScript->recompileInfo().compilerOutput(types))
         output->invalidate();
 
     // If this script has Ion code on the stack, invalidated() will return
     // true. In this case we have to wait until destroying it.
     if (!ionScript->invalidated())
         jit::IonScript::Destroy(fop, ionScript);
 }
 
-template <ExecutionMode mode>
 void
 jit::FinishInvalidation(FreeOp *fop, JSScript *script)
 {
-    // In all cases, nullptr out script->ion or script->parallelIon to avoid
-    // re-entry.
-    switch (mode) {
-      case SequentialExecution:
-        if (script->hasIonScript()) {
-            IonScript *ion = script->ionScript();
-            script->setIonScript(nullptr, nullptr);
-            FinishInvalidationOf(fop, script, ion);
-        }
-        return;
-
-      case ParallelExecution:
-        if (script->hasParallelIonScript()) {
-            IonScript *parallelIon = script->parallelIonScript();
-            script->setParallelIonScript(nullptr);
-            FinishInvalidationOf(fop, script, parallelIon);
-        }
-        return;
-
-      default:
-        MOZ_CRASH("bad execution mode");
+    // Null out script->ion to avoid re-entry.
+    if (script->hasIonScript()) {
+        IonScript *ion = script->ionScript();
+        script->setIonScript(nullptr, nullptr);
+        FinishInvalidationOf(fop, script, ion);
     }
 }
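FinishInvalidation detaches the IonScript from the script right away but frees it only when no Ion frame on the stack still references it (that is what invalidated() checks via the invalidation count). A small sketch of this deferred-destruction pattern, with hypothetical types rather than the real ones:

// Sketch of deferred destruction during invalidation: detach immediately,
// free only once the last referencing activation has unwound.
struct FakeIonScript {
    size_t invalidationCount = 0;       // frames on the stack still using this code
    bool invalidated() const { return invalidationCount != 0; }
};

struct FakeScript {
    FakeIonScript *ion = nullptr;
};

static void destroy(FakeIonScript *ionScript) { delete ionScript; }

static void finishInvalidation(FakeScript *script) {
    if (!script->ion)
        return;
    FakeIonScript *ion = script->ion;
    script->ion = nullptr;              // detach first to avoid re-entry
    if (!ion->invalidated())            // no frames left: safe to free now
        destroy(ion);
    // otherwise the last frame to leave drops the count and frees it
}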
 
-template void
-jit::FinishInvalidation<SequentialExecution>(FreeOp *fop, JSScript *script);
-
-template void
-jit::FinishInvalidation<ParallelExecution>(FreeOp *fop, JSScript *script);
-
 void
 jit::ForbidCompilation(JSContext *cx, JSScript *script)
 {
-    ForbidCompilation(cx, script, SequentialExecution);
-}
-
-void
-jit::ForbidCompilation(JSContext *cx, JSScript *script, ExecutionMode mode)
-{
-    JitSpew(JitSpew_IonAbort, "Disabling Ion mode %d compilation of script %s:%d",
-            mode, script->filename(), script->lineno());
+    JitSpew(JitSpew_IonAbort, "Disabling Ion compilation of script %s:%d",
+            script->filename(), script->lineno());
 
     CancelOffThreadIonCompile(cx->compartment(), script);
 
-    switch (mode) {
-      case SequentialExecution:
-        if (script->hasIonScript()) {
-            // It is only safe to modify script->ion if the script is not currently
-            // running, because JitFrameIterator needs to tell what ionScript to
-            // use (either the one on the JSScript, or the one hidden in the
-            // breadcrumbs Invalidation() leaves). Therefore, if invalidation
-            // fails, we cannot disable the script.
-            if (!Invalidate(cx, script, mode, false))
-                return;
-        }
-
-        script->setIonScript(cx, ION_DISABLED_SCRIPT);
-        return;
-
-      case ParallelExecution:
-        if (script->hasParallelIonScript()) {
-            if (!Invalidate(cx, script, mode, false))
-                return;
-        }
-
-        script->setParallelIonScript(ION_DISABLED_SCRIPT);
-        return;
-
-      default:
-        MOZ_CRASH("No such execution mode");
+    if (script->hasIonScript()) {
+        // It is only safe to modify script->ion if the script is not currently
+        // running, because JitFrameIterator needs to tell what ionScript to
+        // use (either the one on the JSScript, or the one hidden in the
+        // breadcrumbs Invalidation() leaves). Therefore, if invalidation
+        // fails, we cannot disable the script.
+        if (!Invalidate(cx, script, false))
+            return;
     }
 
-    MOZ_CRASH("No such execution mode");
+    script->setIonScript(cx, ION_DISABLED_SCRIPT);
 }
 
 AutoFlushICache *
 PerThreadData::autoFlushICache() const
 {
     return autoFlushICache_;
 }
 
@@ -3134,57 +2917,45 @@ AutoFlushICache::~AutoFlushICache()
 #endif
 }
 
 void
 jit::PurgeCaches(JSScript *script)
 {
     if (script->hasIonScript())
         script->ionScript()->purgeCaches();
-
-    if (script->hasParallelIonScript())
-        script->parallelIonScript()->purgeCaches();
 }
 
 size_t
 jit::SizeOfIonData(JSScript *script, mozilla::MallocSizeOf mallocSizeOf)
 {
     size_t result = 0;
 
     if (script->hasIonScript())
         result += script->ionScript()->sizeOfIncludingThis(mallocSizeOf);
 
-    if (script->hasParallelIonScript())
-        result += script->parallelIonScript()->sizeOfIncludingThis(mallocSizeOf);
-
     return result;
 }
 
 void
 jit::DestroyJitScripts(FreeOp *fop, JSScript *script)
 {
     if (script->hasIonScript())
         jit::IonScript::Destroy(fop, script->ionScript());
 
-    if (script->hasParallelIonScript())
-        jit::IonScript::Destroy(fop, script->parallelIonScript());
-
     if (script->hasBaselineScript())
         jit::BaselineScript::Destroy(fop, script->baselineScript());
 }
 
 void
 jit::TraceJitScripts(JSTracer* trc, JSScript *script)
 {
     if (script->hasIonScript())
         jit::IonScript::Trace(trc, script->ionScript());
 
-    if (script->hasParallelIonScript())
-        jit::IonScript::Trace(trc, script->parallelIonScript());
-
     if (script->hasBaselineScript())
         jit::BaselineScript::Trace(trc, script->baselineScript());
 }
 
 bool
 jit::JitSupportsFloatingPoint()
 {
     return js::jit::MacroAssembler::SupportsFloatingPoint();
--- a/js/src/jit/Ion.h
+++ b/js/src/jit/Ion.h
@@ -83,18 +83,16 @@ void SetJitContext(JitContext *ctx);
 bool CanIonCompileScript(JSContext *cx, JSScript *script, bool osr);
 
 MethodStatus CanEnterAtBranch(JSContext *cx, JSScript *script,
                               BaselineFrame *frame, jsbytecode *pc);
 MethodStatus CanEnter(JSContext *cx, RunState &state);
 MethodStatus CompileFunctionForBaseline(JSContext *cx, HandleScript script, BaselineFrame *frame);
 MethodStatus CanEnterUsingFastInvoke(JSContext *cx, HandleScript script, uint32_t numActualArgs);
 
-MethodStatus CanEnterInParallel(JSContext *cx, HandleScript script);
-
 MethodStatus
 Recompile(JSContext *cx, HandleScript script, BaselineFrame *osrFrame, jsbytecode *osrPc,
           bool constructing, bool force);
 
 enum JitExecStatus
 {
     // The method call had to be aborted due to a stack limit check. This
     // error indicates that Ion never attempted to clean up frames.
@@ -124,18 +122,16 @@ JitExecStatus IonCannon(JSContext *cx, R
 JitExecStatus FastInvoke(JSContext *cx, HandleFunction fun, CallArgs &args);
 
 // Walk the stack and invalidate active Ion frames for the invalid scripts.
 void Invalidate(types::TypeZone &types, FreeOp *fop,
                 const types::RecompileInfoVector &invalid, bool resetUses = true,
                 bool cancelOffThread = true);
 void Invalidate(JSContext *cx, const types::RecompileInfoVector &invalid, bool resetUses = true,
                 bool cancelOffThread = true);
-bool Invalidate(JSContext *cx, JSScript *script, ExecutionMode mode, bool resetUses = true,
-                bool cancelOffThread = true);
 bool Invalidate(JSContext *cx, JSScript *script, bool resetUses = true,
                 bool cancelOffThread = true);
 
 void ToggleBarriers(JS::Zone *zone, bool needs);
 
 class IonBuilder;
 class MIRGenerator;
 class LIRGraph;
@@ -189,17 +185,16 @@ NumLocalsAndArgs(JSScript *script)
 {
     size_t num = 1 /* this */ + script->nfixed();
     if (JSFunction *fun = script->functionNonDelazifying())
         num += fun->nargs();
     return num;
 }
 
 void ForbidCompilation(JSContext *cx, JSScript *script);
-void ForbidCompilation(JSContext *cx, JSScript *script, ExecutionMode mode);
 
 void PurgeCaches(JSScript *script);
 size_t SizeOfIonData(JSScript *script, mozilla::MallocSizeOf mallocSizeOf);
 void DestroyJitScripts(FreeOp *fop, JSScript *script);
 void TraceJitScripts(JSTracer* trc, JSScript *script);
 
 bool JitSupportsFloatingPoint();
 bool JitSupportsSimd();
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -2060,17 +2060,16 @@ IsResumableMIRType(MIRType type)
       case MIRType_MagicIsConstructing:
       case MIRType_ObjectOrNull:
       case MIRType_None:
       case MIRType_Slots:
       case MIRType_Elements:
       case MIRType_Pointer:
       case MIRType_Shape:
       case MIRType_TypeObject:
-      case MIRType_ForkJoinContext:
       case MIRType_Float32x4:
       case MIRType_Int32x4:
       case MIRType_Doublex2:
         return false;
     }
     MOZ_CRASH("Unknown MIRType.");
 }
 
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -6257,21 +6257,16 @@ IonBuilder::jsop_initprop(PropertyName *
 
     bool needsBarrier = true;
     if (obj->resultTypeSet() &&
         !obj->resultTypeSet()->propertyNeedsBarrier(constraints(), NameToId(name)))
     {
         needsBarrier = false;
     }
 
-    // In parallel execution, we never require write barriers.  See
-    // forkjoin.cpp for more information.
-    if (info().executionMode() == ParallelExecution)
-        needsBarrier = false;
-
     if (templateObject->isFixedSlot(shape->slot())) {
         MStoreFixedSlot *store = MStoreFixedSlot::New(alloc(), obj, shape->slot(), value);
         if (needsBarrier)
             store->setNeedsBarrier();
 
         current->add(store);
         return resumeAfter(store);
     }
@@ -7143,17 +7138,17 @@ jit::TypeSetIncludes(types::TypeSet *typ
 }
 
 // Whether a write of the given value may need a post-write barrier for GC purposes.
 bool
 jit::NeedsPostBarrier(CompileInfo &info, MDefinition *value)
 {
     if (!GetJitContext()->runtime->gcNursery().exists())
         return false;
-    return info.executionMode() != ParallelExecution && value->mightBeType(MIRType_Object);
+    return value->mightBeType(MIRType_Object);
 }
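NeedsPostBarrier now only asks whether a nursery exists and whether the stored value might be an object; the runtime barrier it guards records edges from tenured cells into the nursery so the next minor GC can trace them. A rough standalone sketch of that runtime check, with a hypothetical Nursery type and address-range test:

#include <cstdint>

// Minimal sketch of the generational-GC check behind a post-write barrier;
// the Nursery type and its address range here are hypothetical.
struct Nursery {
    uintptr_t start;
    uintptr_t end;
    bool exists() const { return start != end; }
    bool contains(const void *p) const {
        uintptr_t addr = reinterpret_cast<uintptr_t>(p);
        return addr >= start && addr < end;
    }
};

// After writing |value| into |cell|, remember the edge if a tenured cell now
// points into the nursery.
static bool needsPostBarrier(const Nursery &nursery, const void *cell, const void *value) {
    if (!nursery.exists())
        return false;                   // mirrors the gcNursery().exists() early-out above
    return !nursery.contains(cell) && nursery.contains(value);
}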
 
 bool
 IonBuilder::setStaticName(JSObject *staticObject, PropertyName *name)
 {
     jsid id = NameToId(name);
 
     MOZ_ASSERT(staticObject->is<GlobalObject>() || staticObject->is<CallObject>());
@@ -7952,20 +7947,16 @@ IonBuilder::getElemTryCache(bool *emitte
     BarrierKind barrier = PropertyReadNeedsTypeBarrier(analysisContext, constraints(), obj,
                                                        nullptr, types);
 
     // Always add a barrier if the index might be a string or symbol, so that
     // the cache can attach stubs for particular properties.
     if (index->mightBeType(MIRType_String) || index->mightBeType(MIRType_Symbol))
         barrier = BarrierKind::TypeSet;
 
-    // See note about always needing a barrier in jsop_getprop.
-    if (needsToMonitorMissingProperties(types))
-        barrier = BarrierKind::TypeSet;
-
     MInstruction *ins = MGetElementCache::New(alloc(), obj, index, barrier == BarrierKind::TypeSet);
 
     current->add(ins);
     current->push(ins);
 
     if (!resumeAfter(ins))
         return false;
 
@@ -8025,24 +8016,18 @@ IonBuilder::jsop_getelem_dense(MDefiniti
     // Note: to help GVN, use the original MElements instruction and not
     // MConvertElementsToDoubles as operand. This is fine because converting
     // elements to double does not change the initialized length.
     MInitializedLength *initLength = MInitializedLength::New(alloc(), elements);
     current->add(initLength);
 
     // If we can load the element as a definite double, make sure to check that
     // the array has been converted to homogeneous doubles first.
-    //
-    // NB: We disable this optimization in parallel execution mode
-    // because it is inherently not threadsafe (how do you convert the
-    // array atomically when there might be concurrent readers)?
     types::TemporaryTypeSet *objTypes = obj->resultTypeSet();
-    ExecutionMode executionMode = info().executionMode();
     bool loadDouble =
-        executionMode == SequentialExecution &&
         barrier == BarrierKind::NoBarrier &&
         loopDepth_ &&
         !readOutOfBounds &&
         !needsHoleCheck &&
         knownType == MIRType_Double &&
         objTypes &&
         objTypes->convertDoubleElements(constraints()) == types::TemporaryTypeSet::AlwaysConvertToDoubles;
     if (loadDouble)
@@ -8067,47 +8052,16 @@ IonBuilder::jsop_getelem_dense(MDefiniti
         current->add(load);
 
         // If maybeUndefined was true, the typeset must have undefined, and
         // then either additional types or a barrier. This means we should
         // never have a typed version of LoadElementHole.
         MOZ_ASSERT(knownType == MIRType_Value);
     }
 
-    // If the array is being converted to doubles, but we've observed
-    // just int, substitute a type set of int+double into the observed
-    // type set. The reason for this is that, in the
-    // interpreter+baseline, such arrays may consist of mixed
-    // ints/doubles, but when we enter ion code, we will be coercing
-    // all inputs to doubles. Therefore, the type barrier checking for
-    // just int is highly likely (*almost* guaranteed) to fail sooner
-    // or later. Essentially, by eagerly coercing to double, ion is
-    // making the observed types outdated. To compensate for this, we
-    // substitute a broader observed type set consisting of both ints
-    // and doubles. There is perhaps a tradeoff here, so we limit this
-    // optimization to parallel code, where it is needed to prevent
-    // perpetual bailouts in some extreme cases. (Bug 977853)
-    //
-    // NB: we have not added a MConvertElementsToDoubles MIR, so we
-    // cannot *assume* the result is a double.
-    if (executionMode == ParallelExecution &&
-        barrier != BarrierKind::NoBarrier &&
-        types->getKnownMIRType() == MIRType_Int32 &&
-        objTypes &&
-        objTypes->convertDoubleElements(constraints()) == types::TemporaryTypeSet::AlwaysConvertToDoubles)
-    {
-        // Note: double implies int32 as well for typesets
-        LifoAlloc *lifoAlloc = alloc().lifoAlloc();
-        types = lifoAlloc->new_<types::TemporaryTypeSet>(lifoAlloc, types::Type::DoubleType());
-        if (!types)
-            return false;
-
-        barrier = BarrierKind::NoBarrier; // Don't need a barrier anymore
-    }
-
     if (knownType != MIRType_Value)
         load->setResultType(knownType);
 
     current->push(load);
     return pushTypeBarrier(load, types, barrier);
 }
 
 void
@@ -10020,42 +9974,32 @@ IonBuilder::getPropTryCache(bool *emitte
             return true;
     }
 
     // Since getters have no guaranteed return values, we must barrier in order to be
     // able to attach stubs for them.
     if (inspector->hasSeenAccessedGetter(pc))
         barrier = BarrierKind::TypeSet;
 
-    if (needsToMonitorMissingProperties(types))
-        barrier = BarrierKind::TypeSet;
-
     // Caches can read values from prototypes, so update the barrier to
     // reflect such possible values.
     if (barrier != BarrierKind::TypeSet) {
         BarrierKind protoBarrier =
             PropertyReadOnPrototypeNeedsTypeBarrier(constraints(), obj, name, types);
         if (protoBarrier != BarrierKind::NoBarrier) {
             MOZ_ASSERT(barrier <= protoBarrier);
             barrier = protoBarrier;
         }
     }
 
     MGetPropertyCache *load = MGetPropertyCache::New(alloc(), obj, name,
                                                      barrier == BarrierKind::TypeSet);
 
     // Try to mark the cache as idempotent.
-    //
-    // In parallel execution, idempotency of caches is ignored, since we
-    // repeat the entire ForkJoin workload if we bail out. Note that it's
-    // overly restrictive to mark everything as idempotent, because we can
-    // treat non-idempotent caches in parallel as repeatable.
-    if (obj->type() == MIRType_Object && !invalidatedIdempotentCache() &&
-        info().executionMode() != ParallelExecution)
-    {
+    if (obj->type() == MIRType_Object && !invalidatedIdempotentCache()) {
         if (PropertyReadIsIdempotent(constraints(), obj, name))
             load->setIdempotent();
     }
 
     // When we are in the context of making a call from the value returned from
     // a property, we query the typeObject for the given property name to fill
     // the InlinePropertyTable of the GetPropertyCache.  This information is
     // then used in inlineCallsite and inlineCalls, if the "this" definition is
@@ -10158,27 +10102,16 @@ IonBuilder::getPropTryInnerize(bool *emi
     if (!getPropTryCache(emitted, inner, name, barrier, types) || *emitted)
         return *emitted;
 
     MOZ_ASSERT(*emitted == false);
     return true;
 }
 
 bool
-IonBuilder::needsToMonitorMissingProperties(types::TemporaryTypeSet *types)
-{
-    // GetPropertyParIC and GetElementParIC cannot safely call
-    // TypeScript::Monitor to ensure that the observed type set contains
-    // undefined. To account for possible missing properties, which property
-    // types do not track, we must always insert a type barrier.
-    return info().executionMode() == ParallelExecution &&
-           !types->hasType(types::Type::UndefinedType());
-}
-
-bool
 IonBuilder::jsop_setprop(PropertyName *name)
 {
     MDefinition *value = current->pop();
     MDefinition *obj = current->pop();
 
     bool emitted = false;
 
     // Always use a call if we are doing the definite properties analysis and
--- a/js/src/jit/IonBuilder.h
+++ b/js/src/jit/IonBuilder.h
@@ -434,17 +434,16 @@ class IonBuilder
     bool getPropTryComplexPropOfTypedObject(bool *emitted, MDefinition *typedObj,
                                             int32_t fieldOffset,
                                             TypedObjectPrediction fieldTypeReprs,
                                             size_t fieldIndex);
     bool getPropTryInnerize(bool *emitted, MDefinition *obj, PropertyName *name,
                             types::TemporaryTypeSet *types);
     bool getPropTryCache(bool *emitted, MDefinition *obj, PropertyName *name,
                          BarrierKind barrier, types::TemporaryTypeSet *types);
-    bool needsToMonitorMissingProperties(types::TemporaryTypeSet *types);
 
     // jsop_setprop() helpers.
     bool setPropTryCommonSetter(bool *emitted, MDefinition *obj,
                                 PropertyName *name, MDefinition *value);
     bool setPropTryCommonDOMSetter(bool *emitted, MDefinition *obj,
                                    MDefinition *value, JSFunction *setter,
                                    bool isDOM);
     bool setPropTryDefiniteSlot(bool *emitted, MDefinition *obj,
@@ -765,28 +764,22 @@ class IonBuilder
 
     // Array intrinsics.
     InliningStatus inlineUnsafePutElements(CallInfo &callInfo);
     bool inlineUnsafeSetDenseArrayElement(CallInfo &callInfo, uint32_t base);
     bool inlineUnsafeSetTypedArrayElement(CallInfo &callInfo, uint32_t base,
                                           ScalarTypeDescr::Type arrayType);
     bool inlineUnsafeSetTypedObjectArrayElement(CallInfo &callInfo, uint32_t base,
                                                 ScalarTypeDescr::Type arrayType);
-    InliningStatus inlineNewDenseArray(CallInfo &callInfo);
-    InliningStatus inlineNewDenseArrayForSequentialExecution(CallInfo &callInfo);
-    InliningStatus inlineNewDenseArrayForParallelExecution(CallInfo &callInfo);
 
     // Slot intrinsics.
     InliningStatus inlineUnsafeSetReservedSlot(CallInfo &callInfo);
     InliningStatus inlineUnsafeGetReservedSlot(CallInfo &callInfo,
                                                MIRType knownValueType);
 
-    // ForkJoin intrinsics
-    InliningStatus inlineForkJoinGetSlice(CallInfo &callInfo);
-
     // TypedArray intrinsics.
     InliningStatus inlineIsTypedArray(CallInfo &callInfo);
     InliningStatus inlineTypedArrayLength(CallInfo &callInfo);
 
     // TypedObject intrinsics and natives.
     InliningStatus inlineObjectIsTypeDescr(CallInfo &callInfo);
     InliningStatus inlineSetTypedObjectOffset(CallInfo &callInfo);
     bool elementAccessIsTypedObjectArrayOfScalarType(MDefinition* obj, MDefinition* id,
@@ -803,17 +796,16 @@ class IonBuilder
     InliningStatus inlineHasClass(CallInfo &callInfo, const Class *clasp,
                                   const Class *clasp2 = nullptr,
                                   const Class *clasp3 = nullptr,
                                   const Class *clasp4 = nullptr);
     InliningStatus inlineIsConstructing(CallInfo &callInfo);
     InliningStatus inlineSubstringKernel(CallInfo &callInfo);
 
     // Testing functions.
-    InliningStatus inlineForceSequentialOrInParallelSection(CallInfo &callInfo);
     InliningStatus inlineBailout(CallInfo &callInfo);
     InliningStatus inlineAssertFloat32(CallInfo &callInfo);
 
     // Bind function.
     InliningStatus inlineBoundFunction(CallInfo &callInfo, JSFunction *target);
 
     // Main inlining functions
     InliningStatus inlineNativeCall(CallInfo &callInfo, JSFunction *target);
--- a/js/src/jit/IonCaches.cpp
+++ b/js/src/jit/IonCaches.cpp
@@ -16,17 +16,16 @@
 #include "jit/Ion.h"
 #include "jit/JitcodeMap.h"
 #include "jit/JitSpewer.h"
 #include "jit/Linker.h"
 #include "jit/Lowering.h"
 #ifdef JS_ION_PERF
 # include "jit/PerfSpewer.h"
 #endif
-#include "jit/ParallelFunctions.h"
 #include "jit/VMFunctions.h"
 #include "vm/Shape.h"
 
 #include "jit/JitFrames-inl.h"
 #include "vm/Interpreter-inl.h"
 #include "vm/Shape-inl.h"
 
 using namespace js;
@@ -391,19 +390,17 @@ IonCache::attachStub(MacroAssembler &mas
     // Update the success path to continue after the IC initial jump.
     attacher.patchRejoinJump(masm, code);
 
     // Replace the STUB_ADDR constant with the address of the generated stub, so
     // that it can be kept alive even if the cache is flushed (see
     // MarkJitExitFrame).
     attacher.patchStubCodePointer(masm, code);
 
-    // Update the failure path. Note it is this patch that makes the stub
-    // accessible for parallel ICs so it should not be moved unless you really
-    // know what is going on.
+    // Update the failure path.
     attacher.patchNextStubJump(masm, code);
 }
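attachStub() wires a freshly generated stub into the cache by patching three things: the rejoin jump (success path), the stub code pointer, and the next-stub jump (failure path). A simplified sketch of the same stub-chain idea as a plain linked list; the names are illustrative, and the real caches patch jumps in generated code rather than walking a list.

// Each stub guards on a condition; on failure control falls through to the
// next stub and finally to a slow fallback path.
struct Stub {
    bool (*guard)(const void *obj);     // does this stub apply?
    int  (*body)(const void *obj);      // fast path when the guard passes
    Stub *next;                         // failure path: next stub to try
};

struct Cache {
    Stub *firstStub;
    int (*fallback)(const void *obj);   // generic slow path
};

static int lookup(Cache &cache, const void *obj) {
    for (Stub *s = cache.firstStub; s; s = s->next) {
        if (s->guard(obj))
            return s->body(obj);        // "rejoin": continue after the IC
    }
    return cache.fallback(obj);         // no stub matched
}

// Link a new stub at the head for simplicity; the machine-code version instead
// repatches the previous failure jump to flow into the new stub.
static void attach(Cache &cache, Stub *stub) {
    stub->next = cache.firstStub;
    cache.firstStub = stub;
}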
 
 bool
 IonCache::linkAndAttachStub(JSContext *cx, MacroAssembler &masm, StubAttacher &attacher,
                             IonScript *ion, const char *attachKind)
 {
     Rooted<JitCode *> code(cx);
@@ -1200,18 +1197,18 @@ CanAttachNativeGetProp(typename GetPropC
     // proxies, IonBuilder can innerize and pass us the inner window (the global),
     // see IonBuilder::getPropTryInnerize. This is fine for native getters because
     // IsCacheableGetPropCallNative checks they can handle both the inner and
     // outer object, but scripted getters would need a similar mechanism.
     if (cache.allowGetters() &&
         (IsCacheableGetPropCallNative(obj, holder, shape) ||
          IsCacheableGetPropCallPropertyOp(obj, holder, shape)))
     {
-        // Don't enable getter call if cache is parallel or idempotent, since
-        // they can be effectful. This is handled by allowGetters()
+        // Don't enable getter calls if the cache is idempotent, since getters
+        // can be effectful. This is handled by allowGetters().
         return GetPropertyIC::CanAttachCallGetter;
     }
 
     return GetPropertyIC::CanAttachNone;
 }
 
 bool
 GetPropertyIC::allowArrayLength(Context cx, HandleObject obj) const
@@ -1795,164 +1792,16 @@ GetPropertyIC::reset()
     RepatchIonCache::reset();
     hasTypedArrayLengthStub_ = false;
     hasSharedTypedArrayLengthStub_ = false;
     hasStrictArgumentsLengthStub_ = false;
     hasNormalArgumentsLengthStub_ = false;
     hasGenericProxyStub_ = false;
 }
 
-bool
-ParallelIonCache::initStubbedShapes(JSContext *cx)
-{
-    MOZ_ASSERT(isAllocated());
-    if (!stubbedShapes_) {
-        stubbedShapes_ = cx->new_<ShapeSet>(cx);
-        return stubbedShapes_ && stubbedShapes_->init();
-    }
-    return true;
-}
-
-bool
-ParallelIonCache::hasOrAddStubbedShape(LockedJSContext &cx, Shape *shape, bool *alreadyStubbed)
-{
-    // Check if we have already stubbed the current object to avoid
-    // attaching a duplicate stub.
-    if (!initStubbedShapes(cx))
-        return false;
-    ShapeSet::AddPtr p = stubbedShapes_->lookupForAdd(shape);
-    if ((*alreadyStubbed = !!p))
-        return true;
-    return stubbedShapes_->add(p, shape);
-}
-
-void
-ParallelIonCache::reset()
-{
-    DispatchIonCache::reset();
-    if (stubbedShapes_)
-        stubbedShapes_->clear();
-}
-
-void
-ParallelIonCache::destroy()
-{
-    DispatchIonCache::destroy();
-    js_delete(stubbedShapes_);
-}
-
-void
-GetPropertyParIC::reset()
-{
-    ParallelIonCache::reset();
-    hasTypedArrayLengthStub_ = false;
-    hasSharedTypedArrayLengthStub_ = false;
-}
-
-bool
-GetPropertyParIC::attachReadSlot(LockedJSContext &cx, IonScript *ion, HandleObject obj,
-                                 HandleNativeObject holder, HandleShape shape)
-{
-    // Ready to generate the read slot stub.
-    DispatchStubPrepender attacher(*this);
-    MacroAssembler masm(cx, ion);
-    GenerateReadSlot(cx, ion, masm, attacher, obj, holder, shape, object(), output());
-
-    return linkAndAttachStub(cx, masm, attacher, ion, "parallel reading");
-}
-
-bool
-GetPropertyParIC::attachArrayLength(LockedJSContext &cx, IonScript *ion, HandleObject obj)
-{
-    MacroAssembler masm(cx, ion);
-    DispatchStubPrepender attacher(*this);
-    if (!GenerateArrayLength(cx, masm, attacher, obj, object(), output()))
-        return false;
-
-    return linkAndAttachStub(cx, masm, attacher, ion, "parallel array length");
-}
-
-bool
-GetPropertyParIC::attachTypedArrayLength(LockedJSContext &cx, IonScript *ion, HandleObject obj)
-{
-    MacroAssembler masm(cx, ion);
-    DispatchStubPrepender attacher(*this);
-    GenerateTypedArrayLength(cx, masm, attacher, AnyTypedArrayLayout(obj), object(), output());
-
-    setHasTypedArrayLengthStub(obj);
-    return linkAndAttachStub(cx, masm, attacher, ion, "parallel typed array length");
-}
-
-bool
-GetPropertyParIC::update(ForkJoinContext *cx, size_t cacheIndex,
-                         HandleObject obj, MutableHandleValue vp)
-{
-    IonScript *ion = GetTopJitJSScript(cx)->parallelIonScript();
-    GetPropertyParIC &cache = ion->getCache(cacheIndex).toGetPropertyPar();
-
-    // Grab the property early, as the pure path is fast anyways and doesn't
-    // need a lock. If we can't do it purely, bail out of parallel execution.
-    if (!GetPropertyPure(cx, obj, NameToId(cache.name()), vp.address()))
-        return false;
-
-    // Avoid unnecessary locking if cannot attach stubs.
-    if (!cache.canAttachStub())
-        return true;
-
-    {
-        // Lock the context before mutating the cache. Ideally we'd like to do
-        // finer-grained locking, with one lock per cache. However, generating
-        // new jitcode uses a global ExecutableAllocator tied to the runtime.
-        LockedJSContext ncx(cx);
-
-        if (cache.canAttachStub()) {
-            bool alreadyStubbed;
-            if (!cache.hasOrAddStubbedShape(ncx, obj->lastProperty(), &alreadyStubbed))
-                return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-            if (alreadyStubbed)
-                return true;
-
-            // See note about the stub limit in GetPropertyCache.
-            bool attachedStub = false;
-
-            {
-                RootedShape shape(ncx);
-                RootedNativeObject holder(ncx);
-                RootedPropertyName name(ncx, cache.name());
-
-                GetPropertyIC::NativeGetPropCacheability canCache =
-                    CanAttachNativeGetProp(ncx, cache, obj, name, &holder, &shape);
-
-                if (canCache == GetPropertyIC::CanAttachReadSlot) {
-                    if (!cache.attachReadSlot(ncx, ion, obj, holder, shape))
-                        return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-                    attachedStub = true;
-                }
-
-                if (!attachedStub && canCache == GetPropertyIC::CanAttachArrayLength) {
-                    if (!cache.attachArrayLength(ncx, ion, obj))
-                        return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-                    attachedStub = true;
-                }
-            }
-
-            if (!attachedStub && !cache.hasAnyTypedArrayLengthStub(obj) &&
-                IsAnyTypedArray(obj) && cx->names().length == cache.name() &&
-                (cache.output().type() == MIRType_Value || cache.output().type() == MIRType_Int32))
-            {
-                if (!cache.attachTypedArrayLength(ncx, ion, obj))
-                    return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-                attachedStub = true;
-            }
-        }
-    }
-
-    return true;
-}
-
 void
 IonCache::disable()
 {
     reset();
     this->disabled_ = 1;
 }
 
 void
@@ -2904,122 +2753,16 @@ SetPropertyIC::update(JSContext *cx, siz
 
 void
 SetPropertyIC::reset()
 {
     RepatchIonCache::reset();
     hasGenericProxyStub_ = false;
 }
 
-bool
-SetPropertyParIC::update(ForkJoinContext *cx, size_t cacheIndex, HandleObject obj,
-                         HandleValue value)
-{
-    MOZ_ASSERT(cx->isThreadLocal(obj));
-
-    IonScript *ion = GetTopJitJSScript(cx)->parallelIonScript();
-    SetPropertyParIC &cache = ion->getCache(cacheIndex).toSetPropertyPar();
-
-    RootedValue v(cx, value);
-    RootedId id(cx, AtomToId(cache.name()));
-
-    if (!obj->isNative())
-        return false;
-    RootedNativeObject nobj(cx, &obj->as<NativeObject>());
-
-    // Avoid unnecessary locking if cannot attach stubs.
-    if (!cache.canAttachStub()) {
-        return baseops::SetPropertyHelper<ParallelExecution>(
-            cx, nobj, nobj, id, baseops::Qualified, &v, cache.strict());
-    }
-
-    SetPropertyIC::NativeSetPropCacheability canCache = SetPropertyIC::CanAttachNone;
-    bool attachedStub = false;
-
-    {
-        // See note about locking context in GetPropertyParIC::update.
-        LockedJSContext ncx(cx);
-
-        if (cache.canAttachStub()) {
-            bool alreadyStubbed;
-            if (!cache.hasOrAddStubbedShape(ncx, nobj->lastProperty(), &alreadyStubbed))
-                return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-            if (alreadyStubbed) {
-                return baseops::SetPropertyHelper<ParallelExecution>(
-                    cx, nobj, nobj, id, baseops::Qualified, &v, cache.strict());
-            }
-
-            // If the object has a lazy type, we need to de-lazify it, but
-            // this is not safe in parallel.
-            if (nobj->hasLazyType())
-                return false;
-
-            {
-                RootedShape shape(cx);
-                RootedNativeObject holder(cx);
-                bool checkTypeset;
-                canCache = CanAttachNativeSetProp(cx, nobj, id, cache.value(), cache.needsTypeBarrier(),
-                                                  &holder, &shape, &checkTypeset);
-
-                if (canCache == SetPropertyIC::CanAttachSetSlot) {
-                    if (!cache.attachSetSlot(ncx, ion, nobj, shape, checkTypeset))
-                        return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-                    attachedStub = true;
-                }
-            }
-        }
-    }
-
-    uint32_t oldSlots = nobj->numDynamicSlots();
-    RootedShape oldShape(cx, nobj->lastProperty());
-    RootedTypeObject oldType(cx, nobj->type());
-
-    if (!baseops::SetPropertyHelper<ParallelExecution>(cx, nobj, nobj, id, baseops::Qualified, &v,
-                                                       cache.strict()))
-    {
-        return false;
-    }
-
-    bool checkTypeset;
-    if (!attachedStub && canCache == SetPropertyIC::MaybeCanAttachAddSlot &&
-        IsPropertyAddInlineable(nobj, id,
-                                cache.value(), oldSlots, oldShape, cache.needsTypeBarrier(),
-                                &checkTypeset))
-    {
-        LockedJSContext ncx(cx);
-        if (cache.canAttachStub() && !cache.attachAddSlot(ncx, ion, nobj, oldShape, oldType, checkTypeset))
-            return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-    }
-
-    return true;
-}
-
-bool
-SetPropertyParIC::attachSetSlot(LockedJSContext &cx, IonScript *ion, HandleNativeObject obj,
-                                HandleShape shape, bool checkTypeset)
-{
-    MacroAssembler masm(cx, ion);
-    DispatchStubPrepender attacher(*this);
-    GenerateSetSlot(cx, masm, attacher, obj, shape, object(), value(), needsTypeBarrier(),
-                    checkTypeset);
-    return linkAndAttachStub(cx, masm, attacher, ion, "parallel setting");
-}
-
-bool
-SetPropertyParIC::attachAddSlot(LockedJSContext &cx, IonScript *ion, HandleNativeObject obj,
-                                HandleShape oldShape, HandleTypeObject oldType, bool checkTypeset)
-{
-    MOZ_ASSERT_IF(!needsTypeBarrier(), !checkTypeset);
-
-    MacroAssembler masm(cx, ion);
-    DispatchStubPrepender attacher(*this);
-    GenerateAddSlot(cx, masm, attacher, obj, oldShape, oldType, object(), value(), checkTypeset);
-    return linkAndAttachStub(cx, masm, attacher, ion, "parallel adding");
-}
-
 const size_t GetElementIC::MAX_FAILED_UPDATES = 16;
 
 /* static */ bool
 GetElementIC::canAttachGetProp(JSObject *obj, const Value &idval, jsid id)
 {
     uint32_t dummy;
     return obj->isNative() &&
            idval.isString() &&
@@ -3894,201 +3637,16 @@ SetElementIC::update(JSContext *cx, size
 void
 SetElementIC::reset()
 {
     RepatchIonCache::reset();
     hasDenseStub_ = false;
 }
 
 bool
-SetElementParIC::attachDenseElement(LockedJSContext &cx, IonScript *ion, HandleObject obj,
-                                    const Value &idval)
-{
-    MacroAssembler masm(cx, ion);
-    DispatchStubPrepender attacher(*this);
-    if (!GenerateSetDenseElement(cx, masm, attacher, obj, idval,
-                                 guardHoles(), object(), index(),
-                                 value(), tempToUnboxIndex(),
-                                 temp()))
-    {
-        return false;
-    }
-
-    const char *message = guardHoles()                     ?
-                            "parallel dense array (holes)" :
-                            "parallel dense array";
-
-    return linkAndAttachStub(cx, masm, attacher, ion, message);
-}
-
-bool
-SetElementParIC::attachTypedArrayElement(LockedJSContext &cx, IonScript *ion,
-                                         HandleObject tarr)
-{
-    MacroAssembler masm(cx, ion);
-    DispatchStubPrepender attacher(*this);
-    if (!GenerateSetTypedArrayElement(cx, masm, attacher, tarr,
-                                      object(), index(), value(),
-                                      tempToUnboxIndex(), temp(), tempDouble(), tempFloat32()))
-    {
-        return false;
-    }
-
-    return linkAndAttachStub(cx, masm, attacher, ion, "parallel typed array");
-}
-
-bool
-SetElementParIC::update(ForkJoinContext *cx, size_t cacheIndex, HandleObject obj,
-                        HandleValue idval, HandleValue value)
-{
-    IonScript *ion = GetTopJitJSScript(cx)->parallelIonScript();
-    SetElementParIC &cache = ion->getCache(cacheIndex).toSetElementPar();
-
-    // Avoid unnecessary locking if cannot attach stubs.
-    if (!cache.canAttachStub())
-        return SetElementPar(cx, obj, idval, value, cache.strict());
-
-    {
-        LockedJSContext ncx(cx);
-
-        if (cache.canAttachStub()) {
-            bool alreadyStubbed;
-            if (!cache.hasOrAddStubbedShape(ncx, obj->lastProperty(), &alreadyStubbed))
-                return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-            if (alreadyStubbed)
-                return SetElementPar(cx, obj, idval, value, cache.strict());
-
-            bool attachedStub = false;
-            if (IsDenseElementSetInlineable(obj, idval)) {
-                if (!cache.attachDenseElement(ncx, ion, obj, idval))
-                    return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-                attachedStub = true;
-            }
-            if (!attachedStub && IsTypedArrayElementSetInlineable(obj, idval, value)) {
-                if (!cache.attachTypedArrayElement(ncx, ion, obj))
-                    return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-            }
-        }
-    }
-
-    return SetElementPar(cx, obj, idval, value, cache.strict());
-}
-
-bool
-GetElementParIC::attachReadSlot(LockedJSContext &cx, IonScript *ion, HandleObject obj,
-                                const Value &idval, HandlePropertyName name, HandleNativeObject holder,
-                                HandleShape shape)
-{
-    MacroAssembler masm(cx, ion);
-    DispatchStubPrepender attacher(*this);
-
-    // Guard on the index value.
-    Label failures;
-    ValueOperand val = index().reg().valueReg();
-    masm.branchTestValue(Assembler::NotEqual, val, idval, &failures);
-
-    GenerateReadSlot(cx, ion, masm, attacher, obj, holder, shape, object(), output(),
-                     &failures);
-
-    return linkAndAttachStub(cx, masm, attacher, ion, "parallel getelem reading");
-}
-
-bool
-GetElementParIC::attachDenseElement(LockedJSContext &cx, IonScript *ion, HandleObject obj,
-                                    const Value &idval)
-{
-    MacroAssembler masm(cx, ion);
-    DispatchStubPrepender attacher(*this);
-    if (!GenerateDenseElement(cx, masm, attacher, obj, idval, object(), index(), output()))
-        return false;
-
-    return linkAndAttachStub(cx, masm, attacher, ion, "parallel dense element");
-}
-
-bool
-GetElementParIC::attachTypedArrayElement(LockedJSContext &cx, IonScript *ion,
-                                         HandleObject tarr, const Value &idval)
-{
-    MacroAssembler masm(cx, ion);
-    DispatchStubPrepender attacher(*this);
-    GenerateGetTypedArrayElement(cx, masm, attacher, tarr, idval, object(), index(), output(),
-                                 allowDoubleResult());
-    return linkAndAttachStub(cx, masm, attacher, ion, "parallel typed array");
-}
-
-bool
-GetElementParIC::update(ForkJoinContext *cx, size_t cacheIndex, HandleObject obj,
-                        HandleValue idval, MutableHandleValue vp)
-{
-    IonScript *ion = GetTopJitJSScript(cx)->parallelIonScript();
-    GetElementParIC &cache = ion->getCache(cacheIndex).toGetElementPar();
-
-    // Try to get the element early, as the pure path doesn't need a lock. If
-    // we can't do it purely, bail out of parallel execution.
-    if (!GetObjectElementOperationPure(cx, obj, idval, vp.address()))
-        return false;
-
-    // Avoid unnecessary locking if cannot attach stubs.
-    if (!cache.canAttachStub())
-        return true;
-
-    {
-        // See note about locking context in GetPropertyParIC::update.
-        LockedJSContext ncx(cx);
-
-        if (cache.canAttachStub()) {
-            bool alreadyStubbed;
-            if (!cache.hasOrAddStubbedShape(ncx, obj->lastProperty(), &alreadyStubbed))
-                return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-            if (alreadyStubbed)
-                return true;
-
-            jsid id;
-            if (!ValueToIdPure(idval, &id))
-                return false;
-
-            bool attachedStub = false;
-            if (cache.monitoredResult() &&
-                GetElementIC::canAttachGetProp(obj, idval, id))
-            {
-                RootedShape shape(ncx);
-                RootedNativeObject holder(ncx);
-                RootedPropertyName name(ncx, JSID_TO_ATOM(id)->asPropertyName());
-
-                GetPropertyIC::NativeGetPropCacheability canCache =
-                    CanAttachNativeGetProp(ncx, cache, obj, name, &holder, &shape);
-
-                if (canCache == GetPropertyIC::CanAttachReadSlot)
-                {
-                    if (!cache.attachReadSlot(ncx, ion, obj, idval, name, holder, shape))
-                        return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-                    attachedStub = true;
-                }
-            }
-            if (!attachedStub &&
-                GetElementIC::canAttachDenseElement(obj, idval))
-            {
-                if (!cache.attachDenseElement(ncx, ion, obj, idval))
-                    return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-                attachedStub = true;
-            }
-            if (!attachedStub &&
-                GetElementIC::canAttachTypedArrayElement(obj, idval, cache.output()))
-            {
-                if (!cache.attachTypedArrayElement(ncx, ion, obj, idval))
-                    return cx->setPendingAbortFatal(ParallelBailoutOutOfMemory);
-                attachedStub = true;
-            }
-        }
-    }
-
-    return true;
-}
-
-bool
 BindNameIC::attachGlobal(JSContext *cx, HandleScript outerScript, IonScript *ion,
                          HandleObject scopeChain)
 {
     MOZ_ASSERT(scopeChain->is<GlobalObject>());
 
     MacroAssembler masm(cx, ion, outerScript, profilerLeavePc_);
     RepatchStubAppender attacher(*this);
 
--- a/js/src/jit/IonCaches.h
+++ b/js/src/jit/IonCaches.h
@@ -26,21 +26,17 @@ class LInstruction;
 
 #define IONCACHE_KIND_LIST(_)                                   \
     _(GetProperty)                                              \
     _(SetProperty)                                              \
     _(GetElement)                                               \
     _(SetElement)                                               \
     _(BindName)                                                 \
     _(Name)                                                     \
-    _(CallsiteClone)                                            \
-    _(GetPropertyPar)                                           \
-    _(GetElementPar)                                            \
-    _(SetPropertyPar)                                           \
-    _(SetElementPar)
+    _(CallsiteClone)
 
 // Forward declarations of Cache kinds.
 #define FORWARD_DECLARE(kind) class kind##IC;
 IONCACHE_KIND_LIST(FORWARD_DECLARE)
 #undef FORWARD_DECLARE
 
 class IonCacheVisitor
 {
@@ -415,17 +411,18 @@ class RepatchIonCache : public IonCache
 // jump to the previous stub on failure conditions, then overwrite the
 // firstStub_ pointer with the newly generated stub.
 //
 // This style does not patch the already executing instruction stream, does
 // not need to worry about cache coherence of cached jump addresses, and does
 // not have to worry about aligning the exit jumps to ensure atomic patching,
 // at the expense of an extra memory read to load the very first stub.
 //
-// ICs that need to work in parallel execution need to be dispatch style.
+// ICs that needed to work in parallel execution had to be dispatch
+// style. Since PJS's removal, nothing uses this style of ICs yet.
 //
 //        Control flow               Pointers             Memory load
 //      =======#                 ----.     .---->         ******
 //             #                     |     |                   *
 //             #======>              \-----/                   *******
 //
 // Initial state:
 //
@@ -1074,294 +1071,16 @@ class CallsiteCloneIC : public RepatchIo
     }
 
     bool attach(JSContext *cx, HandleScript outerScript, IonScript *ion,
                 HandleFunction original, HandleFunction clone);
 
     static JSObject *update(JSContext *cx, size_t cacheIndex, HandleObject callee);
 };
 
-class ParallelIonCache : public DispatchIonCache
-{
-  protected:
-    // A set of all objects that are stubbed. Used to detect duplicates in
-    // parallel execution.
-    ShapeSet *stubbedShapes_;
-
-    ParallelIonCache()
-      : stubbedShapes_(nullptr)
-    {
-    }
-
-    bool initStubbedShapes(JSContext *cx);
-
-  public:
-    void reset();
-    void destroy();
-
-    bool hasOrAddStubbedShape(LockedJSContext &cx, Shape *shape, bool *alreadyStubbed);
-};
-
-class GetPropertyParIC : public ParallelIonCache
-{
-  protected:
-    Register object_;
-    PropertyName *name_;
-    TypedOrValueRegister output_;
-    bool hasTypedArrayLengthStub_ : 1;
-    bool hasSharedTypedArrayLengthStub_ : 1;
-
-   public:
-    GetPropertyParIC(Register object, PropertyName *name, TypedOrValueRegister output)
-      : object_(object),
-        name_(name),
-        output_(output),
-        hasTypedArrayLengthStub_(false),
-        hasSharedTypedArrayLengthStub_(false)
-    {
-    }
-
-    CACHE_HEADER(GetPropertyPar)
-
-#ifdef JS_CODEGEN_X86
-    // x86 lacks a general purpose scratch register for dispatch caches and
-    // must be given one manually.
-    void initializeAddCacheState(LInstruction *ins, AddCacheState *addState);
-#endif
-
-    void reset();
-
-    Register object() const {
-        return object_;
-    }
-    PropertyName *name() const {
-        return name_;
-    }
-    TypedOrValueRegister output() const {
-        return output_;
-    }
-    bool hasAnyTypedArrayLengthStub(HandleObject obj) const {
-        return obj->is<TypedArrayObject>() ? hasTypedArrayLengthStub_ : hasSharedTypedArrayLengthStub_;
-    }
-
-    void setHasTypedArrayLengthStub(HandleObject obj) {
-        if (obj->is<TypedArrayObject>()) {
-            MOZ_ASSERT(!hasTypedArrayLengthStub_);
-            hasTypedArrayLengthStub_ = true;
-        } else {
-            MOZ_ASSERT(!hasSharedTypedArrayLengthStub_);
-            hasSharedTypedArrayLengthStub_ = true;
-        }
-    }
-
-    // CanAttachNativeGetProp Helpers
-    typedef LockedJSContext & Context;
-    bool canMonitorSingletonUndefinedSlot(HandleObject, HandleShape) const { return true; }
-    bool allowGetters() const { return false; }
-    bool allowArrayLength(Context, HandleObject) const { return true; }
-
-    bool attachReadSlot(LockedJSContext &cx, IonScript *ion,
-                        HandleObject obj, HandleNativeObject holder,
-                        HandleShape shape);
-    bool attachArrayLength(LockedJSContext &cx, IonScript *ion, HandleObject obj);
-    bool attachTypedArrayLength(LockedJSContext &cx, IonScript *ion, HandleObject obj);
-
-    static bool update(ForkJoinContext *cx, size_t cacheIndex, HandleObject obj,
-                       MutableHandleValue vp);
-};
-
-class GetElementParIC : public ParallelIonCache
-{
-  protected:
-    Register object_;
-    ConstantOrRegister index_;
-    TypedOrValueRegister output_;
-
-    bool monitoredResult_ : 1;
-    bool allowDoubleResult_ : 1;
-
-  public:
-    GetElementParIC(Register object, ConstantOrRegister index,
-                    TypedOrValueRegister output, bool monitoredResult, bool allowDoubleResult)
-      : object_(object),
-        index_(index),
-        output_(output),
-        monitoredResult_(monitoredResult),
-        allowDoubleResult_(allowDoubleResult)
-    {
-    }
-
-    CACHE_HEADER(GetElementPar)
-
-#ifdef JS_CODEGEN_X86
-    // x86 lacks a general purpose scratch register for dispatch caches and
-    // must be given one manually.
-    void initializeAddCacheState(LInstruction *ins, AddCacheState *addState);
-#endif
-
-    Register object() const {
-        return object_;
-    }
-    ConstantOrRegister index() const {
-        return index_;
-    }
-    TypedOrValueRegister output() const {
-        return output_;
-    }
-    bool monitoredResult() const {
-        return monitoredResult_;
-    }
-    bool allowDoubleResult() const {
-        return allowDoubleResult_;
-    }
-
-    // CanAttachNativeGetProp Helpers
-    typedef LockedJSContext & Context;
-    bool canMonitorSingletonUndefinedSlot(HandleObject, HandleShape) const { return true; }
-    bool allowGetters() const { return false; }
-    bool allowArrayLength(Context, HandleObject) const { return false; }
-
-    bool attachReadSlot(LockedJSContext &cx, IonScript *ion, HandleObject obj, const Value &idval,
-                        HandlePropertyName name, HandleNativeObject holder, HandleShape shape);
-    bool attachDenseElement(LockedJSContext &cx, IonScript *ion, HandleObject obj,
-                            const Value &idval);
-    bool attachTypedArrayElement(LockedJSContext &cx, IonScript *ion, HandleObject tarr,
-                                 const Value &idval);
-
-    static bool update(ForkJoinContext *cx, size_t cacheIndex, HandleObject obj, HandleValue idval,
-                       MutableHandleValue vp);
-
-};
-
-class SetPropertyParIC : public ParallelIonCache
-{
-  protected:
-    Register object_;
-    PropertyName *name_;
-    ConstantOrRegister value_;
-    bool strict_;
-    bool needsTypeBarrier_;
-
-  public:
-    SetPropertyParIC(Register object, PropertyName *name, ConstantOrRegister value,
-                     bool strict, bool needsTypeBarrier)
-      : object_(object),
-        name_(name),
-        value_(value),
-        strict_(strict),
-        needsTypeBarrier_(needsTypeBarrier)
-    {
-    }
-
-    CACHE_HEADER(SetPropertyPar)
-
-#ifdef JS_CODEGEN_X86
-    // x86 lacks a general purpose scratch register for dispatch caches and
-    // must be given one manually.
-    void initializeAddCacheState(LInstruction *ins, AddCacheState *addState);
-#endif
-
-    Register object() const {
-        return object_;
-    }
-    PropertyName *name() const {
-        return name_;
-    }
-    ConstantOrRegister value() const {
-        return value_;
-    }
-    bool strict() const {
-        return strict_;
-    }
-    bool needsTypeBarrier() const {
-        return needsTypeBarrier_;
-    }
-
-    bool attachSetSlot(LockedJSContext &cx, IonScript *ion, HandleNativeObject obj, HandleShape shape,
-                       bool checkTypeset);
-    bool attachAddSlot(LockedJSContext &cx, IonScript *ion, HandleNativeObject obj,
-                       HandleShape oldShape, HandleTypeObject oldType, bool checkTypeset);
-
-    static bool update(ForkJoinContext *cx, size_t cacheIndex, HandleObject obj,
-                       HandleValue value);
-};
-
-class SetElementParIC : public ParallelIonCache
-{
-  protected:
-    Register object_;
-    Register tempToUnboxIndex_;
-    Register temp_;
-    FloatRegister tempDouble_;
-    FloatRegister tempFloat32_;
-    ValueOperand index_;
-    ConstantOrRegister value_;
-    bool strict_;
-    bool guardHoles_;
-
-  public:
-    SetElementParIC(Register object, Register tempToUnboxIndex, Register temp,
-                    FloatRegister tempDouble, FloatRegister tempFloat32, ValueOperand index, ConstantOrRegister value,
-                    bool strict, bool guardHoles)
-      : object_(object),
-        tempToUnboxIndex_(tempToUnboxIndex),
-        temp_(temp),
-        tempDouble_(tempDouble),
-        tempFloat32_(tempFloat32),
-        index_(index),
-        value_(value),
-        strict_(strict),
-        guardHoles_(guardHoles)
-    {
-    }
-
-    CACHE_HEADER(SetElementPar)
-
-#ifdef JS_CODEGEN_X86
-    // x86 lacks a general purpose scratch register for dispatch caches and
-    // must be given one manually.
-    void initializeAddCacheState(LInstruction *ins, AddCacheState *addState);
-#endif
-
-    Register object() const {
-        return object_;
-    }
-    Register tempToUnboxIndex() const {
-        return tempToUnboxIndex_;
-    }
-    Register temp() const {
-        return temp_;
-    }
-    FloatRegister tempDouble() const {
-        return tempDouble_;
-    }
-    FloatRegister tempFloat32() const {
-        return tempFloat32_;
-    }
-    ValueOperand index() const {
-        return index_;
-    }
-    ConstantOrRegister value() const {
-        return value_;
-    }
-    bool strict() const {
-        return strict_;
-    }
-    bool guardHoles() const {
-        return guardHoles_;
-    }
-
-    bool attachDenseElement(LockedJSContext &cx, IonScript *ion, HandleObject obj,
-                            const Value &idval);
-    bool attachTypedArrayElement(LockedJSContext &cx, IonScript *ion, HandleObject tarr);
-
-    static bool update(ForkJoinContext *cx, size_t cacheIndex, HandleObject obj,
-                       HandleValue idval, HandleValue value);
-};
-
 #undef CACHE_HEADER
 
 // Implement cache casts now that the compiler can see the inheritance.
 #define CACHE_CASTS(ickind)                                             \
     ickind##IC &IonCache::to##ickind()                                  \
     {                                                                   \
         MOZ_ASSERT(is##ickind());                                       \
         return *static_cast<ickind##IC *>(this);                        \
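
As an aside on the dispatch-style caches described in the RepatchIonCache/DispatchIonCache comment above: the sketch below is a minimal, standalone C++ model of that stub-chaining idea, not SpiderMonkey's MacroAssembler-generated stubs. DispatchCacheSketch, Stub, guardKey and cachedValue are invented names; the point is only that attaching prepends a stub whose failure path is the previous first stub, and that every lookup pays one extra load of firstStub_ instead of patching live code.

#include <cstdio>
#include <memory>
#include <vector>

struct Stub {
    int guardKey;      // shape/kind this stub was specialized for (hypothetical)
    int cachedValue;   // what the stub produces on a guard hit
    Stub *onFailure;   // jump target when the guard fails: the previous first stub
};

class DispatchCacheSketch {
    Stub *firstStub_ = nullptr;                 // the one pointer loaded on every use
    std::vector<std::unique_ptr<Stub>> stubs_;  // owns the generated stubs

  public:
    void attachStub(int guardKey, int cachedValue) {
        stubs_.push_back(std::unique_ptr<Stub>(new Stub{guardKey, cachedValue, firstStub_}));
        firstStub_ = stubs_.back().get();       // overwrite the dispatch pointer last
    }

    // True on a stub hit; false means "fall back to the generic VM path".
    bool lookup(int key, int *out) const {
        for (Stub *s = firstStub_; s; s = s->onFailure) {  // extra load, then walk the chain
            if (s->guardKey == key) {
                *out = s->cachedValue;
                return true;
            }
        }
        return false;
    }
};

int main() {
    DispatchCacheSketch cache;
    cache.attachStub(1, 100);
    cache.attachStub(2, 200);   // becomes the new first stub; its failure path is stub 1

    int value = 0;
    bool hit = cache.lookup(1, &value);
    std::printf("key 1: hit=%d value=%d\n", int(hit), value);
    std::printf("key 3: hit=%d (generic path)\n", int(cache.lookup(3, &value)));
    return 0;
}
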
--- a/js/src/jit/IonCode.h
+++ b/js/src/jit/IonCode.h
@@ -251,22 +251,16 @@ struct IonScript
     // List of instructions needed to recover stack frames.
     uint32_t recovers_;
     uint32_t recoversSize_;
 
     // Constant table for constants stored in snapshots.
     uint32_t constantTable_;
     uint32_t constantEntries_;
 
-    // List of scripts that we call.
-    //
-    // Currently this is only non-nullptr for parallel IonScripts.
-    uint32_t callTargetList_;
-    uint32_t callTargetEntries_;
-
     // List of patchable backedges which are threaded into the runtime's list.
     uint32_t backedgeList_;
     uint32_t backedgeEntries_;
 
     // Number of references from invalidation records.
     uint32_t invalidationCount_;
 
     // If this is a parallel script, the number of major GC collections it has
@@ -328,19 +322,16 @@ struct IonScript
         return (OsiIndex *) &bottomBuffer()[osiIndexOffset_];
     }
     uint32_t *cacheIndex() {
         return (uint32_t *) &bottomBuffer()[cacheIndex_];
     }
     uint8_t *runtimeData() {
         return  &bottomBuffer()[runtimeData_];
     }
-    JSScript **callTargetList() {
-        return (JSScript **) &bottomBuffer()[callTargetList_];
-    }
     PatchableBackedge *backedgeList() {
         return (PatchableBackedge *) &bottomBuffer()[backedgeList_];
     }
 
   private:
     void trace(JSTracer *trc);
 
   public:
@@ -349,18 +340,17 @@ struct IonScript
 
     static IonScript *New(JSContext *cx, types::RecompileInfo recompileInfo,
                           uint32_t frameLocals, uint32_t frameSize,
                           size_t snapshotsListSize, size_t snapshotsRVATableSize,
                           size_t recoversSize, size_t bailoutEntries,
                           size_t constants, size_t safepointIndexEntries,
                           size_t osiIndexEntries, size_t cacheEntries,
                           size_t runtimeSize, size_t safepointsSize,
-                          size_t callTargetEntries, size_t backedgeEntries,
-                          OptimizationLevel optimizationLevel);
+                          size_t backedgeEntries, OptimizationLevel optimizationLevel);
     static void Trace(JSTracer *trc, IonScript *script);
     static void Destroy(FreeOp *fop, IonScript *script);
 
     static inline size_t offsetOfMethod() {
         return offsetof(IonScript, method_);
     }
     static inline size_t offsetOfOsrEntryOffset() {
         return offsetof(IonScript, osrEntryOffset_);
@@ -485,19 +475,16 @@ struct IonScript
         return recoversSize_;
     }
     const uint8_t *safepoints() const {
         return reinterpret_cast<const uint8_t *>(this) + safepointsStart_;
     }
     size_t safepointsSize() const {
         return safepointsSize_;
     }
-    size_t callTargetEntries() const {
-        return callTargetEntries_;
-    }
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
         return mallocSizeOf(this);
     }
     PreBarrieredValue &getConstant(size_t index) {
         MOZ_ASSERT(index < numConstants());
         return constants()[index];
     }
     size_t numConstants() const {
@@ -547,17 +534,16 @@ struct IonScript
     void copyRecovers(const RecoverWriter *writer);
     void copyBailoutTable(const SnapshotOffset *table);
     void copyConstants(const Value *vp);
     void copySafepointIndices(const SafepointIndex *firstSafepointIndex, MacroAssembler &masm);
     void copyOsiIndices(const OsiIndex *firstOsiIndex, MacroAssembler &masm);
     void copyRuntimeData(const uint8_t *data);
     void copyCacheEntries(const uint32_t *caches, MacroAssembler &masm);
     void copySafepoints(const SafepointWriter *writer);
-    void copyCallTargetEntries(JSScript **callTargets);
     void copyPatchableBackedges(JSContext *cx, JitCode *code,
                                 PatchableBackedgeInfo *backedges,
                                 MacroAssembler &masm);
 
     bool invalidated() const {
         return invalidationCount_ != 0;
     }
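
The offset fields and casts above (runtimeData(), cacheIndex(), backedgeList(), and the removed callTargetList()) all index into a single trailing allocation behind the IonScript header. The following is a hypothetical, simplified sketch of that layout pattern; TrailingScript and its members are invented, it uses plain malloc rather than the engine's allocators, and it only mirrors the offset-into-bottomBuffer idea.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <new>

struct TrailingScript {
    uint32_t constantsOffset_;
    uint32_t constantEntries_;
    uint32_t cacheIndexOffset_;
    uint32_t cacheEntries_;

    // Everything variable-length lives immediately after the header.
    uint8_t *bottomBuffer() { return reinterpret_cast<uint8_t *>(this + 1); }
    double *constants() { return reinterpret_cast<double *>(&bottomBuffer()[constantsOffset_]); }
    uint32_t *cacheIndex() { return reinterpret_cast<uint32_t *>(&bottomBuffer()[cacheIndexOffset_]); }

    static TrailingScript *New(uint32_t constantEntries, uint32_t cacheEntries) {
        size_t constantsSize = constantEntries * sizeof(double);
        size_t cachesSize = cacheEntries * sizeof(uint32_t);
        void *raw = std::malloc(sizeof(TrailingScript) + constantsSize + cachesSize);
        if (!raw)
            return nullptr;
        TrailingScript *script = new (raw) TrailingScript();
        script->constantsOffset_ = 0;                  // doubles first, for alignment
        script->constantEntries_ = constantEntries;
        script->cacheIndexOffset_ = uint32_t(constantsSize);
        script->cacheEntries_ = cacheEntries;
        return script;
    }
};

int main() {
    TrailingScript *script = TrailingScript::New(2, 3);
    script->constants()[0] = 3.14;
    script->cacheIndex()[2] = 42;
    std::printf("constant[0]=%g cacheIndex[2]=%u\n", script->constants()[0], script->cacheIndex()[2]);
    std::free(script);
    return 0;
}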
 
--- a/js/src/jit/IonTypes.h
+++ b/js/src/jit/IonTypes.h
@@ -376,18 +376,17 @@ enum MIRType
     MIRType_Value,
     MIRType_ObjectOrNull,
     MIRType_None,                      // Invalid, used as a placeholder.
     MIRType_Slots,                     // A slots vector
     MIRType_Elements,                  // An elements vector
     MIRType_Pointer,                   // An opaque pointer that receives no special treatment
     MIRType_Shape,                     // A Shape pointer.
     MIRType_TypeObject,                // A TypeObject pointer.
-    MIRType_ForkJoinContext,           // js::ForkJoinContext*
-    MIRType_Last = MIRType_ForkJoinContext,
+    MIRType_Last = MIRType_TypeObject,
     MIRType_Float32x4 = MIRType_Float32 | (2 << VECTOR_SCALE_SHIFT),
     MIRType_Int32x4   = MIRType_Int32   | (2 << VECTOR_SCALE_SHIFT),
     MIRType_Doublex2  = MIRType_Double  | (1 << VECTOR_SCALE_SHIFT)
 };
 
 static inline MIRType
 MIRTypeFromValueType(JSValueType type)
 {
@@ -491,18 +490,16 @@ StringFromMIRType(MIRType type)
     case MIRType_None:
       return "None";
     case MIRType_Slots:
       return "Slots";
     case MIRType_Elements:
       return "Elements";
     case MIRType_Pointer:
       return "Pointer";
-    case MIRType_ForkJoinContext:
-      return "ForkJoinContext";
     case MIRType_Int32x4:
       return "Int32x4";
     case MIRType_Float32x4:
       return "Float32x4";
     default:
       MOZ_CRASH("Unknown MIRType.");
   }
 }
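
The SIMD entries above pack a lane-count exponent into the scalar type via VECTOR_SCALE_SHIFT. The standalone sketch below assumes a made-up shift value and enum layout purely to illustrate the bit-packing and the StringFromMIRType-style switch; the real constants and enumerators in jit/IonTypes.h differ.

#include <cstdio>

static const unsigned VECTOR_SCALE_SHIFT = 4;          // assumed width; the real shift differs
static const unsigned VECTOR_SCALE_MASK = 0x3u << VECTOR_SCALE_SHIFT;

enum MiniMIRType {
    Mini_Int32   = 1,
    Mini_Float32 = 2,
    Mini_Double  = 3,
    Mini_Int32x4   = Mini_Int32   | (2 << VECTOR_SCALE_SHIFT),  // 2^2 = 4 lanes
    Mini_Float32x4 = Mini_Float32 | (2 << VECTOR_SCALE_SHIFT),
    Mini_Doublex2  = Mini_Double  | (1 << VECTOR_SCALE_SHIFT),  // 2^1 = 2 lanes
};

static MiniMIRType ScalarType(MiniMIRType t) {
    return MiniMIRType(t & ~VECTOR_SCALE_MASK);        // strip the scale bits
}

static unsigned LaneCount(MiniMIRType t) {
    return 1u << ((t & VECTOR_SCALE_MASK) >> VECTOR_SCALE_SHIFT);
}

static const char *StringFromMiniMIRType(MiniMIRType t) {
    switch (t) {
      case Mini_Int32:     return "Int32";
      case Mini_Float32:   return "Float32";
      case Mini_Double:    return "Double";
      case Mini_Int32x4:   return "Int32x4";
      case Mini_Float32x4: return "Float32x4";
      case Mini_Doublex2:  return "Doublex2";
      default:             return "Unknown";
    }
}

int main() {
    MiniMIRType t = Mini_Float32x4;
    std::printf("%s: scalar=%s lanes=%u\n",
                StringFromMiniMIRType(t), StringFromMiniMIRType(ScalarType(t)), LaneCount(t));
    return 0;
}
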
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -149,44 +149,37 @@ class JitRuntime
     // All accesses on this allocator must be protected by the runtime's
     // interrupt lock, as the executable memory may be protected() when
     // requesting an interrupt to force a fault in the Ion code and avoid the
     // need for explicit interrupt checks.
     ExecutableAllocator *ionAlloc_;
 
     // Shared exception-handler tail.
     JitCode *exceptionTail_;
-    JitCode *exceptionTailParallel_;
 
     // Shared post-bailout-handler tail.
     JitCode *bailoutTail_;
 
     // Trampoline for entering JIT code. Contains OSR prologue.
     JitCode *enterJIT_;
 
     // Trampoline for entering baseline JIT code.
     JitCode *enterBaselineJIT_;
 
     // Vector mapping frame class sizes to bailout tables.
     Vector<JitCode*, 4, SystemAllocPolicy> bailoutTables_;
 
     // Generic bailout table; used if the bailout table overflows.
     JitCode *bailoutHandler_;
 
-    // Bailout handler for parallel execution.
-    JitCode *parallelBailoutHandler_;
-
     // Argument-rectifying thunk, in the case of insufficient arguments passed
     // to a function call site.
     JitCode *argumentsRectifier_;
     void *argumentsRectifierReturnAddr_;
 
-    // Arguments-rectifying thunk which loads |parallelIon| instead of |ion|.
-    JitCode *parallelArgumentsRectifier_;
-
     // Thunk that invalidates an (Ion compiled) caller on the Ion stack.
     JitCode *invalidator_;
 
     // Thunk that calls the GC pre barrier.
     JitCode *valuePreBarrier_;
     JitCode *stringPreBarrier_;
     JitCode *objectPreBarrier_;
     JitCode *shapePreBarrier_;
@@ -197,19 +190,16 @@ class JitRuntime
     JitCode *freeStub_;
 
     // Thunk called to finish compilation of an IonScript.
     JitCode *lazyLinkStub_;
 
     // Thunk used by the debugger for breakpoint and step mode.
     JitCode *debugTrapHandler_;
 
-    // Stub used to inline the ForkJoinGetSlice intrinsic.
-    JitCode *forkJoinGetSliceStub_;
-
     // Thunk used to fix up on-stack recompile of baseline scripts.
     JitCode *baselineDebugModeOSRHandler_;
     void *baselineDebugModeOSRHandlerNoFrameRegPopAddr_;
 
     // Map VMFunction addresses to the JitCode of the wrapper.
     typedef WeakCache<const VMFunction *, JitCode *> VMWrapperMap;
     VMWrapperMap *functionWrappers_;
 
@@ -242,25 +232,24 @@ class JitRuntime
     // Global table of jitcode native address => bytecode address mappings.
     JitcodeGlobalTable *jitcodeGlobalTable_;
 
   private:
     JitCode *generateLazyLinkStub(JSContext *cx);
     JitCode *generateExceptionTailStub(JSContext *cx, void *handler);
     JitCode *generateBailoutTailStub(JSContext *cx);
     JitCode *generateEnterJIT(JSContext *cx, EnterJitType type);
-    JitCode *generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void **returnAddrOut);
+    JitCode *generateArgumentsRectifier(JSContext *cx, void **returnAddrOut);
     JitCode *generateBailoutTable(JSContext *cx, uint32_t frameClass);
-    JitCode *generateBailoutHandler(JSContext *cx, ExecutionMode mode);
+    JitCode *generateBailoutHandler(JSContext *cx);
     JitCode *generateInvalidator(JSContext *cx);
     JitCode *generatePreBarrier(JSContext *cx, MIRType type);
     JitCode *generateMallocStub(JSContext *cx);
     JitCode *generateFreeStub(JSContext *cx);
     JitCode *generateDebugTrapHandler(JSContext *cx);
-    JitCode *generateForkJoinGetSliceStub(JSContext *cx);
     JitCode *generateBaselineDebugModeOSRHandler(JSContext *cx, uint32_t *noFrameRegPopOffsetOut);
     JitCode *generateVMWrapper(JSContext *cx, const VMFunction &f);
 
     ExecutableAllocator *createIonAlloc(JSContext *cx);
 
   public:
     JitRuntime();
     ~JitRuntime();
@@ -317,43 +306,32 @@ class JitRuntime
 
     void patchIonBackedges(JSRuntime *rt, BackedgeTarget target);
 
     JitCode *getVMWrapper(const VMFunction &f) const;
     JitCode *debugTrapHandler(JSContext *cx);
     JitCode *getBaselineDebugModeOSRHandler(JSContext *cx);
     void *getBaselineDebugModeOSRHandlerAddress(JSContext *cx, bool popFrameReg);
 
-    JitCode *getGenericBailoutHandler(ExecutionMode mode) const {
-        switch (mode) {
-          case SequentialExecution: return bailoutHandler_;
-          case ParallelExecution:   return parallelBailoutHandler_;
-          default:                  MOZ_CRASH("No such execution mode");
-        }
+    JitCode *getGenericBailoutHandler() const {
+        return bailoutHandler_;
     }
 
     JitCode *getExceptionTail() const {
         return exceptionTail_;
     }
-    JitCode *getExceptionTailParallel() const {
-        return exceptionTailParallel_;
-    }
 
     JitCode *getBailoutTail() const {
         return bailoutTail_;
     }
 
     JitCode *getBailoutTable(const FrameSizeClass &frameClass) const;
 
-    JitCode *getArgumentsRectifier(ExecutionMode mode) const {
-        switch (mode) {
-          case SequentialExecution: return argumentsRectifier_;
-          case ParallelExecution:   return parallelArgumentsRectifier_;
-          default:                  MOZ_CRASH("No such execution mode");
-        }
+    JitCode *getArgumentsRectifier() const {
+        return argumentsRectifier_;
     }
 
     void *getArgumentsRectifierReturnAddr() const {
         return argumentsRectifierReturnAddr_;
     }
 
     JitCode *getInvalidationThunk() const {
         return invalidator_;
@@ -385,21 +363,16 @@ class JitRuntime
     JitCode *freeStub() const {
         return freeStub_;
     }
 
     JitCode *lazyLinkStub() const {
         return lazyLinkStub_;
     }
 
-    bool ensureForkJoinGetSliceStubExists(JSContext *cx);
-    JitCode *forkJoinGetSliceStub() const {
-        return forkJoinGetSliceStub_;
-    }
-
     bool hasIonReturnOverride() const {
         return !ionReturnOverride_.isMagic(JS_ARG_POISON);
     }
     js::Value takeIonReturnOverride() {
         js::Value v = ionReturnOverride_;
         ionReturnOverride_ = js::MagicValue(JS_ARG_POISON);
         return v;
     }
@@ -454,28 +427,27 @@ class JitCompartment
 
     // Stubs to concatenate two strings inline, or perform RegExp calls inline.
     // These bake in zone and compartment specific pointers and can't be stored
     // in JitRuntime. These are weak pointers, but are not declared as
     // ReadBarriered since they are only read from during Ion compilation,
     // which may occur off thread and whose barriers are captured during
     // CodeGenerator::link.
     JitCode *stringConcatStub_;
-    JitCode *parallelStringConcatStub_;
     JitCode *regExpExecStub_;
     JitCode *regExpTestStub_;
 
     // Set of JSScripts invoked by ForkJoin (i.e. the entry script). These
     // scripts are marked if their respective parallel IonScripts' age is less
     // than a certain amount. See IonScript::parallelAge_.
     typedef HashSet<PreBarrieredScript, DefaultHasher<PreBarrieredScript>, SystemAllocPolicy>
         ScriptSet;
     ScriptSet *activeParallelEntryScripts_;
 
-    JitCode *generateStringConcatStub(JSContext *cx, ExecutionMode mode);
+    JitCode *generateStringConcatStub(JSContext *cx);
     JitCode *generateRegExpExecStub(JSContext *cx);
     JitCode *generateRegExpTestStub(JSContext *cx);
 
   public:
     JitCode *getStubCode(uint32_t key) {
         ICStubCodeMap::AddPtr p = stubCodes_->lookupForAdd(key);
         if (p)
             return p->value();
@@ -528,22 +500,18 @@ class JitCompartment
     bool initialize(JSContext *cx);
 
     // Initialize code stubs only used by Ion, not Baseline.
     bool ensureIonStubsExist(JSContext *cx);
 
     void mark(JSTracer *trc, JSCompartment *compartment);
     void sweep(FreeOp *fop, JSCompartment *compartment);
 
-    JitCode *stringConcatStubNoBarrier(ExecutionMode mode) const {
-        switch (mode) {
-          case SequentialExecution: return stringConcatStub_;
-          case ParallelExecution:   return parallelStringConcatStub_;
-          default:                  MOZ_CRASH("No such execution mode");
-        }
+    JitCode *stringConcatStubNoBarrier() const {
+        return stringConcatStub_;
     }
 
     JitCode *regExpExecStubNoBarrier() const {
         return regExpExecStub_;
     }
 
     bool ensureRegExpExecStubExists(JSContext *cx) {
         if (regExpExecStub_)
@@ -561,17 +529,16 @@ class JitCompartment
             return true;
         regExpTestStub_ = generateRegExpTestStub(cx);
         return regExpTestStub_ != nullptr;
     }
 };
 
 // Called from JSCompartment::discardJitCode().
 void InvalidateAll(FreeOp *fop, JS::Zone *zone);
-template <ExecutionMode mode>
 void FinishInvalidation(FreeOp *fop, JSScript *script);
 
 // On windows systems, really large frames need to be incrementally touched.
 // The following constant defines the minimum increment of the touch.
 #ifdef XP_WIN
 const unsigned WINDOWS_BIG_FRAME_TOUCH_INCREMENT = 4096 - 1;
 #endif
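
The ensure*StubExists() / *StubNoBarrier() pairs above follow a simple generate-on-first-use pattern, which this patch keeps but strips of its ExecutionMode switch. Below is a minimal sketch of that pattern under invented names (LazyStubCache, FakeStubCode); generateStub stands in for the real MacroAssembler code generation, and no GC barriers are modeled.

#include <cstdio>
#include <memory>

struct FakeStubCode {
    const char *name;   // stand-in for real generated JitCode
};

class LazyStubCache {
    std::unique_ptr<FakeStubCode> regExpExecStub_;

    static std::unique_ptr<FakeStubCode> generateStub(const char *name) {
        std::printf("generating %s\n", name);   // the expensive step; runs at most once
        return std::unique_ptr<FakeStubCode>(new FakeStubCode{name});
    }

  public:
    bool ensureRegExpExecStubExists() {
        if (regExpExecStub_)
            return true;                        // already generated, nothing to do
        regExpExecStub_ = generateStub("regExpExecStub");
        return regExpExecStub_ != nullptr;
    }

    // Mirrors the *NoBarrier() accessors: just hands back the cached pointer.
    FakeStubCode *regExpExecStubNoBarrier() const { return regExpExecStub_.get(); }
};

int main() {
    LazyStubCache cache;
    cache.ensureRegExpExecStubExists();   // generates
    cache.ensureRegExpExecStubExists();   // cached; no second "generating" line
    std::printf("stub: %s\n", cache.regExpExecStubNoBarrier()->name);
    return 0;
}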
 
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -16,17 +16,16 @@
 #include "jit/BaselineFrame.h"
 #include "jit/BaselineIC.h"
 #include "jit/BaselineJIT.h"
 #include "jit/Ion.h"
 #include "jit/JitcodeMap.h"
 #include "jit/JitCompartment.h"
 #include "jit/JitSpewer.h"
 #include "jit/MacroAssembler.h"
-#include "jit/ParallelFunctions.h"
 #include "jit/PcScriptCache.h"
 #include "jit/Recover.h"
 #include "jit/Safepoints.h"
 #include "jit/Snapshots.h"
 #include "jit/VMFunctions.h"
 #include "vm/ArgumentsObject.h"
 #include "vm/Debugger.h"
 #include "vm/ForkJoin.h"
@@ -868,39 +867,16 @@ HandleException(ResumeFromException *rfe
             js_ReportOverRecursed(cx);
         }
     }
 
     rfe->stackPointer = iter.fp();
 }
 
 void
-HandleParallelFailure(ResumeFromException *rfe)
-{
-    parallel::Spew(parallel::SpewBailouts, "Bailing from VM reentry");
-
-    ForkJoinContext *cx = ForkJoinContext::current();
-    JitFrameIterator frameIter(cx);
-
-    // Advance to the first Ion frame so we can pull out the BailoutKind.
-    while (!frameIter.isIonJS())
-        ++frameIter;
-    SnapshotIterator snapIter(frameIter);
-
-    cx->bailoutRecord->setIonBailoutKind(snapIter.bailoutKind());
-    while (!frameIter.done())
-        ++frameIter;
-
-    rfe->kind = ResumeFromException::RESUME_ENTRY_FRAME;
-
-    MOZ_ASSERT(frameIter.done());
-    rfe->stackPointer = frameIter.fp();
-}
-
-void
 EnsureExitFrame(CommonFrameLayout *frame)
 {
     if (frame->prevType() == JitFrame_Unwound_IonJS ||
         frame->prevType() == JitFrame_Unwound_BaselineJS ||
         frame->prevType() == JitFrame_Unwound_BaselineStub ||
         frame->prevType() == JitFrame_Unwound_Rectifier)
     {
         // Already an exit frame, nothing to do.
--- a/js/src/jit/JitFrames.h
+++ b/js/src/jit/JitFrames.h
@@ -264,17 +264,16 @@ struct ResumeFromException
 
     // Value to push when resuming into a |finally| block.
     Value exception;
 
     BaselineBailoutInfo *bailoutInfo;
 };
 
 void HandleException(ResumeFromException *rfe);
-void HandleParallelFailure(ResumeFromException *rfe);
 
 void EnsureExitFrame(CommonFrameLayout *frame);
 
 void MarkJitActivations(PerThreadData *ptd, JSTracer *trc);
 void MarkIonCompilerRoots(JSTracer *trc);
 
 JSCompartment *
 TopmostIonActivationCompartment(JSRuntime *rt);
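
For reference, the removed HandleParallelFailure above walked the JitFrameIterator to the first Ion frame to record the bailout kind and then unwound to the entry frame, whose fp became the resume stack pointer. The toy model below mirrors only that control flow; FrameKind, ToyFrame and ToyFrameIter are invented stand-ins for the real frame iterator, not SpiderMonkey APIs.

#include <cstdint>
#include <cstdio>
#include <vector>

enum class FrameKind { IonJS, BaselineJS, Exit, Entry };

struct ToyFrame {
    FrameKind kind;
    uintptr_t fp;
};

class ToyFrameIter {
    const std::vector<ToyFrame> &frames_;
    size_t index_ = 0;

  public:
    explicit ToyFrameIter(const std::vector<ToyFrame> &frames) : frames_(frames) {}
    bool done() const { return frames_[index_].kind == FrameKind::Entry; }
    bool isIonJS() const { return frames_[index_].kind == FrameKind::IonJS; }
    uintptr_t fp() const { return frames_[index_].fp; }
    ToyFrameIter &operator++() { ++index_; return *this; }
};

int main() {
    // Innermost frame first, entry frame last, mirroring a JIT activation.
    std::vector<ToyFrame> activation = {
        {FrameKind::Exit, 0x1000}, {FrameKind::IonJS, 0x1040},
        {FrameKind::BaselineJS, 0x10c0}, {FrameKind::Entry, 0x1100},
    };

    ToyFrameIter iter(activation);
    while (!iter.isIonJS())            // skip to the first Ion frame (for the bailout kind)
        ++iter;
    while (!iter.done())               // then unwind the whole activation
        ++iter;
    std::printf("resume at entry frame fp=0x%lx\n", static_cast<unsigned long>(iter.fp()));
    return 0;
}
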
--- a/js/src/jit/LIR-Common.h
+++ b/js/src/jit/LIR-Common.h
@@ -796,84 +796,16 @@ class LNewTypedObject : public LInstruct
         return getTemp(0);
     }
 
     MNewTypedObject *mir() const {
         return mir_->toNewTypedObject();
     }
 };
 
-class LNewPar : public LInstructionHelper<1, 1, 2>
-{
-  public:
-    LIR_HEADER(NewPar);
-
-    LNewPar(const LAllocation &cx, const LDefinition &temp1, const LDefinition &temp2) {
-        setOperand(0, cx);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-    }
-
-    MNewPar *mir() const {
-        return mir_->toNewPar();
-    }
-
-    const LAllocation *forkJoinContext() {
-        return getOperand(0);
-    }
-
-    const LDefinition *getTemp0() {
-        return getTemp(0);
-    }
-
-    const LDefinition *getTemp1() {
-        return getTemp(1);
-    }
-};
-
-class LNewDenseArrayPar : public LInstructionHelper<1, 2, 3>
-{
-  public:
-    LIR_HEADER(NewDenseArrayPar);
-
-    LNewDenseArrayPar(const LAllocation &cx, const LAllocation &length,
-                      const LDefinition &temp1, const LDefinition &temp2, const LDefinition &temp3)
-    {
-        setOperand(0, cx);
-        setOperand(1, length);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-        setTemp(2, temp3);
-    }
-
-    MNewDenseArrayPar *mir() const {
-        return mir_->toNewDenseArrayPar();
-    }
-
-    const LAllocation *forkJoinContext() {
-        return getOperand(0);
-    }
-
-    const LAllocation *length() {
-        return getOperand(1);
-    }
-
-    const LDefinition *getTemp0() {
-        return getTemp(0);
-    }
-
-    const LDefinition *getTemp1() {
-        return getTemp(1);
-    }
-
-    const LDefinition *getTemp2() {
-        return getTemp(2);
-    }
-};
-
 // Allocates a new DeclEnvObject.
 //
 // This instruction generates two possible instruction sets:
 //   (1) An inline allocation of the call object is attempted.
 //   (2) Otherwise, a callVM creates a new object.
 //
 class LNewDeclEnvObject : public LInstructionHelper<1, 0, 1>
 {
@@ -940,50 +872,16 @@ class LNewSingletonCallObject : public L
     }
 
     MNewCallObjectBase *mir() const {
         MOZ_ASSERT(mir_->isNewCallObject() || mir_->isNewRunOnceCallObject());
         return static_cast<MNewCallObjectBase *>(mir_);
     }
 };
 
-class LNewCallObjectPar : public LInstructionHelper<1, 1, 2>
-{
-    LNewCallObjectPar(const LAllocation &cx, const LDefinition &temp1, const LDefinition &temp2) {
-        setOperand(0, cx);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-    }
-
-public:
-    LIR_HEADER(NewCallObjectPar);
-
-    static LNewCallObjectPar *New(TempAllocator &alloc, const LAllocation &cx,
-                                  const LDefinition &temp1, const LDefinition &temp2)
-    {
-        return new(alloc) LNewCallObjectPar(cx, temp1, temp2);
-    }
-
-    const LAllocation *forkJoinContext() {
-        return getOperand(0);
-    }
-
-    const MNewCallObjectPar *mir() const {
-        return mir_->toNewCallObjectPar();
-    }
-
-    const LDefinition *getTemp0() {
-        return getTemp(0);
-    }
-
-    const LDefinition *getTemp1() {
-        return getTemp(1);
-    }
-};
-
 class LNewDerivedTypedObject : public LCallInstructionHelper<1, 3, 0>
 {
   public:
     LIR_HEADER(NewDerivedTypedObject);
 
     LNewDerivedTypedObject(const LAllocation &type,
                            const LAllocation &owner,
                            const LAllocation &offset) {
@@ -1143,39 +1041,16 @@ class LCheckOverRecursed : public LInstr
     LCheckOverRecursed()
     { }
 
     MCheckOverRecursed *mir() const {
         return mir_->toCheckOverRecursed();
     }
 };
 
-class LCheckOverRecursedPar : public LInstructionHelper<0, 1, 1>
-{
-  public:
-    LIR_HEADER(CheckOverRecursedPar);
-
-    LCheckOverRecursedPar(const LAllocation &cx, const LDefinition &tempReg) {
-        setOperand(0, cx);
-        setTemp(0, tempReg);
-    }
-
-    const LAllocation *forkJoinContext() {
-        return getOperand(0);
-    }
-
-    const LDefinition *getTempReg() {
-        return getTemp(0);
-    }
-
-    MCheckOverRecursedPar *mir() const {
-        return mir_->toCheckOverRecursedPar();
-    }
-};
-
 class LAsmJSInterruptCheck : public LInstructionHelper<0, 0, 0>
 {
     Label *interruptExit_;
     const CallSiteDesc &funcDesc_;
 
   public:
     LIR_HEADER(AsmJSInterruptCheck);
 
@@ -1223,38 +1098,16 @@ class LInterruptCheckImplicit : public L
     void setOolEntry(Label *oolEntry) {
         oolEntry_ = oolEntry;
     }
     MInterruptCheck *mir() const {
         return mir_->toInterruptCheck();
     }
 };
 
-class LInterruptCheckPar : public LInstructionHelper<0, 1, 1>
-{
-  public:
-    LIR_HEADER(InterruptCheckPar);
-
-    LInterruptCheckPar(const LAllocation &cx, const LDefinition &tempReg) {
-        setOperand(0, cx);
-        setTemp(0, tempReg);
-    }
-
-    const LAllocation *forkJoinContext() {
-        return getOperand(0);
-    }
-
-    const LDefinition *getTempReg() {
-        return getTemp(0);
-    }
-    MInterruptCheckPar *mir() const {
-        return mir_->toInterruptCheckPar();
-    }
-};
-
 class LDefVar : public LCallInstructionHelper<0, 1, 0>
 {
   public:
     LIR_HEADER(DefVar)
 
     explicit LDefVar(const LAllocation &scopeChain)
     {
         setOperand(0, scopeChain);
@@ -3374,57 +3227,16 @@ class LConcat : public LInstructionHelpe
     const LDefinition *temp4() {
         return this->getTemp(3);
     }
     const LDefinition *temp5() {
         return this->getTemp(4);
     }
 };
 
-class LConcatPar : public LInstructionHelper<1, 3, 4>
-{
-  public:
-    LIR_HEADER(ConcatPar)
-
-    LConcatPar(const LAllocation &cx, const LAllocation &lhs, const LAllocation &rhs,
-               const LDefinition &temp1, const LDefinition &temp2, const LDefinition &temp3,
-               const LDefinition &temp4)
-    {
-        setOperand(0, cx);
-        setOperand(1, lhs);
-        setOperand(2, rhs);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-        setTemp(2, temp3);
-        setTemp(3, temp4);
-    }
-
-    const LAllocation *forkJoinContext() {
-        return this->getOperand(0);
-    }
-    const LAllocation *lhs() {
-        return this->getOperand(1);
-    }
-    const LAllocation *rhs() {
-        return this->getOperand(2);
-    }
-    const LDefinition *temp1() {
-        return this->getTemp(0);
-    }
-    const LDefinition *temp2() {
-        return this->getTemp(1);
-    }
-    const LDefinition *temp3() {
-        return this->getTemp(2);
-    }
-    const LDefinition *temp4() {
-        return this->getTemp(3);
-    }
-};
-
 // Get uint16 character code from a string.
 class LCharCodeAt : public LInstructionHelper<1, 2, 0>
 {
   public:
     LIR_HEADER(CharCodeAt)
 
     LCharCodeAt(const LAllocation &str, const LAllocation &index) {
         setOperand(0, str);
@@ -4086,46 +3898,16 @@ class LLambdaArrow : public LInstruction
     const LDefinition *temp() {
         return getTemp(0);
     }
     const MLambdaArrow *mir() const {
         return mir_->toLambdaArrow();
     }
 };
 
-class LLambdaPar : public LInstructionHelper<1, 2, 2>
-{
-  public:
-    LIR_HEADER(LambdaPar);
-
-    LLambdaPar(const LAllocation &cx, const LAllocation &scopeChain,
-               const LDefinition &temp1, const LDefinition &temp2)
-    {
-        setOperand(0, cx);
-        setOperand(1, scopeChain);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-    }
-    const LAllocation *forkJoinContext() {
-        return getOperand(0);
-    }
-    const LAllocation *scopeChain() {
-        return getOperand(1);
-    }
-    const MLambdaPar *mir() const {
-        return mir_->toLambdaPar();
-    }
-    const LDefinition *getTemp0() {
-        return getTemp(0);
-    }
-    const LDefinition *getTemp1() {
-        return getTemp(1);
-    }
-};
-
 // Load the "slots" member out of a JSObject.
 //   Input: JSObject pointer
 //   Output: slots pointer
 class LSlots : public LInstructionHelper<1, 1, 0>
 {
   public:
     LIR_HEADER(Slots)
 
@@ -5298,27 +5080,23 @@ class LGetPropertyCacheV : public LInstr
     }
     const MGetPropertyCache *mir() const {
         return mir_->toGetPropertyCache();
     }
 };
 
 // Patchable jump to stubs generated for a GetProperty cache, which loads a
 // value of a known type, possibly into an FP register.
-class LGetPropertyCacheT : public LInstructionHelper<1, 1, 1>
+class LGetPropertyCacheT : public LInstructionHelper<1, 1, 0>
 {
   public:
     LIR_HEADER(GetPropertyCacheT)
 
-    LGetPropertyCacheT(const LAllocation &object, const LDefinition &temp) {
+    explicit LGetPropertyCacheT(const LAllocation &object) {
         setOperand(0, object);
-        setTemp(0, temp);
-    }
-    const LDefinition *temp() {
-        return getTemp(0);
     }
     const MGetPropertyCache *mir() const {
         return mir_->toGetPropertyCache();
     }
 };
 
 // Emit code to load a boxed value from an object's slots if its shape matches
 // one of the shapes observed by the baseline IC, else bails out.
@@ -5436,39 +5214,34 @@ class LGetElementCacheV : public LInstru
     const LAllocation *object() {
         return getOperand(0);
     }
     const MGetElementCache *mir() const {
         return mir_->toGetElementCache();
     }
 };
 
-class LGetElementCacheT : public LInstructionHelper<1, 2, 1>
+class LGetElementCacheT : public LInstructionHelper<1, 2, 0>
 {
   public:
     LIR_HEADER(GetElementCacheT)
 
-    LGetElementCacheT(const LAllocation &object, const LAllocation &index,
-                      const LDefinition &temp) {
+    LGetElementCacheT(const LAllocation &object, const LAllocation &index) {
         setOperand(0, object);
         setOperand(1, index);
-        setTemp(0, temp);
     }
     const LAllocation *object() {
         return getOperand(0);
     }
     const LAllocation *index() {
         return getOperand(1);
     }
     const LDefinition *output() {
         return getDef(0);
     }
-    const LDefinition *temp() {
-        return getTemp(0);
-    }
     const MGetElementCache *mir() const {
         return mir_->toGetElementCache();
     }
 };
 
 class LBindNameCache : public LInstructionHelper<1, 1, 0>
 {
   public:
@@ -5673,62 +5446,16 @@ class LFunctionEnvironment : public LIns
     explicit LFunctionEnvironment(const LAllocation &function) {
         setOperand(0, function);
     }
     const LAllocation *function() {
         return getOperand(0);
     }
 };
 
-class LForkJoinContext : public LCallInstructionHelper<1, 0, 1>
-{
-  public:
-    LIR_HEADER(ForkJoinContext);
-
-    explicit LForkJoinContext(const LDefinition &temp1) {
-        setTemp(0, temp1);
-    }
-
-    const LDefinition *getTempReg() {
-        return getTemp(0);
-    }
-};
-
-class LForkJoinGetSlice : public LInstructionHelper<1, 1, 4>
-{
-  public:
-    LIR_HEADER(ForkJoinGetSlice);
-
-    LForkJoinGetSlice(const LAllocation &cx,
-                      const LDefinition &temp1, const LDefinition &temp2,
-                      const LDefinition &temp3, const LDefinition &temp4) {
-        setOperand(0, cx);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-        setTemp(2, temp3);
-        setTemp(3, temp4);
-    }
-
-    const LAllocation *forkJoinContext() {
-        return getOperand(0);
-    }
-    const LDefinition *temp1() {
-        return getTemp(0);
-    }
-    const LDefinition *temp2() {
-        return getTemp(1);
-    }
-    const LDefinition *temp3() {
-        return getTemp(2);
-    }
-    const LDefinition *temp4() {
-        return getTemp(3);
-    }
-};
-
 class LCallGetProperty : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
 {
   public:
     LIR_HEADER(CallGetProperty)
 
     static const size_t Value = 0;
 
     MCallGetProperty *mir() const {
@@ -5816,72 +5543,60 @@ class LCallDeleteElement : public LCallI
 
     MDeleteElement *mir() const {
         return mir_->toDeleteElement();
     }
 };
 
 // Patchable jump to stubs generated for a SetProperty cache, which stores a
 // boxed value.
-class LSetPropertyCacheV : public LInstructionHelper<0, 1 + BOX_PIECES, 2>
+class LSetPropertyCacheV : public LInstructionHelper<0, 1 + BOX_PIECES, 1>
 {
   public:
     LIR_HEADER(SetPropertyCacheV)
 
-    LSetPropertyCacheV(const LAllocation &object, const LDefinition &slots,
-                       const LDefinition &temp) {
+    LSetPropertyCacheV(const LAllocation &object, const LDefinition &slots) {
         setOperand(0, object);
         setTemp(0, slots);
-        setTemp(1, temp);
     }
 
     static const size_t Value = 1;
 
     const MSetPropertyCache *mir() const {
         return mir_->toSetPropertyCache();
     }
-
-    const LDefinition *tempForDispatchCache() {
-        return getTemp(1);
-    }
 };
 
 // Patchable jump to stubs generated for a SetProperty cache, which stores a
 // value of a known type.
-class LSetPropertyCacheT : public LInstructionHelper<0, 2, 2>
+class LSetPropertyCacheT : public LInstructionHelper<0, 2, 1>
 {
     MIRType valueType_;
 
   public:
     LIR_HEADER(SetPropertyCacheT)
 
     LSetPropertyCacheT(const LAllocation &object, const LDefinition &slots,
-                       const LAllocation &value, const LDefinition &temp,
-                       MIRType valueType)
+                       const LAllocation &value, MIRType valueType)
         : valueType_(valueType)
     {
         setOperand(0, object);
         setOperand(1, value);
         setTemp(0, slots);
-        setTemp(1, temp);
     }
 
     const MSetPropertyCache *mir() const {
         return mir_->toSetPropertyCache();
     }
     MIRType valueType() {
         return valueType_;
     }
     const char *extraName() const {
         return StringFromMIRType(valueType_);
     }
-
-    const LDefinition *tempForDispatchCache() {
-        return getTemp(1);
-    }
 };
 
 class LSetElementCacheV : public LInstructionHelper<0, 1 + 2 * BOX_PIECES, 4>
 {
   public:
     LIR_HEADER(SetElementCacheV);
 
     static const size_t Index = 1;
@@ -6179,65 +5894,16 @@ class LRest : public LCallInstructionHel
     const LAllocation *numActuals() {
         return getOperand(0);
     }
     MRest *mir() const {
         return mir_->toRest();
     }
 };
 
-class LRestPar : public LInstructionHelper<1, 2, 3>
-{
-  public:
-    LIR_HEADER(RestPar);
-
-    LRestPar(const LAllocation &cx, const LAllocation &numActuals,
-             const LDefinition &temp1, const LDefinition &temp2, const LDefinition &temp3)
-    {
-        setOperand(0, cx);
-        setOperand(1, numActuals);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-        setTemp(2, temp3);
-    }
-    const LAllocation *forkJoinContext() {
-        return getOperand(0);
-    }
-    const LAllocation *numActuals() {
-        return getOperand(1);
-    }
-    MRestPar *mir() const {
-        return mir_->toRestPar();
-    }
-};
-
-class LGuardThreadExclusive : public LCallInstructionHelper<0, 2, 1>
-{
-  public:
-    LIR_HEADER(GuardThreadExclusive);
-
-    LGuardThreadExclusive(const LAllocation &cx, const LAllocation &object, const LDefinition &temp1) {
-        setOperand(0, cx);
-        setOperand(1, object);
-        setTemp(0, temp1);
-    }
-
-    const LAllocation *forkJoinContext() {
-        return getOperand(0);
-    }
-
-    const LAllocation *object() {
-        return getOperand(1);
-    }
-
-    const LDefinition *getTempReg() {
-        return getTemp(0);
-    }
-};
-
 class LGuardShapePolymorphic : public LInstructionHelper<0, 1, 1>
 {
   public:
     LIR_HEADER(GuardShapePolymorphic)
 
     LGuardShapePolymorphic(const LAllocation &in, const LDefinition &temp) {
         setOperand(0, in);
         setTemp(0, temp);
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -564,18 +564,16 @@ class LDefinition
           case MIRType_Value:
             return LDefinition::BOX;
 #endif
           case MIRType_Slots:
           case MIRType_Elements:
             return LDefinition::SLOTS;
           case MIRType_Pointer:
             return LDefinition::GENERAL;
-          case MIRType_ForkJoinContext:
-            return LDefinition::GENERAL;
           case MIRType_Int32x4:
             return LDefinition::INT32X4;
           case MIRType_Float32x4:
             return LDefinition::FLOAT32X4;
           default:
             MOZ_CRASH("unexpected type");
         }
     }
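
The LIR classes in LIR-Common.h above (including the removed *Par ones) all derive from LInstructionHelper<Defs, Operands, Temps> and fill fixed operand/temp slots in their constructors. Here is a stripped-down, self-contained sketch of that shape; LInstructionHelperSketch, LAllocationSketch and LToyCharCodeAt are illustrative names, not the real SpiderMonkey classes.

#include <array>
#include <cstddef>
#include <cstdio>

struct LAllocationSketch { int virtualRegister; };
struct LDefinitionSketch { int virtualRegister; };

// Defs is kept only to mirror the real template signature; this sketch ignores it.
template <size_t Defs, size_t Operands, size_t Temps>
class LInstructionHelperSketch {
    std::array<LAllocationSketch, Operands> operands_{};
    std::array<LDefinitionSketch, Temps> temps_{};

  protected:
    void setOperand(size_t i, const LAllocationSketch &a) { operands_[i] = a; }
    void setTemp(size_t i, const LDefinitionSketch &d) { temps_[i] = d; }

  public:
    const LAllocationSketch *getOperand(size_t i) const { return &operands_[i]; }
    const LDefinitionSketch *getTemp(size_t i) const { return &temps_[i]; }
};

// Analogue of something like LCharCodeAt: one def, two operands, no temps.
class LToyCharCodeAt : public LInstructionHelperSketch<1, 2, 0> {
  public:
    LToyCharCodeAt(const LAllocationSketch &str, const LAllocationSketch &index) {
        setOperand(0, str);
        setOperand(1, index);
    }
    const LAllocationSketch *str() const { return getOperand(0); }
    const LAllocationSketch *index() const { return getOperand(1); }
};

int main() {
    LToyCharCodeAt ins({5}, {6});
    std::printf("str vreg=%d index vreg=%d\n",
                ins.str()->virtualRegister, ins.index()->virtualRegister);
    return 0;
}
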
--- a/js/src/jit/LOpcodes.h
+++ b/js/src/jit/LOpcodes.h
@@ -50,27 +50,23 @@
     _(NewArrayDynamicLength)        \
     _(ArraySplice)                  \
     _(NewObject)                    \
     _(NewTypedObject)               \
     _(NewDeclEnvObject)             \
     _(NewCallObject)                \
     _(NewSingletonCallObject)       \
     _(NewStringObject)              \
-    _(NewPar)                       \
-    _(NewDenseArrayPar)             \
-    _(NewCallObjectPar)             \
     _(NewDerivedTypedObject)        \
     _(InitElem)                     \
     _(InitElemGetterSetter)         \
     _(MutateProto)                  \
     _(InitProp)                     \
     _(InitPropGetterSetter)         \
     _(CheckOverRecursed)            \
-    _(CheckOverRecursedPar)         \
     _(DefVar)                       \
     _(DefFun)                       \
     _(CallKnown)                    \
     _(CallGeneric)                  \
     _(CallNative)                   \
     _(ApplyArgsGeneric)             \
     _(Bail)                         \
     _(Unreachable)                  \
@@ -151,17 +147,16 @@
     _(AddI)                         \
     _(SubI)                         \
     _(MulI)                         \
     _(MathD)                        \
     _(MathF)                        \
     _(ModD)                         \
     _(BinaryV)                      \
     _(Concat)                       \
-    _(ConcatPar)                    \
     _(CharCodeAt)                   \
     _(FromCharCode)                 \
     _(StringSplit)                  \
     _(Int32ToDouble)                \
     _(Float32ToDouble)              \
     _(DoubleToFloat32)              \
     _(Int32ToFloat32)               \
     _(ValueToDouble)                \
@@ -188,32 +183,30 @@
     _(RegExpExec)                   \
     _(RegExpTest)                   \
     _(RegExpReplace)                \
     _(StringReplace)                \
     _(Substr)                       \
     _(Lambda)                       \
     _(LambdaArrow)                  \
     _(LambdaForSingleton)           \
-    _(LambdaPar)                    \
     _(Slots)                        \
     _(Elements)                     \
     _(ConvertElementsToDoubles)     \
     _(MaybeToDoubleElement)         \
     _(MaybeCopyElementsForWrite)    \
     _(LoadSlotV)                    \
     _(LoadSlotT)                    \
     _(StoreSlotV)                   \
     _(StoreSlotT)                   \
     _(GuardShape)                   \
     _(GuardShapePolymorphic)        \
     _(GuardObjectType)              \
     _(GuardObjectIdentity)          \
     _(GuardClass)                   \
-    _(GuardThreadExclusive)         \
     _(TypeBarrierV)                 \
     _(TypeBarrierO)                 \
     _(MonitorTypes)                 \
     _(PostWriteBarrierO)            \
     _(PostWriteBarrierV)            \
     _(InitializedLength)            \
     _(SetInitializedLength)         \
     _(BoundsCheck)                  \
@@ -247,18 +240,16 @@
     _(ClampIToUint8)                \
     _(ClampDToUint8)                \
     _(ClampVToUint8)                \
     _(LoadFixedSlotV)               \
     _(LoadFixedSlotT)               \
     _(StoreFixedSlotV)              \
     _(StoreFixedSlotT)              \
     _(FunctionEnvironment)          \
-    _(ForkJoinContext)              \
-    _(ForkJoinGetSlice)             \
     _(GetPropertyCacheV)            \
     _(GetPropertyCacheT)            \
     _(GetPropertyPolymorphicV)      \
     _(GetPropertyPolymorphicT)      \
     _(GetElementCacheV)             \
     _(GetElementCacheT)             \
     _(BindNameCache)                \
     _(CallGetProperty)              \
@@ -292,17 +283,16 @@
     _(StringLength)                 \
     _(ArgumentsLength)              \
     _(GetFrameArgument)             \
     _(SetFrameArgumentT)            \
     _(SetFrameArgumentC)            \
     _(SetFrameArgumentV)            \
     _(RunOncePrologue)              \
     _(Rest)                         \
-    _(RestPar)                      \
     _(TypeOfV)                      \
     _(ToIdV)                        \
     _(Floor)                        \
     _(FloorF)                       \
     _(Ceil)                         \
     _(CeilF)                        \
     _(Round)                        \
     _(RoundF)                       \
@@ -331,17 +321,16 @@
     _(AsmJSLoadFFIFunc)             \
     _(AsmJSParameter)               \
     _(AsmJSReturn)                  \
     _(AsmJSVoidReturn)              \
     _(AsmJSPassStackArg)            \
     _(AsmJSCall)                    \
     _(AsmJSCompareExchangeHeap)     \
     _(AsmJSAtomicBinopHeap)         \
-    _(InterruptCheckPar)            \
     _(RecompileCheck)               \
     _(MemoryBarrier)                \
     _(AssertRangeI)                 \
     _(AssertRangeD)                 \
     _(AssertRangeF)                 \
     _(AssertRangeV)                 \
     _(LexicalCheck)                 \
     _(ThrowUninitializedLexical)    \
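
Both this opcode list and IONCACHE_KIND_LIST earlier in the patch use the X-macro idiom: one list macro is expanded several times so that deleting an entry (here, the *Par opcodes) updates every generated enum, name table and forward declaration at once. Below is a small self-contained sketch of the idiom with a made-up TOY_OPCODE_LIST, not the real LOpcodes.h contents.

#include <cstdio>

#define TOY_OPCODE_LIST(_)   \
    _(CheckOverRecursed)     \
    _(Concat)                \
    _(Rest)

enum ToyOpcode {
#define DEFINE_OPCODE(op) Toy_##op,
    TOY_OPCODE_LIST(DEFINE_OPCODE)
#undef DEFINE_OPCODE
    Toy_Count
};

static const char *ToyOpcodeName(ToyOpcode op) {
    // Second expansion of the same list keeps the name table in sync with the enum.
    static const char *const names[] = {
#define OPCODE_NAME(op) #op,
        TOY_OPCODE_LIST(OPCODE_NAME)
#undef OPCODE_NAME
    };
    return op < Toy_Count ? names[op] : "unknown";
}

int main() {
    for (int i = 0; i < Toy_Count; i++)
        std::printf("%d: %s\n", i, ToyOpcodeName(ToyOpcode(i)));
    return 0;
}
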
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -126,25 +126,16 @@ void
 LIRGenerator::visitCheckOverRecursed(MCheckOverRecursed *ins)
 {
     LCheckOverRecursed *lir = new(alloc()) LCheckOverRecursed();
     add(lir, ins);
     assignSafepoint(lir, ins);
 }
 
 void
-LIRGenerator::visitCheckOverRecursedPar(MCheckOverRecursedPar *ins)
-{
-    LCheckOverRecursedPar *lir =
-        new(alloc()) LCheckOverRecursedPar(useRegister(ins->forkJoinContext()), temp());
-    add(lir, ins);
-    assignSafepoint(lir, ins);
-}
-
-void
 LIRGenerator::visitDefVar(MDefVar *ins)
 {
     LDefVar *lir = new(alloc()) LDefVar(useRegisterAtStart(ins->scopeChain()));
     add(lir, ins);
     assignSafepoint(lir, ins);
 }
 
 void
@@ -238,25 +229,16 @@ LIRGenerator::visitNewDerivedTypedObject
         new(alloc()) LNewDerivedTypedObject(useRegisterAtStart(ins->type()),
                                             useRegisterAtStart(ins->owner()),
                                             useRegisterAtStart(ins->offset()));
     defineReturn(lir, ins);
     assignSafepoint(lir, ins);
 }
 
 void
-LIRGenerator::visitNewCallObjectPar(MNewCallObjectPar *ins)
-{
-    const LAllocation &parThreadContext = useRegister(ins->forkJoinContext());
-    LNewCallObjectPar *lir = LNewCallObjectPar::New(alloc(), parThreadContext, temp(), temp());
-    define(lir, ins);
-    assignSafepoint(lir, ins);
-}
-
-void
 LIRGenerator::visitNewStringObject(MNewStringObject *ins)
 {
     MOZ_ASSERT(ins->input()->type() == MIRType_String);
 
     LNewStringObject *lir = new(alloc()) LNewStringObject(useRegister(ins->input()), temp());
     define(lir, ins);
     assignSafepoint(lir, ins);
 }
@@ -1595,38 +1577,16 @@ LIRGenerator::visitConcat(MConcat *ins)
                                         tempFixed(CallTempReg2),
                                         tempFixed(CallTempReg3),
                                         tempFixed(CallTempReg4));
     defineFixed(lir, ins, LAllocation(AnyRegister(CallTempReg5)));
     assignSafepoint(lir, ins);
 }
 
 void
-LIRGenerator::visitConcatPar(MConcatPar *ins)
-{
-    MDefinition *cx = ins->forkJoinContext();
-    MDefinition *lhs = ins->lhs();
-    MDefinition *rhs = ins->rhs();
-
-    MOZ_ASSERT(lhs->type() == MIRType_String);
-    MOZ_ASSERT(rhs->type() == MIRType_String);
-    MOZ_ASSERT(ins->type() == MIRType_String);
-
-    LConcatPar *lir = new(alloc()) LConcatPar(useFixed(cx, CallTempReg4),
-                                              useFixedAtStart(lhs, CallTempReg0),
-                                              useFixedAtStart(rhs, CallTempReg1),
-                                              tempFixed(CallTempReg0),
-                                              tempFixed(CallTempReg1),
-                                              tempFixed(CallTempReg2),
-                                              tempFixed(CallTempReg3));
-    defineFixed(lir, ins, LAllocation(AnyRegister(CallTempReg5)));
-    assignSafepoint(lir, ins);
-}
-
-void
 LIRGenerator::visitCharCodeAt(MCharCodeAt *ins)
 {
     MDefinition *str = ins->getOperand(0);
     MDefinition *idx = ins->getOperand(1);
 
     MOZ_ASSERT(str->type() == MIRType_String);
     MOZ_ASSERT(idx->type() == MIRType_Int32);
 
@@ -2167,28 +2127,16 @@ LIRGenerator::visitLambdaArrow(MLambdaAr
 
     LLambdaArrow *lir = new(alloc()) LLambdaArrow(useRegister(ins->scopeChain()), temp());
     useBox(lir, LLambdaArrow::ThisValue, ins->thisDef());
     define(lir, ins);
     assignSafepoint(lir, ins);
 }
 
 void
-LIRGenerator::visitLambdaPar(MLambdaPar *ins)
-{
-    MOZ_ASSERT(!ins->info().singletonType);
-    MOZ_ASSERT(!ins->info().useNewTypeForClone);
-    LLambdaPar *lir = new(alloc()) LLambdaPar(useRegister(ins->forkJoinContext()),
-                                              useRegister(ins->scopeChain()),
-                                              temp(), temp());
-    define(lir, ins);
-    assignSafepoint(lir, ins);
-}
-
-void
 LIRGenerator::visitSlots(MSlots *ins)
 {
     define(new(alloc()) LSlots(useRegisterAtStart(ins->object())), ins);
 }
 
 void
 LIRGenerator::visitElements(MElements *ins)
 {
@@ -2249,39 +2197,16 @@ LIRGenerator::visitLoadSlot(MLoadSlot *i
 
 void
 LIRGenerator::visitFunctionEnvironment(MFunctionEnvironment *ins)
 {
     define(new(alloc()) LFunctionEnvironment(useRegisterAtStart(ins->function())), ins);
 }
 
 void
-LIRGenerator::visitForkJoinContext(MForkJoinContext *ins)
-{
-    LForkJoinContext *lir = new(alloc()) LForkJoinContext(tempFixed(CallTempReg0));
-    defineReturn(lir, ins);
-}
-
-void
-LIRGenerator::visitGuardThreadExclusive(MGuardThreadExclusive *ins)
-{
-    // FIXME (Bug 956281) -- For now, we always generate the most
-    // general form of write guard check. we could employ TI feedback
-    // to optimize this if we know that the object being tested is a
-    // typed object or know that it is definitely NOT a typed object.
-    LGuardThreadExclusive *lir =
-        new(alloc()) LGuardThreadExclusive(useFixed(ins->forkJoinContext(), CallTempReg0),
-                                           useFixed(ins->object(), CallTempReg1),
-                                           tempFixed(CallTempReg2));
-    lir->setMir(ins);
-    assignSnapshot(lir, Bailout_GuardThreadExclusive);
-    add(lir, ins);
-}
-
-void
 LIRGenerator::visitInterruptCheck(MInterruptCheck *ins)
 {
     // Implicit interrupt checks require asm.js signal handlers to be installed.
     LInstructionHelper<0, 0, 0> *lir;
     if (GetJitContext()->runtime->canUseSignalHandlers())
         lir = new(alloc()) LInterruptCheckImplicit();
     else
         lir = new(alloc()) LInterruptCheck();
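
The retained visitInterruptCheck above picks one of two lowerings: an implicit check, which requires asm.js signal handlers to be installed, or an explicit poll of the interrupt flag. A minimal standalone sketch of that selection; RuntimeInfo and its field are illustrative stand-ins, not the real JitRuntime interface:

    // Sketch only: models the decision made in visitInterruptCheck.
    struct RuntimeInfo {
        bool canUseSignalHandlers;  // assumption: mirrors runtime->canUseSignalHandlers()
    };

    enum class InterruptLowering {
        Implicit,      // rely on installed signal handlers to deliver the interrupt
        ExplicitPoll   // emit a load-and-branch on the interrupt flag
    };

    InterruptLowering selectInterruptLowering(const RuntimeInfo &rt) {
        return rt.canUseSignalHandlers ? InterruptLowering::Implicit
                                       : InterruptLowering::ExplicitPoll;
    }
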
@@ -2295,50 +2220,16 @@ LIRGenerator::visitAsmJSInterruptCheck(M
     gen->setPerformsCall();
 
     LAsmJSInterruptCheck *lir = new(alloc()) LAsmJSInterruptCheck(ins->interruptExit(),
                                                                   ins->funcDesc());
     add(lir, ins);
 }
 
 void
-LIRGenerator::visitInterruptCheckPar(MInterruptCheckPar *ins)
-{
-    LInterruptCheckPar *lir =
-        new(alloc()) LInterruptCheckPar(useRegister(ins->forkJoinContext()), temp());
-    add(lir, ins);
-    assignSafepoint(lir, ins);
-}
-
-void
-LIRGenerator::visitNewPar(MNewPar *ins)
-{
-    LNewPar *lir = new(alloc()) LNewPar(useRegister(ins->forkJoinContext()), temp(), temp());
-    define(lir, ins);
-    assignSafepoint(lir, ins);
-}
-
-void
-LIRGenerator::visitNewDenseArrayPar(MNewDenseArrayPar *ins)
-{
-    MOZ_ASSERT(ins->forkJoinContext()->type() == MIRType_ForkJoinContext);
-    MOZ_ASSERT(ins->length()->type() == MIRType_Int32);
-    MOZ_ASSERT(ins->type() == MIRType_Object);
-
-    LNewDenseArrayPar *lir =
-        new(alloc()) LNewDenseArrayPar(useRegister(ins->forkJoinContext()),
-                                       useRegister(ins->length()),
-                                       temp(),
-                                       temp(),
-                                       temp());
-    define(lir, ins);
-    assignSafepoint(lir, ins);
-}
-
-void
 LIRGenerator::visitStoreSlot(MStoreSlot *ins)
 {
     LInstruction *lir;
 
     switch (ins->value()->type()) {
       case MIRType_Value:
         lir = new(alloc()) LStoreSlotV(useRegister(ins->slots()));
         useBox(lir, LStoreSlotV::Value, ins->value());
@@ -3150,18 +3041,17 @@ void
 LIRGenerator::visitGetPropertyCache(MGetPropertyCache *ins)
 {
     MOZ_ASSERT(ins->object()->type() == MIRType_Object);
     if (ins->type() == MIRType_Value) {
         LGetPropertyCacheV *lir = new(alloc()) LGetPropertyCacheV(useRegister(ins->object()));
         defineBox(lir, ins);
         assignSafepoint(lir, ins);
     } else {
-        LGetPropertyCacheT *lir = new(alloc()) LGetPropertyCacheT(useRegister(ins->object()),
-                                                                  tempForDispatchCache(ins->type()));
+        LGetPropertyCacheT *lir = new(alloc()) LGetPropertyCacheT(useRegister(ins->object()));
         define(lir, ins);
         assignSafepoint(lir, ins);
     }
 }
 
 void
 LIRGenerator::visitGetPropertyPolymorphic(MGetPropertyPolymorphic *ins)
 {
@@ -3211,18 +3101,17 @@ LIRGenerator::visitGetElementCache(MGetE
         MOZ_ASSERT(ins->index()->type() == MIRType_Value);
         LGetElementCacheV *lir = new(alloc()) LGetElementCacheV(useRegister(ins->object()));
         useBox(lir, LGetElementCacheV::Index, ins->index());
         defineBox(lir, ins);
         assignSafepoint(lir, ins);
     } else {
         MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
         LGetElementCacheT *lir = new(alloc()) LGetElementCacheT(useRegister(ins->object()),
-                                                                useRegister(ins->index()),
-                                                                tempForDispatchCache(ins->type()));
+                                                                useRegister(ins->index()));
         define(lir, ins);
         assignSafepoint(lir, ins);
     }
 }
 
 void
 LIRGenerator::visitBindNameCache(MBindNameCache *ins)
 {
@@ -3368,25 +3257,24 @@ LIRGenerator::visitDeleteElement(MDelete
     assignSafepoint(lir, ins);
 }
 
 void
 LIRGenerator::visitSetPropertyCache(MSetPropertyCache *ins)
 {
     LUse obj = useRegisterAtStart(ins->object());
     LDefinition slots = tempCopy(ins->object(), 0);
-    LDefinition dispatchTemp = tempForDispatchCache();
 
     LInstruction *lir;
     if (ins->value()->type() == MIRType_Value) {
-        lir = new(alloc()) LSetPropertyCacheV(obj, slots, dispatchTemp);
+        lir = new(alloc()) LSetPropertyCacheV(obj, slots);
         useBox(lir, LSetPropertyCacheV::Value, ins->value());
     } else {
         LAllocation value = useRegisterOrConstant(ins->value());
-        lir = new(alloc()) LSetPropertyCacheT(obj, slots, value, dispatchTemp, ins->value()->type());
+        lir = new(alloc()) LSetPropertyCacheT(obj, slots, value, ins->value()->type());
     }
 
     add(lir, ins);
     assignSafepoint(lir, ins);
 }
 
 void
 LIRGenerator::visitSetElementCache(MSetElementCache *ins)
@@ -3539,30 +3427,16 @@ LIRGenerator::visitRest(MRest *ins)
                                     tempFixed(CallTempReg1),
                                     tempFixed(CallTempReg2),
                                     tempFixed(CallTempReg3));
     defineReturn(lir, ins);
     assignSafepoint(lir, ins);
 }
 
 void
-LIRGenerator::visitRestPar(MRestPar *ins)
-{
-    MOZ_ASSERT(ins->numActuals()->type() == MIRType_Int32);
-
-    LRestPar *lir = new(alloc()) LRestPar(useRegister(ins->forkJoinContext()),
-                                          useRegister(ins->numActuals()),
-                                          temp(),
-                                          temp(),
-                                          temp());
-    define(lir, ins);
-    assignSafepoint(lir, ins);
-}
-
-void
 LIRGenerator::visitThrow(MThrow *ins)
 {
     MDefinition *value = ins->getOperand(0);
     MOZ_ASSERT(value->type() == MIRType_Value);
 
     LThrow *lir = new(alloc()) LThrow;
     useBoxAtStart(lir, LThrow::Value, value);
     add(lir, ins);
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -75,26 +75,22 @@ class LIRGenerator : public LIRGenerator
     void visitNewArrayDynamicLength(MNewArrayDynamicLength *ins);
     void visitNewObject(MNewObject *ins);
     void visitNewTypedObject(MNewTypedObject *ins);
     void visitNewDeclEnvObject(MNewDeclEnvObject *ins);
     void visitNewCallObject(MNewCallObject *ins);
     void visitNewRunOnceCallObject(MNewRunOnceCallObject *ins);
     void visitNewStringObject(MNewStringObject *ins);
     void visitNewDerivedTypedObject(MNewDerivedTypedObject *ins);
-    void visitNewPar(MNewPar *ins);
-    void visitNewCallObjectPar(MNewCallObjectPar *ins);
-    void visitNewDenseArrayPar(MNewDenseArrayPar *ins);
     void visitInitElem(MInitElem *ins);
     void visitInitElemGetterSetter(MInitElemGetterSetter *ins);
     void visitMutateProto(MMutateProto *ins);
     void visitInitProp(MInitProp *ins);
     void visitInitPropGetterSetter(MInitPropGetterSetter *ins);
     void visitCheckOverRecursed(MCheckOverRecursed *ins);
-    void visitCheckOverRecursedPar(MCheckOverRecursedPar *ins);
     void visitDefVar(MDefVar *ins);
     void visitDefFun(MDefFun *ins);
     void visitCreateThisWithTemplate(MCreateThisWithTemplate *ins);
     void visitCreateThisWithProto(MCreateThisWithProto *ins);
     void visitCreateThis(MCreateThis *ins);
     void visitCreateArgumentsObject(MCreateArgumentsObject *ins);
     void visitGetArgumentsObjectArg(MGetArgumentsObjectArg *ins);
     void visitSetArgumentsObjectArg(MSetArgumentsObjectArg *ins);
@@ -137,17 +133,16 @@ class LIRGenerator : public LIRGenerator
     void visitRandom(MRandom *ins);
     void visitMathFunction(MMathFunction *ins);
     void visitAdd(MAdd *ins);
     void visitSub(MSub *ins);
     void visitMul(MMul *ins);
     void visitDiv(MDiv *ins);
     void visitMod(MMod *ins);
     void visitConcat(MConcat *ins);
-    void visitConcatPar(MConcatPar *ins);
     void visitCharCodeAt(MCharCodeAt *ins);
     void visitFromCharCode(MFromCharCode *ins);
     void visitStringSplit(MStringSplit *ins);
     void visitStart(MStart *start);
     void visitOsrEntry(MOsrEntry *entry);
     void visitNop(MNop *nop);
     void visitLimitedTruncate(MLimitedTruncate *nop);
     void visitOsrValue(MOsrValue *value);
@@ -162,30 +157,26 @@ class LIRGenerator : public LIRGenerator
     void visitToObjectOrNull(MToObjectOrNull *convert);
     void visitRegExp(MRegExp *ins);
     void visitRegExpExec(MRegExpExec *ins);
     void visitRegExpTest(MRegExpTest *ins);
     void visitRegExpReplace(MRegExpReplace *ins);
     void visitStringReplace(MStringReplace *ins);
     void visitLambda(MLambda *ins);
     void visitLambdaArrow(MLambdaArrow *ins);
-    void visitLambdaPar(MLambdaPar *ins);
     void visitSlots(MSlots *ins);
     void visitElements(MElements *ins);
     void visitConstantElements(MConstantElements *ins);
     void visitConvertElementsToDoubles(MConvertElementsToDoubles *ins);
     void visitMaybeToDoubleElement(MMaybeToDoubleElement *ins);
     void visitMaybeCopyElementsForWrite(MMaybeCopyElementsForWrite *ins);
     void visitLoadSlot(MLoadSlot *ins);
     void visitFunctionEnvironment(MFunctionEnvironment *ins);
-    void visitForkJoinContext(MForkJoinContext *ins);
-    void visitGuardThreadExclusive(MGuardThreadExclusive *ins);
     void visitInterruptCheck(MInterruptCheck *ins);
     void visitAsmJSInterruptCheck(MAsmJSInterruptCheck *ins);
-    void visitInterruptCheckPar(MInterruptCheckPar *ins);
     void visitStoreSlot(MStoreSlot *ins);
     void visitFilterTypeSet(MFilterTypeSet *ins);
     void visitTypeBarrier(MTypeBarrier *ins);
     void visitMonitorTypes(MMonitorTypes *ins);
     void visitPostWriteBarrier(MPostWriteBarrier *ins);
     void visitArrayLength(MArrayLength *ins);
     void visitSetArrayLength(MSetArrayLength *ins);
     void visitTypedArrayLength(MTypedArrayLength *ins);
@@ -247,17 +238,16 @@ class LIRGenerator : public LIRGenerator
     void visitIsNoIter(MIsNoIter *ins);
     void visitIteratorEnd(MIteratorEnd *ins);
     void visitStringLength(MStringLength *ins);
     void visitArgumentsLength(MArgumentsLength *ins);
     void visitGetFrameArgument(MGetFrameArgument *ins);
     void visitSetFrameArgument(MSetFrameArgument *ins);
     void visitRunOncePrologue(MRunOncePrologue *ins);
     void visitRest(MRest *ins);
-    void visitRestPar(MRestPar *ins);
     void visitThrow(MThrow *ins);
     void visitIn(MIn *ins);
     void visitInArray(MInArray *ins);
     void visitInstanceOf(MInstanceOf *ins);
     void visitCallInstanceOf(MCallInstanceOf *ins);
     void visitProfilerStackOp(MProfilerStackOp *ins);
     void visitIsCallable(MIsCallable *ins);
     void visitIsObject(MIsObject *ins);
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -161,40 +161,31 @@ IonBuilder::inlineNativeCall(CallInfo &c
     if (native == regexp_exec && !CallResultEscapes(pc))
         return inlineRegExpTest(callInfo);
     if (native == regexp_test)
         return inlineRegExpTest(callInfo);
 
     // Array intrinsics.
     if (native == intrinsic_UnsafePutElements)
         return inlineUnsafePutElements(callInfo);
-    if (native == intrinsic_NewDenseArray)
-        return inlineNewDenseArray(callInfo);
 
     // Slot intrinsics.
     if (native == intrinsic_UnsafeSetReservedSlot)
         return inlineUnsafeSetReservedSlot(callInfo);
     if (native == intrinsic_UnsafeGetReservedSlot)
         return inlineUnsafeGetReservedSlot(callInfo, MIRType_Value);
     if (native == intrinsic_UnsafeGetObjectFromReservedSlot)
         return inlineUnsafeGetReservedSlot(callInfo, MIRType_Object);
     if (native == intrinsic_UnsafeGetInt32FromReservedSlot)
         return inlineUnsafeGetReservedSlot(callInfo, MIRType_Int32);
     if (native == intrinsic_UnsafeGetStringFromReservedSlot)
         return inlineUnsafeGetReservedSlot(callInfo, MIRType_String);
     if (native == intrinsic_UnsafeGetBooleanFromReservedSlot)
         return inlineUnsafeGetReservedSlot(callInfo, MIRType_Boolean);
 
-    // Parallel intrinsics.
-    if (native == intrinsic_ShouldForceSequential ||
-        native == intrinsic_InParallelSection)
-        return inlineForceSequentialOrInParallelSection(callInfo);
-    if (native == intrinsic_ForkJoinGetSlice)
-        return inlineForkJoinGetSlice(callInfo);
-
     // Utility intrinsics.
     if (native == intrinsic_IsCallable)
         return inlineIsCallable(callInfo);
     if (native == intrinsic_ToObject)
         return inlineToObject(callInfo);
     if (native == intrinsic_IsObject)
         return inlineIsObject(callInfo);
     if (native == intrinsic_ToInteger)
@@ -237,18 +228,16 @@ IonBuilder::inlineNativeCall(CallInfo &c
         return inlineHasClass(callInfo,
                               &ScalarTypeDescr::class_, &ReferenceTypeDescr::class_);
     if (native == intrinsic_TypeDescrIsArrayType)
         return inlineHasClass(callInfo, &ArrayTypeDescr::class_);
     if (native == intrinsic_SetTypedObjectOffset)
         return inlineSetTypedObjectOffset(callInfo);
 
     // Testing Functions
-    if (native == testingFunc_inParallelSection)
-        return inlineForceSequentialOrInParallelSection(callInfo);
     if (native == testingFunc_bailout)
         return inlineBailout(callInfo);
     if (native == testingFunc_assertFloat32)
         return inlineAssertFloat32(callInfo);
 
     // Bound function
     if (native == js::CallOrConstructBoundFunction)
         return inlineBoundFunction(callInfo, target);
@@ -1772,139 +1761,16 @@ IonBuilder::inlineUnsafeSetTypedObjectAr
 
     if (!jsop_setelem_typed_object(arrayType, SetElem_Unsafe, true, obj, id, elem))
         return false;
 
     return true;
 }
 
 IonBuilder::InliningStatus
-IonBuilder::inlineForceSequentialOrInParallelSection(CallInfo &callInfo)
-{
-    if (callInfo.constructing())
-        return InliningStatus_NotInlined;
-
-    ExecutionMode executionMode = info().executionMode();
-    switch (executionMode) {
-      case ParallelExecution: {
-        // During Parallel Exec, we always force sequential, so
-        // replace with true.  This permits UCE to eliminate the
-        // entire path as dead, which is important.
-        callInfo.setImplicitlyUsedUnchecked();
-        MConstant *ins = MConstant::New(alloc(), BooleanValue(true));
-        current->add(ins);
-        current->push(ins);
-        return InliningStatus_Inlined;
-      }
-
-      default:
-        // In sequential mode, leave as is, because we'd have to
-        // access the "in warmup" flag of the runtime.
-        return InliningStatus_NotInlined;
-    }
-
-    MOZ_CRASH("Invalid execution mode");
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineForkJoinGetSlice(CallInfo &callInfo)
-{
-    if (info().executionMode() != ParallelExecution)
-        return InliningStatus_NotInlined;
-
-    // Assert the way the function is used instead of testing, as it is a
-    // self-hosted function which must be used in a particular fashion.
-    MOZ_ASSERT(callInfo.argc() == 1 && !callInfo.constructing());
-    MOZ_ASSERT(callInfo.getArg(0)->type() == MIRType_Int32);
-
-    // Test this, as we might have not executed the native despite knowing the
-    // target here.
-    if (getInlineReturnType() != MIRType_Int32)
-        return InliningStatus_NotInlined;
-
-    callInfo.setImplicitlyUsedUnchecked();
-
-    switch (info().executionMode()) {
-      case ParallelExecution:
-        if (LIRGenerator::allowInlineForkJoinGetSlice()) {
-            MForkJoinGetSlice *getSlice = MForkJoinGetSlice::New(alloc(),
-                                                                 graph().forkJoinContext());
-            current->add(getSlice);
-            current->push(getSlice);
-            return InliningStatus_Inlined;
-        }
-        return InliningStatus_NotInlined;
-
-      default:
-        // ForkJoinGetSlice acts as identity for sequential execution.
-        current->push(callInfo.getArg(0));
-        return InliningStatus_Inlined;
-    }
-
-    MOZ_CRASH("Invalid execution mode");
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineNewDenseArray(CallInfo &callInfo)
-{
-    if (callInfo.constructing() || callInfo.argc() != 1)
-        return InliningStatus_NotInlined;
-
-    // For now, in seq. mode we just call the C function.  In
-    // par. mode we use inlined MIR.
-    ExecutionMode executionMode = info().executionMode();
-    switch (executionMode) {
-      case ParallelExecution:
-        return inlineNewDenseArrayForParallelExecution(callInfo);
-      default:
-        return inlineNewDenseArrayForSequentialExecution(callInfo);
-    }
-
-    MOZ_CRASH("unknown ExecutionMode");
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineNewDenseArrayForSequentialExecution(CallInfo &callInfo)
-{
-    // not yet implemented; in seq. mode the C function is not so bad
-    return InliningStatus_NotInlined;
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineNewDenseArrayForParallelExecution(CallInfo &callInfo)
-{
-    // Create the new parallel array object.  Parallel arrays have specially
-    // constructed type objects, so we can only perform the inlining if we
-    // already have one of these type objects.
-    types::TemporaryTypeSet *returnTypes = getInlineReturnTypeSet();
-    if (returnTypes->getKnownMIRType() != MIRType_Object)
-        return InliningStatus_NotInlined;
-    if (returnTypes->unknownObject() || returnTypes->getObjectCount() != 1)
-        return InliningStatus_NotInlined;
-    if (callInfo.getArg(0)->type() != MIRType_Int32)
-        return InliningStatus_NotInlined;
-    types::TypeObject *typeObject = returnTypes->getTypeObject(0);
-
-    NativeObject *templateObject = inspector->getTemplateObjectForNative(pc, intrinsic_NewDenseArray);
-    if (!templateObject || templateObject->type() != typeObject)
-        return InliningStatus_NotInlined;
-
-    callInfo.setImplicitlyUsedUnchecked();
-
-    MNewDenseArrayPar *newObject = MNewDenseArrayPar::New(alloc(),
-                                                          graph().forkJoinContext(),
-                                                          callInfo.getArg(0),
-                                                          &templateObject->as<ArrayObject>());
-    current->add(newObject);
-    current->push(newObject);
-
-    return InliningStatus_Inlined;
-}
-
-IonBuilder::InliningStatus
 IonBuilder::inlineHasClass(CallInfo &callInfo,
                            const Class *clasp1, const Class *clasp2,
                            const Class *clasp3, const Class *clasp4)
 {
     if (callInfo.constructing() || callInfo.argc() != 1)
         return InliningStatus_NotInlined;
 
     if (callInfo.getArg(0)->type() != MIRType_Object)
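
The removed inlineForceSequentialOrInParallelSection relied on the trick spelled out in its comment: in parallel compilations the intrinsic was replaced by a constant, so unreachable-code elimination could drop the branch that tested it. A small self-contained sketch of that effect in ordinary C++ (illustrative names, not IonBuilder's MIR API):

    #include <cstdio>

    // Stand-in for the intrinsic: folding it to a compile-time constant makes
    // one arm of the branch below statically dead, which is what let UCE strip
    // the unused path in the removed code.
    constexpr bool ShouldForceSequential() {
        return true;
    }

    int body(int x) {
        if (ShouldForceSequential())
            return x + 1;   // the only reachable arm once the fold happens
        return x * 2;       // dead once the predicate is a known constant
    }

    int main() {
        std::printf("%d\n", body(41));
        return 0;
    }
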
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -2266,27 +2266,16 @@ MBinaryArithInstruction::inferFallback(B
 {
     // Try to specialize based on what baseline observed in practice.
     specialization_ = inspector->expectedBinaryArithSpecialization(pc);
     if (specialization_ != MIRType_None) {
         setResultType(specialization_);
         return;
     }
 
-    // In parallel execution, for now anyhow, we *only* support adding
-    // and manipulating numbers (not strings or objects).  So no
-    // matter what we can specialize to double...if the result ought
-    // to have been something else, we'll fail in the various type
-    // guards that get inserted later.
-    if (block()->info().executionMode() == ParallelExecution) {
-        specialization_ = MIRType_Double;
-        setResultType(MIRType_Double);
-        return;
-    }
-
     // If we can't specialize because we have no type information at all for
     // the lhs or rhs, mark the binary instruction as having no possible types
     // either to avoid degrading subsequent analysis.
     if (getOperand(0)->emptyResultTypeSet() || getOperand(1)->emptyResultTypeSet()) {
         LifoAlloc *alloc = GetJitContext()->temp->lifoAlloc();
         types::TemporaryTypeSet *types = alloc->new_<types::TemporaryTypeSet>();
         if (types)
             setResultTypeSet(types);
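
The fallback that remains above follows the comment in the hunk: once baseline feedback gives no specialization, an operand with an empty result type set means the result should also be marked as having no possible types, rather than being left as "unknown". A rough sketch of that rule with illustrative names (std::set of type names stands in for TemporaryTypeSet):

    #include <optional>
    #include <set>
    #include <string>

    using TypeSet = std::set<std::string>;

    // If either operand has an empty (impossible) type set, give the result an
    // empty set too so later analysis does not treat it as "could be anything";
    // otherwise leave the result unannotated.
    std::optional<TypeSet> fallbackResultTypes(const TypeSet &lhs, const TypeSet &rhs) {
        if (lhs.empty() || rhs.empty())
            return TypeSet{};
        return std::nullopt;
    }
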
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -2831,50 +2831,16 @@ class MNewObject
     bool writeRecoverData(CompactBufferWriter &writer) const;
     bool canRecoverOnBailout() const {
         // The template object can safely be used in the recover instruction
         // because it can never be mutated by any other function execution.
         return true;
     }
 };
 
-// Could be allocating either a new array or a new object.
-class MNewPar
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    AlwaysTenuredNativeObject templateObject_;
-
-    MNewPar(MDefinition *cx, NativeObject *templateObject)
-      : MUnaryInstruction(cx),
-        templateObject_(templateObject)
-    {
-        setResultType(MIRType_Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(NewPar);
-
-    static MNewPar *New(TempAllocator &alloc, MDefinition *cx, NativeObject *templateObject) {
-        return new(alloc) MNewPar(cx, templateObject);
-    }
-
-    MDefinition *forkJoinContext() const {
-        return getOperand(0);
-    }
-
-    NativeObject *templateObject() const {
-        return templateObject_;
-    }
-
-    AliasSet getAliasSet() const {
-        return AliasSet::None();
-    }
-};
-
 class MNewTypedObject : public MNullaryInstruction
 {
     AlwaysTenured<InlineTypedObject *> templateObject_;
     gc::InitialHeap initialHeap_;
 
     MNewTypedObject(types::CompilerConstraintList *constraints,
                     InlineTypedObject *templateObject,
                     gc::InitialHeap initialHeap)
@@ -6070,56 +6036,16 @@ class MConcat
     bool writeRecoverData(CompactBufferWriter &writer) const;
     bool canRecoverOnBailout() const {
         return true;
     }
 
     ALLOW_CLONE(MConcat)
 };
 
-class MConcatPar
-  : public MTernaryInstruction,
-    public NoTypePolicy::Data
-{
-    MConcatPar(MDefinition *cx, MDefinition *left, MDefinition *right)
-      : MTernaryInstruction(cx, left, right)
-    {
-        // Type analysis has already run, before replacing with the parallel
-        // variant.
-        MOZ_ASSERT(left->type() == MIRType_String && right->type() == MIRType_String);
-
-        setMovable();
-        setResultType(MIRType_String);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ConcatPar)
-
-    static MConcatPar *New(TempAllocator &alloc, MDefinition *cx, MConcat *concat) {
-        return new(alloc) MConcatPar(cx, concat->lhs(), concat->rhs());
-    }
-
-    MDefinition *forkJoinContext() const {
-        return getOperand(0);
-    }
-    MDefinition *lhs() const {
-        return getOperand(1);
-    }
-    MDefinition *rhs() const {
-        return getOperand(2);
-    }
-
-    bool congruentTo(const MDefinition *ins) const {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const {
-        return AliasSet::None();
-    }
-};
-
 class MCharCodeAt
   : public MBinaryInstruction,
     public MixPolicy<StringPolicy<0>, IntPolicy<1> >::Data
 {
     MCharCodeAt(MDefinition *str, MDefinition *index)
         : MBinaryInstruction(str, index)
     {
         setMovable();
@@ -6623,69 +6549,16 @@ class MCheckOverRecursed
   public:
     INSTRUCTION_HEADER(CheckOverRecursed)
 
     static MCheckOverRecursed *New(TempAllocator &alloc) {
         return new(alloc) MCheckOverRecursed();
     }
 };
 
-// Check the current frame for over-recursion past the global stack limit.
-// Uses the per-thread recursion limit.
-class MCheckOverRecursedPar
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    explicit MCheckOverRecursedPar(MDefinition *cx)
-      : MUnaryInstruction(cx)
-    {
-        setResultType(MIRType_None);
-        setGuard();
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(CheckOverRecursedPar);
-
-    static MCheckOverRecursedPar *New(TempAllocator &alloc, MDefinition *cx) {
-        return new(alloc) MCheckOverRecursedPar(cx);
-    }
-
-    MDefinition *forkJoinContext() const {
-        return getOperand(0);
-    }
-};
-
-// Check for an interrupt (or rendezvous) in parallel mode.
-class MInterruptCheckPar
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    explicit MInterruptCheckPar(MDefinition *cx)
-      : MUnaryInstruction(cx)
-    {
-        setResultType(MIRType_None);
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(InterruptCheckPar);
-
-    static MInterruptCheckPar *New(TempAllocator &alloc, MDefinition *cx) {
-        return new(alloc) MInterruptCheckPar(cx);
-    }
-
-    MDefinition *forkJoinContext() const {
-        return getOperand(0);
-    }
-    AliasSet getAliasSet() const {
-        return AliasSet::None();
-    }
-};
-
 // Check whether we need to fire the interrupt handler.
 class MInterruptCheck : public MNullaryInstruction
 {
     MInterruptCheck() {
         setGuard();
     }
 
   public:
@@ -7191,53 +7064,16 @@ class MLambdaArrow
     MDefinition *thisDef() const {
         return getOperand(1);
     }
     const LambdaFunctionInfo &info() const {
         return info_;
     }
 };
 
-class MLambdaPar
-  : public MBinaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    LambdaFunctionInfo info_;
-
-    MLambdaPar(MDefinition *cx, MDefinition *scopeChain, JSFunction *fun,
-               types::TemporaryTypeSet *resultTypes, const LambdaFunctionInfo &info)
-      : MBinaryInstruction(cx, scopeChain), info_(info)
-    {
-        MOZ_ASSERT(!info_.singletonType);
-        MOZ_ASSERT(!info_.useNewTypeForClone);
-        setResultType(MIRType_Object);
-        setResultTypeSet(resultTypes);
-    }
-
-  public:
-    INSTRUCTION_HEADER(LambdaPar);
-
-    static MLambdaPar *New(TempAllocator &alloc, MDefinition *cx, MLambda *lambda) {
-        return new(alloc) MLambdaPar(cx, lambda->scopeChain(), lambda->info().fun,
-                                     lambda->resultTypeSet(), lambda->info());
-    }
-
-    MDefinition *forkJoinContext() const {
-        return getOperand(0);
-    }
-
-    MDefinition *scopeChain() const {
-        return getOperand(1);
-    }
-
-    const LambdaFunctionInfo &info() const {
-        return info_;
-    }
-};
-
 // Returns obj->slots.
 class MSlots
   : public MUnaryInstruction,
     public SingleObjectPolicy::Data
 {
     explicit MSlots(MDefinition *object)
       : MUnaryInstruction(object)
     {
@@ -9997,73 +9833,16 @@ class MFunctionEnvironment
     MDefinition *foldsTo(TempAllocator &alloc);
 
     // A function's environment is fixed.
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 };
 
-// Loads the current js::ForkJoinContext*.
-// Only applicable in ParallelExecution.
-class MForkJoinContext
-  : public MNullaryInstruction
-{
-    MForkJoinContext()
-        : MNullaryInstruction()
-    {
-        setResultType(MIRType_ForkJoinContext);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ForkJoinContext);
-
-    static MForkJoinContext *New(TempAllocator &alloc) {
-        return new(alloc) MForkJoinContext();
-    }
-
-    AliasSet getAliasSet() const {
-        // Indicate that this instruction reads nothing, stores nothing.
-        // (For all intents and purposes)
-        return AliasSet::None();
-    }
-
-    bool possiblyCalls() const {
-        return true;
-    }
-};
-
-// Calls the ForkJoinGetSlice stub, used for inlining the eponymous intrinsic.
-// Only applicable in ParallelExecution.
-class MForkJoinGetSlice
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    explicit MForkJoinGetSlice(MDefinition *cx)
-      : MUnaryInstruction(cx)
-    {
-        setResultType(MIRType_Int32);
-    }
-
-  public:
-    INSTRUCTION_HEADER(ForkJoinGetSlice);
-
-    static MForkJoinGetSlice *New(TempAllocator &alloc, MDefinition *cx) {
-        return new(alloc) MForkJoinGetSlice(cx);
-    }
-
-    MDefinition *forkJoinContext() {
-        return getOperand(0);
-    }
-
-    bool possiblyCalls() const {
-        return true;
-    }
-};
-
 // Store to vp[slot] (slots that are not inline in an object).
 class MStoreSlot
   : public MBinaryInstruction,
     public MixPolicy<ObjectPolicy<0>, NoFloatPolicy<1> >::Data
 {
     uint32_t slot_;
     MIRType slotType_;
     bool needsBarrier_;
@@ -11318,92 +11097,16 @@ class MRest
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
     bool possiblyCalls() const {
         return true;
     }
 };
 
-class MRestPar
-  : public MBinaryInstruction,
-    public MRestCommon,
-    public IntPolicy<1>::Data
-{
-    MRestPar(MDefinition *cx, MDefinition *numActuals, unsigned numFormals,
-             ArrayObject *templateObject, types::TemporaryTypeSet *resultTypes)
-      : MBinaryInstruction(cx, numActuals),
-        MRestCommon(numFormals, templateObject)
-    {
-        setResultType(MIRType_Object);
-        setResultTypeSet(resultTypes);
-    }
-
-  public:
-    INSTRUCTION_HEADER(RestPar);
-
-    static MRestPar *New(TempAllocator &alloc, MDefinition *cx, MRest *rest) {
-        return new(alloc) MRestPar(cx, rest->numActuals(), rest->numFormals(),
-                                   rest->templateObject(), rest->resultTypeSet());
-    }
-
-    MDefinition *forkJoinContext() const {
-        return getOperand(0);
-    }
-    MDefinition *numActuals() const {
-        return getOperand(1);
-    }
-
-    AliasSet getAliasSet() const {
-        return AliasSet::None();
-    }
-    bool possiblyCalls() const {
-        return true;
-    }
-};
-
-// Guard on an object being safe for writes by current parallel cx.
-// Must be either thread-local or else a handle into the destination array.
-class MGuardThreadExclusive
-  : public MBinaryInstruction,
-    public ObjectPolicy<1>::Data
-{
-    MGuardThreadExclusive(MDefinition *cx, MDefinition *obj)
-      : MBinaryInstruction(cx, obj)
-    {
-        setResultType(MIRType_None);
-        setGuard();
-    }
-
-  public:
-    INSTRUCTION_HEADER(GuardThreadExclusive);
-
-    static MGuardThreadExclusive *New(TempAllocator &alloc, MDefinition *cx, MDefinition *obj) {
-        return new(alloc) MGuardThreadExclusive(cx, obj);
-    }
-    MDefinition *forkJoinContext() const {
-        return getOperand(0);
-    }
-    MDefinition *object() const {
-        return getOperand(1);
-    }
-    BailoutKind bailoutKind() const {
-        return Bailout_GuardThreadExclusive;
-    }
-    bool congruentTo(const MDefinition *ins) const {
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const {
-        return AliasSet::None();
-    }
-    bool possiblyCalls() const {
-        return true;
-    }
-};
-
 class MFilterTypeSet
   : public MUnaryInstruction,
     public FilterTypeSetPolicy::Data
 {
     MFilterTypeSet(MDefinition *def, types::TemporaryTypeSet *types)
       : MUnaryInstruction(def)
     {
         MOZ_ASSERT(!types->unknown());
@@ -11641,49 +11344,16 @@ class MNewRunOnceCallObject : public MNe
 
     static MNewRunOnceCallObject *
     New(TempAllocator &alloc, CallObject *templateObj)
     {
         return new(alloc) MNewRunOnceCallObject(templateObj);
     }
 };
 
-class MNewCallObjectPar
-  : public MUnaryInstruction,
-    public NoTypePolicy::Data
-{
-    AlwaysTenured<CallObject*> templateObj_;
-
-    MNewCallObjectPar(MDefinition *cx, CallObject *templateObj)
-        : MUnaryInstruction(cx),
-          templateObj_(templateObj)
-    {
-        setResultType(MIRType_Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(NewCallObjectPar);
-
-    static MNewCallObjectPar *New(TempAllocator &alloc, MDefinition *cx, MNewCallObjectBase *callObj) {
-        return new(alloc) MNewCallObjectPar(cx, callObj->templateObject());
-    }
-
-    MDefinition *forkJoinContext() const {
-        return getOperand(0);
-    }
-
-    CallObject *templateObj() const {
-        return templateObj_;
-    }
-
-    AliasSet getAliasSet() const {
-        return AliasSet::None();
-    }
-};
-
 class MNewStringObject :
   public MUnaryInstruction,
   public ConvertToStringPolicy<0>::Data
 {
     AlwaysTenuredObject templateObj_;
 
     MNewStringObject(MDefinition *input, JSObject *templateObj)
       : MUnaryInstruction(input),
@@ -11759,63 +11429,16 @@ class MEnclosingScope : public MLoadFixe
     }
 
     AliasSet getAliasSet() const {
         // ScopeObject reserved slots are immutable.
         return AliasSet::None();
     }
 };
 
-// Creates a dense array of the given length.
-//
-// Note: the template object should be an *empty* dense array!
-class MNewDenseArrayPar
-  : public MBinaryInstruction,
-    public NoTypePolicy::Data
-{
-    AlwaysTenured<ArrayObject*> templateObject_;
-
-    MNewDenseArrayPar(MDefinition *cx, MDefinition *length, ArrayObject *templateObject)
-      : MBinaryInstruction(cx, length),
-        templateObject_(templateObject)
-    {
-        MOZ_ASSERT(length->type() == MIRType_Int32);
-        setResultType(MIRType_Object);
-    }
-
-  public:
-    INSTRUCTION_HEADER(NewDenseArrayPar);
-
-    static MNewDenseArrayPar *New(TempAllocator &alloc, MDefinition *cx, MDefinition *length,
-                                  ArrayObject *templateObject)
-    {
-        return new(alloc) MNewDenseArrayPar(cx, length, templateObject);
-    }
-
-    MDefinition *forkJoinContext() const {
-        return getOperand(0);
-    }
-
-    MDefinition *length() const {
-        return getOperand(1);
-    }
-
-    ArrayObject *templateObject() const {
-        return templateObject_;
-    }
-
-    bool possiblyCalls() const {
-        return true;
-    }
-
-    AliasSet getAliasSet() const {
-        return AliasSet::None();
-    }
-};
-
 // This is an element of a spaghetti stack which is used to represent the memory
 // context which has to be restored in case of a bailout.
 struct MStoreToRecover : public TempObject, public InlineSpaghettiStackNode<MStoreToRecover>
 {
     MDefinition *operand;
 
     explicit MStoreToRecover(MDefinition *operand)
       : operand(operand)
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -197,47 +197,16 @@ MIRGraph::removeBlockIncludingPhis(MBasi
 
 void
 MIRGraph::unmarkBlocks()
 {
     for (MBasicBlockIterator i(blocks_.begin()); i != blocks_.end(); i++)
         i->unmark();
 }
 
-MDefinition *
-MIRGraph::forkJoinContext()
-{
-    // Search the entry block to find a ForkJoinContext instruction. If we do
-    // not find one, add one after the Start instruction.
-    //
-    // Note: the original design used a field in MIRGraph to cache the
-    // forkJoinContext rather than searching for it again.  However, this
-    // could become out of date due to DCE.  Given that we do not generally
-    // have to search very far to find the ForkJoinContext instruction if it
-    // exists, and that we don't look for it that often, I opted to simply
-    // eliminate the cache and search anew each time, so that it is that much
-    // easier to keep the IR coherent. - nmatsakis
-
-    MBasicBlock *entry = entryBlock();
-    MOZ_ASSERT(entry->info().executionMode() == ParallelExecution);
-
-    MInstruction *start = nullptr;
-    for (MInstructionIterator ins(entry->begin()); ins != entry->end(); ins++) {
-        if (ins->isForkJoinContext())
-            return *ins;
-        else if (ins->isStart())
-            start = *ins;
-    }
-    MOZ_ASSERT(start);
-
-    MForkJoinContext *cx = MForkJoinContext::New(alloc());
-    entry->insertAfter(start, cx);
-    return cx;
-}
-
 MBasicBlock *
 MBasicBlock::New(MIRGraph &graph, BytecodeAnalysis *analysis, CompileInfo &info,
                  MBasicBlock *pred, const BytecodeSite *site, Kind kind)
 {
     MOZ_ASSERT(site->pc() != nullptr);
 
     MBasicBlock *block = new(graph.alloc()) MBasicBlock(graph, info, site, kind);
     if (!block->init())
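
The removed MIRGraph::forkJoinContext deliberately re-searched the entry block each time rather than caching the instruction, since a cached pointer could go stale under DCE. A standalone model of that find-or-lazily-insert pattern; a std::list of opcode names stands in for the instruction stream, and none of this is the MIRGraph API:

    #include <iterator>
    #include <list>
    #include <string>

    using Block = std::list<std::string>;

    // Return an existing "ForkJoinContext" instruction if the entry block has
    // one; otherwise insert it right after "Start", mirroring the removed
    // helper's behavior.
    Block::iterator getOrInsertForkJoinContext(Block &entry) {
        Block::iterator start = entry.end();
        for (Block::iterator it = entry.begin(); it != entry.end(); ++it) {
            if (*it == "ForkJoinContext")
                return it;              // reuse the one that is already there
            if (*it == "Start")
                start = it;             // remember the insertion anchor
        }
        // The original asserts that a Start instruction exists.
        return entry.insert(std::next(start), "ForkJoinContext");
    }
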
@@ -802,17 +771,16 @@ MBasicBlock::safeInsertTop(MDefinition *
     // Beta nodes and interrupt checks are required to be located at the
     // beginnings of basic blocks, so we must insert new instructions after any
     // such instructions.
     MInstructionIterator insertIter = !ins || ins->isPhi()
                                     ? begin()
                                     : begin(ins->toInstruction());
     while (insertIter->isBeta() ||
            insertIter->isInterruptCheck() ||
-           insertIter->isInterruptCheckPar() ||
            insertIter->isConstant() ||
            (!(ignore & IgnoreRecover) && insertIter->isRecoveredOnBailout()))
     {
         insertIter++;
     }
 
     return *insertIter;
 }
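
For contrast, the retained safeInsertTop above shows the other half of that discipline: new instructions are placed after the fixed prefix of instructions that must stay at the top of a block. A simplified model under the same list-of-names convention as the previous sketch (it omits the recovered-on-bailout case handled by the real code):

    #include <list>
    #include <string>

    using Block = std::list<std::string>;

    // Skip the instructions pinned to the start of a basic block (beta nodes,
    // interrupt checks, constants) and return the first safe insertion point.
    Block::iterator safeInsertTop(Block &block) {
        Block::iterator it = block.begin();
        while (it != block.end() &&
               (*it == "Beta" || *it == "InterruptCheck" || *it == "Constant")) {
            ++it;
        }
        return it;
    }
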
--- a/js/src/jit/MIRGraph.h
+++ b/js/src/jit/MIRGraph.h
@@ -802,22 +802,16 @@ class MIRGraph
 
     bool hasTryBlock() const {
         return hasTryBlock_;
     }
     void setHasTryBlock() {
         hasTryBlock_ = true;
     }
 
-    // The per-thread context. So as not to modify the calling convention for
-    // parallel code, we obtain the current ForkJoinContext from thread-local
-    // storage.  This helper method will lazily insert an MForkJoinContext
-    // instruction in the entry block and return the definition.
-    MDefinition *forkJoinContext();
-
     void dump(FILE *fp);
     void dump();
 };
 
 class MDefinitionIterator
 {
   friend class MBasicBlock;
   friend class MNodeIterator;
--- a/js/src/jit/MOpcodes.h
+++ b/js/src/jit/MOpcodes.h
@@ -86,17 +86,16 @@ namespace jit {
     _(Random)                                                               \
     _(MathFunction)                                                         \
     _(Add)                                                                  \
     _(Sub)                                                                  \
     _(Mul)                                                                  \
     _(Div)                                                                  \
     _(Mod)                                                                  \
     _(Concat)                                                               \
-    _(ConcatPar)                                                            \
     _(CharCodeAt)                                                           \
     _(FromCharCode)                                                         \
     _(StringSplit)                                                          \
     _(Substr)                                                               \
     _(Return)                                                               \
     _(Throw)                                                                \
     _(Box)                                                                  \
     _(Unbox)                                                                \
@@ -242,27 +241,17 @@ namespace jit {
     _(AsmJSStoreGlobalVar)                                                  \
     _(AsmJSLoadFuncPtr)                                                     \
     _(AsmJSLoadFFIFunc)                                                     \
     _(AsmJSReturn)                                                          \
     _(AsmJSParameter)                                                       \
     _(AsmJSVoidReturn)                                                      \
     _(AsmJSPassStackArg)                                                    \
     _(AsmJSCall)                                                            \
-    _(CheckOverRecursedPar)                                                 \
-    _(NewCallObjectPar)                                                     \
-    _(NewPar)                                                               \
-    _(NewDenseArrayPar)                                                     \
     _(NewDerivedTypedObject)                                                \
-    _(LambdaPar)                                                            \
-    _(RestPar)                                                              \
-    _(ForkJoinContext)                                                      \
-    _(ForkJoinGetSlice)                                                     \
-    _(GuardThreadExclusive)                                                 \
-    _(InterruptCheckPar)                                                    \
     _(RecompileCheck)                                                       \
     _(MemoryBarrier)                                                        \
     _(AsmJSCompareExchangeHeap)                                             \
     _(AsmJSAtomicBinopHeap)                                                 \
     _(UnknownValue)                                                         \
     _(LexicalCheck)                                                         \
     _(ThrowUninitializedLexical)                                            \
     _(Debugger)
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -13,19 +13,17 @@
 #include "gc/GCTrace.h"
 #include "jit/AtomicOp.h"
 #include "jit/Bailouts.h"
 #include "jit/BaselineFrame.h"
 #include "jit/BaselineIC.h"
 #include "jit/BaselineJIT.h"
 #include "jit/Lowering.h"
 #include "jit/MIR.h"
-#include "jit/ParallelFunctions.h"
 #include "js/Conversions.h"
-#include "vm/ForkJoin.h"
 #include "vm/TraceLogging.h"
 
 #include "jsgcinlines.h"
 #include "jsinferinlines.h"
 #include "jsobjinlines.h"
 #include "vm/Interpreter-inl.h"
 
 using namespace js;
@@ -831,96 +829,16 @@ MacroAssembler::newGCString(Register res
 
 void
 MacroAssembler::newGCFatInlineString(Register result, Register temp, Label *fail)
 {
     allocateNonObject(result, temp, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
 }
 
 void
-MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
-                              gc::AllocKind allocKind, Label *fail)
-{
-    return newGCTenuredThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
-}
-
-void
-MacroAssembler::newGCTenuredThingPar(Register result, Register cx,
-                                     Register tempReg1, Register tempReg2,
-                                     gc::AllocKind allocKind, Label *fail)
-{
-    // Similar to ::newGCThing(), except that it allocates from a custom
-    // Allocator in the ForkJoinContext*, rather than being hardcoded to the
-    // compartment allocator.  This requires two temporary registers.
-    //
-    // Subtle: I wanted to reuse `result` for one of the temporaries, but the
-    // register allocator was assigning it to the same register as `cx`.
-    // Then we overwrite that register which messed up the OOL code.
-
-    uint32_t thingSize = (uint32_t)gc::Arena::thingSize(allocKind);
-
-    // Load the allocator:
-    // tempReg1 = (Allocator*) forkJoinCx->allocator()
-    loadPtr(Address(cx, ThreadSafeContext::offsetOfAllocator()),
-            tempReg1);
-
-    // Get a pointer to the relevant free list:
-    // tempReg1 = (FreeList*) &tempReg1->arenas.freeLists[(allocKind)]
-    uint32_t offset = (offsetof(Allocator, arenas) +
-                       js::gc::ArenaLists::getFreeListOffset(allocKind));
-    addPtr(Imm32(offset), tempReg1);
-
-    // Load first item on the list
-    // tempReg2 = tempReg1->head.first
-    loadPtr(Address(tempReg1, gc::FreeList::offsetOfFirst()), tempReg2);
-
-    // Check whether bump-allocation is possible.
-    // if tempReg1->head.last <= tempReg2, fail
-    branchPtr(Assembler::BelowOrEqual,
-              Address(tempReg1, gc::FreeList::offsetOfLast()),
-              tempReg2,
-              fail);
-
-    // If so, take |first| and advance pointer by thingSize bytes.
-    // result = tempReg2;
-    // tempReg2 += thingSize;
-    movePtr(tempReg2, result);
-    addPtr(Imm32(thingSize), tempReg2);
-
-    // Update |first|.
-    // tempReg1->head.first = tempReg2;
-    storePtr(tempReg2, Address(tempReg1, gc::FreeList::offsetOfFirst()));
-}
-
-void
-MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
-                              NativeObject *templateObject, Label *fail)
-{
-    gc::AllocKind allocKind = templateObject->asTenured().getAllocKind();
-    MOZ_ASSERT(allocKind >= gc::FINALIZE_OBJECT0 && allocKind <= gc::FINALIZE_OBJECT_LAST);
-    MOZ_ASSERT(!templateObject->numDynamicSlots());
-
-    newGCThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
-}
-
-void
-MacroAssembler::newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
-                               Label *fail)
-{
-    newGCTenuredThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_STRING, fail);
-}
-
-void
-MacroAssembler::newGCFatInlineStringPar(Register result, Register cx, Register tempReg1,
-                                        Register tempReg2, Label *fail)
-{
-    newGCTenuredThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
-}
-
-void
 MacroAssembler::copySlotsFromTemplate(Register obj, const NativeObject *templateObj,
                                       uint32_t start, uint32_t end)
 {
     uint32_t nfixed = Min(templateObj->numFixedSlots(), end);
     for (unsigned i = start; i < nfixed; i++)
         storeValue(templateObj->getFixedSlot(i), Address(obj, NativeObject::getFixedSlotOffset(i)));
 }
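
The comment block in the removed newGCTenuredThingPar describes the whole scheme: fetch the per-context allocator's free list and bump-allocate from it, falling back to the slow path when the list cannot satisfy the request. A self-contained model of just that bump step; the FreeListModel layout is an assumption for illustration, not gc::FreeList:

    #include <cstddef>
    #include <cstdint>

    struct FreeListModel {
        uintptr_t first;   // address of the next free cell
        uintptr_t last;    // limit: one past the last usable cell
    };

    // Returns 0 to stand in for the branch-to-fail path taken when last <= first;
    // otherwise hands out |first| and advances it by the fixed allocation size,
    // mirroring the movePtr/addPtr/storePtr sequence in the removed masm code.
    uintptr_t bumpAllocate(FreeListModel &freeList, size_t thingSize) {
        if (freeList.last <= freeList.first)
            return 0;
        uintptr_t result = freeList.first;
        freeList.first += thingSize;
        return result;
    }
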
 
@@ -1198,41 +1116,25 @@ MacroAssembler::loadStringChar(Register 
     jump(&done);
 
     bind(&isLatin1);
     load8ZeroExtend(BaseIndex(output, index, TimesOne), output);
 
     bind(&done);
 }
 
-void
-MacroAssembler::checkInterruptFlagPar(Register tempReg, Label *fail)
-{
-    movePtr(ImmPtr(GetJitContext()->runtime->addressOfInterruptParUint32()), tempReg);
-    branch32(Assembler::NonZero, Address(tempReg, 0), Imm32(0), fail);
-}
-
 // Save an exit frame (which must be aligned to the stack pointer) to
 // PerThreadData::jitTop of the main thread.
 void
 MacroAssembler::linkExitFrame()
 {
     AbsoluteAddress jitTop(GetJitContext()->runtime->addressOfJitTop());
     storePtr(StackPointer, jitTop);
 }
 
-// Save an exit frame to the thread data of the current thread, given a
-// register that holds a PerThreadData *.
-void
-MacroAssembler::linkParallelExitFrame(Register pt)
-{
-    Address jitTop(pt, offsetof(PerThreadData, jitTop));
-    storePtr(StackPointer, jitTop);
-}
-
 static void
 ReportOverRecursed(JSContext *cx)
 {
     js_ReportOverRecursed(cx);
 }
 
 void
 MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
@@ -1379,181 +1281,49 @@ MacroAssembler::generateBailoutTail(Regi
             addPtr(Imm32(ExitFrameLayout::SizeWithFooter()), StackPointer);
 
             jump(jitcodeReg);
         }
     }
 }
 
 void
-MacroAssembler::loadBaselineOrIonRaw(Register script, Register dest, ExecutionMode mode,
-                                     Label *failure)
+MacroAssembler::loadBaselineOrIonRaw(Register script, Register dest, Label *failure)
 {
-    if (mode == SequentialExecution) {
-        loadPtr(Address(script, JSScript::offsetOfBaselineOrIonRaw()), dest);
-        if (failure)
-            branchTestPtr(Assembler::Zero, dest, dest, failure);
-    } else {
-        loadPtr(Address(script, JSScript::offsetOfParallelIonScript()), dest);
-        if (failure)
-            branchPtr(Assembler::BelowOrEqual, dest, ImmPtr(ION_COMPILING_SCRIPT), failure);
-        loadPtr(Address(dest, IonScript::offsetOfMethod()), dest);
-        loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
-    }
+    loadPtr(Address(script, JSScript::offsetOfBaselineOrIonRaw()), dest);
+    if (failure)
+        branchTestPtr(Assembler::Zero, dest, dest, failure);
 }
 
 void
-MacroAssembler::loadBaselineOrIonNoArgCheck(Register script, Register dest, ExecutionMode mode,
-                                            Label *failure)
+MacroAssembler::loadBaselineOrIonNoArgCheck(Register script, Register dest, Label *failure)
 {
-    if (mode == SequentialExecution) {
-        loadPtr(Address(script, JSScript::offsetOfBaselineOrIonSkipArgCheck()), dest);
-        if (failure)
-            branchTestPtr(Assembler::Zero, dest, dest, failure);
-    } else {
-        // Find second register to get the offset to skip argument check
-        Register offset = script;
-        if (script == dest) {
-            GeneralRegisterSet regs(GeneralRegisterSet::All());
-            regs.take(dest);
-            offset = regs.takeAny();
-        }
-
-        loadPtr(Address(script, JSScript::offsetOfParallelIonScript()), dest);
-        if (failure)
-            branchPtr(Assembler::BelowOrEqual, dest, ImmPtr(ION_COMPILING_SCRIPT), failure);
-
-        Push(offset);
-        load32(Address(script, IonScript::offsetOfSkipArgCheckEntryOffset()), offset);
-
-        loadPtr(Address(dest, IonScript::offsetOfMethod()), dest);
-        loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
-        addPtr(offset, dest);
-
-        Pop(offset);
-    }
+    loadPtr(Address(script, JSScript::offsetOfBaselineOrIonSkipArgCheck()), dest);
+    if (failure)
+        branchTestPtr(Assembler::Zero, dest, dest, failure);
 }
 
 void
 MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest)
 {
     if (framePtr != dest)
         movePtr(framePtr, dest);
     subPtr(Imm32(BaselineFrame::Size()), dest);
 }
 
 void
-MacroAssembler::loadForkJoinContext(Register cx, Register scratch)
-{
-    // Load the current ForkJoinContext *. If we need a parallel exit frame,
-    // chances are we are about to do something very slow anyways, so just
-    // call ForkJoinContextPar again instead of using the cached version.
-    setupUnalignedABICall(0, scratch);
-    callWithABI(JS_FUNC_TO_DATA_PTR(void *, ForkJoinContextPar));
-    if (ReturnReg != cx)
-        movePtr(ReturnReg, cx);
-}
-
-void
-MacroAssembler::loadContext(Register cxReg, Register scratch, ExecutionMode executionMode)
-{
-    switch (executionMode) {
-      case SequentialExecution:
-        // The scratch register is not used for sequential execution.
-        loadJSContext(cxReg);
-        break;
-      case ParallelExecution:
-        loadForkJoinContext(cxReg, scratch);
-        break;
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
-}
-
-void
-MacroAssembler::enterParallelExitFrameAndLoadContext(const VMFunction *f, Register cx,
-                                                     Register scratch)
-{
-    loadForkJoinContext(cx, scratch);
-    // Load the PerThreadData from the cx.
-    loadPtr(Address(cx, offsetof(ForkJoinContext, perThreadData)), scratch);
-    linkParallelExitFrame(scratch);
-    // Push the ioncode.
-    exitCodePatch_ = PushWithPatch(ImmWord(-1));
-    // Push the VMFunction pointer, to mark arguments.
-    Push(ImmPtr(f));
-}
-
-void
-MacroAssembler::enterFakeParallelExitFrame(Register cx, Register scratch,
-                                           JitCode *codeVal)
-{
-    // Load the PerThreadData from the cx.
-    loadPtr(Address(cx, offsetof(ForkJoinContext, perThreadData)), scratch);
-    linkParallelExitFrame(scratch);
-    Push(ImmPtr(codeVal));
-    Push(ImmPtr(nullptr));
-}
-
-void
-MacroAssembler::enterExitFrameAndLoadContext(const VMFunction *f, Register cxReg, Register scratch,
-                                             ExecutionMode executionMode)
-{
-    switch (executionMode) {
-      case SequentialExecution:
-        // The scratch register is not used for sequential execution.
-        enterExitFrame(f);
-        loadJSContext(cxReg);
-        break;
-      case ParallelExecution:
-        enterParallelExitFrameAndLoadContext(f, cxReg, scratch);
-        break;
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
-}
-
-void
-MacroAssembler::enterFakeExitFrame(Register cxReg, Register scratch,
-                                   ExecutionMode executionMode,
-                                   JitCode *codeVal)
-{
-    switch (executionMode) {
-      case SequentialExecution:
-        // The cx and scratch registers are not used for sequential execution.
-        enterFakeExitFrame(codeVal);
-        break;
-      case ParallelExecution:
-        enterFakeParallelExitFrame(cxReg, scratch, codeVal);
-        break;
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
-}
-
-void
-MacroAssembler::handleFailure(ExecutionMode executionMode)
+MacroAssembler::handleFailure()
 {
     // Re-entry code is irrelevant because the exception will leave the
     // running function and never come back
     if (sps_)
         sps_->skipNextReenter();
     leaveSPSFrame();
 
-    JitCode *excTail;
-    switch (executionMode) {
-      case SequentialExecution:
-        excTail = GetJitContext()->runtime->jitRuntime()->getExceptionTail();
-        break;
-      case ParallelExecution:
-        excTail = GetJitContext()->runtime->jitRuntime()->getExceptionTailParallel();
-        break;
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
+    JitCode *excTail = GetJitContext()->runtime->jitRuntime()->getExceptionTail();
     jump(excTail);
 
     // Doesn't actually emit code, but balances the leave()
     if (sps_)
         sps_->reenter(*this, InvalidReg);
 }
 
 #ifdef DEBUG
@@ -2142,23 +1912,19 @@ MacroAssembler::convertTypedOrValueToInt
       default:
         MOZ_CRASH("Bad MIRType");
     }
 }
 
 void
 MacroAssembler::finish()
 {
-    if (sequentialFailureLabel_.used()) {
-        bind(&sequentialFailureLabel_);
-        handleFailure(SequentialExecution);
-    }
-    if (parallelFailureLabel_.used()) {
-        bind(&parallelFailureLabel_);
-        handleFailure(ParallelExecution);
+    if (failureLabel_.used()) {
+        bind(&failureLabel_);
+        handleFailure();
     }
 
     MacroAssemblerSpecific::finish();
 }
 
 void
 MacroAssembler::branchIfNotInterpretedConstructor(Register fun, Register scratch, Label *label)
 {
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -193,18 +193,17 @@ class MacroAssembler : public MacroAssem
     // This field is used to manage profiling instrumentation output. If
     // provided and enabled, then instrumentation will be emitted around call
     // sites. The IonInstrumentation instance is hosted inside of
     // CodeGeneratorShared and is the manager of when instrumentation is
     // actually emitted or not. If nullptr, then no instrumentation is emitted.
     IonInstrumentation *sps_;
 
     // Labels for handling exceptions and failures.
-    NonAssertingLabel sequentialFailureLabel_;
-    NonAssertingLabel parallelFailureLabel_;
+    NonAssertingLabel failureLabel_;
 
   public:
     // If instrumentation should be emitted, then the sps parameter should be
     // provided, but otherwise it can be safely omitted to prevent all
     // instrumentation from being emitted.
     MacroAssembler()
       : sps_(nullptr)
     {
@@ -825,46 +824,29 @@ class MacroAssembler : public MacroAssem
     void newGCThing(Register result, Register temp, NativeObject *templateObj,
                      gc::InitialHeap initialHeap, Label *fail);
     void initGCThing(Register obj, Register temp, JSObject *templateObj,
                      bool initFixedSlots = true);
 
     void newGCString(Register result, Register temp, Label *fail);
     void newGCFatInlineString(Register result, Register temp, Label *fail);
 
-    void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
-                       gc::AllocKind allocKind, Label *fail);
-    void newGCTenuredThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
-                              gc::AllocKind allocKind, Label *fail);
-    void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
-                       NativeObject *templateObject, Label *fail);
-    void newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
-                        Label *fail);
-    void newGCFatInlineStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
-                                 Label *fail);
-
-
     // Compares two strings for equality based on the JSOP.
     // This checks for identical pointers, atoms and length and fails for everything else.
     void compareStrings(JSOp op, Register left, Register right, Register result,
                         Label *fail);
 
-    // Checks the flags that signal that parallel code may need to interrupt or
-    // abort.  Branches to fail in that case.
-    void checkInterruptFlagPar(Register tempReg, Label *fail);
-
     // If the JitCode that created this assembler needs to transition into the VM,
     // we want to store the JitCode on the stack in order to mark it during a GC.
     // This is a reference to a patch location where the JitCode* will be written.
   private:
     CodeOffsetLabel exitCodePatch_;
 
   private:
     void linkExitFrame();
-    void linkParallelExitFrame(Register pt);
 
   public:
     void enterExitFrame(const VMFunction *f = nullptr) {
         linkExitFrame();
         // Push the ioncode. (Bailout or VM wrapper)
         exitCodePatch_ = PushWithPatch(ImmWord(-1));
         // Push VMFunction pointer, to mark arguments.
         Push(ImmPtr(f));
@@ -879,30 +861,16 @@ class MacroAssembler : public MacroAssem
     }
 
     void loadThreadPool(Register pool) {
         // JitRuntimes are tied to JSRuntimes and there is one ThreadPool per
         // JSRuntime, so we can hardcode the ThreadPool address here.
         movePtr(ImmPtr(GetJitContext()->runtime->addressOfThreadPool()), pool);
     }
 
-    void loadForkJoinContext(Register cx, Register scratch);
-    void loadContext(Register cxReg, Register scratch, ExecutionMode executionMode);
-
-    void enterParallelExitFrameAndLoadContext(const VMFunction *f, Register cx,
-                                              Register scratch);
-
-    void enterExitFrameAndLoadContext(const VMFunction *f, Register cxReg, Register scratch,
-                                      ExecutionMode executionMode);
-
-    void enterFakeParallelExitFrame(Register cx, Register scratch, JitCode *codeVal);
-
-    void enterFakeExitFrame(Register cxReg, Register scratch, ExecutionMode executionMode,
-                            JitCode *codeVal);
-
     void leaveExitFrame() {
         freeStack(ExitFooterFrame::Size());
     }
 
     bool hasEnteredExitFrame() const {
         return exitCodePatch_.offset() != 0;
     }
 
@@ -1164,41 +1132,37 @@ class MacroAssembler : public MacroAssem
         loadPtr(AbsoluteAddress(p->addressOfSizePointer()), temp);
         add32(Imm32(-1), Address(temp, 0));
     }
 
     static const char enterJitLabel[];
     void spsMarkJit(SPSProfiler *p, Register framePtr, Register temp);
     void spsUnmarkJit(SPSProfiler *p, Register temp);
 
-    void loadBaselineOrIonRaw(Register script, Register dest, ExecutionMode mode, Label *failure);
-    void loadBaselineOrIonNoArgCheck(Register callee, Register dest, ExecutionMode mode, Label *failure);
+    void loadBaselineOrIonRaw(Register script, Register dest, Label *failure);
+    void loadBaselineOrIonNoArgCheck(Register callee, Register dest, Label *failure);
 
     void loadBaselineFramePtr(Register framePtr, Register dest);
 
     void pushBaselineFramePtr(Register framePtr, Register scratch) {
         loadBaselineFramePtr(framePtr, scratch);
         push(scratch);
     }
 
   private:
-    void handleFailure(ExecutionMode executionMode);
+    void handleFailure();
 
   public:
     Label *exceptionLabel() {
         // Exceptions are currently handled the same way as sequential failures.
-        return &sequentialFailureLabel_;
+        return &failureLabel_;
     }
 
-    Label *failureLabel(ExecutionMode executionMode) {
-        switch (executionMode) {
-          case SequentialExecution: return &sequentialFailureLabel_;
-          case ParallelExecution: return &parallelFailureLabel_;
-          default: MOZ_CRASH("Unexpected execution mode");
-        }
+    Label *failureLabel() {
+        return &failureLabel_;
     }
 
     void finish();
 
     void assumeUnreachable(const char *output);
     void printf(const char *output);
     void printf(const char *output, Register value);
 
deleted file mode 100644
--- a/js/src/jit/ParallelFunctions.cpp
+++ /dev/null
@@ -1,620 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "jit/ParallelFunctions.h"
-
-#include "builtin/TypedObject.h"
-#include "jit/arm/Simulator-arm.h"
-#include "jit/mips/Simulator-mips.h"
-#include "jit/RematerializedFrame.h"
-#include "vm/ArrayObject.h"
-
-#include "jsgcinlines.h"
-#include "jsobjinlines.h"
-
-#include "vm/NativeObject-inl.h"
-
-using namespace js;
-using namespace jit;
-
-using mozilla::IsInRange;
-
-using JS::AutoCheckCannotGC;
-
-using parallel::Spew;
-using parallel::SpewOps;
-using parallel::SpewBailouts;
-
-// Load the current thread context.
-ForkJoinContext *
-jit::ForkJoinContextPar()
-{
-    return ForkJoinContext::current();
-}
-
-// NewGCThingPar() is called in place of NewGCThing() when executing
-// parallel code.  It uses the ArenaLists for the current thread and
-// allocates from there.
-JSObject *
-jit::NewGCThingPar(ForkJoinContext *cx, gc::AllocKind allocKind)
-{
-    MOZ_ASSERT(ForkJoinContext::current() == cx);
-    return js::NewGCObject<NoGC>(cx, allocKind, 0, gc::TenuredHeap);
-}
-
-bool
-jit::ParallelWriteGuard(ForkJoinContext *cx, JSObject *object)
-{
-    // Implements the most general form of the write guard, which is
-    // suitable for writes to any object O. There are two cases to
-    // consider and test for:
-    //
-    // 1. Writes to thread-local memory are safe. Thread-local memory
-    //    is defined as memory allocated by the current thread.
-    //    The definition of the PJS API guarantees that such memory
-    //    cannot have escaped to other parallel threads.
-    //
-    // 2. Writes into the output buffer are safe. Some PJS operations
-    //    supply an out pointer into the final target buffer. The design
-    //    of the API ensures that this out pointer is always pointing
-    //    at a fresh region of the buffer that is not accessible to
-    //    other threads. Thus, even though this output buffer has not
-    //    been created by the current thread, it is writable.
-    //
-    // There are some subtleties to consider:
-    //
-    // A. Typed objects and typed arrays are just views onto a base buffer.
-    //    For the purposes of guarding parallel writes, it is not important
-    //    whether the *view* is thread-local -- what matters is whether
-    //    the *underlying buffer* is thread-local.
-    //
-    // B. With regard to the output buffer, we have to be careful
-    //    because of the potential for sequential iterations to be
-    //    intermingled with parallel ones. During a sequential
-    //    iteration, the out pointer could escape into global
-    //    variables and so forth, and thus be used during later
-    //    parallel operations. However, those out pointers must point
-    //    to regions of the final output buffer distinct from the ones
-    //    that are currently being written, so there is no harm done in
-    //    letting them be read (but not written).
-    //
-    //    In order to be able to distinguish escaped out pointers from
-    //    prior iterations and the proper out pointers from the
-    //    current iteration, we always track a *target memory region*
-    //    (which is a span of bytes within the output buffer) and not
-    //    just the output buffer itself.
-
-    MOZ_ASSERT(ForkJoinContext::current() == cx);
-
-    if (object->is<TypedObject>()) {
-        TypedObject &typedObj = object->as<TypedObject>();
-
-        // Note: check target region based on `typedObj`, not the owner.
-        // This is because `typedObj` may point to some subregion of the
-        // owner and we only care if that *subregion* is within the
-        // target region, not the entire owner.
-        if (IsInTargetRegion(cx, &typedObj))
-            return true;
-
-        // Check whether the object which owns the memory is thread-local.
-        if (typedObj.is<OutlineTypedObject>())
-            return cx->isThreadLocal(&typedObj.as<OutlineTypedObject>().owner());
-        return cx->isThreadLocal(&typedObj);
-    }
-
-    // For other kinds of writable objects, must be thread-local.
-    return cx->isThreadLocal(object);
-}
-
-// Check that |typedObj| (which must be a typed object) maps
-// to memory in the target region.
-//
-// For efficiency, we assume that all handles which the user has
-// access to are either entirely within the target region or entirely
-// without, but not straddling the target region nor encompassing
-// it. This invariant is maintained by the PJS APIs, where the target
-// region and handles are always elements of the same output array.
-bool
-jit::IsInTargetRegion(ForkJoinContext *cx, TypedObject *typedObj)
-{
-    MOZ_ASSERT(typedObj->is<TypedObject>()); // in case JIT supplies something bogus
-    uint8_t *typedMem = typedObj->typedMem();
-    return typedMem >= cx->targetRegionStart &&
-           typedMem <  cx->targetRegionEnd;
-}
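As a rough standalone model of the two rules spelled out above (writes to thread-local memory, or writes landing inside the thread's target region) — an illustrative sketch with invented names, not SpiderMonkey code:

    #include <cstdint>

    struct TargetRegion {
        const uint8_t *start;   // plays the role of cx->targetRegionStart
        const uint8_t *end;     // plays the role of cx->targetRegionEnd
    };

    // Same half-open range check as IsInTargetRegion above.
    static bool InRegion(const TargetRegion &r, const uint8_t *p) {
        return p >= r.start && p < r.end;
    }

    // A write is allowed if the memory is owned by the writing thread,
    // or if it falls inside that thread's designated output region.
    static bool WriteAllowed(bool isThreadLocal, const TargetRegion &r, const uint8_t *p) {
        return isThreadLocal || InRegion(r, p);
    }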
-
-bool
-jit::CheckOverRecursedPar(ForkJoinContext *cx)
-{
-    MOZ_ASSERT(ForkJoinContext::current() == cx);
-    int stackDummy_;
-
-    // In PJS, unlike sequential execution, we don't overwrite the stack limit
-    // on interrupt, but we do still call into this routine if the interrupt
-    // flag is set, so we still need to double check.
-
-#if defined(JS_ARM_SIMULATOR) || defined(JS_MIPS_SIMULATOR)
-    if (Simulator::Current()->overRecursed()) {
-        cx->bailoutRecord->joinCause(ParallelBailoutOverRecursed);
-        return false;
-    }
-#endif
-
-    if (!JS_CHECK_STACK_SIZE(cx->perThreadData->jitStackLimit(), &stackDummy_)) {
-        cx->bailoutRecord->joinCause(ParallelBailoutOverRecursed);
-        return false;
-    }
-
-    return InterruptCheckPar(cx);
-}
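The recursion check above compares the address of a local variable against a per-thread limit; a rough standalone model (not the real JS_CHECK_STACK_SIZE macro, and assuming a stack that grows toward lower addresses):

    #include <cstdint>

    // Returns true while there is still headroom above |limit|.
    static bool StackStillOk(uintptr_t limit) {
        int dummy;                         // sits at roughly the current stack depth
        return uintptr_t(&dummy) > limit;  // stack grows down, so deeper means smaller
    }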
-
-bool
-jit::InterruptCheckPar(ForkJoinContext *cx)
-{
-    MOZ_ASSERT(ForkJoinContext::current() == cx);
-    bool result = cx->check();
-    if (!result) {
-        cx->bailoutRecord->joinCause(ParallelBailoutInterrupt);
-        return false;
-    }
-    return true;
-}
-
-ArrayObject *
-jit::ExtendArrayPar(ForkJoinContext *cx, ArrayObject *array, uint32_t length)
-{
-    NativeObject::EnsureDenseResult res =
-        array->ensureDenseElementsPreservePackedFlag(cx, 0, length);
-    if (res != NativeObject::ED_OK)
-        return nullptr;
-    return array;
-}
-
-bool
-jit::SetPropertyPar(ForkJoinContext *cx, HandleObject obj, HandlePropertyName name,
-                    HandleValue value, bool strict, jsbytecode *pc)
-{
-    MOZ_ASSERT(cx->isThreadLocal(obj));
-
-    if (*pc == JSOP_SETALIASEDVAR) {
-        // See comment in jit::SetProperty.
-        Shape *shape = obj->as<NativeObject>().lookupPure(name);
-        MOZ_ASSERT(shape && shape->hasSlot());
-        return obj->as<NativeObject>().setSlotIfHasType(shape, value);
-    }
-
-    // Fail early on hooks.
-    if (obj->getOps()->setProperty)
-        return false;
-
-    RootedValue v(cx, value);
-    RootedId id(cx, NameToId(name));
-    return baseops::SetPropertyHelper<ParallelExecution>(cx,
-                                                         obj.as<NativeObject>(),
-                                                         obj.as<NativeObject>(),
-                                                         id, baseops::Qualified, &v,
-                                                         strict);
-}
-
-bool
-jit::SetElementPar(ForkJoinContext *cx, HandleObject obj, HandleValue index, HandleValue value,
-                   bool strict)
-{
-    RootedId id(cx);
-    if (!ValueToIdPure(index, id.address()))
-        return false;
-
-    if (!obj->isNative())
-        return false;
-
-    // SetObjectElementOperation, the sequential version, has several checks
-    // for certain deoptimizing behaviors, such as marking that we have
-    // written to holes or performed non-indexed element accesses. We don't
-    // do that here, as we can't modify any TI state anyway. If we need to
-    // add a new type, we would bail out.
-    RootedValue v(cx, value);
-    return baseops::SetPropertyHelper<ParallelExecution>(cx,
-                                                         obj.as<NativeObject>(),
-                                                         obj.as<NativeObject>(),
-                                                         id, baseops::Qualified, &v,
-                                                         strict);
-}
-
-bool
-jit::SetDenseElementPar(ForkJoinContext *cx, HandleObject obj, int32_t index, HandleValue value,
-                        bool strict)
-{
-    RootedValue indexVal(cx, Int32Value(index));
-    return SetElementPar(cx, obj, indexVal, value, strict);
-}
-
-JSString *
-jit::ConcatStringsPar(ForkJoinContext *cx, HandleString left, HandleString right)
-{
-    return ConcatStrings<NoGC>(cx, left, right);
-}
-
-JSFlatString *
-jit::IntToStringPar(ForkJoinContext *cx, int i)
-{
-    return Int32ToString<NoGC>(cx, i);
-}
-
-JSString *
-jit::DoubleToStringPar(ForkJoinContext *cx, double d)
-{
-    return NumberToString<NoGC>(cx, d);
-}
-
-JSString *
-jit::PrimitiveToStringPar(ForkJoinContext *cx, HandleValue input)
-{
-    // All other cases are handled in assembly.
-    MOZ_ASSERT(input.isDouble() || input.isInt32());
-
-    if (input.isInt32())
-        return Int32ToString<NoGC>(cx, input.toInt32());
-
-    return NumberToString<NoGC>(cx, input.toDouble());
-}
-
-bool
-jit::StringToNumberPar(ForkJoinContext *cx, JSString *str, double *out)
-{
-    return StringToNumber(cx, str, out);
-}
-
-#define PAR_RELATIONAL_OP(OP, EXPECTED)                                         \
-do {                                                                            \
-    /* Optimize for two int-tagged operands (typical loop control). */          \
-    if (lhs.isInt32() && rhs.isInt32()) {                                       \
-        *res = (lhs.toInt32() OP rhs.toInt32()) == EXPECTED;                    \
-    } else if (lhs.isNumber() && rhs.isNumber()) {                              \
-        double l = lhs.toNumber(), r = rhs.toNumber();                          \
-        *res = (l OP r) == EXPECTED;                                            \
-    } else if (lhs.isBoolean() && rhs.isBoolean()) {                            \
-        int l = lhs.toBoolean() ? 1 : 0;                                        \
-        int r = rhs.toBoolean() ? 1 : 0;                                        \
-        *res = (l OP r) == EXPECTED;                                            \
-    } else if (lhs.isBoolean() && rhs.isNumber()) {                             \
-        double l = lhs.toBoolean() ? 1.0 : 0.0;                                 \
-        double r = rhs.toNumber();                                              \
-        *res = (l OP r) == EXPECTED;                                            \
-    } else if (lhs.isNumber() && rhs.isBoolean()) {                             \
-        double l = lhs.toNumber();                                              \
-        double r = rhs.toBoolean() ? 1.0 : 0.0;                                 \
-        *res = (l OP r) == EXPECTED;                                            \
-    } else {                                                                    \
-        int32_t vsZero;                                                         \
-        if (!CompareMaybeStringsPar(cx, lhs, rhs, &vsZero))                     \
-            return false;                                                       \
-        *res = (vsZero OP 0) == EXPECTED;                                       \
-    }                                                                           \
-    return true;                                                                \
-} while(0)
-
-static bool
-CompareStringsPar(ForkJoinContext *cx, JSString *left, JSString *right, int32_t *res)
-{
-    ScopedThreadSafeStringInspector leftInspector(left);
-    ScopedThreadSafeStringInspector rightInspector(right);
-    AutoCheckCannotGC nogc;
-    if (!leftInspector.ensureChars(cx, nogc) || !rightInspector.ensureChars(cx, nogc))
-        return false;
-
-    if (leftInspector.hasLatin1Chars()) {
-        if (rightInspector.hasLatin1Chars()) {
-            *res = CompareChars(leftInspector.latin1Chars(), left->length(),
-                                rightInspector.latin1Chars(), right->length());
-        } else {
-            *res = CompareChars(leftInspector.latin1Chars(), left->length(),
-                                rightInspector.twoByteChars(), right->length());
-        }
-    } else {
-        if (rightInspector.hasLatin1Chars()) {
-            *res = CompareChars(leftInspector.twoByteChars(), left->length(),
-                                rightInspector.latin1Chars(), right->length());
-        } else {
-            *res = CompareChars(leftInspector.twoByteChars(), left->length(),
-                                rightInspector.twoByteChars(), right->length());
-        }
-    }
-
-    return true;
-}
-
-static bool
-CompareMaybeStringsPar(ForkJoinContext *cx, HandleValue v1, HandleValue v2, int32_t *res)
-{
-    if (!v1.isString())
-        return false;
-    if (!v2.isString())
-        return false;
-    return CompareStringsPar(cx, v1.toString(), v2.toString(), res);
-}
-
-template<bool Equal>
-bool
-LooselyEqualImplPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
-{
-    PAR_RELATIONAL_OP(==, Equal);
-}
-
-bool
-js::jit::LooselyEqualPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
-{
-    return LooselyEqualImplPar<true>(cx, lhs, rhs, res);
-}
-
-bool
-js::jit::LooselyUnequalPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
-{
-    return LooselyEqualImplPar<false>(cx, lhs, rhs, res);
-}
-
-template<bool Equal>
-bool
-StrictlyEqualImplPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
-{
-    if (lhs.isNumber()) {
-        if (rhs.isNumber()) {
-            *res = (lhs.toNumber() == rhs.toNumber()) == Equal;
-            return true;
-        }
-    } else if (lhs.isBoolean()) {
-        if (rhs.isBoolean()) {
-            *res = (lhs.toBoolean() == rhs.toBoolean()) == Equal;
-            return true;
-        }
-    } else if (lhs.isNull()) {
-        if (rhs.isNull()) {
-            *res = Equal;
-            return true;
-        }
-    } else if (lhs.isUndefined()) {
-        if (rhs.isUndefined()) {
-            *res = Equal;
-            return true;
-        }
-    } else if (lhs.isObject()) {
-        if (rhs.isObject()) {
-            *res = (lhs.toObjectOrNull() == rhs.toObjectOrNull()) == Equal;
-            return true;
-        }
-    } else if (lhs.isString()) {
-        if (rhs.isString())
-            return LooselyEqualImplPar<Equal>(cx, lhs, rhs, res);
-    }
-
-    *res = !Equal;
-    return true;
-}
-
-bool
-js::jit::StrictlyEqualPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
-{
-    return StrictlyEqualImplPar<true>(cx, lhs, rhs, res);
-}
-
-bool
-js::jit::StrictlyUnequalPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
-{
-    return StrictlyEqualImplPar<false>(cx, lhs, rhs, res);
-}
-
-bool
-js::jit::LessThanPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
-{
-    PAR_RELATIONAL_OP(<, true);
-}
-
-bool
-js::jit::LessThanOrEqualPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
-{
-    PAR_RELATIONAL_OP(<=, true);
-}
-
-bool
-js::jit::GreaterThanPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
-{
-    PAR_RELATIONAL_OP(>, true);
-}
-
-bool
-js::jit::GreaterThanOrEqualPar(ForkJoinContext *cx, MutableHandleValue lhs, MutableHandleValue rhs, bool *res)
-{
-    PAR_RELATIONAL_OP(>=, true);
-}
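Each comparison entry point above is PAR_RELATIONAL_OP instantiated with a different operator. As a simplified illustration (boolean mixed-type cases elided, function name invented), LessThanPar expands to roughly:

    // Approximate expansion of PAR_RELATIONAL_OP(<, true).
    static bool LessThanParExpanded(ForkJoinContext *cx, MutableHandleValue lhs,
                                    MutableHandleValue rhs, bool *res)
    {
        if (lhs.isInt32() && rhs.isInt32()) {
            *res = (lhs.toInt32() < rhs.toInt32()) == true;
        } else if (lhs.isNumber() && rhs.isNumber()) {
            *res = (lhs.toNumber() < rhs.toNumber()) == true;
        } else {
            int32_t vsZero;
            if (!CompareMaybeStringsPar(cx, lhs, rhs, &vsZero))
                return false;              // neither numbers nor a pair of strings: bail
            *res = (vsZero < 0) == true;
        }
        return true;
    }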
-
-template<bool Equal>
-bool
-StringsEqualImplPar(ForkJoinContext *cx, HandleString lhs, HandleString rhs, bool *res)
-{
-    int32_t vsZero;
-    if (!CompareStringsPar(cx, lhs, rhs, &vsZero))
-        return false;
-    *res = (vsZero == 0) == Equal;
-    return true;
-}
-
-bool
-js::jit::StringsEqualPar(ForkJoinContext *cx, HandleString v1, HandleString v2, bool *res)
-{
-    return StringsEqualImplPar<true>(cx, v1, v2, res);
-}
-
-bool
-js::jit::StringsUnequalPar(ForkJoinContext *cx, HandleString v1, HandleString v2, bool *res)
-{
-    return StringsEqualImplPar<false>(cx, v1, v2, res);
-}
-
-bool
-jit::BitNotPar(ForkJoinContext *cx, HandleValue in, int32_t *out)
-{
-    if (in.isObject())
-        return false;
-    int i;
-    if (!NonObjectToInt32(cx, in, &i))
-        return false;
-    *out = ~i;
-    return true;
-}
-
-#define BIT_OP(OP)                                                      \
-    JS_BEGIN_MACRO                                                      \
-    int32_t left, right;                                                \
-    if (lhs.isObject() || rhs.isObject())                               \
-        return false;                                                   \
-    if (!NonObjectToInt32(cx, lhs, &left) ||                            \
-        !NonObjectToInt32(cx, rhs, &right))                             \
-    {                                                                   \
-        return false;                                                   \
-    }                                                                   \
-    *out = (OP);                                                        \
-    return true;                                                        \
-    JS_END_MACRO
-
-bool
-jit::BitXorPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out)
-{
-    BIT_OP(left ^ right);
-}
-
-bool
-jit::BitOrPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out)
-{
-    BIT_OP(left | right);
-}
-
-bool
-jit::BitAndPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out)
-{
-    BIT_OP(left & right);
-}
-
-bool
-jit::BitLshPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out)
-{
-    BIT_OP(uint32_t(left) << (right & 31));
-}
-
-bool
-jit::BitRshPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out)
-{
-    BIT_OP(left >> (right & 31));
-}
-
-#undef BIT_OP
-
-bool
-jit::UrshValuesPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs,
-                   MutableHandleValue out)
-{
-    uint32_t left;
-    int32_t right;
-    if (lhs.isObject() || rhs.isObject())
-        return false;
-    if (!NonObjectToUint32(cx, lhs, &left) || !NonObjectToInt32(cx, rhs, &right))
-        return false;
-    left >>= right & 31;
-    out.setNumber(uint32_t(left));
-    return true;
-}
-
-void
-jit::BailoutPar(BailoutStack *sp, uint8_t **entryFramePointer)
-{
-    parallel::Spew(parallel::SpewBailouts, "Bailing");
-
-    ForkJoinContext *cx = ForkJoinContext::current();
-
-    // We don't have an exit frame.
-    MOZ_ASSERT(IsInRange(FAKE_JIT_TOP_FOR_BAILOUT, 0, 0x1000) &&
-               IsInRange(FAKE_JIT_TOP_FOR_BAILOUT + sizeof(CommonFrameLayout), 0, 0x1000),
-               "Fake jitTop pointer should be within the first page.");
-    cx->perThreadData->jitTop = FAKE_JIT_TOP_FOR_BAILOUT;
-
-    JitActivationIterator jitActivations(cx->perThreadData);
-    BailoutFrameInfo bailoutData(jitActivations, sp);
-    JitFrameIterator frameIter(jitActivations);
-    SnapshotIterator snapIter(frameIter);
-
-    cx->bailoutRecord->setIonBailoutKind(snapIter.bailoutKind());
-    while (!frameIter.done())
-        ++frameIter;
-
-    MOZ_ASSERT(frameIter.done());
-    *entryFramePointer = frameIter.fp();
-}
-
-bool
-jit::CallToUncompiledScriptPar(ForkJoinContext *cx, JSObject *obj)
-{
-#ifdef DEBUG
-    static const int max_bound_function_unrolling = 5;
-
-    if (!obj->is<JSFunction>()) {
-        Spew(SpewBailouts, "Call to non-function");
-        return false;
-    }
-
-    JSFunction *func = &obj->as<JSFunction>();
-    if (func->hasScript()) {
-        JSScript *script = func->nonLazyScript();
-        Spew(SpewBailouts, "Call to uncompiled script: %p:%s:%d",
-             script, script->filename(), script->lineno());
-    } else if (func->isInterpretedLazy()) {
-        Spew(SpewBailouts, "Call to uncompiled lazy script");
-    } else if (func->isBoundFunction()) {
-        int depth = 0;
-        JSFunction *target = &func->getBoundFunctionTarget()->as<JSFunction>();
-        while (depth < max_bound_function_unrolling) {
-            if (target->hasScript())
-                break;
-            if (target->isBoundFunction())
-                target = &target->getBoundFunctionTarget()->as<JSFunction>();
-            depth++;
-        }
-        if (target->hasScript()) {
-            JSScript *script = target->nonLazyScript();
-            Spew(SpewBailouts, "Call to bound function leading (depth: %d) to script: %p:%s:%d",
-                 depth, script, script->filename(), script->lineno());
-        } else {
-            Spew(SpewBailouts, "Call to bound function (excessive depth: %d)", depth);
-        }
-    } else {
-        MOZ_ASSERT(func->isNative());
-        Spew(SpewBailouts, "Call to native function");
-    }
-#endif
-
-    return false;
-}
-
-JSObject *
-jit::InitRestParameterPar(ForkJoinContext *cx, uint32_t length, Value *rest,
-                          HandleObject templateObj, HandleArrayObject res)
-{
-    // In parallel execution, we should always have succeeded in allocation
-    // before this point. We can do the allocation here like in the sequential
-    // path, but duplicating the initGCThing logic is too tedious.
-    MOZ_ASSERT(res);
-    MOZ_ASSERT(!res->getDenseInitializedLength());
-    MOZ_ASSERT(res->type() == templateObj->type());
-
-    if (length > 0) {
-        NativeObject::EnsureDenseResult edr =
-            res->ensureDenseElementsPreservePackedFlag(cx, 0, length);
-        if (edr != NativeObject::ED_OK)
-            return nullptr;
-        res->initDenseElementsUnbarriered(0, rest, length);
-        res->setLengthInt32(length);
-    }
-
-    return res;
-}
deleted file mode 100644
--- a/js/src/jit/ParallelFunctions.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef jit_ParallelFunctions_h
-#define jit_ParallelFunctions_h
-
-#include "gc/Heap.h"
-#include "vm/ForkJoin.h"
-
-namespace js {
-
-class TypedObject; // subclass of JSObject* defined in builtin/TypedObject.h
-
-namespace jit {
-
-ForkJoinContext *ForkJoinContextPar();
-JSObject *NewGCThingPar(ForkJoinContext *cx, gc::AllocKind allocKind);
-bool ParallelWriteGuard(ForkJoinContext *cx, JSObject *object);
-bool IsInTargetRegion(ForkJoinContext *cx, TypedObject *object);
-bool CheckOverRecursedPar(ForkJoinContext *cx);
-bool InterruptCheckPar(ForkJoinContext *cx);
-
-// Extends the given array with `length` new holes.  Returns nullptr on
-// failure or else `array`, which is convenient during code
-// generation.
-ArrayObject *ExtendArrayPar(ForkJoinContext *cx, ArrayObject *array, uint32_t length);
-
-// Set properties and elements on thread local objects.
-bool SetPropertyPar(ForkJoinContext *cx, HandleObject obj, HandlePropertyName name,
-                    HandleValue value, bool strict, jsbytecode *pc);
-bool SetElementPar(ForkJoinContext *cx, HandleObject obj, HandleValue index,
-                   HandleValue value, bool strict);
-bool SetDenseElementPar(ForkJoinContext *cx, HandleObject obj, int32_t index,
-                        HandleValue value, bool strict);
-
-// String related parallel functions. These tend to call existing VM functions
-// that take a ThreadSafeContext.
-JSString *ConcatStringsPar(ForkJoinContext *cx, HandleString left, HandleString right);
-JSFlatString *IntToStringPar(ForkJoinContext *cx, int i);
-JSString *DoubleToStringPar(ForkJoinContext *cx, double d);
-JSString *PrimitiveToStringPar(ForkJoinContext *cx, HandleValue input);
-bool StringToNumberPar(ForkJoinContext *cx, JSString *str, double *out);
-
-// Binary and unary operator functions on values. These tend to return false
-// (forcing a bailout to sequential execution) if the values are objects.
-bool StrictlyEqualPar(ForkJoinContext *cx, MutableHandleValue v1, MutableHandleValue v2, bool *);
-bool StrictlyUnequalPar(ForkJoinContext *cx, MutableHandleValue v1, MutableHandleValue v2, bool *);
-bool LooselyEqualPar(ForkJoinContext *cx, MutableHandleValue v1, MutableHandleValue v2, bool *);
-bool LooselyUnequalPar(ForkJoinContext *cx, MutableHandleValue v1, MutableHandleValue v2, bool *);
-bool LessThanPar(ForkJoinContext *cx, MutableHandleValue v1, MutableHandleValue v2, bool *);
-bool LessThanOrEqualPar(ForkJoinContext *cx, MutableHandleValue v1, MutableHandleValue v2, bool *);
-bool GreaterThanPar(ForkJoinContext *cx, MutableHandleValue v1, MutableHandleValue v2, bool *);
-bool GreaterThanOrEqualPar(ForkJoinContext *cx, MutableHandleValue v1, MutableHandleValue v2, bool *);
-
-bool StringsEqualPar(ForkJoinContext *cx, HandleString v1, HandleString v2, bool *);
-bool StringsUnequalPar(ForkJoinContext *cx, HandleString v1, HandleString v2, bool *);
-
-bool BitNotPar(ForkJoinContext *cx, HandleValue in, int32_t *out);
-bool BitXorPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out);
-bool BitOrPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out);
-bool BitAndPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out);
-bool BitLshPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out);
-bool BitRshPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, int32_t *out);
-
-bool UrshValuesPar(ForkJoinContext *cx, HandleValue lhs, HandleValue rhs, MutableHandleValue out);
-
-// Make a new rest parameter in parallel.
-JSObject *InitRestParameterPar(ForkJoinContext *cx, uint32_t length, Value *rest,
-                               HandleObject templateObj, HandleArrayObject res);
-
-// Abort and debug tracing functions.
-void BailoutPar(BailoutStack *sp, uint8_t **entryFramePointer);
-bool CallToUncompiledScriptPar(ForkJoinContext *cx, JSObject *obj);
-
-} // namespace jit
-} // namespace js
-
-#endif /* jit_ParallelFunctions_h */
deleted file mode 100644
--- a/js/src/jit/ParallelSafetyAnalysis.cpp
+++ /dev/null
@@ -1,913 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "jit/ParallelSafetyAnalysis.h"
-
-#include "jit/Ion.h"
-#include "jit/IonAnalysis.h"
-#include "jit/JitSpewer.h"
-#include "jit/MIR.h"
-#include "jit/MIRGenerator.h"
-#include "jit/MIRGraph.h"
-
-#include "jsinferinlines.h"
-#include "jsobjinlines.h"
-
-using namespace js;
-using namespace jit;
-
-using parallel::Spew;
-using parallel::SpewMIR;
-using parallel::SpewCompile;
-
-#define SAFE_OP(op)                             \
-    virtual void visit##op(M##op *prop) { }
-
-#define CUSTOM_OP(op)                        \
-    virtual void visit##op(M##op *prop);
-
-#define DROP_OP(op)                             \
-    virtual void visit##op(M##op *ins) {        \
-        MBasicBlock *block = ins->block();      \
-        block->discard(ins);                    \
-    }
-
-#define PERMIT(T) (1 << T)
-
-#define PERMIT_INT32 (PERMIT(MIRType_Int32))
-#define PERMIT_NUMERIC (PERMIT(MIRType_Int32) | PERMIT(MIRType_Double))
-
-#define SPECIALIZED_OP(op, flags)                                             \
-    virtual void visit##op(M##op *ins) {                                      \
-        visitSpecializedInstruction(ins, ins->specialization(), flags);       \
-    }
-
-#define UNSAFE_OP(op)                                                         \
-    virtual void visit##op(M##op *ins) {                                      \
-        SpewMIR(ins, "Unsafe");                                               \
-        markUnsafe();                                                         \
-    }
-
-#define WRITE_GUARDED_OP(op, obj)                                             \
-    virtual void visit##op(M##op *prop) {                                     \
-        insertWriteGuard(prop, prop->obj());                                  \
-    }
-
-#define MAYBE_WRITE_GUARDED_OP(op, obj)                                       \
-    virtual void visit##op(M##op *prop) {                                     \
-        if (!prop->racy())                                                    \
-            insertWriteGuard(prop, prop->obj());                              \
-    }
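These macros stamp out one visit method per MIR opcode; for example, UNSAFE_OP(RegExp), used further down, expands to approximately:

    virtual void visitRegExp(MRegExp *ins) {
        SpewMIR(ins, "Unsafe");   // log the offending instruction
        markUnsafe();             // flag the current block as unsafe for parallel execution
    }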
-
-class ParallelSafetyVisitor : public MDefinitionVisitor
-{
-    MIRGraph &graph_;
-    bool unsafe_;
-    MDefinition *cx_;
-
-    void insertWriteGuard(MInstruction *writeInstruction, MDefinition *valueBeingWritten);
-
-    void replaceWithNewPar(MInstruction *newInstruction, NativeObject *templateObject);
-    void replace(MInstruction *oldInstruction, MInstruction *replacementInstruction);
-
-    void visitSpecializedInstruction(MInstruction *ins, MIRType spec, uint32_t flags);
-
-    // Intended for use in a visitXyz() instruction.
-    void markUnsafe() {
-        MOZ_ASSERT(!unsafe_);
-        unsafe_ = true;
-    }
-
-    TempAllocator &alloc() const {
-        return graph_.alloc();
-    }
-
-  public:
-    explicit ParallelSafetyVisitor(MIRGraph &graph)
-      : graph_(graph),
-        unsafe_(false),
-        cx_(nullptr)
-    { }
-
-    void clearUnsafe() { unsafe_ = false; }
-    bool unsafe() { return unsafe_; }
-    MDefinition *ForkJoinContext() {
-        if (!cx_)
-            cx_ = graph_.forkJoinContext();
-        return cx_;
-    }
-
-    bool convertToBailout(MInstructionIterator &iter);
-
-    // I am taking the policy of blacklisting everything that's not
-    // obviously safe for now.  We can loosen as we need.
-
-    SAFE_OP(Constant)
-    SAFE_OP(SimdValueX4)
-    SAFE_OP(SimdSplatX4)
-    SAFE_OP(SimdConstant)
-    SAFE_OP(SimdConvert)
-    SAFE_OP(SimdReinterpretCast)
-    SAFE_OP(SimdExtractElement)
-    SAFE_OP(SimdInsertElement)
-    SAFE_OP(SimdSignMask)
-    SAFE_OP(SimdSwizzle)
-    SAFE_OP(SimdShuffle)
-    SAFE_OP(SimdUnaryArith)
-    SAFE_OP(SimdBinaryComp)
-    SAFE_OP(SimdBinaryArith)
-    SAFE_OP(SimdBinaryBitwise)
-    SAFE_OP(SimdShift)
-    SAFE_OP(SimdSelect)
-    UNSAFE_OP(CloneLiteral)
-    SAFE_OP(Parameter)
-    SAFE_OP(Callee)
-    SAFE_OP(IsConstructing)
-    SAFE_OP(TableSwitch)
-    SAFE_OP(Goto)
-    SAFE_OP(Test)
-    SAFE_OP(GotoWithFake)
-    SAFE_OP(Compare)
-    SAFE_OP(Phi)
-    SAFE_OP(Beta)
-    UNSAFE_OP(OsrValue)
-    UNSAFE_OP(OsrScopeChain)
-    UNSAFE_OP(OsrReturnValue)
-    UNSAFE_OP(OsrArgumentsObject)
-    UNSAFE_OP(ReturnFromCtor)
-    CUSTOM_OP(CheckOverRecursed)
-    UNSAFE_OP(DefVar)
-    UNSAFE_OP(DefFun)
-    UNSAFE_OP(CreateThis)
-    CUSTOM_OP(CreateThisWithTemplate)
-    UNSAFE_OP(CreateThisWithProto)
-    UNSAFE_OP(CreateArgumentsObject)
-    UNSAFE_OP(GetArgumentsObjectArg)
-    UNSAFE_OP(SetArgumentsObjectArg)
-    UNSAFE_OP(ComputeThis)
-    UNSAFE_OP(LoadArrowThis)
-    CUSTOM_OP(Call)
-    UNSAFE_OP(ApplyArgs)
-    UNSAFE_OP(ArraySplice)
-    SAFE_OP(Bail)
-    SAFE_OP(Unreachable)
-    UNSAFE_OP(AssertFloat32)
-    UNSAFE_OP(GetDynamicName)
-    UNSAFE_OP(FilterArgumentsOrEval)
-    UNSAFE_OP(CallDirectEval)
-    SAFE_OP(BitNot)
-    SAFE_OP(TypeOf)
-    UNSAFE_OP(ToId)
-    SAFE_OP(BitAnd)
-    SAFE_OP(BitOr)
-    SAFE_OP(BitXor)
-    SAFE_OP(Lsh)
-    SAFE_OP(Rsh)
-    SAFE_OP(Ursh)
-    SPECIALIZED_OP(MinMax, PERMIT_NUMERIC)
-    SAFE_OP(Abs)
-    SAFE_OP(Clz)
-    SAFE_OP(Sqrt)
-    UNSAFE_OP(Atan2)
-    UNSAFE_OP(Hypot)
-    CUSTOM_OP(MathFunction)
-    SPECIALIZED_OP(Add, PERMIT_NUMERIC)
-    SPECIALIZED_OP(Sub, PERMIT_NUMERIC)
-    SPECIALIZED_OP(Mul, PERMIT_NUMERIC)
-    SPECIALIZED_OP(Div, PERMIT_NUMERIC)
-    SPECIALIZED_OP(Mod, PERMIT_NUMERIC)
-    CUSTOM_OP(Concat)
-    SAFE_OP(ConcatPar)
-    UNSAFE_OP(CharCodeAt)
-    UNSAFE_OP(FromCharCode)
-    UNSAFE_OP(StringSplit)
-    SAFE_OP(Return)
-    CUSTOM_OP(Throw)
-    SAFE_OP(Box)     // Boxing just creates a JSVal, doesn't alloc.
-    SAFE_OP(Unbox)
-    SAFE_OP(GuardObject)
-    SAFE_OP(ToDouble)
-    SAFE_OP(ToFloat32)
-    SAFE_OP(ToInt32)
-    SAFE_OP(TruncateToInt32)
-    SAFE_OP(MaybeToDoubleElement)
-    CUSTOM_OP(ToString)
-    UNSAFE_OP(ToObjectOrNull)
-    CUSTOM_OP(NewArray)
-    UNSAFE_OP(NewArrayCopyOnWrite)
-    UNSAFE_OP(NewArrayDynamicLength)
-    UNSAFE_OP(NewTypedObject)
-    CUSTOM_OP(NewObject)
-    CUSTOM_OP(NewCallObject)
-    CUSTOM_OP(NewRunOnceCallObject)
-    CUSTOM_OP(NewDerivedTypedObject)
-    SAFE_OP(ObjectState)
-    SAFE_OP(ArrayState)
-    UNSAFE_OP(InitElem)
-    UNSAFE_OP(InitElemGetterSetter)
-    UNSAFE_OP(MutateProto)
-    UNSAFE_OP(InitProp)
-    UNSAFE_OP(InitPropGetterSetter)
-    SAFE_OP(Start)
-    UNSAFE_OP(OsrEntry)
-    SAFE_OP(Nop)
-    SAFE_OP(LimitedTruncate)
-    UNSAFE_OP(RegExp)
-    CUSTOM_OP(Lambda)
-    UNSAFE_OP(LambdaArrow)
-    SAFE_OP(Slots)
-    SAFE_OP(Elements)
-    SAFE_OP(ConstantElements)
-    SAFE_OP(LoadSlot)
-    WRITE_GUARDED_OP(StoreSlot, slots)
-    SAFE_OP(FunctionEnvironment) // just a load of func env ptr
-    SAFE_OP(FilterTypeSet)
-    SAFE_OP(TypeBarrier) // causes a bailout if the type is not found: a-ok with us
-    SAFE_OP(MonitorTypes) // causes a bailout if the type is not found: a-ok with us
-    UNSAFE_OP(PostWriteBarrier)
-    SAFE_OP(GetPropertyCache)
-    SAFE_OP(GetPropertyPolymorphic)
-    UNSAFE_OP(SetPropertyPolymorphic)
-    SAFE_OP(GetElementCache)
-    WRITE_GUARDED_OP(SetElementCache, object)
-    UNSAFE_OP(BindNameCache)
-    SAFE_OP(GuardShape)
-    SAFE_OP(GuardShapePolymorphic)
-    SAFE_OP(GuardObjectType)
-    SAFE_OP(GuardObjectIdentity)
-    SAFE_OP(GuardClass)
-    SAFE_OP(AssertRange)
-    SAFE_OP(ArrayLength)
-    WRITE_GUARDED_OP(SetArrayLength, elements)
-    SAFE_OP(TypedArrayLength)
-    SAFE_OP(TypedArrayElements)
-    SAFE_OP(TypedObjectDescr)
-    SAFE_OP(TypedObjectElements)
-    SAFE_OP(SetTypedObjectOffset)
-    SAFE_OP(InitializedLength)
-    WRITE_GUARDED_OP(SetInitializedLength, elements)
-    SAFE_OP(Not)
-    SAFE_OP(BoundsCheck)
-    SAFE_OP(BoundsCheckLower)
-    SAFE_OP(LoadElement)
-    SAFE_OP(LoadElementHole)
-    SAFE_OP(LoadUnboxedObjectOrNull)
-    SAFE_OP(LoadUnboxedString)
-    MAYBE_WRITE_GUARDED_OP(StoreElement, elements)
-    WRITE_GUARDED_OP(StoreElementHole, elements)
-    UNSAFE_OP(StoreUnboxedObjectOrNull)
-    UNSAFE_OP(StoreUnboxedString)
-    UNSAFE_OP(ArrayPopShift)
-    UNSAFE_OP(ArrayPush)
-    SAFE_OP(LoadTypedArrayElement)
-    SAFE_OP(LoadTypedArrayElementHole)
-    SAFE_OP(LoadTypedArrayElementStatic)
-    MAYBE_WRITE_GUARDED_OP(StoreTypedArrayElement, elements)
-    WRITE_GUARDED_OP(StoreTypedArrayElementHole, elements)
-    UNSAFE_OP(StoreTypedArrayElementStatic)
-    UNSAFE_OP(ClampToUint8)
-    SAFE_OP(LoadFixedSlot)
-    WRITE_GUARDED_OP(StoreFixedSlot, object)
-    UNSAFE_OP(CallGetProperty)
-    UNSAFE_OP(GetNameCache)
-    UNSAFE_OP(CallGetIntrinsicValue)
-    UNSAFE_OP(CallsiteCloneCache)
-    UNSAFE_OP(CallGetElement)
-    WRITE_GUARDED_OP(CallSetElement, object)
-    UNSAFE_OP(CallInitElementArray)
-    WRITE_GUARDED_OP(CallSetProperty, object)
-    UNSAFE_OP(DeleteProperty)
-    UNSAFE_OP(DeleteElement)
-    WRITE_GUARDED_OP(SetPropertyCache, object)
-    UNSAFE_OP(IteratorStart)
-    UNSAFE_OP(IteratorMore)
-    UNSAFE_OP(IsNoIter)
-    UNSAFE_OP(IteratorEnd)
-    SAFE_OP(StringLength)
-    SAFE_OP(ArgumentsLength)
-    SAFE_OP(GetFrameArgument)
-    UNSAFE_OP(SetFrameArgument)
-    UNSAFE_OP(RunOncePrologue)
-    CUSTOM_OP(Rest)
-    SAFE_OP(RestPar)
-    SAFE_OP(Floor)
-    SAFE_OP(Ceil)
-    SAFE_OP(Round)
-    UNSAFE_OP(InstanceOf)
-    CUSTOM_OP(InterruptCheck)
-    UNSAFE_OP(AsmJSInterruptCheck)
-    SAFE_OP(ForkJoinContext)
-    SAFE_OP(ForkJoinGetSlice)
-    SAFE_OP(NewPar)
-    SAFE_OP(NewDenseArrayPar)
-    SAFE_OP(NewCallObjectPar)
-    SAFE_OP(LambdaPar)
-    UNSAFE_OP(ArrayConcat)
-    UNSAFE_OP(ArrayJoin)
-    UNSAFE_OP(GetDOMProperty)
-    UNSAFE_OP(GetDOMMember)
-    UNSAFE_OP(SetDOMProperty)
-    UNSAFE_OP(NewStringObject)
-    UNSAFE_OP(Random)
-    SAFE_OP(Pow)
-    SAFE_OP(PowHalf)
-    UNSAFE_OP(RegExpTest)
-    UNSAFE_OP(RegExpExec)
-    UNSAFE_OP(RegExpReplace)
-    UNSAFE_OP(StringReplace)
-    UNSAFE_OP(CallInstanceOf)
-    UNSAFE_OP(ProfilerStackOp)
-    UNSAFE_OP(GuardString)
-    UNSAFE_OP(Substr)
-    UNSAFE_OP(NewDeclEnvObject)
-    UNSAFE_OP(In)
-    UNSAFE_OP(InArray)
-    SAFE_OP(GuardThreadExclusive)
-    SAFE_OP(InterruptCheckPar)
-    SAFE_OP(CheckOverRecursedPar)
-    SAFE_OP(FunctionDispatch)
-    SAFE_OP(TypeObjectDispatch)
-    SAFE_OP(IsCallable)
-    SAFE_OP(IsObject)
-    SAFE_OP(HasClass)
-    UNSAFE_OP(EffectiveAddress)
-    UNSAFE_OP(AsmJSUnsignedToDouble)
-    UNSAFE_OP(AsmJSUnsignedToFloat32)
-    UNSAFE_OP(AsmJSNeg)
-    UNSAFE_OP(AsmJSLoadHeap)
-    UNSAFE_OP(AsmJSStoreHeap)
-    UNSAFE_OP(AsmJSLoadGlobalVar)
-    UNSAFE_OP(AsmJSStoreGlobalVar)
-    UNSAFE_OP(AsmJSLoadFuncPtr)
-    UNSAFE_OP(AsmJSLoadFFIFunc)
-    UNSAFE_OP(AsmJSReturn)
-    UNSAFE_OP(AsmJSVoidReturn)
-    UNSAFE_OP(AsmJSPassStackArg)
-    UNSAFE_OP(AsmJSParameter)
-    UNSAFE_OP(AsmJSCall)
-    DROP_OP(RecompileCheck)
-    UNSAFE_OP(CompareExchangeTypedArrayElement)
-    UNSAFE_OP(AtomicTypedArrayElementBinop)
-    UNSAFE_OP(MemoryBarrier)
-    UNSAFE_OP(AsmJSCompareExchangeHeap)
-    UNSAFE_OP(AsmJSAtomicBinopHeap)
-    UNSAFE_OP(UnknownValue)
-    UNSAFE_OP(LexicalCheck)
-    UNSAFE_OP(ThrowUninitializedLexical)
-    UNSAFE_OP(Debugger)
-
-    // It looks like these could easily be made safe:
-    UNSAFE_OP(ConvertElementsToDoubles)
-    UNSAFE_OP(MaybeCopyElementsForWrite)
-};
-
-static void
-TransplantResumePoint(MInstruction *oldInstruction, MInstruction *replacementInstruction)
-{
-    MOZ_ASSERT(!oldInstruction->isDiscarded());
-    if (oldInstruction->resumePoint())
-        replacementInstruction->stealResumePoint(oldInstruction);
-}
-
-bool
-ParallelSafetyAnalysis::analyze()
-{
-    // Walk the basic blocks in a DFS.  When we encounter a block with an
-    // unsafe instruction, then we know that this block will bailout when
-    // executed.  Therefore, we replace the block.
-    //
-    // We don't need a worklist, though, because the graph is sorted
-    // in RPO.  Therefore, we just use the marked flags to tell us
-    // when we visited some predecessor of the current block.
-    ParallelSafetyVisitor visitor(graph_);
-    graph_.entryBlock()->mark();  // Note: in par. exec., we never enter from OSR.
-    uint32_t marked = 0;
-    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
-        if (mir_->shouldCancel("ParallelSafetyAnalysis"))
-            return false;
-
-        if (block->isMarked()) {
-            // Count the number of reachable blocks.
-            marked++;
-
-            // Iterate through and transform the instructions.  Stop
-            // if we encounter an inherently unsafe operation, in
-            // which case we will transform this block into a bailout
-            // block.
-            MInstruction *ins = nullptr;
-            MInstructionIterator iter(block->begin());
-            while (iter != block->end() && !visitor.unsafe()) {
-                if (mir_->shouldCancel("ParallelSafetyAnalysis"))
-                    return false;
-
-                // We may be removing or replacing the current
-                // instruction, so advance `iter` now.  Remember the
-                // last instr. we looked at for use later if it should
-                // prove unsafe.
-                ins = *iter++;
-
-                ins->accept(&visitor);
-            }
-
-            if (!visitor.unsafe()) {
-                // Block consists of only safe instructions.  Visit its successors.
-                for (uint32_t i = 0; i < block->numSuccessors(); i++)
-                    block->getSuccessor(i)->markUnchecked();
-            } else {
-                // Block contains an unsafe instruction.  That means that once
-                // we enter this block, we are guaranteed to bailout.
-
-                // If this is the entry block, then there is no point
-                // in even trying to execute this function as it will
-                // always bailout.
-                if (*block == graph_.entryBlock()) {
-                    Spew(SpewCompile, "Entry block contains unsafe MIR");
-                    mir_->disable();
-                    return false;
-                }
-
-                // Otherwise, create a replacement block that will bail out.
-                // We seek back one position on the instruction iterator, as
-                // we will be discarding all instructions starting at the
-                // unsafe instruction.
-                if (!visitor.convertToBailout(--iter))
-                    return false;
-            }
-        }
-    }
-
-    Spew(SpewCompile, "Safe");
-    IonSpewPass("ParallelSafetyAnalysis");
-
-    // Sweep away any unmarked blocks. Note that this doesn't preserve
-    // AliasAnalysis dependencies, but we're not expected to at this point.
-    if (!RemoveUnmarkedBlocks(mir_, graph_, marked))
-        return false;
-    IonSpewPass("UCEAfterParallelSafetyAnalysis");
-    AssertExtendedGraphCoherency(graph_);
-
-    return true;
-}
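The reachability bookkeeping in analyze() is forward propagation of a marked bit over blocks already sorted in reverse postorder: unsafe blocks keep their mark but stop propagating, so everything reachable only through them is swept away afterwards. A minimal standalone sketch of that idea (generic graph with invented types, not the MIR API):

    #include <cstddef>
    #include <vector>

    struct Block {
        std::vector<size_t> successors;  // indices into the RPO-ordered block list
        bool marked = false;
        bool unsafe = false;             // stands in for "contains an unsafe instruction"
    };

    // Returns how many blocks remain reachable once unsafe blocks are turned
    // into bailouts and stop feeding their successors.
    static size_t MarkReachable(std::vector<Block> &rpoBlocks) {
        size_t marked = 0;
        if (!rpoBlocks.empty())
            rpoBlocks[0].marked = true;          // entry block is always reachable
        for (Block &b : rpoBlocks) {
            if (!b.marked)
                continue;
            marked++;
            if (b.unsafe)
                continue;                        // bailout block: successors stay unmarked
            for (size_t s : b.successors)
                rpoBlocks[s].marked = true;
        }
        return marked;
    }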
-
-bool
-ParallelSafetyVisitor::convertToBailout(MInstructionIterator &iter)
-{
-    // We expect iter to be settled on the unsafe instruction.
-    MInstruction *ins = *iter;
-    MBasicBlock *block = ins->block();
-    MOZ_ASSERT(unsafe()); // `block` must have contained unsafe items
-    MOZ_ASSERT(block->isMarked()); // `block` must have been reachable to get here
-
-    clearUnsafe();
-
-    // Allocate a new bailout instruction.
-    MBail *bail = MBail::New(graph_.alloc(), Bailout_ParallelUnsafe);
-
-    // Discard the rest of the block and sever its link to its successors in
-    // the CFG.
-    for (size_t i = 0; i < block->numSuccessors(); i++)
-        block->getSuccessor(i)->removePredecessor(block);
-    block->discardAllInstructionsStartingAt(iter);
-
-    // No more successors are reachable, so the current block can no longer be
-    // the parent of an inlined function.
-    if (block->outerResumePoint())
-        block->clearOuterResumePoint();
-
-    // End the block in a bail.
-    block->add(bail);
-    block->end(MUnreachable::New(alloc()));
-    return true;
-}
-
-/////////////////////////////////////////////////////////////////////////////
-// Memory allocation
-//
-// Simple memory allocation opcodes---those which ultimately compile
-// down to a (possibly inlined) invocation of NewGCThing()---are
-// replaced with MNewPar, which is supplied with the thread context.
-// These allocations will take place using per-helper-thread arenas.
-
-void
-ParallelSafetyVisitor::visitCreateThisWithTemplate(MCreateThisWithTemplate *ins)
-{
-    replaceWithNewPar(ins, ins->templateObject());
-}
-
-void
-ParallelSafetyVisitor::visitNewCallObject(MNewCallObject *ins)
-{
-    if (ins->templateObject()->hasDynamicSlots()) {
-        SpewMIR(ins, "call with dynamic slots");
-        markUnsafe();
-    } else {
-        replace(ins, MNewCallObjectPar::New(alloc(), ForkJoinContext(), ins));
-    }
-}
-
-void
-ParallelSafetyVisitor::visitNewRunOnceCallObject(MNewRunOnceCallObject *ins)
-{
-    if (ins->templateObject()->hasDynamicSlots()) {
-        SpewMIR(ins, "call with dynamic slots");
-        markUnsafe();
-    } else {
-        replace(ins, MNewCallObjectPar::New(alloc(), ForkJoinContext(), ins));
-    }
-}
-
-void
-ParallelSafetyVisitor::visitLambda(MLambda *ins)
-{
-    if (ins->info().singletonType || ins->info().useNewTypeForClone) {
-        // slow path: bail on parallel execution.
-        markUnsafe();
-    } else {
-        // fast path: replace with LambdaPar op
-        replace(ins, MLambdaPar::New(alloc(), ForkJoinContext(), ins));
-    }
-}
-
-void
-ParallelSafetyVisitor::visitNewObject(MNewObject *newInstruction)
-{
-    if (newInstruction->shouldUseVM()) {
-        SpewMIR(newInstruction, "should use VM");
-        markUnsafe();
-    } else {
-        replaceWithNewPar(newInstruction, newInstruction->templateObject());
-    }
-}
-
-void
-ParallelSafetyVisitor::visitNewArray(MNewArray *newInstruction)
-{
-    if (newInstruction->shouldUseVM()) {
-        SpewMIR(newInstruction, "should use VM");
-        markUnsafe();
-    } else {
-        replaceWithNewPar(newInstruction, newInstruction->templateObject());
-    }
-}
-
-void
-ParallelSafetyVisitor::visitNewDerivedTypedObject(MNewDerivedTypedObject *ins)
-{
-    // FIXME(Bug 984090) -- There should really be a parallel-safe
-    // version of NewDerivedTypedObject. However, until that is
-    // implemented, let's just ignore those with 0 uses, since they
-    // will be stripped out by DCE later.
-    if (!ins->hasUses())
-        return;
-
-    SpewMIR(ins, "visitNewDerivedTypedObject");
-    markUnsafe();
-}
-
-void
-ParallelSafetyVisitor::visitRest(MRest *ins)
-{
-    replace(ins, MRestPar::New(alloc(), ForkJoinContext(), ins));
-}
-
-void
-ParallelSafetyVisitor::visitMathFunction(MMathFunction *ins)
-{
-    replace(ins, MMathFunction::New(alloc(), ins->input(), ins->function(), nullptr));
-}
-
-void
-ParallelSafetyVisitor::visitConcat(MConcat *ins)
-{
-    replace(ins, MConcatPar::New(alloc(), ForkJoinContext(), ins));
-}
-
-void
-ParallelSafetyVisitor::visitToString(MToString *ins)
-{
-    MIRType inputType = ins->input()->type();
-    if (inputType != MIRType_Int32 && inputType != MIRType_Double)
-        markUnsafe();
-}
-
-void
-ParallelSafetyVisitor::replaceWithNewPar(MInstruction *newInstruction,
-                                         NativeObject *templateObject)
-{
-    replace(newInstruction, MNewPar::New(alloc(), ForkJoinContext(), templateObject));
-}
-
-void
-ParallelSafetyVisitor::replace(MInstruction *oldInstruction,
-                               MInstruction *replacementInstruction)
-{
-    TransplantResumePoint(oldInstruction, replacementInstruction);
-
-    MBasicBlock *block = oldInstruction->block();
-    block->insertBefore(oldInstruction, replacementInstruction);
-    oldInstruction->replaceAllUsesWith(replacementInstruction);
-    block->discard(oldInstruction);
-
-    // We may have replaced a specialized Float32 instruction by its
-    // non-specialized version, so just retry to specialize it. This relies on
-    // the fact that Phis' types don't change during the ParallelSafetyAnalysis;
-    // otherwise we'd have to run the entire TypeAnalyzer Float32 analysis once
-    // instructions have been replaced.
-    if (replacementInstruction->isFloat32Commutative() &&
-        replacementInstruction->type() != MIRType_Float32)
-    {
-        replacementInstruction->trySpecializeFloat32(alloc());
-    }
-    MOZ_ASSERT(oldInstruction->type() == replacementInstruction->type());
-}
-
-/////////////////////////////////////////////////////////////////////////////
-// Write Guards
-//
-// We only want to permit writes to locally guarded objects.
-// Furthermore, we want to avoid PICs and other non-thread-safe things
-// (though perhaps we should support PICs at some point).  If we
-// cannot determine the origin of an object, we can insert a write
-// guard which will check whether the object was allocated from the
-// per-thread-arena or not.
-
-void
-ParallelSafetyVisitor::insertWriteGuard(MInstruction *writeInstruction,
-                                        MDefinition *valueBeingWritten)
-{
-    // Many of the write operations do not take the JS object
-    // but rather something derived from it, such as the elements.
-    // So we need to identify the JS object:
-    MDefinition *object;
-    switch (valueBeingWritten->type()) {
-      case MIRType_Object:
-        object = valueBeingWritten;
-        break;
-
-      case MIRType_Slots:
-        switch (valueBeingWritten->op()) {
-          case MDefinition::Op_Slots:
-            object = valueBeingWritten->toSlots()->object();
-            break;
-
-          default:
-            SpewMIR(writeInstruction, "cannot insert write guard for %s",
-                    valueBeingWritten->opName());
-            markUnsafe();
-            return;
-        }
-        break;
-
-      case MIRType_Elements:
-        switch (valueBeingWritten->op()) {
-          case MDefinition::Op_Elements:
-            object = valueBeingWritten->toElements()->object();
-            break;
-
-          case MDefinition::Op_TypedArrayElements:
-            object = valueBeingWritten->toTypedArrayElements()->object();
-            break;
-
-          case MDefinition::Op_TypedObjectElements:
-            object = valueBeingWritten->toTypedObjectElements()->object();
-            break;
-
-          default:
-            SpewMIR(writeInstruction, "cannot insert write guard for %s",
-                    valueBeingWritten->opName());
-            markUnsafe();
-            return;
-        }
-        break;
-
-      default:
-        SpewMIR(writeInstruction, "cannot insert write guard for MIR Type %d",
-                valueBeingWritten->type());
-        markUnsafe();
-        return;
-    }
-
-    if (object->isUnbox())
-        object = object->toUnbox()->input();
-
-    switch (object->op()) {
-      case MDefinition::Op_NewPar:
-        // MNewPar will always be creating something thread-local; omit the guard.
-        SpewMIR(writeInstruction, "write to NewPar prop does not require guard");
-        return;
-      default:
-        break;
-    }
-
-    MBasicBlock *block = writeInstruction->block();
-    MGuardThreadExclusive *writeGuard =
-        MGuardThreadExclusive::New(alloc(), ForkJoinContext(), object);
-    block->insertBefore(writeInstruction, writeGuard);
-    writeGuard->typePolicy()->adjustInputs(alloc(), writeGuard);
-}
-
-/////////////////////////////////////////////////////////////////////////////
-// Calls
-//
-// We only support calls to interpreted functions that have already been
-// Ion compiled. If a function has no IonScript, we bail out.
-
-void
-ParallelSafetyVisitor::visitCall(MCall *ins)
-{
-    // DOM? Scary.
-    if (ins->isCallDOMNative()) {
-        SpewMIR(ins, "call to dom function");
-        markUnsafe();
-        return;
-    }
-
-    JSFunction *target = ins->getSingleTarget();
-    if (target) {
-        // Non-parallel native? Scary
-        if (target->isNative() && !target->hasParallelNative()) {
-            SpewMIR(ins, "call to non-parallel native function");
-            markUnsafe();
-        }
-        return;
-    }
-
-    if (ins->isConstructing()) {
-        SpewMIR(ins, "call to unknown constructor");
-        markUnsafe();
-    }
-}
-
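The rule stated in the comment above (a callee without Ion code forces a bailout) is easy to model in isolation; this sketch uses a hypothetical Script type and is not the actual ForkJoin runtime check.

    // Illustrative model only -- Script is a hypothetical stand-in, not JSScript.
    struct Script
    {
        void *parallelIonCode;   // null until the callee has parallel Ion code
    };

    // A parallel call may proceed only if the callee is already compiled;
    // otherwise the worker bails back to sequential execution.
    static bool
    CanCallInParallel(const Script *callee)
    {
        return callee && callee->parallelIonCode != nullptr;
    }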
-/////////////////////////////////////////////////////////////////////////////
-// Stack limit, interrupts
-//
-// In sequential Ion code, the stack limit is stored in the JSRuntime.
-// In parallel code it lives in the per-thread ForkJoinContext, so we need
-// a separate instruction to access it, one parameterized by the thread
-// context.  Similar considerations apply to checking for interrupts.
-
-void
-ParallelSafetyVisitor::visitCheckOverRecursed(MCheckOverRecursed *ins)
-{
-    replace(ins, MCheckOverRecursedPar::New(alloc(), ForkJoinContext()));
-}
-
-void
-ParallelSafetyVisitor::visitInterruptCheck(MInterruptCheck *ins)
-{
-    replace(ins, MInterruptCheckPar::New(alloc(), ForkJoinContext()));
-}
-
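To make the replacements above concrete: the parallel variants read the limit from the worker's own context instead of the JSRuntime. A standalone sketch of that shape, using a hypothetical WorkerContext type and the usual downward-growing-stack assumption:

    #include <cstdint>

    // Illustrative model only -- WorkerContext stands in for the per-thread
    // context that owns the stack limit.
    struct WorkerContext { uintptr_t stackLimit; };

    // Returns true while the native stack still has headroom; false
    // corresponds to taking the over-recursion bailout path.
    static bool
    CheckStackLimitPar(const WorkerContext *cx)
    {
        char marker;
        uintptr_t sp = reinterpret_cast<uintptr_t>(&marker);
        return sp > cx->stackLimit;   // assumes the stack grows downward
    }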
-/////////////////////////////////////////////////////////////////////////////
-// Specialized ops
-//
-// Some ops, like +, can be specialized to ints/doubles.  Anything
-// else is terrifying.
-//
-// TODO---Eventually, we should probably permit arbitrary + but bail
-// if the operands are not both integers/floats.
-
-void
-ParallelSafetyVisitor::visitSpecializedInstruction(MInstruction *ins, MIRType spec,
-                                                   uint32_t flags)
-{
-    uint32_t flag = 1 << spec;
-    if (flags & flag)
-        return;
-
-    SpewMIR(ins, "specialized to unacceptable type %d", spec);
-    markUnsafe();
-}
-
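The flags argument above is a bitmask indexed by MIRType, so the whole check is a single bit test. A self-contained model of that encoding (the enumerator values here are hypothetical; only the 1 << type pattern mirrors the code):

    #include <cstdint>

    // Hypothetical enum; the real code uses MIRType enumerators as bit indices.
    enum MirTypeModel : uint32_t { TypeInt32 = 1, TypeDouble = 2, TypeFloat32 = 3 };

    static bool
    SpecializationAccepted(MirTypeModel spec, uint32_t acceptedMask)
    {
        return (acceptedMask & (1u << spec)) != 0;
    }

    // Example mask: a visitor that tolerates only int32 and double specializations.
    static const uint32_t IntOrDoubleMask = (1u << TypeInt32) | (1u << TypeDouble);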
-/////////////////////////////////////////////////////////////////////////////
-// Throw
-
-void
-ParallelSafetyVisitor::visitThrow(MThrow *thr)
-{
-    MBasicBlock *block = thr->block();
-    MOZ_ASSERT(block->lastIns() == thr);
-    MBail *bail = MBail::New(alloc(), Bailout_ParallelUnsafe);
-    block->discardLastIns();
-    block->add(bail);
-    block->end(MUnreachable::New(alloc()));
-}
-
-///////////////////////////////////////////////////////////////////////////
-// Callee extraction
-//
-// See comments in header file.
-
-static bool
-GetPossibleCallees(JSContext *cx, HandleScript script, jsbytecode *pc,
-                   types::TemporaryTypeSet *calleeTypes, CallTargetVector &targets);
-
-static bool
-AddCallTarget(HandleScript script, CallTargetVector &targets);
-
-bool
-jit::AddPossibleCallees(JSContext *cx, MIRGraph &graph, CallTargetVector &targets)
-{
-    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
-        for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++)
-        {
-            if (!ins->isCall())
-                continue;
-
-            MCall *callIns = ins->toCall();
-
-            RootedFunction target(cx, callIns->getSingleTarget());
-            if (target) {
-                MOZ_ASSERT_IF(!target->isInterpreted(), target->hasParallelNative());
-
-                if (target->isInterpreted()) {
-                    RootedScript script(cx, target->getOrCreateScript(cx));
-                    if (!script || !AddCallTarget(script, targets))
-                        return false;
-                }
-
-                continue;
-            }
-
-            types::TemporaryTypeSet *calleeTypes = callIns->getFunction()->resultTypeSet();
-            RootedScript script(cx, callIns->block()->info().script());
-            if (!GetPossibleCallees(cx,
-                                    script,
-                                    callIns->resumePoint()->pc(),
-                                    calleeTypes,
-                                    targets))
-                return false;
-        }
-    }
-
-    return true;
-}
-
-static bool
-GetPossibleCallees(JSContext *cx,
-                   HandleScript script,
-                   jsbytecode *pc,
-                   types::TemporaryTypeSet *calleeTypes,
-                   CallTargetVector &targets)
-{
-    if (!calleeTypes || calleeTypes->baseFlags() != 0)
-        return true;
-
-    unsigned objCount = calleeTypes->getObjectCount();
-
-    if (objCount == 0)
-        return true;
-
-    RootedFunction rootedFun(cx);
-    RootedScript rootedScript(cx);
-    for (unsigned i = 0; i < objCount; i++) {
-        JSObject *obj = calleeTypes->getSingleObject(i);
-        if (obj && obj->is<JSFunction>()) {
-            rootedFun = &obj->as<JSFunction>();
-        } else {
-            types::TypeObject *typeObj = calleeTypes->getTypeObject(i);
-            if (!typeObj)
-                continue;
-            rootedFun = typeObj->interpretedFunction;
-            if (!rootedFun)
-                continue;
-        }
-
-        if (!rootedFun->isInterpreted())
-            continue;
-
-        rootedScript = rootedFun->getOrCreateScript(cx);
-        if (!rootedScript)
-            return false;
-
-        if (rootedScript->shouldCloneAtCallsite()) {
-            rootedFun = CloneFunctionAtCallsite(cx, rootedFun, script, pc);
-            if (!rootedFun)
-                return false;
-            rootedScript = rootedFun->nonLazyScript();
-        }
-
-        // check if this call target is already known
-        if (!AddCallTarget(rootedScript, targets))
-            return false;
-    }
-
-    return true;
-}
-
-static bool
-AddCallTarget(HandleScript script, CallTargetVector &targets)
-{
-    for (size_t i = 0; i < targets.length(); i++) {
-        if (targets[i] == script)
-            return true;
-    }
-
-    if (!targets.append(script))
-        return false;
-
-    return true;
-}
deleted file mode 100644
--- a/js/src/jit/ParallelSafetyAnalysis.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef jit_ParallelSafetyAnalysis_h
-#define jit_ParallelSafetyAnalysis_h
-
-#include "jit/MIR.h"
-
-namespace js {
-
-class InterpreterFrame;
-
-namespace jit {
-
-class MIRGraph;
-class AutoDestroyAllocator;
-
-// Determines whether a function is compatible for parallel execution.
-// Removes basic blocks containing unsafe MIR operations from the
-// graph and replaces them with MBail blocks.
-class ParallelSafetyAnalysis
-{
-    MIRGenerator *mir_;
-    MIRGraph &graph_;
-
-  public:
-    ParallelSafetyAnalysis(MIRGenerator *mir,
-                           MIRGraph &graph)
-      : mir_(mir),
-        graph_(graph)
-    {}
-
-    bool analyze();
-};
-
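A minimal sketch of how a pipeline phase would have driven the pass declared above; the wrapper function is illustrative (the real call site, also removed by this patch, lived in Ion's compilation pipeline).

    // Illustrative call site only; mir and graph come from the Ion
    // compilation being analyzed.
    static bool
    RunParallelSafetyAnalysis(MIRGenerator *mir, MIRGraph &graph)
    {
        ParallelSafetyAnalysis analysis(mir, graph);
        // On success, any unsafe blocks have been replaced with bailouts.
        return analysis.analyze();
    }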
-// Code to collect a list of possible call targets by scraping through
-// TI and baseline data. Used to permit speculative transitive
-// compilation in vm/ForkJoin.
-//
-// This code may clone scripts and thus may invoke the GC.  Hence only
-// run from the link phase, which executes on the main thread.
-typedef Vector<JSScript *, 4, JitAllocPolicy> CallTargetVector;
-bool AddPossibleCallees(JSContext *cx, MIRGraph &graph, CallTargetVector &targets);
-
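And a sketch of the link-phase pattern the comment above describes; EnsureParallelCompilation is a hypothetical helper standing in for whatever vm/ForkJoin actually used to trigger the transitive compiles.

    // Hypothetical helper: triggers (parallel) Ion compilation of a script.
    static bool EnsureParallelCompilation(JSContext *cx, JSScript *script);

    // Illustrative only: gather transitive call targets from the linked
    // graph and compile them speculatively.
    static bool
    CompilePossibleCallees(JSContext *cx, MIRGraph &graph, CallTargetVector &targets)
    {
        if (!AddPossibleCallees(cx, graph, targets))
            return false;
        for (size_t i = 0; i < targets.length(); i++) {
            if (!EnsureParallelCompilation(cx, targets[i]))
                return false;
        }
        return true;
    }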
-} // namespace jit
-} // namespace js
-
-#endif /* jit_ParallelSafetyAnalysis_h */
--- a/js/src/jit/VMFunctions.h
+++ b/js/src/jit/VMFunctions.h
@@ -10,17 +10,16 @@
 #include "jspubtd.h"
 
 #include "jit/CompileInfo.h"
 #include "jit/JitFrames.h"
 
 namespace js {
 
 class DeclEnvObject;
-class ForkJoinContext;
 class StaticWithObject;
 class InlineTypedObject;
 
 namespace jit {
 
 enum DataType {
     Type_Void,
     Type_Bool,
@@ -463,19 +462,16 @@ template <> struct OutParamToRootType<Mu
 
 template <class> struct MatchContext { };
 template <> struct MatchContext<JSContext *> {
     static const ExecutionMode execMode = SequentialExecution;
 };
 template <> struct MatchContext<ExclusiveContext *> {
     static const ExecutionMode execMode = SequentialExecution;
 };
-template <> struct MatchContext<ForkJoinContext *> {
-    static const ExecutionMode execMode = ParallelExecution;
-};
 template <> struct MatchContext<ThreadSafeContext *> {
     // ThreadSafeContext functions can be called from either mode, but for
     // calling from parallel they should be wrapped first, so we default to
     // SequentialExecution here.
     static const ExecutionMode execMode = SequentialExecution;
 };
 
 #define FOR_EACH_ARGS_1(Macro, Sep, Last) Macro(1) Last(1)
--- a/js/src/jit/arm/Bailouts-arm.cpp
+++ b/js/src/jit/arm/Bailouts-arm.cpp
@@ -74,20 +74,17 @@ BailoutFrameInfo::BailoutFrameInfo(const
   : machine_(bailout->machine())
 {
     uint8_t *sp = bailout->parentStackPointer();
     framePointer_ = sp + bailout->frameSize();
     topFrameSize_ = framePointer_ - sp;
 
     JSScript *script = ScriptFromCalleeToken(((JitFrameLayout *) framePointer_)->calleeToken());
     JitActivation *activation = activations.activation()->asJit();
-    if (activation->cx()->isForkJoinContext())
-        topIonScript_ = script->parallelIonScript();
-    else
-        topIonScript_ = script->ionScript();
+    topIonScript_ = script->ionScript();
 
     attachOnJitActivation(activations);
 
     if (bailout->frameClass() == FrameSizeClass::None()) {
         snapshotOffset_ = bailout->snapshotOffset();
         return;
     }
 
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -55,20 +55,18 @@ CodeGeneratorARM::generatePrologue()
 
 bool
 CodeGeneratorARM::generateEpilogue()
 {
     MOZ_ASSERT(!gen->compilingAsmJS());
     masm.bind(&returnLabel_);
 
 #ifdef JS_TRACE_LOGGING
-    if (gen->info().executionMode() == SequentialExecution) {
-        emitTracelogStopEvent(TraceLogger_IonMonkey);
-        emitTracelogScriptStop();
-    }
+    emitTracelogStopEvent(TraceLogger_IonMonkey);
+    emitTracelogScriptStop();
 #endif
 
     masm.freeStack(frameSize());
     MOZ_ASSERT(masm.framePushed() == 0);
     masm.pop(pc);
     masm.flushBuffer();
     return true;
 }
@@ -145,17 +143,17 @@ CodeGeneratorARM::generateOutOfLineCode(
 
     if (deoptLabel_.used()) {
         // All non-table-based bailouts will go here.
         masm.bind(&deoptLabel_);
 
         // Push the frame size, so the handler can recover the IonScript.
         masm.ma_mov(Imm32(frameSize()), lr);
 
-        JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler(gen->info().executionMode());
+        JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler();
         masm.branch(handler);
     }
 
     return true;
 }
 
 void
 CodeGeneratorARM::bailoutIf(Assembler::Condition condition, LSnapshot *snapshot)
@@ -2181,28 +2179,16 @@ CodeGeneratorARM::visitNegD(LNegD *ins)
 void
 CodeGeneratorARM::visitNegF(LNegF *ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     masm.ma_vneg_f32(input, ToFloatRegister(ins->output()));
 }
 
 void
-CodeGeneratorARM::visitForkJoinGetSlice(LForkJoinGetSlice *ins)
-{
-    MOZ_CRASH("NYI");
-}
-
-JitCode *
-JitRuntime::generateForkJoinGetSliceStub(JSContext *cx)
-{
-    MOZ_CRASH("NYI");
-}
-
-void
 CodeGeneratorARM::memoryBarrier(MemoryBarrierBits barrier)
 {
     // On ARMv6 the optional argument (BarrierST, etc) is ignored.
     if (barrier == (MembarStoreStore|MembarSynchronizing))
         masm.ma_dsb(masm.BarrierST);
     else if (barrier & MembarSynchronizing)
         masm.ma_dsb();
     else if (barrier == MembarStoreStore)
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -209,18 +209,16 @@ class CodeGeneratorARM : public CodeGene
     void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins);
     void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins);
     void visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins);
     void visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins);
     void visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins);
     void visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins);
     void visitAsmJSPassStackArg(LAsmJSPassStackArg *ins);
 
-    void visitForkJoinGetSlice(LForkJoinGetSlice *ins);
-
     void visitMemoryBarrier(LMemoryBarrier *ins);
 
     void generateInvalidateEpilogue();
 
   protected:
     void visitEffectiveAddress(LEffectiveAddress *ins);
     void visitUDiv(LUDiv *ins);
     void visitUMod(LUMod *ins);
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -546,22 +546,16 @@ LIRGeneratorARM::lowerTruncateFToInt32(M
 
 void
 LIRGeneratorARM::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins)
 {
     MOZ_CRASH("NYI");
 }
 
 void
-LIRGeneratorARM::visitForkJoinGetSlice(MForkJoinGetSlice *ins)
-{
-    MOZ_CRASH("NYI");
-}
-
-void
 LIRGeneratorARM::visitSimdBinaryArith(MSimdBinaryArith *ins)
 {
     MOZ_CRASH("NYI");
 }
 
 void
 LIRGeneratorARM::visitSimdSelect(MSimdSelect *ins)
 {
--- a/js/src/jit/arm/Lowering-arm.h
+++ b/js/src/jit/arm/Lowering-arm.h
@@ -33,22 +33,16 @@ class LIRGeneratorARM : public LIRGenera
     LDefinition tempByteOpRegister();
 
     inline LDefinition tempToUnbox() {
         return LDefinition::BogusTemp();
     }
 
     bool needTempForPostBarrier() { return false; }
 
-    // ARM has a scratch register, so no need for another temp for dispatch
-    // ICs.
-    LDefinition tempForDispatchCache(MIRType outputType = MIRType_None) {
-        return LDefinition::BogusTemp();
-    }
-
     void lowerUntypedPhiInput(MPhi *phi, uint32_t inputPosition, LBlock *block, size_t lirIndex);
     void defineUntypedPhi(MPhi *phi, size_t lirIndex);
     void lowerForShift(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs,
                        MDefinition *rhs);
     void lowerUrshD(MUrsh *mir);
 
     void lowerForALU(LInstructionHelper<1, 1, 0> *ins, MDefinition *mir,
                      MDefinition *input);
@@ -101,17 +95,16 @@ class LIRGeneratorARM : public LIRGenera
     void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins);
     void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins);
     void visitAsmJSLoadHeap(MAsmJSLoadHeap *ins);
     void visitAsmJSStoreHeap(MAsmJSStoreHeap *ins);
     void visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins);
     void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins);
     void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins);
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins);
-    void visitForkJoinGetSlice(MForkJoinGetSlice *ins);
     void visitSimdBinaryArith(MSimdBinaryArith *ins);
     void visitSimdSelect(MSimdSelect *ins);
     void visitSimdSplatX4(MSimdSplatX4 *ins);
     void visitSimdValueX4(MSimdValueX4 *ins);
     void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
     void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
     void visitSubstr(MSubstr *ins);
 };
--- a/js/src/jit/arm/Trampoline-arm.cpp
+++ b/js/src/jit/arm/Trampoline-arm.cpp
@@ -10,21 +10,18 @@
 #include "jit/Bailouts.h"
 #include "jit/JitCompartment.h"
 #include "jit/JitFrames.h"
 #include "jit/JitSpewer.h"
 #include "jit/Linker.h"
 #ifdef JS_ION_PERF
 # include "jit/PerfSpewer.h"
 #endif
-#include "jit/ParallelFunctions.h"
 #include "jit/VMFunctions.h"
 
-#include "jit/ExecutionMode-inl.h"
-
 using namespace js;
 using namespace js::jit;
 
 static const FloatRegisterSet NonVolatileFloatRegs =
     FloatRegisterSet((1ULL << FloatRegisters::d8) |
                      (1ULL << FloatRegisters::d9) |
                      (1ULL << FloatRegisters::d10) |
                      (1ULL << FloatRegisters::d11) |
@@ -411,17 +408,17 @@ JitRuntime::generateInvalidator(JSContex
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "Invalidator");
 #endif
 
     return code;
 }
 
 JitCode *
-JitRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void **returnAddrOut)
+JitRuntime::generateArgumentsRectifier(JSContext *cx, void **returnAddrOut)
 {
     MacroAssembler masm(cx);
     masm.pushReturnAddress();
 
     // ArgumentsRectifierReg contains the |nargs| pushed onto the current frame.
     // Including |this|, there are (|nargs| + 1) arguments to copy.
     MOZ_ASSERT(ArgumentsRectifierReg == r8);
 
@@ -477,17 +474,17 @@ JitRuntime::generateArgumentsRectifier(J
     masm.ma_push(r0); // actual arguments.
     masm.ma_push(r1); // callee token
     masm.ma_push(r6); // frame descriptor.
 
     // Call the target function.
     // Note that this code assumes the function is JITted.
     masm.andPtr(Imm32(CalleeTokenMask), r1);
     masm.ma_ldr(DTRAddr(r1, DtrOffImm(JSFunction::offsetOfNativeOrScript())), r3);
-    masm.loadBaselineOrIonRaw(r3, r3, mode, nullptr);
+    masm.loadBaselineOrIonRaw(r3, r3, nullptr);
     masm.ma_callJitHalfPush(r3);
 
     uint32_t returnOffset = masm.currentOffset();
 
     // arg1
     //  ...
     // argN
     // num actual args
@@ -637,43 +634,16 @@ GenerateBailoutThunk(JSContext *cx, Macr
                     , sp);
     }
 
     // Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
     JitCode *bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
     masm.branch(bailoutTail);
 }
 
-static void
-GenerateParallelBailoutThunk(MacroAssembler &masm, uint32_t frameClass)
-{
-    // As GenerateBailoutThunk, except we return an error immediately. We do the
-    // bailout dance so that we can walk the stack and have accurate reporting
-    // of frame information.
-
-    PushBailoutFrame(masm, frameClass, r0);
-
-    // Parallel bailout is like parallel failure in that we unwind all the way
-    // to the entry frame. Reserve space for the frame pointer of the entry
-    // frame.
-    const int sizeOfEntryFramePointer = sizeof(uint8_t *) * 2;
-    masm.reserveStack(sizeOfEntryFramePointer);
-    masm.mov(sp, r1);
-
-    masm.setupAlignedABICall(2);
-    masm.passABIArg(r0);
-    masm.passABIArg(r1);
-    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, BailoutPar));
-
-    // Get the frame pointer of the entry frame and return.
-    masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
-    masm.ma_ldr(Address(sp, 0), sp);
-    masm.as_dtr(IsLoad, 32, PostIndex, pc, DTRAddr(sp, DtrOffImm(4)));
-}
-
 JitCode *
 JitRuntime::generateBailoutTable(JSContext *cx, uint32_t frameClass)
 {
     MacroAssembler masm(cx);
 
     {
         // Emit the table without any pools being inserted.
         Label bailout;
@@ -692,30 +662,20 @@ JitRuntime::generateBailoutTable(JSConte
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "BailoutTable");
 #endif
 
     return code;
 }
 
 JitCode *
-JitRuntime::generateBailoutHandler(JSContext *cx, ExecutionMode mode)
+JitRuntime::generateBailoutHandler(JSContext *cx)
 {
     MacroAssembler masm(cx);
-
-    switch (mode) {
-      case SequentialExecution:
-        GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
-        break;
-      case ParallelExecution:
-        GenerateParallelBailoutThunk(masm, NO_FRAME_SIZE_CLASS_ID);
-        break;
-      default:
-        MOZ_CRASH("No such execution mode");
-    }
+    GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
 
     Linker linker(masm);
     AutoFlushICache afc("BailoutHandler");
     JitCode *code = linker.newCode<NoGC>(cx, OTHER_CODE);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "BailoutHandler");
 #endif
@@ -748,17 +708,18 @@ JitRuntime::generateVMWrapper(JSContext 
     //  +8  [args] + argPadding
     //  +0  ExitFrame
     //
     // We're aligned to an exit frame, so link it up.
     // If it isn't a tail call, then the return address needs to be saved
     if (f.expectTailCall == NonTailCall)
         masm.pushReturnAddress();
 
-    masm.enterExitFrameAndLoadContext(&f, cxreg, regs.getAny(), f.executionMode);
+    masm.enterExitFrame(&f);
+    masm.loadJSContext(cxreg);
 
     // Save the base of the argument set stored on the stack.
     Register argsBase = InvalidReg;
     if (f.explicitArgs) {
         argsBase = r5;
         regs.take(argsBase);
         masm.ma_add(sp, Imm32(ExitFrameLayout::SizeWithFooter()), argsBase);
     }
@@ -836,20 +797,20 @@ JitRuntime::generateVMWrapper(JSContext 
     if (outReg != InvalidReg)
         masm.passABIArg(outReg);
 
     masm.callWithABI(f.wrapped);
 
     // Test for failure.
     switch (f.failType()) {
       case Type_Object:
-        masm.branchTestPtr(Assembler::Zero, r0, r0, masm.failureLabel(f.executionMode));
+        masm.branchTestPtr(Assembler::Zero, r0, r0, masm.failureLabel());
         break;
       case Type_Bool:
-        masm.branchIfFalseBool(r0, masm.failureLabel(f.executionMode));
+        masm.branchIfFalseBool(r0, masm.failureLabel());
         break;
       default:
         MOZ_CRASH("unknown failure kind");
     }
 
     // Load the outparam and free any allocated stack.
     switch (f.outParam) {
       case Type_Handle:
--- a/js/src/jit/mips/Bailouts-mips.cpp
+++ b/js/src/jit/mips/Bailouts-mips.cpp
@@ -17,20 +17,17 @@ BailoutFrameInfo::BailoutFrameInfo(const
   : machine_(bailout->machine())
 {
     uint8_t *sp = bailout->parentStackPointer();
     framePointer_ = sp + bailout->frameSize();
     topFrameSize_ = framePointer_ - sp;
 
     JSScript *script = ScriptFromCalleeToken(((JitFrameLayout *) framePointer_)->calleeToken());
     JitActivation *activation = activations.activation()->asJit();
-    if (activation->cx()->isForkJoinContext())
-        topIonScript_ = script->parallelIonScript();
-    else
-        topIonScript_ = script->ionScript();
+    topIonScript_ = script->ionScript();
 
     attachOnJitActivation(activations);
 
     if (bailout->frameClass() == FrameSizeClass::None()) {
         snapshotOffset_ = bailout->snapshotOffset();
         return;
     }
 
--- a/js/src/jit/mips/CodeGenerator-mips.cpp
+++ b/js/src/jit/mips/CodeGenerator-mips.cpp
@@ -163,17 +163,17 @@ CodeGeneratorMIPS::generateOutOfLineCode
         masm.bind(&deoptLabel_);
 
         // Push the frame size, so the handler can recover the IonScript.
         // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk
         // We have to use 'ra' because generateBailoutTable will implicitly do
         // the same.
         masm.move32(Imm32(frameSize()), ra);
 
-        JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler(gen->info().executionMode());
+        JitCode *handler = gen->jitRuntime()->getGenericBailoutHandler();
 
         masm.branch(handler);
     }
 
     return true;
 }
 
 void
@@ -2146,20 +2146,8 @@ CodeGeneratorMIPS::visitNegD(LNegD *ins)
 void
 CodeGeneratorMIPS::visitNegF(LNegF *ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     FloatRegister output = ToFloatRegister(ins->output());
 
     masm.as_negs(output, input);
 }
-
-void
-CodeGeneratorMIPS::visitForkJoinGetSlice(LForkJoinGetSlice *ins)
-{
-    MOZ_CRASH("NYI");
-}
-
-JitCode *
-JitRuntime::generateForkJoinGetSliceStub(JSContext *cx)
-{
-    MOZ_CRASH("NYI");
-}
--- a/js/src/jit/mips/CodeGenerator-mips.h
+++ b/js/src/jit/mips/CodeGenerator-mips.h
@@ -259,18 +259,16 @@ class CodeGeneratorMIPS : public CodeGen
     void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins);
     void visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins);
     void visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins);
     void visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins);
     void visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins);
 
     void visitAsmJSPassStackArg(LAsmJSPassStackArg *ins);
 
-    void visitForkJoinGetSlice(LForkJoinGetSlice *ins);
-
     void generateInvalidateEpilogue();
 
   protected:
     void visitEffectiveAddress(LEffectiveAddress *ins);
     void visitUDiv(LUDiv *ins);
     void visitUMod(LUMod *ins);
 
   public:
--- a/js/src/jit/mips/Lowering-mips.cpp
+++ b/js/src/jit/mips/Lowering-mips.cpp
@@ -536,22 +536,16 @@ LIRGeneratorMIPS::visitSubstr(MSubstr *i
 
 void
 LIRGeneratorMIPS::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins)
 {
     MOZ_CRASH("NYI");
 }
 
 void
-LIRGeneratorMIPS::visitForkJoinGetSlice(MForkJoinGetSlice *ins)
-{
-    MOZ_CRASH("NYI");
-}
-
-void
 LIRGeneratorMIPS::visitSimdBinaryArith(MSimdBinaryArith *ins)
 {
     MOZ_CRASH("NYI");
 }
 
 void
 LIRGeneratorMIPS::visitSimdSelect(MSimdSelect *ins)
 {
--- a/js/src/jit/mips/Lowering-mips.h
+++ b/js/src/jit/mips/Lowering-mips.h
@@ -33,22 +33,16 @@ class LIRGeneratorMIPS : public LIRGener
     LDefinition tempByteOpRegister();
 
     inline LDefinition tempToUnbox() {
         return LDefinition::BogusTemp();
     }
 
     bool needTempForPostBarrier() { return false; }
 
-    // MIPS has a scratch register, so no need for another temp for dispatch
-    // ICs.
-    LDefinition tempForDispatchCache(MIRType outputType = MIRType_None) {
-        return LDefinition::BogusTemp();
-    }
-
     void lowerUntypedPhiInput(MPhi *phi, uint32_t inputPosition, LBlock *block, size_t lirIndex);
     void defineUntypedPhi(MPhi *phi, size_t lirIndex);
     void lowerForShift(LInstructionHelper<1, 2, 0> *ins, MDefinition *mir, MDefinition *lhs,
                        MDefinition *rhs);
     void lowerUrshD(MUrsh *mir);
 
     void lowerForALU(LInstructionHelper<1, 1, 0> *ins, MDefinition *mir,
                      MDefinition *input);
@@ -101,17 +95,16 @@ class LIRGeneratorMIPS : public LIRGener
     void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins);
     void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins);
     void visitAsmJSLoadHeap(MAsmJSLoadHeap *ins);
     void visitAsmJSStoreHeap(MAsmJSStoreHeap *ins);
     void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins);
     void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins);
     void visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins);
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins);
-    void visitForkJoinGetSlice(MForkJoinGetSlice *ins);
     void visitSimdBinaryArith(MSimdBinaryArith *ins);
     void visitSimdSelect(MSimdSelect *ins);
     void visitSimdSplatX4(MSimdSplatX4 *ins);
     void visitSimdValueX4(MSimdValueX4 *ins);
     void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
     void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
     void visitSubstr(MSubstr *ins);
 };
--- a/js/src/jit/mips/Trampoline-mips.cpp
+++ b/js/src/jit/mips/Trampoline-mips.cpp
@@ -11,21 +11,18 @@
 #include "jit/JitFrames.h"
 #include "jit/JitSpewer.h"
 #include "jit/Linker.h"
 #include "jit/mips/Bailouts-mips.h"
 #include "jit/mips/BaselineHelpers-mips.h"
 #ifdef JS_ION_PERF
 # include "jit/PerfSpewer.h"
 #endif
-#include "jit/ParallelFunctions.h"
 #include "jit/VMFunctions.h"
 
-#include "jit/ExecutionMode-inl.h"
-
 using namespace js;
 using namespace js::jit;
 
 static_assert(sizeof(uintptr_t) == sizeof(uint32_t), "Not 64-bit clean.");
 
 struct EnterJITRegs
 {
     double f30;
@@ -381,17 +378,17 @@ JitRuntime::generateInvalidator(JSContex
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "Invalidator");
 #endif
 
     return code;
 }
 
 JitCode *
-JitRuntime::generateArgumentsRectifier(JSContext *cx, ExecutionMode mode, void **returnAddrOut)
+JitRuntime::generateArgumentsRectifier(JSContext *cx, void **returnAddrOut)