Bug 1358599 - Use runtime guards for jitcode pre-barriers instead of patchable jumps. r=jandem r=sfink
author Kannan Vijayan <kvijayan@mozilla.com>
date Sun, 30 Apr 2017 08:42:34 -0400
changeset 355813 0879ee58fcdce9c33dd20f3f4bf89c2d8f65828f
parent 355812 cc77c0f84a03b136ade5768ad111e4b79f454ebe
child 355814 5278e2a35fc8f2be390243db1e62858bf0982055
child 355821 fc9447fa866d14259aa46936fb03af7331dc31ae
push id 31743
push user archaeopteryx@coole-files.de
push date Sun, 30 Apr 2017 18:23:59 +0000
treeherder mozilla-central@5278e2a35fc8
reviewers jandem, sfink
bugs 1358599
milestone 55.0a1
js/public/HeapAPI.h
js/src/gc/Verifier.cpp
js/src/gc/Zone.cpp
js/src/gc/Zone.h
js/src/jit/BaselineCacheIRCompiler.cpp
js/src/jit/BaselineCompiler.cpp
js/src/jit/BaselineJIT.h
js/src/jit/CodeGenerator.cpp
js/src/jit/Ion.cpp
js/src/jit/Ion.h
js/src/jit/IonCacheIRCompiler.cpp
js/src/jit/IonCode.h
js/src/jit/IonIC.cpp
js/src/jit/IonIC.h
js/src/jit/JitCompartment.h
js/src/jit/MacroAssembler.h
js/src/jit/SharedIC.cpp
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/SharedICHelpers-arm.h
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/arm64/MacroAssembler-arm64.h
js/src/jit/arm64/SharedICHelpers-arm64.h
js/src/jit/mips-shared/Assembler-mips-shared.cpp
js/src/jit/mips-shared/Assembler-mips-shared.h
js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
js/src/jit/none/MacroAssembler-none.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/x64/SharedICHelpers-x64.h
js/src/jit/x86-shared/Assembler-x86-shared.cpp
js/src/jit/x86-shared/Assembler-x86-shared.h
js/src/jit/x86/SharedICHelpers-x86.h
js/src/jsgc.cpp
--- a/js/public/HeapAPI.h
+++ b/js/public/HeapAPI.h
@@ -108,23 +108,23 @@ struct Zone
         Sweep,
         Finished,
         Compact
     };
 
   protected:
     JSRuntime* const runtime_;
     JSTracer* const barrierTracer_;     // A pointer to the JSRuntime's |gcMarker|.
-    bool needsIncrementalBarrier_;
+    uint32_t needsIncrementalBarrier_;
     GCState gcState_;
 
     Zone(JSRuntime* runtime, JSTracer* barrierTracerArg)
       : runtime_(runtime),
         barrierTracer_(barrierTracerArg),
-        needsIncrementalBarrier_(false),
+        needsIncrementalBarrier_(0),
         gcState_(NoGC)
     {}
 
   public:
     bool needsIncrementalBarrier() const {
         return needsIncrementalBarrier_;
     }
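
The widening from bool to uint32_t is presumably what lets jitted code test the flag with an ordinary 32-bit load. A minimal standalone sketch of the resulting runtime-guard shape (illustrative types and names, not SpiderMonkey's):

    #include <cstdint>
    #include <cstdio>

    struct SketchZone {
        // Widened so the JIT can test it with a plain 32-bit load rather
        // than a load whose width varies with the platform's bool.
        uint32_t needsIncrementalBarrier_ = 0;
        const uint32_t* addressOfNeedsIncrementalBarrier() const {
            return &needsIncrementalBarrier_;
        }
    };

    // What a guarded store compiles down to in spirit: branch on the flag
    // at run time instead of patching the code at GC boundaries.
    static void guardedStore(SketchZone& zone, void** slot, void* value) {
        if (*zone.addressOfNeedsIncrementalBarrier())   // the runtime guard
            std::printf("pre-barrier on old value %p\n", *slot);
        *slot = value;
    }

    int main() {
        SketchZone zone;
        void* slot = nullptr;
        guardedStore(zone, &slot, &zone);    // flag clear: no barrier
        zone.needsIncrementalBarrier_ = 1;   // incremental marking begins
        guardedStore(zone, &slot, nullptr);  // flag set: barrier fires
    }
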
 
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -241,17 +241,17 @@ gc::GCRuntime::startVerifyPreBarriers()
     }
 
     verifyPreData = trc;
     incrementalState = State::Mark;
     marker.start();
 
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         MOZ_ASSERT(!zone->usedByHelperThread());
-        zone->setNeedsIncrementalBarrier(true, Zone::UpdateJit);
+        zone->setNeedsIncrementalBarrier(true);
         zone->arenas.purge();
     }
 
     return;
 
 oom:
     incrementalState = State::NotActive;
     js_delete(trc);
@@ -335,17 +335,17 @@ gc::GCRuntime::endVerifyPreBarriers()
 
     bool compartmentCreated = false;
 
     /* We need to disable barriers before tracing, which may invoke barriers. */
     for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
         if (!zone->needsIncrementalBarrier())
             compartmentCreated = true;
 
-        zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
+        zone->setNeedsIncrementalBarrier(false);
     }
 
     /*
      * We need to bump gcNumber so that the methodjit knows that jitcode has
      * been discarded.
      */
     MOZ_ASSERT(trc->number == number);
     number++;
--- a/js/src/gc/Zone.cpp
+++ b/js/src/gc/Zone.cpp
@@ -52,17 +52,16 @@ JS::Zone::Zone(JSRuntime* rt, ZoneGroup*
     data(group, nullptr),
     isSystem(group, false),
 #ifdef DEBUG
     gcLastSweepGroupIndex(group, 0),
 #endif
     jitZone_(group, nullptr),
     gcScheduled_(false),
     gcPreserveCode_(group, false),
-    jitUsingBarriers_(group, false),
     keepShapeTables_(group, false),
     listNext_(group, NotOnList)
 {
     /* Ensure that there are no vtables to mess us up here. */
     MOZ_ASSERT(reinterpret_cast<JS::shadow::Zone*>(this) ==
                static_cast<JS::shadow::Zone*>(this));
 
     AutoLockGC lock(rt);
@@ -94,23 +93,18 @@ bool Zone::init(bool isSystemArg)
            gcSweepGroupEdges().init() &&
            gcWeakKeys().init() &&
            typeDescrObjects().init() &&
            markedAtoms().init() &&
            atomCache().init();
 }
 
 void
-Zone::setNeedsIncrementalBarrier(bool needs, ShouldUpdateJit updateJit)
+Zone::setNeedsIncrementalBarrier(bool needs)
 {
-    if (updateJit == UpdateJit && needs != jitUsingBarriers_) {
-        jit::ToggleBarriers(this, needs);
-        jitUsingBarriers_ = needs;
-    }
-
     MOZ_ASSERT_IF(needs && isAtomsZone(),
                   !runtimeFromActiveCooperatingThread()->hasHelperThreadZones());
     MOZ_ASSERT_IF(needs, canCollect());
     needsIncrementalBarrier_ = needs;
 }
 
 void
 Zone::beginSweepTypes(FreeOp* fop, bool releaseTypes)
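
With the jit-toggling hook gone, flipping barrier state is a single field write instead of a walk over all jitcode in the zone. A toy before/after comparison (stand-in types; the real jit::ToggleBarriers is deleted further down in this patch):

    #include <cstdint>
    #include <vector>

    struct StubCode { bool barrierLive = false; };  // stand-in for patchable jitcode

    // Before: O(all jitcode in the zone), with executable pages reprotected
    // so each toggled jump could be rewritten in place.
    void setNeedsIncrementalBarrierOld(std::vector<StubCode>& zoneCode,
                                       uint32_t& flag, bool needs) {
        for (StubCode& code : zoneCode)
            code.barrierLive = needs;   // stand-in for ToggleToCmp/ToggleToJmp
        flag = needs ? 1 : 0;
    }

    // After: O(1). Every guarded barrier site reads the flag when it runs.
    void setNeedsIncrementalBarrierNew(uint32_t& flag, bool needs) {
        flag = needs ? 1 : 0;
    }
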
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -254,19 +254,18 @@ struct Zone : public JS::shadow::Zone,
     uint64_t gcNumber();
 
     bool compileBarriers() const { return compileBarriers(needsIncrementalBarrier()); }
     bool compileBarriers(bool needsIncrementalBarrier) const {
         return needsIncrementalBarrier ||
                runtimeFromActiveCooperatingThread()->hasZealMode(js::gc::ZealMode::VerifierPre);
     }
 
-    enum ShouldUpdateJit { DontUpdateJit, UpdateJit };
-    void setNeedsIncrementalBarrier(bool needs, ShouldUpdateJit updateJit);
-    const bool* addressOfNeedsIncrementalBarrier() const { return &needsIncrementalBarrier_; }
+    void setNeedsIncrementalBarrier(bool needs);
+    const uint32_t* addressOfNeedsIncrementalBarrier() const { return &needsIncrementalBarrier_; }
 
     js::jit::JitZone* getJitZone(JSContext* cx) { return jitZone_ ? jitZone_ : createJitZone(cx); }
     js::jit::JitZone* jitZone() { return jitZone_; }
 
     bool isAtomsZone() const { return runtimeFromAnyThread()->isAtomsZone(this); }
     bool isSelfHostingZone() const { return runtimeFromAnyThread()->isSelfHostingZone(this); }
 
     void prepareForCompacting();
@@ -601,17 +600,16 @@ struct Zone : public JS::shadow::Zone,
         keepShapeTables_ = b;
     }
 
   private:
     js::ZoneGroupData<js::jit::JitZone*> jitZone_;
 
     js::ActiveThreadData<bool> gcScheduled_;
     js::ZoneGroupData<bool> gcPreserveCode_;
-    js::ZoneGroupData<bool> jitUsingBarriers_;
     js::ZoneGroupData<bool> keepShapeTables_;
 
     // Allow zones to be linked into a list
     friend class js::gc::ZoneList;
     static Zone * const NotOnList;
     js::ZoneGroupOrGCTaskData<Zone*> listNext_;
     bool isOnList() const;
     Zone* nextZone() const;
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -202,20 +202,16 @@ BaselineCacheIRCompiler::compile()
     Linker linker(masm);
     AutoFlushICache afc("getStubCode");
     Rooted<JitCode*> newStubCode(cx_, linker.newCode<NoGC>(cx_, BASELINE_CODE));
     if (!newStubCode) {
         cx_->recoverFromOutOfMemory();
         return nullptr;
     }
 
-    // All barriers are emitted off-by-default, enable them if needed.
-    if (cx_->zone()->needsIncrementalBarrier())
-        newStubCode->togglePreBarriers(true, DontReprotect);
-
     return newStubCode;
 }
 
 bool
 BaselineCacheIRCompiler::emitGuardShape()
 {
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     AutoScratchRegister scratch(allocator, masm);
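
The same post-link fixup disappears from every compiler touched by this patch (Baseline, Ion, CacheIR, shared ICs): freshly linked code used to come out with barriers off and had to be toggled on if a GC was already in progress. A hedged sketch of why the guard makes that unnecessary:

    #include <cstdint>

    // Illustrative only: a just-linked stub captures the address of its
    // zone's flag, so it is phase-correct from its first execution and
    // never needs patching after linking.
    struct LinkedStub {
        const uint32_t* zoneBarrierFlag;
        void run(void (*preBarrier)()) {
            if (*zoneBarrierFlag)   // evaluated on every run
                preBarrier();
            // ...the store proceeds either way...
        }
    };
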
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -235,20 +235,16 @@ BaselineCompiler::compile()
 
     // Copy IC entries
     if (icEntries_.length())
         baselineScript->copyICEntries(script, &icEntries_[0], masm);
 
     // Adopt fallback stubs from the compiler into the baseline script.
     baselineScript->adoptFallbackStubs(&stubSpace_);
 
-    // All barriers are emitted off-by-default, toggle them on if needed.
-    if (cx->zone()->needsIncrementalBarrier())
-        baselineScript->toggleBarriers(true, DontReprotect);
-
     // If profiler instrumentation is enabled, toggle instrumentation on.
     if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
         baselineScript->toggleProfilerInstrumentation(true);
 
     // Patch IC loads using IC entries.
     for (size_t i = 0; i < icLoadLabels_.length(); i++) {
         CodeOffset label = icLoadLabels_[i].label;
         size_t icEntry = icLoadLabels_[i].icEntry;
@@ -2647,17 +2643,17 @@ BaselineCompiler::emit_JSOP_SETALIASEDVA
     }
 
     // Keep rvalue in R0.
     frame.popRegsAndSync(1);
     Register objReg = R2.scratchReg();
 
     getEnvironmentCoordinateObject(objReg);
     Address address = getEnvironmentCoordinateAddressFromObject(objReg, R1.scratchReg());
-    masm.patchableCallPreBarrier(address, MIRType::Value);
+    masm.guardedCallPreBarrier(address, MIRType::Value);
     masm.storeValue(R0, address);
     frame.push(R0);
 
     // Only R0 is live at this point.
     // Scope coordinate object is already in R2.scratchReg().
     Register temp = R1.scratchReg();
 
     Label skipBarrier;
@@ -3067,17 +3063,17 @@ BaselineCompiler::emitFormalArgAccess(ui
     masm.loadPrivate(Address(reg, ArgumentsObject::getDataSlotOffset()), reg);
 
     // Load/store the argument.
     Address argAddr(reg, ArgumentsData::offsetOfArgs() + arg * sizeof(Value));
     if (get) {
         masm.loadValue(argAddr, R0);
         frame.push(R0);
     } else {
-        masm.patchableCallPreBarrier(argAddr, MIRType::Value);
+        masm.guardedCallPreBarrier(argAddr, MIRType::Value);
         masm.loadValue(frame.addressOfStackValue(frame.peek(-1)), R0);
         masm.storeValue(R0, argAddr);
 
         MOZ_ASSERT(frame.numUnsyncedSlots() == 0);
 
         Register temp = R1.scratchReg();
 
         // Reload the arguments object
@@ -4278,17 +4274,17 @@ BaselineCompiler::emit_JSOP_INITIALYIELD
 
     MOZ_ASSERT(GET_UINT24(pc) == 0);
     masm.storeValue(Int32Value(0),
                     Address(genObj, GeneratorObject::offsetOfYieldAndAwaitIndexSlot()));
 
     Register envObj = R0.scratchReg();
     Address envChainSlot(genObj, GeneratorObject::offsetOfEnvironmentChainSlot());
     masm.loadPtr(frame.addressOfEnvironmentChain(), envObj);
-    masm.patchableCallPreBarrier(envChainSlot, MIRType::Value);
+    masm.guardedCallPreBarrier(envChainSlot, MIRType::Value);
     masm.storeValue(JSVAL_TYPE_OBJECT, envObj, envChainSlot);
 
     Register temp = R1.scratchReg();
     Label skipBarrier;
     masm.branchPtrInNurseryChunk(Assembler::Equal, genObj, temp, &skipBarrier);
     masm.branchPtrInNurseryChunk(Assembler::NotEqual, envObj, temp, &skipBarrier);
     masm.push(genObj);
     MOZ_ASSERT(genObj == R2.scratchReg());
@@ -4324,17 +4320,17 @@ BaselineCompiler::emit_JSOP_YIELD()
         // generator is in the closing state, see GeneratorObject::suspend.
 
         masm.storeValue(Int32Value(GET_UINT24(pc)),
                         Address(genObj, GeneratorObject::offsetOfYieldAndAwaitIndexSlot()));
 
         Register envObj = R0.scratchReg();
         Address envChainSlot(genObj, GeneratorObject::offsetOfEnvironmentChainSlot());
         masm.loadPtr(frame.addressOfEnvironmentChain(), envObj);
-        masm.patchableCallPreBarrier(envChainSlot, MIRType::Value);
+        masm.guardedCallPreBarrier(envChainSlot, MIRType::Value);
         masm.storeValue(JSVAL_TYPE_OBJECT, envObj, envChainSlot);
 
         Register temp = R1.scratchReg();
         Label skipBarrier;
         masm.branchPtrInNurseryChunk(Assembler::Equal, genObj, temp, &skipBarrier);
         masm.branchPtrInNurseryChunk(Assembler::NotEqual, envObj, temp, &skipBarrier);
         MOZ_ASSERT(genObj == R2.scratchReg());
         masm.call(&postBarrierSlot_);
@@ -4567,17 +4563,17 @@ BaselineCompiler::emit_JSOP_RESUME()
         {
             masm.pushValue(Address(scratch2, 0));
             masm.addPtr(Imm32(sizeof(Value)), scratch2);
             masm.sub32(Imm32(1), initLength);
             masm.jump(&loop);
         }
         masm.bind(&loopDone);
 
-        masm.patchableCallPreBarrier(exprStackSlot, MIRType::Value);
+        masm.guardedCallPreBarrier(exprStackSlot, MIRType::Value);
         masm.storeValue(NullValue(), exprStackSlot);
         regs.add(initLength);
     }
 
     masm.bind(&noExprStack);
     masm.pushValue(retVal);
 
     if (resumeKind == GeneratorObject::NEXT) {
--- a/js/src/jit/BaselineJIT.h
+++ b/js/src/jit/BaselineJIT.h
@@ -370,20 +370,16 @@ struct BaselineScript
     EnvironmentObject* templateEnvironment() const {
         return templateEnv_;
     }
     void setTemplateEnvironment(EnvironmentObject* templateEnv) {
         MOZ_ASSERT(!templateEnv_);
         templateEnv_ = templateEnv;
     }
 
-    void toggleBarriers(bool enabled, ReprotectCode reprotect = Reprotect) {
-        method()->togglePreBarriers(enabled, reprotect);
-    }
-
     bool containsCodeAddress(uint8_t* addr) const {
         return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize();
     }
 
     BaselineICEntry& icEntry(size_t index);
     BaselineICEntry& icEntryFromReturnOffset(CodeOffset returnOffset);
     BaselineICEntry& icEntryFromPCOffset(uint32_t pcOffset);
     BaselineICEntry& icEntryFromPCOffset(uint32_t pcOffset, BaselineICEntry* prevLookedUpEntry);
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -1326,19 +1326,19 @@ PrepareAndExecuteRegExp(JSContext* cx, M
     // Lazily update the RegExpStatics.
     masm.movePtr(ImmPtr(res), temp1);
 
     Address pendingInputAddress(temp1, RegExpStatics::offsetOfPendingInput());
     Address matchesInputAddress(temp1, RegExpStatics::offsetOfMatchesInput());
     Address lazySourceAddress(temp1, RegExpStatics::offsetOfLazySource());
     Address lazyIndexAddress(temp1, RegExpStatics::offsetOfLazyIndex());
 
-    masm.patchableCallPreBarrier(pendingInputAddress, MIRType::String);
-    masm.patchableCallPreBarrier(matchesInputAddress, MIRType::String);
-    masm.patchableCallPreBarrier(lazySourceAddress, MIRType::String);
+    masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
+    masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
+    masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);
 
     masm.storePtr(input, pendingInputAddress);
     masm.storePtr(input, matchesInputAddress);
     masm.storePtr(lastIndex, Address(temp1, RegExpStatics::offsetOfLazyIndex()));
     masm.store32(Imm32(1), Address(temp1, RegExpStatics::offsetOfPendingLazyEvaluation()));
 
     masm.loadPtr(Address(regexp, NativeObject::getFixedSlotOffset(RegExpObject::PRIVATE_SLOT)), temp2);
     masm.loadPtr(Address(temp2, RegExpShared::offsetOfSource()), temp3);
@@ -1812,19 +1812,16 @@ JitCompartment::generateRegExpMatcherStu
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "RegExpMatcherStub");
 #endif
 #ifdef MOZ_VTUNE
     vtune::MarkStub(code, "RegExpMatcherStub");
 #endif
 
-    if (cx->zone()->needsIncrementalBarrier())
-        code->togglePreBarriers(true, DontReprotect);
-
     return code;
 }
 
 class OutOfLineRegExpMatcher : public OutOfLineCodeBase<CodeGenerator>
 {
     LRegExpMatcher* lir_;
 
   public:
@@ -1972,19 +1969,16 @@ JitCompartment::generateRegExpSearcherSt
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
 #endif
 #ifdef MOZ_VTUNE
     vtune::MarkStub(code, "RegExpSearcherStub");
 #endif
 
-    if (cx->zone()->needsIncrementalBarrier())
-        code->togglePreBarriers(true, DontReprotect);
-
     return code;
 }
 
 class OutOfLineRegExpSearcher : public OutOfLineCodeBase<CodeGenerator>
 {
     LRegExpSearcher* lir_;
 
   public:
@@ -2123,19 +2117,16 @@ JitCompartment::generateRegExpTesterStub
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "RegExpTesterStub");
 #endif
 #ifdef MOZ_VTUNE
     vtune::MarkStub(code, "RegExpTesterStub");
 #endif
 
-    if (cx->zone()->needsIncrementalBarrier())
-        code->togglePreBarriers(true, DontReprotect);
-
     return code;
 }
 
 class OutOfLineRegExpTester : public OutOfLineCodeBase<CodeGenerator>
 {
     LRegExpTester* lir_;
 
   public:
@@ -3255,19 +3246,19 @@ CodeGenerator::visitGetPropertyPolymorph
     emitGetPropertyPolymorphic(ins, obj, temp, output);
 }
 
 template <typename T>
 static void
 EmitUnboxedPreBarrier(MacroAssembler &masm, T address, JSValueType type)
 {
     if (type == JSVAL_TYPE_OBJECT)
-        masm.patchableCallPreBarrier(address, MIRType::Object);
+        masm.guardedCallPreBarrier(address, MIRType::Object);
     else if (type == JSVAL_TYPE_STRING)
-        masm.patchableCallPreBarrier(address, MIRType::String);
+        masm.guardedCallPreBarrier(address, MIRType::String);
     else
         MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(type));
 }
 
 void
 CodeGenerator::emitSetPropertyPolymorphic(LInstruction* ins, Register obj, Register scratch,
                                           const ConstantOrRegister& value)
 {
@@ -6556,18 +6547,18 @@ void
 CodeGenerator::emitLoadIteratorValues<ValueMap>(Register result, Register temp, Register front)
 {
     size_t elementsOffset = NativeObject::offsetOfFixedElements();
 
     Address keyAddress(front, ValueMap::Entry::offsetOfKey());
     Address valueAddress(front, ValueMap::Entry::offsetOfValue());
     Address keyElemAddress(result, elementsOffset);
     Address valueElemAddress(result, elementsOffset + sizeof(Value));
-    masm.patchableCallPreBarrier(keyElemAddress, MIRType::Value);
-    masm.patchableCallPreBarrier(valueElemAddress, MIRType::Value);
+    masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
+    masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
     masm.storeValue(keyAddress, keyElemAddress, temp);
     masm.storeValue(valueAddress, valueElemAddress, temp);
 
     Label keyIsNotObject, valueIsNotNurseryObject, emitBarrier;
     masm.branchTestObject(Assembler::NotEqual, keyAddress, &keyIsNotObject);
     masm.branchValueIsNurseryObject(Assembler::Equal, keyAddress, temp, &emitBarrier);
     masm.bind(&keyIsNotObject);
     masm.branchTestObject(Assembler::NotEqual, valueAddress, &valueIsNotNurseryObject);
@@ -6585,17 +6576,17 @@ CodeGenerator::emitLoadIteratorValues<Va
 template <>
 void
 CodeGenerator::emitLoadIteratorValues<ValueSet>(Register result, Register temp, Register front)
 {
     size_t elementsOffset = NativeObject::offsetOfFixedElements();
 
     Address keyAddress(front, ValueSet::offsetOfEntryKey());
     Address keyElemAddress(result, elementsOffset);
-    masm.patchableCallPreBarrier(keyElemAddress, MIRType::Value);
+    masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
     masm.storeValue(keyAddress, keyElemAddress, temp);
 
     Label keyIsNotObject;
     masm.branchTestObject(Assembler::NotEqual, keyAddress, &keyIsNotObject);
     masm.branchValueIsNurseryObject(Assembler::NotEqual, keyAddress, temp, &keyIsNotObject);
     {
         saveVolatile(temp);
         emitPostWriteBarrier(result);
@@ -8775,17 +8766,17 @@ CodeGenerator::visitOutOfLineStoreElemen
 }
 
 template <typename T>
 static void
 StoreUnboxedPointer(MacroAssembler& masm, T address, MIRType type, const LAllocation* value,
                     bool preBarrier)
 {
     if (preBarrier)
-        masm.patchableCallPreBarrier(address, type);
+        masm.guardedCallPreBarrier(address, type);
     if (value->isConstant()) {
         Value v = value->toConstant()->toJSValue();
         if (v.isGCThing()) {
             masm.storePtr(ImmGCPtr(v.toGCThing()), address);
         } else {
             MOZ_ASSERT(v.isNull());
             masm.storePtr(ImmWord(0), address);
         }
@@ -9232,17 +9223,17 @@ CodeGenerator::visitIteratorStartO(LIter
     // Ensure the object's prototype's prototype is nullptr. The last native
     // iterator will always have a prototype chain length of one (i.e. it must
     // be a plain object), so we do not need to generate a loop here.
     masm.loadObjProto(obj, temp1);
     masm.loadObjProto(temp1, temp1);
     masm.branchTestPtr(Assembler::NonZero, temp1, temp1, ool->entry());
 
     // Pre-write barrier for store to 'obj'.
-    masm.patchableCallPreBarrier(Address(niTemp, offsetof(NativeIterator, obj)), MIRType::Object);
+    masm.guardedCallPreBarrier(Address(niTemp, offsetof(NativeIterator, obj)), MIRType::Object);
 
     // Mark iterator as active.
     masm.storePtr(obj, Address(niTemp, offsetof(NativeIterator, obj)));
     masm.or32(Imm32(JSITER_ACTIVE), Address(niTemp, offsetof(NativeIterator, flags)));
 
     // Post-write barrier for stores to 'obj'. The iterator JSObject is never
     // nursery allocated. Put this in the whole cell buffer if we wrote a
     // nursery pointer into it.
@@ -10032,22 +10023,16 @@ CodeGenerator::link(JSContext* cx, Compi
                 cx->zone()->group()->storeBuffer().putWholeCell(script);
                 break;
             }
         }
     }
     if (patchableBackedges_.length() > 0)
         ionScript->copyPatchableBackedges(cx, code, patchableBackedges_.begin(), masm);
 
-    // The correct state for prebarriers is unknown until the end of compilation,
-    // since a GC can occur during code generation. All barriers are emitted
-    // off-by-default, and are toggled on here if necessary.
-    if (cx->zone()->needsIncrementalBarrier())
-        ionScript->toggleBarriers(true, DontReprotect);
-
     // Attach any generated script counts to the script.
     if (IonScriptCounts* counts = extractScriptCounts())
         script->addIonCounts(counts);
 
     guardIonScript.release();
     guardRecordedConstraints.release();
     return true;
 }
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -675,43 +675,16 @@ JitCompartment::sweep(FreeOp* fop, JSCom
 }
 
 void
 JitZone::sweep(FreeOp* fop)
 {
     baselineCacheIRStubCodes_.sweep();
 }
 
-void
-JitCompartment::toggleBarriers(bool enabled)
-{
-    // Toggle barriers in compartment wide stubs that have patchable pre barriers.
-    if (regExpMatcherStub_)
-        regExpMatcherStub_->togglePreBarriers(enabled, Reprotect);
-    if (regExpSearcherStub_)
-        regExpSearcherStub_->togglePreBarriers(enabled, Reprotect);
-    if (regExpTesterStub_)
-        regExpTesterStub_->togglePreBarriers(enabled, Reprotect);
-
-    // Toggle barriers in baseline IC stubs.
-    for (ICStubCodeMap::Enum e(*stubCodes_); !e.empty(); e.popFront()) {
-        JitCode* code = *e.front().value().unsafeGet();
-        code->togglePreBarriers(enabled, Reprotect);
-    }
-}
-
-void
-JitZone::toggleBarriers(bool enabled)
-{
-    for (BaselineCacheIRStubCodeMap::Enum e(baselineCacheIRStubCodes_); !e.empty(); e.popFront()) {
-        JitCode* code = *e.front().value().unsafeGet();
-        code->togglePreBarriers(enabled, Reprotect);
-    }
-}
-
 size_t
 JitCompartment::sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
 {
     size_t n = mallocSizeOf(this);
     if (stubCodes_)
         n += stubCodes_->sizeOfIncludingThis(mallocSizeOf);
     return n;
 }
@@ -783,19 +756,16 @@ JitCode::copyFrom(MacroAssembler& masm)
     masm.executableCopy(code_);
 
     jumpRelocTableBytes_ = masm.jumpRelocationTableBytes();
     masm.copyJumpRelocationTable(code_ + jumpRelocTableOffset());
 
     dataRelocTableBytes_ = masm.dataRelocationTableBytes();
     masm.copyDataRelocationTable(code_ + dataRelocTableOffset());
 
-    preBarrierTableBytes_ = masm.preBarrierTableBytes();
-    masm.copyPreBarrierTable(code_ + preBarrierTableOffset());
-
     masm.processCodeLabels(code_);
 }
 
 void
 JitCode::traceChildren(JSTracer* trc)
 {
     // Note that we cannot mark invalidated scripts, since we've basically
     // corrupted the code stream by injecting bailouts.
@@ -851,36 +821,16 @@ JitCode::finalize(FreeOp* fop)
     // integration, we don't want to reuse code addresses, so we just leak the
     // memory instead.
     if (!PerfEnabled())
         pool_->release(headerSize_ + bufferSize_, CodeKind(kind_));
 
     pool_ = nullptr;
 }
 
-void
-JitCode::togglePreBarriers(bool enabled, ReprotectCode reprotect)
-{
-    uint8_t* start = code_ + preBarrierTableOffset();
-    CompactBufferReader reader(start, start + preBarrierTableBytes_);
-
-    if (!reader.more())
-        return;
-
-    MaybeAutoWritableJitCode awjc(this, reprotect);
-    do {
-        size_t offset = reader.readUnsigned();
-        CodeLocationLabel loc(this, CodeOffset(offset));
-        if (enabled)
-            Assembler::ToggleToCmp(loc);
-        else
-            Assembler::ToggleToJmp(loc);
-    } while (reader.more());
-}
-
 IonScript::IonScript()
   : method_(nullptr),
     deoptTable_(nullptr),
     osrPc_(nullptr),
     osrEntryOffset_(0),
     skipArgCheckEntryOffset_(0),
     invalidateEpilogueOffset_(0),
     invalidateEpilogueDataOffset_(0),
@@ -1263,25 +1213,16 @@ IonScript::Destroy(FreeOp* fop, IonScrip
 
 void
 JS::DeletePolicy<js::jit::IonScript>::operator()(const js::jit::IonScript* script)
 {
     IonScript::Destroy(rt_->defaultFreeOp(), const_cast<IonScript*>(script));
 }
 
 void
-IonScript::toggleBarriers(bool enabled, ReprotectCode reprotect)
-{
-    method()->togglePreBarriers(enabled, reprotect);
-
-    for (size_t i = 0; i < numICs(); i++)
-        getICFromIndex(i).togglePreBarriers(enabled, reprotect);
-}
-
-void
 IonScript::purgeOptimizedStubs(Zone* zone)
 {
     for (size_t i = 0; i < numSharedStubs(); i++) {
         IonICEntry& entry = sharedStubList()[i];
         if (!entry.hasStub())
             continue;
 
         ICStub* lastStub = entry.firstStub();
@@ -1357,39 +1298,16 @@ IonScript::unlinkFromRuntime(FreeOp* fop
         jzg->removePatchableBackedge(fop->runtime()->jitRuntime(), &backedgeList()[i]);
 
     // Clear the list of backedges, so that this method is idempotent. It is
     // called during destruction, and may be additionally called when the
     // script is invalidated.
     backedgeEntries_ = 0;
 }
 
-void
-jit::ToggleBarriers(JS::Zone* zone, bool needs)
-{
-    JSRuntime* rt = zone->runtimeFromActiveCooperatingThread();
-    if (!rt->hasJitRuntime())
-        return;
-
-    for (auto script = zone->cellIter<JSScript>(); !script.done(); script.next()) {
-        if (script->hasIonScript())
-            script->ionScript()->toggleBarriers(needs);
-        if (script->hasBaselineScript())
-            script->baselineScript()->toggleBarriers(needs);
-    }
-
-    if (JitZone* jitZone = zone->jitZone())
-        jitZone->toggleBarriers(needs);
-
-    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
-        if (comp->jitCompartment())
-            comp->jitCompartment()->toggleBarriers(needs);
-    }
-}
-
 namespace js {
 namespace jit {
 
 static void
 OptimizeSinCos(MIRGenerator *mir, MIRGraph &graph)
 {
     // Now, we are looking for:
     // var y = sin(x);
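
For contrast, a compact emulation of the machinery deleted above: each JitCode carried a table of pre-barrier jump offsets, and jit::ToggleBarriers re-patched every IonScript, BaselineScript, and stub in the zone whenever barrier state changed (stand-in types, not the real ones):

    #include <cstddef>
    #include <vector>

    struct FakeJitCode {
        // One flag per pre-barrier site: true = cmp (barrier path live),
        // false = nop-jump (barrier skipped). Stand-in for real code bytes.
        std::vector<bool> siteEnabled;

        void togglePreBarriers(bool enabled) {
            // The real version also had to make the executable pages
            // writable first (MaybeAutoWritableJitCode).
            for (size_t i = 0; i < siteEnabled.size(); i++)
                siteEnabled[i] = enabled;  // ToggleToCmp/ToggleToJmp stand-in
        }
    };

    void toggleBarriersSketch(std::vector<FakeJitCode*>& zoneCode, bool needs) {
        for (FakeJitCode* code : zoneCode)   // O(all jitcode in the zone)
            code->togglePreBarriers(needs);
    }
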
--- a/js/src/jit/Ion.h
+++ b/js/src/jit/Ion.h
@@ -142,18 +142,16 @@ JitExecStatus FastInvoke(JSContext* cx, 
 void Invalidate(TypeZone& types, FreeOp* fop,
                 const RecompileInfoVector& invalid, bool resetUses = true,
                 bool cancelOffThread = true);
 void Invalidate(JSContext* cx, const RecompileInfoVector& invalid, bool resetUses = true,
                 bool cancelOffThread = true);
 void Invalidate(JSContext* cx, JSScript* script, bool resetUses = true,
                 bool cancelOffThread = true);
 
-void ToggleBarriers(JS::Zone* zone, bool needs);
-
 class IonBuilder;
 class MIRGenerator;
 class LIRGraph;
 class CodeGenerator;
 
 MOZ_MUST_USE bool OptimizeMIR(MIRGenerator* mir);
 LIRGraph* GenerateLIR(MIRGenerator* mir);
 CodeGenerator* GenerateCode(MIRGenerator* mir, LIRGraph* lir);
--- a/js/src/jit/IonCacheIRCompiler.cpp
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -525,20 +525,16 @@ IonCacheIRCompiler::compile()
                                            ImmPtr((void*)-1));
     }
     if (stubJitCodeOffset_) {
         Assembler::PatchDataWithValueCheck(CodeLocationLabel(newStubCode, *stubJitCodeOffset_),
                                            ImmPtr(newStubCode.get()),
                                            ImmPtr((void*)-1));
     }
 
-    // All barriers are emitted off-by-default, enable them if needed.
-    if (cx_->zone()->needsIncrementalBarrier())
-        newStubCode->togglePreBarriers(true, DontReprotect);
-
     return newStubCode;
 }
 
 bool
 IonCacheIRCompiler::emitGuardShape()
 {
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     Shape* shape = shapeStubField(reader.stubOffset());
--- a/js/src/jit/IonCode.h
+++ b/js/src/jit/IonCode.h
@@ -38,43 +38,36 @@ class JitCode : public gc::TenuredCell
   protected:
     uint8_t* code_;
     ExecutablePool* pool_;
     uint32_t bufferSize_;             // Total buffer size. Does not include headerSize_.
     uint32_t insnSize_;               // Instruction stream size.
     uint32_t dataSize_;               // Size of the read-only data area.
     uint32_t jumpRelocTableBytes_;    // Size of the jump relocation table.
     uint32_t dataRelocTableBytes_;    // Size of the data relocation table.
-    uint32_t preBarrierTableBytes_;   // Size of the prebarrier table.
     uint8_t headerSize_ : 5;          // Number of bytes allocated before codeStart.
     uint8_t kind_ : 3;                // jit::CodeKind, for the memory reporters.
     bool invalidated_ : 1;            // Whether the code object has been invalidated.
                                       // This is necessary to prevent GC tracing.
     bool hasBytecodeMap_ : 1;         // Whether the code object has been registered with
                                       // native=>bytecode mapping tables.
 
-#if JS_BITS_PER_WORD == 32
-    // Ensure JitCode is gc::Cell aligned.
-    uint32_t padding_;
-#endif
-
     JitCode()
       : code_(nullptr),
         pool_(nullptr)
     { }
     JitCode(uint8_t* code, uint32_t bufferSize, uint32_t headerSize, ExecutablePool* pool,
             CodeKind kind)
       : code_(code),
         pool_(pool),
         bufferSize_(bufferSize),
         insnSize_(0),
         dataSize_(0),
         jumpRelocTableBytes_(0),
         dataRelocTableBytes_(0),
-        preBarrierTableBytes_(0),
         headerSize_(headerSize),
         kind_(kind),
         invalidated_(false),
         hasBytecodeMap_(false)
     {
         MOZ_ASSERT(CodeKind(kind_) == kind);
         MOZ_ASSERT(headerSize_ == headerSize);
     }
@@ -83,19 +76,16 @@ class JitCode : public gc::TenuredCell
         return insnSize_;
     }
     uint32_t jumpRelocTableOffset() const {
         return dataOffset() + dataSize_;
     }
     uint32_t dataRelocTableOffset() const {
         return jumpRelocTableOffset() + jumpRelocTableBytes_;
     }
-    uint32_t preBarrierTableOffset() const {
-        return dataRelocTableOffset() + dataRelocTableBytes_;
-    }
 
   public:
     uint8_t* raw() const {
         return code_;
     }
     uint8_t* rawEnd() const {
         return code_ + insnSize_;
     }
@@ -507,17 +497,16 @@ struct IonScript
         return (IonICEntry*) &bottomBuffer()[sharedStubList_];
     }
     size_t numSharedStubs() const {
         return sharedStubEntries_;
     }
     size_t runtimeSize() const {
         return runtimeSize_;
     }
-    void toggleBarriers(bool enabled, ReprotectCode reprotect = Reprotect);
     void purgeICs(Zone* zone);
     void unlinkFromRuntime(FreeOp* fop);
     void copySnapshots(const SnapshotWriter* writer);
     void copyRecovers(const RecoverWriter* writer);
     void copyBailoutTable(const SnapshotOffset* table);
     void copyConstants(const Value* vp);
     void copySafepointIndices(const SafepointIndex* firstSafepointIndex, MacroAssembler& masm);
     void copyOsiIndices(const OsiIndex* firstOsiIndex, MacroAssembler& masm);
--- a/js/src/jit/IonIC.cpp
+++ b/js/src/jit/IonIC.cpp
@@ -101,29 +101,16 @@ IonIC::trace(JSTracer* trc)
         TraceCacheIRStub(trc, stub, stub->stubInfo());
 
         nextCodeRaw = stub->nextCodeRaw();
     }
 
     MOZ_ASSERT(nextCodeRaw == fallbackLabel_.raw());
 }
 
-void
-IonIC::togglePreBarriers(bool enabled, ReprotectCode reprotect)
-{
-    uint8_t* nextCodeRaw = codeRaw_;
-    for (IonICStub* stub = firstStub_; stub; stub = stub->next()) {
-        JitCode* code = JitCode::FromExecutable(nextCodeRaw);
-        code->togglePreBarriers(enabled, reprotect);
-        nextCodeRaw = stub->nextCodeRaw();
-    }
-
-    MOZ_ASSERT(nextCodeRaw == fallbackLabel_.raw());
-}
-
 /* static */ bool
 IonGetPropertyIC::update(JSContext* cx, HandleScript outerScript, IonGetPropertyIC* ic,
 			 HandleValue val, HandleValue idVal, MutableHandleValue res)
 {
     // Override the return value if we are invalidated (bug 728188).
     IonScript* ionScript = outerScript->ionScript();
     AutoDetectInvalidation adi(cx, res, ionScript);
 
--- a/js/src/jit/IonIC.h
+++ b/js/src/jit/IonIC.h
@@ -118,18 +118,16 @@ class IonIC
 
     // Discard all stubs and reset the ICState.
     void reset(Zone* zone);
 
     ICState& state() {
         return state_;
     }
 
-    void togglePreBarriers(bool enabled, ReprotectCode reprotect);
-
     CacheKind kind() const { return kind_; }
     uint8_t** codeRawPtr() { return &codeRaw_; }
 
     bool idempotent() const { return idempotent_; }
     void setIdempotent() { idempotent_ = true; }
 
     void setFallbackLabel(CodeOffset fallbackLabel) { fallbackLabel_ = fallbackLabel; }
     void setRejoinLabel(CodeOffset rejoinLabel) { rejoinLabel_ = rejoinLabel; }
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -417,17 +417,16 @@ class JitZone
                                                  ReadBarrieredJitCode,
                                                  CacheIRStubKey,
                                                  SystemAllocPolicy,
                                                  IcStubCodeMapGCPolicy<CacheIRStubKey>>;
     BaselineCacheIRStubCodeMap baselineCacheIRStubCodes_;
 
   public:
     MOZ_MUST_USE bool init(JSContext* cx);
-    void toggleBarriers(bool enabled);
     void sweep(FreeOp* fop);
 
     void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
                                 size_t* jitZone,
                                 size_t* baselineStubsOptimized,
                                 size_t* cachedCFG) const;
 
     OptimizedICStubSpace* optimizedStubSpace() {
@@ -571,18 +570,16 @@ class JitCompartment
         MOZ_ASSERT(bailoutReturnStubInfo_[kind].addr == nullptr);
         bailoutReturnStubInfo_[kind] = BailoutReturnStubInfo { addr, key };
     }
     void* bailoutReturnAddr(BailoutReturnStub kind) {
         MOZ_ASSERT(bailoutReturnStubInfo_[kind].addr);
         return bailoutReturnStubInfo_[kind].addr;
     }
 
-    void toggleBarriers(bool enabled);
-
     JitCompartment();
     ~JitCompartment();
 
     MOZ_MUST_USE bool initialize(JSContext* cx);
 
     // Initialize code stubs only used by Ion, not Baseline.
     MOZ_MUST_USE bool ensureIonStubsExist(JSContext* cx);
 
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1639,44 +1639,32 @@ class MacroAssembler : public MacroAssem
     void store32(const RegisterOrInt32Constant& key, const Address& dest) {
         if (key.isRegister())
             store32(key.reg(), dest);
         else
             store32(Imm32(key.constant()), dest);
     }
 
     template <typename T>
-    void callPreBarrier(const T& address, MIRType type) {
+    void guardedCallPreBarrier(const T& address, MIRType type) {
         Label done;
 
+        branchTestNeedsIncrementalBarrier(Assembler::Zero, &done);
+
         if (type == MIRType::Value)
             branchTestGCThing(Assembler::NotEqual, address, &done);
 
         Push(PreBarrierReg);
         computeEffectiveAddress(address, PreBarrierReg);
 
         const JitRuntime* rt = GetJitContext()->runtime->jitRuntime();
         JitCode* preBarrier = rt->preBarrier(type);
 
         call(preBarrier);
         Pop(PreBarrierReg);
-
-        bind(&done);
-    }
-
-    template <typename T>
-    void patchableCallPreBarrier(const T& address, MIRType type) {
-        Label done;
-
-        // All barriers are off by default.
-        // They are enabled if necessary at the end of CodeGenerator::generate().
-        CodeOffset nopJump = toggledJump(&done);
-        writePrebarrierOffset(nopJump);
-
-        callPreBarrier(address, type);
         jump(&done);
 
         haltingAlign(8);
         bind(&done);
     }
 
     template<typename T>
     void loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp, Label* fail,
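
guardedCallPreBarrier is the heart of the patch. A C++ emulation of the control flow it emits (illustrative; the real method generates machine code through the MacroAssembler, and branchTestNeedsIncrementalBarrier is assumed here to test the zone's 32-bit flag):

    #include <cstdint>

    using PreBarrierFn = void (*)(void* address);

    void guardedCallPreBarrierSketch(const uint32_t* needsIncrementalBarrier,
                                     void* address, bool holdsGCThing,
                                     PreBarrierFn preBarrier) {
        // branchTestNeedsIncrementalBarrier(Zero, &done): when the zone flag
        // is clear, skip everything. This replaces the old toggled nop-jump
        // that had to be patched via the prebarrier offset table.
        if (!*needsIncrementalBarrier)
            return;
        // branchTestGCThing(NotEqual, address, &done): Values holding no GC
        // thing need no pre-barrier.
        if (!holdsGCThing)
            return;
        // Push PreBarrierReg, compute the effective address, call the
        // per-MIRType barrier trampoline, pop - as in the emitter above.
        preBarrier(address);
    }
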
--- a/js/src/jit/SharedIC.cpp
+++ b/js/src/jit/SharedIC.cpp
@@ -502,20 +502,16 @@ ICStubCompiler::getStubCode()
     if (!generateStubCode(masm))
         return nullptr;
     Linker linker(masm);
     AutoFlushICache afc("getStubCode");
     Rooted<JitCode*> newStubCode(cx, linker.newCode<CanGC>(cx, BASELINE_CODE));
     if (!newStubCode)
         return nullptr;
 
-    // All barriers are emitted off-by-default, enable them if needed.
-    if (cx->zone()->needsIncrementalBarrier())
-        newStubCode->togglePreBarriers(true, DontReprotect);
-
     // Cache newly compiled stubcode.
     if (!comp->putStubCode(cx, stubKey, newStubCode))
         return nullptr;
 
     // After generating code, run postGenerateStubCode().  We must not fail
     // after this point.
     postGenerateStubCode(masm, newStubCode);
 
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -922,23 +922,16 @@ Assembler::copyJumpRelocationTable(uint8
 void
 Assembler::copyDataRelocationTable(uint8_t* dest)
 {
     if (dataRelocations_.length())
         memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
 }
 
 void
-Assembler::copyPreBarrierTable(uint8_t* dest)
-{
-    if (preBarriers_.length())
-        memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
-}
-
-void
 Assembler::trace(JSTracer* trc)
 {
     for (size_t i = 0; i < jumps_.length(); i++) {
         RelativePatch& rp = jumps_[i];
         if (rp.kind() == Relocation::JITCODE) {
             JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target());
             TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
             MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target()));
@@ -1394,18 +1387,17 @@ VFPRegister::isMissing() const
 
 
 bool
 Assembler::oom() const
 {
     return AssemblerShared::oom() ||
            m_buffer.oom() ||
            jumpRelocations_.oom() ||
-           dataRelocations_.oom() ||
-           preBarriers_.oom();
+           dataRelocations_.oom();
 }
 
 // Size of the instruction stream, in bytes. Including pools. This function
 // expects all pools that need to be placed have been placed. If they haven't
// then we need to go and flush the pools :(
 size_t
 Assembler::size() const
 {
@@ -1418,30 +1410,23 @@ Assembler::jumpRelocationTableBytes() co
     return jumpRelocations_.length();
 }
 size_t
 Assembler::dataRelocationTableBytes() const
 {
     return dataRelocations_.length();
 }
 
-size_t
-Assembler::preBarrierTableBytes() const
-{
-    return preBarriers_.length();
-}
-
 // Size of the data table, in bytes.
 size_t
 Assembler::bytesNeeded() const
 {
     return size() +
         jumpRelocationTableBytes() +
-        dataRelocationTableBytes() +
-        preBarrierTableBytes();
+        dataRelocationTableBytes();
 }
 
 #ifdef JS_DISASM_ARM
 
 void
 Assembler::spewInst(Instruction* i)
 {
     disasm::NameConverter converter;
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -1280,17 +1280,16 @@ class Assembler : public AssemblerShared
     };
 
     // TODO: this should actually be a pool-like object. It is currently a big
     // hack, and probably shouldn't exist.
     js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
 
     CompactBufferWriter jumpRelocations_;
     CompactBufferWriter dataRelocations_;
-    CompactBufferWriter preBarriers_;
 
     ARMBuffer m_buffer;
 
 #ifdef JS_DISASM_ARM
   private:
     class SpewNodes {
         struct Node {
             uint32_t key;
@@ -1358,19 +1357,16 @@ class Assembler : public AssemblerShared
     void writeDataRelocation(ImmGCPtr ptr) {
         if (ptr.value) {
             if (gc::IsInsideNursery(ptr.value))
                 embedsNurseryPointers_ = true;
             if (ptr.value)
                 dataRelocations_.writeUnsigned(nextOffset().getOffset());
         }
     }
-    void writePrebarrierOffset(CodeOffset label) {
-        preBarriers_.writeUnsigned(label.offset());
-    }
 
     enum RelocBranchStyle {
         B_MOVWT,
         B_LDR_BX,
         B_LDR,
         B_MOVW_ADD
     };
 
@@ -1410,24 +1406,22 @@ class Assembler : public AssemblerShared
 
   private:
     bool isFinished;
   public:
     void finish();
     bool asmMergeWith(Assembler& other);
     void copyJumpRelocationTable(uint8_t* dest);
     void copyDataRelocationTable(uint8_t* dest);
-    void copyPreBarrierTable(uint8_t* dest);
 
     // Size of the instruction stream, in bytes, after pools are flushed.
     size_t size() const;
     // Size of the jump relocation table, in bytes.
     size_t jumpRelocationTableBytes() const;
     size_t dataRelocationTableBytes() const;
-    size_t preBarrierTableBytes() const;
 
     // Size of the data table, in bytes.
     size_t bytesNeeded() const;
 
     // Write a single instruction into the instruction stream.  Very hot,
     // inlined for performance
     MOZ_ALWAYS_INLINE BufferOffset writeInst(uint32_t x) {
         BufferOffset offs = m_buffer.putInt(x);
--- a/js/src/jit/arm/SharedICHelpers-arm.h
+++ b/js/src/jit/arm/SharedICHelpers-arm.h
@@ -251,19 +251,19 @@ EmitUnstowICValues(MacroAssembler& masm,
     }
     masm.adjustFrame(-values * sizeof(Value));
 }
 
 template <typename AddrType>
 inline void
 EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
 {
-    // On ARM, lr is clobbered by patchableCallPreBarrier. Save it first.
+    // On ARM, lr is clobbered by guardedCallPreBarrier. Save it first.
     masm.push(lr);
-    masm.patchableCallPreBarrier(addr, type);
+    masm.guardedCallPreBarrier(addr, type);
     masm.pop(lr);
 }
 
 inline void
 EmitStubGuardFailure(MacroAssembler& masm)
 {
     // Load next stub into ICStubReg.
     masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -213,53 +213,44 @@ class Assembler : public vixl::Assembler
     void bindLater(Label* label, wasm::TrapDesc target) {
         MOZ_CRASH("NYI");
     }
 
     bool oom() const {
         return AssemblerShared::oom() ||
             armbuffer_.oom() ||
             jumpRelocations_.oom() ||
-            dataRelocations_.oom() ||
-            preBarriers_.oom();
+            dataRelocations_.oom();
     }
 
     void disableProtection() {}
     void enableProtection() {}
     void setLowerBoundForProtection(size_t) {}
     void unprotectRegion(unsigned char*, size_t) {}
     void reprotectRegion(unsigned char*, size_t) {}
 
     void copyJumpRelocationTable(uint8_t* dest) const {
         if (jumpRelocations_.length())
             memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
     }
     void copyDataRelocationTable(uint8_t* dest) const {
         if (dataRelocations_.length())
             memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
     }
-    void copyPreBarrierTable(uint8_t* dest) const {
-        if (preBarriers_.length())
-            memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
-    }
 
     size_t jumpRelocationTableBytes() const {
         return jumpRelocations_.length();
     }
     size_t dataRelocationTableBytes() const {
         return dataRelocations_.length();
     }
-    size_t preBarrierTableBytes() const {
-        return preBarriers_.length();
-    }
     size_t bytesNeeded() const {
         return SizeOfCodeGenerated() +
             jumpRelocationTableBytes() +
-            dataRelocationTableBytes() +
-            preBarrierTableBytes();
+            dataRelocationTableBytes();
     }
 
     void processCodeLabels(uint8_t* rawCode) {
         for (size_t i = 0; i < codeLabels_.length(); i++) {
             CodeLabel label = codeLabels_[i];
             Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
         }
     }
@@ -433,17 +424,16 @@ class Assembler : public vixl::Assembler
     // List of jumps for which the target is either unknown until finalization,
     // or cannot be known due to GC. Each entry here requires a unique entry
     // in the extended jump table, and is patched at finalization.
     js::Vector<RelativePatch, 8, SystemAllocPolicy> pendingJumps_;
 
     // Final output formatters.
     CompactBufferWriter jumpRelocations_;
     CompactBufferWriter dataRelocations_;
-    CompactBufferWriter preBarriers_;
 };
 
 static const uint32_t NumIntArgRegs = 8;
 static const uint32_t NumFloatArgRegs = 8;
 
 class ABIArgGenerator
 {
   public:
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -1837,20 +1837,16 @@ class MacroAssemblerCompat : public vixl
         if (val.isGCThing()) {
             gc::Cell* cell = val.toGCThing();
             if (cell && gc::IsInsideNursery(cell))
                 embedsNurseryPointers_ = true;
             dataRelocations_.writeUnsigned(load.getOffset());
         }
     }
 
-    void writePrebarrierOffset(CodeOffset label) {
-        preBarriers_.writeUnsigned(label.offset());
-    }
-
     void computeEffectiveAddress(const Address& address, Register dest) {
         Add(ARMRegister(dest, 64), ARMRegister(address.base, 64), Operand(address.offset));
     }
     void computeEffectiveAddress(const BaseIndex& address, Register dest) {
         ARMRegister dest64(dest, 64);
         ARMRegister base64(address.base, 64);
         ARMRegister index64(address.index, 64);
 
--- a/js/src/jit/arm64/SharedICHelpers-arm64.h
+++ b/js/src/jit/arm64/SharedICHelpers-arm64.h
@@ -233,19 +233,19 @@ EmitUnstowICValues(MacroAssembler& masm,
     }
     masm.adjustFrame(-values * sizeof(Value));
 }
 
 template <typename AddrType>
 inline void
 EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
 {
-    // On AArch64, lr is clobbered by patchableCallPreBarrier. Save it first.
+    // On AArch64, lr is clobbered by guardedCallPreBarrier. Save it first.
     masm.push(lr);
-    masm.patchableCallPreBarrier(addr, type);
+    masm.guardedCallPreBarrier(addr, type);
     masm.pop(lr);
 }
 
 inline void
 EmitStubGuardFailure(MacroAssembler& masm)
 {
     // Load next stub into ICStubReg.
     masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);
--- a/js/src/jit/mips-shared/Assembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
@@ -129,23 +129,16 @@ AssemblerMIPSShared::copyJumpRelocationT
 void
 AssemblerMIPSShared::copyDataRelocationTable(uint8_t* dest)
 {
     if (dataRelocations_.length())
         memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
 }
 
 void
-AssemblerMIPSShared::copyPreBarrierTable(uint8_t* dest)
-{
-    if (preBarriers_.length())
-        memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
-}
-
-void
 AssemblerMIPSShared::processCodeLabels(uint8_t* rawCode)
 {
     for (size_t i = 0; i < codeLabels_.length(); i++) {
         CodeLabel label = codeLabels_[i];
         Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
     }
 }
 
@@ -235,18 +228,17 @@ BOffImm16::getDest(Instruction* src) con
 }
 
 bool
 AssemblerMIPSShared::oom() const
 {
     return AssemblerShared::oom() ||
            m_buffer.oom() ||
            jumpRelocations_.oom() ||
-           dataRelocations_.oom() ||
-           preBarriers_.oom();
+           dataRelocations_.oom();
 }
 
 // Size of the instruction stream, in bytes.
 size_t
 AssemblerMIPSShared::size() const
 {
     return m_buffer.size();
 }
@@ -259,30 +251,23 @@ AssemblerMIPSShared::jumpRelocationTable
 }
 
 size_t
 AssemblerMIPSShared::dataRelocationTableBytes() const
 {
     return dataRelocations_.length();
 }
 
-size_t
-AssemblerMIPSShared::preBarrierTableBytes() const
-{
-    return preBarriers_.length();
-}
-
 // Size of the data table, in bytes.
 size_t
 AssemblerMIPSShared::bytesNeeded() const
 {
     return size() +
            jumpRelocationTableBytes() +
-           dataRelocationTableBytes() +
-           preBarrierTableBytes();
+           dataRelocationTableBytes();
 }
 
 // write a blob of binary into the instruction stream
 BufferOffset
 AssemblerMIPSShared::writeInst(uint32_t x, uint32_t* dest)
 {
     if (dest == nullptr)
         return m_buffer.putInt(x);
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -872,17 +872,16 @@ class AssemblerMIPSShared : public Assem
         { }
     };
 
     js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
     js::Vector<uint32_t, 8, SystemAllocPolicy> longJumps_;
 
     CompactBufferWriter jumpRelocations_;
     CompactBufferWriter dataRelocations_;
-    CompactBufferWriter preBarriers_;
 
     MIPSBufferWithExecutableCopy m_buffer;
 
   public:
     AssemblerMIPSShared()
       : m_buffer(),
         isFinished(false)
     { }
@@ -898,19 +897,16 @@ class AssemblerMIPSShared : public Assem
     // before to recover the pointer, and not after.
     void writeDataRelocation(ImmGCPtr ptr) {
         if (ptr.value) {
             if (gc::IsInsideNursery(ptr.value))
                 embedsNurseryPointers_ = true;
             dataRelocations_.writeUnsigned(nextOffset().getOffset());
         }
     }
-    void writePrebarrierOffset(CodeOffset label) {
-        preBarriers_.writeUnsigned(label.offset());
-    }
 
   public:
     bool oom() const;
 
     void disableProtection() {}
     void enableProtection() {}
     void setLowerBoundForProtection(size_t) {}
     void unprotectRegion(unsigned char*, size_t) {}
@@ -926,24 +922,22 @@ class AssemblerMIPSShared : public Assem
   protected:
     bool isFinished;
   public:
     void finish();
     bool asmMergeWith(const AssemblerMIPSShared& other);
     void executableCopy(void* buffer, bool flushICache = true);
     void copyJumpRelocationTable(uint8_t* dest);
     void copyDataRelocationTable(uint8_t* dest);
-    void copyPreBarrierTable(uint8_t* dest);
 
     // Size of the instruction stream, in bytes.
     size_t size() const;
     // Size of the jump relocation table, in bytes.
     size_t jumpRelocationTableBytes() const;
     size_t dataRelocationTableBytes() const;
-    size_t preBarrierTableBytes() const;
 
     // Size of the data table, in bytes.
     size_t bytesNeeded() const;
 
     // Write a blob of binary into the instruction stream *OR*
     // into a destination address. If dest is nullptr (the default), then the
     // instruction gets written into the instruction stream. If dest is not null
     // it is interpreted as a pointer to the location that we want the
--- a/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
+++ b/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
@@ -255,19 +255,19 @@ EmitUnstowICValues(MacroAssembler& masm,
     }
     masm.adjustFrame(-values * sizeof(Value));
 }
 
 template <typename AddrType>
 inline void
 EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
 {
-    // On MIPS, $ra is clobbered by patchableCallPreBarrier. Save it first.
+    // On MIPS, $ra is clobbered by guardedCallPreBarrier. Save it first.
     masm.push(ra);
-    masm.patchableCallPreBarrier(addr, type);
+    masm.guardedCallPreBarrier(addr, type);
     masm.pop(ra);
 }
 
 inline void
 EmitStubGuardFailure(MacroAssembler& masm)
 {
     // Load next stub into ICStubReg
     masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -221,18 +221,16 @@ class MacroAssemblerNone : public Assemb
     void breakpoint() { MOZ_CRASH(); }
     void abiret() { MOZ_CRASH(); }
     void ret() { MOZ_CRASH(); }
 
     CodeOffset toggledJump(Label*) { MOZ_CRASH(); }
     CodeOffset toggledCall(JitCode*, bool) { MOZ_CRASH(); }
     static size_t ToggledCallSize(uint8_t*) { MOZ_CRASH(); }
 
-    void writePrebarrierOffset(CodeOffset) { MOZ_CRASH(); }
-
     void finish() { MOZ_CRASH(); }
 
     template <typename T, typename S> void moveValue(T, S) { MOZ_CRASH(); }
     template <typename T, typename S, typename U> void moveValue(T, S, U) { MOZ_CRASH(); }
     template <typename T, typename S> void storeValue(T, S) { MOZ_CRASH(); }
     template <typename T, typename S, typename U> void storeValue(T, S, U) { MOZ_CRASH(); }
     template <typename T, typename S> void loadValue(T, S) { MOZ_CRASH(); }
     template <typename T> void pushValue(T) { MOZ_CRASH(); }
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1649,27 +1649,27 @@ CodeGeneratorShared::visitWasmStoreGloba
     masm.store64(value, addr);
 }
 
 void
 CodeGeneratorShared::emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment)
 {
     if (index->isConstant()) {
         Address address(base, ToInt32(index) * sizeof(Value) + offsetAdjustment);
-        masm.patchableCallPreBarrier(address, MIRType::Value);
+        masm.guardedCallPreBarrier(address, MIRType::Value);
     } else {
         BaseIndex address(base, ToRegister(index), TimesEight, offsetAdjustment);
-        masm.patchableCallPreBarrier(address, MIRType::Value);
+        masm.guardedCallPreBarrier(address, MIRType::Value);
     }
 }
 
 void
 CodeGeneratorShared::emitPreBarrier(Address address)
 {
-    masm.patchableCallPreBarrier(address, MIRType::Value);
+    masm.guardedCallPreBarrier(address, MIRType::Value);
 }
 
 Label*
 CodeGeneratorShared::labelForBackedgeWithImplicitCheck(MBasicBlock* mir)
 {
     // If this is a loop backedge to a loop header with an implicit interrupt
     // check, use a patchable jump. Skip this search if compiling without a
     // script for wasm, as there will be no interrupt check instruction.
--- a/js/src/jit/x64/SharedICHelpers-x64.h
+++ b/js/src/jit/x64/SharedICHelpers-x64.h
@@ -252,17 +252,17 @@ EmitUnstowICValues(MacroAssembler& masm,
     }
     masm.adjustFrame(-values * sizeof(Value));
 }
 
 template <typename AddrType>
 inline void
 EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
 {
-    masm.patchableCallPreBarrier(addr, type);
+    masm.guardedCallPreBarrier(addr, type);
 }
 
 inline void
 EmitStubGuardFailure(MacroAssembler& masm)
 {
     // Load next stub into ICStubReg
     masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);
 
--- a/js/src/jit/x86-shared/Assembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.cpp
@@ -34,23 +34,16 @@ AssemblerX86Shared::copyJumpRelocationTa
 
 void
 AssemblerX86Shared::copyDataRelocationTable(uint8_t* dest)
 {
     if (dataRelocations_.length())
         memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
 }
 
-void
-AssemblerX86Shared::copyPreBarrierTable(uint8_t* dest)
-{
-    if (preBarriers_.length())
-        memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
-}
-
 static void
 TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader)
 {
     while (reader.more()) {
         size_t offset = reader.readUnsigned();
         void* ptr = X86Encoding::GetPointer(buffer + offset);
 
 #ifdef JS_PUNBOX64
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -262,28 +262,24 @@ class AssemblerX86Shared : public Assemb
             target(target),
             kind(kind)
         { }
     };
 
     Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
     CompactBufferWriter jumpRelocations_;
     CompactBufferWriter dataRelocations_;
-    CompactBufferWriter preBarriers_;
 
     void writeDataRelocation(ImmGCPtr ptr) {
         if (ptr.value) {
             if (gc::IsInsideNursery(ptr.value))
                 embedsNurseryPointers_ = true;
             dataRelocations_.writeUnsigned(masm.currentOffset());
         }
     }
-    void writePrebarrierOffset(CodeOffset label) {
-        preBarriers_.writeUnsigned(label.offset());
-    }
 
   protected:
     X86Encoding::BaseAssemblerSpecific masm;
 
     typedef X86Encoding::JmpSrc JmpSrc;
     typedef X86Encoding::JmpDst JmpDst;
 
   public:
@@ -400,18 +396,17 @@ class AssemblerX86Shared : public Assemb
 
     // MacroAssemblers hold onto gcthings, so they are traced by the GC.
     void trace(JSTracer* trc);
 
     bool oom() const {
         return AssemblerShared::oom() ||
                masm.oom() ||
                jumpRelocations_.oom() ||
-               dataRelocations_.oom() ||
-               preBarriers_.oom();
+               dataRelocations_.oom();
     }
 
     void disableProtection() { masm.disableProtection(); }
     void enableProtection() { masm.enableProtection(); }
     void setLowerBoundForProtection(size_t size) {
         masm.setLowerBoundForProtection(size);
     }
     void unprotectRegion(unsigned char* first, size_t size) {
@@ -434,38 +429,33 @@ class AssemblerX86Shared : public Assemb
         MOZ_ASSERT(other.jumps_.length() == 0);
         if (!AssemblerShared::asmMergeWith(masm.size(), other))
             return false;
         return masm.appendBuffer(other.masm);
     }
     void processCodeLabels(uint8_t* rawCode);
     void copyJumpRelocationTable(uint8_t* dest);
     void copyDataRelocationTable(uint8_t* dest);
-    void copyPreBarrierTable(uint8_t* dest);
 
     // Size of the instruction stream, in bytes.
     size_t size() const {
         return masm.size();
     }
     // Size of the jump relocation table, in bytes.
     size_t jumpRelocationTableBytes() const {
         return jumpRelocations_.length();
     }
     size_t dataRelocationTableBytes() const {
         return dataRelocations_.length();
     }
-    size_t preBarrierTableBytes() const {
-        return preBarriers_.length();
-    }
     // Size of the data table, in bytes.
     size_t bytesNeeded() const {
         return size() +
                jumpRelocationTableBytes() +
-               dataRelocationTableBytes() +
-               preBarrierTableBytes();
+               dataRelocationTableBytes();
     }
 
   public:
     void haltingAlign(int alignment) {
         masm.haltingAlign(alignment);
     }
     void nopAlign(int alignment) {
         masm.nopAlign(alignment);
--- a/js/src/jit/x86/SharedICHelpers-x86.h
+++ b/js/src/jit/x86/SharedICHelpers-x86.h
@@ -248,17 +248,17 @@ EmitUnstowICValues(MacroAssembler& masm,
     }
     masm.adjustFrame(-values * sizeof(Value));
 }
 
 template <typename AddrType>
 inline void
 EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
 {
-    masm.patchableCallPreBarrier(addr, type);
+    masm.guardedCallPreBarrier(addr, type);
 }
 
 inline void
 EmitStubGuardFailure(MacroAssembler& masm)
 {
     // Load next stub into ICStubReg
     masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);
 
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -4625,17 +4625,17 @@ GCRuntime::getNextSweepGroup()
     if (!isIncremental)
         ZoneComponentFinder::mergeGroups(currentSweepGroup);
 
     if (abortSweepAfterCurrentGroup) {
         MOZ_ASSERT(!isIncremental);
         for (GCSweepGroupIter zone(rt); !zone.done(); zone.next()) {
             MOZ_ASSERT(!zone->gcNextGraphComponent);
             MOZ_ASSERT(zone->isGCMarking());
-            zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
+            zone->setNeedsIncrementalBarrier(false);
             zone->setGCState(Zone::NoGC);
             zone->gcGrayRoots().clearAndFree();
         }
 
         for (GCCompartmentGroupIter comp(rt); !comp.done(); comp.next())
             ResetGrayList(comp);
 
         abortSweepAfterCurrentGroup = false;
@@ -5904,17 +5904,17 @@ GCRuntime::resetIncrementalGC(gc::AbortR
         marker.stop();
         clearBufferedGrayRoots();
 
         for (GCCompartmentsIter c(rt); !c.done(); c.next())
             ResetGrayList(c);
 
         for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
             MOZ_ASSERT(zone->isGCMarking());
-            zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
+            zone->setNeedsIncrementalBarrier(false);
             zone->setGCState(Zone::NoGC);
         }
 
         blocksToFreeAfterSweeping.ref().freeAll();
 
         incrementalState = State::NotActive;
 
         MOZ_ASSERT(!marker.shouldCheckCompartments());
@@ -6022,32 +6022,32 @@ AutoGCSlice::AutoGCSlice(JSRuntime* rt)
         /*
          * Clear needsIncrementalBarrier early so we don't do any write
          * barriers during GC. We don't need to update the Ion barriers (which
          * is expensive) because Ion code doesn't run during GC. If need be,
          * we'll update the Ion barriers in ~AutoGCSlice.
          */
         if (zone->isGCMarking()) {
             MOZ_ASSERT(zone->needsIncrementalBarrier());
-            zone->setNeedsIncrementalBarrier(false, Zone::DontUpdateJit);
+            zone->setNeedsIncrementalBarrier(false);
         } else {
             MOZ_ASSERT(!zone->needsIncrementalBarrier());
         }
     }
 }
 
 AutoGCSlice::~AutoGCSlice()
 {
     /* We can't use GCZonesIter if this is the end of the last slice. */
     for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
         if (zone->isGCMarking()) {
-            zone->setNeedsIncrementalBarrier(true, Zone::UpdateJit);
+            zone->setNeedsIncrementalBarrier(true);
             zone->arenas.purge();
         } else {
-            zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
+            zone->setNeedsIncrementalBarrier(false);
         }
     }
 }
 
 void
 GCRuntime::pushZealSelectedObjects()
 {
 #ifdef JS_GC_ZEAL
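
Finally, the GC-slice bookkeeping: with patching gone, pausing and resuming barriers around a slice reduces to plain stores of the flag, which is why the DontUpdateJit/UpdateJit distinction could be dropped everywhere above. A hedged RAII sketch:

    #include <cstdint>

    struct SliceZone {
        uint32_t needsIncrementalBarrier_ = 1;  // marking, barriers on
        bool isGCMarking = true;
    };

    // Illustrative counterpart of AutoGCSlice: clear the flag on entry so no
    // write barriers run during the slice, restore it on exit. No jitcode is
    // touched in either direction.
    class AutoGCSliceSketch {
        SliceZone& zone_;
      public:
        explicit AutoGCSliceSketch(SliceZone& zone) : zone_(zone) {
            if (zone_.isGCMarking)
                zone_.needsIncrementalBarrier_ = 0;
        }
        ~AutoGCSliceSketch() {
            zone_.needsIncrementalBarrier_ = zone_.isGCMarking ? 1 : 0;
        }
    };
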