Bug 1366375 - Add CacheIR stub for optimizing calls to array_push. r=jandem
authorKannan Vijayan <kvijayan@mozilla.com>
Tue, 25 Jul 2017 11:28:38 -0400
changeset 370833 5bb170d708758ed504199cf0c7b0583295c07966
parent 370832 0318e26f75c82bf5c0938e1b616f623e2f55a5f9
child 370834 d499efc1cb2cedeed2b63d4dea15b59a5fdbcc0e
push id92938
push userkvijayan@mozilla.com
push dateTue, 25 Jul 2017 15:28:44 +0000
treeherdermozilla-inbound@5bb170d70875 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjandem
bugs1366375
milestone56.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1366375 - Add CacheIR stub for optimizing calls to array_push. r=jandem
js/src/jit/BaselineCacheIRCompiler.cpp
js/src/jit/BaselineCacheIRCompiler.h
js/src/jit/BaselineIC.cpp
js/src/jit/BaselineIC.h
js/src/jit/CacheIR.cpp
js/src/jit/CacheIR.h
js/src/jit/ICState.h
js/src/jit/IonCacheIRCompiler.cpp
js/src/jit/SharedIC.cpp
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -1502,16 +1502,129 @@ BaselineCacheIRCompiler::emitStoreDenseE
     masm.bind(&doStore);
     masm.storeValue(val, element);
 
     emitPostBarrierElement(obj, val, scratch, index);
     return true;
 }
 
 bool
+BaselineCacheIRCompiler::emitArrayPush()
+{
+    ObjOperandId objId = reader.objOperandId();
+    ValOperandId rhsId = reader.valOperandId();
+
+    // Allocate the fixed registers first. These need to be fixed for
+    // callTypeUpdateIC.
+    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
+    ValueOperand val = allocator.useFixedValueRegister(masm, rhsId, R0);
+
+    Register obj = allocator.useRegister(masm, objId);
+    AutoScratchRegister scratchLength(allocator, masm);
+
+    FailurePath* failure;
+    if (!addFailurePath(&failure))
+        return false;
+
+    // Load obj->elements in scratch.
+    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+    masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratchLength);
+
+    BaseObjectElementIndex element(scratch, scratchLength);
+    Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
+    Address elementsFlags(scratch, ObjectElements::offsetOfFlags());
+
+    // Check for copy-on-write or frozen elements.
+    masm.branchTest32(Assembler::NonZero, elementsFlags,
+                      Imm32(ObjectElements::COPY_ON_WRITE |
+                            ObjectElements::FROZEN),
+                      failure->label());
+
+    // Fail if length != initLength.
+    masm.branch32(Assembler::NotEqual, initLength, scratchLength, failure->label());
+
+    // If scratchLength < capacity, we can add a dense element inline. If not we
+    // need to allocate more elements.
+    Label capacityOk;
+    Address capacity(scratch, ObjectElements::offsetOfCapacity());
+    masm.branch32(Assembler::Above, capacity, scratchLength, &capacityOk);
+
+    // Check for non-writable array length. We only have to do this if
+    // index >= capacity.
+    masm.branchTest32(Assembler::NonZero, elementsFlags,
+                      Imm32(ObjectElements::NONWRITABLE_ARRAY_LENGTH),
+                      failure->label());
+
+    LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
+    save.takeUnchecked(scratch);
+    masm.PushRegsInMask(save);
+
+    masm.setupUnalignedABICall(scratch);
+    masm.loadJSContext(scratch);
+    masm.passABIArg(scratch);
+    masm.passABIArg(obj);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NativeObject::addDenseElementDontReportOOM));
+    masm.mov(ReturnReg, scratch);
+
+    masm.PopRegsInMask(save);
+    masm.branchIfFalseBool(scratch, failure->label());
+
+    // Load the reallocated elements pointer.
+    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+    masm.bind(&capacityOk);
+
+    // Check if we have to convert a double element.
+    Label noConversion;
+    masm.branchTest32(Assembler::Zero, elementsFlags,
+                      Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
+                      &noConversion);
+
+    // We need to convert int32 values being stored into doubles. Note that
+    // double arrays are only created by IonMonkey, so if we have no FP support
+    // Ion is disabled and there should be no double arrays.
+    if (cx_->runtime()->jitSupportsFloatingPoint) {
+        // It's fine to convert the value in place in Baseline. We can't do
+        // this in Ion.
+        masm.convertInt32ValueToDouble(val);
+    } else {
+        masm.assumeUnreachable("There shouldn't be double arrays when there is no FP support.");
+    }
+
+    masm.bind(&noConversion);
+
+    // Call the type update IC. After this everything must be infallible as we
+    // don't save all registers here.
+    LiveGeneralRegisterSet saveRegs;
+    saveRegs.add(obj);
+    saveRegs.add(val);
+    if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
+        return false;
+
+    // Reload obj->elements as callTypeUpdateIC used the scratch register.
+    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
+
+    // Increment initLength and length.
+    Address length(scratch, ObjectElements::offsetOfLength());
+    masm.add32(Imm32(1), initLength);
+    masm.load32(length, scratchLength);
+    masm.add32(Imm32(1), length);
+
+    // Store the value.
+    masm.storeValue(val, element);
+    emitPostBarrierElement(obj, val, scratch, scratchLength);
+
+    // Return value is new length.
+    masm.add32(Imm32(1), scratchLength);
+    masm.tagValue(JSVAL_TYPE_INT32, scratchLength, val);
+
+    return true;
+}
+
+bool
 BaselineCacheIRCompiler::emitStoreTypedElement()
 {
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     Register index = allocator.useRegister(masm, reader.int32OperandId());
     ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
 
     TypedThingLayout layout = reader.typedThingLayout();
     Scalar::Type type = reader.scalarType();
@@ -2093,60 +2206,44 @@ BaselineCacheIRCompiler::init(CacheKind 
     allocator.initAvailableRegs(available);
     outputUnchecked_.emplace(R0);
     return true;
 }
 
 static const size_t MaxOptimizedCacheIRStubs = 16;
 
 ICStub*
-jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
-                               CacheKind kind, ICStubEngine engine, JSScript* outerScript,
-                               ICFallbackStub* stub, bool* attached)
+js::jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
+                                   CacheKind kind, BaselineCacheIRStubKind stubKind,
+                                   ICStubEngine engine, JSScript* outerScript,
+                                   ICFallbackStub* stub, bool* attached)
 {
     // We shouldn't GC or report OOM (or any other exception) here.
     AutoAssertNoPendingException aanpe(cx);
     JS::AutoCheckCannotGC nogc;
 
     MOZ_ASSERT(!*attached);
 
     if (writer.failed())
         return nullptr;
 
     // Just a sanity check: the caller should ensure we don't attach an
     // unlimited number of stubs.
     MOZ_ASSERT(stub->numOptimizedStubs() < MaxOptimizedCacheIRStubs);
 
-    enum class CacheIRStubKind { Regular, Monitored, Updated };
-
-    uint32_t stubDataOffset;
-    CacheIRStubKind stubKind;
-    switch (kind) {
-      case CacheKind::Compare:
-      case CacheKind::In:
-      case CacheKind::HasOwn:
-      case CacheKind::BindName:
-      case CacheKind::TypeOf:
-      case CacheKind::GetIterator:
-        stubDataOffset = sizeof(ICCacheIR_Regular);
-        stubKind = CacheIRStubKind::Regular;
+    uint32_t stubDataOffset = 0;
+    switch (stubKind) {
+      case BaselineCacheIRStubKind::Monitored:
+        stubDataOffset = sizeof(ICCacheIR_Monitored);
         break;
-      case CacheKind::GetProp:
-      case CacheKind::GetElem:
-      case CacheKind::GetName:
-      case CacheKind::GetPropSuper:
-      case CacheKind::GetElemSuper:
-      case CacheKind::Call:
-        stubDataOffset = sizeof(ICCacheIR_Monitored);
-        stubKind = CacheIRStubKind::Monitored;
+      case BaselineCacheIRStubKind::Regular:
+        stubDataOffset = sizeof(ICCacheIR_Regular);
         break;
-      case CacheKind::SetProp:
-      case CacheKind::SetElem:
+      case BaselineCacheIRStubKind::Updated:
         stubDataOffset = sizeof(ICCacheIR_Updated);
-        stubKind = CacheIRStubKind::Updated;
         break;
     }
 
     JitZone* jitZone = cx->zone()->jitZone();
 
     // Check if we already have JitCode for this stub.
     CacheIRStubInfo* stubInfo;
     CacheIRStubKey::Lookup lookup(kind, engine, writer.codeStart(), writer.codeLength());
@@ -2181,37 +2278,37 @@ jit::AttachBaselineCacheIRStub(JSContext
     MOZ_ASSERT(stubInfo->stubDataSize() == writer.stubDataSize());
 
     // Ensure we don't attach duplicate stubs. This can happen if a stub failed
     // for some reason and the IR generator doesn't check for exactly the same
     // conditions.
     for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
         bool updated = false;
         switch (stubKind) {
-          case CacheIRStubKind::Regular: {
+          case BaselineCacheIRStubKind::Regular: {
             if (!iter->isCacheIR_Regular())
                 continue;
             auto otherStub = iter->toCacheIR_Regular();
             if (otherStub->stubInfo() != stubInfo)
                 continue;
             if (!writer.stubDataEqualsMaybeUpdate(otherStub->stubDataStart(), &updated))
                 continue;
             break;
           }
-          case CacheIRStubKind::Monitored: {
+          case BaselineCacheIRStubKind::Monitored: {
             if (!iter->isCacheIR_Monitored())
                 continue;
             auto otherStub = iter->toCacheIR_Monitored();
             if (otherStub->stubInfo() != stubInfo)
                 continue;
             if (!writer.stubDataEqualsMaybeUpdate(otherStub->stubDataStart(), &updated))
                 continue;
             break;
           }
-          case CacheIRStubKind::Updated: {
+          case BaselineCacheIRStubKind::Updated: {
             if (!iter->isCacheIR_Updated())
                 continue;
             auto otherStub = iter->toCacheIR_Updated();
             if (otherStub->stubInfo() != stubInfo)
                 continue;
             if (!writer.stubDataEqualsMaybeUpdate(otherStub->stubDataStart(), &updated))
                 continue;
             break;
@@ -2232,33 +2329,33 @@ jit::AttachBaselineCacheIRStub(JSContext
 
     ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(stubInfo->makesGCCalls(),
                                                               outerScript, engine);
     void* newStubMem = stubSpace->alloc(bytesNeeded);
     if (!newStubMem)
         return nullptr;
 
     switch (stubKind) {
-      case CacheIRStubKind::Regular: {
+      case BaselineCacheIRStubKind::Regular: {
         auto newStub = new(newStubMem) ICCacheIR_Regular(code, stubInfo);
         writer.copyStubData(newStub->stubDataStart());
         stub->addNewStub(newStub);
         *attached = true;
         return newStub;
       }
-      case CacheIRStubKind::Monitored: {
+      case BaselineCacheIRStubKind::Monitored: {
         ICStub* monitorStub =
             stub->toMonitoredFallbackStub()->fallbackMonitorStub()->firstMonitorStub();
         auto newStub = new(newStubMem) ICCacheIR_Monitored(code, monitorStub, stubInfo);
         writer.copyStubData(newStub->stubDataStart());
         stub->addNewStub(newStub);
         *attached = true;
         return newStub;
       }
-      case CacheIRStubKind::Updated: {
+      case BaselineCacheIRStubKind::Updated: {
         auto newStub = new(newStubMem) ICCacheIR_Updated(code, stubInfo);
         if (!newStub->initUpdatingChain(cx, stubSpace)) {
             cx->recoverFromOutOfMemory();
             return nullptr;
         }
         writer.copyStubData(newStub->stubDataStart());
         stub->addNewStub(newStub);
         *attached = true;
--- a/js/src/jit/BaselineCacheIRCompiler.h
+++ b/js/src/jit/BaselineCacheIRCompiler.h
@@ -12,16 +12,19 @@
 #include "jit/CacheIRCompiler.h"
 
 namespace js {
 namespace jit {
 
 class ICFallbackStub;
 class ICStub;
 
+enum class BaselineCacheIRStubKind { Regular, Monitored, Updated };
+
 ICStub* AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
-                                  CacheKind kind, ICStubEngine engine, JSScript* outerScript,
+                                  CacheKind kind, BaselineCacheIRStubKind stubKind,
+                                  ICStubEngine engine, JSScript* outerScript,
                                   ICFallbackStub* stub, bool* attached);
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_BaselineCacheIRCompiler_h */
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -809,16 +809,17 @@ DoGetElemFallback(JSContext* cx, Baselin
         stub->discardStubs(cx);
 
     if (stub->state().canAttachStub()) {
         ICStubEngine engine = ICStubEngine::Baseline;
         GetPropIRGenerator gen(cx, script, pc, CacheKind::GetElem, stub->state().mode(),
                                &isTemporarilyUnoptimizable, lhs, rhs, lhs, CanAttachGetter::Yes);
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Monitored,
                                                         engine, script, stub, &attached);
             if (newStub) {
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
                 if (gen.shouldNotePreliminaryObjectStub())
                     newStub->toCacheIR_Monitored()->notePreliminaryObject();
                 else if (gen.shouldUnlinkPreliminaryObjectStubs())
                     StripPreliminaryObjectStubs(cx, stub);
             }
@@ -881,16 +882,17 @@ DoGetElemSuperFallback(JSContext* cx, Ba
 
     if (stub->state().canAttachStub()) {
         ICStubEngine engine = ICStubEngine::Baseline;
         GetPropIRGenerator gen(cx, script, pc, CacheKind::GetElemSuper, stub->state().mode(),
                                &isTemporarilyUnoptimizable, lhs, rhs, receiver,
                                CanAttachGetter::Yes);
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Monitored,
                                                         engine, script, stub, &attached);
             if (newStub) {
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
                 if (gen.shouldNotePreliminaryObjectStub())
                     newStub->toCacheIR_Monitored()->notePreliminaryObject();
                 else if (gen.shouldUnlinkPreliminaryObjectStubs())
                     StripPreliminaryObjectStubs(cx, stub);
             }
@@ -1046,16 +1048,17 @@ DoSetElemFallback(JSContext* cx, Baselin
     if (stub->state().maybeTransition())
         stub->discardStubs(cx);
 
     if (stub->state().canAttachStub()) {
         SetPropIRGenerator gen(cx, script, pc, CacheKind::SetElem, stub->state().mode(),
                                &isTemporarilyUnoptimizable, objv, index, rhs);
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Updated,
                                                         ICStubEngine::Baseline, frame->script(),
                                                         stub, &attached);
             if (newStub) {
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
 
                 SetUpdateStubData(newStub->toCacheIR_Updated(), gen.typeCheckInfo());
 
                 if (gen.shouldNotePreliminaryObjectStub())
@@ -1109,16 +1112,17 @@ DoSetElemFallback(JSContext* cx, Baselin
     if (stub->state().maybeTransition())
         stub->discardStubs(cx);
 
     if (stub->state().canAttachStub()) {
         SetPropIRGenerator gen(cx, script, pc, CacheKind::SetElem, stub->state().mode(),
                                &isTemporarilyUnoptimizable, objv, index, rhs);
         if (gen.tryAttachAddSlotStub(oldGroup, oldShape)) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Updated,
                                                         ICStubEngine::Baseline, frame->script(),
                                                         stub, &attached);
             if (newStub) {
                 if (gen.shouldNotePreliminaryObjectStub())
                     newStub->toCacheIR_Updated()->notePreliminaryObject();
                 else if (gen.shouldUnlinkPreliminaryObjectStubs())
                     StripPreliminaryObjectStubs(cx, stub);
 
@@ -1306,16 +1310,17 @@ DoInFallback(JSContext* cx, BaselineFram
         RootedScript script(cx, frame->script());
         jsbytecode* pc = stub->icEntry()->pc(script);
 
         ICStubEngine engine = ICStubEngine::Baseline;
         HasPropIRGenerator gen(cx, script, pc, CacheKind::In, stub->state().mode(), key, objValue);
         bool attached = false;
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Regular,
                                                         engine, script, stub, &attached);
             if (newStub)
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
         }
         if (!attached)
             stub->state().trackNotAttached();
     }
 
@@ -1374,16 +1379,17 @@ DoHasOwnFallback(JSContext* cx, Baseline
         jsbytecode* pc = stub->icEntry()->pc(script);
 
         ICStubEngine engine = ICStubEngine::Baseline;
         HasPropIRGenerator gen(cx, script, pc, CacheKind::HasOwn,
                                stub->state().mode(), keyValue, objValue);
         bool attached = false;
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Regular,
                                                         engine, script, stub, &attached);
             if (newStub)
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
         }
         if (!attached)
             stub->state().trackNotAttached();
     }
 
@@ -1445,16 +1451,17 @@ DoGetNameFallback(JSContext* cx, Baselin
     if (stub->state().maybeTransition())
         stub->discardStubs(cx);
 
     if (stub->state().canAttachStub()) {
         ICStubEngine engine = ICStubEngine::Baseline;
         GetNameIRGenerator gen(cx, script, pc, stub->state().mode(), envChain, name);
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Monitored,
                                                         engine, script, stub, &attached);
             if (newStub)
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
         }
         if (!attached)
             stub->state().trackNotAttached();
     }
 
@@ -1524,16 +1531,17 @@ DoBindNameFallback(JSContext* cx, Baseli
         stub->discardStubs(cx);
 
     if (stub->state().canAttachStub()) {
         bool attached = false;
         RootedScript script(cx, frame->script());
         BindNameIRGenerator gen(cx, script, pc, stub->state().mode(), envChain, name);
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Regular,
                                                         ICStubEngine::Baseline, script, stub,
                                                         &attached);
             if (newStub)
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
         }
         if (!attached)
             stub->state().trackNotAttached();
     }
@@ -1698,16 +1706,17 @@ DoSetPropFallback(JSContext* cx, Baselin
         stub->discardStubs(cx);
 
     if (stub->state().canAttachStub()) {
         RootedValue idVal(cx, StringValue(name));
         SetPropIRGenerator gen(cx, script, pc, CacheKind::SetProp, stub->state().mode(),
                                &isTemporarilyUnoptimizable, lhs, idVal, rhs);
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Updated,
                                                         ICStubEngine::Baseline, frame->script(),
                                                         stub, &attached);
             if (newStub) {
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
 
                 SetUpdateStubData(newStub->toCacheIR_Updated(), gen.typeCheckInfo());
 
                 if (gen.shouldNotePreliminaryObjectStub())
@@ -1769,16 +1778,17 @@ DoSetPropFallback(JSContext* cx, Baselin
         stub->discardStubs(cx);
 
     if (stub->state().canAttachStub()) {
         RootedValue idVal(cx, StringValue(name));
         SetPropIRGenerator gen(cx, script, pc, CacheKind::SetProp, stub->state().mode(),
                                &isTemporarilyUnoptimizable, lhs, idVal, rhs);
         if (gen.tryAttachAddSlotStub(oldGroup, oldShape)) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Updated,
                                                         ICStubEngine::Baseline, frame->script(),
                                                         stub, &attached);
             if (newStub) {
                 if (gen.shouldNotePreliminaryObjectStub())
                     newStub->toCacheIR_Updated()->notePreliminaryObject();
                 else if (gen.shouldUnlinkPreliminaryObjectStubs())
                     StripPreliminaryObjectStubs(cx, stub);
 
@@ -2505,26 +2515,50 @@ DoCallFallback(JSContext* cx, BaselineFr
     RootedValue callee(cx, vp[0]);
 
     // Handle funapply with JSOP_ARGUMENTS
     if (op == JSOP_FUNAPPLY && argc == 2 && callArgs[1].isMagic(JS_OPTIMIZED_ARGUMENTS)) {
         if (!GuardFunApplyArgumentsOptimization(cx, frame, callArgs))
             return false;
     }
 
-    CallIRGenerator gen(cx, script, pc, stub->state().mode(), argc,
-                        callee, callArgs.thisv(),
-                        HandleValueArray::fromMarkedLocation(argc, vp+2));
-    bool optimizeAfterCall = false;
-    CallIRGenerator::OptStrategy optStrategy = gen.getOptStrategy(&optimizeAfterCall);
-
-    // Try attaching a call stub, if the CallIRGenerator has determined that this
-    // operation cannot be optimized after the call.
+    // Transition stub state to megamorphic or generic if warranted.
+    if (stub->state().maybeTransition())
+        stub->discardStubs(cx);
+
+    bool canAttachStub = stub->state().canAttachStub();
     bool handled = false;
-    if (!optimizeAfterCall) {
+
+    // Only bother to try optimizing JSOP_CALL with CacheIR if the chain is still
+    // allowed to attach stubs.
+    if (canAttachStub) {
+        CallIRGenerator gen(cx, script, pc, stub, stub->state().mode(), argc,
+                            callee, callArgs.thisv(),
+                            HandleValueArray::fromMarkedLocation(argc, vp+2));
+        if (gen.tryAttachStub()) {
+            ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        gen.cacheIRStubKind(),
+                                                        ICStubEngine::Baseline,
+                                                        script, stub, &handled);
+
+            if (newStub) {
+                JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
+
+                // If it's an updated stub, initialize it.
+                if (gen.cacheIRStubKind() == BaselineCacheIRStubKind::Updated)
+                    SetUpdateStubData(newStub->toCacheIR_Updated(), gen.typeCheckInfo());
+            }
+        }
+        if (!handled)
+            stub->state().trackNotAttached();
+    }
+
+    // Try attaching a regular call stub, but only if the CacheIR attempt didn't add
+    // any stubs.
+    if (!handled) {
         bool createSingleton = ObjectGroup::useSingletonForNewObject(cx, script, pc);
         if (!TryAttachCallStub(cx, stub, script, pc, op, argc, vp, constructing, false,
                                createSingleton, &handled))
         {
             return false;
         }
     }
 
@@ -2563,27 +2597,16 @@ DoCallFallback(JSContext* cx, BaselineFr
     // Check if debug mode toggling made the stub invalid.
     if (stub.invalid())
         return true;
 
     // Add a type monitor stub for the resulting value.
     if (!stub->addMonitorStubForValue(cx, frame, types, res))
         return false;
 
-    if (optimizeAfterCall && !handled && optStrategy != CallIRGenerator::OptStrategy::None) {
-        if (gen.tryAttachStub()) {
-            ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
-                                                        ICStubEngine::Baseline, script, stub,
-                                                        &handled);
-            if (newStub) {
-                JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
-            }
-        }
-    }
-
     if (!handled) {
         // If 'callee' is a potential Call_ConstStringSplit, try to attach an
         // optimized ConstStringSplit stub. Note that vp[0] now holds the return value
         // instead of the callee, so we pass the callee as well.
         if (!TryAttachConstStringSplit(cx, stub, script, argc, callee, vp, pc, res, &handled))
             return false;
     }
 
@@ -4167,16 +4190,17 @@ DoGetIteratorFallback(JSContext* cx, Bas
         RootedScript script(cx, frame->script());
         jsbytecode* pc = stub->icEntry()->pc(script);
 
         ICStubEngine engine = ICStubEngine::Baseline;
         GetIteratorIRGenerator gen(cx, script, pc, stub->state().mode(), value);
         bool attached = false;
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Regular,
                                                         engine, script, stub, &attached);
             if (newStub)
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
         }
         if (!attached)
             stub->state().trackNotAttached();
     }
 
@@ -4557,16 +4581,17 @@ DoTypeOfFallback(JSContext* cx, Baseline
         RootedScript script(cx, frame->script());
         jsbytecode* pc = stub->icEntry()->pc(script);
 
         ICStubEngine engine = ICStubEngine::Baseline;
         TypeOfIRGenerator gen(cx, script, pc, stub->state().mode(), val);
         bool attached = false;
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Regular,
                                                         engine, script, stub, &attached);
             if (newStub)
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
         }
         if (!attached)
             stub->state().trackNotAttached();
     }
 
--- a/js/src/jit/BaselineIC.h
+++ b/js/src/jit/BaselineIC.h
@@ -742,21 +742,21 @@ class ICCall_Fallback : public ICMonitor
 {
     friend class ICStubSpace;
   public:
     static const unsigned UNOPTIMIZABLE_CALL_FLAG = 0x1;
 
     static const uint32_t MAX_OPTIMIZED_STUBS = 16;
     static const uint32_t MAX_SCRIPTED_STUBS = 7;
     static const uint32_t MAX_NATIVE_STUBS = 7;
+
   private:
-
     explicit ICCall_Fallback(JitCode* stubCode)
       : ICMonitoredFallbackStub(ICStub::Call_Fallback, stubCode)
-    { }
+    {}
 
   public:
     void noteUnoptimizableCall() {
         extra_ |= UNOPTIMIZABLE_CALL_FLAG;
     }
     bool hadUnoptimizableCall() const {
         return extra_ & UNOPTIMIZABLE_CALL_FLAG;
     }
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -4,16 +4,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/CacheIR.h"
 
 #include "mozilla/DebugOnly.h"
 #include "mozilla/FloatingPoint.h"
 
+#include "jit/BaselineCacheIRCompiler.h"
 #include "jit/BaselineIC.h"
 #include "jit/CacheIRSpewer.h"
 #include "jit/IonCaches.h"
 
 #include "vm/SelfHosting.h"
 #include "jsobjinlines.h"
 
 #include "jit/MacroAssembler-inl.h"
@@ -3742,140 +3743,233 @@ GetIteratorIRGenerator::tryAttachNativeI
         writer.guardAndGetIterator(objId, iterobj, &cx_->compartment()->enumerators);
     writer.loadObjectResult(iterId);
     writer.returnFromIC();
 
     return true;
 }
 
 CallIRGenerator::CallIRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc,
-                                 ICState::Mode mode, uint32_t argc,
+                                 ICCall_Fallback* stub, ICState::Mode mode, uint32_t argc,
                                  HandleValue callee, HandleValue thisval, HandleValueArray args)
   : IRGenerator(cx, script, pc, CacheKind::Call, mode),
     argc_(argc),
     callee_(callee),
     thisval_(thisval),
     args_(args),
-    cachedStrategy_()
+    typeCheckInfo_(cx, /* needsTypeBarrier = */ true),
+    cacheIRStubKind_(BaselineCacheIRStubKind::Regular)
 { }
 
-CallIRGenerator::OptStrategy
-CallIRGenerator::canOptimize()
+bool
+CallIRGenerator::tryAttachStringSplit()
 {
-    // Ensure callee is a function.
-    if (!callee_.isObject() || !callee_.toObject().is<JSFunction>())
-        return OptStrategy::None;
-
-    RootedFunction calleeFunc(cx_, &callee_.toObject().as<JSFunction>());
-
-    OptStrategy strategy;
-    if ((strategy = canOptimizeStringSplit(calleeFunc)) != OptStrategy::None) {
-        return strategy;
-    }
-
-    return OptStrategy::None;
-}
-
-CallIRGenerator::OptStrategy
-CallIRGenerator::canOptimizeStringSplit(HandleFunction calleeFunc)
-{
+    // Only optimize StringSplitString(str, str)
     if (argc_ != 2 || !args_[0].isString() || !args_[1].isString())
-        return OptStrategy::None;
+        return false;
 
     // Just for now: if they're both atoms, then do not optimize using
     // CacheIR and allow the legacy "ConstStringSplit" BaselineIC optimization
     // to proceed.
     if (args_[0].toString()->isAtom() && args_[1].toString()->isAtom())
-        return OptStrategy::None;
-
-    if (!calleeFunc->isNative())
-        return OptStrategy::None;
-
-    if (calleeFunc->native() != js::intrinsic_StringSplitString)
-        return OptStrategy::None;
-
-    return OptStrategy::StringSplit;
-}
-
-bool
-CallIRGenerator::tryAttachStringSplit()
-{
+        return false;
+
     // Get the object group to use for this location.
     RootedObjectGroup group(cx_, ObjectGroupCompartment::getStringSplitStringGroup(cx_));
-    if (!group) {
+    if (!group)
         return false;
-    }
 
     AutoAssertNoPendingException aanpe(cx_);
     Int32OperandId argcId(writer.setInputOperandId(0));
 
     // Ensure argc == 2.
     writer.guardSpecificInt32Immediate(argcId, 2);
 
-    // 1 argument only.  Stack-layout here is (bottom to top):
+    // 2 arguments.  Stack-layout here is (bottom to top):
     //
     //  3: Callee
     //  2: ThisValue
     //  1: Arg0
     //  0: Arg1 <-- Top of stack
 
-    // Ensure callee is an object and is the function that matches the callee optimized
-    // against during stub generation (i.e. the String_split function object).
+    // Ensure callee is the |String_split| native function.
     ValOperandId calleeValId = writer.loadStackValue(3);
     ObjOperandId calleeObjId = writer.guardIsObject(calleeValId);
     writer.guardIsNativeFunction(calleeObjId, js::intrinsic_StringSplitString);
 
     // Ensure arg0 is a string.
     ValOperandId arg0ValId = writer.loadStackValue(1);
     StringOperandId arg0StrId = writer.guardIsString(arg0ValId);
 
     // Ensure arg1 is a string.
     ValOperandId arg1ValId = writer.loadStackValue(0);
     StringOperandId arg1StrId = writer.guardIsString(arg1ValId);
 
     // Call custom string splitter VM-function.
     writer.callStringSplitResult(arg0StrId, arg1StrId, group);
     writer.typeMonitorResult();
 
+    cacheIRStubKind_ = BaselineCacheIRStubKind::Monitored;
+    trackAttached("StringSplitString");
+
+    TypeScript::Monitor(cx_, script_, pc_, TypeSet::ObjectType(group));
+
     return true;
 }
 
-CallIRGenerator::OptStrategy
-CallIRGenerator::getOptStrategy(bool* optimizeAfterCall)
+bool
+CallIRGenerator::tryAttachArrayPush()
 {
-    if (!cachedStrategy_) {
-        cachedStrategy_ = mozilla::Some(canOptimize());
-    }
-    if (optimizeAfterCall != nullptr) {
-        MOZ_ASSERT(cachedStrategy_.isSome());
-        switch (cachedStrategy_.value()) {
-          case OptStrategy::StringSplit:
-            *optimizeAfterCall = true;
-            break;
-
-          default:
-            *optimizeAfterCall = false;
-        }
-    }
-    return cachedStrategy_.value();
+    // Only optimize calls of the form |obj.push(val)| (exactly one argument).
+    if (argc_ != 1 || !thisval_.isObject())
+        return false;
+
+    // Where |obj| is a native array.
+    RootedObject thisobj(cx_, &thisval_.toObject());
+    if (!thisobj->is<ArrayObject>())
+        return false;
+
+    RootedArrayObject thisarray(cx_, &thisobj->as<ArrayObject>());
+
+    // And the object group for the array is not collecting preliminary objects.
+    if (thisobj->group()->maybePreliminaryObjects())
+        return false;
+
+    // Check for other indexed properties or class hooks.
+    if (!CanAttachAddElement(thisobj, /* isInit = */ false))
+        return false;
+
+    // Can't add new elements to arrays with non-writable length.
+    if (!thisarray->lengthIsWritable())
+        return false;
+
+    // Check that array is extensible.
+    if (!thisarray->nonProxyIsExtensible())
+        return false;
+
+    MOZ_ASSERT(!thisarray->getElementsHeader()->isFrozen(),
+               "Extensible arrays should not have frozen elements");
+    MOZ_ASSERT(thisarray->lengthIsWritable());
+
+    // After this point, we can generate code fine.
+
+    // Generate code.
+    AutoAssertNoPendingException aanpe(cx_);
+    Int32OperandId argcId(writer.setInputOperandId(0));
+
+    // Ensure argc == 1.
+    writer.guardSpecificInt32Immediate(argcId, 1);
+
+    // 1 argument only.  Stack-layout here is (bottom to top):
+    //
+    //  2: Callee
+    //  1: ThisValue
+    //  0: Arg0 <-- Top of stack.
+
+    // Guard callee is the |js::array_push| native function.
+    ValOperandId calleeValId = writer.loadStackValue(2);
+    ObjOperandId calleeObjId = writer.guardIsObject(calleeValId);
+    writer.guardIsNativeFunction(calleeObjId, js::array_push);
+
+    // Guard this is an array object.
+    ValOperandId thisValId = writer.loadStackValue(1);
+    ObjOperandId thisObjId = writer.guardIsObject(thisValId);
+    writer.guardClass(thisObjId, GuardClassKind::Array);
+
+    // This is a soft assert, documenting the fact that we pass 'true'
+    // for needsTypeBarrier when constructing typeCheckInfo_ for CallIRGenerator.
+    // Can be removed safely if the assumption becomes false.
+    MOZ_ASSERT(typeCheckInfo_.needsTypeBarrier());
+
+    // Guard that the group and shape matches.
+    if (typeCheckInfo_.needsTypeBarrier())
+        writer.guardGroup(thisObjId, thisobj->group());
+    writer.guardShape(thisObjId, thisarray->shape());
+
+    // Guard proto chain shapes.
+    ShapeGuardProtoChain(writer, thisobj, thisObjId);
+
+    // arr.push(x) is equivalent to arr[arr.length] = x for regular arrays.
+    ValOperandId argId = writer.loadStackValue(0);
+    writer.arrayPush(thisObjId, argId);
+
+    writer.returnFromIC();
+
+    // Set the type-check info, and set the stub kind to Updated.
+    typeCheckInfo_.set(thisobj->group(), JSID_VOID);
+
+    cacheIRStubKind_ = BaselineCacheIRStubKind::Updated;
+
+    trackAttached("ArrayPush");
+    return true;
 }
 
 bool
 CallIRGenerator::tryAttachStub()
 {
-    OptStrategy strategy = getOptStrategy();
-
-    if (strategy == OptStrategy::StringSplit) {
-        return tryAttachStringSplit();
+    // Only optimize when the mode is Specialized.
+    if (mode_ != ICState::Mode::Specialized)
+        return false;
+
+    // Ensure callee is a function.
+    if (!callee_.isObject() || !callee_.toObject().is<JSFunction>())
+        return false;
+
+    RootedFunction calleeFunc(cx_, &callee_.toObject().as<JSFunction>());
+
+    // Check for native-function optimizations.
+    if (calleeFunc->isNative()) {
+
+        if (calleeFunc->native() == js::intrinsic_StringSplitString) {
+            if (tryAttachStringSplit())
+                return true;
+        }
+
+        if (calleeFunc->native() == js::array_push) {
+            if (tryAttachArrayPush())
+                return true;
+        }
     }
 
-    MOZ_ASSERT(strategy == OptStrategy::None);
     return false;
 }
 
+void
+CallIRGenerator::trackAttached(const char* name)
+{
+#ifdef JS_CACHEIR_SPEW
+    CacheIRSpewer& sp = CacheIRSpewer::singleton();
+    if (sp.enabled()) {
+        LockGuard<Mutex> guard(sp.lock());
+        sp.beginCache(guard, *this);
+        sp.valueProperty(guard, "callee", callee_);
+        sp.valueProperty(guard, "thisval", thisval_);
+        sp.valueProperty(guard, "argc", Int32Value(argc_));
+        sp.attached(guard, name);
+        sp.endCache(guard);
+    }
+#endif
+}
+
+void
+CallIRGenerator::trackNotAttached()
+{
+#ifdef JS_CACHEIR_SPEW
+    CacheIRSpewer& sp = CacheIRSpewer::singleton();
+    if (sp.enabled()) {
+        LockGuard<Mutex> guard(sp.lock());
+        sp.beginCache(guard, *this);
+        sp.valueProperty(guard, "callee", callee_);
+        sp.valueProperty(guard, "thisval", thisval_);
+        sp.valueProperty(guard, "argc", Int32Value(argc_));
+        sp.endCache(guard);
+    }
+#endif
+}
+
 CompareIRGenerator::CompareIRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc,
                                        ICState::Mode mode, JSOp op,
                                        HandleValue lhsVal, HandleValue rhsVal)
   : IRGenerator(cx, script, pc, CacheKind::Compare, mode),
     op_(op), lhsVal_(lhsVal), rhsVal_(rhsVal)
 { }
 
 bool
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -14,16 +14,19 @@
 #include "gc/Rooting.h"
 #include "jit/CompactBuffer.h"
 #include "jit/ICState.h"
 #include "jit/SharedIC.h"
 
 namespace js {
 namespace jit {
 
+
+enum class BaselineCacheIRStubKind;
+
 // CacheIR is an (extremely simple) linear IR language for inline caches.
 // From this IR, we can generate machine code for Baseline or Ion IC stubs.
 //
 // IRWriter
 // --------
 // CacheIR bytecode is written using IRWriter. This class also records some
 // metadata that's used by the Baseline and Ion code generators to generate
 // (efficient) machine code.
@@ -213,16 +216,17 @@ extern const char* CacheKindNames[];
     _(AddAndStoreFixedSlot)               \
     _(AddAndStoreDynamicSlot)             \
     _(AllocateAndStoreDynamicSlot)        \
     _(StoreTypedObjectReferenceProperty)  \
     _(StoreTypedObjectScalarProperty)     \
     _(StoreUnboxedProperty)               \
     _(StoreDenseElement)                  \
     _(StoreDenseElementHole)              \
+    _(ArrayPush)                          \
     _(StoreTypedElement)                  \
     _(StoreUnboxedArrayElement)           \
     _(StoreUnboxedArrayElementHole)       \
     _(CallNativeSetter)                   \
     _(CallScriptedSetter)                 \
     _(CallSetArrayLength)                 \
     _(CallProxySet)                       \
     _(CallProxySetByValue)                \
@@ -815,16 +819,20 @@ class MOZ_RAII CacheIRWriter : public JS
     void storeDenseElementHole(ObjOperandId obj, Int32OperandId index, ValOperandId rhs,
                                bool handleAdd)
     {
         writeOpWithOperandId(CacheOp::StoreDenseElementHole, obj);
         writeOperandId(index);
         writeOperandId(rhs);
         buffer_.writeByte(handleAdd);
     }
+    void arrayPush(ObjOperandId obj, ValOperandId rhs) {
+        writeOpWithOperandId(CacheOp::ArrayPush, obj);
+        writeOperandId(rhs);
+    }
     void callScriptedSetter(ObjOperandId obj, JSFunction* setter, ValOperandId rhs) {
         writeOpWithOperandId(CacheOp::CallScriptedSetter, obj);
         addStubField(uintptr_t(setter), StubField::Type::JSObject);
         writeOperandId(rhs);
     }
     void callNativeSetter(ObjOperandId obj, JSFunction* setter, ValOperandId rhs) {
         writeOpWithOperandId(CacheOp::CallNativeSetter, obj);
         addStubField(uintptr_t(setter), StubField::Type::JSObject);
@@ -1455,41 +1463,45 @@ class MOZ_RAII GetIteratorIRGenerator : 
     GetIteratorIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc, ICState::Mode mode,
                            HandleValue value);
 
     bool tryAttachStub();
 };
 
 class MOZ_RAII CallIRGenerator : public IRGenerator
 {
-  public:
-    enum class OptStrategy {
-        None = 0,
-        StringSplit
-    };
-
   private:
     uint32_t argc_;
     HandleValue callee_;
     HandleValue thisval_;
     HandleValueArray args_;
-
-    mozilla::Maybe<OptStrategy> cachedStrategy_;
+    PropertyTypeCheckInfo typeCheckInfo_;
+    BaselineCacheIRStubKind cacheIRStubKind_;
 
-    OptStrategy canOptimize();
-    OptStrategy canOptimizeStringSplit(HandleFunction calleeFunc);
     bool tryAttachStringSplit();
+    bool tryAttachArrayPush();
+
+    void trackAttached(const char* name);
+    void trackNotAttached();
 
   public:
-    CallIRGenerator(JSContext* cx, HandleScript, jsbytecode* pc, ICState::Mode mode,
+    CallIRGenerator(JSContext* cx, HandleScript script, jsbytecode* pc,
+                    ICCall_Fallback* stub, ICState::Mode mode,
                     uint32_t argc, HandleValue callee, HandleValue thisval,
                     HandleValueArray args);
 
-    OptStrategy getOptStrategy(bool* optimizeAfterCall = nullptr);
     bool tryAttachStub();
+
+    BaselineCacheIRStubKind cacheIRStubKind() const {
+        return cacheIRStubKind_;
+    }
+
+    const PropertyTypeCheckInfo* typeCheckInfo() const {
+        return &typeCheckInfo_;
+    }
 };
 
 class MOZ_RAII CompareIRGenerator : public IRGenerator
 {
     JSOp op_;
     HandleValue lhsVal_;
     HandleValue rhsVal_;
 
--- a/js/src/jit/ICState.h
+++ b/js/src/jit/ICState.h
@@ -61,29 +61,33 @@ class ICState
     {
         reset();
     }
 
     Mode mode() const { return mode_; }
     size_t numOptimizedStubs() const { return numOptimizedStubs_; }
 
     MOZ_ALWAYS_INLINE bool canAttachStub() const {
-        MOZ_ASSERT(numOptimizedStubs_ <= MaxOptimizedStubs);
+        // Note: we cannot assert that numOptimizedStubs_ <= MaxOptimizedStubs
+        // because old-style baseline ICs may attach more stubs than
+        // MaxOptimizedStubs allows.
         if (mode_ == Mode::Generic || JitOptions.disableCacheIR)
             return false;
         return true;
     }
 
     bool invalid() const { return invalid_; }
     void setInvalid() { invalid_ = true; }
 
     // If this returns true, we transitioned to a new mode and the caller
     // should discard all stubs.
     MOZ_MUST_USE MOZ_ALWAYS_INLINE bool maybeTransition() {
-        MOZ_ASSERT(numOptimizedStubs_ <= MaxOptimizedStubs);
+        // Note: we cannot assert that numOptimizedStubs_ <= MaxOptimizedStubs
+        // because old-style baseline ICs may attach more stubs than
+        // MaxOptimizedStubs allows.
         if (mode_ == Mode::Generic)
             return false;
         if (numOptimizedStubs_ < MaxOptimizedStubs && numFailures_ < maxFailures())
             return false;
         if (numFailures_ == maxFailures() || mode_ == Mode::Megamorphic) {
             transition(Mode::Generic);
             return true;
         }
--- a/js/src/jit/IonCacheIRCompiler.cpp
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -1778,16 +1778,23 @@ IonCacheIRCompiler::emitStoreDenseElemen
     masm.bind(&doStore);
     EmitIonStoreDenseElement(masm, val, scratch, element);
     if (needsPostBarrier())
         emitPostBarrierElement(obj, val, scratch, index);
     return true;
 }
 
 bool
+IonCacheIRCompiler::emitArrayPush()
+{
+    MOZ_ASSERT_UNREACHABLE("emitArrayPush not supported for IonCaches.");
+    return false;
+}
+
+bool
 IonCacheIRCompiler::emitStoreTypedElement()
 {
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     Register index = allocator.useRegister(masm, reader.int32OperandId());
     ConstantOrRegister val = allocator.useConstantOrRegister(masm, reader.valOperandId());
 
     TypedThingLayout layout = reader.typedThingLayout();
     Scalar::Type arrayType = reader.scalarType();
--- a/js/src/jit/SharedIC.cpp
+++ b/js/src/jit/SharedIC.cpp
@@ -1457,16 +1457,17 @@ DoCompareFallback(JSContext* cx, void* p
     }
 
     if (engine ==  ICStubEngine::Baseline) {
         RootedScript script(cx, info.outerScript(cx));
         CompareIRGenerator gen(cx, script, pc, stub->state().mode(), op, lhs, rhs);
         bool attached = false;
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Regular,
                                                         engine, script, stub, &attached);
             if (newStub)
                  JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
             return true;
         }
     }
 
     // Try to generate new stubs.
@@ -2066,16 +2067,17 @@ DoGetPropFallback(JSContext* cx, Baselin
 
     bool attached = false;
     if (stub->state().canAttachStub()) {
         RootedValue idVal(cx, StringValue(name));
         GetPropIRGenerator gen(cx, script, pc, CacheKind::GetProp, stub->state().mode(),
                                &isTemporarilyUnoptimizable, val, idVal, val, CanAttachGetter::Yes);
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Monitored,
                                                         ICStubEngine::Baseline, script,
                                                         stub, &attached);
             if (newStub) {
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
                 if (gen.shouldNotePreliminaryObjectStub())
                     newStub->toCacheIR_Monitored()->notePreliminaryObject();
                 else if (gen.shouldUnlinkPreliminaryObjectStubs())
                     StripPreliminaryObjectStubs(cx, stub);
@@ -2136,16 +2138,17 @@ DoGetPropSuperFallback(JSContext* cx, Ba
     bool attached = false;
     if (stub->state().canAttachStub()) {
         RootedValue idVal(cx, StringValue(name));
         GetPropIRGenerator gen(cx, script, pc, CacheKind::GetPropSuper, stub->state().mode(),
                                &isTemporarilyUnoptimizable, val, idVal, receiver,
                                CanAttachGetter::Yes);
         if (gen.tryAttachStub()) {
             ICStub* newStub = AttachBaselineCacheIRStub(cx, gen.writerRef(), gen.cacheKind(),
+                                                        BaselineCacheIRStubKind::Monitored,
                                                         ICStubEngine::Baseline, script,
                                                         stub, &attached);
             if (newStub) {
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
                 if (gen.shouldNotePreliminaryObjectStub())
                     newStub->toCacheIR_Monitored()->notePreliminaryObject();
                 else if (gen.shouldUnlinkPreliminaryObjectStubs())
                     StripPreliminaryObjectStubs(cx, stub);