Bug 1310125 part 2 - Port Baseline scripted getter IC stub to CacheIR. r=h4writer
author Jan de Mooij <jdemooij@mozilla.com>
Tue, 15 Nov 2016 15:54:14 +0100
changeset 322754 3656a6f2cd7e5c93f32c820d73ed73ad1a170dae
parent 322753 a9c898900950efe519e898507414e6e4f10240b4
child 322755 8d6553075ae1ce62d1032b13906892e508e9e1f7
push id 21
push user maklebus@msu.edu
push date Thu, 01 Dec 2016 06:22:08 +0000
reviewers h4writer
bugs 1310125
milestone 53.0a1
js/src/jit/BaselineCacheIR.cpp
js/src/jit/BaselineCacheIR.h
js/src/jit/BaselineDebugModeOSR.cpp
js/src/jit/BaselineInspector.cpp
js/src/jit/CacheIR.cpp
js/src/jit/CacheIR.h
js/src/jit/IonCaches.cpp
js/src/jit/IonCaches.h
js/src/jit/JitCompartment.h
js/src/jit/JitFrames.cpp
js/src/jit/SharedIC.cpp
js/src/jit/SharedIC.h
js/src/jit/SharedICList.h
--- a/js/src/jit/BaselineCacheIR.cpp
+++ b/js/src/jit/BaselineCacheIR.cpp
@@ -10,16 +10,18 @@
 #include "jit/Linker.h"
 #include "jit/SharedICHelpers.h"
 
 #include "jit/MacroAssembler-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
+using mozilla::Maybe;
+
 // OperandLocation represents the location of an OperandId. The operand is
 // either in a register or on the stack, and is either boxed or unboxed.
 class OperandLocation
 {
   public:
     enum Kind {
         Uninitialized = 0,
         PayloadReg,
@@ -141,35 +143,41 @@ class MOZ_RAII CacheRegisterAllocator
     // The current location of each operand.
     Vector<OperandLocation, 8, SystemAllocPolicy> operandLocations_;
 
     // The registers allocated while emitting the current CacheIR op.
     // This prevents us from allocating a register and then immediately
     // clobbering it for something else, while we're still holding on to it.
     LiveGeneralRegisterSet currentOpRegs_;
 
+    const AllocatableGeneralRegisterSet allocatableRegs_;
+
     // Registers that are currently unused and available.
     AllocatableGeneralRegisterSet availableRegs_;
 
     // The number of bytes pushed on the native stack.
     uint32_t stackPushed_;
 
     // The index of the CacheIR instruction we're currently emitting.
     uint32_t currentInstruction_;
 
     const CacheIRWriter& writer_;
 
     CacheRegisterAllocator(const CacheRegisterAllocator&) = delete;
     CacheRegisterAllocator& operator=(const CacheRegisterAllocator&) = delete;
 
+    void freeDeadOperandRegisters();
+
   public:
     friend class AutoScratchRegister;
+    friend class AutoScratchRegisterExcluding;
 
     explicit CacheRegisterAllocator(const CacheIRWriter& writer)
-      : stackPushed_(0),
+      : allocatableRegs_(GeneralRegisterSet::All()),
+        stackPushed_(0),
         currentInstruction_(0),
         writer_(writer)
     {}
 
     MOZ_MUST_USE bool init(const AllocatableGeneralRegisterSet& available) {
         availableRegs_ = available;
         if (!origInputLocations_.resize(writer_.numInputOperands()))
             return false;
@@ -193,46 +201,97 @@ class MOZ_RAII CacheRegisterAllocator
         currentOpRegs_.clear();
         currentInstruction_++;
     }
 
     uint32_t stackPushed() const {
         return stackPushed_;
     }
 
+    bool isAllocatable(Register reg) const {
+        return allocatableRegs_.has(reg);
+    }
+
     // Allocates a new register.
     Register allocateRegister(MacroAssembler& masm);
     ValueOperand allocateValueRegister(MacroAssembler& masm);
+    void allocateFixedRegister(MacroAssembler& masm, Register reg);
+
+    // Releases a register so it can be reused later.
+    void releaseRegister(Register reg) {
+        MOZ_ASSERT(currentOpRegs_.has(reg));
+        availableRegs_.add(reg);
+    }
+
+    // Removes spilled values from the native stack. This should only be
+    // called after all registers have been allocated.
+    void discardStack(MacroAssembler& masm);
 
     // Returns the register for the given operand. If the operand is currently
     // not in a register, it will load it into one.
     ValueOperand useRegister(MacroAssembler& masm, ValOperandId val);
     Register useRegister(MacroAssembler& masm, ObjOperandId obj);
 
     // Allocates an output register for the given operand.
     Register defineRegister(MacroAssembler& masm, ObjOperandId obj);
 };
 
-// RAII class to put a scratch register back in the allocator's availableRegs
-// set when we're done with it.
+// RAII class to allocate a scratch register and release it when we're done
+// with it.
 class MOZ_RAII AutoScratchRegister
 {
     CacheRegisterAllocator& alloc_;
     Register reg_;
 
   public:
-    AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm)
+    AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm,
+                        Register reg = InvalidReg)
       : alloc_(alloc)
     {
-        reg_ = alloc.allocateRegister(masm);
+        if (reg != InvalidReg) {
+            alloc.allocateFixedRegister(masm, reg);
+            reg_ = reg;
+        } else {
+            reg_ = alloc.allocateRegister(masm);
+        }
         MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
     }
     ~AutoScratchRegister() {
+        alloc_.releaseRegister(reg_);
+    }
+    operator Register() const { return reg_; }
+};
+
+// Like AutoScratchRegister, but lets the caller specify a register that should
+// not be allocated here.
+class MOZ_RAII AutoScratchRegisterExcluding
+{
+    CacheRegisterAllocator& alloc_;
+    Register reg_;
+
+  public:
+    AutoScratchRegisterExcluding(CacheRegisterAllocator& alloc, MacroAssembler& masm,
+                                 Register excluding)
+      : alloc_(alloc)
+    {
+        MOZ_ASSERT(excluding != InvalidReg);
+
+        reg_ = alloc.allocateRegister(masm);
+
+        if (reg_ == excluding) {
+            // We need a different register, so try again.
+            reg_ = alloc.allocateRegister(masm);
+            MOZ_ASSERT(reg_ != excluding);
+            alloc_.releaseRegister(excluding);
+        }
+
         MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
-        alloc_.availableRegs_.add(reg_);
+    }
+    ~AutoScratchRegisterExcluding() {
+        alloc_.releaseRegister(reg_);
     }
     operator Register() const { return reg_; }
 };
 
 // The FailurePath class stores everything we need to generate a failure path
 // at the end of the IC code. The failure path restores the input registers, if
 // needed, and jumps to the next stub.
 class FailurePath
@@ -375,35 +434,57 @@ CacheIRCompiler::emitFailurePath(size_t 
                                orig.valueReg());
             }
             break;
           default:
             MOZ_CRASH();
         }
     }
 
-    if (stackPushed > 0)
-        masm.addToStackPtr(Imm32(stackPushed));
+    allocator.discardStack(masm);
 }
 
 // BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
 class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
 {
+    // Some Baseline IC stubs can be used in IonMonkey through SharedStubs.
+    // Those stubs have different machine code, so we need to track whether
+    // we're compiling for Baseline or Ion.
+    ICStubEngine engine_;
+
+#ifdef DEBUG
+    uint32_t framePushedAtEnterStubFrame_;
+#endif
+
     uint32_t stubDataOffset_;
+    bool inStubFrame_;
+    bool makesGCCalls_;
+
+    void enterStubFrame(MacroAssembler& masm, Register scratch);
+    void leaveStubFrame(MacroAssembler& masm, bool calledIntoIon);
 
   public:
-    BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, uint32_t stubDataOffset)
+    BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, ICStubEngine engine,
+                            uint32_t stubDataOffset)
       : CacheIRCompiler(cx, writer),
-        stubDataOffset_(stubDataOffset)
+        engine_(engine),
+#ifdef DEBUG
+        framePushedAtEnterStubFrame_(0),
+#endif
+        stubDataOffset_(stubDataOffset),
+        inStubFrame_(false),
+        makesGCCalls_(false)
     {}
 
     MOZ_MUST_USE bool init(CacheKind kind);
 
     JitCode* compile();
 
+    bool makesGCCalls() const { return makesGCCalls_; }
+
   private:
 #define DEFINE_OP(op) MOZ_MUST_USE bool emit##op();
     CACHE_IR_OPS(DEFINE_OP)
 #undef DEFINE_OP
 
     Address stubAddress(uint32_t offset) const {
         return Address(ICStubReg, stubDataOffset_ + offset * sizeof(uintptr_t));
     }
@@ -425,27 +506,61 @@ class MOZ_RAII BaselineCacheIRCompiler :
 
         if (!failurePaths.append(Move(newFailure)))
             return false;
 
         *failure = &failurePaths.back();
         return true;
     }
     void emitEnterTypeMonitorIC() {
-        if (allocator.stackPushed() > 0)
-            masm.addToStackPtr(Imm32(allocator.stackPushed()));
+        allocator.discardStack(masm);
         EmitEnterTypeMonitorIC(masm);
     }
     void emitReturnFromIC() {
-        if (allocator.stackPushed() > 0)
-            masm.addToStackPtr(Imm32(allocator.stackPushed()));
+        allocator.discardStack(masm);
         EmitReturnFromIC(masm);
     }
 };
 
+void
+BaselineCacheIRCompiler::enterStubFrame(MacroAssembler& masm, Register scratch)
+{
+    if (engine_ == ICStubEngine::Baseline) {
+        EmitBaselineEnterStubFrame(masm, scratch);
+#ifdef DEBUG
+        framePushedAtEnterStubFrame_ = masm.framePushed();
+#endif
+    } else {
+        EmitIonEnterStubFrame(masm, scratch);
+    }
+
+    MOZ_ASSERT(!inStubFrame_);
+    inStubFrame_ = true;
+    makesGCCalls_ = true;
+}
+
+void
+BaselineCacheIRCompiler::leaveStubFrame(MacroAssembler& masm, bool calledIntoIon)
+{
+    MOZ_ASSERT(inStubFrame_);
+    inStubFrame_ = false;
+
+    if (engine_ == ICStubEngine::Baseline) {
+#ifdef DEBUG
+        masm.setFramePushed(framePushedAtEnterStubFrame_);
+        if (calledIntoIon)
+            masm.adjustFrame(sizeof(intptr_t)); // Calls into ion have this extra.
+#endif
+
+        EmitBaselineLeaveStubFrame(masm, calledIntoIon);
+    } else {
+        EmitIonLeaveStubFrame(masm);
+    }
+}
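
Ops that make a non-tail call are all expected to follow the same shape:
discard any spilled operands, bracket the call with enter/leaveStubFrame, and
only then leave the IC. A pattern sketch, mirroring
emitCallScriptedGetterResult below:

    allocator.discardStack(masm);      // operands must not live across the call
    enterStubFrame(masm, scratch);     // sets makesGCCalls_, saves frame state
    // ... push arguments and perform the call ...
    leaveStubFrame(masm, /* calledIntoIon = */ true);
    emitEnterTypeMonitorIC();
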
+
 JitCode*
 BaselineCacheIRCompiler::compile()
 {
 #ifndef JS_USE_LINK_REGISTER
     // The first value contains the return address,
     // which we pull into ICTailCallReg for tail calls.
     masm.adjustFrame(sizeof(intptr_t));
 #endif
@@ -597,43 +712,63 @@ CacheRegisterAllocator::defineRegister(M
     OperandLocation& loc = operandLocations_[op.id()];
     MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
 
     Register reg = allocateRegister(masm);
     loc.setPayloadReg(reg, JSVAL_TYPE_OBJECT);
     return reg;
 }
 
+void
+CacheRegisterAllocator::freeDeadOperandRegisters()
+{
+    // See if any operands are dead so we can reuse their registers. Note that
+    // we skip the input operands, as those are also used by failure paths, and
+    // we currently don't track those uses.
+    for (size_t i = writer_.numInputOperands(); i < operandLocations_.length(); i++) {
+        if (!writer_.operandIsDead(i, currentInstruction_))
+            continue;
+
+        OperandLocation& loc = operandLocations_[i];
+        switch (loc.kind()) {
+          case OperandLocation::PayloadReg:
+            availableRegs_.add(loc.payloadReg());
+            break;
+          case OperandLocation::ValueReg:
+            availableRegs_.add(loc.valueReg());
+            break;
+          case OperandLocation::Uninitialized:
+          case OperandLocation::PayloadStack:
+          case OperandLocation::ValueStack:
+            break;
+        }
+        loc.setUninitialized();
+    }
+}
+
+void
+CacheRegisterAllocator::discardStack(MacroAssembler& masm)
+{
+    // This should only be called when we are no longer using the operands,
+    // as we're discarding everything from the native stack. Set all operand
+    // locations to Uninitialized to catch bugs.
+    for (size_t i = 0; i < operandLocations_.length(); i++)
+        operandLocations_[i].setUninitialized();
+
+    if (stackPushed_ > 0) {
+        masm.addToStackPtr(Imm32(stackPushed_));
+        stackPushed_ = 0;
+    }
+}
+
 Register
 CacheRegisterAllocator::allocateRegister(MacroAssembler& masm)
 {
-    if (availableRegs_.empty()) {
-        // No registers available. See if any operands are dead so we can reuse
-        // their registers. Note that we skip the input operands, as those are
-        // also used by failure paths, and we currently don't track those uses.
-        for (size_t i = writer_.numInputOperands(); i < operandLocations_.length(); i++) {
-            if (!writer_.operandIsDead(i, currentInstruction_))
-                continue;
-
-            OperandLocation& loc = operandLocations_[i];
-            switch (loc.kind()) {
-              case OperandLocation::PayloadReg:
-                availableRegs_.add(loc.payloadReg());
-                break;
-              case OperandLocation::ValueReg:
-                availableRegs_.add(loc.valueReg());
-                break;
-              case OperandLocation::Uninitialized:
-              case OperandLocation::PayloadStack:
-              case OperandLocation::ValueStack:
-                break;
-            }
-            loc.setUninitialized();
-        }
-    }
+    if (availableRegs_.empty())
+        freeDeadOperandRegisters();
 
     if (availableRegs_.empty()) {
         // Still no registers available, try to spill unused operands to
         // the stack.
         for (size_t i = 0; i < operandLocations_.length(); i++) {
             OperandLocation& loc = operandLocations_[i];
             if (loc.kind() == OperandLocation::PayloadReg) {
                 Register reg = loc.payloadReg();
@@ -665,16 +800,61 @@ CacheRegisterAllocator::allocateRegister
     // have to spill some unrelated registers.)
     MOZ_RELEASE_ASSERT(!availableRegs_.empty());
 
     Register reg = availableRegs_.takeAny();
     currentOpRegs_.add(reg);
     return reg;
 }
 
+void
+CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm, Register reg)
+{
+    // Fixed registers should be allocated first, to ensure they're
+    // still available.
+    MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");
+
+    freeDeadOperandRegisters();
+
+    if (availableRegs_.has(reg)) {
+        availableRegs_.take(reg);
+        currentOpRegs_.add(reg);
+        return;
+    }
+
+    // The register must be used by some operand. Spill it to the stack.
+    for (size_t i = 0; i < operandLocations_.length(); i++) {
+        OperandLocation& loc = operandLocations_[i];
+        if (loc.kind() == OperandLocation::PayloadReg) {
+            if (loc.payloadReg() != reg)
+                continue;
+
+            masm.push(reg);
+            stackPushed_ += sizeof(uintptr_t);
+            loc.setPayloadStack(stackPushed_, loc.payloadType());
+            currentOpRegs_.add(reg);
+            return;
+        }
+        if (loc.kind() == OperandLocation::ValueReg) {
+            if (!loc.valueReg().aliases(reg))
+                continue;
+
+            masm.pushValue(loc.valueReg());
+            stackPushed_ += sizeof(js::Value);
+            loc.setValueStack(stackPushed_);
+            availableRegs_.add(loc.valueReg());
+            availableRegs_.take(reg);
+            currentOpRegs_.add(reg);
+            return;
+        }
+    }
+
+    MOZ_CRASH("Invalid register");
+}
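
For illustration, a hypothetical trace when the requested register currently
holds an operand payload (register name invented):

    // allocateFixedRegister(masm, r9), with some operand in PayloadReg(r9):
    //   masm.push(r9);                      // spill the payload
    //   stackPushed_ += sizeof(uintptr_t);
    //   loc.setPayloadStack(stackPushed_, loc.payloadType());
    //   currentOpRegs_.add(r9);             // r9 now belongs to this op
    // A later useRegister() for that operand reloads it from the stack.
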
+
 ValueOperand
 CacheRegisterAllocator::allocateValueRegister(MacroAssembler& masm)
 {
 #ifdef JS_NUNBOX32
     Register reg1 = allocateRegister(masm);
     Register reg2 = allocateRegister(masm);
     return ValueOperand(reg1, reg2);
 #else
@@ -869,16 +1049,88 @@ BaselineCacheIRCompiler::emitLoadDynamic
     masm.load32(stubAddress(reader.stubOffset()), scratch);
     masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
     masm.loadValue(BaseIndex(obj, scratch, TimesOne), R0);
     emitEnterTypeMonitorIC();
     return true;
 }
 
 bool
+BaselineCacheIRCompiler::emitCallScriptedGetterResult()
+{
+    MOZ_ASSERT(engine_ == ICStubEngine::Baseline);
+
+    // We use ICTailCallReg when entering the stub frame, so ensure it's not
+    // used for something else.
+    Maybe<AutoScratchRegister> tail;
+    if (allocator.isAllocatable(ICTailCallReg))
+        tail.emplace(allocator, masm, ICTailCallReg);
+
+    Register obj = allocator.useRegister(masm, reader.objOperandId());
+    Address getterAddr(stubAddress(reader.stubOffset()));
+
+    AutoScratchRegisterExcluding code(allocator, masm, ArgumentsRectifierReg);
+    AutoScratchRegister callee(allocator, masm);
+    AutoScratchRegister scratch(allocator, masm);
+
+    // First, ensure our getter is non-lazy and has JIT code.
+    {
+        FailurePath* failure;
+        if (!addFailurePath(&failure))
+            return false;
+
+        masm.loadPtr(getterAddr, callee);
+        masm.branchIfFunctionHasNoScript(callee, failure->label());
+        masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
+        masm.loadBaselineOrIonRaw(code, code, failure->label());
+    }
+
+    allocator.discardStack(masm);
+
+    // Push a stub frame so that we can perform a non-tail call.
+    enterStubFrame(masm, scratch);
+
+    // Align the stack such that the JitFrameLayout is aligned on
+    // JitStackAlignment.
+    masm.alignJitStackBasedOnNArgs(0);
+
+    // Getter is called with 0 arguments, just |obj| as thisv.
+    // Note that we use Push, not push, so that callJit will align the stack
+    // properly on ARM.
+    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
+
+    EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
+    masm.Push(Imm32(0));  // ActualArgc is 0
+    masm.Push(callee);
+    masm.Push(scratch);
+
+    // Handle arguments underflow.
+    Label noUnderflow;
+    masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), callee);
+    masm.branch32(Assembler::Equal, callee, Imm32(0), &noUnderflow);
+    {
+        // Call the arguments rectifier.
+        MOZ_ASSERT(ArgumentsRectifierReg != code);
+
+        JitCode* argumentsRectifier = cx_->runtime()->jitRuntime()->getArgumentsRectifier();
+        masm.movePtr(ImmGCPtr(argumentsRectifier), code);
+        masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
+        masm.movePtr(ImmWord(0), ArgumentsRectifierReg);
+    }
+
+    masm.bind(&noUnderflow);
+    masm.callJit(code);
+
+    leaveStubFrame(masm, true);
+
+    emitEnterTypeMonitorIC();
+    return true;
+}
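
At the callJit above, the pushes produce a JitFrameLayout-shaped frame;
roughly, in push order (most recently pushed last):

    // thisv            <- Push(TypedOrValueRegister(MIRType::Object, obj))
    // actual argc (0)  <- Push(Imm32(0))
    // callee token     <- Push(callee)
    // frame descriptor <- Push(scratch)
    // return address   <- pushed by callJit
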
+
+bool
 BaselineCacheIRCompiler::emitLoadUnboxedPropertyResult()
 {
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     AutoScratchRegister scratch(allocator, masm);
 
     JSValueType fieldType = reader.valueType();
 
     Address fieldOffset(stubAddress(reader.stubOffset()));
@@ -1126,40 +1378,46 @@ CacheIRWriter::copyStubData(uint8_t* des
         MOZ_CRASH();
     }
 }
 
 HashNumber
 CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l)
 {
     HashNumber hash = mozilla::HashBytes(l.code, l.length);
-    return mozilla::AddToHash(hash, uint32_t(l.kind));
+    hash = mozilla::AddToHash(hash, uint32_t(l.kind));
+    hash = mozilla::AddToHash(hash, uint32_t(l.engine));
+    return hash;
 }
 
 bool
 CacheIRStubKey::match(const CacheIRStubKey& entry, const CacheIRStubKey::Lookup& l)
 {
     if (entry.stubInfo->kind() != l.kind)
         return false;
 
+    if (entry.stubInfo->engine() != l.engine)
+        return false;
+
     if (entry.stubInfo->codeLength() != l.length)
         return false;
 
     if (!mozilla::PodEqual(entry.stubInfo->code(), l.code, l.length))
         return false;
 
     return true;
 }
 
 CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
   : CacheIRReader(stubInfo->code(), stubInfo->code() + stubInfo->codeLength())
 {}
 
 CacheIRStubInfo*
-CacheIRStubInfo::New(CacheKind kind, uint32_t stubDataOffset, const CacheIRWriter& writer)
+CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
+                     uint32_t stubDataOffset, const CacheIRWriter& writer)
 {
     size_t numStubFields = writer.numStubFields();
     size_t bytesNeeded = sizeof(CacheIRStubInfo) +
                          writer.codeLength() +
                          (numStubFields + 1); // +1 for the GCType::Limit terminator.
     uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
     if (!p)
         return nullptr;
@@ -1172,23 +1430,25 @@ CacheIRStubInfo::New(CacheKind kind, uin
                   "All StubField::GCTypes must fit in uint8_t");
 
     // Copy the GC types of the stub fields.
     uint8_t* gcTypes = codeStart + writer.codeLength();
     for (size_t i = 0; i < numStubFields; i++)
         gcTypes[i] = uint8_t(writer.stubFieldGCType(i));
     gcTypes[numStubFields] = uint8_t(StubField::GCType::Limit);
 
-    return new(p) CacheIRStubInfo(kind, stubDataOffset, codeStart, writer.codeLength(), gcTypes);
+    return new(p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset, codeStart,
+                                  writer.codeLength(), gcTypes);
 }
 
 static const size_t MaxOptimizedCacheIRStubs = 16;
 
 ICStub*
-jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
+jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
+                               CacheKind kind, ICStubEngine engine, JSScript* outerScript,
                                ICFallbackStub* stub)
 {
     // We shouldn't GC or report OOM (or any other exception) here.
     AutoAssertNoPendingException aanpe(cx);
     JS::AutoCheckCannotGC nogc;
 
     if (writer.failed())
         return nullptr;
@@ -1199,54 +1459,55 @@ jit::AttachBaselineCacheIRStub(JSContext
 
     MOZ_ASSERT(kind == CacheKind::GetProp);
     uint32_t stubDataOffset = sizeof(ICCacheIR_Monitored);
 
     JitCompartment* jitCompartment = cx->compartment()->jitCompartment();
 
     // Check if we already have JitCode for this stub.
     CacheIRStubInfo* stubInfo;
-    CacheIRStubKey::Lookup lookup(kind, writer.codeStart(), writer.codeLength());
+    CacheIRStubKey::Lookup lookup(kind, engine, writer.codeStart(), writer.codeLength());
     JitCode* code = jitCompartment->getCacheIRStubCode(lookup, &stubInfo);
     if (!code) {
         // We have to generate stub code.
         JitContext jctx(cx, nullptr);
-        BaselineCacheIRCompiler comp(cx, writer, stubDataOffset);
+        BaselineCacheIRCompiler comp(cx, writer, engine, stubDataOffset);
         if (!comp.init(kind))
             return nullptr;
 
         code = comp.compile();
         if (!code)
             return nullptr;
 
         // Allocate the shared CacheIRStubInfo. Note that the putCacheIRStubCode
         // call below will transfer ownership to the stub code HashMap, so we
         // don't have to worry about freeing it below.
         MOZ_ASSERT(!stubInfo);
-        stubInfo = CacheIRStubInfo::New(kind, stubDataOffset, writer);
+        stubInfo = CacheIRStubInfo::New(kind, engine, comp.makesGCCalls(), stubDataOffset, writer);
         if (!stubInfo)
             return nullptr;
 
         CacheIRStubKey key(stubInfo);
         if (!jitCompartment->putCacheIRStubCode(lookup, key, code))
             return nullptr;
     }
 
     // We got our shared stub code and stub info. Time to allocate and attach a
     // new stub.
 
     MOZ_ASSERT(code);
     MOZ_ASSERT(stubInfo);
     MOZ_ASSERT(stub->isMonitoredFallback());
+    MOZ_ASSERT(stubInfo->stubDataSize() == writer.stubDataSize());
 
-    size_t bytesNeeded = stubInfo->stubDataOffset() + writer.stubDataSize();
+    size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
 
-    // For now, no stubs can make calls so they are all allocated in the
-    // optimized stub space.
-    void* newStub = cx->zone()->jitZone()->optimizedStubSpace()->alloc(bytesNeeded);
+    ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(stubInfo->makesGCCalls(),
+                                                              outerScript, engine);
+    void* newStub = stubSpace->alloc(bytesNeeded);
     if (!newStub)
         return nullptr;
 
     ICStub* monitorStub = stub->toMonitoredFallbackStub()->fallbackMonitorStub()->firstMonitorStub();
     new(newStub) ICCacheIR_Monitored(code, monitorStub, stubInfo);
 
     writer.copyStubData((uint8_t*)newStub + stubInfo->stubDataOffset());
     stub->addNewStub((ICStub*)newStub);
@@ -1276,8 +1537,75 @@ jit::TraceBaselineCacheIRStub(JSTracer* 
           case StubField::GCType::Limit:
             return; // Done.
           default:
             MOZ_CRASH();
         }
         field++;
     }
 }
+
+size_t
+CacheIRStubInfo::stubDataSize() const
+{
+    size_t field = 0;
+    size_t size = 0;
+    while (true) {
+        switch (gcType(field++)) {
+          case StubField::GCType::NoGCThing:
+          case StubField::GCType::Shape:
+          case StubField::GCType::ObjectGroup:
+          case StubField::GCType::JSObject:
+            size += sizeof(uintptr_t);
+            continue;
+          case StubField::GCType::Limit:
+            return size;
+        }
+        MOZ_CRASH("unreachable");
+    }
+}
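
stubDataSize walks the GCType array rather than storing an explicit length;
for example (hypothetical field list):

    // gcTypes_ = { Shape, NoGCThing, JSObject, Limit }
    // stubDataSize() == 3 * sizeof(uintptr_t)
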
+
+void
+CacheIRStubInfo::copyStubData(ICStub* src, ICStub* dest) const
+{
+    uintptr_t* srcWords = reinterpret_cast<uintptr_t*>(src);
+    uintptr_t* destWords = reinterpret_cast<uintptr_t*>(dest);
+
+    size_t field = 0;
+    while (true) {
+        switch (gcType(field)) {
+          case StubField::GCType::NoGCThing:
+            destWords[field] = srcWords[field];
+            break;
+          case StubField::GCType::Shape:
+            getStubField<Shape*>(dest, field).init(getStubField<Shape*>(src, field));
+            break;
+          case StubField::GCType::JSObject:
+            getStubField<JSObject*>(dest, field).init(getStubField<JSObject*>(src, field));
+            break;
+          case StubField::GCType::ObjectGroup:
+            getStubField<ObjectGroup*>(dest, field).init(getStubField<ObjectGroup*>(src, field));
+            break;
+          case StubField::GCType::Limit:
+            return; // Done.
+        }
+        field++;
+    }
+}
+
+/* static */ ICCacheIR_Monitored*
+ICCacheIR_Monitored::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+                           ICCacheIR_Monitored& other)
+{
+    const CacheIRStubInfo* stubInfo = other.stubInfo();
+    MOZ_ASSERT(stubInfo->makesGCCalls());
+
+    size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
+    void* newStub = space->alloc(bytesNeeded);
+    if (!newStub)
+        return nullptr;
+
+    ICCacheIR_Monitored* res = new(newStub) ICCacheIR_Monitored(other.jitCode(), firstMonitorStub,
+                                                                stubInfo);
+    stubInfo->copyStubData(&other, res);
+    return res;
+}
--- a/js/src/jit/BaselineCacheIR.h
+++ b/js/src/jit/BaselineCacheIR.h
@@ -15,53 +15,70 @@ namespace jit {
 
 class ICFallbackStub;
 class ICStub;
 
 // See the 'Sharing Baseline stub code' comment in CacheIR.h for a description
 // of this class.
 class CacheIRStubInfo
 {
-    CacheKind kind_;
+    // These fields don't require 8 bits, but GCC complains if these fields are
+    // smaller than the size of the enums.
+    CacheKind kind_ : 8;
+    ICStubEngine engine_ : 8;
+    bool makesGCCalls_ : 1;
     uint8_t stubDataOffset_;
+
     const uint8_t* code_;
     uint32_t length_;
     const uint8_t* gcTypes_;
 
-    CacheIRStubInfo(CacheKind kind, uint32_t stubDataOffset, const uint8_t* code, uint32_t codeLength,
+    CacheIRStubInfo(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
+                    uint32_t stubDataOffset, const uint8_t* code, uint32_t codeLength,
                     const uint8_t* gcTypes)
       : kind_(kind),
+        engine_(engine),
+        makesGCCalls_(makesGCCalls),
         stubDataOffset_(stubDataOffset),
         code_(code),
         length_(codeLength),
         gcTypes_(gcTypes)
     {
+        MOZ_ASSERT(kind_ == kind, "Kind must fit in bitfield");
+        MOZ_ASSERT(engine_ == engine, "Engine must fit in bitfield");
         MOZ_ASSERT(stubDataOffset_ == stubDataOffset, "stubDataOffset must fit in uint8_t");
     }
 
     CacheIRStubInfo(const CacheIRStubInfo&) = delete;
     CacheIRStubInfo& operator=(const CacheIRStubInfo&) = delete;
 
   public:
     CacheKind kind() const { return kind_; }
+    ICStubEngine engine() const { return engine_; }
+    bool makesGCCalls() const { return makesGCCalls_; }
 
     const uint8_t* code() const { return code_; }
     uint32_t codeLength() const { return length_; }
     uint32_t stubDataOffset() const { return stubDataOffset_; }
 
+    size_t stubDataSize() const;
+
     StubField::GCType gcType(uint32_t i) const { return (StubField::GCType)gcTypes_[i]; }
 
-    static CacheIRStubInfo* New(CacheKind kind, uint32_t stubDataOffset,
-                                const CacheIRWriter& writer);
+    static CacheIRStubInfo* New(CacheKind kind, ICStubEngine engine, bool canMakeCalls,
+                                uint32_t stubDataOffset, const CacheIRWriter& writer);
 
     template <class T>
     js::GCPtr<T>& getStubField(ICStub* stub, uint32_t field) const;
+
+    void copyStubData(ICStub* src, ICStub* dest) const;
 };
 
 void TraceBaselineCacheIRStub(JSTracer* trc, ICStub* stub, const CacheIRStubInfo* stubInfo);
 
-ICStub* AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer, CacheKind kind,
+ICStub* AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
+                                  CacheKind kind, ICStubEngine engine, JSScript* outerScript,
                                   ICFallbackStub* stub);
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_BaselineCacheIR_h */
--- a/js/src/jit/BaselineDebugModeOSR.cpp
+++ b/js/src/jit/BaselineDebugModeOSR.cpp
@@ -685,28 +685,28 @@ RecompileBaselineScriptForDebugMode(JSCo
 
     // Don't destroy the old baseline script yet, since if we fail any of the
     // recompiles we need to rollback all the old baseline scripts.
     MOZ_ASSERT(script->baselineScript()->hasDebugInstrumentation() == observing);
     return true;
 }
 
 #define PATCHABLE_ICSTUB_KIND_LIST(_)           \
+    _(CacheIR_Monitored)                        \
     _(Call_Scripted)                            \
     _(Call_AnyScripted)                         \
     _(Call_Native)                              \
     _(Call_ClassHook)                           \
     _(Call_ScriptedApplyArray)                  \
     _(Call_ScriptedApplyArguments)              \
     _(Call_ScriptedFunCall)                     \
     _(GetElem_NativePrototypeCallNativeName)    \
     _(GetElem_NativePrototypeCallNativeSymbol)  \
     _(GetElem_NativePrototypeCallScriptedName)  \
     _(GetElem_NativePrototypeCallScriptedSymbol) \
-    _(GetProp_CallScripted)                     \
     _(GetProp_CallNative)                       \
     _(GetProp_CallNativeGlobal)                 \
     _(GetProp_CallDOMProxyNative)               \
     _(GetProp_CallDOMProxyWithGenerationNative) \
     _(GetProp_DOMProxyShadowed)                 \
     _(GetProp_Generic)                          \
     _(SetProp_CallScripted)                     \
     _(SetProp_CallNative)
@@ -714,17 +714,17 @@ RecompileBaselineScriptForDebugMode(JSCo
 static bool
 CloneOldBaselineStub(JSContext* cx, DebugModeOSREntryVector& entries, size_t entryIndex)
 {
     DebugModeOSREntry& entry = entries[entryIndex];
     if (!entry.oldStub)
         return true;
 
     ICStub* oldStub = entry.oldStub;
-    MOZ_ASSERT(ICStub::CanMakeCalls(oldStub->kind()));
+    MOZ_ASSERT(oldStub->makesGCCalls());
 
     if (entry.frameKind == ICEntry::Kind_Invalid) {
         // The exception handler can modify the frame's override pc while
         // unwinding scopes. This is fine, but if we have a stub frame, the
         // code below will get confused: the entry's pcOffset doesn't match the
         // stub that's still on the stack. To prevent that, we just set the new
         // stub to nullptr as we will never return to this stub frame anyway.
         entry.newStub = nullptr;
@@ -758,17 +758,17 @@ CloneOldBaselineStub(JSContext* cx, Debu
     // the new fallback stub if so.
     ICStub* firstMonitorStub;
     if (fallbackStub->isMonitoredFallback()) {
         ICMonitoredFallbackStub* monitored = fallbackStub->toMonitoredFallbackStub();
         firstMonitorStub = monitored->fallbackMonitorStub()->firstMonitorStub();
     } else {
         firstMonitorStub = nullptr;
     }
-    ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForKind(oldStub->kind(), entry.script,
+    ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(oldStub->makesGCCalls(), entry.script,
                                                               ICStubCompiler::Engine::Baseline);
 
     // Clone the existing stub into the recompiled IC.
     //
     // Note that since JitCode is a GC thing, cloning an ICStub with the same
     // JitCode ensures it won't be collected.
     switch (oldStub->kind()) {
 #define CASE_KIND(kindName)                                                  \
--- a/js/src/jit/BaselineInspector.cpp
+++ b/js/src/jit/BaselineInspector.cpp
@@ -691,35 +691,170 @@ GlobalShapeForGetPropFunction(ICStub* st
         Shape* shape = nstub->globalShape();
         MOZ_ASSERT(shape->getObjectClass()->flags & JSCLASS_IS_GLOBAL);
         return shape;
     }
 
     return nullptr;
 }
 
+static bool
+MatchCacheIRReceiverGuard(CacheIRReader& reader, ICCacheIR_Monitored* stub, ObjOperandId objId,
+                          ReceiverGuard* receiver)
+{
+    // This matches the CacheIR emitted in TestMatchingReceiver.
+    //
+    // Either:
+    //
+    //   GuardShape objId
+    //
+    // or:
+    //
+    //   GuardGroup objId
+    //   [GuardNoUnboxedExpando objId]
+    //
+    // or:
+    //
+    //   GuardGroup objId
+    //   expandoId: GuardAndLoadUnboxedExpando
+    //   GuardShape expandoId
+
+    *receiver = ReceiverGuard();
+
+    if (reader.matchOp(CacheOp::GuardShape, objId)) {
+        // The first case.
+        receiver->shape = stub->stubInfo()->getStubField<Shape*>(stub, reader.stubOffset());
+        return true;
+    }
+
+    if (!reader.matchOp(CacheOp::GuardGroup, objId))
+        return false;
+    receiver->group = stub->stubInfo()->getStubField<ObjectGroup*>(stub, reader.stubOffset());
+
+    if (!reader.matchOp(CacheOp::GuardAndLoadUnboxedExpando, objId)) {
+        // Second case, just a group guard.
+        reader.matchOp(CacheOp::GuardNoUnboxedExpando, objId);
+        return true;
+    }
+
+    // Third case.
+    ObjOperandId expandoId = reader.objOperandId();
+    if (!reader.matchOp(CacheOp::GuardShape, expandoId))
+        return false;
+
+    receiver->shape = stub->stubInfo()->getStubField<Shape*>(stub, reader.stubOffset());
+    return true;
+}
+
+static bool
+AddCacheIRGetPropFunction(ICCacheIR_Monitored* stub, JSObject** holder, Shape** holderShape,
+                          JSFunction** commonGetter, Shape** globalShape, bool* isOwnProperty,
+                          BaselineInspector::ReceiverVector& receivers,
+                          BaselineInspector::ObjectGroupVector& convertUnboxedGroups)
+{
+    // We match either an own getter:
+    //
+    //   GuardIsObject objId
+    //   <GuardReceiver objId>
+    //   CallScriptedGetterResult objId
+    //
+    // Or a getter on the prototype:
+    //
+    //   GuardIsObject objId
+    //   <GuardReceiver objId>
+    //   LoadObject holderId
+    //   GuardShape holderId
+    //   CallScriptedGetterResult objId
+
+    CacheIRReader reader(stub->stubInfo());
+
+    ObjOperandId objId = ObjOperandId(0);
+    if (!reader.matchOp(CacheOp::GuardIsObject, objId))
+        return false;
+
+    ReceiverGuard receiver;
+    if (!MatchCacheIRReceiverGuard(reader, stub, objId, &receiver))
+        return false;
+
+    if (reader.matchOp(CacheOp::CallScriptedGetterResult, objId)) {
+        // This is an own property getter, the first case.
+        MOZ_ASSERT(receiver.shape);
+        MOZ_ASSERT(!receiver.group);
+
+        size_t offset = reader.stubOffset();
+        JSFunction* getter =
+            &stub->stubInfo()->getStubField<JSObject*>(stub, offset)->as<JSFunction>();
+
+        if (*commonGetter && (!*isOwnProperty || *globalShape || *holderShape != receiver.shape))
+            return false;
+
+        MOZ_ASSERT_IF(*commonGetter, *commonGetter == getter);
+        *holder = nullptr;
+        *holderShape = receiver.shape;
+        *commonGetter = getter;
+        *isOwnProperty = true;
+        return true;
+    }
+
+    if (!reader.matchOp(CacheOp::LoadObject))
+        return false;
+    ObjOperandId holderId = reader.objOperandId();
+    JSObject* obj = stub->stubInfo()->getStubField<JSObject*>(stub, reader.stubOffset());
+
+    if (!reader.matchOp(CacheOp::GuardShape, holderId))
+        return false;
+    Shape* objShape = stub->stubInfo()->getStubField<Shape*>(stub, reader.stubOffset());
+
+    if (!reader.matchOp(CacheOp::CallScriptedGetterResult, objId))
+        return false;
+
+    // A getter on the prototype.
+    size_t offset = reader.stubOffset();
+    JSFunction* getter =
+        &stub->stubInfo()->getStubField<JSObject*>(stub, offset)->as<JSFunction>();
+
+    if (*commonGetter && (*isOwnProperty || *globalShape || *holderShape != objShape))
+        return false;
+
+    MOZ_ASSERT_IF(*commonGetter, *commonGetter == getter);
+
+    if (!AddReceiver(receiver, receivers, convertUnboxedGroups))
+        return false;
+
+    if (obj->as<NativeObject>().lastProperty() != objShape) {
+        // Skip this stub as the shape is no longer correct.
+        return true;
+    }
+
+    *holder = obj;
+    *holderShape = objShape;
+    *commonGetter = getter;
+    *isOwnProperty = false;
+    return true;
+}
+
 bool
 BaselineInspector::commonGetPropFunction(jsbytecode* pc, JSObject** holder, Shape** holderShape,
                                          JSFunction** commonGetter, Shape** globalShape,
                                          bool* isOwnProperty,
                                          ReceiverVector& receivers,
                                          ObjectGroupVector& convertUnboxedGroups)
 {
     if (!hasBaselineScript())
         return false;
 
     MOZ_ASSERT(receivers.empty());
     MOZ_ASSERT(convertUnboxedGroups.empty());
 
+    *globalShape = nullptr;
     *commonGetter = nullptr;
     const ICEntry& entry = icEntryFromPC(pc);
 
     for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
-        if (stub->isGetProp_CallScripted() ||
-            stub->isGetProp_CallNative() ||
+        if (stub->isGetProp_CallNative() ||
             stub->isGetProp_CallNativeGlobal())
         {
             ICGetPropCallGetter* nstub = static_cast<ICGetPropCallGetter*>(stub);
             bool isOwn = nstub->isOwnGetter();
             if (!isOwn && !AddReceiver(nstub->receiverGuard(), receivers, convertUnboxedGroups))
                 return false;
 
             if (!*commonGetter) {
@@ -731,16 +866,23 @@ BaselineInspector::commonGetPropFunction
             } else if (nstub->holderShape() != *holderShape ||
                        GlobalShapeForGetPropFunction(nstub) != *globalShape ||
                        isOwn != *isOwnProperty)
             {
                 return false;
             } else {
                 MOZ_ASSERT(*commonGetter == nstub->getter());
             }
+        } else if (stub->isCacheIR_Monitored()) {
+            if (!AddCacheIRGetPropFunction(stub->toCacheIR_Monitored(), holder, holderShape,
+                                           commonGetter, globalShape, isOwnProperty, receivers,
+                                           convertUnboxedGroups))
+            {
+                return false;
+            }
         } else if (stub->isGetProp_Fallback()) {
             // If we have an unoptimizable access, don't try to optimize.
             if (stub->toGetProp_Fallback()->hadUnoptimizableAccess())
                 return false;
         } else if (stub->isGetName_Fallback()) {
             if (stub->toGetName_Fallback()->hadUnoptimizableAccess())
                 return false;
         } else {
@@ -842,17 +984,16 @@ BaselineInspector::expectedPropertyAcces
           case ICStub::GetProp_Generic:
             return MIRType::Value;
 
           case ICStub::GetProp_ArgumentsLength:
           case ICStub::GetElem_Arguments:
             // Either an object or magic arguments.
             return MIRType::Value;
 
-          case ICStub::GetProp_CallScripted:
           case ICStub::GetProp_CallNative:
           case ICStub::GetProp_CallDOMProxyNative:
           case ICStub::GetProp_CallDOMProxyWithGenerationNative:
           case ICStub::GetProp_DOMProxyShadowed:
           case ICStub::GetElem_NativeSlotName:
           case ICStub::GetElem_NativeSlotSymbol:
           case ICStub::GetElem_NativePrototypeSlotName:
           case ICStub::GetElem_NativePrototypeSlotSymbol:
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -13,23 +13,27 @@
 
 #include "vm/UnboxedObject-inl.h"
 
 using namespace js;
 using namespace js::jit;
 
 using mozilla::Maybe;
 
-GetPropIRGenerator::GetPropIRGenerator(JSContext* cx, jsbytecode* pc, HandleValue val, HandlePropertyName name,
+GetPropIRGenerator::GetPropIRGenerator(JSContext* cx, jsbytecode* pc, ICStubEngine engine,
+                                       bool* isTemporarilyUnoptimizable,
+                                       HandleValue val, HandlePropertyName name,
                                        MutableHandleValue res)
   : cx_(cx),
     pc_(pc),
     val_(val),
     name_(name),
     res_(res),
+    engine_(engine),
+    isTemporarilyUnoptimizable_(isTemporarilyUnoptimizable),
     emitted_(false),
     preliminaryObjectAction_(PreliminaryObjectAction::None)
 {}
 
 static void
 EmitLoadSlotResult(CacheIRWriter& writer, ObjOperandId holderOp, NativeObject* holder,
                    Shape* shape)
 {
@@ -91,22 +95,23 @@ IsCacheableNoProperty(JSContext* cx, JSO
         return false;
 
     return CheckHasNoSuchProperty(cx, obj, JSID_TO_ATOM(id)->asPropertyName());
 }
 
 enum NativeGetPropCacheability {
     CanAttachNone,
     CanAttachReadSlot,
+    CanAttachCallGetter,
 };
 
 static NativeGetPropCacheability
 CanAttachNativeGetProp(JSContext* cx, HandleObject obj, HandleId id,
                        MutableHandleNativeObject holder, MutableHandleShape shape,
-                       jsbytecode* pc, bool skipArrayLen = false)
+                       jsbytecode* pc, ICStubEngine engine, bool* isTemporarilyUnoptimizable)
 {
     MOZ_ASSERT(JSID_IS_STRING(id) || JSID_IS_SYMBOL(id));
 
     // The lookup needs to be universally pure, otherwise we risk calling hooks out
     // of turn. We don't mind doing this even when purity isn't required, because we
     // only miss out on shape hashification, which is only a temporary perf cost.
     // The limits were arbitrarily set, anyways.
     JSObject* baseHolder = nullptr;
@@ -121,16 +126,22 @@ CanAttachNativeGetProp(JSContext* cx, Ha
     }
 
     if (IsCacheableGetPropReadSlotForIonOrCacheIR(obj, holder, shape) ||
         IsCacheableNoProperty(cx, obj, holder, shape, id, pc))
     {
         return CanAttachReadSlot;
     }
 
+    if (IsCacheableGetPropCallScripted(obj, holder, shape, isTemporarilyUnoptimizable)) {
+        // See bug 1226816.
+        if (engine == ICStubEngine::Baseline)
+            return CanAttachCallGetter;
+    }
+
     return CanAttachNone;
 }
 
 static void
 GeneratePrototypeGuards(CacheIRWriter& writer, JSObject* obj, JSObject* holder, ObjOperandId objId)
 {
     // The guards here protect against the effects of JSObject::swap(). If the
     // prototype chain is directly altered, then TI will toss the jitcode, so we
@@ -224,26 +235,49 @@ EmitReadSlotResult(CacheIRWriter& writer
         MOZ_ASSERT(holderId.valid());
         EmitLoadSlotResult(writer, holderId, &holder->as<NativeObject>(), shape);
     } else {
         MOZ_ASSERT(!holderId.valid());
         writer.loadUndefinedResult();
     }
 }
 
+static void
+EmitCallGetterResult(CacheIRWriter& writer, JSObject* obj, JSObject* holder,
+                     Shape* shape, ObjOperandId objId)
+{
+    Maybe<ObjOperandId> expandoId;
+    TestMatchingReceiver(writer, obj, shape, objId, &expandoId);
+
+    if (obj != holder) {
+        GeneratePrototypeGuards(writer, obj, holder, objId);
+
+        // Guard on the holder's shape.
+        ObjOperandId holderId = writer.loadObject(holder);
+        writer.guardShape(holderId, holder->as<NativeObject>().lastProperty());
+    }
+
+    MOZ_ASSERT(IsCacheableGetPropCallScripted(obj, holder, shape));
+
+    JSFunction* target = &shape->getterValue().toObject().as<JSFunction>();
+    MOZ_ASSERT(target->hasJITCode());
+    writer.callScriptedGetterResult(objId, target);
+}
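
In the common case where GeneratePrototypeGuards emits no extra guards, the
sequence emitted here for a getter on the prototype of a plain native
receiver (together with the GuardIsObject emitted by the caller) is the one
AddCacheIRGetPropFunction in BaselineInspector.cpp pattern-matches:

    //   GuardIsObject objId
    //   GuardShape objId
    //   LoadObject holderId
    //   GuardShape holderId
    //   CallScriptedGetterResult objId
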
+
 bool
 GetPropIRGenerator::tryAttachNative(CacheIRWriter& writer, HandleObject obj, ObjOperandId objId)
 {
     MOZ_ASSERT(!emitted_);
 
     RootedShape shape(cx_);
     RootedNativeObject holder(cx_);
 
     RootedId id(cx_, NameToId(name_));
-    NativeGetPropCacheability type = CanAttachNativeGetProp(cx_, obj, id, &holder, &shape, pc_);
+    NativeGetPropCacheability type = CanAttachNativeGetProp(cx_, obj, id, &holder, &shape, pc_,
+                                                            engine_, isTemporarilyUnoptimizable_);
     if (type == CanAttachNone)
         return true;
 
     emitted_ = true;
 
     switch (type) {
       case CanAttachReadSlot:
         if (holder) {
@@ -253,16 +287,19 @@ GetPropIRGenerator::tryAttachNative(Cach
                 if (IsPreliminaryObject(obj))
                     preliminaryObjectAction_ = PreliminaryObjectAction::NotePreliminary;
                 else
                     preliminaryObjectAction_ = PreliminaryObjectAction::Unlink;
             }
         }
         EmitReadSlotResult(writer, obj, holder, shape, objId);
         break;
+      case CanAttachCallGetter:
+        EmitCallGetterResult(writer, obj, holder, shape, objId);
+        break;
       default:
         MOZ_CRASH("Bad NativeGetPropCacheability");
     }
 
     return true;
 }
 
 bool
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -86,23 +86,26 @@ class ObjOperandId : public OperandId
     _(GuardProto)                         \
     _(GuardClass)                         \
     _(GuardSpecificObject)                \
     _(GuardNoDetachedTypedObjects)        \
     _(GuardNoUnboxedExpando)              \
     _(GuardAndLoadUnboxedExpando)         \
     _(LoadObject)                         \
     _(LoadProto)                          \
+                                          \
+    /* The *Result ops load a value into the cache's result register. */ \
     _(LoadFixedSlotResult)                \
     _(LoadDynamicSlotResult)              \
     _(LoadUnboxedPropertyResult)          \
     _(LoadTypedObjectResult)              \
     _(LoadInt32ArrayLengthResult)         \
     _(LoadUnboxedArrayLengthResult)       \
     _(LoadArgumentsObjectLengthResult)    \
+    _(CallScriptedGetterResult)           \
     _(LoadUndefinedResult)
 
 enum class CacheOp {
 #define DEFINE_OP(op) op,
     CACHE_IR_OPS(DEFINE_OP)
 #undef DEFINE_OP
 };
 
@@ -328,16 +331,20 @@ class MOZ_RAII CacheIRWriter
         writeOpWithOperandId(CacheOp::LoadInt32ArrayLengthResult, obj);
     }
     void loadUnboxedArrayLengthResult(ObjOperandId obj) {
         writeOpWithOperandId(CacheOp::LoadUnboxedArrayLengthResult, obj);
     }
     void loadArgumentsObjectLengthResult(ObjOperandId obj) {
         writeOpWithOperandId(CacheOp::LoadArgumentsObjectLengthResult, obj);
     }
+    void callScriptedGetterResult(ObjOperandId obj, JSFunction* getter) {
+        writeOpWithOperandId(CacheOp::CallScriptedGetterResult, obj);
+        addStubWord(uintptr_t(getter), StubField::GCType::JSObject);
+    }
 };
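
The getter is stored as a stub-data word rather than baked into the JIT code,
which is what lets stubs with identical CacheIR share machine code across
different getters. A sketch of the round trip, using the APIs added in this
patch:

    // Writer side (per attach; the word lands in the stub instance):
    writer.callScriptedGetterResult(objId, getter);

    // Compiler side (shared; see BaselineCacheIRCompiler::emitCallScriptedGetterResult):
    Address getterAddr(stubAddress(reader.stubOffset()));
    masm.loadPtr(getterAddr, callee);
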
 
 class CacheIRStubInfo;
 
 // Helper class for reading CacheIR bytecode.
 class MOZ_RAII CacheIRReader
 {
     CompactBufferReader buffer_;
@@ -400,16 +407,18 @@ class MOZ_RAII CacheIRReader
 // GetPropIRGenerator generates CacheIR for a GetProp IC.
 class MOZ_RAII GetPropIRGenerator
 {
     JSContext* cx_;
     jsbytecode* pc_;
     HandleValue val_;
     HandlePropertyName name_;
     MutableHandleValue res_;
+    ICStubEngine engine_;
+    bool* isTemporarilyUnoptimizable_;
     bool emitted_;
 
     enum class PreliminaryObjectAction { None, Unlink, NotePreliminary };
     PreliminaryObjectAction preliminaryObjectAction_;
 
     MOZ_MUST_USE bool tryAttachNative(CacheIRWriter& writer, HandleObject obj, ObjOperandId objId);
     MOZ_MUST_USE bool tryAttachUnboxed(CacheIRWriter& writer, HandleObject obj, ObjOperandId objId);
     MOZ_MUST_USE bool tryAttachUnboxedExpando(CacheIRWriter& writer, HandleObject obj,
@@ -422,32 +431,33 @@ class MOZ_RAII GetPropIRGenerator
                                                ObjOperandId objId);
 
     MOZ_MUST_USE bool tryAttachPrimitive(CacheIRWriter& writer, ValOperandId valId);
 
     GetPropIRGenerator(const GetPropIRGenerator&) = delete;
     GetPropIRGenerator& operator=(const GetPropIRGenerator&) = delete;
 
   public:
-    GetPropIRGenerator(JSContext* cx, jsbytecode* pc, HandleValue val, HandlePropertyName name,
-                       MutableHandleValue res);
+    GetPropIRGenerator(JSContext* cx, jsbytecode* pc, ICStubEngine engine,
+                       bool* isTemporarilyUnoptimizable,
+                       HandleValue val, HandlePropertyName name, MutableHandleValue res);
 
     bool emitted() const { return emitted_; }
 
     MOZ_MUST_USE bool tryAttachStub(mozilla::Maybe<CacheIRWriter>& writer);
 
     bool shouldUnlinkPreliminaryObjectStubs() const {
         return preliminaryObjectAction_ == PreliminaryObjectAction::Unlink;
     }
     bool shouldNotePreliminaryObjectStub() const {
         return preliminaryObjectAction_ == PreliminaryObjectAction::NotePreliminary;
     }
 };
 
-enum class CacheKind
+enum class CacheKind : uint8_t
 {
     GetProp
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_CacheIR_h */
--- a/js/src/jit/IonCaches.cpp
+++ b/js/src/jit/IonCaches.cpp
@@ -577,34 +577,41 @@ IsCacheableGetPropCallNative(JSObject* o
         return true;
 
     // For getters that need the WindowProxy (instead of the Window) as this
     // object, don't cache if obj is the Window, since our cache will pass that
     // instead of the WindowProxy.
     return !IsWindow(obj);
 }
 
-static bool
-IsCacheableGetPropCallScripted(JSObject* obj, JSObject* holder, Shape* shape)
+bool
+jit::IsCacheableGetPropCallScripted(JSObject* obj, JSObject* holder, Shape* shape,
+                                    bool* isTemporarilyUnoptimizable)
 {
     if (!shape || !IsCacheableProtoChainForIonOrCacheIR(obj, holder))
         return false;
 
     if (!shape->hasGetterValue() || !shape->getterValue().isObject())
         return false;
 
     if (!shape->getterValue().toObject().is<JSFunction>())
         return false;
 
-    JSFunction& getter = shape->getterValue().toObject().as<JSFunction>();
-    if (!getter.hasJITCode())
+    // See IsCacheableGetPropCallNative.
+    if (IsWindow(obj))
         return false;
 
-    // See IsCacheableGetPropCallNative.
-    return !IsWindow(obj);
+    JSFunction& getter = shape->getterValue().toObject().as<JSFunction>();
+    if (!getter.hasJITCode()) {
+        if (isTemporarilyUnoptimizable)
+            *isTemporarilyUnoptimizable = true;
+        return false;
+    }
+
+    return true;
 }
 
 static bool
 IsCacheableGetPropCallPropertyOp(JSObject* obj, JSObject* holder, Shape* shape)
 {
     if (!shape || !IsCacheableProtoChainForIonOrCacheIR(obj, holder))
         return false;
 
--- a/js/src/jit/IonCaches.h
+++ b/js/src/jit/IonCaches.h
@@ -837,12 +837,15 @@ class NameIC : public IonCache
         return *static_cast<const ickind##IC*>(this);                  \
     }
 IONCACHE_KIND_LIST(CACHE_CASTS)
 #undef OPCODE_CASTS
 
 bool IsCacheableProtoChainForIonOrCacheIR(JSObject* obj, JSObject* holder);
 bool IsCacheableGetPropReadSlotForIonOrCacheIR(JSObject* obj, JSObject* holder, Shape* shape);
 
+bool IsCacheableGetPropCallScripted(JSObject* obj, JSObject* holder, Shape* shape,
+                                    bool* isTemporarilyUnoptimizable = nullptr);
+
 } // namespace jit
 } // namespace js
 
 #endif /* jit_IonCaches_h */
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -386,27 +386,33 @@ class JitZone
     OptimizedICStubSpace optimizedStubSpace_;
 
   public:
     OptimizedICStubSpace* optimizedStubSpace() {
         return &optimizedStubSpace_;
     }
 };
 
-enum class CacheKind;
+enum class CacheKind : uint8_t;
 class CacheIRStubInfo;
 
+enum class ICStubEngine : uint8_t {
+    Baseline = 0,
+    IonMonkey
+};
+
 struct CacheIRStubKey : public DefaultHasher<CacheIRStubKey> {
     struct Lookup {
         CacheKind kind;
+        ICStubEngine engine;
         const uint8_t* code;
         uint32_t length;
 
-        Lookup(CacheKind kind, const uint8_t* code, uint32_t length)
-          : kind(kind), code(code), length(length)
+        Lookup(CacheKind kind, ICStubEngine engine, const uint8_t* code, uint32_t length)
+          : kind(kind), engine(engine), code(code), length(length)
         {}
     };
 
     static HashNumber hash(const Lookup& l);
     static bool match(const CacheIRStubKey& entry, const Lookup& l);
 
     UniquePtr<CacheIRStubInfo, JS::FreePolicy> stubInfo;
 
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -1170,17 +1170,17 @@ MarkJitStubFrame(JSTracer* trc, const Ji
 {
     // Mark the ICStub pointer stored in the stub frame. This is necessary
     // so that we don't destroy the stub code after unlinking the stub.
 
     MOZ_ASSERT(frame.type() == JitFrame_IonStub || frame.type() == JitFrame_BaselineStub);
     JitStubFrameLayout* layout = (JitStubFrameLayout*)frame.fp();
 
     if (ICStub* stub = layout->maybeStubPtr()) {
-        MOZ_ASSERT(ICStub::CanMakeCalls(stub->kind()));
+        MOZ_ASSERT(stub->makesGCCalls());
         stub->trace(trc);
     }
 }
 
 static void
 MarkIonAccessorICFrame(JSTracer* trc, const JitFrameIterator& frame)
 {
     MOZ_ASSERT(frame.type() == JitFrame_IonAccessorIC);
--- a/js/src/jit/SharedIC.cpp
+++ b/js/src/jit/SharedIC.cpp
@@ -153,16 +153,69 @@ ICStubIterator::unlink(JSContext* cx)
     MOZ_ASSERT(!unlinked_);
 
     fallbackStub_->unlinkStub(cx->zone(), previousStub_, currentStub_);
 
     // Mark the current iterator position as unlinked, so operator++ works properly.
     unlinked_ = true;
 }
 
+/* static */ bool
+ICStub::NonCacheIRStubMakesGCCalls(Kind kind)
+{
+    MOZ_ASSERT(IsValidKind(kind));
+    MOZ_ASSERT(!IsCacheIRKind(kind));
+
+    switch (kind) {
+      case Call_Fallback:
+      case Call_Scripted:
+      case Call_AnyScripted:
+      case Call_Native:
+      case Call_ClassHook:
+      case Call_ScriptedApplyArray:
+      case Call_ScriptedApplyArguments:
+      case Call_ScriptedFunCall:
+      case Call_StringSplit:
+      case WarmUpCounter_Fallback:
+      case GetElem_NativeSlotName:
+      case GetElem_NativeSlotSymbol:
+      case GetElem_NativePrototypeSlotName:
+      case GetElem_NativePrototypeSlotSymbol:
+      case GetElem_NativePrototypeCallNativeName:
+      case GetElem_NativePrototypeCallNativeSymbol:
+      case GetElem_NativePrototypeCallScriptedName:
+      case GetElem_NativePrototypeCallScriptedSymbol:
+      case GetElem_UnboxedPropertyName:
+      case GetProp_CallNative:
+      case GetProp_CallNativeGlobal:
+      case GetProp_CallDOMProxyNative:
+      case GetProp_CallDOMProxyWithGenerationNative:
+      case GetProp_DOMProxyShadowed:
+      case GetProp_Generic:
+      case SetProp_CallScripted:
+      case SetProp_CallNative:
+      case RetSub_Fallback:
+      // These two fallback stubs don't actually make non-tail calls,
+      // but the fallback code for the bailout path needs to pop the stub frame
+      // pushed during the bailout.
+      case GetProp_Fallback:
+      case SetProp_Fallback:
+        return true;
+      default:
+        return false;
+    }
+}
+
+bool
+ICStub::makesGCCalls() const
+{
+    if (isCacheIR_Monitored())
+        return toCacheIR_Monitored()->stubInfo()->makesGCCalls();
+    return NonCacheIRStubMakesGCCalls(kind());
+}
 
 void
 ICStub::markCode(JSTracer* trc, const char* name)
 {
     JitCode* stubJitCode = jitCode();
     TraceManuallyBarrieredEdge(trc, &stubJitCode, name);
 }
 
@@ -429,24 +482,16 @@ ICStub::trace(JSTracer* trc)
         break;
       }
       case ICStub::GetProp_DOMProxyShadowed: {
         ICGetProp_DOMProxyShadowed* propStub = toGetProp_DOMProxyShadowed();
         TraceEdge(trc, &propStub->shape(), "baseline-getproplistbaseshadowed-stub-shape");
         TraceEdge(trc, &propStub->name(), "baseline-getproplistbaseshadowed-stub-name");
         break;
       }
-      case ICStub::GetProp_CallScripted: {
-        ICGetProp_CallScripted* callStub = toGetProp_CallScripted();
-        callStub->receiverGuard().trace(trc);
-        TraceEdge(trc, &callStub->holder(), "baseline-getpropcallscripted-stub-holder");
-        TraceEdge(trc, &callStub->holderShape(), "baseline-getpropcallscripted-stub-holdershape");
-        TraceEdge(trc, &callStub->getter(), "baseline-getpropcallscripted-stub-getter");
-        break;
-      }
       case ICStub::GetProp_CallNative: {
         ICGetProp_CallNative* callStub = toGetProp_CallNative();
         callStub->receiverGuard().trace(trc);
         TraceEdge(trc, &callStub->holder(), "baseline-getpropcallnative-stub-holder");
         TraceEdge(trc, &callStub->holderShape(), "baseline-getpropcallnative-stub-holdershape");
         TraceEdge(trc, &callStub->getter(), "baseline-getpropcallnative-stub-getter");
         break;
       }
@@ -565,31 +610,31 @@ ICFallbackStub::unlinkStub(Zone* zone, I
     numOptimizedStubs_--;
 
     if (zone->needsIncrementalBarrier()) {
         // We are removing edges from ICStub to gcthings. Perform one final trace
         // of the stub for incremental GC, as it must know about those edges.
         stub->trace(zone->barrierTracer());
     }
 
-    if (ICStub::CanMakeCalls(stub->kind()) && stub->isMonitored()) {
+    if (stub->makesGCCalls() && stub->isMonitored()) {
         // This stub can make calls so we can return to it if it's on the stack.
         // We just have to reset its firstMonitorStub_ field to avoid a stale
         // pointer when purgeOptimizedStubs destroys all optimized monitor
         // stubs (unlinked stubs won't be updated).
         ICTypeMonitor_Fallback* monitorFallback = toMonitoredFallbackStub()->fallbackMonitorStub();
         stub->toMonitoredStub()->resetFirstMonitorStub(monitorFallback);
     }
 
 #ifdef DEBUG
     // Poison stub code to ensure we don't call this stub again. However, if this
     // stub can make calls, a pointer to it may be stored in a stub frame on the
     // stack, so we can't touch the stubCode_ or GC will crash when marking this
     // pointer.
-    if (!ICStub::CanMakeCalls(stub->kind()))
+    if (!stub->makesGCCalls())
         stub->stubCode_ = (uint8_t*)0xbad;
 #endif
 }
 
 void
 ICFallbackStub::unlinkStubsWithKind(JSContext* cx, ICStub::Kind kind)
 {
     for (ICStubIterator iter = beginChain(); !iter.atEnd(); iter++) {
@@ -715,17 +760,17 @@ ICStubCompiler::getStubCode()
     // Cache newly compiled stubcode.
     if (!comp->putStubCode(cx, stubKey, newStubCode))
         return nullptr;
 
     // After generating code, run postGenerateStubCode().  We must not fail
     // after this point.
     postGenerateStubCode(masm, newStubCode);
 
-    MOZ_ASSERT(entersStubFrame_ == ICStub::CanMakeCalls(kind));
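+    // CacheIR stubs are not compiled here (see BaselineCacheIR.cpp), so the
+    // kind-based predicate is the right check.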
+    MOZ_ASSERT(entersStubFrame_ == ICStub::NonCacheIRStubMakesGCCalls(kind));
     MOZ_ASSERT(!inStubFrame_);
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(newStubCode, "BaselineIC");
 #endif
 
     return newStubCode;
 }
@@ -2340,18 +2385,17 @@ IsCacheableGetPropCall(JSContext* cx, JS
 // If 'getter' is an own property, holder == receiver must be true.
 bool
 UpdateExistingGetPropCallStubs(ICFallbackStub* fallbackStub,
                                ICStub::Kind kind,
                                HandleNativeObject holder,
                                HandleObject receiver,
                                HandleFunction getter)
 {
-    MOZ_ASSERT(kind == ICStub::GetProp_CallScripted ||
-               kind == ICStub::GetProp_CallNative ||
+    MOZ_ASSERT(kind == ICStub::GetProp_CallNative ||
                kind == ICStub::GetProp_CallNativeGlobal);
     MOZ_ASSERT(fallbackStub->isGetName_Fallback() ||
                fallbackStub->isGetProp_Fallback());
     MOZ_ASSERT(holder);
     MOZ_ASSERT(receiver);
 
     bool isOwnGetter = (holder == receiver);
     bool foundMatchingStub = false;
@@ -2425,43 +2469,16 @@ TryAttachNativeGetAccessorPropStub(JSCon
 
     ICStub* monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
 
     bool isScripted = false;
     bool cacheableCall = IsCacheableGetPropCall(cx, obj, holder, shape, &isScripted,
                                                 isTemporarilyUnoptimizable,
                                                 isDOMProxy);
 
-    // Try handling scripted getters.
-    if (cacheableCall && isScripted && !isDOMProxy &&
-        info->engine() == ICStubCompiler::Engine::Baseline)
-    {
-        RootedFunction callee(cx, &shape->getterObject()->as<JSFunction>());
-        MOZ_ASSERT(callee->hasScript());
-
-        if (UpdateExistingGetPropCallStubs(stub, ICStub::GetProp_CallScripted,
-                                           holder.as<NativeObject>(), obj, callee)) {
-            *attached = true;
-            return true;
-        }
-
-        JitSpew(JitSpew_BaselineIC, "  Generating GetProp(NativeObj/ScriptedGetter %s:%" PRIuSIZE ") stub",
-                callee->nonLazyScript()->filename(), callee->nonLazyScript()->lineno());
-
-        ICGetProp_CallScripted::Compiler compiler(cx, monitorStub, obj, holder, callee,
-                                                  info->pcOffset());
-        ICStub* newStub = compiler.getStub(compiler.getStubSpace(info->outerScript(cx)));
-        if (!newStub)
-            return false;
-
-        stub->addNewStub(newStub);
-        *attached = true;
-        return true;
-    }
-
     // If it's a shadowed listbase proxy property, attach stub to call Proxy::get instead.
     if (isDOMProxy && DOMProxyIsShadowing(domProxyShadowsResult)) {
         MOZ_ASSERT(obj == holder);
 
         JitSpew(JitSpew_BaselineIC, "  Generating GetProp(DOMProxyProxy) stub");
         Rooted<ProxyObject*> proxy(cx, &obj->as<ProxyObject>());
         ICGetProp_DOMProxyShadowed::Compiler compiler(cx, info->engine(), monitorStub, proxy, name,
                                                       info->pcOffset());
@@ -2672,33 +2689,34 @@ DoGetPropFallback(JSContext* cx, void* p
         if (!newStub)
             return false;
         stub->addNewStub(newStub);
         attached = true;
     }
 
     if (!attached && !JitOptions.disableCacheIR) {
         mozilla::Maybe<CacheIRWriter> writer;
-        GetPropIRGenerator gen(cx, pc, val, name, res);
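+        // The generator sets isTemporarilyUnoptimizable when it declines to
+        // attach a stub for a getter that may become optimizable later on.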
+        GetPropIRGenerator gen(cx, pc, engine, &isTemporarilyUnoptimizable, val, name, res);
         if (!gen.tryAttachStub(writer))
             return false;
         if (gen.emitted()) {
-            ICStub* newStub = AttachBaselineCacheIRStub(cx, writer.ref(), CacheKind::GetProp, stub);
+            ICStub* newStub = AttachBaselineCacheIRStub(cx, writer.ref(), CacheKind::GetProp,
+                                                        engine, info.outerScript(cx), stub);
             if (newStub) {
                 JitSpew(JitSpew_BaselineIC, "  Attached CacheIR stub");
                 attached = true;
                 if (gen.shouldNotePreliminaryObjectStub())
                     newStub->toCacheIR_Monitored()->notePreliminaryObject();
                 else if (gen.shouldUnlinkPreliminaryObjectStubs())
                     StripPreliminaryObjectStubs(cx, stub);
             }
         }
     }
 
-    if (!attached && !stub.invalid() &&
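+    // Skip the remaining accessor stubs if the getter was flagged as
+    // temporarily unoptimizable above; a later call can retry.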
+    if (!attached && !stub.invalid() && !isTemporarilyUnoptimizable &&
         !TryAttachNativeGetAccessorPropStub(cx, &info, stub, name, val, res, &attached,
                                             &isTemporarilyUnoptimizable))
     {
         return false;
     }
 
     if (!ComputeGetPropResult(cx, info.maybeFrame(), op, name, val, res))
         return false;
@@ -2957,108 +2975,16 @@ GetProtoShapes(JSObject* obj, size_t pro
         curProto = curProto->staticPrototype();
     }
 
     MOZ_ASSERT(!curProto,
                "longer prototype chain encountered than this stub permits!");
     return true;
 }
 
-bool
-ICGetProp_CallScripted::Compiler::generateStubCode(MacroAssembler& masm)
-{
-    MOZ_ASSERT(engine_ == Engine::Baseline);
-
-    Label failure;
-    Label failureLeaveStubFrame;
-    AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
-    Register scratch = regs.takeAnyExcluding(ICTailCallReg);
-
-    // Guard input is an object.
-    masm.branchTestObject(Assembler::NotEqual, R0, &failure);
-
-    // Unbox and shape guard.
-    Register objReg = masm.extractObject(R0, ExtractTemp0);
-    GuardReceiverObject(masm, ReceiverGuard(receiver_), objReg, scratch,
-                        ICGetProp_CallScripted::offsetOfReceiverGuard(), &failure);
-
-    if (receiver_ != holder_) {
-        Register holderReg = regs.takeAny();
-        masm.loadPtr(Address(ICStubReg, ICGetProp_CallScripted::offsetOfHolder()), holderReg);
-        masm.loadPtr(Address(ICStubReg, ICGetProp_CallScripted::offsetOfHolderShape()), scratch);
-        masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratch, &failure);
-        regs.add(holderReg);
-    }
-
-    // Push a stub frame so that we can perform a non-tail call.
-    enterStubFrame(masm, scratch);
-
-    // Load callee function and code.  To ensure that |code| doesn't end up being
-    // ArgumentsRectifierReg, if it's available we assign it to |callee| instead.
-    Register callee;
-    if (regs.has(ArgumentsRectifierReg)) {
-        callee = ArgumentsRectifierReg;
-        regs.take(callee);
-    } else {
-        callee = regs.takeAny();
-    }
-    Register code = regs.takeAny();
-    masm.loadPtr(Address(ICStubReg, ICGetProp_CallScripted::offsetOfGetter()), callee);
-    masm.branchIfFunctionHasNoScript(callee, &failureLeaveStubFrame);
-    masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
-    masm.loadBaselineOrIonRaw(code, code, &failureLeaveStubFrame);
-
-    // Align the stack such that the JitFrameLayout is aligned on
-    // JitStackAlignment.
-    masm.alignJitStackBasedOnNArgs(0);
-
-    // Getter is called with 0 arguments, just |obj| as thisv.
-    // Note that we use Push, not push, so that callJit will align the stack
-    // properly on ARM.
-    masm.Push(R0);
-    EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
-    masm.Push(Imm32(0));  // ActualArgc is 0
-    masm.Push(callee);
-    masm.Push(scratch);
-
-    // Handle arguments underflow.
-    Label noUnderflow;
-    masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
-    masm.branch32(Assembler::Equal, scratch, Imm32(0), &noUnderflow);
-    {
-        // Call the arguments rectifier.
-        MOZ_ASSERT(ArgumentsRectifierReg != code);
-
-        JitCode* argumentsRectifier =
-            cx->runtime()->jitRuntime()->getArgumentsRectifier();
-
-        masm.movePtr(ImmGCPtr(argumentsRectifier), code);
-        masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
-        masm.movePtr(ImmWord(0), ArgumentsRectifierReg);
-    }
-
-    masm.bind(&noUnderflow);
-    masm.callJit(code);
-
-    leaveStubFrame(masm, true);
-
-    // Enter type monitor IC to type-check result.
-    EmitEnterTypeMonitorIC(masm);
-
-    // Leave stub frame and go to next stub.
-    masm.bind(&failureLeaveStubFrame);
-    inStubFrame_ = true;
-    leaveStubFrame(masm, false);
-
-    // Failure case - jump to next stub
-    masm.bind(&failure);
-    EmitStubGuardFailure(masm);
-    return true;
-}
-
 //
 // VM function to help call native getters.
 //
 
 bool
 DoCallNativeGetter(JSContext* cx, HandleFunction callee, HandleObject obj,
                    MutableHandleValue result)
 {
@@ -3626,33 +3552,22 @@ ICGetPropCallGetter::ICGetPropCallGetter
                                          uint32_t pcOffset)
   : ICMonitoredStub(kind, stubCode, firstMonitorStub),
     receiverGuard_(receiverGuard),
     holder_(holder),
     holderShape_(holderShape),
     getter_(getter),
     pcOffset_(pcOffset)
 {
-    MOZ_ASSERT(kind == ICStub::GetProp_CallScripted  ||
-               kind == ICStub::GetProp_CallNative    ||
+    MOZ_ASSERT(kind == ICStub::GetProp_CallNative ||
                kind == ICStub::GetProp_CallNativeGlobal ||
                kind == ICStub::GetProp_CallDOMProxyNative ||
                kind == ICStub::GetProp_CallDOMProxyWithGenerationNative);
 }
 
-/* static */ ICGetProp_CallScripted*
-ICGetProp_CallScripted::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
-                              ICGetProp_CallScripted& other)
-{
-    return New<ICGetProp_CallScripted>(cx, space, other.jitCode(), firstMonitorStub,
-                                       other.receiverGuard(),
-                                       other.holder_, other.holderShape_,
-                                       other.getter_, other.pcOffset_);
-}
-
 /* static */ ICGetProp_CallNative*
 ICGetProp_CallNative::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
                             ICGetProp_CallNative& other)
 {
     return New<ICGetProp_CallNative>(cx, space, other.jitCode(), firstMonitorStub,
                                      other.receiverGuard(), other.holder_,
                                      other.holderShape_, other.getter_, other.pcOffset_);
 }
@@ -4254,17 +4169,17 @@ DoNewObject(JSContext* cx, void* payload
                 (templateObject->is<UnboxedPlainObject>() ||
                  !templateObject->as<PlainObject>().hasDynamicSlots()))
             {
                 JitCode* code = GenerateNewObjectWithTemplateCode(cx, templateObject);
                 if (!code)
                     return false;
 
                 ICStubSpace* space =
-                    ICStubCompiler::StubSpaceForKind(ICStub::NewObject_WithTemplate, script,
+                    ICStubCompiler::StubSpaceForStub(/* makesGCCalls = */ false, script,
                                                      ICStubCompiler::Engine::Baseline);
                 ICStub* templateStub = ICStub::New<ICNewObject_WithTemplate>(cx, space, code);
                 if (!templateStub)
                     return false;
 
                 stub->addNewStub(templateStub);
             }
 
--- a/js/src/jit/SharedIC.h
+++ b/js/src/jit/SharedIC.h
@@ -499,19 +499,22 @@ class ICStub
         INVALID = 0,
 #define DEF_ENUM_KIND(kindName) kindName,
         IC_BASELINE_STUB_KIND_LIST(DEF_ENUM_KIND)
         IC_SHARED_STUB_KIND_LIST(DEF_ENUM_KIND)
 #undef DEF_ENUM_KIND
         LIMIT
     };
 
-    static inline bool IsValidKind(Kind k) {
+    static bool IsValidKind(Kind k) {
         return (k > INVALID) && (k < LIMIT);
     }
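+    // CacheIR stubs carry their own metadata (a CacheIRStubInfo), so some
+    // kind-based predicates, e.g. NonCacheIRStubMakesGCCalls, exclude them.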
+    static bool IsCacheIRKind(Kind k) {
+        return k == CacheIR_Monitored;
+    }
 
     static const char* KindString(Kind k) {
         switch(k) {
 #define DEF_KIND_STR(kindName) case kindName: return #kindName;
             IC_BASELINE_STUB_KIND_LIST(DEF_KIND_STR)
             IC_SHARED_STUB_KIND_LIST(DEF_KIND_STR)
 #undef DEF_KIND_STR
           default:
@@ -705,66 +708,26 @@ class ICStub
     static inline size_t offsetOfStubCode() {
         return offsetof(ICStub, stubCode_);
     }
 
     static inline size_t offsetOfExtra() {
         return offsetof(ICStub, extra_);
     }
 
-    static bool CanMakeCalls(ICStub::Kind kind) {
-        MOZ_ASSERT(IsValidKind(kind));
-        switch (kind) {
-          case Call_Fallback:
-          case Call_Scripted:
-          case Call_AnyScripted:
-          case Call_Native:
-          case Call_ClassHook:
-          case Call_ScriptedApplyArray:
-          case Call_ScriptedApplyArguments:
-          case Call_ScriptedFunCall:
-          case Call_StringSplit:
-          case WarmUpCounter_Fallback:
-          case GetElem_NativeSlotName:
-          case GetElem_NativeSlotSymbol:
-          case GetElem_NativePrototypeSlotName:
-          case GetElem_NativePrototypeSlotSymbol:
-          case GetElem_NativePrototypeCallNativeName:
-          case GetElem_NativePrototypeCallNativeSymbol:
-          case GetElem_NativePrototypeCallScriptedName:
-          case GetElem_NativePrototypeCallScriptedSymbol:
-          case GetElem_UnboxedPropertyName:
-          case GetProp_CallScripted:
-          case GetProp_CallNative:
-          case GetProp_CallNativeGlobal:
-          case GetProp_CallDOMProxyNative:
-          case GetProp_CallDOMProxyWithGenerationNative:
-          case GetProp_DOMProxyShadowed:
-          case GetProp_Generic:
-          case SetProp_CallScripted:
-          case SetProp_CallNative:
-          case RetSub_Fallback:
-          // These two fallback stubs don't actually make non-tail calls,
-          // but the fallback code for the bailout path needs to pop the stub frame
-          // pushed during the bailout.
-          case GetProp_Fallback:
-          case SetProp_Fallback:
-            return true;
-          default:
-            return false;
-        }
-    }
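+    // Whether this stub makes non-tail calls and can therefore be on the
+    // stack, in a stub frame, during GC. For CacheIR stubs the answer is
+    // stored in the stub's CacheIRStubInfo; for other stubs it depends only
+    // on the kind.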
+    static bool NonCacheIRStubMakesGCCalls(Kind kind);
+    bool makesGCCalls() const;
 
     // Optimized stubs get purged on GC.  But some stubs can be active on the
     // stack during GC - specifically the ones that can make calls.  To ensure
     // that these do not get purged, all stubs that can make calls are allocated
     // in the fallback stub space.
     bool allocatedInFallbackSpace() const {
         MOZ_ASSERT(next());
-        return CanMakeCalls(kind());
+        return makesGCCalls();
     }
 };
 
 class ICFallbackStub : public ICStub
 {
     friend class ICStubConstIterator;
   protected:
     // Fallback stubs need these fields to easily add new stubs to
@@ -897,24 +860,28 @@ class ICMonitoredStub : public ICStub
 
     static inline size_t offsetOfFirstMonitorStub() {
         return offsetof(ICMonitoredStub, firstMonitorStub_);
     }
 };
 
 class ICCacheIR_Monitored : public ICMonitoredStub
 {
-    CacheIRStubInfo* stubInfo_;
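+    // The stub info may be shared by all stubs compiled from the same
+    // CacheIR, so it must not be mutated.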
+    const CacheIRStubInfo* stubInfo_;
 
   public:
-    ICCacheIR_Monitored(JitCode* stubCode, ICStub* firstMonitorStub, CacheIRStubInfo* stubInfo)
+    ICCacheIR_Monitored(JitCode* stubCode, ICStub* firstMonitorStub,
+                        const CacheIRStubInfo* stubInfo)
       : ICMonitoredStub(ICStub::CacheIR_Monitored, stubCode, firstMonitorStub),
         stubInfo_(stubInfo)
     {}
 
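+    // Clone this stub with a new firstMonitorStub_ (used when monitor stub
+    // chains are rebuilt, e.g. by Baseline debug-mode OSR).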
+    static ICCacheIR_Monitored* Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
+                                      ICCacheIR_Monitored& other);
+
     void notePreliminaryObject() {
         extra_ = 1;
     }
     bool hasPreliminaryObject() const {
         return extra_;
     }
 
     const CacheIRStubInfo* stubInfo() const {
@@ -989,20 +956,17 @@ class ICUpdatedStub : public ICStub
 
 // Base class for stubcode compilers.
 class ICStubCompiler
 {
     // Prevent GC in the middle of stub compilation.
     js::gc::AutoSuppressGC suppressGC;
 
   public:
-    enum class Engine {
-        Baseline = 0,
-        IonMonkey
-    };
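+    // Engine aliases the shared ICStubEngine enum so that code outside the
+    // stub compilers (e.g. the Baseline CacheIR code) can name the engine.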
+    using Engine = ICStubEngine;
 
   protected:
     JSContext* cx;
     ICStub::Kind kind;
     Engine engine_;
     bool inStubFrame_;
 
 #ifdef DEBUG
@@ -1102,27 +1066,26 @@ class ICStubCompiler
     template <typename T, typename... Args>
     T* newStub(Args&&... args) {
         return ICStub::New<T>(cx, mozilla::Forward<Args>(args)...);
     }
 
   public:
     virtual ICStub* getStub(ICStubSpace* space) = 0;
 
-    static ICStubSpace* StubSpaceForKind(ICStub::Kind kind, JSScript* outerScript, Engine engine) {
-        if (ICStub::CanMakeCalls(kind)) {
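+    // Stubs that make GC calls can be on the stack during GC, so they must
+    // be allocated in the fallback stub space, which is not purged on GC.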
+    static ICStubSpace* StubSpaceForStub(bool makesGCCalls, JSScript* outerScript, Engine engine) {
+        if (makesGCCalls) {
             if (engine == ICStubCompiler::Engine::Baseline)
                 return outerScript->baselineScript()->fallbackStubSpace();
             return outerScript->ionScript()->fallbackStubSpace();
         }
         return outerScript->zone()->jitZone()->optimizedStubSpace();
     }
-
     ICStubSpace* getStubSpace(JSScript* outerScript) {
-        return StubSpaceForKind(kind, outerScript, engine_);
+        return StubSpaceForStub(ICStub::NonCacheIRStubMakesGCCalls(kind), outerScript, engine_);
     }
 };
 
 class SharedStubInfo
 {
     BaselineFrame* maybeFrame_;
     RootedScript outerScript_;
     RootedScript innerScript_;
@@ -2661,64 +2624,22 @@ class ICGetPropCallGetter : public ICMon
           : ICStubCompiler(cx, kind, engine),
             firstMonitorStub_(firstMonitorStub),
             receiver_(cx, receiver),
             holder_(cx, holder),
             getter_(cx, getter),
             pcOffset_(pcOffset),
             outerClass_(outerClass)
         {
-            MOZ_ASSERT(kind == ICStub::GetProp_CallScripted ||
-                       kind == ICStub::GetProp_CallNative ||
+            MOZ_ASSERT(kind == ICStub::GetProp_CallNative ||
                        kind == ICStub::GetProp_CallNativeGlobal);
         }
     };
 };
 
-// Stub for calling a scripted getter on a native object when the getter is kept on the
-// proto-chain.
-class ICGetProp_CallScripted : public ICGetPropCallGetter
-{
-    friend class ICStubSpace;
-
-  protected:
-    ICGetProp_CallScripted(JitCode* stubCode, ICStub* firstMonitorStub,
-                           ReceiverGuard receiverGuard,
-                           JSObject* holder, Shape* holderShape,
-                           JSFunction* getter, uint32_t pcOffset)
-      : ICGetPropCallGetter(GetProp_CallScripted, stubCode, firstMonitorStub,
-                            receiverGuard, holder, holderShape, getter, pcOffset)
-    {}
-
-  public:
-    static ICGetProp_CallScripted* Clone(JSContext* cx, ICStubSpace* space,
-                                         ICStub* firstMonitorStub, ICGetProp_CallScripted& other);
-
-    class Compiler : public ICGetPropCallGetter::Compiler {
-      protected:
-        MOZ_MUST_USE bool generateStubCode(MacroAssembler& masm);
-
-      public:
-        Compiler(JSContext* cx, ICStub* firstMonitorStub, HandleObject obj,
-                 HandleObject holder, HandleFunction getter, uint32_t pcOffset)
-          : ICGetPropCallGetter::Compiler(cx, ICStub::GetProp_CallScripted, Engine::Baseline,
-                                          firstMonitorStub, obj, holder,
-                                          getter, pcOffset, /* outerClass = */ nullptr)
-        {}
-
-        ICStub* getStub(ICStubSpace* space) {
-            ReceiverGuard guard(receiver_);
-            Shape* holderShape = holder_->as<NativeObject>().lastProperty();
-            return newStub<ICGetProp_CallScripted>(space, getStubCode(), firstMonitorStub_,
-                                                       guard, holder_, holderShape, getter_,
-                                                       pcOffset_);
-        }
-    };
-};
-
 // Stub for calling a native getter on a native object.
 class ICGetProp_CallNative : public ICGetPropCallGetter
 {
     friend class ICStubSpace;
 
   protected:
 
     ICGetProp_CallNative(JitCode* stubCode, ICStub* firstMonitorStub,
--- a/js/src/jit/SharedICList.h
+++ b/js/src/jit/SharedICList.h
@@ -31,17 +31,16 @@ namespace jit {
     _(Compare_String)                            \
     _(Compare_Boolean)                           \
     _(Compare_Object)                            \
     _(Compare_ObjectWithUndefined)               \
     _(Compare_Int32WithBoolean)                  \
                                                  \
     _(GetProp_Fallback)                          \
     _(GetProp_StringLength)                      \
-    _(GetProp_CallScripted)                      \
     _(GetProp_CallNative)                        \
     _(GetProp_CallNativeGlobal)                  \
     _(GetProp_CallDOMProxyNative)                \
     _(GetProp_CallDOMProxyWithGenerationNative)  \
     _(GetProp_DOMProxyShadowed)                  \
     _(GetProp_ArgumentsLength)                   \
     _(GetProp_ArgumentsCallee)                   \
     _(GetProp_Generic)                           \