Bug 1499649 - Split ICEntry in ICEntry and RetAddrEntry, get rid of fake ICEntries. r=tcampbell
authorJan de Mooij <jdemooij@mozilla.com>
Wed, 24 Oct 2018 07:55:51 +0000
changeset 442734 daf8a3b98eee4a27c5691ecea4d4699dfc33cd31
parent 442733 deb666fd309c96f986823290ffbb55f6dc4ca492
child 442735 0ce248abad81e801972662b8c56a726f039a9cf0
push id34921
push usershindli@mozilla.com
push dateWed, 24 Oct 2018 13:27:03 +0000
treeherdermozilla-central@d94d73fcec77 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerstcampbell
bugs1499649
milestone65.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1499649 - Split ICEntry in ICEntry and RetAddrEntry, get rid of fake ICEntries. r=tcampbell The return address/offset is no longer stored in ICEntry but in RetAddrEntry. ICEntry now only stores IC-related fields. As a follow-up I think we should get rid of ICEntry::isForOp_ but this patch is big enough as it is. Differential Revision: https://phabricator.services.mozilla.com/D8963
js/src/jit/BaselineBailouts.cpp
js/src/jit/BaselineCompiler.cpp
js/src/jit/BaselineCompiler.h
js/src/jit/BaselineDebugModeOSR.cpp
js/src/jit/BaselineDebugModeOSR.h
js/src/jit/BaselineFrame.cpp
js/src/jit/BaselineFrame.h
js/src/jit/BaselineIC.cpp
js/src/jit/BaselineIC.h
js/src/jit/BaselineJIT.cpp
js/src/jit/BaselineJIT.h
js/src/jit/JSJitFrameIter.cpp
js/src/jit/VMFunctions.cpp
js/src/jit/arm/SharedICHelpers-arm.h
js/src/jit/arm64/SharedICHelpers-arm64.h
js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
js/src/jit/none/SharedICHelpers-none.h
js/src/jit/x64/SharedICHelpers-x64.h
js/src/jit/x86/SharedICHelpers-x86.h
--- a/js/src/jit/BaselineBailouts.cpp
+++ b/js/src/jit/BaselineBailouts.cpp
@@ -1188,20 +1188,23 @@ InitFromBailout(JSContext* cx, size_t fr
                 blFrame->setFrameSize(frameSize);
                 JitSpew(JitSpew_BaselineBailouts, "      Adjusted framesize += %d: %d",
                                 (int) (numUses * sizeof(Value)),
                                 (int) frameSize);
             }
 
             // Set the resume address to the return point from the IC, and set
             // the monitor stub addr.
-            builder.setResumeAddr(baselineScript->returnAddressForIC(icEntry));
+            RetAddrEntry& retAddrEntry =
+                baselineScript->retAddrEntryFromPCOffset(pcOff, RetAddrEntry::Kind::IC);
+            uint8_t* retAddr = baselineScript->returnAddressForEntry(retAddrEntry);
+            builder.setResumeAddr(retAddr);
             builder.setMonitorStub(firstMonStub);
             JitSpew(JitSpew_BaselineBailouts, "      Set resumeAddr=%p monitorStub=%p",
-                    baselineScript->returnAddressForIC(icEntry), firstMonStub);
+                    retAddr, firstMonStub);
 
         } else {
             // If needed, initialize BaselineBailoutInfo's valueR0 and/or valueR1 with the
             // top stack values.
             //
             // Note that we use the 'maybe' variant of nativeCodeForPC because
             // of exception propagation for debug mode. See note below.
             PCMappingSlotInfo slotInfo;
@@ -1304,17 +1307,20 @@ InitFromBailout(JSContext* cx, size_t fr
     if (!builder.writeWord(baselineFrameDescr, "Descriptor")) {
         return false;
     }
 
     // Calculate and write out return address.
     // The icEntry in question MUST have an inlinable fallback stub.
     ICEntry& icEntry = baselineScript->icEntryFromPCOffset(pcOff);
     MOZ_ASSERT(IsInlinableFallback(icEntry.firstStub()->getChainFallback()));
-    if (!builder.writePtr(baselineScript->returnAddressForIC(icEntry), "ReturnAddr")) {
+
+    RetAddrEntry& retAddrEntry =
+        baselineScript->retAddrEntryFromPCOffset(pcOff, RetAddrEntry::Kind::IC);
+    if (!builder.writePtr(baselineScript->returnAddressForEntry(retAddrEntry), "ReturnAddr")) {
         return false;
     }
 
     // Build baseline stub frame:
     // +===============+
     // |    StubPtr    |
     // +---------------+
     // |   FramePtr    |
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -240,16 +240,17 @@ BaselineCompiler::compile()
     size_t bytecodeTypeMapEntries = script->nTypeSets() + 1;
     UniquePtr<BaselineScript> baselineScript(
         BaselineScript::New(script, prologueOffset_.offset(),
                             epilogueOffset_.offset(),
                             profilerEnterFrameToggleOffset_.offset(),
                             profilerExitFrameToggleOffset_.offset(),
                             postDebugPrologueOffset_.offset(),
                             icEntries_.length(),
+                            retAddrEntries_.length(),
                             pcMappingIndexEntries.length(),
                             pcEntries.length(),
                             bytecodeTypeMapEntries,
                             yieldAndAwaitOffsets_.length(),
                             traceLoggerToggleOffsets_.length()),
         JS::DeletePolicy<BaselineScript>(cx->runtime()));
     if (!baselineScript) {
         ReportOutOfMemory(cx);
@@ -264,20 +265,23 @@ BaselineCompiler::compile()
             script->filename(), script->lineno(), script->column());
 
     MOZ_ASSERT(pcMappingIndexEntries.length() > 0);
     baselineScript->copyPCMappingIndexEntries(&pcMappingIndexEntries[0]);
 
     MOZ_ASSERT(pcEntries.length() > 0);
     baselineScript->copyPCMappingEntries(pcEntries);
 
-    // Copy IC entries
-    if (icEntries_.length()) {
+    // Copy ICEntries and RetAddrEntries.
+    if (icEntries_.length() > 0) {
         baselineScript->copyICEntries(script, &icEntries_[0]);
     }
+    if (retAddrEntries_.length() > 0) {
+        baselineScript->copyRetAddrEntries(script, &retAddrEntries_[0]);
+    }
 
     // Adopt fallback stubs from the compiler into the baseline script.
     baselineScript->adoptFallbackStubs(&stubSpace_);
 
     // If profiler instrumentation is enabled, toggle instrumentation on.
     if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime())) {
         baselineScript->toggleProfilerInstrumentation(true);
     }
@@ -573,26 +577,38 @@ BaselineCompiler::emitOutOfLinePostBarri
     masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier));
 
     masm.popValue(R0);
     masm.ret();
     return true;
 }
 
 bool
-BaselineCompiler::emitIC(ICStub* stub, ICEntry::Kind kind)
-{
-    ICEntry* entry = allocateICEntry(stub, kind);
-    if (!entry) {
+BaselineCompiler::emitIC(ICStub* stub, bool isForOp)
+{
+    if (!stub) {
         return false;
     }
 
-    CodeOffset patchOffset;
-    EmitCallIC(&patchOffset, masm);
-    entry->setReturnOffset(CodeOffset(masm.currentOffset()));
+    CodeOffset patchOffset, callOffset;
+    EmitCallIC(masm, &patchOffset, &callOffset);
+
+    // ICs need both an ICEntry and a RetAddrEntry.
+
+    RetAddrEntry::Kind kind = isForOp ? RetAddrEntry::Kind::IC : RetAddrEntry::Kind::NonOpIC;
+    if (!retAddrEntries_.emplaceBack(script->pcToOffset(pc), kind, callOffset)) {
+        ReportOutOfMemory(cx);
+        return false;
+    }
+
+    if (!icEntries_.emplaceBack(stub, script->pcToOffset(pc), isForOp)) {
+        ReportOutOfMemory(cx);
+        return false;
+    }
+
     if (!addICLoadLabel(patchOffset)) {
         return false;
     }
 
     return true;
 }
 
 void
@@ -681,19 +697,17 @@ BaselineCompiler::callVM(const VMFunctio
         Label ok;
         masm.branchTest32(Assembler::Zero, frame.addressOfFlags(),
                           Imm32(BaselineFrame::HAS_OVERRIDE_PC), &ok);
         masm.assumeUnreachable("BaselineFrame shouldn't override pc after VM call");
         masm.bind(&ok);
     }
 #endif
 
-    // Add a fake ICEntry (without stubs), so that the return offset to
-    // pc mapping works.
-    return appendICEntry(ICEntry::Kind_CallVM, callOffset);
+    return appendRetAddrEntry(RetAddrEntry::Kind::CallVM, callOffset);
 }
 
 typedef bool (*CheckOverRecursedBaselineFn)(JSContext*, BaselineFrame*);
 static const VMFunction CheckOverRecursedBaselineInfo =
     FunctionInfo<CheckOverRecursedBaselineFn>(CheckOverRecursedBaseline,
                                               "CheckOverRecursedBaseline");
 
 bool
@@ -730,17 +744,17 @@ BaselineCompiler::emitStackCheck()
     if (needsEarlyStackCheck()) {
         phase = CHECK_OVER_RECURSED;
     }
 
     if (!callVMNonOp(CheckOverRecursedBaselineInfo, phase)) {
         return false;
     }
 
-    icEntries_.back().setFakeKind(ICEntry::Kind_StackCheck);
+    retAddrEntries_.back().setKind(RetAddrEntry::Kind::StackCheck);
 
     masm.bind(&skipCall);
     return true;
 }
 
 void
 BaselineCompiler::emitIsDebuggeeCheck()
 {
@@ -767,18 +781,18 @@ BaselineCompiler::emitDebugPrologue()
 
         prepareVMCall();
         pushArg(ImmPtr(pc));
         pushArg(R0.scratchReg());
         if (!callVM(DebugPrologueInfo)) {
             return false;
         }
 
-        // Fix up the fake ICEntry appended by callVM for on-stack recompilation.
-        icEntries_.back().setFakeKind(ICEntry::Kind_DebugPrologue);
+        // Fix up the RetAddrEntry appended by callVM for on-stack recompilation.
+        retAddrEntries_.back().setKind(RetAddrEntry::Kind::DebugPrologue);
 
         // If the stub returns |true|, we have to return the value stored in the
         // frame's return value slot.
         Label done;
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &done);
         {
             masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
             masm.jump(&return_);
@@ -940,18 +954,18 @@ BaselineCompiler::emitWarmUpCounterIncre
 
         masm.Push(ImmPtr(pc));
         masm.PushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
 
         if (!callVM(IonCompileScriptForBaselineInfo)) {
             return false;
         }
 
-        // Annotate the ICEntry as warmup counter.
-        icEntries_.back().setFakeKind(ICEntry::Kind_WarmupCounter);
+        // Annotate the RetAddrEntry as warmup counter.
+        retAddrEntries_.back().setKind(RetAddrEntry::Kind::WarmupCounter);
     }
     masm.bind(&skipCall);
 
     return true;
 }
 
 bool
 BaselineCompiler::emitArgumentTypeChecks()
@@ -1005,18 +1019,18 @@ BaselineCompiler::emitDebugTrap()
     mozilla::DebugOnly<CodeOffset> offset = masm.toggledCall(handler, enabled);
 
 #ifdef DEBUG
     // Patchable call offset has to match the pc mapping offset.
     PCMappingEntry& entry = pcMappingEntries_.back();
     MOZ_ASSERT((&offset)->offset() == entry.nativeOffset);
 #endif
 
-    // Add an IC entry for the return offset -> pc mapping.
-    return appendICEntry(ICEntry::Kind_DebugTrap, masm.currentOffset());
+    // Add a RetAddrEntry for the return offset -> pc mapping.
+    return appendRetAddrEntry(RetAddrEntry::Kind::DebugTrap, masm.currentOffset());
 }
 
 #ifdef JS_TRACE_LOGGING
 bool
 BaselineCompiler::emitTraceLoggerEnter()
 {
     AllocatableRegisterSet regs(RegisterSet::Volatile());
     Register loggerReg = regs.takeAnyGeneral();
@@ -4202,18 +4216,18 @@ BaselineCompiler::emitReturn()
 
         prepareVMCall();
         pushArg(ImmPtr(pc));
         pushArg(R0.scratchReg());
         if (!callVM(DebugEpilogueInfo)) {
             return false;
         }
 
-        // Fix up the fake ICEntry appended by callVM for on-stack recompilation.
-        icEntries_.back().setFakeKind(ICEntry::Kind_DebugEpilogue);
+        // Fix up the RetAddrEntry appended by callVM for on-stack recompilation.
+        retAddrEntries_.back().setKind(RetAddrEntry::Kind::DebugEpilogue);
 
         masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
     }
 
     // Only emit the jump if this JSOP_RETRVAL is not the last instruction.
     // Not needed for last instruction, because last instruction flows
     // into return label.
     if (pc + GetBytecodeLength(pc) < script->codeEnd()) {
@@ -4941,17 +4955,17 @@ BaselineCompiler::emit_JSOP_DEBUGAFTERYI
     masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
     prepareVMCall();
     pushArg(ImmPtr(pc));
     pushArg(R0.scratchReg());
     if (!callVM(DebugAfterYieldInfo)) {
         return false;
     }
 
-    icEntries_.back().setFakeKind(ICEntry::Kind_DebugAfterYield);
+    retAddrEntries_.back().setKind(RetAddrEntry::Kind::DebugAfterYield);
 
     Label done;
     masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &done);
     {
         masm.loadValue(frame.addressOfReturnValue(), JSReturnOperand);
         masm.jump(&return_);
     }
     masm.bind(&done);
@@ -5063,18 +5077,18 @@ BaselineCompiler::emit_JSOP_RESUME()
     // generator returns.
     Label genStart, returnTarget;
 #ifdef JS_USE_LINK_REGISTER
     masm.call(&genStart);
 #else
     masm.callAndPushReturnAddress(&genStart);
 #endif
 
-    // Add an IC entry so the return offset -> pc mapping works.
-    if (!appendICEntry(ICEntry::Kind_Op, masm.currentOffset())) {
+    // Add a RetAddrEntry so the return offset -> pc mapping works.
+    if (!appendRetAddrEntry(RetAddrEntry::Kind::IC, masm.currentOffset())) {
         return false;
     }
 
     masm.jump(&returnTarget);
     masm.bind(&genStart);
 #ifdef JS_USE_LINK_REGISTER
     masm.pushReturnAddress();
 #endif
--- a/js/src/jit/BaselineCompiler.h
+++ b/js/src/jit/BaselineCompiler.h
@@ -258,16 +258,17 @@ class BaselineCompiler final
     bool compileDebugInstrumentation_;
 
     TempAllocator& alloc_;
     BytecodeAnalysis analysis_;
     FrameInfo frame;
 
     FallbackICStubSpace stubSpace_;
     js::Vector<ICEntry, 16, SystemAllocPolicy> icEntries_;
+    js::Vector<RetAddrEntry, 16, SystemAllocPolicy> retAddrEntries_;
 
     // Stores the native code offset for a bytecode pc.
     struct PCMappingEntry
     {
         uint32_t pcOffset;
         uint32_t nativeOffset;
         PCMappingSlotInfo slotInfo;
 
@@ -339,40 +340,18 @@ class BaselineCompiler final
 
     MethodStatus compile();
 
     void setCompileDebugInstrumentation() {
         compileDebugInstrumentation_ = true;
     }
 
   private:
-    ICEntry* allocateICEntry(ICStub* stub, ICEntry::Kind kind) {
-        if (!stub) {
-            return nullptr;
-        }
-
-        // Create the entry and add it to the vector.
-        if (!icEntries_.append(ICEntry(script->pcToOffset(pc), kind))) {
-            ReportOutOfMemory(cx);
-            return nullptr;
-        }
-        ICEntry& vecEntry = icEntries_.back();
-
-        // Set the first stub for the IC entry to the fallback stub
-        vecEntry.setFirstStub(stub);
-
-        // Return pointer to the IC entry
-        return &vecEntry;
-    }
-
-    // Append an ICEntry without a stub.
-    bool appendICEntry(ICEntry::Kind kind, uint32_t returnOffset) {
-        ICEntry entry(script->pcToOffset(pc), kind);
-        entry.setReturnOffset(CodeOffset(returnOffset));
-        if (!icEntries_.append(entry)) {
+    MOZ_MUST_USE bool appendRetAddrEntry(RetAddrEntry::Kind kind, uint32_t retOffset) {
+        if (!retAddrEntries_.emplaceBack(script->pcToOffset(pc), kind, CodeOffset(retOffset))) {
             ReportOutOfMemory(cx);
             return false;
         }
         return true;
     }
 
     bool addICLoadLabel(CodeOffset label) {
         MOZ_ASSERT(!icEntries_.empty());
@@ -421,39 +400,39 @@ class BaselineCompiler final
         CHECK_OVER_RECURSED
     };
     bool callVM(const VMFunction& fun, CallVMPhase phase=POST_INITIALIZE);
 
     bool callVMNonOp(const VMFunction& fun, CallVMPhase phase=POST_INITIALIZE) {
         if (!callVM(fun, phase)) {
             return false;
         }
-        icEntries_.back().setFakeKind(ICEntry::Kind_NonOpCallVM);
+        retAddrEntries_.back().setKind(RetAddrEntry::Kind::NonOpCallVM);
         return true;
     }
 
     BytecodeAnalysis& analysis() {
         return analysis_;
     }
 
     MethodStatus emitBody();
 
     MOZ_MUST_USE bool emitCheckThis(ValueOperand val, bool reinit=false);
     void emitLoadReturnValue(ValueOperand val);
 
     void emitInitializeLocals();
     MOZ_MUST_USE bool emitPrologue();
     MOZ_MUST_USE bool emitEpilogue();
     MOZ_MUST_USE bool emitOutOfLinePostBarrierSlot();
-    MOZ_MUST_USE bool emitIC(ICStub* stub, ICEntry::Kind kind);
+    MOZ_MUST_USE bool emitIC(ICStub* stub, bool isForOp);
     MOZ_MUST_USE bool emitOpIC(ICStub* stub) {
-        return emitIC(stub, ICEntry::Kind_Op);
+        return emitIC(stub, true);
     }
     MOZ_MUST_USE bool emitNonOpIC(ICStub* stub) {
-        return emitIC(stub, ICEntry::Kind_NonOp);
+        return emitIC(stub, false);
     }
 
     MOZ_MUST_USE bool emitStackCheck();
     MOZ_MUST_USE bool emitInterruptCheck();
     MOZ_MUST_USE bool emitWarmUpCounterIncrement(bool allowOsr=true);
     MOZ_MUST_USE bool emitArgumentTypeChecks();
     void emitIsDebuggeeCheck();
     MOZ_MUST_USE bool emitDebugPrologue();
--- a/js/src/jit/BaselineDebugModeOSR.cpp
+++ b/js/src/jit/BaselineDebugModeOSR.cpp
@@ -22,50 +22,50 @@ using namespace js::jit;
 struct DebugModeOSREntry
 {
     JSScript* script;
     BaselineScript* oldBaselineScript;
     ICStub* oldStub;
     ICStub* newStub;
     BaselineDebugModeOSRInfo* recompInfo;
     uint32_t pcOffset;
-    ICEntry::Kind frameKind;
+    RetAddrEntry::Kind frameKind;
 
     explicit DebugModeOSREntry(JSScript* script)
       : script(script),
         oldBaselineScript(script->baselineScript()),
         oldStub(nullptr),
         newStub(nullptr),
         recompInfo(nullptr),
         pcOffset(uint32_t(-1)),
-        frameKind(ICEntry::Kind_Invalid)
+        frameKind(RetAddrEntry::Kind::Invalid)
     { }
 
     DebugModeOSREntry(JSScript* script, uint32_t pcOffset)
       : script(script),
         oldBaselineScript(script->baselineScript()),
         oldStub(nullptr),
         newStub(nullptr),
         recompInfo(nullptr),
         pcOffset(pcOffset),
-        frameKind(ICEntry::Kind_Invalid)
+        frameKind(RetAddrEntry::Kind::Invalid)
     { }
 
-    DebugModeOSREntry(JSScript* script, const ICEntry& icEntry)
+    DebugModeOSREntry(JSScript* script, const RetAddrEntry& retAddrEntry)
       : script(script),
         oldBaselineScript(script->baselineScript()),
         oldStub(nullptr),
         newStub(nullptr),
         recompInfo(nullptr),
-        pcOffset(icEntry.pcOffset()),
-        frameKind(icEntry.kind())
+        pcOffset(retAddrEntry.pcOffset()),
+        frameKind(retAddrEntry.kind())
     {
 #ifdef DEBUG
-        MOZ_ASSERT(pcOffset == icEntry.pcOffset());
-        MOZ_ASSERT(frameKind == icEntry.kind());
+        MOZ_ASSERT(pcOffset == retAddrEntry.pcOffset());
+        MOZ_ASSERT(frameKind == retAddrEntry.kind());
 #endif
     }
 
     DebugModeOSREntry(JSScript* script, BaselineDebugModeOSRInfo* info)
       : script(script),
         oldBaselineScript(script->baselineScript()),
         oldStub(nullptr),
         newStub(nullptr),
@@ -92,23 +92,23 @@ struct DebugModeOSREntry
     ~DebugModeOSREntry() {
         // Note that this is nulled out when the recompInfo is taken by the
         // frame. The frame then has the responsibility of freeing the
         // recompInfo.
         js_delete(recompInfo);
     }
 
     bool needsRecompileInfo() const {
-        return frameKind == ICEntry::Kind_CallVM ||
-               frameKind == ICEntry::Kind_WarmupCounter ||
-               frameKind == ICEntry::Kind_StackCheck ||
-               frameKind == ICEntry::Kind_DebugTrap ||
-               frameKind == ICEntry::Kind_DebugPrologue ||
-               frameKind == ICEntry::Kind_DebugAfterYield ||
-               frameKind == ICEntry::Kind_DebugEpilogue;
+        return frameKind == RetAddrEntry::Kind::CallVM ||
+               frameKind == RetAddrEntry::Kind::WarmupCounter ||
+               frameKind == RetAddrEntry::Kind::StackCheck ||
+               frameKind == RetAddrEntry::Kind::DebugTrap ||
+               frameKind == RetAddrEntry::Kind::DebugPrologue ||
+               frameKind == RetAddrEntry::Kind::DebugAfterYield ||
+               frameKind == RetAddrEntry::Kind::DebugEpilogue;
     }
 
     bool recompiled() const {
         return oldBaselineScript != script->baselineScript();
     }
 
     BaselineDebugModeOSRInfo* takeRecompInfo() {
         MOZ_ASSERT(needsRecompileInfo() && recompInfo);
@@ -123,17 +123,17 @@ struct DebugModeOSREntry
 
         // If we are returning to a frame which needs a continuation fixer,
         // allocate the recompile info up front so that the patching function
         // is infallible.
         jsbytecode* pc = script->offsetToPC(pcOffset);
 
         // XXX: Work around compiler error disallowing using bitfields
         // with the template magic of new_.
-        ICEntry::Kind kind = frameKind;
+        RetAddrEntry::Kind kind = frameKind;
         recompInfo = cx->new_<BaselineDebugModeOSRInfo>(pc, kind);
         return !!recompInfo;
     }
 
     ICFallbackStub* fallbackStub() const {
         MOZ_ASSERT(script);
         MOZ_ASSERT(oldStub);
         return script->baselineScript()->icEntryFromPCOffset(pcOffset).fallbackStub();
@@ -199,34 +199,36 @@ CollectJitStackScripts(JSContext* cx, co
 
             BaselineFrame* baselineFrame = frame.baselineFrame();
 
             if (BaselineDebugModeOSRInfo* info = baselineFrame->getDebugModeOSRInfo()) {
                 // If patching a previously patched yet unpopped frame, we can
                 // use the BaselineDebugModeOSRInfo on the frame directly to
                 // patch. Indeed, we cannot use frame.returnAddressToFp(), as
                 // it points into the debug mode OSR handler and cannot be
-                // used to look up a corresponding ICEntry.
+                // used to look up a corresponding RetAddrEntry.
                 //
                 // See case F in PatchBaselineFramesForDebugMode.
                 if (!entries.append(DebugModeOSREntry(script, info))) {
                     return false;
                 }
             } else if (baselineFrame->hasOverridePc()) {
-                // If the frame is not settled on a pc with an ICEntry, overridePc
-                // will contain an explicit bytecode offset. We can (and must) use that.
+                // If the frame is not settled on a pc with a RetAddrEntry,
+                // overridePc will contain an explicit bytecode offset. We can
+                // (and must) use that.
                 uint32_t offset = script->pcToOffset(baselineFrame->overridePc());
                 if (!entries.append(DebugModeOSREntry(script, offset))) {
                     return false;
                 }
             } else {
-                // The frame must be settled on a pc with an ICEntry.
+                // The frame must be settled on a pc with a RetAddrEntry.
                 uint8_t* retAddr = frame.returnAddressToFp();
-                ICEntry& icEntry = script->baselineScript()->icEntryFromReturnAddress(retAddr);
-                if (!entries.append(DebugModeOSREntry(script, icEntry))) {
+                RetAddrEntry& retAddrEntry =
+                    script->baselineScript()->retAddrEntryFromReturnAddress(retAddr);
+                if (!entries.append(DebugModeOSREntry(script, retAddrEntry))) {
                     return false;
                 }
             }
 
             if (entries.back().needsRecompileInfo()) {
                 if (!entries.back().allocateRecompileInfo(cx)) {
                     return false;
                 }
@@ -292,51 +294,51 @@ CollectInterpreterStackScripts(JSContext
             }
         }
     }
     return true;
 }
 
 #ifdef JS_JITSPEW
 static const char*
-ICEntryKindToString(ICEntry::Kind kind)
+RetAddrEntryKindToString(RetAddrEntry::Kind kind)
 {
     switch (kind) {
-      case ICEntry::Kind_Op:
+      case RetAddrEntry::Kind::IC:
         return "IC";
-      case ICEntry::Kind_NonOp:
+      case RetAddrEntry::Kind::NonOpIC:
         return "non-op IC";
-      case ICEntry::Kind_CallVM:
+      case RetAddrEntry::Kind::CallVM:
         return "callVM";
-      case ICEntry::Kind_WarmupCounter:
+      case RetAddrEntry::Kind::WarmupCounter:
         return "warmup counter";
-      case ICEntry::Kind_StackCheck:
+      case RetAddrEntry::Kind::StackCheck:
         return "stack check";
-      case ICEntry::Kind_DebugTrap:
+      case RetAddrEntry::Kind::DebugTrap:
         return "debug trap";
-      case ICEntry::Kind_DebugPrologue:
+      case RetAddrEntry::Kind::DebugPrologue:
         return "debug prologue";
-      case ICEntry::Kind_DebugAfterYield:
+      case RetAddrEntry::Kind::DebugAfterYield:
         return "debug after yield";
-      case ICEntry::Kind_DebugEpilogue:
+      case RetAddrEntry::Kind::DebugEpilogue:
         return "debug epilogue";
       default:
-        MOZ_CRASH("bad ICEntry kind");
+        MOZ_CRASH("bad RetAddrEntry kind");
     }
 }
 #endif // JS_JITSPEW
 
 static void
-SpewPatchBaselineFrame(uint8_t* oldReturnAddress, uint8_t* newReturnAddress,
-                       JSScript* script, ICEntry::Kind frameKind, jsbytecode* pc)
+SpewPatchBaselineFrame(const uint8_t* oldReturnAddress, const uint8_t* newReturnAddress,
+                       JSScript* script, RetAddrEntry::Kind frameKind, const jsbytecode* pc)
 {
     JitSpew(JitSpew_BaselineDebugModeOSR,
             "Patch return %p -> %p on BaselineJS frame (%s:%u:%u) from %s at %s",
             oldReturnAddress, newReturnAddress, script->filename(), script->lineno(),
-            script->column(), ICEntryKindToString(frameKind), CodeName[(JSOp)*pc]);
+            script->column(), RetAddrEntryKindToString(frameKind), CodeName[(JSOp)*pc]);
 }
 
 static void
 SpewPatchBaselineFrameFromExceptionHandler(uint8_t* oldReturnAddress, uint8_t* newReturnAddress,
                                            JSScript* script, jsbytecode* pc)
 {
     JitSpew(JitSpew_BaselineDebugModeOSR,
             "Patch return %p -> %p on BaselineJS frame (%s:%u:%u) from exception handler at %s",
@@ -412,44 +414,45 @@ PatchBaselineFramesForDebugMode(JSContex
             JSScript* script = entry.script;
             uint32_t pcOffset = entry.pcOffset;
             jsbytecode* pc = script->offsetToPC(pcOffset);
 
             MOZ_ASSERT(script == frame.script());
             MOZ_ASSERT(pcOffset < script->length());
 
             BaselineScript* bl = script->baselineScript();
-            ICEntry::Kind kind = entry.frameKind;
+            RetAddrEntry::Kind kind = entry.frameKind;
 
-            if (kind == ICEntry::Kind_Op) {
+            if (kind == RetAddrEntry::Kind::IC) {
                 // Case A above.
                 //
                 // Patching these cases needs to patch both the stub frame and
                 // the baseline frame. The stub frame is patched below. For
                 // the baseline frame here, we resume right after the IC
                 // returns.
                 //
                 // Since we're using the same IC stub code, we can resume
                 // directly to the IC resume address.
-                uint8_t* retAddr = bl->returnAddressForIC(bl->icEntryFromPCOffset(pcOffset));
+                RetAddrEntry& retAddrEntry = bl->retAddrEntryFromPCOffset(pcOffset, kind);
+                uint8_t* retAddr = bl->returnAddressForEntry(retAddrEntry);
                 SpewPatchBaselineFrame(prev->returnAddress(), retAddr, script, kind, pc);
                 DebugModeOSRVolatileJitFrameIter::forwardLiveIterators(
                     cx, prev->returnAddress(), retAddr);
                 prev->setReturnAddress(retAddr);
                 entryIndex++;
                 break;
             }
 
-            if (kind == ICEntry::Kind_Invalid) {
+            if (kind == RetAddrEntry::Kind::Invalid) {
                 // Cases G and H above.
                 //
                 // We are recompiling a frame with an override pc.
                 // This may occur from inside the exception handler,
                 // by way of an onExceptionUnwind invocation, on a pc
-                // without an ICEntry. It may also happen if we call
+                // without a RetAddrEntry. It may also happen if we call
                 // GeneratorThrowOrReturn and trigger onEnterFrame.
                 //
                 // If profiling is off, patch the resume address to nullptr,
                 // to ensure the old address is not used anywhere.
                 // If profiling is on, JSJitProfilingFrameIterator requires a
                 // valid return address.
                 MOZ_ASSERT(frame.baselineFrame()->overridePc() == pc);
                 uint8_t* retAddr;
@@ -471,113 +474,102 @@ PatchBaselineFramesForDebugMode(JSContex
             //
             // We undo a previous recompile by handling cases B, C, D, E, I or J
             // like normal, except that we retrieve the pc information via
             // the previous OSR debug info stashed on the frame.
             BaselineDebugModeOSRInfo* info = frame.baselineFrame()->getDebugModeOSRInfo();
             if (info) {
                 MOZ_ASSERT(info->pc == pc);
                 MOZ_ASSERT(info->frameKind == kind);
-                MOZ_ASSERT(kind == ICEntry::Kind_CallVM ||
-                           kind == ICEntry::Kind_WarmupCounter ||
-                           kind == ICEntry::Kind_StackCheck ||
-                           kind == ICEntry::Kind_DebugTrap ||
-                           kind == ICEntry::Kind_DebugPrologue ||
-                           kind == ICEntry::Kind_DebugAfterYield ||
-                           kind == ICEntry::Kind_DebugEpilogue);
+                MOZ_ASSERT(kind == RetAddrEntry::Kind::CallVM ||
+                           kind == RetAddrEntry::Kind::WarmupCounter ||
+                           kind == RetAddrEntry::Kind::StackCheck ||
+                           kind == RetAddrEntry::Kind::DebugTrap ||
+                           kind == RetAddrEntry::Kind::DebugPrologue ||
+                           kind == RetAddrEntry::Kind::DebugAfterYield ||
+                           kind == RetAddrEntry::Kind::DebugEpilogue);
 
                 // We will have allocated a new recompile info, so delete the
                 // existing one.
                 frame.baselineFrame()->deleteDebugModeOSRInfo();
             }
 
             // The RecompileInfo must already be allocated so that this
             // function may be infallible.
             BaselineDebugModeOSRInfo* recompInfo = entry.takeRecompInfo();
 
             bool popFrameReg;
             switch (kind) {
-              case ICEntry::Kind_CallVM: {
+              case RetAddrEntry::Kind::CallVM: {
                 // Case B above.
                 //
                 // Patching returns from a VM call. After fixing up the the
                 // continuation for unsynced values (the frame register is
                 // popped by the callVM trampoline), we resume at the
                 // return-from-callVM address. The assumption here is that all
                 // callVMs which can trigger debug mode OSR are the *only*
                 // callVMs generated for their respective pc locations in the
                 // baseline JIT code.
-                ICEntry& callVMEntry = bl->callVMEntryFromPCOffset(pcOffset);
-                recompInfo->resumeAddr = bl->returnAddressForIC(callVMEntry);
+                RetAddrEntry& retAddrEntry = bl->retAddrEntryFromPCOffset(pcOffset, kind);
+                recompInfo->resumeAddr = bl->returnAddressForEntry(retAddrEntry);
                 popFrameReg = false;
                 break;
               }
 
-              case ICEntry::Kind_WarmupCounter: {
-                // Case J above.
+              case RetAddrEntry::Kind::WarmupCounter:
+              case RetAddrEntry::Kind::StackCheck: {
+                // Cases I and J above.
                 //
                 // Patching mechanism is identical to a CallVM. This is
-                // handled especially only because the warmup counter VM call is
-                // part of the prologue, and not tied an opcode.
-                ICEntry& warmupCountEntry = bl->warmupCountICEntry();
-                recompInfo->resumeAddr = bl->returnAddressForIC(warmupCountEntry);
+                // handled especially only because these VM calls are part of
+                // the prologue, and not tied to an opcode.
+                RetAddrEntry& entry = bl->prologueRetAddrEntry(kind);
+                recompInfo->resumeAddr = bl->returnAddressForEntry(entry);
                 popFrameReg = false;
                 break;
               }
 
-              case ICEntry::Kind_StackCheck: {
-                // Case I above.
-                //
-                // Patching mechanism is identical to a CallVM. This is
-                // handled especially only because the stack check VM call is
-                // part of the prologue, and not tied an opcode.
-                ICEntry& stackCheckEntry = bl->stackCheckICEntry();
-                recompInfo->resumeAddr = bl->returnAddressForIC(stackCheckEntry);
-                popFrameReg = false;
-                break;
-              }
-
-              case ICEntry::Kind_DebugTrap:
+              case RetAddrEntry::Kind::DebugTrap:
                 // Case C above.
                 //
                 // Debug traps are emitted before each op, so we resume at the
                 // same op. Calling debug trap handlers is done via a toggled
                 // call to a thunk (DebugTrapHandler) that takes care tearing
                 // down its own stub frame so we don't need to worry about
                 // popping the frame reg.
                 recompInfo->resumeAddr = bl->nativeCodeForPC(script, pc, &recompInfo->slotInfo);
                 popFrameReg = false;
                 break;
 
-              case ICEntry::Kind_DebugPrologue:
+              case RetAddrEntry::Kind::DebugPrologue:
                 // Case D above.
                 //
                 // We patch a jump directly to the right place in the prologue
                 // after popping the frame reg and checking for forced return.
                 recompInfo->resumeAddr = bl->postDebugPrologueAddr();
                 popFrameReg = true;
                 break;
 
-              case ICEntry::Kind_DebugAfterYield:
+              case RetAddrEntry::Kind::DebugAfterYield:
                 // Case K above.
                 //
                 // Resume at the next instruction.
                 MOZ_ASSERT(*pc == JSOP_DEBUGAFTERYIELD);
                 recompInfo->resumeAddr = bl->nativeCodeForPC(script,
                                                              pc + JSOP_DEBUGAFTERYIELD_LENGTH,
                                                              &recompInfo->slotInfo);
                 popFrameReg = true;
                 break;
 
               default:
                 // Case E above.
                 //
                 // We patch a jump directly to the epilogue after popping the
                 // frame reg and checking for forced return.
-                MOZ_ASSERT(kind == ICEntry::Kind_DebugEpilogue);
+                MOZ_ASSERT(kind == RetAddrEntry::Kind::DebugEpilogue);
                 recompInfo->resumeAddr = bl->epilogueEntryAddr();
                 popFrameReg = true;
                 break;
             }
 
             SpewPatchBaselineFrame(prev->returnAddress(), recompInfo->resumeAddr,
                                    script, kind, recompInfo->pc);
 
@@ -742,17 +734,17 @@ CloneOldBaselineStub(JSContext* cx, Debu
 
     // If this script was not recompiled (because it already had the correct
     // debug instrumentation), don't clone to avoid attaching duplicate stubs.
     if (!entry.recompiled()) {
         entry.newStub = nullptr;
         return true;
     }
 
-    if (entry.frameKind == ICEntry::Kind_Invalid) {
+    if (entry.frameKind == RetAddrEntry::Kind::Invalid) {
         // The exception handler can modify the frame's override pc while
         // unwinding scopes. This is fine, but if we have a stub frame, the code
         // code below will get confused: the entry's pcOffset doesn't match the
         // stub that's still on the stack. To prevent that, we just set the new
         // stub to nullptr as we will never return to this stub frame anyway.
         entry.newStub = nullptr;
         return true;
     }
@@ -784,17 +776,17 @@ CloneOldBaselineStub(JSContext* cx, Debu
         entry.newStub = fallbackStub;
         return true;
     }
 
     // Check if we have already cloned the stub on a younger frame. Ignore
     // frames that entered the exception handler (entries[i].newStub is nullptr
     // in that case, see above).
     for (size_t i = 0; i < entryIndex; i++) {
-        if (oldStub == entries[i].oldStub && entries[i].frameKind != ICEntry::Kind_Invalid) {
+        if (oldStub == entries[i].oldStub && entries[i].frameKind != RetAddrEntry::Kind::Invalid) {
             MOZ_ASSERT(entries[i].newStub);
             entry.newStub = entries[i].newStub;
             return true;
         }
     }
 
     ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(oldStub->makesGCCalls(),
                                                               entry.script);
@@ -973,62 +965,63 @@ BaselineDebugModeOSRInfo::popValueInto(P
     }
 
     stackAdjust++;
 }
 
 static inline bool
 HasForcedReturn(BaselineDebugModeOSRInfo* info, bool rv)
 {
-    ICEntry::Kind kind = info->frameKind;
+    RetAddrEntry::Kind kind = info->frameKind;
 
     // The debug epilogue always checks its resumption value, so we don't need
     // to check rv.
-    if (kind == ICEntry::Kind_DebugEpilogue) {
+    if (kind == RetAddrEntry::Kind::DebugEpilogue) {
         return true;
     }
 
     // |rv| is the value in ReturnReg. If true, in the case of the prologue or
     // after yield, it means a forced return.
-    if (kind == ICEntry::Kind_DebugPrologue || kind == ICEntry::Kind_DebugAfterYield) {
+    if (kind == RetAddrEntry::Kind::DebugPrologue || kind == RetAddrEntry::Kind::DebugAfterYield) {
         return rv;
     }
 
     // N.B. The debug trap handler handles its own forced return, so no
     // need to deal with it here.
     return false;
 }
 
 static inline bool
 IsReturningFromCallVM(BaselineDebugModeOSRInfo* info)
 {
     // Keep this in sync with EmitBranchIsReturningFromCallVM.
     //
     // The stack check entries are returns from a callVM, but have a special
     // kind because they do not exist in a 1-1 relationship with a pc offset.
-    return info->frameKind == ICEntry::Kind_CallVM ||
-           info->frameKind == ICEntry::Kind_WarmupCounter ||
-           info->frameKind == ICEntry::Kind_StackCheck;
+    return info->frameKind == RetAddrEntry::Kind::CallVM ||
+           info->frameKind == RetAddrEntry::Kind::WarmupCounter ||
+           info->frameKind == RetAddrEntry::Kind::StackCheck;
 }
 
 static void
-EmitBranchICEntryKind(MacroAssembler& masm, Register entry, ICEntry::Kind kind, Label* label)
+EmitBranchRetAddrEntryKind(MacroAssembler& masm, Register entry, RetAddrEntry::Kind kind,
+                           Label* label)
 {
     masm.branch32(MacroAssembler::Equal,
                   Address(entry, offsetof(BaselineDebugModeOSRInfo, frameKind)),
-                  Imm32(kind), label);
+                  Imm32(uint32_t(kind)), label);
 }
 
 static void
 EmitBranchIsReturningFromCallVM(MacroAssembler& masm, Register entry, Label* label)
 {
     // Keep this in sync with IsReturningFromCallVM.
-    EmitBranchICEntryKind(masm, entry, ICEntry::Kind_CallVM, label);
-    EmitBranchICEntryKind(masm, entry, ICEntry::Kind_WarmupCounter, label);
-    EmitBranchICEntryKind(masm, entry, ICEntry::Kind_StackCheck, label);
+    EmitBranchRetAddrEntryKind(masm, entry, RetAddrEntry::Kind::CallVM, label);
+    EmitBranchRetAddrEntryKind(masm, entry, RetAddrEntry::Kind::WarmupCounter, label);
+    EmitBranchRetAddrEntryKind(masm, entry, RetAddrEntry::Kind::StackCheck, label);
 }
 
 static void
 SyncBaselineDebugModeOSRInfo(BaselineFrame* frame, Value* vp, bool rv)
 {
     AutoUnsafeCallWithABI unsafe;
     BaselineDebugModeOSRInfo* info = frame->debugModeOSRInfo();
     MOZ_ASSERT(info);
--- a/js/src/jit/BaselineDebugModeOSR.h
+++ b/js/src/jit/BaselineDebugModeOSR.h
@@ -101,24 +101,24 @@ class DebugModeOSRVolatileJitFrameIter :
 //
 // Auxiliary info to help the DebugModeOSRHandler fix up state.
 //
 struct BaselineDebugModeOSRInfo
 {
     uint8_t* resumeAddr;
     jsbytecode* pc;
     PCMappingSlotInfo slotInfo;
-    ICEntry::Kind frameKind;
+    RetAddrEntry::Kind frameKind;
 
     // Filled in by SyncBaselineDebugModeOSRInfo.
     uintptr_t stackAdjust;
     Value valueR0;
     Value valueR1;
 
-    BaselineDebugModeOSRInfo(jsbytecode* pc, ICEntry::Kind kind)
+    BaselineDebugModeOSRInfo(jsbytecode* pc, RetAddrEntry::Kind kind)
       : resumeAddr(nullptr),
         pc(pc),
         slotInfo(0),
         frameKind(kind),
         stackAdjust(0),
         valueR0(UndefinedValue()),
         valueR1(UndefinedValue())
     { }
--- a/js/src/jit/BaselineFrame.cpp
+++ b/js/src/jit/BaselineFrame.cpp
@@ -145,22 +145,23 @@ BaselineFrame::initForOsr(InterpreterFra
     if (fp->isDebuggee()) {
         JSContext* cx = TlsContext.get();
 
         // For debuggee frames, update any Debugger.Frame objects for the
         // InterpreterFrame to point to the BaselineFrame.
 
         // The caller pushed a fake return address. ScriptFrameIter, used by the
         // debugger, wants a valid return address, but it's okay to just pick one.
-        // In debug mode there's always at least 1 ICEntry (since there are always
-        // debug prologue/epilogue calls).
+        // In debug mode there's always at least one RetAddrEntry (since there are
+        // always debug prologue/epilogue calls).
         JSJitFrameIter frame(cx->activation()->asJit());
         MOZ_ASSERT(frame.returnAddress() == nullptr);
         BaselineScript* baseline = fp->script()->baselineScript();
-        frame.current()->setReturnAddress(baseline->returnAddressForIC(baseline->icEntry(0)));
+        uint8_t* retAddr = baseline->returnAddressForEntry(baseline->retAddrEntry(0));
+        frame.current()->setReturnAddress(retAddr);
 
         if (!Debugger::handleBaselineOsr(cx, fp, this)) {
             return false;
         }
 
         setIsDebuggee();
     }
 
--- a/js/src/jit/BaselineFrame.h
+++ b/js/src/jit/BaselineFrame.h
@@ -53,25 +53,26 @@ class BaselineFrame
         // Frame has over-recursed on an early check.
         OVER_RECURSED    = 1 << 9,
 
         // Frame has a BaselineRecompileInfo stashed in the scratch value
         // slot. See PatchBaselineFramesForDebugMode.
         HAS_DEBUG_MODE_OSR_INFO = 1 << 10,
 
         // This flag is intended for use whenever the frame is settled on a
-        // native code address without a corresponding ICEntry. In this case,
-        // the frame contains an explicit bytecode offset for frame iterators.
+        // native code address without a corresponding RetAddrEntry. In this
+        // case, the frame contains an explicit bytecode offset for frame
+        // iterators.
         //
         // There can also be an override pc if the frame has had its
         // environment chain unwound to a pc during exception handling that is
         // different from its current pc.
         //
         // This flag should never be set on the top frame while we're
-        // executing JIT code. In debug mode, it is checked before and
+        // executing JIT code. In debug builds, it is checked before and
         // after VM calls.
         HAS_OVERRIDE_PC = 1 << 11,
 
         // If set, we're handling an exception for this frame. This is set for
         // debug mode OSR sanity checking when it handles corner cases which
         // only arise during exception handling.
         HANDLING_EXCEPTION = 1 << 12,
     };
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -113,19 +113,16 @@ ICFallbackStub*
 ICEntry::fallbackStub() const
 {
     return firstStub()->getChainFallback();
 }
 
 void
 ICEntry::trace(JSTracer* trc)
 {
-    if (!hasStub()) {
-        return;
-    }
     for (ICStub* stub = firstStub(); stub; stub = stub->next()) {
         stub->trace(trc);
     }
 }
 
 ICStubConstIterator&
 ICStubConstIterator::operator++()
 {
--- a/js/src/jit/BaselineIC.h
+++ b/js/src/jit/BaselineIC.h
@@ -209,142 +209,68 @@ void FallbackICSpew(JSContext* cx, ICFal
     MOZ_FORMAT_PRINTF(3, 4);
 void TypeFallbackICSpew(JSContext* cx, ICTypeMonitor_Fallback* stub, const char* fmt, ...)
     MOZ_FORMAT_PRINTF(3, 4);
 #else
 #define FallbackICSpew(...)
 #define TypeFallbackICSpew(...)
 #endif
 
-//
-// An entry in the JIT IC descriptor table.
-//
+// An entry in the BaselineScript IC descriptor table. There's one ICEntry per
+// IC.
 class ICEntry
 {
-  private:
-    // A pointer to the shared IC stub for this instruction.
+    // A pointer to the first IC stub for this instruction.
     ICStub* firstStub_;
 
-    // Offset from the start of the JIT code where the IC
-    // load and call instructions are.
-    uint32_t returnOffset_;
-
     // The PC of this IC's bytecode op within the JSScript.
-    uint32_t pcOffset_ : 28;
+    uint32_t pcOffset_ : 31;
+    uint32_t isForOp_ : 1;
 
   public:
-    enum Kind {
-        // A for-op IC entry.
-        Kind_Op = 0,
-
-        // A non-op IC entry.
-        Kind_NonOp,
-
-        // A fake IC entry for returning from a callVM for an op.
-        Kind_CallVM,
-
-        // A fake IC entry for returning from a callVM not for an op (e.g., in
-        // the prologue).
-        Kind_NonOpCallVM,
-
-        // A fake IC entry for returning from a callVM to after the
-        // warmup counter.
-        Kind_WarmupCounter,
-
-        // A fake IC entry for returning from a callVM to the interrupt
-        // handler via the over-recursion check on function entry.
-        Kind_StackCheck,
-
-        // A fake IC entry for returning from DebugTrapHandler.
-        Kind_DebugTrap,
-
-        // A fake IC entry for returning from a callVM to
-        // Debug{Prologue,AfterYield,Epilogue}.
-        Kind_DebugPrologue,
-        Kind_DebugAfterYield,
-        Kind_DebugEpilogue,
-
-        Kind_Invalid
-    };
-
-  private:
-    // What this IC is for.
-    Kind kind_ : 4;
-
-    // Set the kind and asserts that it's sane.
-    void setKind(Kind kind) {
-        MOZ_ASSERT(kind < Kind_Invalid);
-        kind_ = kind;
-        MOZ_ASSERT(this->kind() == kind);
+    ICEntry(ICStub* firstStub, uint32_t pcOffset, bool isForOp)
+      : firstStub_(firstStub), pcOffset_(pcOffset), isForOp_(uint32_t(isForOp))
+    {
+        // The offset must fit in at least 31 bits, since we shave off 1 for
+        // the isForOp_ flag.
+        MOZ_ASSERT(pcOffset_ == pcOffset);
+        JS_STATIC_ASSERT(BaselineMaxScriptLength <= (1u << 31) - 1);
+        MOZ_ASSERT(pcOffset <= BaselineMaxScriptLength);
     }
 
-  public:
-    ICEntry(uint32_t pcOffset, Kind kind)
-      : firstStub_(nullptr), returnOffset_(), pcOffset_(pcOffset)
-    {
-        // The offset must fit in at least 28 bits, since we shave off 4 for
-        // the Kind enum.
-        MOZ_ASSERT(pcOffset_ == pcOffset);
-        JS_STATIC_ASSERT(BaselineScript::MAX_JSSCRIPT_LENGTH <= (1u << 28) - 1);
-        MOZ_ASSERT(pcOffset <= BaselineScript::MAX_JSSCRIPT_LENGTH);
-        setKind(kind);
-    }
-
-    CodeOffset returnOffset() const {
-        return CodeOffset(returnOffset_);
-    }
-
-    void setReturnOffset(CodeOffset offset) {
-        MOZ_ASSERT(offset.offset() <= (size_t) UINT32_MAX);
-        returnOffset_ = (uint32_t) offset.offset();
-    }
-
-    uint32_t pcOffset() const {
-        return pcOffset_;
-    }
-
-    jsbytecode* pc(JSScript* script) const {
-        return script->offsetToPC(pcOffset_);
-    }
-
-    Kind kind() const {
-        // MSVC compiles enums as signed.
-        return Kind(kind_ & 0xf);
-    }
-    bool isForOp() const {
-        return kind() == Kind_Op;
-    }
-
-    void setFakeKind(Kind kind) {
-        MOZ_ASSERT(kind != Kind_Op && kind != Kind_NonOp);
-        setKind(kind);
-    }
-
-    bool hasStub() const {
-        return firstStub_ != nullptr;
-    }
     ICStub* firstStub() const {
-        MOZ_ASSERT(hasStub());
+        MOZ_ASSERT(firstStub_);
         return firstStub_;
     }
 
     ICFallbackStub* fallbackStub() const;
 
     void setFirstStub(ICStub* stub) {
         firstStub_ = stub;
     }
 
+    uint32_t pcOffset() const {
+        return pcOffset_;
+    }
+    jsbytecode* pc(JSScript* script) const {
+        return script->offsetToPC(pcOffset_);
+    }
+
     static inline size_t offsetOfFirstStub() {
         return offsetof(ICEntry, firstStub_);
     }
 
     inline ICStub** addressOfFirstStub() {
         return &firstStub_;
     }
 
+    bool isForOp() const {
+        return !!isForOp_;
+    }
+
     void trace(JSTracer* trc);
 };
 
 class ICMonitoredStub;
 class ICMonitoredFallbackStub;
 class ICUpdatedStub;
 
 // Constant iterator that traverses arbitrary chains of ICStubs.
--- a/js/src/jit/BaselineJIT.cpp
+++ b/js/src/jit/BaselineJIT.cpp
@@ -287,21 +287,21 @@ CanEnterBaselineJIT(JSContext* cx, Handl
 {
     MOZ_ASSERT(jit::IsBaselineEnabled(cx));
 
     // Skip if the script has been disabled.
     if (!script->canBaselineCompile()) {
         return Method_Skipped;
     }
 
-    if (script->length() > BaselineScript::MAX_JSSCRIPT_LENGTH) {
+    if (script->length() > BaselineMaxScriptLength) {
         return Method_CantCompile;
     }
 
-    if (script->nslots() > BaselineScript::MAX_JSSCRIPT_SLOTS) {
+    if (script->nslots() > BaselineMaxScriptSlots) {
         return Method_CantCompile;
     }
 
     if (script->hasBaselineScript()) {
         return Method_Compiled;
     }
 
     // Check this before calling ensureJitRealmExists, so we're less
@@ -382,37 +382,41 @@ jit::CanEnterBaselineMethod(JSContext* c
 
 BaselineScript*
 BaselineScript::New(JSScript* jsscript,
                     uint32_t prologueOffset, uint32_t epilogueOffset,
                     uint32_t profilerEnterToggleOffset,
                     uint32_t profilerExitToggleOffset,
                     uint32_t postDebugPrologueOffset,
                     size_t icEntries,
+                    size_t retAddrEntries,
                     size_t pcMappingIndexEntries, size_t pcMappingSize,
                     size_t bytecodeTypeMapEntries,
                     size_t yieldEntries,
                     size_t traceLoggerToggleOffsetEntries)
 {
     static const unsigned DataAlignment = sizeof(uintptr_t);
 
     size_t icEntriesSize = icEntries * sizeof(ICEntry);
+    size_t retAddrEntriesSize = retAddrEntries * sizeof(RetAddrEntry);
     size_t pcMappingIndexEntriesSize = pcMappingIndexEntries * sizeof(PCMappingIndexEntry);
     size_t bytecodeTypeMapSize = bytecodeTypeMapEntries * sizeof(uint32_t);
     size_t yieldEntriesSize = yieldEntries * sizeof(uintptr_t);
     size_t tlEntriesSize = traceLoggerToggleOffsetEntries * sizeof(uint32_t);
 
     size_t paddedICEntriesSize = AlignBytes(icEntriesSize, DataAlignment);
+    size_t paddedRetAddrEntriesSize = AlignBytes(retAddrEntriesSize, DataAlignment);
     size_t paddedPCMappingIndexEntriesSize = AlignBytes(pcMappingIndexEntriesSize, DataAlignment);
     size_t paddedPCMappingSize = AlignBytes(pcMappingSize, DataAlignment);
     size_t paddedBytecodeTypesMapSize = AlignBytes(bytecodeTypeMapSize, DataAlignment);
     size_t paddedYieldEntriesSize = AlignBytes(yieldEntriesSize, DataAlignment);
     size_t paddedTLEntriesSize = AlignBytes(tlEntriesSize, DataAlignment);
 
     size_t allocBytes = paddedICEntriesSize +
+                        paddedRetAddrEntriesSize +
                         paddedPCMappingIndexEntriesSize +
                         paddedPCMappingSize +
                         paddedBytecodeTypesMapSize +
                         paddedYieldEntriesSize +
                         paddedTLEntriesSize;
 
     BaselineScript* script = jsscript->zone()->pod_malloc_with_extra<BaselineScript, uint8_t>(allocBytes);
     if (!script) {
@@ -424,16 +428,20 @@ BaselineScript::New(JSScript* jsscript,
 
     size_t offsetCursor = sizeof(BaselineScript);
     MOZ_ASSERT(offsetCursor == AlignBytes(sizeof(BaselineScript), DataAlignment));
 
     script->icEntriesOffset_ = offsetCursor;
     script->icEntries_ = icEntries;
     offsetCursor += paddedICEntriesSize;
 
+    script->retAddrEntriesOffset_ = offsetCursor;
+    script->retAddrEntries_ = retAddrEntries;
+    offsetCursor += paddedRetAddrEntriesSize;
+
     script->pcMappingIndexOffset_ = offsetCursor;
     script->pcMappingIndexEntries_ = pcMappingIndexEntries;
     offsetCursor += paddedPCMappingIndexEntriesSize;
 
     script->pcMappingOffset_ = offsetCursor;
     script->pcMappingSize_ = pcMappingSize;
     offsetCursor += paddedPCMappingSize;
 
@@ -560,16 +568,23 @@ BaselineScript::removeDependentWasmImpor
 
 ICEntry&
 BaselineScript::icEntry(size_t index)
 {
     MOZ_ASSERT(index < numICEntries());
     return icEntryList()[index];
 }
 
+RetAddrEntry&
+BaselineScript::retAddrEntry(size_t index)
+{
+    MOZ_ASSERT(index < numRetAddrEntries());
+    return retAddrEntryList()[index];
+}
+
 PCMappingIndexEntry&
 BaselineScript::pcMappingIndexEntry(size_t index)
 {
     MOZ_ASSERT(index < numPCMappingIndexEntries());
     return pcMappingIndexEntryList()[index];
 }
 
 CompactBufferReader
@@ -582,99 +597,125 @@ BaselineScript::pcMappingReader(size_t i
         ? pcMappingData() + pcMappingSize_
         : pcMappingData() + pcMappingIndexEntry(indexEntry + 1).bufferOffset;
 
     return CompactBufferReader(dataStart, dataEnd);
 }
 
 struct ICEntries
 {
+    using EntryT = ICEntry;
+
     BaselineScript* const baseline_;
 
     explicit ICEntries(BaselineScript* baseline) : baseline_(baseline) {}
 
+    size_t numEntries() const {
+        return baseline_->numICEntries();
+    }
     ICEntry& operator[](size_t index) const {
         return baseline_->icEntry(index);
     }
 };
 
-ICEntry&
-BaselineScript::icEntryFromReturnOffset(CodeOffset returnOffset)
+struct RetAddrEntries
+{
+    using EntryT = RetAddrEntry;
+
+    BaselineScript* const baseline_;
+
+    explicit RetAddrEntries(BaselineScript* baseline) : baseline_(baseline) {}
+
+    size_t numEntries() const {
+        return baseline_->numRetAddrEntries();
+    }
+    RetAddrEntry& operator[](size_t index) const {
+        return baseline_->retAddrEntry(index);
+    }
+};
+
+RetAddrEntry&
+BaselineScript::retAddrEntryFromReturnOffset(CodeOffset returnOffset)
 {
     size_t loc;
 #ifdef DEBUG
     bool found =
 #endif
-        BinarySearchIf(ICEntries(this), 0, numICEntries(),
-                       [&returnOffset](ICEntry& entry) {
+        BinarySearchIf(RetAddrEntries(this), 0, numRetAddrEntries(),
+                       [&returnOffset](const RetAddrEntry& entry) {
                            size_t roffset = returnOffset.offset();
                            size_t entryRoffset = entry.returnOffset().offset();
                            if (roffset < entryRoffset) {
                                return -1;
                            }
                            if (entryRoffset < roffset) {
                                return 1;
                            }
                            return 0;
                        },
                        &loc);
 
     MOZ_ASSERT(found);
-    MOZ_ASSERT(loc < numICEntries());
-    MOZ_ASSERT(icEntry(loc).returnOffset().offset() == returnOffset.offset());
-    return icEntry(loc);
+    MOZ_ASSERT(loc < numRetAddrEntries());
+    MOZ_ASSERT(retAddrEntry(loc).returnOffset().offset() == returnOffset.offset());
+    return retAddrEntry(loc);
 }
 
+template <typename Entries>
 static inline bool
 ComputeBinarySearchMid(BaselineScript* baseline, uint32_t pcOffset, size_t* loc)
 {
-    return BinarySearchIf(ICEntries(baseline), 0, baseline->numICEntries(),
-                          [pcOffset](ICEntry& entry) {
+    Entries entries(baseline);
+    return BinarySearchIf(entries, 0, entries.numEntries(),
+                          [pcOffset](typename Entries::EntryT& entry) {
                               uint32_t entryOffset = entry.pcOffset();
                               if (pcOffset < entryOffset) {
                                   return -1;
                               }
                               if (entryOffset < pcOffset) {
                                   return 1;
                               }
                               return 0;
                           },
                           loc);
 }
 
 uint8_t*
-BaselineScript::returnAddressForIC(const ICEntry& ent)
+BaselineScript::returnAddressForEntry(const RetAddrEntry& ent)
 {
     return method()->raw() + ent.returnOffset().offset();
 }
 
 ICEntry*
 BaselineScript::maybeICEntryFromPCOffset(uint32_t pcOffset)
 {
     // Multiple IC entries can have the same PC offset, but this method only looks for
     // those which have isForOp() set.
     size_t mid;
-    if (!ComputeBinarySearchMid(this, pcOffset, &mid)) {
+    if (!ComputeBinarySearchMid<ICEntries>(this, pcOffset, &mid)) {
         return nullptr;
     }
 
     MOZ_ASSERT(mid < numICEntries());
 
     // Found an IC entry with a matching PC offset.  Search backward, and then
     // forward from this IC entry, looking for one with the same PC offset which
     // has isForOp() set.
     for (size_t i = mid; icEntry(i).pcOffset() == pcOffset; i--) {
         if (icEntry(i).isForOp()) {
             return &icEntry(i);
         }
         if (i == 0) {
             break;
         }
     }
-    for (size_t i = mid+1; i < numICEntries() && icEntry(i).pcOffset() == pcOffset; i++) {
+    for (size_t i = mid + 1; i < numICEntries(); i++) {
+        if (icEntry(i).pcOffset() != pcOffset) {
+            break;
+        }
         if (icEntry(i).isForOp()) {
             return &icEntry(i);
         }
     }
     return nullptr;
 }
 
 ICEntry&
@@ -711,76 +752,68 @@ BaselineScript::maybeICEntryFromPCOffset
 ICEntry&
 BaselineScript::icEntryFromPCOffset(uint32_t pcOffset, ICEntry* prevLookedUpEntry)
 {
     ICEntry* entry = maybeICEntryFromPCOffset(pcOffset, prevLookedUpEntry);
     MOZ_RELEASE_ASSERT(entry);
     return *entry;
 }
 
-ICEntry&
-BaselineScript::callVMEntryFromPCOffset(uint32_t pcOffset)
+RetAddrEntry&
+BaselineScript::retAddrEntryFromPCOffset(uint32_t pcOffset, RetAddrEntry::Kind kind)
 {
-    // Like icEntryFromPCOffset, but only looks for the fake ICEntries
-    // inserted by VM calls.
     size_t mid;
-    MOZ_ALWAYS_TRUE(ComputeBinarySearchMid(this, pcOffset, &mid));
-    MOZ_ASSERT(mid < numICEntries());
+    MOZ_ALWAYS_TRUE(ComputeBinarySearchMid<RetAddrEntries>(this, pcOffset, &mid));
+    MOZ_ASSERT(mid < numRetAddrEntries());
 
-    for (size_t i = mid; icEntry(i).pcOffset() == pcOffset; i--) {
-        if (icEntry(i).kind() == ICEntry::Kind_CallVM) {
-            return icEntry(i);
+    for (size_t i = mid; retAddrEntry(i).pcOffset() == pcOffset; i--) {
+        if (retAddrEntry(i).kind() == kind) {
+            return retAddrEntry(i);
         }
         if (i == 0) {
             break;
         }
     }
-    for (size_t i = mid+1; i < numICEntries() && icEntry(i).pcOffset() == pcOffset; i++) {
-        if (icEntry(i).kind() == ICEntry::Kind_CallVM) {
-            return icEntry(i);
+    for (size_t i = mid + 1; i < numRetAddrEntries(); i++) {
+        if (retAddrEntry(i).pcOffset() != pcOffset) {
+            break;
+        }
+        if (retAddrEntry(i).kind() == kind) {
+            return retAddrEntry(i);
         }
     }
-    MOZ_CRASH("Invalid PC offset for callVM entry.");
+    MOZ_CRASH("Didn't find RetAddrEntry.");
 }
 
-ICEntry&
-BaselineScript::stackCheckICEntry()
+RetAddrEntry&
+BaselineScript::prologueRetAddrEntry(RetAddrEntry::Kind kind)
 {
-    // The stack check will always be at offset 0, so just do a linear search
-    // from the beginning. This is only needed for debug mode OSR, when
-    // patching a frame that has invoked a Debugger hook via the interrupt
-    // handler via the stack check, which is part of the prologue.
-    for (size_t i = 0; i < numICEntries() && icEntry(i).pcOffset() == 0; i++) {
-        if (icEntry(i).kind() == ICEntry::Kind_StackCheck) {
-            return icEntry(i);
+    MOZ_ASSERT(kind == RetAddrEntry::Kind::StackCheck ||
+               kind == RetAddrEntry::Kind::WarmupCounter);
+
+    // The prologue entries will always be at a very low offset, so just do a
+    // linear search from the beginning.
+    for (size_t i = 0; i < numRetAddrEntries(); i++) {
+        if (retAddrEntry(i).pcOffset() != 0) {
+            break;
+        }
+        if (retAddrEntry(i).kind() == kind) {
+            return retAddrEntry(i);
         }
     }
-    MOZ_CRASH("No stack check ICEntry found.");
+    MOZ_CRASH("Didn't find prologue RetAddrEntry.");
 }
 
-ICEntry&
-BaselineScript::warmupCountICEntry()
-{
-    // The stack check will be at a very low offset, so just do a linear search
-    // from the beginning.
-    for (size_t i = 0; i < numICEntries() && icEntry(i).pcOffset() == 0; i++) {
-        if (icEntry(i).kind() == ICEntry::Kind_WarmupCounter) {
-            return icEntry(i);
-        }
-    }
-    MOZ_CRASH("No warmup count ICEntry found.");
-}
-
-ICEntry&
-BaselineScript::icEntryFromReturnAddress(uint8_t* returnAddr)
+RetAddrEntry&
+BaselineScript::retAddrEntryFromReturnAddress(uint8_t* returnAddr)
 {
     MOZ_ASSERT(returnAddr > method_->raw());
     MOZ_ASSERT(returnAddr < method_->raw() + method_->instructionsSize());
     CodeOffset offset(returnAddr - method_->raw());
-    return icEntryFromReturnOffset(offset);
+    return retAddrEntryFromReturnOffset(offset);
 }
 
 void
 BaselineScript::copyYieldAndAwaitEntries(JSScript* script, Vector<uint32_t>& yieldAndAwaitOffsets)
 {
     uint8_t** entries = yieldEntryList();
 
     for (size_t i = 0; i < yieldAndAwaitOffsets.length(); i++) {
@@ -793,21 +826,16 @@ void
 BaselineScript::copyICEntries(JSScript* script, const ICEntry* entries)
 {
     // Fix up the return offset in the IC entries and copy them in.
     // Also write out the IC entry ptrs in any fallback stubs that were added.
     for (uint32_t i = 0; i < numICEntries(); i++) {
         ICEntry& realEntry = icEntry(i);
         realEntry = entries[i];
 
-        if (!realEntry.hasStub()) {
-            // VM call without any stubs.
-            continue;
-        }
-
         // If the attached stub is a fallback stub, then fix it up with
         // a pointer to the (now available) realEntry.
         if (realEntry.firstStub()->isFallback()) {
             realEntry.firstStub()->toFallbackStub()->fixupICEntry(&realEntry);
         }
 
         if (realEntry.firstStub()->isTypeMonitor_Fallback()) {
             ICTypeMonitor_Fallback* stub = realEntry.firstStub()->toTypeMonitor_Fallback();
@@ -817,16 +845,24 @@ BaselineScript::copyICEntries(JSScript* 
         if (realEntry.firstStub()->isTableSwitch()) {
             ICTableSwitch* stub = realEntry.firstStub()->toTableSwitch();
             stub->fixupJumpTable(script, this);
         }
     }
 }
 
 void
+BaselineScript::copyRetAddrEntries(JSScript* script, const RetAddrEntry* entries)
+{
+    for (uint32_t i = 0; i < numRetAddrEntries(); i++) {
+        retAddrEntry(i) = entries[i];
+    }
+}
+
+void
 BaselineScript::adoptFallbackStubs(FallbackICStubSpace* stubSpace)
 {
     fallbackStubSpace_.adoptFrom(stubSpace);
 }
 
 void
 BaselineScript::copyPCMappingEntries(const CompactBufferWriter& entries)
 {
@@ -1104,20 +1140,16 @@ BaselineScript::toggleProfilerInstrument
 
 void
 BaselineScript::purgeOptimizedStubs(Zone* zone)
 {
     JitSpew(JitSpew_BaselineIC, "Purging optimized stubs");
 
     for (size_t i = 0; i < numICEntries(); i++) {
         ICEntry& entry = icEntry(i);
-        if (!entry.hasStub()) {
-            continue;
-        }
-
         ICStub* lastStub = entry.firstStub();
         while (lastStub->next()) {
             lastStub = lastStub->next();
         }
 
         if (lastStub->isFallback()) {
             // Unlink all stubs allocated in the optimized space.
             ICStub* stub = entry.firstStub();
@@ -1149,20 +1181,16 @@ BaselineScript::purgeOptimizedStubs(Zone
             MOZ_ASSERT(lastStub->isTableSwitch());
         }
     }
 
 #ifdef DEBUG
     // All remaining stubs must be allocated in the fallback space.
     for (size_t i = 0; i < numICEntries(); i++) {
         ICEntry& entry = icEntry(i);
-        if (!entry.hasStub()) {
-            continue;
-        }
-
         ICStub* stub = entry.firstStub();
         while (stub->next()) {
             MOZ_ASSERT(stub->allocatedInFallbackSpace());
             stub = stub->next();
         }
     }
 #endif
 }
@@ -1191,19 +1219,16 @@ DumpICInfo(JSScript* script)
     Fprinter& out = JitSpewPrinter();
 
     const char* filename = script->filename() ? script->filename() : "unknown";
     out.printf("Dumping IC info for %s:%d\n", filename,
             PCToLineNumber(script, script->code()));
 
     for (size_t i = 0; i < blScript->numICEntries(); i++) {
         ICEntry& entry = blScript->icEntry(i);
-        if (!entry.hasStub()) {
-            continue;
-        }
 
         unsigned column;
         jsbytecode* pc = entry.pc(script);
         unsigned int line = PCToLineNumber(script, pc, &column);
         out.printf("\t%s:%u:%u (%s) \t", filename, line, column, CodeName[*pc]);
 
         ICStub* stub = entry.firstStub();
         while (stub) {
--- a/js/src/jit/BaselineJIT.h
+++ b/js/src/jit/BaselineJIT.h
@@ -20,16 +20,17 @@
 
 namespace js {
 namespace jit {
 
 class StackValue;
 class ICEntry;
 class ICStub;
 class ControlFlowGraph;
+class ReturnAddressEntry;
 
 class PCMappingSlotInfo
 {
     uint8_t slotInfo_;
 
   public:
     // SlotInfo encoding:
     //  Bits 0 & 1: number of slots at top of stack which are unsynced.
@@ -103,34 +104,138 @@ struct DependentWasmImport
     size_t importIndex;
 
     DependentWasmImport(wasm::Instance& instance, size_t importIndex)
       : instance(&instance),
         importIndex(importIndex)
     { }
 };
 
+// Largest script that the baseline compiler will attempt to compile.
+#if defined(JS_CODEGEN_ARM)
+// ARM branches can only reach 32MB, and the macroassembler doesn't mitigate
+// that limitation. Use a stricter limit on the acceptable script size to
+// avoid crashing when branches go out of range.
+static constexpr uint32_t BaselineMaxScriptLength = 1000000u;
+#else
+static constexpr uint32_t BaselineMaxScriptLength = 0x0fffffffu;
+#endif
+
+// Limit the locals on a given script so that stack check on baseline frames
+// doesn't overflow a uint32_t value.
+// (BaselineMaxScriptSlots * sizeof(Value)) must fit within a uint32_t.
+static constexpr uint32_t BaselineMaxScriptSlots = 0xffffu;
+
+// An entry in the BaselineScript return address table. These entries are used
+// to determine the bytecode pc for a return address into Baseline code.
+//
+// There must be an entry for each location where we can end up calling into
+// C++ (directly or via script/trampolines) and C++ can request the current
+// bytecode pc (this includes anything that may throw an exception, GC, or walk
+// the stack). We currently add entries for each:
+//
+// * callVM
+// * IC
+// * DebugTrap (trampoline call)
+// * JSOP_RESUME (because this is like a scripted call)
+//
+// Note: see also BaselineFrame::HAS_OVERRIDE_PC.
+class RetAddrEntry
+{
+    // Offset from the start of the JIT code to the return address of the call.
+    uint32_t returnOffset_;
+
+    // The offset of this bytecode op within the JSScript.
+    uint32_t pcOffset_ : 28;
+
+  public:
+    enum class Kind : uint32_t {
+        // A for-op IC.
+        IC,
+
+        // A non-op IC.
+        NonOpIC,
+
+        // A callVM for an op.
+        CallVM,
+
+        // A callVM not for an op (e.g., in the prologue).
+        NonOpCallVM,
+
+        // A callVM for the warmup counter.
+        WarmupCounter,
+
+        // A callVM for the over-recursion check on function entry.
+        StackCheck,
+
+        // DebugTrapHandler (for debugger breakpoints/stepping).
+        DebugTrap,
+
+        // A callVM for Debug{Prologue,AfterYield,Epilogue}.
+        DebugPrologue,
+        DebugAfterYield,
+        DebugEpilogue,
+
+        Invalid
+    };
+
+  private:
+    // What this entry is for.
+    uint32_t kind_ : 4;
+
+  public:
+    RetAddrEntry(uint32_t pcOffset, Kind kind, CodeOffset retOffset)
+      : returnOffset_(uint32_t(retOffset.offset())), pcOffset_(pcOffset)
+    {
+        MOZ_ASSERT(returnOffset_ == retOffset.offset(),
+                   "retOffset must fit in returnOffset_");
+
+        // The pc offset must fit in at least 28 bits, since we shave off 4 for
+        // the Kind enum.
+        MOZ_ASSERT(pcOffset_ == pcOffset);
+        JS_STATIC_ASSERT(BaselineMaxScriptLength <= (1u << 28) - 1);
+        MOZ_ASSERT(pcOffset <= BaselineMaxScriptLength);
+        setKind(kind);
+    }
+
+    // Set the kind and assert that it's sane.
+    void setKind(Kind kind) {
+        MOZ_ASSERT(kind < Kind::Invalid);
+        kind_ = uint32_t(kind);
+        MOZ_ASSERT(this->kind() == kind);
+    }
+
+    CodeOffset returnOffset() const {
+        return CodeOffset(returnOffset_);
+    }
+
+    uint32_t pcOffset() const {
+        return pcOffset_;
+    }
+
+    jsbytecode* pc(JSScript* script) const {
+        return script->offsetToPC(pcOffset_);
+    }
+
+    Kind kind() const {
+        MOZ_ASSERT(kind_ < uint32_t(Kind::Invalid));
+        return Kind(kind_);
+    }
+    bool isForOp() const {
+        return kind() == Kind::IC;
+    }
+
+    void setNonICKind(Kind kind) {
+        MOZ_ASSERT(kind != Kind::IC && kind != Kind::NonOpIC);
+        setKind(kind);
+    }
+};
+
 struct BaselineScript
 {
-  public:
-    // Largest script that the baseline compiler will attempt to compile.
-#if defined(JS_CODEGEN_ARM)
-    // ARM branches can only reach 32MB, and the macroassembler doesn't mitigate
-    // that limitation. Use a stricter limit on the acceptable script size to
-    // avoid crashing when branches go out of range.
-    static const uint32_t MAX_JSSCRIPT_LENGTH = 1000000u;
-#else
-    static const uint32_t MAX_JSSCRIPT_LENGTH = 0x0fffffffu;
-#endif
-
-    // Limit the locals on a given script so that stack check on baseline frames
-    // doesn't overflow a uint32_t value.
-    // (MAX_JSSCRIPT_SLOTS * sizeof(Value)) must fit within a uint32_t.
-    static const uint32_t MAX_JSSCRIPT_SLOTS = 0xffffu;
-
   private:
     // Code pointer containing the actual method.
     HeapPtr<JitCode*> method_;
 
     // For functions with a call object, template objects to use for the call
     // object and decl env object (linked via the call object's enclosing
     // scope).
     HeapPtr<EnvironmentObject*> templateEnv_;
@@ -206,16 +311,19 @@ struct BaselineScript
     uint32_t flags_;
 
   private:
     void trace(JSTracer* trc);
 
     uint32_t icEntriesOffset_;
     uint32_t icEntries_;
 
+    uint32_t retAddrEntriesOffset_;
+    uint32_t retAddrEntries_;
+
     uint32_t pcMappingIndexOffset_;
     uint32_t pcMappingIndexEntries_;
 
     uint32_t pcMappingOffset_;
     uint32_t pcMappingSize_;
 
     // List mapping indexes of bytecode type sets to the offset of the opcode
     // they correspond to, for use by TypeScript::BytecodeTypes.
@@ -261,16 +369,17 @@ struct BaselineScript
     }
 
     static BaselineScript* New(JSScript* jsscript,
                                uint32_t prologueOffset, uint32_t epilogueOffset,
                                uint32_t profilerEnterToggleOffset,
                                uint32_t profilerExitToggleOffset,
                                uint32_t postDebugPrologueOffset,
                                size_t icEntries,
+                               size_t retAddrEntries,
                                size_t pcMappingIndexEntries, size_t pcMappingSize,
                                size_t bytecodeTypeMapEntries,
                                size_t yieldEntries,
                                size_t traceLoggerToggleOffsetEntries);
 
     static void Trace(JSTracer* trc, BaselineScript* script);
     static void Destroy(FreeOp* fop, BaselineScript* script);
 
@@ -353,16 +462,19 @@ struct BaselineScript
     }
     uint8_t* postDebugPrologueAddr() const {
         return method_->raw() + postDebugPrologueOffset_;
     }
 
     ICEntry* icEntryList() {
         return (ICEntry*)(reinterpret_cast<uint8_t*>(this) + icEntriesOffset_);
     }
+    RetAddrEntry* retAddrEntryList() {
+        return (RetAddrEntry*)(reinterpret_cast<uint8_t*>(this) + retAddrEntriesOffset_);
+    }
     uint8_t** yieldEntryList() {
         return (uint8_t**)(reinterpret_cast<uint8_t*>(this) + yieldEntriesOffset_);
     }
     PCMappingIndexEntry* pcMappingIndexEntryList() {
         return (PCMappingIndexEntry*)(reinterpret_cast<uint8_t*>(this) + pcMappingIndexOffset_);
     }
     uint8_t* pcMappingData() {
         return reinterpret_cast<uint8_t*>(this) + pcMappingOffset_;
@@ -388,33 +500,41 @@ struct BaselineScript
     }
 
     bool containsCodeAddress(uint8_t* addr) const {
         return method()->raw() <= addr && addr <= method()->raw() + method()->instructionsSize();
     }
 
     ICEntry* maybeICEntryFromPCOffset(uint32_t pcOffset);
     ICEntry* maybeICEntryFromPCOffset(uint32_t pcOffset,
-                                              ICEntry* prevLookedUpEntry);
+                                      ICEntry* prevLookedUpEntry);
 
     ICEntry& icEntry(size_t index);
-    ICEntry& icEntryFromReturnOffset(CodeOffset returnOffset);
     ICEntry& icEntryFromPCOffset(uint32_t pcOffset);
     ICEntry& icEntryFromPCOffset(uint32_t pcOffset, ICEntry* prevLookedUpEntry);
-    ICEntry& callVMEntryFromPCOffset(uint32_t pcOffset);
-    ICEntry& stackCheckICEntry();
-    ICEntry& warmupCountICEntry();
-    ICEntry& icEntryFromReturnAddress(uint8_t* returnAddr);
-    uint8_t* returnAddressForIC(const ICEntry& ent);
+
+    uint8_t* returnAddressForEntry(const RetAddrEntry& ent);
+
+    RetAddrEntry& retAddrEntry(size_t index);
+    RetAddrEntry& retAddrEntryFromPCOffset(uint32_t pcOffset, RetAddrEntry::Kind kind);
+    RetAddrEntry& prologueRetAddrEntry(RetAddrEntry::Kind kind);
+    RetAddrEntry& retAddrEntryFromReturnOffset(CodeOffset returnOffset);
+    RetAddrEntry& retAddrEntryFromReturnAddress(uint8_t* returnAddr);
 
     size_t numICEntries() const {
         return icEntries_;
     }
 
+    size_t numRetAddrEntries() const {
+        return retAddrEntries_;
+    }
+
     void copyICEntries(JSScript* script, const ICEntry* entries);
+    void copyRetAddrEntries(JSScript* script, const RetAddrEntry* entries);
+
     void adoptFallbackStubs(FallbackICStubSpace* stubSpace);
 
     void copyYieldAndAwaitEntries(JSScript* script, Vector<uint32_t>& yieldAndAwaitOffsets);
 
     PCMappingIndexEntry& pcMappingIndexEntry(size_t index);
     CompactBufferReader pcMappingReader(size_t indexEntry);
 
     size_t numPCMappingIndexEntries() const {
--- a/js/src/jit/JSJitFrameIter.cpp
+++ b/js/src/jit/JSJitFrameIter.cpp
@@ -142,20 +142,20 @@ JSJitFrameIter::baselineScriptAndPc(JSSc
     // Use the frame's override pc, if we have one. This should only happen
     // when we're in FinishBailoutToBaseline, handling an exception or toggling
     // debug mode.
     if (jsbytecode* overridePc = baselineFrame()->maybeOverridePc()) {
         *pcRes = overridePc;
         return;
     }
 
-    // Else, there must be an ICEntry for the current return address.
+    // Else, there must be a RetAddrEntry for the current return address.
     uint8_t* retAddr = returnAddressToFp();
-    ICEntry& icEntry = script->baselineScript()->icEntryFromReturnAddress(retAddr);
-    *pcRes = icEntry.pc(script);
+    RetAddrEntry& entry = script->baselineScript()->retAddrEntryFromReturnAddress(retAddr);
+    *pcRes = entry.pc(script);
 }
 
 Value*
 JSJitFrameIter::actualArgs() const
 {
     return jsFrame()->argv() + 1;
 }
 
--- a/js/src/jit/VMFunctions.cpp
+++ b/js/src/jit/VMFunctions.cpp
@@ -1111,17 +1111,17 @@ InitRestParameter(JSContext* cx, uint32_
 }
 
 bool
 HandleDebugTrap(JSContext* cx, BaselineFrame* frame, uint8_t* retAddr, bool* mustReturn)
 {
     *mustReturn = false;
 
     RootedScript script(cx, frame->script());
-    jsbytecode* pc = script->baselineScript()->icEntryFromReturnAddress(retAddr).pc(script);
+    jsbytecode* pc = script->baselineScript()->retAddrEntryFromReturnAddress(retAddr).pc(script);
 
     if (*pc == JSOP_DEBUGAFTERYIELD) {
         // JSOP_DEBUGAFTERYIELD will set the frame's debuggee flag and call the
         // onEnterFrame handler, but if we set a breakpoint there we have to do
         // it now.
         MOZ_ASSERT(!frame->isDebuggee());
 
         if (!DebugAfterYield(cx, frame, pc, mustReturn)) {
--- a/js/src/jit/arm/SharedICHelpers-arm.h
+++ b/js/src/jit/arm/SharedICHelpers-arm.h
@@ -26,32 +26,33 @@ EmitRestoreTailCallReg(MacroAssembler& m
 
 inline void
 EmitRepushTailCallReg(MacroAssembler& masm)
 {
     // No-op on ARM because link register is always holding the return address.
 }
 
 inline void
-EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+EmitCallIC(MacroAssembler& masm, CodeOffset* patchOffset, CodeOffset* callOffset)
 {
     // Move ICEntry offset into ICStubReg
     CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
     *patchOffset = offset;
 
     // Load stub pointer into ICStubReg
     masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
 
     // Load stubcode pointer from BaselineStubEntry.
     // R2 won't be active when we call ICs, so we can use r0.
     MOZ_ASSERT(R2 == ValueOperand(r1, r0));
     masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
 
     // Call the stubcode via a direct branch-and-link.
     masm.ma_blx(r0);
+    *callOffset = CodeOffset(masm.currentOffset());
 }
 
 inline void
 EmitEnterTypeMonitorIC(MacroAssembler& masm,
                        size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
 {
     // This is expected to be called from within an IC, when ICStubReg is
     // properly initialized to point to the stub.
--- a/js/src/jit/arm64/SharedICHelpers-arm64.h
+++ b/js/src/jit/arm64/SharedICHelpers-arm64.h
@@ -26,32 +26,33 @@ EmitRestoreTailCallReg(MacroAssembler& m
 
 inline void
 EmitRepushTailCallReg(MacroAssembler& masm)
 {
     // No-op on ARM because link register is always holding the return address.
 }
 
 inline void
-EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+EmitCallIC(MacroAssembler& masm, CodeOffset* patchOffset, CodeOffset* callOffset)
 {
     // Move ICEntry offset into ICStubReg
     CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
     *patchOffset = offset;
 
     // Load stub pointer into ICStubReg
     masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
 
     // Load stubcode pointer from BaselineStubEntry.
     // R2 won't be active when we call ICs, so we can use r0.
     MOZ_ASSERT(R2 == ValueOperand(r0));
     masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), r0);
 
     // Call the stubcode via a direct branch-and-link.
     masm.Blr(x0);
+    *callOffset = CodeOffset(masm.currentOffset());
 }
 
 inline void
 EmitEnterTypeMonitorIC(MacroAssembler& masm,
                        size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
 {
     // This is expected to be called from within an IC, when ICStubReg is
     // properly initialized to point to the stub.
--- a/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
+++ b/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
@@ -38,31 +38,32 @@ EmitRestoreTailCallReg(MacroAssembler& m
 
 inline void
 EmitRepushTailCallReg(MacroAssembler& masm)
 {
     // No-op on MIPS because ra register is always holding the return address.
 }
 
 inline void
-EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+EmitCallIC(MacroAssembler& masm, CodeOffset* patchOffset, CodeOffset* callOffset)
 {
     // Move ICEntry offset into ICStubReg.
     CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
     *patchOffset = offset;
 
     // Load stub pointer into ICStubReg.
     masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
 
     // Load stubcode pointer from BaselineStubEntry.
     // R2 won't be active when we call ICs, so we can use it as scratch.
     masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
 
     // Call the stubcode via a direct jump-and-link
     masm.call(R2.scratchReg());
+    *callOffset = CodeOffset(masm.currentOffset());
 }
 
 inline void
 EmitEnterTypeMonitorIC(MacroAssembler& masm,
                        size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
 {
     // This is expected to be called from within an IC, when ICStubReg
     // is properly initialized to point to the stub.
--- a/js/src/jit/none/SharedICHelpers-none.h
+++ b/js/src/jit/none/SharedICHelpers-none.h
@@ -9,17 +9,17 @@
 
 namespace js {
 namespace jit {
 
 static const size_t ICStackValueOffset = 0;
 
 inline void EmitRestoreTailCallReg(MacroAssembler&) { MOZ_CRASH(); }
 inline void EmitRepushTailCallReg(MacroAssembler&) { MOZ_CRASH(); }
-inline void EmitCallIC(CodeOffset*, MacroAssembler&) { MOZ_CRASH(); }
+inline void EmitCallIC(MacroAssembler&, CodeOffset*, CodeOffset*) { MOZ_CRASH(); }
 inline void EmitEnterTypeMonitorIC(MacroAssembler&, size_t v = 0) { MOZ_CRASH(); }
 inline void EmitReturnFromIC(MacroAssembler&) { MOZ_CRASH(); }
 inline void EmitChangeICReturnAddress(MacroAssembler&, Register) { MOZ_CRASH(); }
 inline void EmitBaselineLeaveStubFrame(MacroAssembler&, bool v = false) { MOZ_CRASH(); }
 inline void EmitStubGuardFailure(MacroAssembler&) { MOZ_CRASH(); }
 
 template <typename T> inline void EmitPreBarrier(MacroAssembler&, T, MIRType) { MOZ_CRASH(); }
 
--- a/js/src/jit/x64/SharedICHelpers-x64.h
+++ b/js/src/jit/x64/SharedICHelpers-x64.h
@@ -26,28 +26,29 @@ EmitRestoreTailCallReg(MacroAssembler& m
 
 inline void
 EmitRepushTailCallReg(MacroAssembler& masm)
 {
     masm.Push(ICTailCallReg);
 }
 
 inline void
-EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+EmitCallIC(MacroAssembler& masm, CodeOffset* patchOffset, CodeOffset* callOffset)
 {
     // Move ICEntry offset into ICStubReg
     CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
     *patchOffset = offset;
 
     // Load stub pointer into ICStubReg
     masm.loadPtr(Address(ICStubReg, (int32_t) ICEntry::offsetOfFirstStub()),
                  ICStubReg);
 
     // Call the stubcode.
     masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+    *callOffset = CodeOffset(masm.currentOffset());
 }
 
 inline void
 EmitEnterTypeMonitorIC(MacroAssembler& masm,
                        size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
 {
     // This is expected to be called from within an IC, when ICStubReg
     // is properly initialized to point to the stub.
--- a/js/src/jit/x86/SharedICHelpers-x86.h
+++ b/js/src/jit/x86/SharedICHelpers-x86.h
@@ -26,29 +26,30 @@ EmitRestoreTailCallReg(MacroAssembler& m
 
 inline void
 EmitRepushTailCallReg(MacroAssembler& masm)
 {
     masm.Push(ICTailCallReg);
 }
 
 inline void
-EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+EmitCallIC(MacroAssembler& masm, CodeOffset* patchOffset, CodeOffset* callOffset)
 {
     // Move ICEntry offset into ICStubReg
     CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
     *patchOffset = offset;
 
     // Load stub pointer into ICStubReg
     masm.loadPtr(Address(ICStubReg, (int32_t) ICEntry::offsetOfFirstStub()),
                  ICStubReg);
 
     // Load stubcode pointer from BaselineStubEntry into ICTailCallReg
     // ICTailCallReg will always be unused in the contexts where ICs are called.
     masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+    *callOffset = CodeOffset(masm.currentOffset());
 }
 
 inline void
 EmitEnterTypeMonitorIC(MacroAssembler& masm,
                        size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
 {
     // This is expected to be called from within an IC, when ICStubReg
     // is properly initialized to point to the stub.