Bug 1533890: Add fun_apply support to CacheIR r=mgaudet
authorIain Ireland <iireland@mozilla.com>
Mon, 08 Apr 2019 15:28:49 +0000
changeset 468392 66ea618f7b136044ceb240b1ed8190f6fe42a4b9
parent 468391 f05cdb03558fb1bdf501bfb77312e864c355ebdf
child 468393 8664fa8a8a10098284899e8ca8e843072fcce9ab
push id35835
push useraciure@mozilla.com
push dateMon, 08 Apr 2019 19:00:29 +0000
treeherdermozilla-central@40456af7da1c [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersmgaudet
bugs1533890
milestone68.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1533890: Add fun_apply support to CacheIR r=mgaudet This patch moves ScriptedApplyArgs and ScriptedApplyArray to CacheIR, and adds NativeApplyArgs and NativeApplyArray. Like a spread call, FunApply updates argc as part of the call op, after we've passed all the guards. Comparisons: - The new code in BaselineCacheIRCompiler::updateArgc corresponds to parts of ICCallStubCompiler::guardFunApply. - BaselineCacheIRCompiler::emitGuardFunApply also corresponds to ICCallStubCompiler::guardFunApply. - BaselineCacheIRCompiler::pushFunApplyArgs corresponds to ICCallStubCompiler::pushCallerArguments - BaselineCacheIRCompiler::pushFunApplyArray corresponds to ICCallStubCompiler::pushArrayArguments - CallIRGenerator::tryAttachFunApply corresponds to TryAttachFunApplyStub + ICCall_ScriptedApplyArray::Compiler::generateStubCode Differential Revision: https://phabricator.services.mozilla.com/D25872
js/src/jit/BaselineCacheIRCompiler.cpp
js/src/jit/CacheIR.cpp
js/src/jit/CacheIR.h
js/src/jit/CacheIRCompiler.h
js/src/jit/IonCacheIRCompiler.cpp
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -65,26 +65,29 @@ class MOZ_RAII BaselineCacheIRCompiler :
   MOZ_MUST_USE bool callTypeUpdateIC(Register obj, ValueOperand val,
                                      Register scratch,
                                      LiveGeneralRegisterSet saveRegs);
 
   MOZ_MUST_USE bool emitStoreSlotShared(bool isFixed);
   MOZ_MUST_USE bool emitAddAndStoreSlotShared(CacheOp op);
 
   bool updateArgc(CallFlags flags, Register argcReg, Register scratch);
-  void loadStackObject(ArgumentKind slot, CallFlags flags, size_t stackPushed,
+  void loadStackObject(ArgumentKind kind, CallFlags flags, size_t stackPushed,
                        Register argcReg, Register dest);
   void pushCallArguments(Register argcReg, Register scratch, Register scratch2,
                          bool isJitCall, bool isConstructing);
-  void pushSpreadCallArguments(Register argcReg, Register scratch,
-                               Register scratch2, bool isJitCall,
-                               bool isConstructing);
+  void pushArrayArguments(Register argcReg, Register scratch, Register scratch2,
+                          bool isJitCall, bool isConstructing);
   void pushFunCallArguments(Register argcReg, Register calleeReg,
                             Register scratch, Register scratch2,
                             bool isJitCall);
+  void pushFunApplyArgs(Register argcReg, Register calleeReg, Register scratch,
+                        Register scratch2, bool isJitCall);
+  void pushFunApplyArray(Register argcReg, Register scratch, Register scratch2,
+                         bool isJitCall);
   void createThis(Register argcReg, Register calleeReg, Register scratch,
                   CallFlags flags);
   void updateReturnValue();
 
   enum class NativeCallType { Native, ClassHook };
   bool emitCallNativeShared(NativeCallType callType);
 
  public:
@@ -2423,64 +2426,165 @@ bool BaselineCacheIRCompiler::emitCallSt
   return true;
 }
 
 // The value of argc entering the call IC is not always the value of
 // argc entering the callee. (For example, argc for a spread call IC
 // is always 1, but argc for the callee is the length of the array.)
 // In these cases, we update argc as part of the call op itself, to
 // avoid modifying input operands while it is still possible to fail a
-// guard. The code to update argc overlaps with some of the guard
-// code, so for the sake of efficiency we perform the final set of
-// guards here, just before updating argc. (In a perfect world, we
-// would have more registers and we would not need to worry about
-// modifying argc. In the real world, we have x86-32.)
+// guard. We also limit callee argc to a reasonable value to avoid
+// blowing the stack limit.
 bool BaselineCacheIRCompiler::updateArgc(CallFlags flags, Register argcReg,
                                          Register scratch) {
+  static_assert(CacheIRCompiler::MAX_ARGS_ARRAY_LENGTH <= ARGS_LENGTH_MAX,
+                "maximum arguments length for optimized stub should be <= "
+                "ARGS_LENGTH_MAX");
+
   CallFlags::ArgFormat format = flags.getArgFormat();
   switch (format) {
     case CallFlags::Standard:
       // Standard calls have no extra guards, and argc is already correct.
       return true;
     case CallFlags::FunCall:
       // fun_call has no extra guards, and argc will be corrected in
       // pushFunCallArguments.
       return true;
+    case CallFlags::FunApplyArray: {
+      // GuardFunApply has already guarded argc while checking for
+      // holes in the array, so we don't need to guard again here. We
+      // do still need to update argc.
+      BaselineFrameSlot slot(0);
+      masm.unboxObject(allocator.addressOf(masm, slot), argcReg);
+      masm.loadPtr(Address(argcReg, NativeObject::offsetOfElements()), argcReg);
+      masm.load32(Address(argcReg, ObjectElements::offsetOfLength()), argcReg);
+      return true;
+    }
     default:
       break;
   }
 
+  // We need to guard the length of the arguments.
+  FailurePath* failure;
+  if (!addFailurePath(&failure)) {
+    return false;
+  }
+
+  // Load callee argc into scratch.
+  switch (flags.getArgFormat()) {
+    case CallFlags::Spread: {
+      // Load the length of the elements.
+      BaselineFrameSlot slot(flags.isConstructing());
+      masm.unboxObject(allocator.addressOf(masm, slot), scratch);
+      masm.loadPtr(Address(scratch, NativeObject::offsetOfElements()), scratch);
+      masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
+    } break;
+    case CallFlags::FunApplyArgs: {
+      // The length of |arguments| is stored in the baseline frame.
+      Address numActualArgsAddr(BaselineFrameReg,
+                                BaselineFrame::offsetOfNumActualArgs());
+      masm.load32(numActualArgsAddr, scratch);
+    } break;
+    default:
+      MOZ_CRASH("Unknown arg format");
+  }
+
+  // Ensure that callee argc does not exceed the limit.
+  masm.branch32(Assembler::Above, scratch,
+                Imm32(CacheIRCompiler::MAX_ARGS_ARRAY_LENGTH),
+                failure->label());
+
+  // We're past the final guard. Update argc with the new value.
+  masm.move32(scratch, argcReg);
+
+  return true;
+}
+
+bool BaselineCacheIRCompiler::emitGuardFunApply() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
+  Register argcReg = allocator.useRegister(masm, reader.int32OperandId());
+  AutoScratchRegister scratch(allocator, masm);
+  AutoScratchRegister scratch2(allocator, masm);
+  CallFlags flags = reader.callFlags();
+
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
+  // Ensure argc == 2
+  masm.branch32(Assembler::NotEqual, argcReg, Imm32(2), failure->label());
+
+  // Stack layout is (bottom to top):
+  //   Callee (fun_apply)
+  //   ThisValue (target)
+  //   Arg0 (new this)
+  //   Arg1 (argument array)
+
+  Address argsAddr = allocator.addressOf(masm, BaselineFrameSlot(0));
   switch (flags.getArgFormat()) {
-    case CallFlags::Spread: {
-      // Find length of args array
-      BaselineFrameSlot slot(flags.isConstructing());
-      masm.unboxObject(allocator.addressOf(masm, slot), scratch);
-      masm.loadPtr(Address(scratch, NativeObject::offsetOfElements()), scratch);
-      masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
-
-      // Limit actual argc to something reasonable to avoid blowing stack limit.
-      static_assert(CacheIRCompiler::MAX_ARGS_SPREAD_LENGTH <= ARGS_LENGTH_MAX,
-                    "maximum arguments length for optimized stub should be <= "
-                    "ARGS_LENGTH_MAX");
-      masm.branch32(Assembler::Above, scratch,
-                    Imm32(CacheIRCompiler::MAX_ARGS_SPREAD_LENGTH),
+    case CallFlags::FunApplyArgs: {
+      // Ensure that args is magic |arguments|.
+      masm.branchTestMagic(Assembler::NotEqual, argsAddr, failure->label());
+
+      // Ensure that this frame doesn't have an arguments object.
+      Address flagAddr(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags());
+      masm.branchTest32(Assembler::NonZero, flagAddr,
+                        Imm32(BaselineFrame::HAS_ARGS_OBJ), failure->label());
+    } break;
+    case CallFlags::FunApplyArray: {
+      // Ensure that args is an array object.
+      masm.branchTestObject(Assembler::NotEqual, argsAddr, failure->label());
+      masm.unboxObject(argsAddr, scratch);
+      const Class* clasp = &ArrayObject::class_;
+      masm.branchTestObjClass(Assembler::NotEqual, scratch, clasp, scratch2,
+                              scratch, failure->label());
+
+      // Get the array elements and length
+      Register elementsReg = scratch;
+      masm.loadPtr(Address(scratch, NativeObject::offsetOfElements()),
+                   elementsReg);
+      Register calleeArgcReg = scratch2;
+      masm.load32(Address(elementsReg, ObjectElements::offsetOfLength()),
+                  calleeArgcReg);
+
+      // Ensure that callee argc does not exceed the limit.  Note that
+      // we do this earlier for FunApplyArray than for FunApplyArgs,
+      // because we don't want to loop over every element of the array
+      // looking for holes if we already know it is too long.
+      masm.branch32(Assembler::Above, calleeArgcReg,
+                    Imm32(CacheIRCompiler::MAX_ARGS_ARRAY_LENGTH),
                     failure->label());
 
-      // We're past the final guard. Overwrite argc with the new value.
-      masm.move32(scratch, argcReg);
+      // Ensure that length == initializedLength
+      Address initLenAddr(elementsReg,
+                          ObjectElements::offsetOfInitializedLength());
+      masm.branch32(Assembler::NotEqual, initLenAddr, calleeArgcReg,
+                    failure->label());
+
+      // Ensure no holes. Loop through array and verify no elements are magic.
+      Register start = elementsReg;
+      Register end = scratch2;
+      BaseValueIndex endAddr(elementsReg, calleeArgcReg);
+      masm.computeEffectiveAddress(endAddr, end);
+
+      Label loop;
+      Label endLoop;
+      masm.bind(&loop);
+      masm.branchPtr(Assembler::AboveOrEqual, start, end, &endLoop);
+      masm.branchTestMagic(Assembler::Equal, Address(start, 0),
+                           failure->label());
+      masm.addPtr(Imm32(sizeof(Value)), start);
+      masm.jump(&loop);
+      masm.bind(&endLoop);
     } break;
     default:
-      MOZ_CRASH("Unknown arg format");
+      MOZ_CRASH("Invalid arg format");
+      break;
   }
-
   return true;
 }
 
 void BaselineCacheIRCompiler::pushCallArguments(Register argcReg,
                                                 Register scratch,
                                                 Register scratch2,
                                                 bool isJitCall,
                                                 bool isConstructing) {
@@ -2517,21 +2621,21 @@ void BaselineCacheIRCompiler::pushCallAr
     masm.addPtr(Imm32(sizeof(Value)), argPtr);
 
     masm.sub32(Imm32(1), countReg);
     masm.jump(&loop);
   }
   masm.bind(&done);
 }
 
-void BaselineCacheIRCompiler::pushSpreadCallArguments(Register argcReg,
-                                                      Register scratch,
-                                                      Register scratch2,
-                                                      bool isJitCall,
-                                                      bool isConstructing) {
+void BaselineCacheIRCompiler::pushArrayArguments(Register argcReg,
+                                                 Register scratch,
+                                                 Register scratch2,
+                                                 bool isJitCall,
+                                                 bool isConstructing) {
   // Pull the array off the stack before aligning.
   Register startReg = scratch;
   masm.unboxObject(Address(masm.getStackPointer(),
                            (isConstructing * sizeof(Value)) + STUB_FRAME_SIZE),
                    startReg);
   masm.loadPtr(Address(startReg, NativeObject::offsetOfElements()), startReg);
 
   // Align the stack such that the JitFrameLayout is aligned on the
@@ -2624,16 +2728,64 @@ void BaselineCacheIRCompiler::pushFunCal
   masm.pushValue(UndefinedValue());
 
   // Store |callee|.
   masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
 
   masm.bind(&done);
 }
 
+void BaselineCacheIRCompiler::pushFunApplyArgs(Register argcReg,
+                                               Register calleeReg,
+                                               Register scratch,
+                                               Register scratch2,
+                                               bool isJitCall) {
+  // Push the caller's arguments onto the stack.
+
+  // Find the start of the caller's arguments.
+  Register startReg = scratch;
+  masm.loadPtr(Address(BaselineFrameReg, 0), startReg);
+  masm.addPtr(Imm32(BaselineFrame::offsetOfArg(0)), startReg);
+
+  if (isJitCall) {
+    masm.alignJitStackBasedOnNArgs(argcReg);
+  }
+
+  Register endReg = scratch2;
+  BaseValueIndex endAddr(startReg, argcReg);
+  masm.computeEffectiveAddress(endAddr, endReg);
+
+  // Copying pre-decrements endReg by sizeof(Value) until startReg is reached
+  Label copyDone;
+  Label copyStart;
+  masm.bind(&copyStart);
+  masm.branchPtr(Assembler::Equal, endReg, startReg, &copyDone);
+  masm.subPtr(Imm32(sizeof(Value)), endReg);
+  masm.pushValue(Address(endReg, 0));
+  masm.jump(&copyStart);
+  masm.bind(&copyDone);
+
+  // Push arg0 as |this| for call
+  masm.pushValue(Address(BaselineFrameReg, STUB_FRAME_SIZE + sizeof(Value)));
+
+  // Push |callee|.
+  masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
+}
+
+void BaselineCacheIRCompiler::pushFunApplyArray(Register argcReg,
+                                                Register scratch,
+                                                Register scratch2,
+                                                bool isJitCall) {
+  // Push the contents of the array onto the stack.
+  // We have already ensured that the array is packed and has no holes.
+
+  pushArrayArguments(argcReg, scratch, scratch2, isJitCall,
+                     /*isConstructing =*/false);
+}
+
 bool BaselineCacheIRCompiler::emitCallNativeShared(NativeCallType callType) {
   AutoOutputRegister output(*this);
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   AutoScratchRegister scratch2(allocator, masm);
 
   Register calleeReg = allocator.useRegister(masm, reader.objOperandId());
   Register argcReg = allocator.useRegister(masm, reader.int32OperandId());
 
@@ -2657,23 +2809,30 @@ bool BaselineCacheIRCompiler::emitCallNa
   }
 
   switch (flags.getArgFormat()) {
     case CallFlags::Standard:
       pushCallArguments(argcReg, scratch, scratch2, /*isJitCall =*/false,
                         isConstructing);
       break;
     case CallFlags::Spread:
-      pushSpreadCallArguments(argcReg, scratch, scratch2, /*isJitCall =*/false,
-                              isConstructing);
+      pushArrayArguments(argcReg, scratch, scratch2, /*isJitCall =*/false,
+                         isConstructing);
       break;
     case CallFlags::FunCall:
       pushFunCallArguments(argcReg, calleeReg, scratch, scratch2,
                            /*isJitCall = */ false);
       break;
+    case CallFlags::FunApplyArgs:
+      pushFunApplyArgs(argcReg, calleeReg, scratch, scratch2,
+                       /*isJitCall = */ false);
+      break;
+    case CallFlags::FunApplyArray:
+      pushFunApplyArray(argcReg, scratch, scratch2, /*isJitCall = */ false);
+      break;
     default:
       MOZ_CRASH("Invalid arg format");
   }
 
   // Native functions have the signature:
   //
   //    bool (*)(JSContext*, unsigned, Value* vp)
   //
@@ -2914,23 +3073,30 @@ bool BaselineCacheIRCompiler::emitCallSc
   }
 
   switch (flags.getArgFormat()) {
     case CallFlags::Standard:
       pushCallArguments(argcReg, scratch, scratch2, /*isJitCall = */ true,
                         isConstructing);
       break;
     case CallFlags::Spread:
-      pushSpreadCallArguments(argcReg, scratch, scratch2, /*isJitCall = */ true,
-                              isConstructing);
+      pushArrayArguments(argcReg, scratch, scratch2, /*isJitCall = */ true,
+                         isConstructing);
       break;
     case CallFlags::FunCall:
       pushFunCallArguments(argcReg, calleeReg, scratch, scratch2,
                            /*isJitCall = */ true);
       break;
+    case CallFlags::FunApplyArgs:
+      pushFunApplyArgs(argcReg, calleeReg, scratch, scratch2,
+                       /*isJitCall = */ true);
+      break;
+    case CallFlags::FunApplyArray:
+      pushFunApplyArray(argcReg, scratch, scratch2, /*isJitCall = */ true);
+      break;
     default:
       MOZ_CRASH("Invalid arg format");
   }
 
   // TODO: The callee is currently on top of the stack.  The old
   // implementation popped it at this point, but I'm not sure why,
   // because it is still in a register along both paths. For now we
   // just free that stack slot to make things line up. This should
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -4970,25 +4970,99 @@ bool CallIRGenerator::tryAttachFunCall()
     trackAttached("Scripted fun_call");
   } else {
     trackAttached("Native fun_call");
   }
 
   return true;
 }
 
+bool CallIRGenerator::tryAttachFunApply() {
+  if (JitOptions.disableCacheIRCalls) {
+    return false;
+  }
+
+  if (argc_ != 2) {
+    return false;
+  }
+
+  if (!thisval_.isObject() || !thisval_.toObject().is<JSFunction>()) {
+    return false;
+  }
+  RootedFunction target(cx_, &thisval_.toObject().as<JSFunction>());
+
+  bool isScripted = target->isInterpreted() || target->isNativeWithJitEntry();
+  MOZ_ASSERT_IF(!isScripted, target->isNative());
+
+  CallFlags::ArgFormat format = CallFlags::Standard;
+  if (args_[1].isMagic(JS_OPTIMIZED_ARGUMENTS) && !script_->needsArgsObj()) {
+    format = CallFlags::FunApplyArgs;
+  } else if (args_[1].isObject() && args_[1].toObject().is<ArrayObject>()) {
+    format = CallFlags::FunApplyArray;
+  } else {
+    return false;
+  }
+
+  Int32OperandId argcId(writer.setInputOperandId(0));
+
+  // Guard that callee is the |fun_apply| native function.
+  ValOperandId calleeValId =
+      writer.loadArgumentDynamicSlot(ArgumentKind::Callee, argcId);
+  ObjOperandId calleeObjId = writer.guardIsObject(calleeValId);
+  writer.guardSpecificNativeFunction(calleeObjId, fun_apply);
+
+  // Guard that |this| is a function.
+  ValOperandId thisValId =
+      writer.loadArgumentDynamicSlot(ArgumentKind::This, argcId);
+  ObjOperandId thisObjId = writer.guardIsObject(thisValId);
+  writer.guardClass(thisObjId, GuardClassKind::JSFunction);
+
+  // Guard that function is not a class constructor.
+  writer.guardNotClassConstructor(thisObjId);
+
+  CallFlags targetFlags(format);
+  writer.guardFunApply(argcId, targetFlags);
+
+  if (isScripted) {
+    // Guard that function is scripted.
+    writer.guardFunctionHasJitEntry(thisObjId, /*isConstructing =*/false);
+    writer.callScriptedFunction(thisObjId, argcId, targetFlags);
+  } else {
+    // Guard that function is native.
+    writer.guardFunctionIsNative(thisObjId);
+    writer.callAnyNativeFunction(thisObjId, argcId, targetFlags);
+  }
+
+  writer.typeMonitorResult();
+  cacheIRStubKind_ = BaselineCacheIRStubKind::Monitored;
+
+  if (isScripted) {
+    trackAttached("Scripted fun_apply");
+  } else {
+    trackAttached("Native fun_apply");
+  }
+
+  return true;
+}
+
 bool CallIRGenerator::tryAttachSpecialCaseCallNative(HandleFunction callee) {
   MOZ_ASSERT(callee->isNative());
 
   if (op_ == JSOP_FUNCALL && callee->native() == fun_call) {
     if (tryAttachFunCall()) {
       return true;
     }
   }
 
+  if (op_ == JSOP_FUNAPPLY && callee->native() == fun_apply) {
+    if (tryAttachFunApply()) {
+      return true;
+    }
+  }
+
   if (op_ != JSOP_CALL && op_ != JSOP_CALL_IGNORES_RV) {
     return false;
   }
 
   if (callee->native() == js::intrinsic_StringSplitString) {
     if (tryAttachStringSplit()) {
       return true;
     }
@@ -5378,16 +5452,17 @@ bool CallIRGenerator::tryAttachStub() {
   // Some opcodes are not yet supported.
   switch (op_) {
     case JSOP_CALL:
     case JSOP_CALL_IGNORES_RV:
     case JSOP_SPREADCALL:
     case JSOP_NEW:
     case JSOP_SPREADNEW:
     case JSOP_FUNCALL:
+    case JSOP_FUNAPPLY:
       break;
     default:
       return false;
   }
 
   // Only optimize when the mode is Specialized.
   if (mode_ != ICState::Mode::Specialized) {
     return false;
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -245,16 +245,17 @@ extern const uint32_t ArgLengths[];
   _(GuardTagNotEqual, Id, Id)                                                  \
   _(GuardXrayExpandoShapeAndDefaultProto, Id, Byte, Field)                     \
   _(GuardFunctionPrototype, Id, Id, Field)                                     \
   _(GuardNoAllocationMetadataBuilder, None)                                    \
   _(GuardObjectGroupNotPretenured, Field)                                      \
   _(GuardFunctionHasJitEntry, Id, Byte)                                        \
   _(GuardFunctionIsNative, Id)                                                 \
   _(GuardNotClassConstructor, Id)                                              \
+  _(GuardFunApply, Id, Byte)                                                   \
   _(LoadObject, Id, Field)                                                     \
   _(LoadProto, Id, Id)                                                         \
   _(LoadEnclosingEnvironment, Id, Id)                                          \
   _(LoadWrapperTarget, Id, Id)                                                 \
   _(LoadValueTag, Id, Id)                                                      \
   _(LoadArgumentFixedSlot, Id, Byte)                                           \
   _(LoadArgumentDynamicSlot, Id, Id, Byte)                                     \
                                                                                \
@@ -465,17 +466,19 @@ using FieldOffset = uint8_t;
 // CacheIRWriter encodes the CallFlags in CacheIR, and CacheIRReader
 // decodes them and uses them for compilation.)
 class CallFlags {
  public:
   enum ArgFormat : uint8_t {
     Standard,
     Spread,
     FunCall,
-    LastArgFormat = FunCall
+    FunApplyArgs,
+    FunApplyArray,
+    LastArgFormat = FunApplyArray
   };
 
   CallFlags(bool isConstructing, bool isSpread, bool isSameRealm = false)
       : argFormat_(isSpread ? Spread : Standard),
         isConstructing_(isConstructing),
         isSameRealm_(isSameRealm) {}
   explicit CallFlags(ArgFormat format)
       : argFormat_(format), isConstructing_(false), isSameRealm_(false) {}
@@ -524,24 +527,28 @@ inline int32_t GetIndexOfArgument(Argume
   //
   // If this is a spread call, then argc is always 1, and we can calculate the
   // index directly. If this is not a spread call, then the index of any
   // argument other than NewTarget depends on argc.
 
   // First we determine whether the caller needs to add argc.
   switch (flags.getArgFormat()) {
     case CallFlags::Standard:
-    case CallFlags::FunCall:
       *addArgc = true;
       break;
     case CallFlags::Spread:
       // Spread calls do not have Arg1 or higher.
       MOZ_ASSERT(kind != ArgumentKind::Arg1);
       *addArgc = false;
       break;
+    case CallFlags::FunCall:
+    case CallFlags::FunApplyArgs:
+    case CallFlags::FunApplyArray:
+      MOZ_CRASH("Currently unreachable");
+      break;
   }
 
   // Second, we determine the offset relative to argc.
   bool hasArgumentArray = !*addArgc;
   switch (kind) {
     case ArgumentKind::Callee:
       return flags.isConstructing() + hasArgumentArray + 1;
     case ArgumentKind::This:
@@ -1100,16 +1107,21 @@ class MOZ_RAII CacheIRWriter : public JS
       buffer_.writeByte(uint32_t(slotIndex));
     } else {
       writeOpWithOperandId(CacheOp::LoadArgumentFixedSlot, res);
       buffer_.writeByte(uint32_t(slotIndex));
     }
     return res;
   }
 
+  void guardFunApply(Int32OperandId argcId, CallFlags flags) {
+    writeOpWithOperandId(CacheOp::GuardFunApply, argcId);
+    writeCallFlags(flags);
+  }
+
   ValOperandId loadDOMExpandoValue(ObjOperandId obj) {
     ValOperandId res(nextOperandId_++);
     writeOpWithOperandId(CacheOp::LoadDOMExpandoValue, obj);
     writeOperandId(res);
     return res;
   }
   void guardDOMExpandoMissingOrGuardShape(ValOperandId expando, Shape* shape) {
     writeOpWithOperandId(CacheOp::GuardDOMExpandoMissingOrGuardShape, expando);
@@ -2288,16 +2300,17 @@ class MOZ_RAII CallIRGenerator : public 
   bool getTemplateObjectForClassHook(HandleObject calleeObj,
                                      MutableHandleObject result);
 
   bool tryAttachStringSplit();
   bool tryAttachArrayPush();
   bool tryAttachArrayJoin();
   bool tryAttachIsSuspendedGenerator();
   bool tryAttachFunCall();
+  bool tryAttachFunApply();
   bool tryAttachCallScripted(HandleFunction calleeFunc);
   bool tryAttachSpecialCaseCallNative(HandleFunction calleeFunc);
   bool tryAttachCallNative(HandleFunction calleeFunc);
   bool tryAttachCallHook(HandleObject calleeObj);
 
   void trackAttached(const char* name);
 
  public:
--- a/js/src/jit/CacheIRCompiler.h
+++ b/js/src/jit/CacheIRCompiler.h
@@ -884,20 +884,20 @@ class MOZ_RAII CacheIRCompiler {
     return (const void*)readStubWord(offset, StubField::Type::RawWord);
   }
   jsid idStubField(uint32_t offset) {
     MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
     return jsid::fromRawBits(readStubWord(offset, StubField::Type::Id));
   }
 
  public:
-  // The maximum number of inlineable spread call arguments. Keep this small
-  // to avoid controllable stack overflows by attackers passing large arrays
-  // to spread call.
-  static const uint32_t MAX_ARGS_SPREAD_LENGTH = 16;
+  // The maximum number of arguments passed to a spread call or
+  // fun_apply IC.  Keep this small to avoid controllable stack
+  // overflows by attackers passing large arrays.
+  static const uint32_t MAX_ARGS_ARRAY_LENGTH = 16;
 };
 
 // Ensures the IC's output register is available for writing.
 class MOZ_RAII AutoOutputRegister {
   TypedOrValueRegister output_;
   CacheRegisterAllocator& alloc_;
 
   AutoOutputRegister(const AutoOutputRegister&) = delete;
--- a/js/src/jit/IonCacheIRCompiler.cpp
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -2615,8 +2615,12 @@ bool IonCacheIRCompiler::emitCallClassHo
 
 bool IonCacheIRCompiler::emitLoadArgumentFixedSlot() {
   MOZ_CRASH("Call ICs not used in ion");
 }
 
 bool IonCacheIRCompiler::emitLoadArgumentDynamicSlot() {
   MOZ_CRASH("Call ICs not used in ion");
 }
+
+bool IonCacheIRCompiler::emitGuardFunApply() {
+  MOZ_CRASH("Call ICs not used in ion");
+}