Bug 1533890: Migrate CallScripted to CacheIR r=mgaudet
author Iain Ireland <iireland@mozilla.com>
date Tue, 19 Mar 2019 22:57:50 +0000
changeset 465216 1a502b69ad35597b718f6e06893a2bf09a28375c
parent 465215 3fad807ff7d4da01b6c161ef4eac3c3a155d5fc4
child 465217 e8af74a64a88d40dfa03cd41346cea716c623aba
push id 112496
push user shindli@mozilla.com
push date Thu, 21 Mar 2019 04:37:39 +0000
treeherder mozilla-inbound@29476d3ca61d
reviewers mgaudet
bugs 1533890
milestone 68.0a1
Bug 1533890: Migrate CallScripted to CacheIR r=mgaudet

This patch implements calls to scripted functions in CacheIR. Spread calls and constructor calls are handled in a later patch.

Notes:

1. This patch adds GuardFunctionHasJitEntry as a separate CacheIR op, which guards against relazification. The same functionality currently exists inside ScriptedGetterResult. My original intent was to refactor ScriptedGetterResult to use the op, but if Ted's plan to add a delazification trampoline works out, then the issue goes away entirely and we can delete both versions of the code.

2. Like the CallNative patch, this patch bakes argc into the stub code.

3. The existing code pops the callee off the stack and unboxes it immediately after copying the args. This is pointless, because we have already unboxed it once and it is still in a register.

4. There are a couple of places in tryAttachCallScripted where we want to decline to attach without counting the attempt as a failure. Right now, if the CacheIR path declines, the old implementation does the right thing for us. A future patch will add isTemporarilyUnoptimizable support to CallIRGenerator.

Comparison points:

- CallIRGenerator::tryAttachCallScripted and BaselineCacheIRCompiler::emitCallScriptedFunction correspond to TryAttachCallStub and ICCallScriptedCompiler::generateStubCode.

Differential Revision: https://phabricator.services.mozilla.com/D22776
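For orientation before the diff: for a plain monomorphic call f(a, b), the stub attached by tryAttachCallScripted boils down to the following writer sequence (a sketch assembled from the CacheIR.cpp hunk below; variable names are illustrative):

  // Sketch: CacheIR for a non-constructing, non-spread scripted call.
  Int32OperandId argcId(writer.setInputOperandId(0));         // argc is the IC input
  ValOperandId calleeVal = writer.loadStackValue(argc_ + 1);  // callee sits below the args
  ObjOperandId calleeObj = writer.guardIsObject(calleeVal);
  writer.guardSpecificObject(calleeObj, calleeFunc);          // monomorphic: exact JSFunction
  writer.guardFunctionHasJitEntry(calleeObj, /* isConstructing = */ false);
  writer.callScriptedFunction(calleeObj, argcId, isCrossRealm);
  writer.typeMonitorResult();

Each guard jumps to the IC failure path when it fails, falling through to the next stub; only the last two ops do actual work.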
js/src/jit/BaselineCacheIRCompiler.cpp
js/src/jit/CacheIR.cpp
js/src/jit/CacheIR.h
js/src/jit/CacheIRCompiler.cpp
js/src/jit/CacheIRCompiler.h
js/src/jit/IonCacheIRCompiler.cpp
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -2572,8 +2572,89 @@ bool BaselineCacheIRCompiler::emitCallNa
   stubFrame.leave(masm);
 
   if (isCrossRealm) {
     masm.switchToBaselineFrameRealm(scratch2);
   }
 
   return true;
 }
+bool BaselineCacheIRCompiler::emitCallScriptedFunction() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
+  AutoOutputRegister output(*this);
+  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
+
+  Register calleeReg = allocator.useRegister(masm, reader.objOperandId());
+  Register argcReg = allocator.useRegister(masm, reader.int32OperandId());
+  bool maybeCrossRealm = reader.readBool();
+
+  // TODO: support constructors
+  bool isConstructing = false;
+
+  allocator.discardStack(masm);
+
+  // Push a stub frame so that we can perform a non-tail call.
+  // Note that this leaves the return address in TailCallReg.
+  AutoStubFrame stubFrame(*this);
+  stubFrame.enter(masm, scratch);
+
+  if (maybeCrossRealm) {
+    masm.switchToObjectRealm(calleeReg, scratch);
+  }
+
+  // TODO: If we are constructing, create |this|.
+  MOZ_ASSERT(!isConstructing);
+
+  // Values are on the stack left-to-right. Calling convention wants them
+  // right-to-left so duplicate them on the stack in reverse order.
+  // |this| and callee are pushed last.
+  pushCallArguments(argcReg, scratch, /*isJitCall = */ true, isConstructing);
+
+  // TODO: The callee is currently on top of the stack.  The old
+  // implementation popped it at this point, but since we don't
+  // support constructors yet, callee is definitely still in a
+  // register. For now we just free that stack slot to make things
+  // line up. This should probably be rewritten to avoid pushing
+  // callee at all if we don't have to.
+  masm.freeStack(sizeof(Value));
+
+  // Load the start of the target JitCode.
+  AutoScratchRegister code(allocator, masm);
+  if (!isConstructing) {
+    masm.loadJitCodeRaw(calleeReg, code);
+  }
+
+  EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
+
+  // Note that we use Push, not push, so that callJit will align the stack
+  // properly on ARM.
+  masm.Push(argcReg);
+  masm.PushCalleeToken(calleeReg, isConstructing);
+  masm.Push(scratch);
+
+  // Handle arguments underflow.
+  Label noUnderflow;
+  masm.load16ZeroExtend(Address(calleeReg, JSFunction::offsetOfNargs()),
+                        calleeReg);
+  masm.branch32(Assembler::AboveOrEqual, argcReg, calleeReg, &noUnderflow);
+  {
+    // Call the arguments rectifier.
+    TrampolinePtr argumentsRectifier =
+        cx_->runtime()->jitRuntime()->getArgumentsRectifier();
+    masm.movePtr(argumentsRectifier, code);
+  }
+
+  masm.bind(&noUnderflow);
+  masm.callJit(code);
+
+  // TODO: If the return value of a constructor is not an object,
+  // replace it with |this|.
+  MOZ_ASSERT(!isConstructing);
+
+  stubFrame.leave(masm, true);
+
+  if (maybeCrossRealm) {
+    // Use |code| as a scratch register.
+    masm.switchToBaselineFrameRealm(code);
+  }
+
+  return true;
+}
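
As a reading aid for the pushes above: assuming the standard JitFrameLayout ordering, the stack that emitCallScriptedFunction builds looks roughly like this just before masm.callJit(code), for a call f(a, b). This diagram is not part of the patch:

  // Top of stack first:
  //   return address       <- pushed by callJit itself
  //   frame descriptor     <- masm.Push(scratch)
  //   callee token         <- masm.PushCalleeToken(calleeReg, ...)
  //   argc                 <- masm.Push(argcReg)
  //   |this|               \
  //   a (arg0)              | re-pushed by pushCallArguments; the
  //   b (arg1)              / duplicated callee slot is dropped by freeStack
  //   [stub frame header]  <- stubFrame.enter(masm, scratch)
  //   b, a, |this|, callee, ... original operands in the baseline frame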
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -5264,17 +5264,84 @@ bool CallIRGenerator::tryAttachSpecialCa
   return false;
 }
 
 bool CallIRGenerator::tryAttachCallScripted(HandleFunction calleeFunc) {
   if (JitOptions.disableCacheIRCalls) {
     return false;
   }
 
-  return false;
+  // Never attach optimized scripted call stubs for JSOP_FUNAPPLY.
+  // MagicArguments may escape the frame through them.
+  if (op_ == JSOP_FUNAPPLY) {
+    return false;
+  }
+
+  bool isConstructing = IsConstructorCallPC(pc_);
+
+  // TODO: Support spread calls.
+  // bool isSpread = IsSpreadCallPC(pc_);
+  MOZ_ASSERT(!IsSpreadCallPC(pc_));
+
+  // If callee is not an interpreted constructor, we have to throw.
+  if (isConstructing && !calleeFunc->isConstructor()) {
+    return false;
+  }
+
+  // Likewise, if the callee is a class constructor, we have to throw.
+  if (!isConstructing && calleeFunc->isClassConstructor()) {
+    return false;
+  }
+
+  if (!calleeFunc->hasJitEntry()) {
+    // Don't treat this as an unoptimizable case, as we'll add a
+    // stub when the callee is delazified.
+    // TODO: find a way to represent *handled = true;
+    return false;
+  }
+
+  if (isConstructing && !calleeFunc->hasJITCode()) {
+    // If we're constructing, require the callee to have JIT
+    // code. This isn't required for correctness but avoids allocating
+    // a template object below for constructors that aren't hot. See
+    // bug 1419758.
+    // TODO: find a way to represent *handled = true;
+    return false;
+  }
+
+  // Keep track of the function's |prototype| property in type
+  // information, for use during Ion compilation.
+  if (IsIonEnabled(cx_)) {
+    EnsureTrackPropertyTypes(cx_, calleeFunc, NameToId(cx_->names().prototype));
+  }
+
+  // TODO: template object for constructors.
+  MOZ_ASSERT(!isConstructing);
+
+  // Load argc.
+  Int32OperandId argcId(writer.setInputOperandId(0));
+
+  // Load the callee.
+  ValOperandId calleeValId = writer.loadStackValue(argc_ + 1);
+  ObjOperandId calleeObjId = writer.guardIsObject(calleeValId);
+
+  // Ensure callee matches this stub's callee.
+  writer.guardSpecificObject(calleeObjId, calleeFunc);
+
+  // Guard against relazification.
+  writer.guardFunctionHasJitEntry(calleeObjId, isConstructing);
+
+  bool isCrossRealm = cx_->realm() != calleeFunc->realm();
+  writer.callScriptedFunction(calleeObjId, argcId, isCrossRealm);
+  writer.typeMonitorResult();
+
+  cacheIRStubKind_ = BaselineCacheIRStubKind::Monitored;
+  trackAttached("Call scripted func");
+
+  return true;
 }
 
 bool CallIRGenerator::tryAttachCallNative(HandleFunction calleeFunc) {
   MOZ_ASSERT(mode_ == ICState::Mode::Specialized);
   MOZ_ASSERT(calleeFunc->isNative());
 
   // Check for specific native-function optimizations.
   if (tryAttachSpecialCaseCallNative(calleeFunc)) {
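
To summarize the attach conditions in the hunk above as a single predicate (a sketch only; CanAttachScriptedCall is a hypothetical helper, not code from the patch, and it folds the constructor cases into a plain decline since constructors are deferred to a later patch):

  // Sketch: when tryAttachCallScripted agrees to attach, as of this patch.
  static bool CanAttachScriptedCall(JSOp op, jsbytecode* pc, JSFunction* callee) {
    if (op == JSOP_FUNAPPLY) {
      return false;  // MagicArguments may escape the frame
    }
    if (IsSpreadCallPC(pc) || IsConstructorCallPC(pc)) {
      return false;  // spread calls and constructors: later patches
    }
    if (callee->isClassConstructor()) {
      return false;  // calling a class constructor without |new| throws
    }
    if (!callee->hasJitEntry()) {
      return false;  // still lazy; a stub can be added after delazification
    }
    return true;
  }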
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -168,17 +168,16 @@ class TypedOperandId : public OperandId 
 enum class CacheKind : uint8_t {
 #define DEFINE_KIND(kind) kind,
   CACHE_IR_KINDS(DEFINE_KIND)
 #undef DEFINE_KIND
 };
 
 extern const char* const CacheKindNames[];
 
-
 // This namespace exists to make it possible to use unqualified
 // argument types in CACHE_IR_OPS without letting the symbols escape
 // into the global namespace. Any code that consumes the argument
 // information must have CacheIROpFormat in scope.
 namespace CacheIROpFormat {
 enum ArgType {
   None,
   Id,
@@ -245,16 +244,17 @@ extern const uint32_t OpLengths[];
   _(GuardIndexGreaterThanArrayLength, Id, Id)                                  \
   _(GuardIndexIsValidUpdateOrAdd, Id, Id)                                      \
   _(GuardIndexGreaterThanDenseInitLength, Id, Id)                              \
   _(GuardTagNotEqual, Id, Id)                                                  \
   _(GuardXrayExpandoShapeAndDefaultProto, Id, Byte, Field)                     \
   _(GuardFunctionPrototype, Id, Id, Field)                                     \
   _(GuardNoAllocationMetadataBuilder, None)                                    \
   _(GuardObjectGroupNotPretenured, Field)                                      \
+  _(GuardFunctionHasJitEntry, Id, Byte)                                        \
   _(LoadStackValue, Id, UInt32)                                                \
   _(LoadObject, Id, Field)                                                     \
   _(LoadProto, Id, Id)                                                         \
   _(LoadEnclosingEnvironment, Id, Id)                                          \
   _(LoadWrapperTarget, Id, Id)                                                 \
   _(LoadValueTag, Id, Id)                                                      \
                                                                                \
   _(TruncateDoubleToUInt32, Id, Id)                                            \
@@ -287,16 +287,17 @@ extern const uint32_t OpLengths[];
   _(CallNativeSetter, Id, Id, Field)                                           \
   _(CallScriptedSetter, Id, Field, Id, Byte)                                   \
   _(CallSetArrayLength, Id, Byte, Id)                                          \
   _(CallProxySet, Id, Id, Field, Byte)                                         \
   _(CallProxySetByValue, Id, Id, Id, Byte)                                     \
   _(CallAddOrUpdateSparseElementHelper, Id, Id, Id, Byte)                      \
   _(CallInt32ToString, Id, Id)                                                 \
   _(CallNumberToString, Id, Id)                                                \
+  _(CallScriptedFunction, Id, Id, Byte)                                        \
   _(CallNativeFunction, Id, Id, Byte, IF_SIMULATOR(Field, Byte))               \
                                                                                \
   /* The *Result ops load a value into the cache's result register. */         \
   _(LoadFixedSlotResult, Id, Field)                                            \
   _(LoadDynamicSlotResult, Id, Field)                                          \
   _(LoadUnboxedPropertyResult, Id, Byte, Field)                                \
   _(LoadTypedObjectResult, Id, Byte, Byte, Field)                              \
   _(LoadDenseElementResult, Id, Id)                                            \
@@ -710,16 +711,20 @@ class MOZ_RAII CacheIRWriter : public JS
   }
   void guardNoAllocationMetadataBuilder() {
     writeOp(CacheOp::GuardNoAllocationMetadataBuilder);
   }
   void guardObjectGroupNotPretenured(ObjectGroup* group) {
     writeOp(CacheOp::GuardObjectGroupNotPretenured);
     addStubField(uintptr_t(group), StubField::Type::ObjectGroup);
   }
+  void guardFunctionHasJitEntry(ObjOperandId fun, bool isConstructing) {
+    writeOpWithOperandId(CacheOp::GuardFunctionHasJitEntry, fun);
+    buffer_.writeByte(isConstructing);
+  }
 
  public:
  // Use (or create) a specialization below to clarify what constraint the
   // group guard is implying.
   void guardGroup(ObjOperandId obj, ObjectGroup* group) {
     writeOpWithOperandId(CacheOp::GuardGroup, obj);
     addStubField(uintptr_t(group), StubField::Type::ObjectGroup);
   }
@@ -1113,16 +1118,22 @@ class MOZ_RAII CacheIRWriter : public JS
     return res;
   }
   StringOperandId callNumberToString(ValOperandId id) {
     StringOperandId res(nextOperandId_++);
     writeOpWithOperandId(CacheOp::CallNumberToString, id);
     writeOperandId(res);
     return res;
   }
+  void callScriptedFunction(ObjOperandId calleeId, Int32OperandId argc,
+                            bool isCrossRealm) {
+    writeOpWithOperandId(CacheOp::CallScriptedFunction, calleeId);
+    writeOperandId(argc);
+    buffer_.writeByte(uint32_t(isCrossRealm));
+  }
   void callNativeFunction(ObjOperandId calleeId, Int32OperandId argc, JSOp op,
                           HandleFunction calleeFunc) {
     writeOpWithOperandId(CacheOp::CallNativeFunction, calleeId);
     writeOperandId(argc);
     bool isCrossRealm = cx_->realm() != calleeFunc->realm();
     buffer_.writeByte(uint32_t(isCrossRealm));
 
     // Some native functions can be implemented faster if we know that
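
The format entries added to CACHE_IR_OPS above must agree with what the writer serializes and what the compilers read back; for the two new ops that pairing is (summarizing the hunks above and below, not new code):

  // GuardFunctionHasJitEntry  (Id, Byte)
  //   write: writeOpWithOperandId(op, fun); writeByte(isConstructing)
  //   read:  reader.objOperandId();         reader.readBool()
  //
  // CallScriptedFunction      (Id, Id, Byte)
  //   write: writeOpWithOperandId(op, calleeId); writeOperandId(argc);
  //          writeByte(isCrossRealm)
  //   read:  reader.objOperandId(); reader.int32OperandId(); reader.readBool()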
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -3067,16 +3067,29 @@ bool CacheIRCompiler::emitGuardObjectGro
 
   StubFieldOffset group(reader.stubOffset(), StubField::Type::ObjectGroup);
   emitLoadStubField(group, scratch);
 
   masm.branchIfPretenuredGroup(scratch, failure->label());
   return true;
 }
 
+bool CacheIRCompiler::emitGuardFunctionHasJitEntry() {
+  Register fun = allocator.useRegister(masm, reader.objOperandId());
+  bool isConstructing = reader.readBool();
+
+  FailurePath* failure;
+  if (!addFailurePath(&failure)) {
+    return false;
+  }
+
+  masm.branchIfFunctionHasNoJitEntry(fun, isConstructing, failure->label());
+  return true;
+}
+
 bool CacheIRCompiler::emitLoadDenseElementHoleResult() {
   JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
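For reference, branchIfFunctionHasNoJitEntry is a pre-existing MacroAssembler helper. A rough C++ restatement of what the guard above checks (an assumption based on the parallel hasJitEntry()/hasJITCode() checks in tryAttachCallScripted, not the actual masm code):

  // Sketch: semantics of the emitted guard. The IC failure path falls
  // back to the next stub, so a relazified function is handled safely.
  bool hasEntry = isConstructing ? fun->hasJITCode()    // construct entry
                                 : fun->hasJitEntry();  // call entry
  if (!hasEntry) {
    // jump to failure->label()
  }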
--- a/js/src/jit/CacheIRCompiler.h
+++ b/js/src/jit/CacheIRCompiler.h
@@ -51,16 +51,17 @@ namespace jit {
   _(GuardIndexGreaterThanDenseCapacity)   \
   _(GuardIndexGreaterThanArrayLength)     \
   _(GuardIndexIsValidUpdateOrAdd)         \
   _(GuardIndexGreaterThanDenseInitLength) \
   _(GuardTagNotEqual)                     \
   _(GuardXrayExpandoShapeAndDefaultProto) \
   _(GuardNoAllocationMetadataBuilder)     \
   _(GuardObjectGroupNotPretenured)        \
+  _(GuardFunctionHasJitEntry)             \
   _(LoadObject)                           \
   _(LoadProto)                            \
   _(LoadEnclosingEnvironment)             \
   _(LoadWrapperTarget)                    \
   _(LoadValueTag)                         \
   _(LoadDOMExpandoValue)                  \
   _(LoadDOMExpandoValueIgnoreGeneration)  \
   _(LoadUndefinedResult)                  \
--- a/js/src/jit/IonCacheIRCompiler.cpp
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -2631,11 +2631,15 @@ bool IonCacheIRCompiler::emitCallStringO
 
   using Fn = bool (*)(JSContext*, HandleValue, HandleValue, MutableHandleValue);
   callVM<Fn, DoConcatStringObject>(masm);
 
   masm.storeCallResultValue(output);
   return true;
 }
 
+bool IonCacheIRCompiler::emitCallScriptedFunction() {
+  MOZ_CRASH("Call ICs not used in ion");
+}
+
 bool IonCacheIRCompiler::emitCallNativeFunction() {
   MOZ_CRASH("Call ICs not used in ion");
 }