Bug 1533890: Add constructor hook support to CacheIR r=mgaudet
author Iain Ireland <iireland@mozilla.com>
Tue, 19 Mar 2019 23:19:06 +0000
changeset 465211 9fe35422fa326fc61e8f8049eb3291d0a898e821
parent 465210 69a09b505a35c4e3aadc1d3a9d858014de004ea2
child 465212 b641ee647a01b1a05bd7775c7ce6e4fac0564917
push id 35735
push user shindli@mozilla.com
push date Thu, 21 Mar 2019 04:34:45 +0000
treeherder mozilla-central@ac0cd1a710f3
reviewers mgaudet
bugs 1533890
milestone 68.0a1
Constructor stubs in the existing implementation contain template objects, which are unused by the stub itself but consumed by Ion. To make this work in CacheIR, this patch introduces meta ops, which generate no code but can be consumed by BaselineInspector. This patch adds the infrastructure for meta ops and uses it to implement constructor class hooks, because they are a simple place to start. Subsequent patches add support for native constructor calls and scripted constructor calls.

In each case, BaselineInspector needs two pieces of information: a) the template object itself, and b) the callee, which BaselineInspector checks to make sure it is getting the correct template object. (The precise nature of the callee varies: it's a class pointer for hooks, but a pointer to the actual callee object for native and scripted constructors.) The meta op's arguments are a tag (in this case, "ClassTemplateObject") and two stub field offsets. NB: the class offset is reused from an earlier guard. To make this work, addStubField now returns the offset of the new field.

Other notes:

1. I changed an assertion in CompactBufferReader::seek to make it possible to seek directly to the end of the buffer. This is safe, because we already check that we aren't at the end before reading from the buffer.

2. It turns out that the code to iterate through CacheIR bytecode is nicer if the CacheIR op table defines the length of an op's arguments without including the op byte itself, so I made that change.

Comparison points:

- CallIRGenerator::getTemplateObjectForClassHook corresponds to GetTemplateObjectForClassHook in BaselineIC.cpp

Differential Revision: https://phabricator.services.mozilla.com/D22780
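[Editor's note] To make the offset-reuse idea concrete, here is a minimal standalone sketch, not the real CacheIRWriter API: a writer whose guard returns the stub-field offset it allocated, so that a later meta op can reference the same field instead of appending a duplicate. The opcode values and struct are invented for illustration.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Simplified model: stub fields are word-sized slots in stub data, and
    // the IR stream stores one-byte word offsets into that data (as
    // addStubField and reuseStubField do in the patch).
    struct ToyWriter {
      std::vector<uint8_t> code;
      std::vector<uintptr_t> fields;

      uint8_t addStubField(uintptr_t value) {
        uint8_t offset = uint8_t(fields.size());  // word index of new field
        fields.push_back(value);
        code.push_back(offset);
        return offset;  // callers can hand this offset to a later meta op
      }
      void reuseStubField(uint8_t offset) {
        code.push_back(offset);  // reference an existing field; add nothing
      }

      uint8_t guardAnyClass(uintptr_t clasp) {
        code.push_back(1);  // hypothetical GuardAnyClass opcode
        return addStubField(clasp);
      }
      void metaClassTemplateObject(uintptr_t templateObj, uint8_t classOffset) {
        code.push_back(2);  // hypothetical MetaTwoByte opcode
        code.push_back(0);  // hypothetical ClassTemplateObject tag
        reuseStubField(classOffset);  // same field the guard allocated
        addStubField(templateObj);
      }
    };

    int main() {
      ToyWriter w;
      uint8_t classOffset = w.guardAnyClass(0x1000);
      w.metaClassTemplateObject(0x2000, classOffset);
      printf("stub fields: %zu (class pointer stored once, referenced twice)\n",
             w.fields.size());
    }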
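[Editor's note] A similarly hedged sketch of why note 2 helps: with a per-op table of argument lengths that excludes the opcode byte, a scanner like MaybeArgumentReader in the BaselineInspector.cpp hunk below can step over ops it does not care about and stop exactly at the arguments of the op it wants. The opcodes and lengths here are made up for illustration; note also that landing exactly one-past-the-last byte is legal (note 1) because the bounds check runs before every read.

    #include <cstdint>
    #include <cstdio>
    #include <optional>
    #include <vector>

    enum Op : uint8_t { GuardAnyClass, CallClassHook, MetaTwoByte, NumOps };

    // Argument bytes per op, *not* counting the opcode byte itself.
    constexpr uint32_t ArgLengths[NumOps] = {1, 3, 3};

    // Returns the byte offset of the first MetaTwoByte op's arguments.
    std::optional<size_t> findMetaArgs(const std::vector<uint8_t>& code) {
      size_t pos = 0;
      while (pos < code.size()) {  // the 'more()' check guards every read
        Op op = Op(code[pos++]);   // read the opcode byte
        if (op == MetaTwoByte) {
          return pos;
        }
        pos += ArgLengths[op];     // skip this op's arguments; landing
      }                            // exactly on code.size() is legal
      return std::nullopt;
    }

    int main() {
      // GuardAnyClass(field 0), CallClassHook(obj, argc, field 1),
      // MetaTwoByte(tag, field 0 reused, field 2)
      std::vector<uint8_t> code = {GuardAnyClass, 0, CallClassHook, 5, 6, 1,
                                   MetaTwoByte, 0, 0, 2};
      if (auto args = findMetaArgs(code)) {
        printf("meta arguments at byte %zu: tag=%u fields=(%u, %u)\n", *args,
               unsigned(code[*args]), unsigned(code[*args + 1]),
               unsigned(code[*args + 2]));
      }
    }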
js/src/jit/BaselineCacheIRCompiler.cpp
js/src/jit/BaselineInspector.cpp
js/src/jit/CacheIR.cpp
js/src/jit/CacheIR.h
js/src/jit/CacheIRCompiler.cpp
js/src/jit/CacheIRCompiler.h
js/src/jit/CompactBuffer.h
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -2564,19 +2564,17 @@ void BaselineCacheIRCompiler::pushSpread
 bool BaselineCacheIRCompiler::emitCallNativeShared(NativeCallType callType) {
   AutoOutputRegister output(*this);
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   Register calleeReg = allocator.useRegister(masm, reader.objOperandId());
   Register argcReg = allocator.useRegister(masm, reader.int32OperandId());
   bool maybeCrossRealm = reader.readBool();
   bool isSpread = reader.readBool();
-
-  // TODO: support constructors
-  bool isConstructing = false;
+  bool isConstructing = reader.readBool();
 
   allocator.discardStack(masm);
 
   // Push a stub frame so that we can perform a non-tail call.
   // Note that this leaves the return address in TailCallReg.
   AutoStubFrame stubFrame(*this);
   stubFrame.enter(masm, scratch);
 
@@ -2604,18 +2602,17 @@ bool BaselineCacheIRCompiler::emitCallNa
 
   // Construct a native exit frame.
   masm.push(argcReg);
 
   EmitBaselineCreateStubFrameDescriptor(masm, scratch, ExitFrameLayout::Size());
   masm.push(scratch);
   masm.push(ICTailCallReg);
   masm.loadJSContext(scratch);
-  masm.enterFakeExitFrameForNative(scratch, scratch,
-                                   /*isConstructing = */ false);
+  masm.enterFakeExitFrameForNative(scratch, scratch, isConstructing);
 
   // Execute call.
   masm.setupUnalignedABICall(scratch);
   masm.loadJSContext(scratch);
   masm.passABIArg(scratch);
   masm.passABIArg(argcReg);
   masm.passABIArg(scratch2);
 
@@ -2677,19 +2674,17 @@ bool BaselineCacheIRCompiler::emitCallSc
   JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   Register calleeReg = allocator.useRegister(masm, reader.objOperandId());
   Register argcReg = allocator.useRegister(masm, reader.int32OperandId());
   bool maybeCrossRealm = reader.readBool();
   bool isSpread = reader.readBool();
-
-  // TODO: support constructors
-  bool isConstructing = false;
+  bool isConstructing = reader.readBool();
 
   allocator.discardStack(masm);
 
   // Push a stub frame so that we can perform a non-tail call.
   // Note that this leaves the return address in TailCallReg.
   AutoStubFrame stubFrame(*this);
   stubFrame.enter(masm, scratch);
 
--- a/js/src/jit/BaselineInspector.cpp
+++ b/js/src/jit/BaselineInspector.cpp
@@ -773,16 +773,69 @@ bool BaselineInspector::hasSeenDoubleRes
   MOZ_ASSERT(stub->isUnaryArith_Fallback() || stub->isBinaryArith_Fallback());
 
   if (stub->isUnaryArith_Fallback()) {
     return stub->toUnaryArith_Fallback()->sawDoubleResult();
   }
   return stub->toBinaryArith_Fallback()->sawDoubleResult();
 }
 
+static const CacheIRStubInfo* GetCacheIRStubInfo(ICStub* stub) {
+  const CacheIRStubInfo* stubInfo = nullptr;
+  switch (stub->kind()) {
+    case ICStub::Kind::CacheIR_Monitored:
+      stubInfo = stub->toCacheIR_Monitored()->stubInfo();
+      break;
+    case ICStub::Kind::CacheIR_Regular:
+      stubInfo = stub->toCacheIR_Regular()->stubInfo();
+      break;
+    case ICStub::Kind::CacheIR_Updated:
+      stubInfo = stub->toCacheIR_Updated()->stubInfo();
+      break;
+    default:
+      MOZ_CRASH("Only cache IR stubs supported");
+  }
+  return stubInfo;
+}
+
+static bool MaybeArgumentReader(ICStub* stub, CacheOp targetOp,
+                                mozilla::Maybe<CacheIRReader>& argReader) {
+  MOZ_ASSERT(ICStub::IsCacheIRKind(stub->kind()));
+
+  CacheIRReader stubReader(GetCacheIRStubInfo(stub));
+  while (stubReader.more()) {
+    CacheOp op = stubReader.readOp();
+    uint32_t argLength = CacheIROpFormat::ArgLengths[uint8_t(op)];
+
+    if (op == targetOp) {
+      MOZ_ASSERT(argReader.isNothing(),
+                 "Multiple instances of an op are not currently supported");
+      const uint8_t* argStart = stubReader.currentPosition();
+      argReader.emplace(argStart, argStart + argLength);
+    }
+
+    // Advance to next opcode.
+    stubReader.skip(argLength);
+  }
+  return argReader.isSome();
+}
+
+template <typename Filter>
+JSObject* MaybeTemplateObject(ICStub* stub, MetaTwoByteKind kind,
+                              Filter filter) {
+  const CacheIRStubInfo* stubInfo = GetCacheIRStubInfo(stub);
+  mozilla::Maybe<CacheIRReader> argReader;
+  if (!MaybeArgumentReader(stub, CacheOp::MetaTwoByte, argReader) ||
+      argReader->metaKind<MetaTwoByteKind>() != kind ||
+      !filter(*argReader, stubInfo)) {
+    return nullptr;
+  }
+  return stubInfo->getStubField<JSObject*>(stub, argReader->stubOffset());
+}
+
 JSObject* BaselineInspector::getTemplateObject(jsbytecode* pc) {
   if (!hasICScript()) {
     return nullptr;
   }
 
   const ICEntry& entry = icEntryFromPC(pc);
   for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
     switch (stub->kind()) {
@@ -894,16 +947,27 @@ JSObject* BaselineInspector::getTemplate
   }
 
   const ICEntry& entry = icEntryFromPC(pc);
   for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
     if (stub->isCall_ClassHook() &&
         stub->toCall_ClassHook()->clasp() == clasp) {
       return stub->toCall_ClassHook()->templateObject();
     }
+    if (ICStub::IsCacheIRKind(stub->kind())) {
+      auto filter = [stub, clasp](CacheIRReader& args,
+                                  const CacheIRStubInfo* info) {
+        return info->getStubField<Class*>(stub, args.stubOffset()) == clasp;
+      };
+      JSObject* result = MaybeTemplateObject(
+          stub, MetaTwoByteKind::ClassTemplateObject, filter);
+      if (result) {
+        return result;
+      }
+    }
   }
 
   return nullptr;
 }
 
 LexicalEnvironmentObject* BaselineInspector::templateNamedLambdaObject() {
   if (!script->hasBaselineScript()) {
     return nullptr;
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -30,23 +30,23 @@ using mozilla::Maybe;
 
 const char* const js::jit::CacheKindNames[] = {
 #define DEFINE_KIND(kind) #kind,
     CACHE_IR_KINDS(DEFINE_KIND)
 #undef DEFINE_KIND
 };
 
 // We need to enter the namespace here so that the definition of
-// CacheIROpFormat::OpLengths can see CacheIROpFormat::ArgType
+// CacheIROpFormat::ArgLengths can see CacheIROpFormat::ArgType
 // (without defining None/Id/Field/etc everywhere else in this file.)
 namespace js {
 namespace jit {
 namespace CacheIROpFormat {
 
-static constexpr uint32_t CacheIROpLength(ArgType arg) {
+static constexpr uint32_t CacheIRArgLength(ArgType arg) {
   switch (arg) {
     case None:
       return 0;
     case Id:
       return sizeof(uint8_t);
     case Field:
       return sizeof(uint8_t);
     case Byte:
@@ -54,24 +54,24 @@ static constexpr uint32_t CacheIROpLengt
     case Int32:
     case UInt32:
       return sizeof(uint32_t);
     case Word:
       return sizeof(uintptr_t);
   }
 }
 template <typename... Args>
-static constexpr uint32_t CacheIROpLength(ArgType arg, Args... args) {
-  return CacheIROpLength(arg) + CacheIROpLength(args...);
-}
-
-const uint32_t OpLengths[] = {
-#define OPLENGTH(op, ...) 1 + CacheIROpLength(__VA_ARGS__),
-    CACHE_IR_OPS(OPLENGTH)
-#undef OPLENGTH
+static constexpr uint32_t CacheIRArgLength(ArgType arg, Args... args) {
+  return CacheIRArgLength(arg) + CacheIRArgLength(args...);
+}
+
+const uint32_t ArgLengths[] = {
+#define ARGLENGTH(op, ...) CacheIRArgLength(__VA_ARGS__),
+    CACHE_IR_OPS(ARGLENGTH)
+#undef ARGLENGTH
 };
 
 }  // namespace CacheIROpFormat
 }  // namespace jit
 }  // namespace js
 
 void CacheIRWriter::assertSameCompartment(JSObject* obj) {
   cx_->debugOnlyCheck(obj);
@@ -5288,16 +5288,20 @@ bool CallIRGenerator::tryAttachCallScrip
 
   // Never attach optimized scripted call stubs for JSOP_FUNAPPLY.
   // MagicArguments may escape the frame through them.
   if (op_ == JSOP_FUNAPPLY) {
     return false;
   }
 
   bool isConstructing = IsConstructorCallPC(pc_);
+  if (isConstructing) {
+    // TODO: Support scripted constructors
+    return false;
+  }
 
   bool isSpread = IsSpreadCallPC(pc_);
 
   // If callee is not an interpreted constructor, we have to throw.
   if (isConstructing && !calleeFunc->isConstructor()) {
     return false;
   }
 
@@ -5363,17 +5367,19 @@ bool CallIRGenerator::tryAttachCallScrip
 bool CallIRGenerator::tryAttachCallNative(HandleFunction calleeFunc) {
   MOZ_ASSERT(mode_ == ICState::Mode::Specialized);
   MOZ_ASSERT(calleeFunc->isNative());
 
   bool isSpread = IsSpreadCallPC(pc_);
 
   bool isConstructing = IsConstructorCallPC(pc_);
   // TODO: Support constructors
-  MOZ_ASSERT(!isConstructing);
+  if (isConstructing) {
+    return false;
+  }
 
   // Check for specific native-function optimizations.
   if (tryAttachSpecialCaseCallNative(calleeFunc)) {
     return true;
   }
   if (JitOptions.disableCacheIRCalls) {
     return false;
   }
@@ -5397,16 +5403,37 @@ bool CallIRGenerator::tryAttachCallNativ
   writer.typeMonitorResult();
 
   cacheIRStubKind_ = BaselineCacheIRStubKind::Monitored;
   trackAttached("Call native func");
 
   return true;
 }
 
+bool CallIRGenerator::getTemplateObjectForClassHook(
+    HandleObject calleeObj, MutableHandleObject result) {
+  MOZ_ASSERT(IsConstructorCallPC(pc_));
+  JSNative hook = calleeObj->constructHook();
+
+  if (calleeObj->nonCCWRealm() != cx_->realm()) {
+    return true;
+  }
+
+  if (hook == TypedObject::construct) {
+    Rooted<TypeDescr*> descr(cx_, calleeObj.as<TypeDescr>());
+    result.set(TypedObject::createZeroed(cx_, descr, gc::TenuredHeap));
+    if (!result) {
+      cx_->clearPendingException();
+      return false;
+    }
+  }
+
+  return true;
+}
+
 bool CallIRGenerator::tryAttachCallHook(HandleObject calleeObj) {
   if (JitOptions.disableCacheIRCalls) {
     return false;
   }
 
   if (op_ == JSOP_FUNAPPLY) {
     return false;
   }
@@ -5414,52 +5441,62 @@ bool CallIRGenerator::tryAttachCallHook(
   bool isSpread = IsSpreadCallPC(pc_);
   bool isConstructing = IsConstructorCallPC(pc_);
   JSNative hook =
       isConstructing ? calleeObj->constructHook() : calleeObj->callHook();
   if (!hook) {
     return false;
   }
 
-  // TODO: Template objects.
-  MOZ_ASSERT(!isConstructing);
+  RootedObject templateObj(cx_);
+  if (isConstructing &&
+      !getTemplateObjectForClassHook(calleeObj, &templateObj)) {
+    return false;
+  }
 
   // Load argc.
   Int32OperandId argcId(writer.setInputOperandId(0));
 
   // Load the callee.
   uint32_t calleeSlot = calleeStackSlot(isSpread, isConstructing);
   ValOperandId calleeValId = writer.loadStackValue(calleeSlot);
   ObjOperandId calleeObjId = writer.guardIsObject(calleeValId);
 
   // Ensure the callee's class matches the one in this stub.
-  writer.guardAnyClass(calleeObjId, calleeObj->getClass());
+  FieldOffset classOffset =
+      writer.guardAnyClass(calleeObjId, calleeObj->getClass());
 
   // Enforce limits on spread call length, and update argc.
   if (isSpread) {
     writer.guardAndUpdateSpreadArgc(argcId, isConstructing);
   }
 
-  writer.callClassHook(calleeObjId, argcId, hook, isSpread);
+  writer.callClassHook(calleeObjId, argcId, hook, isSpread, isConstructing);
   writer.typeMonitorResult();
 
+  if (templateObj) {
+    writer.metaClassTemplateObject(templateObj, classOffset);
+  }
+
   cacheIRStubKind_ = BaselineCacheIRStubKind::Monitored;
   trackAttached("Call native func");
 
   return true;
 }
 
 bool CallIRGenerator::tryAttachStub() {
   AutoAssertNoPendingException aanpe(cx_);
 
   // Some opcodes are not yet supported.
   switch (op_) {
     case JSOP_CALL:
     case JSOP_CALL_IGNORES_RV:
     case JSOP_SPREADCALL:
+    case JSOP_NEW:
+    case JSOP_SPREADNEW:
       break;
     default:
       return false;
   }
 
   // Only optimize when the mode is Specialized.
   if (mode_ != ICState::Mode::Specialized) {
     return false;
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -183,17 +183,17 @@ enum ArgType {
   Id,
   Field,
   Byte,
   Int32,
   UInt32,
   Word,
 };
 
-extern const uint32_t OpLengths[];
+extern const uint32_t ArgLengths[];
 }  // namespace CacheIROpFormat
 
 #ifdef JS_SIMULATOR
 #  define IF_SIMULATOR(x, y) x
 #else
 #  define IF_SIMULATOR(x, y) y
 #endif
 
@@ -288,19 +288,22 @@ extern const uint32_t OpLengths[];
   _(CallNativeSetter, Id, Id, Field)                                           \
   _(CallScriptedSetter, Id, Field, Id, Byte)                                   \
   _(CallSetArrayLength, Id, Byte, Id)                                          \
   _(CallProxySet, Id, Id, Field, Byte)                                         \
   _(CallProxySetByValue, Id, Id, Id, Byte)                                     \
   _(CallAddOrUpdateSparseElementHelper, Id, Id, Id, Byte)                      \
   _(CallInt32ToString, Id, Id)                                                 \
   _(CallNumberToString, Id, Id)                                                \
-  _(CallScriptedFunction, Id, Id, Byte, Byte)                                  \
-  _(CallNativeFunction, Id, Id, Byte, Byte, IF_SIMULATOR(Field, Byte))         \
-  _(CallClassHook, Id, Id, Byte, Byte, Field)                                  \
+  _(CallScriptedFunction, Id, Id, Byte, Byte, Byte)                            \
+  _(CallNativeFunction, Id, Id, Byte, Byte, Byte, IF_SIMULATOR(Field, Byte))   \
+  _(CallClassHook, Id, Id, Byte, Byte, Byte, Field)                            \
+                                                                               \
+  /* Meta ops generate no code, but contain data for BaselineInspector */      \
+  _(MetaTwoByte, Byte, Field, Field)                                           \
                                                                                \
   /* The *Result ops load a value into the cache's result register. */         \
   _(LoadFixedSlotResult, Id, Field)                                            \
   _(LoadDynamicSlotResult, Id, Field)                                          \
   _(LoadUnboxedPropertyResult, Id, Byte, Field)                                \
   _(LoadTypedObjectResult, Id, Byte, Byte, Field)                              \
   _(LoadDenseElementResult, Id, Id)                                            \
   _(LoadDenseElementHoleResult, Id, Id)                                        \
@@ -452,16 +455,18 @@ class StubField {
     return uintptr_t(data_);
   }
   uint64_t asInt64() const {
     MOZ_ASSERT(sizeIsInt64());
     return data_;
   }
 } JS_HAZ_GC_POINTER;
 
+typedef uint8_t FieldOffset;
+
 // We use this enum as GuardClass operand, instead of storing Class* pointers
 // in the IR, to keep the IR compact and the same size on all platforms.
 enum class GuardClassKind : uint8_t {
   Array,
   MappedArguments,
   UnmappedArguments,
   WindowProxy,
   JSFunction,
@@ -478,16 +483,20 @@ enum TypedThingLayout {
   Layout_TypedArray,
   Layout_OutlineTypedObject,
   Layout_InlineTypedObject
 };
 
 void LoadShapeWrapperContents(MacroAssembler& masm, Register obj, Register dst,
                               Label* failure);
 
+enum class MetaTwoByteKind : uint8_t {
+  ClassTemplateObject,
+};
+
 // Class to record CacheIR + some additional metadata for code generation.
 class MOZ_RAII CacheIRWriter : public JS::CustomAutoRooter {
   JSContext* cx_;
   CompactBufferWriter buffer_;
 
   uint32_t nextOperandId_;
   uint32_t nextInstructionId_;
   uint32_t numInputOperands_;
@@ -541,26 +550,33 @@ class MOZ_RAII CacheIRWriter : public JS
   void writeUint32Immediate(uint32_t u32) { buffer_.writeFixedUint32_t(u32); }
   void writePointer(void* ptr) { buffer_.writeRawPointer(ptr); }
 
   void writeOpWithOperandId(CacheOp op, OperandId opId) {
     writeOp(op);
     writeOperandId(opId);
   }
 
-  void addStubField(uint64_t value, StubField::Type fieldType) {
+  uint8_t addStubField(uint64_t value, StubField::Type fieldType) {
+    uint8_t offset = 0;
     size_t newStubDataSize = stubDataSize_ + StubField::sizeInBytes(fieldType);
     if (newStubDataSize < MaxStubDataSizeInBytes) {
       buffer_.propagateOOM(stubFields_.append(StubField(value, fieldType)));
       MOZ_ASSERT((stubDataSize_ % sizeof(uintptr_t)) == 0);
-      buffer_.writeByte(stubDataSize_ / sizeof(uintptr_t));
+      offset = stubDataSize_ / sizeof(uintptr_t);
+      buffer_.writeByte(offset);
       stubDataSize_ = newStubDataSize;
     } else {
       tooLarge_ = true;
     }
+    return offset;
+  }
+  void reuseStubField(FieldOffset offset) {
+    MOZ_ASSERT(offset < stubDataSize_ / sizeof(uintptr_t));
+    buffer_.writeByte(offset);
   }
 
   CacheIRWriter(const CacheIRWriter&) = delete;
   CacheIRWriter& operator=(const CacheIRWriter&) = delete;
 
  public:
   explicit CacheIRWriter(JSContext* cx)
       : CustomAutoRooter(cx),
@@ -762,19 +778,19 @@ class MOZ_RAII CacheIRWriter : public JS
     addStubField(uintptr_t(proto), StubField::Type::JSObject);
   }
   void guardClass(ObjOperandId obj, GuardClassKind kind) {
     static_assert(sizeof(GuardClassKind) == sizeof(uint8_t),
                   "GuardClassKind must fit in a byte");
     writeOpWithOperandId(CacheOp::GuardClass, obj);
     buffer_.writeByte(uint32_t(kind));
   }
-  void guardAnyClass(ObjOperandId obj, const Class* clasp) {
+  FieldOffset guardAnyClass(ObjOperandId obj, const Class* clasp) {
     writeOpWithOperandId(CacheOp::GuardAnyClass, obj);
-    addStubField(uintptr_t(clasp), StubField::Type::RawWord);
+    return addStubField(uintptr_t(clasp), StubField::Type::RawWord);
   }
   void guardIsNativeFunction(ObjOperandId obj, JSNative nativeFunc) {
     writeOpWithOperandId(CacheOp::GuardIsNativeFunction, obj);
     writePointer(JS_FUNC_TO_DATA_PTR(void*, nativeFunc));
   }
   void guardIsNativeObject(ObjOperandId obj) {
     writeOpWithOperandId(CacheOp::GuardIsNativeObject, obj);
   }
@@ -1134,24 +1150,26 @@ class MOZ_RAII CacheIRWriter : public JS
     return res;
   }
   void callScriptedFunction(ObjOperandId calleeId, Int32OperandId argc,
                             bool isCrossRealm, bool isSpread) {
     writeOpWithOperandId(CacheOp::CallScriptedFunction, calleeId);
     writeOperandId(argc);
     buffer_.writeByte(uint32_t(isCrossRealm));
     buffer_.writeByte(uint32_t(isSpread));
+    buffer_.writeByte(uint32_t(false));  // isConstructing
   }
   void callNativeFunction(ObjOperandId calleeId, Int32OperandId argc, JSOp op,
                           HandleFunction calleeFunc, bool isSpread) {
     writeOpWithOperandId(CacheOp::CallNativeFunction, calleeId);
     writeOperandId(argc);
     bool isCrossRealm = cx_->realm() != calleeFunc->realm();
     buffer_.writeByte(uint32_t(isCrossRealm));
     buffer_.writeByte(uint32_t(isSpread));
+    buffer_.writeByte(uint32_t(false));  // isConstructing
 
     // Some native functions can be implemented faster if we know that
     // the return value is ignored.
     bool ignoresReturnValue =
         op == JSOP_CALL_IGNORES_RV && calleeFunc->hasJitInfo() &&
         calleeFunc->jitInfo()->type() == JSJitInfo::IgnoresReturnValueNative;
 
 #ifdef JS_SIMULATOR
@@ -1170,33 +1188,44 @@ class MOZ_RAII CacheIRWriter : public JS
 #else
     // If we are not running in the simulator, we generate different jitcode
     // to find the ignoresReturnValue version of a native function.
     buffer_.writeByte(ignoresReturnValue);
 #endif
   }
 
   void callClassHook(ObjOperandId calleeId, Int32OperandId argc, JSNative hook,
-                     bool isSpread) {
+                     bool isSpread, bool isConstructing) {
     writeOpWithOperandId(CacheOp::CallClassHook, calleeId);
     writeOperandId(argc);
     buffer_.writeByte(true);                // may be cross-realm
-    buffer_.writeByte(uint32_t(isSpread));  // may be cross-realm
+    buffer_.writeByte(uint32_t(isSpread));
+    buffer_.writeByte(uint32_t(isConstructing));
     void* target = JS_FUNC_TO_DATA_PTR(void*, hook);
 
 #ifdef JS_SIMULATOR
     // The simulator requires VM calls to be redirected to a special
     // swi instruction to handle them, so we store the redirected
     // pointer in the stub and use that instead of the original one.
     target = Simulator::RedirectNativeFunction(target, Args_General3);
 #endif
 
     addStubField(uintptr_t(target), StubField::Type::RawWord);
   }
 
+  // This generates no code, but saves the template object in a stub
+  // field for BaselineInspector.
+  void metaClassTemplateObject(JSObject* templateObject,
+                               FieldOffset classOffset) {
+    writeOp(CacheOp::MetaTwoByte);
+    buffer_.writeByte(uint32_t(MetaTwoByteKind::ClassTemplateObject));
+    reuseStubField(classOffset);
+    addStubField(uintptr_t(templateObject), StubField::Type::JSObject);
+  }
+
   void megamorphicLoadSlotResult(ObjOperandId obj, PropertyName* name,
                                  bool handleMissing) {
     writeOpWithOperandId(CacheOp::MegamorphicLoadSlotResult, obj);
     addStubField(uintptr_t(name), StubField::Type::String);
     buffer_.writeByte(uint32_t(handleMissing));
   }
   void megamorphicLoadSlotByValueResult(ObjOperandId obj, ValOperandId id,
                                         bool handleMissing) {
@@ -1555,16 +1584,21 @@ class MOZ_RAII CacheIRReader {
   explicit CacheIRReader(const CacheIRStubInfo* stubInfo);
 
   bool more() const { return buffer_.more(); }
 
   CacheOp readOp() { return CacheOp(buffer_.readByte()); }
 
   // Skip data not currently used.
   void skip() { buffer_.readByte(); }
+  void skip(uint32_t skipLength) {
+    if (skipLength > 0) {
+      buffer_.seek(buffer_.currentPosition(), skipLength);
+    }
+  }
 
   ValOperandId valOperandId() { return ValOperandId(buffer_.readByte()); }
   ValueTagOperandId valueTagOperandId() {
     return ValueTagOperandId(buffer_.readByte());
   }
   ObjOperandId objOperandId() { return ObjOperandId(buffer_.readByte()); }
   StringOperandId stringOperandId() {
     return StringOperandId(buffer_.readByte());
@@ -1584,16 +1618,21 @@ class MOZ_RAII CacheIRReader {
   Scalar::Type scalarType() { return Scalar::Type(buffer_.readByte()); }
   uint32_t typeDescrKey() { return buffer_.readByte(); }
   JSWhyMagic whyMagic() { return JSWhyMagic(buffer_.readByte()); }
   JSOp jsop() { return JSOp(buffer_.readByte()); }
   int32_t int32Immediate() { return int32_t(buffer_.readFixedUint32_t()); }
   uint32_t uint32Immediate() { return buffer_.readFixedUint32_t(); }
   void* pointer() { return buffer_.readRawPointer(); }
 
+  template <typename MetaKind>
+  MetaKind metaKind() {
+    return MetaKind(buffer_.readByte());
+  }
+
   ReferenceType referenceTypeDescrType() {
     return ReferenceType(buffer_.readByte());
   }
 
   uint8_t readByte() { return buffer_.readByte(); }
   bool readBool() {
     uint8_t b = buffer_.readByte();
     MOZ_ASSERT(b <= 1);
@@ -2072,16 +2111,18 @@ class MOZ_RAII CallIRGenerator : public 
   uint32_t argc_;
   HandleValue callee_;
   HandleValue thisval_;
   HandleValueArray args_;
   PropertyTypeCheckInfo typeCheckInfo_;
   BaselineCacheIRStubKind cacheIRStubKind_;
 
   uint32_t calleeStackSlot(bool isSpread, bool isConstructing);
+  bool getTemplateObjectForClassHook(HandleObject calleeObj,
+                                     MutableHandleObject result);
 
   bool tryAttachStringSplit();
   bool tryAttachArrayPush();
   bool tryAttachArrayJoin();
   bool tryAttachIsSuspendedGenerator();
   bool tryAttachCallScripted(HandleFunction calleeFunc);
   bool tryAttachSpecialCaseCallNative(HandleFunction calleeFunc);
   bool tryAttachCallNative(HandleFunction calleeFunc);
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -950,16 +950,18 @@ template GCPtr<JSObject*>& CacheIRStubIn
 template GCPtr<JSString*>& CacheIRStubInfo::getStubField<ICStub>(
     ICStub* stub, uint32_t offset) const;
 template GCPtr<JS::Symbol*>& CacheIRStubInfo::getStubField<ICStub>(
     ICStub* stub, uint32_t offset) const;
 template GCPtr<JS::Value>& CacheIRStubInfo::getStubField<ICStub>(
     ICStub* stub, uint32_t offset) const;
 template GCPtr<jsid>& CacheIRStubInfo::getStubField<ICStub>(
     ICStub* stub, uint32_t offset) const;
+template GCPtr<Class*>& CacheIRStubInfo::getStubField<ICStub>(
+    ICStub* stub, uint32_t offset) const;
 
 template <typename T, typename V>
 static void InitGCPtr(uintptr_t* ptr, V val) {
   AsGCPtr<T>(ptr)->init(mozilla::BitwiseCast<T>(val));
 }
 
 void CacheIRWriter::copyStubData(uint8_t* dest) const {
   MOZ_ASSERT(!failed());
@@ -4329,8 +4331,17 @@ bool CacheIRCompiler::emitCallIsSuspende
   masm.jump(&done);
 
   masm.bind(&returnFalse);
   masm.moveValue(BooleanValue(false), output.valueReg());
 
   masm.bind(&done);
   return true;
 }
+
+// This op generates no code. It is consumed by BaselineInspector.
+bool CacheIRCompiler::emitMetaTwoByte() {
+  mozilla::Unused << reader.readByte();  // meta kind
+  mozilla::Unused << reader.readByte();  // payload byte 1
+  mozilla::Unused << reader.readByte();  // payload byte 2
+
+  return true;
+}
--- a/js/src/jit/CacheIRCompiler.h
+++ b/js/src/jit/CacheIRCompiler.h
@@ -121,16 +121,17 @@ namespace jit {
   _(MegamorphicLoadSlotResult)            \
   _(MegamorphicLoadSlotByValueResult)     \
   _(MegamorphicStoreSlot)                 \
   _(MegamorphicHasPropResult)             \
   _(CallObjectHasSparseElementResult)     \
   _(CallInt32ToString)                    \
   _(CallNumberToString)                   \
   _(CallIsSuspendedGeneratorResult)       \
+  _(MetaTwoByte)                          \
   _(WrapResult)
 
 // [SMDDOC] CacheIR Value Representation and Tracking
 //
 // While compiling an IC stub the CacheIR compiler needs to keep track of the
 // physical location for each logical piece of data we care about, as well as
 // ensure that in the case of a stub failing, we are able to restore the input
 // state so that a subsequent stub can attempt to provide a value.
@@ -733,17 +734,17 @@ class MOZ_RAII CacheIRCompiler {
 
 #ifdef DEBUG
   const uint8_t* currentVerificationPosition_;
 
   // Verify that the number of bytes consumed by the compiler matches
   // up with the opcode signature in CACHE_IR_OPS.
   void assertAllArgumentsConsumed() {
     CacheOp prevOp = CacheOp(*currentVerificationPosition_);
-    uint32_t expectedLength = CacheIROpFormat::OpLengths[uint8_t(prevOp)];
+    uint32_t expectedLength = 1 + CacheIROpFormat::ArgLengths[uint8_t(prevOp)];
 
     const uint8_t* newPosition = reader.currentPosition();
     MOZ_ASSERT(newPosition > currentVerificationPosition_);
     uint32_t actualLength = newPosition - currentVerificationPosition_;
     MOZ_ASSERT(actualLength == expectedLength);
     currentVerificationPosition_ = newPosition;
   };
 #endif
--- a/js/src/jit/CompactBuffer.h
+++ b/js/src/jit/CompactBuffer.h
@@ -95,17 +95,17 @@ class CompactBufferReader {
   bool more() const {
     MOZ_ASSERT(buffer_ <= end_);
     return buffer_ < end_;
   }
 
   void seek(const uint8_t* start, uint32_t offset) {
     buffer_ = start + offset;
     MOZ_ASSERT(start < end_);
-    MOZ_ASSERT(buffer_ < end_);
+    MOZ_ASSERT(buffer_ <= end_);
   }
 
   const uint8_t* currentPosition() const { return buffer_; }
 };
 
 class CompactBufferWriter {
   js::Vector<uint8_t, 32, SystemAllocPolicy> buffer_;
   bool enoughMemory_;