Bug 1629791 part 5 - Auto-generate CacheIRWriter and CacheIRCompiler code for a handful of ops. r=iain
authorJan de Mooij <jdemooij@mozilla.com>
Fri, 17 Apr 2020 07:58:26 +0000
changeset 524559 667744615032982f90e8f4e5d38a0c403371a7d0
parent 524558 219a8f2d153f89cb8d93939f5a942e69ebc65036
child 524560 cb9f27043c05088c8eb4fb37da821319a6436a69
push id37323
push userdluca@mozilla.com
push dateFri, 17 Apr 2020 16:25:55 +0000
treeherdermozilla-central@b4b1d6f91ef0 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersiain
bugs1629791
milestone77.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1629791 part 5 - Auto-generate CacheIRWriter and CacheIRCompiler code for a handful of ops. r=iain For ops with the gen_boilerplate attribute, this auto-generates the writer and compiler boilerplate code. The attribute has been added to some arbitrary ops to test this. CacheIRWriter methods that return a new OperandId are not supported yet. Initially I wanted to generate multiple header files and classes, but the current approach works fine and is probably a bit easier to reason about. Because the code generation itself is pretty simple it should be easy to experiment with different codegen strategies in the future. Differential Revision: https://phabricator.services.mozilla.com/D71153
js/src/jit/BaselineCacheIRCompiler.cpp
js/src/jit/BaselineCacheIRCompiler.h
js/src/jit/CacheIR.h
js/src/jit/CacheIRCompiler.cpp
js/src/jit/CacheIRCompiler.h
js/src/jit/CacheIROps.yaml
js/src/jit/GenerateCacheIRFiles.py
js/src/jit/IonCacheIRCompiler.cpp
js/src/jit/IonCacheIRCompiler.h
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -188,66 +188,66 @@ JitCode* BaselineCacheIRCompiler::compil
   if (!newStubCode) {
     cx_->recoverFromOutOfMemory();
     return nullptr;
   }
 
   return newStubCode;
 }
 
-bool BaselineCacheIRCompiler::emitGuardShape() {
+bool BaselineCacheIRCompiler::emitGuardShape(ObjOperandId objId,
+                                             uint32_t shapeOffset) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
-  ObjOperandId objId = reader.objOperandId();
   Register obj = allocator.useRegister(masm, objId);
   AutoScratchRegister scratch1(allocator, masm);
 
   bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
 
   Maybe<AutoScratchRegister> maybeScratch2;
   if (needSpectreMitigations) {
     maybeScratch2.emplace(allocator, masm);
   }
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
-  Address addr(stubAddress(reader.stubOffset()));
+  Address addr(stubAddress(shapeOffset));
   masm.loadPtr(addr, scratch1);
   if (needSpectreMitigations) {
     masm.branchTestObjShape(Assembler::NotEqual, obj, scratch1, *maybeScratch2,
                             obj, failure->label());
   } else {
     masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj,
                                                 scratch1, failure->label());
   }
 
   return true;
 }
 
-bool BaselineCacheIRCompiler::emitGuardGroup() {
+bool BaselineCacheIRCompiler::emitGuardGroup(ObjOperandId objId,
+                                             uint32_t groupOffset) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
-  ObjOperandId objId = reader.objOperandId();
   Register obj = allocator.useRegister(masm, objId);
   AutoScratchRegister scratch1(allocator, masm);
 
   bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
 
   Maybe<AutoScratchRegister> maybeScratch2;
   if (needSpectreMitigations) {
     maybeScratch2.emplace(allocator, masm);
   }
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
-  Address addr(stubAddress(reader.stubOffset()));
+  Address addr(stubAddress(groupOffset));
   masm.loadPtr(addr, scratch1);
   if (needSpectreMitigations) {
     masm.branchTestObjGroup(Assembler::NotEqual, obj, scratch1, *maybeScratch2,
                             obj, failure->label());
   } else {
     masm.branchTestObjGroupNoSpectreMitigations(Assembler::NotEqual, obj,
                                                 scratch1, failure->label());
   }
--- a/js/src/jit/BaselineCacheIRCompiler.h
+++ b/js/src/jit/BaselineCacheIRCompiler.h
@@ -88,14 +88,16 @@ class MOZ_RAII BaselineCacheIRCompiler :
   bool makesGCCalls() const;
 
   Address stubAddress(uint32_t offset) const;
 
  private:
 #define DEFINE_OP(op) MOZ_MUST_USE bool emit##op();
   CACHE_IR_UNSHARED_OPS(DEFINE_OP)
 #undef DEFINE_OP
+
+  CACHE_IR_COMPILER_UNSHARED_GENERATED
 };
 
 }  // namespace jit
 }  // namespace js
 
 #endif /* jit_BaselineCacheIRCompiler_h */
--- a/js/src/jit/CacheIR.h
+++ b/js/src/jit/CacheIR.h
@@ -538,16 +538,30 @@ class MOZ_RAII CacheIRWriter : public JS
     return offset;
   }
 
   void reuseStubField(FieldOffset offset) {
     MOZ_ASSERT(offset < stubDataSize_ / sizeof(uintptr_t));
     buffer_.writeByte(offset);
   }
 
+  void writeShapeField(Shape* shape) {
+    MOZ_ASSERT(shape);
+    addStubField(uintptr_t(shape), StubField::Type::Shape);
+  }
+  void writeGroupField(ObjectGroup* group) {
+    MOZ_ASSERT(group);
+    addStubField(uintptr_t(group), StubField::Type::ObjectGroup);
+  }
+  void writeJSOpImm(JSOp op) {
+    static_assert(sizeof(JSOp) == sizeof(uint8_t), "JSOp must fit in a byte");
+    buffer_.writeByte(uint8_t(op));
+  }
+  void writeBoolImm(bool b) { buffer_.writeByte(uint32_t(b)); }
+
   CacheIRWriter(const CacheIRWriter&) = delete;
   CacheIRWriter& operator=(const CacheIRWriter&) = delete;
 
  public:
   explicit CacheIRWriter(JSContext* cx)
       : CustomAutoRooter(cx),
         cx_(cx),
         nextOperandId_(0),
@@ -700,22 +714,16 @@ class MOZ_RAII CacheIRWriter : public JS
   void guardIsNull(ValOperandId val) {
     writeOpWithOperandId(CacheOp::GuardIsNull, val);
   }
 
   void guardIsUndefined(ValOperandId val) {
     writeOpWithOperandId(CacheOp::GuardIsUndefined, val);
   }
 
-  void guardShape(ObjOperandId obj, Shape* shape) {
-    MOZ_ASSERT(shape);
-    writeOpWithOperandId(CacheOp::GuardShape, obj);
-    addStubField(uintptr_t(shape), StubField::Type::Shape);
-  }
-
   void guardShapeForClass(ObjOperandId obj, Shape* shape) {
     // Guard shape to ensure that object class is unchanged. This is true
     // for all shapes.
     guardShape(obj, shape);
   }
 
   void guardShapeForOwnProperties(ObjOperandId obj, Shape* shape) {
     // Guard shape to detect changes to (non-dense) own properties. This
@@ -754,23 +762,18 @@ class MOZ_RAII CacheIRWriter : public JS
     buffer_.writeByte(isConstructing);
   }
 
   void guardNotClassConstructor(ObjOperandId fun) {
     writeOpWithOperandId(CacheOp::GuardNotClassConstructor, fun);
   }
 
  public:
-  // Use (or create) a specialization below to clarify what constaint the
-  // group guard is implying.
-  void guardGroup(ObjOperandId obj, ObjectGroup* group) {
-    writeOpWithOperandId(CacheOp::GuardGroup, obj);
-    addStubField(uintptr_t(group), StubField::Type::ObjectGroup);
-  }
-
+  // Instead of calling guardGroup manually, use (or create) a specialization
+  // below to clarify what constraint the group guard is implying.
   void guardGroupForProto(ObjOperandId obj, ObjectGroup* group) {
     MOZ_ASSERT(!group->hasUncacheableProto());
     guardGroup(obj, group);
   }
 
   void guardGroupForTypeBarrier(ObjOperandId obj, ObjectGroup* group) {
     // Typesets will always be a super-set of any typesets previously seen
     // for this group. If the type/group of a value being stored to a
@@ -1409,26 +1412,16 @@ class MOZ_RAII CacheIRWriter : public JS
     writeOperandId(rhsId);
   }
 
   void doublePowResult(NumberOperandId lhsId, NumberOperandId rhsId) {
     writeOpWithOperandId(CacheOp::DoublePowResult, lhsId);
     writeOperandId(rhsId);
   }
 
-  void int32AddResult(Int32OperandId lhs, Int32OperandId rhs) {
-    writeOpWithOperandId(CacheOp::Int32AddResult, lhs);
-    writeOperandId(rhs);
-  }
-
-  void int32SubResult(Int32OperandId lhs, Int32OperandId rhs) {
-    writeOpWithOperandId(CacheOp::Int32SubResult, lhs);
-    writeOperandId(rhs);
-  }
-
   void int32MulResult(Int32OperandId lhs, Int32OperandId rhs) {
     writeOpWithOperandId(CacheOp::Int32MulResult, lhs);
     writeOperandId(rhs);
   }
 
   void int32DivResult(Int32OperandId lhs, Int32OperandId rhs) {
     writeOpWithOperandId(CacheOp::Int32DivResult, lhs);
     writeOperandId(rhs);
@@ -1665,21 +1658,16 @@ class MOZ_RAII CacheIRWriter : public JS
     buffer_.writeByte(uint32_t(elementType));
     buffer_.writeByte(uint32_t(handleOOB));
   }
 
   void loadStringLengthResult(StringOperandId str) {
     writeOpWithOperandId(CacheOp::LoadStringLengthResult, str);
   }
 
-  void loadStringCharResult(StringOperandId str, Int32OperandId index) {
-    writeOpWithOperandId(CacheOp::LoadStringCharResult, str);
-    writeOperandId(index);
-  }
-
   void callScriptedGetterResult(ObjOperandId obj, JSFunction* getter) {
     writeOpWithOperandId(CacheOp::CallScriptedGetterResult, obj);
     addStubField(uintptr_t(getter), StubField::Type::JSObject);
     buffer_.writeByte(cx_->realm() == getter->realm());
   }
 
   void callScriptedGetterByValueResult(ValOperandId obj, JSFunction* getter) {
     writeOpWithOperandId(CacheOp::CallScriptedGetterByValueResult, obj);
@@ -1702,23 +1690,16 @@ class MOZ_RAII CacheIRWriter : public JS
     addStubField(uintptr_t(JSID_BITS(id)), StubField::Type::Id);
   }
 
   void callProxyGetByValueResult(ObjOperandId obj, ValOperandId idVal) {
     writeOpWithOperandId(CacheOp::CallProxyGetByValueResult, obj);
     writeOperandId(idVal);
   }
 
-  void callProxyHasPropResult(ObjOperandId obj, ValOperandId idVal,
-                              bool hasOwn) {
-    writeOpWithOperandId(CacheOp::CallProxyHasPropResult, obj);
-    writeOperandId(idVal);
-    buffer_.writeByte(uint32_t(hasOwn));
-  }
-
   void callObjectHasSparseElementResult(ObjOperandId obj,
                                         Int32OperandId index) {
     writeOpWithOperandId(CacheOp::CallObjectHasSparseElementResult, obj);
     writeOperandId(index);
   }
 
   void callNativeGetElementResult(ObjOperandId obj, Int32OperandId index) {
     writeOpWithOperandId(CacheOp::CallNativeGetElementResult, obj);
@@ -1825,22 +1806,16 @@ class MOZ_RAII CacheIRWriter : public JS
   }
 
   void compareSymbolResult(JSOp op, SymbolOperandId lhs, SymbolOperandId rhs) {
     writeOpWithOperandId(CacheOp::CompareSymbolResult, lhs);
     writeOperandId(rhs);
     buffer_.writeByte(uint8_t(op));
   }
 
-  void compareInt32Result(JSOp op, Int32OperandId lhs, Int32OperandId rhs) {
-    writeOpWithOperandId(CacheOp::CompareInt32Result, lhs);
-    writeOperandId(rhs);
-    buffer_.writeByte(uint8_t(op));
-  }
-
   void compareDoubleResult(JSOp op, NumberOperandId lhs, NumberOperandId rhs) {
     writeOpWithOperandId(CacheOp::CompareDoubleResult, lhs);
     writeOperandId(rhs);
     buffer_.writeByte(uint8_t(op));
   }
 
   void compareBigIntResult(JSOp op, BigIntOperandId lhs, BigIntOperandId rhs) {
     writeOpWithOperandId(CacheOp::CompareBigIntResult, lhs);
@@ -1892,18 +1867,19 @@ class MOZ_RAII CacheIRWriter : public JS
 
   void callPrintString(const char* str) {
     writeOp(CacheOp::CallPrintString);
     writePointer(const_cast<char*>(str));
   }
 
   void breakpoint() { writeOp(CacheOp::Breakpoint); }
   void typeMonitorResult() { writeOp(CacheOp::TypeMonitorResult); }
-  void returnFromIC() { writeOp(CacheOp::ReturnFromIC); }
   void wrapResult() { writeOp(CacheOp::WrapResult); }
+
+  CACHE_IR_WRITER_GENERATED
 };
 
 class CacheIRStubInfo;
 
 // Helper class for reading CacheIR bytecode.
 class MOZ_RAII CacheIRReader {
   CompactBufferReader buffer_;
 
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -2454,41 +2454,43 @@ bool CacheIRCompiler::emitDoublePowResul
   ignore.add(FloatReg0);
   masm.PopRegsInMaskIgnore(save, ignore);
 
   masm.boxDouble(FloatReg0, output.valueReg(), FloatReg0);
 
   return true;
 }
 
-bool CacheIRCompiler::emitInt32AddResult() {
+bool CacheIRCompiler::emitInt32AddResult(Int32OperandId lhsId,
+                                         Int32OperandId rhsId) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   AutoOutputRegister output(*this);
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
-  Register lhs = allocator.useRegister(masm, reader.int32OperandId());
-  Register rhs = allocator.useRegister(masm, reader.int32OperandId());
+  Register lhs = allocator.useRegister(masm, lhsId);
+  Register rhs = allocator.useRegister(masm, rhsId);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.mov(rhs, scratch);
   masm.branchAdd32(Assembler::Overflow, lhs, scratch, failure->label());
   EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
 
   return true;
 }
-bool CacheIRCompiler::emitInt32SubResult() {
+bool CacheIRCompiler::emitInt32SubResult(Int32OperandId lhsId,
+                                         Int32OperandId rhsId) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   AutoOutputRegister output(*this);
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
-  Register lhs = allocator.useRegister(masm, reader.int32OperandId());
-  Register rhs = allocator.useRegister(masm, reader.int32OperandId());
+  Register lhs = allocator.useRegister(masm, lhsId);
+  Register rhs = allocator.useRegister(masm, rhsId);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.mov(lhs, scratch);
   masm.branchSub32(Assembler::Overflow, rhs, scratch, failure->label());
@@ -3104,21 +3106,22 @@ bool CacheIRCompiler::emitLoadStringLeng
   Register str = allocator.useRegister(masm, reader.stringOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   masm.loadStringLength(str, scratch);
   EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
   return true;
 }
 
-bool CacheIRCompiler::emitLoadStringCharResult() {
+bool CacheIRCompiler::emitLoadStringCharResult(StringOperandId strId,
+                                               Int32OperandId indexId) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   AutoOutputRegister output(*this);
-  Register str = allocator.useRegister(masm, reader.stringOperandId());
-  Register index = allocator.useRegister(masm, reader.int32OperandId());
+  Register str = allocator.useRegister(masm, strId);
+  Register index = allocator.useRegister(masm, indexId);
   AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
   AutoScratchRegister scratch2(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -4326,22 +4329,22 @@ bool CacheIRCompiler::emitCompareObjectR
   return emitComparePointerResultShared(false);
 }
 
 bool CacheIRCompiler::emitCompareSymbolResult() {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   return emitComparePointerResultShared(true);
 }
 
-bool CacheIRCompiler::emitCompareInt32Result() {
+bool CacheIRCompiler::emitCompareInt32Result(JSOp op, Int32OperandId lhsId,
+                                             Int32OperandId rhsId) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   AutoOutputRegister output(*this);
-  Register left = allocator.useRegister(masm, reader.int32OperandId());
-  Register right = allocator.useRegister(masm, reader.int32OperandId());
-  JSOp op = reader.jsop();
+  Register left = allocator.useRegister(masm, lhsId);
+  Register right = allocator.useRegister(masm, rhsId);
 
   Label ifTrue, done;
   masm.branch32(JSOpToCondition(op, /* signed = */ true), left, right, &ifTrue);
 
   EmitStoreBoolean(masm, false, output);
   masm.jump(&done);
 
   masm.bind(&ifTrue);
@@ -5640,23 +5643,24 @@ bool CacheIRCompiler::emitCallNativeGetE
 
   using Fn = bool (*)(JSContext*, HandleNativeObject, HandleValue, int32_t,
                       MutableHandleValue);
   callvm.call<Fn, NativeGetElement>();
 
   return true;
 }
 
-bool CacheIRCompiler::emitCallProxyHasPropResult() {
+bool CacheIRCompiler::emitCallProxyHasPropResult(ObjOperandId objId,
+                                                 ValOperandId idId,
+                                                 bool hasOwn) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
   AutoCallVM callvm(masm, this, allocator);
 
-  Register obj = allocator.useRegister(masm, reader.objOperandId());
-  ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
-  bool hasOwn = reader.readBool();
+  Register obj = allocator.useRegister(masm, objId);
+  ValueOperand idVal = allocator.useValueRegister(masm, idId);
 
   callvm.prepare();
 
   masm.Push(idVal);
   masm.Push(obj);
 
   using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
   if (hasOwn) {
--- a/js/src/jit/CacheIRCompiler.h
+++ b/js/src/jit/CacheIRCompiler.h
@@ -779,16 +779,18 @@ class MOZ_RAII CacheIRCompiler {
   MOZ_MUST_USE bool emitBigIntUnaryOperationShared();
 
   bool emitDoubleIncDecResult(bool isInc);
 
 #define DEFINE_SHARED_OP(op) MOZ_MUST_USE bool emit##op();
   CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
 #undef DEFINE_SHARED_OP
 
+  CACHE_IR_COMPILER_SHARED_GENERATED
+
   void emitLoadStubField(StubFieldOffset val, Register dest);
   void emitLoadStubFieldConstant(StubFieldOffset val, Register dest);
   Address emitAddressFromStubField(StubFieldOffset val, Register base);
 
   uintptr_t readStubWord(uint32_t offset, StubField::Type type) {
     MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
     MOZ_ASSERT((offset % sizeof(uintptr_t)) == 0);
     return writer_.readStubFieldForIon(offset, type).asWord();
--- a/js/src/jit/CacheIROps.yaml
+++ b/js/src/jit/CacheIROps.yaml
@@ -14,16 +14,21 @@
 # to the IC's output register.
 #
 # shared
 # ======
 # If true, Baseline and Ion use the same CacheIRCompiler code for this op.
 # If false, the op must be implemented in both BaselineCacheIRCompiler and
 # IonCacheIRCompiler.
 #
+# gen_boilerplate (optional)
+# ==========================
+# If true, generate CacheIRWriter and CacheIRCompiler boilerplate. This
+# attribute is temporary until all ops have been converted.
+#
 # operands
 # ========
 # List of operands. There are three kinds:
 #
 # - Id (ObjId, ValId, ...): refers to either an IC input or a value defined by
 #   a previous CacheIR instruction. This is encoded as integer in the bytecode
 #   stream.
 #
@@ -124,22 +129,24 @@
 - name: GuardType
   shared: true
   operands:
     val: ValId
     type: ValueTypeImm
 
 - name: GuardShape
   shared: false
+  gen_boilerplate: true
   operands:
     obj: ObjId
     shape: ShapeField
 
 - name: GuardGroup
   shared: false
+  gen_boilerplate: true
   operands:
     obj: ObjId
     group: GroupField
 
 - name: GuardProto
   shared: false
   operands:
     obj: ObjId
@@ -756,16 +763,17 @@
 
 - name: LoadFunctionLengthResult
   shared: true
   operands:
     obj: ObjId
 
 - name: LoadStringCharResult
   shared: true
+  gen_boilerplate: true
   operands:
     str: StrId
     index: Int32Id
 
 - name: LoadStringLengthResult
   shared: true
   operands:
     str: StrId
@@ -845,16 +853,17 @@
 - name: CallProxyGetByValueResult
   shared: true
   operands:
     obj: ObjId
     id: ValId
 
 - name: CallProxyHasPropResult
   shared: true
+  gen_boilerplate: true
   operands:
     obj: ObjId
     id: ValId
     hasOwn: BoolImm
 
 - name: CallObjectHasSparseElementResult
   shared: true
   operands:
@@ -925,22 +934,24 @@
 - name: DoublePowResult
   shared: true
   operands:
     lhs: NumId
     rhs: NumId
 
 - name: Int32AddResult
   shared: true
+  gen_boilerplate: true
   operands:
     lhs: Int32Id
     rhs: Int32Id
 
 - name: Int32SubResult
   shared: true
+  gen_boilerplate: true
   operands:
     lhs: Int32Id
     rhs: Int32Id
 
 - name: Int32MulResult
   shared: true
   operands:
     lhs: Int32Id
@@ -1194,20 +1205,21 @@
   shared: true
   operands:
     lhs: SymId
     rhs: SymId
     op: JSOpImm
 
 - name: CompareInt32Result
   shared: true
+  gen_boilerplate: true
   operands:
+    op: JSOpImm
     lhs: Int32Id
     rhs: Int32Id
-    op: JSOpImm
 
 - name: CompareDoubleResult
   shared: true
   operands:
     lhs: NumId
     rhs: NumId
     op: JSOpImm
 
@@ -1276,13 +1288,14 @@
   operands:
 
 - name: TypeMonitorResult
   shared: false
   operands:
 
 - name: ReturnFromIC
   shared: false
+  gen_boilerplate: true
   operands:
 
 - name: WrapResult
   shared: true
   operands:
--- a/js/src/jit/GenerateCacheIRFiles.py
+++ b/js/src/jit/GenerateCacheIRFiles.py
@@ -1,18 +1,18 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # This script generates jit/CacheIROpsGenerated.h from CacheIROps.yaml
 
 import buildconfig
 import yaml
+import six
 from collections import OrderedDict
-from six import StringIO
 from mozbuild.preprocessor import Preprocessor
 
 HEADER_TEMPLATE = """\
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef %(includeguard)s
@@ -33,17 +33,17 @@ def generate_header(c_out, includeguard,
     })
 
 
 def load_yaml(yaml_path):
     # First invoke preprocessor.py so that we can use #ifdef JS_SIMULATOR in
     # the YAML file.
     pp = Preprocessor()
     pp.context.update(buildconfig.defines['ALLDEFINES'])
-    pp.out = StringIO()
+    pp.out = six.StringIO()
     pp.do_filter('substitution')
     pp.do_include(yaml_path)
     contents = pp.out.getvalue()
 
     # Load into an OrderedDict to ensure order is preserved. Note: Python 3.7+
     # also preserves ordering for normal dictionaries.
     # Code based on https://stackoverflow.com/a/21912744.
     class OrderedLoader(yaml.Loader):
@@ -52,27 +52,134 @@ def load_yaml(yaml_path):
     def construct_mapping(loader, node):
         loader.flatten_mapping(node)
         return OrderedDict(loader.construct_pairs(node))
     tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
     OrderedLoader.add_constructor(tag, construct_mapping)
     return yaml.load(contents, OrderedLoader)
 
 
+# Information for generating CacheIRWriter code for a single operand. Tuple
+# stores the C++ argument type and the CacheIRWriter method to call.
+operand_writer_info = {
+    'ValId': ('ValOperandId', 'writeOperandId'),
+    'ObjId': ('ObjOperandId', 'writeOperandId'),
+    'StrId': ('StringOperandId', 'writeOperandId'),
+    'Int32Id': ('Int32OperandId', 'writeOperandId'),
+
+    'ShapeField': ('Shape*', 'writeShapeField'),
+    'GroupField': ('ObjectGroup*', 'writeGroupField'),
+
+    'JSOpImm': ('JSOp', 'writeJSOpImm'),
+    'BoolImm': ('bool', 'writeBoolImm'),
+}
+
+
+def gen_writer_method(name, operands):
+    """Generates a CacheIRWRiter method for a single opcode."""
+
+    # Generate a single method that writes the opcode and each operand.
+    # For example:
+    #
+    #   void guardShape(ObjOperandId obj, Shape* shape) {
+    #     writeOp(CacheOp::GuardShape);
+    #     writeOperandId(obj);
+    #     writeShapeField(shape);
+    #   }
+
+    # Method names start with a lowercase letter.
+    method_name = name[0].lower() + name[1:]
+
+    args_sig = []
+    operands_code = ''
+    if operands:
+        for opnd_name, opnd_type in six.iteritems(operands):
+            argtype, write_method = operand_writer_info[opnd_type]
+            args_sig.append('{} {}'.format(argtype, opnd_name))
+            operands_code += '  {}({});\\\n'.format(write_method, opnd_name)
+
+    code = 'void {}({}) {{\\\n'.format(method_name, ', '.join(args_sig))
+    code += '  writeOp(CacheOp::{});\\\n'.format(name)
+    code += operands_code
+    code += '}'
+    return code
+
+
+# Information for generating CacheIRCompiler code for a single operand. Tuple
+# stores the C++ type, the suffix used for arguments/variables of this type, and
+# the expression to read this type from CacheIRReader.
+operand_compiler_info = {
+    'ValId': ('ValOperandId', 'Id', 'reader.valOperandId()'),
+    'ObjId': ('ObjOperandId', 'Id', 'reader.objOperandId()'),
+    'StrId': ('StringOperandId', 'Id', 'reader.stringOperandId()'),
+    'Int32Id': ('Int32OperandId', 'Id', 'reader.int32OperandId()'),
+
+    'ShapeField': ('uint32_t', 'Offset', 'reader.stubOffset()'),
+    'GroupField': ('uint32_t', 'Offset', 'reader.stubOffset()'),
+
+    'JSOpImm': ('JSOp', '', 'reader.jsop()'),
+    'BoolImm': ('bool', '', 'reader.readBool()')
+}
+
+
+def gen_compiler_method(name, operands):
+    """Generates CacheIRCompiler header code for a single opcode."""
+
+    method_name = 'emit' + name
+
+    # If there are no operands, just generate a `bool emitFoo();` signature.
+    if not operands:
+        return 'MOZ_MUST_USE bool {}();\\\n'.format(method_name)
+
+    # If there are operands we generate the signature of the method that needs
+    # to be implemented and a separate function forwarding to it. For example:
+    #
+    #   MOZ_MUST_USE bool emitGuardShape(ObjOperandId objId, uint32_t shapeOffset);
+    #   MOZ_MUST_USE bool emitGuardShape() {
+    #     ObjOperandId objId = reader.objOperandId();
+    #     uint32_t shapeOffset = reader.stubOffset();
+    #     return emitGuardShape(objId, shapeOffset);
+    #   }
+    args_names = []
+    args_sig = []
+    operands_code = ''
+    for opnd_name, opnd_type in six.iteritems(operands):
+        vartype, suffix, readexpr = operand_compiler_info[opnd_type]
+        varname = opnd_name + suffix
+        args_names.append(varname)
+        args_sig.append('{} {}'.format(vartype, varname))
+        operands_code += '  {} {} = {};\\\n'.format(vartype, varname, readexpr)
+
+    # Generate signature.
+    code = 'MOZ_MUST_USE bool {}({});\\\n'.format(method_name, ', '.join(args_sig))
+
+    # Generate the method forwarding to it.
+    code += 'MOZ_MUST_USE bool {}() {{\\\n'.format(method_name)
+    code += operands_code
+    code += '  return {}({});\\\n'.format(method_name, ', '.join(args_names))
+    code += '}\\\n'
+
+    return code
+
+
 def generate_cacheirops_header(c_out, yaml_path):
     """Generate CacheIROpsGenerated.h from CacheIROps.yaml. The generated file
-    has a list of CacheIR ops, like this:
+    contains:
+
+    * A list of all CacheIR ops:
 
         #define CACHE_IR_OPS(_)\
         _(GuardToObject, Id)\
         _(CompareObjectUndefinedNullResult, Id, Byte)\
         ...
 
-    It also contains lists of shared and unshared ops. See the 'shared'
-    attribute in the YAML file.
+    * Lists of shared and unshared ops for the CacheIRCompiler classes. See the
+    'shared' attribute in the YAML file.
+
+    * Generated source code for CacheIRWriter and CacheIRCompiler.
     """
 
     data = load_yaml(yaml_path)
 
     # Mapping from operand types to the less precise types expected by current
     # C++ code.
     mapping = {
         'ValId': 'Id',
@@ -111,44 +218,77 @@ def generate_cacheirops_header(c_out, ya
         'Int32Imm': 'Int32',
 
         'UInt32Imm': 'UInt32',
 
         'JSNativeImm': 'Word',
         'StaticStringImm': 'Word',
     }
 
+    # CACHE_IR_OPS items.
     ops_items = []
+
+    # CACHE_IR_SHARED_OPS and CACHE_IR_UNSHARED_OPS items. These will go away
+    # when all ops have generated boilerplate.
     ops_shared = []
     ops_unshared = []
+
+    # Generated CacheIRWriter methods.
+    writer_methods = []
+
+    # Generated CacheIRCompiler methods.
+    compiler_shared_methods = []
+    compiler_unshared_methods = []
+
     for op in data:
         name = op['name']
 
         operands = op['operands']
         assert operands is None or isinstance(operands, OrderedDict)
 
         shared = op['shared']
         assert isinstance(shared, bool)
 
+        gen_boilerplate = op.get('gen_boilerplate', False)
+
         if operands:
             operands_str = ', '.join([mapping[v] for v in operands.values()])
         else:
             operands_str = 'None'
         ops_items.append('_({}, {})'.format(name, operands_str))
 
-        if shared:
-            ops_shared.append('_({})'.format(name))
+        if gen_boilerplate:
+            writer_methods.append(gen_writer_method(name, operands))
+            if shared:
+                compiler_shared_methods.append(gen_compiler_method(name, operands))
+            else:
+                compiler_unshared_methods.append(gen_compiler_method(name, operands))
         else:
-            ops_unshared.append('_({})'.format(name))
+            if shared:
+                ops_shared.append('_({})'.format(name))
+            else:
+                ops_unshared.append('_({})'.format(name))
 
     contents = '#define CACHE_IR_OPS(_)\\\n'
     contents += '\\\n'.join(ops_items)
     contents += '\n\n'
 
     contents += '#define CACHE_IR_SHARED_OPS(_)\\\n'
     contents += '\\\n'.join(ops_shared)
     contents += '\n\n'
 
     contents += '#define CACHE_IR_UNSHARED_OPS(_)\\\n'
     contents += '\\\n'.join(ops_unshared)
     contents += '\n\n'
 
+    contents += '#define CACHE_IR_WRITER_GENERATED \\\n'
+    contents += '\\\n'.join(writer_methods)
+    contents += '\n\n'
+
+    contents += '#define CACHE_IR_COMPILER_SHARED_GENERATED \\\n'
+    contents += '\\\n'.join(compiler_shared_methods)
+    contents += '\n\n'
+
+    contents += '#define CACHE_IR_COMPILER_UNSHARED_GENERATED \\\n'
+    contents += '\\\n'.join(compiler_unshared_methods)
+    contents += '\n\n'
+
     generate_header(c_out, 'jit_CacheIROpsGenerated_h', contents)
--- a/js/src/jit/IonCacheIRCompiler.cpp
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -565,21 +565,21 @@ JitCode* IonCacheIRCompiler::compile() {
     Assembler::PatchDataWithValueCheck(
         CodeLocationLabel(newStubCode, *stubJitCodeOffset_),
         ImmPtr(newStubCode.get()), ImmPtr((void*)-1));
   }
 
   return newStubCode;
 }
 
-bool IonCacheIRCompiler::emitGuardShape() {
+bool IonCacheIRCompiler::emitGuardShape(ObjOperandId objId,
+                                        uint32_t shapeOffset) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
-  ObjOperandId objId = reader.objOperandId();
   Register obj = allocator.useRegister(masm, objId);
-  Shape* shape = shapeStubField(reader.stubOffset());
+  Shape* shape = shapeStubField(shapeOffset);
 
   bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
 
   Maybe<AutoScratchRegister> maybeScratch;
   if (needSpectreMitigations) {
     maybeScratch.emplace(allocator, masm);
   }
 
@@ -594,21 +594,21 @@ bool IonCacheIRCompiler::emitGuardShape(
   } else {
     masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, shape,
                                                 failure->label());
   }
 
   return true;
 }
 
-bool IonCacheIRCompiler::emitGuardGroup() {
+bool IonCacheIRCompiler::emitGuardGroup(ObjOperandId objId,
+                                        uint32_t groupOffset) {
   JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
-  ObjOperandId objId = reader.objOperandId();
   Register obj = allocator.useRegister(masm, objId);
-  ObjectGroup* group = groupStubField(reader.stubOffset());
+  ObjectGroup* group = groupStubField(groupOffset);
 
   bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
 
   Maybe<AutoScratchRegister> maybeScratch;
   if (needSpectreMitigations) {
     maybeScratch.emplace(allocator, masm);
   }
 
--- a/js/src/jit/IonCacheIRCompiler.h
+++ b/js/src/jit/IonCacheIRCompiler.h
@@ -69,14 +69,16 @@ class MOZ_RAII IonCacheIRCompiler : publ
 
   bool needsPostBarrier() const;
 
   void pushStubCodePointer();
 
 #define DEFINE_OP(op) MOZ_MUST_USE bool emit##op();
   CACHE_IR_UNSHARED_OPS(DEFINE_OP)
 #undef DEFINE_OP
+
+  CACHE_IR_COMPILER_UNSHARED_GENERATED
 };
 
 }  // namespace jit
 }  // namespace js
 
 #endif /* jit_IonCacheIRCompiler_h */