Bug 1521092 - Codegen Spew: Output the name of the CacheIR functions. r=sstangl
authorNicolas B. Pierron <nicolas.b.pierron@nbp.name>
Wed, 23 Jan 2019 19:33:10 +0100
changeset 515436 35d94075e745d86bea017e573d11388b810f0ee8
parent 515435 88420eeebda0c49c945ef719128208f33b5f0d6c
child 515437 9c28047982f2ef9cae4e87301b8148230af7e17f
push id: 1953
push user: ffxbld-merge
push date: Mon, 11 Mar 2019 12:10:20 +0000
treeherder: mozilla-release@9c35dcbaa899 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: sstangl
bugs: 1521092
milestone: 66.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1521092 - Codegen Spew: Output the name of the CacheIR functions. r=sstangl
js/src/jit/BaselineCacheIRCompiler.cpp
js/src/jit/CacheIRCompiler.cpp
js/src/jit/IonCacheIRCompiler.cpp
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -224,16 +224,17 @@ JitCode* BaselineCacheIRCompiler::compil
     cx_->recoverFromOutOfMemory();
     return nullptr;
   }
 
   return newStubCode;
 }
 
 bool BaselineCacheIRCompiler::emitGuardShape() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Register obj = allocator.useRegister(masm, objId);
   AutoScratchRegister scratch1(allocator, masm);
 
   bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
 
   Maybe<AutoScratchRegister> maybeScratch2;
   if (needSpectreMitigations) {
@@ -254,16 +255,17 @@ bool BaselineCacheIRCompiler::emitGuardS
     masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj,
                                                 scratch1, failure->label());
   }
 
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardGroup() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Register obj = allocator.useRegister(masm, objId);
   AutoScratchRegister scratch1(allocator, masm);
 
   bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
 
   Maybe<AutoScratchRegister> maybeScratch2;
   if (needSpectreMitigations) {
@@ -284,31 +286,33 @@ bool BaselineCacheIRCompiler::emitGuardG
     masm.branchTestObjGroupNoSpectreMitigations(Assembler::NotEqual, obj,
                                                 scratch1, failure->label());
   }
 
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardProto() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   Address addr(stubAddress(reader.stubOffset()));
   masm.loadObjProto(obj, scratch);
   masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardCompartment() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -322,16 +326,17 @@ bool BaselineCacheIRCompiler::emitGuardC
 
   Address addr(stubAddress(reader.stubOffset()));
   masm.branchTestObjCompartment(Assembler::NotEqual, obj, addr, scratch,
                                 failure->label());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardAnyClass() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Register obj = allocator.useRegister(masm, objId);
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
@@ -344,16 +349,17 @@ bool BaselineCacheIRCompiler::emitGuardA
     masm.branchTestObjClassNoSpectreMitigations(
         Assembler::NotEqual, obj, testAddr, scratch, failure->label());
   }
 
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardHasProxyHandler() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -361,29 +367,31 @@ bool BaselineCacheIRCompiler::emitGuardH
   masm.loadPtr(testAddr, scratch);
 
   Address handlerAddr(obj, ProxyObject::offsetOfHandler());
   masm.branchPtr(Assembler::NotEqual, handlerAddr, scratch, failure->label());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardSpecificObject() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   Address addr(stubAddress(reader.stubOffset()));
   masm.branchPtr(Assembler::NotEqual, addr, obj, failure->label());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardSpecificAtom() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register str = allocator.useRegister(masm, reader.stringOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -421,57 +429,62 @@ bool BaselineCacheIRCompiler::emitGuardS
   masm.PopRegsInMaskIgnore(volatileRegs, ignore);
   masm.branchIfFalseBool(scratch, failure->label());
 
   masm.bind(&done);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardSpecificSymbol() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register sym = allocator.useRegister(masm, reader.symbolOperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   Address addr(stubAddress(reader.stubOffset()));
   masm.branchPtr(Assembler::NotEqual, addr, sym, failure->label());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadValueResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   masm.loadValue(stubAddress(reader.stubOffset()), output.valueReg());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadFixedSlotResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   masm.load32(stubAddress(reader.stubOffset()), scratch);
   masm.loadValue(BaseIndex(obj, scratch, TimesOne), output.valueReg());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadDynamicSlotResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   AutoScratchRegister scratch2(allocator, masm);
 
   masm.load32(stubAddress(reader.stubOffset()), scratch);
   masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
   masm.loadValue(BaseIndex(scratch2, scratch, TimesOne), output.valueReg());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardHasGetterSetter() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Address shapeAddr = stubAddress(reader.stubOffset());
 
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
@@ -494,16 +507,17 @@ bool BaselineCacheIRCompiler::emitGuardH
   masm.mov(ReturnReg, scratch1);
   masm.PopRegsInMask(volatileRegs);
 
   masm.branchIfFalseBool(scratch1, failure->label());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallScriptedGetterResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Address getterAddr(stubAddress(reader.stubOffset()));
   bool isCrossRealm = reader.readBool();
 
   AutoScratchRegister code(allocator, masm);
   AutoScratchRegister callee(allocator, masm);
   AutoScratchRegister scratch(allocator, masm);
 
@@ -567,16 +581,17 @@ bool BaselineCacheIRCompiler::emitCallSc
 }
 
 typedef bool (*CallNativeGetterFn)(JSContext*, HandleFunction, HandleObject,
                                    MutableHandleValue);
 static const VMFunction CallNativeGetterInfo =
     FunctionInfo<CallNativeGetterFn>(CallNativeGetter, "CallNativeGetter");
 
 bool BaselineCacheIRCompiler::emitCallNativeGetterResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Address getterAddr(stubAddress(reader.stubOffset()));
 
   AutoScratchRegister scratch(allocator, masm);
 
   allocator.discardStack(masm);
 
   AutoStubFrame stubFrame(*this);
@@ -592,16 +607,17 @@ bool BaselineCacheIRCompiler::emitCallNa
     return false;
   }
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallProxyGetResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Address idAddr(stubAddress(reader.stubOffset()));
 
   AutoScratchRegister scratch(allocator, masm);
 
   allocator.discardStack(masm);
 
   AutoStubFrame stubFrame(*this);
@@ -617,16 +633,17 @@ bool BaselineCacheIRCompiler::emitCallPr
     return false;
   }
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallProxyGetByValueResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
 
   AutoScratchRegister scratch(allocator, masm);
 
   allocator.discardStack(masm);
 
   AutoStubFrame stubFrame(*this);
@@ -639,16 +656,17 @@ bool BaselineCacheIRCompiler::emitCallPr
     return false;
   }
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallProxyHasPropResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
   bool hasOwn = reader.readBool();
 
   AutoScratchRegister scratch(allocator, masm);
 
   allocator.discardStack(masm);
 
@@ -668,16 +686,17 @@ bool BaselineCacheIRCompiler::emitCallPr
     }
   }
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallNativeGetElementResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
 
   AutoScratchRegister scratch(allocator, masm);
 
   allocator.discardStack(masm);
 
   AutoStubFrame stubFrame(*this);
@@ -691,62 +710,67 @@ bool BaselineCacheIRCompiler::emitCallNa
     return false;
   }
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadUnboxedPropertyResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   JSValueType fieldType = reader.valueType();
   Address fieldOffset(stubAddress(reader.stubOffset()));
   masm.load32(fieldOffset, scratch);
   masm.loadUnboxedProperty(BaseIndex(obj, scratch, TimesOne), fieldType,
                            output);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardFrameHasNoArgumentsObject() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.branchTest32(
       Assembler::NonZero,
       Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags()),
       Imm32(BaselineFrame::HAS_ARGS_OBJ), failure->label());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadFrameCalleeResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   Address callee(BaselineFrameReg, BaselineFrame::offsetOfCalleeToken());
   masm.loadFunctionFromCalleeToken(callee, scratch);
   masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadFrameNumActualArgsResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   Address actualArgs(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs());
   masm.loadPtr(actualArgs, scratch);
   masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadTypedObjectResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
   TypedThingLayout layout = reader.typedThingLayout();
   uint32_t typeDescr = reader.typeDescrKey();
   Address fieldOffset(stubAddress(reader.stubOffset()));
@@ -759,16 +783,17 @@ bool BaselineCacheIRCompiler::emitLoadTy
   masm.addPtr(scratch2, scratch1);
 
   Address fieldAddr(scratch1, 0);
   emitLoadTypedObjectResultShared(fieldAddr, scratch2, typeDescr, output);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadFrameArgumentResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -783,16 +808,17 @@ bool BaselineCacheIRCompiler::emitLoadFr
   // Load the argument.
   masm.loadValue(
       BaseValueIndex(BaselineFrameReg, index, BaselineFrame::offsetOfArg(0)),
       output.valueReg());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadEnvironmentFixedSlotResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
@@ -804,16 +830,17 @@ bool BaselineCacheIRCompiler::emitLoadEn
   masm.branchTestMagic(Assembler::Equal, slot, failure->label());
 
   // Load the value.
   masm.loadValue(slot, output.valueReg());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegister scratch(allocator, masm);
   AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -827,25 +854,27 @@ bool BaselineCacheIRCompiler::emitLoadEn
   masm.branchTestMagic(Assembler::Equal, slot, failure->label());
 
   // Load the value.
   masm.loadValue(slot, output.valueReg());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadStringResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   masm.loadPtr(stubAddress(reader.stubOffset()), scratch);
   masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallStringSplitResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register str = allocator.useRegister(masm, reader.stringOperandId());
   Register sep = allocator.useRegister(masm, reader.stringOperandId());
   Address groupAddr(stubAddress(reader.stubOffset()));
 
   AutoScratchRegister scratch(allocator, masm);
   allocator.discardStack(masm);
 
   AutoStubFrame stubFrame(*this);
@@ -863,16 +892,17 @@ bool BaselineCacheIRCompiler::emitCallSt
     return false;
   }
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCompareStringResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   Register left = allocator.useRegister(masm, reader.stringOperandId());
   Register right = allocator.useRegister(masm, reader.stringOperandId());
   JSOp op = reader.jsop();
 
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
@@ -962,16 +992,17 @@ bool BaselineCacheIRCompiler::callTypeUp
 
   stubFrame.leave(masm);
 
   masm.bind(&done);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitStoreSlotShared(bool isFixed) {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Address offsetAddr = stubAddress(reader.stubOffset());
 
   // Allocate the fixed registers first. These need to be fixed for
   // callTypeUpdateIC.
   AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
   ValueOperand val =
       allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);
@@ -1002,24 +1033,27 @@ bool BaselineCacheIRCompiler::emitStoreS
     masm.storeValue(val, slot);
   }
 
   emitPostBarrierSlot(obj, val, scratch1);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitStoreFixedSlot() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   return emitStoreSlotShared(true);
 }
 
 bool BaselineCacheIRCompiler::emitStoreDynamicSlot() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   return emitStoreSlotShared(false);
 }
 
 bool BaselineCacheIRCompiler::emitAddAndStoreSlotShared(CacheOp op) {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Address offsetAddr = stubAddress(reader.stubOffset());
 
   // Allocate the fixed registers first. These need to be fixed for
   // callTypeUpdateIC.
   AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
   ValueOperand val =
       allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);
@@ -1109,28 +1143,32 @@ bool BaselineCacheIRCompiler::emitAddAnd
     masm.storeValue(val, slot);
   }
 
   emitPostBarrierSlot(obj, val, scratch1);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitAddAndStoreFixedSlot() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot);
 }
 
 bool BaselineCacheIRCompiler::emitAddAndStoreDynamicSlot() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot);
 }
 
 bool BaselineCacheIRCompiler::emitAllocateAndStoreDynamicSlot() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot);
 }
 
 bool BaselineCacheIRCompiler::emitStoreUnboxedProperty() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   JSValueType fieldType = reader.valueType();
   Address offsetAddr = stubAddress(reader.stubOffset());
 
   // Allocate the fixed registers first. These need to be fixed for
   // callTypeUpdateIC.
   AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
   ValueOperand val =
@@ -1160,16 +1198,17 @@ bool BaselineCacheIRCompiler::emitStoreU
 
   if (UnboxedTypeNeedsPostBarrier(fieldType)) {
     emitPostBarrierSlot(obj, val, scratch);
   }
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitStoreTypedObjectReferenceProperty() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Address offsetAddr = stubAddress(reader.stubOffset());
   TypedThingLayout layout = reader.typedThingLayout();
   ReferenceType type = reader.referenceTypeDescrType();
 
   // Allocate the fixed registers first. These need to be fixed for
   // callTypeUpdateIC.
   AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
@@ -1196,16 +1235,17 @@ bool BaselineCacheIRCompiler::emitStoreT
 
   emitStoreTypedObjectReferenceProp(val, type, dest, scratch2);
   emitPostBarrierSlot(obj, val, scratch1);
 
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitStoreTypedObjectScalarProperty() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Address offsetAddr = stubAddress(reader.stubOffset());
   TypedThingLayout layout = reader.typedThingLayout();
   Scalar::Type type = reader.scalarType();
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
@@ -1219,16 +1259,17 @@ bool BaselineCacheIRCompiler::emitStoreT
   masm.addPtr(offsetAddr, scratch1);
   Address dest(scratch1, 0);
 
   StoreToTypedArray(cx_, masm, type, val, dest, scratch2, failure->label());
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitStoreDenseElement() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Int32OperandId indexId = reader.int32OperandId();
 
   // Allocate the fixed registers first. These need to be fixed for
   // callTypeUpdateIC.
   AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
   ValueOperand val =
       allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);
@@ -1296,16 +1337,17 @@ bool BaselineCacheIRCompiler::emitStoreD
   EmitPreBarrier(masm, element, MIRType::Value);
   masm.storeValue(val, element);
 
   emitPostBarrierElement(obj, val, scratch, index);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitStoreDenseElementHole() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Int32OperandId indexId = reader.int32OperandId();
 
   // Allocate the fixed registers first. These need to be fixed for
   // callTypeUpdateIC.
   AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
   ValueOperand val =
       allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);
@@ -1443,16 +1485,17 @@ bool BaselineCacheIRCompiler::emitStoreD
   masm.bind(&doStore);
   masm.storeValue(val, element);
 
   emitPostBarrierElement(obj, val, scratch, index);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitArrayPush() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   ValOperandId rhsId = reader.valOperandId();
 
   // Allocate the fixed registers first. These need to be fixed for
   // callTypeUpdateIC.
   AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
   ValueOperand val = allocator.useFixedValueRegister(masm, rhsId, R0);
 
@@ -1556,16 +1599,17 @@ bool BaselineCacheIRCompiler::emitArrayP
   // Return value is new length.
   masm.add32(Imm32(1), scratchLength);
   masm.tagValue(JSVAL_TYPE_INT32, scratchLength, val);
 
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitStoreTypedElement() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
 
   TypedThingLayout layout = reader.typedThingLayout();
   Scalar::Type type = reader.scalarType();
   bool handleOOB = reader.readBool();
 
@@ -1611,16 +1655,17 @@ bool BaselineCacheIRCompiler::emitStoreT
 }
 
 typedef bool (*CallNativeSetterFn)(JSContext*, HandleFunction, HandleObject,
                                    HandleValue);
 static const VMFunction CallNativeSetterInfo =
     FunctionInfo<CallNativeSetterFn>(CallNativeSetter, "CallNativeSetter");
 
 bool BaselineCacheIRCompiler::emitCallNativeSetter() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Address setterAddr(stubAddress(reader.stubOffset()));
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
 
   AutoScratchRegister scratch(allocator, masm);
 
   allocator.discardStack(masm);
 
@@ -1638,16 +1683,17 @@ bool BaselineCacheIRCompiler::emitCallNa
     return false;
   }
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallScriptedSetter() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Address setterAddr(stubAddress(reader.stubOffset()));
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   bool isCrossRealm = reader.readBool();
 
@@ -1716,16 +1762,17 @@ bool BaselineCacheIRCompiler::emitCallSc
   if (isCrossRealm) {
     masm.switchToBaselineFrameRealm(R1.scratchReg());
   }
 
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallSetArrayLength() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   bool strict = reader.readBool();
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
 
   AutoScratchRegister scratch(allocator, masm);
 
   allocator.discardStack(masm);
 
@@ -1740,16 +1787,17 @@ bool BaselineCacheIRCompiler::emitCallSe
     return false;
   }
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallProxySet() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   Address idAddr(stubAddress(reader.stubOffset()));
   bool strict = reader.readBool();
 
   AutoScratchRegister scratch(allocator, masm);
 
   allocator.discardStack(masm);
@@ -1769,16 +1817,17 @@ bool BaselineCacheIRCompiler::emitCallPr
     return false;
   }
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallProxySetByValue() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   bool strict = reader.readBool();
 
   allocator.discardStack(masm);
 
   // We need a scratch register but we don't have any registers available on
@@ -1803,16 +1852,17 @@ bool BaselineCacheIRCompiler::emitCallPr
     return false;
   }
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallAddOrUpdateSparseElementHelper() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register id = allocator.useRegister(masm, reader.int32OperandId());
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   bool strict = reader.readBool();
   AutoScratchRegister scratch(allocator, masm);
 
   allocator.discardStack(masm);
 
@@ -1827,16 +1877,17 @@ bool BaselineCacheIRCompiler::emitCallAd
   if (!callVM(masm, AddOrUpdateSparseElementHelperInfo)) {
     return false;
   }
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallGetSparseElementResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register id = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   allocator.discardStack(masm);
 
   AutoStubFrame stubFrame(*this);
   stubFrame.enter(masm, scratch);
@@ -1847,16 +1898,17 @@ bool BaselineCacheIRCompiler::emitCallGe
   if (!callVM(masm, GetSparseElementHelperInfo)) {
     return false;
   }
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitMegamorphicSetElement() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   bool strict = reader.readBool();
 
   allocator.discardStack(masm);
 
   // We need a scratch register but we don't have any registers available on
@@ -1882,36 +1934,40 @@ bool BaselineCacheIRCompiler::emitMegamo
     return false;
   }
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitTypeMonitorResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   allocator.discardStack(masm);
   EmitEnterTypeMonitorIC(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitReturnFromIC() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   allocator.discardStack(masm);
   EmitReturnFromIC(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadStackValue() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValueOperand val = allocator.defineValueRegister(masm, reader.valOperandId());
   Address addr =
       allocator.addressOf(masm, BaselineFrameSlot(reader.uint32Immediate()));
   masm.loadValue(addr, val);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardAndGetIterator() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
 
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
   AutoScratchRegister niScratch(allocator, masm);
 
   Address iterAddr(stubAddress(reader.stubOffset()));
   Address enumeratorsAddr(stubAddress(reader.stubOffset()));
@@ -1950,16 +2006,17 @@ bool BaselineCacheIRCompiler::emitGuardA
   masm.loadPtr(enumeratorsAddr, scratch1);
   masm.loadPtr(Address(scratch1, 0), scratch1);
   emitRegisterEnumerator(scratch1, niScratch, scratch2);
 
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   AutoScratchRegister shapeScratch(allocator, masm);
   AutoScratchRegister objScratch(allocator, masm);
   Address shapeAddr(stubAddress(reader.stubOffset()));
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -1976,16 +2033,17 @@ bool BaselineCacheIRCompiler::emitGuardD
   masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch,
                                               shapeScratch, failure->label());
 
   masm.bind(&done);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Address expandoAndGenerationAddr(stubAddress(reader.stubOffset()));
   Address generationAddr(stubAddress(reader.stubOffset()));
 
   AutoScratchRegister scratch(allocator, masm);
   ValueOperand output =
       allocator.defineValueRegister(masm, reader.valOperandId());
 
@@ -2319,16 +2377,17 @@ uint8_t* ICCacheIR_Monitored::stubDataSt
   return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
 }
 
 uint8_t* ICCacheIR_Updated::stubDataStart() {
   return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
 }
 
 bool BaselineCacheIRCompiler::emitCallStringConcatResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register lhs = allocator.useRegister(masm, reader.stringOperandId());
   Register rhs = allocator.useRegister(masm, reader.stringOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   allocator.discardStack(masm);
 
   AutoStubFrame stubFrame(*this);
@@ -2343,16 +2402,17 @@ bool BaselineCacheIRCompiler::emitCallSt
 
   masm.tagValue(JSVAL_TYPE_STRING, ReturnReg, output.valueReg());
 
   stubFrame.leave(masm);
   return true;
 }
 
 bool BaselineCacheIRCompiler::emitCallStringObjectConcatResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValueOperand lhs = allocator.useValueRegister(masm, reader.valOperandId());
   ValueOperand rhs = allocator.useValueRegister(masm, reader.valOperandId());
 
   allocator.discardStack(masm);
 
   // For the expression decompiler
   EmitRestoreTailCallReg(masm);
   masm.pushValue(lhs);
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -1259,16 +1259,17 @@ bool CacheIRCompiler::addFailurePath(Fai
     return false;
   }
 
   *failure = &failurePaths.back();
   return true;
 }
 
 bool CacheIRCompiler::emitFailurePath(size_t index) {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   FailurePath& failure = failurePaths[index];
 
   allocator.setStackPushed(failure.stackPushed());
 
   for (size_t i = 0; i < writer_.numInputOperands(); i++) {
     allocator.setOperandLocation(i, failure.input(i));
   }
 
@@ -1277,16 +1278,17 @@ bool CacheIRCompiler::emitFailurePath(si
   }
 
   masm.bind(failure.label());
   allocator.restoreInputState(masm);
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsNumber() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   JSValueType knownType = allocator.knownType(inputId);
 
   // Doubles and ints are numbers!
   if (knownType == JSVAL_TYPE_DOUBLE || knownType == JSVAL_TYPE_INT32) {
     return true;
   }
 
@@ -1296,31 +1298,33 @@ bool CacheIRCompiler::emitGuardIsNumber(
     return false;
   }
 
   masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsObject() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) {
     return true;
   }
 
   ValueOperand input = allocator.useValueRegister(masm, inputId);
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
   masm.branchTestObject(Assembler::NotEqual, input, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsNullOrUndefined() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   JSValueType knownType = allocator.knownType(inputId);
   if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL) {
     return true;
   }
 
   ValueOperand input = allocator.useValueRegister(masm, inputId);
   FailurePath* failure;
@@ -1332,16 +1336,17 @@ bool CacheIRCompiler::emitGuardIsNullOrU
   masm.branchTestNull(Assembler::Equal, input, &success);
   masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
 
   masm.bind(&success);
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsNotNullOrUndefined() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   JSValueType knownType = allocator.knownType(inputId);
   if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL) {
     return false;
   }
 
   ValueOperand input = allocator.useValueRegister(masm, inputId);
   FailurePath* failure;
@@ -1351,16 +1356,17 @@ bool CacheIRCompiler::emitGuardIsNotNull
 
   masm.branchTestNull(Assembler::Equal, input, failure->label());
   masm.branchTestUndefined(Assembler::Equal, input, failure->label());
 
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsNull() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   JSValueType knownType = allocator.knownType(inputId);
   if (knownType == JSVAL_TYPE_NULL) {
     return true;
   }
 
   ValueOperand input = allocator.useValueRegister(masm, inputId);
   FailurePath* failure;
@@ -1369,16 +1375,17 @@ bool CacheIRCompiler::emitGuardIsNull() 
   }
 
   Label success;
   masm.branchTestNull(Assembler::NotEqual, input, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsUndefined() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   JSValueType knownType = allocator.knownType(inputId);
   if (knownType == JSVAL_TYPE_UNDEFINED) {
     return true;
   }
 
   ValueOperand input = allocator.useValueRegister(masm, inputId);
   FailurePath* failure;
@@ -1386,16 +1393,17 @@ bool CacheIRCompiler::emitGuardIsUndefin
     return false;
   }
 
   masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsObjectOrNull() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   JSValueType knownType = allocator.knownType(inputId);
   if (knownType == JSVAL_TYPE_OBJECT || knownType == JSVAL_TYPE_NULL) {
     return true;
   }
 
   ValueOperand input = allocator.useValueRegister(masm, inputId);
   FailurePath* failure;
@@ -1406,16 +1414,17 @@ bool CacheIRCompiler::emitGuardIsObjectO
   Label done;
   masm.branchTestObject(Assembler::Equal, input, &done);
   masm.branchTestNull(Assembler::NotEqual, input, failure->label());
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsBoolean() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   Register output = allocator.defineRegister(masm, reader.int32OperandId());
 
   if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
     Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
     masm.move32(input, output);
     return true;
   }
@@ -1427,46 +1436,49 @@ bool CacheIRCompiler::emitGuardIsBoolean
   }
 
   masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
   masm.unboxBoolean(input, output);
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsString() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) {
     return true;
   }
 
   ValueOperand input = allocator.useValueRegister(masm, inputId);
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
   masm.branchTestString(Assembler::NotEqual, input, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsSymbol() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) {
     return true;
   }
 
   ValueOperand input = allocator.useValueRegister(masm, inputId);
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
   masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsInt32() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   Register output = allocator.defineRegister(masm, reader.int32OperandId());
 
   if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
     Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
     masm.move32(input, output);
     return true;
   }
@@ -1478,16 +1490,17 @@ bool CacheIRCompiler::emitGuardIsInt32()
   }
 
   masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
   masm.unboxInt32(input, output);
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsInt32Index() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   Register output = allocator.defineRegister(masm, reader.int32OperandId());
 
   if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
     Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
     masm.move32(input, output);
     return true;
   }
@@ -1528,16 +1541,17 @@ bool CacheIRCompiler::emitGuardIsInt32In
     masm.jump(failure->label());
   }
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitGuardType() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValOperandId inputId = reader.valOperandId();
   JSValueType type = reader.valueType();
 
   if (allocator.knownType(inputId) == type) {
     return true;
   }
 
   ValueOperand input = allocator.useValueRegister(masm, inputId);
@@ -1572,16 +1586,17 @@ bool CacheIRCompiler::emitGuardType() {
     default:
       MOZ_CRASH("Unexpected type");
   }
 
   return true;
 }
 
 bool CacheIRCompiler::emitGuardClass() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Register obj = allocator.useRegister(masm, objId);
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
@@ -1613,16 +1628,17 @@ bool CacheIRCompiler::emitGuardClass() {
     masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                 scratch, failure->label());
   }
 
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsExtensible() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -1640,16 +1656,17 @@ bool CacheIRCompiler::emitGuardIsExtensi
   // Spectre-style checks are not needed here because we do not
   // interpret data based on this check.
   masm.branch32(Assembler::Equal, scratch, Imm32(js::BaseShape::NOT_EXTENSIBLE),
                 failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsNativeFunction() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   JSNative nativeFunc = reinterpret_cast<JSNative>(reader.pointer());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
@@ -1662,16 +1679,17 @@ bool CacheIRCompiler::emitGuardIsNativeF
   // Ensure function native matches.
   masm.branchPtr(Assembler::NotEqual,
                  Address(obj, JSFunction::offsetOfNativeOrEnv()),
                  ImmPtr(nativeFunc), failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardFunctionPrototype() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register prototypeObject = allocator.useRegister(masm, reader.objOperandId());
 
   // Allocate registers before the failure path to make sure they're registered
   // by addFailurePath.
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
@@ -1689,129 +1707,138 @@ bool CacheIRCompiler::emitGuardFunctionP
   masm.unboxObject(prototypeSlot, scratch1);
   masm.branchPtr(Assembler::NotEqual, prototypeObject, scratch1,
                  failure->label());
 
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsNativeObject() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.branchIfNonNativeObj(obj, scratch, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIsProxy() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardNotDOMProxy() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
                                     GetDOMProxyHandlerFamily(),
                                     failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardSpecificInt32Immediate() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register reg = allocator.useRegister(masm, reader.int32OperandId());
   int32_t ival = reader.int32Immediate();
   Assembler::Condition cond = (Assembler::Condition)reader.readByte();
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.branch32(Assembler::InvertCondition(cond), reg, Imm32(ival),
                 failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardMagicValue() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   JSWhyMagic magic = reader.whyMagic();
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.branchTestMagicValue(Assembler::NotEqual, val, magic, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardNoUnboxedExpando() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   Address expandoAddr(obj, UnboxedPlainObject::offsetOfExpando());
   masm.branchPtr(Assembler::NotEqual, expandoAddr, ImmWord(0),
                  failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardAndLoadUnboxedExpando() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register output = allocator.defineRegister(masm, reader.objOperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   Address expandoAddr(obj, UnboxedPlainObject::offsetOfExpando());
   masm.loadPtr(expandoAddr, output);
   masm.branchTestPtr(Assembler::Zero, output, output, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardNoDetachedTypedObjects() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   // All stubs manipulating typed objects must check the zone-wide flag
   // indicating whether their underlying storage might be detached, to bail
   // out if needed.
   uint32_t* address = &cx_->zone()->detachedTypedObjects;
   masm.branch32(Assembler::NotEqual, AbsoluteAddress(address), Imm32(0),
                 failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardNoDenseElements() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -1820,16 +1847,17 @@ bool CacheIRCompiler::emitGuardNoDenseEl
 
   // Make sure there are no dense elements.
   Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
   masm.branch32(Assembler::NotEqual, initLength, Imm32(0), failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardAndGetNumberFromString() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register str = allocator.useRegister(masm, reader.stringOperandId());
   ValueOperand output =
       allocator.defineValueRegister(masm, reader.valOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -1882,16 +1910,17 @@ bool CacheIRCompiler::emitGuardAndGetNum
     masm.boxDouble(FloatReg0, output, FloatReg0);
     masm.freeStack(sizeof(double));
   }
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitGuardAndGetIndexFromString() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register str = allocator.useRegister(masm, reader.stringOperandId());
   Register output = allocator.defineRegister(masm, reader.int32OperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -1918,64 +1947,70 @@ bool CacheIRCompiler::emitGuardAndGetInd
     masm.branchTest32(Assembler::Signed, output, output, failure->label());
   }
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadProto() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register reg = allocator.defineRegister(masm, reader.objOperandId());
   masm.loadObjProto(obj, reg);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadEnclosingEnvironment() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register reg = allocator.defineRegister(masm, reader.objOperandId());
   masm.unboxObject(
       Address(obj, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadWrapperTarget() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register reg = allocator.defineRegister(masm, reader.objOperandId());
 
   masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);
   masm.unboxObject(
       Address(reg, detail::ProxyReservedSlots::offsetOfPrivateSlot()), reg);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadValueTag() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   Register res = allocator.defineRegister(masm, reader.valueTagOperandId());
 
   Register tag = masm.extractTag(val, res);
   if (tag != res) {
     masm.mov(tag, res);
   }
   return true;
 }
 
 bool CacheIRCompiler::emitLoadDOMExpandoValue() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ValueOperand val = allocator.defineValueRegister(masm, reader.valOperandId());
 
   masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
                val.scratchReg());
   masm.loadValue(Address(val.scratchReg(),
                          detail::ProxyReservedSlots::offsetOfPrivateSlot()),
                  val);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ValueOperand output =
       allocator.defineValueRegister(masm, reader.valOperandId());
 
   // Determine the expando's Address.
   Register scratch = output.scratchReg();
   masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
   Address expandoAddr(scratch,
@@ -1994,16 +2029,17 @@ bool CacheIRCompiler::emitLoadDOMExpando
 
   // Load expandoAndGeneration->expando into the output Value register.
   masm.loadValue(Address(scratch, ExpandoAndGeneration::offsetOfExpando()),
                  output);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadUndefinedResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   if (output.hasValue()) {
     masm.moveValue(UndefinedValue(), output.valueReg());
   } else {
     masm.assumeUnreachable("Should have monitored undefined result");
   }
   return true;
 }
@@ -2015,16 +2051,17 @@ static void EmitStoreBoolean(MacroAssemb
     masm.moveValue(val, output.valueReg());
   } else {
     MOZ_ASSERT(output.type() == JSVAL_TYPE_BOOLEAN);
     masm.movePtr(ImmWord(b), output.typedReg().gpr());
   }
 }
 
 bool CacheIRCompiler::emitLoadBooleanResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   bool b = reader.readBool();
   EmitStoreBoolean(masm, b, output);
 
   return true;
 }
 
 static void EmitStoreResult(MacroAssembler& masm, Register reg,
@@ -2041,16 +2078,17 @@ static void EmitStoreResult(MacroAssembl
   if (type == output.type()) {
     masm.mov(reg, output.typedReg().gpr());
     return;
   }
   masm.assumeUnreachable("Should have monitored result");
 }
 
 bool CacheIRCompiler::emitLoadInt32ArrayLengthResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
@@ -2060,63 +2098,68 @@ bool CacheIRCompiler::emitLoadInt32Array
 
   // Guard length fits in an int32.
   masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
   EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
   return true;
 }
 
 bool CacheIRCompiler::emitDoubleAddResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   // Float register must be preserved. The BinaryArith ICs use
   // the fact that baseline has them available, as well as fixed temps on
   // LBinaryCache.
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg0);
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg1);
 
   masm.addDouble(FloatReg1, FloatReg0);
   masm.boxDouble(FloatReg0, output.valueReg(), FloatReg0);
 
   return true;
 }
 bool CacheIRCompiler::emitDoubleSubResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg0);
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg1);
 
   masm.subDouble(FloatReg1, FloatReg0);
   masm.boxDouble(FloatReg0, output.valueReg(), FloatReg0);
 
   return true;
 }
 bool CacheIRCompiler::emitDoubleMulResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg0);
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg1);
 
   masm.mulDouble(FloatReg1, FloatReg0);
   masm.boxDouble(FloatReg0, output.valueReg(), FloatReg0);
 
   return true;
 }
 bool CacheIRCompiler::emitDoubleDivResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg0);
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg1);
 
   masm.divDouble(FloatReg1, FloatReg0);
   masm.boxDouble(FloatReg0, output.valueReg(), FloatReg0);
 
   return true;
 }
 bool CacheIRCompiler::emitDoubleModResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg0);
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg1);
 
   LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
   masm.PushRegsInMask(save);
@@ -2132,47 +2175,50 @@ bool CacheIRCompiler::emitDoubleModResul
   masm.PopRegsInMaskIgnore(save, ignore);
 
   masm.boxDouble(FloatReg0, output.valueReg(), FloatReg0);
 
   return true;
 }
 
 bool CacheIRCompiler::emitInt32AddResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register lhs = allocator.useRegister(masm, reader.int32OperandId());
   Register rhs = allocator.useRegister(masm, reader.int32OperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.branchAdd32(Assembler::Overflow, lhs, rhs, failure->label());
   EmitStoreResult(masm, rhs, JSVAL_TYPE_INT32, output);
 
   return true;
 }
 bool CacheIRCompiler::emitInt32SubResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register lhs = allocator.useRegister(masm, reader.int32OperandId());
   Register rhs = allocator.useRegister(masm, reader.int32OperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.branchSub32(Assembler::Overflow, rhs, lhs, failure->label());
   EmitStoreResult(masm, lhs, JSVAL_TYPE_INT32, output);
 
   return true;
 }
 
 bool CacheIRCompiler::emitInt32MulResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register lhs = allocator.useRegister(masm, reader.int32OperandId());
   Register rhs = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -2190,16 +2236,17 @@ bool CacheIRCompiler::emitInt32MulResult
   masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
 
   masm.bind(&done);
   EmitStoreResult(masm, lhs, JSVAL_TYPE_INT32, output);
   return true;
 }
 
 bool CacheIRCompiler::emitInt32DivResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register lhs = allocator.useRegister(masm, reader.int32OperandId());
   Register rhs = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegister rem(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -2222,16 +2269,17 @@ bool CacheIRCompiler::emitInt32DivResult
 
   // A remainder implies a double result.
   masm.branchTest32(Assembler::NonZero, rem, rem, failure->label());
   EmitStoreResult(masm, lhs, JSVAL_TYPE_INT32, output);
   return true;
 }
 
 bool CacheIRCompiler::emitInt32ModResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register lhs = allocator.useRegister(masm, reader.int32OperandId());
   Register rhs = allocator.useRegister(masm, reader.int32OperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
@@ -2253,75 +2301,81 @@ bool CacheIRCompiler::emitInt32ModResult
   masm.flexibleRemainder32(rhs, lhs, false, volatileRegs);
 
   EmitStoreResult(masm, lhs, JSVAL_TYPE_INT32, output);
 
   return true;
 }
 
 bool CacheIRCompiler::emitInt32BitOrResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   Register lhs = allocator.useRegister(masm, reader.int32OperandId());
   Register rhs = allocator.useRegister(masm, reader.int32OperandId());
 
   masm.or32(lhs, rhs);
   EmitStoreResult(masm, rhs, JSVAL_TYPE_INT32, output);
 
   return true;
 }
 bool CacheIRCompiler::emitInt32BitXorResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   Register lhs = allocator.useRegister(masm, reader.int32OperandId());
   Register rhs = allocator.useRegister(masm, reader.int32OperandId());
 
   masm.xor32(lhs, rhs);
   EmitStoreResult(masm, rhs, JSVAL_TYPE_INT32, output);
 
   return true;
 }
 bool CacheIRCompiler::emitInt32BitAndResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   Register lhs = allocator.useRegister(masm, reader.int32OperandId());
   Register rhs = allocator.useRegister(masm, reader.int32OperandId());
 
   masm.and32(lhs, rhs);
   EmitStoreResult(masm, rhs, JSVAL_TYPE_INT32, output);
 
   return true;
 }
 bool CacheIRCompiler::emitInt32LeftShiftResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register lhs = allocator.useRegister(masm, reader.int32OperandId());
   Register rhs = allocator.useRegister(masm, reader.int32OperandId());
 
   // Mask shift amount as specified by 12.9.3.1 Step 7
   masm.and32(Imm32(0x1F), rhs);
   masm.flexibleLshift32(rhs, lhs);
   EmitStoreResult(masm, lhs, JSVAL_TYPE_INT32, output);
 
   return true;
 }
 
 bool CacheIRCompiler::emitInt32RightShiftResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register lhs = allocator.useRegister(masm, reader.int32OperandId());
   Register rhs = allocator.useRegister(masm, reader.int32OperandId());
 
   // Mask shift amount as specified by 12.9.4.1 Step 7
   masm.and32(Imm32(0x1F), rhs);
   masm.flexibleRshift32Arithmetic(rhs, lhs);
   EmitStoreResult(masm, lhs, JSVAL_TYPE_INT32, output);
 
   return true;
 }
 
 bool CacheIRCompiler::emitInt32URightShiftResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   Register lhs = allocator.useRegister(masm, reader.int32OperandId());
   Register rhs = allocator.useRegister(masm, reader.int32OperandId());
   bool allowDouble = reader.readBool();
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
@@ -2346,16 +2400,17 @@ bool CacheIRCompiler::emitInt32URightShi
   }
   masm.bind(&intDone);
   EmitStoreResult(masm, lhs, JSVAL_TYPE_INT32, output);
   masm.bind(&floatDone);
   return true;
 }
 
 bool CacheIRCompiler::emitInt32NegationResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register val = allocator.useRegister(masm, reader.int32OperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -2397,24 +2452,26 @@ bool CacheIRCompiler::emitInt32DecResult
   masm.mov(input, scratch);
   masm.branchSub32(Assembler::Overflow, Imm32(1), scratch, failure->label());
   EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
 
   return true;
 }
 
 bool CacheIRCompiler::emitInt32NotResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register val = allocator.useRegister(masm, reader.int32OperandId());
   masm.not32(val);
   masm.tagValue(JSVAL_TYPE_INT32, val, output.valueReg());
   return true;
 }
 
 bool CacheIRCompiler::emitDoubleNegationResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -2486,16 +2543,17 @@ bool CacheIRCompiler::emitDoubleIncResul
   return emitDoubleIncDecResult(true);
 }
 
 bool CacheIRCompiler::emitDoubleDecResult() {
   return emitDoubleIncDecResult(false);
 }
 
 bool CacheIRCompiler::emitTruncateDoubleToUInt32() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   Register res = allocator.defineRegister(masm, reader.int32OperandId());
 
   Label int32, done;
   masm.branchTestInt32(Assembler::Equal, val, &int32);
 
   Label doneTruncate, truncateABICall;
   if (mode_ != Mode::Baseline) {
@@ -2533,16 +2591,17 @@ bool CacheIRCompiler::emitTruncateDouble
 
   masm.unboxInt32(val, res);
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
@@ -2559,16 +2618,17 @@ bool CacheIRCompiler::emitLoadArgumentsO
   // Shift out arguments length and return it. No need to type monitor
   // because this stub always returns int32.
   masm.rshiftPtr(Imm32(ArgumentsObject::PACKED_BITS_COUNT), scratch);
   EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadFunctionLengthResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
@@ -2610,26 +2670,28 @@ bool CacheIRCompiler::emitLoadFunctionLe
                         scratch);
 
   masm.bind(&done);
   EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadStringLengthResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register str = allocator.useRegister(masm, reader.stringOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   masm.loadStringLength(str, scratch);
   EmitStoreResult(masm, scratch, JSVAL_TYPE_INT32, output);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadStringCharResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register str = allocator.useRegister(masm, reader.stringOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
   AutoScratchRegister scratch2(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
@@ -2647,16 +2709,17 @@ bool CacheIRCompiler::emitLoadStringChar
   masm.movePtr(ImmPtr(&cx_->staticStrings().unitStaticTable), scratch2);
   masm.loadPtr(BaseIndex(scratch2, scratch1, ScalePointer), scratch2);
 
   EmitStoreResult(masm, scratch2, JSVAL_TYPE_STRING, output);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadArgumentsObjectArgResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
@@ -2689,16 +2752,17 @@ bool CacheIRCompiler::emitLoadArgumentsO
   // Guard the argument is not a FORWARD_TO_CALL_SLOT MagicValue.
   BaseValueIndex argValue(scratch1, index, ArgumentsData::offsetOfArgs());
   masm.branchTestMagic(Assembler::Equal, argValue, failure->label());
   masm.loadValue(argValue, output.valueReg());
   return true;
 }
 
 bool CacheIRCompiler::emitLoadDenseElementResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
@@ -2715,28 +2779,30 @@ bool CacheIRCompiler::emitLoadDenseEleme
   // Hole check.
   BaseObjectElementIndex element(scratch1, index);
   masm.branchTestMagic(Assembler::Equal, element, failure->label());
   masm.loadTypedOrValue(element, output);
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIndexIsNonNegative() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register index = allocator.useRegister(masm, reader.int32OperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIndexGreaterThanDenseInitLength() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegister scratch(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -2751,16 +2817,17 @@ bool CacheIRCompiler::emitGuardIndexGrea
   masm.spectreBoundsCheck32(index, capacity, scratch2, &outOfBounds);
   masm.jump(failure->label());
   masm.bind(&outOfBounds);
 
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIndexGreaterThanDenseCapacity() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegister scratch(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -2775,16 +2842,17 @@ bool CacheIRCompiler::emitGuardIndexGrea
   masm.spectreBoundsCheck32(index, capacity, scratch2, &outOfBounds);
   masm.jump(failure->label());
   masm.bind(&outOfBounds);
 
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIndexGreaterThanArrayLength() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegister scratch(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -2798,16 +2866,17 @@ bool CacheIRCompiler::emitGuardIndexGrea
   Address length(scratch, ObjectElements::offsetOfLength());
   masm.spectreBoundsCheck32(index, length, scratch2, &outOfBounds);
   masm.jump(failure->label());
   masm.bind(&outOfBounds);
   return true;
 }
 
 bool CacheIRCompiler::emitGuardIndexIsValidUpdateOrAdd() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegister scratch(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -2828,16 +2897,17 @@ bool CacheIRCompiler::emitGuardIndexIsVa
   Address length(scratch, ObjectElements::offsetOfLength());
   masm.spectreBoundsCheck32(index, length, scratch2,
                             /* failure = */ failure->label());
   masm.bind(&success);
   return true;
 }
 
 bool CacheIRCompiler::emitGuardTagNotEqual() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register lhs = allocator.useRegister(masm, reader.valueTagOperandId());
   Register rhs = allocator.useRegister(masm, reader.valueTagOperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -2850,16 +2920,17 @@ bool CacheIRCompiler::emitGuardTagNotEqu
   masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
   masm.jump(failure->label());
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   bool hasExpando = reader.readBool();
   StubFieldOffset shapeWrapper(reader.stubOffset(), StubField::Type::JSObject);
 
   AutoScratchRegister scratch(allocator, masm);
   Maybe<AutoScratchRegister> scratch2, scratch3;
   if (hasExpando) {
     scratch2.emplace(allocator, masm);
@@ -2909,44 +2980,47 @@ bool CacheIRCompiler::emitGuardXrayExpan
     masm.branchTestObject(Assembler::Equal, expandoAddress, failure->label());
     masm.bind(&done);
   }
 
   return true;
 }
 
 bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.branchPtr(Assembler::NotEqual,
                  AbsoluteAddress(cx_->realm()->addressOfMetadataBuilder()),
                  ImmWord(0), failure->label());
 
   return true;
 }
 
 bool CacheIRCompiler::emitGuardObjectGroupNotPretenured() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   StubFieldOffset group(reader.stubOffset(), StubField::Type::ObjectGroup);
   emitLoadStubField(group, scratch);
 
   masm.branchIfPretenuredGroup(scratch, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitLoadDenseElementHoleResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
 
   if (!output.hasValue()) {
     masm.assumeUnreachable(
@@ -2979,16 +3053,17 @@ bool CacheIRCompiler::emitLoadDenseEleme
   masm.bind(&hole);
   masm.moveValue(UndefinedValue(), output.valueReg());
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadTypedElementExistsResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   TypedThingLayout layout = reader.typedThingLayout();
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   Label outOfBounds, done;
 
@@ -3001,16 +3076,17 @@ bool CacheIRCompiler::emitLoadTypedEleme
   masm.bind(&outOfBounds);
   EmitStoreBoolean(masm, false, output);
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadDenseElementExistsResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -3027,16 +3103,17 @@ bool CacheIRCompiler::emitLoadDenseEleme
   BaseObjectElementIndex element(scratch, index);
   masm.branchTestMagic(Assembler::Equal, element, failure->label());
 
   EmitStoreBoolean(masm, true, output);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -3064,16 +3141,17 @@ bool CacheIRCompiler::emitLoadDenseEleme
   masm.bind(&hole);
   EmitStoreBoolean(masm, false, output);
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitArrayJoinResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
 
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, objId);
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
@@ -3111,16 +3189,17 @@ bool CacheIRCompiler::emitArrayJoinResul
   masm.loadValue(elementAddr, output.valueReg());
 
   masm.bind(&finished);
 
   return true;
 }
 
 bool CacheIRCompiler::emitLoadTypedElementResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   TypedThingLayout layout = reader.typedThingLayout();
   Scalar::Type type = reader.scalarType();
 
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
@@ -3174,16 +3253,17 @@ bool CacheIRCompiler::emitLoadTypedEleme
     }
   }
   return true;
 }
 
 void CacheIRCompiler::emitLoadTypedObjectResultShared(
     const Address& fieldAddr, Register scratch, uint32_t typeDescr,
     const AutoOutputRegister& output) {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   MOZ_ASSERT(output.hasValue());
 
   if (SimpleTypeDescrKeyIsScalar(typeDescr)) {
     Scalar::Type type = ScalarTypeFromSimpleTypeDescrKey(typeDescr);
     masm.loadFromTypedArray(type, fieldAddr, output.valueReg(),
                             /* allowDouble = */ true, scratch, nullptr);
   } else {
     ReferenceType type = ReferenceTypeFromSimpleTypeDescrKey(typeDescr);
@@ -3214,29 +3294,31 @@ void CacheIRCompiler::emitLoadTypedObjec
 
       default:
         MOZ_CRASH("Invalid ReferenceTypeDescr");
     }
   }
 }
 
 bool CacheIRCompiler::emitLoadObjectResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
 
   if (output.hasValue()) {
     masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
   } else {
     masm.mov(obj, output.typedReg().gpr());
   }
 
   return true;
 }
 
 bool CacheIRCompiler::emitLoadTypeOfObjectResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   Label slowCheck, isObject, isCallable, isUndefined, done;
   masm.typeOfObject(obj, scratch, &slowCheck, &isObject, &isCallable,
                     &isUndefined);
 
@@ -3272,32 +3354,34 @@ bool CacheIRCompiler::emitLoadTypeOfObje
     masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
   }
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadInt32TruthyResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
 
   Label ifFalse, done;
   masm.branchTestInt32Truthy(false, val, &ifFalse);
   masm.moveValue(BooleanValue(true), output.valueReg());
   masm.jump(&done);
 
   masm.bind(&ifFalse);
   masm.moveValue(BooleanValue(false), output.valueReg());
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadStringTruthyResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register str = allocator.useRegister(masm, reader.stringOperandId());
 
   Label ifFalse, done;
   masm.branch32(Assembler::Equal, Address(str, JSString::offsetOfLength()),
                 Imm32(0), &ifFalse);
   masm.moveValue(BooleanValue(true), output.valueReg());
   masm.jump(&done);
@@ -3305,16 +3389,17 @@ bool CacheIRCompiler::emitLoadStringTrut
   masm.bind(&ifFalse);
   masm.moveValue(BooleanValue(false), output.valueReg());
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadDoubleTruthyResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
 
   Label ifFalse, done, failurePopReg;
 
   // If we're compiling a Baseline IC, FloatReg0 is always available.
   if (mode_ != Mode::Baseline) {
     masm.push(FloatReg0);
@@ -3332,16 +3417,17 @@ bool CacheIRCompiler::emitLoadDoubleTrut
   if (mode_ != Mode::Baseline) {
     masm.pop(FloatReg0);
   }
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadObjectTruthyResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   Label emulatesUndefined, slowPath, done;
   masm.branchIfObjectEmulatesUndefined(obj, scratch, &slowPath,
                                        &emulatesUndefined);
   masm.moveValue(BooleanValue(true), output.valueReg());
@@ -3359,16 +3445,17 @@ bool CacheIRCompiler::emitLoadObjectTrut
   masm.xor32(Imm32(1), ReturnReg);
   masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitLoadNewObjectFromTemplateResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   AutoScratchRegister obj(allocator, masm);
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   TemplateObject templateObj(objectStubFieldUnchecked(reader.stubOffset()));
 
   // Consume the disambiguation id (2 halves)
   mozilla::Unused << reader.uint32Immediate();
@@ -3381,16 +3468,17 @@ bool CacheIRCompiler::emitLoadNewObjectF
 
   masm.createGCObject(obj, scratch, templateObj, gc::DefaultHeap,
                       failure->label());
   masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
   return true;
 }
 
 bool CacheIRCompiler::emitComparePointerResultShared(bool symbol) {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   Register left = symbol ? allocator.useRegister(masm, reader.symbolOperandId())
                          : allocator.useRegister(masm, reader.objOperandId());
   Register right = symbol
                        ? allocator.useRegister(masm, reader.symbolOperandId())
                        : allocator.useRegister(masm, reader.objOperandId());
   JSOp op = reader.jsop();
@@ -3406,24 +3494,27 @@ bool CacheIRCompiler::emitComparePointer
 
   masm.bind(&ifTrue);
   EmitStoreBoolean(masm, true, output);
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitCompareObjectResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   return emitComparePointerResultShared(false);
 }
 
 bool CacheIRCompiler::emitCompareSymbolResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   return emitComparePointerResultShared(true);
 }
 
 bool CacheIRCompiler::emitCompareInt32Result() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register left = allocator.useRegister(masm, reader.int32OperandId());
   Register right = allocator.useRegister(masm, reader.int32OperandId());
   JSOp op = reader.jsop();
 
   Label ifTrue, done;
   masm.branch32(JSOpToCondition(op, /* signed = */ true), left, right, &ifTrue);
 
@@ -3432,16 +3523,17 @@ bool CacheIRCompiler::emitCompareInt32Re
 
   masm.bind(&ifTrue);
   EmitStoreBoolean(masm, true, output);
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitCompareDoubleResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg0);
@@ -3455,16 +3547,17 @@ bool CacheIRCompiler::emitCompareDoubleR
 
   masm.bind(&ifTrue);
   EmitStoreBoolean(masm, true, output);
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitCompareObjectUndefinedNullResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   JSOp op = reader.jsop();
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -3484,30 +3577,33 @@ bool CacheIRCompiler::emitCompareObjectU
     masm.bind(&emulatesUndefined);
     EmitStoreBoolean(masm, op == JSOP_EQ, output);
     masm.bind(&done);
   }
   return true;
 }
 
 bool CacheIRCompiler::emitCallPrintString() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   const char* str = reinterpret_cast<char*>(reader.pointer());
   masm.printf(str);
   return true;
 }
 
 bool CacheIRCompiler::emitBreakpoint() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   masm.breakpoint();
   return true;
 }
 
 void CacheIRCompiler::emitStoreTypedObjectReferenceProp(ValueOperand val,
                                                         ReferenceType type,
                                                         const Address& dest,
                                                         Register scratch) {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   // Callers will post-barrier this store.
 
   switch (type) {
     case ReferenceType::TYPE_ANY:
       EmitPreBarrier(masm, dest, MIRType::Value);
       masm.storeValue(val, dest);
       break;
 
@@ -3532,16 +3628,17 @@ void CacheIRCompiler::emitStoreTypedObje
       masm.unboxString(val, scratch);
       masm.storePtr(scratch, dest);
       break;
   }
 }
 
 void CacheIRCompiler::emitRegisterEnumerator(Register enumeratorsList,
                                              Register iter, Register scratch) {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   // iter->next = list
   masm.storePtr(enumeratorsList, Address(iter, NativeIterator::offsetOfNext()));
 
   // iter->prev = list->prev
   masm.loadPtr(Address(enumeratorsList, NativeIterator::offsetOfPrev()),
                scratch);
   masm.storePtr(scratch, Address(iter, NativeIterator::offsetOfPrev()));
 
@@ -3551,16 +3648,17 @@ void CacheIRCompiler::emitRegisterEnumer
   // list->prev = ni
   masm.storePtr(iter, Address(enumeratorsList, NativeIterator::offsetOfPrev()));
 }
 
 void CacheIRCompiler::emitPostBarrierShared(Register obj,
                                             const ConstantOrRegister& val,
                                             Register scratch,
                                             Register maybeIndex) {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   if (!cx_->nursery().exists()) {
     return;
   }
 
   if (val.constant()) {
     MOZ_ASSERT_IF(val.value().isGCThing(),
                   !IsInsideNursery(val.value().toGCThing()));
     return;
@@ -3602,16 +3700,17 @@ void CacheIRCompiler::emitPostBarrierSha
     masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier));
   }
   masm.PopRegsInMask(save);
 
   masm.bind(&skipBarrier);
 }
 
 bool CacheIRCompiler::emitWrapResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -3642,16 +3741,17 @@ bool CacheIRCompiler::emitWrapResult() {
   // We clobbered the output register, so we have to retag.
   masm.tagValue(JSVAL_TYPE_OBJECT, obj, output.valueReg());
 
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitMegamorphicLoadSlotByValueResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
   bool handleMissing = reader.readBool();
 
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
@@ -3703,16 +3803,17 @@ bool CacheIRCompiler::emitMegamorphicLoa
   }
   masm.setFramePushed(framePushed);
   masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
   masm.adjustStack(sizeof(Value));
   return true;
 }
 
 bool CacheIRCompiler::emitMegamorphicHasPropResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
   bool hasOwn = reader.readBool();
 
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
@@ -3758,16 +3859,17 @@ bool CacheIRCompiler::emitMegamorphicHas
   masm.bind(&ok);
   masm.setFramePushed(framePushed);
   masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
   masm.adjustStack(sizeof(Value));
   return true;
 }
 
 bool CacheIRCompiler::emitCallObjectHasSparseElementResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
 
   AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
   AutoScratchRegister scratch2(allocator, masm);
 
@@ -3808,16 +3910,17 @@ bool CacheIRCompiler::emitCallObjectHasS
   return true;
 }
 
 /*
  * Move a constant value into register dest.
  */
 void CacheIRCompiler::emitLoadStubFieldConstant(StubFieldOffset val,
                                                 Register dest) {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   MOZ_ASSERT(mode_ == Mode::Ion);
   switch (val.getStubFieldType()) {
     case StubField::Type::Shape:
       masm.movePtr(ImmGCPtr(shapeStubField(val.getOffset())), dest);
       break;
     case StubField::Type::String:
       masm.movePtr(ImmGCPtr(stringStubField(val.getOffset())), dest);
       break;
@@ -3840,25 +3943,27 @@ void CacheIRCompiler::emitLoadStubFieldC
  * After this is done executing, dest contains the value; either through a
  * constant load or through the load from the stub data.
  *
  * The current policy is that Baseline will use loads from the stub data (to
  * allow IC sharing), where as Ion doesn't share ICs, and so we can safely use
  * constants in the IC.
  */
 void CacheIRCompiler::emitLoadStubField(StubFieldOffset val, Register dest) {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   if (stubFieldPolicy_ == StubFieldPolicy::Constant) {
     emitLoadStubFieldConstant(val, dest);
   } else {
     Address load(ICStubReg, stubDataOffset_ + val.getOffset());
     masm.loadPtr(load, dest);
   }
 }
 
 bool CacheIRCompiler::emitLoadInstanceOfObjectResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   ValueOperand lhs = allocator.useValueRegister(masm, reader.valOperandId());
   Register proto = allocator.useRegister(masm, reader.objOperandId());
 
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
@@ -3894,16 +3999,17 @@ bool CacheIRCompiler::emitLoadInstanceOf
   masm.bind(&returnTrue);
   EmitStoreBoolean(masm, true, output);
   // fallthrough
   masm.bind(&done);
   return true;
 }
 
 bool CacheIRCompiler::emitMegamorphicLoadSlotResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   StubFieldOffset name(reader.stubOffset(), StubField::Type::String);
   bool handleMissing = reader.readBool();
 
   AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
   AutoScratchRegister scratch2(allocator, masm);
@@ -3951,16 +4057,17 @@ bool CacheIRCompiler::emitMegamorphicLoa
   if (JitOptions.spectreJitToCxxCalls) {
     masm.speculationBarrier();
   }
 
   return true;
 }
 
 bool CacheIRCompiler::emitMegamorphicStoreSlot() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   StubFieldOffset name(reader.stubOffset(), StubField::Type::String);
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   bool needsTypeBarrier = reader.readBool();
 
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
@@ -3999,38 +4106,41 @@ bool CacheIRCompiler::emitMegamorphicSto
   masm.loadValue(Address(masm.getStackPointer(), 0), val);
   masm.adjustStack(sizeof(Value));
 
   masm.branchIfFalseBool(scratch1, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitGuardGroupHasUnanalyzedNewScript() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   StubFieldOffset group(reader.stubOffset(), StubField::Type::ObjectGroup);
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   emitLoadStubField(group, scratch1);
   masm.guardGroupHasUnanalyzedNewScript(scratch1, scratch2, failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitLoadObject() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register reg = allocator.defineRegister(masm, reader.objOperandId());
   StubFieldOffset obj(reader.stubOffset(), StubField::Type::JSObject);
   emitLoadStubField(obj, reg);
   return true;
 }
 
 bool CacheIRCompiler::emitCallInt32ToString() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register input = allocator.useRegister(masm, reader.int32OperandId());
   Register result = allocator.defineRegister(masm, reader.stringOperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -4048,16 +4158,17 @@ bool CacheIRCompiler::emitCallInt32ToStr
   masm.mov(ReturnReg, result);
   masm.PopRegsInMask(volatileRegs);
 
   masm.branchPtr(Assembler::Equal, result, ImmPtr(0), failure->label());
   return true;
 }
 
 bool CacheIRCompiler::emitCallNumberToString() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   // Float register must be preserved. The BinaryArith ICs use
   // the fact that baseline has them available, as well as fixed temps on
   // LBinaryCache.
   allocator.ensureDoubleRegister(masm, reader.valOperandId(), FloatReg0);
   Register result = allocator.defineRegister(masm, reader.stringOperandId());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
@@ -4113,16 +4224,17 @@ void js::jit::LoadTypedThingLength(Macro
       masm.loadTypedObjectLength(obj, result);
       break;
     default:
       MOZ_CRASH();
   }
 }
 
 bool CacheIRCompiler::emitCallIsSuspendedGeneratorResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   AutoScratchRegister scratch2(allocator, masm);
   ValueOperand input = allocator.useValueRegister(masm, reader.valOperandId());
 
   // Test if it's an object.
   Label returnFalse, done;
   masm.branchTestObject(Assembler::NotEqual, input, &returnFalse);
--- a/js/src/jit/IonCacheIRCompiler.cpp
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -639,16 +639,17 @@ JitCode* IonCacheIRCompiler::compile() {
         CodeLocationLabel(newStubCode, *stubJitCodeOffset_),
         ImmPtr(newStubCode.get()), ImmPtr((void*)-1));
   }
 
   return newStubCode;
 }
 
 bool IonCacheIRCompiler::emitGuardShape() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Register obj = allocator.useRegister(masm, objId);
   Shape* shape = shapeStubField(reader.stubOffset());
 
   bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
 
   Maybe<AutoScratchRegister> maybeScratch;
   if (needSpectreMitigations) {
@@ -667,16 +668,17 @@ bool IonCacheIRCompiler::emitGuardShape(
     masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, shape,
                                                 failure->label());
   }
 
   return true;
 }
 
 bool IonCacheIRCompiler::emitGuardGroup() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Register obj = allocator.useRegister(masm, objId);
   ObjectGroup* group = groupStubField(reader.stubOffset());
 
   bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
 
   Maybe<AutoScratchRegister> maybeScratch;
   if (needSpectreMitigations) {
@@ -695,16 +697,17 @@ bool IonCacheIRCompiler::emitGuardGroup(
     masm.branchTestObjGroupNoSpectreMitigations(Assembler::NotEqual, obj, group,
                                                 failure->label());
   }
 
   return true;
 }
 
 bool IonCacheIRCompiler::emitGuardProto() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   JSObject* proto = objectStubField(reader.stubOffset());
 
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -712,16 +715,17 @@ bool IonCacheIRCompiler::emitGuardProto(
 
   masm.loadObjProto(obj, scratch);
   masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(proto),
                  failure->label());
   return true;
 }
 
 bool IonCacheIRCompiler::emitGuardCompartment() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   JSObject* globalWrapper = objectStubField(reader.stubOffset());
   JS::Compartment* compartment = compartmentStubField(reader.stubOffset());
   AutoScratchRegister scratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -735,16 +739,17 @@ bool IonCacheIRCompiler::emitGuardCompar
                  ImmPtr(&DeadObjectProxy::singleton), failure->label());
 
   masm.branchTestObjCompartment(Assembler::NotEqual, obj, compartment, scratch,
                                 failure->label());
   return true;
 }
 
 bool IonCacheIRCompiler::emitGuardAnyClass() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ObjOperandId objId = reader.objOperandId();
   Register obj = allocator.useRegister(masm, objId);
   AutoScratchRegister scratch(allocator, masm);
 
   const Class* clasp = classStubField(reader.stubOffset());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
@@ -758,45 +763,48 @@ bool IonCacheIRCompiler::emitGuardAnyCla
     masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                 scratch, failure->label());
   }
 
   return true;
 }
 
 bool IonCacheIRCompiler::emitGuardHasProxyHandler() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   const void* handler = proxyHandlerStubField(reader.stubOffset());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   Address handlerAddr(obj, ProxyObject::offsetOfHandler());
   masm.branchPtr(Assembler::NotEqual, handlerAddr, ImmPtr(handler),
                  failure->label());
   return true;
 }
 
 bool IonCacheIRCompiler::emitGuardSpecificObject() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   JSObject* expected = objectStubField(reader.stubOffset());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
   masm.branchPtr(Assembler::NotEqual, obj, ImmGCPtr(expected),
                  failure->label());
   return true;
 }
 
 bool IonCacheIRCompiler::emitGuardSpecificAtom() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register str = allocator.useRegister(masm, reader.stringOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   JSAtom* atom = &stringStubField(reader.stubOffset())->asAtom();
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -832,16 +840,17 @@ bool IonCacheIRCompiler::emitGuardSpecif
   masm.PopRegsInMaskIgnore(volatileRegs, ignore);
   masm.branchIfFalseBool(scratch, failure->label());
 
   masm.bind(&done);
   return true;
 }
 
 bool IonCacheIRCompiler::emitGuardSpecificSymbol() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register sym = allocator.useRegister(masm, reader.symbolOperandId());
   JS::Symbol* expected = symbolStubField(reader.stubOffset());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
 
@@ -850,35 +859,38 @@ bool IonCacheIRCompiler::emitGuardSpecif
   return true;
 }
 
 bool IonCacheIRCompiler::emitLoadValueResult() {
   MOZ_CRASH("Baseline-specific op");
 }
 
 bool IonCacheIRCompiler::emitLoadFixedSlotResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   int32_t offset = int32StubField(reader.stubOffset());
   masm.loadTypedOrValue(Address(obj, offset), output);
   return true;
 }
 
 bool IonCacheIRCompiler::emitLoadDynamicSlotResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   int32_t offset = int32StubField(reader.stubOffset());
 
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
   masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
   masm.loadTypedOrValue(Address(scratch, offset), output);
   return true;
 }
 
 bool IonCacheIRCompiler::emitGuardHasGetterSetter() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Shape* shape = shapeStubField(reader.stubOffset());
 
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
@@ -901,16 +913,17 @@ bool IonCacheIRCompiler::emitGuardHasGet
   masm.mov(ReturnReg, scratch1);
   masm.PopRegsInMask(volatileRegs);
 
   masm.branchIfFalseBool(scratch1, failure->label());
   return true;
 }
 
 bool IonCacheIRCompiler::emitCallScriptedGetterResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
   AutoScratchRegister scratch(allocator, masm);
 
   bool isCrossRealm = reader.readBool();
@@ -972,16 +985,17 @@ bool IonCacheIRCompiler::emitCallScripte
   }
 
   masm.storeCallResultValue(output);
   masm.freeStack(masm.framePushed() - framePushedBefore);
   return true;
 }
 
 bool IonCacheIRCompiler::emitCallNativeGetterResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
   MOZ_ASSERT(target->isNative());
 
   AutoScratchRegister argJSContext(allocator, masm);
@@ -1045,16 +1059,17 @@ bool IonCacheIRCompiler::emitCallNativeG
     masm.speculationBarrier();
   }
 
   masm.adjustStack(IonOOLNativeExitFrameLayout::Size(0));
   return true;
 }
 
 bool IonCacheIRCompiler::emitCallProxyGetResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   jsid id = idStubField(reader.stubOffset());
 
   // ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id,
   //                  MutableHandleValue vp)
@@ -1111,16 +1126,17 @@ bool IonCacheIRCompiler::emitCallProxyGe
   }
 
   // masm.leaveExitFrame & pop locals
   masm.adjustStack(IonOOLProxyExitFrameLayout::Size());
   return true;
 }
 
 bool IonCacheIRCompiler::emitCallProxyGetByValueResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
 
   allocator.discardStack(masm);
 
@@ -1133,16 +1149,17 @@ bool IonCacheIRCompiler::emitCallProxyGe
     return false;
   }
 
   masm.storeCallResultValue(output);
   return true;
 }
 
 bool IonCacheIRCompiler::emitCallProxyHasPropResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
   bool hasOwn = reader.readBool();
 
   allocator.discardStack(masm);
@@ -1162,16 +1179,17 @@ bool IonCacheIRCompiler::emitCallProxyHa
     }
   }
 
   masm.storeCallResultValue(output);
   return true;
 }
 
 bool IonCacheIRCompiler::emitCallNativeGetElementResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
 
   allocator.discardStack(masm);
 
@@ -1185,16 +1203,17 @@ bool IonCacheIRCompiler::emitCallNativeG
     return false;
   }
 
   masm.storeCallResultValue(output);
   return true;
 }
 
 bool IonCacheIRCompiler::emitLoadUnboxedPropertyResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
 
   JSValueType fieldType = reader.valueType();
   int32_t fieldOffset = int32StubField(reader.stubOffset());
   masm.loadUnboxedProperty(Address(obj, fieldOffset), fieldType, output);
   return true;
 }
@@ -1211,16 +1230,17 @@ bool IonCacheIRCompiler::emitLoadFrameNu
   MOZ_CRASH("Baseline-specific op");
 }
 
 bool IonCacheIRCompiler::emitLoadFrameArgumentResult() {
   MOZ_CRASH("Baseline-specific op");
 }
 
 bool IonCacheIRCompiler::emitLoadEnvironmentFixedSlotResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   int32_t offset = int32StubField(reader.stubOffset());
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
   }
@@ -1230,16 +1250,17 @@ bool IonCacheIRCompiler::emitLoadEnviron
   masm.branchTestMagic(Assembler::Equal, slot, failure->label());
 
   // Load the value.
   masm.loadTypedOrValue(slot, output);
   return true;
 }
 
 bool IonCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   int32_t offset = int32StubField(reader.stubOffset());
   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -1252,20 +1273,22 @@ bool IonCacheIRCompiler::emitLoadEnviron
   masm.branchTestMagic(Assembler::Equal, slot, failure->label());
 
   // Load the value.
   masm.loadTypedOrValue(slot, output);
   return true;
 }
 
 bool IonCacheIRCompiler::emitLoadStringResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   MOZ_CRASH("not used in ion");
 }
 
 bool IonCacheIRCompiler::emitCallStringSplitResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
   AutoOutputRegister output(*this);
 
   Register str = allocator.useRegister(masm, reader.stringOperandId());
   Register sep = allocator.useRegister(masm, reader.stringOperandId());
   ObjectGroup* group = groupStubField(reader.stubOffset());
 
   allocator.discardStack(masm);
@@ -1281,16 +1304,17 @@ bool IonCacheIRCompiler::emitCallStringS
     return false;
   }
 
   masm.storeCallResultValue(output);
   return true;
 }
 
 bool IonCacheIRCompiler::emitCompareStringResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
   AutoOutputRegister output(*this);
 
   Register left = allocator.useRegister(masm, reader.stringOperandId());
   Register right = allocator.useRegister(masm, reader.stringOperandId());
   JSOp op = reader.jsop();
 
   allocator.discardStack(masm);
@@ -1470,16 +1494,17 @@ static void EmitCheckPropertyTypes(Macro
   masm.bind(&done);
   if (objScratch != InvalidReg) {
     masm.Pop(objScratch);
   }
   masm.Pop(obj);
 }
 
 bool IonCacheIRCompiler::emitStoreFixedSlot() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   int32_t offset = int32StubField(reader.stubOffset());
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
 
   Maybe<AutoScratchRegister> scratch;
   if (needsPostBarrier()) {
     scratch.emplace(allocator, masm);
@@ -1500,16 +1525,17 @@ bool IonCacheIRCompiler::emitStoreFixedS
   masm.storeConstantOrRegister(val, slot);
   if (needsPostBarrier()) {
     emitPostBarrierSlot(obj, val, scratch.ref());
   }
   return true;
 }
 
 bool IonCacheIRCompiler::emitStoreDynamicSlot() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   int32_t offset = int32StubField(reader.stubOffset());
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
   AutoScratchRegister scratch(allocator, masm);
 
   if (typeCheckInfo_->isSet()) {
     FailurePath* failure;
@@ -1527,16 +1553,17 @@ bool IonCacheIRCompiler::emitStoreDynami
   masm.storeConstantOrRegister(val, slot);
   if (needsPostBarrier()) {
     emitPostBarrierSlot(obj, val, scratch);
   }
   return true;
 }
 
 bool IonCacheIRCompiler::emitAddAndStoreSlotShared(CacheOp op) {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   int32_t offset = int32StubField(reader.stubOffset());
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
 
   AutoScratchRegister scratch1(allocator, masm);
 
   Maybe<AutoScratchRegister> scratch2;
@@ -1621,28 +1648,32 @@ bool IonCacheIRCompiler::emitAddAndStore
   if (needsPostBarrier()) {
     emitPostBarrierSlot(obj, val, scratch1);
   }
 
   return true;
 }
 
 bool IonCacheIRCompiler::emitAddAndStoreFixedSlot() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot);
 }
 
 bool IonCacheIRCompiler::emitAddAndStoreDynamicSlot() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot);
 }
 
 bool IonCacheIRCompiler::emitAllocateAndStoreDynamicSlot() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot);
 }
 
 bool IonCacheIRCompiler::emitStoreUnboxedProperty() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   JSValueType fieldType = reader.valueType();
   int32_t offset = int32StubField(reader.stubOffset());
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
 
   Maybe<AutoScratchRegister> scratch;
   if (needsPostBarrier() && UnboxedTypeNeedsPostBarrier(fieldType)) {
@@ -1665,16 +1696,17 @@ bool IonCacheIRCompiler::emitStoreUnboxe
   masm.storeUnboxedProperty(fieldAddr, fieldType, val, /* failure = */ nullptr);
   if (needsPostBarrier() && UnboxedTypeNeedsPostBarrier(fieldType)) {
     emitPostBarrierSlot(obj, val, scratch.ref());
   }
   return true;
 }
 
 bool IonCacheIRCompiler::emitStoreTypedObjectReferenceProperty() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   int32_t offset = int32StubField(reader.stubOffset());
   TypedThingLayout layout = reader.typedThingLayout();
   ReferenceType type = reader.referenceTypeDescrType();
 
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
 
   AutoScratchRegister scratch1(allocator, masm);
@@ -1699,16 +1731,17 @@ bool IonCacheIRCompiler::emitStoreTypedO
 
   if (needsPostBarrier() && type != ReferenceType::TYPE_STRING) {
     emitPostBarrierSlot(obj, val, scratch1);
   }
   return true;
 }
 
 bool IonCacheIRCompiler::emitStoreTypedObjectScalarProperty() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   int32_t offset = int32StubField(reader.stubOffset());
   TypedThingLayout layout = reader.typedThingLayout();
   Scalar::Type type = reader.scalarType();
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
@@ -1788,16 +1821,17 @@ static void EmitAssertNoCopyOnWriteEleme
   masm.branchTest32(Assembler::Zero, elementsFlags,
                     Imm32(ObjectElements::COPY_ON_WRITE), &ok);
   masm.assumeUnreachable("Unexpected copy-on-write elements in Ion IC!");
   masm.bind(&ok);
 #endif
 }
 
 bool IonCacheIRCompiler::emitStoreDenseElement() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
 
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
@@ -1833,16 +1867,17 @@ bool IonCacheIRCompiler::emitStoreDenseE
   EmitStoreDenseElement(masm, val, scratch1, element);
   if (needsPostBarrier()) {
     emitPostBarrierElement(obj, val, scratch1, index);
   }
   return true;
 }
 
 bool IonCacheIRCompiler::emitStoreDenseElementHole() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
 
   // handleAdd boolean is only relevant for Baseline. Ion ICs can always
   // handle adds as we don't have to set any flags on the fallback stub to
   // track this.
@@ -1937,16 +1972,17 @@ bool IonCacheIRCompiler::emitStoreDenseE
 }
 
 bool IonCacheIRCompiler::emitArrayPush() {
   MOZ_ASSERT_UNREACHABLE("emitArrayPush not supported for IonCaches.");
   return false;
 }
 
 bool IonCacheIRCompiler::emitStoreTypedElement() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register index = allocator.useRegister(masm, reader.int32OperandId());
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
 
   TypedThingLayout layout = reader.typedThingLayout();
   Scalar::Type arrayType = reader.scalarType();
   bool handleOOB = reader.readBool();
@@ -2006,16 +2042,17 @@ bool IonCacheIRCompiler::emitStoreTypedE
     masm.storeToTypedIntArray(arrayType, valueToStore, dest);
   }
 
   masm.bind(&done);
   return true;
 }
 
 bool IonCacheIRCompiler::emitCallNativeSetter() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
   MOZ_ASSERT(target->isNative());
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
 
@@ -2071,16 +2108,17 @@ bool IonCacheIRCompiler::emitCallNativeS
     masm.switchToRealm(cx_->realm(), ReturnReg);
   }
 
   masm.adjustStack(IonOOLNativeExitFrameLayout::Size(1));
   return true;
 }
 
 bool IonCacheIRCompiler::emitCallScriptedSetter() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
 
   bool isCrossRealm = reader.readBool();
@@ -2143,16 +2181,17 @@ bool IonCacheIRCompiler::emitCallScripte
     masm.switchToRealm(cx_->realm(), ReturnReg);
   }
 
   masm.freeStack(masm.framePushed() - framePushedBefore);
   return true;
 }
 
 bool IonCacheIRCompiler::emitCallSetArrayLength() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   bool strict = reader.readBool();
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
 
   allocator.discardStack(masm);
@@ -2161,16 +2200,17 @@ bool IonCacheIRCompiler::emitCallSetArra
   masm.Push(Imm32(strict));
   masm.Push(val);
   masm.Push(obj);
 
   return callVM(masm, SetArrayLengthInfo);
 }
 
 bool IonCacheIRCompiler::emitCallProxySet() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
   jsid id = idStubField(reader.stubOffset());
   bool strict = reader.readBool();
 
@@ -2183,16 +2223,17 @@ bool IonCacheIRCompiler::emitCallProxySe
   masm.Push(val);
   masm.Push(id, scratch);
   masm.Push(obj);
 
   return callVM(masm, ProxySetPropertyInfo);
 }
 
 bool IonCacheIRCompiler::emitCallProxySetByValue() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ConstantOrRegister idVal =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
   bool strict = reader.readBool();
@@ -2204,16 +2245,17 @@ bool IonCacheIRCompiler::emitCallProxySe
   masm.Push(val);
   masm.Push(idVal);
   masm.Push(obj);
 
   return callVM(masm, ProxySetPropertyByValueInfo);
 }
 
 bool IonCacheIRCompiler::emitCallAddOrUpdateSparseElementHelper() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register id = allocator.useRegister(masm, reader.int32OperandId());
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   bool strict = reader.readBool();
 
   allocator.discardStack(masm);
@@ -2223,16 +2265,17 @@ bool IonCacheIRCompiler::emitCallAddOrUp
   masm.Push(val);
   masm.Push(id);
   masm.Push(obj);
 
   return callVM(masm, AddOrUpdateSparseElementHelperInfo);
 }
 
 bool IonCacheIRCompiler::emitCallGetSparseElementResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
   AutoOutputRegister output(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   Register id = allocator.useRegister(masm, reader.int32OperandId());
 
   allocator.discardStack(masm);
   prepareVMCall(masm, save);
@@ -2243,16 +2286,17 @@ bool IonCacheIRCompiler::emitCallGetSpar
     return false;
   }
 
   masm.storeCallResultValue(output);
   return true;
 }
 
 bool IonCacheIRCompiler::emitMegamorphicSetElement() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
 
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ConstantOrRegister idVal =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
   ConstantOrRegister val =
       allocator.useConstantOrRegister(masm, reader.valOperandId());
   bool strict = reader.readBool();
@@ -2265,16 +2309,17 @@ bool IonCacheIRCompiler::emitMegamorphic
   masm.Push(val);
   masm.Push(idVal);
   masm.Push(obj);
 
   return callVM(masm, SetObjectElementInfo);
 }
 
 bool IonCacheIRCompiler::emitLoadTypedObjectResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoOutputRegister output(*this);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
 
   TypedThingLayout layout = reader.typedThingLayout();
   uint32_t typeDescr = reader.typeDescrKey();
   uint32_t fieldOffset = int32StubField(reader.stubOffset());
@@ -2282,35 +2327,40 @@ bool IonCacheIRCompiler::emitLoadTypedOb
   // Get the object's data pointer.
   LoadTypedThingData(masm, layout, obj, scratch1);
 
   Address fieldAddr(scratch1, fieldOffset);
   emitLoadTypedObjectResultShared(fieldAddr, scratch2, typeDescr, output);
   return true;
 }
 
-bool IonCacheIRCompiler::emitTypeMonitorResult() { return emitReturnFromIC(); }
+bool IonCacheIRCompiler::emitTypeMonitorResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
+  return emitReturnFromIC();
+}
 
 bool IonCacheIRCompiler::emitReturnFromIC() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   if (!savedLiveRegs_) {
     allocator.restoreInputState(masm);
   }
 
   RepatchLabel rejoin;
   rejoinOffset_ = masm.jumpWithPatch(&rejoin);
   masm.bind(&rejoin);
   return true;
 }
 
 bool IonCacheIRCompiler::emitLoadStackValue() {
   MOZ_ASSERT_UNREACHABLE("emitLoadStackValue not supported for IonCaches.");
   return false;
 }
 
 bool IonCacheIRCompiler::emitGuardAndGetIterator() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
 
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
   AutoScratchRegister niScratch(allocator, masm);
 
   PropertyIteratorObject* iterobj =
       &objectStubField(reader.stubOffset())->as<PropertyIteratorObject>();
@@ -2348,16 +2398,17 @@ bool IonCacheIRCompiler::emitGuardAndGet
   // Chain onto the active iterator stack.
   masm.loadPtr(AbsoluteAddress(enumerators), scratch1);
   emitRegisterEnumerator(scratch1, niScratch, scratch2);
 
   return true;
 }
 
 bool IonCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
   Shape* shape = shapeStubField(reader.stubOffset());
 
   AutoScratchRegister objScratch(allocator, masm);
 
   FailurePath* failure;
   if (!addFailurePath(&failure)) {
     return false;
@@ -2373,16 +2424,17 @@ bool IonCacheIRCompiler::emitGuardDOMExp
   masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch,
                                               shape, failure->label());
 
   masm.bind(&done);
   return true;
 }
 
 bool IonCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   Register obj = allocator.useRegister(masm, reader.objOperandId());
   ExpandoAndGeneration* expandoAndGeneration =
       rawWordStubField<ExpandoAndGeneration*>(reader.stubOffset());
   uint64_t* generationFieldPtr =
       expandoGenerationStubFieldPtr(reader.stubOffset());
 
   AutoScratchRegister scratch1(allocator, masm);
   AutoScratchRegister scratch2(allocator, masm);
@@ -2516,16 +2568,17 @@ void IonIC::attachCacheIRStub(JSContext*
     return;
   }
 
   attachStub(newStub, code);
   *attached = true;
 }
 
 bool IonCacheIRCompiler::emitCallStringConcatResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
   AutoOutputRegister output(*this);
 
   Register lhs = allocator.useRegister(masm, reader.stringOperandId());
   Register rhs = allocator.useRegister(masm, reader.stringOperandId());
 
   allocator.discardStack(masm);
 
@@ -2544,16 +2597,17 @@ bool IonCacheIRCompiler::emitCallStringC
 
 typedef bool (*DoConcatStringObjectFn)(JSContext*, HandleValue, HandleValue,
                                        MutableHandleValue);
 const VMFunction DoIonConcatStringObjectInfo =
     FunctionInfo<DoConcatStringObjectFn>(DoConcatStringObject,
                                          "DoIonConcatStringObject");
 
 bool IonCacheIRCompiler::emitCallStringObjectConcatResult() {
+  JitSpew(JitSpew_Codegen, __FUNCTION__);
   AutoSaveLiveRegisters save(*this);
   AutoOutputRegister output(*this);
 
   ValueOperand lhs = allocator.useValueRegister(masm, reader.valOperandId());
   ValueOperand rhs = allocator.useValueRegister(masm, reader.valOperandId());
 
   allocator.discardStack(masm);