Bug 1442561 part 2 - Add Spectre mitigations for most shape/group/class guards in JIT code. r=luke,tcampbell
author: Jan de Mooij <jdemooij@mozilla.com>
Sun, 11 Mar 2018 20:18:24 +0100
changeset 407529 240114d8acd30222fe7d18b88b1ba25b227d3c9b
parent 407528 2bf39f14c6f8772d66ee1d369bf8772b38bf3e08
child 407530 497aead7d738c85926e1edf6259dde2eef6d368b
child 407544 a6f5fb18e6bcc9bffe4a0209a22d8a25510936be
push id: 100708
push user: jandemooij@gmail.com
push date: Sun, 11 Mar 2018 19:18:53 +0000
treeherder: mozilla-inbound@240114d8acd3 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: luke, tcampbell
bugs: 1442561
milestone: 60.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1442561 part 2 - Add Spectre mitigations for most shape/group/class guards in JIT code. r=luke,tcampbell
js/src/jit/AliasAnalysisShared.cpp
js/src/jit/BaselineCacheIRCompiler.cpp
js/src/jit/BaselineCompiler.cpp
js/src/jit/BaselineIC.cpp
js/src/jit/CacheIRCompiler.cpp
js/src/jit/CacheIRCompiler.h
js/src/jit/CodeGenerator.cpp
js/src/jit/CodeGenerator.h
js/src/jit/IonAnalysis.cpp
js/src/jit/IonCacheIRCompiler.cpp
js/src/jit/Lowering.cpp
js/src/jit/Lowering.h
js/src/jit/MIR.h
js/src/jit/MOpcodes.h
js/src/jit/MacroAssembler-inl.h
js/src/jit/MacroAssembler.cpp
js/src/jit/MacroAssembler.h
js/src/jit/SharedIC.cpp
js/src/jit/arm/MacroAssembler-arm-inl.h
js/src/jit/arm64/MacroAssembler-arm64-inl.h
js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
js/src/jit/shared/LIR-shared.h
js/src/jit/shared/LOpcodes-shared.h
js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
js/src/vm/UnboxedObject.cpp
js/src/vm/UnboxedObject.h
--- a/js/src/jit/AliasAnalysisShared.cpp
+++ b/js/src/jit/AliasAnalysisShared.cpp
@@ -105,17 +105,16 @@ GetObject(const MDefinition* ins)
       case MDefinition::Opcode::LoadFixedSlotAndUnbox:
       case MDefinition::Opcode::StoreFixedSlot:
       case MDefinition::Opcode::GetPropertyPolymorphic:
       case MDefinition::Opcode::SetPropertyPolymorphic:
       case MDefinition::Opcode::GuardShape:
       case MDefinition::Opcode::GuardReceiverPolymorphic:
       case MDefinition::Opcode::GuardObjectGroup:
       case MDefinition::Opcode::GuardObjectIdentity:
-      case MDefinition::Opcode::GuardClass:
       case MDefinition::Opcode::GuardUnboxedExpando:
       case MDefinition::Opcode::LoadUnboxedExpando:
       case MDefinition::Opcode::LoadSlot:
       case MDefinition::Opcode::StoreSlot:
       case MDefinition::Opcode::InArray:
       case MDefinition::Opcode::LoadElementHole:
       case MDefinition::Opcode::TypedArrayElements:
       case MDefinition::Opcode::TypedObjectElements:
--- a/js/src/jit/BaselineCacheIRCompiler.cpp
+++ b/js/src/jit/BaselineCacheIRCompiler.cpp
@@ -205,42 +205,70 @@ BaselineCacheIRCompiler::compile()
     }
 
     return newStubCode;
 }
 
 bool
 BaselineCacheIRCompiler::emitGuardShape()
 {
-    Register obj = allocator.useRegister(masm, reader.objOperandId());
-    AutoScratchRegister scratch(allocator, masm);
+    ObjOperandId objId = reader.objOperandId();
+    Register obj = allocator.useRegister(masm, objId);
+    AutoScratchRegister scratch1(allocator, masm);
+
+    bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
+
+    Maybe<AutoScratchRegister> maybeScratch2;
+    if (needSpectreMitigations)
+        maybeScratch2.emplace(allocator, masm);
 
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
     Address addr(stubAddress(reader.stubOffset()));
-    masm.loadPtr(addr, scratch);
-    masm.branchTestObjShape(Assembler::NotEqual, obj, scratch, failure->label());
+    masm.loadPtr(addr, scratch1);
+    if (needSpectreMitigations) {
+        masm.branchTestObjShape(Assembler::NotEqual, obj, scratch1, *maybeScratch2, obj,
+                                failure->label());
+    } else {
+        masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, scratch1,
+                                                    failure->label());
+    }
+
     return true;
 }
 
 bool
 BaselineCacheIRCompiler::emitGuardGroup()
 {
-    Register obj = allocator.useRegister(masm, reader.objOperandId());
-    AutoScratchRegister scratch(allocator, masm);
+    ObjOperandId objId = reader.objOperandId();
+    Register obj = allocator.useRegister(masm, objId);
+    AutoScratchRegister scratch1(allocator, masm);
+
+    bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
+
+    Maybe<AutoScratchRegister> maybeScratch2;
+    if (needSpectreMitigations)
+        maybeScratch2.emplace(allocator, masm);
 
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
     Address addr(stubAddress(reader.stubOffset()));
-    masm.loadPtr(addr, scratch);
-    masm.branchTestObjGroup(Assembler::NotEqual, obj, scratch, failure->label());
+    masm.loadPtr(addr, scratch1);
+    if (needSpectreMitigations) {
+        masm.branchTestObjGroup(Assembler::NotEqual, obj, scratch1, *maybeScratch2, obj,
+                                failure->label());
+    } else {
+        masm.branchTestObjGroupNoSpectreMitigations(Assembler::NotEqual, obj, scratch1,
+                                                    failure->label());
+    }
+
     return true;
 }
 
 bool
 BaselineCacheIRCompiler::emitGuardGroupHasUnanalyzedNewScript()
 {
     Address addr(stubAddress(reader.stubOffset()));
     AutoScratchRegister scratch1(allocator, masm);
@@ -285,25 +313,33 @@ BaselineCacheIRCompiler::emitGuardCompar
     Address addr(stubAddress(reader.stubOffset()));
     masm.branchTestObjCompartment(Assembler::NotEqual, obj, addr, scratch, failure->label());
     return true;
 }
 
 bool
 BaselineCacheIRCompiler::emitGuardAnyClass()
 {
-    Register obj = allocator.useRegister(masm, reader.objOperandId());
+    ObjOperandId objId = reader.objOperandId();
+    Register obj = allocator.useRegister(masm, objId);
     AutoScratchRegister scratch(allocator, masm);
 
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
     Address testAddr(stubAddress(reader.stubOffset()));
-    masm.branchTestObjClass(Assembler::NotEqual, obj, scratch, testAddr, failure->label());
+    if (objectGuardNeedsSpectreMitigations(objId)) {
+        masm.branchTestObjClass(Assembler::NotEqual, obj, testAddr, scratch, obj,
+                                failure->label());
+    } else {
+        masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, testAddr, scratch,
+                                                    failure->label());
+    }
+
     return true;
 }
 
 bool
 BaselineCacheIRCompiler::emitGuardHasProxyHandler()
 {
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     AutoScratchRegister scratch(allocator, masm);
@@ -398,19 +434,21 @@ BaselineCacheIRCompiler::emitGuardSpecif
 bool
 BaselineCacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto()
 {
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     bool hasExpando = reader.readBool();
     Address shapeWrapperAddress(stubAddress(reader.stubOffset()));
 
     AutoScratchRegister scratch(allocator, masm);
-    Maybe<AutoScratchRegister> scratch2;
-    if (hasExpando)
+    Maybe<AutoScratchRegister> scratch2, scratch3;
+    if (hasExpando) {
         scratch2.emplace(allocator, masm);
+        scratch3.emplace(allocator, masm);
+    }
 
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
     masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
     Address holderAddress(scratch, sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
     Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(GetXrayJitInfo()->holderExpandoSlot));
@@ -422,17 +460,18 @@ BaselineCacheIRCompiler::emitGuardXrayEx
         masm.unboxObject(expandoAddress, scratch);
 
         // Unwrap the expando before checking its shape.
         masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
         masm.unboxObject(Address(scratch, detail::ProxyReservedSlots::offsetOfPrivateSlot()), scratch);
 
         masm.loadPtr(shapeWrapperAddress, scratch2.ref());
         LoadShapeWrapperContents(masm, scratch2.ref(), scratch2.ref(), failure->label());
-        masm.branchTestObjShape(Assembler::NotEqual, scratch, scratch2.ref(), failure->label());
+        masm.branchTestObjShape(Assembler::NotEqual, scratch, *scratch2, *scratch3, scratch,
+                                failure->label());
 
         // The reserved slots on the expando should all be in fixed slots.
         Address protoAddress(scratch, NativeObject::getFixedSlotOffset(GetXrayJitInfo()->expandoProtoSlot));
         masm.branchTestUndefined(Assembler::NotEqual, protoAddress, failure->label());
     } else {
         Label done;
         masm.branchTestObject(Assembler::NotEqual, holderAddress, &done);
         masm.unboxObject(holderAddress, scratch);
@@ -2025,17 +2064,20 @@ BaselineCacheIRCompiler::emitGuardDOMExp
         return false;
 
     Label done;
     masm.branchTestUndefined(Assembler::Equal, val, &done);
 
     masm.debugAssertIsObject(val);
     masm.loadPtr(shapeAddr, shapeScratch);
     masm.unboxObject(val, objScratch);
-    masm.branchTestObjShape(Assembler::NotEqual, objScratch, shapeScratch, failure->label());
+    // The expando object is not used in this case, so we don't need Spectre
+    // mitigations.
+    masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch, shapeScratch,
+                                                failure->label());
 
     masm.bind(&done);
     return true;
 }
 
 bool
 BaselineCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration()
 {
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -4375,17 +4375,18 @@ BaselineCompiler::emit_JSOP_SUPERFUN()
     masm.loadObjProto(callee, proto);
 
     // Use VMCall for missing or lazy proto
     Label needVMCall;
     MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
     masm.branchPtr(Assembler::BelowOrEqual, proto, ImmWord(1), &needVMCall);
 
     // Use VMCall for non-JSFunction objects (eg. Proxy)
-    masm.branchTestObjClass(Assembler::NotEqual, proto, scratch, &JSFunction::class_, &needVMCall);
+    masm.branchTestObjClass(Assembler::NotEqual, proto, &JSFunction::class_, scratch, proto,
+                            &needVMCall);
 
     // Use VMCall if not constructor
     masm.load16ZeroExtend(Address(proto, JSFunction::offsetOfFlags()), scratch);
     masm.branchTest32(Assembler::Zero, scratch, Imm32(JSFunction::CONSTRUCTOR), &needVMCall);
 
     // Valid constructor
     Label hasSuperFun;
     masm.jump(&hasSuperFun);
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -421,20 +421,21 @@ bool
 ICTypeUpdate_ObjectGroup::Compiler::generateStubCode(MacroAssembler& masm)
 {
     MOZ_ASSERT(engine_ == Engine::Baseline);
 
     Label failure;
     masm.branchTestObject(Assembler::NotEqual, R0, &failure);
 
     // Guard on the object's ObjectGroup.
-    Register scratch = R1.scratchReg();
-    Register obj = masm.extractObject(R0, scratch);
     Address expectedGroup(ICStubReg, ICTypeUpdate_ObjectGroup::offsetOfGroup());
-    masm.branchTestObjGroup(Assembler::NotEqual, obj, expectedGroup, scratch, &failure);
+    Register scratch1 = R1.scratchReg();
+    masm.unboxObject(R0, scratch1);
+    masm.branchTestObjGroup(Assembler::NotEqual, scratch1, expectedGroup, scratch1,
+                            R0.payloadOrValueReg(), &failure);
 
     // Group matches, load true into R1.scratchReg() and return.
     masm.mov(ImmWord(1), R1.scratchReg());
     EmitReturnFromIC(masm);
 
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
     return true;
@@ -2626,18 +2627,18 @@ ICCallStubCompiler::guardFunApply(MacroA
         masm.loadValue(secondArgSlot, secondArgVal);
 
         masm.branchTestObject(Assembler::NotEqual, secondArgVal, failure);
         Register secondArgObj = masm.extractObject(secondArgVal, ExtractTemp1);
 
         regsx.add(secondArgVal);
         regsx.takeUnchecked(secondArgObj);
 
-        masm.branchTestObjClass(Assembler::NotEqual, secondArgObj, regsx.getAny(),
-                                &ArrayObject::class_, failure);
+        masm.branchTestObjClass(Assembler::NotEqual, secondArgObj, &ArrayObject::class_,
+                                regsx.getAny(), secondArgObj, failure);
 
         // Get the array elements and ensure that initializedLength == length
         masm.loadPtr(Address(secondArgObj, NativeObject::offsetOfElements()), secondArgObj);
 
         Register lenReg = regsx.takeAny();
         masm.load32(Address(secondArgObj, ObjectElements::offsetOfLength()), lenReg);
 
         masm.branch32(Assembler::NotEqual,
@@ -2674,34 +2675,34 @@ ICCallStubCompiler::guardFunApply(MacroA
     // Load the callee, ensure that it's fun_apply
     ValueOperand val = regs.takeAnyValue();
     Address calleeSlot(masm.getStackPointer(), ICStackValueOffset + (3 * sizeof(Value)));
     masm.loadValue(calleeSlot, val);
 
     masm.branchTestObject(Assembler::NotEqual, val, failure);
     Register callee = masm.extractObject(val, ExtractTemp1);
 
-    masm.branchTestObjClass(Assembler::NotEqual, callee, regs.getAny(), &JSFunction::class_,
-                            failure);
+    masm.branchTestObjClass(Assembler::NotEqual, callee, &JSFunction::class_, regs.getAny(),
+                            callee, failure);
     masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrEnv()), callee);
 
     masm.branchPtr(Assembler::NotEqual, callee, ImmPtr(fun_apply), failure);
 
     // Load the |thisv|, ensure that it's a scripted function with a valid baseline or ion
     // script, or a native function.
     Address thisSlot(masm.getStackPointer(), ICStackValueOffset + (2 * sizeof(Value)));
     masm.loadValue(thisSlot, val);
 
     masm.branchTestObject(Assembler::NotEqual, val, failure);
     Register target = masm.extractObject(val, ExtractTemp1);
     regs.add(val);
     regs.takeUnchecked(target);
 
-    masm.branchTestObjClass(Assembler::NotEqual, target, regs.getAny(), &JSFunction::class_,
-                            failure);
+    masm.branchTestObjClass(Assembler::NotEqual, target, &JSFunction::class_, regs.getAny(),
+                            target, failure);
 
     Register temp = regs.takeAny();
     masm.branchIfFunctionHasNoJitEntry(target, /* constructing */ false, failure);
     masm.branchFunctionKind(Assembler::Equal, JSFunction::ClassConstructor, callee, temp, failure);
     regs.add(temp);
     return target;
 }
 
@@ -2938,18 +2939,18 @@ ICCallScriptedCompiler::generateStubCode
         // Check if the object matches this callee.
         Address expectedCallee(ICStubReg, ICCall_Scripted::offsetOfCallee());
         masm.branchPtr(Assembler::NotEqual, expectedCallee, callee, &failure);
 
         // Guard against relazification.
         masm.branchIfFunctionHasNoJitEntry(callee, isConstructing_, &failure);
     } else {
         // Ensure the object is a function.
-        masm.branchTestObjClass(Assembler::NotEqual, callee, regs.getAny(), &JSFunction::class_,
-                                &failure);
+        masm.branchTestObjClass(Assembler::NotEqual, callee, &JSFunction::class_, regs.getAny(),
+                                callee, &failure);
         if (isConstructing_) {
             masm.branchIfNotInterpretedConstructor(callee, regs.getAny(), &failure);
         } else {
             masm.branchIfFunctionHasNoJitEntry(callee, /* constructing */ false, &failure);
             masm.branchFunctionKind(Assembler::Equal, JSFunction::ClassConstructor, callee,
                                     regs.getAny(), &failure);
         }
     }
@@ -3183,18 +3184,18 @@ ICCall_ConstStringSplit::Compiler::gener
         ValueOperand calleeVal = regs.takeAnyValue();
 
         // Ensure that callee is an object.
         masm.loadValue(calleeAddr, calleeVal);
         masm.branchTestObject(Assembler::NotEqual, calleeVal, &failureRestoreArgc);
 
         // Ensure that callee is a function.
         Register calleeObj = masm.extractObject(calleeVal, ExtractTemp0);
-        masm.branchTestObjClass(Assembler::NotEqual, calleeObj, scratchReg,
-                                &JSFunction::class_, &failureRestoreArgc);
+        masm.branchTestObjClass(Assembler::NotEqual, calleeObj, &JSFunction::class_, scratchReg,
+                                calleeObj, &failureRestoreArgc);
 
         // Ensure that callee's function impl is the native intrinsic_StringSplitString.
         masm.loadPtr(Address(calleeObj, JSFunction::offsetOfNativeOrEnv()), scratchReg);
         masm.branchPtr(Assembler::NotEqual, scratchReg, ImmPtr(js::intrinsic_StringSplitString),
                        &failureRestoreArgc);
 
         regs.add(calleeVal);
     }
@@ -3273,18 +3274,18 @@ ICCall_IsSuspendedGenerator::Compiler::g
     // Check if it's an object.
     Label returnFalse;
     Register genObj = regs.takeAny();
     masm.branchTestObject(Assembler::NotEqual, argVal, &returnFalse);
     masm.unboxObject(argVal, genObj);
 
     // Check if it's a GeneratorObject.
     Register scratch = regs.takeAny();
-    masm.branchTestObjClass(Assembler::NotEqual, genObj, scratch, &GeneratorObject::class_,
-                            &returnFalse);
+    masm.branchTestObjClass(Assembler::NotEqual, genObj, &GeneratorObject::class_, scratch,
+                            genObj, &returnFalse);
 
     // If the yield index slot holds an int32 value < YIELD_AND_AWAIT_INDEX_CLOSING,
     // the generator is suspended.
     masm.loadValue(Address(genObj, GeneratorObject::offsetOfYieldAndAwaitIndexSlot()), argVal);
     masm.branchTestInt32(Assembler::NotEqual, argVal, &returnFalse);
     masm.unboxInt32(argVal, scratch);
     masm.branch32(Assembler::AboveOrEqual, scratch,
                   Imm32(GeneratorObject::YIELD_AND_AWAIT_INDEX_CLOSING),
@@ -3422,21 +3423,25 @@ ICCall_ClassHook::Compiler::generateStub
     unsigned nonArgSlots = (1 + isConstructing_) * sizeof(Value);
     BaseValueIndex calleeSlot(masm.getStackPointer(), argcReg, ICStackValueOffset + nonArgSlots);
     masm.loadValue(calleeSlot, R1);
     regs.take(R1);
 
     masm.branchTestObject(Assembler::NotEqual, R1, &failure);
 
     // Ensure the callee's class matches the one in this stub.
+    // We use |Address(ICStubReg, ICCall_ClassHook::offsetOfNative())| below
+    // instead of extracting the hook from callee. As a result the callee
+    // register is no longer used and we must use spectreRegToZero := ICStubReg
+    // instead.
     Register callee = masm.extractObject(R1, ExtractTemp0);
     Register scratch = regs.takeAny();
-    masm.branchTestObjClass(Assembler::NotEqual, callee, scratch,
+    masm.branchTestObjClass(Assembler::NotEqual, callee,
                             Address(ICStubReg, ICCall_ClassHook::offsetOfClass()),
-                            &failure);
+                            scratch, ICStubReg, &failure);
     regs.add(R1);
     regs.takeUnchecked(callee);
 
     // Push a stub frame so that we can perform a non-tail call.
     // Note that this leaves the return address in TailCallReg.
     enterStubFrame(masm, regs.getAny());
 
     regs.add(scratch);
@@ -3689,30 +3694,30 @@ ICCall_ScriptedFunCall::Compiler::genera
     BaseValueIndex calleeSlot(masm.getStackPointer(), argcReg, ICStackValueOffset + sizeof(Value));
     masm.loadValue(calleeSlot, R1);
     regs.take(R1);
 
     // Ensure callee is fun_call.
     masm.branchTestObject(Assembler::NotEqual, R1, &failure);
 
     Register callee = masm.extractObject(R1, ExtractTemp0);
-    masm.branchTestObjClass(Assembler::NotEqual, callee, regs.getAny(), &JSFunction::class_,
-                            &failure);
+    masm.branchTestObjClass(Assembler::NotEqual, callee, &JSFunction::class_, regs.getAny(),
+                            callee, &failure);
     masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrEnv()), callee);
     masm.branchPtr(Assembler::NotEqual, callee, ImmPtr(fun_call), &failure);
 
     // Ensure |this| is a function with a jit entry.
     BaseIndex thisSlot(masm.getStackPointer(), argcReg, TimesEight, ICStackValueOffset);
     masm.loadValue(thisSlot, R1);
 
     masm.branchTestObject(Assembler::NotEqual, R1, &failure);
     callee = masm.extractObject(R1, ExtractTemp0);
 
-    masm.branchTestObjClass(Assembler::NotEqual, callee, regs.getAny(), &JSFunction::class_,
-                            &failure);
+    masm.branchTestObjClass(Assembler::NotEqual, callee, &JSFunction::class_, regs.getAny(),
+                            callee, &failure);
     masm.branchIfFunctionHasNoJitEntry(callee, /* constructing */ false, &failure);
     masm.branchFunctionKind(Assembler::Equal, JSFunction::ClassConstructor,
                             callee, regs.getAny(), &failure);
 
     // Load the start of the target JitCode.
     Register code = regs.takeAny();
     masm.loadJitCodeRaw(callee, code);
 
@@ -4037,18 +4042,18 @@ ICIteratorMore_Native::Compiler::generat
     Label failure;
 
     Register obj = masm.extractObject(R0, ExtractTemp0);
 
     AllocatableGeneralRegisterSet regs(availableGeneralRegs(1));
     Register nativeIterator = regs.takeAny();
     Register scratch = regs.takeAny();
 
-    masm.branchTestObjClass(Assembler::NotEqual, obj, scratch,
-                            &PropertyIteratorObject::class_, &failure);
+    masm.branchTestObjClass(Assembler::NotEqual, obj, &PropertyIteratorObject::class_, scratch,
+                            obj, &failure);
     masm.loadObjPrivate(obj, JSObject::ITER_CLASS_NFIXED_SLOTS, nativeIterator);
 
     // If props_cursor < props_end, load the next string and advance the cursor.
     // Else, return MagicValue(JS_NO_ITER_VALUE).
     Label iterDone;
     Address cursorAddr(nativeIterator, offsetof(NativeIterator, props_cursor));
     Address cursorEndAddr(nativeIterator, offsetof(NativeIterator, props_end));
     masm.loadPtr(cursorAddr, scratch);
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -1405,17 +1405,18 @@ CacheIRCompiler::emitGuardType()
     }
 
     return true;
 }
 
 bool
 CacheIRCompiler::emitGuardClass()
 {
-    Register obj = allocator.useRegister(masm, reader.objOperandId());
+    ObjOperandId objId = reader.objOperandId();
+    Register obj = allocator.useRegister(masm, objId);
     AutoScratchRegister scratch(allocator, masm);
 
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
     const Class* clasp = nullptr;
     switch (reader.guardClassKind()) {
@@ -1430,36 +1431,42 @@ CacheIRCompiler::emitGuardClass()
         break;
       case GuardClassKind::WindowProxy:
         clasp = cx_->runtime()->maybeWindowProxyClass();
         break;
       case GuardClassKind::JSFunction:
         clasp = &JSFunction::class_;
         break;
     }
-
     MOZ_ASSERT(clasp);
-    masm.branchTestObjClass(Assembler::NotEqual, obj, scratch, clasp, failure->label());
+
+    if (objectGuardNeedsSpectreMitigations(objId)) {
+        masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj, failure->label());
+    } else {
+        masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp, scratch,
+                                                    failure->label());
+    }
+
     return true;
 }
 
 bool
 CacheIRCompiler::emitGuardIsNativeFunction()
 {
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     JSNative nativeFunc = reinterpret_cast<JSNative>(reader.pointer());
     AutoScratchRegister scratch(allocator, masm);
 
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
     // Ensure obj is a function.
     const Class* clasp = &JSFunction::class_;
-    masm.branchTestObjClass(Assembler::NotEqual, obj, scratch, clasp, failure->label());
+    masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj, failure->label());
 
     // Ensure function native matches.
     masm.branchPtr(Assembler::NotEqual, Address(obj, JSFunction::offsetOfNativeOrEnv()),
                    ImmPtr(nativeFunc), failure->label());
     return true;
 }
 
 bool
--- a/js/src/jit/CacheIRCompiler.h
+++ b/js/src/jit/CacheIRCompiler.h
@@ -374,16 +374,20 @@ class MOZ_RAII CacheRegisterAllocator
         return spilledRegs_.appendAll(regs);
     }
 
     void nextOp() {
         currentOpRegs_.clear();
         currentInstruction_++;
     }
 
+    bool isDeadAfterInstruction(OperandId opId) const {
+        return writer_.operandIsDead(opId.id(), currentInstruction_ + 1);
+    }
+
     uint32_t stackPushed() const {
         return stackPushed_;
     }
     void setStackPushed(uint32_t pushed) {
         stackPushed_ = pushed;
     }
 
     bool isAllocatable(Register reg) const {
@@ -565,16 +569,24 @@ class MOZ_RAII CacheIRCompiler
     MOZ_MUST_USE bool emitFailurePath(size_t i);
 
     // Returns the set of volatile float registers that are live. These
     // registers need to be saved when making non-GC calls with callWithABI.
     FloatRegisterSet liveVolatileFloatRegs() const {
         return FloatRegisterSet::Intersect(liveFloatRegs_.set(), FloatRegisterSet::Volatile());
     }
 
+    bool objectGuardNeedsSpectreMitigations(ObjOperandId objId) const {
+        // Instructions like GuardShape need Spectre mitigations if
+        // (1) mitigations are enabled and (2) the object is used by other
+        // instructions (if the object is *not* used by other instructions,
+        // zeroing its register is pointless).
+        return JitOptions.spectreObjectMitigationsMisc && !allocator.isDeadAfterInstruction(objId);
+    }
+
     void emitLoadTypedObjectResultShared(const Address& fieldAddr, Register scratch,
                                          uint32_t typeDescr,
                                          const AutoOutputRegister& output);
 
     void emitStoreTypedObjectReferenceProp(ValueOperand val, ReferenceTypeDescr::Type type,
                                            const Address& dest, Register scratch);
 
     void emitRegisterEnumerator(Register enumeratorsList, Register iter, Register scratch);
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -955,17 +955,17 @@ CodeGenerator::visitFunctionDispatch(LFu
         lastLabel = skipTrivialBlocks(mir->getFallback())->lir()->label();
     }
 
     // Compare function pointers, except for the last case.
     for (size_t i = 0; i < casesWithFallback - 1; i++) {
         MOZ_ASSERT(i < mir->numCases());
         LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();
         if (ObjectGroup* funcGroup = mir->getCaseObjectGroup(i)) {
-            masm.branchTestObjGroup(Assembler::Equal, input, funcGroup, target->label());
+            masm.branchTestObjGroupUnsafe(Assembler::Equal, input, funcGroup, target->label());
         } else {
             JSFunction* func = mir->getCase(i);
             masm.branchPtr(Assembler::Equal, input, ImmGCPtr(func), target->label());
         }
     }
 
     // Jump to the last case.
     masm.jump(lastLabel);
@@ -2513,17 +2513,17 @@ CodeGenerator::visitRegExpPrototypeOptim
     addOutOfLineCode(ool, ins->mir());
 
     masm.loadJSContext(temp);
     masm.loadPtr(Address(temp, JSContext::offsetOfCompartment()), temp);
     size_t offset = JSCompartment::offsetOfRegExps() +
                     RegExpCompartment::offsetOfOptimizableRegExpPrototypeShape();
     masm.loadPtr(Address(temp, offset), temp);
 
-    masm.branchTestObjShape(Assembler::NotEqual, object, temp, ool->entry());
+    masm.branchTestObjShapeUnsafe(Assembler::NotEqual, object, temp, ool->entry());
     masm.move32(Imm32(0x1), output);
 
     masm.bind(ool->rejoin());
 }
 
 void
 CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(OutOfLineRegExpPrototypeOptimizable* ool)
 {
@@ -2573,17 +2573,17 @@ CodeGenerator::visitRegExpInstanceOptimi
     addOutOfLineCode(ool, ins->mir());
 
     masm.loadJSContext(temp);
     masm.loadPtr(Address(temp, JSContext::offsetOfCompartment()), temp);
     size_t offset = JSCompartment::offsetOfRegExps() +
                     RegExpCompartment::offsetOfOptimizableRegExpInstanceShape();
     masm.loadPtr(Address(temp, offset), temp);
 
-    masm.branchTestObjShape(Assembler::NotEqual, object, temp, ool->entry());
+    masm.branchTestObjShapeUnsafe(Assembler::NotEqual, object, temp, ool->entry());
     masm.move32(Imm32(0x1), output);
 
     masm.bind(ool->rejoin());
 }
 
 void
 CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(OutOfLineRegExpInstanceOptimizable* ool)
 {
@@ -3463,54 +3463,58 @@ CodeGenerator::visitStoreSlotV(LStoreSlo
     if (lir->mir()->needsBarrier())
        emitPreBarrier(Address(base, offset));
 
     masm.storeValue(value, Address(base, offset));
 }
 
 static void
 GuardReceiver(MacroAssembler& masm, const ReceiverGuard& guard,
-              Register obj, Register scratch, Label* miss, bool checkNullExpando)
+              Register obj, Register expandoScratch, Register scratch, Label* miss,
+              bool checkNullExpando)
 {
     if (guard.group) {
-        masm.branchTestObjGroup(Assembler::NotEqual, obj, guard.group, miss);
+        masm.branchTestObjGroup(Assembler::NotEqual, obj, guard.group, scratch, obj, miss);
 
         Address expandoAddress(obj, UnboxedPlainObject::offsetOfExpando());
         if (guard.shape) {
-            masm.loadPtr(expandoAddress, scratch);
-            masm.branchPtr(Assembler::Equal, scratch, ImmWord(0), miss);
-            masm.branchTestObjShape(Assembler::NotEqual, scratch, guard.shape, miss);
+            masm.loadPtr(expandoAddress, expandoScratch);
+            masm.branchPtr(Assembler::Equal, expandoScratch, ImmWord(0), miss);
+            masm.branchTestObjShape(Assembler::NotEqual, expandoScratch, guard.shape, scratch,
+                                    expandoScratch, miss);
         } else if (checkNullExpando) {
             masm.branchPtr(Assembler::NotEqual, expandoAddress, ImmWord(0), miss);
         }
     } else {
-        masm.branchTestObjShape(Assembler::NotEqual, obj, guard.shape, miss);
-    }
-}
-
-void
-CodeGenerator::emitGetPropertyPolymorphic(LInstruction* ins, Register obj, Register scratch,
+        masm.branchTestObjShape(Assembler::NotEqual, obj, guard.shape, scratch, obj, miss);
+    }
+}
+
+void
+CodeGenerator::emitGetPropertyPolymorphic(LInstruction* ins, Register obj, Register expandoScratch,
+                                          Register scratch,
                                           const TypedOrValueRegister& output)
 {
     MGetPropertyPolymorphic* mir = ins->mirRaw()->toGetPropertyPolymorphic();
 
     Label done;
 
     for (size_t i = 0; i < mir->numReceivers(); i++) {
         ReceiverGuard receiver = mir->receiver(i);
 
         Label next;
         masm.comment("GuardReceiver");
-        GuardReceiver(masm, receiver, obj, scratch, &next, /* checkNullExpando = */ false);
+        GuardReceiver(masm, receiver, obj, expandoScratch, scratch, &next,
+                      /* checkNullExpando = */ false);
 
         if (receiver.shape) {
             masm.comment("loadTypedOrValue");
             // If this is an unboxed expando access, GuardReceiver loaded the
-            // expando object into scratch.
-            Register target = receiver.group ? scratch : obj;
+            // expando object into expandoScratch.
+            Register target = receiver.group ? expandoScratch : obj;
 
             Shape* shape = mir->shape(i);
             if (shape->slot() < shape->numFixedSlots()) {
                 // Fixed slot.
                 masm.loadTypedOrValue(Address(target, NativeObject::getFixedSlotOffset(shape->slot())),
                                       output);
             } else {
                 // Dynamic slot.
@@ -3538,59 +3542,62 @@ CodeGenerator::emitGetPropertyPolymorphi
     masm.bind(&done);
 }
 
 void
 CodeGenerator::visitGetPropertyPolymorphicV(LGetPropertyPolymorphicV* ins)
 {
     Register obj = ToRegister(ins->obj());
     ValueOperand output = ToOutValue(ins);
-    emitGetPropertyPolymorphic(ins, obj, output.scratchReg(), output);
+    Register temp = ToRegister(ins->temp());
+    emitGetPropertyPolymorphic(ins, obj, output.scratchReg(), temp, output);
 }
 
 void
 CodeGenerator::visitGetPropertyPolymorphicT(LGetPropertyPolymorphicT* ins)
 {
     Register obj = ToRegister(ins->obj());
     TypedOrValueRegister output(ins->mir()->type(), ToAnyRegister(ins->output()));
-    Register temp = (output.type() == MIRType::Double)
-                    ? ToRegister(ins->temp())
-                    : output.typedReg().gpr();
-    emitGetPropertyPolymorphic(ins, obj, temp, output);
+    Register temp1 = ToRegister(ins->temp1());
+    Register temp2 = (output.type() == MIRType::Double)
+                     ? ToRegister(ins->temp2())
+                     : output.typedReg().gpr();
+    emitGetPropertyPolymorphic(ins, obj, temp1, temp2, output);
 }
 
 template <typename T>
 static void
 EmitUnboxedPreBarrier(MacroAssembler &masm, T address, JSValueType type)
 {
     if (type == JSVAL_TYPE_OBJECT)
         masm.guardedCallPreBarrier(address, MIRType::Object);
     else if (type == JSVAL_TYPE_STRING)
         masm.guardedCallPreBarrier(address, MIRType::String);
     else
         MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(type));
 }
 
 void
-CodeGenerator::emitSetPropertyPolymorphic(LInstruction* ins, Register obj, Register scratch,
-                                          const ConstantOrRegister& value)
+CodeGenerator::emitSetPropertyPolymorphic(LInstruction* ins, Register obj, Register expandoScratch,
+                                          Register scratch, const ConstantOrRegister& value)
 {
     MSetPropertyPolymorphic* mir = ins->mirRaw()->toSetPropertyPolymorphic();
 
     Label done;
     for (size_t i = 0; i < mir->numReceivers(); i++) {
         ReceiverGuard receiver = mir->receiver(i);
 
         Label next;
-        GuardReceiver(masm, receiver, obj, scratch, &next, /* checkNullExpando = */ false);
+        GuardReceiver(masm, receiver, obj, expandoScratch, scratch, &next,
+                      /* checkNullExpando = */ false);
 
         if (receiver.shape) {
             // If this is an unboxed expando access, GuardReceiver loaded the
-            // expando object into scratch.
-            Register target = receiver.group ? scratch : obj;
+            // expando object into expandoScratch.
+            Register target = receiver.group ? expandoScratch : obj;
 
             Shape* shape = mir->shape(i);
             if (shape->slot() < shape->numFixedSlots()) {
                 // Fixed slot.
                 Address addr(target, NativeObject::getFixedSlotOffset(shape->slot()));
                 if (mir->needsBarrier())
                     emitPreBarrier(addr);
                 masm.storeConstantOrRegister(value, addr);
@@ -3621,34 +3628,36 @@ CodeGenerator::emitSetPropertyPolymorphi
 
     masm.bind(&done);
 }
 
 void
 CodeGenerator::visitSetPropertyPolymorphicV(LSetPropertyPolymorphicV* ins)
 {
     Register obj = ToRegister(ins->obj());
-    Register temp = ToRegister(ins->temp());
+    Register temp1 = ToRegister(ins->temp1());
+    Register temp2 = ToRegister(ins->temp2());
     ValueOperand value = ToValue(ins, LSetPropertyPolymorphicV::Value);
-    emitSetPropertyPolymorphic(ins, obj, temp, TypedOrValueRegister(value));
+    emitSetPropertyPolymorphic(ins, obj, temp1, temp2, TypedOrValueRegister(value));
 }
 
 void
 CodeGenerator::visitSetPropertyPolymorphicT(LSetPropertyPolymorphicT* ins)
 {
     Register obj = ToRegister(ins->obj());
-    Register temp = ToRegister(ins->temp());
+    Register temp1 = ToRegister(ins->temp1());
+    Register temp2 = ToRegister(ins->temp2());
 
     ConstantOrRegister value;
     if (ins->mir()->value()->isConstant())
         value = ConstantOrRegister(ins->mir()->value()->toConstant()->toJSValue());
     else
         value = TypedOrValueRegister(ins->mir()->value()->type(), ToAnyRegister(ins->value()));
 
-    emitSetPropertyPolymorphic(ins, obj, temp, value);
+    emitSetPropertyPolymorphic(ins, obj, temp1, temp2, value);
 }
 
 void
 CodeGenerator::visitElements(LElements* lir)
 {
     Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
     masm.loadPtr(elements, ToRegister(lir->output()));
 }
@@ -3790,39 +3799,31 @@ CodeGenerator::visitCopyLexicalEnvironme
     pushArg(ToRegister(lir->env()));
     callVM(CopyLexicalEnvironmentObjectInfo, lir);
 }
 
 void
 CodeGenerator::visitGuardShape(LGuardShape* guard)
 {
     Register obj = ToRegister(guard->input());
+    Register temp = ToTempRegisterOrInvalid(guard->temp());
     Label bail;
-    masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), &bail);
+    masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp, obj, &bail);
     bailoutFrom(&bail, guard->snapshot());
 }
 
 void
 CodeGenerator::visitGuardObjectGroup(LGuardObjectGroup* guard)
 {
     Register obj = ToRegister(guard->input());
+    Register temp = ToTempRegisterOrInvalid(guard->temp());
     Assembler::Condition cond =
         guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
     Label bail;
-    masm.branchTestObjGroup(cond, obj, guard->mir()->group(), &bail);
-    bailoutFrom(&bail, guard->snapshot());
-}
-
-void
-CodeGenerator::visitGuardClass(LGuardClass* guard)
-{
-    Register obj = ToRegister(guard->input());
-    Register tmp = ToRegister(guard->tempInt());
-    Label bail;
-    masm.branchTestObjClass(Assembler::NotEqual, obj, tmp, guard->mir()->getClass(), &bail);
+    masm.branchTestObjGroup(cond, obj, guard->mir()->group(), temp, obj, &bail);
     bailoutFrom(&bail, guard->snapshot());
 }
 
 void
 CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard)
 {
     Register input = ToRegister(guard->input());
     Register expected = ToRegister(guard->expected());
@@ -3832,25 +3833,26 @@ CodeGenerator::visitGuardObjectIdentity(
     bailoutCmpPtr(cond, input, expected, guard->snapshot());
 }
 
 void
 CodeGenerator::visitGuardReceiverPolymorphic(LGuardReceiverPolymorphic* lir)
 {
     const MGuardReceiverPolymorphic* mir = lir->mir();
     Register obj = ToRegister(lir->object());
-    Register temp = ToRegister(lir->temp());
+    Register temp1 = ToRegister(lir->temp1());
+    Register temp2 = ToRegister(lir->temp2());
 
     Label done;
 
     for (size_t i = 0; i < mir->numReceivers(); i++) {
         const ReceiverGuard& receiver = mir->receiver(i);
 
         Label next;
-        GuardReceiver(masm, receiver, obj, temp, &next, /* checkNullExpando = */ true);
+        GuardReceiver(masm, receiver, obj, temp1, temp2, &next, /* checkNullExpando = */ true);
 
         if (i == mir->numReceivers() - 1) {
             bailoutFrom(&next, lir->snapshot());
         } else {
             masm.jump(&done);
             masm.bind(&next);
         }
     }
@@ -4512,18 +4514,18 @@ CodeGenerator::visitCallGeneric(LCallGen
 
     // Known-target case is handled by LCallKnown.
     MOZ_ASSERT(!call->hasSingleTarget());
 
     masm.checkStackAlignment();
 
     // Guard that calleereg is actually a function object.
     if (call->mir()->needsClassCheck()) {
-        masm.branchTestObjClass(Assembler::NotEqual, calleereg, nargsreg, &JSFunction::class_,
-                                &invoke);
+        masm.branchTestObjClass(Assembler::NotEqual, calleereg, &JSFunction::class_, nargsreg,
+                                calleereg, &invoke);
     }
 
     // Guard that calleereg is an interpreted function with a JSScript or a
     // wasm function.
     // If we are constructing, also ensure the callee is a constructor.
     if (call->mir()->isConstructing()) {
         masm.branchIfNotInterpretedConstructor(calleereg, nargsreg, &invoke);
     } else {
@@ -4912,18 +4914,18 @@ CodeGenerator::emitApplyGeneric(T* apply
 
     // Holds the function nargs, computed in the invoker or (for
     // ApplyArray) in the argument pusher.
     Register argcreg = ToRegister(apply->getArgc());
 
     // Unless already known, guard that calleereg is actually a function object.
     if (!apply->hasSingleTarget()) {
         Label bail;
-        masm.branchTestObjClass(Assembler::NotEqual, calleereg, objreg, &JSFunction::class_,
-                                &bail);
+        masm.branchTestObjClass(Assembler::NotEqual, calleereg, &JSFunction::class_, objreg,
+                                calleereg, &bail);
         bailoutFrom(&bail, apply->snapshot());
     }
 
     // Copy the arguments of the current function.
     //
     // In the case of ApplyArray, also compute argc: the argc register
     // and the elements register are the same; argc must not be
     // referenced before the call to emitPushArguments() and elements
@@ -6994,18 +6996,20 @@ CodeGenerator::emitGetNextEntryForIterat
     Register dataLength = ToRegister(lir->temp1());
     Register range = ToRegister(lir->temp2());
     Register output = ToRegister(lir->output());
 
 #ifdef DEBUG
     // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
     // only called with the correct iterator class. Assert here all self-
     // hosted callers of GetNextEntryForIterator perform this class check.
+    // No Spectre mitigations are needed because this is DEBUG-only code.
     Label success;
-    masm.branchTestObjClass(Assembler::Equal, iter, temp, &IteratorObject::class_, &success);
+    masm.branchTestObjClassNoSpectreMitigations(Assembler::Equal, iter, &IteratorObject::class_,
+                                                temp, &success);
     masm.assumeUnreachable("Iterator object should have the correct class.");
     masm.bind(&success);
 #endif
 
     masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(IteratorObject::RangeSlot)),
                      range);
 
     Label iterAlreadyDone, iterDone, done;
@@ -9296,30 +9300,33 @@ CodeGenerator::visitStoreUnboxedPointer(
         Address address(elements, ToInt32(index) * sizeof(uintptr_t) + offsetAdjustment);
         StoreUnboxedPointer(masm, address, type, value, preBarrier);
     } else {
         BaseIndex address(elements, ToRegister(index), ScalePointer, offsetAdjustment);
         StoreUnboxedPointer(masm, address, type, value, preBarrier);
     }
 }
 
-typedef bool (*ConvertUnboxedObjectToNativeFn)(JSContext*, JSObject*);
+typedef NativeObject* (*ConvertUnboxedObjectToNativeFn)(JSContext*, JSObject*);
 static const VMFunction ConvertUnboxedPlainObjectToNativeInfo =
     FunctionInfo<ConvertUnboxedObjectToNativeFn>(UnboxedPlainObject::convertToNative,
                                                  "UnboxedPlainObject::convertToNative");
 
 void
 CodeGenerator::visitConvertUnboxedObjectToNative(LConvertUnboxedObjectToNative* lir)
 {
     Register object = ToRegister(lir->getOperand(0));
-
+    Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+    // The call will return the same object so StoreRegisterTo(object) is safe.
     OutOfLineCode* ool = oolCallVM(ConvertUnboxedPlainObjectToNativeInfo,
-                                   lir, ArgList(object), StoreNothing());
-
-    masm.branchTestObjGroup(Assembler::Equal, object, lir->mir()->group(), ool->entry());
+                                   lir, ArgList(object), StoreRegisterTo(object));
+
+    masm.branchTestObjGroup(Assembler::Equal, object, lir->mir()->group(), temp, object,
+                            ool->entry());
     masm.bind(ool->rejoin());
 }
 
 typedef bool (*ArrayPopShiftFn)(JSContext*, HandleObject, MutableHandleValue);
 static const VMFunction ArrayPopDenseInfo =
     FunctionInfo<ArrayPopShiftFn>(jit::ArrayPopDense, "ArrayPopDense");
 static const VMFunction ArrayShiftDenseInfo =
     FunctionInfo<ArrayPopShiftFn>(jit::ArrayShiftDense, "ArrayShiftDense");
@@ -9589,17 +9596,18 @@ CodeGenerator::visitGetIteratorCache(LGe
 }
 
 static void
 LoadNativeIterator(MacroAssembler& masm, Register obj, Register dest, Label* failures)
 {
     MOZ_ASSERT(obj != dest);
 
     // Test class.
-    masm.branchTestObjClass(Assembler::NotEqual, obj, dest, &PropertyIteratorObject::class_, failures);
+    masm.branchTestObjClass(Assembler::NotEqual, obj, &PropertyIteratorObject::class_, dest,
+                            obj, failures);
 
     // Load NativeIterator object.
     masm.loadObjPrivate(obj, JSObject::ITER_CLASS_NFIXED_SLOTS, dest);
 }
 
 typedef bool (*IteratorMoreFn)(JSContext*, HandleObject, MutableHandleValue);
 static const VMFunction IteratorMoreInfo =
     FunctionInfo<IteratorMoreFn>(IteratorMore, "IteratorMore");
@@ -13121,17 +13129,18 @@ CodeGenerator::visitFinishBoundFunctionI
 
     OutOfLineCode* ool = oolCallVM(FinishBoundFunctionInitInfo, lir,
                                    ArgList(bound, target, argCount), StoreNothing());
     Label* slowPath = ool->entry();
 
     const size_t boundLengthOffset = FunctionExtended::offsetOfExtendedSlot(BOUND_FUN_LENGTH_SLOT);
 
     // Take the slow path if the target is not a JSFunction.
-    masm.branchTestObjClass(Assembler::NotEqual, target, temp1, &JSFunction::class_, slowPath);
+    masm.branchTestObjClass(Assembler::NotEqual, target, &JSFunction::class_, temp1, target,
+                            slowPath);
 
     // Take the slow path if we'd need to adjust the [[Prototype]].
     masm.loadObjProto(bound, temp1);
     masm.loadObjProto(target, temp2);
     masm.branchPtr(Assembler::NotEqual, temp1, temp2, slowPath);
 
     // Get the function flags.
     masm.load16ZeroExtend(Address(target, JSFunction::offsetOfFlags()), temp1);
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -152,17 +152,16 @@ class CodeGenerator final : public CodeG
     void visitStoreSlotT(LStoreSlotT* lir);
     void visitStoreSlotV(LStoreSlotV* lir);
     void visitElements(LElements* lir);
     void visitConvertElementsToDoubles(LConvertElementsToDoubles* lir);
     void visitMaybeToDoubleElement(LMaybeToDoubleElement* lir);
     void visitMaybeCopyElementsForWrite(LMaybeCopyElementsForWrite* lir);
     void visitGuardShape(LGuardShape* guard);
     void visitGuardObjectGroup(LGuardObjectGroup* guard);
-    void visitGuardClass(LGuardClass* guard);
     void visitGuardObjectIdentity(LGuardObjectIdentity* guard);
     void visitGuardReceiverPolymorphic(LGuardReceiverPolymorphic* lir);
     void visitGuardUnboxedExpando(LGuardUnboxedExpando* lir);
     void visitLoadUnboxedExpando(LLoadUnboxedExpando* lir);
     void visitTypeBarrierV(LTypeBarrierV* lir);
     void visitTypeBarrierO(LTypeBarrierO* lir);
     void emitPostWriteBarrier(const LAllocation* obj);
     void emitPostWriteBarrier(Register objreg);
@@ -257,21 +256,21 @@ class CodeGenerator final : public CodeG
     void visitBoundsCheckRange(LBoundsCheckRange* lir);
     void visitBoundsCheckLower(LBoundsCheckLower* lir);
     void visitSpectreMaskIndex(LSpectreMaskIndex* lir);
     void visitLoadFixedSlotV(LLoadFixedSlotV* ins);
     void visitLoadFixedSlotAndUnbox(LLoadFixedSlotAndUnbox* lir);
     void visitLoadFixedSlotT(LLoadFixedSlotT* ins);
     void visitStoreFixedSlotV(LStoreFixedSlotV* ins);
     void visitStoreFixedSlotT(LStoreFixedSlotT* ins);
-    void emitGetPropertyPolymorphic(LInstruction* lir, Register obj,
+    void emitGetPropertyPolymorphic(LInstruction* lir, Register obj, Register expandoScratch,
                                     Register scratch, const TypedOrValueRegister& output);
     void visitGetPropertyPolymorphicV(LGetPropertyPolymorphicV* ins);
     void visitGetPropertyPolymorphicT(LGetPropertyPolymorphicT* ins);
-    void emitSetPropertyPolymorphic(LInstruction* lir, Register obj,
+    void emitSetPropertyPolymorphic(LInstruction* lir, Register obj, Register expandoScratch,
                                     Register scratch, const ConstantOrRegister& value);
     void visitSetPropertyPolymorphicV(LSetPropertyPolymorphicV* ins);
     void visitSetPropertyPolymorphicT(LSetPropertyPolymorphicT* ins);
     void visitAbsI(LAbsI* lir);
     void visitAtan2D(LAtan2D* lir);
     void visitHypot(LHypot* lir);
     void visitPowI(LPowI* lir);
     void visitPowD(LPowD* lir);
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -3537,18 +3537,22 @@ TryOptimizeLoadObjectOrNull(MDefinition*
 
 static inline MDefinition*
 PassthroughOperand(MDefinition* def)
 {
     if (def->isConvertElementsToDoubles())
         return def->toConvertElementsToDoubles()->elements();
     if (def->isMaybeCopyElementsForWrite())
         return def->toMaybeCopyElementsForWrite()->object();
-    if (def->isConvertUnboxedObjectToNative())
-        return def->toConvertUnboxedObjectToNative()->object();
+    if (!JitOptions.spectreObjectMitigationsMisc) {
+        // If Spectre mitigations are enabled, LConvertUnboxedObjectToNative
+        // needs to have its own def.
+        if (def->isConvertUnboxedObjectToNative())
+            return def->toConvertUnboxedObjectToNative()->object();
+    }
     return nullptr;
 }
 
 // Eliminate checks which are redundant given each other or other instructions.
 //
 // A type barrier is considered redundant if all missing types have been tested
 // for by earlier control instructions.
 //
--- a/js/src/jit/IonCacheIRCompiler.cpp
+++ b/js/src/jit/IonCacheIRCompiler.cpp
@@ -623,38 +623,66 @@ IonCacheIRCompiler::compile()
     }
 
     return newStubCode;
 }
 
 bool
 IonCacheIRCompiler::emitGuardShape()
 {
-    Register obj = allocator.useRegister(masm, reader.objOperandId());
+    ObjOperandId objId = reader.objOperandId();
+    Register obj = allocator.useRegister(masm, objId);
     Shape* shape = shapeStubField(reader.stubOffset());
 
+    bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
+
+    Maybe<AutoScratchRegister> maybeScratch;
+    if (needSpectreMitigations)
+        maybeScratch.emplace(allocator, masm);
+
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
-    masm.branchTestObjShape(Assembler::NotEqual, obj, shape, failure->label());
+    if (needSpectreMitigations) {
+        masm.branchTestObjShape(Assembler::NotEqual, obj, shape, *maybeScratch, obj,
+                                failure->label());
+    } else {
+        masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, shape,
+                                                    failure->label());
+    }
+
     return true;
 }
 
 bool
 IonCacheIRCompiler::emitGuardGroup()
 {
-    Register obj = allocator.useRegister(masm, reader.objOperandId());
+    ObjOperandId objId = reader.objOperandId();
+    Register obj = allocator.useRegister(masm, objId);
     ObjectGroup* group = groupStubField(reader.stubOffset());
 
+    bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
+
+    Maybe<AutoScratchRegister> maybeScratch;
+    if (needSpectreMitigations)
+        maybeScratch.emplace(allocator, masm);
+
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
-    masm.branchTestObjGroup(Assembler::NotEqual, obj, group, failure->label());
+    if (needSpectreMitigations) {
+        masm.branchTestObjGroup(Assembler::NotEqual, obj, group, *maybeScratch, obj,
+                                failure->label());
+    } else {
+        masm.branchTestObjGroupNoSpectreMitigations(Assembler::NotEqual, obj, group,
+                                                    failure->label());
+    }
+
     return true;
 }
 
 bool
 IonCacheIRCompiler::emitGuardGroupHasUnanalyzedNewScript()
 {
     ObjectGroup* group = groupStubField(reader.stubOffset());
     AutoScratchRegister scratch1(allocator, masm);
@@ -702,26 +730,33 @@ IonCacheIRCompiler::emitGuardCompartment
     masm.branchTestObjCompartment(Assembler::NotEqual, obj, compartment, scratch,
                                   failure->label());
     return true;
 }
 
 bool
 IonCacheIRCompiler::emitGuardAnyClass()
 {
-    Register obj = allocator.useRegister(masm, reader.objOperandId());
+    ObjOperandId objId = reader.objOperandId();
+    Register obj = allocator.useRegister(masm, objId);
     AutoScratchRegister scratch(allocator, masm);
 
     const Class* clasp = classStubField(reader.stubOffset());
 
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
-    masm.branchTestObjClass(Assembler::NotEqual, obj, scratch, clasp, failure->label());
+    if (objectGuardNeedsSpectreMitigations(objId)) {
+        masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj, failure->label());
+    } else {
+        masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp, scratch,
+                                                    failure->label());
+    }
+
     return true;
 }
 
 bool
 IonCacheIRCompiler::emitGuardHasProxyHandler()
 {
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     const void* handler = proxyHandlerStubField(reader.stubOffset());
@@ -812,19 +847,21 @@ bool
 IonCacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto()
 {
     Register obj = allocator.useRegister(masm, reader.objOperandId());
     bool hasExpando = reader.readBool();
     JSObject* shapeWrapper = objectStubField(reader.stubOffset());
     MOZ_ASSERT(hasExpando == !!shapeWrapper);
 
     AutoScratchRegister scratch(allocator, masm);
-    Maybe<AutoScratchRegister> scratch2;
-    if (hasExpando)
+    Maybe<AutoScratchRegister> scratch2, scratch3;
+    if (hasExpando) {
         scratch2.emplace(allocator, masm);
+        scratch3.emplace(allocator, masm);
+    }
 
     FailurePath* failure;
     if (!addFailurePath(&failure))
         return false;
 
     masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
     Address holderAddress(scratch, sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
     Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(GetXrayJitInfo()->holderExpandoSlot));
@@ -836,17 +873,18 @@ IonCacheIRCompiler::emitGuardXrayExpando
         masm.unboxObject(expandoAddress, scratch);
 
         // Unwrap the expando before checking its shape.
         masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
         masm.unboxObject(Address(scratch, detail::ProxyReservedSlots::offsetOfPrivateSlot()), scratch);
 
         masm.movePtr(ImmGCPtr(shapeWrapper), scratch2.ref());
         LoadShapeWrapperContents(masm, scratch2.ref(), scratch2.ref(), failure->label());
-        masm.branchTestObjShape(Assembler::NotEqual, scratch, scratch2.ref(), failure->label());
+        masm.branchTestObjShape(Assembler::NotEqual, scratch, *scratch2, *scratch3, scratch,
+                                failure->label());
 
         // The reserved slots on the expando should all be in fixed slots.
         Address protoAddress(scratch, NativeObject::getFixedSlotOffset(GetXrayJitInfo()->expandoProtoSlot));
         masm.branchTestUndefined(Assembler::NotEqual, protoAddress, failure->label());
     } else {
         Label done;
         masm.branchTestObject(Assembler::NotEqual, holderAddress, &done);
         masm.unboxObject(holderAddress, scratch);
@@ -2358,17 +2396,20 @@ IonCacheIRCompiler::emitGuardDOMExpandoM
     if (!addFailurePath(&failure))
         return false;
 
     Label done;
     masm.branchTestUndefined(Assembler::Equal, val, &done);
 
     masm.debugAssertIsObject(val);
     masm.unboxObject(val, objScratch);
-    masm.branchTestObjShape(Assembler::NotEqual, objScratch, shape, failure->label());
+    // The expando object is not used in this case, so we don't need Spectre
+    // mitigations.
+    masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch, shape,
+                                                failure->label());
 
     masm.bind(&done);
     return true;
 }
 
 bool
 IonCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration()
 {
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -3495,19 +3495,29 @@ LIRGenerator::visitStoreUnboxedString(MS
 
     LInstruction* lir = new(alloc()) LStoreUnboxedPointer(elements, index, value);
     add(lir, ins);
 }
 
 void
 LIRGenerator::visitConvertUnboxedObjectToNative(MConvertUnboxedObjectToNative* ins)
 {
-    LInstruction* check = new(alloc()) LConvertUnboxedObjectToNative(useRegister(ins->object()));
-    add(check, ins);
-    assignSafepoint(check, ins);
+    MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+    if (JitOptions.spectreObjectMitigationsMisc) {
+        auto* lir = new(alloc()) LConvertUnboxedObjectToNative(useRegisterAtStart(ins->object()),
+                                                               temp());
+        defineReuseInput(lir, ins, 0);
+        assignSafepoint(lir, ins);
+    } else {
+        auto* lir = new(alloc()) LConvertUnboxedObjectToNative(useRegister(ins->object()),
+                                                               LDefinition::BogusTemp());
+        add(lir, ins);
+        assignSafepoint(lir, ins);
+    }
 }
 
 void
 LIRGenerator::visitEffectiveAddress(MEffectiveAddress* ins)
 {
     define(new(alloc()) LEffectiveAddress(useRegister(ins->base()), useRegister(ins->index())), ins);
 }
 
@@ -3928,45 +3938,46 @@ LIRGenerator::visitGetPropertyCache(MGet
 
 void
 LIRGenerator::visitGetPropertyPolymorphic(MGetPropertyPolymorphic* ins)
 {
     MOZ_ASSERT(ins->object()->type() == MIRType::Object);
 
     if (ins->type() == MIRType::Value) {
         LGetPropertyPolymorphicV* lir =
-            new(alloc()) LGetPropertyPolymorphicV(useRegister(ins->object()));
+            new(alloc()) LGetPropertyPolymorphicV(useRegister(ins->object()), temp());
         assignSnapshot(lir, Bailout_ShapeGuard);
         defineBox(lir, ins);
     } else {
-        LDefinition maybeTemp = (ins->type() == MIRType::Double) ? temp() : LDefinition::BogusTemp();
+        LDefinition maybeTemp2 =
+            (ins->type() == MIRType::Double) ? temp() : LDefinition::BogusTemp();
         LGetPropertyPolymorphicT* lir =
-            new(alloc()) LGetPropertyPolymorphicT(useRegister(ins->object()), maybeTemp);
+            new(alloc()) LGetPropertyPolymorphicT(useRegister(ins->object()), temp(), maybeTemp2);
         assignSnapshot(lir, Bailout_ShapeGuard);
         define(lir, ins);
     }
 }
 
 void
 LIRGenerator::visitSetPropertyPolymorphic(MSetPropertyPolymorphic* ins)
 {
     MOZ_ASSERT(ins->object()->type() == MIRType::Object);
 
     if (ins->value()->type() == MIRType::Value) {
         LSetPropertyPolymorphicV* lir =
             new(alloc()) LSetPropertyPolymorphicV(useRegister(ins->object()),
                                                   useBox(ins->value()),
-                                                  temp());
+                                                  temp(), temp());
         assignSnapshot(lir, Bailout_ShapeGuard);
         add(lir, ins);
     } else {
         LAllocation value = useRegisterOrConstant(ins->value());
         LSetPropertyPolymorphicT* lir =
             new(alloc()) LSetPropertyPolymorphicT(useRegister(ins->object()), value,
-                                                  ins->value()->type(), temp());
+                                                  ins->value()->type(), temp(), temp());
         assignSnapshot(lir, Bailout_ShapeGuard);
         add(lir, ins);
     }
 }
 
 void
 LIRGenerator::visitBindNameCache(MBindNameCache* ins)
 {
@@ -3998,39 +4009,45 @@ LIRGenerator::visitGuardObjectIdentity(M
     redefine(ins, ins->object());
 }
 
 void
 LIRGenerator::visitGuardShape(MGuardShape* ins)
 {
     MOZ_ASSERT(ins->object()->type() == MIRType::Object);
 
-    LGuardShape* guard = new(alloc()) LGuardShape(useRegisterAtStart(ins->object()));
-    assignSnapshot(guard, ins->bailoutKind());
-    add(guard, ins);
-    redefine(ins, ins->object());
+    if (JitOptions.spectreObjectMitigationsMisc) {
+        auto* lir = new(alloc()) LGuardShape(useRegisterAtStart(ins->object()), temp());
+        assignSnapshot(lir, ins->bailoutKind());
+        defineReuseInput(lir, ins, 0);
+    } else {
+        auto* lir = new(alloc()) LGuardShape(useRegister(ins->object()),
+                                             LDefinition::BogusTemp());
+        assignSnapshot(lir, ins->bailoutKind());
+        add(lir, ins);
+        redefine(ins, ins->object());
+    }
 }
 
 void
 LIRGenerator::visitGuardObjectGroup(MGuardObjectGroup* ins)
 {
     MOZ_ASSERT(ins->object()->type() == MIRType::Object);
 
-    LGuardObjectGroup* guard = new(alloc()) LGuardObjectGroup(useRegisterAtStart(ins->object()));
-    assignSnapshot(guard, ins->bailoutKind());
-    add(guard, ins);
-    redefine(ins, ins->object());
-}
-
-void
-LIRGenerator::visitGuardClass(MGuardClass* ins)
-{
-    LGuardClass* guard = new(alloc()) LGuardClass(useRegister(ins->object()), temp());
-    assignSnapshot(guard, Bailout_ObjectIdentityOrTypeGuard);
-    add(guard, ins);
+    if (JitOptions.spectreObjectMitigationsMisc) {
+        auto* lir = new(alloc()) LGuardObjectGroup(useRegisterAtStart(ins->object()), temp());
+        assignSnapshot(lir, ins->bailoutKind());
+        defineReuseInput(lir, ins, 0);
+    } else {
+        auto* lir = new(alloc()) LGuardObjectGroup(useRegister(ins->object()),
+                                                   LDefinition::BogusTemp());
+        assignSnapshot(lir, ins->bailoutKind());
+        add(lir, ins);
+        redefine(ins, ins->object());
+    }
 }
 
 void
 LIRGenerator::visitGuardObject(MGuardObject* ins)
 {
     // The type policy does all the work, so at this point the input
     // is guaranteed to be an object.
     MOZ_ASSERT(ins->input()->type() == MIRType::Object);
@@ -4064,21 +4081,28 @@ LIRGenerator::visitPolyInlineGuard(MPoly
 }
 
 void
 LIRGenerator::visitGuardReceiverPolymorphic(MGuardReceiverPolymorphic* ins)
 {
     MOZ_ASSERT(ins->object()->type() == MIRType::Object);
     MOZ_ASSERT(ins->type() == MIRType::Object);
 
-    LGuardReceiverPolymorphic* guard =
-        new(alloc()) LGuardReceiverPolymorphic(useRegister(ins->object()), temp());
-    assignSnapshot(guard, Bailout_ShapeGuard);
-    add(guard, ins);
-    redefine(ins, ins->object());
+    if (JitOptions.spectreObjectMitigationsMisc) {
+        auto* lir = new(alloc()) LGuardReceiverPolymorphic(useRegisterAtStart(ins->object()),
+                                                           temp(), temp());
+        assignSnapshot(lir, Bailout_ShapeGuard);
+        defineReuseInput(lir, ins, 0);
+    } else {
+        auto* lir = new(alloc()) LGuardReceiverPolymorphic(useRegister(ins->object()),
+                                                           temp(), temp());
+        assignSnapshot(lir, Bailout_ShapeGuard);
+        add(lir, ins);
+        redefine(ins, ins->object());
+    }
 }
 
 void
 LIRGenerator::visitGuardUnboxedExpando(MGuardUnboxedExpando* ins)
 {
     LGuardUnboxedExpando* guard =
         new(alloc()) LGuardUnboxedExpando(useRegister(ins->object()));
     assignSnapshot(guard, ins->bailoutKind());
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -261,17 +261,16 @@ class LIRGenerator : public LIRGenerator
     void visitGetPropertyCache(MGetPropertyCache* ins) override;
     void visitGetPropertyPolymorphic(MGetPropertyPolymorphic* ins) override;
     void visitSetPropertyPolymorphic(MSetPropertyPolymorphic* ins) override;
     void visitBindNameCache(MBindNameCache* ins) override;
     void visitCallBindVar(MCallBindVar* ins) override;
     void visitGuardObjectIdentity(MGuardObjectIdentity* ins) override;
     void visitGuardShape(MGuardShape* ins) override;
     void visitGuardObjectGroup(MGuardObjectGroup* ins) override;
-    void visitGuardClass(MGuardClass* ins) override;
     void visitGuardObject(MGuardObject* ins) override;
     void visitGuardString(MGuardString* ins) override;
     void visitGuardReceiverPolymorphic(MGuardReceiverPolymorphic* ins) override;
     void visitGuardUnboxedExpando(MGuardUnboxedExpando* ins) override;
     void visitLoadUnboxedExpando(MLoadUnboxedExpando* ins) override;
     void visitPolyInlineGuard(MPolyInlineGuard* ins) override;
     void visitAssertRange(MAssertRange* ins) override;
     void visitCallGetProperty(MCallGetProperty* ins) override;
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -11757,53 +11757,16 @@ class MGuardObjectIdentity
             return false;
         return congruentIfOperandsEqual(ins);
     }
     AliasSet getAliasSet() const override {
         return AliasSet::Load(AliasSet::ObjectFields);
     }
 };
 
-// Guard on an object's class.
-class MGuardClass
-  : public MUnaryInstruction,
-    public SingleObjectPolicy::Data
-{
-    const Class* class_;
-
-    MGuardClass(MDefinition* obj, const Class* clasp)
-      : MUnaryInstruction(classOpcode, obj),
-        class_(clasp)
-    {
-        setGuard();
-        setMovable();
-    }
-
-  public:
-    INSTRUCTION_HEADER(GuardClass)
-    TRIVIAL_NEW_WRAPPERS
-    NAMED_OPERANDS((0, object))
-
-    const Class* getClass() const {
-        return class_;
-    }
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!ins->isGuardClass())
-            return false;
-        if (getClass() != ins->toGuardClass()->getClass())
-            return false;
-        return congruentIfOperandsEqual(ins);
-    }
-    AliasSet getAliasSet() const override {
-        return AliasSet::Load(AliasSet::ObjectFields);
-    }
-
-    ALLOW_CLONE(MGuardClass)
-};
-
 // Guard on the presence or absence of an unboxed object's expando.
 class MGuardUnboxedExpando
   : public MUnaryInstruction,
     public SingleObjectPolicy::Data
 {
     bool requireExpando_;
     BailoutKind bailoutKind_;
 
--- a/js/src/jit/MOpcodes.h
+++ b/js/src/jit/MOpcodes.h
@@ -193,17 +193,16 @@ namespace jit {
     _(GetPropertyPolymorphic)                                               \
     _(SetPropertyPolymorphic)                                               \
     _(BindNameCache)                                                        \
     _(CallBindVar)                                                          \
     _(GuardShape)                                                           \
     _(GuardReceiverPolymorphic)                                             \
     _(GuardObjectGroup)                                                     \
     _(GuardObjectIdentity)                                                  \
-    _(GuardClass)                                                           \
     _(GuardUnboxedExpando)                                                  \
     _(LoadUnboxedExpando)                                                   \
     _(ArrayLength)                                                          \
     _(SetArrayLength)                                                       \
     _(GetNextEntryForIterator)                                              \
     _(TypedArrayLength)                                                     \
     _(TypedArrayElements)                                                   \
     _(SetDisjointTypedElements)                                             \
--- a/js/src/jit/MacroAssembler-inl.h
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -487,54 +487,170 @@ MacroAssembler::branchFunctionKind(Condi
     int32_t mask = IMM32_16ADJ(JSFunction::FUNCTION_KIND_MASK);
     int32_t bit = IMM32_16ADJ(kind << JSFunction::FUNCTION_KIND_SHIFT);
     load32(address, scratch);
     and32(Imm32(mask), scratch);
     branch32(cond, scratch, Imm32(bit), label);
 }
 
 void
-MacroAssembler::branchTestObjClass(Condition cond, Register obj, Register scratch,
-                                   const js::Class* clasp, Label* label)
+MacroAssembler::branchTestObjClass(Condition cond, Register obj, const js::Class* clasp,
+                                   Register scratch, Register spectreRegToZero, Label* label)
+{
+    MOZ_ASSERT(obj != scratch);
+    MOZ_ASSERT(scratch != spectreRegToZero);
+
+    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
+    branchPtr(cond, Address(scratch, ObjectGroup::offsetOfClasp()), ImmPtr(clasp), label);
+
+    if (JitOptions.spectreObjectMitigationsMisc)
+        spectreZeroRegister(cond, scratch, spectreRegToZero);
+}
+
+void
+MacroAssembler::branchTestObjClassNoSpectreMitigations(Condition cond, Register obj,
+                                                       const js::Class* clasp,
+                                                       Register scratch, Label* label)
 {
     loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
     branchPtr(cond, Address(scratch, ObjectGroup::offsetOfClasp()), ImmPtr(clasp), label);
 }
 
 void
-MacroAssembler::branchTestObjClass(Condition cond, Register obj, Register scratch,
-                                   const Address& clasp, Label* label)
+MacroAssembler::branchTestObjClass(Condition cond, Register obj, const Address& clasp,
+                                   Register scratch, Register spectreRegToZero, Label* label)
 {
+    MOZ_ASSERT(obj != scratch);
+    MOZ_ASSERT(scratch != spectreRegToZero);
+
+    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
+    loadPtr(Address(scratch, ObjectGroup::offsetOfClasp()), scratch);
+    branchPtr(cond, clasp, scratch, label);
+
+    if (JitOptions.spectreObjectMitigationsMisc)
+        spectreZeroRegister(cond, scratch, spectreRegToZero);
+}
+
+void
+MacroAssembler::branchTestObjClassNoSpectreMitigations(Condition cond, Register obj,
+                                                       const Address& clasp, Register scratch,
+                                                       Label* label)
+{
+    MOZ_ASSERT(obj != scratch);
     loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
     loadPtr(Address(scratch, ObjectGroup::offsetOfClasp()), scratch);
     branchPtr(cond, clasp, scratch, label);
 }
 
 void
-MacroAssembler::branchTestObjShape(Condition cond, Register obj, const Shape* shape, Label* label)
+MacroAssembler::branchTestObjShape(Condition cond, Register obj, const Shape* shape, Register scratch,
+                                   Register spectreRegToZero, Label* label)
+{
+    MOZ_ASSERT(obj != scratch);
+    MOZ_ASSERT(spectreRegToZero != scratch);
+
+    if (JitOptions.spectreObjectMitigationsMisc)
+        move32(Imm32(0), scratch);
+
+    branchPtr(cond, Address(obj, ShapedObject::offsetOfShape()), ImmGCPtr(shape), label);
+
+    if (JitOptions.spectreObjectMitigationsMisc)
+        spectreMovePtr(cond, scratch, spectreRegToZero);
+}
+
+void
+MacroAssembler::branchTestObjShapeNoSpectreMitigations(Condition cond, Register obj,
+                                                       const Shape* shape, Label* label)
 {
     branchPtr(cond, Address(obj, ShapedObject::offsetOfShape()), ImmGCPtr(shape), label);
 }
 
 void
-MacroAssembler::branchTestObjShape(Condition cond, Register obj, Register shape, Label* label)
+MacroAssembler::branchTestObjShape(Condition cond, Register obj, Register shape, Register scratch,
+                                   Register spectreRegToZero, Label* label)
+{
+    MOZ_ASSERT(obj != scratch);
+    MOZ_ASSERT(obj != shape);
+    MOZ_ASSERT(spectreRegToZero != scratch);
+
+    if (JitOptions.spectreObjectMitigationsMisc)
+        move32(Imm32(0), scratch);
+
+    branchPtr(cond, Address(obj, ShapedObject::offsetOfShape()), shape, label);
+
+    if (JitOptions.spectreObjectMitigationsMisc)
+        spectreMovePtr(cond, scratch, spectreRegToZero);
+}
+
+void
+MacroAssembler::branchTestObjShapeNoSpectreMitigations(Condition cond, Register obj, Register shape,
+                                                       Label* label)
 {
     branchPtr(cond, Address(obj, ShapedObject::offsetOfShape()), shape, label);
 }
 
 void
+MacroAssembler::branchTestObjShapeUnsafe(Condition cond, Register obj, Register shape,
+                                         Label* label)
+{
+    branchTestObjShapeNoSpectreMitigations(cond, obj, shape, label);
+}
+
+void
 MacroAssembler::branchTestObjGroup(Condition cond, Register obj, const ObjectGroup* group,
-                                   Label* label)
+                                   Register scratch, Register spectreRegToZero, Label* label)
+{
+    MOZ_ASSERT(obj != scratch);
+    MOZ_ASSERT(spectreRegToZero != scratch);
+
+    if (JitOptions.spectreObjectMitigationsMisc)
+        move32(Imm32(0), scratch);
+
+    branchPtr(cond, Address(obj, JSObject::offsetOfGroup()), ImmGCPtr(group), label);
+
+    if (JitOptions.spectreObjectMitigationsMisc)
+        spectreMovePtr(cond, scratch, spectreRegToZero);
+}
+
+void
+MacroAssembler::branchTestObjGroupNoSpectreMitigations(Condition cond, Register obj,
+                                                       const ObjectGroup* group, Label* label)
 {
     branchPtr(cond, Address(obj, JSObject::offsetOfGroup()), ImmGCPtr(group), label);
 }
 
 void
-MacroAssembler::branchTestObjGroup(Condition cond, Register obj, Register group, Label* label)
+MacroAssembler::branchTestObjGroupUnsafe(Condition cond, Register obj, const ObjectGroup* group,
+                                         Label* label)
+{
+    branchTestObjGroupNoSpectreMitigations(cond, obj, group, label);
+}
+
+void
+MacroAssembler::branchTestObjGroup(Condition cond, Register obj, Register group, Register scratch,
+                                   Register spectreRegToZero, Label* label)
 {
+    MOZ_ASSERT(obj != scratch);
+    MOZ_ASSERT(obj != group);
+    MOZ_ASSERT(spectreRegToZero != scratch);
+
+    if (JitOptions.spectreObjectMitigationsMisc)
+        move32(Imm32(0), scratch);
+
+    branchPtr(cond, Address(obj, JSObject::offsetOfGroup()), group, label);
+
+    if (JitOptions.spectreObjectMitigationsMisc)
+        spectreMovePtr(cond, scratch, spectreRegToZero);
+}
+
+void
+MacroAssembler::branchTestObjGroupNoSpectreMitigations(Condition cond, Register obj, Register group,
+                                                       Label* label)
+{
+    MOZ_ASSERT(obj != group);
     branchPtr(cond, Address(obj, JSObject::offsetOfGroup()), group, label);
 }
 
 void
 MacroAssembler::branchTestClassIsProxy(bool proxy, Register clasp, Label* label)
 {
     branchTest32(proxy ? Assembler::NonZero : Assembler::Zero,
                  Address(clasp, Class::offsetOfFlags()),
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -3196,26 +3196,45 @@ MacroAssembler::branchIfNotInterpretedCo
     branchTest32(Assembler::Zero, scratch, Imm32(bits), label);
 
     // Check if the CONSTRUCTOR bit is set.
     bits = IMM32_16ADJ(JSFunction::CONSTRUCTOR);
     branchTest32(Assembler::Zero, scratch, Imm32(bits), label);
 }
 
 void
-MacroAssembler::branchTestObjGroup(Condition cond, Register obj, const Address& group,
-                                   Register scratch, Label* label)
+MacroAssembler::branchTestObjGroupNoSpectreMitigations(Condition cond, Register obj,
+                                                       const Address& group, Register scratch,
+                                                       Label* label)
 {
     // Note: obj and scratch registers may alias.
+    MOZ_ASSERT(group.base != scratch);
+    MOZ_ASSERT(group.base != obj);
 
     loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
     branchPtr(cond, group, scratch, label);
 }
 
 void
+MacroAssembler::branchTestObjGroup(Condition cond, Register obj, const Address& group,
+                                   Register scratch, Register spectreRegToZero, Label* label)
+{
+    // Note: obj and scratch registers may alias.
+    MOZ_ASSERT(group.base != scratch);
+    MOZ_ASSERT(group.base != obj);
+    MOZ_ASSERT(scratch != spectreRegToZero);
+
+    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
+    branchPtr(cond, group, scratch, label);
+
+    if (JitOptions.spectreObjectMitigationsMisc)
+        spectreZeroRegister(cond, scratch, spectreRegToZero);
+}
+
+void
 MacroAssembler::branchTestObjCompartment(Condition cond, Register obj, const Address& compartment,
                                          Register scratch, Label* label)
 {
     MOZ_ASSERT(obj != scratch);
     loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
     loadPtr(Address(scratch, ObjectGroup::offsetOfCompartment()), scratch);
     branchPtr(cond, compartment, scratch, label);
 }
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1172,28 +1172,62 @@ class MacroAssembler : public MacroAssem
     inline void branchFunctionKind(Condition cond, JSFunction::FunctionKind kind, Register fun,
                                    Register scratch, Label* label);
 
     void branchIfNotInterpretedConstructor(Register fun, Register scratch, Label* label);
 
     inline void branchIfObjectEmulatesUndefined(Register objReg, Register scratch, Label* slowCheck,
                                                 Label* label);
 
-    inline void branchTestObjClass(Condition cond, Register obj, Register scratch,
-                                   const js::Class* clasp, Label* label);
-    inline void branchTestObjClass(Condition cond, Register obj, Register scratch,
-                                   const Address& clasp, Label* label);
-    inline void branchTestObjShape(Condition cond, Register obj, const Shape* shape, Label* label);
-    inline void branchTestObjShape(Condition cond, Register obj, Register shape, Label* label);
+    // For all methods below: spectreRegToZero is a register that will be zeroed
+    // on speculatively executed code paths (when the branch should be taken but
+    // branch prediction speculates it isn't). Usually this will be the object
+    // register but the caller may pass a different register.
+
+    inline void branchTestObjClass(Condition cond, Register obj, const js::Class* clasp,
+                                   Register scratch, Register spectreRegToZero, Label* label);
+    inline void branchTestObjClassNoSpectreMitigations(Condition cond, Register obj,
+                                                       const js::Class* clasp, Register scratch,
+                                                       Label* label);
+
+    inline void branchTestObjClass(Condition cond, Register obj, const Address& clasp,
+                                   Register scratch, Register spectreRegToZero, Label* label);
+    inline void branchTestObjClassNoSpectreMitigations(Condition cond, Register obj,
+                                                       const Address& clasp, Register scratch,
+                                                       Label* label);
+
+    inline void branchTestObjShape(Condition cond, Register obj, const Shape* shape,
+                                   Register scratch, Register spectreRegToZero, Label* label);
+    inline void branchTestObjShapeNoSpectreMitigations(Condition cond, Register obj,
+                                                       const Shape* shape, Label* label);
+
+    inline void branchTestObjShape(Condition cond, Register obj, Register shape, Register scratch,
+                                   Register spectreRegToZero, Label* label);
+    inline void branchTestObjShapeNoSpectreMitigations(Condition cond, Register obj,
+                                                       Register shape, Label* label);
+
     inline void branchTestObjGroup(Condition cond, Register obj, const ObjectGroup* group,
-                                   Label* label);
-    inline void branchTestObjGroup(Condition cond, Register obj, Register group, Label* label);
+                                   Register scratch, Register spectreRegToZero, Label* label);
+    inline void branchTestObjGroupNoSpectreMitigations(Condition cond, Register obj,
+                                                       const ObjectGroup* group, Label* label);
+
+    inline void branchTestObjGroup(Condition cond, Register obj, Register group, Register scratch,
+                                   Register spectreRegToZero, Label* label);
+    inline void branchTestObjGroupNoSpectreMitigations(Condition cond, Register obj,
+                                                       Register group, Label* label);
 
     void branchTestObjGroup(Condition cond, Register obj, const Address& group, Register scratch,
-                            Label* label);
+                            Register spectreRegToZero, Label* label);
+    void branchTestObjGroupNoSpectreMitigations(Condition cond, Register obj, const Address& group,
+                                                Register scratch, Label* label);
+
+    // TODO: audit/fix callers to be Spectre safe.
+    inline void branchTestObjShapeUnsafe(Condition cond, Register obj, Register shape, Label* label);
+    inline void branchTestObjGroupUnsafe(Condition cond, Register obj, const ObjectGroup* group,
+                                         Label* label);
 
     void branchTestObjCompartment(Condition cond, Register obj, const Address& compartment,
                                   Register scratch, Label* label);
     void branchTestObjCompartment(Condition cond, Register obj, const JSCompartment* compartment,
                                   Register scratch, Label* label);
     void branchIfObjGroupHasNoAddendum(Register obj, Register scratch, Label* label);
     void branchIfPretenuredGroup(const ObjectGroup* group, Register scratch, Label* label);
 
@@ -1390,16 +1424,20 @@ class MacroAssembler : public MacroAssem
     inline void test32MovePtr(Condition cond, const Address& addr, Imm32 mask, Register src,
                               Register dest)
         DEFINED_ON(arm, arm64, mips_shared, x86, x64);
 
     // Conditional move for Spectre mitigations.
     inline void spectreMovePtr(Condition cond, Register src, Register dest)
         DEFINED_ON(arm, arm64, mips_shared, x86, x64);
 
+    // Zeroes dest if the condition is true.
+    inline void spectreZeroRegister(Condition cond, Register scratch, Register dest)
+        DEFINED_ON(arm, arm64, mips_shared, x86_shared);
+
     // Performs a bounds check and zeroes the index register if out-of-bounds
     // (to mitigate Spectre).
     inline void boundsCheck32ForLoad(Register index, Register length, Register scratch,
                                      Label* failure)
         DEFINED_ON(arm, arm64, mips_shared, x86_shared);
     inline void boundsCheck32ForLoad(Register index, const Address& length, Register scratch,
                                      Label* failure)
         DEFINED_ON(arm, arm64, mips_shared, x86_shared);
--- a/js/src/jit/SharedIC.cpp
+++ b/js/src/jit/SharedIC.cpp
@@ -2534,21 +2534,23 @@ ICTypeMonitor_SingleObject::Compiler::ge
 
 bool
 ICTypeMonitor_ObjectGroup::Compiler::generateStubCode(MacroAssembler& masm)
 {
     Label failure;
     masm.branchTestObject(Assembler::NotEqual, R0, &failure);
     MaybeWorkAroundAmdBug(masm);
 
-    // Guard on the object's ObjectGroup.
+    // Guard on the object's ObjectGroup. No Spectre mitigations are needed
+    // here: we're just recording type information for Ion compilation and
+    // it's safe to speculatively return.
     Register obj = masm.extractObject(R0, ExtractTemp0);
     Address expectedGroup(ICStubReg, ICTypeMonitor_ObjectGroup::offsetOfGroup());
-    masm.branchTestObjGroup(Assembler::NotEqual, obj, expectedGroup, R1.scratchReg(),
-                            &failure);
+    masm.branchTestObjGroupNoSpectreMitigations(Assembler::NotEqual, obj, expectedGroup,
+                                                R1.scratchReg(), &failure);
     MaybeWorkAroundAmdBug(masm);
 
     EmitReturnFromIC(masm);
     MaybeWorkAroundAmdBug(masm);
 
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
     return true;
--- a/js/src/jit/arm/MacroAssembler-arm-inl.h
+++ b/js/src/jit/arm/MacroAssembler-arm-inl.h
@@ -2183,16 +2183,22 @@ MacroAssembler::test32MovePtr(Condition 
 
 void
 MacroAssembler::spectreMovePtr(Condition cond, Register src, Register dest)
 {
     ma_mov(src, dest, LeaveCC, cond);
 }
 
 void
+MacroAssembler::spectreZeroRegister(Condition cond, Register, Register dest)
+{
+    ma_mov(Imm32(0), dest, cond);
+}
+
+void
 MacroAssembler::boundsCheck32ForLoad(Register index, Register length, Register scratch,
                                      Label* failure)
 {
     MOZ_ASSERT(index != length);
     MOZ_ASSERT(length != scratch);
     MOZ_ASSERT(index != scratch);
 
     if (JitOptions.spectreIndexMasking)
--- a/js/src/jit/arm64/MacroAssembler-arm64-inl.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
@@ -1862,16 +1862,23 @@ MacroAssembler::test32MovePtr(Condition 
 
 void
 MacroAssembler::spectreMovePtr(Condition cond, Register src, Register dest)
 {
     Csel(ARMRegister(dest, 64), ARMRegister(src, 64), ARMRegister(dest, 64), cond);
 }
 
 void
+MacroAssembler::spectreZeroRegister(Condition cond, Register, Register dest)
+{
+    Csel(ARMRegister(dest, 64), ARMRegister(dest, 64), vixl::xzr,
+         Assembler::InvertCondition(cond));
+}
+
+void
 MacroAssembler::boundsCheck32ForLoad(Register index, Register length, Register scratch,
                                      Label* failure)
 {
     MOZ_ASSERT(index != length);
     MOZ_ASSERT(length != scratch);
     MOZ_ASSERT(index != scratch);
 
     branch32(Assembler::BelowOrEqual, length, index, failure);
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -1032,16 +1032,22 @@ MacroAssembler::boundsCheck32ForLoad(Reg
 }
 
 void
 MacroAssembler::spectreMovePtr(Condition cond, Register src, Register dest)
 {
     MOZ_CRASH();
 }
 
+void
+MacroAssembler::spectreZeroRegister(Condition cond, Register scratch, Register dest)
+{
+    MOZ_CRASH();
+}
+
 // ========================================================================
 // Memory access primitives.
 void
 MacroAssembler::storeFloat32x3(FloatRegister src, const Address& dest)
 {
     MOZ_CRASH("NYI");
 }
 void
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -6586,27 +6586,30 @@ class LStoreUnboxedPointer : public LIns
     }
     const LAllocation* value() {
         return getOperand(2);
     }
 };
 
 // If necessary, convert an unboxed object in a particular group to its native
 // representation.
-class LConvertUnboxedObjectToNative : public LInstructionHelper<0, 1, 0>
+class LConvertUnboxedObjectToNative : public LInstructionHelper<1, 1, 1>
 {
   public:
     LIR_HEADER(ConvertUnboxedObjectToNative)
 
-    explicit LConvertUnboxedObjectToNative(const LAllocation& object)
+    LConvertUnboxedObjectToNative(const LAllocation& object, const LDefinition& temp)
       : LInstructionHelper(classOpcode)
     {
         setOperand(0, object);
-    }
-
+        setTemp(0, temp);
+    }
+    const LDefinition* temp() {
+        return getTemp(0);
+    }
     MConvertUnboxedObjectToNative* mir() {
         return mir_->toConvertUnboxedObjectToNative();
     }
 };
 
 class LArrayPopShiftV : public LInstructionHelper<BOX_PIECES, 1, 2>
 {
   public:
@@ -7485,45 +7488,22 @@ class LGetPropertyCacheT : public LInstr
     }
     const LDefinition* temp() {
         return getTemp(0);
     }
 };
 
 // Emit code to load a boxed value from an object's slots if its shape matches
 // one of the shapes observed by the baseline IC, else bails out.
-class LGetPropertyPolymorphicV : public LInstructionHelper<BOX_PIECES, 1, 0>
+class LGetPropertyPolymorphicV : public LInstructionHelper<BOX_PIECES, 1, 1>
 {
   public:
     LIR_HEADER(GetPropertyPolymorphicV)
 
-    explicit LGetPropertyPolymorphicV(const LAllocation& obj)
-      : LInstructionHelper(classOpcode)
-    {
-        setOperand(0, obj);
-    }
-    const LAllocation* obj() {
-        return getOperand(0);
-    }
-    const MGetPropertyPolymorphic* mir() const {
-        return mir_->toGetPropertyPolymorphic();
-    }
-    const char* extraName() const {
-        return PropertyNameToExtraName(mir()->name());
-    }
-};
-
-// Emit code to load a typed value from an object's slots if its shape matches
-// one of the shapes observed by the baseline IC, else bails out.
-class LGetPropertyPolymorphicT : public LInstructionHelper<1, 1, 1>
-{
-  public:
-    LIR_HEADER(GetPropertyPolymorphicT)
-
-    LGetPropertyPolymorphicT(const LAllocation& obj, const LDefinition& temp)
+    LGetPropertyPolymorphicV(const LAllocation& obj, const LDefinition& temp)
       : LInstructionHelper(classOpcode)
     {
         setOperand(0, obj);
         setTemp(0, temp);
     }
     const LAllocation* obj() {
         return getOperand(0);
     }
@@ -7533,72 +7513,112 @@ class LGetPropertyPolymorphicT : public 
     const MGetPropertyPolymorphic* mir() const {
         return mir_->toGetPropertyPolymorphic();
     }
     const char* extraName() const {
         return PropertyNameToExtraName(mir()->name());
     }
 };
 
+// Emit code to load a typed value from an object's slots if its shape matches
+// one of the shapes observed by the baseline IC, else bails out.
+class LGetPropertyPolymorphicT : public LInstructionHelper<1, 1, 2>
+{
+  public:
+    LIR_HEADER(GetPropertyPolymorphicT)
+
+    LGetPropertyPolymorphicT(const LAllocation& obj, const LDefinition& temp1,
+                             const LDefinition& temp2)
+      : LInstructionHelper(classOpcode)
+    {
+        setOperand(0, obj);
+        setTemp(0, temp1);
+        setTemp(1, temp2);
+    }
+    const LAllocation* obj() {
+        return getOperand(0);
+    }
+    const LDefinition* temp1() {
+        return getTemp(0);
+    }
+    const LDefinition* temp2() {
+        return getTemp(1);
+    }
+    const MGetPropertyPolymorphic* mir() const {
+        return mir_->toGetPropertyPolymorphic();
+    }
+    const char* extraName() const {
+        return PropertyNameToExtraName(mir()->name());
+    }
+};
+
 // Emit code to store a boxed value to an object's slots if its shape matches
 // one of the shapes observed by the baseline IC, else bails out.
-class LSetPropertyPolymorphicV : public LInstructionHelper<0, 1 + BOX_PIECES, 1>
+class LSetPropertyPolymorphicV : public LInstructionHelper<0, 1 + BOX_PIECES, 2>
 {
   public:
     LIR_HEADER(SetPropertyPolymorphicV)
 
     LSetPropertyPolymorphicV(const LAllocation& obj, const LBoxAllocation& value,
-                             const LDefinition& temp)
+                             const LDefinition& temp1, const LDefinition& temp2)
       : LInstructionHelper(classOpcode)
     {
         setOperand(0, obj);
         setBoxOperand(Value, value);
-        setTemp(0, temp);
+        setTemp(0, temp1);
+        setTemp(1, temp2);
     }
 
     static const size_t Value = 1;
 
     const LAllocation* obj() {
         return getOperand(0);
     }
-    const LDefinition* temp() {
-        return getTemp(0);
+    const LDefinition* temp1() {
+        return getTemp(0);
+    }
+    const LDefinition* temp2() {
+        return getTemp(1);
     }
     const MSetPropertyPolymorphic* mir() const {
         return mir_->toSetPropertyPolymorphic();
     }
 };
 
 // Emit code to store a typed value to an object's slots if its shape matches
 // one of the shapes observed by the baseline IC, else bails out.
-class LSetPropertyPolymorphicT : public LInstructionHelper<0, 2, 1>
+class LSetPropertyPolymorphicT : public LInstructionHelper<0, 2, 2>
 {
     MIRType valueType_;
 
   public:
     LIR_HEADER(SetPropertyPolymorphicT)
 
     LSetPropertyPolymorphicT(const LAllocation& obj, const LAllocation& value, MIRType valueType,
-                             const LDefinition& temp)
+                             const LDefinition& temp1, const LDefinition& temp2)
       : LInstructionHelper(classOpcode),
         valueType_(valueType)
     {
         setOperand(0, obj);
         setOperand(1, value);
-        setTemp(0, temp);
+        setTemp(0, temp1);
+        setTemp(1, temp2);
     }
 
     const LAllocation* obj() {
         return getOperand(0);
     }
     const LAllocation* value() {
         return getOperand(1);
     }
-    const LDefinition* temp() {
-        return getTemp(0);
+    const LDefinition* temp1() {
+        return getTemp(0);
+    }
+    const LDefinition* temp2() {
+        return getTemp(1);
     }
     MIRType valueType() const {
         return valueType_;
     }
     const MSetPropertyPolymorphic* mir() const {
         return mir_->toSetPropertyPolymorphic();
     }
     const char* extraName() const {
@@ -8381,32 +8401,37 @@ class LRest : public LCallInstructionHel
     const LAllocation* numActuals() {
         return getOperand(0);
     }
     MRest* mir() const {
         return mir_->toRest();
     }
 };
 
-class LGuardReceiverPolymorphic : public LInstructionHelper<0, 1, 1>
+class LGuardReceiverPolymorphic : public LInstructionHelper<1, 1, 2>
 {
   public:
     LIR_HEADER(GuardReceiverPolymorphic)
 
-    LGuardReceiverPolymorphic(const LAllocation& in, const LDefinition& temp)
+    LGuardReceiverPolymorphic(const LAllocation& in, const LDefinition& temp1,
+                              const LDefinition& temp2)
       : LInstructionHelper(classOpcode)
     {
         setOperand(0, in);
-        setTemp(0, temp);
+        setTemp(0, temp1);
+        setTemp(1, temp2);
     }
     const LAllocation* object() {
         return getOperand(0);
     }
-    const LDefinition* temp() {
-        return getTemp(0);
+    const LDefinition* temp1() {
+        return getTemp(0);
+    }
+    const LDefinition* temp2() {
+        return getTemp(1);
     }
     const MGuardReceiverPolymorphic* mir() const {
         return mir_->toGuardReceiverPolymorphic();
     }
 };
 
 class LGuardUnboxedExpando : public LInstructionHelper<0, 1, 0>
 {
@@ -8711,63 +8736,51 @@ class LGuardObjectIdentity : public LIns
     const LAllocation* expected() {
         return getOperand(1);
     }
     const MGuardObjectIdentity* mir() const {
         return mir_->toGuardObjectIdentity();
     }
 };
 
-class LGuardShape : public LInstructionHelper<0, 1, 0>
+class LGuardShape : public LInstructionHelper<1, 1, 1>
 {
   public:
     LIR_HEADER(GuardShape)
 
-    explicit LGuardShape(const LAllocation& in)
-      : LInstructionHelper(classOpcode)
-    {
-        setOperand(0, in);
-    }
-    const MGuardShape* mir() const {
-        return mir_->toGuardShape();
-    }
-};
-
-class LGuardObjectGroup : public LInstructionHelper<0, 1, 0>
-{
-  public:
-    LIR_HEADER(GuardObjectGroup)
-
-    explicit LGuardObjectGroup(const LAllocation& in)
-      : LInstructionHelper(classOpcode)
-    {
-        setOperand(0, in);
-    }
-    const MGuardObjectGroup* mir() const {
-        return mir_->toGuardObjectGroup();
-    }
-};
-
-// Guard against an object's class.
-class LGuardClass : public LInstructionHelper<0, 1, 1>
-{
-  public:
-    LIR_HEADER(GuardClass)
-
-    LGuardClass(const LAllocation& in, const LDefinition& temp)
+    LGuardShape(const LAllocation& in, const LDefinition& temp)
       : LInstructionHelper(classOpcode)
     {
         setOperand(0, in);
         setTemp(0, temp);
     }
-    const MGuardClass* mir() const {
-        return mir_->toGuardClass();
-    }
-    const LDefinition* tempInt() {
-        return getTemp(0);
+    const LDefinition* temp() {
+        return getTemp(0);
+    }
+    const MGuardShape* mir() const {
+        return mir_->toGuardShape();
+    }
+};
+
+class LGuardObjectGroup : public LInstructionHelper<1, 1, 1>
+{
+  public:
+    LIR_HEADER(GuardObjectGroup)
+
+    LGuardObjectGroup(const LAllocation& in, const LDefinition& temp)
+      : LInstructionHelper(classOpcode)
+    {
+        setOperand(0, in);
+        setTemp(0, temp);
+    }
+    const LDefinition* temp() {
+        return getTemp(0);
+    }
+    const MGuardObjectGroup* mir() const {
+        return mir_->toGuardObjectGroup();
     }
 };
 
 // Guard against the sharedness of a TypedArray's memory.
 class LGuardSharedTypedArray : public LInstructionHelper<0, 1, 1>
 {
   public:
     LIR_HEADER(GuardSharedTypedArray)
--- a/js/src/jit/shared/LOpcodes-shared.h
+++ b/js/src/jit/shared/LOpcodes-shared.h
@@ -260,17 +260,16 @@
     _(LoadSlotV)                    \
     _(LoadSlotT)                    \
     _(StoreSlotV)                   \
     _(StoreSlotT)                   \
     _(GuardShape)                   \
     _(GuardReceiverPolymorphic)     \
     _(GuardObjectGroup)             \
     _(GuardObjectIdentity)          \
-    _(GuardClass)                   \
     _(GuardUnboxedExpando)          \
     _(LoadUnboxedExpando)           \
     _(TypeBarrierV)                 \
     _(TypeBarrierO)                 \
     _(PostWriteBarrierO)            \
     _(PostWriteBarrierS)            \
     _(PostWriteBarrierV)            \
     _(PostWriteElementBarrierO)     \
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -1106,16 +1106,24 @@ void
 MacroAssembler::cmp32Move32(Condition cond, Register lhs, const Address& rhs, Register src,
                             Register dest)
 {
     cmp32(lhs, Operand(rhs));
     cmovCCl(cond, src, dest);
 }
 
 void
+MacroAssembler::spectreZeroRegister(Condition cond, Register scratch, Register dest)
+{
+    // Note: use movl instead of move32/xorl to ensure flags are not clobbered.
+    movl(Imm32(0), scratch);
+    spectreMovePtr(cond, scratch, dest);
+}
+
+void
 MacroAssembler::boundsCheck32ForLoad(Register index, Register length, Register scratch,
                                      Label* failure)
 {
     MOZ_ASSERT(index != length);
     MOZ_ASSERT(length != scratch);
     MOZ_ASSERT(index != scratch);
 
     if (JitOptions.spectreIndexMasking)
--- a/js/src/vm/UnboxedObject.cpp
+++ b/js/src/vm/UnboxedObject.cpp
@@ -533,38 +533,43 @@ UnboxedLayout::makeNativeGroup(JSContext
 
     nativeGroup->setOriginalUnboxedGroup(group);
 
     group->markStateChange(cx);
 
     return true;
 }
 
-/* static */ bool
+/* static */ NativeObject*
 UnboxedPlainObject::convertToNative(JSContext* cx, JSObject* obj)
 {
+    // This function returns the original object (instead of bool) to make sure
+    // Ion's LConvertUnboxedObjectToNative works correctly. If we return bool
+    // and use defineReuseInput, the object register is not preserved across the
+    // call.
+
     const UnboxedLayout& layout = obj->as<UnboxedPlainObject>().layout();
     UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando();
 
     if (!layout.nativeGroup()) {
         if (!UnboxedLayout::makeNativeGroup(cx, obj->group()))
-            return false;
+            return nullptr;
 
         // makeNativeGroup can reentrantly invoke this method.
         if (obj->is<PlainObject>())
-            return true;
+            return &obj->as<PlainObject>();
     }
 
     AutoValueVector values(cx);
     for (size_t i = 0; i < layout.properties().length(); i++) {
         // We might be reading properties off the object which have not been
         // initialized yet. Make sure any double values we read here are
         // canonicalized.
         if (!values.append(obj->as<UnboxedPlainObject>().getValue(layout.properties()[i], true)))
-            return false;
+            return nullptr;
     }
 
     // We are eliminating the expando edge with the conversion, so trigger a
     // pre barrier.
     JSObject::writeBarrierPre(expando);
 
     // Additionally trigger a post barrier on the expando itself. Whole cell
     // store buffer entries can be added on the original unboxed object for
@@ -574,52 +579,53 @@ UnboxedPlainObject::convertToNative(JSCo
         cx->zone()->group()->storeBuffer().putWholeCell(expando);
 
     obj->setGroup(layout.nativeGroup());
     obj->as<PlainObject>().setLastPropertyMakeNative(cx, layout.nativeShape());
 
     for (size_t i = 0; i < values.length(); i++)
         obj->as<PlainObject>().initSlotUnchecked(i, values[i]);
 
-    if (expando) {
-        // Add properties from the expando object to the object, in order.
-        // Suppress GC here, so that callers don't need to worry about this
-        // method collecting. The stuff below can only fail due to OOM, in
-        // which case the object will not have been completely filled back in.
-        gc::AutoSuppressGC suppress(cx);
+    if (!expando)
+        return &obj->as<PlainObject>();
+
+    // Add properties from the expando object to the object, in order.
+    // Suppress GC here, so that callers don't need to worry about this
+    // method collecting. The stuff below can only fail due to OOM, in
+    // which case the object will not have been completely filled back in.
+    gc::AutoSuppressGC suppress(cx);
 
-        Vector<jsid> ids(cx);
-        for (Shape::Range<NoGC> r(expando->lastProperty()); !r.empty(); r.popFront()) {
-            if (!ids.append(r.front().propid()))
-                return false;
-        }
-        for (size_t i = 0; i < expando->getDenseInitializedLength(); i++) {
-            if (!expando->getDenseElement(i).isMagic(JS_ELEMENTS_HOLE)) {
-                if (!ids.append(INT_TO_JSID(i)))
-                    return false;
-            }
-        }
-        ::Reverse(ids.begin(), ids.end());
-
-        RootedPlainObject nobj(cx, &obj->as<PlainObject>());
-        Rooted<UnboxedExpandoObject*> nexpando(cx, expando);
-        RootedId id(cx);
-        Rooted<PropertyDescriptor> desc(cx);
-        for (size_t i = 0; i < ids.length(); i++) {
-            id = ids[i];
-            if (!GetOwnPropertyDescriptor(cx, nexpando, id, &desc))
-                return false;
-            ObjectOpResult result;
-            if (!DefineProperty(cx, nobj, id, desc, result))
-                return false;
-            MOZ_ASSERT(result.ok());
+    Vector<jsid> ids(cx);
+    for (Shape::Range<NoGC> r(expando->lastProperty()); !r.empty(); r.popFront()) {
+        if (!ids.append(r.front().propid()))
+            return nullptr;
+    }
+    for (size_t i = 0; i < expando->getDenseInitializedLength(); i++) {
+        if (!expando->getDenseElement(i).isMagic(JS_ELEMENTS_HOLE)) {
+            if (!ids.append(INT_TO_JSID(i)))
+                return nullptr;
         }
     }
+    ::Reverse(ids.begin(), ids.end());
 
-    return true;
+    RootedPlainObject nobj(cx, &obj->as<PlainObject>());
+    Rooted<UnboxedExpandoObject*> nexpando(cx, expando);
+    RootedId id(cx);
+    Rooted<PropertyDescriptor> desc(cx);
+    for (size_t i = 0; i < ids.length(); i++) {
+        id = ids[i];
+        if (!GetOwnPropertyDescriptor(cx, nexpando, id, &desc))
+            return nullptr;
+        ObjectOpResult result;
+        if (!DefineProperty(cx, nobj, id, desc, result))
+            return nullptr;
+        MOZ_ASSERT(result.ok());
+    }
+
+    return nobj;
 }
 
 /* static */ JS::Result<UnboxedObject*, JS::OOM&>
 UnboxedObject::createInternal(JSContext* cx, js::gc::AllocKind kind, js::gc::InitialHeap heap,
                               js::HandleObjectGroup group)
 {
     const js::Class* clasp = group->clasp();
     MOZ_ASSERT(clasp == &UnboxedPlainObject::class_);
--- a/js/src/vm/UnboxedObject.h
+++ b/js/src/vm/UnboxedObject.h
@@ -292,17 +292,17 @@ class UnboxedPlainObject : public Unboxe
 
     bool containsUnboxedOrExpandoProperty(JSContext* cx, jsid id) const;
 
     static UnboxedExpandoObject* ensureExpando(JSContext* cx, Handle<UnboxedPlainObject*> obj);
 
     bool setValue(JSContext* cx, const UnboxedLayout::Property& property, const Value& v);
     Value getValue(const UnboxedLayout::Property& property, bool maybeUninitialized = false);
 
-    static bool convertToNative(JSContext* cx, JSObject* obj);
+    static NativeObject* convertToNative(JSContext* cx, JSObject* obj);
     static UnboxedPlainObject* create(JSContext* cx, HandleObjectGroup group,
                                       NewObjectKind newKind);
     static JSObject* createWithProperties(JSContext* cx, HandleObjectGroup group,
                                           NewObjectKind newKind, IdValuePair* properties);
 
     void fillAfterConvert(JSContext* cx,
                           Handle<GCVector<Value>> values, size_t* valueCursor);