Bug 986680 - Part 3/4 - Convert x64 to ScratchRegisterScope. r=nbp
author: Sean Stangl <sstangl@mozilla.com>
date: Thu, 13 Aug 2015 13:55:58 -0700
changeset 259896 9bbda285615621242bae425e5c520b3c89331464
parent 259895 277ae9b3ad6f5351e17399ddac8952d66df628a0
child 259897 6de8ebf59df0150f939552417452a9b5d0ffbb92
push id: 29296
push user: ryanvm@gmail.com
push date: Sun, 30 Aug 2015 19:45:10 +0000
treeherder: mozilla-central@2ad5077d86ba
reviewers: nbp
bugs: 986680
milestone: 43.0a1
js/src/jit/x64/BaselineIC-x64.cpp
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x64/MacroAssembler-x64.h
js/src/jit/x64/SharedICHelpers-x64.h
js/src/jit/x86-shared/Assembler-x86-shared.h
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
js/src/jit/x86-shared/MacroAssembler-x86-shared.h
js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
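
This patch mechanically replaces direct uses of the reserved ScratchReg, ScratchDoubleReg, and ScratchFloat32Reg names with RAII scope objects, so overlapping claims on a scratch register can be asserted against in debug builds. A minimal sketch of the pattern being adopted, with hypothetical claim/release helpers standing in for the real MacroAssembler bookkeeping:

    // Illustrative sketch only; the real ScratchRegisterScope lives in the
    // jit/ MacroAssembler headers. claimScratch()/releaseScratch() are
    // assumed helper names, not actual SpiderMonkey API.
    class ScratchRegisterScope
    {
        MacroAssembler& masm_;
      public:
        explicit ScratchRegisterScope(MacroAssembler& masm)
          : masm_(masm)
        {
            masm_.claimScratch();    // asserts nobody else holds ScratchReg
        }
        ~ScratchRegisterScope() {
            masm_.releaseScratch();  // hand the register back
        }
        operator Register() const { return ScratchReg; }
    };

The implicit conversion to Register is what makes the conversion below mostly mechanical: a scratch local can appear anywhere ScratchReg previously did.
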
--- a/js/src/jit/x64/BaselineIC-x64.cpp
+++ b/js/src/jit/x64/BaselineIC-x64.cpp
@@ -19,23 +19,24 @@ bool
 ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
 {
     // Guard that R0 is an integer and R1 is an integer.
     Label failure;
     masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
     masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
 
     // Directly compare the int32 payload of R0 and R1.
+    ScratchRegisterScope scratch(masm);
     Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
-    masm.mov(ImmWord(0), ScratchReg);
+    masm.mov(ImmWord(0), scratch);
     masm.cmp32(R0.valueReg(), R1.valueReg());
-    masm.setCC(cond, ScratchReg);
+    masm.setCC(cond, scratch);
 
     // Box the result and return
-    masm.boxValue(JSVAL_TYPE_BOOLEAN, ScratchReg, R0.valueReg());
+    masm.boxValue(JSVAL_TYPE_BOOLEAN, scratch, R0.valueReg());
     EmitReturnFromIC(masm);
 
     // Failure case - jump to next stub
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
 
     return true;
 }
@@ -45,16 +46,19 @@ ICCompare_Int32::Compiler::generateStubC
 bool
 ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
 {
     // Guard that R0 is an integer and R1 is an integer.
     Label failure;
     masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
     masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
 
+    // The scratch register is only used in the case of JSOP_URSH.
+    mozilla::Maybe<ScratchRegisterScope> scratch;
+
     Label revertRegister, maybeNegZero;
     switch(op_) {
       case JSOP_ADD:
         masm.unboxInt32(R0, ExtractTemp0);
         // Just jump to failure on overflow. R0 and R1 are preserved, so we can just jump to
         // the next stub.
         masm.addl(R1.valueReg(), ExtractTemp0);
         masm.j(Assembler::Overflow, &failure);
@@ -154,65 +158,72 @@ ICBinaryArith_Int32::Compiler::generateS
         break;
       case JSOP_RSH:
         masm.unboxInt32(R0, ExtractTemp0);
         masm.unboxInt32(R1, ecx);
         masm.sarl_cl(ExtractTemp0);
         masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
         break;
       case JSOP_URSH:
-        if (!allowDouble_)
-            masm.movq(R0.valueReg(), ScratchReg);
+        if (!allowDouble_) {
+            scratch.emplace(masm);
+            masm.movq(R0.valueReg(), *scratch);
+        }
 
         masm.unboxInt32(R0, ExtractTemp0);
         masm.unboxInt32(R1, ecx); // This clobbers R0
 
         masm.shrl_cl(ExtractTemp0);
         masm.test32(ExtractTemp0, ExtractTemp0);
         if (allowDouble_) {
             Label toUint;
             masm.j(Assembler::Signed, &toUint);
 
             // Box and return.
             masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
             EmitReturnFromIC(masm);
 
             masm.bind(&toUint);
-            masm.convertUInt32ToDouble(ExtractTemp0, ScratchDoubleReg);
-            masm.boxDouble(ScratchDoubleReg, R0);
+            ScratchDoubleScope scratchDouble(masm);
+            masm.convertUInt32ToDouble(ExtractTemp0, scratchDouble);
+            masm.boxDouble(scratchDouble, R0);
         } else {
             masm.j(Assembler::Signed, &revertRegister);
             masm.boxValue(JSVAL_TYPE_INT32, ExtractTemp0, R0.valueReg());
         }
         break;
       default:
         MOZ_CRASH("Unhandled op in BinaryArith_Int32");
     }
 
     // Return from stub.
     EmitReturnFromIC(masm);
 
     if (op_ == JSOP_MUL) {
         masm.bind(&maybeNegZero);
 
         // Result is -0 if exactly one of lhs or rhs is negative.
-        masm.movl(R0.valueReg(), ScratchReg);
-        masm.orl(R1.valueReg(), ScratchReg);
-        masm.j(Assembler::Signed, &failure);
+        {
+            ScratchRegisterScope scratch(masm);
+            masm.movl(R0.valueReg(), scratch);
+            masm.orl(R1.valueReg(), scratch);
+            masm.j(Assembler::Signed, &failure);
+        }
 
         // Result is +0.
         masm.moveValue(Int32Value(0), R0);
         EmitReturnFromIC(masm);
     }
 
     // Revert the content of R0 in the fallible >>> case.
     if (op_ == JSOP_URSH && !allowDouble_) {
+        // Scope continuation from JSOP_URSH case above.
         masm.bind(&revertRegister);
         // Restore tag and payload.
-        masm.movq(ScratchReg, R0.valueReg());
+        masm.movq(*scratch, R0.valueReg());
         // Fall through to failure.
     }
     // Failure case - jump to next stub
     masm.bind(&failure);
     EmitStubGuardFailure(masm);
 
     return true;
 }
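
The JSOP_URSH case above is the one spot where the scratch register must stay live across separately emitted pieces of code: it is claimed inside the switch but only released after the revertRegister label near the end of the function. mozilla::Maybe makes that conditional lifetime explicit. A reduced sketch of the idiom, assuming only the emplace()/operator* interface of mfbt's Maybe.h:

    #include "mozilla/Maybe.h"

    struct Guard                       // stand-in for ScratchRegisterScope
    {
        Guard()  { /* claim the resource */ }
        ~Guard() { /* release the resource */ }
    };

    void emit(bool fallible)
    {
        mozilla::Maybe<Guard> guard;   // empty: nothing claimed yet
        if (fallible)
            guard.emplace();           // claim only on the path that needs it

        // ... code emitted for both paths ...

        if (fallible) {
            // *guard is still engaged here; the Maybe spans both branches.
        }
    }                                  // released on return, iff emplace() ran
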
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -67,20 +67,21 @@ CodeGeneratorX64::visitValue(LValue* val
 
 void
 CodeGeneratorX64::visitBox(LBox* box)
 {
     const LAllocation* in = box->getOperand(0);
     const LDefinition* result = box->getDef(0);
 
     if (IsFloatingPointType(box->type())) {
+        ScratchDoubleScope scratch(masm);
         FloatRegister reg = ToFloatRegister(in);
         if (box->type() == MIRType_Float32) {
-            masm.convertFloat32ToDouble(reg, ScratchDoubleReg);
-            reg = ScratchDoubleReg;
+            masm.convertFloat32ToDouble(reg, scratch);
+            reg = scratch;
         }
         masm.vmovq(reg, ToRegister(result));
     } else {
         masm.boxValue(ValueTypeFromMIRType(box->type()), ToRegister(in), ToRegister(result));
     }
 }
 
 void
@@ -143,44 +144,46 @@ CodeGeneratorX64::visitCompareB(LCompare
 
     const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
     const LAllocation* rhs = lir->rhs();
     const Register output = ToRegister(lir->output());
 
     MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
 
     // Load boxed boolean in ScratchReg.
+    ScratchRegisterScope scratch(masm);
     if (rhs->isConstant())
-        masm.moveValue(*rhs->toConstant(), ScratchReg);
+        masm.moveValue(*rhs->toConstant(), scratch);
     else
-        masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchReg);
+        masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);
 
     // Perform the comparison.
-    masm.cmpPtr(lhs.valueReg(), ScratchReg);
+    masm.cmpPtr(lhs.valueReg(), scratch);
     masm.emitSet(JSOpToCondition(mir->compareType(), mir->jsop()), output);
 }
 
 void
 CodeGeneratorX64::visitCompareBAndBranch(LCompareBAndBranch* lir)
 {
     MCompare* mir = lir->cmpMir();
 
     const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
     const LAllocation* rhs = lir->rhs();
 
     MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
 
     // Load boxed boolean in ScratchReg.
+    ScratchRegisterScope scratch(masm);
     if (rhs->isConstant())
-        masm.moveValue(*rhs->toConstant(), ScratchReg);
+        masm.moveValue(*rhs->toConstant(), scratch);
     else
-        masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchReg);
+        masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), scratch);
 
     // Perform the comparison.
-    masm.cmpPtr(lhs.valueReg(), ScratchReg);
+    masm.cmpPtr(lhs.valueReg(), scratch);
     emitBranch(JSOpToCondition(mir->compareType(), mir->jsop()), lir->ifTrue(), lir->ifFalse());
 }
 
 void
 CodeGeneratorX64::visitCompareV(LCompareV* lir)
 {
     MCompare* mir = lir->mir();
     const ValueOperand lhs = ToValue(lir, LCompareV::LhsInput);
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -314,25 +314,27 @@ MacroAssemblerX64::callWithExitFrame(Jit
     makeFrameDescriptor(dynStack, JitFrame_IonJS);
     asMasm().Push(dynStack);
     asMasm().call(target);
 }
 
 void
 MacroAssemblerX64::branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, Label* label)
 {
+    ScratchRegisterScope scratch(asMasm());
+
     MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
     MOZ_ASSERT(ptr != temp);
-    MOZ_ASSERT(ptr != ScratchReg);
+    MOZ_ASSERT(ptr != scratch);
 
     const Nursery& nursery = GetJitContext()->runtime->gcNursery();
-    movePtr(ImmWord(-ptrdiff_t(nursery.start())), ScratchReg);
-    addPtr(ptr, ScratchReg);
+    movePtr(ImmWord(-ptrdiff_t(nursery.start())), scratch);
+    addPtr(ptr, scratch);
     branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual,
-              ScratchReg, Imm32(nursery.nurserySize()), label);
+              scratch, Imm32(nursery.nurserySize()), label);
 }
 
 void
 MacroAssemblerX64::branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp,
                                               Label* label)
 {
     MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
 
@@ -340,20 +342,21 @@ MacroAssemblerX64::branchValueIsNurseryO
 
     // Avoid creating a bogus ObjectValue below.
     if (!nursery.exists())
         return;
 
     // 'Value' representing the start of the nursery tagged as a JSObject
     Value start = ObjectValue(*reinterpret_cast<JSObject*>(nursery.start()));
 
-    movePtr(ImmWord(-ptrdiff_t(start.asRawBits())), ScratchReg);
-    addPtr(value.valueReg(), ScratchReg);
+    ScratchRegisterScope scratch(asMasm());
+    movePtr(ImmWord(-ptrdiff_t(start.asRawBits())), scratch);
+    addPtr(value.valueReg(), scratch);
     branchPtr(cond == Assembler::Equal ? Assembler::Below : Assembler::AboveOrEqual,
-              ScratchReg, Imm32(nursery.nurserySize()), label);
+              scratch, Imm32(nursery.nurserySize()), label);
 }
 
 void
 MacroAssemblerX64::profilerEnterFrame(Register framePtr, Register scratch)
 {
     AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
     loadPtr(activation, scratch);
     storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
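
Both nursery checks above use the classic range-check trick: loading the negated nursery start and adding the pointer folds the two-sided test start <= ptr < start + size into a single unsigned comparison. The emitted movePtr/addPtr/branchPtr sequence is equivalent to this plain C++:

    #include <cstdint>

    // Subtracting the start makes pointers below the nursery wrap to huge
    // unsigned values, so one Below comparison covers both bounds at once.
    bool inNurseryRange(uintptr_t ptr, uintptr_t start, uintptr_t size)
    {
        return (ptr - start) < size;
    }
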
--- a/js/src/jit/x64/MacroAssembler-x64.h
+++ b/js/src/jit/x64/MacroAssembler-x64.h
@@ -154,69 +154,74 @@ class MacroAssemblerX64 : public MacroAs
     }
     template <typename T>
     void storeValue(JSValueType type, Register reg, const T& dest) {
         // Value types with 32-bit payloads can be emitted as two 32-bit moves.
         if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
             movl(reg, Operand(dest));
             movl(Imm32(Upper32Of(GetShiftedTag(type))), ToUpper32(Operand(dest)));
         } else {
-            boxValue(type, reg, ScratchReg);
-            movq(ScratchReg, Operand(dest));
+            ScratchRegisterScope scratch(asMasm());
+            boxValue(type, reg, scratch);
+            movq(scratch, Operand(dest));
         }
     }
     template <typename T>
     void storeValue(const Value& val, const T& dest) {
+        ScratchRegisterScope scratch(asMasm());
         jsval_layout jv = JSVAL_TO_IMPL(val);
         if (val.isMarkable()) {
-            movWithPatch(ImmWord(jv.asBits), ScratchReg);
+            movWithPatch(ImmWord(jv.asBits), scratch);
             writeDataRelocation(val);
         } else {
-            mov(ImmWord(jv.asBits), ScratchReg);
+            mov(ImmWord(jv.asBits), scratch);
         }
-        movq(ScratchReg, Operand(dest));
+        movq(scratch, Operand(dest));
     }
     void storeValue(ValueOperand val, BaseIndex dest) {
         storeValue(val, Operand(dest));
     }
     void loadValue(Operand src, ValueOperand val) {
         movq(src, val.valueReg());
     }
     void loadValue(Address src, ValueOperand val) {
         loadValue(Operand(src), val);
     }
     void loadValue(const BaseIndex& src, ValueOperand val) {
         loadValue(Operand(src), val);
     }
     void tagValue(JSValueType type, Register payload, ValueOperand dest) {
-        MOZ_ASSERT(dest.valueReg() != ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        MOZ_ASSERT(dest.valueReg() != scratch);
         if (payload != dest.valueReg())
             movq(payload, dest.valueReg());
-        mov(ImmShiftedTag(type), ScratchReg);
-        orq(ScratchReg, dest.valueReg());
+        mov(ImmShiftedTag(type), scratch);
+        orq(scratch, dest.valueReg());
     }
     void pushValue(ValueOperand val) {
         push(val.valueReg());
     }
     void popValue(ValueOperand val) {
         pop(val.valueReg());
     }
     void pushValue(const Value& val) {
         jsval_layout jv = JSVAL_TO_IMPL(val);
         if (val.isMarkable()) {
-            movWithPatch(ImmWord(jv.asBits), ScratchReg);
+            ScratchRegisterScope scratch(asMasm());
+            movWithPatch(ImmWord(jv.asBits), scratch);
             writeDataRelocation(val);
-            push(ScratchReg);
+            push(scratch);
         } else {
             push(ImmWord(jv.asBits));
         }
     }
     void pushValue(JSValueType type, Register reg) {
-        boxValue(type, reg, ScratchReg);
-        push(ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        boxValue(type, reg, scratch);
+        push(scratch);
     }
     void pushValue(const Address& addr) {
         push(Operand(addr));
     }
 
     void moveValue(const Value& val, Register dest) {
         jsval_layout jv = JSVAL_TO_IMPL(val);
         movWithPatch(ImmWord(jv.asBits), dest);
@@ -307,189 +312,226 @@ class MacroAssemblerX64 : public MacroAs
     }
     Condition testPrimitive(Condition cond, Register tag) {
         MOZ_ASSERT(cond == Equal || cond == NotEqual);
         cmp32(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET));
         return cond == Equal ? Below : AboveOrEqual;
     }
 
     Condition testUndefined(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testUndefined(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testUndefined(cond, scratch);
     }
     Condition testInt32(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testInt32(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testInt32(cond, scratch);
     }
     Condition testBoolean(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testBoolean(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testBoolean(cond, scratch);
     }
     Condition testDouble(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testDouble(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testDouble(cond, scratch);
     }
     Condition testNumber(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testNumber(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testNumber(cond, scratch);
     }
     Condition testNull(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testNull(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testNull(cond, scratch);
     }
     Condition testString(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testString(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testString(cond, scratch);
     }
     Condition testSymbol(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testSymbol(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testSymbol(cond, scratch);
     }
     Condition testObject(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testObject(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testObject(cond, scratch);
     }
     Condition testGCThing(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testGCThing(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testGCThing(cond, scratch);
     }
     Condition testPrimitive(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testPrimitive(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testPrimitive(cond, scratch);
     }
 
 
     Condition testUndefined(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testUndefined(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testUndefined(cond, scratch);
     }
     Condition testInt32(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testInt32(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testInt32(cond, scratch);
     }
     Condition testBoolean(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testBoolean(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testBoolean(cond, scratch);
     }
     Condition testDouble(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testDouble(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testDouble(cond, scratch);
     }
     Condition testNumber(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testNumber(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testNumber(cond, scratch);
     }
     Condition testNull(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testNull(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testNull(cond, scratch);
     }
     Condition testString(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testString(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testString(cond, scratch);
     }
     Condition testSymbol(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testSymbol(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testSymbol(cond, scratch);
     }
     Condition testObject(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testObject(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testObject(cond, scratch);
     }
     Condition testPrimitive(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testPrimitive(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testPrimitive(cond, scratch);
     }
     Condition testGCThing(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testGCThing(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testGCThing(cond, scratch);
     }
     Condition testMagic(Condition cond, const Address& src) {
-        splitTag(src, ScratchReg);
-        return testMagic(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testMagic(cond, scratch);
     }
 
 
     Condition testUndefined(Condition cond, const BaseIndex& src) {
-        splitTag(src, ScratchReg);
-        return testUndefined(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testUndefined(cond, scratch);
     }
     Condition testNull(Condition cond, const BaseIndex& src) {
-        splitTag(src, ScratchReg);
-        return testNull(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testNull(cond, scratch);
     }
     Condition testBoolean(Condition cond, const BaseIndex& src) {
-        splitTag(src, ScratchReg);
-        return testBoolean(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testBoolean(cond, scratch);
     }
     Condition testString(Condition cond, const BaseIndex& src) {
-        splitTag(src, ScratchReg);
-        return testString(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testString(cond, scratch);
     }
     Condition testSymbol(Condition cond, const BaseIndex& src) {
-        splitTag(src, ScratchReg);
-        return testSymbol(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testSymbol(cond, scratch);
     }
     Condition testInt32(Condition cond, const BaseIndex& src) {
-        splitTag(src, ScratchReg);
-        return testInt32(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testInt32(cond, scratch);
     }
     Condition testObject(Condition cond, const BaseIndex& src) {
-        splitTag(src, ScratchReg);
-        return testObject(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testObject(cond, scratch);
     }
     Condition testDouble(Condition cond, const BaseIndex& src) {
-        splitTag(src, ScratchReg);
-        return testDouble(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testDouble(cond, scratch);
     }
     Condition testMagic(Condition cond, const BaseIndex& src) {
-        splitTag(src, ScratchReg);
-        return testMagic(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testMagic(cond, scratch);
     }
     Condition testGCThing(Condition cond, const BaseIndex& src) {
-        splitTag(src, ScratchReg);
-        return testGCThing(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testGCThing(cond, scratch);
     }
 
     Condition isMagic(Condition cond, const ValueOperand& src, JSWhyMagic why) {
         uint64_t magic = MagicValue(why).asRawBits();
         cmpPtr(src.valueReg(), ImmWord(magic));
         return cond;
     }
 
     void cmpPtr(Register lhs, const ImmWord rhs) {
-        MOZ_ASSERT(lhs != ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        MOZ_ASSERT(lhs != scratch);
         if (intptr_t(rhs.value) <= INT32_MAX && intptr_t(rhs.value) >= INT32_MIN) {
             cmpPtr(lhs, Imm32(int32_t(rhs.value)));
         } else {
-            movePtr(rhs, ScratchReg);
-            cmpPtr(lhs, ScratchReg);
+            movePtr(rhs, scratch);
+            cmpPtr(lhs, scratch);
         }
     }
     void cmpPtr(Register lhs, const ImmPtr rhs) {
         cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
     }
     void cmpPtr(Register lhs, const ImmGCPtr rhs) {
-        MOZ_ASSERT(lhs != ScratchReg);
-        movePtr(rhs, ScratchReg);
-        cmpPtr(lhs, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        MOZ_ASSERT(lhs != scratch);
+        movePtr(rhs, scratch);
+        cmpPtr(lhs, scratch);
     }
     void cmpPtr(Register lhs, const Imm32 rhs) {
         cmpq(rhs, lhs);
     }
     void cmpPtr(const Operand& lhs, const ImmGCPtr rhs) {
-        MOZ_ASSERT(!lhs.containsReg(ScratchReg));
-        movePtr(rhs, ScratchReg);
-        cmpPtr(lhs, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        MOZ_ASSERT(!lhs.containsReg(scratch));
+        movePtr(rhs, scratch);
+        cmpPtr(lhs, scratch);
     }
     void cmpPtr(const Operand& lhs, const ImmWord rhs) {
         if ((intptr_t)rhs.value <= INT32_MAX && (intptr_t)rhs.value >= INT32_MIN) {
             cmpPtr(lhs, Imm32((int32_t)rhs.value));
         } else {
-            movePtr(rhs, ScratchReg);
-            cmpPtr(lhs, ScratchReg);
+            ScratchRegisterScope scratch(asMasm());
+            movePtr(rhs, scratch);
+            cmpPtr(lhs, scratch);
         }
     }
     void cmpPtr(const Operand& lhs, const ImmPtr rhs) {
         cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
     }
     void cmpPtr(const Address& lhs, const ImmGCPtr rhs) {
         cmpPtr(Operand(lhs), rhs);
     }
@@ -543,22 +585,23 @@ class MacroAssemblerX64 : public MacroAs
     }
     void addPtr(Imm32 imm, const Address& dest) {
         addq(imm, Operand(dest));
     }
     void addPtr(Imm32 imm, const Operand& dest) {
         addq(imm, dest);
     }
     void addPtr(ImmWord imm, Register dest) {
-        MOZ_ASSERT(dest != ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        MOZ_ASSERT(dest != scratch);
         if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
             addq(Imm32((int32_t)imm.value), dest);
         } else {
-            mov(imm, ScratchReg);
-            addq(ScratchReg, dest);
+            mov(imm, scratch);
+            addq(scratch, dest);
         }
     }
     void addPtr(ImmPtr imm, Register dest) {
         addPtr(ImmWord(uintptr_t(imm.value)), dest);
     }
     void addPtr(const Address& src, Register dest) {
         addq(Operand(src), dest);
     }
@@ -577,79 +620,87 @@ class MacroAssemblerX64 : public MacroAs
     void mulBy3(const Register& src, const Register& dest) {
         lea(Operand(src, src, TimesTwo), dest);
     }
 
     void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label* label) {
         if (X86Encoding::IsAddressImmediate(lhs.addr)) {
             branch32(cond, Operand(lhs), rhs, label);
         } else {
-            mov(ImmPtr(lhs.addr), ScratchReg);
-            branch32(cond, Address(ScratchReg, 0), rhs, label);
+            ScratchRegisterScope scratch(asMasm());
+            mov(ImmPtr(lhs.addr), scratch);
+            branch32(cond, Address(scratch, 0), rhs, label);
         }
     }
     void branch32(Condition cond, AsmJSAbsoluteAddress lhs, Imm32 rhs, Label* label) {
-        mov(AsmJSImmPtr(lhs.kind()), ScratchReg);
-        branch32(cond, Address(ScratchReg, 0), rhs, label);
+        ScratchRegisterScope scratch(asMasm());
+        mov(AsmJSImmPtr(lhs.kind()), scratch);
+        branch32(cond, Address(scratch, 0), rhs, label);
     }
     void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) {
         if (X86Encoding::IsAddressImmediate(lhs.addr)) {
             branch32(cond, Operand(lhs), rhs, label);
         } else {
-            mov(ImmPtr(lhs.addr), ScratchReg);
-            branch32(cond, Address(ScratchReg, 0), rhs, label);
+            ScratchRegisterScope scratch(asMasm());
+            mov(ImmPtr(lhs.addr), scratch);
+            branch32(cond, Address(scratch, 0), rhs, label);
         }
     }
     void branchTest32(Condition cond, AbsoluteAddress address, Imm32 imm, Label* label) {
         if (X86Encoding::IsAddressImmediate(address.addr)) {
             test32(Operand(address), imm);
         } else {
-            mov(ImmPtr(address.addr), ScratchReg);
-            test32(Operand(ScratchReg, 0), imm);
+            ScratchRegisterScope scratch(asMasm());
+            mov(ImmPtr(address.addr), scratch);
+            test32(Operand(scratch, 0), imm);
         }
         j(cond, label);
     }
 
     // Specialization for AbsoluteAddress.
     void branchPtr(Condition cond, AbsoluteAddress addr, Register ptr, Label* label) {
-        MOZ_ASSERT(ptr != ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        MOZ_ASSERT(ptr != scratch);
         if (X86Encoding::IsAddressImmediate(addr.addr)) {
             branchPtr(cond, Operand(addr), ptr, label);
         } else {
-            mov(ImmPtr(addr.addr), ScratchReg);
-            branchPtr(cond, Operand(ScratchReg, 0x0), ptr, label);
+            mov(ImmPtr(addr.addr), scratch);
+            branchPtr(cond, Operand(scratch, 0x0), ptr, label);
         }
     }
     void branchPtr(Condition cond, AbsoluteAddress addr, ImmWord ptr, Label* label) {
         if (X86Encoding::IsAddressImmediate(addr.addr)) {
             branchPtr(cond, Operand(addr), ptr, label);
         } else {
-            mov(ImmPtr(addr.addr), ScratchReg);
-            branchPtr(cond, Operand(ScratchReg, 0x0), ptr, label);
+            ScratchRegisterScope scratch(asMasm());
+            mov(ImmPtr(addr.addr), scratch);
+            branchPtr(cond, Operand(scratch, 0x0), ptr, label);
         }
     }
     void branchPtr(Condition cond, AsmJSAbsoluteAddress addr, Register ptr, Label* label) {
-        MOZ_ASSERT(ptr != ScratchReg);
-        mov(AsmJSImmPtr(addr.kind()), ScratchReg);
-        branchPtr(cond, Operand(ScratchReg, 0x0), ptr, label);
+        ScratchRegisterScope scratch(asMasm());
+        MOZ_ASSERT(ptr != scratch);
+        mov(AsmJSImmPtr(addr.kind()), scratch);
+        branchPtr(cond, Operand(scratch, 0x0), ptr, label);
     }
 
     void branchPrivatePtr(Condition cond, Address lhs, ImmPtr ptr, Label* label) {
         branchPtr(cond, lhs, ImmWord(uintptr_t(ptr.value) >> 1), label);
     }
 
     void branchPrivatePtr(Condition cond, Address lhs, Register ptr, Label* label) {
-        if (ptr != ScratchReg)
-            movePtr(ptr, ScratchReg);
-        rshiftPtr(Imm32(1), ScratchReg);
-        branchPtr(cond, lhs, ScratchReg, label);
+        ScratchRegisterScope scratch(asMasm());
+        if (ptr != scratch)
+            movePtr(ptr, scratch);
+        rshiftPtr(Imm32(1), scratch);
+        branchPtr(cond, lhs, scratch, label);
     }
 
     template <typename T, typename S>
-    void branchPtr(Condition cond, T lhs, S ptr, Label* label) {
+    void branchPtr(Condition cond, const T& lhs, const S& ptr, Label* label) {
         cmpPtr(Operand(lhs), ptr);
         j(cond, label);
     }
 
     CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr) {
         JmpSrc src = jmpSrc(label);
         return CodeOffsetJump(size(), addPatchableJump(src, Relocation::HARDCODED));
     }
@@ -708,18 +759,19 @@ class MacroAssemblerX64 : public MacroAs
     }
     void movePtr(ImmGCPtr imm, Register dest) {
         movq(imm, dest);
     }
     void loadPtr(AbsoluteAddress address, Register dest) {
         if (X86Encoding::IsAddressImmediate(address.addr)) {
             movq(Operand(address), dest);
         } else {
-            mov(ImmPtr(address.addr), ScratchReg);
-            loadPtr(Address(ScratchReg, 0x0), dest);
+            ScratchRegisterScope scratch(asMasm());
+            mov(ImmPtr(address.addr), scratch);
+            loadPtr(Address(scratch, 0x0), dest);
         }
     }
     void loadPtr(const Address& address, Register dest) {
         movq(Operand(address), dest);
     }
     void loadPtr(const Operand& src, Register dest) {
         movq(src, dest);
     }
@@ -729,61 +781,66 @@ class MacroAssemblerX64 : public MacroAs
     void loadPrivate(const Address& src, Register dest) {
         loadPtr(src, dest);
         shlq(Imm32(1), dest);
     }
     void load32(AbsoluteAddress address, Register dest) {
         if (X86Encoding::IsAddressImmediate(address.addr)) {
             movl(Operand(address), dest);
         } else {
-            mov(ImmPtr(address.addr), ScratchReg);
-            load32(Address(ScratchReg, 0x0), dest);
+            ScratchRegisterScope scratch(asMasm());
+            mov(ImmPtr(address.addr), scratch);
+            load32(Address(scratch, 0x0), dest);
         }
     }
     template <typename T>
     void storePtr(ImmWord imm, T address) {
         if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
             movq(Imm32((int32_t)imm.value), Operand(address));
         } else {
-            mov(imm, ScratchReg);
-            movq(ScratchReg, Operand(address));
+            ScratchRegisterScope scratch(asMasm());
+            mov(imm, scratch);
+            movq(scratch, Operand(address));
         }
     }
     template <typename T>
     void storePtr(ImmPtr imm, T address) {
         storePtr(ImmWord(uintptr_t(imm.value)), address);
     }
     template <typename T>
     void storePtr(ImmGCPtr imm, T address) {
-        movq(imm, ScratchReg);
-        movq(ScratchReg, Operand(address));
+        ScratchRegisterScope scratch(asMasm());
+        movq(imm, scratch);
+        movq(scratch, Operand(address));
     }
     void storePtr(Register src, const Address& address) {
         movq(src, Operand(address));
     }
     void storePtr(Register src, const BaseIndex& address) {
         movq(src, Operand(address));
     }
     void storePtr(Register src, const Operand& dest) {
         movq(src, dest);
     }
     void storePtr(Register src, AbsoluteAddress address) {
         if (X86Encoding::IsAddressImmediate(address.addr)) {
             movq(src, Operand(address));
         } else {
-            mov(ImmPtr(address.addr), ScratchReg);
-            storePtr(src, Address(ScratchReg, 0x0));
+            ScratchRegisterScope scratch(asMasm());
+            mov(ImmPtr(address.addr), scratch);
+            storePtr(src, Address(scratch, 0x0));
         }
     }
     void store32(Register src, AbsoluteAddress address) {
         if (X86Encoding::IsAddressImmediate(address.addr)) {
             movl(src, Operand(address));
         } else {
-            mov(ImmPtr(address.addr), ScratchReg);
-            store32(src, Address(ScratchReg, 0x0));
+            ScratchRegisterScope scratch(asMasm());
+            mov(ImmPtr(address.addr), scratch);
+            store32(src, Address(scratch, 0x0));
         }
     }
     void rshiftPtr(Imm32 imm, Register dest) {
         shrq(imm, dest);
     }
     void rshiftPtrArithmetic(Imm32 imm, Register dest) {
         sarq(imm, dest);
     }
@@ -893,18 +950,19 @@ class MacroAssemblerX64 : public MacroAs
         j(cond, label);
     }
     void branchTestInt32(Condition cond, const Address& address, Label* label) {
         MOZ_ASSERT(cond == Equal || cond == NotEqual);
         branchTestInt32(cond, Operand(address), label);
     }
     void branchTestDouble(Condition cond, const Operand& operand, Label* label) {
         MOZ_ASSERT(cond == Equal || cond == NotEqual);
-        splitTag(operand, ScratchReg);
-        branchTestDouble(cond, ScratchReg, label);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(operand, scratch);
+        branchTestDouble(cond, scratch, label);
     }
     void branchTestDouble(Condition cond, const Address& address, Label* label) {
         MOZ_ASSERT(cond == Equal || cond == NotEqual);
         branchTestDouble(cond, Operand(address), label);
     }
     void branchTestBoolean(Condition cond, const Operand& operand, Label* label) {
         MOZ_ASSERT(cond == Equal || cond == NotEqual);
         cmp32(ToUpper32(operand), Imm32(Upper32Of(GetShiftedTag(JSVAL_TYPE_BOOLEAN))));
@@ -928,22 +986,24 @@ class MacroAssemblerX64 : public MacroAs
 
     // Perform a type-test on a full Value loaded into a register.
     // Clobbers the ScratchReg.
     void branchTestUndefined(Condition cond, const ValueOperand& src, Label* label) {
         cond = testUndefined(cond, src);
         j(cond, label);
     }
     void branchTestInt32(Condition cond, const ValueOperand& src, Label* label) {
-        splitTag(src, ScratchReg);
-        branchTestInt32(cond, ScratchReg, label);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        branchTestInt32(cond, scratch, label);
     }
     void branchTestBoolean(Condition cond, const ValueOperand& src, Label* label) {
-        splitTag(src, ScratchReg);
-        branchTestBoolean(cond, ScratchReg, label);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        branchTestBoolean(cond, scratch, label);
     }
     void branchTestDouble(Condition cond, const ValueOperand& src, Label* label) {
         cond = testDouble(cond, src);
         j(cond, label);
     }
     void branchTestNull(Condition cond, const ValueOperand& src, Label* label) {
         cond = testNull(cond, src);
         j(cond, label);
@@ -967,22 +1027,24 @@ class MacroAssemblerX64 : public MacroAs
 
     // Perform a type-test on a Value addressed by BaseIndex.
     // Clobbers the ScratchReg.
     void branchTestUndefined(Condition cond, const BaseIndex& address, Label* label) {
         cond = testUndefined(cond, address);
         j(cond, label);
     }
     void branchTestInt32(Condition cond, const BaseIndex& address, Label* label) {
-        splitTag(address, ScratchReg);
-        branchTestInt32(cond, ScratchReg, label);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(address, scratch);
+        branchTestInt32(cond, scratch, label);
     }
     void branchTestBoolean(Condition cond, const BaseIndex& address, Label* label) {
-        splitTag(address, ScratchReg);
-        branchTestBoolean(cond, ScratchReg, label);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(address, scratch);
+        branchTestBoolean(cond, scratch, label);
     }
     void branchTestDouble(Condition cond, const BaseIndex& address, Label* label) {
         cond = testDouble(cond, address);
         j(cond, label);
     }
     void branchTestNull(Condition cond, const BaseIndex& address, Label* label) {
         cond = testNull(cond, address);
         j(cond, label);
@@ -1017,26 +1079,28 @@ class MacroAssemblerX64 : public MacroAs
     }
     void branchTestMagicValue(Condition cond, const ValueOperand& val, JSWhyMagic why,
                               Label* label)
     {
         MOZ_ASSERT(cond == Equal || cond == NotEqual);
         branchTestValue(cond, val, MagicValue(why), label);
     }
     Condition testMagic(Condition cond, const ValueOperand& src) {
-        splitTag(src, ScratchReg);
-        return testMagic(cond, ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        splitTag(src, scratch);
+        return testMagic(cond, scratch);
     }
     Condition testError(Condition cond, const ValueOperand& src) {
         return testMagic(cond, src);
     }
     void branchTestValue(Condition cond, const ValueOperand& value, const Value& v, Label* label) {
-        MOZ_ASSERT(value.valueReg() != ScratchReg);
-        moveValue(v, ScratchReg);
-        cmpPtr(value.valueReg(), ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        MOZ_ASSERT(value.valueReg() != scratch);
+        moveValue(v, scratch);
+        cmpPtr(value.valueReg(), scratch);
         j(cond, label);
     }
     void branchTestValue(Condition cond, const Address& valaddr, const ValueOperand& value,
                          Label* label)
     {
         MOZ_ASSERT(cond == Equal || cond == NotEqual);
         branchPtr(cond, valaddr, value.valueReg(), label);
     }
@@ -1114,33 +1178,35 @@ class MacroAssemblerX64 : public MacroAs
     void notBoolean(const ValueOperand& val) {
         xorq(Imm32(1), val.valueReg());
     }
 
     // Unbox any non-double value into dest. Prefer unboxInt32 or unboxBoolean
     // instead if the source type is known.
     void unboxNonDouble(const ValueOperand& src, Register dest) {
         if (src.valueReg() == dest) {
-            mov(ImmWord(JSVAL_PAYLOAD_MASK), ScratchReg);
-            andq(ScratchReg, dest);
+            ScratchRegisterScope scratch(asMasm());
+            mov(ImmWord(JSVAL_PAYLOAD_MASK), scratch);
+            andq(scratch, dest);
         } else {
             mov(ImmWord(JSVAL_PAYLOAD_MASK), dest);
             andq(src.valueReg(), dest);
         }
     }
     void unboxNonDouble(const Operand& src, Register dest) {
         // Explicitly permits |dest| to be used in |src|.
-        MOZ_ASSERT(dest != ScratchReg);
+        ScratchRegisterScope scratch(asMasm());
+        MOZ_ASSERT(dest != scratch);
         if (src.containsReg(dest)) {
-            mov(ImmWord(JSVAL_PAYLOAD_MASK), ScratchReg);
+            mov(ImmWord(JSVAL_PAYLOAD_MASK), scratch);
             // If src is already a register, then src and dest are the same
             // thing and we don't need to move anything into dest.
             if (src.kind() != Operand::REG)
                 movq(src, dest);
-            andq(ScratchReg, dest);
+            andq(scratch, dest);
         } else {
             mov(ImmWord(JSVAL_PAYLOAD_MASK), dest);
             andq(src, dest);
         }
     }
 
     void unboxString(const ValueOperand& src, Register dest) { unboxNonDouble(src, dest); }
     void unboxString(const Operand& src, Register dest) { unboxNonDouble(src, dest); }
@@ -1254,18 +1320,19 @@ class MacroAssemblerX64 : public MacroAs
         Condition cond = testInt32Truthy(truthy, operand);
         j(cond, label);
     }
     void branchTestBooleanTruthy(bool truthy, const ValueOperand& operand, Label* label) {
         test32(operand.valueReg(), operand.valueReg());
         j(truthy ? NonZero : Zero, label);
     }
     Condition testStringTruthy(bool truthy, const ValueOperand& value) {
-        unboxString(value, ScratchReg);
-        cmp32(Operand(ScratchReg, JSString::offsetOfLength()), Imm32(0));
+        ScratchRegisterScope scratch(asMasm());
+        unboxString(value, scratch);
+        cmp32(Operand(scratch, JSString::offsetOfLength()), Imm32(0));
         return truthy ? Assembler::NotEqual : Assembler::Equal;
     }
     void branchTestStringTruthy(bool truthy, const ValueOperand& value, Label* label) {
         Condition cond = testStringTruthy(truthy, value);
         j(cond, label);
     }
 
     void loadInt32OrDouble(const Operand& operand, FloatRegister dest) {
@@ -1289,20 +1356,22 @@ class MacroAssemblerX64 : public MacroAs
     }
 
     template <typename T>
     void storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest, MIRType slotType);
 
     template <typename T>
     void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
         switch (nbytes) {
-          case 8:
-            unboxNonDouble(value, ScratchReg);
-            storePtr(ScratchReg, address);
+          case 8: {
+            ScratchRegisterScope scratch(asMasm());
+            unboxNonDouble(value, scratch);
+            storePtr(scratch, address);
             return;
+          }
           case 4:
             store32(value.valueReg(), address);
             return;
           case 1:
             store8(value.valueReg(), address);
             return;
           default: MOZ_CRASH("Bad payload width");
         }
@@ -1319,35 +1388,37 @@ class MacroAssemblerX64 : public MacroAs
     void convertUInt32ToFloat32(Register src, FloatRegister dest) {
         vcvtsq2ss(src, dest, dest);
     }
 
     void inc64(AbsoluteAddress dest) {
         if (X86Encoding::IsAddressImmediate(dest.addr)) {
             addPtr(Imm32(1), Operand(dest));
         } else {
-            mov(ImmPtr(dest.addr), ScratchReg);
-            addPtr(Imm32(1), Address(ScratchReg, 0));
+            ScratchRegisterScope scratch(asMasm());
+            mov(ImmPtr(dest.addr), scratch);
+            addPtr(Imm32(1), Address(scratch, 0));
         }
     }
 
     void incrementInt32Value(const Address& addr) {
         addPtr(Imm32(1), addr);
     }
 
     // If source is a double, load it into dest. If source is int32,
     // convert it to double. Else, branch to failure.
     void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure) {
         Label isDouble, done;
         Register tag = splitTagForTest(source);
         branchTestDouble(Assembler::Equal, tag, &isDouble);
         branchTestInt32(Assembler::NotEqual, tag, failure);
 
-        unboxInt32(source, ScratchReg);
-        convertInt32ToDouble(ScratchReg, dest);
+        ScratchRegisterScope scratch(asMasm());
+        unboxInt32(source, scratch);
+        convertInt32ToDouble(scratch, dest);
         jump(&done);
 
         bind(&isDouble);
         unboxDouble(source, dest);
 
         bind(&done);
     }
 
@@ -1366,18 +1437,19 @@ class MacroAssemblerX64 : public MacroAs
                                 unsigned globalDataOffset)
     {
         uint8_t* nextInsn = code + patchAt.offset();
         MOZ_ASSERT(nextInsn <= globalData);
         uint8_t* target = globalData + globalDataOffset;
         ((int32_t*)nextInsn)[-1] = target - nextInsn;
     }
     void memIntToValue(Address Source, Address Dest) {
-        load32(Source, ScratchReg);
-        storeValue(JSVAL_TYPE_INT32, ScratchReg, Dest);
+        ScratchRegisterScope scratch(asMasm());
+        load32(Source, scratch);
+        storeValue(JSVAL_TYPE_INT32, scratch, Dest);
     }
 
     void branchPtrInNurseryRange(Condition cond, Register ptr, Register temp, Label* label);
     void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, Label* label);
 
     // Instrumentation for entering and leaving the profiler.
     void profilerEnterFrame(Register framePtr, Register scratch);
     void profilerExitFrame();
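
Nearly every test* overload above follows the same two-step shape: splitTag moves the value's tag bits into the scratch register, then the register-based overload performs the compare. On x64's punboxed value layout the tag sits above the payload, so the split is just a right shift. A sketch of the arithmetic; the 47-bit shift matches JSVAL_TAG_SHIFT in this era of SpiderMonkey, but treat the exact constant as an assumption of the sketch:

    #include <cstdint>

    constexpr int kTagShift = 47;   // assumed value of JSVAL_TAG_SHIFT

    uint32_t splitTag(uint64_t boxedBits)
    {
        // This is the shift splitTag() emits a shrq for: only the tag
        // survives, ready for a cheap 32-bit compare against ImmTag values.
        return uint32_t(boxedBits >> kTagShift);
    }
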
--- a/js/src/jit/x64/SharedICHelpers-x64.h
+++ b/js/src/jit/x64/SharedICHelpers-x64.h
@@ -67,43 +67,47 @@ inline void
 EmitChangeICReturnAddress(MacroAssembler& masm, Register reg)
 {
     masm.storePtr(reg, Address(StackPointer, 0));
 }
 
 inline void
 EmitBaselineTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize)
 {
+    ScratchRegisterScope scratch(masm);
+
     // We can assume at this point that R0 and R1 have been pushed.
-    masm.movq(BaselineFrameReg, ScratchReg);
-    masm.addq(Imm32(BaselineFrame::FramePointerOffset), ScratchReg);
-    masm.subq(BaselineStackReg, ScratchReg);
+    masm.movq(BaselineFrameReg, scratch);
+    masm.addq(Imm32(BaselineFrame::FramePointerOffset), scratch);
+    masm.subq(BaselineStackReg, scratch);
 
     // Store frame size without VMFunction arguments for GC marking.
-    masm.movq(ScratchReg, rdx);
+    masm.movq(scratch, rdx);
     masm.subq(Imm32(argSize), rdx);
     masm.store32(rdx, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
 
     // Push frame descriptor and perform the tail call.
-    masm.makeFrameDescriptor(ScratchReg, JitFrame_BaselineJS);
-    masm.push(ScratchReg);
+    masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS);
+    masm.push(scratch);
     masm.push(ICTailCallReg);
     masm.jmp(target);
 }
 
 inline void
 EmitIonTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t stackSize)
 {
-    masm.movq(Operand(esp, stackSize), ScratchReg);
-    masm.shrq(Imm32(FRAMESIZE_SHIFT), ScratchReg);
-    masm.addq(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), ScratchReg);
+    ScratchRegisterScope scratch(masm);
+
+    masm.movq(Operand(esp, stackSize), scratch);
+    masm.shrq(Imm32(FRAMESIZE_SHIFT), scratch);
+    masm.addq(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), scratch);
 
     // Push frame descriptor and perform the tail call.
-    masm.makeFrameDescriptor(ScratchReg, JitFrame_IonJS);
-    masm.push(ScratchReg);
+    masm.makeFrameDescriptor(scratch, JitFrame_IonJS);
+    masm.push(scratch);
     masm.push(ICTailCallReg);
     masm.jmp(target);
 }
 
 inline void
 EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg)
 {
     // Compute stub frame size. We have to add two pointers: the stub reg and previous
@@ -113,18 +117,19 @@ EmitBaselineCreateStubFrameDescriptor(Ma
     masm.subq(BaselineStackReg, reg);
 
     masm.makeFrameDescriptor(reg, JitFrame_BaselineStub);
 }
 
 inline void
 EmitBaselineCallVM(JitCode* target, MacroAssembler& masm)
 {
-    EmitBaselineCreateStubFrameDescriptor(masm, ScratchReg);
-    masm.push(ScratchReg);
+    ScratchRegisterScope scratch(masm);
+    EmitBaselineCreateStubFrameDescriptor(masm, scratch);
+    masm.push(scratch);
     masm.call(target);
 }
 
 inline void
 EmitIonCallVM(JitCode* target, size_t stackSlots, MacroAssembler& masm)
 {
     uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonStub);
     masm.Push(Imm32(descriptor));
@@ -142,29 +147,31 @@ EmitIonCallVM(JitCode* target, size_t st
 static const uint32_t STUB_FRAME_SIZE = 4 * sizeof(void*);
 static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = sizeof(void*);
 
 inline void
 EmitBaselineEnterStubFrame(MacroAssembler& masm, Register)
 {
     EmitRestoreTailCallReg(masm);
 
+    ScratchRegisterScope scratch(masm);
+
     // Compute frame size.
-    masm.movq(BaselineFrameReg, ScratchReg);
-    masm.addq(Imm32(BaselineFrame::FramePointerOffset), ScratchReg);
-    masm.subq(BaselineStackReg, ScratchReg);
+    masm.movq(BaselineFrameReg, scratch);
+    masm.addq(Imm32(BaselineFrame::FramePointerOffset), scratch);
+    masm.subq(BaselineStackReg, scratch);
 
-    masm.store32(ScratchReg, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+    masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
 
     // Note: when making changes here, don't forget to update STUB_FRAME_SIZE
     // if needed.
 
     // Push frame descriptor and return address.
-    masm.makeFrameDescriptor(ScratchReg, JitFrame_BaselineJS);
-    masm.push(ScratchReg);
+    masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS);
+    masm.push(scratch);
     masm.push(ICTailCallReg);
 
     // Save old frame pointer, stack pointer and stub reg.
     masm.push(ICStubReg);
     masm.push(BaselineFrameReg);
     masm.mov(BaselineStackReg, BaselineFrameReg);
 }
 
@@ -179,19 +186,20 @@ EmitIonEnterStubFrame(MacroAssembler& ma
 inline void
 EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false)
 {
     // Ion frames do not save and restore the frame pointer. If we called
     // into Ion, we have to restore the stack pointer from the frame descriptor.
     // If we performed a VM call, the descriptor has been popped already so
     // in that case we use the frame pointer.
     if (calledIntoIon) {
-        masm.pop(ScratchReg);
-        masm.shrq(Imm32(FRAMESIZE_SHIFT), ScratchReg);
-        masm.addq(ScratchReg, BaselineStackReg);
+        ScratchRegisterScope scratch(masm);
+        masm.pop(scratch);
+        masm.shrq(Imm32(FRAMESIZE_SHIFT), scratch);
+        masm.addq(scratch, BaselineStackReg);
     } else {
         masm.mov(BaselineFrameReg, BaselineStackReg);
     }
 
     masm.pop(BaselineFrameReg);
     masm.pop(ICStubReg);
 
     // Pop return address.
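
All of the VM-call helpers above funnel the computed frame size through makeFrameDescriptor before pushing it, and EmitBaselineLeaveStubFrame recovers the size with a single shrq by FRAMESIZE_SHIFT. That works because the descriptor packs the size and the frame type into one word. A sketch of the packing; the shift value here is illustrative, and the real constant is defined alongside the JitFrame types:

    #include <cstdint>

    constexpr uint32_t FRAMESIZE_SHIFT = 4;   // illustrative, not the real value

    // Frame size in the high bits, frame type (JitFrame_BaselineJS,
    // JitFrame_IonJS, ...) in the low FRAMESIZE_SHIFT bits.
    uint32_t makeDescriptor(uint32_t frameSize, uint32_t frameType)
    {
        return (frameSize << FRAMESIZE_SHIFT) | frameType;
    }

    // Inverse used when leaving the stub frame:
    uint32_t frameSizeOf(uint32_t descriptor)
    {
        return descriptor >> FRAMESIZE_SHIFT;
    }
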
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -2207,16 +2207,17 @@ class AssemblerX86Shared : public Assemb
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
     void vcmpps(uint8_t order, Operand src1, FloatRegister src0, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         // :TODO: (Bug 1132894) See LIRGeneratorX86Shared::lowerForFPU
+        // FIXME: This logic belongs in the MacroAssembler.
         if (!HasAVX() && !src0.aliases(dest)) {
             if (src1.kind() == Operand::FPREG &&
                 dest.aliases(FloatRegister::FromCode(src1.fpu())))
             {
                 vmovdqa(src1, ScratchSimdReg);
                 src1 = Operand(ScratchSimdReg);
             }
             vmovdqa(src0, dest);
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -96,28 +96,32 @@ CodeGeneratorX86Shared::visitTestDAndBra
     //            ---------
     //      NaN    1  1  1
     //        >    0  0  0
     //        <    0  0  1
     //        =    1  0  0
     //
     // NaN is falsey, so comparing against 0 and then using the Z flag is
     // enough to determine which branch to take.
-    masm.zeroDouble(ScratchDoubleReg);
-    masm.vucomisd(ScratchDoubleReg, ToFloatRegister(opd));
+    ScratchDoubleScope scratch(masm);
+    masm.zeroDouble(scratch);
+    masm.vucomisd(scratch, ToFloatRegister(opd));
     emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
 }
 
 void
 CodeGeneratorX86Shared::visitTestFAndBranch(LTestFAndBranch* test)
 {
     const LAllocation* opd = test->input();
     // vucomiss flags are the same as doubles; see comment above
-    masm.zeroFloat32(ScratchFloat32Reg);
-    masm.vucomiss(ScratchFloat32Reg, ToFloatRegister(opd));
+    {
+        ScratchFloat32Scope scratch(masm);
+        masm.zeroFloat32(scratch);
+        masm.vucomiss(scratch, ToFloatRegister(opd));
+    }
     emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
 }
 
 void
 CodeGeneratorX86Shared::visitBitAndAndBranch(LBitAndAndBranch* baab)
 {
     if (baab->right()->isConstant())
         masm.test32(ToRegister(baab->left()), Imm32(ToInt32(baab->right())));
@@ -204,34 +208,36 @@ CodeGeneratorX86Shared::visitNotD(LNotD*
     FloatRegister opd = ToFloatRegister(ins->input());
 
     // Not returns true if the input is a NaN. We don't have to worry about
     // it if we know the input is never NaN though.
     Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
     if (ins->mir()->operandIsNeverNaN())
         nanCond = Assembler::NaN_HandledByCond;
 
-    masm.zeroDouble(ScratchDoubleReg);
-    masm.compareDouble(Assembler::DoubleEqualOrUnordered, opd, ScratchDoubleReg);
+    ScratchDoubleScope scratch(masm);
+    masm.zeroDouble(scratch);
+    masm.compareDouble(Assembler::DoubleEqualOrUnordered, opd, scratch);
     masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
 }
 
 void
 CodeGeneratorX86Shared::visitNotF(LNotF* ins)
 {
     FloatRegister opd = ToFloatRegister(ins->input());
 
     // Not returns true if the input is a NaN. We don't have to worry about
     // it if we know the input is never NaN though.
     Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
     if (ins->mir()->operandIsNeverNaN())
         nanCond = Assembler::NaN_HandledByCond;
 
-    masm.zeroFloat32(ScratchFloat32Reg);
-    masm.compareFloat(Assembler::DoubleEqualOrUnordered, opd, ScratchFloat32Reg);
+    ScratchFloat32Scope scratch(masm);
+    masm.zeroFloat32(scratch);
+    masm.compareFloat(Assembler::DoubleEqualOrUnordered, opd, scratch);
     masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
 }
 
 void
 CodeGeneratorX86Shared::visitCompareDAndBranch(LCompareDAndBranch* comp)
 {
     FloatRegister lhs = ToFloatRegister(comp->left());
     FloatRegister rhs = ToFloatRegister(comp->right());
@@ -621,30 +627,30 @@ CodeGeneratorX86Shared::visitMinMaxF(LMi
 }
 
 void
 CodeGeneratorX86Shared::visitAbsD(LAbsD* ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     MOZ_ASSERT(input == ToFloatRegister(ins->output()));
     // Load a value which is all ones except for the sign bit.
-    masm.loadConstantDouble(SpecificNaN<double>(0, FloatingPoint<double>::kSignificandBits),
-                            ScratchDoubleReg);
-    masm.vandpd(ScratchDoubleReg, input, input);
+    ScratchDoubleScope scratch(masm);
+    masm.loadConstantDouble(SpecificNaN<double>(0, FloatingPoint<double>::kSignificandBits), scratch);
+    masm.vandpd(scratch, input, input);
 }
 
 void
 CodeGeneratorX86Shared::visitAbsF(LAbsF* ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     MOZ_ASSERT(input == ToFloatRegister(ins->output()));
     // Same trick as visitAbsD above.
-    masm.loadConstantFloat32(SpecificNaN<float>(0, FloatingPoint<float>::kSignificandBits),
-                             ScratchFloat32Reg);
-    masm.vandps(ScratchFloat32Reg, input, input);
+    ScratchFloat32Scope scratch(masm);
+    masm.loadConstantFloat32(SpecificNaN<float>(0, FloatingPoint<float>::kSignificandBits), scratch);
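+    // scratch == 0x7fffffff: every bit set except the sign bit.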
+    masm.vandps(scratch, input, input);
 }
 
 void
 CodeGeneratorX86Shared::visitClzI(LClzI* ins)
 {
     Register input = ToRegister(ins->input());
     Register output = ToRegister(ins->output());
 
@@ -680,39 +686,41 @@ CodeGeneratorX86Shared::visitSqrtF(LSqrt
 }
 
 void
 CodeGeneratorX86Shared::visitPowHalfD(LPowHalfD* ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     FloatRegister output = ToFloatRegister(ins->output());
 
+    ScratchDoubleScope scratch(masm);
+
     Label done, sqrt;
 
     if (!ins->mir()->operandIsNeverNegativeInfinity()) {
         // Branch if not -Infinity.
-        masm.loadConstantDouble(NegativeInfinity<double>(), ScratchDoubleReg);
+        masm.loadConstantDouble(NegativeInfinity<double>(), scratch);
 
         Assembler::DoubleCondition cond = Assembler::DoubleNotEqualOrUnordered;
         if (ins->mir()->operandIsNeverNaN())
             cond = Assembler::DoubleNotEqual;
-        masm.branchDouble(cond, input, ScratchDoubleReg, &sqrt);
+        masm.branchDouble(cond, input, scratch, &sqrt);
 
         // Math.pow(-Infinity, 0.5) == Infinity.
         masm.zeroDouble(input);
-        masm.subDouble(ScratchDoubleReg, input);
+        masm.subDouble(scratch, input);
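+        // input = 0.0 - (-Infinity) == Infinity.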
         masm.jump(&done);
 
         masm.bind(&sqrt);
     }
 
     if (!ins->mir()->operandIsNeverNegativeZero()) {
         // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5). Adding 0 converts any -0 to 0.
-        masm.zeroDouble(ScratchDoubleReg);
-        masm.addDouble(ScratchDoubleReg, input);
+        masm.zeroDouble(scratch);
+        masm.addDouble(scratch, input);
     }
 
     masm.vsqrtsd(input, output, output);
 
     masm.bind(&done);
 }
 
 class OutOfLineUndoALUOperation : public OutOfLineCodeBase<CodeGeneratorX86Shared>
@@ -1705,36 +1713,40 @@ CodeGeneratorX86Shared::visitMathF(LMath
         MOZ_CRASH("unexpected opcode");
     }
 }
 
 void
 CodeGeneratorX86Shared::visitFloor(LFloor* lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
-    FloatRegister scratch = ScratchDoubleReg;
     Register output = ToRegister(lir->output());
 
     Label bailout;
 
     if (AssemblerX86Shared::HasSSE41()) {
         // Bail on negative-zero.
         masm.branchNegativeZero(input, output, &bailout);
         bailoutFrom(&bailout, lir->snapshot());
 
         // Round toward -Infinity.
-        masm.vroundsd(X86Encoding::RoundDown, input, scratch, scratch);
-
-        bailoutCvttsd2si(scratch, output, lir->snapshot());
+        {
+            ScratchDoubleScope scratch(masm);
+            masm.vroundsd(X86Encoding::RoundDown, input, scratch, scratch);
+            bailoutCvttsd2si(scratch, output, lir->snapshot());
+        }
     } else {
         Label negative, end;
 
         // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
-        masm.zeroDouble(scratch);
-        masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &negative);
+        {
+            ScratchDoubleScope scratch(masm);
+            masm.zeroDouble(scratch);
+            masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &negative);
+        }
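+        // The scope must end here: branchNegativeZero below may itself
+        // claim the double scratch (see MacroAssembler-x86-shared.cpp).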
 
         // Bail on negative-zero.
         masm.branchNegativeZero(input, output, &bailout);
         bailoutFrom(&bailout, lir->snapshot());
 
         // Input is non-negative, so truncation correctly rounds.
         bailoutCvttsd2si(input, output, lir->snapshot());
 
@@ -1745,53 +1757,60 @@ CodeGeneratorX86Shared::visitFloor(LFloo
         // native rounding mode matches JS semantics. Still better than callVM.
         masm.bind(&negative);
         {
             // Truncate and round toward zero.
             // This is off-by-one for everything but integer-valued inputs.
             bailoutCvttsd2si(input, output, lir->snapshot());
 
             // Test whether the input double was integer-valued.
-            masm.convertInt32ToDouble(output, scratch);
-            masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch, &end);
+            {
+                ScratchDoubleScope scratch(masm);
+                masm.convertInt32ToDouble(output, scratch);
+                masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, scratch, &end);
+            }
 
             // Input is not integer-valued, so we rounded off-by-one in the
             // wrong direction. Correct by subtraction.
             masm.subl(Imm32(1), output);
             // Cannot overflow: output was already checked against INT_MIN.
         }
 
         masm.bind(&end);
     }
 }
 
 void
 CodeGeneratorX86Shared::visitFloorF(LFloorF* lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
-    FloatRegister scratch = ScratchFloat32Reg;
     Register output = ToRegister(lir->output());
 
     Label bailout;
 
     if (AssemblerX86Shared::HasSSE41()) {
         // Bail on negative-zero.
         masm.branchNegativeZeroFloat32(input, output, &bailout);
         bailoutFrom(&bailout, lir->snapshot());
 
         // Round toward -Infinity.
-        masm.vroundss(X86Encoding::RoundDown, input, scratch, scratch);
-
-        bailoutCvttss2si(scratch, output, lir->snapshot());
+        {
+            ScratchFloat32Scope scratch(masm);
+            masm.vroundss(X86Encoding::RoundDown, input, scratch, scratch);
+            bailoutCvttss2si(scratch, output, lir->snapshot());
+        }
     } else {
         Label negative, end;
 
         // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
-        masm.zeroFloat32(scratch);
-        masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &negative);
+        {
+            ScratchFloat32Scope scratch(masm);
+            masm.zeroFloat32(scratch);
+            masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &negative);
+        }
 
         // Bail on negative-zero.
         masm.branchNegativeZeroFloat32(input, output, &bailout);
         bailoutFrom(&bailout, lir->snapshot());
 
         // Input is non-negative, so truncation correctly rounds.
         bailoutCvttss2si(input, output, lir->snapshot());
 
@@ -1802,34 +1821,37 @@ CodeGeneratorX86Shared::visitFloorF(LFlo
         // native rounding mode matches JS semantics. Still better than callVM.
         masm.bind(&negative);
         {
             // Truncate and round toward zero.
             // This is off-by-one for everything but integer-valued inputs.
             bailoutCvttss2si(input, output, lir->snapshot());
 
             // Test whether the input float was integer-valued.
-            masm.convertInt32ToFloat32(output, scratch);
-            masm.branchFloat(Assembler::DoubleEqualOrUnordered, input, scratch, &end);
+            {
+                ScratchFloat32Scope scratch(masm);
+                masm.convertInt32ToFloat32(output, scratch);
+                masm.branchFloat(Assembler::DoubleEqualOrUnordered, input, scratch, &end);
+            }
 
             // Input is not integer-valued, so we rounded off-by-one in the
             // wrong direction. Correct by subtraction.
             masm.subl(Imm32(1), output);
             // Cannot overflow: output was already checked against INT_MIN.
         }
 
         masm.bind(&end);
     }
 }
 
 void
 CodeGeneratorX86Shared::visitCeil(LCeil* lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
-    FloatRegister scratch = ScratchDoubleReg;
+    ScratchDoubleScope scratch(masm);
     Register output = ToRegister(lir->output());
 
     Label bailout, lessThanMinusOne;
 
     // Bail on the ]-1; -0] range (exclusive of -1, inclusive of -0).
     masm.loadConstantDouble(-1, scratch);
     masm.branchDouble(Assembler::DoubleLessThanOrEqualOrUnordered, input,
                       scratch, &lessThanMinusOne);
@@ -1871,17 +1893,17 @@ CodeGeneratorX86Shared::visitCeil(LCeil*
 
     masm.bind(&end);
 }
 
 void
 CodeGeneratorX86Shared::visitCeilF(LCeilF* lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
-    FloatRegister scratch = ScratchFloat32Reg;
+    ScratchFloat32Scope scratch(masm);
     Register output = ToRegister(lir->output());
 
     Label bailout, lessThanMinusOne;
 
     // Bail on the ]-1; -0] range (exclusive of -1, inclusive of -0).
     masm.loadConstantFloat32(-1.f, scratch);
     masm.branchFloat(Assembler::DoubleLessThanOrEqualOrUnordered, input,
                      scratch, &lessThanMinusOne);
@@ -1924,17 +1946,17 @@ CodeGeneratorX86Shared::visitCeilF(LCeil
     masm.bind(&end);
 }
 
 void
 CodeGeneratorX86Shared::visitRound(LRound* lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
     FloatRegister temp = ToFloatRegister(lir->temp());
-    FloatRegister scratch = ScratchDoubleReg;
+    ScratchDoubleScope scratch(masm);
     Register output = ToRegister(lir->output());
 
     Label negativeOrZero, negative, end, bailout;
 
     // Branch to a slow path for non-positive inputs. Doesn't catch NaN.
     masm.zeroDouble(scratch);
     masm.loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
     masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, scratch, &negativeOrZero);
@@ -2013,17 +2035,17 @@ CodeGeneratorX86Shared::visitRound(LRoun
     masm.bind(&end);
 }
 
 void
 CodeGeneratorX86Shared::visitRoundF(LRoundF* lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
     FloatRegister temp = ToFloatRegister(lir->temp());
-    FloatRegister scratch = ScratchFloat32Reg;
+    ScratchFloat32Scope scratch(masm);
     Register output = ToRegister(lir->output());
 
     Label negativeOrZero, negative, end, bailout;
 
     // Branch to a slow path for non-positive inputs. Doesn't catch NaN.
     masm.zeroFloat32(scratch);
     masm.loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
     masm.branchFloat(Assembler::DoubleLessThanOrEqual, input, scratch, &negativeOrZero);
@@ -2224,21 +2246,22 @@ CodeGeneratorX86Shared::visitFloat32x4To
 
     masm.convertFloat32x4ToInt32x4(in, out);
 
     OutOfLineSimdFloatToIntCheck* ool = new(alloc()) OutOfLineSimdFloatToIntCheck(temp, in, ins);
     addOutOfLineCode(ool, ins->mir());
 
     static const SimdConstant InvalidResult = SimdConstant::SplatX4(int32_t(-2147483648));
 
-    masm.loadConstantInt32x4(InvalidResult, ScratchSimdReg);
-    masm.packedEqualInt32x4(Operand(out), ScratchSimdReg);
+    ScratchSimdScope scratch(masm);
+    masm.loadConstantInt32x4(InvalidResult, scratch);
+    masm.packedEqualInt32x4(Operand(out), scratch);
     // TODO (bug 1156228): If we have SSE4.1, we can use PTEST here instead of
     // the two following instructions.
-    masm.vmovmskps(ScratchSimdReg, temp);
+    masm.vmovmskps(scratch, temp);
     masm.cmp32(temp, Imm32(0));
     masm.j(Assembler::NotEqual, ool->entry());
 
     masm.bind(ool->rejoin());
 }
 
 void
 CodeGeneratorX86Shared::visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck* ool)
@@ -2249,25 +2272,26 @@ CodeGeneratorX86Shared::visitOutOfLineSi
     Label bail;
     Label* onConversionError = gen->conversionErrorLabel();
     if (!onConversionError)
         onConversionError = &bail;
 
     FloatRegister input = ool->input();
     Register temp = ool->temp();
 
-    masm.loadConstantFloat32x4(Int32MinX4, ScratchSimdReg);
-    masm.vcmpleps(Operand(input), ScratchSimdReg, ScratchSimdReg);
-    masm.vmovmskps(ScratchSimdReg, temp);
+    ScratchSimdScope scratch(masm);
+    masm.loadConstantFloat32x4(Int32MinX4, scratch);
+    masm.vcmpleps(Operand(input), scratch, scratch);
+    masm.vmovmskps(scratch, temp);
     masm.cmp32(temp, Imm32(15));
     masm.j(Assembler::NotEqual, onConversionError);
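     // The test above requires every lane to satisfy Int32MinX4 <= input
     // (mask == 15); NaN lanes fail it. The test below requires that no
     // lane satisfy Int32MaxX4 <= input (mask == 0).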
 
-    masm.loadConstantFloat32x4(Int32MaxX4, ScratchSimdReg);
-    masm.vcmpleps(Operand(input), ScratchSimdReg, ScratchSimdReg);
-    masm.vmovmskps(ScratchSimdReg, temp);
+    masm.loadConstantFloat32x4(Int32MaxX4, scratch);
+    masm.vcmpleps(Operand(input), scratch, scratch);
+    masm.vmovmskps(scratch, temp);
     masm.cmp32(temp, Imm32(0));
     masm.j(Assembler::NotEqual, onConversionError);
 
     masm.jump(ool->rejoin());
 
     if (bail.used()) {
         masm.bind(&bail);
         bailout(ool->ins()->snapshot());
@@ -2375,18 +2399,19 @@ CodeGeneratorX86Shared::visitSimdExtract
     SimdLane lane = ins->lane();
     if (lane == LaneX) {
         // The value we want to extract is in the low double-word
         masm.moveLowInt32(input, output);
     } else if (AssemblerX86Shared::HasSSE41()) {
         masm.vpextrd(lane, input, output);
     } else {
         uint32_t mask = MacroAssembler::ComputeShuffleMask(lane);
-        masm.shuffleInt32(mask, input, ScratchSimdReg);
-        masm.moveLowInt32(ScratchSimdReg, output);
+        ScratchSimdScope scratch(masm);
+        masm.shuffleInt32(mask, input, scratch);
+        masm.moveLowInt32(scratch, output);
     }
 }
 
 void
 CodeGeneratorX86Shared::visitSimdExtractElementF(LSimdExtractElementF* ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     FloatRegister output = ToFloatRegister(ins->output());
@@ -2536,17 +2561,18 @@ CodeGeneratorX86Shared::visitSimdGeneral
 void
 CodeGeneratorX86Shared::visitSimdGeneralShuffleI(LSimdGeneralShuffleI* ins)
 {
     visitSimdGeneralShuffle<int32_t, Register>(ins, ToRegister(ins->temp()));
 }
 void
 CodeGeneratorX86Shared::visitSimdGeneralShuffleF(LSimdGeneralShuffleF* ins)
 {
-    visitSimdGeneralShuffle<float, FloatRegister>(ins, ScratchFloat32Reg);
+    ScratchFloat32Scope scratch(masm);
+    visitSimdGeneralShuffle<float, FloatRegister>(ins, scratch);
 }
 
 void
 CodeGeneratorX86Shared::visitSimdSwizzleI(LSimdSwizzleI* ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     FloatRegister output = ToFloatRegister(ins->output());
 
@@ -2738,73 +2764,77 @@ CodeGeneratorX86Shared::visitSimdShuffle
     }
 
     // Two elements from one vector, two other elements from the other
     MOZ_ASSERT(numLanesFromLHS == 2);
 
     // TODO: Here and below, the symmetric case would avoid a move, but it
     // can't be reached because the operands would get swapped (bug 1084404).
     if (ins->lanesMatch(2, 3, 6, 7)) {
+        ScratchSimdScope scratch(masm);
         if (AssemblerX86Shared::HasAVX()) {
-            FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, ScratchSimdReg);
+            FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
             masm.vmovhlps(lhs, rhsCopy, out);
         } else {
-            masm.loadAlignedFloat32x4(rhs, ScratchSimdReg);
-            masm.vmovhlps(lhs, ScratchSimdReg, ScratchSimdReg);
-            masm.moveFloat32x4(ScratchSimdReg, out);
+            masm.loadAlignedFloat32x4(rhs, scratch);
+            masm.vmovhlps(lhs, scratch, scratch);
+            masm.moveFloat32x4(scratch, out);
         }
         return;
     }
 
     if (ins->lanesMatch(0, 1, 4, 5)) {
         FloatRegister rhsCopy;
+        ScratchSimdScope scratch(masm);
         if (rhs.kind() == Operand::FPREG) {
             // No need to make an actual copy, since the operand is already
             // in a register, and it won't be clobbered by the vmovlhps.
             rhsCopy = FloatRegister::FromCode(rhs.fpu());
         } else {
-            masm.loadAlignedFloat32x4(rhs, ScratchSimdReg);
-            rhsCopy = ScratchSimdReg;
+            masm.loadAlignedFloat32x4(rhs, scratch);
+            rhsCopy = scratch;
         }
         masm.vmovlhps(rhsCopy, lhs, out);
         return;
     }
 
     if (ins->lanesMatch(0, 4, 1, 5)) {
         masm.vunpcklps(rhs, lhs, out);
         return;
     }
 
     // TODO: The swapped case would be better (bug 1084404).
     if (ins->lanesMatch(4, 0, 5, 1)) {
+        ScratchSimdScope scratch(masm);
         if (AssemblerX86Shared::HasAVX()) {
-            FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, ScratchSimdReg);
+            FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
             masm.vunpcklps(lhs, rhsCopy, out);
         } else {
-            masm.loadAlignedFloat32x4(rhs, ScratchSimdReg);
-            masm.vunpcklps(lhs, ScratchSimdReg, ScratchSimdReg);
-            masm.moveFloat32x4(ScratchSimdReg, out);
+            masm.loadAlignedFloat32x4(rhs, scratch);
+            masm.vunpcklps(lhs, scratch, scratch);
+            masm.moveFloat32x4(scratch, out);
         }
         return;
     }
 
     if (ins->lanesMatch(2, 6, 3, 7)) {
         masm.vunpckhps(rhs, lhs, out);
         return;
     }
 
     // TODO: The swapped case would be better (bug 1084404).
     if (ins->lanesMatch(6, 2, 7, 3)) {
+        ScratchSimdScope scratch(masm);
         if (AssemblerX86Shared::HasAVX()) {
-            FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, ScratchSimdReg);
+            FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
             masm.vunpckhps(lhs, rhsCopy, out);
         } else {
-            masm.loadAlignedFloat32x4(rhs, ScratchSimdReg);
-            masm.vunpckhps(lhs, ScratchSimdReg, ScratchSimdReg);
-            masm.moveFloat32x4(ScratchSimdReg, out);
+            masm.loadAlignedFloat32x4(rhs, scratch);
+            masm.vunpckhps(lhs, scratch, scratch);
+            masm.moveFloat32x4(scratch, out);
         }
         return;
     }
 
     // Doable with a single vshufps.
     if (x < 4 && y < 4) {
         mask = MacroAssembler::ComputeShuffleMask(x, y, z % 4, w % 4);
         masm.vshufps(mask, rhs, lhs, out);
@@ -2848,59 +2878,61 @@ void
 CodeGeneratorX86Shared::visitSimdBinaryCompIx4(LSimdBinaryCompIx4* ins)
 {
     static const SimdConstant allOnes = SimdConstant::SplatX4(-1);
 
     FloatRegister lhs = ToFloatRegister(ins->lhs());
     Operand rhs = ToOperand(ins->rhs());
     MOZ_ASSERT(ToFloatRegister(ins->output()) == lhs);
 
+    ScratchSimdScope scratch(masm);
+
     MSimdBinaryComp::Operation op = ins->operation();
     switch (op) {
       case MSimdBinaryComp::greaterThan:
         masm.packedGreaterThanInt32x4(rhs, lhs);
         return;
       case MSimdBinaryComp::equal:
         masm.packedEqualInt32x4(rhs, lhs);
         return;
       case MSimdBinaryComp::lessThan:
         // src := rhs
         if (rhs.kind() == Operand::FPREG)
-            masm.moveInt32x4(ToFloatRegister(ins->rhs()), ScratchSimdReg);
+            masm.moveInt32x4(ToFloatRegister(ins->rhs()), scratch);
         else
-            masm.loadAlignedInt32x4(rhs, ScratchSimdReg);
+            masm.loadAlignedInt32x4(rhs, scratch);
 
         // src := src > lhs (i.e. lhs < rhs)
         // TODO: Improve by custom lowering (rhs is tied to the output register).
-        masm.packedGreaterThanInt32x4(ToOperand(ins->lhs()), ScratchSimdReg);
-        masm.moveInt32x4(ScratchSimdReg, lhs);
+        masm.packedGreaterThanInt32x4(ToOperand(ins->lhs()), scratch);
+        masm.moveInt32x4(scratch, lhs);
         return;
       case MSimdBinaryComp::notEqual:
         // Ideally for notEqual, greaterThanOrEqual, and lessThanOrEqual, we
         // should invert the comparison, e.g. by swapping the arms of a
         // select when that is what the result feeds into.
-        masm.loadConstantInt32x4(allOnes, ScratchSimdReg);
+        masm.loadConstantInt32x4(allOnes, scratch);
         masm.packedEqualInt32x4(rhs, lhs);
-        masm.bitwiseXorX4(Operand(ScratchSimdReg), lhs);
+        masm.bitwiseXorX4(Operand(scratch), lhs);
         return;
       case MSimdBinaryComp::greaterThanOrEqual:
         // src := rhs
         if (rhs.kind() == Operand::FPREG)
-            masm.moveInt32x4(ToFloatRegister(ins->rhs()), ScratchSimdReg);
+            masm.moveInt32x4(ToFloatRegister(ins->rhs()), scratch);
         else
-            masm.loadAlignedInt32x4(rhs, ScratchSimdReg);
-        masm.packedGreaterThanInt32x4(ToOperand(ins->lhs()), ScratchSimdReg);
+            masm.loadAlignedInt32x4(rhs, scratch);
+        masm.packedGreaterThanInt32x4(ToOperand(ins->lhs()), scratch);
         masm.loadConstantInt32x4(allOnes, lhs);
-        masm.bitwiseXorX4(Operand(ScratchSimdReg), lhs);
+        masm.bitwiseXorX4(Operand(scratch), lhs);
         return;
       case MSimdBinaryComp::lessThanOrEqual:
         // lhs <= rhs is equivalent to !(rhs < lhs), which we compute here.
-        masm.loadConstantInt32x4(allOnes, ScratchSimdReg);
+        masm.loadConstantInt32x4(allOnes, scratch);
         masm.packedGreaterThanInt32x4(rhs, lhs);
-        masm.bitwiseXorX4(Operand(ScratchSimdReg), lhs);
+        masm.bitwiseXorX4(Operand(scratch), lhs);
         return;
     }
     MOZ_CRASH("unexpected SIMD op");
 }
 
 void
 CodeGeneratorX86Shared::visitSimdBinaryCompFx4(LSimdBinaryCompFx4* ins)
 {
@@ -2933,41 +2965,43 @@ CodeGeneratorX86Shared::visitSimdBinaryC
 
 void
 CodeGeneratorX86Shared::visitSimdBinaryArithIx4(LSimdBinaryArithIx4* ins)
 {
     FloatRegister lhs = ToFloatRegister(ins->lhs());
     Operand rhs = ToOperand(ins->rhs());
     FloatRegister output = ToFloatRegister(ins->output());
 
+    ScratchSimdScope scratch(masm);
+
     MSimdBinaryArith::Operation op = ins->operation();
     switch (op) {
       case MSimdBinaryArith::Op_add:
         masm.vpaddd(rhs, lhs, output);
         return;
       case MSimdBinaryArith::Op_sub:
         masm.vpsubd(rhs, lhs, output);
         return;
       case MSimdBinaryArith::Op_mul: {
         if (AssemblerX86Shared::HasSSE41()) {
             masm.vpmulld(rhs, lhs, output);
             return;
         }
 
-        masm.loadAlignedInt32x4(rhs, ScratchSimdReg);
-        masm.vpmuludq(lhs, ScratchSimdReg, ScratchSimdReg);
-        // ScratchSimdReg contains (Rx, _, Rz, _) where R is the resulting vector.
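+        // Without SSE4.1's vpmulld there is no packed 32-bit multiply;
+        // vpmuludq multiplies the even lanes into 64-bit products, so two
+        // rounds plus shuffles reassemble the four low 32-bit results.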
+        masm.loadAlignedInt32x4(rhs, scratch);
+        masm.vpmuludq(lhs, scratch, scratch);
+        // scratch contains (Rx, _, Rz, _) where R is the resulting vector.
 
         FloatRegister temp = ToFloatRegister(ins->temp());
         masm.vpshufd(MacroAssembler::ComputeShuffleMask(LaneY, LaneY, LaneW, LaneW), lhs, lhs);
         masm.vpshufd(MacroAssembler::ComputeShuffleMask(LaneY, LaneY, LaneW, LaneW), rhs, temp);
         masm.vpmuludq(temp, lhs, lhs);
         // lhs contains (Ry, _, Rw, _) where R is the resulting vector.
 
-        masm.vshufps(MacroAssembler::ComputeShuffleMask(LaneX, LaneZ, LaneX, LaneZ), ScratchSimdReg, lhs, lhs);
+        masm.vshufps(MacroAssembler::ComputeShuffleMask(LaneX, LaneZ, LaneX, LaneZ), scratch, lhs, lhs);
         // lhs contains (Ry, Rw, Rx, Rz)
         masm.vshufps(MacroAssembler::ComputeShuffleMask(LaneZ, LaneX, LaneW, LaneY), lhs, lhs, lhs);
         return;
       }
       case MSimdBinaryArith::Op_div:
         // x86 doesn't have SIMD i32 div.
         break;
       case MSimdBinaryArith::Op_max:
@@ -2987,56 +3021,58 @@ CodeGeneratorX86Shared::visitSimdBinaryA
 
 void
 CodeGeneratorX86Shared::visitSimdBinaryArithFx4(LSimdBinaryArithFx4* ins)
 {
     FloatRegister lhs = ToFloatRegister(ins->lhs());
     Operand rhs = ToOperand(ins->rhs());
     FloatRegister output = ToFloatRegister(ins->output());
 
+    ScratchSimdScope scratch(masm);
+
     MSimdBinaryArith::Operation op = ins->operation();
     switch (op) {
       case MSimdBinaryArith::Op_add:
         masm.vaddps(rhs, lhs, output);
         return;
       case MSimdBinaryArith::Op_sub:
         masm.vsubps(rhs, lhs, output);
         return;
       case MSimdBinaryArith::Op_mul:
         masm.vmulps(rhs, lhs, output);
         return;
       case MSimdBinaryArith::Op_div:
         masm.vdivps(rhs, lhs, output);
         return;
       case MSimdBinaryArith::Op_max: {
-        FloatRegister lhsCopy = masm.reusedInputFloat32x4(lhs, ScratchSimdReg);
-        masm.vcmpunordps(rhs, lhsCopy, ScratchSimdReg);
+        FloatRegister lhsCopy = masm.reusedInputFloat32x4(lhs, scratch);
+        masm.vcmpunordps(rhs, lhsCopy, scratch);
 
         FloatRegister tmp = ToFloatRegister(ins->temp());
         FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, tmp);
         masm.vmaxps(Operand(lhs), rhsCopy, tmp);
         masm.vmaxps(rhs, lhs, output);
 
         masm.vandps(tmp, output, output);
-        masm.vorps(ScratchSimdReg, output, output); // or in the all-ones NaNs
+        masm.vorps(scratch, output, output); // or in the all-ones NaNs
         return;
       }
       case MSimdBinaryArith::Op_min: {
-        FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, ScratchSimdReg);
-        masm.vminps(Operand(lhs), rhsCopy, ScratchSimdReg);
+        FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
+        masm.vminps(Operand(lhs), rhsCopy, scratch);
         masm.vminps(rhs, lhs, output);
-        masm.vorps(ScratchSimdReg, output, output); // NaN or'd with arbitrary bits is NaN
+        masm.vorps(scratch, output, output); // NaN or'd with arbitrary bits is NaN
         return;
       }
       case MSimdBinaryArith::Op_minNum: {
         FloatRegister tmp = ToFloatRegister(ins->temp());
         masm.loadConstantInt32x4(SimdConstant::SplatX4(int32_t(0x80000000)), tmp);
 
-        FloatRegister mask = ScratchSimdReg;
-        FloatRegister tmpCopy = masm.reusedInputFloat32x4(tmp, ScratchSimdReg);
+        FloatRegister mask = scratch;
+        FloatRegister tmpCopy = masm.reusedInputFloat32x4(tmp, scratch);
         masm.vpcmpeqd(Operand(lhs), tmpCopy, mask);
         masm.vandps(tmp, mask, mask);
 
         FloatRegister lhsCopy = masm.reusedInputFloat32x4(lhs, tmp);
         masm.vminps(rhs, lhsCopy, tmp);
         masm.vorps(mask, tmp, tmp);
 
         FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, mask);
@@ -3052,31 +3088,31 @@ CodeGeneratorX86Shared::visitSimdBinaryA
                 masm.moveFloat32x4(lhs, output);
             masm.vandps(Operand(mask), output, output);
             masm.vandnps(Operand(tmp), mask, mask);
             masm.vorps(Operand(mask), output, output);
         }
         return;
       }
       case MSimdBinaryArith::Op_maxNum: {
-        FloatRegister mask = ScratchSimdReg;
+        FloatRegister mask = scratch;
         masm.loadConstantInt32x4(SimdConstant::SplatX4(0), mask);
         masm.vpcmpeqd(Operand(lhs), mask, mask);
 
         FloatRegister tmp = ToFloatRegister(ins->temp());
         masm.loadConstantInt32x4(SimdConstant::SplatX4(int32_t(0x80000000)), tmp);
         masm.vandps(tmp, mask, mask);
 
         FloatRegister lhsCopy = masm.reusedInputFloat32x4(lhs, tmp);
         masm.vmaxps(rhs, lhsCopy, tmp);
         masm.vandnps(Operand(tmp), mask, mask);
 
         // Ensure tmp always contains the temporary result
         mask = tmp;
-        tmp = ScratchSimdReg;
+        tmp = scratch;
 
         FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, mask);
         masm.vcmpneqps(rhs, rhsCopy, mask);
 
         if (AssemblerX86Shared::HasAVX()) {
             masm.vblendvps(mask, lhs, tmp, output);
         } else {
             // Emulate vblendvps.
@@ -3228,28 +3264,28 @@ CodeGeneratorX86Shared::visitSimdShift(L
           case MSimdShift::ursh:
             masm.packedUnsignedRightShiftByScalar(count, out);
             return;
         }
         MOZ_CRASH("unexpected SIMD bitwise op");
     }
 
     MOZ_ASSERT(val->isRegister());
-    FloatRegister tmp = ScratchFloat32Reg;
-    masm.vmovd(ToRegister(val), tmp);
+    ScratchFloat32Scope scratch(masm);
+    masm.vmovd(ToRegister(val), scratch);
 
     switch (ins->operation()) {
       case MSimdShift::lsh:
-        masm.packedLeftShiftByScalar(tmp, out);
+        masm.packedLeftShiftByScalar(scratch, out);
         return;
       case MSimdShift::rsh:
-        masm.packedRightShiftByScalar(tmp, out);
+        masm.packedRightShiftByScalar(scratch, out);
         return;
       case MSimdShift::ursh:
-        masm.packedUnsignedRightShiftByScalar(tmp, out);
+        masm.packedUnsignedRightShiftByScalar(scratch, out);
         return;
     }
     MOZ_CRASH("unexpected SIMD bitwise op");
 }
 
 void
 CodeGeneratorX86Shared::visitSimdSelect(LSimdSelect* ins)
 {
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -13,44 +13,45 @@
 
 using namespace js;
 using namespace js::jit;
 
 // Note: this function clobbers the input register.
 void
 MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
 {
-    MOZ_ASSERT(input != ScratchDoubleReg);
+    ScratchDoubleScope scratch(*this);
+    MOZ_ASSERT(input != scratch);
     Label positive, done;
 
     // <= 0 or NaN --> 0
-    zeroDouble(ScratchDoubleReg);
-    branchDouble(DoubleGreaterThan, input, ScratchDoubleReg, &positive);
+    zeroDouble(scratch);
+    branchDouble(DoubleGreaterThan, input, scratch, &positive);
     {
         move32(Imm32(0), output);
         jump(&done);
     }
 
     bind(&positive);
 
     // Add 0.5 and truncate.
-    loadConstantDouble(0.5, ScratchDoubleReg);
-    addDouble(ScratchDoubleReg, input);
+    loadConstantDouble(0.5, scratch);
+    addDouble(scratch, input);
 
     Label outOfRange;
 
     // Truncate to int32 and ensure the result <= 255. This relies on the
     // processor setting output to a value > 255 for doubles outside the int32
     // range (for instance 0x80000000).
     vcvttsd2si(input, output);
     branch32(Assembler::Above, output, Imm32(255), &outOfRange);
     {
         // Check if we had a tie.
-        convertInt32ToDouble(output, ScratchDoubleReg);
-        branchDouble(DoubleNotEqual, input, ScratchDoubleReg, &done);
+        convertInt32ToDouble(output, scratch);
+        branchDouble(DoubleNotEqual, input, scratch, &done);
 
         // It was a tie. Mask out the ones bit to get an even value.
         // See also js_TypedArray_uint8_clamp_double.
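         // Example: clamping 2.5 adds 0.5 to get 3.0, which truncates to 3
         // and round-trips exactly, so it was a tie; 3 & ~1 == 2, i.e.
         // round-half-to-even.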
         and32(Imm32(~1), output);
         jump(&done);
     }
 
     // > 255 --> 255
@@ -129,21 +130,23 @@ MacroAssemblerX86Shared::branchNegativeZ
     // Determines whether the low double contained in the XMM register reg
     // is equal to -0.0.
 
 #if defined(JS_CODEGEN_X86)
     Label nonZero;
 
     // if not already compared to zero
     if (maybeNonZero) {
+        ScratchDoubleScope scratchDouble(asMasm());
+
         // Compare to zero. Lets through {0, -0}.
-        zeroDouble(ScratchDoubleReg);
+        zeroDouble(scratchDouble);
 
         // If reg is non-zero, jump to nonZero.
-        branchDouble(DoubleNotEqual, reg, ScratchDoubleReg, &nonZero);
+        branchDouble(DoubleNotEqual, reg, scratchDouble, &nonZero);
     }
     // Input register is either zero or negative zero. Retrieve sign of input.
     vmovmskpd(reg, scratch);
 
     // If scratch is 1 or 3, the input is negative zero.
     // If scratch is 0 or 2, the input is a normal zero.
     branchTest32(NonZero, scratch, Imm32(1), label);
 
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
@@ -676,18 +676,19 @@ class MacroAssemblerX86Shared : public A
         convertInt32ToFloat32(Operand(src), dest);
     }
     void convertInt32ToFloat32(const Operand& src, FloatRegister dest) {
         // Clear the output register first to break dependencies; see above.
         zeroFloat32(dest);
         vcvtsi2ss(src, dest, dest);
     }
     Condition testDoubleTruthy(bool truthy, FloatRegister reg) {
-        zeroDouble(ScratchDoubleReg);
-        vucomisd(reg, ScratchDoubleReg);
+        ScratchDoubleScope scratch(asMasm());
+        zeroDouble(scratch);
+        vucomisd(reg, scratch);
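+        // ZF is set for 0.0 and for NaN (unordered), the two falsey cases.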
         return truthy ? NonZero : Zero;
     }
     void branchTestDoubleTruthy(bool truthy, FloatRegister reg, Label* label) {
         Condition cond = testDoubleTruthy(truthy, reg);
         j(cond, label);
     }
 
     // Class which ensures that registers used in byte ops are compatible with
@@ -903,28 +904,30 @@ class MacroAssemblerX86Shared : public A
     void zeroDouble(FloatRegister reg) {
         vxorpd(reg, reg, reg);
     }
     void zeroFloat32(FloatRegister reg) {
         vxorps(reg, reg, reg);
     }
     void negateDouble(FloatRegister reg) {
         // From MacroAssemblerX86Shared::maybeInlineDouble
-        vpcmpeqw(ScratchDoubleReg, ScratchDoubleReg, ScratchDoubleReg);
-        vpsllq(Imm32(63), ScratchDoubleReg, ScratchDoubleReg);
+        ScratchDoubleScope scratch(asMasm());
+        vpcmpeqw(scratch, scratch, scratch);
+        vpsllq(Imm32(63), scratch, scratch);
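+        // pcmpeqw of a register against itself yields all ones; shifting
+        // left by 63 leaves only the sign bit, 0x8000000000000000.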
 
         // XOR the double in the float register with -0.0.
-        vxorpd(ScratchDoubleReg, reg, reg); // s ^ 0x80000000000000
+        vxorpd(scratch, reg, reg); // s ^ 0x8000000000000000
     }
     void negateFloat(FloatRegister reg) {
-        vpcmpeqw(ScratchFloat32Reg, ScratchFloat32Reg, ScratchFloat32Reg);
-        vpsllq(Imm32(31), ScratchFloat32Reg, ScratchFloat32Reg);
+        ScratchFloat32Scope scratch(asMasm());
+        vpcmpeqw(scratch, scratch, scratch);
+        vpsllq(Imm32(31), scratch, scratch);
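+        // The low 32 bits of scratch are now 0x80000000, the float sign bit.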
 
         // XOR the float in a float register with -0.0.
-        vxorps(ScratchFloat32Reg, reg, reg); // s ^ 0x80000000
+        vxorps(scratch, reg, reg); // s ^ 0x80000000
     }
     void addDouble(FloatRegister src, FloatRegister dest) {
         vaddsd(src, dest, dest);
     }
     void subDouble(FloatRegister src, FloatRegister dest) {
         vsubsd(src, dest, dest);
     }
     void mulDouble(FloatRegister src, FloatRegister dest) {
@@ -991,27 +994,29 @@ class MacroAssemblerX86Shared : public A
     }
     void loadInt32x2(const BaseIndex& src, FloatRegister dest) {
         vmovq(Operand(src), dest);
     }
     void loadInt32x3(const BaseIndex& src, FloatRegister dest) {
         BaseIndex srcZ(src);
         srcZ.offset += 2 * sizeof(int32_t);
 
+        ScratchSimdScope scratch(asMasm());
         vmovq(Operand(src), dest);
-        vmovd(Operand(srcZ), ScratchSimdReg);
-        vmovlhps(ScratchSimdReg, dest, dest);
+        vmovd(Operand(srcZ), scratch);
+        vmovlhps(scratch, dest, dest);
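+        // dest is now (x, y, z, 0); vmovd zero-extended z into scratch.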
     }
     void loadInt32x3(const Address& src, FloatRegister dest) {
         Address srcZ(src);
         srcZ.offset += 2 * sizeof(int32_t);
 
+        ScratchSimdScope scratch(asMasm());
         vmovq(Operand(src), dest);
-        vmovd(Operand(srcZ), ScratchSimdReg);
-        vmovlhps(ScratchSimdReg, dest, dest);
+        vmovd(Operand(srcZ), scratch);
+        vmovlhps(scratch, dest, dest);
     }
 
     void loadAlignedInt32x4(const Address& src, FloatRegister dest) {
         vmovdqa(Operand(src), dest);
     }
     void loadAlignedInt32x4(const Operand& src, FloatRegister dest) {
         vmovdqa(src, dest);
     }
@@ -1054,25 +1059,27 @@ class MacroAssemblerX86Shared : public A
     }
     void storeInt32x2(FloatRegister src, const BaseIndex& dest) {
         vmovq(src, Operand(dest));
     }
     void storeInt32x3(FloatRegister src, const Address& dest) {
         Address destZ(dest);
         destZ.offset += 2 * sizeof(int32_t);
         vmovq(src, Operand(dest));
-        vmovhlps(src, ScratchSimdReg, ScratchSimdReg);
-        vmovd(ScratchSimdReg, Operand(destZ));
+        ScratchSimdScope scratch(asMasm());
+        vmovhlps(src, scratch, scratch);
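+        // scratch's low lanes now hold (z, w); store only z below.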
+        vmovd(scratch, Operand(destZ));
     }
     void storeInt32x3(FloatRegister src, const BaseIndex& dest) {
         BaseIndex destZ(dest);
         destZ.offset += 2 * sizeof(int32_t);
         vmovq(src, Operand(dest));
-        vmovhlps(src, ScratchSimdReg, ScratchSimdReg);
-        vmovd(ScratchSimdReg, Operand(destZ));
+        ScratchSimdScope scratch(asMasm());
+        vmovhlps(src, scratch, scratch);
+        vmovd(scratch, Operand(destZ));
     }
 
     void storeUnalignedInt32x4(FloatRegister src, const Address& dest) {
         vmovdqu(src, Operand(dest));
     }
     void storeUnalignedInt32x4(FloatRegister src, const BaseIndex& dest) {
         vmovdqu(src, Operand(dest));
     }
@@ -1123,47 +1130,51 @@ class MacroAssemblerX86Shared : public A
     void packedUnsignedRightShiftByScalar(Imm32 count, FloatRegister dest) {
         vpsrld(count, dest, dest);
     }
 
     void loadFloat32x3(const Address& src, FloatRegister dest) {
         Address srcZ(src);
         srcZ.offset += 2 * sizeof(float);
         vmovsd(src, dest);
-        vmovss(srcZ, ScratchSimdReg);
-        vmovlhps(ScratchSimdReg, dest, dest);
+        ScratchSimdScope scratch(asMasm());
+        vmovss(srcZ, scratch);
+        vmovlhps(scratch, dest, dest);
     }
     void loadFloat32x3(const BaseIndex& src, FloatRegister dest) {
         BaseIndex srcZ(src);
         srcZ.offset += 2 * sizeof(float);
         vmovsd(src, dest);
-        vmovss(srcZ, ScratchSimdReg);
-        vmovlhps(ScratchSimdReg, dest, dest);
+        ScratchSimdScope scratch(asMasm());
+        vmovss(srcZ, scratch);
+        vmovlhps(scratch, dest, dest);
     }
 
     void loadAlignedFloat32x4(const Address& src, FloatRegister dest) {
         vmovaps(Operand(src), dest);
     }
     void loadAlignedFloat32x4(const Operand& src, FloatRegister dest) {
         vmovaps(src, dest);
     }
 
     void storeFloat32x3(FloatRegister src, const Address& dest) {
         Address destZ(dest);
         destZ.offset += 2 * sizeof(int32_t);
         storeDouble(src, dest);
-        vmovhlps(src, ScratchSimdReg, ScratchSimdReg);
-        storeFloat32(ScratchSimdReg, destZ);
+        ScratchSimdScope scratch(asMasm());
+        vmovhlps(src, scratch, scratch);
+        storeFloat32(scratch, destZ);
     }
     void storeFloat32x3(FloatRegister src, const BaseIndex& dest) {
         BaseIndex destZ(dest);
         destZ.offset += 2 * sizeof(int32_t);
         storeDouble(src, dest);
-        vmovhlps(src, ScratchSimdReg, ScratchSimdReg);
-        storeFloat32(ScratchSimdReg, destZ);
+        ScratchSimdScope scratch(asMasm());
+        vmovhlps(src, scratch, scratch);
+        storeFloat32(scratch, destZ);
     }
     void storeAlignedFloat32x4(FloatRegister src, const Address& dest) {
         vmovaps(src, Operand(dest));
     }
     void moveFloat32x4(FloatRegister src, FloatRegister dest) {
         vmovaps(src, dest);
     }
     FloatRegister reusedInputFloat32x4(FloatRegister src, FloatRegister dest) {
@@ -1305,37 +1316,38 @@ class MacroAssemblerX86Shared : public A
     // the given snapshot. This function overwrites the scratch float register.
     void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
                               bool negativeZeroCheck = true)
     {
         // Check for -0.0
         if (negativeZeroCheck)
             branchNegativeZero(src, dest, fail);
 
+        ScratchDoubleScope scratch(asMasm());
         vcvttsd2si(src, dest);
-        convertInt32ToDouble(dest, ScratchDoubleReg);
-        vucomisd(ScratchDoubleReg, src);
+        convertInt32ToDouble(dest, scratch);
+        vucomisd(scratch, src);
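+        // Parity is set by an unordered (NaN) compare; NotEqual catches
+        // values whose int32 round-trip differs (e.g. fractional doubles).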
         j(Assembler::Parity, fail);
         j(Assembler::NotEqual, fail);
-
     }
 
     // Checks whether a float32 is representable as a 32-bit integer. If so, the
     // integer is written to the output register. Otherwise, a bailout is taken to
     // the given snapshot. This function overwrites the scratch float register.
     void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
                                bool negativeZeroCheck = true)
     {
         // Check for -0.0
         if (negativeZeroCheck)
             branchNegativeZeroFloat32(src, dest, fail);
 
+        ScratchFloat32Scope scratch(asMasm());
         vcvttss2si(src, dest);
-        convertInt32ToFloat32(dest, ScratchFloat32Reg);
-        vucomiss(ScratchFloat32Reg, src);
+        convertInt32ToFloat32(dest, scratch);
+        vucomiss(scratch, src);
         j(Assembler::Parity, fail);
         j(Assembler::NotEqual, fail);
     }
 
     void clampIntToUint8(Register reg) {
         Label inRange;
         branchTest32(Assembler::Zero, reg, Imm32(0xffffff00), &inRange);
         {
--- a/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
@@ -246,42 +246,46 @@ MoveEmitterX86::breakCycle(const MoveOpe
     //   (A -> B)
     //   (B -> A)
     //
     // This case handles (A -> B), which we reach first. We save B, then allow
     // the original move to continue.
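     // For the float and vector cases below, a memory destination goes
     // through the scratch register: x86 has no memory-to-memory moves.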
     switch (type) {
       case MoveOp::INT32X4:
         if (to.isMemory()) {
-            masm.loadAlignedInt32x4(toAddress(to), ScratchSimdReg);
-            masm.storeAlignedInt32x4(ScratchSimdReg, cycleSlot());
+            ScratchSimdScope scratch(masm);
+            masm.loadAlignedInt32x4(toAddress(to), scratch);
+            masm.storeAlignedInt32x4(scratch, cycleSlot());
         } else {
             masm.storeAlignedInt32x4(to.floatReg(), cycleSlot());
         }
         break;
       case MoveOp::FLOAT32X4:
         if (to.isMemory()) {
-            masm.loadAlignedFloat32x4(toAddress(to), ScratchSimdReg);
-            masm.storeAlignedFloat32x4(ScratchSimdReg, cycleSlot());
+            ScratchSimdScope scratch(masm);
+            masm.loadAlignedFloat32x4(toAddress(to), scratch);
+            masm.storeAlignedFloat32x4(scratch, cycleSlot());
         } else {
             masm.storeAlignedFloat32x4(to.floatReg(), cycleSlot());
         }
         break;
       case MoveOp::FLOAT32:
         if (to.isMemory()) {
-            masm.loadFloat32(toAddress(to), ScratchFloat32Reg);
-            masm.storeFloat32(ScratchFloat32Reg, cycleSlot());
+            ScratchFloat32Scope scratch(masm);
+            masm.loadFloat32(toAddress(to), scratch);
+            masm.storeFloat32(scratch, cycleSlot());
         } else {
             masm.storeFloat32(to.floatReg(), cycleSlot());
         }
         break;
       case MoveOp::DOUBLE:
         if (to.isMemory()) {
-            masm.loadDouble(toAddress(to), ScratchDoubleReg);
-            masm.storeDouble(ScratchDoubleReg, cycleSlot());
+            ScratchDoubleScope scratch(masm);
+            masm.loadDouble(toAddress(to), scratch);
+            masm.storeDouble(scratch, cycleSlot());
         } else {
             masm.storeDouble(to.floatReg(), cycleSlot());
         }
         break;
       case MoveOp::INT32:
 #ifdef JS_CODEGEN_X64
         // x64 can't pop to a 32-bit destination, so don't push.
         if (to.isMemory()) {
@@ -309,48 +313,52 @@ MoveEmitterX86::completeCycle(const Move
     //
     // This case handles (B -> A), which we reach last. We emit a move from the
     // saved value of B, to A.
     switch (type) {
       case MoveOp::INT32X4:
         MOZ_ASSERT(pushedAtCycle_ != -1);
         MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
         if (to.isMemory()) {
-            masm.loadAlignedInt32x4(cycleSlot(), ScratchSimdReg);
-            masm.storeAlignedInt32x4(ScratchSimdReg, toAddress(to));
+            ScratchSimdScope scratch(masm);
+            masm.loadAlignedInt32x4(cycleSlot(), scratch);
+            masm.storeAlignedInt32x4(scratch, toAddress(to));
         } else {
             masm.loadAlignedInt32x4(cycleSlot(), to.floatReg());
         }
         break;
       case MoveOp::FLOAT32X4:
         MOZ_ASSERT(pushedAtCycle_ != -1);
         MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
         if (to.isMemory()) {
-            masm.loadAlignedFloat32x4(cycleSlot(), ScratchSimdReg);
-            masm.storeAlignedFloat32x4(ScratchSimdReg, toAddress(to));
+            ScratchSimdScope scratch(masm);
+            masm.loadAlignedFloat32x4(cycleSlot(), scratch);
+            masm.storeAlignedFloat32x4(scratch, toAddress(to));
         } else {
             masm.loadAlignedFloat32x4(cycleSlot(), to.floatReg());
         }
         break;
       case MoveOp::FLOAT32:
         MOZ_ASSERT(pushedAtCycle_ != -1);
         MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(float));
         if (to.isMemory()) {
-            masm.loadFloat32(cycleSlot(), ScratchFloat32Reg);
-            masm.storeFloat32(ScratchFloat32Reg, toAddress(to));
+            ScratchFloat32Scope scratch(masm);
+            masm.loadFloat32(cycleSlot(), scratch);
+            masm.storeFloat32(scratch, toAddress(to));
         } else {
             masm.loadFloat32(cycleSlot(), to.floatReg());
         }
         break;
       case MoveOp::DOUBLE:
         MOZ_ASSERT(pushedAtCycle_ != -1);
         MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(double));
         if (to.isMemory()) {
-            masm.loadDouble(cycleSlot(), ScratchDoubleReg);
-            masm.storeDouble(ScratchDoubleReg, toAddress(to));
+            ScratchDoubleScope scratch(masm);
+            masm.loadDouble(cycleSlot(), scratch);
+            masm.storeDouble(scratch, toAddress(to));
         } else {
             masm.loadDouble(cycleSlot(), to.floatReg());
         }
         break;
       case MoveOp::INT32:
 #ifdef JS_CODEGEN_X64
         MOZ_ASSERT(pushedAtCycle_ != -1);
         MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(int32_t));
@@ -448,18 +456,19 @@ MoveEmitterX86::emitFloat32Move(const Mo
             masm.moveFloat32(from.floatReg(), to.floatReg());
         else
             masm.storeFloat32(from.floatReg(), toAddress(to));
     } else if (to.isFloatReg()) {
         masm.loadFloat32(toAddress(from), to.floatReg());
     } else {
         // Memory to memory move.
         MOZ_ASSERT(from.isMemory());
-        masm.loadFloat32(toAddress(from), ScratchFloat32Reg);
-        masm.storeFloat32(ScratchFloat32Reg, toAddress(to));
+        ScratchFloat32Scope scratch(masm);
+        masm.loadFloat32(toAddress(from), scratch);
+        masm.storeFloat32(scratch, toAddress(to));
     }
 }
 
 void
 MoveEmitterX86::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
 {
     MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isDouble());
     MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isDouble());
@@ -469,18 +478,19 @@ MoveEmitterX86::emitDoubleMove(const Mov
             masm.moveDouble(from.floatReg(), to.floatReg());
         else
             masm.storeDouble(from.floatReg(), toAddress(to));
     } else if (to.isFloatReg()) {
         masm.loadDouble(toAddress(from), to.floatReg());
     } else {
         // Memory to memory move.
         MOZ_ASSERT(from.isMemory());
-        masm.loadDouble(toAddress(from), ScratchDoubleReg);
-        masm.storeDouble(ScratchDoubleReg, toAddress(to));
+        ScratchDoubleScope scratch(masm);
+        masm.loadDouble(toAddress(from), scratch);
+        masm.storeDouble(scratch, toAddress(to));
     }
 }
 
 void
 MoveEmitterX86::emitInt32X4Move(const MoveOperand& from, const MoveOperand& to)
 {
     MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isInt32x4());
     MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isInt32x4());
@@ -490,18 +500,19 @@ MoveEmitterX86::emitInt32X4Move(const Mo
             masm.moveInt32x4(from.floatReg(), to.floatReg());
         else
             masm.storeAlignedInt32x4(from.floatReg(), toAddress(to));
     } else if (to.isFloatReg()) {
         masm.loadAlignedInt32x4(toAddress(from), to.floatReg());
     } else {
         // Memory to memory move.
         MOZ_ASSERT(from.isMemory());
-        masm.loadAlignedInt32x4(toAddress(from), ScratchSimdReg);
-        masm.storeAlignedInt32x4(ScratchSimdReg, toAddress(to));
+        ScratchSimdScope scratch(masm);
+        masm.loadAlignedInt32x4(toAddress(from), scratch);
+        masm.storeAlignedInt32x4(scratch, toAddress(to));
     }
 }
 
 void
 MoveEmitterX86::emitFloat32X4Move(const MoveOperand& from, const MoveOperand& to)
 {
     MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isFloat32x4());
     MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isFloat32x4());
@@ -511,18 +522,19 @@ MoveEmitterX86::emitFloat32X4Move(const 
             masm.moveFloat32x4(from.floatReg(), to.floatReg());
         else
             masm.storeAlignedFloat32x4(from.floatReg(), toAddress(to));
     } else if (to.isFloatReg()) {
         masm.loadAlignedFloat32x4(toAddress(from), to.floatReg());
     } else {
         // Memory to memory move.
         MOZ_ASSERT(from.isMemory());
-        masm.loadAlignedFloat32x4(toAddress(from), ScratchSimdReg);
-        masm.storeAlignedFloat32x4(ScratchSimdReg, toAddress(to));
+        ScratchSimdScope scratch(masm);
+        masm.loadAlignedFloat32x4(toAddress(from), scratch);
+        masm.storeAlignedFloat32x4(scratch, toAddress(to));
     }
 }
 
 void
 MoveEmitterX86::assertDone()
 {
     MOZ_ASSERT(!inCycle_);
 }