Bug 1226027 - Use Simd128 register content type. r=bbouvier
author: Jakob Stoklund Olesen <jolesen@mozilla.com>
Mon, 30 Nov 2015 16:16:28 -0800
changeset 308941 a7a1efdcec6ed4b572bcd4b5eadc0700cde879e4
parent 308940 64bb552ce03ac84f4a7ac3bf9012f71c673ad976
child 308942 ddaae692ddd9a7427fe18e93334d80a1f49bac96
push id: 5513
push user: raliiev@mozilla.com
push date: Mon, 25 Jan 2016 13:55:34 +0000
treeherder: mozilla-beta@5ee97dd05b5c [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: bbouvier
bugs: 1226027
milestone: 45.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1226027 - Use Simd128 register content type. r=bbouvier In preparation for the addition of a new set of SIMD types, collapse all of the 128-bit SIMD types into a single content type for a FloatRegister. This saves bits in TypedRegisterSet and prevents us from overflowing the uint64_t bit mask currently used. For consistency, provide global variables ReturnSimd128Reg and ScratchSimd128Reg, and rename ScratchSimdScope to ScratchSimd128Scope.
js/src/asmjs/WasmStubs.cpp
js/src/jit/JitFrames.cpp
js/src/jit/LIR.h
js/src/jit/Lowering.cpp
js/src/jit/Recover.cpp
js/src/jit/RegisterSets.h
js/src/jit/arm/Architecture-arm.h
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm64/Architecture-arm64.h
js/src/jit/arm64/Assembler-arm64.h
js/src/jit/mips-shared/Architecture-mips-shared.h
js/src/jit/mips-shared/Assembler-mips-shared.h
js/src/jit/mips32/Architecture-mips32.h
js/src/jit/mips64/Architecture-mips64.h
js/src/jit/none/Architecture-none.h
js/src/jit/none/MacroAssembler-none.h
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/shared/Lowering-shared-inl.h
js/src/jit/x64/Assembler-x64.cpp
js/src/jit/x64/Assembler-x64.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x86-shared/Architecture-x86-shared.cpp
js/src/jit/x86-shared/Architecture-x86-shared.h
js/src/jit/x86-shared/Assembler-x86-shared.h
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
js/src/jit/x86-shared/MacroAssembler-x86-shared.h
js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/MacroAssembler-x86.cpp
--- a/js/src/asmjs/WasmStubs.cpp
+++ b/js/src/asmjs/WasmStubs.cpp
@@ -215,23 +215,23 @@ GenerateEntry(MacroAssembler& masm, AsmJ
                 masm.loadDouble(src, ScratchDoubleReg);
                 masm.storeDouble(ScratchDoubleReg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                 break;
               case MIRType_Float32:
                 masm.loadFloat32(src, ScratchFloat32Reg);
                 masm.storeFloat32(ScratchFloat32Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                 break;
               case MIRType_Int32x4:
-                masm.loadUnalignedInt32x4(src, ScratchSimdReg);
-                masm.storeAlignedInt32x4(ScratchSimdReg,
+                masm.loadUnalignedInt32x4(src, ScratchSimd128Reg);
+                masm.storeAlignedInt32x4(ScratchSimd128Reg,
                                          Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                 break;
               case MIRType_Float32x4:
-                masm.loadUnalignedFloat32x4(src, ScratchSimdReg);
-                masm.storeAlignedFloat32x4(ScratchSimdReg,
+                masm.loadUnalignedFloat32x4(src, ScratchSimd128Reg);
+                masm.storeAlignedFloat32x4(ScratchSimd128Reg,
                                            Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                 break;
               default:
                 MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
             }
             break;
         }
     }
@@ -263,21 +263,21 @@ GenerateEntry(MacroAssembler& masm, AsmJ
         masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
         // Fall through as ReturnDoubleReg now contains a Double
       case ExprType::F64:
         masm.canonicalizeDouble(ReturnDoubleReg);
         masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
         break;
       case ExprType::I32x4:
         // We don't have control on argv alignment, do an unaligned access.
-        masm.storeUnalignedInt32x4(ReturnInt32x4Reg, Address(argv, 0));
+        masm.storeUnalignedInt32x4(ReturnSimd128Reg, Address(argv, 0));
         break;
       case ExprType::F32x4:
         // We don't have control on argv alignment, do an unaligned access.
-        masm.storeUnalignedFloat32x4(ReturnFloat32x4Reg, Address(argv, 0));
+        masm.storeUnalignedFloat32x4(ReturnSimd128Reg, Address(argv, 0));
         break;
     }
 
     // Restore clobbered non-volatile registers of the caller.
     masm.PopRegsInMask(NonVolatileRegs);
     MOZ_ASSERT(masm.framePushed() == 0);
 
     masm.move32(Imm32(true), ReturnReg);
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -2599,18 +2599,17 @@ MachineState::FromBailout(RegisterDump::
     for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
         machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Double), &fpregs[i]);
         machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Single), &fpregs[i]);
     }
 #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
     for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
         machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Single), &fpregs[i]);
         machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Double), &fpregs[i]);
-        machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Int32x4), &fpregs[i]);
-        machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Float32x4), &fpregs[i]);
+        machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Simd128), &fpregs[i]);
     }
 #elif defined(JS_CODEGEN_ARM64)
     for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
         machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Single), &fpregs[i]);
         machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Double), &fpregs[i]);
     }
 
 #elif defined(JS_CODEGEN_NONE)
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -479,20 +479,18 @@ class LDefinition
         return type() == INT32X4 || type() == FLOAT32X4;
     }
     bool isCompatibleReg(const AnyRegister& r) const {
         if (isFloatReg() && r.isFloat()) {
             if (type() == FLOAT32)
                 return r.fpu().isSingle();
             if (type() == DOUBLE)
                 return r.fpu().isDouble();
-            if (type() == INT32X4)
-                return r.fpu().isInt32x4();
-            if (type() == FLOAT32X4)
-                return r.fpu().isFloat32x4();
+            if (isSimdType())
+                return r.fpu().isSimd128();
             MOZ_CRASH("Unexpected MDefinition type");
         }
         return !isFloatReg() && !r.isFloat();
     }
     bool isCompatibleDef(const LDefinition& other) const {
 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
         if (isFloatReg() && other.isFloatReg())
             return type() == other.type();
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -3841,20 +3841,18 @@ void
 LIRGenerator::visitAsmJSReturn(MAsmJSReturn* ins)
 {
     MDefinition* rval = ins->getOperand(0);
     LAsmJSReturn* lir = new(alloc()) LAsmJSReturn;
     if (rval->type() == MIRType_Float32)
         lir->setOperand(0, useFixed(rval, ReturnFloat32Reg));
     else if (rval->type() == MIRType_Double)
         lir->setOperand(0, useFixed(rval, ReturnDoubleReg));
-    else if (rval->type() == MIRType_Int32x4)
-        lir->setOperand(0, useFixed(rval, ReturnInt32x4Reg));
-    else if (rval->type() == MIRType_Float32x4)
-        lir->setOperand(0, useFixed(rval, ReturnFloat32x4Reg));
+    else if (IsSimdType(rval->type()))
+        lir->setOperand(0, useFixed(rval, ReturnSimd128Reg));
     else if (rval->type() == MIRType_Int32)
         lir->setOperand(0, useFixed(rval, ReturnReg));
     else
         MOZ_CRASH("Unexpected asm.js return type");
     add(lir);
 }
 
 void
--- a/js/src/jit/Recover.cpp
+++ b/js/src/jit/Recover.cpp
@@ -1336,22 +1336,22 @@ RSimdBox::recover(JSContext* cx, Snapsho
 {
     JSObject* resultObject = nullptr;
     RValueAllocation a = iter.readAllocation();
     MOZ_ASSERT(iter.allocationReadable(a));
     const FloatRegisters::RegisterContent* raw = iter.floatAllocationPointer(a);
     switch (SimdTypeDescr::Type(type_)) {
       case SimdTypeDescr::Int32x4:
         MOZ_ASSERT_IF(a.mode() == RValueAllocation::ANY_FLOAT_REG,
-                      a.fpuReg().isInt32x4());
+                      a.fpuReg().isSimd128());
         resultObject = js::CreateSimd<Int32x4>(cx, (const Int32x4::Elem*) raw);
         break;
       case SimdTypeDescr::Float32x4:
         MOZ_ASSERT_IF(a.mode() == RValueAllocation::ANY_FLOAT_REG,
-                      a.fpuReg().isFloat32x4());
+                      a.fpuReg().isSimd128());
         resultObject = js::CreateSimd<Float32x4>(cx, (const Float32x4::Elem*) raw);
         break;
       case SimdTypeDescr::Float64x2:
         MOZ_CRASH("NYI, RSimdBox of Float64x2");
         break;
       case SimdTypeDescr::Int8x16:
         MOZ_CRASH("NYI, RSimdBox of Int8x16");
         break;
--- a/js/src/jit/RegisterSets.h
+++ b/js/src/jit/RegisterSets.h
@@ -637,22 +637,22 @@ class AllocatableSetAccessors<RegisterSe
         set_.fpus().takeAllocatable(reg);
     }
 };
 
 
 // The LiveSet accessors are used to collect a list of allocated
 // registers. Taking or adding a register should *not* consider the aliases, as
 // we care about interpreting the registers with the correct type.  For example,
-// on x64, where one float registers can be interpreted as an Int32x4, a Double,
-// or a Float, adding xmm0 as an Int32x4, does not make the register available
+// on x64, where one float register can be interpreted as a Simd128, a Double,
+// or a Float, adding xmm0 as a Simd128 does not make the register available
 // as a Double.
 //
 //     LiveFloatRegisterSet regs;
-//     regs.add(xmm0.asInt32x4());
+//     regs.add(xmm0.asSimd128());
 //     regs.take(xmm0); // Assert!
 //
 // These accessors are useful for recording the result of a register allocator,
 // such as what the Backtracking allocator do on the Safepoints.
 template <typename Set>
 class LiveSetAccessors
 {
   public:
--- a/js/src/jit/arm/Architecture-arm.h
+++ b/js/src/jit/arm/Architecture-arm.h
@@ -386,36 +386,34 @@ class VFPRegister
     bool operator==(const VFPRegister& other) const {
         MOZ_ASSERT(!isInvalid());
         MOZ_ASSERT(!other.isInvalid());
         return kind == other.kind && code_ == other.code_;
     }
 
     bool isSingle() const { return kind == Single; }
     bool isDouble() const { return kind == Double; }
-    bool isInt32x4() const { return false; }
-    bool isFloat32x4() const { return false; }
+    bool isSimd128() const { return false; }
     bool isFloat() const { return (kind == Double) || (kind == Single); }
     bool isInt() const { return (kind == UInt) || (kind == Int); }
     bool isSInt() const { return kind == Int; }
     bool isUInt() const { return kind == UInt; }
     bool equiv(const VFPRegister& other) const { return other.kind == kind; }
     size_t size() const { return (kind == Double) ? 8 : 4; }
     bool isInvalid() const;
     bool isMissing() const;
 
     VFPRegister doubleOverlay(unsigned int which = 0) const;
     VFPRegister singleOverlay(unsigned int which = 0) const;
     VFPRegister sintOverlay(unsigned int which = 0) const;
     VFPRegister uintOverlay(unsigned int which = 0) const;
 
     VFPRegister asSingle() const { return singleOverlay(); }
     VFPRegister asDouble() const { return doubleOverlay(); }
-    VFPRegister asInt32x4() const { MOZ_CRASH("NYI"); }
-    VFPRegister asFloat32x4() const { MOZ_CRASH("NYI"); }
+    VFPRegister asSimd128() const { MOZ_CRASH("NYI"); }
 
     struct VFPRegIndexSplit;
     VFPRegIndexSplit encode();
 
     // For serializing values.
     struct VFPRegIndexSplit {
         const uint32_t block : 4;
         const uint32_t bit : 1;
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -115,21 +115,20 @@ static MOZ_CONSTEXPR_VAR FloatRegister I
 
 static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = r3;
 static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = r2;
 static MOZ_CONSTEXPR_VAR Register StackPointer = sp;
 static MOZ_CONSTEXPR_VAR Register FramePointer = InvalidReg;
 static MOZ_CONSTEXPR_VAR Register ReturnReg = r0;
 static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32Reg = { FloatRegisters::d0, VFPRegister::Single };
 static MOZ_CONSTEXPR_VAR FloatRegister ReturnDoubleReg = { FloatRegisters::d0, VFPRegister::Double};
-static MOZ_CONSTEXPR_VAR FloatRegister ReturnInt32x4Reg = InvalidFloatReg;
-static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32x4Reg = InvalidFloatReg;
+static MOZ_CONSTEXPR_VAR FloatRegister ReturnSimd128Reg = InvalidFloatReg;
 static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloat32Reg = { FloatRegisters::d30, VFPRegister::Single };
 static MOZ_CONSTEXPR_VAR FloatRegister ScratchDoubleReg = { FloatRegisters::d15, VFPRegister::Double };
-static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimdReg = InvalidFloatReg;
+static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimd128Reg = InvalidFloatReg;
 static MOZ_CONSTEXPR_VAR FloatRegister ScratchUIntReg = { FloatRegisters::d15, VFPRegister::UInt };
 static MOZ_CONSTEXPR_VAR FloatRegister ScratchIntReg = { FloatRegisters::d15, VFPRegister::Int };
 
 struct ScratchFloat32Scope : public AutoFloatRegisterScope
 {
     explicit ScratchFloat32Scope(MacroAssembler& masm)
       : AutoFloatRegisterScope(masm, ScratchFloat32Reg)
     { }
--- a/js/src/jit/arm64/Architecture-arm64.h
+++ b/js/src/jit/arm64/Architecture-arm64.h
@@ -406,20 +406,17 @@ struct FloatRegister
     }
 
     bool isSingle() const {
         return k_ == FloatRegisters::Single;
     }
     bool isDouble() const {
         return k_ == FloatRegisters::Double;
     }
-    bool isInt32x4() const {
-        return false;
-    }
-    bool isFloat32x4() const {
+    bool isSimd128() const {
         return false;
     }
 
     static uint32_t FirstBit(SetType x) {
         JS_STATIC_ASSERT(sizeof(SetType) == 8);
         return mozilla::CountTrailingZeroes64(x);
     }
     static uint32_t LastBit(SetType x) {
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -38,19 +38,16 @@ static constexpr FloatRegister ScratchDo
 static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::d0, FloatRegisters::Double };
 
 static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::s0, FloatRegisters::Single };
 static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::s31, FloatRegisters::Single };
 
 static constexpr Register InvalidReg = { Registers::invalid_reg };
 static constexpr FloatRegister InvalidFloatReg = { FloatRegisters::invalid_fpreg, FloatRegisters::Single };
 
-static constexpr FloatRegister ReturnInt32x4Reg = InvalidFloatReg;
-static constexpr FloatRegister ReturnFloat32x4Reg = InvalidFloatReg;
-
 static constexpr Register OsrFrameReg = { Registers::x3 };
 static constexpr Register ArgumentsRectifierReg = { Registers::x8 };
 static constexpr Register CallTempReg0 = { Registers::x9 };
 static constexpr Register CallTempReg1 = { Registers::x10 };
 static constexpr Register CallTempReg2 = { Registers::x11 };
 static constexpr Register CallTempReg3 = { Registers::x12 };
 static constexpr Register CallTempReg4 = { Registers::x13 };
 static constexpr Register CallTempReg5 = { Registers::x14 };
@@ -59,18 +56,18 @@ static constexpr Register PreBarrierReg 
 
 static constexpr Register ReturnReg = { Registers::x0 };
 static constexpr Register JSReturnReg = { Registers::x2 };
 static constexpr Register FramePointer = { Registers::fp };
 static constexpr Register ZeroRegister = { Registers::sp };
 static constexpr ARMRegister ZeroRegister64 = { Registers::sp, 64 };
 static constexpr ARMRegister ZeroRegister32 = { Registers::sp, 32 };
 
-static constexpr FloatRegister ReturnSimdReg = InvalidFloatReg;
-static constexpr FloatRegister ScratchSimdReg = InvalidFloatReg;
+static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
+static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
 
 // StackPointer is intentionally undefined on ARM64 to prevent misuse:
 //  using sp as a base register is only valid if sp % 16 == 0.
 static constexpr Register RealStackPointer = { Registers::sp };
 
 static constexpr Register PseudoStackPointer = { Registers::x28 };
 static constexpr ARMRegister PseudoStackPointer64 = { Registers::x28, 64 };
 static constexpr ARMRegister PseudoStackPointer32 = { Registers::x28, 32 };
--- a/js/src/jit/mips-shared/Architecture-mips-shared.h
+++ b/js/src/jit/mips-shared/Architecture-mips-shared.h
@@ -284,18 +284,17 @@ class FloatRegistersMIPSShared
 };
 
 template <typename T>
 class TypedRegisterSet;
 
 class FloatRegisterMIPSShared
 {
   public:
-    bool isInt32x4() const { return false; }
-    bool isFloat32x4() const { return false; }
+    bool isSimd128() const { return false; }
 
     typedef FloatRegistersMIPSShared::SetType SetType;
 
     static uint32_t SetSize(SetType x) {
         static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
         return mozilla::CountPopulation32(x);
     }
     static uint32_t FirstBit(SetType x) {
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -99,19 +99,18 @@ static MOZ_CONSTEXPR_VAR Register HeapRe
 static MOZ_CONSTEXPR_VAR Register PreBarrierReg = a1;
 
 static MOZ_CONSTEXPR_VAR Register InvalidReg = { Registers::invalid_reg };
 static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg;
 
 static MOZ_CONSTEXPR_VAR Register StackPointer = sp;
 static MOZ_CONSTEXPR_VAR Register FramePointer = InvalidReg;
 static MOZ_CONSTEXPR_VAR Register ReturnReg = v0;
-static MOZ_CONSTEXPR_VAR FloatRegister ReturnInt32x4Reg = InvalidFloatReg;
-static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32x4Reg = InvalidFloatReg;
-static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimdReg = InvalidFloatReg;
+static MOZ_CONSTEXPR_VAR FloatRegister ReturnSimd128Reg = InvalidFloatReg;
+static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimd128Reg = InvalidFloatReg;
 
 // A bias applied to the GlobalReg to allow the use of instructions with small
 // negative immediate offsets which doubles the range of global data that can be
 // accessed with a single instruction.
 static const int32_t AsmJSGlobalRegBias = 32768;
 
 // Registers used in the GenerateFFIIonExit Enable Activation block.
 static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegCallee = t0;
--- a/js/src/jit/mips32/Architecture-mips32.h
+++ b/js/src/jit/mips32/Architecture-mips32.h
@@ -161,18 +161,17 @@ class FloatRegister : public FloatRegist
 
     FloatRegister doubleOverlay(unsigned int which = 0) const;
     FloatRegister singleOverlay(unsigned int which = 0) const;
     FloatRegister sintOverlay(unsigned int which = 0) const;
     FloatRegister uintOverlay(unsigned int which = 0) const;
 
     FloatRegister asSingle() const { return singleOverlay(); }
     FloatRegister asDouble() const { return doubleOverlay(); }
-    FloatRegister asInt32x4() const { MOZ_CRASH("NYI"); }
-    FloatRegister asFloat32x4() const { MOZ_CRASH("NYI"); }
+    FloatRegister asSimd128() const { MOZ_CRASH("NYI"); }
 
     Code code() const {
         MOZ_ASSERT(!isInvalid());
         return Code(code_  | (kind_ << 5));
     }
     Encoding encoding() const {
         MOZ_ASSERT(!isInvalid());
         return Encoding(code_);
--- a/js/src/jit/mips64/Architecture-mips64.h
+++ b/js/src/jit/mips64/Architecture-mips64.h
@@ -126,18 +126,17 @@ class FloatRegister : public FloatRegist
     bool isSingle() const { return kind_ == Codes::Single; }
     bool isDouble() const { return kind_ == Codes::Double; }
 
     FloatRegister singleOverlay() const;
     FloatRegister doubleOverlay() const;
 
     FloatRegister asSingle() const { return singleOverlay(); }
     FloatRegister asDouble() const { return doubleOverlay(); }
-    FloatRegister asInt32x4() const { MOZ_CRASH("NYI"); }
-    FloatRegister asFloat32x4() const { MOZ_CRASH("NYI"); }
+    FloatRegister asSimd128() const { MOZ_CRASH("NYI"); }
 
     Code code() const {
         MOZ_ASSERT(!isInvalid());
         return Code(reg_ | (kind_ << 5));
     }
     Encoding encoding() const {
         MOZ_ASSERT(!isInvalid());
         MOZ_ASSERT(uint32_t(reg_) < Codes::TotalPhys);
--- a/js/src/jit/none/Architecture-none.h
+++ b/js/src/jit/none/Architecture-none.h
@@ -99,22 +99,20 @@ struct FloatRegister
 
     Code _;
 
     static uint32_t FirstBit(SetType) { MOZ_CRASH(); }
     static uint32_t LastBit(SetType) { MOZ_CRASH(); }
     static FloatRegister FromCode(uint32_t) { MOZ_CRASH(); }
     bool isSingle() const { MOZ_CRASH(); }
     bool isDouble() const { MOZ_CRASH(); }
-    bool isInt32x4() const { MOZ_CRASH(); }
-    bool isFloat32x4() const { MOZ_CRASH(); }
+    bool isSimd128() const { MOZ_CRASH(); }
     FloatRegister asSingle() const { MOZ_CRASH(); }
     FloatRegister asDouble() const { MOZ_CRASH(); }
-    FloatRegister asInt32x4() const { MOZ_CRASH(); }
-    FloatRegister asFloat32x4() const { MOZ_CRASH(); }
+    FloatRegister asSimd128() const { MOZ_CRASH(); }
     Code code() const { MOZ_CRASH(); }
     Encoding encoding() const { MOZ_CRASH(); }
     const char* name() const { MOZ_CRASH(); }
     bool volatile_() const { MOZ_CRASH(); }
     bool operator != (FloatRegister) const { MOZ_CRASH(); }
     bool operator == (FloatRegister) const { MOZ_CRASH(); }
     bool aliases(FloatRegister) const { MOZ_CRASH(); }
     uint32_t numAliased() const { MOZ_CRASH(); }
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -15,21 +15,20 @@
 namespace js {
 namespace jit {
 
 static MOZ_CONSTEXPR_VAR Register StackPointer = { Registers::invalid_reg };
 static MOZ_CONSTEXPR_VAR Register FramePointer = { Registers::invalid_reg };
 static MOZ_CONSTEXPR_VAR Register ReturnReg = { Registers::invalid_reg };
 static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32Reg = { FloatRegisters::invalid_reg };
 static MOZ_CONSTEXPR_VAR FloatRegister ReturnDoubleReg = { FloatRegisters::invalid_reg };
-static MOZ_CONSTEXPR_VAR FloatRegister ReturnInt32x4Reg = { FloatRegisters::invalid_reg };
-static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32x4Reg = { FloatRegisters::invalid_reg };
+static MOZ_CONSTEXPR_VAR FloatRegister ReturnSimd128Reg = { FloatRegisters::invalid_reg };
 static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloat32Reg = { FloatRegisters::invalid_reg };
 static MOZ_CONSTEXPR_VAR FloatRegister ScratchDoubleReg = { FloatRegisters::invalid_reg };
-static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimdReg = { FloatRegisters::invalid_reg };
+static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimd128Reg = { FloatRegisters::invalid_reg };
 static MOZ_CONSTEXPR_VAR FloatRegister InvalidFloatReg = { FloatRegisters::invalid_reg };
 
 static MOZ_CONSTEXPR_VAR Register OsrFrameReg = { Registers::invalid_reg };
 static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = { Registers::invalid_reg };
 static MOZ_CONSTEXPR_VAR Register PreBarrierReg = { Registers::invalid_reg };
 static MOZ_CONSTEXPR_VAR Register CallTempReg0 = { Registers::invalid_reg };
 static MOZ_CONSTEXPR_VAR Register CallTempReg1 = { Registers::invalid_reg };
 static MOZ_CONSTEXPR_VAR Register CallTempReg2 = { Registers::invalid_reg };
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1152,19 +1152,17 @@ class StoreOp
         masm.storePtr(reg, dump);
     }
     void operator()(FloatRegister reg, Address dump) {
         if (reg.isDouble())
             masm.storeDouble(reg, dump);
         else if (reg.isSingle())
             masm.storeFloat32(reg, dump);
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-        else if (reg.isInt32x4())
-            masm.storeUnalignedInt32x4(reg, dump);
-        else if (reg.isFloat32x4())
+        else if (reg.isSimd128())
             masm.storeUnalignedFloat32x4(reg, dump);
 #endif
         else
             MOZ_CRASH("Unexpected register type.");
     }
 };
 
 static void
--- a/js/src/jit/shared/Lowering-shared-inl.h
+++ b/js/src/jit/shared/Lowering-shared-inl.h
@@ -155,20 +155,20 @@ LIRGeneratorShared::defineReturn(LInstru
         break;
       case MIRType_Float32:
         lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32, LFloatReg(ReturnFloat32Reg)));
         break;
       case MIRType_Double:
         lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE, LFloatReg(ReturnDoubleReg)));
         break;
       case MIRType_Int32x4:
-        lir->setDef(0, LDefinition(vreg, LDefinition::INT32X4, LFloatReg(ReturnInt32x4Reg)));
+        lir->setDef(0, LDefinition(vreg, LDefinition::INT32X4, LFloatReg(ReturnSimd128Reg)));
         break;
       case MIRType_Float32x4:
-        lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32X4, LFloatReg(ReturnFloat32x4Reg)));
+        lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32X4, LFloatReg(ReturnSimd128Reg)));
         break;
       default:
         LDefinition::Type type = LDefinition::TypeFrom(mir->type());
         MOZ_ASSERT(type != LDefinition::DOUBLE && type != LDefinition::FLOAT32);
         lir->setDef(0, LDefinition(vreg, type, LGeneralReg(ReturnReg)));
         break;
     }
 
--- a/js/src/jit/x64/Assembler-x64.cpp
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -50,26 +50,21 @@ ABIArgGenerator::next(MIRType type)
         break;
       case MIRType_Float32:
         current_ = ABIArg(FloatArgRegs[regIndex_++].asSingle());
         break;
       case MIRType_Double:
         current_ = ABIArg(FloatArgRegs[regIndex_++]);
         break;
       case MIRType_Int32x4:
-        // On Win64, >64 bit args need to be passed by reference, but asm.js
-        // doesn't allow passing SIMD values to FFIs. The only way to reach
-        // here is asm to asm calls, so we can break the ABI here.
-        current_ = ABIArg(FloatArgRegs[regIndex_++].asInt32x4());
-        break;
       case MIRType_Float32x4:
         // On Win64, >64 bit args need to be passed by reference, but asm.js
         // doesn't allow passing SIMD values to FFIs. The only way to reach
         // here is asm to asm calls, so we can break the ABI here.
-        current_ = ABIArg(FloatArgRegs[regIndex_++].asFloat32x4());
+        current_ = ABIArg(FloatArgRegs[regIndex_++].asSimd128());
         break;
       default:
         MOZ_CRASH("Unexpected argument type");
     }
     return current_;
 #else
     switch (type) {
       case MIRType_Int32:
@@ -96,20 +91,17 @@ ABIArgGenerator::next(MIRType type)
       case MIRType_Int32x4:
       case MIRType_Float32x4:
         if (floatRegIndex_ == NumFloatArgRegs) {
             stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
             current_ = ABIArg(stackOffset_);
             stackOffset_ += Simd128DataSize;
             break;
         }
-        if (type == MIRType_Int32x4)
-            current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asInt32x4());
-        else
-            current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asFloat32x4());
+        current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSimd128());
         break;
       default:
         MOZ_CRASH("Unexpected argument type");
     }
     return current_;
 #endif
 }
 
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -80,21 +80,20 @@ struct ScratchRegisterScope : public Aut
       : AutoRegisterScope(masm, ScratchReg)
     { }
 };
 
 static MOZ_CONSTEXPR_VAR Register ReturnReg = rax;
 static MOZ_CONSTEXPR_VAR Register HeapReg = r15;
 static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
 static MOZ_CONSTEXPR_VAR FloatRegister ReturnDoubleReg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
-static MOZ_CONSTEXPR_VAR FloatRegister ReturnInt32x4Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Int32x4);
-static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32x4Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Float32x4);
+static MOZ_CONSTEXPR_VAR FloatRegister ReturnSimd128Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
 static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloat32Reg = FloatRegister(X86Encoding::xmm15, FloatRegisters::Single);
 static MOZ_CONSTEXPR_VAR FloatRegister ScratchDoubleReg = FloatRegister(X86Encoding::xmm15, FloatRegisters::Double);
-static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimdReg = xmm15;
+static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimd128Reg = xmm15;
 
 // Avoid rbp, which is the FramePointer, which is unavailable in some modes.
 static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = r8;
 static MOZ_CONSTEXPR_VAR Register CallTempReg0 = rax;
 static MOZ_CONSTEXPR_VAR Register CallTempReg1 = rdi;
 static MOZ_CONSTEXPR_VAR Register CallTempReg2 = rbx;
 static MOZ_CONSTEXPR_VAR Register CallTempReg3 = rcx;
 static MOZ_CONSTEXPR_VAR Register CallTempReg4 = rsi;
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -331,24 +331,25 @@ CodeGeneratorX64::emitSimdLoad(LAsmJSLoa
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, 2, srcAddr,
                                     *ins->output()->output());
         masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
 
         // Load Z (W is zeroed)
         // This is still in bounds, as we've checked with a manual bounds check
         // or we had enough space for sure when removing the bounds check.
         before = after;
-        loadSimd(type, 1, srcAddrZ, ScratchSimdReg);
+        loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg);
         after = masm.size();
-        verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, 1, srcAddrZ, LFloatReg(ScratchSimdReg));
+        verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, 1, srcAddrZ,
+                                    LFloatReg(ScratchSimd128Reg));
         masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw,
                                     AsmJSHeapAccess::NoLengthCheck, 8));
 
         // Move ZW atop XY
-        masm.vmovlhps(ScratchSimdReg, out, out);
+        masm.vmovlhps(ScratchSimd128Reg, out, out);
     } else {
         uint32_t before = masm.size();
         loadSimd(type, numElems, srcAddr, out);
         uint32_t after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, numElems, srcAddr, *ins->output()->output());
         masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
     }
 
@@ -478,21 +479,22 @@ CodeGeneratorX64::emitSimdStore(LAsmJSSt
             ? Operand(HeapReg, 2 * sizeof(float) + mir->offset())
             : Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float) + mir->offset());
 
         // It's possible that the Z could be out of bounds when the XY is in
         // bounds. To avoid storing the XY before the exception is thrown, we
         // store the Z first, and record its offset in the AsmJSHeapAccess so
         // that the signal handler knows to check the bounds of the full
         // access, rather than just the Z.
-        masm.vmovhlps(in, ScratchSimdReg, ScratchSimdReg);
+        masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg);
         uint32_t before = masm.size();
-        storeSimd(type, 1, ScratchSimdReg, dstAddrZ);
+        storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ);
         uint32_t after = masm.size();
-        verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, 1, dstAddrZ, LFloatReg(ScratchSimdReg));
+        verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, 1, dstAddrZ,
+                                    LFloatReg(ScratchSimd128Reg));
         masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset, 8));
 
         // Store XY
         before = after;
         storeSimd(type, 2, in, dstAddr);
         after = masm.size();
         verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, 2, dstAddr, *ins->value());
         masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw));
--- a/js/src/jit/x86-shared/Architecture-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Architecture-x86-shared.cpp
@@ -58,29 +58,26 @@ js::jit::FloatRegister::ReduceSetForPush
 
     return FloatRegisterSet(bits);
 }
 
 uint32_t
 js::jit::FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s)
 {
     SetType all = s.bits();
-    SetType float32x4Set =
-        (all >> (uint32_t(Codes::Float32x4) * Codes::TotalPhys)) & Codes::AllPhysMask;
-    SetType int32x4Set =
-        (all >> (uint32_t(Codes::Int32x4) * Codes::TotalPhys)) & Codes::AllPhysMask;
+    SetType set128b =
+        (all >> (uint32_t(Codes::Simd128) * Codes::TotalPhys)) & Codes::AllPhysMask;
     SetType doubleSet =
         (all >> (uint32_t(Codes::Double) * Codes::TotalPhys)) & Codes::AllPhysMask;
     SetType singleSet =
         (all >> (uint32_t(Codes::Single) * Codes::TotalPhys)) & Codes::AllPhysMask;
 
     // PushRegsInMask pushes the largest register first, and thus avoids pushing
     // aliased registers. So we have to filter out the physical registers which
     // are already pushed as part of larger registers.
-    SetType set128b = int32x4Set | float32x4Set;
     SetType set64b = doubleSet & ~set128b;
     SetType set32b = singleSet & ~set64b  & ~set128b;
 
     static_assert(Codes::AllPhysMask <= 0xffff, "We can safely use CountPopulation32");
     uint32_t count32b = mozilla::CountPopulation32(set32b);
 
 #if defined(JS_CODEGEN_X64)
     // If we have an odd number of 32 bits values, then we increase the size to
--- a/js/src/jit/x86-shared/Architecture-x86-shared.h
+++ b/js/src/jit/x86-shared/Architecture-x86-shared.h
@@ -188,20 +188,19 @@ class Registers {
 
 typedef Registers::SetType PackedRegisterMask;
 
 class FloatRegisters {
   public:
     typedef X86Encoding::XMMRegisterID Encoding;
 
     enum ContentType {
-        Single,
-        Double,
-        Int32x4,
-        Float32x4,
+        Single,     // 32-bit float.
+        Double,     // 64-bit double.
+        Simd128,    // 128-bit SIMD type (int32x4, bool16x8, etc).
         NumTypes
     };
 
     // Content spilled during bailouts.
     union RegisterContent {
         float s;
         double d;
         int32_t i4[4];
@@ -239,20 +238,19 @@ class FloatRegisters {
     static_assert(sizeof(SetType) * 8 >= Total,
                   "SetType should be large enough to enumerate all registers.");
 
     // Magic values which are used to duplicate a mask of physical register for
     // a specific type of register. A multiplication is used to copy and shift
     // the bits of the physical register mask.
     static const SetType SpreadSingle = SetType(1) << (uint32_t(Single) * TotalPhys);
     static const SetType SpreadDouble = SetType(1) << (uint32_t(Double) * TotalPhys);
-    static const SetType SpreadInt32x4 = SetType(1) << (uint32_t(Int32x4) * TotalPhys);
-    static const SetType SpreadFloat32x4 = SetType(1) << (uint32_t(Float32x4) * TotalPhys);
+    static const SetType SpreadSimd128 = SetType(1) << (uint32_t(Simd128) * TotalPhys);
     static const SetType SpreadScalar = SpreadSingle | SpreadDouble;
-    static const SetType SpreadVector = SpreadInt32x4 | SpreadFloat32x4;
+    static const SetType SpreadVector = SpreadSimd128;
     static const SetType Spread = SpreadScalar | SpreadVector;
 
     static const SetType AllPhysMask = ((1 << TotalPhys) - 1);
     static const SetType AllMask = AllPhysMask * Spread;
     static const SetType AllDoubleMask = AllPhysMask * SpreadDouble;
 
 #if defined(JS_CODEGEN_X86)
     static const SetType NonAllocatableMask =
@@ -353,32 +351,30 @@ struct FloatRegister {
 
     static FloatRegister FromCode(uint32_t i) {
         MOZ_ASSERT(i < Codes::Total);
         return FloatRegister(i & RegMask, Codes::ContentType(i >> RegSize));
     }
 
     bool isSingle() const { MOZ_ASSERT(!isInvalid()); return type_ == Codes::Single; }
     bool isDouble() const { MOZ_ASSERT(!isInvalid()); return type_ == Codes::Double; }
-    bool isInt32x4() const { MOZ_ASSERT(!isInvalid()); return type_ == Codes::Int32x4; }
-    bool isFloat32x4() const { MOZ_ASSERT(!isInvalid()); return type_ == Codes::Float32x4; }
+    bool isSimd128() const { MOZ_ASSERT(!isInvalid()); return type_ == Codes::Simd128; }
     bool isInvalid() const { return isInvalid_; }
 
     FloatRegister asSingle() const { MOZ_ASSERT(!isInvalid()); return FloatRegister(reg_, Codes::Single); }
     FloatRegister asDouble() const { MOZ_ASSERT(!isInvalid()); return FloatRegister(reg_, Codes::Double); }
-    FloatRegister asInt32x4() const { MOZ_ASSERT(!isInvalid()); return FloatRegister(reg_, Codes::Int32x4); }
-    FloatRegister asFloat32x4() const { MOZ_ASSERT(!isInvalid()); return FloatRegister(reg_, Codes::Float32x4); }
+    FloatRegister asSimd128() const { MOZ_ASSERT(!isInvalid()); return FloatRegister(reg_, Codes::Simd128); }
 
     uint32_t size() const {
         MOZ_ASSERT(!isInvalid());
         if (isSingle())
             return sizeof(float);
         if (isDouble())
             return sizeof(double);
-        MOZ_ASSERT(isInt32x4() || isFloat32x4());
+        MOZ_ASSERT(isSimd128());
         return 4 * sizeof(int32_t);
     }
 
     Code code() const {
         MOZ_ASSERT(!isInvalid());
         MOZ_ASSERT(uint32_t(reg_) < Codes::TotalPhys);
         // :TODO: ARM is doing the same thing, but we should avoid this, except
         // that the RegisterSets depends on this.
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -31,20 +31,20 @@ struct ScratchFloat32Scope : public Auto
 
 struct ScratchDoubleScope : public AutoFloatRegisterScope
 {
     explicit ScratchDoubleScope(MacroAssembler& masm)
       : AutoFloatRegisterScope(masm, ScratchDoubleReg)
     { }
 };
 
-struct ScratchSimdScope : public AutoFloatRegisterScope
+struct ScratchSimd128Scope : public AutoFloatRegisterScope
 {
-    explicit ScratchSimdScope(MacroAssembler& masm)
-      : AutoFloatRegisterScope(masm, ScratchSimdReg)
+    explicit ScratchSimd128Scope(MacroAssembler& masm)
+      : AutoFloatRegisterScope(masm, ScratchSimd128Reg)
     { }
 };
 
 class Operand
 {
   public:
     enum Kind {
         REG,
@@ -2202,18 +2202,18 @@ class AssemblerX86Shared : public Assemb
     void vcmpps(uint8_t order, Operand src1, FloatRegister src0, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         // :TODO: (Bug 1132894) See LIRGeneratorX86Shared::lowerForFPU
         // FIXME: This logic belongs in the MacroAssembler.
         if (!HasAVX() && !src0.aliases(dest)) {
             if (src1.kind() == Operand::FPREG &&
                 dest.aliases(FloatRegister::FromCode(src1.fpu())))
             {
-                vmovdqa(src1, ScratchSimdReg);
-                src1 = Operand(ScratchSimdReg);
+                vmovdqa(src1, ScratchSimd128Reg);
+                src1 = Operand(ScratchSimd128Reg);
             }
             vmovdqa(src0, dest);
             src0 = dest;
         }
         switch (src1.kind()) {
           case Operand::FPREG:
             masm.vcmpps_rr(order, src1.fpu(), src0.encoding(), dest.encoding());
             break;
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -2246,17 +2246,17 @@ CodeGeneratorX86Shared::visitFloat32x4To
 
     masm.convertFloat32x4ToInt32x4(in, out);
 
     OutOfLineSimdFloatToIntCheck *ool = new(alloc()) OutOfLineSimdFloatToIntCheck(temp, in, ins);
     addOutOfLineCode(ool, ins->mir());
 
     static const SimdConstant InvalidResult = SimdConstant::SplatX4(int32_t(-2147483648));
 
-    ScratchSimdScope scratch(masm);
+    ScratchSimd128Scope scratch(masm);
     masm.loadConstantInt32x4(InvalidResult, scratch);
     masm.packedEqualInt32x4(Operand(out), scratch);
     // TODO (bug 1156228): If we have SSE4.1, we can use PTEST here instead of
     // the two following instructions.
     masm.vmovmskps(scratch, temp);
     masm.cmp32(temp, Imm32(0));
     masm.j(Assembler::NotEqual, ool->entry());
 
@@ -2270,17 +2270,17 @@ CodeGeneratorX86Shared::visitOutOfLineSi
     static const SimdConstant Int32MinX4 = SimdConstant::SplatX4(-2147483648.f);
 
     Label bail;
     Label* onConversionError = gen->compilingAsmJS() ? masm.asmOnConversionErrorLabel() : &bail;
 
     FloatRegister input = ool->input();
     Register temp = ool->temp();
 
-    ScratchSimdScope scratch(masm);
+    ScratchSimd128Scope scratch(masm);
     masm.loadConstantFloat32x4(Int32MinX4, scratch);
     masm.vcmpleps(Operand(input), scratch, scratch);
     masm.vmovmskps(scratch, temp);
     masm.cmp32(temp, Imm32(15));
     masm.j(Assembler::NotEqual, onConversionError);
 
     masm.loadConstantFloat32x4(Int32MaxX4, scratch);
     masm.vcmpleps(Operand(input), scratch, scratch);
@@ -2397,17 +2397,17 @@ CodeGeneratorX86Shared::visitSimdExtract
     SimdLane lane = ins->lane();
     if (lane == LaneX) {
         // The value we want to extract is in the low double-word
         masm.moveLowInt32(input, output);
     } else if (AssemblerX86Shared::HasSSE41()) {
         masm.vpextrd(lane, input, output);
     } else {
         uint32_t mask = MacroAssembler::ComputeShuffleMask(lane);
-        ScratchSimdScope scratch(masm);
+        ScratchSimd128Scope scratch(masm);
         masm.shuffleInt32(mask, input, scratch);
         masm.moveLowInt32(scratch, output);
     }
 }
 
 void
 CodeGeneratorX86Shared::visitSimdExtractElementF(LSimdExtractElementF* ins)
 {
@@ -2762,31 +2762,31 @@ CodeGeneratorX86Shared::visitSimdShuffle
     }
 
     // Two elements from one vector, two other elements from the other
     MOZ_ASSERT(numLanesFromLHS == 2);
 
     // TODO Here and below, symmetric case would be more handy to avoid a move,
     // but can't be reached because operands would get swapped (bug 1084404).
     if (ins->lanesMatch(2, 3, 6, 7)) {
-        ScratchSimdScope scratch(masm);
+        ScratchSimd128Scope scratch(masm);
         if (AssemblerX86Shared::HasAVX()) {
             FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
             masm.vmovhlps(lhs, rhsCopy, out);
         } else {
             masm.loadAlignedFloat32x4(rhs, scratch);
             masm.vmovhlps(lhs, scratch, scratch);
             masm.moveFloat32x4(scratch, out);
         }
         return;
     }
 
     if (ins->lanesMatch(0, 1, 4, 5)) {
         FloatRegister rhsCopy;
-        ScratchSimdScope scratch(masm);
+        ScratchSimd128Scope scratch(masm);
         if (rhs.kind() == Operand::FPREG) {
             // No need to make an actual copy, since the operand is already
             // in a register, and it won't be clobbered by the vmovlhps.
             rhsCopy = FloatRegister::FromCode(rhs.fpu());
         } else {
             masm.loadAlignedFloat32x4(rhs, scratch);
             rhsCopy = scratch;
         }
@@ -2796,17 +2796,17 @@ CodeGeneratorX86Shared::visitSimdShuffle
 
     if (ins->lanesMatch(0, 4, 1, 5)) {
         masm.vunpcklps(rhs, lhs, out);
         return;
     }
 
     // TODO swapped case would be better (bug 1084404)
     if (ins->lanesMatch(4, 0, 5, 1)) {
-        ScratchSimdScope scratch(masm);
+        ScratchSimd128Scope scratch(masm);
         if (AssemblerX86Shared::HasAVX()) {
             FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
             masm.vunpcklps(lhs, rhsCopy, out);
         } else {
             masm.loadAlignedFloat32x4(rhs, scratch);
             masm.vunpcklps(lhs, scratch, scratch);
             masm.moveFloat32x4(scratch, out);
         }
@@ -2815,17 +2815,17 @@ CodeGeneratorX86Shared::visitSimdShuffle
 
     if (ins->lanesMatch(2, 6, 3, 7)) {
         masm.vunpckhps(rhs, lhs, out);
         return;
     }
 
     // TODO swapped case would be better (bug 1084404)
     if (ins->lanesMatch(6, 2, 7, 3)) {
-        ScratchSimdScope scratch(masm);
+        ScratchSimd128Scope scratch(masm);
         if (AssemblerX86Shared::HasAVX()) {
             FloatRegister rhsCopy = masm.reusedInputAlignedFloat32x4(rhs, scratch);
             masm.vunpckhps(lhs, rhsCopy, out);
         } else {
             masm.loadAlignedFloat32x4(rhs, scratch);
             masm.vunpckhps(lhs, scratch, scratch);
             masm.moveFloat32x4(scratch, out);
         }
@@ -2876,17 +2876,17 @@ void
 CodeGeneratorX86Shared::visitSimdBinaryCompIx4(LSimdBinaryCompIx4* ins)
 {
     static const SimdConstant allOnes = SimdConstant::SplatX4(-1);
 
     FloatRegister lhs = ToFloatRegister(ins->lhs());
     Operand rhs = ToOperand(ins->rhs());
     MOZ_ASSERT(ToFloatRegister(ins->output()) == lhs);
 
-    ScratchSimdScope scratch(masm);
+    ScratchSimd128Scope scratch(masm);
 
     MSimdBinaryComp::Operation op = ins->operation();
     switch (op) {
       case MSimdBinaryComp::greaterThan:
         masm.packedGreaterThanInt32x4(rhs, lhs);
         return;
       case MSimdBinaryComp::equal:
         masm.packedEqualInt32x4(rhs, lhs);
@@ -2963,17 +2963,17 @@ CodeGeneratorX86Shared::visitSimdBinaryC
 
 void
 CodeGeneratorX86Shared::visitSimdBinaryArithIx4(LSimdBinaryArithIx4* ins)
 {
     FloatRegister lhs = ToFloatRegister(ins->lhs());
     Operand rhs = ToOperand(ins->rhs());
     FloatRegister output = ToFloatRegister(ins->output());
 
-    ScratchSimdScope scratch(masm);
+    ScratchSimd128Scope scratch(masm);
 
     MSimdBinaryArith::Operation op = ins->operation();
     switch (op) {
       case MSimdBinaryArith::Op_add:
         masm.vpaddd(rhs, lhs, output);
         return;
       case MSimdBinaryArith::Op_sub:
         masm.vpsubd(rhs, lhs, output);
@@ -3019,17 +3019,17 @@ CodeGeneratorX86Shared::visitSimdBinaryA
 
 void
 CodeGeneratorX86Shared::visitSimdBinaryArithFx4(LSimdBinaryArithFx4* ins)
 {
     FloatRegister lhs = ToFloatRegister(ins->lhs());
     Operand rhs = ToOperand(ins->rhs());
     FloatRegister output = ToFloatRegister(ins->output());
 
-    ScratchSimdScope scratch(masm);
+    ScratchSimd128Scope scratch(masm);
 
     MSimdBinaryArith::Operation op = ins->operation();
     switch (op) {
       case MSimdBinaryArith::Op_add:
         masm.vaddps(rhs, lhs, output);
         return;
       case MSimdBinaryArith::Op_sub:
         masm.vsubps(rhs, lhs, output);
@@ -3685,18 +3685,16 @@ CodeGeneratorX86Shared::visitMemoryBarri
         masm.storeLoadFence();
 }
 
 void
 CodeGeneratorX86Shared::setReturnDoubleRegs(LiveRegisterSet* regs)
 {
     MOZ_ASSERT(ReturnFloat32Reg.encoding() == X86Encoding::xmm0);
     MOZ_ASSERT(ReturnDoubleReg.encoding() == X86Encoding::xmm0);
-    MOZ_ASSERT(ReturnInt32x4Reg.encoding() == X86Encoding::xmm0);
-    MOZ_ASSERT(ReturnFloat32x4Reg.encoding() == X86Encoding::xmm0);
+    MOZ_ASSERT(ReturnSimd128Reg.encoding() == X86Encoding::xmm0);
     regs->add(ReturnFloat32Reg);
     regs->add(ReturnDoubleReg);
-    regs->add(ReturnInt32x4Reg);
-    regs->add(ReturnFloat32x4Reg);
+    regs->add(ReturnSimd128Reg);
 }
 
 } // namespace jit
 } // namespace js
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -395,19 +395,17 @@ MacroAssembler::PushRegsInMask(LiveRegis
         FloatRegister reg = *iter;
         diffF -= reg.size();
         numFpu -= 1;
         Address spillAddress(StackPointer, diffF);
         if (reg.isDouble())
             storeDouble(reg, spillAddress);
         else if (reg.isSingle())
             storeFloat32(reg, spillAddress);
-        else if (reg.isInt32x4())
-            storeUnalignedInt32x4(reg, spillAddress);
-        else if (reg.isFloat32x4())
+        else if (reg.isSimd128())
             storeUnalignedFloat32x4(reg, spillAddress);
         else
             MOZ_CRASH("Unknown register type.");
     }
     MOZ_ASSERT(numFpu == 0);
     // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
     // GetPushBytesInSize.
     diffF -= diffF % sizeof(uintptr_t);
@@ -431,19 +429,17 @@ MacroAssembler::PopRegsInMaskIgnore(Live
         if (ignore.has(reg))
             continue;
 
         Address spillAddress(StackPointer, diffF);
         if (reg.isDouble())
             loadDouble(spillAddress, reg);
         else if (reg.isSingle())
             loadFloat32(spillAddress, reg);
-        else if (reg.isInt32x4())
-            loadUnalignedInt32x4(spillAddress, reg);
-        else if (reg.isFloat32x4())
+        else if (reg.isSimd128())
             loadUnalignedFloat32x4(spillAddress, reg);
         else
             MOZ_CRASH("Unknown register type.");
     }
     freeStack(reservedF);
     MOZ_ASSERT(numFpu == 0);
     // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
     // GetPushBytesInSize.
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
@@ -1004,26 +1004,26 @@ class MacroAssemblerX86Shared : public A
     }
     void loadInt32x2(const BaseIndex& src, FloatRegister dest) {
         vmovq(Operand(src), dest);
     }
     void loadInt32x3(const BaseIndex& src, FloatRegister dest) {
         BaseIndex srcZ(src);
         srcZ.offset += 2 * sizeof(int32_t);
 
-        ScratchSimdScope scratch(asMasm());
+        ScratchSimd128Scope scratch(asMasm());
         vmovq(Operand(src), dest);
         vmovd(Operand(srcZ), scratch);
         vmovlhps(scratch, dest, dest);
     }
     void loadInt32x3(const Address& src, FloatRegister dest) {
         Address srcZ(src);
         srcZ.offset += 2 * sizeof(int32_t);
 
-        ScratchSimdScope scratch(asMasm());
+        ScratchSimd128Scope scratch(asMasm());
         vmovq(Operand(src), dest);
         vmovd(Operand(srcZ), scratch);
         vmovlhps(scratch, dest, dest);
     }
 
     void loadAlignedInt32x4(const Address& src, FloatRegister dest) {
         vmovdqa(Operand(src), dest);
     }
@@ -1069,25 +1069,25 @@ class MacroAssemblerX86Shared : public A
     }
     void storeInt32x2(FloatRegister src, const BaseIndex& dest) {
         vmovq(src, Operand(dest));
     }
     void storeInt32x3(FloatRegister src, const Address& dest) {
         Address destZ(dest);
         destZ.offset += 2 * sizeof(int32_t);
         vmovq(src, Operand(dest));
-        ScratchSimdScope scratch(asMasm());
+        ScratchSimd128Scope scratch(asMasm());
         vmovhlps(src, scratch, scratch);
         vmovd(scratch, Operand(destZ));
     }
     void storeInt32x3(FloatRegister src, const BaseIndex& dest) {
         BaseIndex destZ(dest);
         destZ.offset += 2 * sizeof(int32_t);
         vmovq(src, Operand(dest));
-        ScratchSimdScope scratch(asMasm());
+        ScratchSimd128Scope scratch(asMasm());
         vmovhlps(src, scratch, scratch);
         vmovd(scratch, Operand(destZ));
     }
 
     void storeUnalignedInt32x4(FloatRegister src, const Address& dest) {
         vmovdqu(src, Operand(dest));
     }
     void storeUnalignedInt32x4(FloatRegister src, const BaseIndex& dest) {
@@ -1140,49 +1140,49 @@ class MacroAssemblerX86Shared : public A
     void packedUnsignedRightShiftByScalar(Imm32 count, FloatRegister dest) {
         vpsrld(count, dest, dest);
     }
 
     void loadFloat32x3(const Address& src, FloatRegister dest) {
         Address srcZ(src);
         srcZ.offset += 2 * sizeof(float);
         vmovsd(src, dest);
-        ScratchSimdScope scratch(asMasm());
+        ScratchSimd128Scope scratch(asMasm());
         vmovss(srcZ, scratch);
         vmovlhps(scratch, dest, dest);
     }
     void loadFloat32x3(const BaseIndex& src, FloatRegister dest) {
         BaseIndex srcZ(src);
         srcZ.offset += 2 * sizeof(float);
         vmovsd(src, dest);
-        ScratchSimdScope scratch(asMasm());
+        ScratchSimd128Scope scratch(asMasm());
         vmovss(srcZ, scratch);
         vmovlhps(scratch, dest, dest);
     }
 
     void loadAlignedFloat32x4(const Address& src, FloatRegister dest) {
         vmovaps(Operand(src), dest);
     }
     void loadAlignedFloat32x4(const Operand& src, FloatRegister dest) {
         vmovaps(src, dest);
     }
 
     void storeFloat32x3(FloatRegister src, const Address& dest) {
         Address destZ(dest);
         destZ.offset += 2 * sizeof(int32_t);
         storeDouble(src, dest);
-        ScratchSimdScope scratch(asMasm());
+        ScratchSimd128Scope scratch(asMasm());
         vmovhlps(src, scratch, scratch);
         storeFloat32(scratch, destZ);
     }
     void storeFloat32x3(FloatRegister src, const BaseIndex& dest) {
         BaseIndex destZ(dest);
         destZ.offset += 2 * sizeof(int32_t);
         storeDouble(src, dest);
-        ScratchSimdScope scratch(asMasm());
+        ScratchSimd128Scope scratch(asMasm());
         vmovhlps(src, scratch, scratch);
         storeFloat32(scratch, destZ);
     }
     void storeAlignedFloat32x4(FloatRegister src, const Address& dest) {
         vmovaps(src, Operand(dest));
     }
     void moveFloat32x4(FloatRegister src, FloatRegister dest) {
         vmovaps(src, dest);
--- a/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
@@ -246,26 +246,26 @@ MoveEmitterX86::breakCycle(const MoveOpe
     //   (A -> B)
     //   (B -> A)
     //
     // This case handles (A -> B), which we reach first. We save B, then allow
     // the original move to continue.
     switch (type) {
       case MoveOp::INT32X4:
         if (to.isMemory()) {
-            ScratchSimdScope scratch(masm);
+            ScratchSimd128Scope scratch(masm);
             masm.loadAlignedInt32x4(toAddress(to), scratch);
             masm.storeAlignedInt32x4(scratch, cycleSlot());
         } else {
             masm.storeAlignedInt32x4(to.floatReg(), cycleSlot());
         }
         break;
       case MoveOp::FLOAT32X4:
         if (to.isMemory()) {
-            ScratchSimdScope scratch(masm);
+            ScratchSimd128Scope scratch(masm);
             masm.loadAlignedFloat32x4(toAddress(to), scratch);
             masm.storeAlignedFloat32x4(scratch, cycleSlot());
         } else {
             masm.storeAlignedFloat32x4(to.floatReg(), cycleSlot());
         }
         break;
       case MoveOp::FLOAT32:
         if (to.isMemory()) {
@@ -313,28 +313,28 @@ MoveEmitterX86::completeCycle(const Move
     //
     // This case handles (B -> A), which we reach last. We emit a move from the
     // saved value of B, to A.
     switch (type) {
       case MoveOp::INT32X4:
         MOZ_ASSERT(pushedAtCycle_ != -1);
         MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
         if (to.isMemory()) {
-            ScratchSimdScope scratch(masm);
+            ScratchSimd128Scope scratch(masm);
             masm.loadAlignedInt32x4(cycleSlot(), scratch);
             masm.storeAlignedInt32x4(scratch, toAddress(to));
         } else {
             masm.loadAlignedInt32x4(cycleSlot(), to.floatReg());
         }
         break;
       case MoveOp::FLOAT32X4:
         MOZ_ASSERT(pushedAtCycle_ != -1);
         MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
         if (to.isMemory()) {
-            ScratchSimdScope scratch(masm);
+            ScratchSimd128Scope scratch(masm);
             masm.loadAlignedFloat32x4(cycleSlot(), scratch);
             masm.storeAlignedFloat32x4(scratch, toAddress(to));
         } else {
             masm.loadAlignedFloat32x4(cycleSlot(), to.floatReg());
         }
         break;
       case MoveOp::FLOAT32:
         MOZ_ASSERT(pushedAtCycle_ != -1);
@@ -487,52 +487,52 @@ MoveEmitterX86::emitDoubleMove(const Mov
         masm.loadDouble(toAddress(from), scratch);
         masm.storeDouble(scratch, toAddress(to));
     }
 }
 
 void
 MoveEmitterX86::emitInt32X4Move(const MoveOperand& from, const MoveOperand& to)
 {
-    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isInt32x4());
-    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isInt32x4());
+    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSimd128());
+    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSimd128());
 
     if (from.isFloatReg()) {
         if (to.isFloatReg())
             masm.moveInt32x4(from.floatReg(), to.floatReg());
         else
             masm.storeAlignedInt32x4(from.floatReg(), toAddress(to));
     } else if (to.isFloatReg()) {
         masm.loadAlignedInt32x4(toAddress(from), to.floatReg());
     } else {
         // Memory to memory move.
         MOZ_ASSERT(from.isMemory());
-        ScratchSimdScope scratch(masm);
+        ScratchSimd128Scope scratch(masm);
         masm.loadAlignedInt32x4(toAddress(from), scratch);
         masm.storeAlignedInt32x4(scratch, toAddress(to));
     }
 }
 
 void
 MoveEmitterX86::emitFloat32X4Move(const MoveOperand& from, const MoveOperand& to)
 {
-    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isFloat32x4());
-    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isFloat32x4());
+    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSimd128());
+    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSimd128());
 
     if (from.isFloatReg()) {
         if (to.isFloatReg())
             masm.moveFloat32x4(from.floatReg(), to.floatReg());
         else
             masm.storeAlignedFloat32x4(from.floatReg(), toAddress(to));
     } else if (to.isFloatReg()) {
         masm.loadAlignedFloat32x4(toAddress(from), to.floatReg());
     } else {
         // Memory to memory move.
         MOZ_ASSERT(from.isMemory());
-        ScratchSimdScope scratch(masm);
+        ScratchSimd128Scope scratch(masm);
         masm.loadAlignedFloat32x4(toAddress(from), scratch);
         masm.storeAlignedFloat32x4(scratch, toAddress(to));
     }
 }
 
 void
 MoveEmitterX86::assertDone()
 {
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -41,22 +41,20 @@ static MOZ_CONSTEXPR_VAR FloatRegister I
 
 static MOZ_CONSTEXPR_VAR Register JSReturnReg_Type = ecx;
 static MOZ_CONSTEXPR_VAR Register JSReturnReg_Data = edx;
 static MOZ_CONSTEXPR_VAR Register StackPointer = esp;
 static MOZ_CONSTEXPR_VAR Register FramePointer = ebp;
 static MOZ_CONSTEXPR_VAR Register ReturnReg = eax;
 static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
 static MOZ_CONSTEXPR_VAR FloatRegister ReturnDoubleReg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
-static MOZ_CONSTEXPR_VAR FloatRegister ReturnInt32x4Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Int32x4);
-static MOZ_CONSTEXPR_VAR FloatRegister ReturnFloat32x4Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Float32x4);
+static MOZ_CONSTEXPR_VAR FloatRegister ReturnSimd128Reg = FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
 static MOZ_CONSTEXPR_VAR FloatRegister ScratchFloat32Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Single);
 static MOZ_CONSTEXPR_VAR FloatRegister ScratchDoubleReg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
-static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimdReg = xmm7;
-static MOZ_CONSTEXPR_VAR FloatRegister ScratchInt32x4Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Int32x4);
+static MOZ_CONSTEXPR_VAR FloatRegister ScratchSimd128Reg = FloatRegister(X86Encoding::xmm7, FloatRegisters::Simd128);
 
 // Avoid ebp, which is the FramePointer, which is unavailable in some modes.
 static MOZ_CONSTEXPR_VAR Register ArgumentsRectifierReg = esi;
 static MOZ_CONSTEXPR_VAR Register CallTempReg0 = edi;
 static MOZ_CONSTEXPR_VAR Register CallTempReg1 = eax;
 static MOZ_CONSTEXPR_VAR Register CallTempReg2 = ebx;
 static MOZ_CONSTEXPR_VAR Register CallTempReg3 = ecx;
 static MOZ_CONSTEXPR_VAR Register CallTempReg4 = esi;
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -414,22 +414,22 @@ CodeGeneratorX86::emitSimdLoad(LAsmJSLoa
         loadSimd(type, 2, srcAddr, out);
         uint32_t after = masm.size();
         masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
 
         // Load Z (W is zeroed)
         // This is still in bounds, as we've checked with a manual bounds check
         // or we had enough space for sure when removing the bounds check.
         before = after;
-        loadSimd(type, 1, srcAddrZ, ScratchSimdReg);
+        loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg);
         after = masm.size();
         masm.append(AsmJSHeapAccess(before, after));
 
         // Move ZW atop XY
-        masm.vmovlhps(ScratchSimdReg, out, out);
+        masm.vmovlhps(ScratchSimd128Reg, out, out);
     } else {
         uint32_t before = masm.size();
         loadSimd(type, numElems, srcAddr, out);
         uint32_t after = masm.size();
         masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
     }
 
     if (maybeCmpOffset != AsmJSHeapAccess::NoLengthCheck)
@@ -588,23 +588,23 @@ CodeGeneratorX86::emitSimdStore(LAsmJSSt
             : Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());
 
         // Store XY
         uint32_t before = masm.size();
         storeSimd(type, 2, in, dstAddr);
         uint32_t after = masm.size();
         masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
 
-        masm.vmovhlps(in, ScratchSimdReg, ScratchSimdReg);
+        masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg);
 
         // Store Z (W is zeroed)
         // This is still in bounds, as we've checked with a manual bounds check
         // or we had enough space for sure when removing the bounds check.
         before = masm.size();
-        storeSimd(type, 1, ScratchSimdReg, dstAddrZ);
+        storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ);
         after = masm.size();
         masm.append(AsmJSHeapAccess(before, after));
     } else {
         uint32_t before = masm.size();
         storeSimd(type, numElems, in, dstAddr);
         uint32_t after = masm.size();
         masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
     }
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -44,30 +44,30 @@ MacroAssemblerX86::convertUInt64ToDouble
         convertUInt32ToDouble(src.low, ScratchDoubleReg);
         addDouble(ScratchDoubleReg, dest);
         return;
     }
 
     // Following operation uses entire 128-bit of dest XMM register.
     // Currently higher 64-bit is free when we have access to lower 64-bit.
     MOZ_ASSERT(dest.size() == 8);
-    FloatRegister dest128 = FloatRegister(dest.encoding(), FloatRegisters::Int32x4);
+    FloatRegister dest128 = FloatRegister(dest.encoding(), FloatRegisters::Simd128);
 
     // Assume that src is represented as following:
     //   src      = 0x HHHHHHHH LLLLLLLL
 
-    // Move src to dest (=dest128) and ScratchInt32x4Reg (=scratch):
+    // Move src to dest (=dest128) and ScratchSimd128Reg (=scratch):
     //   dest     = 0x 00000000 00000000  00000000 LLLLLLLL
     //   scratch  = 0x 00000000 00000000  00000000 HHHHHHHH
     vmovd(src.low, dest128);
-    vmovd(src.high, ScratchInt32x4Reg);
+    vmovd(src.high, ScratchSimd128Reg);
 
     // Unpack and interleave dest and scratch to dest:
     //   dest     = 0x 00000000 00000000  HHHHHHHH LLLLLLLL
-    vpunpckldq(ScratchInt32x4Reg, dest128, dest128);
+    vpunpckldq(ScratchSimd128Reg, dest128, dest128);
 
     // Unpack and interleave dest and a constant C1 to dest:
     //   C1       = 0x 00000000 00000000  45300000 43300000
     //   dest     = 0x 45300000 HHHHHHHH  43300000 LLLLLLLL
     // here, each 64-bit part of dest represents following double:
     //   HI(dest) = 0x 1.00000HHHHHHHH * 2**84 == 2**84 + 0x HHHHHHHH 00000000
     //   LO(dest) = 0x 1.00000LLLLLLLL * 2**52 == 2**52 + 0x 00000000 LLLLLLLL
     movePtr(ImmPtr(TO_DOUBLE), temp);