Bug 1631228 - wasm ion simd, part 0: remove old SIMD MIRTypes. r=bbouvier
authorLars T Hansen <lhansen@mozilla.com>
Wed, 20 May 2020 07:01:23 +0000
changeset 530960 53f7064dc0dee9dc33d26364fed4866fbe21c621
parent 530959 5c834683487fb3a438004cc64b2a6a7a48cc265e
child 530961 ae9d7bc02ca4e966bfab2a6a5624c4a74519df13
push id 37435
push user apavel@mozilla.com
push date Wed, 20 May 2020 15:28:23 +0000
treeherder mozilla-central@5415da14ec9a [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers bbouvier
bugs 1631228
milestone 78.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1631228 - wasm ion simd, part 0: remove old SIMD MIRTypes. r=bbouvier Remove the MIRTypes for SIMD.js and instead add a single new type, MIRType::Simd128, which is int/float and lane-width agnostic. Also rename Scalar::V128 as Scalar::Simd128. Remove various things that referenced the old types but were only useful for SIMD.js. Differential Revision: https://phabricator.services.mozilla.com/D71818
js/src/builtin/AtomicsObject.cpp
js/src/builtin/TypedObject.cpp
js/src/jit/CacheIR.cpp
js/src/jit/CacheIRCompiler.cpp
js/src/jit/CodeGenerator.cpp
js/src/jit/IonAnalysis.cpp
js/src/jit/IonTypes.h
js/src/jit/LIR.cpp
js/src/jit/LIR.h
js/src/jit/MIR.h
js/src/jit/MoveResolver.h
js/src/jit/RangeAnalysis.cpp
js/src/jit/StackSlotAllocator.h
js/src/jit/arm64/MacroAssembler-arm64.cpp
js/src/jit/shared/CodeGenerator-shared.cpp
js/src/jit/shared/Lowering-shared-inl.h
js/src/jit/x64/Assembler-x64.cpp
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/Lowering-x64.cpp
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86-shared/Lowering-x86-shared.cpp
js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
js/src/jit/x86-shared/MoveEmitter-x86-shared.h
js/src/jit/x86/Assembler-x86.cpp
js/src/jit/x86/Lowering-x86.cpp
js/src/jit/x86/MacroAssembler-x86.cpp
js/src/jsfriendapi.h
js/src/vm/CommonPropertyNames.h
js/src/vm/TypedArrayObject.cpp
js/src/wasm/WasmBaselineCompile.cpp
js/src/wasm/WasmStubs.cpp
js/src/wasm/WasmTypes.cpp
js/src/wasm/WasmTypes.h
js/src/wasm/WasmValidate.cpp
--- a/js/src/builtin/AtomicsObject.cpp
+++ b/js/src/builtin/AtomicsObject.cpp
@@ -262,17 +262,17 @@ bool perform(JSContext* cx, HandleValue 
       return F<int64_t>::run(cx, viewData.cast<int64_t*>() + offset, args...);
     case Scalar::BigUint64:
       return F<uint64_t>::run(cx, viewData.cast<uint64_t*>() + offset, args...);
     case Scalar::Float32:
     case Scalar::Float64:
     case Scalar::Uint8Clamped:
     case Scalar::MaxTypedArrayViewType:
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       break;
   }
   MOZ_CRASH("Unsupported TypedArray type");
 }
 
 template <typename T>
 struct DoCompareExchange {
   static bool run(JSContext* cx, SharedMem<T*> addr, HandleValue oldv,
--- a/js/src/builtin/TypedObject.cpp
+++ b/js/src/builtin/TypedObject.cpp
@@ -243,17 +243,17 @@ uint32_t ScalarTypeDescr::alignment(Type
 /*static*/ const char* ScalarTypeDescr::typeName(Type type) {
   switch (type) {
 #define NUMERIC_TYPE_TO_STRING(constant_, type_, name_) \
   case constant_:                                       \
     return #name_;
     JS_FOR_EACH_SCALAR_TYPE_REPR(NUMERIC_TYPE_TO_STRING)
 #undef NUMERIC_TYPE_TO_STRING
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
     case Scalar::MaxTypedArrayViewType:
       break;
   }
   MOZ_CRASH("Invalid type");
 }
 
 bool ScalarTypeDescr::call(JSContext* cx, unsigned argc, Value* vp) {
   CallArgs args = CallArgsFromVp(argc, vp);
@@ -292,17 +292,17 @@ bool ScalarTypeDescr::call(JSContext* cx
       return false;                                   \
     }                                                 \
     args.rval().setBigInt(ret);                       \
     return true;                                      \
   }
     JS_FOR_EACH_SCALAR_BIGINT_TYPE_REPR(BIGINT_CALL)
 #undef BIGINT_CALL
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
     case Scalar::MaxTypedArrayViewType:
       MOZ_CRASH();
   }
   return true;
 }
 
 /* static */
 TypeDescr* GlobalObject::getOrCreateScalarTypeDescr(
@@ -961,17 +961,17 @@ StructTypeDescr* StructMetaTypeDescr::cr
       return nullptr;
     }
 
     CheckedInt32 offset;
     if (fieldProps[i].alignAsInt64) {
       offset = layout.addField(ScalarTypeDescr::alignment(Scalar::Int64),
                                fieldType->size());
     } else if (fieldProps[i].alignAsV128) {
-      offset = layout.addField(ScalarTypeDescr::alignment(Scalar::V128),
+      offset = layout.addField(ScalarTypeDescr::alignment(Scalar::Simd128),
                                fieldType->size());
     } else {
       offset = layout.addField(fieldType->alignment(), fieldType->size());
     }
     if (!offset.isValid()) {
       JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                                 JSMSG_TYPEDOBJECT_TOO_BIG);
       return nullptr;
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -3502,17 +3502,17 @@ OperandId SetPropIRGenerator::emitNumeri
       return writer.guardToUint8Clamped(valId);
 
     case Scalar::BigInt64:
     case Scalar::BigUint64:
       return writer.guardToBigInt(valId);
 
     case Scalar::MaxTypedArrayViewType:
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       break;
   }
   MOZ_CRASH("Unsupported TypedArray type");
 }
 
 AttachDecision SetPropIRGenerator::tryAttachTypedObjectProperty(
     HandleObject obj, ObjOperandId objId, HandleId id, ValOperandId rhsId) {
   if (!obj->is<TypedObject>()) {
--- a/js/src/jit/CacheIRCompiler.cpp
+++ b/js/src/jit/CacheIRCompiler.cpp
@@ -3801,17 +3801,17 @@ bool CacheIRCompiler::emitStoreTypedElem
 
     case Scalar::BigInt64:
     case Scalar::BigUint64:
       valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(rhsId)));
       break;
 
     case Scalar::MaxTypedArrayViewType:
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       MOZ_CRASH("Unsupported TypedArray type");
   }
 
   AutoScratchRegister scratch1(allocator, masm);
   Maybe<AutoScratchRegister> scratch2;
   Maybe<AutoSpectreBoundsScratchRegister> spectreScratch;
   if (Scalar::isBigIntType(elementType)) {
     scratch2.emplace(allocator, masm);
@@ -4088,17 +4088,17 @@ bool CacheIRCompiler::emitStoreTypedObje
 
     case Scalar::BigInt64:
     case Scalar::BigUint64:
       valBigInt.emplace(allocator.useRegister(masm, BigIntOperandId(rhsId)));
       break;
 
     case Scalar::MaxTypedArrayViewType:
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       MOZ_CRASH("Unsupported TypedArray type");
   }
 
   AutoScratchRegister scratch(allocator, masm);
   Maybe<AutoScratchRegister> bigIntScratch;
   if (Scalar::isBigIntType(type)) {
     bigIntScratch.emplace(allocator, masm);
   }
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -4189,21 +4189,18 @@ void CodeGenerator::visitMoveGroup(LMove
         moveType = MoveOp::INT32;
         break;
       case LDefinition::FLOAT32:
         moveType = MoveOp::FLOAT32;
         break;
       case LDefinition::DOUBLE:
         moveType = MoveOp::DOUBLE;
         break;
-      case LDefinition::SIMD128INT:
-        moveType = MoveOp::SIMD128INT;
-        break;
-      case LDefinition::SIMD128FLOAT:
-        moveType = MoveOp::SIMD128FLOAT;
+      case LDefinition::SIMD128:
+        moveType = MoveOp::SIMD128;
         break;
       default:
         MOZ_CRASH("Unexpected move type");
     }
 
     masm.propagateOOM(
         resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
   }
@@ -8131,23 +8128,17 @@ void CodeGenerator::visitWasmLoadSlot(LW
       masm.loadDouble(addr, dst.fpu());
       break;
     case MIRType::Pointer:
     case MIRType::RefOrNull:
       masm.loadPtr(addr, dst.gpr());
       break;
     // Aligned access: code is aligned on PageSize + there is padding
     // before the global data section.
-    case MIRType::Int8x16:
-    case MIRType::Int16x8:
-    case MIRType::Int32x4:
-    case MIRType::Bool8x16:
-    case MIRType::Bool16x8:
-    case MIRType::Bool32x4:
-    case MIRType::Float32x4:
+    case MIRType::Simd128:
     default:
       MOZ_CRASH("unexpected type in LoadPrimitiveValue");
   }
 }
 
 void CodeGenerator::visitWasmStoreSlot(LWasmStoreSlot* ins) {
   MIRType type = ins->type();
   Register container = ToRegister(ins->containerRef());
@@ -8166,23 +8157,17 @@ void CodeGenerator::visitWasmStoreSlot(L
       break;
     case MIRType::Pointer:
       // This could be correct, but it would be a new usage, so check carefully.
       MOZ_CRASH("Unexpected type in visitWasmStoreSlot.");
     case MIRType::RefOrNull:
       MOZ_CRASH("Bad type in visitWasmStoreSlot. Use LWasmStoreRef.");
     // Aligned access: code is aligned on PageSize + there is padding
     // before the global data section.
-    case MIRType::Int8x16:
-    case MIRType::Int16x8:
-    case MIRType::Int32x4:
-    case MIRType::Bool8x16:
-    case MIRType::Bool16x8:
-    case MIRType::Bool32x4:
-    case MIRType::Float32x4:
+    case MIRType::Simd128:
     default:
       MOZ_CRASH("unexpected type in StorePrimitiveValue");
   }
 }
 
 void CodeGenerator::visitWasmDerivedPointer(LWasmDerivedPointer* ins) {
   masm.movePtr(ToRegister(ins->base()), ToRegister(ins->output()));
   masm.addPtr(Imm32(int32_t(ins->offset())), ToRegister(ins->output()));
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -3055,34 +3055,27 @@ static bool IsResumableMIRType(MIRType t
     case MIRType::Symbol:
     case MIRType::BigInt:
     case MIRType::Object:
     case MIRType::MagicOptimizedArguments:
     case MIRType::MagicOptimizedOut:
     case MIRType::MagicUninitializedLexical:
     case MIRType::MagicIsConstructing:
     case MIRType::Value:
-    case MIRType::Int32x4:
-    case MIRType::Int16x8:
-    case MIRType::Int8x16:
-    case MIRType::Float32x4:
-    case MIRType::Bool32x4:
-    case MIRType::Bool16x8:
-    case MIRType::Bool8x16:
+    case MIRType::Simd128:
       return true;
 
     case MIRType::MagicHole:
     case MIRType::ObjectOrNull:
     case MIRType::None:
     case MIRType::Slots:
     case MIRType::Elements:
     case MIRType::Pointer:
     case MIRType::Shape:
     case MIRType::ObjectGroup:
-    case MIRType::Doublex2:  // NYI, see also RSimdBox::recover
     case MIRType::Int64:
     case MIRType::RefOrNull:
     case MIRType::StackResults:
       return false;
   }
   MOZ_CRASH("Unknown MIRType.");
 }
 
--- a/js/src/jit/IonTypes.h
+++ b/js/src/jit/IonTypes.h
@@ -270,20 +270,20 @@ inline const char* BailoutKindString(Bai
   }
 
   MOZ_CRASH("Invalid BailoutKind");
 }
 
 static const uint32_t ELEMENT_TYPE_BITS = 5;
 static const uint32_t ELEMENT_TYPE_SHIFT = 0;
 static const uint32_t ELEMENT_TYPE_MASK = (1 << ELEMENT_TYPE_BITS) - 1;
-static const uint32_t VECTOR_SCALE_BITS = 3;
-static const uint32_t VECTOR_SCALE_SHIFT =
+static const uint32_t VECTOR_TYPE_BITS = 1;
+static const uint32_t VECTOR_TYPE_SHIFT =
     ELEMENT_TYPE_BITS + ELEMENT_TYPE_SHIFT;
-static const uint32_t VECTOR_SCALE_MASK = (1 << VECTOR_SCALE_BITS) - 1;
+static const uint32_t VECTOR_TYPE_MASK = (1 << VECTOR_TYPE_BITS) - 1;
 
 // The integer SIMD types have a lot of operations that do the exact same thing
 // for signed and unsigned integer types. Sometimes it is simpler to treat
 // signed and unsigned integer SIMD types as the same type, using a SimdSign to
 // distinguish the few cases where there is a difference.
 enum class SimdSign {
   // Signedness is not applicable to this type. (i.e., Float or Bool).
   NotApplicable,
@@ -504,16 +504,17 @@ enum class MIRType : uint8_t {
   Int32,
   Int64,
   Double,
   Float32,
   // Types above have trivial conversion to a number.
   String,
   Symbol,
   BigInt,
+  Simd128,
   // Types above are primitive (including undefined and null).
   Object,
   MagicOptimizedArguments,    // JS_OPTIMIZED_ARGUMENTS magic value.
   MagicOptimizedOut,          // JS_OPTIMIZED_OUT magic value.
   MagicHole,                  // JS_ELEMENTS_HOLE magic value.
   MagicIsConstructing,        // JS_IS_CONSTRUCTING magic value.
   MagicUninitializedLexical,  // JS_UNINITIALIZED_LEXICAL magic value.
   // Types above are specialized.
@@ -522,32 +523,19 @@ enum class MIRType : uint8_t {
   None,          // Invalid, used as a placeholder.
   Slots,         // A slots vector
   Elements,      // An elements vector
   Pointer,       // An opaque pointer that receives no special treatment
   RefOrNull,     // Wasm Ref/AnyRef/NullRef: a raw JSObject* or a raw (void*)0
   StackResults,  // Wasm multi-value stack result area, which may contain refs
   Shape,         // A Shape pointer.
   ObjectGroup,   // An ObjectGroup pointer.
-  Last = ObjectGroup,
-  // Representing both SIMD.IntBxN and SIMD.UintBxN.
-  Int8x16 = Int32 | (4 << VECTOR_SCALE_SHIFT),
-  Int16x8 = Int32 | (3 << VECTOR_SCALE_SHIFT),
-  Int32x4 = Int32 | (2 << VECTOR_SCALE_SHIFT),
-  Float32x4 = Float32 | (2 << VECTOR_SCALE_SHIFT),
-  Bool8x16 = Boolean | (4 << VECTOR_SCALE_SHIFT),
-  Bool16x8 = Boolean | (3 << VECTOR_SCALE_SHIFT),
-  Bool32x4 = Boolean | (2 << VECTOR_SCALE_SHIFT),
-  Doublex2 = Double | (1 << VECTOR_SCALE_SHIFT)
+  Last = ObjectGroup
 };
 
-static inline bool IsSimdType(MIRType type) {
-  return ((uint8_t(type) >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK) != 0;
-}
-
 static inline MIRType MIRTypeFromValueType(JSValueType type) {
   // This function does not deal with magic types. Magic constants should be
   // filtered out in MIRTypeFromValue.
   switch (type) {
     case JSVAL_TYPE_DOUBLE:
       return MIRType::Double;
     case JSVAL_TYPE_INT32:
       return MIRType::Int32;
@@ -612,17 +600,17 @@ static inline size_t MIRTypeToSize(MIRTy
     case MIRType::Int32:
       return 4;
     case MIRType::Int64:
       return 8;
     case MIRType::Float32:
       return 4;
     case MIRType::Double:
       return 8;
-    case MIRType::Int8x16:
+    case MIRType::Simd128:
       return 16;
     case MIRType::Pointer:
     case MIRType::RefOrNull:
       return sizeof(uintptr_t);
     default:
       MOZ_CRASH("MIRTypeToSize - unhandled case");
   }
 }
@@ -676,32 +664,18 @@ static inline const char* StringFromMIRT
     case MIRType::RefOrNull:
       return "RefOrNull";
     case MIRType::StackResults:
       return "StackResults";
     case MIRType::Shape:
       return "Shape";
     case MIRType::ObjectGroup:
       return "ObjectGroup";
-    case MIRType::Int32x4:
-      return "Int32x4";
-    case MIRType::Int16x8:
-      return "Int16x8";
-    case MIRType::Int8x16:
-      return "Int8x16";
-    case MIRType::Float32x4:
-      return "Float32x4";
-    case MIRType::Bool32x4:
-      return "Bool32x4";
-    case MIRType::Bool16x8:
-      return "Bool16x8";
-    case MIRType::Bool8x16:
-      return "Bool8x16";
-    case MIRType::Doublex2:
-      return "Doublex2";
+    case MIRType::Simd128:
+      return "Simd128";
   }
   MOZ_CRASH("Unknown MIRType.");
 }
 
 static inline bool IsIntType(MIRType type) {
   return type == MIRType::Int32 || type == MIRType::Int64;
 }
 
@@ -752,18 +726,18 @@ static inline MIRType ScalarTypeToMIRTyp
       return MIRType::Int64;
     case Scalar::Float32:
       return MIRType::Float32;
     case Scalar::Float64:
       return MIRType::Double;
     case Scalar::BigInt64:
     case Scalar::BigUint64:
       MOZ_CRASH("NYI");
-    case Scalar::V128:
-      return MIRType::Int8x16;
+    case Scalar::Simd128:
+      return MIRType::Simd128;
     case Scalar::MaxTypedArrayViewType:
       break;
   }
   MOZ_CRASH("unexpected kind");
 }
 
 #ifdef DEBUG
 
--- a/js/src/jit/LIR.cpp
+++ b/js/src/jit/LIR.cpp
@@ -358,20 +358,18 @@ static const char* DefTypeName(LDefiniti
     case LDefinition::OBJECT:
       return "o";
     case LDefinition::SLOTS:
       return "s";
     case LDefinition::FLOAT32:
       return "f";
     case LDefinition::DOUBLE:
       return "d";
-    case LDefinition::SIMD128INT:
-      return "simd128int";
-    case LDefinition::SIMD128FLOAT:
-      return "simd128float";
+    case LDefinition::SIMD128:
+      return "simd128";
     case LDefinition::STACKRESULTS:
       return "stackresults";
 #  ifdef JS_NUNBOX32
     case LDefinition::TYPE:
       return "t";
     case LDefinition::PAYLOAD:
       return "p";
 #  else
@@ -638,17 +636,17 @@ void LInstruction::initSafepoint(TempAll
 bool LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type) {
 #ifdef DEBUG
   MOZ_ASSERT(from != to);
   for (size_t i = 0; i < moves_.length(); i++) {
     MOZ_ASSERT(to != moves_[i].to());
   }
 
   // Check that SIMD moves are aligned according to ABI requirements.
-  if (LDefinition(type).isSimdType()) {
+  if (LDefinition(type).type() == LDefinition::SIMD128) {
     MOZ_ASSERT(from.isMemory() || from.isFloatReg());
     if (from.isMemory()) {
       if (from.isArgument()) {
         MOZ_ASSERT(from.toArgument()->index() % SimdMemoryAlignment == 0);
       } else {
         MOZ_ASSERT(from.toStackSlot()->slot() % SimdMemoryAlignment == 0);
       }
     }
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -478,41 +478,40 @@ class LDefinition {
     STACK,
 
     // One definition per instruction must re-use the first input
     // allocation, which (for now) must be a register.
     MUST_REUSE_INPUT
   };
 
   enum Type {
-    GENERAL,     // Generic, integer or pointer-width data (GPR).
-    INT32,       // int32 data (GPR).
-    OBJECT,      // Pointer that may be collected as garbage (GPR).
-    SLOTS,       // Slots/elements pointer that may be moved by minor GCs (GPR).
-    FLOAT32,     // 32-bit floating-point value (FPU).
-    DOUBLE,      // 64-bit floating-point value (FPU).
-    SIMD128INT,  // 128-bit SIMD integer vector (FPU).
-    SIMD128FLOAT,  // 128-bit SIMD floating point vector (FPU).
+    GENERAL,  // Generic, integer or pointer-width data (GPR).
+    INT32,    // int32 data (GPR).
+    OBJECT,   // Pointer that may be collected as garbage (GPR).
+    SLOTS,    // Slots/elements pointer that may be moved by minor GCs (GPR).
+    FLOAT32,  // 32-bit floating-point value (FPU).
+    DOUBLE,   // 64-bit floating-point value (FPU).
+    SIMD128,  // 128-bit SIMD vector (FPU).
     STACKRESULTS,  // A variable-size stack allocation that may contain objects.
 #ifdef JS_NUNBOX32
     // A type virtual register must be followed by a payload virtual
     // register, as both will be tracked as a single gcthing.
     TYPE,
     PAYLOAD
 #else
     BOX  // Joined box, for punbox systems. (GPR, gcthing)
 #endif
   };
 
   void set(uint32_t index, Type type, Policy policy) {
     static_assert(MAX_VIRTUAL_REGISTERS <= VREG_MASK);
     bits_ =
         (index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT);
 #ifndef ENABLE_WASM_SIMD
-    MOZ_ASSERT(!isSimdType());
+    MOZ_ASSERT(type() != SIMD128);
 #endif
   }
 
  public:
   LDefinition(uint32_t index, Type type, Policy policy = REGISTER) {
     set(index, type, policy);
   }
 
@@ -531,28 +530,25 @@ class LDefinition {
   LDefinition() : bits_(0) { MOZ_ASSERT(isBogusTemp()); }
 
   static LDefinition BogusTemp() { return LDefinition(); }
 
   Policy policy() const {
     return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK);
   }
   Type type() const { return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK); }
-  bool isSimdType() const {
-    return type() == SIMD128INT || type() == SIMD128FLOAT;
-  }
   bool isCompatibleReg(const AnyRegister& r) const {
     if (isFloatReg() && r.isFloat()) {
       if (type() == FLOAT32) {
         return r.fpu().isSingle();
       }
       if (type() == DOUBLE) {
         return r.fpu().isDouble();
       }
-      if (isSimdType()) {
+      if (type() == SIMD128) {
         return r.fpu().isSimd128();
       }
       MOZ_CRASH("Unexpected MDefinition type");
     }
     return !isFloatReg() && !r.isFloat();
   }
   bool isCompatibleDef(const LDefinition& other) const {
 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
@@ -561,17 +557,17 @@ class LDefinition {
     }
     return !isFloatReg() && !other.isFloatReg();
 #else
     return isFloatReg() == other.isFloatReg();
 #endif
   }
 
   bool isFloatReg() const {
-    return type() == FLOAT32 || type() == DOUBLE || isSimdType();
+    return type() == FLOAT32 || type() == DOUBLE || type() == SIMD128;
   }
   uint32_t virtualRegister() const {
     uint32_t index = (bits_ >> VREG_SHIFT) & VREG_MASK;
     // MOZ_ASSERT(index != 0);
     return index;
   }
   LAllocation* output() { return &output_; }
   const LAllocation* output() const { return &output_; }
@@ -627,25 +623,18 @@ class LDefinition {
       case MIRType::Pointer:
         return LDefinition::GENERAL;
 #if defined(JS_PUNBOX64)
       case MIRType::Int64:
         return LDefinition::GENERAL;
 #endif
       case MIRType::StackResults:
         return LDefinition::STACKRESULTS;
-      case MIRType::Int8x16:
-      case MIRType::Int16x8:
-      case MIRType::Int32x4:
-      case MIRType::Bool8x16:
-      case MIRType::Bool16x8:
-      case MIRType::Bool32x4:
-        return LDefinition::SIMD128INT;
-      case MIRType::Float32x4:
-        return LDefinition::SIMD128FLOAT;
+      case MIRType::Simd128:
+        return LDefinition::SIMD128;
       default:
         MOZ_CRASH("unexpected type");
     }
   }
 
   UniqueChars toString() const;
 
 #ifdef JS_JITSPEW
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -11804,17 +11804,17 @@ class MWasmAtomicBinopHeap : public MVar
 };
 
 class MWasmLoadGlobalVar : public MUnaryInstruction, public NoTypePolicy::Data {
   MWasmLoadGlobalVar(MIRType type, unsigned globalDataOffset, bool isConstant,
                      MDefinition* tlsPtr)
       : MUnaryInstruction(classOpcode, tlsPtr),
         globalDataOffset_(globalDataOffset),
         isConstant_(isConstant) {
-    MOZ_ASSERT(IsNumberType(type) || IsSimdType(type) ||
+    MOZ_ASSERT(IsNumberType(type) || type == MIRType::Simd128 ||
                type == MIRType::Pointer || type == MIRType::RefOrNull);
     setResultType(type);
     setMovable();
   }
 
   unsigned globalDataOffset_;
   bool isConstant_;
 
--- a/js/src/jit/MoveResolver.h
+++ b/js/src/jit/MoveResolver.h
@@ -171,17 +171,17 @@ class MoveOp {
   MoveOperand from_;
   MoveOperand to_;
   bool cycleBegin_;
   bool cycleEnd_;
   int cycleBeginSlot_;
   int cycleEndSlot_;
 
  public:
-  enum Type { GENERAL, INT32, FLOAT32, DOUBLE, SIMD128INT, SIMD128FLOAT };
+  enum Type { GENERAL, INT32, FLOAT32, DOUBLE, SIMD128 };
 
  protected:
   Type type_;
 
   // If cycleBegin_ is true, endCycleType_ is the type of the move at the end
   // of the cycle. For example, given these moves:
   //       INT32 move a -> b
   //     GENERAL move b -> a
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -1742,17 +1742,17 @@ static Range* GetArrayBufferViewRange(Te
     case Scalar::Int16:
       return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
     case Scalar::Int32:
       return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
 
     case Scalar::BigInt64:
     case Scalar::BigUint64:
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
     case Scalar::Float32:
     case Scalar::Float64:
     case Scalar::MaxTypedArrayViewType:
       break;
   }
   return nullptr;
 }
 
--- a/js/src/jit/StackSlotAllocator.h
+++ b/js/src/jit/StackSlotAllocator.h
@@ -101,18 +101,17 @@ class StackSlotAllocator {
       case LDefinition::BOX:
 #endif
 #ifdef JS_NUNBOX32
       case LDefinition::TYPE:
       case LDefinition::PAYLOAD:
 #endif
       case LDefinition::DOUBLE:
         return 8;
-      case LDefinition::SIMD128INT:
-      case LDefinition::SIMD128FLOAT:
+      case LDefinition::SIMD128:
         return 16;
       case LDefinition::STACKRESULTS:
         MOZ_CRASH("Stack results area must be allocated manually");
     }
     MOZ_CRASH("Unknown slot type");
   }
 
   uint32_t allocateSlot(LDefinition::Type type) {
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -348,17 +348,17 @@ void MacroAssemblerCompat::wasmLoadImpl(
         Ldr(SelectFPReg(outany, out64, 32), srcAddr);
         break;
       case Scalar::Float64:
         Ldr(SelectFPReg(outany, out64, 64), srcAddr);
         break;
       case Scalar::Uint8Clamped:
       case Scalar::BigInt64:
       case Scalar::BigUint64:
-      case Scalar::V128:
+      case Scalar::Simd128:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
   }
 
   asMasm().memoryBarrierAfter(access.sync());
 }
 
@@ -408,17 +408,17 @@ void MacroAssemblerCompat::wasmStoreImpl
         Str(SelectFPReg(valany, val64, 32), dstAddr);
         break;
       case Scalar::Float64:
         Str(SelectFPReg(valany, val64, 64), dstAddr);
         break;
       case Scalar::Uint8Clamped:
       case Scalar::BigInt64:
       case Scalar::BigUint64:
-      case Scalar::V128:
+      case Scalar::Simd128:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
   }
 
   asMasm().memoryBarrierAfter(access.sync());
 }
 
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -418,23 +418,17 @@ void CodeGeneratorShared::encodeAllocati
       } else if (payload->isFloatReg()) {
         alloc = RValueAllocation::Double(ToFloatRegister(payload));
       } else {
         MOZ_CRASH("Unexpected payload type.");
       }
       break;
     }
     case MIRType::Float32:
-    case MIRType::Int8x16:
-    case MIRType::Int16x8:
-    case MIRType::Int32x4:
-    case MIRType::Float32x4:
-    case MIRType::Bool8x16:
-    case MIRType::Bool16x8:
-    case MIRType::Bool32x4: {
+    case MIRType::Simd128: {
       LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
       if (payload->isConstant()) {
         MConstant* constant = mir->toConstant();
         uint32_t index;
         masm.propagateOOM(
             graph.addConstantToPool(constant->toJSValue(), &index));
         alloc = RValueAllocation::ConstantPool(index);
         break;
--- a/js/src/jit/shared/Lowering-shared-inl.h
+++ b/js/src/jit/shared/Lowering-shared-inl.h
@@ -286,27 +286,18 @@ void LIRGeneratorShared::defineReturn(LI
     case MIRType::Float32:
       lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32,
                                  LFloatReg(ReturnFloat32Reg)));
       break;
     case MIRType::Double:
       lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE,
                                  LFloatReg(ReturnDoubleReg)));
       break;
-    case MIRType::Int8x16:
-    case MIRType::Int16x8:
-    case MIRType::Int32x4:
-    case MIRType::Bool8x16:
-    case MIRType::Bool16x8:
-    case MIRType::Bool32x4:
-      lir->setDef(0, LDefinition(vreg, LDefinition::SIMD128INT,
-                                 LFloatReg(ReturnSimd128Reg)));
-      break;
-    case MIRType::Float32x4:
-      lir->setDef(0, LDefinition(vreg, LDefinition::SIMD128FLOAT,
+    case MIRType::Simd128:
+      lir->setDef(0, LDefinition(vreg, LDefinition::SIMD128,
                                  LFloatReg(ReturnSimd128Reg)));
       break;
     default:
       LDefinition::Type type = LDefinition::TypeFrom(mir->type());
       MOZ_ASSERT(type != LDefinition::DOUBLE && type != LDefinition::FLOAT32);
       lir->setDef(0, LDefinition(vreg, type, LGeneralReg(ReturnReg)));
       break;
   }
--- a/js/src/jit/x64/Assembler-x64.cpp
+++ b/js/src/jit/x64/Assembler-x64.cpp
@@ -24,17 +24,17 @@ ABIArgGenerator::ABIArgGenerator()
 #endif
       current_() {
 }
 
 ABIArg ABIArgGenerator::next(MIRType type) {
 #if defined(XP_WIN)
   static_assert(NumIntArgRegs == NumFloatArgRegs);
   if (regIndex_ == NumIntArgRegs) {
-    if (IsSimdType(type)) {
+    if (type == MIRType::Simd128) {
       // On Win64, >64 bit args need to be passed by reference.  However, wasm
       // doesn't allow passing SIMD values to JS, so the only way to reach this
       // is wasm to wasm calls.  Ergo we can break the native ABI here and use
       // the Wasm ABI instead.
       stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
       current_ = ABIArg(stackOffset_);
       stackOffset_ += Simd128DataSize;
     } else {
@@ -52,23 +52,17 @@ ABIArg ABIArgGenerator::next(MIRType typ
       current_ = ABIArg(IntArgRegs[regIndex_++]);
       break;
     case MIRType::Float32:
       current_ = ABIArg(FloatArgRegs[regIndex_++].asSingle());
       break;
     case MIRType::Double:
       current_ = ABIArg(FloatArgRegs[regIndex_++]);
       break;
-    case MIRType::Int8x16:
-    case MIRType::Int16x8:
-    case MIRType::Int32x4:
-    case MIRType::Float32x4:
-    case MIRType::Bool8x16:
-    case MIRType::Bool16x8:
-    case MIRType::Bool32x4:
+    case MIRType::Simd128:
       // On Win64, >64 bit args need to be passed by reference, but wasm
       // doesn't allow passing SIMD values to FFIs. The only way to reach
       // here is asm to asm calls, so we can break the ABI here.
       current_ = ABIArg(FloatArgRegs[regIndex_++].asSimd128());
       break;
     default:
       MOZ_CRASH("Unexpected argument type");
   }
@@ -95,23 +89,17 @@ ABIArg ABIArgGenerator::next(MIRType typ
         break;
       }
       if (type == MIRType::Float32) {
         current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSingle());
       } else {
         current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
       }
       break;
-    case MIRType::Int8x16:
-    case MIRType::Int16x8:
-    case MIRType::Int32x4:
-    case MIRType::Float32x4:
-    case MIRType::Bool8x16:
-    case MIRType::Bool16x8:
-    case MIRType::Bool32x4:
+    case MIRType::Simd128:
       if (floatRegIndex_ == NumFloatArgRegs) {
         stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
         current_ = ABIArg(stackOffset_);
         stackOffset_ += Simd128DataSize;
         break;
       }
       current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSimd128());
       break;
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -375,17 +375,17 @@ void CodeGeneratorX64::wasmStore(const w
       case Scalar::Uint16:
         masm.movw(cst, dstAddr);
         break;
       case Scalar::Int32:
       case Scalar::Uint32:
         masm.movl(cst, dstAddr);
         break;
       case Scalar::Int64:
-      case Scalar::V128:
+      case Scalar::Simd128:
       case Scalar::Float32:
       case Scalar::Float64:
       case Scalar::Uint8Clamped:
       case Scalar::BigInt64:
       case Scalar::BigUint64:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -208,17 +208,17 @@ void LIRGenerator::visitWasmStore(MWasmS
       }
       break;
     case Scalar::Float32:
     case Scalar::Float64:
       valueAlloc = useRegisterAtStart(value);
       break;
     case Scalar::BigInt64:
     case Scalar::BigUint64:
-    case Scalar::V128:
+    case Scalar::Simd128:
     case Scalar::Uint8Clamped:
     case Scalar::MaxTypedArrayViewType:
       MOZ_CRASH("unexpected array type");
   }
 
   LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
   auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc);
   add(lir, ins);
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -586,17 +586,17 @@ void MacroAssembler::wasmLoad(const wasm
       movl(srcAddr, out.gpr());
       break;
     case Scalar::Float32:
       loadFloat32(srcAddr, out.fpu());
       break;
     case Scalar::Float64:
       loadDouble(srcAddr, out.fpu());
       break;
-    case Scalar::V128:
+    case Scalar::Simd128:
       vmovups(srcAddr, out.fpu());
       break;
     case Scalar::Int64:
       MOZ_CRASH("int64 loads must use load64");
     case Scalar::BigInt64:
     case Scalar::BigUint64:
     case Scalar::Uint8Clamped:
     case Scalar::MaxTypedArrayViewType:
@@ -631,17 +631,17 @@ void MacroAssembler::wasmLoadI64(const w
     case Scalar::Uint32:
       movl(srcAddr, out.reg);
       break;
     case Scalar::Int64:
       movq(srcAddr, out.reg);
       break;
     case Scalar::Float32:
     case Scalar::Float64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       MOZ_CRASH("float loads must use wasmLoad");
     case Scalar::Uint8Clamped:
     case Scalar::BigInt64:
     case Scalar::BigUint64:
     case Scalar::MaxTypedArrayViewType:
       MOZ_CRASH("unexpected scalar type for wasmLoadI64");
   }
 
@@ -670,17 +670,17 @@ void MacroAssembler::wasmStore(const was
       movq(value.gpr(), dstAddr);
       break;
     case Scalar::Float32:
       storeUncanonicalizedFloat32(value.fpu(), dstAddr);
       break;
     case Scalar::Float64:
       storeUncanonicalizedDouble(value.fpu(), dstAddr);
       break;
-    case Scalar::V128:
+    case Scalar::Simd128:
       vmovups(value.fpu(), dstAddr);
       break;
     case Scalar::Uint8Clamped:
     case Scalar::BigInt64:
     case Scalar::BigUint64:
     case Scalar::MaxTypedArrayViewType:
       MOZ_CRASH("unexpected array type");
   }
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -410,17 +410,17 @@ void CodeGenerator::visitAsmJSLoadHeap(L
 }
 
 void CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(
     OutOfLineLoadTypedArrayOutOfBounds* ool) {
   switch (ool->viewType()) {
     case Scalar::Int64:
     case Scalar::BigInt64:
     case Scalar::BigUint64:
-    case Scalar::V128:
+    case Scalar::Simd128:
     case Scalar::MaxTypedArrayViewType:
       MOZ_CRASH("unexpected array type");
     case Scalar::Float32:
       masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
       break;
     case Scalar::Float64:
       masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
       break;
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -337,17 +337,17 @@ void LIRGenerator::visitAsmJSStoreHeap(M
     case Scalar::Float64:
       // For now, don't allow constant values. The immediate operand affects
       // instruction layout which affects patching.
       lir = new (alloc())
           LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
                           limitAlloc, memoryBaseAlloc);
       break;
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       MOZ_CRASH("NYI");
     case Scalar::Uint8Clamped:
     case Scalar::BigInt64:
     case Scalar::BigUint64:
     case Scalar::MaxTypedArrayViewType:
       MOZ_CRASH("unexpected array type");
   }
   add(lir, ins);
--- a/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MoveEmitter-x86-shared.cpp
@@ -158,21 +158,18 @@ void MoveEmitterX86::emit(const MoveReso
         emitDoubleMove(from, to);
         break;
       case MoveOp::INT32:
         emitInt32Move(from, to, moves, i);
         break;
       case MoveOp::GENERAL:
         emitGeneralMove(from, to, moves, i);
         break;
-      case MoveOp::SIMD128INT:
-        emitSimd128IntMove(from, to);
-        break;
-      case MoveOp::SIMD128FLOAT:
-        emitSimd128FloatMove(from, to);
+      case MoveOp::SIMD128:
+        emitSimd128Move(from, to);
         break;
       default:
         MOZ_CRASH("Unexpected move type");
     }
   }
 }
 
 MoveEmitterX86::~MoveEmitterX86() { assertDone(); }
@@ -242,34 +239,25 @@ Operand MoveEmitterX86::toPopOperand(con
 void MoveEmitterX86::breakCycle(const MoveOperand& to, MoveOp::Type type) {
   // There is some pattern:
   //   (A -> B)
   //   (B -> A)
   //
   // This case handles (A -> B), which we reach first. We save B, then allow
   // the original move to continue.
   switch (type) {
-    case MoveOp::SIMD128INT:
+    case MoveOp::SIMD128:
       if (to.isMemory()) {
         ScratchSimd128Scope scratch(masm);
         masm.loadAlignedSimd128Int(toAddress(to), scratch);
         masm.storeAlignedSimd128Int(scratch, cycleSlot());
       } else {
         masm.storeAlignedSimd128Int(to.floatReg(), cycleSlot());
       }
       break;
-    case MoveOp::SIMD128FLOAT:
-      if (to.isMemory()) {
-        ScratchSimd128Scope scratch(masm);
-        masm.loadAlignedSimd128Float(toAddress(to), scratch);
-        masm.storeAlignedSimd128Float(scratch, cycleSlot());
-      } else {
-        masm.storeAlignedSimd128Float(to.floatReg(), cycleSlot());
-      }
-      break;
     case MoveOp::FLOAT32:
       if (to.isMemory()) {
         ScratchFloat32Scope scratch(masm);
         masm.loadFloat32(toAddress(to), scratch);
         masm.storeFloat32(scratch, cycleSlot());
       } else {
         masm.storeFloat32(to.floatReg(), cycleSlot());
       }
@@ -305,38 +293,27 @@ void MoveEmitterX86::breakCycle(const Mo
 void MoveEmitterX86::completeCycle(const MoveOperand& to, MoveOp::Type type) {
   // There is some pattern:
   //   (A -> B)
   //   (B -> A)
   //
   // This case handles (B -> A), which we reach last. We emit a move from the
   // saved value of B, to A.
   switch (type) {
-    case MoveOp::SIMD128INT:
+    case MoveOp::SIMD128:
       MOZ_ASSERT(pushedAtCycle_ != -1);
       MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
       if (to.isMemory()) {
         ScratchSimd128Scope scratch(masm);
         masm.loadAlignedSimd128Int(cycleSlot(), scratch);
         masm.storeAlignedSimd128Int(scratch, toAddress(to));
       } else {
         masm.loadAlignedSimd128Int(cycleSlot(), to.floatReg());
       }
       break;
-    case MoveOp::SIMD128FLOAT:
-      MOZ_ASSERT(pushedAtCycle_ != -1);
-      MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
-      if (to.isMemory()) {
-        ScratchSimd128Scope scratch(masm);
-        masm.loadAlignedSimd128Float(cycleSlot(), scratch);
-        masm.storeAlignedSimd128Float(scratch, toAddress(to));
-      } else {
-        masm.loadAlignedSimd128Float(cycleSlot(), to.floatReg());
-      }
-      break;
     case MoveOp::FLOAT32:
       MOZ_ASSERT(pushedAtCycle_ != -1);
       MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(float));
       if (to.isMemory()) {
         ScratchFloat32Scope scratch(masm);
         masm.loadFloat32(cycleSlot(), scratch);
         masm.storeFloat32(scratch, toAddress(to));
       } else {
@@ -480,18 +457,18 @@ void MoveEmitterX86::emitDoubleMove(cons
     // Memory to memory move.
     MOZ_ASSERT(from.isMemory());
     ScratchDoubleScope scratch(masm);
     masm.loadDouble(toAddress(from), scratch);
     masm.storeDouble(scratch, toAddress(to));
   }
 }
 
-void MoveEmitterX86::emitSimd128IntMove(const MoveOperand& from,
-                                        const MoveOperand& to) {
+void MoveEmitterX86::emitSimd128Move(const MoveOperand& from,
+                                     const MoveOperand& to) {
   MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSimd128());
   MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSimd128());
 
   if (from.isFloatReg()) {
     if (to.isFloatReg()) {
       masm.moveSimd128Int(from.floatReg(), to.floatReg());
     } else {
       masm.storeAlignedSimd128Int(from.floatReg(), toAddress(to));
@@ -502,38 +479,16 @@ void MoveEmitterX86::emitSimd128IntMove(
     // Memory to memory move.
     MOZ_ASSERT(from.isMemory());
     ScratchSimd128Scope scratch(masm);
     masm.loadAlignedSimd128Int(toAddress(from), scratch);
     masm.storeAlignedSimd128Int(scratch, toAddress(to));
   }
 }
 
-void MoveEmitterX86::emitSimd128FloatMove(const MoveOperand& from,
-                                          const MoveOperand& to) {
-  MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSimd128());
-  MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSimd128());
-
-  if (from.isFloatReg()) {
-    if (to.isFloatReg()) {
-      masm.moveSimd128Float(from.floatReg(), to.floatReg());
-    } else {
-      masm.storeAlignedSimd128Float(from.floatReg(), toAddress(to));
-    }
-  } else if (to.isFloatReg()) {
-    masm.loadAlignedSimd128Float(toAddress(from), to.floatReg());
-  } else {
-    // Memory to memory move.
-    MOZ_ASSERT(from.isMemory());
-    ScratchSimd128Scope scratch(masm);
-    masm.loadAlignedSimd128Float(toAddress(from), scratch);
-    masm.storeAlignedSimd128Float(scratch, toAddress(to));
-  }
-}
-
 void MoveEmitterX86::assertDone() { MOZ_ASSERT(!inCycle_); }
 
 void MoveEmitterX86::finish() {
   assertDone();
 
   masm.freeStack(masm.framePushed() - pushedAtStart_);
 }
 
--- a/js/src/jit/x86-shared/MoveEmitter-x86-shared.h
+++ b/js/src/jit/x86-shared/MoveEmitter-x86-shared.h
@@ -41,18 +41,17 @@ class MoveEmitterX86 {
                                bool allGeneralRegs, bool allFloatRegs,
                                size_t swapCount);
   void emitInt32Move(const MoveOperand& from, const MoveOperand& to,
                      const MoveResolver& moves, size_t i);
   void emitGeneralMove(const MoveOperand& from, const MoveOperand& to,
                        const MoveResolver& moves, size_t i);
   void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
   void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
-  void emitSimd128FloatMove(const MoveOperand& from, const MoveOperand& to);
-  void emitSimd128IntMove(const MoveOperand& from, const MoveOperand& to);
+  void emitSimd128Move(const MoveOperand& from, const MoveOperand& to);
   void breakCycle(const MoveOperand& to, MoveOp::Type type);
   void completeCycle(const MoveOperand& to, MoveOp::Type type);
 
  public:
   explicit MoveEmitterX86(MacroAssembler& masm);
   ~MoveEmitterX86();
   void emit(const MoveResolver& moves);
   void finish();
--- a/js/src/jit/x86/Assembler-x86.cpp
+++ b/js/src/jit/x86/Assembler-x86.cpp
@@ -24,23 +24,17 @@ ABIArg ABIArgGenerator::next(MIRType typ
       current_ = ABIArg(stackOffset_);
       stackOffset_ += sizeof(uint32_t);
       break;
     case MIRType::Double:
     case MIRType::Int64:
       current_ = ABIArg(stackOffset_);
       stackOffset_ += sizeof(uint64_t);
       break;
-    case MIRType::Int8x16:
-    case MIRType::Int16x8:
-    case MIRType::Int32x4:
-    case MIRType::Float32x4:
-    case MIRType::Bool8x16:
-    case MIRType::Bool16x8:
-    case MIRType::Bool32x4:
+    case MIRType::Simd128:
       // On Win64, >64 bit args need to be passed by reference.  However, wasm
       // doesn't allow passing SIMD values to JS, so the only way to reach this
       // is wasm to wasm calls.  Ergo we can break the native ABI here and use
       // the Wasm ABI instead.
       stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
       current_ = ABIArg(stackOffset_);
       stackOffset_ += Simd128DataSize;
       break;
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -381,17 +381,17 @@ void LIRGenerator::visitWasmStore(MWasmS
           LWasmStoreI64(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
       add(lir, ins);
       return;
     }
     case Scalar::Uint8Clamped:
     case Scalar::BigInt64:
     case Scalar::BigUint64:
     case Scalar::MaxTypedArrayViewType:
-    case Scalar::V128:
+    case Scalar::Simd128:
       MOZ_CRASH("unexpected array type");
   }
 
   auto* lir = new (alloc())
       LWasmStore(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
   add(lir, ins);
 }
 
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -605,17 +605,17 @@ void MacroAssembler::wasmLoad(const wasm
       break;
     case Scalar::Float32:
       vmovss(srcAddr, out.fpu());
       break;
     case Scalar::Float64:
       vmovsd(srcAddr, out.fpu());
       break;
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
     case Scalar::Uint8Clamped:
     case Scalar::BigInt64:
     case Scalar::BigUint64:
     case Scalar::MaxTypedArrayViewType:
       MOZ_CRASH("unexpected type");
   }
 
   memoryBarrierAfter(access.sync());
@@ -679,17 +679,17 @@ void MacroAssembler::wasmLoadI64(const w
       append(access, size());
       movl(HighWord(srcAddr), out.high);
 
       break;
     }
     case Scalar::Float32:
     case Scalar::Float64:
       MOZ_CRASH("non-int64 loads should use load()");
-    case Scalar::V128:
+    case Scalar::Simd128:
     case Scalar::Uint8Clamped:
     case Scalar::BigInt64:
     case Scalar::BigUint64:
     case Scalar::MaxTypedArrayViewType:
       MOZ_CRASH("unexpected array type");
   }
 
   memoryBarrierAfter(access.sync());
@@ -723,17 +723,17 @@ void MacroAssembler::wasmStore(const was
     case Scalar::Float64:
       vmovsd(value.fpu(), dstAddr);
       break;
     case Scalar::Int64:
       MOZ_CRASH("Should be handled in storeI64.");
     case Scalar::MaxTypedArrayViewType:
     case Scalar::BigInt64:
     case Scalar::BigUint64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       MOZ_CRASH("unexpected type");
   }
 
   memoryBarrierAfter(access.sync());
 }
 
 void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
                                   Register64 value, Operand dstAddr) {
--- a/js/src/jsfriendapi.h
+++ b/js/src/jsfriendapi.h
@@ -1323,17 +1323,17 @@ enum Type {
   BigUint64,
 
   /**
    * Types that don't have their own TypedArray equivalent, for now.
    */
   MaxTypedArrayViewType,
 
   Int64,
-  V128,
+  Simd128,
 };
 
 static inline size_t byteSize(Type atype) {
   switch (atype) {
     case Int8:
     case Uint8:
     case Uint8Clamped:
       return 1;
@@ -1344,17 +1344,17 @@ static inline size_t byteSize(Type atype
     case Uint32:
     case Float32:
       return 4;
     case Int64:
     case Float64:
     case BigInt64:
     case BigUint64:
       return 8;
-    case V128:
+    case Simd128:
       return 16;
     case MaxTypedArrayViewType:
       break;
   }
   MOZ_CRASH("invalid scalar type");
 }
 
 static inline bool isSignedIntType(Type atype) {
@@ -1367,17 +1367,17 @@ static inline bool isSignedIntType(Type 
       return true;
     case Uint8:
     case Uint8Clamped:
     case Uint16:
     case Uint32:
     case Float32:
     case Float64:
     case BigUint64:
-    case V128:
+    case Simd128:
       return false;
     case MaxTypedArrayViewType:
       break;
   }
   MOZ_CRASH("invalid scalar type");
 }
 
 static inline bool isBigIntType(Type atype) {
@@ -1390,17 +1390,17 @@ static inline bool isBigIntType(Type aty
     case Int32:
     case Int64:
     case Uint8:
     case Uint8Clamped:
     case Uint16:
     case Uint32:
     case Float32:
     case Float64:
-    case V128:
+    case Simd128:
       return false;
     case MaxTypedArrayViewType:
       break;
   }
   MOZ_CRASH("invalid scalar type");
 }
 
 } /* namespace Scalar */
--- a/js/src/vm/CommonPropertyNames.h
+++ b/js/src/vm/CommonPropertyNames.h
@@ -40,20 +40,16 @@
   MACRO(AsyncGeneratorReturn, AsyncGeneratorReturn, "AsyncGeneratorReturn")    \
   MACRO(AsyncGeneratorThrow, AsyncGeneratorThrow, "AsyncGeneratorThrow")       \
   MACRO(AsyncWrapped, AsyncWrapped, "AsyncWrapped")                            \
   MACRO(async, async, "async")                                                 \
   MACRO(autoAllocateChunkSize, autoAllocateChunkSize, "autoAllocateChunkSize") \
   MACRO(await, await, "await")                                                 \
   MACRO(bigint64, bigint64, "bigint64")                                        \
   MACRO(biguint64, biguint64, "biguint64")                                     \
-  MACRO(Bool8x16, Bool8x16, "Bool8x16")                                        \
-  MACRO(Bool16x8, Bool16x8, "Bool16x8")                                        \
-  MACRO(Bool32x4, Bool32x4, "Bool32x4")                                        \
-  MACRO(Bool64x2, Bool64x2, "Bool64x2")                                        \
   MACRO(boundWithSpace, boundWithSpace, "bound ")                              \
   MACRO(break, break_, "break")                                                \
   MACRO(breakdown, breakdown, "breakdown")                                     \
   MACRO(buffer, buffer, "buffer")                                              \
   MACRO(builder, builder, "builder")                                           \
   MACRO(by, by, "by")                                                          \
   MACRO(byob, byob, "byob")                                                    \
   MACRO(byteAlignment, byteAlignment, "byteAlignment")                         \
@@ -160,19 +156,17 @@
   MACRO(find, find, "find")                                                    \
   MACRO(findIndex, findIndex, "findIndex")                                     \
   MACRO(firstDayOfWeek, firstDayOfWeek, "firstDayOfWeek")                      \
   MACRO(fix, fix, "fix")                                                       \
   MACRO(flags, flags, "flags")                                                 \
   MACRO(flat, flat, "flat")                                                    \
   MACRO(flatMap, flatMap, "flatMap")                                           \
   MACRO(float32, float32, "float32")                                           \
-  MACRO(Float32x4, Float32x4, "Float32x4")                                     \
   MACRO(float64, float64, "float64")                                           \
-  MACRO(Float64x2, Float64x2, "Float64x2")                                     \
     MACRO(for, for_, "for")                                                    \
   MACRO(forceInterpreter, forceInterpreter, "forceInterpreter")                \
   MACRO(forEach, forEach, "forEach")                                           \
   MACRO(format, format, "format")                                              \
   MACRO(fraction, fraction, "fraction")                                        \
   MACRO(fractionalSecond, fractionalSecond, "fractionalSecond")                \
   MACRO(frame, frame, "frame")                                                 \
   MACRO(from, from, "from")                                                    \
@@ -235,19 +229,16 @@
         "InitializeRelativeTimeFormat")                                        \
   MACRO(innermost, innermost, "innermost")                                     \
   MACRO(inNursery, inNursery, "inNursery")                                     \
   MACRO(input, input, "input")                                                 \
   MACRO(instanceof, instanceof, "instanceof")                                  \
   MACRO(int8, int8, "int8")                                                    \
   MACRO(int16, int16, "int16")                                                 \
   MACRO(int32, int32, "int32")                                                 \
-  MACRO(Int8x16, Int8x16, "Int8x16")                                           \
-  MACRO(Int16x8, Int16x8, "Int16x8")                                           \
-  MACRO(Int32x4, Int32x4, "Int32x4")                                           \
   MACRO(integer, integer, "integer")                                           \
   MACRO(interface, interface, "interface")                                     \
   MACRO(InterpretGeneratorResume, InterpretGeneratorResume,                    \
         "InterpretGeneratorResume")                                            \
   MACRO(InvalidDate, InvalidDate, "Invalid Date")                              \
   MACRO(isBreakpoint, isBreakpoint, "isBreakpoint")                            \
   MACRO(isEntryPoint, isEntryPoint, "isEntryPoint")                            \
   MACRO(isExtensible, isExtensible, "isExtensible")                            \
--- a/js/src/vm/TypedArrayObject.cpp
+++ b/js/src/vm/TypedArrayObject.cpp
@@ -86,17 +86,17 @@ bool TypedArrayObject::convertForSideEff
     case Scalar::Float32:
     case Scalar::Float64:
     case Scalar::Uint8Clamped: {
       double ignore;
       return ToNumber(cx, v, &ignore);
     }
     case Scalar::MaxTypedArrayViewType:
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       MOZ_CRASH("Unsupported TypedArray type");
   }
   MOZ_ASSERT_UNREACHABLE("Invalid scalar type");
   return false;
 }
 
 /* static */
 bool TypedArrayObject::is(HandleValue v) {
@@ -2069,17 +2069,17 @@ bool TypedArrayObject::getElement<CanGC>
   switch (type()) {
 #define GET_ELEMENT(T, N) \
   case Scalar::N:         \
     return N##Array::getElement(cx, this, index, val);
     JS_FOR_EACH_TYPED_ARRAY(GET_ELEMENT)
 #undef GET_ELEMENT
     case Scalar::MaxTypedArrayViewType:
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       break;
   }
 
   MOZ_CRASH("Unknown TypedArray type");
 }
 
 template <>
 bool TypedArrayObject::getElement<NoGC>(
@@ -2094,17 +2094,17 @@ bool TypedArrayObject::getElementPure(ui
   switch (type()) {
 #define GET_ELEMENT_PURE(T, N) \
   case Scalar::N:              \
     return N##Array::getElementPure(this, index, vp);
     JS_FOR_EACH_TYPED_ARRAY(GET_ELEMENT_PURE)
 #undef GET_ELEMENT
     case Scalar::MaxTypedArrayViewType:
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       break;
   }
 
   MOZ_CRASH("Unknown TypedArray type");
 }
 
 /* static */
 bool TypedArrayObject::getElements(JSContext* cx,
@@ -2122,17 +2122,17 @@ bool TypedArrayObject::getElements(JSCon
         return false;                                                          \
       }                                                                        \
     }                                                                          \
     return true;
     JS_FOR_EACH_TYPED_ARRAY(GET_ELEMENTS)
 #undef GET_ELEMENTS
     case Scalar::MaxTypedArrayViewType:
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       break;
   }
 
   MOZ_CRASH("Unknown TypedArray type");
 }
 
 /***
  *** JS impl
@@ -2495,17 +2495,17 @@ bool js::SetTypedArrayElement(JSContext*
   switch (tobj->type()) {
 #define SET_TYPED_ARRAY_ELEMENT(T, N) \
   case Scalar::N:                     \
     return TypedArrayObjectTemplate<T>::setElement(cx, obj, index, v, result);
     JS_FOR_EACH_TYPED_ARRAY(SET_TYPED_ARRAY_ELEMENT)
 #undef SET_TYPED_ARRAY_ELEMENT
     case Scalar::MaxTypedArrayViewType:
     case Scalar::Int64:
-    case Scalar::V128:
+    case Scalar::Simd128:
       break;
   }
 
   MOZ_CRASH("Unsupported TypedArray type");
 }
 
 /* ES6 draft rev 34 (2015 Feb 20) 9.4.5.3 [[DefineOwnProperty]] step 3.c. */
 bool js::DefineTypedArrayElement(JSContext* cx, HandleObject obj,
@@ -2555,17 +2555,17 @@ bool js::DefineTypedArrayElement(JSConte
 #define DEFINE_TYPED_ARRAY_ELEMENT(T, N)                              \
   case Scalar::N:                                                     \
     return TypedArrayObjectTemplate<T>::defineElement(cx, obj, index, \
                                                       desc.value(), result);
       JS_FOR_EACH_TYPED_ARRAY(DEFINE_TYPED_ARRAY_ELEMENT)
 #undef DEFINE_TYPED_ARRAY_ELEMENT
       case Scalar::MaxTypedArrayViewType:
       case Scalar::Int64:
-      case Scalar::V128:
+      case Scalar::Simd128:
         break;
     }
 
     MOZ_CRASH("Unsupported TypedArray type");
   }
 
   // Step xii.
   return result.succeed();
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -277,17 +277,17 @@ static constexpr Register RabaldrScratch
 #    error "Bad configuration"
 #  endif
 #endif
 
 template <MIRType t>
 struct RegTypeOf {
 #ifdef ENABLE_WASM_SIMD
   static_assert(t == MIRType::Float32 || t == MIRType::Double ||
-                    t == MIRType::Int8x16,
+                    t == MIRType::Simd128,
                 "Float mask type");
 #else
   static_assert(t == MIRType::Float32 || t == MIRType::Double,
                 "Float mask type");
 #endif
 };
 
 template <>
@@ -295,17 +295,17 @@ struct RegTypeOf<MIRType::Float32> {
   static constexpr RegTypeName value = RegTypeName::Float32;
 };
 template <>
 struct RegTypeOf<MIRType::Double> {
   static constexpr RegTypeName value = RegTypeName::Float64;
 };
 #ifdef ENABLE_WASM_SIMD
 template <>
-struct RegTypeOf<MIRType::Int8x16> {
+struct RegTypeOf<MIRType::Simd128> {
   static constexpr RegTypeName value = RegTypeName::Vector128;
 };
 #endif
 
 // The strongly typed register wrappers are especially useful to distinguish
 // float registers from double registers, but they also clearly distinguish
 // 32-bit registers from 64-bit register pairs on 32-bit systems.
 
@@ -875,20 +875,20 @@ class BaseRegAlloc {
     if (!isAvailableF64(specific)) {
       bc->sync();
     }
     allocFPU(specific);
   }
 
 #ifdef ENABLE_WASM_SIMD
   MOZ_MUST_USE RegV128 needV128() {
-    if (!hasFPU<MIRType::Int8x16>()) {
+    if (!hasFPU<MIRType::Simd128>()) {
       bc->sync();
     }
-    return RegV128(allocFPU<MIRType::Int8x16>());
+    return RegV128(allocFPU<MIRType::Simd128>());
   }
 
   void needV128(RegV128 specific) {
     if (!isAvailableV128(specific)) {
       bc->sync();
     }
     allocFPU(specific);
   }
@@ -1181,17 +1181,17 @@ void BaseLocalIter::settle() {
         concreteType = MIRType::Pointer;
         [[fallthrough]];
       case MIRType::Int32:
       case MIRType::Int64:
       case MIRType::Double:
       case MIRType::Float32:
       case MIRType::RefOrNull:
 #ifdef ENABLE_WASM_SIMD
-      case MIRType::Int8x16:
+      case MIRType::Simd128:
 #endif
         if (argsIter_->argInRegister()) {
           frameOffset_ = pushLocal(MIRTypeToSize(concreteType));
         } else {
           frameOffset_ = -(argsIter_->offsetFromArgBase() + sizeof(Frame));
         }
         break;
       default:
@@ -3635,17 +3635,17 @@ class BaseCompiler final : public BaseCo
     masm.loadConstantSimd128(SimdConstant::CreateX16((int8_t*)f.bytes), dest);
   }
 
   void loadMemV128(const Stk& src, RegV128 dest) {
     fr.loadStackV128(src.offs(), dest);
   }
 
   void loadLocalV128(const Stk& src, RegV128 dest) {
-    fr.loadLocalV128(localFromSlot(src.slot(), MIRType::Int8x16), dest);
+    fr.loadLocalV128(localFromSlot(src.slot(), MIRType::Simd128), dest);
   }
 
   void loadRegisterV128(const Stk& src, RegV128 dest) {
     moveV128(src.v128reg(), dest);
   }
 #endif
 
   void loadI32(const Stk& src, RegI32 dest) {
@@ -5214,17 +5214,17 @@ class BaseCompiler final : public BaseCo
         }
         case MIRType::Double:
           fr.storeLocalF64(RegF64(i->fpu()), l);
           break;
         case MIRType::Float32:
           fr.storeLocalF32(RegF32(i->fpu()), l);
           break;
 #ifdef ENABLE_WASM_SIMD
-        case MIRType::Int8x16:
+        case MIRType::Simd128:
           fr.storeLocalV128(RegV128(i->fpu()), l);
           break;
 #endif
         default:
           MOZ_CRASH("Function argument type");
       }
     }
 
@@ -5578,17 +5578,17 @@ class BaseCompiler final : public BaseCo
 #endif
         } else {
           loadI64(arg, RegI64(argLoc.gpr64()));
         }
         break;
       }
       case ValType::V128: {
 #ifdef ENABLE_WASM_SIMD
-        ABIArg argLoc = call->abi.next(MIRType::Int8x16);
+        ABIArg argLoc = call->abi.next(MIRType::Simd128);
         switch (argLoc.kind()) {
           case ABIArg::Stack: {
             ScratchV128 scratch(*this);
             loadV128(arg, scratch);
             masm.storeUnalignedSimd128(
                 scratch,
                 Address(masm.getStackPointer(), argLoc.offsetFromArgBase()));
             break;
@@ -9915,17 +9915,17 @@ void BaseCompiler::pushReturnValueOfCall
       break;
     }
     case MIRType::Double: {
       RegF64 rv = captureReturnedF64(call);
       pushF64(rv);
       break;
     }
 #ifdef ENABLE_WASM_SIMD
-    case MIRType::Int8x16: {
+    case MIRType::Simd128: {
       RegV128 rv = captureReturnedV128(call);
       pushV128(rv);
       break;
     }
 #endif
     case MIRType::RefOrNull: {
       RegPtr rv = captureReturnedRef();
       pushRef(rv);
@@ -10440,17 +10440,17 @@ bool BaseCompiler::emitSetOrTeeLocal(uin
         pushF32(rv);
       }
       break;
     }
     case ValType::V128: {
 #ifdef ENABLE_WASM_SIMD
       RegV128 rv = popV128();
       syncLocal(slot);
-      fr.storeLocalV128(rv, localFromSlot(slot, MIRType::Int8x16));
+      fr.storeLocalV128(rv, localFromSlot(slot, MIRType::Simd128));
       if (isSetLocal) {
         freeV128(rv);
       } else {
         pushV128(rv);
       }
       break;
 #else
       MOZ_CRASH("No SIMD support");
@@ -14546,17 +14546,17 @@ bool BaseCompiler::emitBody() {
             V128 v128;
             CHECK(iter_.readV128Const(&v128));
             if (!deadCode_) {
               pushV128(v128);
             }
             NEXT();
           }
           case uint32_t(SimdOp::V128Load):
-            CHECK_NEXT(emitLoad(ValType::V128, Scalar::V128));
+            CHECK_NEXT(emitLoad(ValType::V128, Scalar::Simd128));
           case uint32_t(SimdOp::V8x16LoadSplat):
             CHECK_NEXT(emitLoadSplat(Scalar::Uint8));
           case uint32_t(SimdOp::V16x8LoadSplat):
             CHECK_NEXT(emitLoadSplat(Scalar::Uint16));
           case uint32_t(SimdOp::V32x4LoadSplat):
             CHECK_NEXT(emitLoadSplat(Scalar::Uint32));
           case uint32_t(SimdOp::V64x2LoadSplat):
             CHECK_NEXT(emitLoadSplat(Scalar::Int64));
@@ -14568,17 +14568,17 @@ bool BaseCompiler::emitBody() {
             CHECK_NEXT(emitLoadExtend(Scalar::Int16));
           case uint32_t(SimdOp::I32x4LoadU16x4):
             CHECK_NEXT(emitLoadExtend(Scalar::Uint16));
           case uint32_t(SimdOp::I64x2LoadS32x2):
             CHECK_NEXT(emitLoadExtend(Scalar::Int32));
           case uint32_t(SimdOp::I64x2LoadU32x2):
             CHECK_NEXT(emitLoadExtend(Scalar::Uint32));
           case uint32_t(SimdOp::V128Store):
-            CHECK_NEXT(emitStore(ValType::V128, Scalar::V128));
+            CHECK_NEXT(emitStore(ValType::V128, Scalar::Simd128));
           default:
             break;
         }  // switch (op.b1)
         return iter_.unrecognizedOpcode(&op);
       }
 #endif  // ENABLE_WASM_SIMD
 
       // "Miscellaneous" operations
--- a/js/src/wasm/WasmStubs.cpp
+++ b/js/src/wasm/WasmStubs.cpp
@@ -361,17 +361,17 @@ static void SetupABIArguments(MacroAssem
                       "ExportArg must be big enough to store SIMD values");
         switch (type) {
           case MIRType::Double:
             masm.loadDouble(src, iter->fpu());
             break;
           case MIRType::Float32:
             masm.loadFloat32(src, iter->fpu());
             break;
-          case MIRType::Int8x16:
+          case MIRType::Simd128:
 #ifdef ENABLE_WASM_SIMD
             // We will reach this point when we generate interpreter entry stubs
             // for exports that receive v128 values, but the code will never be
             // executed because such exports cannot be called from JS.
             masm.breakpoint();
             break;
 #else
             MOZ_CRASH("V128 not supported in SetupABIArguments");
@@ -408,17 +408,17 @@ static void SetupABIArguments(MacroAssem
           }
           case MIRType::Float32: {
             ScratchFloat32Scope fpscratch(masm);
             masm.loadFloat32(src, fpscratch);
             masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
                                                  iter->offsetFromArgBase()));
             break;
           }
-          case MIRType::Int8x16: {
+          case MIRType::Simd128: {
 #ifdef ENABLE_WASM_SIMD
             // We will reach this point when we generate interpreter entry stubs
             // for exports that receive v128 values, but the code will never be
             // executed because such exports cannot be called from JS.
             masm.breakpoint();
             break;
 #else
             MOZ_CRASH("V128 not supported in SetupABIArguments");
@@ -1637,17 +1637,17 @@ static void StackCopy(MacroAssembler& ma
     GenPrintF32(DebugChannel::Import, masm, fpscratch);
     masm.storeFloat32(fpscratch, dst);
   } else if (type == MIRType::Double) {
     ScratchDoubleScope fpscratch(masm);
     masm.loadDouble(src, fpscratch);
     GenPrintF64(DebugChannel::Import, masm, fpscratch);
     masm.storeDouble(fpscratch, dst);
 #ifdef ENABLE_WASM_SIMD
-  } else if (type == MIRType::Int8x16) {
+  } else if (type == MIRType::Simd128) {
     ScratchSimd128Scope fpscratch(masm);
     masm.loadUnalignedSimd128(src, fpscratch);
     GenPrintV128(DebugChannel::Import, masm, fpscratch);
     masm.storeUnalignedSimd128(fpscratch, dst);
 #endif
   } else {
     MOZ_CRASH("StackCopy: unexpected type");
   }
@@ -1757,17 +1757,17 @@ static void FillArgumentArrayForExit(
             masm.canonicalizeDouble(fpscratch);
             GenPrintF64(DebugChannel::Import, masm, fpscratch);
             masm.boxDouble(fpscratch, dst);
           } else {
             // Preserve the NaN pattern in the input.
             GenPrintF32(DebugChannel::Import, masm, srcReg);
             masm.storeFloat32(srcReg, dst);
           }
-        } else if (type == MIRType::Int8x16) {
+        } else if (type == MIRType::Simd128) {
           // The value should never escape; the call will be stopped later as
           // the import is being called.  But we should generate something sane
           // here for the boxed case since a debugger or the stack walker may
           // observe something.
           ScratchDoubleScope dscratch(masm);
           masm.loadConstantDouble(0, dscratch);
           GenPrintF64(DebugChannel::Import, masm, dscratch);
           if (toValue) {
@@ -1812,17 +1812,17 @@ static void FillArgumentArrayForExit(
               masm.loadFloat32(src, fscratch);
               masm.convertFloat32ToDouble(fscratch, dscratch);
             } else {
               masm.loadDouble(src, dscratch);
             }
             masm.canonicalizeDouble(dscratch);
             GenPrintF64(DebugChannel::Import, masm, dscratch);
             masm.boxDouble(dscratch, dst);
-          } else if (type == MIRType::Int8x16) {
+          } else if (type == MIRType::Simd128) {
             // The value should never escape; the call will be stopped later as
             // the import is being called.  But we should generate something
             // sane here for the boxed case since a debugger or the stack walker
             // may observe something.
             ScratchDoubleScope dscratch(masm);
             masm.loadConstantDouble(0, dscratch);
             GenPrintF64(DebugChannel::Import, masm, dscratch);
             masm.boxDouble(dscratch, dst);
--- a/js/src/wasm/WasmTypes.cpp
+++ b/js/src/wasm/WasmTypes.cpp
@@ -707,17 +707,17 @@ bool DebugFrame::getLocal(uint32_t local
       break;
     case jit::MIRType::Double:
       vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<double*>(dataPtr))));
       break;
     case jit::MIRType::RefOrNull:
       vp.set(ObjectOrNullValue(*(JSObject**)dataPtr));
       break;
 #ifdef ENABLE_WASM_SIMD
-    case jit::MIRType::Int8x16:
+    case jit::MIRType::Simd128:
       vp.set(NumberValue(0));
       break;
 #endif
     default:
       MOZ_CRASH("local type");
   }
   return true;
 }
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -493,17 +493,17 @@ class ValType {
         tc_ = PackTypeCode(TypeCode::I64);
         break;
       case jit::MIRType::Float32:
         tc_ = PackTypeCode(TypeCode::F32);
         break;
       case jit::MIRType::Double:
         tc_ = PackTypeCode(TypeCode::F64);
         break;
-      case jit::MIRType::Int8x16:
+      case jit::MIRType::Simd128:
         tc_ = PackTypeCode(TypeCode::V128);
         break;
       default:
         MOZ_CRASH("ValType(MIRType): unexpected type");
     }
   }
 
   static ValType fromNonRefTypeCode(TypeCode tc) {
@@ -663,17 +663,17 @@ static inline jit::MIRType ToMIRType(Val
       return jit::MIRType::Int32;
     case ValType::I64:
       return jit::MIRType::Int64;
     case ValType::F32:
       return jit::MIRType::Float32;
     case ValType::F64:
       return jit::MIRType::Double;
     case ValType::V128:
-      return jit::MIRType::Int8x16;
+      return jit::MIRType::Simd128;
     case ValType::Ref:
       return jit::MIRType::RefOrNull;
   }
   MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad type");
 }
 
 static inline bool IsNumberType(ValType vt) { return !vt.isReference(); }
 
--- a/js/src/wasm/WasmValidate.cpp
+++ b/js/src/wasm/WasmValidate.cpp
@@ -1671,17 +1671,17 @@ static bool DecodeStructType(Decoder& d,
         break;
       case ValType::F32:
         offset = layout.addScalar(Scalar::Float32);
         break;
       case ValType::F64:
         offset = layout.addScalar(Scalar::Float64);
         break;
       case ValType::V128:
-        offset = layout.addScalar(Scalar::V128);
+        offset = layout.addScalar(Scalar::Simd128);
         break;
       case ValType::Ref:
         switch (fields[i].type.refTypeKind()) {
           case RefType::TypeIndex:
             offset = layout.addReference(ReferenceType::TYPE_OBJECT);
             break;
           case RefType::Func:
           case RefType::Any: