Bug 1135042: Inline SIMD stores in Ion; r=bhackett
author Benjamin Bouvier <benj@benj.me>
Fri, 27 Feb 2015 18:30:22 +0100
changeset 231860 3f54fe544025d1a49bbf51ca1f40761a2cbe3e5f
parent 231859 ddee53b10d77a87b6b553d1fe0a3de71b9448eb2
child 231861 caea9f8b038e81c7a896654f17a9f7489ba179d3
push id 14263
push user ryanvm@gmail.com
push date Wed, 04 Mar 2015 21:57:17 +0000
reviewers bhackett
bugs 1135042
milestone 39.0a1
Bug 1135042: Inline SIMD stores in Ion; r=bhackett
js/src/jit-test/tests/SIMD/store.js
js/src/jit/CodeGenerator.cpp
js/src/jit/IonBuilder.h
js/src/jit/IonTypes.h
js/src/jit/Lowering.cpp
js/src/jit/MCallOptimize.cpp
js/src/jit/MIR.h
js/src/jit/MacroAssembler.cpp
js/src/jit/RangeAnalysis.cpp
js/src/jit/TypePolicy.cpp
js/src/jit/TypePolicy.h
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/mips/MacroAssembler-mips.h
js/src/jit/shared/MacroAssembler-x86-shared.h
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/SIMD/store.js
@@ -0,0 +1,62 @@
+load(libdir + 'simd.js');
+
+setJitCompilerOption("ion.warmup.trigger", 40);
+
+function f() {
+    var f32 = new Float32Array(16);
+    for (var i = 0; i < 16; i++)
+        f32[i] = i + 1;
+
+    var f64 = new Float64Array(f32.buffer);
+    var i32 = new Int32Array(f32.buffer);
+    var u32 = new Uint32Array(f32.buffer);
+    var i16 = new Int16Array(f32.buffer);
+    var u16 = new Uint16Array(f32.buffer);
+    var i8  = new Int8Array(f32.buffer);
+    var u8  = new Uint8Array(f32.buffer);
+
+    var f4 = SIMD.float32x4(42, 43, 44, 45);
+
+    function check() {
+        assertEq(f32[0], 42);
+        assertEq(f32[1], 43);
+        assertEq(f32[2], 44);
+        assertEq(f32[3], 45);
+
+        f32[0] = 1;
+        f32[1] = 2;
+        f32[2] = 3;
+        f32[3] = 4;
+    }
+
+    for (var i = 0; i < 150; i++) {
+        SIMD.float32x4.store(f64, 0, f4);
+        check();
+        SIMD.float32x4.store(f32, 0, f4);
+        check();
+        SIMD.float32x4.store(i32, 0, f4);
+        check();
+        SIMD.float32x4.store(u32, 0, f4);
+        check();
+        SIMD.float32x4.store(i16, 0, f4);
+        check();
+        SIMD.float32x4.store(u16, 0, f4);
+        check();
+        SIMD.float32x4.store(i8, 0, f4);
+        check();
+        SIMD.float32x4.store(u8, 0, f4);
+        check();
+
+        var caught = false;
+        try {
+            SIMD.float32x4.store(i8, (i < 149) ? 0 : (16 << 2) - (4 << 2) + 1, f4);
+            check();
+        } catch (e) {
+            caught = true;
+        }
+        assertEq(i < 149 || caught, true);
+    }
+}
+
+f();
+
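(For reference, a minimal sketch of how the out-of-bounds index used on the last test iteration works out; the variable names below are illustrative and not part of the test.)

    // The backing buffer holds 16 float32 values, i.e. 64 bytes, and a
    // float32x4 store writes 16 bytes, so the last valid Int8Array index
    // is 64 - 16 = 48.
    var bufferBytes = 16 << 2;                           // 64
    var simdBytes   = 4 << 2;                            // 16
    var firstOutOfBounds = bufferBytes - simdBytes + 1;  // 49, the test's (16 << 2) - (4 << 2) + 1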
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -8711,44 +8711,47 @@ CodeGenerator::visitLoadTypedArrayElemen
     if (fail.used())
         bailoutFrom(&fail, lir->snapshot());
 
     masm.bind(&done);
 }
 
 template <typename T>
 static inline void
-StoreToTypedArray(MacroAssembler &masm, Scalar::Type arrayType, const LAllocation *value, const T &dest)
-{
-    if (arrayType == Scalar::Float32 || arrayType == Scalar::Float64) {
-        masm.storeToTypedFloatArray(arrayType, ToFloatRegister(value), dest);
+StoreToTypedArray(MacroAssembler &masm, Scalar::Type writeType, const LAllocation *value, const T &dest)
+{
+    if (Scalar::isSimdType(writeType) ||
+        writeType == Scalar::Float32 ||
+        writeType == Scalar::Float64)
+    {
+        masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
     } else {
         if (value->isConstant())
-            masm.storeToTypedIntArray(arrayType, Imm32(ToInt32(value)), dest);
+            masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
         else
-            masm.storeToTypedIntArray(arrayType, ToRegister(value), dest);
+            masm.storeToTypedIntArray(writeType, ToRegister(value), dest);
     }
 }
 
 void
 CodeGenerator::visitStoreTypedArrayElement(LStoreTypedArrayElement *lir)
 {
     Register elements = ToRegister(lir->elements());
     const LAllocation *value = lir->value();
 
-    Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
+    Scalar::Type writeType = lir->mir()->writeType();
+    int width = Scalar::byteSize(lir->mir()->arrayType());
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width + lir->mir()->offsetAdjustment());
-        StoreToTypedArray(masm, arrayType, value, dest);
+        StoreToTypedArray(masm, writeType, value, dest);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width),
                        lir->mir()->offsetAdjustment());
-        StoreToTypedArray(masm, arrayType, value, dest);
+        StoreToTypedArray(masm, writeType, value, dest);
     }
 }
 
 void
 CodeGenerator::visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole *lir)
 {
     Register elements = ToRegister(lir->elements());
     const LAllocation *value = lir->value();
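(The writeType/arrayType split above matters because the index is still scaled by the destination array's element width, while the store itself writes a full 16-byte SIMD value. A minimal sketch of the user-visible semantics, assuming a little-endian platform; not part of the patch.)

    var buf = new ArrayBuffer(32);
    var u8  = new Uint8Array(buf);
    var f32 = new Float32Array(buf);
    // Index 4 is scaled by Uint8Array's 1-byte element width, but the store
    // still writes 16 bytes, i.e. bytes 4..19 of the buffer.
    SIMD.float32x4.store(u8, 4, SIMD.float32x4(1, 2, 3, 4));
    assertEq(f32[1], 1);   // bytes 4..7 now hold lane 0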
--- a/js/src/jit/IonBuilder.h
+++ b/js/src/jit/IonBuilder.h
@@ -833,17 +833,21 @@ class IonBuilder
                                   SimdTypeDescr::Type type);
     InliningStatus inlineSimdSplat(CallInfo &callInfo, JSNative native, SimdTypeDescr::Type type);
     InliningStatus inlineSimdSwizzle(CallInfo &callInfo, JSNative native, SimdTypeDescr::Type type);
     InliningStatus inlineSimdCheck(CallInfo &callInfo, JSNative native, SimdTypeDescr::Type type);
     InliningStatus inlineSimdConvert(CallInfo &callInfo, JSNative native, bool isCast,
                                      SimdTypeDescr::Type from, SimdTypeDescr::Type to);
     InliningStatus inlineSimdSelect(CallInfo &callInfo, JSNative native, bool isElementWise,
                                     SimdTypeDescr::Type type);
+
+    bool prepareForSimdLoadStore(CallInfo &callInfo, Scalar::Type simdType, MInstruction **elements,
+                                 MDefinition **index, Scalar::Type *arrayType);
     InliningStatus inlineSimdLoad(CallInfo &callInfo, JSNative native, SimdTypeDescr::Type type);
+    InliningStatus inlineSimdStore(CallInfo &callInfo, JSNative native, SimdTypeDescr::Type type);
 
     // Utility intrinsics.
     InliningStatus inlineIsCallable(CallInfo &callInfo);
     InliningStatus inlineIsObject(CallInfo &callInfo);
     InliningStatus inlineToObject(CallInfo &callInfo);
     InliningStatus inlineToInteger(CallInfo &callInfo);
     InliningStatus inlineToString(CallInfo &callInfo);
     InliningStatus inlineDump(CallInfo &callInfo);
--- a/js/src/jit/IonTypes.h
+++ b/js/src/jit/IonTypes.h
@@ -548,16 +548,42 @@ IsMagicType(MIRType type)
 // SIMD kind. It is the Y part of the name "Foo x Y".
 static inline unsigned
 SimdTypeToLength(MIRType type)
 {
     MOZ_ASSERT(IsSimdType(type));
     return 1 << ((type >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK);
 }
 
+static inline MIRType
+ScalarTypeToMIRType(Scalar::Type type)
+{
+    switch (type) {
+      case Scalar::Int8:
+      case Scalar::Uint8:
+      case Scalar::Int16:
+      case Scalar::Uint16:
+      case Scalar::Int32:
+      case Scalar::Uint32:
+      case Scalar::Uint8Clamped:
+        return MIRType_Int32;
+      case Scalar::Float32:
+        return MIRType_Float32;
+      case Scalar::Float64:
+        return MIRType_Double;
+      case Scalar::Float32x4:
+        return MIRType_Float32x4;
+      case Scalar::Int32x4:
+        return MIRType_Int32x4;
+      case Scalar::MaxTypedArrayViewType:
+        break;
+    }
+    MOZ_CRASH("unexpected SIMD kind");
+}
+
 static inline unsigned
 ScalarTypeToLength(Scalar::Type type)
 {
     switch (type) {
       case Scalar::Int8:
       case Scalar::Uint8:
       case Scalar::Int16:
       case Scalar::Uint16:
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -2976,29 +2976,32 @@ LIRGenerator::visitLoadTypedArrayElement
 }
 
 void
 LIRGenerator::visitStoreTypedArrayElement(MStoreTypedArrayElement *ins)
 {
     MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
     MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
 
-    if (ins->isFloatArray()) {
+    if (ins->isSimdWrite()) {
+        MOZ_ASSERT_IF(ins->writeType() == Scalar::Float32x4, ins->value()->type() == MIRType_Float32x4);
+        MOZ_ASSERT_IF(ins->writeType() == Scalar::Int32x4, ins->value()->type() == MIRType_Int32x4);
+    } else if (ins->isFloatArray()) {
         MOZ_ASSERT_IF(ins->arrayType() == Scalar::Float32, ins->value()->type() == MIRType_Float32);
         MOZ_ASSERT_IF(ins->arrayType() == Scalar::Float64, ins->value()->type() == MIRType_Double);
     } else {
         MOZ_ASSERT(ins->value()->type() == MIRType_Int32);
     }
 
     LUse elements = useRegister(ins->elements());
     LAllocation index = useRegisterOrConstant(ins->index());
     LAllocation value;
 
     // For byte arrays, the value has to be in a byte register on x86.
-    if (ins->isByteArray())
+    if (ins->isByteArray() && !ins->isSimdWrite())
         value = useByteOpRegisterOrNonDoubleConstant(ins->value());
     else
         value = useRegisterOrNonDoubleConstant(ins->value());
 
     // Optimization opportunity for atomics: on some platforms there
     // is a store instruction that incorporates the necessary
     // barriers, and we could use that instead of separate barrier and
     // store instructions.  See bug #1077027.
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -354,16 +354,22 @@ IonBuilder::inlineNativeCall(CallInfo &c
     if (native == js::simd_int32x4_swizzle)
         return inlineSimdSwizzle(callInfo, native, SimdTypeDescr::TYPE_INT32);
 
     if (native == js::simd_int32x4_load)
         return inlineSimdLoad(callInfo, native, SimdTypeDescr::TYPE_INT32);
     if (native == js::simd_float32x4_load)
         return inlineSimdLoad(callInfo, native, SimdTypeDescr::TYPE_FLOAT32);
 
+
+    if (native == js::simd_int32x4_store)
+        return inlineSimdStore(callInfo, native, SimdTypeDescr::TYPE_INT32);
+    if (native == js::simd_float32x4_store)
+        return inlineSimdStore(callInfo, native, SimdTypeDescr::TYPE_FLOAT32);
+
     return InliningStatus_NotInlined;
 }
 
 IonBuilder::InliningStatus
 IonBuilder::inlineNativeGetter(CallInfo &callInfo, JSFunction *target)
 {
     MOZ_ASSERT(target->isNative());
     JSNative native = target->native();
@@ -3129,59 +3135,102 @@ SimdTypeToScalarType(SimdTypeDescr::Type
     switch (type) {
       case SimdTypeDescr::TYPE_FLOAT32: return Scalar::Float32x4;
       case SimdTypeDescr::TYPE_INT32:   return Scalar::Int32x4;
       case SimdTypeDescr::TYPE_FLOAT64: break;
     }
     MOZ_CRASH("unexpected simd type");
 }
 
+bool
+IonBuilder::prepareForSimdLoadStore(CallInfo &callInfo, Scalar::Type simdType, MInstruction **elements,
+                                    MDefinition **index, Scalar::Type *arrayType)
+{
+    MDefinition *array = callInfo.getArg(0);
+    *index = callInfo.getArg(1);
+
+    if (!ElementAccessIsAnyTypedArray(constraints(), array, *index, arrayType))
+        return false;
+
+    MInstruction *indexAsInt32 = MToInt32::New(alloc(), *index);
+    current->add(indexAsInt32);
+    *index = indexAsInt32;
+
+    MDefinition *indexForBoundsCheck = *index;
+
+    // Artificially make sure the index is in bounds by adding the difference
+    // number of slots needed (e.g. reading from Float32Array we need to make
+    // sure to be in bounds for 4 slots, so add 3, etc.).
+    MOZ_ASSERT(Scalar::byteSize(simdType) % Scalar::byteSize(*arrayType) == 0);
+    int32_t suppSlotsNeeded = Scalar::byteSize(simdType) / Scalar::byteSize(*arrayType) - 1;
+    if (suppSlotsNeeded) {
+        MConstant *suppSlots = constant(Int32Value(suppSlotsNeeded));
+        MAdd *addedIndex = MAdd::New(alloc(), *index, suppSlots);
+        // We're fine even if the add overflows, as long as the generated
+        // code for the bounds check uses an unsigned comparison.
+        addedIndex->setInt32();
+        current->add(addedIndex);
+        indexForBoundsCheck = addedIndex;
+    }
+
+    MInstruction *length;
+    addTypedArrayLengthAndData(array, SkipBoundsCheck, index, &length, elements);
+
+    MInstruction *check = MBoundsCheck::New(alloc(), indexForBoundsCheck, length);
+    current->add(check);
+    return true;
+}
+
 IonBuilder::InliningStatus
 IonBuilder::inlineSimdLoad(CallInfo &callInfo, JSNative native, SimdTypeDescr::Type type)
 {
     InlineTypedObject *templateObj = nullptr;
     if (!checkInlineSimd(callInfo, native, type, 2, &templateObj))
         return InliningStatus_NotInlined;
 
-    MDefinition *array = callInfo.getArg(0);
-    MDefinition *index = callInfo.getArg(1);
-
+    Scalar::Type simdType = SimdTypeToScalarType(type);
+
+    MDefinition *index = nullptr;
+    MInstruction *elements = nullptr;
     Scalar::Type arrayType;
-    if (!ElementAccessIsAnyTypedArray(constraints(), array, index, &arrayType))
+    if (!prepareForSimdLoadStore(callInfo, simdType, &elements, &index, &arrayType))
         return InliningStatus_NotInlined;
 
-    MInstruction *indexAsInt32 = MToInt32::New(alloc(), index);
-    current->add(indexAsInt32);
-    index = indexAsInt32;
-
-    MDefinition *indexForBoundsCheck = index;
-
-    // Artificially make sure the index is in bounds by adding the difference
-    // number of slots needed (e.g. reading from Float32Array we need to make
-    // sure to be in bounds for 4 slots, so add 3, etc.).
-    MOZ_ASSERT(Simd128DataSize % Scalar::byteSize(arrayType) == 0);
-    int32_t suppSlotsNeeded = Simd128DataSize / Scalar::byteSize(arrayType) - 1;
-    if (suppSlotsNeeded) {
-        MConstant *suppSlots = constant(Int32Value(suppSlotsNeeded));
-        MAdd *addedIndex = MAdd::New(alloc(), index, suppSlots);
-        // Even if this addition overflows, we're fine because the code generated
-        // for the bounds check uses uint32 arithmetic
-        addedIndex->setInt32();
-        current->add(addedIndex);
-        indexForBoundsCheck = addedIndex;
-    }
-
-    MInstruction *length;
-    MInstruction *elements;
-    addTypedArrayLengthAndData(array, SkipBoundsCheck, &index, &length, &elements);
-
-    MInstruction *check = MBoundsCheck::New(alloc(), indexForBoundsCheck, length);
-    current->add(check);
-
     MLoadTypedArrayElement *load = MLoadTypedArrayElement::New(alloc(), elements, index, arrayType);
     load->setResultType(SimdTypeDescrToMIRType(type));
-    load->setReadType(SimdTypeToScalarType(type));
+    load->setReadType(simdType);
 
     return boxSimd(callInfo, load, templateObj);
 }
 
+IonBuilder::InliningStatus
+IonBuilder::inlineSimdStore(CallInfo &callInfo, JSNative native, SimdTypeDescr::Type type)
+{
+    InlineTypedObject *templateObj = nullptr;
+    if (!checkInlineSimd(callInfo, native, type, 3, &templateObj))
+        return InliningStatus_NotInlined;
+
+    Scalar::Type simdType = SimdTypeToScalarType(type);
+
+    MDefinition *index = nullptr;
+    MInstruction *elements = nullptr;
+    Scalar::Type arrayType;
+    if (!prepareForSimdLoadStore(callInfo, simdType, &elements, &index, &arrayType))
+        return InliningStatus_NotInlined;
+
+    MDefinition *valueToWrite = callInfo.getArg(2);
+    MStoreTypedArrayElement *store = MStoreTypedArrayElement::New(alloc(), elements, index,
+                                                                  valueToWrite, arrayType);
+    store->setWriteType(simdType);
+
+    current->add(store);
+    current->push(valueToWrite);
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    if (!resumeAfter(store))
+        return InliningStatus_Error;
+
+    return InliningStatus_Inlined;
+}
+
 } // namespace jit
 } // namespace js
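(The bounds-check slack computed in prepareForSimdLoadStore depends only on the ratio of the SIMD width to the destination array's element size. A small sketch of the values this yields for a 16-byte access; the helper below is illustrative, not part of the patch.)

    function suppSlotsNeeded(simdByteSize, arrayByteSize) {
        // Indices index .. index + suppSlotsNeeded must all be in bounds.
        return simdByteSize / arrayByteSize - 1;
    }
    suppSlotsNeeded(16, 8);  // Float64Array: 1
    suppSlotsNeeded(16, 4);  // Float32Array, Int32Array, Uint32Array: 3
    suppSlotsNeeded(16, 2);  // Int16Array, Uint16Array: 7
    suppSlotsNeeded(16, 1);  // Int8Array, Uint8Array: 15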
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -9063,27 +9063,29 @@ class MLoadTypedArrayElementStatic
     void collectRangeInfoPreTrunc() MOZ_OVERRIDE;
 };
 
 class MStoreTypedArrayElement
   : public MTernaryInstruction,
     public StoreTypedArrayPolicy::Data
 {
     Scalar::Type arrayType_;
+    Scalar::Type writeType_;
     bool requiresBarrier_;
     int32_t offsetAdjustment_;
 
     // See note in MStoreElementCommon.
     bool racy_;
 
     MStoreTypedArrayElement(MDefinition *elements, MDefinition *index, MDefinition *value,
                             Scalar::Type arrayType, MemoryBarrierRequirement requiresBarrier,
                             int32_t offsetAdjustment)
       : MTernaryInstruction(elements, index, value),
         arrayType_(arrayType),
+        writeType_(arrayType),
         requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
         offsetAdjustment_(offsetAdjustment),
         racy_(false)
     {
         if (requiresBarrier_)
             setGuard();         // Not removable or movable
         else
             setMovable();
@@ -9099,19 +9101,28 @@ class MStoreTypedArrayElement
                                         MDefinition *value, Scalar::Type arrayType,
                                         MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier,
                                         int32_t offsetAdjustment = 0)
     {
         return new(alloc) MStoreTypedArrayElement(elements, index, value, arrayType,
                                                   requiresBarrier, offsetAdjustment);
     }
 
+    void setWriteType(Scalar::Type type) {
+        writeType_ = type;
+    }
+    Scalar::Type writeType() const {
+        return writeType_;
+    }
     Scalar::Type arrayType() const {
         return arrayType_;
     }
+    bool isSimdWrite() const {
+        return Scalar::isSimdType(writeType());
+    }
     bool isByteArray() const {
         return arrayType_ == Scalar::Int8 ||
                arrayType_ == Scalar::Uint8 ||
                arrayType_ == Scalar::Uint8Clamped;
     }
     bool isFloatArray() const {
         return arrayType_ == Scalar::Float32 ||
                arrayType_ == Scalar::Float64;
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -289,16 +289,22 @@ StoreToTypedFloatArray(MacroAssembler &m
         break;
       case Scalar::Float64:
 #ifdef JS_MORE_DETERMINISTIC
         // See the comment in TypedArrayObjectTemplate::doubleToNative.
         masm.canonicalizeDouble(value);
 #endif
         masm.storeDouble(value, dest);
         break;
+      case Scalar::Float32x4:
+        masm.storeUnalignedFloat32x4(value, dest);
+        break;
+      case Scalar::Int32x4:
+        masm.storeUnalignedInt32x4(value, dest);
+        break;
       default:
         MOZ_CRASH("Invalid typed array type");
     }
 }
 
 void
 MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
                                        const BaseIndex &dest)
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -2640,17 +2640,17 @@ MToDouble::operandTruncateKind(size_t in
     // MToDouble propagates its truncate kind to its operand.
     return truncateKind();
 }
 
 MDefinition::TruncateKind
 MStoreTypedArrayElement::operandTruncateKind(size_t index) const
 {
     // An integer store truncates the stored value.
-    return index == 2 && !isFloatArray() ? Truncate : NoTruncate;
+    return index == 2 && !isFloatArray() && !isSimdWrite() ? Truncate : NoTruncate;
 }
 
 MDefinition::TruncateKind
 MStoreTypedArrayElementHole::operandTruncateKind(size_t index) const
 {
     // An integer store truncates the stored value.
     return index == 3 && !isFloatArray() ? Truncate : NoTruncate;
 }
--- a/js/src/jit/TypePolicy.cpp
+++ b/js/src/jit/TypePolicy.cpp
@@ -871,19 +871,23 @@ InstanceOfPolicy::adjustInputs(TempAlloc
     // Box first operand if it isn't object
     if (def->getOperand(0)->type() != MIRType_Object)
         BoxPolicy<0>::staticAdjustInputs(alloc, def);
 
     return true;
 }
 
 bool
-StoreTypedArrayPolicy::adjustValueInput(TempAllocator &alloc, MInstruction *ins, int arrayType,
-                                        MDefinition *value, int valueOperand)
+StoreTypedArrayPolicy::adjustValueInput(TempAllocator &alloc, MInstruction *ins,
+                                        Scalar::Type writeType, MDefinition *value, int valueOperand)
 {
+    // Storing a SIMD value just implies that we might need a SimdUnbox.
+    if (Scalar::isSimdType(writeType))
+        return MaybeSimdUnbox(alloc, ins, ScalarTypeToMIRType(writeType), valueOperand);
+
     MDefinition *curValue = value;
     // First, ensure the value is int32, boolean, double or Value.
     // The conversion is based on TypedArrayObjectTemplate::setElementTail.
     switch (value->type()) {
       case MIRType_Int32:
       case MIRType_Double:
       case MIRType_Float32:
       case MIRType_Boolean:
@@ -914,17 +918,17 @@ StoreTypedArrayPolicy::adjustValueInput(
     }
 
     MOZ_ASSERT(value->type() == MIRType_Int32 ||
                value->type() == MIRType_Boolean ||
                value->type() == MIRType_Double ||
                value->type() == MIRType_Float32 ||
                value->type() == MIRType_Value);
 
-    switch (arrayType) {
+    switch (writeType) {
       case Scalar::Int8:
       case Scalar::Uint8:
       case Scalar::Int16:
       case Scalar::Uint16:
       case Scalar::Int32:
       case Scalar::Uint32:
         if (value->type() != MIRType_Int32) {
             value = MTruncateToInt32::New(alloc, value);
@@ -961,17 +965,17 @@ bool
 StoreTypedArrayPolicy::adjustInputs(TempAllocator &alloc, MInstruction *ins)
 {
     SingleObjectPolicy::staticAdjustInputs(alloc, ins);
 
     MStoreTypedArrayElement *store = ins->toStoreTypedArrayElement();
     MOZ_ASSERT(IsValidElementsType(store->elements(), store->offsetAdjustment()));
     MOZ_ASSERT(store->index()->type() == MIRType_Int32);
 
-    return adjustValueInput(alloc, ins, store->arrayType(), store->value(), 2);
+    return adjustValueInput(alloc, store, store->writeType(), store->value(), 2);
 }
 
 bool
 StoreTypedArrayHolePolicy::adjustInputs(TempAllocator &alloc, MInstruction *ins)
 {
     MStoreTypedArrayElementHole *store = ins->toStoreTypedArrayElementHole();
     MOZ_ASSERT(store->elements()->type() == MIRType_Elements);
     MOZ_ASSERT(store->index()->type() == MIRType_Int32);
--- a/js/src/jit/TypePolicy.h
+++ b/js/src/jit/TypePolicy.h
@@ -437,17 +437,18 @@ class InstanceOfPolicy MOZ_FINAL : publi
 };
 
 class StoreTypedArrayHolePolicy;
 class StoreTypedArrayElementStaticPolicy;
 
 class StoreTypedArrayPolicy : public TypePolicy
 {
   private:
-    static bool adjustValueInput(TempAllocator &alloc, MInstruction *ins, int arrayType, MDefinition *value, int valueOperand);
+    static bool adjustValueInput(TempAllocator &alloc, MInstruction *ins, Scalar::Type arrayType,
+                                 MDefinition *value, int valueOperand);
 
     friend class StoreTypedArrayHolePolicy;
     friend class StoreTypedArrayElementStaticPolicy;
 
   public:
     EMPTY_DATA_;
     virtual bool adjustInputs(TempAllocator &alloc, MInstruction *ins) MOZ_OVERRIDE;
 };
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -1395,22 +1395,24 @@ class MacroAssemblerARMCompat : public M
 
     void loadPrivate(const Address &address, Register dest);
 
     void loadAlignedInt32x4(const Address &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void storeAlignedInt32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
     void loadUnalignedInt32x4(const Address &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadUnalignedInt32x4(const BaseIndex &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void storeUnalignedInt32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+    void storeUnalignedInt32x4(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
 
     void loadAlignedFloat32x4(const Address &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void storeAlignedFloat32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
     void loadUnalignedFloat32x4(const Address &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadUnalignedFloat32x4(const BaseIndex &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void storeUnalignedFloat32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+    void storeUnalignedFloat32x4(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
 
     void loadDouble(const Address &addr, FloatRegister dest);
     void loadDouble(const BaseIndex &src, FloatRegister dest);
 
     // Load a float value into a register, then expand it to a double.
     void loadFloatAsDouble(const Address &addr, FloatRegister dest);
     void loadFloatAsDouble(const BaseIndex &src, FloatRegister dest);
 
--- a/js/src/jit/mips/MacroAssembler-mips.h
+++ b/js/src/jit/mips/MacroAssembler-mips.h
@@ -1256,22 +1256,24 @@ public:
 
     void loadPrivate(const Address &address, Register dest);
 
     void loadAlignedInt32x4(const Address &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void storeAlignedInt32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
     void loadUnalignedInt32x4(const Address &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadUnalignedInt32x4(const BaseIndex &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void storeUnalignedInt32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+    void storeUnalignedInt32x4(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
 
     void loadAlignedFloat32x4(const Address &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void storeAlignedFloat32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
     void loadUnalignedFloat32x4(const Address &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void loadUnalignedFloat32x4(const BaseIndex &addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
     void storeUnalignedFloat32x4(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+    void storeUnalignedFloat32x4(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
 
     void loadDouble(const Address &addr, FloatRegister dest);
     void loadDouble(const BaseIndex &src, FloatRegister dest);
 
     // Load a float value into a register, then expand it to a double.
     void loadFloatAsDouble(const Address &addr, FloatRegister dest);
     void loadFloatAsDouble(const BaseIndex &src, FloatRegister dest);
 
--- a/js/src/jit/shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/shared/MacroAssembler-x86-shared.h
@@ -934,16 +934,19 @@ class MacroAssemblerX86Shared : public A
         vmovdqu(Operand(src), dest);
     }
     void loadUnalignedInt32x4(const Operand &src, FloatRegister dest) {
         vmovdqu(src, dest);
     }
     void storeUnalignedInt32x4(FloatRegister src, const Address &dest) {
         vmovdqu(src, Operand(dest));
     }
+    void storeUnalignedInt32x4(FloatRegister src, const BaseIndex &dest) {
+        vmovdqu(src, Operand(dest));
+    }
     void storeUnalignedInt32x4(FloatRegister src, const Operand &dest) {
         vmovdqu(src, dest);
     }
     void packedEqualInt32x4(const Operand &src, FloatRegister dest) {
         vpcmpeqd(src, dest, dest);
     }
     void packedGreaterThanInt32x4(const Operand &src, FloatRegister dest) {
         vpcmpgtd(src, dest, dest);
@@ -1018,16 +1021,19 @@ class MacroAssemblerX86Shared : public A
         vmovdqu(Operand(src), dest);
     }
     void loadUnalignedFloat32x4(const Operand &src, FloatRegister dest) {
         vmovups(src, dest);
     }
     void storeUnalignedFloat32x4(FloatRegister src, const Address &dest) {
         vmovups(src, Operand(dest));
     }
+    void storeUnalignedFloat32x4(FloatRegister src, const BaseIndex &dest) {
+        vmovups(src, Operand(dest));
+    }
     void storeUnalignedFloat32x4(FloatRegister src, const Operand &dest) {
         vmovups(src, dest);
     }
     void packedAddFloat32(const Operand &src, FloatRegister dest) {
         vaddps(src, dest, dest);
     }
     void packedSubFloat32(const Operand &src, FloatRegister dest) {
         vsubps(src, dest, dest);