Backed out changeset 39e6791cc5c5 (bug 1079361)
author: Carsten "Tomcat" Book <cbook@mozilla.com>
Fri, 21 Nov 2014 12:45:43 +0100
changeset 241226 6ec1210bde249be5e6337d2f52b25a3c72fc613c
parent 241225 87ae4441812083f284cab7f29357be87512d0eb1
child 241227 2c39d4a57818b5bdb4b987146e8fb54c4e0be737
push id: 4311
push user: raliiev@mozilla.com
push date: Mon, 12 Jan 2015 19:37:41 +0000
treeherder: mozilla-beta@150c9fed433b [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 1079361
milestone: 36.0a1
backs out: 39e6791cc5c5a0586f97094c83fc86c5699a1c63
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out changeset 39e6791cc5c5 (bug 1079361)
js/src/asmjs/AsmJSModule.cpp
js/src/asmjs/AsmJSSignalHandlers.cpp
js/src/asmjs/AsmJSValidate.cpp
js/src/asmjs/AsmJSValidate.h
js/src/builtin/SIMD.h
js/src/jit-test/tests/asm.js/testSIMD.js
js/src/jit/MIR.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/BaseAssembler-x86-shared.h
js/src/jit/shared/CodeGenerator-x86-shared.cpp
js/src/jit/shared/MacroAssembler-x86-shared.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/Lowering-x64.cpp
js/src/jit/x86/Assembler-x86.h
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/Lowering-x86.cpp
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -749,84 +749,52 @@ AsmJSModule::staticallyLink(ExclusiveCon
         exitDatum.exit = interpExitTrampoline(exits_[i]);
         exitDatum.fun = nullptr;
         exitDatum.ionScript = nullptr;
     }
 
     MOZ_ASSERT(isStaticallyLinked());
 }
 
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-static inline size_t
-ViewTypeByteSize(AsmJSHeapAccess::ViewType vt)
-{
-    switch (vt) {
-      case AsmJSHeapAccess::Int8:
-      case AsmJSHeapAccess::Uint8:
-      case AsmJSHeapAccess::Uint8Clamped:
-      case AsmJSHeapAccess::Int16:
-      case AsmJSHeapAccess::Uint16:
-      case AsmJSHeapAccess::Int32:
-      case AsmJSHeapAccess::Uint32:
-      case AsmJSHeapAccess::Float32:
-      case AsmJSHeapAccess::Float64:
-        return 1 << TypedArrayShift(Scalar::Type(vt));
-      case AsmJSHeapAccess::Float32x4:
-      case AsmJSHeapAccess::Int32x4:
-        return 16;
-    }
-    MOZ_CRASH("unexpected view type");
-}
-#endif // JS_CODEGEN_X86 || JS_CODEGEN_X64
-
 void
 AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared *> heap, JSContext *cx)
 {
     MOZ_ASSERT_IF(heap->is<ArrayBufferObject>(), heap->as<ArrayBufferObject>().isAsmJS());
     MOZ_ASSERT(IsValidAsmJSHeapLength(heap->byteLength()));
     MOZ_ASSERT(dynamicallyLinked_);
     MOZ_ASSERT(!maybeHeap_);
 
     maybeHeap_ = heap;
     heapDatum() = heap->dataPointer();
 
 #if defined(JS_CODEGEN_X86)
     uint8_t *heapOffset = heap->dataPointer();
+    void *heapLength = (void*)heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
         const jit::AsmJSHeapAccess &access = heapAccesses_[i];
-        if (access.hasLengthCheck()) {
-            // An access is out-of-bounds iff
-            //      ptr + data-type-byte-size > heapLength
-            // i.e. ptr >= heapLength + 1 - data-type-byte-size
-            // (Note that we need >= as this is what codegen uses.)
-            AsmJSHeapAccess::ViewType vt = access.viewType();
-            X86Assembler::setPointer(access.patchLengthAt(code_),
-                                     (void*)(heap->byteLength() + 1 - ViewTypeByteSize(vt)));
-        }
+        if (access.hasLengthCheck())
+            X86Assembler::setPointer(access.patchLengthAt(code_), heapLength);
         void *addr = access.patchOffsetAt(code_);
         uint32_t disp = reinterpret_cast<uint32_t>(X86Assembler::getPointer(addr));
         MOZ_ASSERT(disp <= INT32_MAX);
         X86Assembler::setPointer(addr, (void *)(heapOffset + disp));
     }
 #elif defined(JS_CODEGEN_X64)
     // Even with signal handling being used for most bounds checks, there may be
     // atomic operations that depend on explicit checks.
     //
     // If we have any explicit bounds checks, we need to patch the heap length
     // checks at the right places. All accesses that have been recorded are the
     // only ones that need bound checks (see also
     // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,AtomicBinop}Heap)
     int32_t heapLength = int32_t(intptr_t(heap->byteLength()));
     for (size_t i = 0; i < heapAccesses_.length(); i++) {
         const jit::AsmJSHeapAccess &access = heapAccesses_[i];
-        if (access.hasLengthCheck()) {
-            // See comment above for x86 codegen.
-            X86Assembler::setInt32(access.patchLengthAt(code_),
-                                   heapLength + 1 - ViewTypeByteSize(access.viewType()));
-        }
+        if (access.hasLengthCheck())
+            X86Assembler::setInt32(access.patchLengthAt(code_), heapLength);
     }
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
     uint32_t heapLength = heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
         jit::Assembler::UpdateBoundsCheck(heapLength,
                                           (jit::Instruction*)(heapAccesses_[i].offset() + code_));
     }
 #endif
--- a/js/src/asmjs/AsmJSSignalHandlers.cpp
+++ b/js/src/asmjs/AsmJSSignalHandlers.cpp
@@ -345,30 +345,16 @@ SetXMMRegToNaN(AsmJSHeapAccess::ViewType
       }
       case AsmJSHeapAccess::Float64: {
         JS_STATIC_ASSERT(sizeof(T) == 2 * sizeof(double));
         double *dbls = reinterpret_cast<double*>(xmm_reg);
         dbls[0] = GenericNaN();
         dbls[1] = 0;
         break;
       }
-      case AsmJSHeapAccess::Float32x4: {
-        JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(float));
-        float *floats = reinterpret_cast<float*>(xmm_reg);
-        for (unsigned i = 0; i < 4; i++)
-            floats[i] = GenericNaN();
-        break;
-      }
-      case AsmJSHeapAccess::Int32x4: {
-        JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(int32_t));
-        int32_t *ints = reinterpret_cast<int32_t*>(xmm_reg);
-        for (unsigned i = 0; i < 4; i++)
-            ints[i] = 0;
-        break;
-      }
       case AsmJSHeapAccess::Int8:
       case AsmJSHeapAccess::Uint8:
       case AsmJSHeapAccess::Int16:
       case AsmJSHeapAccess::Uint16:
       case AsmJSHeapAccess::Int32:
       case AsmJSHeapAccess::Uint32:
       case AsmJSHeapAccess::Uint8Clamped:
         MOZ_CRASH("unexpected type in SetXMMRegToNaN");
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -5609,118 +5609,16 @@ CheckSimdShuffle(FunctionCompiler &f, Pa
 
     *def = f.shuffleSimd(vecs[0], vecs[1], lanes[0], lanes[1], lanes[2], lanes[3],
                          retType.toMIRType());
     *type = retType;
     return true;
 }
 
 static bool
-CheckSimdLoadStoreArgs(FunctionCompiler &f, ParseNode *call, Type retType,
-                       AsmJSHeapAccess::ViewType *viewType, MDefinition **index,
-                       NeedsBoundsCheck *needsBoundsCheck)
-{
-    MOZ_ASSERT(retType.isSimd());
-    if (retType.isInt32x4())
-        *viewType = AsmJSHeapAccess::Int32x4;
-    else if (retType.isFloat32x4())
-        *viewType = AsmJSHeapAccess::Float32x4;
-    else
-        MOZ_CRASH("unexpected SIMD type");
-
-    ParseNode *view = CallArgList(call);
-    if (!view->isKind(PNK_NAME))
-        return f.fail(view, "expected Uint8Array view as SIMD.*.store first argument");
-
-    const ModuleCompiler::Global *global = f.lookupGlobal(view->name());
-    if (!global ||
-        global->which() != ModuleCompiler::Global::ArrayView ||
-        global->viewType() != Scalar::Uint8)
-    {
-        return f.fail(view, "expected Uint8Array view as SIMD.*.store first argument");
-    }
-
-    *needsBoundsCheck = NEEDS_BOUNDS_CHECK;
-
-    ParseNode *indexExpr = NextNode(view);
-    uint32_t indexLit;
-    if (IsLiteralOrConstInt(f, indexExpr, &indexLit)) {
-        if (indexLit > INT32_MAX)
-            return f.fail(indexExpr, "constant index out of range");
-
-        if (!f.m().tryRequireHeapLengthToBeAtLeast(indexLit + Simd128DataSize)) {
-            return f.failf(indexExpr, "constant index outside heap size range declared by the "
-                                      "change-heap function (0x%x - 0x%x)",
-                                      f.m().minHeapLength(), f.m().module().maxHeapLength());
-        }
-
-        *needsBoundsCheck = NO_BOUNDS_CHECK;
-        *index = f.constant(Int32Value(indexLit), Type::Int);
-        return true;
-    }
-
-    f.enterHeapExpression();
-
-    Type indexType;
-    if (!CheckExpr(f, indexExpr, index, &indexType))
-        return false;
-    if (!indexType.isIntish())
-        return f.failf(indexExpr, "%s is not a subtype of intish", indexType.toChars());
-
-    f.leaveHeapExpression();
-
-    return true;
-}
-
-static bool
-CheckSimdLoad(FunctionCompiler &f, ParseNode *call, Type retType, MDefinition **def, Type *type)
-{
-    unsigned numArgs = CallArgListLength(call);
-    if (numArgs != 2)
-        return f.failf(call, "expected 2 arguments to SIMD load, got %u", numArgs);
-
-    AsmJSHeapAccess::ViewType viewType;
-    MDefinition *index;
-    NeedsBoundsCheck needsBoundsCheck;
-    if (!CheckSimdLoadStoreArgs(f, call, retType, &viewType, &index, &needsBoundsCheck))
-        return false;
-
-    *def = f.loadHeap(viewType, index, needsBoundsCheck);
-    *type = retType;
-    return true;
-}
-
-static bool
-CheckSimdStore(FunctionCompiler &f, ParseNode *call, Type retType, MDefinition **def, Type *type)
-{
-    unsigned numArgs = CallArgListLength(call);
-    if (numArgs != 3)
-        return f.failf(call, "expected 3 arguments to SIMD load, got %u", numArgs);
-
-    AsmJSHeapAccess::ViewType viewType;
-    MDefinition *index;
-    NeedsBoundsCheck needsBoundsCheck;
-    if (!CheckSimdLoadStoreArgs(f, call, retType, &viewType, &index, &needsBoundsCheck))
-        return false;
-
-    ParseNode *vecExpr = NextNode(NextNode(CallArgList(call)));
-    MDefinition *vec;
-    Type vecType;
-    if (!CheckExpr(f, vecExpr, &vec, &vecType))
-        return false;
-    if (!(vecType <= retType))
-        return f.failf(vecExpr, "%s is not a subtype of %s", vecType.toChars(), retType.toChars());
-
-    f.storeHeap(viewType, index, vec, needsBoundsCheck);
-    *def = vec;
-    *type = vecType;
-    return true;
-}
-
-static bool
 CheckSimdOperationCall(FunctionCompiler &f, ParseNode *call, const ModuleCompiler::Global *global,
                        MDefinition **def, Type *type)
 {
     MOZ_ASSERT(global->isSimdOperation());
 
     Type retType = global->simdOperationType();
 
     switch (global->simdOperation()) {
@@ -5795,21 +5693,16 @@ CheckSimdOperationCall(FunctionCompiler 
       case AsmJSSimdOperation_reciprocalSqrt:
         return CheckSimdUnary(f, call, retType, MSimdUnaryArith::reciprocalSqrt, def, type);
 
       case AsmJSSimdOperation_swizzle:
         return CheckSimdSwizzle(f, call, retType, def, type);
       case AsmJSSimdOperation_shuffle:
         return CheckSimdShuffle(f, call, retType, def, type);
 
-      case AsmJSSimdOperation_load:
-        return CheckSimdLoad(f, call, retType, def, type);
-      case AsmJSSimdOperation_store:
-        return CheckSimdStore(f, call, retType, def, type);
-
       case AsmJSSimdOperation_splat: {
         DefinitionVector defs;
         if (!CheckSimdCallArgs(f, call, 1, CheckSimdScalarArgs(retType), &defs))
             return false;
         *def = f.splatSimd(defs[0], retType.toMIRType());
         *type = retType;
         return true;
       }
--- a/js/src/asmjs/AsmJSValidate.h
+++ b/js/src/asmjs/AsmJSValidate.h
@@ -52,19 +52,17 @@ ValidateAsmJS(ExclusiveContext *cx, AsmJ
 
 // The assumed page size; dynamically checked in ValidateAsmJS.
 const size_t AsmJSPageSize = 4096;
 
 #ifdef JS_CPU_X64
 // On x64, the internal ArrayBuffer data array is inflated to 4GiB (only the
 // byteLength portion of which is accessible) so that out-of-bounds accesses
 // (made using a uint32 index) are guaranteed to raise a SIGSEGV.
-// Unaligned accesses and mask optimizations might also try to access a few
-// bytes after this limit, so just inflate it by AsmJSPageSize.
-static const size_t AsmJSMappedSize = 4 * 1024ULL * 1024ULL * 1024ULL + AsmJSPageSize;
+static const size_t AsmJSMappedSize = 4 * 1024ULL * 1024ULL * 1024ULL;
 #endif
 
 // From the asm.js spec Linking section:
 //  the heap object's byteLength must be either
 //    2^n for n in [12, 24)
 //  or
 //    2^24 * n for n >= 1.
 
--- a/js/src/builtin/SIMD.h
+++ b/js/src/builtin/SIMD.h
@@ -168,19 +168,17 @@
     _(swizzle)                       \
     _(shuffle)                       \
     _(splat)                         \
     _(withX)                         \
     _(withY)                         \
     _(withZ)                         \
     _(withW)                         \
     _(not)                           \
-    _(neg)                           \
-    _(load)                          \
-    _(store)
+    _(neg)
 #define FORALL_SIMD_OP(_)            \
     FOREACH_INT32X4_SIMD_OP(_)       \
     FOREACH_FLOAT32X4_SIMD_OP(_)     \
     FOREACH_COMMONX4_SIMD_OP(_)
 
 namespace js {
 
 class SIMDObject : public JSObject
--- a/js/src/jit-test/tests/asm.js/testSIMD.js
+++ b/js/src/jit-test/tests/asm.js/testSIMD.js
@@ -940,234 +940,16 @@ assertAsmTypeFail('glob', USE_ASM + I32 
 // Can't pass SIMD arguments to FFI
 assertAsmTypeFail('glob', 'ffi', USE_ASM + I32 + "var func=ffi.func; function f() {var x=i4(1,2,3,4); func(x);} return f");
 assertAsmTypeFail('glob', 'ffi', USE_ASM + F32 + "var func=ffi.func; function f() {var x=f4(1,2,3,4); func(x);} return f");
 
 // Can't have FFI return SIMD values
 assertAsmTypeFail('glob', 'ffi', USE_ASM + I32 + "var func=ffi.func; function f() {var x=i4(1,2,3,4); x=i4(func());} return f");
 assertAsmTypeFail('glob', 'ffi', USE_ASM + F32 + "var func=ffi.func; function f() {var x=f4(1,2,3,4); x=f4(func());} return f");
 
-// Load / Store
-(function testLoadStore() {
-
-var IMPORTS = USE_ASM + 'var H=new glob.Uint8Array(heap); var i4=glob.SIMD.int32x4; var load=i4.load; var store=i4.store;';
-
-//      Bad number of args
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load();} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3);} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3, 4, 5);} return f");
-
-//      Bad type of args
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3, 5);} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, 5.0);} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0.;load(H, i);} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var H2=new glob.Int32Array(heap); function f(){var i=0;load(H2, i)} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var H2=42; function f(){var i=0;load(H2, i)} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;load(H2, i)} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var f4=glob.SIMD.float32x4; function f(){var i=0;var vec=f4(1,2,3,4); store(H, i, vec)} return f");
-
-//      Bad coercions of returned values
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;return load(H, i)|0;} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;return +load(H, i);} return f");
-
-//      Literal index constants
-var buf = new ArrayBuffer(BUF_MIN);
-var asI32 = new Int32Array(buf);
-asI32[(BUF_MIN >> 2) - 4] = 4;
-asI32[(BUF_MIN >> 2) - 3] = 3;
-asI32[(BUF_MIN >> 2) - 2] = 2;
-asI32[(BUF_MIN >> 2) - 1] = 1;
-
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1) + ");} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1 - 15) + ");} return f");
-asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1 - 16) + ");} return f");
-
-assertAsmLinkFail(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return i4(load(H, " + (BUF_MIN - 15) + "));} return f"), this, {}, buf);
-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return i4(load(H, " + (BUF_MIN - 16) + "));} return f"), this, {}, buf)(), [4, 3, 2, 1]);
-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return i4(load(H, " + BUF_MIN + " - 16 | 0));} return f"), this, {}, buf)(), [4, 3, 2, 1]);
-
-var CONSTANT_INDEX = 42;
-var CONSTANT_BYTE_INDEX = CONSTANT_INDEX << 2;
-
-var loadStoreCode = `
-    "use asm";
-
-    var H = new glob.Uint8Array(heap);
-
-    var i4 = glob.SIMD.int32x4;
-    var i4load = i4.load;
-    var i4store = i4.store;
-
-    var f4 = glob.SIMD.float32x4;
-    var f4load = f4.load;
-    var f4store = f4.store;
-
-    function f32l(i) { i=i|0; return f4(f4load(H, i|0)); }
-    function f32lcst() { return f4(f4load(H, ${CONSTANT_BYTE_INDEX})); }
-    function f32s(i, vec) { i=i|0; vec=f4(vec); f4store(H, i|0, vec); }
-    function f32scst(vec) { vec=f4(vec); f4store(H, ${CONSTANT_BYTE_INDEX}, vec); }
-
-    function i32l(i) { i=i|0; return i4(i4load(H, i|0)); }
-    function i32lcst() { return i4(i4load(H, ${CONSTANT_BYTE_INDEX})); }
-    function i32s(i, vec) { i=i|0; vec=i4(vec); i4store(H, i|0, vec); }
-    function i32scst(vec) { vec=i4(vec); i4store(H, ${CONSTANT_BYTE_INDEX}, vec); }
-
-    function f32lbndcheck(i) {
-        i=i|0;
-        if ((i|0) > ${CONSTANT_BYTE_INDEX}) i=${CONSTANT_BYTE_INDEX};
-        if ((i|0) < 0) i = 0;
-        return f4(f4load(H, i|0));
-    }
-    function f32sbndcheck(i, vec) {
-        i=i|0;
-        vec=f4(vec);
-        if ((i|0) > ${CONSTANT_BYTE_INDEX}) i=${CONSTANT_BYTE_INDEX};
-        if ((i|0) < 0) i = 0;
-        return f4(f4store(H, i|0, vec));
-    }
-
-    return {
-        f32l: f32l,
-        f32lcst: f32lcst,
-        f32s: f32s,
-        f32scst: f32scst,
-        f32lbndcheck: f32lbndcheck,
-        f32sbndcheck: f32sbndcheck,
-        i32l: i32l,
-        i32lcst: i32lcst,
-        i32s: i32s,
-        i32scst: i32scst
-    }
-`;
-
-const SIZE = 0x8000;
-
-var F32 = new Float32Array(SIZE);
-var reset = function() {
-    for (var i = 0; i < SIZE; i++)
-        F32[i] = i + 1;
-};
-reset();
-
-var buf = F32.buffer;
-var m = asmLink(asmCompile('glob', 'ffi', 'heap', loadStoreCode), this, null, buf);
-
-function slice(TA, i, n) { return Array.prototype.slice.call(TA, i, i + n); }
-
-// Float32x4.load
-function f32l(n) { return m.f32l((n|0) << 2 | 0); };
-
-//      Correct accesses
-assertEqX4(f32l(0), slice(F32, 0, 4));
-assertEqX4(f32l(1), slice(F32, 1, 4));
-assertEqX4(f32l(SIZE - 4), slice(F32, SIZE - 4, 4));
-
-assertEqX4(m.f32lcst(), slice(F32, CONSTANT_INDEX, 4));
-assertEqX4(m.f32lbndcheck(CONSTANT_BYTE_INDEX), slice(F32, CONSTANT_INDEX, 4));
-
-//      OOB
-var BatNaN = [NaN, NaN, NaN, NaN] // NaNNaNNaNNaN etc.
-assertEqX4(f32l(-1), BatNaN);
-assertEqX4(f32l(SIZE), BatNaN);
-assertEqX4(f32l(SIZE - 1), BatNaN);
-assertEqX4(f32l(SIZE - 2), BatNaN);
-assertEqX4(f32l(SIZE - 3), BatNaN);
-
-// Float32x4.store
-function f32s(n, v) { return m.f32s((n|0) << 2 | 0, v); };
-
-var vec  = SIMD.float32x4(5,6,7,8);
-var vec2 = SIMD.float32x4(0,1,2,3);
-
-reset();
-f32s(0, vec);
-assertEqX4(vec, slice(F32, 0, 4));
-
-reset();
-f32s(0, vec2);
-assertEqX4(vec2, slice(F32, 0, 4));
-
-reset();
-f32s(4, vec);
-assertEqX4(vec, slice(F32, 4, 4));
-
-reset();
-m.f32scst(vec2);
-assertEqX4(vec2, slice(F32, CONSTANT_INDEX, 4));
-
-reset();
-m.f32sbndcheck(CONSTANT_BYTE_INDEX, vec);
-assertEqX4(vec, slice(F32, CONSTANT_INDEX, 4));
-
-//      OOB
-reset();
-f32s(SIZE - 3, vec);
-f32s(SIZE - 2, vec);
-f32s(SIZE - 1, vec);
-f32s(SIZE, vec);
-for (var i = 0; i < SIZE; i++)
-    assertEq(F32[i], i + 1);
-
-// Int32x4.load
-var I32 = new Int32Array(buf);
-reset = function () {
-    for (var i = 0; i < SIZE; i++)
-        I32[i] = i + 1;
-};
-reset();
-
-function i32(n) { return m.i32l((n|0) << 2 | 0); };
-
-//      Correct accesses
-assertEqX4(i32(0), slice(I32, 0, 4));
-assertEqX4(i32(1), slice(I32, 1, 4));
-assertEqX4(i32(SIZE - 4), slice(I32, SIZE - 4, 4));
-
-assertEqX4(m.i32lcst(), slice(I32, CONSTANT_INDEX, 4));
-
-//      OOB
-assertEqX4(i32(-1), [0,0,0,0]);
-assertEqX4(i32(SIZE), [0,0,0,0]);
-assertEqX4(i32(SIZE - 1), [0,0,0,0]);
-assertEqX4(i32(SIZE - 2), [0,0,0,0]);
-assertEqX4(i32(SIZE - 3), [0,0,0,0]);
-
-// Int32x4.store
-function i32s(n, v) { return m.i32s((n|0) << 2 | 0, v); };
-
-var vec  = SIMD.int32x4(5,6,7,8);
-var vec2 = SIMD.int32x4(0,1,2,3);
-
-reset();
-i32s(0, vec);
-assertEqX4(vec, slice(I32, 0, 4));
-
-reset();
-i32s(0, vec2);
-assertEqX4(vec2, slice(I32, 0, 4));
-
-reset();
-i32s(4, vec);
-assertEqX4(vec, slice(I32, 4, 4));
-
-reset();
-m.i32scst(vec2);
-assertEqX4(vec2, slice(I32, CONSTANT_INDEX, 4));
-
-//      OOB
-reset();
-i32s(SIZE - 3, vec);
-i32s(SIZE - 2, vec);
-i32s(SIZE - 1, vec);
-i32s(SIZE - 0, vec);
-for (var i = 0; i < SIZE; i++)
-    assertEq(I32[i], i + 1);
-
-})();
-
 // 3.3 Internal calls
 // asm.js -> asm.js
 // Retrieving values from asm.js
 var code = USE_ASM + I32 + I32A + `
     var check = ffi.check;
 
     function g() {
         var i = 0;
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -12136,22 +12136,16 @@ class MAsmJSLoadHeap : public MUnaryInst
             setResultType(MIRType_Int32);
             break;
           case AsmJSHeapAccess::Float32:
             setResultType(MIRType_Float32);
             break;
           case AsmJSHeapAccess::Float64:
             setResultType(MIRType_Double);
             break;
-          case AsmJSHeapAccess::Float32x4:
-            setResultType(MIRType_Float32x4);
-            break;
-          case AsmJSHeapAccess::Int32x4:
-            setResultType(MIRType_Int32x4);
-            break;
           case AsmJSHeapAccess::Uint8Clamped:
             MOZ_CRASH("unexpected uint8clamped load heap in asm.js");
         }
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSLoadHeap);
 
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -737,18 +737,16 @@ class AsmJSHeapAccess
          Uint8        = Scalar::Uint8,
          Int16        = Scalar::Int16,
          Uint16       = Scalar::Uint16,
          Int32        = Scalar::Int32,
          Uint32       = Scalar::Uint32,
          Float32      = Scalar::Float32,
          Float64      = Scalar::Float64,
          Uint8Clamped = Scalar::Uint8Clamped,
-         Float32x4,
-         Int32x4
     };
 
   private:
     uint32_t offset_;
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
     uint8_t cmpDelta_;  // the number of bytes from the cmp to the load/store instruction
     uint8_t opLength_;  // the length of the load/store instruction
     ViewType viewType_;
@@ -767,22 +765,21 @@ class AsmJSHeapAccess
     AsmJSHeapAccess(uint32_t offset, uint32_t after, ViewType viewType,
                     AnyRegister loadedReg, uint32_t cmp = NoLengthCheck)
       : offset_(offset),
         cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
         opLength_(after - offset),
         viewType_(viewType),
         loadedReg_(loadedReg.code())
     {}
-    AsmJSHeapAccess(uint32_t offset, uint8_t after, ViewType viewType,
-                    uint32_t cmp = NoLengthCheck)
+    AsmJSHeapAccess(uint32_t offset, uint8_t after, uint32_t cmp = NoLengthCheck)
       : offset_(offset),
         cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
         opLength_(after - offset),
-        viewType_(viewType),
+        viewType_(ViewType(-1)),
         loadedReg_(UINT8_MAX)
     {}
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
     explicit AsmJSHeapAccess(uint32_t offset)
       : offset_(offset)
     {}
 #endif
 
--- a/js/src/jit/shared/BaseAssembler-x86-shared.h
+++ b/js/src/jit/shared/BaseAssembler-x86-shared.h
@@ -3222,29 +3222,16 @@ public:
 
     void movss_mr(const void* address, XMMRegisterID dst)
     {
         spew("movss      %p, %s", address, nameFPReg(dst));
         m_formatter.prefix(PRE_SSE_F3);
         m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
     }
 
-    void movups_mr(const void* address, XMMRegisterID dst)
-    {
-        spew("movups     %p, %s", address, nameFPReg(dst));
-        m_formatter.twoByteOp(OP2_MOVPS_VpsWps, (RegisterID)dst, address);
-    }
-
-    void movdqu_mr(const void* address, XMMRegisterID dst)
-    {
-        spew("movdqu     %p, %s", address, nameFPReg(dst));
-        m_formatter.prefix(PRE_SSE_F3);
-        m_formatter.twoByteOp(OP2_MOVDQ_VdqWdq, (RegisterID)dst, address);
-    }
-
     void movsd_rm(XMMRegisterID src, const void* address)
     {
         spew("movsd      %s, %p", nameFPReg(src), address);
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
     }
 
     void movss_rm(XMMRegisterID src, const void* address)
@@ -3259,29 +3246,16 @@ public:
         spew("movdqa     %s, %p", nameFPReg(src), address);
         m_formatter.prefix(PRE_SSE_66);
         m_formatter.twoByteOp(OP2_MOVDQ_WdqVdq, (RegisterID)src, address);
     }
 
     void movaps_rm(XMMRegisterID src, const void* address)
     {
         spew("movaps     %s, %p", nameFPReg(src), address);
-        m_formatter.twoByteOp(OP2_MOVAPS_WsdVsd, (RegisterID)src, address);
-    }
-
-    void movdqu_rm(XMMRegisterID src, const void* address)
-    {
-        spew("movdqu     %s, %p", nameFPReg(src), address);
-        m_formatter.prefix(PRE_SSE_F3);
-        m_formatter.twoByteOp(OP2_MOVDQ_WdqVdq, (RegisterID)src, address);
-    }
-
-    void movups_rm(XMMRegisterID src, const void* address)
-    {
-        spew("movups     %s, %p", nameFPReg(src), address);
         m_formatter.twoByteOp(OP2_MOVPS_WpsVps, (RegisterID)src, address);
     }
 #ifdef JS_CODEGEN_X64
     JmpSrc movsd_ripr(XMMRegisterID dst)
     {
         spew("movsd      ?(%%rip), %s", nameFPReg(dst));
         m_formatter.prefix(PRE_SSE_F2);
         m_formatter.twoByteRipOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, 0);
@@ -3355,40 +3329,28 @@ public:
 
 
     void movups_rm(XMMRegisterID src, int offset, RegisterID base)
     {
         spew("movups     %s, %s0x%x(%s)",
              nameFPReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.twoByteOp(OP2_MOVPS_WpsVps, (RegisterID)src, base, offset);
     }
-    void movups_rm_disp32(XMMRegisterID src, int offset, RegisterID base)
-    {
-        spew("movups     %s, %s0x%x(%s)",
-             nameFPReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
-        m_formatter.twoByteOp_disp32(OP2_MOVPS_WpsVps, (RegisterID)src, base, offset);
-    }
     void movups_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
     {
         spew("movups     %s, %d(%s,%s,%d)",
              nameFPReg(src), offset, nameIReg(base), nameIReg(index), 1<<scale);
         m_formatter.twoByteOp(OP2_MOVPS_WpsVps, (RegisterID)src, base, index, scale, offset);
     }
     void movups_mr(int offset, RegisterID base, XMMRegisterID dst)
     {
         spew("movups     %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
         m_formatter.twoByteOp(OP2_MOVPS_VpsWps, (RegisterID)dst, base, offset);
     }
-    void movups_mr_disp32(int offset, RegisterID base, XMMRegisterID dst)
-    {
-        spew("movups     %s0x%x(%s), %s",
-             PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
-        m_formatter.twoByteOp_disp32(OP2_MOVPS_VpsWps, (RegisterID)dst, base, offset);
-    }
     void movups_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
     {
         spew("movups     %d(%s,%s,%d), %s",
              offset, nameIReg(base), nameIReg(index), 1<<scale, nameFPReg(dst));
         m_formatter.twoByteOp(OP2_MOVPS_VpsWps, (RegisterID)dst, base, index, scale, offset);
     }
 
     void movapd_rr(XMMRegisterID src, XMMRegisterID dst)
@@ -3431,48 +3393,32 @@ public:
     void movdqu_rm(XMMRegisterID src, int offset, RegisterID base)
     {
         spew("movdqu     %s, %s0x%x(%s)",
              nameFPReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.prefix(PRE_SSE_F3);
         m_formatter.twoByteOp(OP2_MOVDQ_WdqVdq, (RegisterID)src, base, offset);
     }
 
-    void movdqu_rm_disp32(XMMRegisterID src, int offset, RegisterID base)
-    {
-        spew("movdqu     %s, %s0x%x(%s)",
-             nameFPReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
-        m_formatter.prefix(PRE_SSE_F3);
-        m_formatter.twoByteOp_disp32(OP2_MOVDQ_WdqVdq, (RegisterID)src, base, offset);
-    }
-
     void movdqu_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
     {
         spew("movdqu     %s, %d(%s,%s,%d)",
              nameFPReg(src), offset, nameIReg(base), nameIReg(index), 1<<scale);
         m_formatter.prefix(PRE_SSE_F3);
         m_formatter.twoByteOp(OP2_MOVDQ_WdqVdq, (RegisterID)src, base, index, scale, offset);
     }
 
     void movdqu_mr(int offset, RegisterID base, XMMRegisterID dst)
     {
         spew("movdqu     %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
         m_formatter.prefix(PRE_SSE_F3);
         m_formatter.twoByteOp(OP2_MOVDQ_VdqWdq, (RegisterID)dst, base, offset);
     }
 
-    void movdqu_mr_disp32(int offset, RegisterID base, XMMRegisterID dst)
-    {
-        spew("movdqu     %s0x%x(%s), %s",
-             PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
-        m_formatter.prefix(PRE_SSE_F3);
-        m_formatter.twoByteOp_disp32(OP2_MOVDQ_VdqWdq, (RegisterID)dst, base, offset);
-    }
-
     void movdqu_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
     {
         spew("movdqu     %d(%s,%s,%d), %s",
              offset, nameIReg(base), nameIReg(index), 1<<scale, nameFPReg(dst));
         m_formatter.prefix(PRE_SSE_F3);
         m_formatter.twoByteOp(OP2_MOVDQ_VdqWdq, (RegisterID)dst, base, index, scale, offset);
     }
 
--- a/js/src/jit/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.cpp
@@ -348,22 +348,16 @@ CodeGeneratorX86Shared::visitOutOfLineLo
 {
     switch (ool->viewType()) {
       case AsmJSHeapAccess::Float32:
         masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
         break;
       case AsmJSHeapAccess::Float64:
         masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
         break;
-      case AsmJSHeapAccess::Float32x4:
-        masm.loadConstantFloat32x4(SimdConstant::SplatX4(float(GenericNaN())), ool->dest().fpu());
-        break;
-      case AsmJSHeapAccess::Int32x4:
-        masm.loadConstantInt32x4(SimdConstant::SplatX4(0), ool->dest().fpu());
-        break;
       case AsmJSHeapAccess::Int8:
       case AsmJSHeapAccess::Uint8:
       case AsmJSHeapAccess::Int16:
       case AsmJSHeapAccess::Uint16:
       case AsmJSHeapAccess::Int32:
       case AsmJSHeapAccess::Uint32:
       case AsmJSHeapAccess::Uint8Clamped:
         Register destReg = ool->dest().gpr();
--- a/js/src/jit/shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/shared/MacroAssembler-x86-shared.h
@@ -839,25 +839,19 @@ class MacroAssemblerX86Shared : public A
         movdqa(src, Operand(dest));
     }
     void moveAlignedInt32x4(FloatRegister src, FloatRegister dest) {
         movdqa(src, dest);
     }
     void loadUnalignedInt32x4(const Address &src, FloatRegister dest) {
         movdqu(Operand(src), dest);
     }
-    void loadUnalignedInt32x4(const Operand &src, FloatRegister dest) {
-        movdqu(src, dest);
-    }
     void storeUnalignedInt32x4(FloatRegister src, const Address &dest) {
         movdqu(src, Operand(dest));
     }
-    void storeUnalignedInt32x4(FloatRegister src, const Operand &dest) {
-        movdqu(src, dest);
-    }
     void packedEqualInt32x4(const Operand &src, FloatRegister dest) {
         pcmpeqd(src, dest);
     }
     void packedGreaterThanInt32x4(const Operand &src, FloatRegister dest) {
         pcmpgtd(src, dest);
     }
     void packedAddInt32(const Operand &src, FloatRegister dest) {
         paddd(src, dest);
@@ -908,25 +902,19 @@ class MacroAssemblerX86Shared : public A
         movaps(src, Operand(dest));
     }
     void moveAlignedFloat32x4(FloatRegister src, FloatRegister dest) {
         movaps(src, dest);
     }
     void loadUnalignedFloat32x4(const Address &src, FloatRegister dest) {
         movups(Operand(src), dest);
     }
-    void loadUnalignedFloat32x4(const Operand &src, FloatRegister dest) {
-        movups(src, dest);
-    }
     void storeUnalignedFloat32x4(FloatRegister src, const Address &dest) {
         movups(src, Operand(dest));
     }
-    void storeUnalignedFloat32x4(FloatRegister src, const Operand &dest) {
-        movups(src, dest);
-    }
     void packedAddFloat32(const Operand &src, FloatRegister dest) {
         addps(src, dest);
     }
     void packedSubFloat32(const Operand &src, FloatRegister dest) {
         subps(src, dest);
     }
     void packedMulFloat32(const Operand &src, FloatRegister dest) {
         mulps(src, dest);
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -300,18 +300,16 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAs
       case AsmJSHeapAccess::Int8:      masm.movsbl(srcAddr, ToRegister(out)); break;
       case AsmJSHeapAccess::Uint8:     masm.movzbl(srcAddr, ToRegister(out)); break;
       case AsmJSHeapAccess::Int16:     masm.movswl(srcAddr, ToRegister(out)); break;
       case AsmJSHeapAccess::Uint16:    masm.movzwl(srcAddr, ToRegister(out)); break;
       case AsmJSHeapAccess::Int32:
       case AsmJSHeapAccess::Uint32:    masm.movl(srcAddr, ToRegister(out)); break;
       case AsmJSHeapAccess::Float32:   masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
       case AsmJSHeapAccess::Float64:   masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
-      case AsmJSHeapAccess::Float32x4: masm.loadUnalignedFloat32x4(srcAddr, ToFloatRegister(out)); break;
-      case AsmJSHeapAccess::Int32x4:   masm.loadUnalignedInt32x4(srcAddr, ToFloatRegister(out)); break;
       case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
     }
     uint32_t after = masm.size();
     if (ool)
         masm.bind(ool->rejoin());
     memoryBarrier(ins->mir()->barrierAfter());
     masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out), maybeCmpOffset));
     return true;
@@ -348,40 +346,36 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LA
           case AsmJSHeapAccess::Int8:
           case AsmJSHeapAccess::Uint8:        masm.movb(Imm32(ToInt32(ins->value())), dstAddr); break;
           case AsmJSHeapAccess::Int16:
           case AsmJSHeapAccess::Uint16:       masm.movw(Imm32(ToInt32(ins->value())), dstAddr); break;
           case AsmJSHeapAccess::Int32:
           case AsmJSHeapAccess::Uint32:       masm.movl(Imm32(ToInt32(ins->value())), dstAddr); break;
           case AsmJSHeapAccess::Float32:
           case AsmJSHeapAccess::Float64:
-          case AsmJSHeapAccess::Float32x4:
-          case AsmJSHeapAccess::Int32x4:
           case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
         }
     } else {
         switch (vt) {
           case AsmJSHeapAccess::Int8:
           case AsmJSHeapAccess::Uint8:        masm.movb(ToRegister(ins->value()), dstAddr); break;
           case AsmJSHeapAccess::Int16:
           case AsmJSHeapAccess::Uint16:       masm.movw(ToRegister(ins->value()), dstAddr); break;
           case AsmJSHeapAccess::Int32:
           case AsmJSHeapAccess::Uint32:       masm.movl(ToRegister(ins->value()), dstAddr); break;
           case AsmJSHeapAccess::Float32:      masm.storeFloat32(ToFloatRegister(ins->value()), dstAddr); break;
           case AsmJSHeapAccess::Float64:      masm.storeDouble(ToFloatRegister(ins->value()), dstAddr); break;
-          case AsmJSHeapAccess::Float32x4:    masm.storeUnalignedFloat32x4(ToFloatRegister(ins->value()), dstAddr); break;
-          case AsmJSHeapAccess::Int32x4:      masm.storeUnalignedInt32x4(ToFloatRegister(ins->value()), dstAddr); break;
           case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
         }
     }
     uint32_t after = masm.size();
     if (rejoin.used())
         masm.bind(&rejoin);
     memoryBarrier(ins->mir()->barrierAfter());
-    masm.append(AsmJSHeapAccess(before, after, vt, maybeCmpOffset));
+    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
     return true;
 }
 
 bool
 CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
 {
     MAsmJSCompareExchangeHeap *mir = ins->mir();
 
@@ -413,17 +407,17 @@ CodeGeneratorX64::visitAsmJSCompareExcha
                                         srcAddr,
                                         oldval,
                                         newval,
                                         InvalidReg,
                                         ToAnyRegister(ins->output()));
     uint32_t after = masm.size();
     if (rejoin.used())
         masm.bind(&rejoin);
-    masm.append(AsmJSHeapAccess(after, after, mir->viewType(), maybeCmpOffset));
+    masm.append(AsmJSHeapAccess(after, after, maybeCmpOffset));
     return true;
 }
 
 bool
 CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
 {
     MAsmJSAtomicBinopHeap *mir = ins->mir();
 
@@ -464,17 +458,17 @@ CodeGeneratorX64::visitAsmJSAtomicBinopH
                                         srcAddr,
                                         temp,
                                         InvalidReg,
                                         ToAnyRegister(ins->output()));
     }
     uint32_t after = masm.size();
     if (rejoin.used())
         masm.bind(&rejoin);
-    masm.append(AsmJSHeapAccess(after, after, mir->viewType(), maybeCmpOffset));
+    masm.append(AsmJSHeapAccess(after, after, maybeCmpOffset));
     return true;
 }
 
 bool
 CodeGeneratorX64::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
 {
     MAsmJSLoadGlobalVar *mir = ins->mir();
 
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -168,18 +168,16 @@ LIRGeneratorX64::visitAsmJSStoreHeap(MAs
       case AsmJSHeapAccess::Int16:
       case AsmJSHeapAccess::Uint16:
       case AsmJSHeapAccess::Int32:
       case AsmJSHeapAccess::Uint32:
         lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterOrConstantAtStart(ins->value()));
         break;
       case AsmJSHeapAccess::Float32:
       case AsmJSHeapAccess::Float64:
-      case AsmJSHeapAccess::Float32x4:
-      case AsmJSHeapAccess::Int32x4:
         lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
         break;
       case AsmJSHeapAccess::Uint8Clamped:
         MOZ_CRASH("unexpected array type");
     }
 
     return add(lir, ins);
 }
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -460,26 +460,16 @@ class Assembler : public AssemblerX86Sha
         masm.movss_mr_disp32(src.offset, src.base.code(), dest.code());
         return CodeOffsetLabel(masm.currentOffset());
     }
     CodeOffsetLabel movsdWithPatch(Address src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         masm.movsd_mr_disp32(src.offset, src.base.code(), dest.code());
         return CodeOffsetLabel(masm.currentOffset());
     }
-    CodeOffsetLabel movupsWithPatch(Address src, FloatRegister dest) {
-        MOZ_ASSERT(HasSSE2());
-        masm.movups_mr_disp32(src.offset, src.base.code(), dest.code());
-        return CodeOffsetLabel(masm.currentOffset());
-    }
-    CodeOffsetLabel movdquWithPatch(Address src, FloatRegister dest) {
-        MOZ_ASSERT(HasSSE2());
-        masm.movdqu_mr_disp32(src.offset, src.base.code(), dest.code());
-        return CodeOffsetLabel(masm.currentOffset());
-    }
 
     // Store to *(base + disp32) where disp32 can be patched.
     CodeOffsetLabel movbWithPatch(Register src, Address dest) {
         masm.movb_rm_disp32(src.code(), dest.offset, dest.base.code());
         return CodeOffsetLabel(masm.currentOffset());
     }
     CodeOffsetLabel movwWithPatch(Register src, Address dest) {
         masm.movw_rm_disp32(src.code(), dest.offset, dest.base.code());
@@ -494,26 +484,16 @@ class Assembler : public AssemblerX86Sha
         masm.movss_rm_disp32(src.code(), dest.offset, dest.base.code());
         return CodeOffsetLabel(masm.currentOffset());
     }
     CodeOffsetLabel movsdWithPatch(FloatRegister src, Address dest) {
         MOZ_ASSERT(HasSSE2());
         masm.movsd_rm_disp32(src.code(), dest.offset, dest.base.code());
         return CodeOffsetLabel(masm.currentOffset());
     }
-    CodeOffsetLabel movupsWithPatch(FloatRegister src, Address dest) {
-        MOZ_ASSERT(HasSSE2());
-        masm.movups_rm_disp32(src.code(), dest.offset, dest.base.code());
-        return CodeOffsetLabel(masm.currentOffset());
-    }
-    CodeOffsetLabel movdquWithPatch(FloatRegister src, Address dest) {
-        MOZ_ASSERT(HasSSE2());
-        masm.movdqu_rm_disp32(src.code(), dest.offset, dest.base.code());
-        return CodeOffsetLabel(masm.currentOffset());
-    }
 
     // Load from *(addr + index*scale) where addr can be patched.
     CodeOffsetLabel movlWithPatch(PatchedAbsoluteAddress addr, Register index, Scale scale,
                                   Register dest)
     {
         masm.movl_mr(addr.addr, index.code(), scale, dest.code());
         return CodeOffsetLabel(masm.currentOffset());
     }
@@ -549,31 +529,21 @@ class Assembler : public AssemblerX86Sha
         masm.movsd_mr(src.addr, dest.code());
         return CodeOffsetLabel(masm.currentOffset());
     }
     CodeOffsetLabel movdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         masm.movdqa_mr(src.addr, dest.code());
         return CodeOffsetLabel(masm.currentOffset());
     }
-    CodeOffsetLabel movdquWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
-        MOZ_ASSERT(HasSSE2());
-        masm.movdqu_mr(src.addr, dest.code());
-        return CodeOffsetLabel(masm.currentOffset());
-    }
     CodeOffsetLabel movapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
         MOZ_ASSERT(HasSSE2());
         masm.movaps_mr(src.addr, dest.code());
         return CodeOffsetLabel(masm.currentOffset());
     }
-    CodeOffsetLabel movupsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
-        MOZ_ASSERT(HasSSE2());
-        masm.movups_mr(src.addr, dest.code());
-        return CodeOffsetLabel(masm.currentOffset());
-    }
 
     // Store to *dest where dest can be patched.
     CodeOffsetLabel movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
         masm.movb_rm(src.code(), dest.addr);
         return CodeOffsetLabel(masm.currentOffset());
     }
     CodeOffsetLabel movwWithPatch(Register src, PatchedAbsoluteAddress dest) {
         masm.movw_rm(src.code(), dest.addr);
@@ -598,26 +568,16 @@ class Assembler : public AssemblerX86Sha
         masm.movdqa_rm(src.code(), dest.addr);
         return CodeOffsetLabel(masm.currentOffset());
     }
     CodeOffsetLabel movapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
         MOZ_ASSERT(HasSSE2());
         masm.movaps_rm(src.code(), dest.addr);
         return CodeOffsetLabel(masm.currentOffset());
     }
-    CodeOffsetLabel movdquWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
-        MOZ_ASSERT(HasSSE2());
-        masm.movdqu_rm(src.code(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
-    }
-    CodeOffsetLabel movupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
-        MOZ_ASSERT(HasSSE2());
-        masm.movups_rm(src.code(), dest.addr);
-        return CodeOffsetLabel(masm.currentOffset());
-    }
 
     void loadAsmJSActivation(Register dest) {
         CodeOffsetLabel label = movlWithPatch(PatchedAbsoluteAddress(), dest);
         append(AsmJSGlobalAccess(label, AsmJSActivationGlobalDataOffset));
     }
     void loadAsmJSHeapRegisterFromGlobalData() {
         // x86 doesn't have a pinned heap register.
     }
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -275,18 +275,16 @@ CodeGeneratorX86::loadViewTypeElement(As
       case AsmJSHeapAccess::Uint8Clamped:
       case AsmJSHeapAccess::Uint8:        masm.movzblWithPatch(srcAddr, ToRegister(out)); break;
       case AsmJSHeapAccess::Int16:        masm.movswlWithPatch(srcAddr, ToRegister(out)); break;
       case AsmJSHeapAccess::Uint16:       masm.movzwlWithPatch(srcAddr, ToRegister(out)); break;
       case AsmJSHeapAccess::Int32:
       case AsmJSHeapAccess::Uint32:       masm.movlWithPatch(srcAddr, ToRegister(out)); break;
       case AsmJSHeapAccess::Float32:      masm.movssWithPatch(srcAddr, ToFloatRegister(out)); break;
       case AsmJSHeapAccess::Float64:      masm.movsdWithPatch(srcAddr, ToFloatRegister(out)); break;
-      case AsmJSHeapAccess::Float32x4:    masm.movupsWithPatch(srcAddr, ToFloatRegister(out)); break;
-      case AsmJSHeapAccess::Int32x4:      masm.movdquWithPatch(srcAddr, ToFloatRegister(out)); break;
     }
 }
 
 template<typename T>
 bool
 CodeGeneratorX86::loadAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
                                              const LDefinition *out)
 {
@@ -421,30 +419,28 @@ CodeGeneratorX86::storeViewTypeElement(A
       case AsmJSHeapAccess::Uint8Clamped:
       case AsmJSHeapAccess::Uint8:        masm.movbWithPatch(ToRegister(value), dstAddr); break;
       case AsmJSHeapAccess::Int16:
       case AsmJSHeapAccess::Uint16:       masm.movwWithPatch(ToRegister(value), dstAddr); break;
       case AsmJSHeapAccess::Int32:
       case AsmJSHeapAccess::Uint32:       masm.movlWithPatch(ToRegister(value), dstAddr); break;
       case AsmJSHeapAccess::Float32:      masm.movssWithPatch(ToFloatRegister(value), dstAddr); break;
       case AsmJSHeapAccess::Float64:      masm.movsdWithPatch(ToFloatRegister(value), dstAddr); break;
-      case AsmJSHeapAccess::Float32x4:    masm.movupsWithPatch(ToFloatRegister(value), dstAddr); break;
-      case AsmJSHeapAccess::Int32x4:      masm.movdquWithPatch(ToFloatRegister(value), dstAddr); break;
     }
 }
 
 template<typename T>
 void
 CodeGeneratorX86::storeAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt,
                                               const LAllocation *value, const T &dstAddr)
 {
     uint32_t before = masm.size();
     storeViewTypeElement(vt, value, dstAddr);
     uint32_t after = masm.size();
-    masm.append(AsmJSHeapAccess(before, after, vt));
+    masm.append(AsmJSHeapAccess(before, after));
 }
 
 bool
 CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
 {
     MStoreTypedArrayElementStatic *mir = ins->mir();
     AsmJSHeapAccess::ViewType vt = AsmJSHeapAccess::ViewType(mir->viewType());
 
@@ -495,17 +491,17 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LA
     Label rejoin;
     masm.j(Assembler::AboveOrEqual, &rejoin);
 
     uint32_t before = masm.size();
     storeViewTypeElement(vt, value, dstAddr);
     uint32_t after = masm.size();
     masm.bind(&rejoin);
     memoryBarrier(ins->mir()->barrierAfter());
-    masm.append(AsmJSHeapAccess(before, after, vt, cmp.offset()));
+    masm.append(AsmJSHeapAccess(before, after, cmp.offset()));
     return true;
 }
 
 bool
 CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
 {
     MAsmJSCompareExchangeHeap *mir = ins->mir();
 
@@ -534,17 +530,17 @@ CodeGeneratorX86::visitAsmJSCompareExcha
         masm.bind(&goahead);
     }
 
     // Add in the actual heap pointer explicitly, to avoid opening up
     // the abstraction that is compareExchangeToTypedIntArray at this time.
     uint32_t before = masm.size();
     masm.addl_wide(Imm32(0), ptrReg);
     uint32_t after = masm.size();
-    masm.append(AsmJSHeapAccess(before, after, mir->viewType(), maybeCmpOffset));
+    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
 
     Address memAddr(ToRegister(ptr), 0);
     masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                         memAddr,
                                         oldval,
                                         newval,
                                         InvalidReg,
                                         ToAnyRegister(ins->output()));
@@ -585,17 +581,17 @@ CodeGeneratorX86::visitAsmJSAtomicBinopH
         masm.bind(&goahead);
     }
 
     // Add in the actual heap pointer explicitly, to avoid opening up
     // the abstraction that is atomicBinopToTypedIntArray at this time.
     uint32_t before = masm.size();
     masm.addl_wide(Imm32(0), ptrReg);
     uint32_t after = masm.size();
-    masm.append(AsmJSHeapAccess(before, after, mir->viewType(), maybeCmpOffset));
+    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
 
     Address memAddr(ptrReg, 0);
     if (value->isConstant()) {
         masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                         Imm32(ToInt32(value)),
                                         memAddr,
                                         temp,
                                         InvalidReg,
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -245,17 +245,16 @@ LIRGeneratorX86::visitAsmJSStoreHeap(MAs
         switch (ins->viewType()) {
           case AsmJSHeapAccess::Int8: case AsmJSHeapAccess::Uint8:
             // See comment below.
             lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useFixed(ins->value(), eax));
             break;
           case AsmJSHeapAccess::Int16: case AsmJSHeapAccess::Uint16:
           case AsmJSHeapAccess::Int32: case AsmJSHeapAccess::Uint32:
           case AsmJSHeapAccess::Float32: case AsmJSHeapAccess::Float64:
-          case AsmJSHeapAccess::Float32x4: case AsmJSHeapAccess::Int32x4:
             // See comment below.
             lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
             break;
           case AsmJSHeapAccess::Uint8Clamped:
             MOZ_CRASH("unexpected array type");
         }
         return add(lir, ins);
     }
@@ -263,17 +262,16 @@ LIRGeneratorX86::visitAsmJSStoreHeap(MAs
     switch (ins->viewType()) {
       case AsmJSHeapAccess::Int8: case AsmJSHeapAccess::Uint8:
         // See comment for LIRGeneratorX86::useByteOpRegister.
         lir = new(alloc()) LAsmJSStoreHeap(useRegister(ins->ptr()), useFixed(ins->value(), eax));
         break;
       case AsmJSHeapAccess::Int16: case AsmJSHeapAccess::Uint16:
       case AsmJSHeapAccess::Int32: case AsmJSHeapAccess::Uint32:
       case AsmJSHeapAccess::Float32: case AsmJSHeapAccess::Float64:
-      case AsmJSHeapAccess::Float32x4: case AsmJSHeapAccess::Int32x4:
         // For now, don't allow constant values. The immediate operand
         // affects instruction layout which affects patching.
         lir = new(alloc()) LAsmJSStoreHeap(useRegisterAtStart(ptr), useRegisterAtStart(ins->value()));
         break;
       case AsmJSHeapAccess::Uint8Clamped:
         MOZ_CRASH("unexpected array type");
     }