Bug 1079361: Refactor AsmJSHeapAccess to include the view type of the heap access; r=luke
☠☠ backed out by 2c39d4a57818 ☠☠
author: Benjamin Bouvier <benj@benj.me>
date: Fri, 21 Nov 2014 12:12:29 +0100
changeset: 216908 74527e0493c585acfb6eebd8d6aa622b7939d6a2
parent: 216907 0432a14c9283f68b3f7487f20d14f9214b3fe8b4
child: 216909 39e6791cc5c5a0586f97094c83fc86c5699a1c63
push id: 27868
push user: kwierso@gmail.com
push date: Sat, 22 Nov 2014 00:36:06 +0000
treeherder: mozilla-central@7ab92d922d19
reviewers: luke
bugs: 1079361
milestone: 36.0a1
js/src/asmjs/AsmJSSignalHandlers.cpp
js/src/asmjs/AsmJSValidate.cpp
js/src/jit/MIR.h
js/src/jit/shared/Assembler-shared.h
js/src/jit/shared/CodeGenerator-x86-shared.cpp
js/src/jit/shared/CodeGenerator-x86-shared.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/Lowering-x64.cpp
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/CodeGenerator-x86.h
js/src/jit/x86/Lowering-x86.cpp
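
The heart of the patch is the new AsmJSHeapAccess::ViewType enum added to
Assembler-shared.h below: each enumerator is pinned to the numeric value of
the corresponding Scalar::Type, so the validator and the code generators can
convert between the two with a plain cast. A minimal standalone sketch of
that invariant (both types are stubbed out here as local stand-ins; the real
definitions live in SpiderMonkey, not in this sketch):

    // Sketch only: Scalar and AsmJSHeapAccess are local stand-ins for the
    // SpiderMonkey types; the values mirror the enum in the patch below.
    #include <cassert>

    namespace Scalar {
        enum Type { Int8, Uint8, Int16, Uint16, Int32, Uint32,
                    Float32, Float64, Uint8Clamped };
    }

    struct AsmJSHeapAccess {
        enum ViewType {
            Int8 = Scalar::Int8,       Uint8 = Scalar::Uint8,
            Int16 = Scalar::Int16,     Uint16 = Scalar::Uint16,
            Int32 = Scalar::Int32,     Uint32 = Scalar::Uint32,
            Float32 = Scalar::Float32, Float64 = Scalar::Float64,
            Uint8Clamped = Scalar::Uint8Clamped,
        };
    };

    int main() {
        // The AsmJSHeapAccess::ViewType(viewType) casts used throughout
        // AsmJSValidate.cpp and the code generators preserve the value in
        // both directions.
        Scalar::Type vt = Scalar::Float32;
        AsmJSHeapAccess::ViewType view = AsmJSHeapAccess::ViewType(vt);
        assert(view == AsmJSHeapAccess::Float32);
        assert(Scalar::Type(view) == vt);
        return 0;
    }
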
--- a/js/src/asmjs/AsmJSSignalHandlers.cpp
+++ b/js/src/asmjs/AsmJSSignalHandlers.cpp
@@ -326,55 +326,68 @@ ContextToPC(CONTEXT *context)
 #else
      return reinterpret_cast<uint8_t**>(&PC_sig(context));
 #endif
 }
 
 #if defined(JS_CODEGEN_X64)
 template <class T>
 static void
-SetXMMRegToNaN(bool isFloat32, T *xmm_reg)
+SetXMMRegToNaN(AsmJSHeapAccess::ViewType viewType, T *xmm_reg)
 {
-    if (isFloat32) {
+    switch (viewType) {
+      case AsmJSHeapAccess::Float32: {
         JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(float));
         float *floats = reinterpret_cast<float*>(xmm_reg);
         floats[0] = GenericNaN();
         floats[1] = 0;
         floats[2] = 0;
         floats[3] = 0;
-    } else {
+        break;
+      }
+      case AsmJSHeapAccess::Float64: {
         JS_STATIC_ASSERT(sizeof(T) == 2 * sizeof(double));
         double *dbls = reinterpret_cast<double*>(xmm_reg);
         dbls[0] = GenericNaN();
         dbls[1] = 0;
+        break;
+      }
+      case AsmJSHeapAccess::Int8:
+      case AsmJSHeapAccess::Uint8:
+      case AsmJSHeapAccess::Int16:
+      case AsmJSHeapAccess::Uint16:
+      case AsmJSHeapAccess::Int32:
+      case AsmJSHeapAccess::Uint32:
+      case AsmJSHeapAccess::Uint8Clamped:
+        MOZ_CRASH("unexpected type in SetXMMRegToNaN");
     }
 }
 
 # if !defined(XP_MACOSX)
 static void
-SetRegisterToCoercedUndefined(CONTEXT *context, bool isFloat32, AnyRegister reg)
+SetRegisterToCoercedUndefined(CONTEXT *context, AsmJSHeapAccess::ViewType viewType, AnyRegister reg)
 {
     if (reg.isFloat()) {
         switch (reg.fpu().code()) {
-          case X86Registers::xmm0:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 0)); break;
-          case X86Registers::xmm1:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 1)); break;
-          case X86Registers::xmm2:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 2)); break;
-          case X86Registers::xmm3:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 3)); break;
-          case X86Registers::xmm4:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 4)); break;
-          case X86Registers::xmm5:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 5)); break;
-          case X86Registers::xmm6:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 6)); break;
-          case X86Registers::xmm7:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 7)); break;
-          case X86Registers::xmm8:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 8)); break;
-          case X86Registers::xmm9:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 9)); break;
-          case X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 10)); break;
-          case X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 11)); break;
-          case X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 12)); break;
-          case X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 13)); break;
-          case X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 14)); break;
-          case X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 15)); break;
+          case X86Registers::xmm0:  SetXMMRegToNaN(viewType, &XMM_sig(context, 0)); break;
+          case X86Registers::xmm1:  SetXMMRegToNaN(viewType, &XMM_sig(context, 1)); break;
+          case X86Registers::xmm2:  SetXMMRegToNaN(viewType, &XMM_sig(context, 2)); break;
+          case X86Registers::xmm3:  SetXMMRegToNaN(viewType, &XMM_sig(context, 3)); break;
+          case X86Registers::xmm4:  SetXMMRegToNaN(viewType, &XMM_sig(context, 4)); break;
+          case X86Registers::xmm5:  SetXMMRegToNaN(viewType, &XMM_sig(context, 5)); break;
+          case X86Registers::xmm6:  SetXMMRegToNaN(viewType, &XMM_sig(context, 6)); break;
+          case X86Registers::xmm7:  SetXMMRegToNaN(viewType, &XMM_sig(context, 7)); break;
+          case X86Registers::xmm8:  SetXMMRegToNaN(viewType, &XMM_sig(context, 8)); break;
+          case X86Registers::xmm9:  SetXMMRegToNaN(viewType, &XMM_sig(context, 9)); break;
+          case X86Registers::xmm10: SetXMMRegToNaN(viewType, &XMM_sig(context, 10)); break;
+          case X86Registers::xmm11: SetXMMRegToNaN(viewType, &XMM_sig(context, 11)); break;
+          case X86Registers::xmm12: SetXMMRegToNaN(viewType, &XMM_sig(context, 12)); break;
+          case X86Registers::xmm13: SetXMMRegToNaN(viewType, &XMM_sig(context, 13)); break;
+          case X86Registers::xmm14: SetXMMRegToNaN(viewType, &XMM_sig(context, 14)); break;
+          case X86Registers::xmm15: SetXMMRegToNaN(viewType, &XMM_sig(context, 15)); break;
           default: MOZ_CRASH();
         }
     } else {
         switch (reg.gpr().code()) {
           case X86Registers::eax: RAX_sig(context) = 0; break;
           case X86Registers::ecx: RCX_sig(context) = 0; break;
           case X86Registers::edx: RDX_sig(context) = 0; break;
           case X86Registers::ebx: RBX_sig(context) = 0; break;
@@ -450,17 +463,17 @@ HandleFault(PEXCEPTION_POINTERS exceptio
 
     // We now know that this is an out-of-bounds access made by an asm.js
     // load/store that we should handle. If this is a load, assign the
     // JS-defined result value to the destination register (ToInt32(undefined)
     // or ToNumber(undefined), determined by the type of the destination
     // register) and set the PC to the next op. Upon return from the handler,
     // execution will resume at this next PC.
     if (heapAccess->isLoad())
-        SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
+        SetRegisterToCoercedUndefined(context, heapAccess->viewType(), heapAccess->loadedReg());
     *ppc += heapAccess->opLength();
 
     return true;
 # else
     return false;
 # endif
 }
 
@@ -500,34 +513,34 @@ SetRegisterToCoercedUndefined(mach_port_
         kern_return_t kret;
 
         x86_float_state64_t fstate;
         unsigned int count = x86_FLOAT_STATE64_COUNT;
         kret = thread_get_state(rtThread, x86_FLOAT_STATE64, (thread_state_t) &fstate, &count);
         if (kret != KERN_SUCCESS)
             return false;
 
-        bool f32 = heapAccess.isFloat32Load();
+        AsmJSHeapAccess::ViewType viewType = heapAccess.viewType();
         switch (heapAccess.loadedReg().fpu().code()) {
-          case X86Registers::xmm0:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm0); break;
-          case X86Registers::xmm1:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm1); break;
-          case X86Registers::xmm2:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm2); break;
-          case X86Registers::xmm3:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm3); break;
-          case X86Registers::xmm4:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm4); break;
-          case X86Registers::xmm5:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm5); break;
-          case X86Registers::xmm6:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm6); break;
-          case X86Registers::xmm7:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm7); break;
-          case X86Registers::xmm8:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm8); break;
-          case X86Registers::xmm9:  SetXMMRegToNaN(f32, &fstate.__fpu_xmm9); break;
-          case X86Registers::xmm10: SetXMMRegToNaN(f32, &fstate.__fpu_xmm10); break;
-          case X86Registers::xmm11: SetXMMRegToNaN(f32, &fstate.__fpu_xmm11); break;
-          case X86Registers::xmm12: SetXMMRegToNaN(f32, &fstate.__fpu_xmm12); break;
-          case X86Registers::xmm13: SetXMMRegToNaN(f32, &fstate.__fpu_xmm13); break;
-          case X86Registers::xmm14: SetXMMRegToNaN(f32, &fstate.__fpu_xmm14); break;
-          case X86Registers::xmm15: SetXMMRegToNaN(f32, &fstate.__fpu_xmm15); break;
+          case X86Registers::xmm0:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm0); break;
+          case X86Registers::xmm1:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm1); break;
+          case X86Registers::xmm2:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm2); break;
+          case X86Registers::xmm3:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm3); break;
+          case X86Registers::xmm4:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm4); break;
+          case X86Registers::xmm5:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm5); break;
+          case X86Registers::xmm6:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm6); break;
+          case X86Registers::xmm7:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm7); break;
+          case X86Registers::xmm8:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm8); break;
+          case X86Registers::xmm9:  SetXMMRegToNaN(viewType, &fstate.__fpu_xmm9); break;
+          case X86Registers::xmm10: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm10); break;
+          case X86Registers::xmm11: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm11); break;
+          case X86Registers::xmm12: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm12); break;
+          case X86Registers::xmm13: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm13); break;
+          case X86Registers::xmm14: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm14); break;
+          case X86Registers::xmm15: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm15); break;
           default: MOZ_CRASH();
         }
 
         kret = thread_set_state(rtThread, x86_FLOAT_STATE64, (thread_state_t)&fstate, x86_FLOAT_STATE64_COUNT);
         if (kret != KERN_SUCCESS)
             return false;
     } else {
         switch (heapAccess.loadedReg().gpr().code()) {
@@ -842,17 +855,17 @@ HandleFault(int signum, siginfo_t *info,
 
     // We now know that this is an out-of-bounds access made by an asm.js
     // load/store that we should handle. If this is a load, assign the
     // JS-defined result value to the destination register (ToInt32(undefined)
     // or ToNumber(undefined), determined by the type of the destination
     // register) and set the PC to the next op. Upon return from the handler,
     // execution will resume at this next PC.
     if (heapAccess->isLoad())
-        SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
+        SetRegisterToCoercedUndefined(context, heapAccess->viewType(), heapAccess->loadedReg());
     *ppc += heapAccess->opLength();
 
     return true;
 # else
     return false;
 # endif
 }
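
The two HandleFault bodies above implement the same contract: for an
out-of-bounds asm.js load, materialize the JS-defined result (0 for integer
views via ToInt32(undefined), NaN for float views via ToNumber(undefined))
and skip the faulting instruction. A hedged sketch of that contract with the
platform context machinery abstracted away (all names here are illustrative,
not the SpiderMonkey API):

    // Sketch only: FakeHeapAccess stands in for AsmJSHeapAccess, and the
    // register file is reduced to one GPR and one FP register.
    #include <cmath>
    #include <cstdint>

    struct FakeHeapAccess {
        bool isLoad;
        bool isFloatView;        // Float32/Float64 vs. the integer views
        unsigned opLength;       // byte length of the load/store instruction
    };

    static void handleOutOfBounds(const FakeHeapAccess &access,
                                  uint8_t **pc, int32_t *gpr, double *fpr) {
        if (access.isLoad) {
            if (access.isFloatView)
                *fpr = std::nan("");   // ToNumber(undefined) == NaN
            else
                *gpr = 0;              // ToInt32(undefined) == 0
        }
        *pc += access.opLength;        // resume at the next instruction
    }

    int main() {
        uint8_t code[16] = {0};
        uint8_t *pc = code;            // pretend pc is the faulting op
        int32_t gpr = -1;
        double fpr = 0.0;
        FakeHeapAccess access = { true, false, 2 };  // 2-byte integer load
        handleOutOfBounds(access, &pc, &gpr, &fpr);
        return (gpr == 0 && pc == code + 2) ? 0 : 1;
    }
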
 
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -2776,28 +2776,29 @@ class FunctionCompiler
 
     void assign(const Local &local, MDefinition *def)
     {
         if (inDeadCode())
             return;
         curBlock_->setSlot(info().localSlot(local.slot), def);
     }
 
-    MDefinition *loadHeap(Scalar::Type vt, MDefinition *ptr, NeedsBoundsCheck chk)
+    MDefinition *loadHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, NeedsBoundsCheck chk)
     {
         if (inDeadCode())
             return nullptr;
 
         bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
         MAsmJSLoadHeap *load = MAsmJSLoadHeap::New(alloc(), vt, ptr, needsBoundsCheck);
         curBlock_->add(load);
         return load;
     }
 
-    void storeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
+    void storeHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, MDefinition *v,
+                   NeedsBoundsCheck chk)
     {
         if (inDeadCode())
             return;
 
         bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
         MAsmJSStoreHeap *store = MAsmJSStoreHeap::New(alloc(), vt, ptr, v, needsBoundsCheck);
         curBlock_->add(store);
     }
@@ -2805,53 +2806,56 @@ class FunctionCompiler
     void memoryBarrier(MemoryBarrierBits type)
     {
         if (inDeadCode())
             return;
         MMemoryBarrier *ins = MMemoryBarrier::New(alloc(), type);
         curBlock_->add(ins);
     }
 
-    MDefinition *atomicLoadHeap(Scalar::Type vt, MDefinition *ptr, NeedsBoundsCheck chk)
+    MDefinition *atomicLoadHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, NeedsBoundsCheck chk)
     {
         if (inDeadCode())
             return nullptr;
 
         bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
         MAsmJSLoadHeap *load = MAsmJSLoadHeap::New(alloc(), vt, ptr, needsBoundsCheck,
                                                    MembarBeforeLoad, MembarAfterLoad);
         curBlock_->add(load);
         return load;
     }
 
-    void atomicStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
+    void atomicStoreHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr, MDefinition *v,
+                         NeedsBoundsCheck chk)
     {
         if (inDeadCode())
             return;
 
         bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
         MAsmJSStoreHeap *store = MAsmJSStoreHeap::New(alloc(), vt, ptr, v, needsBoundsCheck,
                                                       MembarBeforeStore, MembarAfterStore);
         curBlock_->add(store);
     }
 
-    MDefinition *atomicCompareExchangeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv, NeedsBoundsCheck chk)
+    MDefinition *atomicCompareExchangeHeap(AsmJSHeapAccess::ViewType vt, MDefinition *ptr,
+                                           MDefinition *oldv, MDefinition *newv, NeedsBoundsCheck chk)
     {
         if (inDeadCode())
             return nullptr;
 
         // The code generator requires explicit bounds checking for compareExchange.
         bool needsBoundsCheck = true;
         MAsmJSCompareExchangeHeap *cas =
             MAsmJSCompareExchangeHeap::New(alloc(), vt, ptr, oldv, newv, needsBoundsCheck);
         curBlock_->add(cas);
         return cas;
     }
 
-    MDefinition *atomicBinopHeap(js::jit::AtomicOp op, Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
+    MDefinition *atomicBinopHeap(js::jit::AtomicOp op, AsmJSHeapAccess::ViewType vt,
+                                 MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
     {
         if (inDeadCode())
             return nullptr;
 
         // The code generator requires explicit bounds checking for the binops.
         bool needsBoundsCheck = true;
         MAsmJSAtomicBinopHeap *binop =
             MAsmJSAtomicBinopHeap::New(alloc(), op, vt, ptr, v, needsBoundsCheck);
@@ -4469,17 +4473,17 @@ static bool
 CheckLoadArray(FunctionCompiler &f, ParseNode *elem, MDefinition **def, Type *type)
 {
     Scalar::Type viewType;
     MDefinition *pointerDef;
     NeedsBoundsCheck needsBoundsCheck;
     if (!CheckArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType, &pointerDef, &needsBoundsCheck))
         return false;
 
-    *def = f.loadHeap(viewType, pointerDef, needsBoundsCheck);
+    *def = f.loadHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, needsBoundsCheck);
     *type = TypedArrayLoadType(viewType);
     return true;
 }
 
 static bool
 CheckDotAccess(FunctionCompiler &f, ParseNode *elem, MDefinition **def, Type *type)
 {
     MOZ_ASSERT(elem->isKind(PNK_DOT));
@@ -4559,17 +4563,17 @@ CheckStoreArray(FunctionCompiler &f, Par
             rhsDef = f.unary<MToDouble>(rhsDef);
         else if (!rhsType.isMaybeDouble())
             return f.failf(lhs, "%s is not a subtype of float? or double?", rhsType.toChars());
         break;
       default:
         MOZ_CRASH("Unexpected view type");
     }
 
-    f.storeHeap(viewType, pointerDef, rhsDef, needsBoundsCheck);
+    f.storeHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, rhsDef, needsBoundsCheck);
 
     *def = rhsDef;
     *type = rhsType;
     return true;
 }
 
 static bool
 CheckAssignName(FunctionCompiler &f, ParseNode *lhs, ParseNode *rhs, MDefinition **def, Type *type)
@@ -4827,17 +4831,17 @@ CheckAtomicsLoad(FunctionCompiler &f, Pa
     ParseNode *indexArg = NextNode(arrayArg);
 
     Scalar::Type viewType;
     MDefinition *pointerDef;
     NeedsBoundsCheck needsBoundsCheck;
     if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
         return false;
 
-    *def = f.atomicLoadHeap(viewType, pointerDef, needsBoundsCheck);
+    *def = f.atomicLoadHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, needsBoundsCheck);
     *type = Type::Signed;
     return true;
 }
 
 static bool
 CheckAtomicsStore(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
 {
     if (CallArgListLength(call) != 3)
@@ -4856,17 +4860,17 @@ CheckAtomicsStore(FunctionCompiler &f, P
     MDefinition *rhsDef;
     Type rhsType;
     if (!CheckExpr(f, valueArg, &rhsDef, &rhsType))
         return false;
 
     if (!rhsType.isIntish())
         return f.failf(arrayArg, "%s is not a subtype of intish", rhsType.toChars());
 
-    f.atomicStoreHeap(viewType, pointerDef, rhsDef, needsBoundsCheck);
+    f.atomicStoreHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef, rhsDef, needsBoundsCheck);
 
     *def = rhsDef;
     *type = Type::Signed;
     return true;
 }
 
 static bool
 CheckAtomicsBinop(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type, js::jit::AtomicOp op)
@@ -4887,17 +4891,18 @@ CheckAtomicsBinop(FunctionCompiler &f, P
     MDefinition *valueArgDef;
     Type valueArgType;
     if (!CheckExpr(f, valueArg, &valueArgDef, &valueArgType))
         return false;
 
     if (!valueArgType.isIntish())
         return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars());
 
-    *def = f.atomicBinopHeap(op, viewType, pointerDef, valueArgDef, needsBoundsCheck);
+    *def = f.atomicBinopHeap(op, AsmJSHeapAccess::ViewType(viewType), pointerDef, valueArgDef,
+                             needsBoundsCheck);
     *type = Type::Signed;
     return true;
 }
 
 static bool
 CheckAtomicsCompareExchange(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
 {
     if (CallArgListLength(call) != 4)
@@ -4925,17 +4930,18 @@ CheckAtomicsCompareExchange(FunctionComp
         return false;
 
     if (!oldValueArgType.isIntish())
         return f.failf(oldValueArg, "%s is not a subtype of intish", oldValueArgType.toChars());
 
     if (!newValueArgType.isIntish())
         return f.failf(newValueArg, "%s is not a subtype of intish", newValueArgType.toChars());
 
-    *def = f.atomicCompareExchangeHeap(viewType, pointerDef, oldValueArgDef, newValueArgDef, needsBoundsCheck);
+    *def = f.atomicCompareExchangeHeap(AsmJSHeapAccess::ViewType(viewType), pointerDef,
+                                       oldValueArgDef, newValueArgDef, needsBoundsCheck);
     *type = Type::Signed;
     return true;
 }
 
 static bool
 CheckAtomicsBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSAtomicsBuiltinFunction func,
                         MDefinition **resultDef, Type *resultType)
 {
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -12087,57 +12087,74 @@ class MAsmJSNeg : public MUnaryInstructi
     INSTRUCTION_HEADER(AsmJSNeg);
     static MAsmJSNeg *NewAsmJS(TempAllocator &alloc, MDefinition *op, MIRType type) {
         return new(alloc) MAsmJSNeg(op, type);
     }
 };
 
 class MAsmJSHeapAccess
 {
-    Scalar::Type viewType_;
+  protected:
+    typedef AsmJSHeapAccess::ViewType ViewType;
+
+  private:
+    ViewType viewType_;
     bool needsBoundsCheck_;
 
   public:
-    MAsmJSHeapAccess(Scalar::Type vt, bool needsBoundsCheck)
+    MAsmJSHeapAccess(ViewType vt, bool needsBoundsCheck)
       : viewType_(vt), needsBoundsCheck_(needsBoundsCheck)
     {}
 
-    Scalar::Type viewType() const { return viewType_; }
+    ViewType viewType() const { return viewType_; }
     bool needsBoundsCheck() const { return needsBoundsCheck_; }
     void removeBoundsCheck() { needsBoundsCheck_ = false; }
 };
 
 class MAsmJSLoadHeap : public MUnaryInstruction, public MAsmJSHeapAccess
 {
     MemoryBarrierBits barrierBefore_;
     MemoryBarrierBits barrierAfter_;
 
-    MAsmJSLoadHeap(Scalar::Type vt, MDefinition *ptr, bool needsBoundsCheck,
+    MAsmJSLoadHeap(ViewType vt, MDefinition *ptr, bool needsBoundsCheck,
                    MemoryBarrierBits before, MemoryBarrierBits after)
       : MUnaryInstruction(ptr),
         MAsmJSHeapAccess(vt, needsBoundsCheck),
         barrierBefore_(before),
         barrierAfter_(after)
     {
         if (before|after)
             setGuard();         // Not removable
         else
             setMovable();
-        if (vt == Scalar::Float32)
+
+        switch (vt) {
+          case AsmJSHeapAccess::Int8:
+          case AsmJSHeapAccess::Uint8:
+          case AsmJSHeapAccess::Int16:
+          case AsmJSHeapAccess::Uint16:
+          case AsmJSHeapAccess::Int32:
+          case AsmJSHeapAccess::Uint32:
+            setResultType(MIRType_Int32);
+            break;
+          case AsmJSHeapAccess::Float32:
             setResultType(MIRType_Float32);
-        else if (vt == Scalar::Float64)
+            break;
+          case AsmJSHeapAccess::Float64:
             setResultType(MIRType_Double);
-        else
-            setResultType(MIRType_Int32);
+            break;
+          case AsmJSHeapAccess::Uint8Clamped:
+            MOZ_CRASH("unexpected uint8clamped load heap in asm.js");
+        }
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSLoadHeap);
 
-    static MAsmJSLoadHeap *New(TempAllocator &alloc, Scalar::Type vt,
+    static MAsmJSLoadHeap *New(TempAllocator &alloc, ViewType vt,
                                MDefinition *ptr, bool needsBoundsCheck,
                                MemoryBarrierBits barrierBefore = MembarNobits,
                                MemoryBarrierBits barrierAfter = MembarNobits)
     {
         return new(alloc) MAsmJSLoadHeap(vt, ptr, needsBoundsCheck, barrierBefore, barrierAfter);
     }
 
     MDefinition *ptr() const { return getOperand(0); }
@@ -12151,31 +12168,31 @@ class MAsmJSLoadHeap : public MUnaryInst
     bool mightAlias(const MDefinition *def) const;
 };
 
 class MAsmJSStoreHeap : public MBinaryInstruction, public MAsmJSHeapAccess
 {
     MemoryBarrierBits barrierBefore_;
     MemoryBarrierBits barrierAfter_;
 
-    MAsmJSStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
+    MAsmJSStoreHeap(ViewType vt, MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
                     MemoryBarrierBits before, MemoryBarrierBits after)
       : MBinaryInstruction(ptr, v),
         MAsmJSHeapAccess(vt, needsBoundsCheck),
         barrierBefore_(before),
         barrierAfter_(after)
     {
         if (before|after)
             setGuard();         // Not removable
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSStoreHeap);
 
-    static MAsmJSStoreHeap *New(TempAllocator &alloc, Scalar::Type vt,
+    static MAsmJSStoreHeap *New(TempAllocator &alloc, ViewType vt,
                                 MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
                                 MemoryBarrierBits barrierBefore = MembarNobits,
                                 MemoryBarrierBits barrierAfter = MembarNobits)
     {
         return new(alloc) MAsmJSStoreHeap(vt, ptr, v, needsBoundsCheck,
                                           barrierBefore, barrierAfter);
     }
 
@@ -12186,29 +12203,29 @@ class MAsmJSStoreHeap : public MBinaryIn
 
     AliasSet getAliasSet() const {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSCompareExchangeHeap : public MTernaryInstruction, public MAsmJSHeapAccess
 {
-    MAsmJSCompareExchangeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv,
+    MAsmJSCompareExchangeHeap(ViewType vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv,
                               bool needsBoundsCheck)
         : MTernaryInstruction(ptr, oldv, newv),
           MAsmJSHeapAccess(vt, needsBoundsCheck)
     {
         setGuard();             // Not removable
         setResultType(MIRType_Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSCompareExchangeHeap);
 
-    static MAsmJSCompareExchangeHeap *New(TempAllocator &alloc, Scalar::Type vt,
+    static MAsmJSCompareExchangeHeap *New(TempAllocator &alloc, ViewType vt,
                                           MDefinition *ptr, MDefinition *oldv,
                                           MDefinition *newv, bool needsBoundsCheck)
     {
         return new(alloc) MAsmJSCompareExchangeHeap(vt, ptr, oldv, newv, needsBoundsCheck);
     }
 
     MDefinition *ptr() const { return getOperand(0); }
     MDefinition *oldValue() const { return getOperand(1); }
@@ -12218,30 +12235,30 @@ class MAsmJSCompareExchangeHeap : public
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
 class MAsmJSAtomicBinopHeap : public MBinaryInstruction, public MAsmJSHeapAccess
 {
     AtomicOp op_;
 
-    MAsmJSAtomicBinopHeap(AtomicOp op, Scalar::Type vt, MDefinition *ptr, MDefinition *v,
+    MAsmJSAtomicBinopHeap(AtomicOp op, ViewType vt, MDefinition *ptr, MDefinition *v,
                           bool needsBoundsCheck)
         : MBinaryInstruction(ptr, v),
           MAsmJSHeapAccess(vt, needsBoundsCheck),
           op_(op)
     {
         setGuard();         // Not removable
         setResultType(MIRType_Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSAtomicBinopHeap);
 
-    static MAsmJSAtomicBinopHeap *New(TempAllocator &alloc, AtomicOp op, Scalar::Type vt,
+    static MAsmJSAtomicBinopHeap *New(TempAllocator &alloc, AtomicOp op, ViewType vt,
                                       MDefinition *ptr, MDefinition *v, bool needsBoundsCheck)
     {
         return new(alloc) MAsmJSAtomicBinopHeap(op, vt, ptr, v, needsBoundsCheck);
     }
 
     AtomicOp operation() const { return op_; }
     MDefinition *ptr() const { return getOperand(0); }
     MDefinition *value() const { return getOperand(1); }
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -726,46 +726,60 @@ static const unsigned AsmJSNaN32GlobalDa
 
 // Summarizes a heap access made by asm.js code that needs to be patched later
 // and/or looked up by the asm.js signal handlers. Different architectures need
 // to know different things (x64: offset and length, ARM: where to patch in
 // heap length, x86: where to patch in heap length and base) hence the massive
 // #ifdefery.
 class AsmJSHeapAccess
 {
+  public:
+    enum ViewType {
+         Int8         = Scalar::Int8,
+         Uint8        = Scalar::Uint8,
+         Int16        = Scalar::Int16,
+         Uint16       = Scalar::Uint16,
+         Int32        = Scalar::Int32,
+         Uint32       = Scalar::Uint32,
+         Float32      = Scalar::Float32,
+         Float64      = Scalar::Float64,
+         Uint8Clamped = Scalar::Uint8Clamped,
+    };
+
+  private:
     uint32_t offset_;
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
     uint8_t cmpDelta_;  // the number of bytes from the cmp to the load/store instruction
     uint8_t opLength_;  // the length of the load/store instruction
-    uint8_t isFloat32Load_;
+    ViewType viewType_;
     AnyRegister::Code loadedReg_ : 8;
 #endif
 
     JS_STATIC_ASSERT(AnyRegister::Total < UINT8_MAX);
 
   public:
     AsmJSHeapAccess() {}
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
     static const uint32_t NoLengthCheck = UINT32_MAX;
 
     // If 'cmp' equals 'offset' or if it is not supplied then the
     // cmpDelta_ is zero indicating that there is no length to patch.
-    AsmJSHeapAccess(uint32_t offset, uint32_t after, Scalar::Type vt,
+    AsmJSHeapAccess(uint32_t offset, uint32_t after, ViewType viewType,
                     AnyRegister loadedReg, uint32_t cmp = NoLengthCheck)
       : offset_(offset),
         cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
         opLength_(after - offset),
-        isFloat32Load_(vt == Scalar::Float32),
+        viewType_(viewType),
         loadedReg_(loadedReg.code())
     {}
     AsmJSHeapAccess(uint32_t offset, uint8_t after, uint32_t cmp = NoLengthCheck)
       : offset_(offset),
         cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
         opLength_(after - offset),
-        isFloat32Load_(false),
+        viewType_(ViewType(-1)),
         loadedReg_(UINT8_MAX)
     {}
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
     explicit AsmJSHeapAccess(uint32_t offset)
       : offset_(offset)
     {}
 #endif
 
@@ -774,17 +788,17 @@ class AsmJSHeapAccess
 #if defined(JS_CODEGEN_X86)
     void *patchOffsetAt(uint8_t *code) const { return code + (offset_ + opLength_); }
 #endif
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
     bool hasLengthCheck() const { return cmpDelta_ > 0; }
     void *patchLengthAt(uint8_t *code) const { return code + (offset_ - cmpDelta_); }
     unsigned opLength() const { return opLength_; }
     bool isLoad() const { return loadedReg_ != UINT8_MAX; }
-    bool isFloat32Load() const { return isFloat32Load_; }
+    ViewType viewType() const { return viewType_; }
     AnyRegister loadedReg() const { return AnyRegister::FromCode(loadedReg_); }
 #endif
 };
 
 typedef Vector<AsmJSHeapAccess, 0, SystemAllocPolicy> AsmJSHeapAccessVector;
 
 struct AsmJSGlobalAccess
 {
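
The class comment above notes that x64 needs "offset and length"; concretely,
the x86/x64 flavor of AsmJSHeapAccess stores three byte counts (offset_,
cmpDelta_, opLength_) from which both patch sites and the fault handler's
resume PC can be recomputed. A sketch of that arithmetic (illustrative names;
patchLengthAt, patchOffsetAt and opLength above are the real accessors):

    // Sketch only: AccessLayout mirrors the x86/x64 fields of AsmJSHeapAccess.
    #include <cstdint>

    struct AccessLayout {
        uint32_t offset;     // start of the load/store instruction
        uint8_t cmpDelta;    // bytes back from it to the heap-length cmp
        uint8_t opLength;    // byte length of the load/store instruction
    };

    static uint8_t *lengthPatchSite(uint8_t *code, const AccessLayout &a) {
        return code + (a.offset - a.cmpDelta);   // cf. patchLengthAt()
    }

    static uint8_t *resumePC(uint8_t *code, const AccessLayout &a) {
        return code + a.offset + a.opLength;     // cf. patchOffsetAt() on x86
    }

    int main() {
        uint8_t module[64] = {0};
        AccessLayout a = { 10, 4, 3 };   // cmp 4 bytes before a 3-byte load
        return (resumePC(module, a) - lengthPatchSite(module, a) == 7) ? 0 : 1;
    }
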
--- a/js/src/jit/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.cpp
@@ -341,24 +341,33 @@ CodeGeneratorX86Shared::visitAsmJSPassSt
         }
     }
     return true;
 }
 
 bool
 CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds *ool)
 {
-    if (ool->dest().isFloat()) {
-        if (ool->isFloat32Load())
-            masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
-        else
-            masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
-    } else {
+    switch (ool->viewType()) {
+      case AsmJSHeapAccess::Float32:
+        masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
+        break;
+      case AsmJSHeapAccess::Float64:
+        masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
+        break;
+      case AsmJSHeapAccess::Int8:
+      case AsmJSHeapAccess::Uint8:
+      case AsmJSHeapAccess::Int16:
+      case AsmJSHeapAccess::Uint16:
+      case AsmJSHeapAccess::Int32:
+      case AsmJSHeapAccess::Uint32:
+      case AsmJSHeapAccess::Uint8Clamped:
         Register destReg = ool->dest().gpr();
         masm.mov(ImmWord(0), destReg);
+        break;
     }
     masm.jmp(ool->rejoin());
     return true;
 }
 
 bool
 CodeGeneratorX86Shared::generateOutOfLineCode()
 {
--- a/js/src/jit/shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.h
@@ -33,24 +33,24 @@ class CodeGeneratorX86Shared : public Co
 
   protected:
 
     // Load a NaN or zero into a register for an out of bounds AsmJS or static
     // typed array load.
     class OutOfLineLoadTypedArrayOutOfBounds : public OutOfLineCodeBase<CodeGeneratorX86Shared>
     {
         AnyRegister dest_;
-        bool isFloat32Load_;
+        AsmJSHeapAccess::ViewType viewType_;
       public:
-        OutOfLineLoadTypedArrayOutOfBounds(AnyRegister dest, bool isFloat32Load)
-          : dest_(dest), isFloat32Load_(isFloat32Load)
+        OutOfLineLoadTypedArrayOutOfBounds(AnyRegister dest, AsmJSHeapAccess::ViewType viewType)
+          : dest_(dest), viewType_(viewType)
         {}
 
         AnyRegister dest() const { return dest_; }
-        bool isFloat32Load() const { return isFloat32Load_; }
+        AsmJSHeapAccess::ViewType viewType() const { return viewType_; }
         bool accept(CodeGeneratorX86Shared *codegen) {
             return codegen->visitOutOfLineLoadTypedArrayOutOfBounds(this);
         }
     };
 
     // Label for the common return path.
     NonAssertingLabel returnLabel_;
     NonAssertingLabel deoptLabel_;
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -264,68 +264,67 @@ CodeGeneratorX64::memoryBarrier(MemoryBa
     if (barrier & MembarStoreLoad)
         masm.storeLoadFence();
 }
 
 bool
 CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
 {
     MAsmJSLoadHeap *mir = ins->mir();
-    Scalar::Type vt = mir->viewType();
+    AsmJSHeapAccess::ViewType vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     const LDefinition *out = ins->output();
     Operand srcAddr(HeapReg);
 
     if (ptr->isConstant()) {
         int32_t ptrImm = ptr->toConstant()->toInt32();
         MOZ_ASSERT(ptrImm >= 0);
         srcAddr = Operand(HeapReg, ptrImm);
     } else {
         srcAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
     }
 
     memoryBarrier(ins->mir()->barrierBefore());
     OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
     uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
     if (mir->needsBoundsCheck()) {
-        bool isFloat32Load = vt == Scalar::Float32;
-        ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
+        ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), vt);
         if (!addOutOfLineCode(ool, ins->mir()))
             return false;
 
         CodeOffsetLabel cmp = masm.cmplWithPatch(ToRegister(ptr), Imm32(0));
         masm.j(Assembler::AboveOrEqual, ool->entry());
         maybeCmpOffset = cmp.offset();
     }
 
     uint32_t before = masm.size();
     switch (vt) {
-      case Scalar::Int8:    masm.movsbl(srcAddr, ToRegister(out)); break;
-      case Scalar::Uint8:   masm.movzbl(srcAddr, ToRegister(out)); break;
-      case Scalar::Int16:   masm.movswl(srcAddr, ToRegister(out)); break;
-      case Scalar::Uint16:  masm.movzwl(srcAddr, ToRegister(out)); break;
-      case Scalar::Int32:
-      case Scalar::Uint32:  masm.movl(srcAddr, ToRegister(out)); break;
-      case Scalar::Float32: masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
-      case Scalar::Float64: masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
-      default: MOZ_CRASH("unexpected array type");
+      case AsmJSHeapAccess::Int8:      masm.movsbl(srcAddr, ToRegister(out)); break;
+      case AsmJSHeapAccess::Uint8:     masm.movzbl(srcAddr, ToRegister(out)); break;
+      case AsmJSHeapAccess::Int16:     masm.movswl(srcAddr, ToRegister(out)); break;
+      case AsmJSHeapAccess::Uint16:    masm.movzwl(srcAddr, ToRegister(out)); break;
+      case AsmJSHeapAccess::Int32:
+      case AsmJSHeapAccess::Uint32:    masm.movl(srcAddr, ToRegister(out)); break;
+      case AsmJSHeapAccess::Float32:   masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
+      case AsmJSHeapAccess::Float64:   masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
+      case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
     }
     uint32_t after = masm.size();
     if (ool)
         masm.bind(ool->rejoin());
     memoryBarrier(ins->mir()->barrierAfter());
     masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out), maybeCmpOffset));
     return true;
 }
 
 bool
 CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
 {
     MAsmJSStoreHeap *mir = ins->mir();
-    Scalar::Type vt = mir->viewType();
+    AsmJSHeapAccess::ViewType vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     Operand dstAddr(HeapReg);
 
     if (ptr->isConstant()) {
         int32_t ptrImm = ptr->toConstant()->toInt32();
         MOZ_ASSERT(ptrImm >= 0);
         dstAddr = Operand(HeapReg, ptrImm);
     } else {
@@ -339,50 +338,55 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LA
         CodeOffsetLabel cmp = masm.cmplWithPatch(ToRegister(ptr), Imm32(0));
         masm.j(Assembler::AboveOrEqual, &rejoin);
         maybeCmpOffset = cmp.offset();
     }
 
     uint32_t before = masm.size();
     if (ins->value()->isConstant()) {
         switch (vt) {
-          case Scalar::Int8:
-          case Scalar::Uint8:   masm.movb(Imm32(ToInt32(ins->value())), dstAddr); break;
-          case Scalar::Int16:
-          case Scalar::Uint16:  masm.movw(Imm32(ToInt32(ins->value())), dstAddr); break;
-          case Scalar::Int32:
-          case Scalar::Uint32:  masm.movl(Imm32(ToInt32(ins->value())), dstAddr); break;
-          default: MOZ_CRASH("unexpected array type");
+          case AsmJSHeapAccess::Int8:
+          case AsmJSHeapAccess::Uint8:        masm.movb(Imm32(ToInt32(ins->value())), dstAddr); break;
+          case AsmJSHeapAccess::Int16:
+          case AsmJSHeapAccess::Uint16:       masm.movw(Imm32(ToInt32(ins->value())), dstAddr); break;
+          case AsmJSHeapAccess::Int32:
+          case AsmJSHeapAccess::Uint32:       masm.movl(Imm32(ToInt32(ins->value())), dstAddr); break;
+          case AsmJSHeapAccess::Float32:
+          case AsmJSHeapAccess::Float64:
+          case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
         }
     } else {
         switch (vt) {
-          case Scalar::Int8:
-          case Scalar::Uint8:   masm.movb(ToRegister(ins->value()), dstAddr); break;
-          case Scalar::Int16:
-          case Scalar::Uint16:  masm.movw(ToRegister(ins->value()), dstAddr); break;
-          case Scalar::Int32:
-          case Scalar::Uint32:  masm.movl(ToRegister(ins->value()), dstAddr); break;
-          case Scalar::Float32: masm.storeFloat32(ToFloatRegister(ins->value()), dstAddr); break;
-          case Scalar::Float64: masm.storeDouble(ToFloatRegister(ins->value()), dstAddr); break;
-          default: MOZ_CRASH("unexpected array type");
+          case AsmJSHeapAccess::Int8:
+          case AsmJSHeapAccess::Uint8:        masm.movb(ToRegister(ins->value()), dstAddr); break;
+          case AsmJSHeapAccess::Int16:
+          case AsmJSHeapAccess::Uint16:       masm.movw(ToRegister(ins->value()), dstAddr); break;
+          case AsmJSHeapAccess::Int32:
+          case AsmJSHeapAccess::Uint32:       masm.movl(ToRegister(ins->value()), dstAddr); break;
+          case AsmJSHeapAccess::Float32:      masm.storeFloat32(ToFloatRegister(ins->value()), dstAddr); break;
+          case AsmJSHeapAccess::Float64:      masm.storeDouble(ToFloatRegister(ins->value()), dstAddr); break;
+          case AsmJSHeapAccess::Uint8Clamped: MOZ_CRASH("unexpected array type");
         }
     }
     uint32_t after = masm.size();
     if (rejoin.used())
         masm.bind(&rejoin);
     memoryBarrier(ins->mir()->barrierAfter());
     masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
     return true;
 }
 
 bool
 CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
 {
     MAsmJSCompareExchangeHeap *mir = ins->mir();
-    Scalar::Type vt = mir->viewType();
+
+    MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
+    Scalar::Type vt = Scalar::Type(mir->viewType());
+
     const LAllocation *ptr = ins->ptr();
 
     MOZ_ASSERT(ptr->isRegister());
     BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne);
 
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
 
@@ -390,17 +394,17 @@ CodeGeneratorX64::visitAsmJSCompareExcha
     uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
     MOZ_ASSERT(mir->needsBoundsCheck());
     {
         maybeCmpOffset = masm.cmplWithPatch(ToRegister(ptr), Imm32(0)).offset();
         Label goahead;
         masm.j(Assembler::LessThan, &goahead);
         memoryBarrier(MembarFull);
         Register out = ToRegister(ins->output());
-        masm.xorl(out,out);
+        masm.xorl(out, out);
         masm.jmp(&rejoin);
         masm.bind(&goahead);
     }
     masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                         srcAddr,
                                         oldval,
                                         newval,
                                         InvalidReg,
@@ -411,17 +415,20 @@ CodeGeneratorX64::visitAsmJSCompareExcha
     masm.append(AsmJSHeapAccess(after, after, maybeCmpOffset));
     return true;
 }
 
 bool
 CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
 {
     MAsmJSAtomicBinopHeap *mir = ins->mir();
-    Scalar::Type vt = mir->viewType();
+
+    MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
+    Scalar::Type vt = Scalar::Type(mir->viewType());
+
     const LAllocation *ptr = ins->ptr();
     Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
     MOZ_ASSERT(ptr->isRegister());
     BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne);
 
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -158,29 +158,29 @@ LIRGeneratorX64::visitAsmJSStoreHeap(MAs
     // getting maximum performance in these cases) only allow constant
     // operands when skipping bounds checks.
     LAllocation ptrAlloc = ins->needsBoundsCheck()
                            ? useRegisterAtStart(ptr)
                            : useRegisterOrNonNegativeConstantAtStart(ptr);
 
     LAsmJSStoreHeap *lir;
     switch (ins->viewType()) {
-      case Scalar::Int8:
-      case Scalar::Uint8:
-      case Scalar::Int16:
-      case Scalar::Uint16:
-      case Scalar::Int32:
-      case Scalar::Uint32:
+      case AsmJSHeapAccess::Int8:
+      case AsmJSHeapAccess::Uint8:
+      case AsmJSHeapAccess::Int16:
+      case AsmJSHeapAccess::Uint16:
+      case AsmJSHeapAccess::Int32:
+      case AsmJSHeapAccess::Uint32:
         lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterOrConstantAtStart(ins->value()));
         break;
-      case Scalar::Float32:
-      case Scalar::Float64:
+      case AsmJSHeapAccess::Float32:
+      case AsmJSHeapAccess::Float64:
         lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
         break;
-      default:
+      case AsmJSHeapAccess::Uint8Clamped:
         MOZ_CRASH("unexpected array type");
     }
 
     return add(lir, ins);
 }
 
 bool
 LIRGeneratorX64::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins)
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -262,74 +262,72 @@ CodeGeneratorX86::visitAsmJSUInt32ToFloa
 
     // Beware: convertUInt32ToFloat32 clobbers input.
     masm.convertUInt32ToFloat32(temp, output);
     return true;
 }
 
 template<typename T>
 void
-CodeGeneratorX86::loadViewTypeElement(Scalar::Type vt, const T &srcAddr,
+CodeGeneratorX86::loadViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
                                       const LDefinition *out)
 {
     switch (vt) {
-      case Scalar::Int8:    masm.movsblWithPatch(srcAddr, ToRegister(out)); break;
-      case Scalar::Uint8Clamped:
-      case Scalar::Uint8:   masm.movzblWithPatch(srcAddr, ToRegister(out)); break;
-      case Scalar::Int16:   masm.movswlWithPatch(srcAddr, ToRegister(out)); break;
-      case Scalar::Uint16:  masm.movzwlWithPatch(srcAddr, ToRegister(out)); break;
-      case Scalar::Int32:
-      case Scalar::Uint32:  masm.movlWithPatch(srcAddr, ToRegister(out)); break;
-      case Scalar::Float32: masm.movssWithPatch(srcAddr, ToFloatRegister(out)); break;
-      case Scalar::Float64: masm.movsdWithPatch(srcAddr, ToFloatRegister(out)); break;
-      default: MOZ_CRASH("unexpected array type");
+      case AsmJSHeapAccess::Int8:         masm.movsblWithPatch(srcAddr, ToRegister(out)); break;
+      case AsmJSHeapAccess::Uint8Clamped:
+      case AsmJSHeapAccess::Uint8:        masm.movzblWithPatch(srcAddr, ToRegister(out)); break;
+      case AsmJSHeapAccess::Int16:        masm.movswlWithPatch(srcAddr, ToRegister(out)); break;
+      case AsmJSHeapAccess::Uint16:       masm.movzwlWithPatch(srcAddr, ToRegister(out)); break;
+      case AsmJSHeapAccess::Int32:
+      case AsmJSHeapAccess::Uint32:       masm.movlWithPatch(srcAddr, ToRegister(out)); break;
+      case AsmJSHeapAccess::Float32:      masm.movssWithPatch(srcAddr, ToFloatRegister(out)); break;
+      case AsmJSHeapAccess::Float64:      masm.movsdWithPatch(srcAddr, ToFloatRegister(out)); break;
     }
 }
 
 template<typename T>
 bool
-CodeGeneratorX86::loadAndNoteViewTypeElement(Scalar::Type vt, const T &srcAddr,
+CodeGeneratorX86::loadAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
                                              const LDefinition *out)
 {
     uint32_t before = masm.size();
     loadViewTypeElement(vt, srcAddr, out);
     uint32_t after = masm.size();
     masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out)));
     return true;
 }
 
 bool
 CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
 {
     const MLoadTypedArrayElementStatic *mir = ins->mir();
-    Scalar::Type vt = mir->viewType();
-    MOZ_ASSERT_IF(vt == Scalar::Float32, mir->type() == MIRType_Float32);
+    AsmJSHeapAccess::ViewType vt = AsmJSHeapAccess::ViewType(mir->viewType());
+    MOZ_ASSERT_IF(vt == AsmJSHeapAccess::Float32, mir->type() == MIRType_Float32);
 
     Register ptr = ToRegister(ins->ptr());
     const LDefinition *out = ins->output();
 
     OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
-    bool isFloat32Load = (vt == Scalar::Float32);
     if (!mir->fallible()) {
-        ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
+        ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), vt);
         if (!addOutOfLineCode(ool, ins->mir()))
             return false;
     }
 
     masm.cmpl(ptr, Imm32(mir->length()));
     if (ool)
         masm.j(Assembler::AboveOrEqual, ool->entry());
     else if (!bailoutIf(Assembler::AboveOrEqual, ins->snapshot()))
         return false;
 
     Address srcAddr(ptr, (int32_t) mir->base());
     loadViewTypeElement(vt, srcAddr, out);
-    if (vt == Scalar::Float64)
+    if (vt == AsmJSHeapAccess::Float64)
         masm.canonicalizeDouble(ToFloatRegister(out));
-    if (vt == Scalar::Float32)
+    if (vt == AsmJSHeapAccess::Float32)
         masm.canonicalizeFloat(ToFloatRegister(out));
     if (ool)
         masm.bind(ool->rejoin());
     return true;
 }
 
 bool
 CodeGeneratorX86::visitAsmJSCall(LAsmJSCall *ins)
@@ -364,17 +362,17 @@ CodeGeneratorX86::memoryBarrier(MemoryBa
     if (barrier & MembarStoreLoad)
         masm.storeLoadFence();
 }
 
 bool
 CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
 {
     const MAsmJSLoadHeap *mir = ins->mir();
-    Scalar::Type vt = mir->viewType();
+    AsmJSHeapAccess::ViewType vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     const LDefinition *out = ins->output();
 
     memoryBarrier(ins->mir()->barrierBefore());
 
     if (ptr->isConstant()) {
         // The constant displacement still needs to be added to the as-yet-unknown
         // base address of the heap. For now, embed the displacement as an
@@ -390,18 +388,17 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAs
     Address srcAddr(ptrReg, 0);
 
     if (!mir->needsBoundsCheck()) {
         loadAndNoteViewTypeElement(vt, srcAddr, out);
         memoryBarrier(ins->mir()->barrierAfter());
         return true;
     }
 
-    bool isFloat32Load = vt == Scalar::Float32;
-    OutOfLineLoadTypedArrayOutOfBounds *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
+    OutOfLineLoadTypedArrayOutOfBounds *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), vt);
     if (!addOutOfLineCode(ool, mir))
         return false;
 
     CodeOffsetLabel cmp = masm.cmplWithPatch(ptrReg, Imm32(0));
     masm.j(Assembler::AboveOrEqual, ool->entry());
 
     uint32_t before = masm.size();
     loadViewTypeElement(vt, srcAddr, out);
@@ -409,49 +406,48 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAs
     masm.bind(ool->rejoin());
     memoryBarrier(ins->mir()->barrierAfter());
     masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out), cmp.offset()));
     return true;
 }
 
 template<typename T>
 void
-CodeGeneratorX86::storeViewTypeElement(Scalar::Type vt, const LAllocation *value,
+CodeGeneratorX86::storeViewTypeElement(AsmJSHeapAccess::ViewType vt, const LAllocation *value,
                                        const T &dstAddr)
 {
     switch (vt) {
-      case Scalar::Int8:
-      case Scalar::Uint8Clamped:
-      case Scalar::Uint8:   masm.movbWithPatch(ToRegister(value), dstAddr); break;
-      case Scalar::Int16:
-      case Scalar::Uint16:  masm.movwWithPatch(ToRegister(value), dstAddr); break;
-      case Scalar::Int32:
-      case Scalar::Uint32:  masm.movlWithPatch(ToRegister(value), dstAddr); break;
-      case Scalar::Float32: masm.movssWithPatch(ToFloatRegister(value), dstAddr); break;
-      case Scalar::Float64: masm.movsdWithPatch(ToFloatRegister(value), dstAddr); break;
-      default: MOZ_CRASH("unexpected array type");
+      case AsmJSHeapAccess::Int8:
+      case AsmJSHeapAccess::Uint8Clamped:
+      case AsmJSHeapAccess::Uint8:        masm.movbWithPatch(ToRegister(value), dstAddr); break;
+      case AsmJSHeapAccess::Int16:
+      case AsmJSHeapAccess::Uint16:       masm.movwWithPatch(ToRegister(value), dstAddr); break;
+      case AsmJSHeapAccess::Int32:
+      case AsmJSHeapAccess::Uint32:       masm.movlWithPatch(ToRegister(value), dstAddr); break;
+      case AsmJSHeapAccess::Float32:      masm.movssWithPatch(ToFloatRegister(value), dstAddr); break;
+      case AsmJSHeapAccess::Float64:      masm.movsdWithPatch(ToFloatRegister(value), dstAddr); break;
     }
 }
 
 template<typename T>
 void
-CodeGeneratorX86::storeAndNoteViewTypeElement(Scalar::Type vt, const LAllocation *value,
-                                              const T &dstAddr)
+CodeGeneratorX86::storeAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt,
+                                              const LAllocation *value, const T &dstAddr)
 {
     uint32_t before = masm.size();
     storeViewTypeElement(vt, value, dstAddr);
     uint32_t after = masm.size();
     masm.append(AsmJSHeapAccess(before, after));
 }
 
 bool
 CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
 {
     MStoreTypedArrayElementStatic *mir = ins->mir();
-    Scalar::Type vt = mir->viewType();
+    AsmJSHeapAccess::ViewType vt = AsmJSHeapAccess::ViewType(mir->viewType());
 
     Register ptr = ToRegister(ins->ptr());
     const LAllocation *value = ins->value();
 
     masm.cmpl(ptr, Imm32(mir->length()));
     Label rejoin;
     masm.j(Assembler::AboveOrEqual, &rejoin);
 
@@ -460,17 +456,17 @@ CodeGeneratorX86::visitStoreTypedArrayEl
     masm.bind(&rejoin);
     return true;
 }
 
 bool
 CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
 {
     MAsmJSStoreHeap *mir = ins->mir();
-    Scalar::Type vt = mir->viewType();
+    AsmJSHeapAccess::ViewType vt = mir->viewType();
     const LAllocation *value = ins->value();
     const LAllocation *ptr = ins->ptr();
 
     memoryBarrier(ins->mir()->barrierBefore());
 
     if (ptr->isConstant()) {
         // The constant displacement still needs to be added to the as-yet-unknown
         // base address of the heap. For now, embed the displacement as an
@@ -503,17 +499,20 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LA
     masm.append(AsmJSHeapAccess(before, after, cmp.offset()));
     return true;
 }
 
 bool
 CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
 {
     MAsmJSCompareExchangeHeap *mir = ins->mir();
-    Scalar::Type vt = mir->viewType();
+
+    MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
+    Scalar::Type vt = Scalar::Type(mir->viewType());
+
     const LAllocation *ptr = ins->ptr();
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
 
     MOZ_ASSERT(ptr->isRegister());
     // Set up the offset within the heap in the pointer reg.
     Register ptrReg = ToRegister(ptr);
 
@@ -550,17 +549,20 @@ CodeGeneratorX86::visitAsmJSCompareExcha
 
     return true;
 }
 
 bool
 CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
 {
     MAsmJSAtomicBinopHeap *mir = ins->mir();
-    Scalar::Type vt = mir->viewType();
+
+    MOZ_ASSERT(mir->viewType() <= AsmJSHeapAccess::Uint32);
+    Scalar::Type vt = Scalar::Type(mir->viewType());
+
     const LAllocation *ptr = ins->ptr();
     Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
     MOZ_ASSERT(ptr->isRegister());
     // Set up the offset within the heap in the pointer reg.
     Register ptrReg = ToRegister(ptr);
--- a/js/src/jit/x86/CodeGenerator-x86.h
+++ b/js/src/jit/x86/CodeGenerator-x86.h
@@ -24,27 +24,28 @@ class CodeGeneratorX86 : public CodeGene
     }
 
   protected:
     ValueOperand ToValue(LInstruction *ins, size_t pos);
     ValueOperand ToOutValue(LInstruction *ins);
     ValueOperand ToTempValue(LInstruction *ins, size_t pos);
 
     template<typename T>
-    bool loadAndNoteViewTypeElement(Scalar::Type vt, const T &srcAddr,
+    bool loadAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
+                                    const LDefinition *out);
+    template<typename T>
+    void loadViewTypeElement(AsmJSHeapAccess::ViewType vt, const T &srcAddr,
                              const LDefinition *out);
     template<typename T>
-    void loadViewTypeElement(Scalar::Type vt, const T &srcAddr,
-                                       const LDefinition *out);
-    template<typename T>
-    void storeAndNoteViewTypeElement(Scalar::Type vt, const LAllocation *value,
+    void storeAndNoteViewTypeElement(AsmJSHeapAccess::ViewType vt, const LAllocation *value,
                                      const T &dstAddr);
     template<typename T>
-    void storeViewTypeElement(Scalar::Type vt, const LAllocation *value,
+    void storeViewTypeElement(AsmJSHeapAccess::ViewType vt, const LAllocation *value,
                               const T &dstAddr);
+
     void memoryBarrier(MemoryBarrierBits barrier);
 
   public:
     CodeGeneratorX86(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);
 
   public:
     bool visitBox(LBox *box);
     bool visitBoxFloatingPoint(LBoxFloatingPoint *box);
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -238,44 +238,46 @@ LIRGeneratorX86::visitAsmJSStoreHeap(MAs
     LAsmJSStoreHeap *lir;
     MOZ_ASSERT(ptr->type() == MIRType_Int32);
 
     if (ptr->isConstant() && !ins->needsBoundsCheck()) {
         int32_t ptrValue = ptr->toConstant()->value().toInt32();
         MOZ_ASSERT(ptrValue >= 0);
         LAllocation ptrAlloc = LAllocation(ptr->toConstant()->vp());
         switch (ins->viewType()) {
-          case Scalar::Int8: case Scalar::Uint8:
+          case AsmJSHeapAccess::Int8: case AsmJSHeapAccess::Uint8:
             // See comment below.
             lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useFixed(ins->value(), eax));
             break;
-          case Scalar::Int16: case Scalar::Uint16:
-          case Scalar::Int32: case Scalar::Uint32:
-          case Scalar::Float32: case Scalar::Float64:
+          case AsmJSHeapAccess::Int16: case AsmJSHeapAccess::Uint16:
+          case AsmJSHeapAccess::Int32: case AsmJSHeapAccess::Uint32:
+          case AsmJSHeapAccess::Float32: case AsmJSHeapAccess::Float64:
             // See comment below.
             lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
             break;
-          default: MOZ_CRASH("unexpected array type");
+          case AsmJSHeapAccess::Uint8Clamped:
+            MOZ_CRASH("unexpected array type");
         }
         return add(lir, ins);
     }
 
     switch (ins->viewType()) {
-      case Scalar::Int8: case Scalar::Uint8:
+      case AsmJSHeapAccess::Int8: case AsmJSHeapAccess::Uint8:
         // See comment for LIRGeneratorX86::useByteOpRegister.
         lir = new(alloc()) LAsmJSStoreHeap(useRegister(ins->ptr()), useFixed(ins->value(), eax));
         break;
-      case Scalar::Int16: case Scalar::Uint16:
-      case Scalar::Int32: case Scalar::Uint32:
-      case Scalar::Float32: case Scalar::Float64:
+      case AsmJSHeapAccess::Int16: case AsmJSHeapAccess::Uint16:
+      case AsmJSHeapAccess::Int32: case AsmJSHeapAccess::Uint32:
+      case AsmJSHeapAccess::Float32: case AsmJSHeapAccess::Float64:
         // For now, don't allow constant values. The immediate operand
         // affects instruction layout which affects patching.
         lir = new(alloc()) LAsmJSStoreHeap(useRegisterAtStart(ptr), useRegisterAtStart(ins->value()));
         break;
-      default: MOZ_CRASH("unexpected array type");
+      case AsmJSHeapAccess::Uint8Clamped:
+        MOZ_CRASH("unexpected array type");
     }
 
     return add(lir, ins);
 }
 
 bool
 LIRGeneratorX86::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins)
 {