Bug 1268024: Unrelated changes; r=luke
authorBenjamin Bouvier <benj@benj.me>
Wed, 29 Jun 2016 17:58:23 +0200
changeset 303425 d1abee3e755dcc38ce5b5a72f702b950f333853d
parent 303424 38c84afa8cde50514366e23d4405457c276361da
child 303426 a36da0eea7af1bdb7e51b69d5aa9454b98689ac3
push id30388
push usercbook@mozilla.com
push dateSat, 02 Jul 2016 09:15:23 +0000
treeherdermozilla-central@39dffbba7642 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersluke
bugs1268024
milestone50.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1268024: Unrelated changes; r=luke MozReview-Commit-ID: E5NmH0fmpm7
js/src/asmjs/WasmBaselineCompile.cpp
js/src/asmjs/WasmIonCompile.cpp
js/src/asmjs/WasmSignalHandlers.cpp
js/src/jit-test/tests/wasm/spec.js
js/src/jit-test/tests/wasm/spec/func_ptrs.wast.js
js/src/jit/MIR.h
js/src/jit/MIRGenerator.h
js/src/jit/MIRGraph.cpp
js/src/jit/arm/Assembler-arm.cpp
js/src/jit/arm/Assembler-arm.h
js/src/jit/arm/MacroAssembler-arm.cpp
js/src/jit/arm/MacroAssembler-arm.h
js/src/jit/x64/Lowering-x64.cpp
js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
js/src/jit/x86/Lowering-x86.cpp
js/src/vm/SharedMem.h
--- a/js/src/asmjs/WasmBaselineCompile.cpp
+++ b/js/src/asmjs/WasmBaselineCompile.cpp
@@ -5021,19 +5021,18 @@ BaseCompiler::emitLoad(ValType type, Sca
 {
     LinearMemoryAddress<Nothing> addr;
     if (!iter_.readLoad(type, Scalar::byteSize(viewType), &addr))
         return false;
 
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MWasmMemoryAccess access(viewType);
+    MWasmMemoryAccess access(viewType, addr.align);
     access.setOffset(addr.offset);
-    access.setAlign(addr.align);
 
     switch (type) {
       case ValType::I32: {
         RegI32 rp = popI32();
         loadHeap(access, rp, AnyReg(rp));
         pushI32(rp);
         break;
       }
@@ -5069,19 +5068,18 @@ BaseCompiler::emitStore(ValType resultTy
     LinearMemoryAddress<Nothing> addr;
     Nothing unused_value;
     if (!iter_.readStore(resultType, Scalar::byteSize(viewType), &addr, &unused_value))
         return false;
 
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MWasmMemoryAccess access(viewType);
+    MWasmMemoryAccess access(viewType, addr.align);
     access.setOffset(addr.offset);
-    access.setAlign(addr.align);
 
     switch (resultType) {
       case ValType::I32: {
         RegI32 rp, rv;
         pop2xI32(&rp, &rv);
         storeHeap(access, rp, AnyReg(rv));
         freeI32(rp);
         pushI32(rv);
@@ -5344,19 +5342,18 @@ BaseCompiler::emitStoreWithCoercion(ValT
     LinearMemoryAddress<Nothing> addr;
     Nothing unused_value;
     if (!iter_.readStore(resultType, Scalar::byteSize(viewType), &addr, &unused_value))
         return false;
 
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
-    MWasmMemoryAccess access(viewType);
+    MWasmMemoryAccess access(viewType, addr.align);
     access.setOffset(addr.offset);
-    access.setAlign(addr.align);
 
     if (resultType == ValType::F32 && viewType == Scalar::Float64) {
         RegF32 rv = popF32();
         RegF64 rw = needF64();
         masm.convertFloat32ToDouble(rv.reg, rw.reg);
         RegI32 rp = popI32();
         storeHeap(access, rp, AnyReg(rw));
         pushF32(rv);
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -584,124 +584,105 @@ class FunctionCompiler
 
     void assign(unsigned slot, MDefinition* def)
     {
         if (inDeadCode())
             return;
         curBlock_->setSlot(info().localSlot(slot), def);
     }
 
-    MDefinition* loadHeap(MDefinition* base, const MWasmMemoryAccess& access)
+  private:
+    MDefinition* loadHeapPrivate(MDefinition* base, const MWasmMemoryAccess& access)
     {
         if (inDeadCode())
             return nullptr;
-
-        MOZ_ASSERT(!Scalar::isSimdType(access.accessType()), "SIMD loads should use loadSimdHeap");
         MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
         curBlock_->add(load);
         return load;
     }
 
-    MDefinition* loadSimdHeap(MDefinition* base, const MWasmMemoryAccess& access)
+    void storeHeapPrivate(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
     {
         if (inDeadCode())
-            return nullptr;
-
-        MOZ_ASSERT(Scalar::isSimdType(access.accessType()),
-                   "loadSimdHeap can only load from a SIMD view");
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
-        curBlock_->add(load);
-        return load;
+            return;
+        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), base, access, v);
+        curBlock_->add(store);
+    }
+
+  public:
+    MDefinition* loadHeap(MDefinition* base, const MWasmMemoryAccess& access)
+    {
+        MOZ_ASSERT(!Scalar::isSimdType(access.accessType()), "SIMD loads should use loadSimdHeap");
+        return loadHeapPrivate(base, access);
+    }
+    MDefinition* loadSimdHeap(MDefinition* base, const MWasmMemoryAccess& access)
+    {
+        MOZ_ASSERT(Scalar::isSimdType(access.accessType()), "non-SIMD loads should use loadHeap");
+        return loadHeapPrivate(base, access);
+    }
+    MDefinition* loadAtomicHeap(MDefinition* base, const MWasmMemoryAccess& access)
+    {
+        return loadHeapPrivate(base, access);
     }
 
     void storeHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
     {
-        if (inDeadCode())
-            return;
-
-        MOZ_ASSERT(!Scalar::isSimdType(access.accessType()),
-                   "SIMD stores should use storeSimdHeap");
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), base, access, v);
-        curBlock_->add(store);
+        MOZ_ASSERT(!Scalar::isSimdType(access.accessType()), "SIMD store should use storeSimdHeap");
+        storeHeapPrivate(base, access, v);
     }
-
     void storeSimdHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
     {
-        if (inDeadCode())
-            return;
-
-        MOZ_ASSERT(Scalar::isSimdType(access.accessType()),
-                   "storeSimdHeap can only load from a SIMD view");
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), base, access, v);
-        curBlock_->add(store);
+        MOZ_ASSERT(Scalar::isSimdType(access.accessType()), "non-SIMD stores should use storeHeap");
+        storeHeapPrivate(base, access, v);
     }
-
-    MDefinition* atomicLoadHeap(MDefinition* base, const MWasmMemoryAccess& access)
+    void storeAtomicHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
     {
-        if (inDeadCode())
-            return nullptr;
-
-        MAsmJSLoadHeap* load = MAsmJSLoadHeap::New(alloc(), base, access);
-        curBlock_->add(load);
-        return load;
-    }
-
-    void atomicStoreHeap(MDefinition* base, const MWasmMemoryAccess& access,
-                         MDefinition* v)
-    {
-        if (inDeadCode())
-            return;
-
-        MAsmJSStoreHeap* store = MAsmJSStoreHeap::New(alloc(), base, access, v);
-        curBlock_->add(store);
+        storeHeapPrivate(base, access, v);
     }
 
     MDefinition* atomicCompareExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
                                            MDefinition* oldv, MDefinition* newv)
     {
         if (inDeadCode())
             return nullptr;
 
-        MAsmJSCompareExchangeHeap* cas =
-            MAsmJSCompareExchangeHeap::New(alloc(), base, access, oldv, newv);
+        auto* cas = MAsmJSCompareExchangeHeap::New(alloc(), base, access, oldv, newv);
         curBlock_->add(cas);
         return cas;
     }
 
     MDefinition* atomicExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
                                     MDefinition* value)
     {
         if (inDeadCode())
             return nullptr;
 
-        MAsmJSAtomicExchangeHeap* cas =
-            MAsmJSAtomicExchangeHeap::New(alloc(), base, access, value);
+        auto* cas = MAsmJSAtomicExchangeHeap::New(alloc(), base, access, value);
         curBlock_->add(cas);
         return cas;
     }
 
     MDefinition* atomicBinopHeap(js::jit::AtomicOp op,
                                  MDefinition* base, const MWasmMemoryAccess& access,
                                  MDefinition* v)
     {
         if (inDeadCode())
             return nullptr;
 
-        MAsmJSAtomicBinopHeap* binop =
-            MAsmJSAtomicBinopHeap::New(alloc(), op, base, access, v);
+        auto* binop = MAsmJSAtomicBinopHeap::New(alloc(), op, base, access, v);
         curBlock_->add(binop);
         return binop;
     }
 
     MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst, MIRType type)
     {
         if (inDeadCode())
             return nullptr;
-        MAsmJSLoadGlobalVar* load = MAsmJSLoadGlobalVar::New(alloc(), type, globalDataOffset,
-                                                             isConst);
+
+        auto* load = MAsmJSLoadGlobalVar::New(alloc(), type, globalDataOffset, isConst);
         curBlock_->add(load);
         return load;
     }
 
     void storeGlobalVar(uint32_t globalDataOffset, MDefinition* v)
     {
         if (inDeadCode())
             return;
@@ -2102,18 +2083,17 @@ SetHeapAccessOffset(FunctionCompiler& f,
 
 static bool
 EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType)
 {
     LinearMemoryAddress<MDefinition*> addr;
     if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr))
         return false;
 
-    MWasmMemoryAccess access(viewType);
-    access.setAlign(addr.align);
+    MWasmMemoryAccess access(viewType, addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
         return false;
 
     f.iter().setResult(f.loadHeap(base, access));
     return true;
 }
@@ -2121,18 +2101,17 @@ EmitLoad(FunctionCompiler& f, ValType ty
 static bool
 EmitStore(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
 {
     LinearMemoryAddress<MDefinition*> addr;
     MDefinition* value;
     if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType);
-    access.setAlign(addr.align);
+    MWasmMemoryAccess access(viewType, addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
         return false;
 
     f.storeHeap(base, access, value);
     return true;
 }
@@ -2147,18 +2126,17 @@ EmitStoreWithCoercion(FunctionCompiler& 
 
     if (resultType == ValType::F32 && viewType == Scalar::Float64)
         value = f.unary<MToDouble>(value);
     else if (resultType == ValType::F64 && viewType == Scalar::Float32)
         value = f.unary<MToFloat32>(value);
     else
         MOZ_CRASH("unexpected coerced store");
 
-    MWasmMemoryAccess access(viewType);
-    access.setAlign(addr.align);
+    MWasmMemoryAccess access(viewType, addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
         return false;
 
     f.storeHeap(base, access, value);
     return true;
 }
@@ -2224,60 +2202,57 @@ EmitBinaryMathBuiltinCall(FunctionCompil
 static bool
 EmitAtomicsLoad(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     if (!f.iter().readAtomicLoad(&addr, &viewType))
         return false;
 
-    MWasmMemoryAccess access(viewType, 0, MembarBeforeLoad, MembarAfterLoad);
-    access.setAlign(addr.align);
+    MWasmMemoryAccess access(viewType, addr.align, 0, MembarBeforeLoad, MembarAfterLoad);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
         return false;
 
-    f.iter().setResult(f.atomicLoadHeap(base, access));
+    f.iter().setResult(f.loadAtomicHeap(base, access));
     return true;
 }
 
 static bool
 EmitAtomicsStore(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     MDefinition* value;
     if (!f.iter().readAtomicStore(&addr, &viewType, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, 0, MembarBeforeStore, MembarAfterStore);
-    access.setAlign(addr.align);
+    MWasmMemoryAccess access(viewType, addr.align, 0, MembarBeforeStore, MembarAfterStore);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
         return false;
 
-    f.atomicStoreHeap(base, access, value);
+    f.storeAtomicHeap(base, access, value);
     f.iter().setResult(value);
     return true;
 }
 
 static bool
 EmitAtomicsBinOp(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     jit::AtomicOp op;
     MDefinition* value;
     if (!f.iter().readAtomicBinOp(&addr, &viewType, &op, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType);
-    access.setAlign(addr.align);
+    MWasmMemoryAccess access(viewType, addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
         return false;
 
     f.iter().setResult(f.atomicBinopHeap(op, base, access, value));
     return true;
 }
@@ -2287,18 +2262,17 @@ EmitAtomicsCompareExchange(FunctionCompi
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     MDefinition* oldValue;
     MDefinition* newValue;
     if (!f.iter().readAtomicCompareExchange(&addr, &viewType, &oldValue, &newValue))
         return false;
 
-    MWasmMemoryAccess access(viewType);
-    access.setAlign(addr.align);
+    MWasmMemoryAccess access(viewType, addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
         return false;
 
     f.iter().setResult(f.atomicCompareExchangeHeap(base, access, oldValue, newValue));
     return true;
 }
@@ -2307,18 +2281,17 @@ static bool
 EmitAtomicsExchange(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     MDefinition* value;
     if (!f.iter().readAtomicExchange(&addr, &viewType, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType);
-    access.setAlign(addr.align);
+    MWasmMemoryAccess access(viewType, addr.align);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base, IsAtomic::Yes))
         return false;
 
     f.iter().setResult(f.atomicExchangeHeap(base, access, value));
     return true;
 }
@@ -2535,18 +2508,17 @@ EmitSimdLoad(FunctionCompiler& f, ValTyp
 
     if (!numElems)
         numElems = defaultNumElems;
 
     LinearMemoryAddress<MDefinition*> addr;
     if (!f.iter().readLoad(resultType, Scalar::byteSize(viewType), &addr))
         return false;
 
-    MWasmMemoryAccess access(viewType, numElems);
-    access.setAlign(addr.align);
+    MWasmMemoryAccess access(viewType, addr.align, numElems);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
         return false;
 
     f.iter().setResult(f.loadSimdHeap(base, access));
     return true;
 }
@@ -2560,18 +2532,17 @@ EmitSimdStore(FunctionCompiler& f, ValTy
     if (!numElems)
         numElems = defaultNumElems;
 
     LinearMemoryAddress<MDefinition*> addr;
     MDefinition* value;
     if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value))
         return false;
 
-    MWasmMemoryAccess access(viewType, numElems);
-    access.setAlign(addr.align);
+    MWasmMemoryAccess access(viewType, addr.align, numElems);
 
     MDefinition* base = addr.base;
     if (!SetHeapAccessOffset(f, addr.offset, &access, &base))
         return false;
 
     f.storeSimdHeap(base, access, value);
     return true;
 }
--- a/js/src/asmjs/WasmSignalHandlers.cpp
+++ b/js/src/asmjs/WasmSignalHandlers.cpp
@@ -353,17 +353,17 @@ struct macos_arm_context {
 #endif
 
 static uint8_t**
 ContextToPC(CONTEXT* context)
 {
 #ifdef JS_CODEGEN_NONE
     MOZ_CRASH();
 #else
-     return reinterpret_cast<uint8_t**>(&PC_sig(context));
+    return reinterpret_cast<uint8_t**>(&PC_sig(context));
 #endif
 }
 
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
 
 #if defined(JS_CODEGEN_X64)
 MOZ_COLD static void
 SetFPRegToNaN(size_t size, void* fp_reg)
--- a/js/src/jit-test/tests/wasm/spec.js
+++ b/js/src/jit-test/tests/wasm/spec.js
@@ -293,17 +293,18 @@ function exec(e) {
         let caught = false;
         let errMsg = e.list[2];
         assert(errMsg.quoted, "assert_trap second argument must be a string");
         errMsg.quoted = false;
         try {
             exec(e.list[1]);
         } catch(err) {
             caught = true;
-            assert(err.toString().indexOf(errMsg) !== -1, `expected error message "${errMsg}", got "${err}"`);
+            if (err.toString().indexOf(errMsg) === -1)
+                warn(`expected error message "${errMsg}", got "${err}"`);
         }
         assert(caught, "assert_trap exception not caught");
         return;
     }
 
     if(!handleNonStandard(exprName, e)) {
         assert(false, "NYI: " + e);
     }
--- a/js/src/jit-test/tests/wasm/spec/func_ptrs.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/func_ptrs.wast.js
@@ -1,4 +1,2 @@
 // |jit-test| test-also-wasm-baseline
-// TODO Pass the table index in the error message?
-quit();
 var importedArgs = ['func_ptrs.wast']; load(scriptdir + '../spec.js');
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -12918,28 +12918,29 @@ class MWasmMemoryAccess
     uint32_t align_;
     Scalar::Type accessType_ : 8;
     bool needsBoundsCheck_;
     unsigned numSimdElems_;
     MemoryBarrierBits barrierBefore_;
     MemoryBarrierBits barrierAfter_;
 
   public:
-    explicit MWasmMemoryAccess(Scalar::Type accessType, unsigned numSimdElems = 0,
+    explicit MWasmMemoryAccess(Scalar::Type accessType, uint32_t align, unsigned numSimdElems = 0,
                                MemoryBarrierBits barrierBefore = MembarNobits,
                                MemoryBarrierBits barrierAfter = MembarNobits)
       : offset_(0),
-        align_(Scalar::byteSize(accessType)),
+        align_(align),
         accessType_(accessType),
         needsBoundsCheck_(true),
         numSimdElems_(numSimdElems),
         barrierBefore_(barrierBefore),
         barrierAfter_(barrierAfter)
     {
         MOZ_ASSERT(numSimdElems <= ScalarTypeToLength(accessType));
+        MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
     }
 
     uint32_t offset() const { return offset_; }
     uint32_t endOffset() const { return offset() + byteSize(); }
     uint32_t align() const { return align_; }
     Scalar::Type accessType() const { return accessType_; }
     unsigned byteSize() const {
         return Scalar::isSimdType(accessType())
@@ -12960,23 +12961,22 @@ class MAsmJSLoadHeap
   : public MUnaryInstruction,
     public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
     MAsmJSLoadHeap(MDefinition* base, const MWasmMemoryAccess& access)
       : MUnaryInstruction(base),
         MWasmMemoryAccess(access)
     {
-        if (access.barrierBefore()|access.barrierAfter())
-            setGuard();         // Not removable
+        if (access.barrierBefore() | access.barrierAfter())
+            setGuard(); // Not removable
         else
             setMovable();
 
-        MOZ_ASSERT(access.accessType() != Scalar::Uint8Clamped,
-                   "unexpected load heap in asm.js");
+        MOZ_ASSERT(access.accessType() != Scalar::Uint8Clamped, "unexpected load heap in asm.js");
         setResultType(ScalarTypeToMIRType(access.accessType()));
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSLoadHeap)
     TRIVIAL_NEW_WRAPPERS
 
     MDefinition* base() const { return getOperand(0); }
@@ -12993,23 +12993,22 @@ class MAsmJSLoadHeap
     AliasType mightAlias(const MDefinition* def) const override;
 };
 
 class MAsmJSStoreHeap
   : public MBinaryInstruction,
     public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSStoreHeap(MDefinition* base, const MWasmMemoryAccess& access,
-                    MDefinition* v)
+    MAsmJSStoreHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
       : MBinaryInstruction(base, v),
         MWasmMemoryAccess(access)
     {
-        if (access.barrierBefore()|access.barrierAfter())
-            setGuard();         // Not removable
+        if (access.barrierBefore() | access.barrierAfter())
+            setGuard(); // Not removable
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSStoreHeap)
     TRIVIAL_NEW_WRAPPERS
 
     MDefinition* base() const { return getOperand(0); }
     void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); }
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -217,17 +217,17 @@ class MIRGenerator
 
   public:
     AsmJSPerfSpewer& perfSpewer() { return asmJSPerfSpewer_; }
 #endif
 
   public:
     const JitCompileOptions options;
 
-    bool needsAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access) const;
+    bool needsBoundsCheckBranch(const MWasmMemoryAccess* access) const;
     size_t foldableOffsetRange(const MWasmMemoryAccess* access) const;
     size_t foldableOffsetRange(bool accessNeedsBoundsCheck, bool atomic) const;
 
   private:
     GraphSpewer gs_;
 
   public:
     GraphSpewer& graphSpewer() {
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -104,17 +104,17 @@ MIRGenerator::addAbortedPreliminaryGroup
             return;
     }
     AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!abortedPreliminaryGroups_.append(group))
         oomUnsafe.crash("addAbortedPreliminaryGroup");
 }
 
 bool
-MIRGenerator::needsAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access) const
+MIRGenerator::needsBoundsCheckBranch(const MWasmMemoryAccess* access) const
 {
     // A heap access needs a bounds-check branch if we're not relying on signal
     // handlers to catch errors, and if it's not proven to be within bounds.
     // We use signal-handlers on x64, but on x86 there isn't enough address
     // space for a guard region.  Also, on x64 the atomic loads and stores
     // can't (yet) use the signal handlers.
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
     if (usesSignalHandlersForAsmJSOOB_ && !access->isAtomicAccess())
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -2054,17 +2054,17 @@ Assembler::as_extdtr(LoadStore ls, int s
             extra_bits2 |= 0x2;
         }
         break;
       case 64:
         extra_bits2 = (ls == IsStore) ? 0x3 : 0x2;
         extra_bits1 = 0;
         break;
       default:
-        MOZ_CRASH("SAY WHAT?");
+        MOZ_CRASH("unexpected size in as_extdtr");
     }
     return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 |
                      addr.encode() | RT(rt) | mode | c);
 }
 
 BufferOffset
 Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
                 DTMMode mode, DTMWriteBack wb, Condition c)
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -277,17 +277,16 @@ struct ImmType : public ImmTag
 enum Index {
     Offset = 0 << 21 | 1<<24,
     PreIndex = 1 << 21 | 1 << 24,
     PostIndex = 0 << 21 | 0 << 24
     // The docs were rather unclear on this. It sounds like
     // 1 << 21 | 0 << 24 encodes dtrt.
 };
 
-// Seriously, wtf arm
 enum IsImmOp2_ {
     IsImmOp2    = 1 << 25,
     IsNotImmOp2 = 0 << 25
 };
 enum IsImmDTR_ {
     IsImmDTR    = 0 << 25,
     IsNotImmDTR = 1 << 25
 };
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -1121,18 +1121,18 @@ void
 MacroAssemblerARM::ma_strb(Register rt, DTRAddr addr, Index mode, Condition cc)
 {
     as_dtr(IsStore, 8, mode, rt, addr, cc);
 }
 
 // Specialty for moving N bits of data, where n == 8,16,32,64.
 BufferOffset
 MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
-                          Register rn, Register rm, Register rt,
-                          Index mode, Assembler::Condition cc, unsigned shiftAmount)
+                                    Register rn, Register rm, Register rt,
+                                    Index mode, Assembler::Condition cc, unsigned shiftAmount)
 {
     if (size == 32 || (size == 8 && !IsSigned))
         return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrRegImmShift(rm, LSL, shiftAmount)), cc);
 
     ScratchRegisterScope scratch(asMasm());
 
     if (shiftAmount != 0) {
         MOZ_ASSERT(rn != scratch);
@@ -1777,17 +1777,18 @@ MacroAssemblerARM::ma_vldr(VFPAddr addr,
 
 BufferOffset
 MacroAssemblerARM::ma_vldr(const Address& addr, VFPRegister dest, Condition cc)
 {
     return ma_vdtr(IsLoad, addr, dest, cc);
 }
 
 BufferOffset
-MacroAssemblerARM::ma_vldr(VFPRegister src, Register base, Register index, int32_t shift, Condition cc)
+MacroAssemblerARM::ma_vldr(VFPRegister src, Register base, Register index, int32_t shift,
+                           Condition cc)
 {
     ScratchRegisterScope scratch(asMasm());
     as_add(scratch, base, lsl(index, shift), LeaveCC, cc);
     return ma_vldr(Address(scratch, 0), src, cc);
 }
 
 BufferOffset
 MacroAssemblerARM::ma_vstr(VFPRegister src, VFPAddr addr, Condition cc)
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -297,24 +297,26 @@ class MacroAssemblerARM : public Assembl
     void ma_ldrb(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrsh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrsb(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
     void ma_ldrd(EDtrAddr addr, Register rt, DebugOnly<Register> rt2, Index mode = Offset, Condition cc = Always);
     void ma_strb(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
     void ma_strh(Register rt, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
     void ma_strd(Register rt, DebugOnly<Register> rt2, EDtrAddr addr, Index mode = Offset, Condition cc = Always);
+
     // Specialty for moving N bits of data, where n == 8,16,32,64.
     BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
-                          Register rn, Register rm, Register rt,
-                          Index mode = Offset, Condition cc = Always, unsigned scale = TimesOne);
+                                  Register rn, Register rm, Register rt,
+                                  Index mode = Offset, Condition cc = Always, unsigned scale = TimesOne);
 
     BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
-                          Register rn, Imm32 offset, Register rt,
-                          Index mode = Offset, Condition cc = Always);
+                                  Register rn, Imm32 offset, Register rt,
+                                  Index mode = Offset, Condition cc = Always);
+
     void ma_pop(Register r);
     void ma_push(Register r);
 
     void ma_vpop(VFPRegister r);
     void ma_vpush(VFPRegister r);
 
     // Barriers.
     void ma_dmb(BarrierOption option=BarrierSY);
@@ -388,24 +390,23 @@ class MacroAssemblerARM : public Assembl
 
     // Transfer (do not coerce) a gpr into a float
     void ma_vxfer(Register src, FloatRegister dest, Condition cc = Always);
     // Transfer (do not coerce) a couple of gpr into a double
     void ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc = Always);
 
     BufferOffset ma_vdtr(LoadStore ls, const Address& addr, VFPRegister dest, Condition cc = Always);
 
-
     BufferOffset ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
     BufferOffset ma_vldr(const Address& addr, VFPRegister dest, Condition cc = Always);
-    BufferOffset ma_vldr(VFPRegister src, Register base, Register index, int32_t shift = defaultShift, Condition cc = Always);
+    BufferOffset ma_vldr(VFPRegister src, Register base, Register index,
+                         int32_t shift = defaultShift, Condition cc = Always);
 
     BufferOffset ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
     BufferOffset ma_vstr(VFPRegister src, const Address& addr, Condition cc = Always);
-
     BufferOffset ma_vstr(VFPRegister src, Register base, Register index, int32_t shift,
                          int32_t offset, Condition cc = Always);
 
     void ma_call(ImmPtr dest);
 
     // Float registers can only be loaded/stored in continuous runs when using
     // vstm/vldm. This function breaks set into continuous runs and loads/stores
     // them at [rm]. rm will be modified and left in a state logically suitable
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -158,32 +158,32 @@ LIRGeneratorX64::visitAsmJSUnsignedToFlo
 void
 LIRGeneratorX64::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     // For simplicity, require a register if we're going to emit a bounds-check
     // branch, so that we don't have special cases for constants.
-    LAllocation baseAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
+    LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins)
                             ? useRegisterAtStart(base)
                             : useRegisterOrZeroAtStart(base);
 
     define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
 }
 
 void
 LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     // For simplicity, require a register if we're going to emit a bounds-check
     // branch, so that we don't have special cases for constants.
-    LAllocation baseAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
+    LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins)
                             ? useRegisterAtStart(base)
                             : useRegisterOrZeroAtStart(base);
 
     LAsmJSStoreHeap* lir = nullptr;  // initialize to silence GCC warning
     switch (ins->accessType()) {
       case Scalar::Int8:
       case Scalar::Uint8:
       case Scalar::Int16:
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -429,17 +429,17 @@ CodeGeneratorX86Shared::visitOffsetBound
 
 void
 CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access,
                                                    const MInstruction* mir,
                                                    Register ptr, Label* maybeFail)
 {
     // Emit a bounds-checking branch for |access|.
 
-    MOZ_ASSERT(gen->needsAsmJSBoundsCheckBranch(access));
+    MOZ_ASSERT(gen->needsBoundsCheckBranch(access));
 
     Label* pass = nullptr;
 
     // If we have a non-zero offset, it's possible that |ptr| itself is out of
     // bounds, while adding the offset computes an in-bounds address. To catch
     // this case, we need a second branch, which we emit out of line since it's
     // unlikely to be needed in normal programs.
     if (access->offset() != 0) {
@@ -466,31 +466,31 @@ CodeGeneratorX86Shared::emitAsmJSBoundsC
     masm.append(wasm::BoundsCheck(cmpOffset));
 }
 
 bool
 CodeGeneratorX86Shared::maybeEmitThrowingAsmJSBoundsCheck(const MWasmMemoryAccess* access,
                                                           const MInstruction* mir,
                                                           const LAllocation* ptr)
 {
-    if (!gen->needsAsmJSBoundsCheckBranch(access))
+    if (!gen->needsBoundsCheckBranch(access))
         return false;
 
     emitAsmJSBoundsCheckBranch(access, mir, ToRegister(ptr), nullptr);
     return true;
 }
 
 bool
 CodeGeneratorX86Shared::maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins,
                                                       OutOfLineLoadTypedArrayOutOfBounds** ool)
 {
     MOZ_ASSERT(!Scalar::isSimdType(mir->accessType()));
     *ool = nullptr;
 
-    if (!gen->needsAsmJSBoundsCheckBranch(mir))
+    if (!gen->needsBoundsCheckBranch(mir))
         return false;
 
     Label* rejoin = nullptr;
     if (!mir->isAtomicAccess()) {
         *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(ins->output()),
                                                                mir->accessType());
         addOutOfLineCode(*ool, mir);
         rejoin = (*ool)->entry();
@@ -502,33 +502,33 @@ CodeGeneratorX86Shared::maybeEmitAsmJSLo
 
 bool
 CodeGeneratorX86Shared::maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins,
                                                        Label** rejoin)
 {
     MOZ_ASSERT(!Scalar::isSimdType(mir->accessType()));
 
     *rejoin = nullptr;
-    if (!gen->needsAsmJSBoundsCheckBranch(mir))
+    if (!gen->needsBoundsCheckBranch(mir))
         return false;
 
     if (!mir->isAtomicAccess())
         *rejoin = alloc().lifoAlloc()->newInfallible<Label>();
 
     emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), *rejoin);
     return true;
 }
 
 void
 CodeGeneratorX86Shared::cleanupAfterAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access,
                                                            Register ptr)
 {
     // Clean up after performing a heap access checked by a branch.
 
-    MOZ_ASSERT(gen->needsAsmJSBoundsCheckBranch(access));
+    MOZ_ASSERT(gen->needsBoundsCheckBranch(access));
 
 #ifdef JS_CODEGEN_X64
     // If the offset is 0, we don't use an OffsetBoundsCheck.
     if (access->offset() != 0) {
         // Zero out the high 32 bits, in case the OffsetBoundsCheck code had to
         // sign-extend (movslq) the pointer value to get wraparound to work.
         masm.movl(ptr, ptr);
     }
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -206,52 +206,52 @@ LIRGeneratorX86::visitAsmJSUnsignedToFlo
 void
 LIRGeneratorX86::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     // For simplicity, require a register if we're going to emit a bounds-check
     // branch, so that we don't have special cases for constants.
-    LAllocation baseAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
+    LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins)
                             ? useRegisterAtStart(base)
                             : useRegisterOrZeroAtStart(base);
 
     define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
 }
 
 void
 LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     // For simplicity, require a register if we're going to emit a bounds-check
     // branch, so that we don't have special cases for constants.
-    LAllocation baseAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
+    LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins)
                             ? useRegisterAtStart(base)
                             : useRegisterOrZeroAtStart(base);
 
     LAsmJSStoreHeap* lir = nullptr;
     switch (ins->accessType()) {
       case Scalar::Int8: case Scalar::Uint8:
         // See comment for LIRGeneratorX86::useByteOpRegister.
         lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useFixed(ins->value(), eax));
         break;
       case Scalar::Int16: case Scalar::Uint16:
       case Scalar::Int32: case Scalar::Uint32:
       case Scalar::Float32: case Scalar::Float64:
       case Scalar::Float32x4:
       case Scalar::Int8x16:
       case Scalar::Int16x8:
       case Scalar::Int32x4:
-          // For now, don't allow constant values. The immediate operand
-          // affects instruction layout which affects patching.
-          lir = new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()));
-          break;
+        // For now, don't allow constant values. The immediate operand affects
+        // instruction layout which affects patching.
+        lir = new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()));
+        break;
       case Scalar::Uint8Clamped:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
     add(lir, ins);
 }
 
 void
--- a/js/src/vm/SharedMem.h
+++ b/js/src/vm/SharedMem.h
@@ -11,18 +11,18 @@
 
 template<typename T>
 class SharedMem
 {
     static_assert(mozilla::IsPointer<T>::value,
                   "SharedMem encapsulates pointer types");
 
     enum Sharedness {
-	IsUnshared,
-	IsShared
+        IsUnshared,
+        IsShared
     };
 
     T ptr_;
 #ifdef DEBUG
     Sharedness sharedness_;
 #endif
 
     SharedMem(T ptr, Sharedness sharedness)