Bug 1421244: Remove support for atomics on ARM < v7; r=lth, sr=luke
author Benjamin Bouvier <benj@benj.me>
date Wed, 29 Nov 2017 13:24:10 +0100
changeset 706644 0c2a8520c1d265889f9955b32d00fd2875a4c6b7
parent 706571 07f0607be42d8e7524d43786ff802fdedfc3e23d
child 706645 48d573a890201a591bda202ba5db37eb8559e26f
push id 91866
push user bmo:jbeich@FreeBSD.org
push date Sun, 03 Dec 2017 00:05:43 +0000
reviewers lth, luke
bugs 1421244
milestone 59.0a1
Bug 1421244: Remove support for atomics on ARM < v7; r=lth, sr=luke MozReview-Commit-ID: LQKX0y49mlq
js/src/builtin/AtomicsObject.cpp
js/src/builtin/AtomicsObject.h
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/CodeGenerator-arm.h
js/src/jit/arm/LIR-arm.h
js/src/jit/arm/LOpcodes-arm.h
js/src/jit/arm/Lowering-arm.cpp
js/src/jit/arm/Simulator-arm.h
js/src/jit/arm64/vixl/Simulator-vixl.h
js/src/wasm/WasmBuiltins.cpp
js/src/wasm/WasmFrameIter.cpp
js/src/wasm/WasmIonCompile.cpp
js/src/wasm/WasmJS.cpp
js/src/wasm/WasmTypes.h
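
For review context: ARMv6 lacks the byte/halfword exclusive instructions (LDREXB/LDREXH/STREXB/STREXH), so sub-word atomics could not be inlined and were instead routed through C++ callouts. With ARMv7 now required, the callouts and all their plumbing (LIR nodes, symbolic addresses, builtin thunks) can go. A minimal sketch of what the inlined path amounts to on ARMv7, using standard compiler builtins rather than the JIT's own emitter (the function name is illustrative):

    #include <cstdint>

    // On ARMv7 (-march=armv7-a), GCC/Clang lower this to an
    // ldrexb/strexb retry loop: exactly the byte-sized exclusive
    // access that ARMv6 cannot express, and that the removed
    // callouts emulated in C++.
    int8_t atomic_fetch_add_i8(int8_t* addr, int8_t value) {
        return __atomic_fetch_add(addr, value, __ATOMIC_SEQ_CST);
    }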
--- a/js/src/builtin/AtomicsObject.cpp
+++ b/js/src/builtin/AtomicsObject.cpp
@@ -511,173 +511,16 @@ js::atomics_isLockFree(JSContext* cx, un
             args.rval().setBoolean(false);
             return true;
         }
     }
     args.rval().setBoolean(jit::AtomicOperations::isLockfreeJS(size));
     return true;
 }
 
-// asm.js callouts for platforms that do not have non-word-sized
-// atomics where we don't want to inline the logic for the atomics.
-//
-// Memory will always be shared since the callouts are only called from
-// code that checks that the memory is shared.
-//
-// To test this, either run on eg Raspberry Pi Model 1, or invoke the ARM
-// simulator build with ARMHWCAP=vfp set.  Do not set any other flags; other
-// vfp/neon flags force ARMv7 to be set.
-
-int32_t
-js::atomics_add_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return PerformAdd::operate(heap.cast<int8_t*>() + offset, value);
-      case Scalar::Uint8:
-        return PerformAdd::operate(heap.cast<uint8_t*>() + offset, value);
-      case Scalar::Int16:
-        return PerformAdd::operate(heap.cast<int16_t*>() + (offset >> 1), value);
-      case Scalar::Uint16:
-        return PerformAdd::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_sub_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return PerformSub::operate(heap.cast<int8_t*>() + offset, value);
-      case Scalar::Uint8:
-        return PerformSub::operate(heap.cast<uint8_t*>() + offset, value);
-      case Scalar::Int16:
-        return PerformSub::operate(heap.cast<int16_t*>() + (offset >> 1), value);
-      case Scalar::Uint16:
-        return PerformSub::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_and_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return PerformAnd::operate(heap.cast<int8_t*>() + offset, value);
-      case Scalar::Uint8:
-        return PerformAnd::operate(heap.cast<uint8_t*>() + offset, value);
-      case Scalar::Int16:
-        return PerformAnd::operate(heap.cast<int16_t*>() + (offset >> 1), value);
-      case Scalar::Uint16:
-        return PerformAnd::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_or_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return PerformOr::operate(heap.cast<int8_t*>() + offset, value);
-      case Scalar::Uint8:
-        return PerformOr::operate(heap.cast<uint8_t*>() + offset, value);
-      case Scalar::Int16:
-        return PerformOr::operate(heap.cast<int16_t*>() + (offset >> 1), value);
-      case Scalar::Uint16:
-        return PerformOr::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_xor_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return PerformXor::operate(heap.cast<int8_t*>() + offset, value);
-      case Scalar::Uint8:
-        return PerformXor::operate(heap.cast<uint8_t*>() + offset, value);
-      case Scalar::Int16:
-        return PerformXor::operate(heap.cast<int16_t*>() + (offset >> 1), value);
-      case Scalar::Uint16:
-        return PerformXor::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_xchg_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return ExchangeOrStore<DoExchange>(Scalar::Int8, value, heap, offset);
-      case Scalar::Uint8:
-        return ExchangeOrStore<DoExchange>(Scalar::Uint8, value, heap, offset);
-      case Scalar::Int16:
-        return ExchangeOrStore<DoExchange>(Scalar::Int16, value, heap, offset>>1);
-      case Scalar::Uint16:
-        return ExchangeOrStore<DoExchange>(Scalar::Uint16, value, heap, offset>>1);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
-int32_t
-js::atomics_cmpxchg_asm_callout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t oldval, int32_t newval)
-{
-    if (size_t(offset) >= instance->memory()->volatileMemoryLength())
-        return 0;
-
-    SharedMem<void*> heap = instance->memoryBase().cast<void*>();
-    switch (Scalar::Type(vt)) {
-      case Scalar::Int8:
-        return CompareExchange(Scalar::Int8, oldval, newval, heap, offset);
-      case Scalar::Uint8:
-        return CompareExchange(Scalar::Uint8, oldval, newval, heap, offset);
-      case Scalar::Int16:
-        return CompareExchange(Scalar::Int16, oldval, newval, heap, offset>>1);
-      case Scalar::Uint16:
-        return CompareExchange(Scalar::Uint16, oldval, newval, heap, offset>>1);
-      default:
-        MOZ_CRASH("Invalid size");
-    }
-}
-
 namespace js {
 
 // Represents one waiting worker.
 //
 // The type is declared opaque in SharedArrayObject.h.  Instances of
 // js::FutexWaiter are stack-allocated and linked onto a list across a
 // call to FutexThread::wait().
 //
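
Note: all seven removed callouts share one shape: bounds-check the byte offset against the current memory length, then dispatch on the view type, with the offset arriving pre-scaled in bytes so the 16-bit views shift it right by one to get an element index. A condensed, hypothetical consolidation of that shared pattern, using the same SpiderMonkey internals as the hunk above (the real code repeats it once per operation):

    // Hypothetical consolidation of the removed callout bodies.
    // Offsets are in bytes; pointer arithmetic is in elements,
    // hence the >> 1 for the 16-bit views.
    template <typename Op>
    static int32_t
    AtomicBinopCallout(wasm::Instance* instance, int32_t vt, int32_t offset, int32_t value)
    {
        if (size_t(offset) >= instance->memory()->volatileMemoryLength())
            return 0;

        SharedMem<void*> heap = instance->memoryBase().cast<void*>();
        switch (Scalar::Type(vt)) {
          case Scalar::Int8:   return Op::operate(heap.cast<int8_t*>() + offset, value);
          case Scalar::Uint8:  return Op::operate(heap.cast<uint8_t*>() + offset, value);
          case Scalar::Int16:  return Op::operate(heap.cast<int16_t*>() + (offset >> 1), value);
          case Scalar::Uint16: return Op::operate(heap.cast<uint16_t*>() + (offset >> 1), value);
          default:             MOZ_CRASH("Invalid size");
        }
    }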
--- a/js/src/builtin/AtomicsObject.h
+++ b/js/src/builtin/AtomicsObject.h
@@ -33,26 +33,16 @@ MOZ_MUST_USE bool atomics_add(JSContext*
 MOZ_MUST_USE bool atomics_sub(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_and(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_or(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_xor(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_isLockFree(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_wait(JSContext* cx, unsigned argc, Value* vp);
 MOZ_MUST_USE bool atomics_wake(JSContext* cx, unsigned argc, Value* vp);
 
-/* asm.js callouts */
-namespace wasm { class Instance; }
-int32_t atomics_add_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-int32_t atomics_sub_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-int32_t atomics_and_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-int32_t atomics_or_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-int32_t atomics_xor_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-int32_t atomics_cmpxchg_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t oldval, int32_t newval);
-int32_t atomics_xchg_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);
-
 class FutexThread
 {
     friend class AutoLockFutexAPI;
 
 public:
     static MOZ_MUST_USE bool initialize();
     static void destroy();
 
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2544,83 +2544,31 @@ CodeGeneratorARM::visitWasmCompareExchan
     Register newval = ToRegister(ins->newValue());
 
     masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                         srcAddr, oldval, newval, InvalidReg,
                                         ToAnyRegister(ins->output()));
 }
 
 void
-CodeGeneratorARM::visitWasmCompareExchangeCallout(LWasmCompareExchangeCallout* ins)
-{
-    const MWasmCompareExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->access().offset() == 0);
-
-    Register ptr = ToRegister(ins->ptr());
-    Register oldval = ToRegister(ins->oldval());
-    Register newval = ToRegister(ins->newval());
-    Register tls = ToRegister(ins->tls());
-    Register instance = ToRegister(ins->getTemp(0));
-    Register viewType = ToRegister(ins->getTemp(1));
-
-    MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
-
-    masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
-    masm.ma_mov(Imm32(mir->access().type()), viewType);
-
-    masm.setupWasmABICall();
-    masm.passABIArg(instance);
-    masm.passABIArg(viewType);
-    masm.passABIArg(ptr);
-    masm.passABIArg(oldval);
-    masm.passABIArg(newval);
-    masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::AtomicCmpXchg);
-}
-
-void
 CodeGeneratorARM::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins)
 {
     MWasmAtomicExchangeHeap* mir = ins->mir();
 
     Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
 }
 
 void
-CodeGeneratorARM::visitWasmAtomicExchangeCallout(LWasmAtomicExchangeCallout* ins)
-{
-    const MWasmAtomicExchangeHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->access().offset() == 0);
-
-    Register ptr = ToRegister(ins->ptr());
-    Register value = ToRegister(ins->value());
-    Register tls = ToRegister(ins->tls());
-    Register instance = ToRegister(ins->getTemp(0));
-    Register viewType = ToRegister(ins->getTemp(1));
-
-    MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
-
-    masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
-    masm.ma_mov(Imm32(mir->access().type()), viewType);
-
-    masm.setupWasmABICall();
-    masm.passABIArg(instance);
-    masm.passABIArg(viewType);
-    masm.passABIArg(ptr);
-    masm.passABIArg(value);
-    masm.callWithABI(mir->bytecodeOffset(), wasm::SymbolicAddress::AtomicXchg);
-}
-
-void
 CodeGeneratorARM::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins)
 {
     MWasmAtomicBinopHeap* mir = ins->mir();
     MOZ_ASSERT(mir->hasUses());
 
     Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register flagTemp = ToRegister(ins->flagTemp());
@@ -2658,59 +2606,16 @@ CodeGeneratorARM::visitWasmAtomicBinopHe
 
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp);
     else
         atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp);
 }
 
 void
-CodeGeneratorARM::visitWasmAtomicBinopCallout(LWasmAtomicBinopCallout* ins)
-{
-    const MWasmAtomicBinopHeap* mir = ins->mir();
-    MOZ_ASSERT(mir->access().offset() == 0);
-
-    Register ptr = ToRegister(ins->ptr());
-    Register value = ToRegister(ins->value());
-    Register tls = ToRegister(ins->tls());
-    Register instance = ToRegister(ins->getTemp(0));
-    Register viewType = ToRegister(ins->getTemp(1));
-
-    masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
-    masm.move32(Imm32(mir->access().type()), viewType);
-
-    masm.setupWasmABICall();
-    masm.passABIArg(instance);
-    masm.passABIArg(viewType);
-    masm.passABIArg(ptr);
-    masm.passABIArg(value);
-
-    wasm::BytecodeOffset bytecodeOffset = mir->bytecodeOffset();
-    switch (mir->operation()) {
-      case AtomicFetchAddOp:
-        masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchAdd);
-        break;
-      case AtomicFetchSubOp:
-        masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchSub);
-        break;
-      case AtomicFetchAndOp:
-        masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchAnd);
-        break;
-      case AtomicFetchOrOp:
-        masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchOr);
-        break;
-      case AtomicFetchXorOp:
-        masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicFetchXor);
-        break;
-      default:
-        MOZ_CRASH("Unknown op");
-    }
-}
-
-void
 CodeGeneratorARM::visitWasmStackArg(LWasmStackArg* ins)
 {
     const MWasmStackArg* mir = ins->mir();
     Address dst(StackPointer, mir->spOffset());
     ScratchRegisterScope scratch(masm);
     SecondScratchRegisterScope scratch2(masm);
 
     if (ins->arg()->isConstant()) {
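
Note: the deleted codegen emitted a full ABI call per atomic: load the Instance* out of the wasm TLS block, materialize the view type as an immediate, then marshal (instance, viewType, ptr, operands...) through the platform ABI to one of the SymbolicAddress entry points. A simplified restatement of that emission sequence (masm calls as in the hunks above; register setup elided):

    // Simplified restatement of the removed emission sequence.
    masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
    masm.ma_mov(Imm32(viewType), viewTypeReg);      // Scalar::Type as immediate
    masm.setupWasmABICall();
    masm.passABIArg(instance);                      // arg 0: Instance*
    masm.passABIArg(viewTypeReg);                   // arg 1: view type
    masm.passABIArg(ptr);                           // arg 2: byte offset
    masm.passABIArg(value);                         // arg 3: operand
    masm.callWithABI(bytecodeOffset, wasm::SymbolicAddress::AtomicXchg);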
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -244,22 +244,19 @@ class CodeGeneratorARM : public CodeGene
     void visitWasmAddOffset(LWasmAddOffset* ins);
     void visitWasmStore(LWasmStore* ins);
     void visitWasmStoreI64(LWasmStoreI64* ins);
     void visitWasmUnalignedStore(LWasmUnalignedStore* ins);
     void visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* ins);
     void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
     void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
     void visitWasmCompareExchangeHeap(LWasmCompareExchangeHeap* ins);
-    void visitWasmCompareExchangeCallout(LWasmCompareExchangeCallout* ins);
     void visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins);
-    void visitWasmAtomicExchangeCallout(LWasmAtomicExchangeCallout* ins);
     void visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins);
     void visitWasmAtomicBinopHeapForEffect(LWasmAtomicBinopHeapForEffect* ins);
-    void visitWasmAtomicBinopCallout(LWasmAtomicBinopCallout* ins);
     void visitWasmStackArg(LWasmStackArg* ins);
     void visitWasmTruncateToInt32(LWasmTruncateToInt32* ins);
     void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
     void visitCopySignD(LCopySignD* ins);
     void visitCopySignF(LCopySignF* ins);
 
     void visitMemoryBarrier(LMemoryBarrier* ins);
 
--- a/js/src/jit/arm/LIR-arm.h
+++ b/js/src/jit/arm/LIR-arm.h
@@ -457,108 +457,16 @@ class LSoftUDivOrMod : public LBinaryCal
         setOperand(1, rhs);
     }
 
     MInstruction* mir() {
         return mir_->toInstruction();
     }
 };
 
-class LWasmCompareExchangeCallout : public LCallInstructionHelper<1, 4, 2>
-{
-  public:
-    LIR_HEADER(WasmCompareExchangeCallout)
-    LWasmCompareExchangeCallout(const LAllocation& ptr, const LAllocation& oldval,
-                                const LAllocation& newval, const LAllocation& tls,
-                                const LDefinition& temp1, const LDefinition& temp2)
-    {
-        setOperand(0, ptr);
-        setOperand(1, oldval);
-        setOperand(2, newval);
-        setOperand(3, tls);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-    }
-    const LAllocation* ptr() {
-        return getOperand(0);
-    }
-    const LAllocation* oldval() {
-        return getOperand(1);
-    }
-    const LAllocation* newval() {
-        return getOperand(2);
-    }
-    const LAllocation* tls() {
-        return getOperand(3);
-    }
-
-    const MWasmCompareExchangeHeap* mir() const {
-        return mir_->toWasmCompareExchangeHeap();
-    }
-};
-
-class LWasmAtomicExchangeCallout : public LCallInstructionHelper<1, 3, 2>
-{
-  public:
-    LIR_HEADER(WasmAtomicExchangeCallout)
-
-    LWasmAtomicExchangeCallout(const LAllocation& ptr, const LAllocation& value,
-                               const LAllocation& tls, const LDefinition& temp1,
-                               const LDefinition& temp2)
-    {
-        setOperand(0, ptr);
-        setOperand(1, value);
-        setOperand(2, tls);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-    }
-    const LAllocation* ptr() {
-        return getOperand(0);
-    }
-    const LAllocation* value() {
-        return getOperand(1);
-    }
-    const LAllocation* tls() {
-        return getOperand(2);
-    }
-
-    const MWasmAtomicExchangeHeap* mir() const {
-        return mir_->toWasmAtomicExchangeHeap();
-    }
-};
-
-class LWasmAtomicBinopCallout : public LCallInstructionHelper<1, 3, 2>
-{
-  public:
-    LIR_HEADER(WasmAtomicBinopCallout)
-    LWasmAtomicBinopCallout(const LAllocation& ptr, const LAllocation& value,
-                            const LAllocation& tls, const LDefinition& temp1,
-                            const LDefinition& temp2)
-    {
-        setOperand(0, ptr);
-        setOperand(1, value);
-        setOperand(2, tls);
-        setTemp(0, temp1);
-        setTemp(1, temp2);
-    }
-    const LAllocation* ptr() {
-        return getOperand(0);
-    }
-    const LAllocation* value() {
-        return getOperand(1);
-    }
-    const LAllocation* tls() {
-        return getOperand(2);
-    }
-
-    const MWasmAtomicBinopHeap* mir() const {
-        return mir_->toWasmAtomicBinopHeap();
-    }
-};
-
 class LWasmTruncateToInt64 : public LCallInstructionHelper<INT64_PIECES, 1, 0>
 {
   public:
     LIR_HEADER(WasmTruncateToInt64);
 
     LWasmTruncateToInt64(const LAllocation& in) {
         setOperand(0, in);
     }
--- a/js/src/jit/arm/LOpcodes-arm.h
+++ b/js/src/jit/arm/LOpcodes-arm.h
@@ -12,19 +12,16 @@
 #define LIR_CPU_OPCODE_LIST(_)     \
     _(BoxFloatingPoint)            \
     _(SoftDivI)                    \
     _(SoftModI)                    \
     _(ModMaskI)                    \
     _(UDiv)                        \
     _(UMod)                        \
     _(SoftUDivOrMod)               \
-    _(WasmCompareExchangeCallout)  \
-    _(WasmAtomicExchangeCallout)   \
-    _(WasmAtomicBinopCallout)      \
     _(DivOrModI64)                 \
     _(UDivOrModI64)                \
     _(WasmTruncateToInt64)         \
     _(WasmAtomicLoadI64)           \
     _(WasmAtomicStoreI64)          \
     _(WasmCompareExchangeI64)      \
     _(WasmAtomicBinopI64)          \
     _(WasmAtomicExchangeI64)       \
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -927,29 +927,17 @@ LIRGeneratorARM::visitWasmCompareExchang
                                                                        Register64(IntArgReg3,
                                                                                   IntArgReg2)));
         defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(IntArgReg1)),
                                                     LAllocation(AnyRegister(IntArgReg0))));
         return;
     }
 
     MOZ_ASSERT(ins->access().type() < Scalar::Float32);
-
-    if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
-        MOZ_ASSERT(ins->access().offset() == 0);
-        LWasmCompareExchangeCallout* lir =
-            new(alloc()) LWasmCompareExchangeCallout(useFixedAtStart(base, IntArgReg2),
-                                                     useFixedAtStart(ins->oldValue(), IntArgReg3),
-                                                     useFixedAtStart(ins->newValue(), CallTempReg0),
-                                                     useFixedAtStart(ins->tls(), WasmTlsReg),
-                                                     tempFixed(IntArgReg0),
-                                                     tempFixed(IntArgReg1));
-        defineReturn(lir, ins);
-        return;
-    }
+    MOZ_ASSERT(HasLDSTREXBHD(), "by HasCompilerSupport() constraints");
 
     LWasmCompareExchangeHeap* lir =
         new(alloc()) LWasmCompareExchangeHeap(useRegister(base),
                                               useRegister(ins->oldValue()),
                                               useRegister(ins->newValue()));
 
     define(lir, ins);
 }
@@ -966,27 +954,17 @@ LIRGeneratorARM::visitWasmAtomicExchange
                                                                                  IntArgReg2)),
                                                         ins->access());
         defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(IntArgReg1)),
                                                     LAllocation(AnyRegister(IntArgReg0))));
         return;
     }
 
     MOZ_ASSERT(ins->access().type() < Scalar::Float32);
-
-    if (byteSize(ins->access().type()) < 4 && !HasLDSTREXBHD()) {
-        MOZ_ASSERT(ins->access().offset() == 0);
-        // Call out on ARMv6.
-        defineReturn(new(alloc()) LWasmAtomicExchangeCallout(useFixedAtStart(ins->base(), IntArgReg2),
-                                                             useFixedAtStart(ins->value(), IntArgReg3),
-                                                             useFixedAtStart(ins->tls(), WasmTlsReg),
-                                                             tempFixed(IntArgReg0),
-                                                             tempFixed(IntArgReg1)), ins);
-        return;
-    }
+    MOZ_ASSERT(HasLDSTREXBHD(), "by HasCompilerSupport() constraints");
 
     const LAllocation base = useRegister(ins->base());
     const LAllocation value = useRegister(ins->value());
     define(new(alloc()) LWasmAtomicExchangeHeap(base, value), ins);
 }
 
 void
 LIRGeneratorARM::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins)
@@ -999,32 +977,21 @@ LIRGeneratorARM::visitWasmAtomicBinopHea
                                                      ins->access(),
                                                      ins->operation());
         defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(IntArgReg1)),
                                                     LAllocation(AnyRegister(IntArgReg0))));
         return;
     }
 
     MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+    MOZ_ASSERT(HasLDSTREXBHD(), "by HasCompilerSupport() constraints");
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
-    if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
-        MOZ_ASSERT(ins->access().offset() == 0);
-        LWasmAtomicBinopCallout* lir =
-            new(alloc()) LWasmAtomicBinopCallout(useFixedAtStart(base, IntArgReg2),
-                                                 useFixedAtStart(ins->value(), IntArgReg3),
-                                                 useFixedAtStart(ins->tls(), WasmTlsReg),
-                                                 tempFixed(IntArgReg0),
-                                                 tempFixed(IntArgReg1));
-        defineReturn(lir, ins);
-        return;
-    }
-
     if (!ins->hasUses()) {
         LWasmAtomicBinopHeapForEffect* lir =
             new(alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base),
                                                        useRegister(ins->value()),
                                                        /* flagTemp= */ temp());
         add(lir, ins);
         return;
     }
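
Note: the three lowering paths above trade a runtime branch for an assert: since HasCompilerSupport() now refuses to enable wasm at all when the byte/halfword exclusives are missing, lowering can assume the inline path is always available. An illustrative restatement of the new invariant (not the real call graph):

    // The availability check happens once, up front, instead of
    // per-lowered-instruction; lowering never needs a callout
    // fallback anymore.
    void LowerAtomicOp(/* ... */) {
        MOZ_ASSERT(jit::HasLDSTREXBHD(), "by HasCompilerSupport() constraints");
        // ... emit the inline LL/SC path unconditionally ...
    }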
--- a/js/src/jit/arm/Simulator-arm.h
+++ b/js/src/jit/arm/Simulator-arm.h
@@ -107,21 +107,23 @@ class Simulator
     static Simulator* Create(JSContext* cx);
 
     static void Destroy(Simulator* simulator);
 
     // Constructor/destructor are for internal use only; use the static methods above.
     explicit Simulator(JSContext* cx);
     ~Simulator();
 
+    static bool supportsAtomics() { return HasLDSTREXBHD(); }
+
     // The currently executing Simulator instance. Potentially there can be one
     // for each native thread.
     static Simulator* Current();
 
-    static inline uintptr_t StackLimit() {
+    static uintptr_t StackLimit() {
         return Simulator::Current()->stackLimit();
     }
 
     // Disassemble some instructions starting at instr and print them
     // on stdout.  Useful for working within GDB after a MOZ_CRASH(),
     // among other things.
     //
     // Typical use within a crashed instruction decoding method is simply:
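
Note: the new supportsAtomics() hook replaces the testing guidance in the comment deleted from AtomicsObject.cpp: under a simulator build with ARMHWCAP=vfp (and no other flags, since other vfp/neon flags imply ARMv7), HasLDSTREXBHD() reports false. A sketch of the consuming check, mirroring the WasmJS.cpp hunk below:

    #ifdef JS_SIMULATOR
        // With ARMHWCAP=vfp the ARM simulator models an ARMv6-class
        // core, so this returns false and wasm is disabled outright
        // instead of falling back to callouts.
        if (!Simulator::supportsAtomics())
            return false;
    #endif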
--- a/js/src/jit/arm64/vixl/Simulator-vixl.h
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.h
@@ -711,19 +711,22 @@ class Simulator : public DecoderVisitor 
   bool overRecursedWithExtra(uint32_t extra) const;
   int64_t call(uint8_t* entry, int argument_count, ...);
   static void* RedirectNativeFunction(void* nativeFunction, js::jit::ABIFunctionType type);
   void setGPR32Result(int32_t result);
   void setGPR64Result(int64_t result);
   void setFP32Result(float result);
   void setFP64Result(double result);
   void VisitCallRedirection(const Instruction* instr);
-  static inline uintptr_t StackLimit() {
+  static uintptr_t StackLimit() {
     return Simulator::Current()->stackLimit();
   }
+  static bool supportsAtomics() {
+    return true;
+  }
 
   void ResetState();
 
   // Run the simulator.
   virtual void Run();
   void RunFrom(const Instruction* first);
 
   // Simulation helpers.
--- a/js/src/wasm/WasmBuiltins.cpp
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -501,37 +501,16 @@ AddressOf(SymbolicAddress imm, ABIFuncti
         return FuncCast(Int64ToFloat32, *abiType);
 #if defined(JS_CODEGEN_ARM)
       case SymbolicAddress::aeabi_idivmod:
         *abiType = Args_General2;
         return FuncCast(__aeabi_idivmod, *abiType);
       case SymbolicAddress::aeabi_uidivmod:
         *abiType = Args_General2;
         return FuncCast(__aeabi_uidivmod, *abiType);
-      case SymbolicAddress::AtomicCmpXchg:
-        *abiType = Args_General5;
-        return FuncCast(atomics_cmpxchg_asm_callout, *abiType);
-      case SymbolicAddress::AtomicXchg:
-        *abiType = Args_General4;
-        return FuncCast(atomics_xchg_asm_callout, *abiType);
-      case SymbolicAddress::AtomicFetchAdd:
-        *abiType = Args_General4;
-        return FuncCast(atomics_add_asm_callout, *abiType);
-      case SymbolicAddress::AtomicFetchSub:
-        *abiType = Args_General4;
-        return FuncCast(atomics_sub_asm_callout, *abiType);
-      case SymbolicAddress::AtomicFetchAnd:
-        *abiType = Args_General4;
-        return FuncCast(atomics_and_asm_callout, *abiType);
-      case SymbolicAddress::AtomicFetchOr:
-        *abiType = Args_General4;
-        return FuncCast(atomics_or_asm_callout, *abiType);
-      case SymbolicAddress::AtomicFetchXor:
-        *abiType = Args_General4;
-        return FuncCast(atomics_xor_asm_callout, *abiType);
 #endif
       case SymbolicAddress::ModD:
         *abiType = Args_Double_DoubleDouble;
         return FuncCast(NumberMod, *abiType);
       case SymbolicAddress::SinD:
         *abiType = Args_Double_Double;
         return FuncCast<double (double)>(sin, *abiType);
       case SymbolicAddress::CosD:
@@ -635,23 +614,16 @@ wasm::NeedsBuiltinThunk(SymbolicAddress 
       case SymbolicAddress::TruncateDoubleToInt64:
       case SymbolicAddress::Uint64ToDouble:
       case SymbolicAddress::Uint64ToFloat32:
       case SymbolicAddress::Int64ToDouble:
       case SymbolicAddress::Int64ToFloat32:
 #if defined(JS_CODEGEN_ARM)
       case SymbolicAddress::aeabi_idivmod:
       case SymbolicAddress::aeabi_uidivmod:
-      case SymbolicAddress::AtomicCmpXchg:
-      case SymbolicAddress::AtomicXchg:
-      case SymbolicAddress::AtomicFetchAdd:
-      case SymbolicAddress::AtomicFetchSub:
-      case SymbolicAddress::AtomicFetchAnd:
-      case SymbolicAddress::AtomicFetchOr:
-      case SymbolicAddress::AtomicFetchXor:
 #endif
       case SymbolicAddress::ModD:
       case SymbolicAddress::SinD:
       case SymbolicAddress::CosD:
       case SymbolicAddress::TanD:
       case SymbolicAddress::ASinD:
       case SymbolicAddress::ACosD:
       case SymbolicAddress::ATanD:
--- a/js/src/wasm/WasmFrameIter.cpp
+++ b/js/src/wasm/WasmFrameIter.cpp
@@ -978,30 +978,16 @@ ThunkedNativeToDescription(SymbolicAddre
         return "call to native f64.convert_s/i64 (in wasm)";
       case SymbolicAddress::Int64ToFloat32:
         return "call to native f32.convert_s/i64 (in wasm)";
 #if defined(JS_CODEGEN_ARM)
       case SymbolicAddress::aeabi_idivmod:
         return "call to native i32.div_s (in wasm)";
       case SymbolicAddress::aeabi_uidivmod:
         return "call to native i32.div_u (in wasm)";
-      case SymbolicAddress::AtomicCmpXchg:
-        return "call to native atomic compare exchange (in wasm)";
-      case SymbolicAddress::AtomicXchg:
-        return "call to native atomic exchange (in wasm)";
-      case SymbolicAddress::AtomicFetchAdd:
-        return "call to native atomic fetch add (in wasm)";
-      case SymbolicAddress::AtomicFetchSub:
-        return "call to native atomic fetch sub (in wasm)";
-      case SymbolicAddress::AtomicFetchAnd:
-        return "call to native atomic fetch and (in wasm)";
-      case SymbolicAddress::AtomicFetchOr:
-        return "call to native atomic fetch or (in wasm)";
-      case SymbolicAddress::AtomicFetchXor:
-        return "call to native atomic fetch xor (in wasm)";
 #endif
       case SymbolicAddress::ModD:
         return "call to asm.js native f64 % (mod)";
       case SymbolicAddress::SinD:
         return "call to asm.js native f64 Math.sin";
       case SymbolicAddress::CosD:
         return "call to asm.js native f64 Math.cos";
       case SymbolicAddress::TanD:
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -796,19 +796,17 @@ class FunctionCompiler
 
     void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access, MDefinition** base)
     {
         MOZ_ASSERT(!inDeadCode());
 
         // Fold a constant base into the offset (so the base is 0 in which case
         // the codegen is optimized), if it doesn't wrap or trigger an
         // MWasmAddOffset.
-        if (!access->isAtomic() && !env_.isAsmJS() && // TODO bug 1421244
-            (*base)->isConstant())
-        {
+        if ((*base)->isConstant()) {
             uint32_t basePtr = (*base)->toConstant()->toInt32();
             uint32_t offset = access->offset();
 
             static_assert(OffsetGuardLimit < UINT32_MAX,
                           "checking for overflow against OffsetGuardLimit is enough.");
 
             if (offset < OffsetGuardLimit && basePtr < OffsetGuardLimit - offset) {
                 auto* ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
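
Note: with the TODO-guard removed, constant-base folding now also applies to atomic and asm.js accesses. The overflow reasoning: the offset is checked against OffsetGuardLimit and the base against OffsetGuardLimit minus the offset, so their sum stays below the limit and cannot wrap a uint32_t. A standalone sketch of the arithmetic, with a hypothetical limit value:

    #include <cstdint>

    // Hypothetical value; the real OffsetGuardLimit is derived from
    // the wasm guard-page configuration.
    constexpr uint32_t OffsetGuardLimit = uint32_t(1) << 25;

    // Returns true if basePtr can be folded into *offset, leaving
    // the base to be replaced by a constant 0 in the MIR graph.
    bool TryFoldConstantBase(uint32_t basePtr, uint32_t* offset) {
        static_assert(OffsetGuardLimit < UINT32_MAX,
                      "checking for overflow against OffsetGuardLimit is enough");
        if (*offset < OffsetGuardLimit && basePtr < OffsetGuardLimit - *offset) {
            *offset += basePtr;
            return true;
        }
        return false;
    }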
--- a/js/src/wasm/WasmJS.cpp
+++ b/js/src/wasm/WasmJS.cpp
@@ -69,16 +69,21 @@ wasm::HasCompilerSupport(JSContext* cx)
         return false;
 
 #ifdef ENABLE_WASM_THREAD_OPS
     // Wasm threads require 8-byte lock-free atomics.
     if (!jit::AtomicOperations::isLockfree8())
         return false;
 #endif
 
+#ifdef JS_SIMULATOR
+    if (!Simulator::supportsAtomics())
+        return false;
+#endif
+
 #if defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_ARM64)
     return false;
 #else
     return true;
 #endif
 }
 
 bool
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -1307,23 +1307,16 @@ typedef Vector<CallSiteTarget, 0, System
 // during static linking.
 
 enum class SymbolicAddress
 {
     ToInt32,
 #if defined(JS_CODEGEN_ARM)
     aeabi_idivmod,
     aeabi_uidivmod,
-    AtomicCmpXchg,
-    AtomicXchg,
-    AtomicFetchAdd,
-    AtomicFetchSub,
-    AtomicFetchAnd,
-    AtomicFetchOr,
-    AtomicFetchXor,
 #endif
     ModD,
     SinD,
     CosD,
     TanD,
     ASinD,
     ACosD,
     ATanD,