Bug 1394420 - Consolidate feeling-lucky atomics. r=froydnj
author Lars T Hansen <lhansen@mozilla.com>
date Thu, 11 Oct 2018 14:54:25 +0200
changeset 454632 b2ffeeac7326
parent 454631 2f5be1913934
push id 111302
push user lhansen@mozilla.com
push date Mon, 21 Jan 2019 07:34:02 +0000
treeherder mozilla-inbound@b2ffeeac7326
reviewers froydnj
bugs 1394420
milestone 66.0a1
Bug 1394420 - Consolidate feeling-lucky atomics. r=froydnj

With jitted primitives for racy atomic access in place, we can consolidate most C++ realizations of the atomic primitives into two headers, one for gcc/Clang and one for MSVC, that will be used as default fallbacks on non-tier-1 platforms. Non-tier-1 platforms can still implement their own atomics layer, as MIPS already does; we leave the MIPS code alone here.
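For context, both fallback headers are built on plain compiler intrinsics, as the file contents in the diff below show. A minimal standalone sketch of the gcc/Clang flavor of this pattern, using the __atomic intrinsics (illustration only, not the patch's code; the Example* names are hypothetical):

// Sketch of the "feeling-lucky" gcc/Clang approach: ordinary __atomic
// intrinsics with seq_cst ordering.  Formally this relies on C++ undefined
// behavior for racy JS/wasm accesses, but it is usually good enough on
// tier-3 platforms, which is the trade-off the header's name admits.
template <typename T>
inline T ExampleLoadSeqCst(T* addr) {
  T v;
  __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
  return v;
}

// Returns the value observed in memory before the operation, matching the
// engine's compareExchangeSeqCst contract.
template <typename T>
inline T ExampleCompareExchangeSeqCst(T* addr, T oldval, T newval) {
  __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST,
                            __ATOMIC_SEQ_CST);
  return oldval;
}
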
js/src/jit/AtomicOperations.h
js/src/jit/arm/AtomicOperations-arm.h
js/src/jit/arm64/AtomicOperations-arm64-gcc.h
js/src/jit/arm64/AtomicOperations-arm64-msvc.h
js/src/jit/none/AtomicOperations-feeling-lucky.h
js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h
js/src/jit/shared/AtomicOperations-feeling-lucky-msvc.h
js/src/jit/shared/AtomicOperations-feeling-lucky.h
js/src/jit/x86-shared/AtomicOperations-x86-shared-gcc.h
js/src/jit/x86-shared/AtomicOperations-x86-shared-msvc.h
--- a/js/src/jit/AtomicOperations.h
+++ b/js/src/jit/AtomicOperations.h
@@ -274,34 +274,16 @@ class AtomicOperations {
     memcpySafeWhenRacy(dest, src, nelem * sizeof(T));
   }
 
   template <typename T>
   static void podMoveSafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src,
                                   size_t nelem) {
     memmoveSafeWhenRacy(dest, src, nelem * sizeof(T));
   }
-
-#ifdef DEBUG
-  // Constraints that must hold for atomic operations on all tier-1 platforms:
-  //
-  // - atomic cells can be 1, 2, 4, or 8 bytes
-  // - all atomic operations are lock-free, including 8-byte operations
-  // - atomic operations can only be performed on naturally aligned cells
-  //
-  // (Tier-2 and tier-3 platforms need not support 8-byte atomics, and if they
-  // do, they need not be lock-free.)
-
-  template <typename T>
-  static bool tier1Constraints(const T* addr) {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-    return (sizeof(T) < 8 || (hasAtomic8() && isLockfree8())) &&
-           !(uintptr_t(addr) & (sizeof(T) - 1));
-  }
-#endif
 };
 
 inline bool AtomicOperations::isLockfreeJS(int32_t size) {
   // Keep this in sync with visitAtomicIsLockFree() in jit/CodeGenerator.cpp.
 
   switch (size) {
     case 1:
       return true;
@@ -335,17 +317,17 @@ inline bool AtomicOperations::isLockfree
 // (and if the problem isn't just that the compiler uses a different name for a
 // known architecture), you have basically three options:
 //
 //  - find an already-supported compiler for the platform and use that instead
 //
 //  - write your own support code for the platform+compiler and create a new
 //    case below
 //
-//  - include jit/none/AtomicOperations-feeling-lucky.h in a case for the
+//  - include jit/shared/AtomicOperations-feeling-lucky.h in a case for the
 //    platform below, if you have a gcc-compatible compiler and truly feel
 //    lucky.  You may have to add a little code to that file, too.
 //
 // Simulators are confusing.  These atomic primitives must be compatible with
 // the code that the JIT emits, but of course for an ARM simulator running on
 // x86 the primitives here will be for x86, not for ARM, while the JIT emits ARM
 // code.  Our ARM simulator solves that the easy way: by using these primitives
 // to implement its atomic operations.  For other simulators there may need to
@@ -353,65 +335,45 @@ inline bool AtomicOperations::isLockfree
 // example, for our ARM64 simulator the primitives could in principle
 // participate in the memory exclusivity monitors implemented by the simulator.
 // Such a solution is likely to be difficult.
 
 #if defined(JS_SIMULATOR_MIPS32)
 #  if defined(__clang__) || defined(__GNUC__)
 #    include "jit/mips-shared/AtomicOperations-mips-shared.h"
 #  else
-#    error "No AtomicOperations support for this platform+compiler combination"
+#    error "AtomicOperations on MIPS-32 for unknown compiler"
 #  endif
 #elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
     defined(_M_IX86)
 #  if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
 #    include "jit/shared/AtomicOperations-shared-jit.h"
-#  elif defined(__clang__) || defined(__GNUC__)
-#    include "jit/x86-shared/AtomicOperations-x86-shared-gcc.h"
-#  elif defined(_MSC_VER)
-#    include "jit/x86-shared/AtomicOperations-x86-shared-msvc.h"
 #  else
-#    error "No AtomicOperations support for this platform+compiler combination"
+#    include "jit/shared/AtomicOperations-feeling-lucky.h"
 #  endif
 #elif defined(__arm__)
 #  if defined(JS_CODEGEN_ARM)
 #    include "jit/shared/AtomicOperations-shared-jit.h"
-#  elif defined(__clang__) || defined(__GNUC__)
-#    include "jit/arm/AtomicOperations-arm.h"
 #  else
-#    error "No AtomicOperations support for this platform+compiler combination"
+#    include "jit/shared/AtomicOperations-feeling-lucky.h"
 #  endif
 #elif defined(__aarch64__) || defined(_M_ARM64)
 #  if defined(JS_CODEGEN_ARM64)
 #    include "jit/shared/AtomicOperations-shared-jit.h"
-#  elif defined(__clang__) || defined(__GNUC__)
-#    include "jit/arm64/AtomicOperations-arm64-gcc.h"
-#  elif defined(_MSC_VER)
-#    include "jit/arm64/AtomicOperations-arm64-msvc.h"
 #  else
-#    error "No AtomicOperations support for this platform+compiler combination"
+#    include "jit/shared/AtomicOperations-feeling-lucky.h"
 #  endif
 #elif defined(__mips__)
 #  if defined(__clang__) || defined(__GNUC__)
 #    include "jit/mips-shared/AtomicOperations-mips-shared.h"
 #  else
-#    error "No AtomicOperations support for this platform+compiler combination"
+#    error "AtomicOperations on MIPS for an unknown compiler"
 #  endif
-#elif defined(__ppc__) || defined(__PPC__)
-#  include "jit/none/AtomicOperations-feeling-lucky.h"
-#elif defined(__sparc__)
-#  include "jit/none/AtomicOperations-feeling-lucky.h"
-#elif defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
-    defined(__PPC64LE__)
-#  include "jit/none/AtomicOperations-feeling-lucky.h"
-#elif defined(__alpha__)
-#  include "jit/none/AtomicOperations-feeling-lucky.h"
-#elif defined(__hppa__)
-#  include "jit/none/AtomicOperations-feeling-lucky.h"
-#elif defined(__sh__)
-#  include "jit/none/AtomicOperations-feeling-lucky.h"
-#elif defined(__s390__) || defined(__s390x__)
-#  include "jit/none/AtomicOperations-feeling-lucky.h"
+#elif defined(__ppc__) || defined(__PPC__) || defined(__sparc__) ||     \
+    defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
+    defined(__PPC64LE__) || defined(__alpha__) || defined(__hppa__) ||  \
+    defined(__sh__) || defined(__s390__) || defined(__s390x__)
+#  include "jit/shared/AtomicOperations-feeling-lucky.h"
 #else
 #  error "No AtomicOperations support provided for this platform"
 #endif
 
 #endif  // jit_AtomicOperations_h
deleted file mode 100644
--- a/js/src/jit/arm/AtomicOperations-arm.h
+++ /dev/null
@@ -1,230 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
- * vim: set ts=8 sts=2 et sw=2 tw=80:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef jit_arm_AtomicOperations_arm_h
-#define jit_arm_AtomicOperations_arm_h
-
-#include "jit/arm/Architecture-arm.h"
-
-#include "vm/ArrayBufferObject.h"
-
-// For documentation, see jit/AtomicOperations.h
-
-// NOTE, this file is *not* used with the ARM simulator, only when compiling for
-// actual ARM hardware.  The simulators get the files that are appropriate for
-// the hardware the simulator is running on.  See the comments before the
-// #include nest at the bottom of jit/AtomicOperations.h for more information.
-
-// Firefox requires gcc > 4.8, so we will always have the __atomic intrinsics
-// added for use in C++11 <atomic>.
-//
-// Note that using these intrinsics for most operations is not correct: the code
-// has undefined behavior.  The gcc documentation states that the compiler
-// assumes the code is race free.  This supposedly means C++ will allow some
-// instruction reorderings (effectively those allowed by TSO) even for seq_cst
-// ordered operations, but these reorderings are not allowed by JS.  To do
-// better we will end up with inline assembler or JIT-generated code.
-
-#if !defined(__clang__) && !defined(__GNUC__)
-#  error "This file only for gcc-compatible compilers"
-#endif
-
-inline bool js::jit::AtomicOperations::Initialize() {
-  // Nothing
-  return true;
-}
-
-inline void js::jit::AtomicOperations::ShutDown() {
-  // Nothing
-}
-
-inline bool js::jit::AtomicOperations::hasAtomic8() {
-  // This guard is really only for tier-2 and tier-3 systems: LDREXD and
-  // STREXD have been available since ARMv6K, and only ARMv7 and later are
-  // tier-1.
-  return HasLDSTREXBHD();
-}
-
-inline bool js::jit::AtomicOperations::isLockfree8() {
-  // The JIT and the C++ compiler must agree on whether to use atomics
-  // for 64-bit accesses.  There are two ways to do this: either the
-  // JIT defers to the C++ compiler (so if the C++ code is compiled
-  // for ARMv6, say, and __atomic_always_lock_free(8) is false, then the
-  // JIT ignores the fact that the program is running on ARMv7 or newer);
-  // or the C++ code in this file calls out to run-time generated code
-  // to do whatever the JIT does.
-  //
-  // For now, make the JIT defer to the C++ compiler when we know what
-  // the C++ compiler will do, otherwise assume a lock is needed.
-  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
-  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
-  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
-
-  return hasAtomic8() && __atomic_always_lock_free(sizeof(int64_t), 0);
-}
-
-inline void js::jit::AtomicOperations::fenceSeqCst() {
-  __atomic_thread_fence(__ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  T v;
-  __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
-  return v;
-}
-
-template <typename T>
-inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  T v;
-  __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
-  return v;
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval,
-                                                          T newval) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST,
-                            __ATOMIC_SEQ_CST);
-  return oldval;
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  T v;
-  __atomic_load(addr, &v, __ATOMIC_RELAXED);
-  return v;
-}
-
-namespace js {
-namespace jit {
-
-#define GCC_RACYLOADOP(T)                                         \
-  template <>                                                     \
-  inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) { \
-    return *addr;                                                 \
-  }
-
-// On 32-bit platforms, loadSafeWhenRacy need not be access-atomic for 64-bit
-// data, so just use regular accesses instead of the expensive __atomic_load
-// solution which must use LDREXD/CLREX.
-#ifndef JS_64BIT
-GCC_RACYLOADOP(int64_t)
-GCC_RACYLOADOP(uint64_t)
-#endif
-
-// Float and double accesses are not access-atomic.
-GCC_RACYLOADOP(float)
-GCC_RACYLOADOP(double)
-
-// Clang requires a specialization for uint8_clamped.
-template <>
-inline uint8_clamped js::jit::AtomicOperations::loadSafeWhenRacy(
-    uint8_clamped* addr) {
-  uint8_t v;
-  __atomic_load(&addr->val, &v, __ATOMIC_RELAXED);
-  return uint8_clamped(v);
-}
-
-#undef GCC_RACYLOADOP
-
-}  // namespace jit
-}  // namespace js
-
-template <typename T>
-inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  __atomic_store(addr, &val, __ATOMIC_RELAXED);
-}
-
-namespace js {
-namespace jit {
-
-#define GCC_RACYSTOREOP(T)                                                   \
-  template <>                                                                \
-  inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) { \
-    *addr = val;                                                             \
-  }
-
-// On 32-bit platforms, storeSafeWhenRacy need not be access-atomic for 64-bit
-// data, so just use regular accesses instead of the expensive __atomic_store
-// solution which must use LDREXD/STREXD.
-#ifndef JS_64BIT
-GCC_RACYSTOREOP(int64_t)
-GCC_RACYSTOREOP(uint64_t)
-#endif
-
-// Float and double accesses are not access-atomic.
-GCC_RACYSTOREOP(float)
-GCC_RACYSTOREOP(double)
-
-// Clang requires a specialization for uint8_clamped.
-template <>
-inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr,
-                                                         uint8_clamped val) {
-  __atomic_store(&addr->val, &val.val, __ATOMIC_RELAXED);
-}
-
-#undef GCC_RACYSTOREOP
-
-}  // namespace jit
-}  // namespace js
-
-inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
-                                                          const void* src,
-                                                          size_t nbytes) {
-  MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
-  MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
-  memcpy(dest, src, nbytes);
-}
-
-inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
-                                                           const void* src,
-                                                           size_t nbytes) {
-  memmove(dest, src, nbytes);
-}
-
-#endif  // jit_arm_AtomicOperations_arm_h
deleted file mode 100644
--- a/js/src/jit/arm64/AtomicOperations-arm64-gcc.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
- * vim: set ts=8 sts=2 et sw=2 tw=80:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-/* For documentation, see jit/AtomicOperations.h */
-
-#ifndef jit_arm64_AtomicOperations_arm64_h
-#define jit_arm64_AtomicOperations_arm64_h
-
-#include "mozilla/Assertions.h"
-#include "mozilla/Types.h"
-
-#include "vm/ArrayBufferObject.h"
-
-#if !defined(__clang__) && !defined(__GNUC__)
-#  error "This file only for gcc-compatible compilers"
-#endif
-
-inline bool js::jit::AtomicOperations::Initialize() {
-  // Nothing
-  return true;
-}
-
-inline void js::jit::AtomicOperations::ShutDown() {
-  // Nothing
-}
-
-inline bool js::jit::AtomicOperations::hasAtomic8() { return true; }
-
-inline bool js::jit::AtomicOperations::isLockfree8() {
-  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
-  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
-  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
-  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
-  return true;
-}
-
-inline void js::jit::AtomicOperations::fenceSeqCst() {
-  __atomic_thread_fence(__ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  T v;
-  __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
-  return v;
-}
-
-template <typename T>
-inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  T v;
-  __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
-  return v;
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval,
-                                                          T newval) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST,
-                            __ATOMIC_SEQ_CST);
-  return oldval;
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  T v;
-  __atomic_load(addr, &v, __ATOMIC_RELAXED);
-  return v;
-}
-
-namespace js {
-namespace jit {
-
-// Clang requires a specialization for uint8_clamped.
-template <>
-inline js::uint8_clamped js::jit::AtomicOperations::loadSafeWhenRacy(
-    js::uint8_clamped* addr) {
-  uint8_t v;
-  __atomic_load(&addr->val, &v, __ATOMIC_RELAXED);
-  return js::uint8_clamped(v);
-}
-
-}  // namespace jit
-}  // namespace js
-
-template <typename T>
-inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  __atomic_store(addr, &val, __ATOMIC_RELAXED);
-}
-
-namespace js {
-namespace jit {
-
-// Clang requires a specialization for uint8_clamped.
-template <>
-inline void js::jit::AtomicOperations::storeSafeWhenRacy(
-    js::uint8_clamped* addr, js::uint8_clamped val) {
-  __atomic_store(&addr->val, &val.val, __ATOMIC_RELAXED);
-}
-
-}  // namespace jit
-}  // namespace js
-
-inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
-                                                          const void* src,
-                                                          size_t nbytes) {
-  MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
-  MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
-  memcpy(dest, src, nbytes);
-}
-
-inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
-                                                           const void* src,
-                                                           size_t nbytes) {
-  memmove(dest, src, nbytes);
-}
-
-#endif  // jit_arm64_AtomicOperations_arm64_h
deleted file mode 100644
--- a/js/src/jit/arm64/AtomicOperations-arm64-msvc.h
+++ /dev/null
@@ -1,378 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
- * vim: set ts=8 sts=2 et sw=2 tw=80:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef jit_shared_AtomicOperations_x86_shared_msvc_h
-#define jit_shared_AtomicOperations_x86_shared_msvc_h
-
-#include "mozilla/Assertions.h"
-#include "mozilla/Types.h"
-
-#if !defined(_MSC_VER)
-#  error "This file only for Microsoft Visual C++"
-#endif
-
-// For overall documentation, see jit/AtomicOperations.h/
-//
-// For general comments on lock-freedom, access-atomicity, and related matters
-// on x86 and x64, notably for justification of the implementations of the
-// 64-bit primitives on 32-bit systems, see the comment block in
-// AtomicOperations-x86-shared-gcc.h.
-
-// Below, _ReadWriteBarrier is a compiler directive, preventing reordering of
-// instructions and reuse of memory values across it in the compiler, but having
-// no impact on what the CPU does.
-
-// Note, here we use MSVC intrinsics directly.  But MSVC supports a slightly
-// higher level of function which uses the intrinsic when possible (8, 16, and
-// 32-bit operations, and 64-bit operations on 64-bit systems) and otherwise
-// falls back on CMPXCHG8B for 64-bit operations on 32-bit systems.  We could be
-// using those functions in many cases here (though not all).  I have not done
-// so because (a) I don't yet know how far back those functions are supported
-// and (b) I expect we'll end up dropping into assembler here eventually so as
-// to guarantee that the C++ compiler won't optimize the code.
-
-// Note, _InterlockedCompareExchange takes the *new* value as the second
-// argument and the *comparand* (expected old value) as the third argument.
-
-inline bool js::jit::AtomicOperations::Initialize() {
-  // Nothing
-  return true;
-}
-
-inline void js::jit::AtomicOperations::ShutDown() {
-  // Nothing
-}
-
-inline bool js::jit::AtomicOperations::hasAtomic8() { return true; }
-
-inline bool js::jit::AtomicOperations::isLockfree8() {
-  // The MSDN docs suggest very strongly that if code is compiled for Pentium
-  // or better the 64-bit primitives will be lock-free, see eg the "Remarks"
-  // secion of the page for _InterlockedCompareExchange64, currently here:
-  // https://msdn.microsoft.com/en-us/library/ttk2z1ws%28v=vs.85%29.aspx
-  //
-  // But I've found no way to assert that at compile time or run time, there
-  // appears to be no WinAPI is_lock_free() test.
-
-  return true;
-}
-
-inline void js::jit::AtomicOperations::fenceSeqCst() {
-  _ReadWriteBarrier();
-  // MemoryBarrier is defined in winnt.h, which we don't want to include here.
-  // This expression is the expansion of MemoryBarrier.
-  __dmb(_ARM64_BARRIER_SY);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  _ReadWriteBarrier();
-  T v = *addr;
-  _ReadWriteBarrier();
-  return v;
-}
-
-#ifdef _M_IX86
-namespace js {
-namespace jit {
-
-#  define MSC_LOADOP(T)                                                       \
-    template <>                                                               \
-    inline T AtomicOperations::loadSeqCst(T* addr) {                          \
-      MOZ_ASSERT(tier1Constraints(addr));                                     \
-      _ReadWriteBarrier();                                                    \
-      return (T)_InterlockedCompareExchange64((__int64 volatile*)addr, 0, 0); \
-    }
-
-MSC_LOADOP(int64_t)
-MSC_LOADOP(uint64_t)
-
-#  undef MSC_LOADOP
-
-}  // namespace jit
-}  // namespace js
-#endif  // _M_IX86
-
-template <typename T>
-inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  _ReadWriteBarrier();
-  *addr = val;
-  fenceSeqCst();
-}
-
-#ifdef _M_IX86
-namespace js {
-namespace jit {
-
-#  define MSC_STOREOP(T)                                             \
-    template <>                                                      \
-    inline void AtomicOperations::storeSeqCst(T* addr, T val) {      \
-      MOZ_ASSERT(tier1Constraints(addr));                            \
-      _ReadWriteBarrier();                                           \
-      T oldval = *addr;                                              \
-      for (;;) {                                                     \
-        T nextval = (T)_InterlockedCompareExchange64(                \
-            (__int64 volatile*)addr, (__int64)val, (__int64)oldval); \
-        if (nextval == oldval) break;                                \
-        oldval = nextval;                                            \
-      }                                                              \
-      _ReadWriteBarrier();                                           \
-    }
-
-MSC_STOREOP(int64_t)
-MSC_STOREOP(uint64_t)
-
-#  undef MSC_STOREOP
-
-}  // namespace jit
-}  // namespace js
-#endif  // _M_IX86
-
-#define MSC_EXCHANGEOP(T, U, xchgop)                          \
-  template <>                                                 \
-  inline T AtomicOperations::exchangeSeqCst(T* addr, T val) { \
-    MOZ_ASSERT(tier1Constraints(addr));                       \
-    return (T)xchgop((U volatile*)addr, (U)val);              \
-  }
-
-#ifdef _M_IX86
-#  define MSC_EXCHANGEOP_CAS(T)                                      \
-    template <>                                                      \
-    inline T AtomicOperations::exchangeSeqCst(T* addr, T val) {      \
-      MOZ_ASSERT(tier1Constraints(addr));                            \
-      _ReadWriteBarrier();                                           \
-      T oldval = *addr;                                              \
-      for (;;) {                                                     \
-        T nextval = (T)_InterlockedCompareExchange64(                \
-            (__int64 volatile*)addr, (__int64)val, (__int64)oldval); \
-        if (nextval == oldval) break;                                \
-        oldval = nextval;                                            \
-      }                                                              \
-      _ReadWriteBarrier();                                           \
-      return oldval;                                                 \
-    }
-#endif  // _M_IX86
-
-namespace js {
-namespace jit {
-
-MSC_EXCHANGEOP(int8_t, char, _InterlockedExchange8)
-MSC_EXCHANGEOP(uint8_t, char, _InterlockedExchange8)
-MSC_EXCHANGEOP(int16_t, short, _InterlockedExchange16)
-MSC_EXCHANGEOP(uint16_t, short, _InterlockedExchange16)
-MSC_EXCHANGEOP(int32_t, long, _InterlockedExchange)
-MSC_EXCHANGEOP(uint32_t, long, _InterlockedExchange)
-
-#ifdef _M_IX86
-MSC_EXCHANGEOP_CAS(int64_t)
-MSC_EXCHANGEOP_CAS(uint64_t)
-#else
-MSC_EXCHANGEOP(int64_t, __int64, _InterlockedExchange64)
-MSC_EXCHANGEOP(uint64_t, __int64, _InterlockedExchange64)
-#endif
-
-}  // namespace jit
-}  // namespace js
-
-#undef MSC_EXCHANGEOP
-#undef MSC_EXCHANGEOP_CAS
-
-#define MSC_CAS(T, U, cmpxchg)                                        \
-  template <>                                                         \
-  inline T AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, \
-                                                   T newval) {        \
-    MOZ_ASSERT(tier1Constraints(addr));                               \
-    return (T)cmpxchg((U volatile*)addr, (U)newval, (U)oldval);       \
-  }
-
-namespace js {
-namespace jit {
-
-MSC_CAS(int8_t, char, _InterlockedCompareExchange8)
-MSC_CAS(uint8_t, char, _InterlockedCompareExchange8)
-MSC_CAS(int16_t, short, _InterlockedCompareExchange16)
-MSC_CAS(uint16_t, short, _InterlockedCompareExchange16)
-MSC_CAS(int32_t, long, _InterlockedCompareExchange)
-MSC_CAS(uint32_t, long, _InterlockedCompareExchange)
-MSC_CAS(int64_t, __int64, _InterlockedCompareExchange64)
-MSC_CAS(uint64_t, __int64, _InterlockedCompareExchange64)
-
-}  // namespace jit
-}  // namespace js
-
-#undef MSC_CAS
-
-#define MSC_FETCHADDOP(T, U, xadd)                            \
-  template <>                                                 \
-  inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
-    MOZ_ASSERT(tier1Constraints(addr));                       \
-    return (T)xadd((U volatile*)addr, (U)val);                \
-  }
-
-#define MSC_FETCHSUBOP(T)                                     \
-  template <>                                                 \
-  inline T AtomicOperations::fetchSubSeqCst(T* addr, T val) { \
-    return fetchAddSeqCst(addr, (T)(0 - val));                \
-  }
-
-#ifdef _M_IX86
-#  define MSC_FETCHADDOP_CAS(T)                                               \
-    template <>                                                               \
-    inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) {               \
-      MOZ_ASSERT(tier1Constraints(addr));                                     \
-      _ReadWriteBarrier();                                                    \
-      T oldval = *addr;                                                       \
-      for (;;) {                                                              \
-        T nextval = (T)_InterlockedCompareExchange64((__int64 volatile*)addr, \
-                                                     (__int64)(oldval + val), \
-                                                     (__int64)oldval);        \
-        if (nextval == oldval) break;                                         \
-        oldval = nextval;                                                     \
-      }                                                                       \
-      _ReadWriteBarrier();                                                    \
-      return oldval;                                                          \
-    }
-#endif  // _M_IX86
-
-namespace js {
-namespace jit {
-
-MSC_FETCHADDOP(int8_t, char, _InterlockedExchangeAdd8)
-MSC_FETCHADDOP(uint8_t, char, _InterlockedExchangeAdd8)
-MSC_FETCHADDOP(int16_t, short, _InterlockedExchangeAdd16)
-MSC_FETCHADDOP(uint16_t, short, _InterlockedExchangeAdd16)
-MSC_FETCHADDOP(int32_t, long, _InterlockedExchangeAdd)
-MSC_FETCHADDOP(uint32_t, long, _InterlockedExchangeAdd)
-
-#ifdef _M_IX86
-MSC_FETCHADDOP_CAS(int64_t)
-MSC_FETCHADDOP_CAS(uint64_t)
-#else
-MSC_FETCHADDOP(int64_t, __int64, _InterlockedExchangeAdd64)
-MSC_FETCHADDOP(uint64_t, __int64, _InterlockedExchangeAdd64)
-#endif
-
-MSC_FETCHSUBOP(int8_t)
-MSC_FETCHSUBOP(uint8_t)
-MSC_FETCHSUBOP(int16_t)
-MSC_FETCHSUBOP(uint16_t)
-MSC_FETCHSUBOP(int32_t)
-MSC_FETCHSUBOP(uint32_t)
-MSC_FETCHSUBOP(int64_t)
-MSC_FETCHSUBOP(uint64_t)
-
-}  // namespace jit
-}  // namespace js
-
-#undef MSC_FETCHADDOP
-#undef MSC_FETCHADDOP_CAS
-#undef MSC_FETCHSUBOP
-
-#define MSC_FETCHBITOPX(T, U, name, op)             \
-  template <>                                       \
-  inline T AtomicOperations::name(T* addr, T val) { \
-    MOZ_ASSERT(tier1Constraints(addr));             \
-    return (T)op((U volatile*)addr, (U)val);        \
-  }
-
-#define MSC_FETCHBITOP(T, U, andop, orop, xorop) \
-  MSC_FETCHBITOPX(T, U, fetchAndSeqCst, andop)   \
-  MSC_FETCHBITOPX(T, U, fetchOrSeqCst, orop)     \
-  MSC_FETCHBITOPX(T, U, fetchXorSeqCst, xorop)
-
-#ifdef _M_IX86
-#  define AND_OP &
-#  define OR_OP |
-#  define XOR_OP ^
-#  define MSC_FETCHBITOPX_CAS(T, name, OP)                                     \
-    template <>                                                                \
-    inline T AtomicOperations::name(T* addr, T val) {                          \
-      MOZ_ASSERT(tier1Constraints(addr));                                      \
-      _ReadWriteBarrier();                                                     \
-      T oldval = *addr;                                                        \
-      for (;;) {                                                               \
-        T nextval = (T)_InterlockedCompareExchange64((__int64 volatile*)addr,  \
-                                                     (__int64)(oldval OP val), \
-                                                     (__int64)oldval);         \
-        if (nextval == oldval) break;                                          \
-        oldval = nextval;                                                      \
-      }                                                                        \
-      _ReadWriteBarrier();                                                     \
-      return oldval;                                                           \
-    }
-
-#  define MSC_FETCHBITOP_CAS(T)                    \
-    MSC_FETCHBITOPX_CAS(T, fetchAndSeqCst, AND_OP) \
-    MSC_FETCHBITOPX_CAS(T, fetchOrSeqCst, OR_OP)   \
-    MSC_FETCHBITOPX_CAS(T, fetchXorSeqCst, XOR_OP)
-
-#endif
-
-namespace js {
-namespace jit {
-
-MSC_FETCHBITOP(int8_t, char, _InterlockedAnd8, _InterlockedOr8,
-               _InterlockedXor8)
-MSC_FETCHBITOP(uint8_t, char, _InterlockedAnd8, _InterlockedOr8,
-               _InterlockedXor8)
-MSC_FETCHBITOP(int16_t, short, _InterlockedAnd16, _InterlockedOr16,
-               _InterlockedXor16)
-MSC_FETCHBITOP(uint16_t, short, _InterlockedAnd16, _InterlockedOr16,
-               _InterlockedXor16)
-MSC_FETCHBITOP(int32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)
-MSC_FETCHBITOP(uint32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)
-
-#ifdef _M_IX86
-MSC_FETCHBITOP_CAS(int64_t)
-MSC_FETCHBITOP_CAS(uint64_t)
-#else
-MSC_FETCHBITOP(int64_t, __int64, _InterlockedAnd64, _InterlockedOr64,
-               _InterlockedXor64)
-MSC_FETCHBITOP(uint64_t, __int64, _InterlockedAnd64, _InterlockedOr64,
-               _InterlockedXor64)
-#endif
-
-}  // namespace jit
-}  // namespace js
-
-#undef MSC_FETCHBITOPX_CAS
-#undef MSC_FETCHBITOPX
-#undef MSC_FETCHBITOP_CAS
-#undef MSC_FETCHBITOP
-
-template <typename T>
-inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  // This is also appropriate for double, int64, and uint64 on 32-bit
-  // platforms since there are no guarantees of access-atomicity.
-  return *addr;
-}
-
-template <typename T>
-inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  // This is also appropriate for double, int64, and uint64 on 32-bit
-  // platforms since there are no guarantees of access-atomicity.
-  *addr = val;
-}
-
-inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
-                                                          const void* src,
-                                                          size_t nbytes) {
-  MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
-  MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
-  ::memcpy(dest, src, nbytes);
-}
-
-inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
-                                                           const void* src,
-                                                           size_t nbytes) {
-  ::memmove(dest, src, nbytes);
-}
-
-#endif  // jit_shared_AtomicOperations_x86_shared_msvc_h
rename from js/src/jit/none/AtomicOperations-feeling-lucky.h
rename to js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h
--- a/js/src/jit/none/AtomicOperations-feeling-lucky.h
+++ b/js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h
@@ -2,37 +2,50 @@
  * vim: set ts=8 sts=2 et sw=2 tw=80:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 /* For documentation, see jit/AtomicOperations.h, both the comment block at the
  * beginning and the #ifdef nest near the end.
  *
- * This is a common file for tier-3 platforms that are not providing
- * hardware-specific implementations of the atomic operations.  Please keep it
- * reasonably platform-independent by adding #ifdefs at the beginning as much as
- * possible, not throughout the file.
+ * This is a common file for tier-3 platforms (including simulators for our
+ * tier-1 platforms) that are not providing hardware-specific implementations of
+ * the atomic operations.  Please keep it reasonably platform-independent by
+ * adding #ifdefs at the beginning as much as possible, not throughout the file.
  *
  *
  * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  * !!!!                              NOTE                                 !!!!
  * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  *
  * The implementations in this file are NOT SAFE and cannot be safe even in
  * principle because they rely on C++ undefined behavior.  However, they are
  * frequently good enough for tier-3 platforms.
  */
 
-#ifndef jit_none_AtomicOperations_feeling_lucky_h
-#define jit_none_AtomicOperations_feeling_lucky_h
+#ifndef jit_shared_AtomicOperations_feeling_lucky_gcc_h
+#define jit_shared_AtomicOperations_feeling_lucky_gcc_h
 
 #include "mozilla/Assertions.h"
 #include "mozilla/Types.h"
 
+// Explicitly exclude tier-1 platforms.
+
+#if ((defined(__x86_64__) || defined(_M_X64)) && defined(JS_CODEGEN_X64)) || \
+    ((defined(__i386__) || defined(_M_IX86)) && defined(JS_CODEGEN_X86)) ||  \
+    (defined(__arm__) && defined(JS_CODEGEN_ARM)) ||                         \
+    ((defined(__aarch64__) || defined(_M_ARM64)) && defined(JS_CODEGEN_ARM64))
+#  error "Do not use this code on a tier-1 platform when a JIT is available"
+#endif
+
+#if !(defined(__clang__) || defined(__GNUC__))
+#  error "This file only for gcc/Clang"
+#endif
+
 // 64-bit atomics are not required by the JS spec, and you can compile
 // SpiderMonkey without them.
 //
 // 64-bit lock-free atomics are however required for WebAssembly, and
 // WebAssembly will be disabled if you do not define both HAS_64BIT_ATOMICS and
 // HAS_64BIT_LOCKFREE.
 //
 // If you are only able to provide 64-bit non-lock-free atomics and you really
@@ -69,31 +82,25 @@
 #ifdef __sh__
 #  define GNUC_COMPATIBLE
 #endif
 
 #ifdef __s390__
 #  define GNUC_COMPATIBLE
 #endif
 
-#ifdef __s390x__
-#  define HAS_64BIT_ATOMICS
-#  define HAS_64BIT_LOCKFREE
-#  define GNUC_COMPATIBLE
-#endif
-
-// The default implementation tactic for gcc/clang is to use the newer
-// __atomic intrinsics added for use in C++11 <atomic>.  Where that
-// isn't available, we use GCC's older __sync functions instead.
+// The default implementation tactic for gcc/clang is to use the newer __atomic
+// intrinsics added for use in C++11 <atomic>.  Where that isn't available, we
+// use GCC's older __sync functions instead.
 //
-// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
-// compatible option for older compilers: enable this to use GCC's old
-// __sync functions instead of the newer __atomic functions.  This
-// will be required for GCC 4.6.x and earlier, and probably for Clang
-// 3.1, should we need to use those versions.
+// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward compatible
+// option for older compilers: enable this to use GCC's old __sync functions
+// instead of the newer __atomic functions.  This will be required for GCC 4.6.x
+// and earlier, and probably for Clang 3.1, should we need to use those
+// versions.  Firefox no longer supports compilers that old.
 
 //#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
 
 // Sanity check.
 
 #if defined(HAS_64BIT_LOCKFREE) && !defined(HAS_64BIT_ATOMICS)
 #  error "This combination of features is senseless, please fix"
 #endif
@@ -104,17 +111,18 @@ inline bool js::jit::AtomicOperations::I
   // Nothing
   return true;
 }
 
 inline void js::jit::AtomicOperations::ShutDown() {
   // Nothing
 }
 
-#ifdef GNUC_COMPATIBLE
+// When compiling with Clang on 32-bit linux it will be necessary to link with
+// -latomic to get the proper 64-bit intrinsics.
 
 inline bool js::jit::AtomicOperations::hasAtomic8() {
 #  if defined(HAS_64BIT_ATOMICS)
   return true;
 #  else
   return false;
 #  endif
 }
@@ -193,16 +201,51 @@ inline void AtomicOperations::storeSeqCs
   MOZ_CRASH("No 64-bit atomics");
 }
 
 }  // namespace jit
 }  // namespace js
 #  endif
 
 template <typename T>
+inline T js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) {
+  static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+  T v;
+  __sync_synchronize();
+  do {
+    v = *addr;
+  } while (__sync_val_compare_and_swap(addr, v, val) != v);
+  return v;
+#  else
+  T v;
+  __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+  return v;
+#  endif
+}
+
+#  ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::exchangeSeqCst(int64_t* addr, int64_t val) {
+  MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val) {
+  MOZ_CRASH("No 64-bit atomics");
+}
+
+}  // namespace jit
+}  // namespace js
+#  endif
+
+template <typename T>
 inline T js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval,
                                                           T newval) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
 #  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
   return __sync_val_compare_and_swap(addr, oldval, newval);
 #  else
   __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST,
                             __ATOMIC_SEQ_CST);
@@ -372,24 +415,30 @@ inline uint64_t AtomicOperations::fetchX
 }  // namespace js
 #  endif
 
 template <typename T>
 inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
   // This is actually roughly right even on 32-bit platforms since in that
   // case, double, int64, and uint64 loads need not be access-atomic.
+  //
+  // We could use __atomic_load, but it would be needlessly expensive on
+  // 32-bit platforms that could support it and just plain wrong on others.
   return *addr;
 }
 
 template <typename T>
 inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
   static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
   // This is actually roughly right even on 32-bit platforms since in that
   // case, double, int64, and uint64 loads need not be access-atomic.
+  //
+  // We could use __atomic_store, but it would be needlessly expensive on
+  // 32-bit platforms that could support it and just plain wrong on others.
   *addr = val;
 }
 
 inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
                                                           const void* src,
                                                           size_t nbytes) {
   MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
   MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
@@ -397,55 +446,13 @@ inline void js::jit::AtomicOperations::m
 }
 
 inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
                                                            const void* src,
                                                            size_t nbytes) {
   ::memmove(dest, src, nbytes);
 }
 
-template <typename T>
-inline T js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) {
-  static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-#  ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-  T v;
-  __sync_synchronize();
-  do {
-    v = *addr;
-  } while (__sync_val_compare_and_swap(addr, v, val) != v);
-  return v;
-#  else
-  T v;
-  __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
-  return v;
-#  endif
-}
-
-#  ifndef HAS_64BIT_ATOMICS
-namespace js {
-namespace jit {
-
-template <>
-inline int64_t AtomicOperations::exchangeSeqCst(int64_t* addr, int64_t val) {
-  MOZ_CRASH("No 64-bit atomics");
-}
-
-template <>
-inline uint64_t AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val) {
-  MOZ_CRASH("No 64-bit atomics");
-}
-
-}  // namespace jit
-}  // namespace js
-#  endif
-
-#else
-
-#  error "Either use GCC or Clang, or add code here"
-
-#endif
-
 #undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
-#undef GNUC_COMPATIBLE
 #undef HAS_64BIT_ATOMICS
 #undef HAS_64BIT_LOCKFREE
 
-#endif  // jit_none_AtomicOperations_feeling_lucky_h
+#endif  // jit_shared_AtomicOperations_feeling_lucky_gcc_h
rename from js/src/jit/x86-shared/AtomicOperations-x86-shared-msvc.h
rename to js/src/jit/shared/AtomicOperations-feeling-lucky-msvc.h
--- a/js/src/jit/x86-shared/AtomicOperations-x86-shared-msvc.h
+++ b/js/src/jit/shared/AtomicOperations-feeling-lucky-msvc.h
@@ -1,43 +1,45 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
  * vim: set ts=8 sts=2 et sw=2 tw=80:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#ifndef jit_shared_AtomicOperations_x86_shared_msvc_h
-#define jit_shared_AtomicOperations_x86_shared_msvc_h
+#ifndef jit_shared_AtomicOperations_feeling_lucky_msvc_h
+#define jit_shared_AtomicOperations_feeling_lucky_msvc_h
 
 #include "mozilla/Assertions.h"
 #include "mozilla/Types.h"
 
+// Explicitly exclude tier-1 platforms.
+
+#if ((defined(__x86_64__) || defined(_M_X64)) && defined(JS_CODEGEN_X64)) || \
+    ((defined(__i386__) || defined(_M_IX86)) && defined(JS_CODEGEN_X86)) ||  \
+    (defined(__arm__) && defined(JS_CODEGEN_ARM)) ||                         \
+    ((defined(__aarch64__) || defined(_M_ARM64)) && defined(JS_CODEGEN_ARM64))
+#  error "Do not use this code on a tier-1 platform when a JIT is available"
+#endif
+
 #if !defined(_MSC_VER)
 #  error "This file only for Microsoft Visual C++"
 #endif
 
-// For overall documentation, see jit/AtomicOperations.h/
-//
-// For general comments on lock-freedom, access-atomicity, and related matters
-// on x86 and x64, notably for justification of the implementations of the
-// 64-bit primitives on 32-bit systems, see the comment block in
-// AtomicOperations-x86-shared-gcc.h.
+// For overall documentation, see jit/AtomicOperations.h.
 
 // Below, _ReadWriteBarrier is a compiler directive, preventing reordering of
 // instructions and reuse of memory values across it in the compiler, but having
 // no impact on what the CPU does.
 
 // Note, here we use MSVC intrinsics directly.  But MSVC supports a slightly
 // higher level of function which uses the intrinsic when possible (8, 16, and
 // 32-bit operations, and 64-bit operations on 64-bit systems) and otherwise
 // falls back on CMPXCHG8B for 64-bit operations on 32-bit systems.  We could be
 // using those functions in many cases here (though not all).  I have not done
-// so because (a) I don't yet know how far back those functions are supported
-// and (b) I expect we'll end up dropping into assembler here eventually so as
-// to guarantee that the C++ compiler won't optimize the code.
+// so because I don't yet know how far back those functions are supported.
 
 // Note, _InterlockedCompareExchange takes the *new* value as the second
 // argument and the *comparand* (expected old value) as the third argument.
 
 inline bool js::jit::AtomicOperations::Initialize() {
   // Nothing
   return true;
 }
@@ -57,65 +59,69 @@ inline bool js::jit::AtomicOperations::i
   // But I've found no way to assert that at compile time or run time, there
   // appears to be no WinAPI is_lock_free() test.
 
   return true;
 }
 
 inline void js::jit::AtomicOperations::fenceSeqCst() {
   _ReadWriteBarrier();
+#if defined(_M_IX86) || defined(_M_X64)
   _mm_mfence();
+#elif defined(_M_ARM64)
+  // MemoryBarrier is defined in winnt.h, which we don't want to include here.
+  // This expression is the expansion of MemoryBarrier.
+  __dmb(_ARM64_BARRIER_SY);
+#else
+#error "Unknown hardware for MSVC"
+#endif
 }
 
 template <typename T>
 inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
-  MOZ_ASSERT(tier1Constraints(addr));
   _ReadWriteBarrier();
   T v = *addr;
   _ReadWriteBarrier();
   return v;
 }
 
 #ifdef _M_IX86
 namespace js {
 namespace jit {
 
 #  define MSC_LOADOP(T)                                                       \
     template <>                                                               \
     inline T AtomicOperations::loadSeqCst(T* addr) {                          \
-      MOZ_ASSERT(tier1Constraints(addr));                                     \
       _ReadWriteBarrier();                                                    \
       return (T)_InterlockedCompareExchange64((__int64 volatile*)addr, 0, 0); \
     }
 
 MSC_LOADOP(int64_t)
 MSC_LOADOP(uint64_t)
 
 #  undef MSC_LOADOP
 
 }  // namespace jit
 }  // namespace js
 #endif  // _M_IX86
 
 template <typename T>
 inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
   _ReadWriteBarrier();
   *addr = val;
   fenceSeqCst();
 }
 
 #ifdef _M_IX86
 namespace js {
 namespace jit {
 
 #  define MSC_STOREOP(T)                                             \
     template <>                                                      \
     inline void AtomicOperations::storeSeqCst(T* addr, T val) {      \
-      MOZ_ASSERT(tier1Constraints(addr));                            \
       _ReadWriteBarrier();                                           \
       T oldval = *addr;                                              \
       for (;;) {                                                     \
         T nextval = (T)_InterlockedCompareExchange64(                \
             (__int64 volatile*)addr, (__int64)val, (__int64)oldval); \
         if (nextval == oldval) break;                                \
         oldval = nextval;                                            \
       }                                                              \
@@ -129,25 +135,23 @@ MSC_STOREOP(uint64_t)
 
 }  // namespace jit
 }  // namespace js
 #endif  // _M_IX86
 
 #define MSC_EXCHANGEOP(T, U, xchgop)                          \
   template <>                                                 \
   inline T AtomicOperations::exchangeSeqCst(T* addr, T val) { \
-    MOZ_ASSERT(tier1Constraints(addr));                       \
     return (T)xchgop((U volatile*)addr, (U)val);              \
   }
 
 #ifdef _M_IX86
 #  define MSC_EXCHANGEOP_CAS(T)                                      \
     template <>                                                      \
     inline T AtomicOperations::exchangeSeqCst(T* addr, T val) {      \
-      MOZ_ASSERT(tier1Constraints(addr));                            \
       _ReadWriteBarrier();                                           \
       T oldval = *addr;                                              \
       for (;;) {                                                     \
         T nextval = (T)_InterlockedCompareExchange64(                \
             (__int64 volatile*)addr, (__int64)val, (__int64)oldval); \
         if (nextval == oldval) break;                                \
         oldval = nextval;                                            \
       }                                                              \
@@ -179,17 +183,16 @@ MSC_EXCHANGEOP(uint64_t, __int64, _Inter
 
 #undef MSC_EXCHANGEOP
 #undef MSC_EXCHANGEOP_CAS
 
 #define MSC_CAS(T, U, cmpxchg)                                        \
   template <>                                                         \
   inline T AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, \
                                                    T newval) {        \
-    MOZ_ASSERT(tier1Constraints(addr));                               \
     return (T)cmpxchg((U volatile*)addr, (U)newval, (U)oldval);       \
   }
 
 namespace js {
 namespace jit {
 
 MSC_CAS(int8_t, char, _InterlockedCompareExchange8)
 MSC_CAS(uint8_t, char, _InterlockedCompareExchange8)
@@ -203,31 +206,29 @@ MSC_CAS(uint64_t, __int64, _InterlockedC
 }  // namespace jit
 }  // namespace js
 
 #undef MSC_CAS
 
 #define MSC_FETCHADDOP(T, U, xadd)                            \
   template <>                                                 \
   inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
-    MOZ_ASSERT(tier1Constraints(addr));                       \
     return (T)xadd((U volatile*)addr, (U)val);                \
   }
 
 #define MSC_FETCHSUBOP(T)                                     \
   template <>                                                 \
   inline T AtomicOperations::fetchSubSeqCst(T* addr, T val) { \
     return fetchAddSeqCst(addr, (T)(0 - val));                \
   }
 
 #ifdef _M_IX86
 #  define MSC_FETCHADDOP_CAS(T)                                               \
     template <>                                                               \
     inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) {               \
-      MOZ_ASSERT(tier1Constraints(addr));                                     \
       _ReadWriteBarrier();                                                    \
       T oldval = *addr;                                                       \
       for (;;) {                                                              \
         T nextval = (T)_InterlockedCompareExchange64((__int64 volatile*)addr, \
                                                      (__int64)(oldval + val), \
                                                      (__int64)oldval);        \
         if (nextval == oldval) break;                                         \
         oldval = nextval;                                                     \
@@ -269,33 +270,31 @@ MSC_FETCHSUBOP(uint64_t)
 
 #undef MSC_FETCHADDOP
 #undef MSC_FETCHADDOP_CAS
 #undef MSC_FETCHSUBOP
 
 #define MSC_FETCHBITOPX(T, U, name, op)             \
   template <>                                       \
   inline T AtomicOperations::name(T* addr, T val) { \
-    MOZ_ASSERT(tier1Constraints(addr));             \
     return (T)op((U volatile*)addr, (U)val);        \
   }
 
 #define MSC_FETCHBITOP(T, U, andop, orop, xorop) \
   MSC_FETCHBITOPX(T, U, fetchAndSeqCst, andop)   \
   MSC_FETCHBITOPX(T, U, fetchOrSeqCst, orop)     \
   MSC_FETCHBITOPX(T, U, fetchXorSeqCst, xorop)
 
 #ifdef _M_IX86
 #  define AND_OP &
 #  define OR_OP |
 #  define XOR_OP ^
 #  define MSC_FETCHBITOPX_CAS(T, name, OP)                                     \
     template <>                                                                \
     inline T AtomicOperations::name(T* addr, T val) {                          \
-      MOZ_ASSERT(tier1Constraints(addr));                                      \
       _ReadWriteBarrier();                                                     \
       T oldval = *addr;                                                        \
       for (;;) {                                                               \
         T nextval = (T)_InterlockedCompareExchange64((__int64 volatile*)addr,  \
                                                      (__int64)(oldval OP val), \
                                                      (__int64)oldval);         \
         if (nextval == oldval) break;                                          \
         oldval = nextval;                                                      \
@@ -340,25 +339,23 @@ MSC_FETCHBITOP(uint64_t, __int64, _Inter
 
 #undef MSC_FETCHBITOPX_CAS
 #undef MSC_FETCHBITOPX
 #undef MSC_FETCHBITOP_CAS
 #undef MSC_FETCHBITOP
 
 template <typename T>
 inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
-  MOZ_ASSERT(tier1Constraints(addr));
   // This is also appropriate for double, int64, and uint64 on 32-bit
   // platforms since there are no guarantees of access-atomicity.
   return *addr;
 }
 
 template <typename T>
 inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
   // This is also appropriate for double, int64, and uint64 on 32-bit
   // platforms since there are no guarantees of access-atomicity.
   *addr = val;
 }
 
 inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
                                                           const void* src,
                                                           size_t nbytes) {
@@ -368,9 +365,9 @@ inline void js::jit::AtomicOperations::m
 }
 
 inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
                                                            const void* src,
                                                            size_t nbytes) {
   ::memmove(dest, src, nbytes);
 }
 
-#endif  // jit_shared_AtomicOperations_x86_shared_msvc_h
+#endif  // jit_shared_AtomicOperations_feeling_lucky_msvc_h
new file mode 100644
--- /dev/null
+++ b/js/src/jit/shared/AtomicOperations-feeling-lucky.h
@@ -0,0 +1,19 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_AtomicOperations_feeling_lucky_h
+#define jit_shared_AtomicOperations_feeling_lucky_h
+
+#if defined(__clang__) || defined(__GNUC__)
+# include "jit/shared/AtomicOperations-feeling-lucky-gcc.h"
+#elif defined(_MSC_VER)
+# include "jit/shared/AtomicOperations-feeling-lucky-msvc.h"
+#else
+# error "No AtomicOperations support for this platform+compiler combination"
+#endif
+
+#endif // jit_shared_AtomicOperations_feeling_lucky_h
+
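As a hedged illustration (the platform macro below is hypothetical, not taken from this patch), a non-tier-1 port would pull in these fallbacks from its platform case in jit/AtomicOperations.h along these lines:

#if defined(__riscv)  // hypothetical example platform
#  include "jit/shared/AtomicOperations-feeling-lucky.h"
#endif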
deleted file mode 100644
--- a/js/src/jit/x86-shared/AtomicOperations-x86-shared-gcc.h
+++ /dev/null
@@ -1,244 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
- * vim: set ts=8 sts=2 et sw=2 tw=80:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-/* For overall documentation, see jit/AtomicOperations.h */
-
-#ifndef jit_shared_AtomicOperations_x86_shared_gcc_h
-#define jit_shared_AtomicOperations_x86_shared_gcc_h
-
-#include "mozilla/Assertions.h"
-#include "mozilla/Types.h"
-
-#include "vm/ArrayBufferObject.h"
-
-#if !defined(__clang__) && !defined(__GNUC__)
-#  error "This file only for gcc-compatible compilers"
-#endif
-
-// Lock-freedom and access-atomicity on x86 and x64.
-//
-// In general, aligned accesses are access-atomic up to 8 bytes ever since the
-// Pentium; Firefox requires SSE2, which was introduced with the Pentium 4, so
-// we may assume access-atomicity.
-//
-// Four-byte accesses and smaller are simple:
-//  - Use MOV{B,W,L} to load and store.  Stores require a post-fence
-//    for sequential consistency as defined by the JS spec.  The fence
-//    can be MFENCE, or the store can be implemented using XCHG.
-//  - For compareExchange use LOCK; CMPXCHG{B,W,L}
-//  - For exchange, use XCHG{B,W,L}
-//  - For add, etc use LOCK; ADD{B,W,L} etc
-//
-// Eight-byte accesses are easy on x64:
-//  - Use MOVQ to load and store (again with a fence for the store)
-//  - For compareExchange, we use CMPXCHGQ
-//  - For exchange, we use XCHGQ
-//  - For add, etc use LOCK; ADDQ etc
-//
-// Eight-byte accesses are harder on x86:
-//  - For load, use a sequence of MOVL + CMPXCHG8B
-//  - For store, use a sequence of MOVL + a CMPXCHG8B in a loop,
-//    no additional fence required
-//  - For exchange, do as for store
-//  - For add, etc do as for store
-
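A minimal sketch of the 64-bit store sequence just described (a CMPXCHG8B retry loop), written with the __atomic builtins; this is illustration only, not code from this header:

// Sketch: sequentially consistent 64-bit store on 32-bit x86, implemented as
// a compare-exchange loop that the compiler lowers to LOCK CMPXCHG8B.
static inline void store64SeqCst(int64_t* addr, int64_t val) {
  int64_t expected = *addr;  // racy snapshot; the CAS below revalidates it
  while (!__atomic_compare_exchange_n(addr, &expected, val, /*weak=*/false,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
    // 'expected' now holds the cell's current contents; retry.
  }
}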
-// Firefox requires gcc > 4.8, so we will always have the __atomic intrinsics
-// added for use in C++11 <atomic>.
-//
-// Note that using these intrinsics for most operations is not correct: the code
-// has undefined behavior.  The gcc documentation states that the compiler
-// assumes the code is race free.  This supposedly means C++ will allow some
-// instruction reorderings (effectively those allowed by TSO) even for seq_cst
-// ordered operations, but these reorderings are not allowed by JS.  To do
-// better we will end up with inline assembler or JIT-generated code.
-
-// For now, we require that the C++ compiler's atomics are lock free, even for
-// 64-bit accesses.
-
-inline bool js::jit::AtomicOperations::Initialize() {
-  // Nothing
-  return true;
-}
-
-inline void js::jit::AtomicOperations::ShutDown() {
-  // Nothing
-}
-
-// When compiling with Clang on 32-bit linux it will be necessary to link with
-// -latomic to get the proper 64-bit intrinsics.
-
-inline bool js::jit::AtomicOperations::hasAtomic8() { return true; }
-
-inline bool js::jit::AtomicOperations::isLockfree8() {
-  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
-  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
-  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
-  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
-  return true;
-}
-
-inline void js::jit::AtomicOperations::fenceSeqCst() {
-  __atomic_thread_fence(__ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  T v;
-  __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
-  return v;
-}
-
-template <typename T>
-inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  T v;
-  __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
-  return v;
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval,
-                                                          T newval) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST,
-                            __ATOMIC_SEQ_CST);
-  return oldval;
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  T v;
-  __atomic_load(addr, &v, __ATOMIC_RELAXED);
-  return v;
-}
-
-namespace js {
-namespace jit {
-
-#define GCC_RACYLOADOP(T)                                         \
-  template <>                                                     \
-  inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) { \
-    return *addr;                                                 \
-  }
-
-// On 32-bit platforms, loadSafeWhenRacy need not be access-atomic for 64-bit
-// data, so just use regular accesses instead of the expensive __atomic_load
-// solution which must use CMPXCHG8B.
-#ifndef JS_64BIT
-GCC_RACYLOADOP(int64_t)
-GCC_RACYLOADOP(uint64_t)
-#endif
-
-// Float and double accesses are not access-atomic.
-GCC_RACYLOADOP(float)
-GCC_RACYLOADOP(double)
-
-// Clang requires a specialization for uint8_clamped.
-template <>
-inline uint8_clamped js::jit::AtomicOperations::loadSafeWhenRacy(
-    uint8_clamped* addr) {
-  uint8_t v;
-  __atomic_load(&addr->val, &v, __ATOMIC_RELAXED);
-  return uint8_clamped(v);
-}
-
-#undef GCC_RACYLOADOP
-
-}  // namespace jit
-}  // namespace js
-
-template <typename T>
-inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
-  MOZ_ASSERT(tier1Constraints(addr));
-  __atomic_store(addr, &val, __ATOMIC_RELAXED);
-}
-
-namespace js {
-namespace jit {
-
-#define GCC_RACYSTOREOP(T)                                                   \
-  template <>                                                                \
-  inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) { \
-    *addr = val;                                                             \
-  }
-
-// On 32-bit platforms, storeSafeWhenRacy need not be access-atomic for 64-bit
-// data, so just use regular accesses instead of the expensive __atomic_store
-// solution which must use CMPXCHG8B.
-#ifndef JS_64BIT
-GCC_RACYSTOREOP(int64_t)
-GCC_RACYSTOREOP(uint64_t)
-#endif
-
-// Float and double accesses are not access-atomic.
-GCC_RACYSTOREOP(float)
-GCC_RACYSTOREOP(double)
-
-// Clang requires a specialization for uint8_clamped.
-template <>
-inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr,
-                                                         uint8_clamped val) {
-  __atomic_store(&addr->val, &val.val, __ATOMIC_RELAXED);
-}
-
-#undef GCC_RACYSTOREOP
-
-}  // namespace jit
-}  // namespace js
-
-inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
-                                                          const void* src,
-                                                          size_t nbytes) {
-  MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
-  MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
-  ::memcpy(dest, src, nbytes);
-}
-
-inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
-                                                           const void* src,
-                                                           size_t nbytes) {
-  ::memmove(dest, src, nbytes);
-}
-
-#endif  // jit_shared_AtomicOperations_x86_shared_gcc_h