Bug 1073096 - Support for Odin and asm.js. r=luke
author Lars T Hansen <lhansen@mozilla.com>
Thu, 20 Nov 2014 16:27:40 +0100
changeset 240968 d64f299df337d69c8750df659ae4cc0c4460e6a2
parent 240967 4e9c799bccc6ff3faa3b8049bebb42681c15ad81
child 240969 e3cba62ada404c2f5a915b0caf87d899bbdd9911
push id 4311
push user raliiev@mozilla.com
push date Mon, 12 Jan 2015 19:37:41 +0000
treeherder mozilla-beta@150c9fed433b [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers luke
bugs 1073096
milestone 36.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1073096 - Support for Odin and asm.js. r=luke
js/src/asmjs/AsmJSLink.cpp
js/src/asmjs/AsmJSModule.cpp
js/src/asmjs/AsmJSModule.h
js/src/asmjs/AsmJSSignalHandlers.cpp
js/src/asmjs/AsmJSValidate.cpp
js/src/jit-test/tests/asm.js/testAtomics.js
js/src/jit/LIR-Common.h
js/src/jit/LOpcodes.h
js/src/jit/MIR.h
js/src/jit/MOpcodes.h
js/src/jit/ParallelSafetyAnalysis.cpp
js/src/jit/arm/CodeGenerator-arm.cpp
js/src/jit/arm/CodeGenerator-arm.h
js/src/jit/arm/Lowering-arm.cpp
js/src/jit/arm/Lowering-arm.h
js/src/jit/none/Lowering-none.h
js/src/jit/shared/Assembler-x86-shared.h
js/src/jit/shared/BaseAssembler-x86-shared.h
js/src/jit/shared/Lowering-x86-shared.cpp
js/src/jit/shared/Lowering-x86-shared.h
js/src/jit/x64/CodeGenerator-x64.cpp
js/src/jit/x64/CodeGenerator-x64.h
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jit/x86/CodeGenerator-x86.h
--- a/js/src/asmjs/AsmJSLink.cpp
+++ b/js/src/asmjs/AsmJSLink.cpp
@@ -25,16 +25,17 @@
 #endif
 
 #include "jscntxt.h"
 #include "jsmath.h"
 #include "jsprf.h"
 #include "jswrapper.h"
 
 #include "asmjs/AsmJSModule.h"
+#include "builtin/AtomicsObject.h"
 #include "builtin/SIMD.h"
 #include "frontend/BytecodeCompiler.h"
 #include "jit/Ion.h"
 #include "jit/JitCommon.h"
 #ifdef JS_ION_PERF
 # include "jit/PerfSpewer.h"
 #endif
 #include "vm/ArrayBufferObject.h"
@@ -214,31 +215,30 @@ ValidateFFI(JSContext *cx, AsmJSModule::
     if (!v.isObject() || !v.toObject().is<JSFunction>())
         return LinkFail(cx, "FFI imports must be functions");
 
     (*ffis)[global.ffiIndex()].set(&v.toObject().as<JSFunction>());
     return true;
 }
 
 static bool
-ValidateArrayView(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
+ValidateArrayView(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal, bool isShared)
 {
     RootedPropertyName field(cx, global.maybeViewName());
     if (!field)
         return true;
 
     RootedValue v(cx);
     if (!GetDataProperty(cx, globalVal, field, &v))
         return false;
 
-    if (!IsTypedArrayConstructor(v, global.viewType()) &&
-        !IsSharedTypedArrayConstructor(v, global.viewType()))
-    {
+    bool tac = IsTypedArrayConstructor(v, global.viewType());
+    bool stac = IsSharedTypedArrayConstructor(v, global.viewType());
+    if (!((tac || stac) && stac == isShared))
         return LinkFail(cx, "bad typed array constructor");
-    }
 
     return true;
 }
 
 static bool
 ValidateByteLength(JSContext *cx, HandleValue globalVal)
 {
     RootedPropertyName field(cx, cx->names().byteLength);
@@ -402,16 +402,45 @@ ValidateSimdOperation(JSContext *cx, Asm
 #undef SET_NATIVE
     }
     if (!native || !IsNativeFunction(v, native))
         return LinkFail(cx, "bad SIMD.type.* operation");
     return true;
 }
 
 static bool
+ValidateAtomicsBuiltinFunction(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
+{
+    RootedValue v(cx);
+    if (!GetDataProperty(cx, globalVal, cx->names().Atomics, &v))
+        return false;
+    RootedPropertyName field(cx, global.atomicsName());
+    if (!GetDataProperty(cx, v, field, &v))
+        return false;
+
+    Native native = nullptr;
+    switch (global.atomicsBuiltinFunction()) {
+      case AsmJSAtomicsBuiltin_compareExchange: native = atomics_compareExchange; break;
+      case AsmJSAtomicsBuiltin_load: native = atomics_load; break;
+      case AsmJSAtomicsBuiltin_store: native = atomics_store; break;
+      case AsmJSAtomicsBuiltin_fence: native = atomics_fence; break;
+      case AsmJSAtomicsBuiltin_add: native = atomics_add; break;
+      case AsmJSAtomicsBuiltin_sub: native = atomics_sub; break;
+      case AsmJSAtomicsBuiltin_and: native = atomics_and; break;
+      case AsmJSAtomicsBuiltin_or: native = atomics_or; break;
+      case AsmJSAtomicsBuiltin_xor: native = atomics_xor; break;
+    }
+
+    if (!IsNativeFunction(v, native))
+        return LinkFail(cx, "bad Atomics.* builtin function");
+
+    return true;
+}
+
+static bool
 ValidateConstant(JSContext *cx, AsmJSModule::Global &global, HandleValue globalVal)
 {
     RootedPropertyName field(cx, global.constantName());
     RootedValue v(cx, globalVal);
 
     if (global.constantKind() == AsmJSModule::Global::MathConstant) {
         if (!GetDataProperty(cx, v, cx->names().Math, &v))
             return false;
@@ -528,28 +557,33 @@ DynamicallyLinkModule(JSContext *cx, Cal
             if (!ValidateGlobalVariable(cx, module, global, importVal))
                 return false;
             break;
           case AsmJSModule::Global::FFI:
             if (!ValidateFFI(cx, global, importVal, &ffis))
                 return false;
             break;
           case AsmJSModule::Global::ArrayView:
+          case AsmJSModule::Global::SharedArrayView:
           case AsmJSModule::Global::ArrayViewCtor:
-            if (!ValidateArrayView(cx, global, globalVal))
+            if (!ValidateArrayView(cx, global, globalVal, module.hasArrayView() && module.isSharedView()))
                 return false;
             break;
           case AsmJSModule::Global::ByteLength:
             if (!ValidateByteLength(cx, globalVal))
                 return false;
             break;
           case AsmJSModule::Global::MathBuiltinFunction:
             if (!ValidateMathBuiltinFunction(cx, global, globalVal))
                 return false;
             break;
+          case AsmJSModule::Global::AtomicsBuiltinFunction:
+            if (!ValidateAtomicsBuiltinFunction(cx, global, globalVal))
+                return false;
+            break;
           case AsmJSModule::Global::Constant:
             if (!ValidateConstant(cx, global, globalVal))
                 return false;
             break;
           case AsmJSModule::Global::SimdCtor:
             if (!ValidateSimdType(cx, global, globalVal))
                 return false;
             break;
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -773,22 +773,23 @@ AsmJSModule::initHeap(Handle<ArrayBuffer
         if (access.hasLengthCheck())
             X86Assembler::setPointer(access.patchLengthAt(code_), heapLength);
         void *addr = access.patchOffsetAt(code_);
         uint32_t disp = reinterpret_cast<uint32_t>(X86Assembler::getPointer(addr));
         MOZ_ASSERT(disp <= INT32_MAX);
         X86Assembler::setPointer(addr, (void *)(heapOffset + disp));
     }
 #elif defined(JS_CODEGEN_X64)
-    if (usesSignalHandlersForOOB())
-        return;
-    // If we cannot use the signal handlers, we need to patch the heap length
+    // Even with signal handling being used for most bounds checks, there may be
+    // atomic operations that depend on explicit checks.
+    //
+    // If we have any explicit bounds checks, we need to patch the heap length
     // checks at the right places. All accesses that have been recorded are the
     // only ones that need bound checks (see also
-    // CodeGeneratorX64::visitAsmJS{Load,Store}Heap)
+    // CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,AtomicBinop}Heap)
     int32_t heapLength = int32_t(intptr_t(heap->byteLength()));
     for (size_t i = 0; i < heapAccesses_.length(); i++) {
         const jit::AsmJSHeapAccess &access = heapAccesses_[i];
         if (access.hasLengthCheck())
             X86Assembler::setInt32(access.patchLengthAt(code_), heapLength);
     }
 #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
     uint32_t heapLength = heap->byteLength();
--- a/js/src/asmjs/AsmJSModule.h
+++ b/js/src/asmjs/AsmJSModule.h
@@ -61,16 +61,30 @@ enum AsmJSMathBuiltinFunction
     AsmJSMathBuiltin_asin, AsmJSMathBuiltin_acos, AsmJSMathBuiltin_atan,
     AsmJSMathBuiltin_ceil, AsmJSMathBuiltin_floor, AsmJSMathBuiltin_exp,
     AsmJSMathBuiltin_log, AsmJSMathBuiltin_pow, AsmJSMathBuiltin_sqrt,
     AsmJSMathBuiltin_abs, AsmJSMathBuiltin_atan2, AsmJSMathBuiltin_imul,
     AsmJSMathBuiltin_fround, AsmJSMathBuiltin_min, AsmJSMathBuiltin_max,
     AsmJSMathBuiltin_clz32
 };
 
+// The asm.js spec will recognize this set of builtin Atomics functions.
+enum AsmJSAtomicsBuiltinFunction
+{
+    AsmJSAtomicsBuiltin_compareExchange,
+    AsmJSAtomicsBuiltin_load,
+    AsmJSAtomicsBuiltin_store,
+    AsmJSAtomicsBuiltin_fence,
+    AsmJSAtomicsBuiltin_add,
+    AsmJSAtomicsBuiltin_sub,
+    AsmJSAtomicsBuiltin_and,
+    AsmJSAtomicsBuiltin_or,
+    AsmJSAtomicsBuiltin_xor
+};
+
 // Set of known global object SIMD's attributes, i.e. types
 enum AsmJSSimdType
 {
     AsmJSSimdType_int32x4,
     AsmJSSimdType_float32x4
 };
 
 // Set of known operations, for a given SIMD type (int32x4, float32x4,...)
@@ -195,18 +209,18 @@ class AsmJSNumLit
 //
 // NB: this means that AsmJSModule must be GC-safe.
 class AsmJSModule
 {
   public:
     class Global
     {
       public:
-        enum Which { Variable, FFI, ArrayView, ArrayViewCtor, MathBuiltinFunction, Constant,
-                     SimdCtor, SimdOperation, ByteLength };
+        enum Which { Variable, FFI, ArrayView, ArrayViewCtor, SharedArrayView, MathBuiltinFunction,
+                     AtomicsBuiltinFunction, Constant, SimdCtor, SimdOperation, ByteLength };
         enum VarInitKind { InitConstant, InitImport };
         enum ConstantKind { GlobalConstant, MathConstant };
 
       private:
         struct Pod {
             Which which_;
             union {
                 struct {
@@ -215,16 +229,17 @@ class AsmJSModule
                     union {
                         AsmJSCoercion coercion_;
                         AsmJSNumLit numLit_;
                     } u;
                 } var;
                 uint32_t ffiIndex_;
                 Scalar::Type viewType_;
                 AsmJSMathBuiltinFunction mathBuiltinFunc_;
+                AsmJSAtomicsBuiltinFunction atomicsBuiltinFunc_;
                 AsmJSSimdType simdCtorType_;
                 struct {
                     AsmJSSimdType type_;
                     AsmJSSimdOperation which_;
                 } simdOp;
                 struct {
                     ConstantKind kind_;
                     double value_;
@@ -284,31 +299,39 @@ class AsmJSModule
             MOZ_ASSERT(pod.which_ == FFI);
             return pod.u.ffiIndex_;
         }
         // When a view is created from an imported constructor:
         //   var I32 = stdlib.Int32Array;
         //   var i32 = new I32(buffer);
         // the second import has nothing to validate and thus has a null field.
         PropertyName *maybeViewName() const {
-            MOZ_ASSERT(pod.which_ == ArrayView || pod.which_ == ArrayViewCtor);
+            MOZ_ASSERT(pod.which_ == ArrayView || pod.which_ == SharedArrayView || pod.which_ == ArrayViewCtor);
             return name_;
         }
         Scalar::Type viewType() const {
-            MOZ_ASSERT(pod.which_ == ArrayView || pod.which_ == ArrayViewCtor);
+            MOZ_ASSERT(pod.which_ == ArrayView || pod.which_ == SharedArrayView || pod.which_ == ArrayViewCtor);
             return pod.u.viewType_;
         }
         PropertyName *mathName() const {
             MOZ_ASSERT(pod.which_ == MathBuiltinFunction);
             return name_;
         }
+        PropertyName *atomicsName() const {
+            MOZ_ASSERT(pod.which_ == AtomicsBuiltinFunction);
+            return name_;
+        }
         AsmJSMathBuiltinFunction mathBuiltinFunction() const {
             MOZ_ASSERT(pod.which_ == MathBuiltinFunction);
             return pod.u.mathBuiltinFunc_;
         }
+        AsmJSAtomicsBuiltinFunction atomicsBuiltinFunction() const {
+            MOZ_ASSERT(pod.which_ == AtomicsBuiltinFunction);
+            return pod.u.atomicsBuiltinFunc_;
+        }
         AsmJSSimdType simdCtorType() const {
             MOZ_ASSERT(pod.which_ == SimdCtor);
             return pod.u.simdCtorType_;
         }
         PropertyName *simdCtorName() const {
             MOZ_ASSERT(pod.which_ == SimdCtor);
             return name_;
         }
@@ -778,16 +801,17 @@ class AsmJSModule
         uint32_t                          heapLengthMask_;
         uint32_t                          numGlobalScalarVars_;
         uint32_t                          numGlobalSimdVars_;
         uint32_t                          numFFIs_;
         uint32_t                          srcLength_;
         uint32_t                          srcLengthWithRightBrace_;
         bool                              strict_;
         bool                              hasArrayView_;
+        bool                              isSharedView_;
         bool                              hasFixedMinHeapLength_;
         bool                              usesSignalHandlers_;
     } pod;
 
     // These two fields need to be kept out pod as they depend on the position
     // of the module within the ScriptSource and thus aren't invariant with
     // respect to caching.
     const uint32_t                        srcStart_;
@@ -977,26 +1001,30 @@ class AsmJSModule
     bool addFFI(PropertyName *field, uint32_t *ffiIndex) {
         MOZ_ASSERT(!isFinishedWithModulePrologue());
         if (pod.numFFIs_ == UINT32_MAX)
             return false;
         Global g(Global::FFI, field);
         g.pod.u.ffiIndex_ = *ffiIndex = pod.numFFIs_++;
         return globals_.append(g);
     }
-    bool addArrayView(Scalar::Type vt, PropertyName *maybeField) {
+    bool addArrayView(Scalar::Type vt, PropertyName *maybeField, bool isSharedView) {
         MOZ_ASSERT(!isFinishedWithModulePrologue());
+        MOZ_ASSERT(!pod.hasArrayView_ || (pod.isSharedView_ == isSharedView));
         pod.hasArrayView_ = true;
+        pod.isSharedView_ = isSharedView;
         Global g(Global::ArrayView, maybeField);
         g.pod.u.viewType_ = vt;
         return globals_.append(g);
     }
-    bool addArrayViewCtor(Scalar::Type vt, PropertyName *field) {
+    bool addArrayViewCtor(Scalar::Type vt, PropertyName *field, bool isSharedView) {
         MOZ_ASSERT(!isFinishedWithModulePrologue());
         MOZ_ASSERT(field);
+        MOZ_ASSERT(!pod.isSharedView_ || isSharedView);
+        pod.isSharedView_ = isSharedView;
         Global g(Global::ArrayViewCtor, field);
         g.pod.u.viewType_ = vt;
         return globals_.append(g);
     }
     bool addByteLength() {
         MOZ_ASSERT(!isFinishedWithModulePrologue());
         Global g(Global::ByteLength, nullptr);
         return globals_.append(g);
@@ -1009,16 +1037,22 @@ class AsmJSModule
     }
     bool addMathBuiltinConstant(double value, PropertyName *field) {
         MOZ_ASSERT(!isFinishedWithModulePrologue());
         Global g(Global::Constant, field);
         g.pod.u.constant.value_ = value;
         g.pod.u.constant.kind_ = Global::MathConstant;
         return globals_.append(g);
     }
+    bool addAtomicsBuiltinFunction(AsmJSAtomicsBuiltinFunction func, PropertyName *field) {
+        MOZ_ASSERT(!isFinishedWithModulePrologue());
+        Global g(Global::AtomicsBuiltinFunction, field);
+        g.pod.u.atomicsBuiltinFunc_ = func;
+        return globals_.append(g);
+    }
     bool addSimdCtor(AsmJSSimdType type, PropertyName *field) {
         Global g(Global::SimdCtor, field);
         g.pod.u.simdCtorType_ = type;
         return globals_.append(g);
     }
     bool addSimdOperation(AsmJSSimdType type, AsmJSSimdOperation op, PropertyName *field) {
         Global g(Global::SimdOperation, field);
         g.pod.u.simdOp.type_ = type;
@@ -1033,32 +1067,41 @@ class AsmJSModule
         return globals_.append(g);
     }
     unsigned numGlobals() const {
         return globals_.length();
     }
     Global &global(unsigned i) {
         return globals_[i];
     }
+    bool isValidViewSharedness(bool shared) const {
+        if (pod.hasArrayView_)
+            return pod.isSharedView_ == shared;
+        return !pod.isSharedView_ || shared;
+    }
 
     /*************************************************************************/
 
     void startFunctionBodies() {
         MOZ_ASSERT(!isFinishedWithModulePrologue());
         pod.funcPtrTableAndExitBytes_ = 0;
         MOZ_ASSERT(isFinishedWithModulePrologue());
     }
 
     /*************************************************************************/
     // These functions are called while parsing/compiling function bodies:
 
     bool hasArrayView() const {
         MOZ_ASSERT(isFinishedWithModulePrologue());
         return pod.hasArrayView_;
     }
+    bool isSharedView() const {
+        MOZ_ASSERT(pod.hasArrayView_);
+        return pod.isSharedView_;
+    }
     void addChangeHeap(uint32_t mask, uint32_t min, uint32_t max) {
         MOZ_ASSERT(isFinishedWithModulePrologue());
         MOZ_ASSERT(!pod.hasFixedMinHeapLength_);
         MOZ_ASSERT(IsValidAsmJSHeapLength(mask + 1));
         MOZ_ASSERT(min >= RoundUpToNextValidAsmJSHeapLength(0));
         MOZ_ASSERT(max <= pod.maxHeapLength_);
         MOZ_ASSERT(min <= max);
         pod.heapLengthMask_ = mask;
--- a/js/src/asmjs/AsmJSSignalHandlers.cpp
+++ b/js/src/asmjs/AsmJSSignalHandlers.cpp
@@ -452,16 +452,17 @@ HandleFault(PEXCEPTION_POINTERS exceptio
     // load/store that we should handle. If this is a load, assign the
     // JS-defined result value to the destination register (ToInt32(undefined)
     // or ToNumber(undefined), determined by the type of the destination
     // register) and set the PC to the next op. Upon return from the handler,
     // execution will resume at this next PC.
     if (heapAccess->isLoad())
         SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
     *ppc += heapAccess->opLength();
+
     return true;
 # else
     return false;
 # endif
 }
 
 static LONG WINAPI
 AsmJSFaultHandler(LPEXCEPTION_POINTERS exception)
@@ -843,16 +844,17 @@ HandleFault(int signum, siginfo_t *info,
     // load/store that we should handle. If this is a load, assign the
     // JS-defined result value to the destination register (ToInt32(undefined)
     // or ToNumber(undefined), determined by the type of the destination
     // register) and set the PC to the next op. Upon return from the handler,
     // execution will resume at this next PC.
     if (heapAccess->isLoad())
         SetRegisterToCoercedUndefined(context, heapAccess->isFloat32Load(), heapAccess->loadedReg());
     *ppc += heapAccess->opLength();
+
     return true;
 # else
     return false;
 # endif
 }
 
 static struct sigaction sPrevSEGVHandler;
 
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -1059,16 +1059,17 @@ class MOZ_STACK_CLASS ModuleCompiler
             ConstantLiteral,
             ConstantImport,
             Function,
             FuncPtrTable,
             FFI,
             ArrayView,
             ArrayViewCtor,
             MathBuiltinFunction,
+            AtomicsBuiltinFunction,
             SimdCtor,
             SimdOperation,
             ByteLength,
             ChangeHeap
         };
 
       private:
         Which which_;
@@ -1078,16 +1079,17 @@ class MOZ_STACK_CLASS ModuleCompiler
                 uint32_t index_;
                 AsmJSNumLit literalValue_;
             } varOrConst;
             uint32_t funcIndex_;
             uint32_t funcPtrTableIndex_;
             uint32_t ffiIndex_;
             Scalar::Type viewType_;
             AsmJSMathBuiltinFunction mathBuiltinFunc_;
+            AsmJSAtomicsBuiltinFunction atomicsBuiltinFunc_;
             AsmJSSimdType simdCtorType_;
             struct {
                 AsmJSSimdType type_;
                 AsmJSSimdOperation which_;
             } simdOp;
             struct {
                 uint32_t srcBegin_;
                 uint32_t srcEnd_;
@@ -1125,27 +1127,37 @@ class MOZ_STACK_CLASS ModuleCompiler
         uint32_t funcPtrTableIndex() const {
             MOZ_ASSERT(which_ == FuncPtrTable);
             return u.funcPtrTableIndex_;
         }
         unsigned ffiIndex() const {
             MOZ_ASSERT(which_ == FFI);
             return u.ffiIndex_;
         }
+        bool isAnyArrayView() const {
+            return which_ == ArrayView || which_ == ArrayViewCtor;
+        }
         Scalar::Type viewType() const {
-            MOZ_ASSERT(which_ == ArrayView || which_ == ArrayViewCtor);
+            MOZ_ASSERT(isAnyArrayView());
             return u.viewType_;
         }
         bool isMathFunction() const {
             return which_ == MathBuiltinFunction;
         }
         AsmJSMathBuiltinFunction mathBuiltinFunction() const {
             MOZ_ASSERT(which_ == MathBuiltinFunction);
             return u.mathBuiltinFunc_;
         }
+        bool isAtomicsFunction() const {
+            return which_ == AtomicsBuiltinFunction;
+        }
+        AsmJSAtomicsBuiltinFunction atomicsBuiltinFunction() const {
+            MOZ_ASSERT(which_ == AtomicsBuiltinFunction);
+            return u.atomicsBuiltinFunc_;
+        }
         bool isSimdCtor() const {
             return which_ == SimdCtor;
         }
         AsmJSSimdType simdCtorType() const {
             MOZ_ASSERT(which_ == SimdCtor);
             return u.simdCtorType_;
         }
         bool isSimdOperation() const {
@@ -1273,16 +1285,17 @@ class MOZ_STACK_CLASS ModuleCompiler
 
         PropertyName *name;
         unsigned ms;
         unsigned line;
         unsigned column;
     };
 
     typedef HashMap<PropertyName*, MathBuiltin> MathNameMap;
+    typedef HashMap<PropertyName*, AsmJSAtomicsBuiltinFunction> AtomicsNameMap;
     typedef HashMap<PropertyName*, AsmJSSimdOperation> SimdOperationNameMap;
     typedef HashMap<PropertyName*, Global*> GlobalMap;
     typedef Vector<Func*> FuncVector;
     typedef Vector<AsmJSGlobalAccess> GlobalAccessVector;
     typedef Vector<SlowFunction> SlowFunctionVector;
     typedef Vector<ArrayView> ArrayViewVector;
 
     ExclusiveContext *             cx_;
@@ -1296,16 +1309,17 @@ class MOZ_STACK_CLASS ModuleCompiler
     PropertyName *                 moduleFunctionName_;
 
     GlobalMap                      globals_;
     FuncVector                     functions_;
     FuncPtrTableVector             funcPtrTables_;
     ArrayViewVector                arrayViews_;
     ExitMap                        exits_;
     MathNameMap                    standardLibraryMathNames_;
+    AtomicsNameMap                 standardLibraryAtomicsNames_;
     SimdOperationNameMap           standardLibrarySimdOpNames_;
     NonAssertingLabel              stackOverflowLabel_;
     NonAssertingLabel              asyncInterruptLabel_;
     NonAssertingLabel              syncInterruptLabel_;
     NonAssertingLabel              onDetachedLabel_;
 
     UniquePtr<char[], JS::FreePolicy> errorString_;
     uint32_t                       errorOffset_;
@@ -1328,16 +1342,22 @@ class MOZ_STACK_CLASS ModuleCompiler
     }
     bool addStandardLibraryMathName(const char *name, double cst) {
         JSAtom *atom = Atomize(cx_, name, strlen(name));
         if (!atom)
             return false;
         MathBuiltin builtin(cst);
         return standardLibraryMathNames_.putNew(atom->asPropertyName(), builtin);
     }
+    bool addStandardLibraryAtomicsName(const char *name, AsmJSAtomicsBuiltinFunction func) {
+        JSAtom *atom = Atomize(cx_, name, strlen(name));
+        if (!atom)
+            return false;
+        return standardLibraryAtomicsNames_.putNew(atom->asPropertyName(), func);
+    }
     bool addStandardLibrarySimdOpName(const char *name, AsmJSSimdOperation op) {
         JSAtom *atom = Atomize(cx_, name, strlen(name));
         if (!atom)
             return false;
         return standardLibrarySimdOpNames_.putNew(atom->asPropertyName(), op);
     }
 
   public:
@@ -1349,16 +1369,17 @@ class MOZ_STACK_CLASS ModuleCompiler
         moduleFunctionNode_(parser.pc->maybeFunction),
         moduleFunctionName_(nullptr),
         globals_(cx),
         functions_(cx),
         funcPtrTables_(cx),
         arrayViews_(cx),
         exits_(cx),
         standardLibraryMathNames_(cx),
+        standardLibraryAtomicsNames_(cx),
         standardLibrarySimdOpNames_(cx),
         errorString_(nullptr),
         errorOffset_(UINT32_MAX),
         errorOverRecursed_(false),
         usecBefore_(PRMJ_Now()),
         slowFunctions_(cx),
         finishedFunctionBodies_(false),
         supportsSimd_(cx->jitSupportsSimd()),
@@ -1410,16 +1431,30 @@ class MOZ_STACK_CLASS ModuleCompiler
             !addStandardLibraryMathName("LOG10E", M_LOG10E) ||
             !addStandardLibraryMathName("PI", M_PI) ||
             !addStandardLibraryMathName("SQRT1_2", M_SQRT1_2) ||
             !addStandardLibraryMathName("SQRT2", M_SQRT2))
         {
             return false;
         }
 
+        if (!standardLibraryAtomicsNames_.init() ||
+            !addStandardLibraryAtomicsName("compareExchange", AsmJSAtomicsBuiltin_compareExchange) ||
+            !addStandardLibraryAtomicsName("load", AsmJSAtomicsBuiltin_load) ||
+            !addStandardLibraryAtomicsName("store", AsmJSAtomicsBuiltin_store) ||
+            !addStandardLibraryAtomicsName("fence", AsmJSAtomicsBuiltin_fence) ||
+            !addStandardLibraryAtomicsName("add", AsmJSAtomicsBuiltin_add) ||
+            !addStandardLibraryAtomicsName("sub", AsmJSAtomicsBuiltin_sub) ||
+            !addStandardLibraryAtomicsName("and", AsmJSAtomicsBuiltin_and) ||
+            !addStandardLibraryAtomicsName("or", AsmJSAtomicsBuiltin_or) ||
+            !addStandardLibraryAtomicsName("xor", AsmJSAtomicsBuiltin_xor))
+        {
+            return false;
+        }
+
 #define ADDSTDLIBSIMDOPNAME(op) || !addStandardLibrarySimdOpName(#op, AsmJSSimdOperation_##op)
         if (!standardLibrarySimdOpNames_.init()
             FORALL_SIMD_OP(ADDSTDLIBSIMDOPNAME))
         {
             return false;
         }
 #undef ADDSTDLIBSIMDOPNAME
 
@@ -1539,16 +1574,23 @@ class MOZ_STACK_CLASS ModuleCompiler
     }
     bool lookupStandardLibraryMathName(PropertyName *name, MathBuiltin *mathBuiltin) const {
         if (MathNameMap::Ptr p = standardLibraryMathNames_.lookup(name)) {
             *mathBuiltin = p->value();
             return true;
         }
         return false;
     }
+    bool lookupStandardLibraryAtomicsName(PropertyName *name, AsmJSAtomicsBuiltinFunction *atomicsBuiltin) const {
+        if (AtomicsNameMap::Ptr p = standardLibraryAtomicsNames_.lookup(name)) {
+            *atomicsBuiltin = p->value();
+            return true;
+        }
+        return false;
+    }
     bool lookupStandardSimdOpName(PropertyName *name, AsmJSSimdOperation *op) const {
         if (SimdOperationNameMap::Ptr p = standardLibrarySimdOpNames_.lookup(name)) {
             *op = p->value();
             return true;
         }
         return false;
     }
     ExitMap::Range allExits() const {
@@ -1655,45 +1697,54 @@ class MOZ_STACK_CLASS ModuleCompiler
         if (!global)
             return false;
         uint32_t index;
         if (!module_->addFFI(field, &index))
             return false;
         global->u.ffiIndex_ = index;
         return globals_.putNew(varName, global);
     }
-    bool addArrayView(PropertyName *varName, Scalar::Type vt, PropertyName *maybeField) {
+    bool addArrayView(PropertyName *varName, Scalar::Type vt, PropertyName *maybeField, bool isSharedView) {
         if (!arrayViews_.append(ArrayView(varName, vt)))
             return false;
         Global *global = moduleLifo_.new_<Global>(Global::ArrayView);
         if (!global)
             return false;
-        if (!module_->addArrayView(vt, maybeField))
+        if (!module_->addArrayView(vt, maybeField, isSharedView))
             return false;
         global->u.viewType_ = vt;
         return globals_.putNew(varName, global);
     }
-    bool addArrayViewCtor(PropertyName *varName, Scalar::Type vt, PropertyName *fieldName) {
+    bool addArrayViewCtor(PropertyName *varName, Scalar::Type vt, PropertyName *fieldName, bool isSharedView) {
         Global *global = moduleLifo_.new_<Global>(Global::ArrayViewCtor);
         if (!global)
             return false;
-        if (!module_->addArrayViewCtor(vt, fieldName))
+        if (!module_->addArrayViewCtor(vt, fieldName, isSharedView))
             return false;
         global->u.viewType_ = vt;
         return globals_.putNew(varName, global);
     }
     bool addMathBuiltinFunction(PropertyName *varName, AsmJSMathBuiltinFunction func, PropertyName *fieldName) {
         if (!module_->addMathBuiltinFunction(func, fieldName))
             return false;
         Global *global = moduleLifo_.new_<Global>(Global::MathBuiltinFunction);
         if (!global)
             return false;
         global->u.mathBuiltinFunc_ = func;
         return globals_.putNew(varName, global);
     }
+    bool addAtomicsBuiltinFunction(PropertyName *varName, AsmJSAtomicsBuiltinFunction func, PropertyName *fieldName) {
+        if (!module_->addAtomicsBuiltinFunction(func, fieldName))
+            return false;
+        Global *global = moduleLifo_.new_<Global>(Global::AtomicsBuiltinFunction);
+        if (!global)
+            return false;
+        global->u.atomicsBuiltinFunc_ = func;
+        return globals_.putNew(varName, global);
+    }
     bool addSimdCtor(PropertyName *varName, AsmJSSimdType type, PropertyName *fieldName) {
         if (!module_->addSimdCtor(type, fieldName))
             return false;
         Global *global = moduleLifo_.new_<Global>(Global::SimdCtor);
         if (!global)
             return false;
         global->u.simdCtorType_ = type;
         return globals_.putNew(varName, global);
@@ -2746,16 +2797,73 @@ class FunctionCompiler
         if (inDeadCode())
             return;
 
         bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
         MAsmJSStoreHeap *store = MAsmJSStoreHeap::New(alloc(), vt, ptr, v, needsBoundsCheck);
         curBlock_->add(store);
     }
 
+    void memoryBarrier(MemoryBarrierBits type)
+    {
+        if (inDeadCode())
+            return;
+        MMemoryBarrier *ins = MMemoryBarrier::New(alloc(), type);
+        curBlock_->add(ins);
+    }
+
+    MDefinition *atomicLoadHeap(Scalar::Type vt, MDefinition *ptr, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
+        MAsmJSLoadHeap *load = MAsmJSLoadHeap::New(alloc(), vt, ptr, needsBoundsCheck,
+                                                   MembarBeforeLoad, MembarAfterLoad);
+        curBlock_->add(load);
+        return load;
+    }
+
+    void atomicStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return;
+
+        bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
+        MAsmJSStoreHeap *store = MAsmJSStoreHeap::New(alloc(), vt, ptr, v, needsBoundsCheck,
+                                                      MembarBeforeStore, MembarAfterStore);
+        curBlock_->add(store);
+    }
+
+    MDefinition *atomicCompareExchangeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        // The code generator requires explicit bounds checking for compareExchange.
+        bool needsBoundsCheck = true;
+        MAsmJSCompareExchangeHeap *cas =
+            MAsmJSCompareExchangeHeap::New(alloc(), vt, ptr, oldv, newv, needsBoundsCheck);
+        curBlock_->add(cas);
+        return cas;
+    }
+
+    MDefinition *atomicBinopHeap(js::jit::AtomicOp op, Scalar::Type vt, MDefinition *ptr, MDefinition *v, NeedsBoundsCheck chk)
+    {
+        if (inDeadCode())
+            return nullptr;
+
+        // The code generator requires explicit bounds checking for the binops.
+        bool needsBoundsCheck = true;
+        MAsmJSAtomicBinopHeap *binop =
+            MAsmJSAtomicBinopHeap::New(alloc(), op, vt, ptr, v, needsBoundsCheck);
+        curBlock_->add(binop);
+        return binop;
+    }
+
     MDefinition *loadGlobalVar(const ModuleCompiler::Global &global)
     {
         if (inDeadCode())
             return nullptr;
 
         MIRType type = global.varOrConstType().toMIRType();
 
         unsigned globalDataOffset;
@@ -3643,37 +3751,63 @@ CheckGlobalVariableInitImport(ModuleComp
     AsmJSCoercion coercion;
     ParseNode *coercedExpr;
     if (!CheckTypeAnnotation(m, initNode, &coercion, &coercedExpr))
         return false;
     return CheckGlobalVariableImportExpr(m, varName, coercion, coercedExpr, isConst);
 }
 
+// If |name| is a typed-array view constructor name, set |*type| to its element
+// type, set |*shared| to true exactly for the Shared* variants, and return
+// true.  Otherwise return false (|*type| is untouched, |*shared| is false).
 static bool
-IsArrayViewCtorName(ModuleCompiler &m, PropertyName *name, Scalar::Type *type)
+IsArrayViewCtorName(ModuleCompiler &m, PropertyName *name, Scalar::Type *type, bool *shared)
 {
     JSAtomState &names = m.cx()->names();
     *shared = false;
-    if (name == names.Int8Array || name == names.SharedInt8Array)
+    if (name == names.Int8Array) {
         *type = Scalar::Int8;
-    else if (name == names.Uint8Array || name == names.SharedUint8Array)
+    } else if (name == names.Uint8Array) {
         *type = Scalar::Uint8;
-    else if (name == names.Int16Array || name == names.SharedInt16Array)
+    } else if (name == names.Int16Array) {
         *type = Scalar::Int16;
-    else if (name == names.Uint16Array || name == names.SharedUint16Array)
+    } else if (name == names.Uint16Array) {
         *type = Scalar::Uint16;
-    else if (name == names.Int32Array || name == names.SharedInt32Array)
+    } else if (name == names.Int32Array) {
         *type = Scalar::Int32;
-    else if (name == names.Uint32Array || name == names.SharedUint32Array)
+    } else if (name == names.Uint32Array) {
         *type = Scalar::Uint32;
-    else if (name == names.Float32Array || name == names.SharedFloat32Array)
+    } else if (name == names.Float32Array) {
         *type = Scalar::Float32;
-    else if (name == names.Float64Array || name == names.SharedFloat64Array)
+    } else if (name == names.Float64Array) {
         *type = Scalar::Float64;
-    else
-        return false;
+    } else if (name == names.SharedInt8Array) {
+        *shared = true;
+        *type = Scalar::Int8;
+    } else if (name == names.SharedUint8Array) {
+        *shared = true;
+        *type = Scalar::Uint8;
+    } else if (name == names.SharedInt16Array) {
+        *shared = true;
+        *type = Scalar::Int16;
+    } else if (name == names.SharedUint16Array) {
+        *shared = true;
+        *type = Scalar::Uint16;
+    } else if (name == names.SharedInt32Array) {
+        *shared = true;
+        *type = Scalar::Int32;
+    } else if (name == names.SharedUint32Array) {
+        *shared = true;
+        *type = Scalar::Uint32;
+    } else if (name == names.SharedFloat32Array) {
+        *shared = true;
+        *type = Scalar::Float32;
+    } else if (name == names.SharedFloat64Array) {
+        *shared = true;
+        *type = Scalar::Float64;
+    } else {
+        return false;
+    }
     return true;
 }
 
 static bool
 CheckNewArrayViewArgs(ModuleCompiler &m, ParseNode *ctorExpr, PropertyName *bufferName)
 {
     ParseNode *bufArg = NextNode(ctorExpr);
     if (!bufArg || NextNode(bufArg) != nullptr)
@@ -3695,24 +3829,25 @@ CheckNewArrayView(ModuleCompiler &m, Pro
     PropertyName *bufferName = m.module().bufferArgumentName();
     if (!bufferName)
         return m.fail(newExpr, "cannot create array view without an asm.js heap parameter");
 
     ParseNode *ctorExpr = ListHead(newExpr);
 
     PropertyName *field;
     Scalar::Type type;
+    bool shared = false;
     if (ctorExpr->isKind(PNK_DOT)) {
         ParseNode *base = DotBase(ctorExpr);
 
         if (!IsUseOfName(base, globalName))
             return m.failName(base, "expecting '%s.*Array", globalName);
 
         field = DotMember(ctorExpr);
-        if (!IsArrayViewCtorName(m, field, &type))
+        if (!IsArrayViewCtorName(m, field, &type, &shared))
             return m.fail(ctorExpr, "could not match typed array name");
     } else {
         if (!ctorExpr->isKind(PNK_NAME))
             return m.fail(ctorExpr, "expecting name of imported array view constructor");
 
         PropertyName *globalName = ctorExpr->name();
         const ModuleCompiler::Global *global = m.lookupGlobal(globalName);
         if (!global)
@@ -3723,17 +3858,20 @@ CheckNewArrayView(ModuleCompiler &m, Pro
 
         field = nullptr;
         type = global->viewType();
     }
 
     if (!CheckNewArrayViewArgs(m, ctorExpr, bufferName))
         return false;
 
-    return m.addArrayView(varName, type, field);
+    if (!m.module().isValidViewSharedness(shared))
+        return m.failName(ctorExpr, "%s has different sharedness than previous view constructors", globalName);
+
+    return m.addArrayView(varName, type, field, shared);
 }
 
 static bool
 IsSimdTypeName(ModuleCompiler &m, PropertyName *name, AsmJSSimdType *type)
 {
     if (name == m.cx()->names().int32x4) {
         *type = AsmJSSimdType_int32x4;
         return true;
@@ -3777,16 +3915,28 @@ CheckGlobalMathImport(ModuleCompiler &m,
         return m.addMathBuiltinConstant(varName, mathBuiltin.u.cst, field);
       default:
         break;
     }
     MOZ_CRASH("unexpected or uninitialized math builtin type");
 }
 
+// Validate a global import of the form |var f = glob.Atomics.<field>;| and
+// register |varName| as an Atomics builtin function.  Fails validation when
+// |field| is not a recognized Atomics member.
 static bool
+CheckGlobalAtomicsImport(ModuleCompiler &m, ParseNode *initNode, PropertyName *varName,
+                         PropertyName *field)
+{
+    // Atomics builtin, with the form glob.Atomics.[[builtin]]
+    AsmJSAtomicsBuiltinFunction func;
+    if (!m.lookupStandardLibraryAtomicsName(field, &func))
+        return m.failName(initNode, "'%s' is not a standard Atomics builtin", field);
+
+    return m.addAtomicsBuiltinFunction(varName, func, field);
+}
+
+static bool
 CheckGlobalSimdImport(ModuleCompiler &m, ParseNode *initNode, PropertyName *varName,
                       PropertyName *field)
 {
     if (!m.supportsSimd())
         return m.fail(initNode, "SIMD is not supported on this platform");
 
     // SIMD constructor, with the form glob.SIMD.[[type]]
     AsmJSSimdType simdType;
@@ -3812,51 +3962,57 @@ CheckGlobalSimdOperationImport(ModuleCom
 static bool
 CheckGlobalDotImport(ModuleCompiler &m, PropertyName *varName, ParseNode *initNode)
 {
     ParseNode *base = DotBase(initNode);
     PropertyName *field = DotMember(initNode);
 
     if (base->isKind(PNK_DOT)) {
         ParseNode *global = DotBase(base);
-        PropertyName *mathOrSimd = DotMember(base);
+        PropertyName *mathOrAtomicsOrSimd = DotMember(base);
 
         PropertyName *globalName = m.module().globalArgumentName();
         if (!globalName)
             return m.fail(base, "import statement requires the module have a stdlib parameter");
 
         if (!IsUseOfName(global, globalName)) {
             if (global->isKind(PNK_DOT)) {
                 return m.failName(base, "imports can have at most two dot accesses "
                                         "(e.g. %s.Math.sin)", globalName);
             }
             return m.failName(base, "expecting %s.*", globalName);
         }
 
-        if (mathOrSimd == m.cx()->names().Math)
+        if (mathOrAtomicsOrSimd == m.cx()->names().Math)
             return CheckGlobalMathImport(m, initNode, varName, field);
-        if (mathOrSimd == m.cx()->names().SIMD)
+        if (mathOrAtomicsOrSimd == m.cx()->names().Atomics)
+            return CheckGlobalAtomicsImport(m, initNode, varName, field);
+        if (mathOrAtomicsOrSimd == m.cx()->names().SIMD)
             return CheckGlobalSimdImport(m, initNode, varName, field);
         return m.failName(base, "expecting %s.{Math|SIMD}", globalName);
     }
 
     if (!base->isKind(PNK_NAME))
         return m.fail(base, "expected name of variable or parameter");
 
     if (base->name() == m.module().globalArgumentName()) {
         if (field == m.cx()->names().NaN)
             return m.addGlobalConstant(varName, GenericNaN(), field);
         if (field == m.cx()->names().Infinity)
             return m.addGlobalConstant(varName, PositiveInfinity<double>(), field);
         if (field == m.cx()->names().byteLength)
             return m.addByteLength(varName);
 
         Scalar::Type type;
-        if (IsArrayViewCtorName(m, field, &type))
-            return m.addArrayViewCtor(varName, type, field);
+        bool shared = false;
+        if (IsArrayViewCtorName(m, field, &type, &shared)) {
+            if (!m.module().isValidViewSharedness(shared))
+                return m.failName(initNode, "'%s' has different sharedness than previous view constructors", field);
+            return m.addArrayViewCtor(varName, type, field, shared);
+        }
 
         return m.failName(initNode, "'%s' is not a standard constant or typed array name", field);
     }
 
     if (base->name() == m.module().importArgumentName())
         return m.addFFI(varName, field);
 
     const ModuleCompiler::Global *global = m.lookupGlobal(base->name());
@@ -4154,16 +4310,17 @@ CheckVarRef(FunctionCompiler &f, ParseNo
           case ModuleCompiler::Global::ConstantImport:
           case ModuleCompiler::Global::Variable:
             *def = f.loadGlobalVar(*global);
             *type = global->varOrConstType();
             break;
           case ModuleCompiler::Global::Function:
           case ModuleCompiler::Global::FFI:
           case ModuleCompiler::Global::MathBuiltinFunction:
+          case ModuleCompiler::Global::AtomicsBuiltinFunction:
           case ModuleCompiler::Global::FuncPtrTable:
           case ModuleCompiler::Global::ArrayView:
           case ModuleCompiler::Global::ArrayViewCtor:
           case ModuleCompiler::Global::SimdCtor:
           case ModuleCompiler::Global::SimdOperation:
           case ModuleCompiler::Global::ByteLength:
           case ModuleCompiler::Global::ChangeHeap:
             return f.failName(varRef, "'%s' may not be accessed by ordinary expressions", name);
@@ -4203,28 +4360,26 @@ FoldMaskedArrayIndex(FunctionCompiler &f
         *indexExpr = indexNode;
         return true;
     }
 
     return false;
 }
 
 static bool
-CheckArrayAccess(FunctionCompiler &f, ParseNode *elem, Scalar::Type *viewType,
-                 MDefinition **def, NeedsBoundsCheck *needsBoundsCheck)
-{
-    ParseNode *viewName = ElemBase(elem);
-    ParseNode *indexExpr = ElemIndex(elem);
+CheckArrayAccess(FunctionCompiler &f, ParseNode *viewName, ParseNode *indexExpr,
+                 Scalar::Type *viewType, MDefinition **def, NeedsBoundsCheck *needsBoundsCheck)
+{
     *needsBoundsCheck = NEEDS_BOUNDS_CHECK;
 
     if (!viewName->isKind(PNK_NAME))
         return f.fail(viewName, "base of array access must be a typed array view name");
 
     const ModuleCompiler::Global *global = f.lookupGlobal(viewName->name());
-    if (!global || global->which() != ModuleCompiler::Global::ArrayView)
+    if (!global || !global->isAnyArrayView())
         return f.fail(viewName, "base of array access must be a typed array view name");
 
     *viewType = global->viewType();
 
     uint32_t index;
     if (IsLiteralOrConstInt(f, indexExpr, &index)) {
         uint64_t byteOffset = uint64_t(index) << TypedArrayShift(*viewType);
         if (byteOffset > INT32_MAX)
@@ -4311,17 +4466,17 @@ CheckArrayAccess(FunctionCompiler &f, Pa
 }
 
 static bool
 CheckLoadArray(FunctionCompiler &f, ParseNode *elem, MDefinition **def, Type *type)
 {
     Scalar::Type viewType;
     MDefinition *pointerDef;
     NeedsBoundsCheck needsBoundsCheck;
+    // CheckArrayAccess now takes the view name and index expression as
+    // separate nodes so the atomics checkers can reuse it on call arguments.
     if (!CheckArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType, &pointerDef, &needsBoundsCheck))
         return false;
 
     *def = f.loadHeap(viewType, pointerDef, needsBoundsCheck);
     *type = TypedArrayLoadType(viewType);
     return true;
 }
 
 static bool
@@ -4366,17 +4521,17 @@ CheckDotAccess(FunctionCompiler &f, Pars
 }
 
 static bool
 CheckStoreArray(FunctionCompiler &f, ParseNode *lhs, ParseNode *rhs, MDefinition **def, Type *type)
 {
     Scalar::Type viewType;
     MDefinition *pointerDef;
     NeedsBoundsCheck needsBoundsCheck;
-    if (!CheckArrayAccess(f, lhs, &viewType, &pointerDef, &needsBoundsCheck))
+    if (!CheckArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType, &pointerDef, &needsBoundsCheck))
         return false;
 
     f.enterHeapExpression();
 
     MDefinition *rhsDef;
     Type rhsType;
     if (!CheckExpr(f, rhs, &rhsDef, &rhsType))
         return false;
@@ -4616,16 +4771,203 @@ CheckMathMinMax(FunctionCompiler &f, Par
 
         lastDef = f.minMax(lastDef, nextDef, firstType.toMIRType(), isMax);
     }
 
     *def = lastDef;
     return true;
 }
 
+// Validate the (array, index) pair of an Atomics call: the base must be a
+// shared integer typed-array view.  On success *viewType, *pointerDef and
+// *needsBoundsCheck describe the access.
+static bool
+CheckSharedArrayAtomicAccess(FunctionCompiler &f, ParseNode *viewName, ParseNode *indexExpr,
+                             Scalar::Type *viewType, MDefinition **pointerDef,
+                             NeedsBoundsCheck *needsBoundsCheck)
+{
+    if (!CheckArrayAccess(f, viewName, indexExpr, viewType, pointerDef, needsBoundsCheck))
+        return false;
+
+    // Atomic accesses may be made on shared integer arrays only.
+
+    // The global will be sane, CheckArrayAccess checks it.
+    const ModuleCompiler::Global *global = f.lookupGlobal(viewName->name());
+    if (global->which() != ModuleCompiler::Global::ArrayView || !f.m().module().isSharedView())
+        return f.fail(viewName, "base of array access must be a shared typed array view name");
+
+    // Every case of the switch returns, so no trailing return is needed.
+    switch (*viewType) {
+      case Scalar::Int8:
+      case Scalar::Int16:
+      case Scalar::Int32:
+      case Scalar::Uint8:
+      case Scalar::Uint16:
+      case Scalar::Uint32:
+        return true;
+      default:
+        return f.fail(viewName, "not an integer array");
+    }
+}
+
+// Validate Atomics.fence(): no arguments; emits a full memory barrier and
+// produces no value.
+static bool
+CheckAtomicsFence(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
+{
+    if (CallArgListLength(call) != 0)
+        return f.fail(call, "Atomics.fence must be passed 0 arguments");
+
+    f.memoryBarrier(MembarFull);
+    // Define *def: the caller (e.g. CheckCoercedAtomicsBuiltinCall) passes it
+    // on to CoerceResult, so it must not be left uninitialized.
+    *def = nullptr;
+    *type = Type::Void;
+    return true;
+}
+
+// Validate Atomics.load(array, index): a barrier-bracketed heap load from a
+// shared integer view; the result is Signed.
+static bool
+CheckAtomicsLoad(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
+{
+    if (CallArgListLength(call) != 2)
+        return f.fail(call, "Atomics.load must be passed 2 arguments");
+
+    ParseNode *arrayArg = CallArgList(call);
+    ParseNode *indexArg = NextNode(arrayArg);
+
+    Scalar::Type viewType;
+    MDefinition *pointerDef;
+    NeedsBoundsCheck needsBoundsCheck;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
+        return false;
+
+    *def = f.atomicLoadHeap(viewType, pointerDef, needsBoundsCheck);
+    *type = Type::Signed;
+    return true;
+}
+
+// Validate Atomics.store(array, index, value): a barrier-bracketed heap store
+// to a shared integer view.  The stored value must be intish; the expression
+// result is the stored value, typed Signed.
+static bool
+CheckAtomicsStore(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
+{
+    if (CallArgListLength(call) != 3)
+        return f.fail(call, "Atomics.store must be passed 3 arguments");
+
+    ParseNode *arrayArg = CallArgList(call);
+    ParseNode *indexArg = NextNode(arrayArg);
+    ParseNode *valueArg = NextNode(indexArg);
+
+    Scalar::Type viewType;
+    MDefinition *pointerDef;
+    NeedsBoundsCheck needsBoundsCheck;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
+        return false;
+
+    MDefinition *rhsDef;
+    Type rhsType;
+    if (!CheckExpr(f, valueArg, &rhsDef, &rhsType))
+        return false;
+
+    // Report the type error on the value expression, not the array argument,
+    // matching CheckAtomicsBinop and CheckAtomicsCompareExchange.
+    if (!rhsType.isIntish())
+        return f.failf(valueArg, "%s is not a subtype of intish", rhsType.toChars());
+
+    f.atomicStoreHeap(viewType, pointerDef, rhsDef, needsBoundsCheck);
+
+    *def = rhsDef;
+    *type = Type::Signed;
+    return true;
+}
+
+// Validate an Atomics read-modify-write call (add/sub/and/or/xor), mapped to
+// the jit-level |op|.  The operand must be intish; the result (the old value)
+// is Signed.
+static bool
+CheckAtomicsBinop(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type, js::jit::AtomicOp op)
+{
+    if (CallArgListLength(call) != 3)
+        return f.fail(call, "Atomics binary operator must be passed 3 arguments");
+
+    ParseNode *arrayArg = CallArgList(call);
+    ParseNode *indexArg = NextNode(arrayArg);
+    ParseNode *valueArg = NextNode(indexArg);
+
+    Scalar::Type viewType;
+    MDefinition *pointerDef;
+    NeedsBoundsCheck needsBoundsCheck;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
+        return false;
+
+    MDefinition *valueArgDef;
+    Type valueArgType;
+    if (!CheckExpr(f, valueArg, &valueArgDef, &valueArgType))
+        return false;
+
+    if (!valueArgType.isIntish())
+        return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars());
+
+    *def = f.atomicBinopHeap(op, viewType, pointerDef, valueArgDef, needsBoundsCheck);
+    *type = Type::Signed;
+    return true;
+}
+
+// Validate Atomics.compareExchange(array, index, oldValue, newValue).  Both
+// value operands must be intish; the result (the value observed before the
+// exchange) is Signed.
+static bool
+CheckAtomicsCompareExchange(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
+{
+    if (CallArgListLength(call) != 4)
+        return f.fail(call, "Atomics.compareExchange must be passed 4 arguments");
+
+    ParseNode *arrayArg = CallArgList(call);
+    ParseNode *indexArg = NextNode(arrayArg);
+    ParseNode *oldValueArg = NextNode(indexArg);
+    ParseNode *newValueArg = NextNode(oldValueArg);
+
+    Scalar::Type viewType;
+    MDefinition *pointerDef;
+    NeedsBoundsCheck needsBoundsCheck;
+    if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType, &pointerDef, &needsBoundsCheck))
+        return false;
+
+    MDefinition *oldValueArgDef;
+    Type oldValueArgType;
+    if (!CheckExpr(f, oldValueArg, &oldValueArgDef, &oldValueArgType))
+        return false;
+
+    MDefinition *newValueArgDef;
+    Type newValueArgType;
+    if (!CheckExpr(f, newValueArg, &newValueArgDef, &newValueArgType))
+        return false;
+
+    if (!oldValueArgType.isIntish())
+        return f.failf(oldValueArg, "%s is not a subtype of intish", oldValueArgType.toChars());
+
+    if (!newValueArgType.isIntish())
+        return f.failf(newValueArg, "%s is not a subtype of intish", newValueArgType.toChars());
+
+    *def = f.atomicCompareExchangeHeap(viewType, pointerDef, oldValueArgDef, newValueArgDef, needsBoundsCheck);
+    *type = Type::Signed;
+    return true;
+}
+
+// Dispatch an Atomics.* builtin call to the checker for the specific builtin.
+static bool
+CheckAtomicsBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSAtomicsBuiltinFunction func,
+                        MDefinition **resultDef, Type *resultType)
+{
+    switch (func) {
+      case AsmJSAtomicsBuiltin_compareExchange:
+        return CheckAtomicsCompareExchange(f, callNode, resultDef, resultType);
+      case AsmJSAtomicsBuiltin_load:
+        return CheckAtomicsLoad(f, callNode, resultDef, resultType);
+      case AsmJSAtomicsBuiltin_store:
+        return CheckAtomicsStore(f, callNode, resultDef, resultType);
+      case AsmJSAtomicsBuiltin_fence:
+        return CheckAtomicsFence(f, callNode, resultDef, resultType);
+      case AsmJSAtomicsBuiltin_add:
+        return CheckAtomicsBinop(f, callNode, resultDef, resultType, AtomicFetchAddOp);
+      case AsmJSAtomicsBuiltin_sub:
+        return CheckAtomicsBinop(f, callNode, resultDef, resultType, AtomicFetchSubOp);
+      case AsmJSAtomicsBuiltin_and:
+        return CheckAtomicsBinop(f, callNode, resultDef, resultType, AtomicFetchAndOp);
+      case AsmJSAtomicsBuiltin_or:
+        return CheckAtomicsBinop(f, callNode, resultDef, resultType, AtomicFetchOrOp);
+      case AsmJSAtomicsBuiltin_xor:
+        return CheckAtomicsBinop(f, callNode, resultDef, resultType, AtomicFetchXorOp);
+      default:
+        MOZ_CRASH("unexpected atomicsBuiltin function");
+    }
+}
+
 typedef bool (*CheckArgType)(FunctionCompiler &f, ParseNode *argNode, Type type);
 
 static bool
 CheckCallArgs(FunctionCompiler &f, ParseNode *callNode, CheckArgType checkArg,
               FunctionCompiler::Call *call)
 {
     f.startCallArgs(call);
 
@@ -5401,16 +5743,18 @@ static bool
 CheckUncoercedCall(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type)
 {
     MOZ_ASSERT(expr->isKind(PNK_CALL));
 
     const ModuleCompiler::Global *global;
     if (IsCallToGlobal(f.m(), expr, &global)) {
         if (global->isMathFunction())
             return CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), def, type);
+        if (global->isAtomicsFunction())
+            return CheckAtomicsBuiltinCall(f, expr, global->atomicsBuiltinFunction(), def, type);
         if (global->isSimdCtor())
             return CheckSimdCtorCall(f, expr, global, def, type);
         if (global->isSimdOperation())
             return CheckSimdOperationCall(f, expr, global, def, type);
     }
 
     return f.fail(expr, "all function calls must either be calls to standard lib math functions, "
                         "ignored (via f(); or comma-expression), coerced to signed (via f()|0), "
@@ -5505,16 +5849,28 @@ CheckCoercedSimdCall(FunctionCompiler &f
             return false;
     }
 
     MOZ_ASSERT(type->isSimd());
     return CoerceResult(f, call, retType, *def, *type, def, type);
 }
 
+// Check an Atomics builtin call appearing in a coerced context (e.g. f()|0)
+// and coerce its result to |retType|.
 static bool
+CheckCoercedAtomicsBuiltinCall(FunctionCompiler &f, ParseNode *callNode,
+                               AsmJSAtomicsBuiltinFunction func, RetType retType,
+                               MDefinition **resultDef, Type *resultType)
+{
+    MDefinition *operand;
+    Type actualRetType;
+    if (!CheckAtomicsBuiltinCall(f, callNode, func, &operand, &actualRetType))
+        return false;
+    return CoerceResult(f, callNode, retType, operand, actualRetType, resultDef, resultType);
+}
+
+static bool
 CheckCoercedCall(FunctionCompiler &f, ParseNode *call, RetType retType, MDefinition **def, Type *type)
 {
     JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed());
 
     if (!f.canCall()) {
         return f.fail(call, "call expressions may not be nested inside heap expressions when "
                             "the module contains a change-heap function");
     }
@@ -5536,16 +5892,18 @@ CheckCoercedCall(FunctionCompiler &f, Pa
     PropertyName *calleeName = callee->name();
 
     if (const ModuleCompiler::Global *global = f.lookupGlobal(calleeName)) {
         switch (global->which()) {
           case ModuleCompiler::Global::FFI:
             return CheckFFICall(f, call, global->ffiIndex(), retType, def, type);
           case ModuleCompiler::Global::MathBuiltinFunction:
             return CheckCoercedMathBuiltinCall(f, call, global->mathBuiltinFunction(), retType, def, type);
+          case ModuleCompiler::Global::AtomicsBuiltinFunction:
+            return CheckCoercedAtomicsBuiltinCall(f, call, global->atomicsBuiltinFunction(), retType, def, type);
           case ModuleCompiler::Global::ConstantLiteral:
           case ModuleCompiler::Global::ConstantImport:
           case ModuleCompiler::Global::Variable:
           case ModuleCompiler::Global::FuncPtrTable:
           case ModuleCompiler::Global::ArrayView:
           case ModuleCompiler::Global::ArrayViewCtor:
           case ModuleCompiler::Global::ByteLength:
           case ModuleCompiler::Global::ChangeHeap:
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/asm.js/testAtomics.js
@@ -0,0 +1,253 @@
+// Skip the test silently when shared memory or the Atomics object is not
+// available in this build.
+if (!this.SharedArrayBuffer || !this.SharedInt32Array || !this.Atomics)
+    quit();
+
+// asm.js module exercising the Atomics builtins on a SharedInt32Array heap.
+// Each operation has a fixed-index variant and a *_i variant that takes a
+// byte offset (shifted down by 2 to form the element index).
+function loadModule_int32(stdlib, foreign, heap) {
+    "use asm";
+
+    var atomic_fence = stdlib.Atomics.fence;
+    var atomic_load = stdlib.Atomics.load;
+    var atomic_store = stdlib.Atomics.store;
+    var atomic_cmpxchg = stdlib.Atomics.compareExchange;
+    var atomic_add = stdlib.Atomics.add;
+    var atomic_sub = stdlib.Atomics.sub;
+    var atomic_and = stdlib.Atomics.and;
+    var atomic_or = stdlib.Atomics.or;
+    var atomic_xor = stdlib.Atomics.xor;
+
+    var i32a = new stdlib.SharedInt32Array(heap);
+
+    function do_fence() {
+	atomic_fence();
+    }
+
+    // Load element 0
+    function do_load() {
+	var v = 0;
+	v = atomic_load(i32a, 0);
+	return v|0;
+    }
+
+    // Load element i
+    function do_load_i(i) {
+	i = i|0;
+	var v = 0;
+	v = atomic_load(i32a, i>>2);
+	return v|0;
+    }
+
+    // Store 37 in element 0
+    function do_store() {
+	var v = 0;
+	v = atomic_store(i32a, 0, 37);
+	return v|0;
+    }
+
+    // Store 37 in element i
+    function do_store_i(i) {
+	i = i|0;
+	var v = 0;
+	v = atomic_store(i32a, i>>2, 37);
+	return v|0;
+    }
+
+    // Add 37 to element 10
+    function do_add() {
+	var v = 0;
+	v = atomic_add(i32a, 10, 37);
+	return v|0;
+    }
+
+    // Add 37 to element i
+    function do_add_i(i) {
+	i = i|0;
+	var v = 0;
+	v = atomic_add(i32a, i>>2, 37);
+	return v|0;
+    }
+
+    // Subtract 148 from element 20
+    function do_sub() {
+	var v = 0;
+	v = atomic_sub(i32a, 20, 148);
+	return v|0;
+    }
+
+    // Subtract 148 from element i
+    function do_sub_i(i) {
+	i = i|0;
+	var v = 0;
+	v = atomic_sub(i32a, i>>2, 148);
+	return v|0;
+    }
+
+    // AND 0x33333333 into element 30
+    function do_and() {
+	var v = 0;
+	v = atomic_and(i32a, 30, 0x33333333);
+	return v|0;
+    }
+
+    // AND 0x33333333 into element i
+    function do_and_i(i) {
+	i = i|0;
+	var v = 0;
+	v = atomic_and(i32a, i>>2, 0x33333333);
+	return v|0;
+    }
+
+    // OR 0x33333333 into element 40
+    function do_or() {
+	var v = 0;
+	v = atomic_or(i32a, 40, 0x33333333);
+	return v|0;
+    }
+
+    // OR 0x33333333 into element i
+    function do_or_i(i) {
+	i = i|0;
+	var v = 0;
+	v = atomic_or(i32a, i>>2, 0x33333333);
+	return v|0;
+    }
+
+    // XOR 0x33333333 into element 50
+    function do_xor() {
+	var v = 0;
+	v = atomic_xor(i32a, 50, 0x33333333);
+	return v|0;
+    }
+
+    // XOR 0x33333333 into element i
+    function do_xor_i(i) {
+	i = i|0;
+	var v = 0;
+	v = atomic_xor(i32a, i>>2, 0x33333333);
+	return v|0;
+    }
+
+    // CAS element 100: 0 -> -1
+    function do_cas1() {
+	var v = 0;
+	v = atomic_cmpxchg(i32a, 100, 0, -1);
+	return v|0;
+    }
+
+    // CAS element 100: -1 -> 0x5A5A5A5A
+    function do_cas2() {
+	var v = 0;
+	v = atomic_cmpxchg(i32a, 100, -1, 0x5A5A5A5A);
+	return v|0;
+    }
+
+    // CAS element i: 0 -> -1
+    function do_cas1_i(i) {
+	i = i|0;
+	var v = 0;
+	v = atomic_cmpxchg(i32a, i>>2, 0, -1);
+	return v|0;
+    }
+
+    // CAS element i: -1 -> 0x5A5A5A5A
+    function do_cas2_i(i) {
+	i = i|0;
+	var v = 0;
+	v = atomic_cmpxchg(i32a, i>>2, -1, 0x5A5A5A5A);
+	return v|0;
+    }
+
+    // Exported entry points, one pair per Atomics operation.
+    return { fence: do_fence,
+	     load: do_load,
+	     load_i: do_load_i,
+	     store: do_store,
+	     store_i: do_store_i,
+	     add: do_add,
+	     add_i: do_add_i,
+	     sub: do_sub,
+	     sub_i: do_sub_i,
+	     and: do_and,
+	     and_i: do_and_i,
+	     or: do_or,
+	     or_i: do_or_i,
+	     xor: do_xor,
+	     xor_i: do_xor_i,
+	     cas1: do_cas1,
+	     cas2: do_cas2,
+	     cas1_i: do_cas1_i,
+	     cas2_i: do_cas2_i };
+}
+
+// TODO: byte arrays
+// TODO: halfword arrays
+// TODO: signed vs unsigned; negative results
+
+var heap = new SharedArrayBuffer(65536);
+var i32a = new SharedInt32Array(heap);
+var module = loadModule_int32(this, {}, heap);
+
+// Byte size of an Int32 element; the *_i entry points take byte offsets.
+var size = 4;
+
+module.fence();
+
+// Each operation is checked twice: the fixed-index variant and the
+// dynamic-index (*_i) variant.  The returned value is the old cell value.
+i32a[0] = 12345;
+assertEq(module.load(), 12345);
+assertEq(module.load_i(size*0), 12345);
+
+assertEq(module.store(), 37);
+assertEq(i32a[0], 37);
+assertEq(module.store_i(size*0), 37);
+
+i32a[10] = 18;
+assertEq(module.add(), 18);
+assertEq(i32a[10], 18+37);
+assertEq(module.add_i(size*10), 18+37);
+assertEq(i32a[10], 18+37+37);
+
+i32a[20] = 4972;
+assertEq(module.sub(), 4972);
+assertEq(i32a[20], 4972 - 148);
+assertEq(module.sub_i(size*20), 4972 - 148);
+assertEq(i32a[20], 4972 - 148 - 148);
+
+i32a[30] = 0x66666666;
+assertEq(module.and(), 0x66666666);
+assertEq(i32a[30], 0x22222222);
+i32a[30] = 0x66666666;
+assertEq(module.and_i(size*30), 0x66666666);
+assertEq(i32a[30], 0x22222222);
+
+i32a[40] = 0x22222222;
+assertEq(module.or(), 0x22222222);
+assertEq(i32a[40], 0x33333333);
+i32a[40] = 0x22222222;
+assertEq(module.or_i(size*40), 0x22222222);
+assertEq(i32a[40], 0x33333333);
+
+i32a[50] = 0x22222222;
+assertEq(module.xor(), 0x22222222);
+assertEq(i32a[50], 0x11111111);
+i32a[50] = 0x22222222;
+assertEq(module.xor_i(size*50), 0x22222222);
+assertEq(i32a[50], 0x11111111);
+
+i32a[100] = 0;
+assertEq(module.cas1(), 0);
+assertEq(module.cas2(), -1);
+assertEq(i32a[100], 0x5A5A5A5A);
+
+i32a[100] = 0;
+assertEq(module.cas1_i(size*100), 0);
+assertEq(module.cas2_i(size*100), -1);
+assertEq(i32a[100], 0x5A5A5A5A);
+
+// Out-of-bounds accesses.
+// Each operation on an out-of-bounds index is expected to return 0.
+
+assertEq(module.cas1_i(size*20000), 0);
+assertEq(module.cas2_i(size*20000), 0);
+
+assertEq(module.or_i(size*20001), 0);
+assertEq(module.xor_i(size*20001), 0);
+assertEq(module.and_i(size*20001), 0);
+assertEq(module.add_i(size*20001), 0);
+assertEq(module.sub_i(size*20001), 0);
+
+print("Done");
--- a/js/src/jit/LIR-Common.h
+++ b/js/src/jit/LIR-Common.h
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef jit_LIR_Common_h
 #define jit_LIR_Common_h
 
+#include "jit/AtomicOp.h"
 #include "jit/shared/Assembler-shared.h"
 
 // This file declares LIR instructions that are common to every platform.
 
 namespace js {
 namespace jit {
 
 class Range;
@@ -6585,16 +6586,70 @@ class LAsmJSStoreHeap : public LInstruct
     const LAllocation *ptr() {
         return getOperand(0);
     }
     const LAllocation *value() {
         return getOperand(1);
     }
 };
 
+// LIR for MAsmJSCompareExchangeHeap: one def (the value observed before the
+// exchange), three operands (ptr, oldValue, newValue), no temps.
+class LAsmJSCompareExchangeHeap : public LInstructionHelper<1, 3, 0>
+{
+  public:
+    LIR_HEADER(AsmJSCompareExchangeHeap);
+
+    LAsmJSCompareExchangeHeap(const LAllocation &ptr, const LAllocation &oldValue,
+                              const LAllocation &newValue)
+    {
+        setOperand(0, ptr);
+        setOperand(1, oldValue);
+        setOperand(2, newValue);
+    }
+
+    const LAllocation *ptr() {
+        return getOperand(0);
+    }
+    const LAllocation *oldValue() {
+        return getOperand(1);
+    }
+    const LAllocation *newValue() {
+        return getOperand(2);
+    }
+
+    MAsmJSCompareExchangeHeap *mir() const {
+        return mir_->toAsmJSCompareExchangeHeap();
+    }
+};
+
+// LIR for MAsmJSAtomicBinopHeap: one def (the old cell value), two operands
+// (ptr, value) and one temp for the code generator.
+class LAsmJSAtomicBinopHeap : public LInstructionHelper<1, 2, 1>
+{
+  public:
+    LIR_HEADER(AsmJSAtomicBinopHeap);
+    LAsmJSAtomicBinopHeap(const LAllocation &ptr, const LAllocation &value,
+                          const LDefinition &temp)
+    {
+        setOperand(0, ptr);
+        setOperand(1, value);
+        setTemp(0, temp);
+    }
+    const LAllocation *ptr() {
+        return getOperand(0);
+    }
+    const LAllocation *value() {
+        return getOperand(1);
+    }
+    const LDefinition *temp() {
+        return getTemp(0);
+    }
+
+    MAsmJSAtomicBinopHeap *mir() const {
+        return mir_->toAsmJSAtomicBinopHeap();
+    }
+};
+
 class LAsmJSLoadGlobalVar : public LInstructionHelper<1, 0, 0>
 {
   public:
     LIR_HEADER(AsmJSLoadGlobalVar);
     MAsmJSLoadGlobalVar *mir() const {
         return mir_->toAsmJSLoadGlobalVar();
     }
 };
--- a/js/src/jit/LOpcodes.h
+++ b/js/src/jit/LOpcodes.h
@@ -328,16 +328,18 @@
     _(AsmJSLoadGlobalVar)           \
     _(AsmJSStoreGlobalVar)          \
     _(AsmJSLoadFFIFunc)             \
     _(AsmJSParameter)               \
     _(AsmJSReturn)                  \
     _(AsmJSVoidReturn)              \
     _(AsmJSPassStackArg)            \
     _(AsmJSCall)                    \
+    _(AsmJSCompareExchangeHeap)     \
+    _(AsmJSAtomicBinopHeap)         \
     _(InterruptCheckPar)            \
     _(RecompileCheck)               \
     _(MemoryBarrier)                \
     _(AssertRangeI)                 \
     _(AssertRangeD)                 \
     _(AssertRangeF)                 \
     _(AssertRangeV)                 \
     _(LexicalCheck)                 \
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -12102,61 +12102,152 @@ class MAsmJSHeapAccess
 
     Scalar::Type viewType() const { return viewType_; }
     bool needsBoundsCheck() const { return needsBoundsCheck_; }
     void removeBoundsCheck() { needsBoundsCheck_ = false; }
 };
 
 class MAsmJSLoadHeap : public MUnaryInstruction, public MAsmJSHeapAccess
 {
-    MAsmJSLoadHeap(Scalar::Type vt, MDefinition *ptr, bool needsBoundsCheck)
-      : MUnaryInstruction(ptr), MAsmJSHeapAccess(vt, needsBoundsCheck)
-    {
-        setMovable();
+    MemoryBarrierBits barrierBefore_;
+    MemoryBarrierBits barrierAfter_;
+
+    MAsmJSLoadHeap(Scalar::Type vt, MDefinition *ptr, bool needsBoundsCheck,
+                   MemoryBarrierBits before, MemoryBarrierBits after)
+      : MUnaryInstruction(ptr),
+        MAsmJSHeapAccess(vt, needsBoundsCheck),
+        barrierBefore_(before),
+        barrierAfter_(after)
+    {
+        if (before|after)
+            setGuard();         // Not removable
+        else
+            setMovable();
         if (vt == Scalar::Float32)
             setResultType(MIRType_Float32);
         else if (vt == Scalar::Float64)
             setResultType(MIRType_Double);
         else
             setResultType(MIRType_Int32);
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSLoadHeap);
 
     static MAsmJSLoadHeap *New(TempAllocator &alloc, Scalar::Type vt,
-                               MDefinition *ptr, bool needsBoundsCheck)
-    {
-        return new(alloc) MAsmJSLoadHeap(vt, ptr, needsBoundsCheck);
+                               MDefinition *ptr, bool needsBoundsCheck,
+                               MemoryBarrierBits barrierBefore = MembarNobits,
+                               MemoryBarrierBits barrierAfter = MembarNobits)
+    {
+        return new(alloc) MAsmJSLoadHeap(vt, ptr, needsBoundsCheck, barrierBefore, barrierAfter);
     }
 
     MDefinition *ptr() const { return getOperand(0); }
+    MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
+    MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
 
     bool congruentTo(const MDefinition *ins) const;
     AliasSet getAliasSet() const {
         return AliasSet::Load(AliasSet::AsmJSHeap);
     }
     bool mightAlias(const MDefinition *def) const;
 };
 
 class MAsmJSStoreHeap : public MBinaryInstruction, public MAsmJSHeapAccess
 {
-    MAsmJSStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, bool needsBoundsCheck)
-      : MBinaryInstruction(ptr, v) , MAsmJSHeapAccess(vt, needsBoundsCheck)
-    {}
+    MemoryBarrierBits barrierBefore_;
+    MemoryBarrierBits barrierAfter_;
+
+    MAsmJSStoreHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
+                    MemoryBarrierBits before, MemoryBarrierBits after)
+      : MBinaryInstruction(ptr, v),
+        MAsmJSHeapAccess(vt, needsBoundsCheck),
+        barrierBefore_(before),
+        barrierAfter_(after)
+    {
+        if (before|after)
+            setGuard();         // Not removable
+    }
 
   public:
     INSTRUCTION_HEADER(AsmJSStoreHeap);
 
     static MAsmJSStoreHeap *New(TempAllocator &alloc, Scalar::Type vt,
-                                MDefinition *ptr, MDefinition *v, bool needsBoundsCheck)
-    {
-        return new(alloc) MAsmJSStoreHeap(vt, ptr, v, needsBoundsCheck);
-    }
-
+                                MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
+                                MemoryBarrierBits barrierBefore = MembarNobits,
+                                MemoryBarrierBits barrierAfter = MembarNobits)
+    {
+        return new(alloc) MAsmJSStoreHeap(vt, ptr, v, needsBoundsCheck,
+                                          barrierBefore, barrierAfter);
+    }
+
+    MDefinition *ptr() const { return getOperand(0); }
+    MDefinition *value() const { return getOperand(1); }
+    MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
+    MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
+
+    AliasSet getAliasSet() const {
+        return AliasSet::Store(AliasSet::AsmJSHeap);
+    }
+};
+
+class MAsmJSCompareExchangeHeap : public MTernaryInstruction, public MAsmJSHeapAccess
+{
+    MAsmJSCompareExchangeHeap(Scalar::Type vt, MDefinition *ptr, MDefinition *oldv, MDefinition *newv,
+                              bool needsBoundsCheck)
+        : MTernaryInstruction(ptr, oldv, newv),
+          MAsmJSHeapAccess(vt, needsBoundsCheck)
+    {
+        setGuard();             // Not removable
+        setResultType(MIRType_Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(AsmJSCompareExchangeHeap);
+
+    static MAsmJSCompareExchangeHeap *New(TempAllocator &alloc, Scalar::Type vt,
+                                          MDefinition *ptr, MDefinition *oldv,
+                                          MDefinition *newv, bool needsBoundsCheck)
+    {
+        return new(alloc) MAsmJSCompareExchangeHeap(vt, ptr, oldv, newv, needsBoundsCheck);
+    }
+
+    MDefinition *ptr() const { return getOperand(0); }
+    MDefinition *oldValue() const { return getOperand(1); }
+    MDefinition *newValue() const { return getOperand(2); }
+
+    AliasSet getAliasSet() const {
+        return AliasSet::Store(AliasSet::AsmJSHeap);
+    }
+};
+
+class MAsmJSAtomicBinopHeap : public MBinaryInstruction, public MAsmJSHeapAccess
+{
+    AtomicOp op_;
+
+    MAsmJSAtomicBinopHeap(AtomicOp op, Scalar::Type vt, MDefinition *ptr, MDefinition *v,
+                          bool needsBoundsCheck)
+        : MBinaryInstruction(ptr, v),
+          MAsmJSHeapAccess(vt, needsBoundsCheck),
+          op_(op)
+    {
+        setGuard();         // Not removable
+        setResultType(MIRType_Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(AsmJSAtomicBinopHeap);
+
+    static MAsmJSAtomicBinopHeap *New(TempAllocator &alloc, AtomicOp op, Scalar::Type vt,
+                                      MDefinition *ptr, MDefinition *v, bool needsBoundsCheck)
+    {
+        return new(alloc) MAsmJSAtomicBinopHeap(op, vt, ptr, v, needsBoundsCheck);
+    }
+
+    AtomicOp operation() const { return op_; }
     MDefinition *ptr() const { return getOperand(0); }
     MDefinition *value() const { return getOperand(1); }
 
     AliasSet getAliasSet() const {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
--- a/js/src/jit/MOpcodes.h
+++ b/js/src/jit/MOpcodes.h
@@ -256,16 +256,18 @@ namespace jit {
     _(LambdaPar)                                                            \
     _(RestPar)                                                              \
     _(ForkJoinContext)                                                      \
     _(ForkJoinGetSlice)                                                     \
     _(GuardThreadExclusive)                                                 \
     _(InterruptCheckPar)                                                    \
     _(RecompileCheck)                                                       \
     _(MemoryBarrier)                                                        \
+    _(AsmJSCompareExchangeHeap)                                             \
+    _(AsmJSAtomicBinopHeap)                                                 \
     _(UnknownValue)                                                         \
     _(LexicalCheck)                                                         \
     _(ThrowUninitializedLexical)                                            \
     _(Debugger)
 
 // Forward declarations of MIR types.
 #define FORWARD_DECLARE(op) class M##op;
  MIR_OPCODE_LIST(FORWARD_DECLARE)
--- a/js/src/jit/ParallelSafetyAnalysis.cpp
+++ b/js/src/jit/ParallelSafetyAnalysis.cpp
@@ -353,16 +353,18 @@ class ParallelSafetyVisitor : public MDe
     UNSAFE_OP(AsmJSVoidReturn)
     UNSAFE_OP(AsmJSPassStackArg)
     UNSAFE_OP(AsmJSParameter)
     UNSAFE_OP(AsmJSCall)
     DROP_OP(RecompileCheck)
     UNSAFE_OP(CompareExchangeTypedArrayElement)
     UNSAFE_OP(AtomicTypedArrayElementBinop)
     UNSAFE_OP(MemoryBarrier)
+    UNSAFE_OP(AsmJSCompareExchangeHeap)
+    UNSAFE_OP(AsmJSAtomicBinopHeap)
     UNSAFE_OP(UnknownValue)
     UNSAFE_OP(LexicalCheck)
     UNSAFE_OP(ThrowUninitializedLexical)
     UNSAFE_OP(Debugger)
 
     // It looks like these could easily be made safe:
     UNSAFE_OP(ConvertElementsToDoubles)
     UNSAFE_OP(MaybeCopyElementsForWrite)
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2011,16 +2011,28 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LA
         masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg,
                               ToRegister(ins->value()), Offset, Assembler::Below);
     }
     masm.append(AsmJSHeapAccess(bo.getOffset()));
     return true;
 }
 
 bool
+CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
+{
+    MOZ_CRASH("NYI");
+}
+
+bool
+CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
+{
+    MOZ_CRASH("NYI");
+}
+
+bool
 CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
 {
     const MAsmJSPassStackArg *mir = ins->mir();
     Operand dst(StackPointer, mir->spOffset());
     if (ins->arg()->isConstant()) {
         //masm.as_bkpt();
         masm.ma_storeImm(Imm32(ToInt32(ins->arg())), dst);
     } else {
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -201,16 +201,18 @@ class CodeGeneratorARM : public CodeGene
     bool visitNegI(LNegI *lir);
     bool visitNegD(LNegD *lir);
     bool visitNegF(LNegF *lir);
     bool visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins);
     bool visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins);
     bool visitAsmJSCall(LAsmJSCall *ins);
     bool visitAsmJSLoadHeap(LAsmJSLoadHeap *ins);
     bool visitAsmJSStoreHeap(LAsmJSStoreHeap *ins);
+    bool visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins);
+    bool visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins);
     bool visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins);
     bool visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins);
     bool visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins);
     bool visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins);
     bool visitAsmJSPassStackArg(LAsmJSPassStackArg *ins);
 
     bool visitForkJoinGetSlice(LForkJoinGetSlice *ins);
 
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -631,8 +631,20 @@ LIRGeneratorARM::visitCompareExchangeTyp
     if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
         tempDef = temp();
 
     LCompareExchangeTypedArrayElement *lir =
         new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, tempDef);
 
     return define(lir, ins);
 }
+
+bool
+LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins)
+{
+    MOZ_CRASH("NYI");
+}
+
+bool
+LIRGeneratorARM::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins)
+{
+    MOZ_CRASH("NYI");
+}
--- a/js/src/jit/arm/Lowering-arm.h
+++ b/js/src/jit/arm/Lowering-arm.h
@@ -96,16 +96,18 @@ class LIRGeneratorARM : public LIRGenera
     bool lowerPhi(MPhi *phi);
     bool visitGuardShape(MGuardShape *ins);
     bool visitGuardObjectType(MGuardObjectType *ins);
     bool visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins);
     bool visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins);
     bool visitAsmJSLoadHeap(MAsmJSLoadHeap *ins);
     bool visitAsmJSStoreHeap(MAsmJSStoreHeap *ins);
     bool visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins);
+    bool visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins);
+    bool visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins);
     bool visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins);
     bool visitForkJoinGetSlice(MForkJoinGetSlice *ins);
     bool visitSimdTernaryBitwise(MSimdTernaryBitwise *ins);
     bool visitSimdSplatX4(MSimdSplatX4 *ins);
     bool visitSimdValueX4(MSimdValueX4 *ins);
     bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
     bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
 };
--- a/js/src/jit/none/Lowering-none.h
+++ b/js/src/jit/none/Lowering-none.h
@@ -75,16 +75,18 @@ class LIRGeneratorNone : public LIRGener
     bool visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins) { MOZ_CRASH(); }
     bool visitAsmJSLoadHeap(MAsmJSLoadHeap *ins) { MOZ_CRASH(); }
     bool visitAsmJSStoreHeap(MAsmJSStoreHeap *ins) { MOZ_CRASH(); }
     bool visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins) { MOZ_CRASH(); }
     bool visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins) { MOZ_CRASH(); }
     bool visitForkJoinGetSlice(MForkJoinGetSlice *ins) { MOZ_CRASH(); }
     bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins) { MOZ_CRASH(); }
     bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins) { MOZ_CRASH(); }
+    bool visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins) { MOZ_CRASH(); }
+    bool visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins) { MOZ_CRASH(); }
 
     LTableSwitch *newLTableSwitch(LAllocation, LDefinition, MTableSwitch *) { MOZ_CRASH(); }
     LTableSwitchV *newLTableSwitchV(MTableSwitch *) { MOZ_CRASH(); }
     bool visitSimdTernaryBitwise(MSimdTernaryBitwise *ins) { MOZ_CRASH(); }
     bool visitSimdSplatX4(MSimdSplatX4 *ins) { MOZ_CRASH(); }
     bool visitSimdValueX4(MSimdValueX4 *lir) { MOZ_CRASH(); }
 };
 
--- a/js/src/jit/shared/Assembler-x86-shared.h
+++ b/js/src/jit/shared/Assembler-x86-shared.h
@@ -1067,16 +1067,19 @@ class AssemblerX86Shared : public Assemb
             MOZ_CRASH("unexpected operand kind");
             break;
         }
     }
 
     void addl(Imm32 imm, Register dest) {
         masm.addl_ir(imm.value, dest.code());
     }
+    void addl_wide(Imm32 imm, Register dest) {
+        masm.addl_ir_wide(imm.value, dest.code());
+    }
     void addl(Imm32 imm, const Operand &op) {
         switch (op.kind()) {
           case Operand::REG:
             masm.addl_ir(imm.value, op.reg());
             break;
           case Operand::MEM_REG_DISP:
             masm.addl_im(imm.value, op.disp(), op.base());
             break;
--- a/js/src/jit/shared/BaseAssembler-x86-shared.h
+++ b/js/src/jit/shared/BaseAssembler-x86-shared.h
@@ -605,16 +605,23 @@ public:
         if (CAN_SIGN_EXTEND_8_32(imm)) {
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
             m_formatter.immediate8(imm);
         } else {
             m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
             m_formatter.immediate32(imm);
         }
     }
+    void addl_ir_wide(int imm, RegisterID dst)
+    {
+        // 32-bit immediate always, for patching.
+        spew("addl       $0x%x, %s", imm, nameIReg(4,dst));
+        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+        m_formatter.immediate32(imm);
+    }
 
     void addl_im(int imm, int offset, RegisterID base)
     {
         spew("addl       $%d, %s0x%x(%s)",
              imm, PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         if (CAN_SIGN_EXTEND_8_32(imm)) {
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
             m_formatter.immediate8(imm);
--- a/js/src/jit/shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/shared/Lowering-x86-shared.cpp
@@ -484,16 +484,139 @@ LIRGeneratorX86Shared::visitAtomicTypedA
 
     LAtomicTypedArrayElementBinop *lir =
         new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, tempDef1, tempDef2);
 
     return fixedOutput ? defineFixed(lir, ins, LAllocation(AnyRegister(eax))) : define(lir, ins);
 }
 
 bool
+LIRGeneratorX86Shared::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins)
+{
+    MDefinition *ptr = ins->ptr();
+    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+
+    bool byteArray = false;
+    switch (ins->viewType()) {
+      case Scalar::Int8:
+      case Scalar::Uint8:
+        byteArray = true;
+        break;
+      case Scalar::Int16:
+      case Scalar::Uint16:
+      case Scalar::Int32:
+      case Scalar::Uint32:
+        break;
+      default:
+        MOZ_CRASH("Unexpected array type");
+    }
+
+    // Register allocation:
+    //
+    // The output must be eax.
+    //
+    // oldval must be in a register (it'll eventually end up in eax so
+    // ideally it's there to begin with).
+    //
+    // newval will need to be in a register.  If the source is a byte
+    // array then the newval must be a register that has a byte size:
+    // ebx, ecx, or edx, since eax is taken for the output in this
+    // case.  We pick ebx but it would be more flexible to pick any of
+    // the three that wasn't being used.
+    //
+    // Bug #1077036 describes some optimization opportunities.
+
+    const LAllocation newval = byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());
+    const LAllocation oldval = useRegister(ins->oldValue());
+
+    LAsmJSCompareExchangeHeap *lir =
+        new(alloc()) LAsmJSCompareExchangeHeap(useRegister(ptr), oldval, newval);
+
+    return defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+}
+
+bool
+LIRGeneratorX86Shared::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins)
+{
+    MDefinition *ptr = ins->ptr();
+    MOZ_ASSERT(ptr->type() == MIRType_Int32);
+
+    bool byteArray = false;
+    switch (ins->viewType()) {
+      case Scalar::Int8:
+      case Scalar::Uint8:
+        byteArray = true;
+        break;
+      case Scalar::Int16:
+      case Scalar::Uint16:
+      case Scalar::Int32:
+      case Scalar::Uint32:
+        break;
+      default:
+        MOZ_CRASH("Unexpected array type");
+    }
+
+    // Register allocation:
+    //
+    // For ADD and SUB we'll use XADD:
+    //
+    //    movl       value, output
+    //    lock xaddl output, mem
+    //
+    // For the 8-bit variants XADD needs a byte register for the
+    // output only; we can still set up with movl — just pin the output
+    // to eax (or ebx / ecx / edx).
+    //
+    // For AND/OR/XOR we need to use a CMPXCHG loop:
+    //
+    //    movl          *mem, eax
+    // L: mov           eax, temp
+    //    andl          value, temp
+    //    lock cmpxchg  temp, mem  ; reads eax also
+    //    jnz           L
+    //    ; result in eax
+    //
+    // Note the placement of L: cmpxchg will update eax with *mem if
+    // *mem does not have the expected value, so reloading it at the
+    // top of the loop is redundant.
+    //
+    // We want to fix eax as the output.  We also need a temp for
+    // the intermediate value.
+    //
+    // For the 8-bit variants the temp must have a byte register.
+    //
+    // There are optimization opportunities:
+    //  - when the result is unused, Bug #1077014.
+    //  - better register allocation and instruction selection, Bug #1077036.
+
+    bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
+    LDefinition tempDef = LDefinition::BogusTemp();
+    LAllocation value;
+
+    // Optimization opportunity: "value" need not be pinned to something that
+    // has a byte register unless the back-end insists on using a byte move
+    // for the setup or the payload computation, which really it need not do.
+
+    if (byteArray) {
+        value = useFixed(ins->value(), ebx);
+        if (bitOp)
+            tempDef = tempFixed(ecx);
+    } else {
+        value = useRegister(ins->value());
+        if (bitOp)
+            tempDef = temp();
+    }
+
+    LAsmJSAtomicBinopHeap *lir =
+        new(alloc()) LAsmJSAtomicBinopHeap(useRegister(ptr), value, tempDef);
+
+    return defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+}
+
+bool
 LIRGeneratorX86Shared::visitSimdTernaryBitwise(MSimdTernaryBitwise *ins)
 {
     MOZ_ASSERT(IsSimdType(ins->type()));
 
     if (ins->type() == MIRType_Int32x4 || ins->type() == MIRType_Float32x4) {
         LSimdSelect *lins = new(alloc()) LSimdSelect;
 
         // This must be useRegisterAtStart() because it is destroyed.
--- a/js/src/jit/shared/Lowering-x86-shared.h
+++ b/js/src/jit/shared/Lowering-x86-shared.h
@@ -52,14 +52,16 @@ class LIRGeneratorX86Shared : public LIR
     bool lowerTruncateDToInt32(MTruncateToInt32 *ins);
     bool lowerTruncateFToInt32(MTruncateToInt32 *ins);
     bool visitForkJoinGetSlice(MForkJoinGetSlice *ins);
     bool visitSimdTernaryBitwise(MSimdTernaryBitwise *ins);
     bool visitSimdSplatX4(MSimdSplatX4 *ins);
     bool visitSimdValueX4(MSimdValueX4 *ins);
     bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
     bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
+    bool visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap *ins);
+    bool visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap *ins);
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_shared_Lowering_x86_shared_h */
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -253,16 +253,23 @@ CodeGeneratorX64::visitAsmJSCall(LAsmJSC
     masm.branchPtr(Assembler::Equal, HeapReg, scratch, &ok);
     masm.breakpoint();
     masm.bind(&ok);
 #endif
 
     return true;
 }
 
+void
+CodeGeneratorX64::memoryBarrier(MemoryBarrierBits barrier)
+{
+    if (barrier & MembarStoreLoad)
+        masm.storeLoadFence();
+}
+
 bool
 CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
 {
     MAsmJSLoadHeap *mir = ins->mir();
     Scalar::Type vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     const LDefinition *out = ins->output();
     Operand srcAddr(HeapReg);
@@ -270,16 +277,17 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAs
     if (ptr->isConstant()) {
         int32_t ptrImm = ptr->toConstant()->toInt32();
         MOZ_ASSERT(ptrImm >= 0);
         srcAddr = Operand(HeapReg, ptrImm);
     } else {
         srcAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
     }
 
+    memoryBarrier(ins->mir()->barrierBefore());
     OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
     uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
     if (mir->needsBoundsCheck()) {
         bool isFloat32Load = vt == Scalar::Float32;
         ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
         if (!addOutOfLineCode(ool, ins->mir()))
             return false;
 
@@ -298,16 +306,17 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAs
       case Scalar::Uint32:  masm.movl(srcAddr, ToRegister(out)); break;
       case Scalar::Float32: masm.loadFloat32(srcAddr, ToFloatRegister(out)); break;
       case Scalar::Float64: masm.loadDouble(srcAddr, ToFloatRegister(out)); break;
       default: MOZ_CRASH("unexpected array type");
     }
     uint32_t after = masm.size();
     if (ool)
         masm.bind(ool->rejoin());
+    memoryBarrier(ins->mir()->barrierAfter());
     masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out), maybeCmpOffset));
     return true;
 }
 
 bool
 CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
 {
     MAsmJSStoreHeap *mir = ins->mir();
@@ -318,16 +327,17 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LA
     if (ptr->isConstant()) {
         int32_t ptrImm = ptr->toConstant()->toInt32();
         MOZ_ASSERT(ptrImm >= 0);
         dstAddr = Operand(HeapReg, ptrImm);
     } else {
         dstAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
     }
 
+    memoryBarrier(ins->mir()->barrierBefore());
     Label rejoin;
     uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
     if (mir->needsBoundsCheck()) {
         CodeOffsetLabel cmp = masm.cmplWithPatch(ToRegister(ptr), Imm32(0));
         masm.j(Assembler::AboveOrEqual, &rejoin);
         maybeCmpOffset = cmp.offset();
     }
 
@@ -353,21 +363,109 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LA
           case Scalar::Float32: masm.storeFloat32(ToFloatRegister(ins->value()), dstAddr); break;
           case Scalar::Float64: masm.storeDouble(ToFloatRegister(ins->value()), dstAddr); break;
           default: MOZ_CRASH("unexpected array type");
         }
     }
     uint32_t after = masm.size();
     if (rejoin.used())
         masm.bind(&rejoin);
+    memoryBarrier(ins->mir()->barrierAfter());
     masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
     return true;
 }
 
 bool
+CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
+{
+    MAsmJSCompareExchangeHeap *mir = ins->mir();
+    Scalar::Type vt = mir->viewType();
+    const LAllocation *ptr = ins->ptr();
+
+    MOZ_ASSERT(ptr->isRegister());
+    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne);
+
+    Register oldval = ToRegister(ins->oldValue());
+    Register newval = ToRegister(ins->newValue());
+
+    Label rejoin;
+    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    MOZ_ASSERT(mir->needsBoundsCheck());
+    {
+        maybeCmpOffset = masm.cmplWithPatch(ToRegister(ptr), Imm32(0)).offset();
+        Label goahead;
+        masm.j(Assembler::LessThan, &goahead);
+        memoryBarrier(MembarFull);
+        Register out = ToRegister(ins->output());
+        masm.xorl(out,out);
+        masm.jmp(&rejoin);
+        masm.bind(&goahead);
+    }
+    masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+                                        srcAddr,
+                                        oldval,
+                                        newval,
+                                        InvalidReg,
+                                        ToAnyRegister(ins->output()));
+    uint32_t after = masm.size();
+    if (rejoin.used())
+        masm.bind(&rejoin);
+    masm.append(AsmJSHeapAccess(after, after, maybeCmpOffset));
+    return true;
+}
+
+bool
+CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
+{
+    MAsmJSAtomicBinopHeap *mir = ins->mir();
+    Scalar::Type vt = mir->viewType();
+    const LAllocation *ptr = ins->ptr();
+    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
+    const LAllocation* value = ins->value();
+    AtomicOp op = mir->operation();
+
+    MOZ_ASSERT(ptr->isRegister());
+    BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne);
+
+    Label rejoin;
+    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+    MOZ_ASSERT(mir->needsBoundsCheck());
+    {
+        maybeCmpOffset = masm.cmplWithPatch(ToRegister(ptr), Imm32(0)).offset();
+        Label goahead;
+        masm.j(Assembler::LessThan, &goahead);
+        memoryBarrier(MembarFull);
+        Register out = ToRegister(ins->output());
+        masm.xorl(out,out);
+        masm.jmp(&rejoin);
+        masm.bind(&goahead);
+    }
+    if (value->isConstant()) {
+        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+                                        Imm32(ToInt32(value)),
+                                        srcAddr,
+                                        temp,
+                                        InvalidReg,
+                                        ToAnyRegister(ins->output()));
+    } else {
+        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+                                        ToRegister(value),
+                                        srcAddr,
+                                        temp,
+                                        InvalidReg,
+                                        ToAnyRegister(ins->output()));
+    }
+    uint32_t after = masm.size();
+    if (rejoin.used())
+        masm.bind(&rejoin);
+    masm.append(AsmJSHeapAccess(after, after, maybeCmpOffset));
+    return true;
+}
+
+bool
 CodeGeneratorX64::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
 {
     MAsmJSLoadGlobalVar *mir = ins->mir();
 
     MIRType type = mir->type();
     MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
 
     CodeOffsetLabel label;
--- a/js/src/jit/x64/CodeGenerator-x64.h
+++ b/js/src/jit/x64/CodeGenerator-x64.h
@@ -20,16 +20,17 @@ class CodeGeneratorX64 : public CodeGene
 
   protected:
     ValueOperand ToValue(LInstruction *ins, size_t pos);
     ValueOperand ToOutValue(LInstruction *ins);
     ValueOperand ToTempValue(LInstruction *ins, size_t pos);
 
     void storeUnboxedValue(const LAllocation *value, MIRType valueType,
                            Operand dest, MIRType slotType);
+    void memoryBarrier(MemoryBarrierBits barrier);
 
   public:
     CodeGeneratorX64(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);
 
   public:
     bool visitValue(LValue *value);
     bool visitBox(LBox *box);
     bool visitUnbox(LUnbox *unbox);
@@ -39,16 +40,18 @@ class CodeGeneratorX64 : public CodeGene
     bool visitCompareVAndBranch(LCompareVAndBranch *lir);
     bool visitTruncateDToInt32(LTruncateDToInt32 *ins);
     bool visitTruncateFToInt32(LTruncateFToInt32 *ins);
     bool visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins);
     bool visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins);
     bool visitAsmJSCall(LAsmJSCall *ins);
     bool visitAsmJSLoadHeap(LAsmJSLoadHeap *ins);
     bool visitAsmJSStoreHeap(LAsmJSStoreHeap *ins);
+    bool visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins);
+    bool visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins);
     bool visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins);
     bool visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins);
     bool visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins);
     bool visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins);
     bool visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble *lir);
     bool visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir);
 };
 
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -353,51 +353,66 @@ CodeGeneratorX86::visitAsmJSCall(LAsmJSC
             masm.loadDouble(op, ReturnDoubleReg);
             masm.freeStack(sizeof(double));
         }
     }
 
     return true;
 }
 
+void
+CodeGeneratorX86::memoryBarrier(MemoryBarrierBits barrier)
+{
+    if (barrier & MembarStoreLoad)
+        masm.storeLoadFence();
+}
+
 bool
 CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
 {
     const MAsmJSLoadHeap *mir = ins->mir();
     Scalar::Type vt = mir->viewType();
     const LAllocation *ptr = ins->ptr();
     const LDefinition *out = ins->output();
 
+    memoryBarrier(ins->mir()->barrierBefore());
+
     if (ptr->isConstant()) {
         // The constant displacement still needs to be added to the as-yet-unknown
         // base address of the heap. For now, embed the displacement as an
         // immediate in the instruction. This displacement will fixed up when the
         // base address is known during dynamic linking (AsmJSModule::initHeap).
         PatchedAbsoluteAddress srcAddr((void *) ptr->toConstant()->toInt32());
-        return loadAndNoteViewTypeElement(vt, srcAddr, out);
+        loadAndNoteViewTypeElement(vt, srcAddr, out);
+        memoryBarrier(ins->mir()->barrierAfter());
+        return true;
     }
 
     Register ptrReg = ToRegister(ptr);
     Address srcAddr(ptrReg, 0);
 
-    if (!mir->needsBoundsCheck())
-        return loadAndNoteViewTypeElement(vt, srcAddr, out);
+    if (!mir->needsBoundsCheck()) {
+        loadAndNoteViewTypeElement(vt, srcAddr, out);
+        memoryBarrier(ins->mir()->barrierAfter());
+        return true;
+    }
 
     bool isFloat32Load = vt == Scalar::Float32;
     OutOfLineLoadTypedArrayOutOfBounds *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
     if (!addOutOfLineCode(ool, mir))
         return false;
 
     CodeOffsetLabel cmp = masm.cmplWithPatch(ptrReg, Imm32(0));
     masm.j(Assembler::AboveOrEqual, ool->entry());
 
     uint32_t before = masm.size();
     loadViewTypeElement(vt, srcAddr, out);
     uint32_t after = masm.size();
     masm.bind(ool->rejoin());
+    memoryBarrier(ins->mir()->barrierAfter());
     masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out), cmp.offset()));
     return true;
 }
 
 template<typename T>
 void
 CodeGeneratorX86::storeViewTypeElement(Scalar::Type vt, const LAllocation *value,
                                        const T &dstAddr)
@@ -449,47 +464,156 @@ CodeGeneratorX86::visitStoreTypedArrayEl
 bool
 CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
 {
     MAsmJSStoreHeap *mir = ins->mir();
     Scalar::Type vt = mir->viewType();
     const LAllocation *value = ins->value();
     const LAllocation *ptr = ins->ptr();
 
+    memoryBarrier(ins->mir()->barrierBefore());
+
     if (ptr->isConstant()) {
         // The constant displacement still needs to be added to the as-yet-unknown
         // base address of the heap. For now, embed the displacement as an
         // immediate in the instruction. This displacement will fixed up when the
         // base address is known during dynamic linking (AsmJSModule::initHeap).
         PatchedAbsoluteAddress dstAddr((void *) ptr->toConstant()->toInt32());
         storeAndNoteViewTypeElement(vt, value, dstAddr);
+        memoryBarrier(ins->mir()->barrierAfter());
         return true;
     }
 
     Register ptrReg = ToRegister(ptr);
     Address dstAddr(ptrReg, 0);
 
     if (!mir->needsBoundsCheck()) {
         storeAndNoteViewTypeElement(vt, value, dstAddr);
+        memoryBarrier(ins->mir()->barrierAfter());
         return true;
     }
 
     CodeOffsetLabel cmp = masm.cmplWithPatch(ptrReg, Imm32(0));
     Label rejoin;
     masm.j(Assembler::AboveOrEqual, &rejoin);
 
     uint32_t before = masm.size();
     storeViewTypeElement(vt, value, dstAddr);
     uint32_t after = masm.size();
     masm.bind(&rejoin);
+    memoryBarrier(ins->mir()->barrierAfter());
     masm.append(AsmJSHeapAccess(before, after, cmp.offset()));
     return true;
 }
 
 bool
+CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
+{
+    MAsmJSCompareExchangeHeap *mir = ins->mir();
+    Scalar::Type vt = mir->viewType();
+    const LAllocation *ptr = ins->ptr();
+    Register oldval = ToRegister(ins->oldValue());
+    Register newval = ToRegister(ins->newValue());
+
+    MOZ_ASSERT(ptr->isRegister());
+    // Set up the offset within the heap in the pointer reg.
+    Register ptrReg = ToRegister(ptr);
+
+    Label rejoin;
+    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+
+    if (mir->needsBoundsCheck()) {
+        maybeCmpOffset = masm.cmplWithPatch(ptrReg, Imm32(0)).offset();
+        Label goahead;
+        masm.j(Assembler::LessThan, &goahead);
+        memoryBarrier(MembarFull);
+        Register out = ToRegister(ins->output());
+        masm.xorl(out,out);
+        masm.jmp(&rejoin);
+        masm.bind(&goahead);
+    }
+
+    // Add in the actual heap pointer explicitly, to avoid opening up
+    // the abstraction that is compareExchangeToTypedIntArray at this time.
+    uint32_t before = masm.size();
+    masm.addl_wide(Imm32(0), ptrReg);
+    uint32_t after = masm.size();
+    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
+
+    Address memAddr(ToRegister(ptr), 0);
+    masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+                                        memAddr,
+                                        oldval,
+                                        newval,
+                                        InvalidReg,
+                                        ToAnyRegister(ins->output()));
+    if (rejoin.used())
+        masm.bind(&rejoin);
+
+    return true;
+}
+
+bool
+CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
+{
+    MAsmJSAtomicBinopHeap *mir = ins->mir();
+    Scalar::Type vt = mir->viewType();
+    const LAllocation *ptr = ins->ptr();
+    Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
+    const LAllocation* value = ins->value();
+    AtomicOp op = mir->operation();
+
+    MOZ_ASSERT(ptr->isRegister());
+    // Set up the offset within the heap in the pointer reg.
+    Register ptrReg = ToRegister(ptr);
+
+    Label rejoin;
+    uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
+
+    if (mir->needsBoundsCheck()) {
+        maybeCmpOffset = masm.cmplWithPatch(ptrReg, Imm32(0)).offset();
+        Label goahead;
+        masm.j(Assembler::LessThan, &goahead);
+        memoryBarrier(MembarFull);
+        Register out = ToRegister(ins->output());
+        masm.xorl(out,out);
+        masm.jmp(&rejoin);
+        masm.bind(&goahead);
+    }
+
+    // Add in the actual heap pointer explicitly, to avoid opening up
+    // the abstraction that is atomicBinopToTypedIntArray at this time.
+    uint32_t before = masm.size();
+    masm.addl_wide(Imm32(0), ptrReg);
+    uint32_t after = masm.size();
+    masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
+
+    Address memAddr(ptrReg, 0);
+    if (value->isConstant()) {
+        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+                                        Imm32(ToInt32(value)),
+                                        memAddr,
+                                        temp,
+                                        InvalidReg,
+                                        ToAnyRegister(ins->output()));
+    } else {
+        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+                                        ToRegister(value),
+                                        memAddr,
+                                        temp,
+                                        InvalidReg,
+                                        ToAnyRegister(ins->output()));
+    }
+    if (rejoin.used())
+        masm.bind(&rejoin);
+
+    return true;
+}
+
+bool
 CodeGeneratorX86::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
 {
     MAsmJSLoadGlobalVar *mir = ins->mir();
     MIRType type = mir->type();
     MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
 
     CodeOffsetLabel label;
     switch (type) {
--- a/js/src/jit/x86/CodeGenerator-x86.h
+++ b/js/src/jit/x86/CodeGenerator-x86.h
@@ -35,16 +35,18 @@ class CodeGeneratorX86 : public CodeGene
     void loadViewTypeElement(Scalar::Type vt, const T &srcAddr,
                                        const LDefinition *out);
     template<typename T>
     void storeAndNoteViewTypeElement(Scalar::Type vt, const LAllocation *value,
                                      const T &dstAddr);
     template<typename T>
     void storeViewTypeElement(Scalar::Type vt, const LAllocation *value,
                               const T &dstAddr);
+    void memoryBarrier(MemoryBarrierBits barrier);
+
   public:
     CodeGeneratorX86(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);
 
   public:
     bool visitBox(LBox *box);
     bool visitBoxFloatingPoint(LBoxFloatingPoint *box);
     bool visitUnbox(LUnbox *unbox);
     bool visitValue(LValue *value);
@@ -56,16 +58,18 @@ class CodeGeneratorX86 : public CodeGene
     bool visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir);
     bool visitTruncateDToInt32(LTruncateDToInt32 *ins);
     bool visitTruncateFToInt32(LTruncateFToInt32 *ins);
     bool visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins);
     bool visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins);
     bool visitAsmJSCall(LAsmJSCall *ins);
     bool visitAsmJSLoadHeap(LAsmJSLoadHeap *ins);
     bool visitAsmJSStoreHeap(LAsmJSStoreHeap *ins);
+    bool visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins);
+    bool visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins);
     bool visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins);
     bool visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins);
     bool visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins);
     bool visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins);
 
     bool visitOutOfLineTruncate(OutOfLineTruncate *ool);
     bool visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32 *ool);
 };