Bug 1084248 - no undefined behavior, take 2. r=waldo
authorLars T Hansen <lhansen@mozilla.com>
Thu, 01 Oct 2015 00:46:08 +0200
changeset 265430 d914aab2d74a9537b91fdc45fc3f2aac2354e567
parent 265429 dd393058f3c3cda65d30592de7eed357a721ccf0
child 265431 76c69f9cfbd41cdfebe2e477ab4d06f564491398
push id17798
push usercbook@mozilla.com
push dateThu, 01 Oct 2015 12:18:13 +0000
treeherderb2g-inbound@cb44ba24be12 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerswaldo
bugs1084248
milestone44.0a1
Bug 1084248 - no undefined behavior, take 2. r=waldo
js/src/asmjs/AsmJSModule.cpp
js/src/asmjs/AsmJSModule.h
js/src/asmjs/AsmJSSignalHandlers.cpp
js/src/builtin/AtomicsObject.cpp
js/src/builtin/SIMD.cpp
js/src/gc/Nursery.h
js/src/jit/BaselineIC.cpp
js/src/jit/IonBuilder.cpp
js/src/jit/Lowering.cpp
js/src/jit/MIR.cpp
js/src/jit/MIR.h
js/src/jit/RangeAnalysis.cpp
js/src/jit/x86/CodeGenerator-x86.cpp
js/src/jsapi.cpp
js/src/jsarray.cpp
js/src/jsiter.cpp
js/src/jsobj.cpp
js/src/vm/ArrayBufferObject-inl.h
js/src/vm/ArrayBufferObject.cpp
js/src/vm/ArrayBufferObject.h
js/src/vm/NativeObject.cpp
js/src/vm/SelfHosting.cpp
js/src/vm/SharedArrayObject.cpp
js/src/vm/SharedArrayObject.h
js/src/vm/SharedTypedArrayObject.cpp
js/src/vm/SharedTypedArrayObject.h
js/src/vm/TypedArrayCommon.h
js/src/vm/TypedArrayObject.cpp
js/src/vm/TypedArrayObject.h
--- a/js/src/asmjs/AsmJSModule.cpp
+++ b/js/src/asmjs/AsmJSModule.cpp
@@ -818,20 +818,23 @@ void
 AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared*> heap, JSContext* cx)
 {
     MOZ_ASSERT_IF(heap->is<ArrayBufferObject>(), heap->as<ArrayBufferObject>().isAsmJS());
     MOZ_ASSERT(IsValidAsmJSHeapLength(heap->byteLength()));
     MOZ_ASSERT(dynamicallyLinked_);
     MOZ_ASSERT(!maybeHeap_);
 
     maybeHeap_ = heap;
-    heapDatum() = heap->dataPointer();
+    // heapDatum() may point to shared memory but that memory is only
+    // accessed from maybeHeap(), which wraps it, and from
+    // hasDetachedHeap(), which checks it for null.
+    heapDatum() = heap->dataPointerMaybeShared().unwrap(/*safe - explained above*/);
 
 #if defined(JS_CODEGEN_X86)
-    uint8_t* heapOffset = heap->dataPointer();
+    uint8_t* heapOffset = heap->dataPointerMaybeShared().unwrap(/*safe - used for value*/);
     uint32_t heapLength = heap->byteLength();
     for (unsigned i = 0; i < heapAccesses_.length(); i++) {
         const jit::AsmJSHeapAccess& access = heapAccesses_[i];
         // An access is out-of-bounds iff
         //      ptr + offset + data-type-byte-size > heapLength
         // i.e. ptr > heapLength - data-type-byte-size - offset.
         // data-type-byte-size and offset are already included in the addend
         // so we just have to add the heap length here.
@@ -867,17 +870,17 @@ AsmJSModule::initHeap(Handle<ArrayBuffer
 }
 
 void
 AsmJSModule::restoreHeapToInitialState(ArrayBufferObjectMaybeShared* maybePrevBuffer)
 {
 #if defined(JS_CODEGEN_X86)
     if (maybePrevBuffer) {
         // Subtract out the base-pointer added by AsmJSModule::initHeap.
-        uint8_t* ptrBase = maybePrevBuffer->dataPointer();
+        uint8_t* ptrBase = maybePrevBuffer->dataPointerMaybeShared().unwrap(/*safe - used for value*/);
         uint32_t heapLength = maybePrevBuffer->byteLength();
         for (unsigned i = 0; i < heapAccesses_.length(); i++) {
             const jit::AsmJSHeapAccess& access = heapAccesses_[i];
             // Subtract the heap length back out, leaving the raw displacement in place.
             if (access.hasLengthCheck())
                 X86Encoding::AddInt32(access.patchLengthAt(code_), -heapLength);
             void* addr = access.patchHeapPtrImmAt(code_);
             uint8_t* ptr = reinterpret_cast<uint8_t*>(X86Encoding::GetPointer(addr));
--- a/js/src/asmjs/AsmJSModule.h
+++ b/js/src/asmjs/AsmJSModule.h
@@ -1396,20 +1396,24 @@ class AsmJSModule
     }
     bool active() const {
         return activation() != nullptr;
     }
     static unsigned heapGlobalDataOffset() {
         JS_STATIC_ASSERT(jit::AsmJSHeapGlobalDataOffset == sizeof(void*));
         return sizeof(void*);
     }
+  private:
+    // The pointer may reference shared memory, use with care.
+    // Generally you want to use maybeHeap(), not heapDatum().
     uint8_t*& heapDatum() const {
         MOZ_ASSERT(isFinished());
         return *(uint8_t**)(globalData() + heapGlobalDataOffset());
     }
+  public:
     static unsigned nan64GlobalDataOffset() {
         static_assert(jit::AsmJSNaN64GlobalDataOffset % sizeof(double) == 0,
                       "Global data NaN should be aligned");
         return heapGlobalDataOffset() + sizeof(void*);
     }
     static unsigned nan32GlobalDataOffset() {
         static_assert(jit::AsmJSNaN32GlobalDataOffset % sizeof(double) == 0,
                       "Global data NaN should be aligned");
@@ -1556,19 +1560,20 @@ class AsmJSModule
     uint8_t* interruptExit() const {
         MOZ_ASSERT(isDynamicallyLinked());
         return interruptExit_;
     }
     uint8_t* outOfBoundsExit() const {
         MOZ_ASSERT(isDynamicallyLinked());
         return outOfBoundsExit_;
     }
-    uint8_t* maybeHeap() const {
+    SharedMem<uint8_t*> maybeHeap() const {
         MOZ_ASSERT(isDynamicallyLinked());
-        return heapDatum();
+        return hasArrayView() && isSharedView() ? SharedMem<uint8_t*>::shared(heapDatum())
+            : SharedMem<uint8_t*>::unshared(heapDatum());
     }
     ArrayBufferObjectMaybeShared* maybeHeapBufferObject() const {
         MOZ_ASSERT(isDynamicallyLinked());
         return maybeHeap_;
     }
     size_t heapLength() const;
     bool profilingEnabled() const {
         MOZ_ASSERT(isDynamicallyLinked());
--- a/js/src/asmjs/AsmJSSignalHandlers.cpp
+++ b/js/src/asmjs/AsmJSSignalHandlers.cpp
@@ -17,19 +17,22 @@
  */
 
 #include "asmjs/AsmJSSignalHandlers.h"
 
 #include "mozilla/DebugOnly.h"
 #include "mozilla/PodOperations.h"
 
 #include "asmjs/AsmJSModule.h"
+#include "jit/AtomicOperations.h"
 #include "jit/Disassembler.h"
 #include "vm/Runtime.h"
 
+#include "jit/AtomicOperations-inl.h"
+
 using namespace js;
 using namespace js::jit;
 
 using JS::GenericNaN;
 using mozilla::DebugOnly;
 using mozilla::PodArrayZero;
 
 #if defined(ANDROID)
@@ -379,60 +382,60 @@ SetFPRegToNaN(size_t size, void* fp_reg)
 
 MOZ_COLD static void
 SetGPRegToZero(void* gp_reg)
 {
     memset(gp_reg, 0, sizeof(intptr_t));
 }
 
 MOZ_COLD static void
-SetFPRegToLoadedValue(const void* addr, size_t size, void* fp_reg)
+SetFPRegToLoadedValue(SharedMem<void*> addr, size_t size, void* fp_reg)
 {
     MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
     memset(fp_reg, 0, Simd128DataSize);
-    memcpy(fp_reg, addr, size);
+    AtomicOperations::memcpySafeWhenRacy(fp_reg, addr, size);
 }
 
 MOZ_COLD static void
-SetGPRegToLoadedValue(const void* addr, size_t size, void* gp_reg)
+SetGPRegToLoadedValue(SharedMem<void*> addr, size_t size, void* gp_reg)
 {
     MOZ_RELEASE_ASSERT(size <= sizeof(void*));
     memset(gp_reg, 0, sizeof(void*));
-    memcpy(gp_reg, addr, size);
+    AtomicOperations::memcpySafeWhenRacy(gp_reg, addr, size);
 }
 
 MOZ_COLD static void
-SetGPRegToLoadedValueSext32(const void* addr, size_t size, void* gp_reg)
+SetGPRegToLoadedValueSext32(SharedMem<void*> addr, size_t size, void* gp_reg)
 {
     MOZ_RELEASE_ASSERT(size <= sizeof(int32_t));
-    int8_t msb = static_cast<const int8_t*>(addr)[size - 1];
+    int8_t msb = AtomicOperations::loadSafeWhenRacy(SharedMem<uint8_t*>(addr) + (size - 1));
     memset(gp_reg, 0, sizeof(void*));
     memset(gp_reg, msb >> 7, sizeof(int32_t));
-    memcpy(gp_reg, addr, size);
+    AtomicOperations::memcpySafeWhenRacy(gp_reg, addr, size);
 }
 
 MOZ_COLD static void
-StoreValueFromFPReg(void* addr, size_t size, const void* fp_reg)
+StoreValueFromFPReg(SharedMem<void*> addr, size_t size, const void* fp_reg)
 {
     MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
-    memcpy(addr, fp_reg, size);
+    AtomicOperations::memcpySafeWhenRacy(addr, const_cast<void*>(fp_reg), size);
 }
 
 MOZ_COLD static void
-StoreValueFromGPReg(void* addr, size_t size, const void* gp_reg)
+StoreValueFromGPReg(SharedMem<void*> addr, size_t size, const void* gp_reg)
 {
     MOZ_RELEASE_ASSERT(size <= sizeof(void*));
-    memcpy(addr, gp_reg, size);
+    AtomicOperations::memcpySafeWhenRacy(addr, const_cast<void*>(gp_reg), size);
 }
 
 MOZ_COLD static void
-StoreValueFromGPImm(void* addr, size_t size, int32_t imm)
+StoreValueFromGPImm(SharedMem<void*> addr, size_t size, int32_t imm)
 {
     MOZ_RELEASE_ASSERT(size <= sizeof(imm));
-    memcpy(addr, &imm, size);
+    AtomicOperations::memcpySafeWhenRacy(addr, static_cast<void*>(&imm), size);
 }
 
 # if !defined(XP_DARWIN)
 MOZ_COLD static void*
 AddressOfFPRegisterSlot(CONTEXT* context, FloatRegisters::Encoding encoding)
 {
     switch (encoding) {
       case X86Encoding::xmm0:  return &XMM_sig(context, 0);
@@ -539,34 +542,34 @@ SetRegisterToCoercedUndefined(EMULATOR_C
 {
     if (value.kind() == Disassembler::OtherOperand::FPR)
         SetFPRegToNaN(size, AddressOfFPRegisterSlot(context, value.fpr()));
     else
         SetGPRegToZero(AddressOfGPRegisterSlot(context, value.gpr()));
 }
 
 MOZ_COLD static void
-SetRegisterToLoadedValue(EMULATOR_CONTEXT* context, const void* addr, size_t size,
+SetRegisterToLoadedValue(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
                          const Disassembler::OtherOperand& value)
 {
     if (value.kind() == Disassembler::OtherOperand::FPR)
         SetFPRegToLoadedValue(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
     else
         SetGPRegToLoadedValue(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
 }
 
 MOZ_COLD static void
-SetRegisterToLoadedValueSext32(EMULATOR_CONTEXT* context, const void* addr, size_t size,
+SetRegisterToLoadedValueSext32(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
                                const Disassembler::OtherOperand& value)
 {
     SetGPRegToLoadedValueSext32(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
 }
 
 MOZ_COLD static void
-StoreValueFromRegister(EMULATOR_CONTEXT* context, void* addr, size_t size,
+StoreValueFromRegister(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
                        const Disassembler::OtherOperand& value)
 {
     if (value.kind() == Disassembler::OtherOperand::FPR)
         StoreValueFromFPReg(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
     else if (value.kind() == Disassembler::OtherOperand::GPR)
         StoreValueFromGPReg(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
     else
         StoreValueFromGPImm(addr, size, value.imm());
@@ -576,24 +579,24 @@ MOZ_COLD static uint8_t*
 ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddress& address)
 {
     MOZ_RELEASE_ASSERT(!address.isPCRelative(), "PC-relative addresses not supported yet");
 
     uintptr_t result = address.disp();
 
     if (address.hasBase()) {
         uintptr_t base;
-        StoreValueFromGPReg(&base, sizeof(uintptr_t),
+        StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
                             AddressOfGPRegisterSlot(context, address.base()));
         result += base;
     }
 
     if (address.hasIndex()) {
         uintptr_t index;
-        StoreValueFromGPReg(&index, sizeof(uintptr_t),
+        StoreValueFromGPReg(SharedMem<void*>::unshared(&index), sizeof(uintptr_t),
                             AddressOfGPRegisterSlot(context, address.index()));
         result += index * (1 << address.scale());
     }
 
     return reinterpret_cast<uint8_t*>(result);
 }
 
 MOZ_COLD static uint8_t*
@@ -616,23 +619,23 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
 #if defined(JS_CODEGEN_X64)
     // Check x64 asm.js heap access invariants.
     MOZ_RELEASE_ASSERT(address.disp() >= 0);
     MOZ_RELEASE_ASSERT(address.base() == HeapReg.code());
     MOZ_RELEASE_ASSERT(!address.hasIndex() || address.index() != HeapReg.code());
     MOZ_RELEASE_ASSERT(address.scale() == 0);
     if (address.hasBase()) {
         uintptr_t base;
-        StoreValueFromGPReg(&base, sizeof(uintptr_t),
+        StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
                             AddressOfGPRegisterSlot(context, address.base()));
         MOZ_RELEASE_ASSERT(reinterpret_cast<uint8_t*>(base) == module.maybeHeap());
     }
     if (address.hasIndex()) {
         uintptr_t index;
-        StoreValueFromGPReg(&index, sizeof(uintptr_t),
+        StoreValueFromGPReg(SharedMem<void*>::unshared(&index), sizeof(uintptr_t),
                             AddressOfGPRegisterSlot(context, address.index()));
         MOZ_RELEASE_ASSERT(uint32_t(index) == index);
     }
 #endif
 
     // Determine the actual effective address of the faulting access. We can't
     // rely on the faultingAddress given to us by the OS, because we need the
     // address of the start of the access, and the OS may sometimes give us an
@@ -657,17 +660,17 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
     // gets done at full pointer width, so it doesn't get properly wrapped.
     // We support this by extending AsmJSMappedSize to the greatest size
     // that could be reached by such an unwrapped address, and then when we
     // arrive here in the signal handler for such an access, we compute the
     // fully wrapped address, and perform the load or store on it.
     //
     // Taking a signal is really slow, but in theory programs really shouldn't
     // be hitting this anyway.
-    intptr_t unwrappedOffset = accessAddress - module.maybeHeap();
+    intptr_t unwrappedOffset = accessAddress - module.maybeHeap().unwrap(/*safe - for value*/);
     uint32_t wrappedOffset = uint32_t(unwrappedOffset);
     size_t size = access.size();
     MOZ_RELEASE_ASSERT(wrappedOffset + size > wrappedOffset);
     bool inBounds = wrappedOffset < module.heapLength() &&
                     wrappedOffset + size < module.heapLength();
 
     // If this is storing Z of an XYZ, check whether X is also in bounds, so
     // that we don't store anything before throwing.
@@ -675,29 +678,29 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
     uint32_t wrappedBaseOffset = uint32_t(unwrappedOffset - heapAccess->offsetWithinWholeSimdVector());
     if (wrappedBaseOffset >= module.heapLength())
         inBounds = false;
 
     if (inBounds) {
         // We now know that this is an access that is actually in bounds when
         // properly wrapped. Complete the load or store with the wrapped
         // address.
-        uint8_t* wrappedAddress = module.maybeHeap() + wrappedOffset;
+        SharedMem<uint8_t*> wrappedAddress = module.maybeHeap() + wrappedOffset;
         MOZ_RELEASE_ASSERT(wrappedAddress >= module.maybeHeap());
         MOZ_RELEASE_ASSERT(wrappedAddress + size > wrappedAddress);
         MOZ_RELEASE_ASSERT(wrappedAddress + size <= module.maybeHeap() + module.heapLength());
         switch (access.kind()) {
           case Disassembler::HeapAccess::Load:
-            SetRegisterToLoadedValue(context, wrappedAddress, size, access.otherOperand());
+            SetRegisterToLoadedValue(context, SharedMem<void*>(wrappedAddress), size, access.otherOperand());
             break;
           case Disassembler::HeapAccess::LoadSext32:
-            SetRegisterToLoadedValueSext32(context, wrappedAddress, size, access.otherOperand());
+            SetRegisterToLoadedValueSext32(context, SharedMem<void*>(wrappedAddress), size, access.otherOperand());
             break;
           case Disassembler::HeapAccess::Store:
-            StoreValueFromRegister(context, wrappedAddress, size, access.otherOperand());
+            StoreValueFromRegister(context, SharedMem<void*>(wrappedAddress), size, access.otherOperand());
             break;
           case Disassembler::HeapAccess::Unknown:
             MOZ_CRASH("Failed to disassemble instruction");
         }
     } else {
         // We now know that this is an out-of-bounds access made by an asm.js
         // load/store that we should handle.
 
--- a/js/src/builtin/AtomicsObject.cpp
+++ b/js/src/builtin/AtomicsObject.cpp
@@ -117,60 +117,67 @@ js::atomics_fence(JSContext* cx, unsigne
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     jit::AtomicOperations::fenceSeqCst();
     args.rval().setUndefined();
     return true;
 }
 
 static int32_t
-CompareExchange(Scalar::Type viewType, int32_t oldCandidate, int32_t newCandidate, void* viewData,
-                uint32_t offset, bool* badArrayType=nullptr)
+CompareExchange(Scalar::Type viewType, int32_t oldCandidate, int32_t newCandidate,
+                SharedMem<void*> viewData, uint32_t offset, bool* badArrayType = nullptr)
 {
     switch (viewType) {
       case Scalar::Int8: {
         int8_t oldval = (int8_t)oldCandidate;
         int8_t newval = (int8_t)newCandidate;
-        oldval = jit::AtomicOperations::compareExchangeSeqCst((int8_t*)viewData + offset, oldval, newval);
+        oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<int8_t*>(viewData) + offset,
+                                                              oldval, newval);
         return oldval;
       }
       case Scalar::Uint8: {
         uint8_t oldval = (uint8_t)oldCandidate;
         uint8_t newval = (uint8_t)newCandidate;
-        oldval = jit::AtomicOperations::compareExchangeSeqCst((uint8_t*)viewData + offset, oldval, newval);
+        oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<uint8_t*>(viewData) + offset,
+                                                              oldval, newval);
         return oldval;
       }
       case Scalar::Uint8Clamped: {
         uint8_t oldval = ClampIntForUint8Array(oldCandidate);
         uint8_t newval = ClampIntForUint8Array(newCandidate);
-        oldval = jit::AtomicOperations::compareExchangeSeqCst((uint8_t*)viewData + offset, oldval, newval);
+        oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<uint8_t*>(viewData) + offset,
+                                                              oldval, newval);
         return oldval;
       }
       case Scalar::Int16: {
         int16_t oldval = (int16_t)oldCandidate;
         int16_t newval = (int16_t)newCandidate;
-        oldval = jit::AtomicOperations::compareExchangeSeqCst((int16_t*)viewData + offset, oldval, newval);
+        oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<int16_t*>(viewData) + offset,
+                                                              oldval, newval);
         return oldval;
       }
       case Scalar::Uint16: {
         uint16_t oldval = (uint16_t)oldCandidate;
         uint16_t newval = (uint16_t)newCandidate;
-        oldval = jit::AtomicOperations::compareExchangeSeqCst((uint16_t*)viewData + offset, oldval, newval);
+        oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<uint16_t*>(viewData) + offset,
+                                                              oldval, newval);
         return oldval;
       }
       case Scalar::Int32: {
         int32_t oldval = oldCandidate;
         int32_t newval = newCandidate;
-        oldval = jit::AtomicOperations::compareExchangeSeqCst((int32_t*)viewData + offset, oldval, newval);
+        oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<int32_t*>(viewData) + offset,
+                                                              oldval, newval);
         return oldval;
       }
       case Scalar::Uint32: {
         uint32_t oldval = (uint32_t)oldCandidate;
         uint32_t newval = (uint32_t)newCandidate;
-        oldval = jit::AtomicOperations::compareExchangeSeqCst((uint32_t*)viewData + offset, oldval, newval);
+        oldval = jit::AtomicOperations::compareExchangeSeqCst(SharedMem<uint32_t*>(viewData) + offset,
+                                                              oldval, newval);
         return (int32_t)oldval;
       }
       default:
         if (badArrayType)
             *badArrayType = true;
         return 0;
     }
 }
@@ -194,17 +201,18 @@ js::atomics_compareExchange(JSContext* c
     int32_t oldCandidate;
     if (!ToInt32(cx, oldv, &oldCandidate))
         return false;
     int32_t newCandidate;
     if (!ToInt32(cx, newv, &newCandidate))
         return false;
 
     bool badType = false;
-    int32_t result = CompareExchange(view->type(), oldCandidate, newCandidate, view->viewData(), offset, &badType);
+    int32_t result = CompareExchange(view->type(), oldCandidate, newCandidate,
+                                     view->viewDataShared(), offset, &badType);
 
     if (badType)
         return ReportBadArrayType(cx);
 
     if (view->type() == Scalar::Uint32)
         r.setNumber((double)(uint32_t)result);
     else
         r.setInt32(result);
@@ -221,105 +229,106 @@ js::atomics_load(JSContext* cx, unsigned
 
     Rooted<SharedTypedArrayObject*> view(cx, nullptr);
     if (!GetSharedTypedArray(cx, objv, &view))
         return false;
     uint32_t offset;
     if (!GetSharedTypedArrayIndex(cx, idxv, view, &offset))
         return false;
 
+    SharedMem<void*> viewData = view->viewDataShared();
     switch (view->type()) {
       case Scalar::Uint8:
       case Scalar::Uint8Clamped: {
-        uint8_t v = jit::AtomicOperations::loadSeqCst((uint8_t*)view->viewData() + offset);
+        uint8_t v = jit::AtomicOperations::loadSeqCst(SharedMem<uint8_t*>(viewData) + offset);
         r.setInt32(v);
         return true;
       }
       case Scalar::Int8: {
-        int8_t v = jit::AtomicOperations::loadSeqCst((uint8_t*)view->viewData() + offset);
+        int8_t v = jit::AtomicOperations::loadSeqCst(SharedMem<uint8_t*>(viewData) + offset);
         r.setInt32(v);
         return true;
       }
       case Scalar::Int16: {
-        int16_t v = jit::AtomicOperations::loadSeqCst((int16_t*)view->viewData() + offset);
+        int16_t v = jit::AtomicOperations::loadSeqCst(SharedMem<int16_t*>(viewData) + offset);
         r.setInt32(v);
         return true;
       }
       case Scalar::Uint16: {
-        uint16_t v = jit::AtomicOperations::loadSeqCst((uint16_t*)view->viewData() + offset);
+        uint16_t v = jit::AtomicOperations::loadSeqCst(SharedMem<uint16_t*>(viewData) + offset);
         r.setInt32(v);
         return true;
       }
       case Scalar::Int32: {
-        int32_t v = jit::AtomicOperations::loadSeqCst((int32_t*)view->viewData() + offset);
+        int32_t v = jit::AtomicOperations::loadSeqCst(SharedMem<int32_t*>(viewData) + offset);
         r.setInt32(v);
         return true;
       }
       case Scalar::Uint32: {
-        uint32_t v = jit::AtomicOperations::loadSeqCst((uint32_t*)view->viewData() + offset);
+        uint32_t v = jit::AtomicOperations::loadSeqCst(SharedMem<uint32_t*>(viewData) + offset);
         r.setNumber(v);
         return true;
       }
       default:
         return ReportBadArrayType(cx);
     }
 }
 
 enum XchgStoreOp {
     DoExchange,
     DoStore
 };
 
 template<XchgStoreOp op>
 static int32_t
-ExchangeOrStore(Scalar::Type viewType, int32_t numberValue, void* viewData, uint32_t offset,
-                bool* badArrayType=nullptr)
+ExchangeOrStore(Scalar::Type viewType, int32_t numberValue, SharedMem<void*> viewData,
+                uint32_t offset, bool* badArrayType = nullptr)
 {
 #define INT_OP(ptr, value)                                         \
     JS_BEGIN_MACRO                                                 \
     if (op == DoStore)                                             \
         jit::AtomicOperations::storeSeqCst(ptr, value);            \
     else                                                           \
         value = jit::AtomicOperations::exchangeSeqCst(ptr, value); \
     JS_END_MACRO
 
     switch (viewType) {
       case Scalar::Int8: {
         int8_t value = (int8_t)numberValue;
-        INT_OP((int8_t*)viewData + offset, value);
+        INT_OP(SharedMem<int8_t*>(viewData) + offset, value);
         return value;
       }
       case Scalar::Uint8: {
         uint8_t value = (uint8_t)numberValue;
-        INT_OP((uint8_t*)viewData + offset, value);
+        INT_OP(SharedMem<uint8_t*>(viewData) + offset, value);
         return value;
       }
       case Scalar::Uint8Clamped: {
         uint8_t value = ClampIntForUint8Array(numberValue);
-        INT_OP((uint8_t*)viewData + offset, value);
+        INT_OP(SharedMem<uint8_t*>(viewData) + offset, value);
         return value;
       }
       case Scalar::Int16: {
         int16_t value = (int16_t)numberValue;
-        INT_OP((int16_t*)viewData + offset, value);
+        INT_OP(SharedMem<int16_t*>(viewData) + offset, value);
         return value;
       }
       case Scalar::Uint16: {
         uint16_t value = (uint16_t)numberValue;
-        INT_OP((uint16_t*)viewData + offset, value);
+        INT_OP(SharedMem<uint16_t*>(viewData) + offset, value);
         return value;
       }
       case Scalar::Int32: {
         int32_t value = numberValue;
-        INT_OP((int32_t*)viewData + offset, value);
+        INT_OP(SharedMem<int32_t*>(viewData) + offset, value);
         return value;
       }
       case Scalar::Uint32: {
         uint32_t value = (uint32_t)numberValue;
-        INT_OP((uint32_t*)viewData + offset, value);
+        INT_OP(SharedMem<uint32_t*>(viewData) + offset, value);
         return (int32_t)value;
       }
       default:
         if (badArrayType)
             *badArrayType = true;
         return 0;
     }
 #undef INT_OP
@@ -341,17 +350,18 @@ ExchangeOrStore(JSContext* cx, unsigned 
     uint32_t offset;
     if (!GetSharedTypedArrayIndex(cx, idxv, view, &offset))
         return false;
     int32_t numberValue;
     if (!ToInt32(cx, valv, &numberValue))
         return false;
 
     bool badType = false;
-    int32_t result = ExchangeOrStore<op>(view->type(), numberValue, view->viewData(), offset, &badType);
+    int32_t result = ExchangeOrStore<op>(view->type(), numberValue, view->viewDataShared(), offset,
+                                         &badType);
 
     if (badType)
         return ReportBadArrayType(cx);
 
     if (view->type() == Scalar::Uint32)
         r.setNumber((double)(uint32_t)result);
     else
         r.setInt32(result);
@@ -380,79 +390,80 @@ AtomicsBinop(JSContext* cx, HandleValue 
         return false;
     uint32_t offset;
     if (!GetSharedTypedArrayIndex(cx, idxv, view, &offset))
         return false;
     int32_t numberValue;
     if (!ToInt32(cx, valv, &numberValue))
         return false;
 
+    SharedMem<void*> viewData = view->viewDataShared();
     switch (view->type()) {
       case Scalar::Int8: {
         int8_t v = (int8_t)numberValue;
-        r.setInt32(T::operate((int8_t*)view->viewData() + offset, v));
+        r.setInt32(T::operate(SharedMem<int8_t*>(viewData) + offset, v));
         return true;
       }
       case Scalar::Uint8: {
         uint8_t v = (uint8_t)numberValue;
-        r.setInt32(T::operate((uint8_t*)view->viewData() + offset, v));
+        r.setInt32(T::operate(SharedMem<uint8_t*>(viewData) + offset, v));
         return true;
       }
       case Scalar::Uint8Clamped: {
         // Spec says:
         //  - clamp the input value
         //  - perform the operation
         //  - clamp the result
         //  - store the result
         // This requires a CAS loop.
         int32_t value = ClampIntForUint8Array(numberValue);
-        uint8_t* loc = (uint8_t*)view->viewData() + offset;
+        SharedMem<uint8_t*> loc = SharedMem<uint8_t*>(viewData) + offset;
         for (;;) {
-            uint8_t old = *loc;
+            uint8_t old = jit::AtomicOperations::loadSafeWhenRacy(loc);
             uint8_t result = (uint8_t)ClampIntForUint8Array(T::perform(old, value));
             uint8_t tmp = jit::AtomicOperations::compareExchangeSeqCst(loc, old, result);
             if (tmp == old) {
                 r.setInt32(old);
                 break;
             }
         }
         return true;
       }
       case Scalar::Int16: {
         int16_t v = (int16_t)numberValue;
-        r.setInt32(T::operate((int16_t*)view->viewData() + offset, v));
+        r.setInt32(T::operate(SharedMem<int16_t*>(viewData) + offset, v));
         return true;
       }
       case Scalar::Uint16: {
         uint16_t v = (uint16_t)numberValue;
-        r.setInt32(T::operate((uint16_t*)view->viewData() + offset, v));
+        r.setInt32(T::operate(SharedMem<uint16_t*>(viewData) + offset, v));
         return true;
       }
       case Scalar::Int32: {
         int32_t v = numberValue;
-        r.setInt32(T::operate((int32_t*)view->viewData() + offset, v));
+        r.setInt32(T::operate(SharedMem<int32_t*>(viewData) + offset, v));
         return true;
       }
       case Scalar::Uint32: {
         uint32_t v = (uint32_t)numberValue;
-        r.setNumber((double)T::operate((uint32_t*)view->viewData() + offset, v));
+        r.setNumber((double)T::operate(SharedMem<uint32_t*>(viewData) + offset, v));
         return true;
       }
       default:
         return ReportBadArrayType(cx);
     }
 }
 
 #define INTEGRAL_TYPES_FOR_EACH(NAME) \
-    static int8_t operate(int8_t* addr, int8_t v) { return NAME(addr, v); } \
-    static uint8_t operate(uint8_t* addr, uint8_t v) { return NAME(addr, v); } \
-    static int16_t operate(int16_t* addr, int16_t v) { return NAME(addr, v); } \
-    static uint16_t operate(uint16_t* addr, uint16_t v) { return NAME(addr, v); } \
-    static int32_t operate(int32_t* addr, int32_t v) { return NAME(addr, v); } \
-    static uint32_t operate(uint32_t* addr, uint32_t v) { return NAME(addr, v); }
+    static int8_t operate(SharedMem<int8_t*> addr, int8_t v) { return NAME(addr, v); } \
+    static uint8_t operate(SharedMem<uint8_t*> addr, uint8_t v) { return NAME(addr, v); } \
+    static int16_t operate(SharedMem<int16_t*> addr, int16_t v) { return NAME(addr, v); } \
+    static uint16_t operate(SharedMem<uint16_t*> addr, uint16_t v) { return NAME(addr, v); } \
+    static int32_t operate(SharedMem<int32_t*> addr, int32_t v) { return NAME(addr, v); } \
+    static uint32_t operate(SharedMem<uint32_t*> addr, uint32_t v) { return NAME(addr, v); }
 
 class PerformAdd
 {
 public:
     INTEGRAL_TYPES_FOR_EACH(jit::AtomicOperations::fetchAddSeqCst)
     static int32_t perform(int32_t x, int32_t y) { return x + y; }
 };
 
@@ -530,143 +541,146 @@ js::atomics_isLockFree(JSContext* cx, un
     }
     args.rval().setBoolean(jit::AtomicOperations::isLockfree(v.toInt32()));
     return true;
 }
 
 // asm.js callouts for platforms that do not have non-word-sized
 // atomics where we don't want to inline the logic for the atomics.
 //
+// Memory will always be shared since the callouts are only called from
+// code that checks that the memory is shared.
+//
 // To test this, either run on eg Raspberry Pi Model 1, or invoke the ARM
 // simulator build with ARMHWCAP=vfp set.  Do not set any other flags; other
 // vfp/neon flags force ARMv7 to be set.
 
 static void
-GetCurrentAsmJSHeap(void** heap, size_t* length)
+GetCurrentAsmJSHeap(SharedMem<void*>* heap, size_t* length)
 {
     JSRuntime* rt = js::TlsPerThreadData.get()->runtimeFromMainThread();
     AsmJSModule& mod = rt->asmJSActivationStack()->module();
-    *heap = mod.heapDatum();
+    *heap = SharedMem<void*>(mod.maybeHeap());
     *length = mod.heapLength();
 }
 
 int32_t
 js::atomics_add_asm_callout(int32_t vt, int32_t offset, int32_t value)
 {
-    void* heap;
+    SharedMem<void*> heap;
     size_t heapLength;
     GetCurrentAsmJSHeap(&heap, &heapLength);
     if (size_t(offset) >= heapLength)
         return 0;
     switch (Scalar::Type(vt)) {
       case Scalar::Int8:
-        return PerformAdd::operate((int8_t*)heap + offset, value);
+        return PerformAdd::operate(SharedMem<int8_t*>(heap) + offset, value);
       case Scalar::Uint8:
-        return PerformAdd::operate((uint8_t*)heap + offset, value);
+        return PerformAdd::operate(SharedMem<uint8_t*>(heap) + offset, value);
       case Scalar::Int16:
-        return PerformAdd::operate((int16_t*)heap + (offset >> 1), value);
+        return PerformAdd::operate(SharedMem<int16_t*>(heap) + (offset >> 1), value);
       case Scalar::Uint16:
-        return PerformAdd::operate((uint16_t*)heap + (offset >> 1), value);
+        return PerformAdd::operate(SharedMem<uint16_t*>(heap) + (offset >> 1), value);
       default:
         MOZ_CRASH("Invalid size");
     }
 }
 
 int32_t
 js::atomics_sub_asm_callout(int32_t vt, int32_t offset, int32_t value)
 {
-    void* heap;
+    SharedMem<void*> heap;
     size_t heapLength;
     GetCurrentAsmJSHeap(&heap, &heapLength);
     if (size_t(offset) >= heapLength)
         return 0;
     switch (Scalar::Type(vt)) {
       case Scalar::Int8:
-        return PerformSub::operate((int8_t*)heap + offset, value);
+        return PerformSub::operate(SharedMem<int8_t*>(heap) + offset, value);
       case Scalar::Uint8:
-        return PerformSub::operate((uint8_t*)heap + offset, value);
+        return PerformSub::operate(SharedMem<uint8_t*>(heap) + offset, value);
       case Scalar::Int16:
-        return PerformSub::operate((int16_t*)heap + (offset >> 1), value);
+        return PerformSub::operate(SharedMem<int16_t*>(heap) + (offset >> 1), value);
       case Scalar::Uint16:
-        return PerformSub::operate((uint16_t*)heap + (offset >> 1), value);
+        return PerformSub::operate(SharedMem<uint16_t*>(heap) + (offset >> 1), value);
       default:
         MOZ_CRASH("Invalid size");
     }
 }
 
 int32_t
 js::atomics_and_asm_callout(int32_t vt, int32_t offset, int32_t value)
 {
-    void* heap;
+    SharedMem<void*> heap;
     size_t heapLength;
     GetCurrentAsmJSHeap(&heap, &heapLength);
     if (size_t(offset) >= heapLength)
         return 0;
     switch (Scalar::Type(vt)) {
       case Scalar::Int8:
-        return PerformAnd::operate((int8_t*)heap + offset, value);
+        return PerformAnd::operate(SharedMem<int8_t*>(heap) + offset, value);
       case Scalar::Uint8:
-        return PerformAnd::operate((uint8_t*)heap + offset, value);
+        return PerformAnd::operate(SharedMem<uint8_t*>(heap) + offset, value);
       case Scalar::Int16:
-        return PerformAnd::operate((int16_t*)heap + (offset >> 1), value);
+        return PerformAnd::operate(SharedMem<int16_t*>(heap) + (offset >> 1), value);
       case Scalar::Uint16:
-        return PerformAnd::operate((uint16_t*)heap + (offset >> 1), value);
+        return PerformAnd::operate(SharedMem<uint16_t*>(heap) + (offset >> 1), value);
       default:
         MOZ_CRASH("Invalid size");
     }
 }
 
 int32_t
 js::atomics_or_asm_callout(int32_t vt, int32_t offset, int32_t value)
 {
-    void* heap;
+    SharedMem<void*> heap;
     size_t heapLength;
     GetCurrentAsmJSHeap(&heap, &heapLength);
     if (size_t(offset) >= heapLength)
         return 0;
     switch (Scalar::Type(vt)) {
       case Scalar::Int8:
-        return PerformOr::operate((int8_t*)heap + offset, value);
+        return PerformOr::operate(SharedMem<int8_t*>(heap) + offset, value);
       case Scalar::Uint8:
-        return PerformOr::operate((uint8_t*)heap + offset, value);
+        return PerformOr::operate(SharedMem<uint8_t*>(heap) + offset, value);
       case Scalar::Int16:
-        return PerformOr::operate((int16_t*)heap + (offset >> 1), value);
+        return PerformOr::operate(SharedMem<int16_t*>(heap) + (offset >> 1), value);
       case Scalar::Uint16:
-        return PerformOr::operate((uint16_t*)heap + (offset >> 1), value);
+        return PerformOr::operate(SharedMem<uint16_t*>(heap) + (offset >> 1), value);
       default:
         MOZ_CRASH("Invalid size");
     }
 }
 
 int32_t
 js::atomics_xor_asm_callout(int32_t vt, int32_t offset, int32_t value)
 {
-    void* heap;
+    SharedMem<void*> heap;
     size_t heapLength;
     GetCurrentAsmJSHeap(&heap, &heapLength);
     if (size_t(offset) >= heapLength)
         return 0;
     switch (Scalar::Type(vt)) {
       case Scalar::Int8:
-        return PerformXor::operate((int8_t*)heap + offset, value);
+        return PerformXor::operate(SharedMem<int8_t*>(heap) + offset, value);
       case Scalar::Uint8:
-        return PerformXor::operate((uint8_t*)heap + offset, value);
+        return PerformXor::operate(SharedMem<uint8_t*>(heap) + offset, value);
       case Scalar::Int16:
-        return PerformXor::operate((int16_t*)heap + (offset >> 1), value);
+        return PerformXor::operate(SharedMem<int16_t*>(heap) + (offset >> 1), value);
       case Scalar::Uint16:
-        return PerformXor::operate((uint16_t*)heap + (offset >> 1), value);
+        return PerformXor::operate(SharedMem<uint16_t*>(heap) + (offset >> 1), value);
       default:
         MOZ_CRASH("Invalid size");
     }
 }
 
 int32_t
 js::atomics_xchg_asm_callout(int32_t vt, int32_t offset, int32_t value)
 {
-    void* heap;
+    SharedMem<void*> heap;
     size_t heapLength;
     GetCurrentAsmJSHeap(&heap, &heapLength);
     if (size_t(offset) >= heapLength)
         return 0;
     switch (Scalar::Type(vt)) {
       case Scalar::Int8:
         return ExchangeOrStore<DoExchange>(Scalar::Int8, value, heap, offset);
       case Scalar::Uint8:
@@ -678,17 +692,17 @@ js::atomics_xchg_asm_callout(int32_t vt,
       default:
         MOZ_CRASH("Invalid size");
     }
 }
 
 int32_t
 js::atomics_cmpxchg_asm_callout(int32_t vt, int32_t offset, int32_t oldval, int32_t newval)
 {
-    void* heap;
+    SharedMem<void*> heap;
     size_t heapLength;
     GetCurrentAsmJSHeap(&heap, &heapLength);
     if (size_t(offset) >= heapLength)
         return 0;
     switch (Scalar::Type(vt)) {
       case Scalar::Int8:
         return CompareExchange(Scalar::Int8, oldval, newval, heap, offset);
       case Scalar::Uint8:
@@ -792,18 +806,18 @@ js::atomics_futexWait(JSContext* cx, uns
         else if (timeout_ms < 0)
             timeout_ms = 0;
     }
 
     // This lock also protects the "waiters" field on SharedArrayRawBuffer,
     // and it provides the necessary memory fence.
     AutoLockFutexAPI lock;
 
-    int32_t* addr = (int32_t*)view->viewData() + offset;
-    if (*addr != value) {
+    SharedMem<int32_t*> addr = SharedMem<int32_t*>(view->viewDataShared()) + offset;
+    if (jit::AtomicOperations::loadSafeWhenRacy(addr) != value) {
         r.setInt32(AtomicsObject::FutexNotequal);
         return true;
     }
 
     Rooted<SharedArrayBufferObject*> sab(cx, &view->buffer()->as<SharedArrayBufferObject>());
     SharedArrayRawBuffer* sarb = sab->rawBufferObject();
 
     FutexWaiter w(offset, rt);
@@ -908,18 +922,18 @@ js::atomics_futexWakeOrRequeue(JSContext
     if (!ToInt32(cx, valv, &value))
         return false;
     uint32_t offset2;
     if (!GetSharedTypedArrayIndex(cx, idx2v, view, &offset2))
         return false;
 
     AutoLockFutexAPI lock;
 
-    int32_t* addr = (int32_t*)view->viewData() + offset1;
-    if (*addr != value) {
+    SharedMem<int32_t*> addr = SharedMem<int32_t*>(view->viewDataShared()) + offset1;
+    if (jit::AtomicOperations::loadSafeWhenRacy(addr) != value) {
         r.setInt32(AtomicsObject::FutexNotequal);
         return true;
     }
 
     Rooted<SharedArrayBufferObject*> sab(cx, &view->buffer()->as<SharedArrayBufferObject>());
     SharedArrayRawBuffer* sarb = sab->rawBufferObject();
 
     // Walk the list of waiters looking for those waiting on offset1.
--- a/js/src/builtin/SIMD.cpp
+++ b/js/src/builtin/SIMD.cpp
@@ -1143,19 +1143,19 @@ Load(JSContext* cx, unsigned argc, Value
         return false;
 
     Rooted<TypeDescr*> typeDescr(cx, &V::GetTypeDescr(*cx->global()));
     MOZ_ASSERT(typeDescr);
     Rooted<TypedObject*> result(cx, TypedObject::createZeroed(cx, typeDescr, 0));
     if (!result)
         return false;
 
-    Elem* src = reinterpret_cast<Elem*>(static_cast<char*>(AnyTypedArrayViewData(typedArray)) + byteStart);
+    SharedMem<Elem*> src = SharedMem<Elem*>(AnyTypedArrayViewData(typedArray).addBytes(byteStart));
     Elem* dst = reinterpret_cast<Elem*>(result->typedMem());
-    memcpy(dst, src, sizeof(Elem) * NumElem);
+    jit::AtomicOperations::memcpySafeWhenRacy(dst, src, sizeof(Elem) * NumElem);
 
     args.rval().setObject(*result);
     return true;
 }
 
 template<class V, unsigned NumElem>
 static bool
 Store(JSContext* cx, unsigned argc, Value* vp)
@@ -1170,18 +1170,18 @@ Store(JSContext* cx, unsigned argc, Valu
     RootedObject typedArray(cx);
     if (!TypedArrayFromArgs<Elem, NumElem>(cx, args, &typedArray, &byteStart))
         return false;
 
     if (!IsVectorObject<V>(args[2]))
         return ErrorBadArgs(cx);
 
     Elem* src = TypedObjectMemory<Elem*>(args[2]);
-    Elem* dst = reinterpret_cast<Elem*>(static_cast<char*>(AnyTypedArrayViewData(typedArray)) + byteStart);
-    memcpy(dst, src, sizeof(Elem) * NumElem);
+    SharedMem<Elem*> dst = SharedMem<Elem*>(AnyTypedArrayViewData(typedArray).addBytes(byteStart));
+    js::jit::AtomicOperations::memcpySafeWhenRacy(dst, src, sizeof(Elem) * NumElem);
 
     args.rval().setObject(args[2].toObject());
     return true;
 }
 
 #define DEFINE_SIMD_FLOAT32X4_FUNCTION(Name, Func, Operands)       \
 bool                                                               \
 js::simd_float32x4_##Name(JSContext* cx, unsigned argc, Value* vp) \
--- a/js/src/gc/Nursery.h
+++ b/js/src/gc/Nursery.h
@@ -15,16 +15,17 @@
 #include "gc/Heap.h"
 #include "gc/Memory.h"
 #include "js/Class.h"
 #include "js/GCAPI.h"
 #include "js/HashTable.h"
 #include "js/HeapAPI.h"
 #include "js/Value.h"
 #include "js/Vector.h"
+#include "vm/SharedMem.h"
 
 namespace JS {
 struct Zone;
 } // namespace JS
 
 namespace js {
 
 class ObjectElements;
@@ -125,16 +126,20 @@ class Nursery
     /*
      * Check whether an arbitrary pointer is within the nursery. This is
      * slower than IsInsideNursery(Cell*), but works on all types of pointers.
      */
     MOZ_ALWAYS_INLINE bool isInside(gc::Cell* cellp) const = delete;
     MOZ_ALWAYS_INLINE bool isInside(const void* p) const {
         return uintptr_t(p) >= heapStart_ && uintptr_t(p) < heapEnd_;
     }
+    template<typename T>
+    bool isInside(const SharedMem<T>& p) const {
+        return isInside(p.unwrap(/*safe - used for value in comparison above*/));
+    }
 
     /*
      * Allocate and return a pointer to a new GC object with its |slots|
      * pointer pre-filled. Returns nullptr if the Nursery is full.
      */
     JSObject* allocateObject(JSContext* cx, size_t size, size_t numDynamic, const js::Class* clasp);
 
     /* Allocate a buffer for a given zone, using the nursery if possible. */
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -28,16 +28,17 @@
 #include "js/Conversions.h"
 #include "js/TraceableVector.h"
 #include "vm/Opcodes.h"
 #include "vm/TypedArrayCommon.h"
 
 #include "jsboolinlines.h"
 #include "jsscriptinlines.h"
 
+#include "jit/AtomicOperations-inl.h"
 #include "jit/JitFrames-inl.h"
 #include "jit/MacroAssembler-inl.h"
 #include "jit/shared/Lowering-shared-inl.h"
 #include "vm/Interpreter-inl.h"
 #include "vm/ScopeObject-inl.h"
 #include "vm/StringObject-inl.h"
 #include "vm/UnboxedObject-inl.h"
 
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -9165,17 +9165,17 @@ IonBuilder::addTypedArrayLengthAndData(M
     JSObject* tarr = nullptr;
 
     if (obj->isConstantValue() && obj->constantValue().isObject())
         tarr = &obj->constantValue().toObject();
     else if (obj->resultTypeSet())
         tarr = obj->resultTypeSet()->maybeSingleton();
 
     if (tarr) {
-        void* data = AnyTypedArrayViewData(tarr);
+        SharedMem<void*> data = AnyTypedArrayViewData(tarr);
         // Bug 979449 - Optimistically embed the elements and use TI to
         //              invalidate if we move them.
         bool isTenured = !tarr->runtimeFromMainThread()->gc.nursery.isInside(data);
         if (isTenured && tarr->isSingleton()) {
             // The 'data' pointer of TypedArrayObject can change in rare circumstances
             // (ArrayBufferObject::changeContents).
             TypeSet::ObjectKey* tarrKey = TypeSet::ObjectKey::get(tarr);
             if (!tarrKey->unknownProperties()) {
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -2214,17 +2214,19 @@ void
 LIRGenerator::visitElements(MElements* ins)
 {
     define(new(alloc()) LElements(useRegisterAtStart(ins->object())), ins);
 }
 
 void
 LIRGenerator::visitConstantElements(MConstantElements* ins)
 {
-    define(new(alloc()) LPointer(ins->value(), LPointer::NON_GC_THING), ins);
+    define(new(alloc()) LPointer(ins->value().unwrap(/*safe - pointer does not flow back to C++*/),
+                                 LPointer::NON_GC_THING),
+           ins);
 }
 
 void
 LIRGenerator::visitConvertElementsToDoubles(MConvertElementsToDoubles* ins)
 {
     LInstruction* check = new(alloc()) LConvertElementsToDoubles(useRegister(ins->elements()));
     add(check, ins);
     assignSafepoint(check, ins);
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -2,16 +2,17 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/MIR.h"
 
 #include "mozilla/FloatingPoint.h"
+#include "mozilla/IntegerPrintfMacros.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/SizePrintfMacros.h"
 
 #include <ctype.h>
 
 #include "jslibmath.h"
 #include "jsstr.h"
 
@@ -1083,17 +1084,17 @@ MCompare::printOpcode(GenericPrinter& ou
     MDefinition::printOpcode(out);
     out.printf(" %s", js_CodeName[jsop()]);
 }
 
 void
 MConstantElements::printOpcode(GenericPrinter& out) const
 {
     PrintOpcodeName(out, op());
-    out.printf(" %p", value());
+    out.printf(" 0x%" PRIxPTR, value().asValue());
 }
 
 void
 MLoadUnboxedScalar::printOpcode(GenericPrinter& out) const
 {
     MDefinition::printOpcode(out);
     out.printf(" %s", ScalarTypeDescr::typeName(storageType()));
 }
@@ -4567,17 +4568,17 @@ InlinePropertyTable::buildTypeSetForFunc
         return nullptr;
     for (size_t i = 0; i < numEntries(); i++) {
         if (entries_[i]->func == func)
             types->addType(TypeSet::ObjectType(entries_[i]->group), alloc);
     }
     return types;
 }
 
-void*
+SharedMem<void*>
 MLoadTypedArrayElementStatic::base() const
 {
     return AnyTypedArrayViewData(someTypedArray_);
 }
 
 size_t
 MLoadTypedArrayElementStatic::length() const
 {
@@ -4596,17 +4597,17 @@ MLoadTypedArrayElementStatic::congruentT
         return false;
     if (accessType() != other->accessType())
         return false;
     if (base() != other->base())
         return false;
     return congruentIfOperandsEqual(other);
 }
 
-void*
+SharedMem<void*>
 MStoreTypedArrayElementStatic::base() const
 {
     return AnyTypedArrayViewData(someTypedArray_);
 }
 
 bool
 MGetElementCache::allowDoubleResult() const
 {
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -22,16 +22,17 @@
 #include "jit/InlineList.h"
 #include "jit/JitAllocPolicy.h"
 #include "jit/MacroAssembler.h"
 #include "jit/MOpcodes.h"
 #include "jit/TypedObjectPrediction.h"
 #include "jit/TypePolicy.h"
 #include "vm/ArrayObject.h"
 #include "vm/ScopeObject.h"
+#include "vm/SharedMem.h"
 #include "vm/TypedArrayCommon.h"
 #include "vm/UnboxedObject.h"
 
 // Undo windows.h damage on Win64
 #undef MemoryBarrier
 
 namespace js {
 
@@ -7768,43 +7769,43 @@ class MElements
     AliasSet getAliasSet() const override {
         return AliasSet::Load(AliasSet::ObjectFields);
     }
     bool mightAlias(const MDefinition* store) const override;
 
     ALLOW_CLONE(MElements)
 };
 
-// A constant value for some object's array elements or typed array elements.
+// A constant value for some object's typed array elements.
 class MConstantElements : public MNullaryInstruction
 {
-    void* value_;
+    SharedMem<void*> value_;
 
   protected:
-    explicit MConstantElements(void* v)
+    explicit MConstantElements(SharedMem<void*> v)
       : value_(v)
     {
         setResultType(MIRType_Elements);
         setMovable();
     }
 
   public:
     INSTRUCTION_HEADER(ConstantElements)
-    static MConstantElements* New(TempAllocator& alloc, void* v) {
+    static MConstantElements* New(TempAllocator& alloc, SharedMem<void*> v) {
         return new(alloc) MConstantElements(v);
     }
 
-    void* value() const {
+    SharedMem<void*> value() const {
         return value_;
     }
 
     void printOpcode(GenericPrinter& out) const override;
 
     HashNumber valueHash() const override {
-        return (HashNumber)(size_t) value_;
+        return (HashNumber)(size_t) value_.asValue();
     }
 
     bool congruentTo(const MDefinition* ins) const override {
         return ins->isConstantElements() && ins->toConstantElements()->value() == value();
     }
 
     AliasSet getAliasSet() const override {
         return AliasSet::None();
@@ -9643,17 +9644,17 @@ class MLoadTypedArrayElementStatic
     {
         return new(alloc) MLoadTypedArrayElementStatic(someTypedArray, ptr, offset,
                                                        needsBoundsCheck);
     }
 
     Scalar::Type accessType() const {
         return AnyTypedArrayType(someTypedArray_);
     }
-    void* base() const;
+    SharedMem<void*> base() const;
     size_t length() const;
 
     MDefinition* ptr() const { return getOperand(0); }
     int32_t offset() const { return offset_; }
     void setOffset(int32_t offset) { offset_ = offset; }
     bool congruentTo(const MDefinition* ins) const override;
     AliasSet getAliasSet() const override {
         return AliasSet::Load(AliasSet::UnboxedElement);
@@ -9907,17 +9908,17 @@ class MStoreTypedArrayElementStatic :
         return new(alloc) MStoreTypedArrayElementStatic(someTypedArray, ptr, v,
                                                         offset, needsBoundsCheck);
     }
 
     Scalar::Type accessType() const {
         return writeType();
     }
 
-    void* base() const;
+    SharedMem<void*> base() const;
     size_t length() const;
 
     MDefinition* ptr() const { return getOperand(0); }
     MDefinition* value() const { return getOperand(1); }
     bool needsBoundsCheck() const { return needsBoundsCheck_; }
     void setNeedsBoundsCheck(bool v) { needsBoundsCheck_ = v; }
     int32_t offset() const { return offset_; }
     void setOffset(int32_t offset) { offset_ = offset; }
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -14,16 +14,18 @@
 #include "jit/MIR.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
 #include "js/Conversions.h"
 #include "vm/TypedArrayCommon.h"
 
 #include "jsopcodeinlines.h"
 
+#include "jit/AtomicOperations-inl.h"
+
 using namespace js;
 using namespace js::jit;
 
 using mozilla::Abs;
 using mozilla::CountLeadingZeroes32;
 using mozilla::NumberEqualsInt32;
 using mozilla::ExponentComponent;
 using mozilla::FloorLog2;
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -296,17 +296,17 @@ CodeGeneratorX86::visitLoadTypedArrayEle
 
         masm.cmpPtr(ptr, ImmWord(mir->length()));
         if (ool)
             masm.j(Assembler::AboveOrEqual, ool->entry());
         else
             bailoutIf(Assembler::AboveOrEqual, ins->snapshot());
     }
 
-    Operand srcAddr(ptr, int32_t(mir->base()) + int32_t(offset));
+    Operand srcAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
     load(accessType, srcAddr, out);
     if (accessType == Scalar::Float64)
         masm.canonicalizeDouble(ToFloatRegister(out));
     if (accessType == Scalar::Float32)
         masm.canonicalizeFloat(ToFloatRegister(out));
     if (ool)
         masm.bind(ool->rejoin());
 }
@@ -501,27 +501,27 @@ CodeGeneratorX86::visitStoreTypedArrayEl
 {
     MStoreTypedArrayElementStatic* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
     Register ptr = ToRegister(ins->ptr());
     const LAllocation* value = ins->value();
     uint32_t offset = mir->offset();
 
     if (!mir->needsBoundsCheck()) {
-        Operand dstAddr(ptr, int32_t(mir->base()) + int32_t(offset));
+        Operand dstAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
         store(accessType, value, dstAddr);
         return;
     }
 
     MOZ_ASSERT(offset == 0);
     masm.cmpPtr(ptr, ImmWord(mir->length()));
     Label rejoin;
     masm.j(Assembler::AboveOrEqual, &rejoin);
 
-    Operand dstAddr(ptr, (int32_t) mir->base());
+    Operand dstAddr(ptr, int32_t(mir->base().asValue()));
     store(accessType, value, dstAddr);
     masm.bind(&rejoin);
 }
 
 void
 CodeGeneratorX86::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in,
                             const Operand& dstAddr)
 {
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -86,16 +86,18 @@
 #include "vm/TypedArrayCommon.h"
 #include "vm/WrapperObject.h"
 #include "vm/Xdr.h"
 
 #include "jsatominlines.h"
 #include "jsfuninlines.h"
 #include "jsscriptinlines.h"
 
+#include "jit/AtomicOperations-inl.h"
+
 #include "vm/Interpreter-inl.h"
 #include "vm/NativeObject-inl.h"
 #include "vm/String-inl.h"
 
 using namespace js;
 using namespace js::gc;
 
 using mozilla::Maybe;
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -33,16 +33,18 @@
 #include "vm/ArgumentsObject.h"
 #include "vm/Interpreter.h"
 #include "vm/Shape.h"
 #include "vm/StringBuffer.h"
 #include "vm/TypedArrayCommon.h"
 
 #include "jsatominlines.h"
 
+#include "jit/AtomicOperations-inl.h"
+
 #include "vm/ArgumentsObject-inl.h"
 #include "vm/ArrayObject-inl.h"
 #include "vm/Interpreter-inl.h"
 #include "vm/NativeObject-inl.h"
 #include "vm/Runtime-inl.h"
 #include "vm/UnboxedObject-inl.h"
 
 using namespace js;
--- a/js/src/jsiter.cpp
+++ b/js/src/jsiter.cpp
@@ -30,16 +30,18 @@
 #include "vm/GlobalObject.h"
 #include "vm/Interpreter.h"
 #include "vm/Shape.h"
 #include "vm/StopIterationObject.h"
 #include "vm/TypedArrayCommon.h"
 
 #include "jsscriptinlines.h"
 
+#include "jit/AtomicOperations-inl.h"
+
 #include "vm/NativeObject-inl.h"
 #include "vm/Stack-inl.h"
 #include "vm/String-inl.h"
 
 using namespace js;
 using namespace js::gc;
 using JS::ForOfIterator;
 
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -54,16 +54,18 @@
 #include "vm/Shape.h"
 #include "vm/TypedArrayCommon.h"
 
 #include "jsatominlines.h"
 #include "jsboolinlines.h"
 #include "jscntxtinlines.h"
 #include "jscompartmentinlines.h"
 
+#include "jit/AtomicOperations-inl.h"
+
 #include "vm/ArrayObject-inl.h"
 #include "vm/BooleanObject-inl.h"
 #include "vm/Interpreter-inl.h"
 #include "vm/NativeObject-inl.h"
 #include "vm/NumberObject-inl.h"
 #include "vm/Runtime-inl.h"
 #include "vm/Shape-inl.h"
 #include "vm/StringObject-inl.h"
--- a/js/src/vm/ArrayBufferObject-inl.h
+++ b/js/src/vm/ArrayBufferObject-inl.h
@@ -9,35 +9,37 @@
 
 /* Utilities and common inline code for ArrayBufferObject and SharedArrayBufferObject */
 
 #include "vm/ArrayBufferObject.h"
 
 #include "js/Value.h"
 
 #include "vm/SharedArrayObject.h"
+#include "vm/SharedMem.h"
 
 namespace js {
 
+inline SharedMem<uint8_t*>
+ArrayBufferObjectMaybeShared::dataPointerMaybeShared()
+{
+    ArrayBufferObjectMaybeShared* buf = this;
+    if (buf->is<ArrayBufferObject>())
+        return buf->as<ArrayBufferObject>().dataPointerShared();
+    return buf->as<SharedArrayBufferObject>().dataPointerShared();
+}
+
 inline uint32_t
 AnyArrayBufferByteLength(const ArrayBufferObjectMaybeShared* buf)
 {
     if (buf->is<ArrayBufferObject>())
         return buf->as<ArrayBufferObject>().byteLength();
     return buf->as<SharedArrayBufferObject>().byteLength();
 }
 
-inline uint8_t*
-AnyArrayBufferDataPointer(const ArrayBufferObjectMaybeShared* buf)
-{
-    if (buf->is<ArrayBufferObject>())
-        return buf->as<ArrayBufferObject>().dataPointer();
-    return buf->as<SharedArrayBufferObject>().dataPointer();
-}
-
 inline ArrayBufferObjectMaybeShared&
 AsAnyArrayBuffer(HandleValue val)
 {
     if (val.toObject().is<ArrayBufferObject>())
         return val.toObject().as<ArrayBufferObject>();
     return val.toObject().as<SharedArrayBufferObject>();
 }
 
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -721,16 +721,22 @@ ArrayBufferObject::inlineDataPointer() c
 }
 
 uint8_t*
 ArrayBufferObject::dataPointer() const
 {
     return static_cast<uint8_t*>(getSlot(DATA_SLOT).toPrivate());
 }
 
+SharedMem<uint8_t*>
+ArrayBufferObject::dataPointerShared() const
+{
+    return SharedMem<uint8_t*>::unshared(getSlot(DATA_SLOT).toPrivate());
+}
+
 void
 ArrayBufferObject::releaseData(FreeOp* fop)
 {
     MOZ_ASSERT(ownsData());
 
     switch (bufferKind()) {
       case PLAIN:
       case ASMJS_MALLOCED:
--- a/js/src/vm/ArrayBufferObject.h
+++ b/js/src/vm/ArrayBufferObject.h
@@ -6,16 +6,17 @@
 
 #ifndef vm_ArrayBufferObject_h
 #define vm_ArrayBufferObject_h
 
 #include "jsobj.h"
 
 #include "builtin/TypedObjectConstants.h"
 #include "vm/Runtime.h"
+#include "vm/SharedMem.h"
 
 typedef struct JSProperty JSProperty;
 
 namespace js {
 
 class ArrayBufferViewObject;
 
 // The inheritance hierarchy for the various classes relating to typed arrays
@@ -70,29 +71,26 @@ class ArrayBufferViewObject;
 // that (3) may only be pointed to by the typed array the data is inline with.
 //
 // During a minor GC, (3) and (4) may move. During a compacting GC, (2), (3),
 // and (4) may move.
 
 class ArrayBufferObjectMaybeShared;
 
 uint32_t AnyArrayBufferByteLength(const ArrayBufferObjectMaybeShared* buf);
-uint8_t* AnyArrayBufferDataPointer(const ArrayBufferObjectMaybeShared* buf);
 ArrayBufferObjectMaybeShared& AsAnyArrayBuffer(HandleValue val);
 
 class ArrayBufferObjectMaybeShared : public NativeObject
 {
   public:
     uint32_t byteLength() {
         return AnyArrayBufferByteLength(this);
     }
 
-    uint8_t* dataPointer() {
-        return AnyArrayBufferDataPointer(this);
-    }
+    inline SharedMem<uint8_t*> dataPointerMaybeShared();
 };
 
 /*
  * ArrayBufferObject
  *
  * This class holds the underlying raw buffer that the various ArrayBufferViews
  * (eg DataViewObject, the TypedArrays, TypedObjects) access. It can be created
  * explicitly and used to construct an ArrayBufferView, or can be created
@@ -293,16 +291,17 @@ class ArrayBufferObject : public ArrayBu
     void changeViewContents(JSContext* cx, ArrayBufferViewObject* view,
                             uint8_t* oldDataPointer, BufferContents newContents);
     void setFirstView(ArrayBufferViewObject* view);
 
     uint8_t* inlineDataPointer() const;
 
   public:
     uint8_t* dataPointer() const;
+    SharedMem<uint8_t*> dataPointerShared() const;
     size_t byteLength() const;
     BufferContents contents() const {
         return BufferContents(dataPointer(), bufferKind());
     }
     bool hasInlineData() const {
         return dataPointer() == inlineDataPointer();
     }
 
--- a/js/src/vm/NativeObject.cpp
+++ b/js/src/vm/NativeObject.cpp
@@ -14,16 +14,17 @@
 #include "gc/Marking.h"
 #include "js/Value.h"
 #include "vm/Debugger.h"
 #include "vm/TypedArrayCommon.h"
 
 #include "jsobjinlines.h"
 
 #include "gc/Nursery-inl.h"
+#include "jit/AtomicOperations-inl.h"
 #include "vm/ArrayObject-inl.h"
 #include "vm/ScopeObject-inl.h"
 #include "vm/Shape-inl.h"
 
 using namespace js;
 
 using JS::GenericNaN;
 using mozilla::ArrayLength;
--- a/js/src/vm/SelfHosting.cpp
+++ b/js/src/vm/SelfHosting.cpp
@@ -35,16 +35,18 @@
 #include "vm/GeneratorObject.h"
 #include "vm/Interpreter.h"
 #include "vm/String.h"
 #include "vm/TypedArrayCommon.h"
 
 #include "jsfuninlines.h"
 #include "jsscriptinlines.h"
 
+#include "jit/AtomicOperations-inl.h"
+
 #include "vm/BooleanObject-inl.h"
 #include "vm/NativeObject-inl.h"
 #include "vm/NumberObject-inl.h"
 #include "vm/StringObject-inl.h"
 
 using namespace js;
 using namespace js::selfhosted;
 
--- a/js/src/vm/SharedArrayObject.cpp
+++ b/js/src/vm/SharedArrayObject.cpp
@@ -18,20 +18,23 @@
 #ifndef XP_WIN
 # include <sys/mman.h>
 #endif
 #ifdef MOZ_VALGRIND
 # include <valgrind/memcheck.h>
 #endif
 
 #include "asmjs/AsmJSValidate.h"
+#include "vm/SharedMem.h"
 #include "vm/TypedArrayCommon.h"
 
 #include "jsobjinlines.h"
 
+#include "jit/AtomicOperations-inl.h"
+
 using namespace js;
 
 static inline void*
 MapMemory(size_t length, bool commit)
 {
 #ifdef XP_WIN
     int prot = (commit ? MEM_COMMIT : MEM_RESERVE);
     int flags = (commit ? PAGE_READWRITE : PAGE_NOACCESS);
@@ -148,29 +151,33 @@ SharedArrayRawBuffer::addReference()
 void
 SharedArrayRawBuffer::dropReference()
 {
     // Drop the reference to the buffer.
     uint32_t refcount = --this->refcount; // Atomic.
 
     // If this was the final reference, release the buffer.
     if (refcount == 0) {
-        uint8_t* p = this->dataPointer() - AsmJSPageSize;
-        MOZ_ASSERT(uintptr_t(p) % AsmJSPageSize == 0);
+        SharedMem<uint8_t*> p = this->dataPointerShared() - AsmJSPageSize;
+
+        MOZ_ASSERT(p.asValue() % AsmJSPageSize == 0);
+
+        uint8_t* address = p.unwrap(/*safe - only reference*/);
 #if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
         numLive--;
-        UnmapMemory(p, SharedArrayMappedSize);
+        UnmapMemory(address, SharedArrayMappedSize);
 #       if defined(MOZ_VALGRIND) \
            && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
         // Tell Valgrind/Memcheck to recommence reporting accesses in the
         // previously-inaccessible region.
-        VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(p, SharedArrayMappedSize);
+        VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(address,
+                                                      SharedArrayMappedSize);
 #       endif
 #else
-        UnmapMemory(p, this->length + AsmJSPageSize);
+        UnmapMemory(address, this->length + AsmJSPageSize);
 #endif
     }
 }
 
 const JSFunctionSpec SharedArrayBufferObject::jsfuncs[] = {
     /* Nothing yet */
     JS_FS_END
 };
@@ -388,37 +395,35 @@ js::AsSharedArrayBuffer(HandleObject obj
     MOZ_ASSERT(IsSharedArrayBuffer(obj));
     return obj->as<SharedArrayBufferObject>();
 }
 
 JS_FRIEND_API(void)
 js::GetSharedArrayBufferViewLengthAndData(JSObject* obj, uint32_t* length, uint8_t** data)
 {
     MOZ_ASSERT(obj->is<SharedTypedArrayObject>());
-
     *length = obj->as<SharedTypedArrayObject>().byteLength();
-
-    *data = static_cast<uint8_t*>(obj->as<SharedTypedArrayObject>().viewData());
+    *data = static_cast<uint8_t*>(obj->as<SharedTypedArrayObject>().viewDataShared().unwrap(/*safe - caller knows*/));
 }
 
 JS_FRIEND_API(void)
 js::GetSharedArrayBufferLengthAndData(JSObject* obj, uint32_t* length, uint8_t** data)
 {
     MOZ_ASSERT(obj->is<SharedArrayBufferObject>());
     *length = obj->as<SharedArrayBufferObject>().byteLength();
-    *data = obj->as<SharedArrayBufferObject>().dataPointer();
+    *data = obj->as<SharedArrayBufferObject>().dataPointerShared().unwrap(/*safe - caller knows*/);
 }
 
 JS_FRIEND_API(bool)
 JS_IsSharedArrayBufferObject(JSObject* obj)
 {
     obj = CheckedUnwrap(obj);
     return obj ? obj->is<SharedArrayBufferObject>() : false;
 }
 
 JS_FRIEND_API(uint8_t*)
 JS_GetSharedArrayBufferData(JSObject* obj, const JS::AutoCheckCannotGC&)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return nullptr;
-    return obj->as<SharedArrayBufferObject>().dataPointer();
+    return obj->as<SharedArrayBufferObject>().dataPointerShared().unwrap(/*safe - caller knows*/);
 }
--- a/js/src/vm/SharedArrayObject.h
+++ b/js/src/vm/SharedArrayObject.h
@@ -52,17 +52,17 @@ class SharedArrayRawBuffer
     FutexWaiter* waiters_;
 
   protected:
     SharedArrayRawBuffer(uint8_t* buffer, uint32_t length)
       : refcount(1),
         length(length),
         waiters_(nullptr)
     {
-        MOZ_ASSERT(buffer == dataPointer());
+        MOZ_ASSERT(buffer == dataPointerShared());
     }
 
   public:
     static SharedArrayRawBuffer* New(JSContext* cx, uint32_t length);
 
     // This may be called from multiple threads.  The caller must take
     // care of mutual exclusion.
     FutexWaiter* waiters() const {
@@ -70,21 +70,22 @@ class SharedArrayRawBuffer
     }
 
     // This may be called from multiple threads.  The caller must take
     // care of mutual exclusion.
     void setWaiters(FutexWaiter* waiters) {
         waiters_ = waiters;
     }
 
-    inline uint8_t* dataPointer() const {
-        return ((uint8_t*)this) + sizeof(SharedArrayRawBuffer);
+    SharedMem<uint8_t*> dataPointerShared() const {
+        uint8_t* ptr = reinterpret_cast<uint8_t*>(const_cast<SharedArrayRawBuffer*>(this));
+        return SharedMem<uint8_t*>::shared(ptr + sizeof(SharedArrayRawBuffer));
     }
 
-    inline uint32_t byteLength() const {
+    uint32_t byteLength() const {
         return length;
     }
 
     void addReference();
     void dropReference();
 };
 
 /*
@@ -138,29 +139,29 @@ class SharedArrayBufferObject : public A
 
     static void addSizeOfExcludingThis(JSObject* obj, mozilla::MallocSizeOf mallocSizeOf,
                                        JS::ClassInfo* info);
 
     SharedArrayRawBuffer* rawBufferObject() const;
 
     // Invariant: This method does not cause GC and can be called
     // without anchoring the object it is called on.
-    void* globalID() const {
+    uintptr_t globalID() const {
         // The buffer address is good enough as an ID provided the memory is not shared
         // between processes or, if it is, it is mapped to the same address in every
         // process.  (At the moment, shared memory cannot be shared between processes.)
-        return dataPointer();
+        return dataPointerShared().asValue();
     }
 
     uint32_t byteLength() const {
         return rawBufferObject()->byteLength();
     }
 
-    uint8_t* dataPointer() const {
-        return rawBufferObject()->dataPointer();
+    SharedMem<uint8_t*> dataPointerShared() const {
+        return rawBufferObject()->dataPointerShared();
     }
 
 private:
     void acceptRawBuffer(SharedArrayRawBuffer* buffer);
     void dropRawBuffer();
 };
 
 bool IsSharedArrayBuffer(HandleValue v);
--- a/js/src/vm/SharedTypedArrayObject.cpp
+++ b/js/src/vm/SharedTypedArrayObject.cpp
@@ -27,27 +27,30 @@
 # include "jswin.h"
 #endif
 #include "jswrapper.h"
 
 #include "asmjs/AsmJSModule.h"
 #include "asmjs/AsmJSValidate.h"
 #include "gc/Barrier.h"
 #include "gc/Marking.h"
+#include "jit/AtomicOperations.h"
 #include "js/Conversions.h"
 #include "vm/ArrayBufferObject.h"
 #include "vm/GlobalObject.h"
 #include "vm/Interpreter.h"
 #include "vm/SharedArrayObject.h"
+#include "vm/SharedMem.h"
 #include "vm/TypedArrayCommon.h"
 #include "vm/WrapperObject.h"
 
 #include "jsatominlines.h"
 #include "jsobjinlines.h"
 
+#include "jit/AtomicOperations-inl.h"
 #include "vm/Shape-inl.h"
 
 using namespace js;
 using namespace js::gc;
 
 using mozilla::IsNaN;
 using mozilla::NegativeInfinity;
 using mozilla::PodCopy;
@@ -65,18 +68,18 @@ TypedArrayLayout SharedTypedArrayObject:
 inline void
 InitSharedArrayBufferViewDataPointer(SharedTypedArrayObject* obj, SharedArrayBufferObject* buffer, size_t byteOffset)
 {
     /*
      * N.B. The base of the array's data is stored in the object's
      * private data rather than a slot to avoid the restriction that
      * private Values that are pointers must have the low bits clear.
      */
-    MOZ_ASSERT(buffer->dataPointer() != nullptr);
-    obj->initPrivate(buffer->dataPointer() + byteOffset);
+    MOZ_ASSERT(buffer->dataPointerShared() != nullptr);
+    obj->initPrivate(buffer->dataPointerShared().unwrap(/*safe - read only by viewDataShared*/) + byteOffset);
 }
 
 // See note in TypedArrayObject.cpp about how we can probably merge
 // the below type with the one in that file, once TypedArrayObject is
 // less dissimilar from SharedTypedArrayObject (ie, when it is closer
 // to ES6).
 
 template<typename NativeType>
@@ -360,17 +363,17 @@ class SharedTypedArrayObjectTemplate : p
                                     attrs);
     }
 
     static const NativeType
     getIndex(JSObject* obj, uint32_t index)
     {
         SharedTypedArrayObject& tarray = obj->as<SharedTypedArrayObject>();
         MOZ_ASSERT(index < tarray.length());
-        return static_cast<const NativeType*>(tarray.viewData())[index];
+        return jit::AtomicOperations::loadSafeWhenRacy(SharedMem<NativeType*>(tarray.viewDataShared()) + index);
     }
 
     static void
     setIndexValue(SharedTypedArrayObject& tarray, uint32_t index, double d)
     {
         // If the array is an integer array, we only handle up to
         // 32-bit ints from this point on.  if we want to handle
         // 64-bit ints, we'll need some changes.
@@ -392,17 +395,17 @@ class SharedTypedArrayObjectTemplate : p
             setIndex(tarray, index, NativeType(n));
         }
     }
 
     static void
     setIndex(SharedTypedArrayObject& tarray, uint32_t index, NativeType val)
     {
         MOZ_ASSERT(index < tarray.length());
-        static_cast<NativeType*>(tarray.viewData())[index] = val;
+        jit::AtomicOperations::storeSafeWhenRacy(SharedMem<NativeType*>(tarray.viewDataShared()) + index, val);
     }
 
     static Value getIndexValue(JSObject* tarray, uint32_t index);
 
     static bool fun_subarray(JSContext* cx, unsigned argc, Value* vp);
     static bool fun_copyWithin(JSContext* cx, unsigned argc, Value* vp);
     static bool fun_set(JSContext* cx, unsigned argc, Value* vp);
 
@@ -671,17 +674,17 @@ IMPL_SHARED_TYPED_ARRAY_JSAPI_CONSTRUCTO
                                                                                             \
       const Class* clasp = obj->getClass();                                                 \
       const Scalar::Type id = SharedTypedArrayObjectTemplate<InternalType>::ArrayTypeID();  \
       if (clasp != &SharedTypedArrayObject::classes[id])                                    \
           return nullptr;                                                                   \
                                                                                             \
       SharedTypedArrayObject* tarr = &obj->as<SharedTypedArrayObject>();                    \
       *length = tarr->length();                                                             \
-      *data = static_cast<ExternalType*>(tarr->viewData());                                \
+      *data = static_cast<ExternalType*>(tarr->viewDataShared().unwrap(/*safe - caller knows*/)); \
                                                                                             \
       return obj;                                                                           \
   }
 
 IMPL_SHARED_TYPED_ARRAY_COMBINED_UNWRAPPERS(Int8, int8_t, int8_t)
 IMPL_SHARED_TYPED_ARRAY_COMBINED_UNWRAPPERS(Uint8, uint8_t, uint8_t)
 IMPL_SHARED_TYPED_ARRAY_COMBINED_UNWRAPPERS(Uint8Clamped, uint8_t, uint8_clamped)
 IMPL_SHARED_TYPED_ARRAY_COMBINED_UNWRAPPERS(Int16, int16_t, int16_t)
@@ -1004,105 +1007,105 @@ SharedTypedArrayObject::setElement(Share
 JS_FRIEND_API(int8_t*)
 JS_GetSharedInt8ArrayData(JSObject* obj, const JS::AutoCheckCannotGC&)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return nullptr;
     SharedTypedArrayObject* tarr = &obj->as<SharedTypedArrayObject>();
     MOZ_ASSERT((int32_t) tarr->type() == Scalar::Int8);
-    return static_cast<int8_t*>(tarr->viewData());
+    return static_cast<int8_t*>(tarr->viewDataShared().unwrap(/*safe - caller knows*/));
 }
 
 JS_FRIEND_API(uint8_t*)
 JS_GetSharedUint8ArrayData(JSObject* obj, const JS::AutoCheckCannotGC&)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return nullptr;
     SharedTypedArrayObject* tarr = &obj->as<SharedTypedArrayObject>();
     MOZ_ASSERT((int32_t) tarr->type() == Scalar::Uint8);
-    return static_cast<uint8_t*>(tarr->viewData());
+    return static_cast<uint8_t*>(tarr->viewDataShared().unwrap(/*safe - caller knows*/));
 }
 
 JS_FRIEND_API(uint8_t*)
 JS_GetSharedUint8ClampedArrayData(JSObject* obj, const JS::AutoCheckCannotGC&)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return nullptr;
     SharedTypedArrayObject* tarr = &obj->as<SharedTypedArrayObject>();
     MOZ_ASSERT((int32_t) tarr->type() == Scalar::Uint8Clamped);
-    return static_cast<uint8_t*>(tarr->viewData());
+    return static_cast<uint8_t*>(tarr->viewDataShared().unwrap(/*safe - caller knows*/));
 }
 
 JS_FRIEND_API(int16_t*)
 JS_GetSharedInt16ArrayData(JSObject* obj, const JS::AutoCheckCannotGC&)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return nullptr;
     SharedTypedArrayObject* tarr = &obj->as<SharedTypedArrayObject>();
     MOZ_ASSERT((int32_t) tarr->type() == Scalar::Int16);
-    return static_cast<int16_t*>(tarr->viewData());
+    return static_cast<int16_t*>(tarr->viewDataShared().unwrap(/*safe - caller knows*/));
 }
 
 JS_FRIEND_API(uint16_t*)
 JS_GetSharedUint16ArrayData(JSObject* obj, const JS::AutoCheckCannotGC&)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return nullptr;
     SharedTypedArrayObject* tarr = &obj->as<SharedTypedArrayObject>();
     MOZ_ASSERT((int32_t) tarr->type() == Scalar::Uint16);
-    return static_cast<uint16_t*>(tarr->viewData());
+    return static_cast<uint16_t*>(tarr->viewDataShared().unwrap(/*safe - caller knows*/));
 }
 
 JS_FRIEND_API(int32_t*)
 JS_GetSharedInt32ArrayData(JSObject* obj, const JS::AutoCheckCannotGC&)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return nullptr;
     SharedTypedArrayObject* tarr = &obj->as<SharedTypedArrayObject>();
     MOZ_ASSERT((int32_t) tarr->type() == Scalar::Int32);
-    return static_cast<int32_t*>(tarr->viewData());
+    return static_cast<int32_t*>(tarr->viewDataShared().unwrap(/*safe - caller knows*/));
 }
 
 JS_FRIEND_API(uint32_t*)
 JS_GetSharedUint32ArrayData(JSObject* obj, const JS::AutoCheckCannotGC&)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return nullptr;
     SharedTypedArrayObject* tarr = &obj->as<SharedTypedArrayObject>();
     MOZ_ASSERT((int32_t) tarr->type() == Scalar::Uint32);
-    return static_cast<uint32_t*>(tarr->viewData());
+    return static_cast<uint32_t*>(tarr->viewDataShared().unwrap(/*safe - caller knows*/));
 }
 
 JS_FRIEND_API(float*)
 JS_GetSharedFloat32ArrayData(JSObject* obj, const JS::AutoCheckCannotGC&)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return nullptr;
     SharedTypedArrayObject* tarr = &obj->as<SharedTypedArrayObject>();
     MOZ_ASSERT((int32_t) tarr->type() == Scalar::Float32);
-    return static_cast<float*>(tarr->viewData());
+    return static_cast<float*>(tarr->viewDataShared().unwrap(/*safe - caller knows*/));
 }
 
 JS_FRIEND_API(double*)
 JS_GetSharedFloat64ArrayData(JSObject* obj, const JS::AutoCheckCannotGC&)
 {
     obj = CheckedUnwrap(obj);
     if (!obj)
         return nullptr;
     SharedTypedArrayObject* tarr = &obj->as<SharedTypedArrayObject>();
     MOZ_ASSERT((int32_t) tarr->type() == Scalar::Float64);
-    return static_cast<double*>(tarr->viewData());
+    return static_cast<double*>(tarr->viewDataShared().unwrap(/*safe - caller knows*/));
 }
 
 #undef IMPL_SHARED_TYPED_ARRAY_STATICS
 #undef IMPL_SHARED_TYPED_ARRAY_JSAPI_CONSTRUCTORS
 #undef IMPL_SHARED_TYPED_ARRAY_COMBINED_UNWRAPPERS
 #undef SHARED_TYPED_ARRAY_CLASS_SPEC
 #undef IMPL_SHARED_TYPED_ARRAY_PROTO_CLASS
 #undef IMPL_SHARED_TYPED_ARRAY_FAST_CLASS
--- a/js/src/vm/SharedTypedArrayObject.h
+++ b/js/src/vm/SharedTypedArrayObject.h
@@ -9,16 +9,17 @@
 
 #include "jsobj.h"
 
 #include "builtin/TypedObjectConstants.h"
 #include "gc/Barrier.h"
 #include "js/Class.h"
 #include "vm/ArrayBufferObject.h"
 #include "vm/SharedArrayObject.h"
+#include "vm/SharedMem.h"
 #include "vm/TypedArrayObject.h"
 
 typedef struct JSProperty JSProperty;
 
 namespace js {
 
 // Note that the representation of a SharedTypedArrayObject is the
 // same as the representation of a TypedArrayObject, see comments in
@@ -71,18 +72,18 @@ class SharedTypedArrayObject : public Na
     static bool isOriginalLengthGetter(Scalar::Type type, Native native);
 
     SharedArrayBufferObject* buffer() const;
 
     inline Scalar::Type type() const;
 
     inline size_t bytesPerElement() const;
 
-    void* viewData() const {
-        return getPrivate(DATA_SLOT);
+    SharedMem<void*> viewDataShared() const {
+        return SharedMem<void*>::shared(getPrivate(DATA_SLOT));
     }
     uint32_t byteOffset() const {
         return byteOffsetValue(const_cast<SharedTypedArrayObject*>(this)).toInt32();
     }
     uint32_t byteLength() const {
         return byteLengthValue(const_cast<SharedTypedArrayObject*>(this)).toInt32();
     }
     uint32_t length() const {
--- a/js/src/vm/TypedArrayCommon.h
+++ b/js/src/vm/TypedArrayCommon.h
@@ -12,16 +12,18 @@
 #include "mozilla/Assertions.h"
 #include "mozilla/FloatingPoint.h"
 #include "mozilla/PodOperations.h"
 
 #include "jsarray.h"
 #include "jscntxt.h"
 #include "jsnum.h"
 
+#include "jit/AtomicOperations.h"
+
 #include "js/Conversions.h"
 #include "js/Value.h"
 
 #include "vm/SharedTypedArrayObject.h"
 #include "vm/TypedArrayObject.h"
 
 namespace js {
 
@@ -104,22 +106,22 @@ AnyTypedArrayShape(JSObject* obj)
 inline const TypedArrayLayout&
 AnyTypedArrayLayout(const JSObject* obj)
 {
     if (obj->is<TypedArrayObject>())
         return obj->as<TypedArrayObject>().layout();
     return obj->as<SharedTypedArrayObject>().layout();
 }
 
-inline void*
+inline SharedMem<void*>
 AnyTypedArrayViewData(const JSObject* obj)
 {
     if (obj->is<TypedArrayObject>())
-        return obj->as<TypedArrayObject>().viewData();
-    return obj->as<SharedTypedArrayObject>().viewData();
+        return obj->as<TypedArrayObject>().viewDataShared();
+    return obj->as<SharedTypedArrayObject>().viewDataShared();
 }
 
 inline uint32_t
 AnyTypedArrayBytesPerElement(const JSObject* obj)
 {
     if (obj->is<TypedArrayObject>())
         return obj->as<TypedArrayObject>().bytesPerElement();
     return obj->as<SharedTypedArrayObject>().bytesPerElement();
@@ -134,17 +136,65 @@ AnyTypedArrayByteLength(const JSObject* 
 }
 
 inline bool
 IsAnyTypedArrayClass(const Class* clasp)
 {
     return IsTypedArrayClass(clasp) || IsSharedTypedArrayClass(clasp);
 }
 
-template<class SpecificArray>
+class SharedOps
+{
+  public:
+    template<typename T>
+    static T load(SharedMem<T*> addr) {
+        return js::jit::AtomicOperations::loadSafeWhenRacy(addr);
+    }
+
+    template<typename T>
+    static void store(SharedMem<T*> addr, T value) {
+        js::jit::AtomicOperations::storeSafeWhenRacy(addr, value);
+    }
+
+    template<typename T>
+    static void memcpy(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
+        js::jit::AtomicOperations::memcpySafeWhenRacy(dest, src, size);
+    }
+
+    template<typename T>
+    static void memmove(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
+        js::jit::AtomicOperations::memmoveSafeWhenRacy(dest, src, size);
+    }
+};
+
+class UnsharedOps
+{
+  public:
+    template<typename T>
+    static T load(SharedMem<T*> addr) {
+        return *addr.unwrapUnshared();
+    }
+
+    template<typename T>
+    static void store(SharedMem<T*> addr, T value) {
+        *addr.unwrapUnshared() = value;
+    }
+
+    template<typename T>
+    static void memcpy(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
+        ::memcpy(dest.unwrapUnshared(), src.unwrapUnshared(), size);
+    }
+
+    template<typename T>
+    static void memmove(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
+        ::memmove(dest.unwrapUnshared(), src.unwrapUnshared(), size);
+    }
+};
+
+template<class SpecificArray, typename Ops>
 class ElementSpecific
 {
     typedef typename SpecificArray::ElementType T;
     typedef typename SpecificArray::SomeTypedArray SomeTypedArray;
 
   public:
     /*
      * Copy |source|'s elements into |target|, starting at |target[offset]|.
@@ -163,88 +213,80 @@ class ElementSpecific
         MOZ_ASSERT(AnyTypedArrayLength(source) <= target->length() - offset);
 
         if (source->is<SomeTypedArray>()) {
             Rooted<SomeTypedArray*> src(cx, source.as<SomeTypedArray>());
             if (SomeTypedArray::sameBuffer(target, src))
                 return setFromOverlappingTypedArray(cx, target, src, offset);
         }
 
-        T* dest = static_cast<T*>(target->viewData()) + offset;
+        SharedMem<T*> dest = SharedMem<T*>(AnyTypedArrayViewData(target)) + offset;
         uint32_t count = AnyTypedArrayLength(source);
 
         if (AnyTypedArrayType(source) == target->type()) {
-            mozilla::PodCopy(dest, static_cast<T*>(AnyTypedArrayViewData(source)), count);
+            Ops::memcpy(SharedMem<void*>(dest), AnyTypedArrayViewData(source), count*sizeof(T));
             return true;
         }
 
+        // Inhibit unaligned accesses on ARM (bug 1097253, a compiler bug).
 #ifdef __arm__
-#  define JS_VOLATILE_ARM volatile // Inhibit unaligned accesses on ARM.
+#  define JS_VOLATILE_ARM volatile
 #else
-#  define JS_VOLATILE_ARM /* nothing */
+#  define JS_VOLATILE_ARM
 #endif
 
-        void* data = AnyTypedArrayViewData(source);
+        SharedMem<void*> data = AnyTypedArrayViewData(source);
         switch (AnyTypedArrayType(source)) {
           case Scalar::Int8: {
-            JS_VOLATILE_ARM
-            int8_t* src = static_cast<int8_t*>(data);
-
+            SharedMem<JS_VOLATILE_ARM int8_t*> src = SharedMem<JS_VOLATILE_ARM int8_t*>(data);
             for (uint32_t i = 0; i < count; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(Ops::load(src++)));
             break;
           }
           case Scalar::Uint8:
           case Scalar::Uint8Clamped: {
-            JS_VOLATILE_ARM
-            uint8_t* src = static_cast<uint8_t*>(data);
+            SharedMem<JS_VOLATILE_ARM uint8_t*> src = SharedMem<JS_VOLATILE_ARM uint8_t*>(data);
             for (uint32_t i = 0; i < count; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(Ops::load(src++)));
             break;
           }
           case Scalar::Int16: {
-            JS_VOLATILE_ARM
-            int16_t* src = static_cast<int16_t*>(data);
+            SharedMem<JS_VOLATILE_ARM int16_t*> src = SharedMem<JS_VOLATILE_ARM int16_t*>(data);
             for (uint32_t i = 0; i < count; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(Ops::load(src++)));
             break;
           }
           case Scalar::Uint16: {
-            JS_VOLATILE_ARM
-            uint16_t* src = static_cast<uint16_t*>(data);
+            SharedMem<JS_VOLATILE_ARM uint16_t*> src = SharedMem<JS_VOLATILE_ARM uint16_t*>(data);
             for (uint32_t i = 0; i < count; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(Ops::load(src++)));
             break;
           }
           case Scalar::Int32: {
-            JS_VOLATILE_ARM
-            int32_t* src = static_cast<int32_t*>(data);
+            SharedMem<JS_VOLATILE_ARM int32_t*> src = SharedMem<JS_VOLATILE_ARM int32_t*>(data);
             for (uint32_t i = 0; i < count; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(Ops::load(src++)));
             break;
           }
           case Scalar::Uint32: {
-            JS_VOLATILE_ARM
-            uint32_t* src = static_cast<uint32_t*>(data);
+            SharedMem<JS_VOLATILE_ARM uint32_t*> src = SharedMem<JS_VOLATILE_ARM uint32_t*>(data);
             for (uint32_t i = 0; i < count; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(Ops::load(src++)));
             break;
           }
           case Scalar::Float32: {
-            JS_VOLATILE_ARM
-            float* src = static_cast<float*>(data);
+            SharedMem<JS_VOLATILE_ARM float*> src = SharedMem<JS_VOLATILE_ARM float*>(data);
             for (uint32_t i = 0; i < count; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(Ops::load(src++)));
             break;
           }
           case Scalar::Float64: {
-            JS_VOLATILE_ARM
-            double* src = static_cast<double*>(data);
+            SharedMem<JS_VOLATILE_ARM double*> src = SharedMem<JS_VOLATILE_ARM double*>(data);
             for (uint32_t i = 0; i < count; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(Ops::load(src++)));
             break;
           }
           default:
             MOZ_CRASH("setFromAnyTypedArray with a typed array with bogus type");
         }
 
 #undef JS_VOLATILE_ARM
 
@@ -266,26 +308,26 @@ class ElementSpecific
                    "use setFromAnyTypedArray instead of this method");
 
         uint32_t i = 0;
         if (source->isNative()) {
             // Attempt fast-path infallible conversion of dense elements up to
             // the first potentially side-effectful lookup or conversion.
             uint32_t bound = Min(source->as<NativeObject>().getDenseInitializedLength(), len);
 
-            T* dest = static_cast<T*>(target->viewData()) + offset;
+            SharedMem<T*> dest = SharedMem<T*>(AnyTypedArrayViewData(target)) + offset;
 
             MOZ_ASSERT(!canConvertInfallibly(MagicValue(JS_ELEMENTS_HOLE)),
                        "the following loop must abort on holes");
 
             const Value* srcValues = source->as<NativeObject>().getDenseElements();
             for (; i < bound; i++) {
                 if (!canConvertInfallibly(srcValues[i]))
                     break;
-                dest[i] = infallibleValueToNative(srcValues[i]);
+                Ops::store(dest + i, infallibleValueToNative(srcValues[i]));
             }
             if (i == len)
                 return true;
         }
 
         // Convert and copy any remaining elements generically.
         RootedValue v(cx);
         for (; i < len; i++) {
@@ -296,18 +338,17 @@ class ElementSpecific
             if (!valueToNative(cx, v, &n))
                 return false;
 
             len = Min(len, target->length());
             if (i >= len)
                 break;
 
             // Compute every iteration in case getElement/valueToNative is wacky.
-            void* data = target->viewData();
-            static_cast<T*>(data)[offset + i] = n;
+            Ops::store(SharedMem<T*>(AnyTypedArrayViewData(target)) + offset + i, n);
         }
 
         return true;
     }
 
   private:
     static bool
     setFromOverlappingTypedArray(JSContext* cx,
@@ -319,81 +360,81 @@ class ElementSpecific
                    "calling wrong setFromTypedArray specialization");
         MOZ_ASSERT(SomeTypedArray::sameBuffer(target, source),
                    "provided arrays don't actually overlap, so it's "
                    "undesirable to use this method");
 
         MOZ_ASSERT(offset <= target->length());
         MOZ_ASSERT(source->length() <= target->length() - offset);
 
-        T* dest = static_cast<T*>(target->viewData()) + offset;
+        SharedMem<T*> dest = SharedMem<T*>(AnyTypedArrayViewData(target)) + offset;
         uint32_t len = source->length();
 
         if (source->type() == target->type()) {
-            mozilla::PodMove(dest, static_cast<T*>(source->viewData()), len);
+            Ops::memmove(dest, SharedMem<T*>(AnyTypedArrayViewData(source)), len*sizeof(T));
             return true;
         }
 
         // Copy |source| in case it overlaps the target elements being set.
         size_t sourceByteLen = len * source->bytesPerElement();
         void* data = target->zone()->template pod_malloc<uint8_t>(sourceByteLen);
         if (!data)
             return false;
-        mozilla::PodCopy(static_cast<uint8_t*>(data),
-                         static_cast<uint8_t*>(source->viewData()),
-                         sourceByteLen);
+        Ops::memcpy(SharedMem<void*>::unshared(data),
+                    AnyTypedArrayViewData(source),
+                    sourceByteLen);
 
         switch (source->type()) {
           case Scalar::Int8: {
             int8_t* src = static_cast<int8_t*>(data);
             for (uint32_t i = 0; i < len; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(*src++));
             break;
           }
           case Scalar::Uint8:
           case Scalar::Uint8Clamped: {
             uint8_t* src = static_cast<uint8_t*>(data);
             for (uint32_t i = 0; i < len; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(*src++));
             break;
           }
           case Scalar::Int16: {
             int16_t* src = static_cast<int16_t*>(data);
             for (uint32_t i = 0; i < len; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(*src++));
             break;
           }
           case Scalar::Uint16: {
             uint16_t* src = static_cast<uint16_t*>(data);
             for (uint32_t i = 0; i < len; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(*src++));
             break;
           }
           case Scalar::Int32: {
             int32_t* src = static_cast<int32_t*>(data);
             for (uint32_t i = 0; i < len; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(*src++));
             break;
           }
           case Scalar::Uint32: {
             uint32_t* src = static_cast<uint32_t*>(data);
             for (uint32_t i = 0; i < len; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(*src++));
             break;
           }
           case Scalar::Float32: {
             float* src = static_cast<float*>(data);
             for (uint32_t i = 0; i < len; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(*src++));
             break;
           }
           case Scalar::Float64: {
             double* src = static_cast<double*>(data);
             for (uint32_t i = 0; i < len; ++i)
-                *dest++ = T(*src++);
+                Ops::store(dest++, T(*src++));
             break;
           }
           default:
             MOZ_CRASH("setFromOverlappingTypedArray with a typed array with bogus type");
         }
 
         js_free(data);
         return true;
@@ -459,16 +500,37 @@ class ElementSpecific
         if (SpecificArray::ArrayTypeID() == Scalar::Uint8Clamped)
             return T(d);
         if (TypeIsUnsigned<T>())
             return T(JS::ToUint32(d));
         return T(JS::ToInt32(d));
     }
 };
 
+template<class SomeTypedArray>
+inline bool
+EitherShared(Handle<SomeTypedArray*> target, HandleObject source)
+{
+    return target->template is<SharedTypedArrayObject>() || source->is<SharedTypedArrayObject>();
+}
+
+template<>
+inline bool
+EitherShared(Handle<SharedTypedArrayObject*> target, HandleObject source)
+{
+    return true;
+}
+
+template<>
+inline bool
+EitherShared(Handle<TypedArrayObject*> target, HandleObject source)
+{
+    return source->is<SharedTypedArrayObject>();
+}
+
 template<typename SomeTypedArray>
 class TypedArrayMethods
 {
     static_assert(mozilla::IsSame<SomeTypedArray, TypedArrayObject>::value ||
                   mozilla::IsSame<SomeTypedArray, SharedTypedArrayObject>::value,
                   "methods must be shared/unshared-specific, not "
                   "element-type-specific");
 
@@ -656,18 +718,18 @@ class TypedArrayMethods
         uint32_t viewByteLength = obj->byteLength();
         MOZ_ASSERT(byteSize <= viewByteLength);
         MOZ_ASSERT(byteDest <= viewByteLength);
         MOZ_ASSERT(byteSrc <= viewByteLength);
         MOZ_ASSERT(byteDest <= viewByteLength - byteSize);
         MOZ_ASSERT(byteSrc <= viewByteLength - byteSize);
 #endif
 
-        uint8_t* data = static_cast<uint8_t*>(obj->viewData());
-        mozilla::PodMove(&data[byteDest], &data[byteSrc], byteSize);
+        SharedMem<uint8_t*> data = SharedMem<uint8_t*>(AnyTypedArrayViewData(obj));
+        SharedOps::memmove(data + byteDest, data + byteSrc, byteSize);
 
         // Step 19.
         args.rval().set(args.thisv());
         return true;
     }
 
     /* set(array[, offset]) */
     static bool
@@ -737,69 +799,109 @@ class TypedArrayMethods
 
   private:
     static bool
     setFromAnyTypedArray(JSContext* cx, Handle<SomeTypedArray*> target, HandleObject source,
                          uint32_t offset)
     {
         MOZ_ASSERT(IsAnyTypedArray(source), "use setFromNonTypedArray");
 
+        bool isShared = EitherShared(target, source);
+
         switch (target->type()) {
           case Scalar::Int8:
-            return ElementSpecific<Int8ArrayType>::setFromAnyTypedArray(cx, target, source, offset);
+            if (isShared)
+                return ElementSpecific<Int8ArrayType, SharedOps>::setFromAnyTypedArray(cx, target, source, offset);
+            return ElementSpecific<Int8ArrayType, UnsharedOps>::setFromAnyTypedArray(cx, target, source, offset);
           case Scalar::Uint8:
-            return ElementSpecific<Uint8ArrayType>::setFromAnyTypedArray(cx, target, source, offset);
+            if (isShared)
+                return ElementSpecific<Uint8ArrayType, SharedOps>::setFromAnyTypedArray(cx, target, source, offset);
+            return ElementSpecific<Uint8ArrayType, UnsharedOps>::setFromAnyTypedArray(cx, target, source, offset);
           case Scalar::Int16:
-            return ElementSpecific<Int16ArrayType>::setFromAnyTypedArray(cx, target, source, offset);
+            if (isShared)
+                return ElementSpecific<Int16ArrayType, SharedOps>::setFromAnyTypedArray(cx, target, source, offset);
+            return ElementSpecific<Int16ArrayType, UnsharedOps>::setFromAnyTypedArray(cx, target, source, offset);
           case Scalar::Uint16:
-            return ElementSpecific<Uint16ArrayType>::setFromAnyTypedArray(cx, target, source, offset);
+            if (isShared)
+                return ElementSpecific<Uint16ArrayType, SharedOps>::setFromAnyTypedArray(cx, target, source, offset);
+            return ElementSpecific<Uint16ArrayType, UnsharedOps>::setFromAnyTypedArray(cx, target, source, offset);
           case Scalar::Int32:
-            return ElementSpecific<Int32ArrayType>::setFromAnyTypedArray(cx, target, source, offset);
+            if (isShared)
+                return ElementSpecific<Int32ArrayType, SharedOps>::setFromAnyTypedArray(cx, target, source, offset);
+            return ElementSpecific<Int32ArrayType, UnsharedOps>::setFromAnyTypedArray(cx, target, source, offset);
           case Scalar::Uint32:
-            return ElementSpecific<Uint32ArrayType>::setFromAnyTypedArray(cx, target, source, offset);
+            if (isShared)
+                return ElementSpecific<Uint32ArrayType, SharedOps>::setFromAnyTypedArray(cx, target, source, offset);
+            return ElementSpecific<Uint32ArrayType, UnsharedOps>::setFromAnyTypedArray(cx, target, source, offset);
           case Scalar::Float32:
-            return ElementSpecific<Float32ArrayType>::setFromAnyTypedArray(cx, target, source, offset);
+            if (isShared)
+                return ElementSpecific<Float32ArrayType, SharedOps>::setFromAnyTypedArray(cx, target, source, offset);
+            return ElementSpecific<Float32ArrayType, UnsharedOps>::setFromAnyTypedArray(cx, target, source, offset);
           case Scalar::Float64:
-            return ElementSpecific<Float64ArrayType>::setFromAnyTypedArray(cx, target, source, offset);
+            if (isShared)
+                return ElementSpecific<Float64ArrayType, SharedOps>::setFromAnyTypedArray(cx, target, source, offset);
+            return ElementSpecific<Float64ArrayType, UnsharedOps>::setFromAnyTypedArray(cx, target, source, offset);
           case Scalar::Uint8Clamped:
-            return ElementSpecific<Uint8ClampedArrayType>::setFromAnyTypedArray(cx, target, source, offset);
+            if (isShared)
+                return ElementSpecific<Uint8ClampedArrayType, SharedOps>::setFromAnyTypedArray(cx, target, source, offset);
+            return ElementSpecific<Uint8ClampedArrayType, UnsharedOps>::setFromAnyTypedArray(cx, target, source, offset);
           case Scalar::Float32x4:
           case Scalar::Int32x4:
           case Scalar::MaxTypedArrayViewType:
             break;
         }
 
         MOZ_CRASH("nonsense target element type");
     }
 
     static bool
     setFromNonTypedArray(JSContext* cx, Handle<SomeTypedArray*> target, HandleObject source,
                          uint32_t len, uint32_t offset)
     {
         MOZ_ASSERT(!IsAnyTypedArray(source), "use setFromAnyTypedArray");
 
+        bool isShared = EitherShared(target, source);
+
         switch (target->type()) {
           case Scalar::Int8:
-            return ElementSpecific<Int8ArrayType>::setFromNonTypedArray(cx, target, source, len, offset);
+            if (isShared)
+                return ElementSpecific<Int8ArrayType, SharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
+            return ElementSpecific<Int8ArrayType, UnsharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
           case Scalar::Uint8:
-            return ElementSpecific<Uint8ArrayType>::setFromNonTypedArray(cx, target, source, len, offset);
+            if (isShared)
+                return ElementSpecific<Uint8ArrayType, SharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
+            return ElementSpecific<Uint8ArrayType, UnsharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
           case Scalar::Int16:
-            return ElementSpecific<Int16ArrayType>::setFromNonTypedArray(cx, target, source, len, offset);
+            if (isShared)
+                return ElementSpecific<Int16ArrayType, SharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
+            return ElementSpecific<Int16ArrayType, UnsharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
           case Scalar::Uint16:
-            return ElementSpecific<Uint16ArrayType>::setFromNonTypedArray(cx, target, source, len, offset);
+            if (isShared)
+                return ElementSpecific<Uint16ArrayType, SharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
+            return ElementSpecific<Uint16ArrayType, UnsharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
           case Scalar::Int32:
-            return ElementSpecific<Int32ArrayType>::setFromNonTypedArray(cx, target, source, len, offset);
+            if (isShared)
+                return ElementSpecific<Int32ArrayType, SharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
+            return ElementSpecific<Int32ArrayType, UnsharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
           case Scalar::Uint32:
-            return ElementSpecific<Uint32ArrayType>::setFromNonTypedArray(cx, target, source, len, offset);
+            if (isShared)
+                return ElementSpecific<Uint32ArrayType, SharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
+            return ElementSpecific<Uint32ArrayType, UnsharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
           case Scalar::Float32:
-            return ElementSpecific<Float32ArrayType>::setFromNonTypedArray(cx, target, source, len, offset);
+            if (isShared)
+                return ElementSpecific<Float32ArrayType, SharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
+            return ElementSpecific<Float32ArrayType, UnsharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
           case Scalar::Float64:
-            return ElementSpecific<Float64ArrayType>::setFromNonTypedArray(cx, target, source, len, offset);
+            if (isShared)
+                return ElementSpecific<Float64ArrayType, SharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
+            return ElementSpecific<Float64ArrayType, UnsharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
           case Scalar::Uint8Clamped:
-            return ElementSpecific<Uint8ClampedArrayType>::setFromNonTypedArray(cx, target, source, len, offset);
+            if (isShared)
+                return ElementSpecific<Uint8ClampedArrayType, SharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
+            return ElementSpecific<Uint8ClampedArrayType, UnsharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
           case Scalar::Float32x4:
           case Scalar::Int32x4:
           case Scalar::MaxTypedArrayViewType:
             break;
         }
 
         MOZ_CRASH("bad target array type");
     }
--- a/js/src/vm/TypedArrayObject.cpp
+++ b/js/src/vm/TypedArrayObject.cpp
@@ -35,16 +35,17 @@
 #include "vm/ArrayBufferObject.h"
 #include "vm/GlobalObject.h"
 #include "vm/Interpreter.h"
 #include "vm/TypedArrayCommon.h"
 #include "vm/WrapperObject.h"
 
 #include "jsatominlines.h"
 
+#include "jit/AtomicOperations-inl.h"
 #include "vm/NativeObject-inl.h"
 #include "vm/Shape-inl.h"
 
 using namespace js;
 using namespace js::gc;
 
 using mozilla::IsNaN;
 using mozilla::NegativeInfinity;
--- a/js/src/vm/TypedArrayObject.h
+++ b/js/src/vm/TypedArrayObject.h
@@ -172,16 +172,19 @@ class TypedArrayObject : public NativeOb
     uint32_t length() const {
         return lengthValue(const_cast<TypedArrayObject*>(this)).toInt32();
     }
 
     void* viewData() const {
         // Keep synced with js::Get<Type>ArrayLengthAndData in jsfriendapi.h!
         return static_cast<void*>(getPrivate(TypedArrayLayout::DATA_SLOT));
     }
+    SharedMem<void*> viewDataShared() const {
+        return SharedMem<void*>::unshared(viewData());
+    }
 
     Value getElement(uint32_t index);
     static void setElement(TypedArrayObject& obj, uint32_t index, double d);
 
     void neuter(void* newData);
 
     /*
      * Byte length above which created typed arrays and data views will have