Merge inbound to m-c. a=merge
author Ryan VanderMeulen <ryanvm@gmail.com>
date Thu, 23 Oct 2014 13:54:47 -0400
changeset 228168 d8de0d7e52e09b884e11c441975692a63758349e
parent 228091 9781037ac4081aa0b075e2ac29f515ea3c325bbd (current diff)
parent 228167 3ab73d1619b993f9c57822527d988dca64877c11 (diff)
child 228195 cda97cb2f4ac695d3451f61189fdd83a31cacfb6
child 228224 216752a6f12bc33b8674919e9993a0aa0bfbc5ee
child 228240 ab90d3ab521e0cf3ce245b98a9d0ec5899567ea5
push id 7326
push user bhearsum@mozilla.com
push date Fri, 28 Nov 2014 15:58:42 +0000
treeherder mozilla-aurora@d3a3b2a0f2f8
reviewers merge
milestone 36.0a1
media/libcubeb/README
testing/web-platform/meta/dom/nodes/MutationObserver-attributes.html.ini
testing/web-platform/meta/dom/nodes/MutationObserver-characterData.html.ini
--- a/accessible/generic/Accessible.h
+++ b/accessible/generic/Accessible.h
@@ -1175,17 +1175,17 @@ private:
  * This class makes sure required tasks are done before and after tree
  * mutations. Currently this only includes group info invalidation. You must
  * have an object of this class on the stack when calling methods that mutate
  * the accessible tree.
  */
 class AutoTreeMutation
 {
 public:
-  AutoTreeMutation(Accessible* aRoot, bool aInvalidationRequired = true) :
+  explicit AutoTreeMutation(Accessible* aRoot, bool aInvalidationRequired = true) :
     mInvalidationRequired(aInvalidationRequired), mRoot(aRoot)
   {
     MOZ_ASSERT(!(mRoot->mStateFlags & Accessible::eSubtreeMutating));
     mRoot->mStateFlags |= Accessible::eSubtreeMutating;
   }
   ~AutoTreeMutation()
   {
     if (mInvalidationRequired)
--- a/accessible/generic/BaseAccessibles.h
+++ b/accessible/generic/BaseAccessibles.h
@@ -113,17 +113,17 @@ protected:
 
 /**
  * A wrapper accessible around native accessible to connect it with
  * crossplatform accessible tree.
  */
 class DummyAccessible : public AccessibleWrap
 {
 public:
-  DummyAccessible(DocAccessible* aDocument = nullptr) :
+  explicit DummyAccessible(DocAccessible* aDocument = nullptr) :
     AccessibleWrap(nullptr, aDocument) { }
 
   virtual uint64_t NativeState() MOZ_OVERRIDE MOZ_FINAL;
   virtual uint64_t NativeInteractiveState() const MOZ_OVERRIDE MOZ_FINAL;
   virtual uint64_t NativeLinkState() const MOZ_OVERRIDE MOZ_FINAL;
   virtual bool NativelyUnavailable() const MOZ_OVERRIDE MOZ_FINAL;
   virtual void ApplyARIAState(uint64_t* aState) const MOZ_OVERRIDE MOZ_FINAL;
 
--- a/accessible/ipc/DocAccessibleChild.h
+++ b/accessible/ipc/DocAccessibleChild.h
@@ -17,17 +17,17 @@ class AccShowEvent;
 
   /*
    * These objects handle content side communication for an accessible document,
    * and their lifetime is the same as the document they represent.
    */
 class DocAccessibleChild : public PDocAccessibleChild
 {
 public:
-  DocAccessibleChild(DocAccessible* aDoc) :
+  explicit DocAccessibleChild(DocAccessible* aDoc) :
     mDoc(aDoc)
   { MOZ_COUNT_CTOR(DocAccessibleChild); }
   ~DocAccessibleChild()
   {
     mDoc->SetIPCDoc(nullptr);
     MOZ_COUNT_DTOR(DocAccessibleChild);
   }
 
--- a/accessible/ipc/DocAccessibleParent.h
+++ b/accessible/ipc/DocAccessibleParent.h
@@ -96,17 +96,17 @@ public:
     mAccessibles.RemoveEntry(aAccessible->ID());
   }
 
 private:
 
   class ProxyEntry : public PLDHashEntryHdr
   {
   public:
-    ProxyEntry(const void*) : mProxy(nullptr) {}
+    explicit ProxyEntry(const void*) : mProxy(nullptr) {}
     ProxyEntry(ProxyEntry&& aOther) :
       mProxy(aOther.mProxy) { aOther.mProxy = nullptr; }
     ~ProxyEntry() { delete mProxy; }
 
     typedef uint64_t KeyType;
     typedef const void* KeyTypePointer;
 
     bool KeyEquals(const void* aKey) const
--- a/accessible/xpcom/xpcAccessibleApplication.h
+++ b/accessible/xpcom/xpcAccessibleApplication.h
@@ -16,17 +16,18 @@ namespace a11y {
 
 /**
  * XPCOM wrapper around ApplicationAccessible class.
  */
 class xpcAccessibleApplication : public xpcAccessibleGeneric,
                                  public nsIAccessibleApplication
 {
 public:
-  xpcAccessibleApplication(Accessible* aIntl) : xpcAccessibleGeneric(aIntl) { }
+  explicit xpcAccessibleApplication(Accessible* aIntl) :
+    xpcAccessibleGeneric(aIntl) { }
 
   NS_DECL_ISUPPORTS_INHERITED
 
   // nsIAccessibleApplication
   NS_IMETHOD GetAppName(nsAString& aName) MOZ_FINAL;
   NS_IMETHOD GetAppVersion(nsAString& aVersion) MOZ_FINAL;
   NS_IMETHOD GetPlatformName(nsAString& aName) MOZ_FINAL;
   NS_IMETHOD GetPlatformVersion(nsAString& aVersion) MOZ_FINAL;
--- a/accessible/xpcom/xpcAccessibleDocument.h
+++ b/accessible/xpcom/xpcAccessibleDocument.h
@@ -19,17 +19,17 @@ namespace a11y {
 
 /**
  * XPCOM wrapper around DocAccessible class.
  */
 class xpcAccessibleDocument : public xpcAccessibleHyperText,
                               public nsIAccessibleDocument
 {
 public:
-  xpcAccessibleDocument(DocAccessible* aIntl) :
+  explicit xpcAccessibleDocument(DocAccessible* aIntl) :
     xpcAccessibleHyperText(aIntl), mCache(kDefaultCacheLength) { }
 
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(xpcAccessibleDocument,
                                            xpcAccessibleGeneric)
 
   // nsIAccessibleDocument
   NS_IMETHOD GetURL(nsAString& aURL) MOZ_FINAL;
--- a/accessible/xpcom/xpcAccessibleGeneric.h
+++ b/accessible/xpcom/xpcAccessibleGeneric.h
@@ -21,17 +21,17 @@ namespace a11y {
  * XPCOM wrapper around Accessible class.
  */
 class xpcAccessibleGeneric : public xpcAccessible,
                              public xpcAccessibleHyperLink,
                              public xpcAccessibleSelectable,
                              public xpcAccessibleValue
 {
 public:
-  xpcAccessibleGeneric(Accessible* aInternal) :
+  explicit xpcAccessibleGeneric(Accessible* aInternal) :
     mIntl(aInternal), mSupportedIfaces(0)
   {
     if (mIntl->IsSelect())
       mSupportedIfaces |= eSelectable;
     if (mIntl->HasNumericValue())
       mSupportedIfaces |= eValue;
     if (mIntl->IsLink())
       mSupportedIfaces |= eHyperLink;
--- a/accessible/xpcom/xpcAccessibleHyperText.h
+++ b/accessible/xpcom/xpcAccessibleHyperText.h
@@ -18,17 +18,18 @@ namespace mozilla {
 namespace a11y {
 
 class xpcAccessibleHyperText : public xpcAccessibleGeneric,
                                public nsIAccessibleText,
                                public nsIAccessibleEditableText,
                                public nsIAccessibleHyperText
 {
 public:
-  xpcAccessibleHyperText(Accessible* aIntl) : xpcAccessibleGeneric(aIntl)
+  explicit xpcAccessibleHyperText(Accessible* aIntl) :
+    xpcAccessibleGeneric(aIntl)
   {
     if (mIntl->IsHyperText() && mIntl->AsHyperText()->IsTextRole())
       mSupportedIfaces |= eText;
   }
 
   NS_DECL_ISUPPORTS_INHERITED
 
   NS_DECL_NSIACCESSIBLETEXT
--- a/accessible/xpcom/xpcAccessibleImage.h
+++ b/accessible/xpcom/xpcAccessibleImage.h
@@ -13,17 +13,18 @@
 
 namespace mozilla {
 namespace a11y {
 
 class xpcAccessibleImage : public xpcAccessibleGeneric,
                            public nsIAccessibleImage
 {
 public:
-  xpcAccessibleImage(Accessible* aIntl) : xpcAccessibleGeneric(aIntl) { }
+  explicit xpcAccessibleImage(Accessible* aIntl) :
+    xpcAccessibleGeneric(aIntl) { }
 
   NS_DECL_ISUPPORTS_INHERITED
 
   NS_IMETHOD GetImagePosition(uint32_t aCoordType,
                               int32_t* aX, int32_t* aY) MOZ_FINAL;
   NS_IMETHOD GetImageSize(int32_t* aWidth, int32_t* aHeight) MOZ_FINAL;
 
 protected:
--- a/accessible/xpcom/xpcAccessibleTable.h
+++ b/accessible/xpcom/xpcAccessibleTable.h
@@ -15,17 +15,18 @@ namespace a11y {
 
 /**
  * XPCOM wrapper around TableAccessible class.
  */
 class xpcAccessibleTable : public xpcAccessibleGeneric,
                            public nsIAccessibleTable
 {
 public:
-  xpcAccessibleTable(Accessible* aIntl) : xpcAccessibleGeneric(aIntl) { }
+  explicit xpcAccessibleTable(Accessible* aIntl) :
+    xpcAccessibleGeneric(aIntl) { }
 
   NS_DECL_ISUPPORTS_INHERITED
 
   // nsIAccessibleTable
   NS_IMETHOD GetCaption(nsIAccessible** aCaption) MOZ_FINAL;
   NS_IMETHOD GetSummary(nsAString& aSummary) MOZ_FINAL;
   NS_IMETHOD GetColumnCount(int32_t* aColumnCount) MOZ_FINAL;
   NS_IMETHOD GetRowCount(int32_t* aRowCount) MOZ_FINAL;
--- a/accessible/xpcom/xpcAccessibleTableCell.h
+++ b/accessible/xpcom/xpcAccessibleTableCell.h
@@ -16,17 +16,18 @@ namespace a11y {
 
 /**
  * XPCOM wrapper around TableAccessibleCell class.
  */
 class xpcAccessibleTableCell : public xpcAccessibleHyperText,
                                public nsIAccessibleTableCell
 {
 public:
-  xpcAccessibleTableCell(Accessible* aIntl) : xpcAccessibleHyperText(aIntl) { }
+  explicit xpcAccessibleTableCell(Accessible* aIntl) :
+    xpcAccessibleHyperText(aIntl) { }
 
   NS_DECL_ISUPPORTS_INHERITED
 
   // nsIAccessibleTableCell
   NS_IMETHOD GetTable(nsIAccessibleTable** aTable) MOZ_FINAL;
   NS_IMETHOD GetColumnIndex(int32_t* aColIdx) MOZ_FINAL;
   NS_IMETHOD GetRowIndex(int32_t* aRowIdx) MOZ_FINAL;
   NS_IMETHOD GetColumnExtent(int32_t* aExtent) MOZ_FINAL;
--- a/browser/locales/all-locales
+++ b/browser/locales/all-locales
@@ -20,17 +20,16 @@ ja
 ja-JP-mac
 kk
 ko
 lt
 lv
 nb-NO
 nl
 nn-NO
-oc
 pl
 pt-BR
 pt-PT
 ru
 sk
 sl
 sv-SE
 th
--- a/content/base/src/Element.cpp
+++ b/content/base/src/Element.cpp
@@ -958,16 +958,26 @@ Element::CreateShadowRoot(ErrorResult& a
   nsXBLPrototypeBinding* protoBinding = new nsXBLPrototypeBinding();
   aError = protoBinding->Init(NS_LITERAL_CSTRING("shadowroot"),
                               docInfo, this, true);
   if (aError.Failed()) {
     delete protoBinding;
     return nullptr;
   }
 
+  nsIDocument* doc = GetCrossShadowCurrentDoc();
+  nsIContent* destroyedFramesFor = nullptr;
+  if (doc) {
+    nsIPresShell* shell = doc->GetShell();
+    if (shell) {
+      shell->DestroyFramesFor(this, &destroyedFramesFor);
+    }
+  }
+  MOZ_ASSERT(!GetPrimaryFrame());
+
   // Unlike for XBL, false is the default for inheriting style.
   protoBinding->SetInheritsStyle(false);
 
   // Calling SetPrototypeBinding takes ownership of protoBinding.
   docInfo->SetPrototypeBinding(NS_LITERAL_CSTRING("shadowroot"), protoBinding);
 
   nsRefPtr<ShadowRoot> shadowRoot = new ShadowRoot(this, nodeInfo.forget(),
                                                    protoBinding);
@@ -993,21 +1003,21 @@ Element::CreateShadowRoot(ErrorResult& a
   nsRefPtr<nsXBLBinding> xblBinding = new nsXBLBinding(shadowRoot, protoBinding);
   shadowRoot->SetAssociatedBinding(xblBinding);
   xblBinding->SetBoundElement(this);
 
   SetXBLBinding(xblBinding);
 
   // Recreate the frame for the bound content because binding a ShadowRoot
   // changes how things are rendered.
-  nsIDocument* doc = GetCrossShadowCurrentDoc();
   if (doc) {
-    nsIPresShell *shell = doc->GetShell();
+    MOZ_ASSERT(doc == GetCrossShadowCurrentDoc());
+    nsIPresShell* shell = doc->GetShell();
     if (shell) {
-      shell->RecreateFramesFor(this);
+      shell->CreateFramesFor(destroyedFramesFor);
     }
   }
 
   return shadowRoot.forget();
 }
 
 NS_IMPL_CYCLE_COLLECTION(DestinationInsertionPointList, mParent, mDestinationPoints)
 
--- a/content/base/src/nsDOMMutationObserver.cpp
+++ b/content/base/src/nsDOMMutationObserver.cpp
@@ -448,56 +448,92 @@ nsDOMMutationObserver::RescheduleForRun(
 }
 
 void
 nsDOMMutationObserver::Observe(nsINode& aTarget,
                                const mozilla::dom::MutationObserverInit& aOptions,
                                mozilla::ErrorResult& aRv)
 {
 
-  if (!(aOptions.mChildList || aOptions.mAttributes || aOptions.mCharacterData)) {
-    aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+  bool childList = aOptions.mChildList;
+  bool attributes =
+    aOptions.mAttributes.WasPassed() &&
+    aOptions.mAttributes.Value();
+  bool characterData =
+    aOptions.mCharacterData.WasPassed() &&
+    aOptions.mCharacterData.Value();
+  bool subtree = aOptions.mSubtree;
+  bool attributeOldValue =
+    aOptions.mAttributeOldValue.WasPassed() &&
+    aOptions.mAttributeOldValue.Value();
+  bool characterDataOldValue =
+    aOptions.mCharacterDataOldValue.WasPassed() &&
+    aOptions.mCharacterDataOldValue.Value();
+
+  if (!aOptions.mAttributes.WasPassed() &&
+      (aOptions.mAttributeOldValue.WasPassed() ||
+       aOptions.mAttributeFilter.WasPassed())) {
+    attributes = true;
+  }
+
+  if (!aOptions.mCharacterData.WasPassed() &&
+      aOptions.mCharacterDataOldValue.WasPassed()) {
+    characterData = true;
+  }
+
+  if (!(childList || attributes || characterData)) {
+    aRv.Throw(NS_ERROR_DOM_TYPE_ERR);
     return;
   }
-  if (aOptions.mAttributeOldValue && !aOptions.mAttributes) {
-    aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+
+  if (aOptions.mAttributeOldValue.WasPassed() &&
+      aOptions.mAttributeOldValue.Value() &&
+      aOptions.mAttributes.WasPassed() &&
+      !aOptions.mAttributes.Value()) {
+    aRv.Throw(NS_ERROR_DOM_TYPE_ERR);
     return;
   }
-  if (aOptions.mCharacterDataOldValue && !aOptions.mCharacterData) {
-    aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
+
+  if (aOptions.mAttributeFilter.WasPassed() &&
+      aOptions.mAttributes.WasPassed() &&
+      !aOptions.mAttributes.Value()) {
+    aRv.Throw(NS_ERROR_DOM_TYPE_ERR);
+    return;
+  }
+
+  if (aOptions.mCharacterDataOldValue.WasPassed() &&
+      aOptions.mCharacterDataOldValue.Value() &&
+      aOptions.mCharacterData.WasPassed() &&
+      !aOptions.mCharacterData.Value()) {
+    aRv.Throw(NS_ERROR_DOM_TYPE_ERR);
     return;
   }
 
   nsCOMArray<nsIAtom> filters;
   bool allAttrs = true;
   if (aOptions.mAttributeFilter.WasPassed()) {
     allAttrs = false;
     const mozilla::dom::Sequence<nsString>& filtersAsString =
       aOptions.mAttributeFilter.Value();
     uint32_t len = filtersAsString.Length();
-
-    if (len != 0 && !aOptions.mAttributes) {
-      aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
-      return;
-    }
     filters.SetCapacity(len);
 
     for (uint32_t i = 0; i < len; ++i) {
       nsCOMPtr<nsIAtom> a = do_GetAtom(filtersAsString[i]);
       filters.AppendObject(a);
     }
   }
 
   nsMutationReceiver* r = GetReceiverFor(&aTarget, true);
-  r->SetChildList(aOptions.mChildList);
-  r->SetAttributes(aOptions.mAttributes);
-  r->SetCharacterData(aOptions.mCharacterData);
-  r->SetSubtree(aOptions.mSubtree);
-  r->SetAttributeOldValue(aOptions.mAttributeOldValue);
-  r->SetCharacterDataOldValue(aOptions.mCharacterDataOldValue);
+  r->SetChildList(childList);
+  r->SetAttributes(attributes);
+  r->SetCharacterData(characterData);
+  r->SetSubtree(subtree);
+  r->SetAttributeOldValue(attributeOldValue);
+  r->SetCharacterDataOldValue(characterDataOldValue);
   r->SetAttributeFilter(filters);
   r->SetAllAttributes(allAttrs);
   r->RemoveClones();
 
 #ifdef DEBUG
   for (int32_t i = 0; i < mReceivers.Count(); ++i) {
     NS_WARN_IF_FALSE(mReceivers[i]->Target(),
                      "All the receivers should have a target!");
@@ -536,21 +572,21 @@ nsDOMMutationObserver::TakeRecords(
 void
 nsDOMMutationObserver::GetObservingInfo(nsTArray<Nullable<MutationObservingInfo> >& aResult)
 {
   aResult.SetCapacity(mReceivers.Count());
   for (int32_t i = 0; i < mReceivers.Count(); ++i) {
     MutationObservingInfo& info = aResult.AppendElement()->SetValue();
     nsMutationReceiver* mr = mReceivers[i];
     info.mChildList = mr->ChildList();
-    info.mAttributes = mr->Attributes();
-    info.mCharacterData = mr->CharacterData();
+    info.mAttributes.Construct(mr->Attributes());
+    info.mCharacterData.Construct(mr->CharacterData());
     info.mSubtree = mr->Subtree();
-    info.mAttributeOldValue = mr->AttributeOldValue();
-    info.mCharacterDataOldValue = mr->CharacterDataOldValue();
+    info.mAttributeOldValue.Construct(mr->AttributeOldValue());
+    info.mCharacterDataOldValue.Construct(mr->CharacterDataOldValue());
     nsCOMArray<nsIAtom>& filters = mr->AttributeFilter();
     if (filters.Count()) {
       info.mAttributeFilter.Construct();
       mozilla::dom::Sequence<nsString>& filtersAsStrings =
         info.mAttributeFilter.Value();
       for (int32_t j = 0; j < filters.Count(); ++j) {
         filtersAsStrings.AppendElement(nsDependentAtomString(filters[j]));
       }
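
Caller-visible effect of the Observe() changes above, together with the MutationObserver.webidl change further down (the dictionary members lose their "false" defaults so the implementation can tell "not passed" from "passed as false"). A JS sketch; "node" and "callback" are placeholders, not part of the patch:

var m = new MutationObserver(callback);

// attributeOldValue and attributeFilter now imply attributes, and
// characterDataOldValue implies characterData, when the base option
// is omitted -- so these calls no longer throw:
m.observe(node, { attributeOldValue: true });
m.observe(node, { attributeFilter: ["foo"] });
m.observe(node, { characterDataOldValue: true });

// Explicit contradictions still throw, now as TypeError (was SyntaxError):
try {
  m.observe(node, { attributes: false, attributeOldValue: true });
} catch (e) { /* e.name == "TypeError" */ }

// Observing nothing at all is likewise a TypeError:
try {
  m.observe(node, {});
} catch (e) { /* e.name == "TypeError" */ }
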
--- a/content/base/test/test_mutationobservers.html
+++ b/content/base/test/test_mutationobservers.html
@@ -96,56 +96,49 @@ function runTest() {
 
   var e = null;
   try {
     m.observe(document, {});
   } catch (ex) {
     e = ex;
   }
   ok(e, "Should have thrown an exception");
-  is(e.name, "SyntaxError", "Should have thrown SyntaxError");
-  is(e.code, DOMException.SYNTAX_ERR, "Should have thrown DOMException.SYNTAX_ERR");
+  is(e.name, "TypeError", "Should have thrown TypeError");
 
   e = null;
   try {
     m.observe(document, { childList: true, attributeOldValue: true });
   } catch (ex) {
     e = ex;
   }
-  ok(e, "Should have thrown an exception");
-  is(e.name, "SyntaxError", "Should have thrown SyntaxError");
-  is(e.code, DOMException.SYNTAX_ERR, "Should have thrown DOMException.SYNTAX_ERR");
+  ok(!e, "Shouldn't have thrown an exception");
 
   e = null;
   try {
     m.observe(document, { childList: true, attributeFilter: ["foo"] });
   } catch (ex) {
     e = ex;
   }
-  ok(e, "Should have thrown an exception");
-  is(e.name, "SyntaxError", "Should have thrown SyntaxError");
-  is(e.code, DOMException.SYNTAX_ERR, "Should have thrown DOMException.SYNTAX_ERR");
+  ok(!e, "Shouldn't have thrown an exception");
 
   e = null;
   try {
     m.observe(document, { childList: true, characterDataOldValue: true });
   } catch (ex) {
     e = ex;
   }
-  ok(e, "Should have thrown an exception");
-  is(e.name, "SyntaxError", "Should have thrown SyntaxError");
-  is(e.code, DOMException.SYNTAX_ERR, "Should have thrown DOMException.SYNTAX_ERR");
+  ok(!e, "Shouldn't have thrown an exception");
 
   e = null;
   try {
     m.observe(document);
   } catch (ex) {
     e = ex;
   }
-  ok(e, "Should have thrown an exception");  
+  ok(e, "Should have thrown an exception");
 
   m = new M(function(records, observer) {
       is(observer, m, "2nd parameter should be the mutation observer");
       is(observer, this, "2nd parameter should be 'this'");
       is(records.length, 1, "Should have one record.");
       is(records[0].type, "attributes", "Should have got attributes record");
       is(records[0].target, div, "Should have got div as target");
       is(records[0].attributeName, "foo", "Should have got record about foo attribute");
--- a/content/media/fmp4/MP4Reader.cpp
+++ b/content/media/fmp4/MP4Reader.cpp
@@ -589,16 +589,22 @@ MP4Reader::ResetDecode()
 void
 MP4Reader::Output(TrackType aTrack, MediaData* aSample)
 {
+  if (!aSample) {
+    NS_WARNING("MP4Reader::Output() passed a null sample");
+    Error(aTrack);
+    return;
+  }
+
 #ifdef LOG_SAMPLE_DECODE
   LOG("Decoded %s sample time=%lld dur=%lld",
       TrackTypeToStr(aTrack), aSample->mTime, aSample->mDuration);
 #endif
 
   DecoderData& data = GetDecoderData(aTrack);
   // Don't accept output while we're flushing.
   MonitorAutoLock mon(data.mMonitor);
   if (data.mIsFlushing) {
     delete aSample;
     LOG("MP4Reader produced output while flushing, discarding.");
     mon.NotifyAll();
     return;
--- a/content/media/fmp4/eme/EMEH264Decoder.cpp
+++ b/content/media/fmp4/eme/EMEH264Decoder.cpp
@@ -151,25 +151,26 @@ EMEH264Decoder::Decoded(GMPVideoi420Fram
   // V plane (Cr)
   b.mPlanes[2].mData = aDecodedFrame->Buffer(kGMPVPlane);
   b.mPlanes[2].mStride = aDecodedFrame->Stride(kGMPVPlane);
   b.mPlanes[2].mHeight = height / 2;
   b.mPlanes[2].mWidth = width / 2;
   b.mPlanes[2].mOffset = 0;
   b.mPlanes[2].mSkip = 0;
 
+  gfx::IntRect pictureRegion(0, 0, width, height);
   VideoData *v = VideoData::Create(mVideoInfo,
-                                    mImageContainer,
-                                    mLastStreamOffset,
-                                    aDecodedFrame->Timestamp(),
-                                    aDecodedFrame->Duration(),
-                                    b,
-                                    false,
-                                    -1,
-                                    ToIntRect(mPictureRegion));
+                                   mImageContainer,
+                                   mLastStreamOffset,
+                                   aDecodedFrame->Timestamp(),
+                                   aDecodedFrame->Duration(),
+                                   b,
+                                   false,
+                                   -1,
+                                   pictureRegion);
   aDecodedFrame->Destroy();
   mCallback->Output(v);
 }
 
 void
 EMEH264Decoder::ReceivedDecodedReferenceFrame(const uint64_t aPictureId)
 {
   // Ignore.
@@ -256,17 +257,16 @@ EMEH264Decoder::GmpInit()
   rv = mGMP->InitDecode(codec,
                         codecSpecific,
                         this,
                         PR_GetNumberOfProcessors());
   NS_ENSURE_SUCCESS(rv, rv);
 
   mVideoInfo.mDisplay = nsIntSize(mConfig.display_width, mConfig.display_height);
   mVideoInfo.mHasVideo = true;
-  mPictureRegion = nsIntRect(0, 0, mConfig.display_width, mConfig.display_height);
 
   return NS_OK;
 }
 
 nsresult
 EMEH264Decoder::GmpInput(MP4Sample* aSample)
 {
   MOZ_ASSERT(IsOnGMPThread());
--- a/content/media/fmp4/eme/EMEH264Decoder.h
+++ b/content/media/fmp4/eme/EMEH264Decoder.h
@@ -94,17 +94,16 @@ private:
 
   nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
   nsCOMPtr<nsIThread> mGMPThread;
   nsRefPtr<CDMProxy> mProxy;
   GMPVideoDecoderProxy* mGMP;
   GMPVideoHost* mHost;
 
   VideoInfo mVideoInfo;
-  nsIntRect mPictureRegion;
   const mp4_demuxer::VideoDecoderConfig& mConfig;
   nsRefPtr<layers::ImageContainer> mImageContainer;
   nsRefPtr<MediaTaskQueue> mTaskQueue;
   MediaDataDecoderCallback* mCallback;
   int64_t mLastStreamOffset;
   Monitor mMonitor;
   bool mFlushComplete;
 };
--- a/content/media/gmp/GMPParent.cpp
+++ b/content/media/gmp/GMPParent.cpp
@@ -354,31 +354,54 @@ GMPParent::Shutdown()
   if (!mDeleteProcessOnlyOnUnload) {
     // Destroy ourselves and rise from the fire to save memory
     nsRefPtr<GMPParent> self(this);
     mService->ReAddOnGMPThread(self);
   } // else we've been asked to die and stay dead
   MOZ_ASSERT(mState == GMPStateNotLoaded);
 }
 
+class NotifyGMPShutdownTask : public nsRunnable {
+public:
+  NotifyGMPShutdownTask(const nsAString& aNodeId)
+    : mNodeId(aNodeId)
+  {
+  }
+  NS_IMETHOD Run() {
+    MOZ_ASSERT(NS_IsMainThread());
+    nsCOMPtr<nsIObserverService> obsService = mozilla::services::GetObserverService();
+    MOZ_ASSERT(obsService);
+    if (obsService) {
+      obsService->NotifyObservers(nullptr, "gmp-shutdown", mNodeId.get());
+    }
+    return NS_OK;
+  }
+  nsString mNodeId;
+};
+
 void
 GMPParent::DeleteProcess()
 {
   LOGD(("%s::%s: %p", __CLASS__, __FUNCTION__, this));
 
   if (mState != GMPStateClosing) {
     // Don't Close() twice!
     // Probably remove when bug 1043671 is resolved
     mState = GMPStateClosing;
     Close();
   }
   mProcess->Delete();
   LOGD(("%s::%s: Shut down process %p", __CLASS__, __FUNCTION__, (void *) mProcess));
   mProcess = nullptr;
   mState = GMPStateNotLoaded;
+
+  NS_DispatchToMainThread(
+    new NotifyGMPShutdownTask(NS_ConvertUTF8toUTF16(mNodeId)),
+    NS_DISPATCH_NORMAL);
+
 }
 
 void
 GMPParent::VideoDecoderDestroyed(GMPVideoDecoderParent* aDecoder)
 {
   MOZ_ASSERT(GMPThread() == NS_GetCurrentThread());
 
   // If the constructor fails, we'll get called before it's added
@@ -979,16 +1002,20 @@ GMPParent::SetNodeId(const nsACString& a
   MOZ_ASSERT(CanBeUsedFrom(aNodeId));
   mNodeId = aNodeId;
 }
 
 bool
 GMPParent::RecvAsyncShutdownRequired()
 {
   LOGD(("%s::%s: %p", __CLASS__, __FUNCTION__, this));
+  if (mAsyncShutdownRequired) {
+    NS_WARNING("Received AsyncShutdownRequired message more than once!");
+    return true;
+  }
   mAsyncShutdownRequired = true;
   mService->AsyncShutdownNeeded(this);
   return true;
 }
 
 bool
 GMPParent::RecvAsyncShutdownComplete()
 {
--- a/content/media/gmp/GMPService.cpp
+++ b/content/media/gmp/GMPService.cpp
@@ -513,16 +513,17 @@ GeckoMediaPluginService::GetGMPDecryptor
 }
 
 void
 GeckoMediaPluginService::AsyncShutdownNeeded(GMPParent* aParent)
 {
   LOGD(("%s::%s %p", __CLASS__, __FUNCTION__, aParent));
   MOZ_ASSERT(NS_GetCurrentThread() == mGMPThread);
 
+  MOZ_ASSERT(!mAsyncShutdownPlugins.Contains(aParent));
   mAsyncShutdownPlugins.AppendElement(aParent);
 }
 
 void
 GeckoMediaPluginService::AsyncShutdownComplete(GMPParent* aParent)
 {
   LOGD(("%s::%s %p", __CLASS__, __FUNCTION__, aParent));
   MOZ_ASSERT(NS_GetCurrentThread() == mGMPThread);
--- a/content/media/gtest/TestGMPCrossOrigin.cpp
+++ b/content/media/gtest/TestGMPCrossOrigin.cpp
@@ -101,16 +101,76 @@ GMPTestRunner::RunTestGMPCrossOrigin()
               encoder1->ParentID() == encoder2->ParentID());
 
   if (decoder1) decoder1->Close();
   if (decoder2) decoder2->Close();
   if (encoder1) encoder1->Close();
   if (encoder2) encoder2->Close();
 }
 
+static already_AddRefed<nsIThread>
+GetGMPThread()
+{
+  nsRefPtr<GeckoMediaPluginService> service =
+    GeckoMediaPluginService::GetGeckoMediaPluginService();
+  nsCOMPtr<nsIThread> thread;
+  EXPECT_TRUE(NS_SUCCEEDED(service->GetThread(getter_AddRefs(thread))));
+  return thread.forget();
+}
+
+class GMPShutdownObserver : public nsIRunnable
+                          , public nsIObserver {
+public:
+  GMPShutdownObserver(nsIRunnable* aShutdownTask,
+                      nsIRunnable* aContinuation,
+                      const nsACString& aNodeId)
+    : mShutdownTask(aShutdownTask)
+    , mContinuation(aContinuation)
+    , mNodeId(NS_ConvertUTF8toUTF16(aNodeId))
+  {}
+
+  NS_DECL_THREADSAFE_ISUPPORTS
+
+  NS_IMETHOD Run() MOZ_OVERRIDE {
+    MOZ_ASSERT(NS_IsMainThread());
+    nsCOMPtr<nsIObserverService> observerService =
+        mozilla::services::GetObserverService();
+    EXPECT_TRUE(observerService);
+    observerService->AddObserver(this, "gmp-shutdown", false);
+
+    nsCOMPtr<nsIThread> thread(GetGMPThread());
+    thread->Dispatch(mShutdownTask, NS_DISPATCH_NORMAL);
+    return NS_OK;
+  }
+
+  NS_IMETHOD Observe(nsISupports* aSubject,
+                     const char* aTopic,
+                     const char16_t* aSomeData) MOZ_OVERRIDE
+  {
+    if (!strcmp(aTopic, "gmp-shutdown") &&
+        mNodeId.Equals(nsDependentString(aSomeData))) {
+      nsCOMPtr<nsIObserverService> observerService =
+          mozilla::services::GetObserverService();
+      EXPECT_TRUE(observerService);
+      observerService->RemoveObserver(this, "gmp-shutdown");
+      nsCOMPtr<nsIThread> thread(GetGMPThread());
+      thread->Dispatch(mContinuation, NS_DISPATCH_NORMAL);
+    }
+    return NS_OK;
+  }
+
+private:
+  virtual ~GMPShutdownObserver() {}
+  nsRefPtr<nsIRunnable> mShutdownTask;
+  nsRefPtr<nsIRunnable> mContinuation;
+  const nsString mNodeId;
+};
+
+NS_IMPL_ISUPPORTS(GMPShutdownObserver, nsIRunnable, nsIObserver)
+
 class NotifyObserversTask : public nsRunnable {
 public:
   NotifyObserversTask(const char* aTopic)
     : mTopic(aTopic)
   {}
   NS_IMETHOD Run() {
     MOZ_ASSERT(NS_IsMainThread());
     nsCOMPtr<nsIObserverService> observerService =
@@ -225,25 +285,16 @@ AssertIsOnGMPThread()
   service->GetThread(getter_AddRefs(thread));
   MOZ_ASSERT(thread);
   nsCOMPtr<nsIThread> currentThread;
   DebugOnly<nsresult> rv = NS_GetCurrentThread(getter_AddRefs(currentThread));
   MOZ_ASSERT(NS_SUCCEEDED(rv));
   MOZ_ASSERT(currentThread == thread);
 }
 
-static already_AddRefed<nsIThread>
-GetGMPThread()
-{
-  nsRefPtr<GeckoMediaPluginService> service =
-    GeckoMediaPluginService::GetGeckoMediaPluginService();
-  nsCOMPtr<nsIThread> thread;
-  EXPECT_TRUE(NS_SUCCEEDED(service->GetThread(getter_AddRefs(thread))));
-  return thread.forget();
-}
 class GMPStorageTest : public GMPDecryptorProxyCallback
 {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(GMPStorageTest)
 
   void DoTest(void (GMPStorageTest::*aTestMethod)()) {
     EnsureNSSInitializedChromeOrContent();
     nsCOMPtr<nsIThread> thread(GetGMPThread());
     ClearGMPStorage(NS_NewRunnableMethod(this, aTestMethod), thread);
@@ -318,25 +369,23 @@ class GMPStorageTest : public GMPDecrypt
 
   void CreateDecryptor(const nsAString& aOrigin,
                        const nsAString& aTopLevelOrigin,
                        bool aInPBMode) {
     nsRefPtr<GeckoMediaPluginService> service =
       GeckoMediaPluginService::GetGeckoMediaPluginService();
     EXPECT_TRUE(service);
 
-    const nsCString nodeId = GetNodeId(aOrigin,
-                                       aTopLevelOrigin,
-                                       aInPBMode);
-    EXPECT_TRUE(!nodeId.IsEmpty());
+    mNodeId = GetNodeId(aOrigin, aTopLevelOrigin, aInPBMode);
+    EXPECT_TRUE(!mNodeId.IsEmpty());
 
     nsTArray<nsCString> tags;
     tags.AppendElement(NS_LITERAL_CSTRING("fake"));
 
-    nsresult rv = service->GetGMPDecryptor(&tags, nodeId, &mDecryptor);
+    nsresult rv = service->GetGMPDecryptor(&tags, mNodeId, &mDecryptor);
     EXPECT_TRUE(NS_SUCCEEDED(rv));
     EXPECT_TRUE(!!mDecryptor);
 
     if (mDecryptor) {
       mDecryptor->Init(this);
     }
   }
 
@@ -391,17 +440,16 @@ class GMPStorageTest : public GMPDecrypt
                     false);
 
     Expect(NS_LITERAL_CSTRING("retrieve crossOriginTestRecordId succeeded (length 0 bytes)"),
            NS_NewRunnableMethod(this, &GMPStorageTest::SetFinished));
     Update(NS_LITERAL_CSTRING("retrieve crossOriginTestRecordId"));
   }
 
   void TestPBStorage() {
-
     // Open decryptor on one, origin, write a record, close decryptor,
     // open another, and test that record can be read, close decryptor,
     // then send pb-last-context-closed notification, then open decryptor
     // and check that it can't read that data; it should have been purged.
     CreateDecryptor(NS_LITERAL_STRING("pb1.com"),
                     NS_LITERAL_STRING("pb2.com"),
                     true);
 
@@ -435,34 +483,109 @@ class GMPStorageTest : public GMPDecrypt
     CreateDecryptor(NS_LITERAL_STRING("pb1.com"),
                     NS_LITERAL_STRING("pb2.com"),
                     true);
 
     Expect(NS_LITERAL_CSTRING("retrieve pbdata succeeded (length 0 bytes)"),
            NS_NewRunnableMethod(this,
               &GMPStorageTest::SetFinished));
     Update(NS_LITERAL_CSTRING("retrieve pbdata"));
+  }
 
+  void CreateAsyncShutdownTimeoutGMP(const nsAString& aOrigin1,
+                                     const nsAString& aOrigin2) {
+    CreateDecryptor(aOrigin1, aOrigin2, false);
+    Update(NS_LITERAL_CSTRING("shutdown-mode timeout"));
+    Shutdown();
+  }
+
+  void TestAsyncShutdownTimeout() {
+    // Create decryptors that timeout in their async shutdown.
+    // If the gtest hangs on shutdown, test fails!
+    CreateAsyncShutdownTimeoutGMP(NS_LITERAL_STRING("example7.com"),
+                                  NS_LITERAL_STRING("example8.com"));
+    CreateAsyncShutdownTimeoutGMP(NS_LITERAL_STRING("example9.com"),
+                                  NS_LITERAL_STRING("example10.com"));
+    CreateAsyncShutdownTimeoutGMP(NS_LITERAL_STRING("example11.com"),
+                                  NS_LITERAL_STRING("example12.com"));
+    SetFinished();
+  }
+
+  void TestAsyncShutdownStorage() {
+    // Test that a GMP can write to storage during shutdown, and retrieve
+    // that written data in a subsequent session.
+    CreateDecryptor(NS_LITERAL_STRING("example13.com"),
+                    NS_LITERAL_STRING("example14.com"),
+                    false);
+
+    // Instruct the GMP to write a token (the current timestamp, so it's
+    // unique) during async shutdown, then shutdown the plugin, re-create
+    // it, and check that the token was successfully stored.
+    auto t = time(0);
+    nsCString update("shutdown-mode token ");
+    nsCString token;
+    token.AppendInt((int64_t)t);
+    update.Append(token);
+
+    // Wait for a response from the GMP, so we know it's had time to receive
+    // the token.
+    nsCString response("shutdown-token received ");
+    response.Append(token);
+    Expect(response, NS_NewRunnableMethodWithArg<nsCString>(this,
+      &GMPStorageTest::TestAsyncShutdownStorage_ReceivedShutdownToken, token));
+
+    Update(update);
+  }
+
+  void TestAsyncShutdownStorage_ReceivedShutdownToken(const nsCString& aToken) {
+    ShutdownThen(NS_NewRunnableMethodWithArg<nsCString>(this,
+      &GMPStorageTest::TestAsyncShutdownStorage_AsyncShutdownComplete, aToken));
+  }
+
+  void TestAsyncShutdownStorage_AsyncShutdownComplete(const nsCString& aToken) {
+    // Create a new instance of the plugin, retrieve the token written
+    // during shutdown and verify it is correct.
+    CreateDecryptor(NS_LITERAL_STRING("example13.com"),
+                    NS_LITERAL_STRING("example14.com"),
+                    false);
+    nsCString response("retrieved shutdown-token ");
+    response.Append(aToken);
+    Expect(response,
+           NS_NewRunnableMethod(this, &GMPStorageTest::SetFinished));
+    Update(NS_LITERAL_CSTRING("retrieve-shutdown-token"));
   }
 
   void Expect(const nsCString& aMessage, nsIRunnable* aContinuation) {
     mExpected.AppendElement(ExpectedMessage(aMessage, aContinuation));
   }
 
   void AwaitFinished() {
     while (!mFinished) {
       NS_ProcessNextEvent(nullptr, true);
     }
     mFinished = false;
   }
 
+  void ShutdownThen(nsIRunnable* aContinuation) {
+    EXPECT_TRUE(!!mDecryptor);
+    if (!mDecryptor) {
+      return;
+    }
+    EXPECT_FALSE(mNodeId.IsEmpty());
+    nsRefPtr<GMPShutdownObserver> task(
+      new GMPShutdownObserver(NS_NewRunnableMethod(this, &GMPStorageTest::Shutdown),
+                              aContinuation, mNodeId));
+    NS_DispatchToMainThread(task, NS_DISPATCH_NORMAL);
+  }
+
   void Shutdown() {
     if (mDecryptor) {
       mDecryptor->Close();
       mDecryptor = nullptr;
+      mNodeId = EmptyCString();
     }
   }
 
   void Dummy() {
   }
 
   void SetFinished() {
     mFinished = true;
@@ -526,16 +649,17 @@ private:
     nsRefPtr<nsIRunnable> mContinuation;
   };
 
   nsTArray<ExpectedMessage> mExpected;
 
   GMPDecryptorProxy* mDecryptor;
   Monitor mMonitor;
   Atomic<bool> mFinished;
+  nsCString mNodeId;
 };
 
 void
 GMPTestRunner::DoTest(void (GMPTestRunner::*aTestMethod)())
 {
   nsRefPtr<GeckoMediaPluginService> service =
     GeckoMediaPluginService::GetGeckoMediaPluginService();
   nsCOMPtr<nsIThread> thread;
@@ -568,8 +692,18 @@ TEST(GeckoMediaPlugins, GMPStorageCrossO
   nsRefPtr<GMPStorageTest> runner = new GMPStorageTest();
   runner->DoTest(&GMPStorageTest::TestCrossOriginStorage);
 }
 
 TEST(GeckoMediaPlugins, GMPStoragePrivateBrowsing) {
   nsRefPtr<GMPStorageTest> runner = new GMPStorageTest();
   runner->DoTest(&GMPStorageTest::TestPBStorage);
 }
+
+TEST(GeckoMediaPlugins, GMPStorageAsyncShutdownTimeout) {
+  nsRefPtr<GMPStorageTest> runner = new GMPStorageTest();
+  runner->DoTest(&GMPStorageTest::TestAsyncShutdownTimeout);
+}
+
+TEST(GeckoMediaPlugins, GMPStorageAsyncShutdownStorage) {
+  nsRefPtr<GMPStorageTest> runner = new GMPStorageTest();
+  runner->DoTest(&GMPStorageTest::TestAsyncShutdownStorage);
+}
--- a/dom/base/WindowNamedPropertiesHandler.h
+++ b/dom/base/WindowNamedPropertiesHandler.h
@@ -15,40 +15,39 @@ namespace dom {
 class WindowNamedPropertiesHandler : public BaseDOMProxyHandler
 {
 public:
   MOZ_CONSTEXPR WindowNamedPropertiesHandler()
     : BaseDOMProxyHandler(nullptr, /* hasPrototype = */ true)
   {
   }
   virtual bool
-  preventExtensions(JSContext* aCx, JS::Handle<JSObject*> aProxy) const MOZ_OVERRIDE
-  {
-    // Throw a TypeError, per WebIDL.
-    JS_ReportErrorNumber(aCx, js_GetErrorMessage, nullptr,
-                         JSMSG_CANT_CHANGE_EXTENSIBILITY);
-    return false;
-  }
-  virtual bool
   getOwnPropDescriptor(JSContext* aCx, JS::Handle<JSObject*> aProxy,
                        JS::Handle<jsid> aId,
                        bool /* unused */,
                        JS::MutableHandle<JSPropertyDescriptor> aDesc)
                        const MOZ_OVERRIDE;
   virtual bool
   defineProperty(JSContext* aCx, JS::Handle<JSObject*> aProxy,
                  JS::Handle<jsid> aId,
                  JS::MutableHandle<JSPropertyDescriptor> aDesc) const MOZ_OVERRIDE;
   virtual bool
   ownPropNames(JSContext* aCx, JS::Handle<JSObject*> aProxy, unsigned flags,
                JS::AutoIdVector& aProps) const MOZ_OVERRIDE;
   virtual bool
   delete_(JSContext* aCx, JS::Handle<JSObject*> aProxy, JS::Handle<jsid> aId,
           bool* aBp) const MOZ_OVERRIDE;
   virtual bool
+  preventExtensions(JSContext* aCx, JS::Handle<JSObject*> aProxy,
+                    bool *succeeded) const MOZ_OVERRIDE
+  {
+    *succeeded = false;
+    return true;
+  }
+  virtual bool
   isExtensible(JSContext* aCx, JS::Handle<JSObject*> aProxy,
                bool* aIsExtensible) const MOZ_OVERRIDE
   {
     *aIsExtensible = true;
     return true;
   }
   virtual const char*
   className(JSContext *aCx, JS::Handle<JSObject*> aProxy) const MOZ_OVERRIDE
--- a/dom/base/nsGlobalWindow.cpp
+++ b/dom/base/nsGlobalWindow.cpp
@@ -615,20 +615,21 @@ public:
   virtual bool ownPropertyKeys(JSContext *cx,
                                JS::Handle<JSObject*> proxy,
                                JS::AutoIdVector &props) const MOZ_OVERRIDE;
   virtual bool delete_(JSContext *cx, JS::Handle<JSObject*> proxy,
                        JS::Handle<jsid> id,
                        bool *bp) const MOZ_OVERRIDE;
   virtual bool enumerate(JSContext *cx, JS::Handle<JSObject*> proxy,
                          JS::AutoIdVector &props) const MOZ_OVERRIDE;
+  virtual bool preventExtensions(JSContext *cx,
+                                 JS::Handle<JSObject*> proxy,
+                                 bool *succeeded) const MOZ_OVERRIDE;
   virtual bool isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy, bool *extensible)
                             const MOZ_OVERRIDE;
-  virtual bool preventExtensions(JSContext *cx,
-                                 JS::Handle<JSObject*> proxy) const MOZ_OVERRIDE;
   virtual bool has(JSContext *cx, JS::Handle<JSObject*> proxy,
                    JS::Handle<jsid> id, bool *bp) const MOZ_OVERRIDE;
   virtual bool get(JSContext *cx, JS::Handle<JSObject*> proxy,
                    JS::Handle<JSObject*> receiver,
                    JS::Handle<jsid> id,
                    JS::MutableHandle<JS::Value> vp) const MOZ_OVERRIDE;
   virtual bool set(JSContext *cx, JS::Handle<JSObject*> proxy,
                    JS::Handle<JSObject*> receiver,
@@ -701,37 +702,16 @@ const js::Class OuterWindowProxyClass =
         PROXY_MAKE_EXT(
             nullptr, /* outerObject */
             js::proxy_innerObject,
             nullptr, /* iteratorObject */
             false,   /* isWrappedNative */
             nsOuterWindowProxy::ObjectMoved
         ));
 
-bool
-nsOuterWindowProxy::isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy,
-                                 bool *extensible) const
-{
-  // If [[Extensible]] could be false, then navigating a window could navigate
-  // to a window that's [[Extensible]] after being at one that wasn't: an
-  // invariant violation.  So always report true for this.
-  *extensible = true;
-  return true;
-}
-
-bool
-nsOuterWindowProxy::preventExtensions(JSContext *cx,
-                                      JS::Handle<JSObject*> proxy) const
-{
-  // See above.
-  JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr,
-                       JSMSG_CANT_CHANGE_EXTENSIBILITY);
-  return false;
-}
-
 const char *
 nsOuterWindowProxy::className(JSContext *cx, JS::Handle<JSObject*> proxy) const
 {
     MOZ_ASSERT(js::IsProxy(proxy));
 
     return "Window";
 }
 
@@ -860,16 +840,37 @@ nsOuterWindowProxy::enumerate(JSContext 
   JS::AutoIdVector innerProps(cx);
   if (!js::Wrapper::enumerate(cx, proxy, innerProps)) {
     return false;
   }
   return js::AppendUnique(cx, props, innerProps);
 }
 
 bool
+nsOuterWindowProxy::preventExtensions(JSContext *cx,
+                                      JS::Handle<JSObject*> proxy,
+                                      bool *succeeded) const
+{
+  // If [[Extensible]] could be false, then navigating a window could navigate
+  // to a window that's [[Extensible]] after being at one that wasn't: an
+  // invariant violation.  So never change a window's extensibility.
+  *succeeded = false;
+  return true;
+}
+
+bool
+nsOuterWindowProxy::isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy,
+                                 bool *extensible) const
+{
+  // See above.
+  *extensible = true;
+  return true;
+}
+
+bool
 nsOuterWindowProxy::has(JSContext *cx, JS::Handle<JSObject*> proxy,
                         JS::Handle<jsid> id, bool *bp) const
 {
   if (nsCOMPtr<nsIDOMWindow> frame = GetSubframeWindow(cx, proxy, id)) {
     *bp = true;
     return true;
   }
 
--- a/dom/bindings/DOMJSProxyHandler.cpp
+++ b/dom/bindings/DOMJSProxyHandler.cpp
@@ -136,30 +136,29 @@ DOMProxyHandler::EnsureExpandoObject(JSC
 
   cache->SetPreservingWrapper(true);
   js::SetProxyExtra(obj, JSPROXYSLOT_EXPANDO, ObjectValue(*expando));
 
   return expando;
 }
 
 bool
-DOMProxyHandler::isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy, bool *extensible) const
+DOMProxyHandler::preventExtensions(JSContext *cx, JS::Handle<JSObject*> proxy,
+                                   bool *succeeded) const
 {
   // always extensible per WebIDL
-  *extensible = true;
+  *succeeded = false;
   return true;
 }
 
 bool
-DOMProxyHandler::preventExtensions(JSContext *cx, JS::Handle<JSObject*> proxy) const
+DOMProxyHandler::isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy, bool *extensible) const
 {
-  // Throw a TypeError, per WebIDL.
-  JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr,
-                       JSMSG_CANT_CHANGE_EXTENSIBILITY);
-  return false;
+  *extensible = true;
+  return true;
 }
 
 bool
 BaseDOMProxyHandler::getPropertyDescriptor(JSContext* cx,
                                            JS::Handle<JSObject*> proxy,
                                            JS::Handle<jsid> id,
                                            MutableHandle<JSPropertyDescriptor> desc) const
 {
--- a/dom/bindings/DOMJSProxyHandler.h
+++ b/dom/bindings/DOMJSProxyHandler.h
@@ -110,19 +110,20 @@ public:
     bool unused;
     return defineProperty(cx, proxy, id, desc, &unused);
   }
   virtual bool defineProperty(JSContext* cx, JS::Handle<JSObject*> proxy, JS::Handle<jsid> id,
                               JS::MutableHandle<JSPropertyDescriptor> desc, bool* defined)
                               const;
   bool delete_(JSContext* cx, JS::Handle<JSObject*> proxy,
                JS::Handle<jsid> id, bool* bp) const MOZ_OVERRIDE;
+  bool preventExtensions(JSContext *cx, JS::Handle<JSObject*> proxy,
+                         bool *succeeded) const MOZ_OVERRIDE;
   bool isExtensible(JSContext *cx, JS::Handle<JSObject*> proxy, bool *extensible)
                     const MOZ_OVERRIDE;
-  bool preventExtensions(JSContext *cx, JS::Handle<JSObject*> proxy) const MOZ_OVERRIDE;
   bool has(JSContext* cx, JS::Handle<JSObject*> proxy, JS::Handle<jsid> id,
            bool* bp) const MOZ_OVERRIDE;
   bool set(JSContext *cx, JS::Handle<JSObject*> proxy, JS::Handle<JSObject*> receiver,
            JS::Handle<jsid> id, bool strict, JS::MutableHandle<JS::Value> vp)
            const MOZ_OVERRIDE;
 
   /*
    * If assigning to proxy[id] hits a named setter with OverrideBuiltins or
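
Script-visible effect of the preventExtensions signature change across the three handlers above. A sketch: whether a failed attempt throws or fails silently depends on how the engine's Object.preventExtensions maps the trap's *succeeded = false back to script:

// Window and DOM proxies must stay extensible, so the trap now reports
// failure instead of throwing from inside the handler:
Object.isExtensible(window);         // true
try {
  Object.preventExtensions(window);  // trap sets *succeeded = false
} catch (e) {
  // Under ES6 semantics the engine converts the reported failure into a
  // TypeError; either way the object's extensibility is unchanged.
}
Object.isExtensible(window);         // still true
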
--- a/dom/ipc/ContentChild.cpp
+++ b/dom/ipc/ContentChild.cpp
@@ -77,16 +77,17 @@
 #include "SandboxHal.h"
 #include "nsDebugImpl.h"
 #include "nsHashPropertyBag.h"
 #include "nsLayoutStylesheetCache.h"
 #include "nsIJSRuntimeService.h"
 #include "nsThreadManager.h"
 #include "nsAnonymousTemporaryFile.h"
 #include "nsISpellChecker.h"
+#include "nsClipboardProxy.h"
 
 #include "IHistory.h"
 #include "nsNetUtil.h"
 
 #include "base/message_loop.h"
 #include "base/process_util.h"
 #include "base/task.h"
 
@@ -715,19 +716,25 @@ ContentChild::InitXPCOM()
         return;
     }
 
     mConsoleListener = new ConsoleListener(this);
     if (NS_FAILED(svc->RegisterListener(mConsoleListener)))
         NS_WARNING("Couldn't register console listener for child process");
 
     bool isOffline;
-    SendGetXPCOMProcessAttributes(&isOffline, &mAvailableDictionaries);
+    ClipboardCapabilities clipboardCaps;
+    SendGetXPCOMProcessAttributes(&isOffline, &mAvailableDictionaries, &clipboardCaps);
     RecvSetOffline(isOffline);
 
+    nsCOMPtr<nsIClipboard> clipboard(do_GetService("@mozilla.org/widget/clipboard;1"));
+    if (nsCOMPtr<nsIClipboardProxy> clipboardProxy = do_QueryInterface(clipboard)) {
+        clipboardProxy->SetCapabilities(clipboardCaps);
+    }
+
     DebugOnly<FileUpdateDispatcher*> observer = FileUpdateDispatcher::GetSingleton();
     NS_ASSERTION(observer, "FileUpdateDispatcher is null");
 
     // This object is held alive by the observer service.
     nsRefPtr<SystemMessageHandledObserver> sysMsgObserver =
         new SystemMessageHandledObserver();
     sysMsgObserver->Init();
 
--- a/dom/ipc/ContentParent.cpp
+++ b/dom/ipc/ContentParent.cpp
@@ -2497,18 +2497,23 @@ ContentParent::RecvAddNewProcess(const u
     // Resend pref updates to the forked child.
     for (int i = 0; i < numNuwaPrefUpdates; i++) {
         content->SendPreferenceUpdate(sNuwaPrefUpdates->ElementAt(i));
     }
 
     // Update offline settings.
     bool isOffline;
     InfallibleTArray<nsString> unusedDictionaries;
-    RecvGetXPCOMProcessAttributes(&isOffline, &unusedDictionaries);
+    ClipboardCapabilities clipboardCaps;
+    RecvGetXPCOMProcessAttributes(&isOffline, &unusedDictionaries,
+                                  &clipboardCaps);
     content->SendSetOffline(isOffline);
+    MOZ_ASSERT(!clipboardCaps.supportsSelectionClipboard() &&
+               !clipboardCaps.supportsFindClipboard(),
+               "Unexpected values");
 
     PreallocatedProcessManager::PublishSpareProcess(content);
     return true;
 #else
     NS_ERROR("ContentParent::RecvAddNewProcess() not implemented!");
     return false;
 #endif
 }
@@ -2782,28 +2787,38 @@ ContentParent::RecvGetProcessAttributes(
     *aIsForApp = IsForApp();
     *aIsForBrowser = mIsForBrowser;
 
     return true;
 }
 
 bool
 ContentParent::RecvGetXPCOMProcessAttributes(bool* aIsOffline,
-                                             InfallibleTArray<nsString>* dictionaries)
+                                             InfallibleTArray<nsString>* dictionaries,
+                                             ClipboardCapabilities* clipboardCaps)
 {
     nsCOMPtr<nsIIOService> io(do_GetIOService());
     MOZ_ASSERT(io, "No IO service?");
     DebugOnly<nsresult> rv = io->GetOffline(aIsOffline);
     MOZ_ASSERT(NS_SUCCEEDED(rv), "Failed getting offline?");
 
     nsCOMPtr<nsISpellChecker> spellChecker(do_GetService(NS_SPELLCHECKER_CONTRACTID));
     MOZ_ASSERT(spellChecker, "No spell checker?");
 
     spellChecker->GetDictionaryList(dictionaries);
 
+    nsCOMPtr<nsIClipboard> clipboard(do_GetService("@mozilla.org/widget/clipboard;1"));
+    MOZ_ASSERT(clipboard, "No clipboard?");
+
+    rv = clipboard->SupportsSelectionClipboard(&clipboardCaps->supportsSelectionClipboard());
+    MOZ_ASSERT(NS_SUCCEEDED(rv));
+
+    rv = clipboard->SupportsFindClipboard(&clipboardCaps->supportsFindClipboard());
+    MOZ_ASSERT(NS_SUCCEEDED(rv));
+
     return true;
 }
 
 mozilla::jsipc::PJavaScriptParent *
 ContentParent::AllocPJavaScriptParent()
 {
     MOZ_ASSERT(!ManagedPJavaScriptParent().Length());
     return nsIContentParent::AllocPJavaScriptParent();
--- a/dom/ipc/ContentParent.h
+++ b/dom/ipc/ContentParent.h
@@ -408,17 +408,18 @@ private:
     PBackgroundParent*
     AllocPBackgroundParent(Transport* aTransport, ProcessId aOtherProcess)
                            MOZ_OVERRIDE;
 
     virtual bool RecvGetProcessAttributes(uint64_t* aId,
                                           bool* aIsForApp,
                                           bool* aIsForBrowser) MOZ_OVERRIDE;
     virtual bool RecvGetXPCOMProcessAttributes(bool* aIsOffline,
-                                               InfallibleTArray<nsString>* dictionaries)
+                                               InfallibleTArray<nsString>* dictionaries,
+                                               ClipboardCapabilities* clipboardCaps)
         MOZ_OVERRIDE;
 
     virtual bool DeallocPJavaScriptParent(mozilla::jsipc::PJavaScriptParent*) MOZ_OVERRIDE;
 
     virtual bool DeallocPRemoteSpellcheckEngineParent(PRemoteSpellcheckEngineParent*) MOZ_OVERRIDE;
     virtual PBrowserParent* AllocPBrowserParent(const IPCTabContext& aContext,
                                                 const uint32_t& aChromeFlags,
                                                 const uint64_t& aId,
--- a/dom/ipc/PContent.ipdl
+++ b/dom/ipc/PContent.ipdl
@@ -310,16 +310,21 @@ struct VolumeInfo {
   int32_t mountGeneration;
   bool isMediaPresent;
   bool isSharing;
   bool isFormatting;
   bool isFake;
   bool isUnmounting;
 };
 
+struct ClipboardCapabilities {
+  bool supportsSelectionClipboard;
+  bool supportsFindClipboard;
+};
+
 union MaybeFileDesc {
     FileDescriptor;
     void_t;
 };
 
 prio(normal upto high) intr protocol PContent
 {
     parent opens PCompositor;
@@ -512,17 +517,18 @@ parent:
      * |isForBrowser|, we're loading <browser>.  When |!isForApp &&
      * !isForBrowser|, we're probably loading <xul:browser remote>.
      *
      * Keep the return values in sync with PBrowser()!
      */
     sync GetProcessAttributes()
         returns (uint64_t id, bool isForApp, bool isForBrowser);
     sync GetXPCOMProcessAttributes()
-        returns (bool isOffline, nsString[] dictionaries);
+        returns (bool isOffline, nsString[] dictionaries,
+                 ClipboardCapabilities clipboardCaps);
 
     sync CreateChildProcess(IPCTabContext context,
                             ProcessPriority priority)
         returns (uint64_t id, bool isForApp, bool isForBrowser);
     intr BridgeToChildProcess(uint64_t id);
 
     async PJavaScript();
 
--- a/dom/media/gmp-plugin/gmp-fake.cpp
+++ b/dom/media/gmp-plugin/gmp-fake.cpp
@@ -397,17 +397,20 @@ extern "C" {
   GMPGetAPI (const char* aApiName, void* aHostAPI, void** aPluginApi) {
     if (!strcmp (aApiName, "decode-video")) {
       *aPluginApi = new FakeVideoDecoder (static_cast<GMPVideoHost*> (aHostAPI));
       return GMPNoErr;
     } else if (!strcmp (aApiName, "encode-video")) {
       *aPluginApi = new FakeVideoEncoder (static_cast<GMPVideoHost*> (aHostAPI));
       return GMPNoErr;
     } else if (!strcmp (aApiName, "eme-decrypt")) {
-      *aPluginApi = new FakeDecryptor(static_cast<GMPDecryptorHost*> (aHostAPI));
+      *aPluginApi = new FakeDecryptor();
+      return GMPNoErr;
+    } else if (!strcmp (aApiName, "async-shutdown")) {
+      *aPluginApi = new TestAsyncShutdown(static_cast<GMPAsyncShutdownHost*> (aHostAPI));
       return GMPNoErr;
     }
     return GMPGenericErr;
   }
 
   PUBLIC_FUNC void
   GMPShutdown (void) {
     g_platform_api = NULL;
--- a/dom/media/gmp-plugin/gmp-test-decryptor.cpp
+++ b/dom/media/gmp-plugin/gmp-test-decryptor.cpp
@@ -28,19 +28,18 @@ static bool sMultiClientTest = false;
 void
 MaybeFinish()
 {
   if (sFinishedTruncateTest && sFinishedReplaceTest && sMultiClientTest) {
     FakeDecryptor::Message("test-storage complete");
   }
 }
 
-FakeDecryptor::FakeDecryptor(GMPDecryptorHost* aHost)
-  : mHost(aHost)
-  , mCallback(nullptr)
+FakeDecryptor::FakeDecryptor()
+  : mCallback(nullptr)
 {
   assert(!sInstance);
   sInstance = this;
 }
 
 void FakeDecryptor::DecryptingComplete()
 {
   sInstance = nullptr;
@@ -75,18 +74,18 @@ public:
     , mThen(aThen)
   {}
   void Run() MOZ_OVERRIDE {
     ReadRecord(mId, mThen);
   }
   void Destroy() MOZ_OVERRIDE {
     delete this;
   }
+  string mId;
   ReadContinuation* mThen;
-  string mId;
 };
 
 class TestEmptyContinuation : public ReadContinuation {
 public:
   void ReadComplete(GMPErr aErr, const std::string& aData) MOZ_OVERRIDE {
     if (aData != "") {
       FakeDecryptor::Message("FAIL TestEmptyContinuation record was not truncated");
     }
@@ -173,17 +172,17 @@ public:
   virtual void OpenComplete(GMPErr aStatus, GMPRecord* aRecord) MOZ_OVERRIDE {
     if (GMP_FAILED(aStatus)) {
       FakeDecryptor::Message("FAIL OpenAgainContinuation to open record initially.");
       sMultiClientTest = true;
       MaybeFinish();
       return;
     }
 
-    auto err = GMPOpenRecord(OpenAgainRecordId, new OpenedSecondTimeContinuation(aRecord));
+    GMPOpenRecord(OpenAgainRecordId, new OpenedSecondTimeContinuation(aRecord));
 
     delete this;
   }
 };
 
 void
 FakeDecryptor::TestStorage()
 {
@@ -263,16 +262,41 @@ public:
       FakeDecryptor::Message("retrieve " + mRecordId + " succeeded (length " +
                              len + " bytes)");
     }
     delete this;
   }
   string mRecordId;
 };
 
+class ReportReadRecordContinuation : public ReadContinuation {
+public:
+  ReportReadRecordContinuation(const string& aRecordId)
+    : mRecordId(aRecordId)
+  {}
+  void ReadComplete(GMPErr aErr, const std::string& aData) MOZ_OVERRIDE {
+    if (GMP_FAILED(aErr)) {
+      FakeDecryptor::Message("retrieved " + mRecordId + " failed");
+    } else {
+      FakeDecryptor::Message("retrieved " + mRecordId + " " + aData);
+    }
+    delete this;
+  }
+  string mRecordId;
+};
+
+enum ShutdownMode {
+  ShutdownNormal,
+  ShutdownTimeout,
+  ShutdownStoreToken
+};
+
+static ShutdownMode sShutdownMode = ShutdownNormal;
+static string sShutdownToken = "";
+
 void
 FakeDecryptor::UpdateSession(uint32_t aPromiseId,
                              const char* aSessionId,
                              uint32_t aSessionIdLength,
                              const uint8_t* aResponse,
                              uint32_t aResponseSize)
 {
   std::string response((const char*)aResponse, (const char*)(aResponse)+aResponseSize);
@@ -285,10 +309,53 @@ FakeDecryptor::UpdateSession(uint32_t aP
     const string& id = tokens[1];
     const string& value = tokens[2];
     WriteRecord(id,
                 value,
                 new ReportWritten(id, value));
   } else if (task == "retrieve") {
     const string& id = tokens[1];
     ReadRecord(id, new ReportReadStatusContinuation(id));
+  } else if (task == "shutdown-mode") {
+    const string& mode = tokens[1];
+    if (mode == "timeout") {
+      sShutdownMode = ShutdownTimeout;
+    } else if (mode == "token") {
+      sShutdownMode = ShutdownStoreToken;
+      sShutdownToken = tokens[2];
+      Message("shutdown-token received " + sShutdownToken);
+    }
+  } else if (task == "retrieve-shutdown-token") {
+    ReadRecord("shutdown-token", new ReportReadRecordContinuation("shutdown-token"));
   }
 }
+
+class CompleteShutdownTask : public GMPTask {
+public:
+  CompleteShutdownTask(GMPAsyncShutdownHost* aHost)
+    : mHost(aHost)
+  {
+  }
+  virtual void Run() {
+    mHost->ShutdownComplete();
+  }
+  virtual void Destroy() { delete this; }
+  GMPAsyncShutdownHost* mHost;
+};
+
+void
+TestAsyncShutdown::BeginShutdown() {
+  switch (sShutdownMode) {
+    case ShutdownNormal:
+      mHost->ShutdownComplete();
+      break;
+    case ShutdownTimeout:
+      // Don't do anything; wait for timeout, Gecko should kill
+      // the plugin and recover.
+      break;
+    case ShutdownStoreToken:
+      // Store message, then shutdown.
+      WriteRecord("shutdown-token",
+                  sShutdownToken,
+                  new CompleteShutdownTask(mHost));
+      break;
+  }
+}
--- a/dom/media/gmp-plugin/gmp-test-decryptor.h
+++ b/dom/media/gmp-plugin/gmp-test-decryptor.h
@@ -9,17 +9,17 @@
 #include "gmp-decryption.h"
 #include "gmp-async-shutdown.h"
 #include <string>
 #include "mozilla/Attributes.h"
 
 class FakeDecryptor : public GMPDecryptor {
 public:
 
-  FakeDecryptor(GMPDecryptorHost* aHost);
+  FakeDecryptor();
 
   virtual void Init(GMPDecryptorCallback* aCallback) MOZ_OVERRIDE {
     mCallback = aCallback;
   }
 
   virtual void CreateSession(uint32_t aPromiseId,
                              const char* aInitDataType,
                              uint32_t aInitDataTypeSize,
@@ -65,17 +65,28 @@ public:
   }
 
   virtual void DecryptingComplete() MOZ_OVERRIDE;
 
   static void Message(const std::string& aMessage);
 
 private:
 
+  virtual ~FakeDecryptor() {}
   static FakeDecryptor* sInstance;
 
   void TestStorage();
 
   GMPDecryptorCallback* mCallback;
-  GMPDecryptorHost* mHost;
+};
+
+class TestAsyncShutdown : public GMPAsyncShutdown {
+public:
+  TestAsyncShutdown(GMPAsyncShutdownHost* aHost)
+    : mHost(aHost)
+  {
+  }
+  virtual void BeginShutdown() MOZ_OVERRIDE;
+private:
+  GMPAsyncShutdownHost* mHost;
 };
 
 #endif
--- a/dom/media/gmp-plugin/gmp-test-storage.h
+++ b/dom/media/gmp-plugin/gmp-test-storage.h
@@ -7,16 +7,17 @@
 #define TEST_GMP_STORAGE_H__
 
 #include "gmp-errors.h"
 #include "gmp-platform.h"
 #include <string>
 
 class ReadContinuation {
 public:
+  virtual ~ReadContinuation() {}
   virtual void ReadComplete(GMPErr aErr, const std::string& aData) = 0;
 };
 
 // Reads a record to storage using GMPRecord.
 // Calls ReadContinuation with read data.
 GMPErr
 ReadRecord(const std::string& aRecordName,
            ReadContinuation* aContinuation);
@@ -40,16 +41,17 @@ GMPOpenRecord(const char* aName,
               GMPRecord** aOutRecord,
               GMPRecordClient* aClient);
 
 GMPErr
 GMPRunOnMainThread(GMPTask* aTask);
 
 class OpenContinuation {
 public:
+  virtual ~OpenContinuation() {}
   virtual void OpenComplete(GMPErr aStatus, GMPRecord* aRecord) = 0;
 };
 
 GMPErr
 GMPOpenRecord(const std::string& aRecordName,
            OpenContinuation* aContinuation);
 
 #endif // TEST_GMP_STORAGE_H__
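
As an illustration (not part of the patch), a minimal ReadContinuation written against the interfaces declared above; like the continuations in gmp-test-decryptor.cpp it is heap-allocated and deletes itself once the read completes. EchoReadContinuation is a hypothetical name.

#include <string>
#include "gmp-errors.h"
#include "gmp-test-storage.h"

class EchoReadContinuation : public ReadContinuation {
public:
  explicit EchoReadContinuation(const std::string& aRecordId)
    : mRecordId(aRecordId) {}
  virtual void ReadComplete(GMPErr aErr, const std::string& aData) {
    if (GMP_FAILED(aErr)) {
      // The record may simply not exist yet; treat it as empty.
    } else {
      // aData holds the complete record contents for mRecordId.
    }
    delete this;  // one-shot: the continuation owns itself
  }
private:
  std::string mRecordId;
};

// Usage:
//   ReadRecord("my-record", new EchoReadContinuation("my-record"));
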
--- a/dom/tests/mochitest/general/test_interfaces.html
+++ b/dom/tests/mochitest/general/test_interfaces.html
@@ -80,16 +80,17 @@ var ecmaGlobals =
     {name: "SharedUint8ClampedArray", nightly: true},
     {name: "SharedInt16Array", nightly: true},
     {name: "SharedUint16Array", nightly: true},
     {name: "SharedInt32Array", nightly: true},
     {name: "SharedUint32Array", nightly: true},
     {name: "SharedFloat32Array", nightly: true},
     {name: "SharedFloat64Array", nightly: true},
     {name: "SIMD", nightly: true},
+    {name: "Atomics", nightly: true},
     "StopIteration",
     "String",
     "SyntaxError",
     {name: "TypedObject", nightly: true},
     "TypeError",
     "Uint16Array",
     "Uint32Array",
     "Uint8Array",
--- a/dom/webidl/MutationObserver.webidl
+++ b/dom/webidl/MutationObserver.webidl
@@ -42,20 +42,20 @@ interface MutationObserver {
   [ChromeOnly]
   readonly attribute MutationCallback mutationCallback;
 };
 
 callback MutationCallback = void (sequence<MutationRecord> mutations, MutationObserver observer);
 
 dictionary MutationObserverInit {
   boolean childList = false;
-  boolean attributes = false;
-  boolean characterData = false;
+  boolean attributes;
+  boolean characterData;
   boolean subtree = false;
-  boolean attributeOldValue = false;
-  boolean characterDataOldValue = false;
+  boolean attributeOldValue;
+  boolean characterDataOldValue;
   sequence<DOMString> attributeFilter;
 };
 
 dictionary MutationObservingInfo : MutationObserverInit
 {
   Node? observedNode = null;
 };
--- a/dom/workers/test/test_worker_interfaces.js
+++ b/dom/workers/test/test_worker_interfaces.js
@@ -54,16 +54,17 @@ var ecmaGlobals =
     {name: "SharedUint8ClampedArray", nightly: true},
     {name: "SharedInt16Array", nightly: true},
     {name: "SharedUint16Array", nightly: true},
     {name: "SharedInt32Array", nightly: true},
     {name: "SharedUint32Array", nightly: true},
     {name: "SharedFloat32Array", nightly: true},
     {name: "SharedFloat64Array", nightly: true},
     {name: "SIMD", nightly: true},
+    {name: "Atomics", nightly: true},
     "StopIteration",
     "String",
     "SyntaxError",
     {name: "TypedObject", nightly: true},
     "TypeError",
     "Uint16Array",
     "Uint32Array",
     "Uint8Array",
--- a/dom/xbl/nsXBLService.cpp
+++ b/dom/xbl/nsXBLService.cpp
@@ -111,41 +111,49 @@ public:
   void DocumentLoaded(nsIDocument* aBindingDoc)
   {
     // We only need the document here to cause frame construction, so
     // we need the current doc, not the owner doc.
     nsIDocument* doc = mBoundElement->GetCurrentDoc();
     if (!doc)
       return;
 
+    // Destroy the frames for mBoundElement.
+    nsIContent* destroyedFramesFor = nullptr;
+    nsIPresShell* shell = doc->GetShell();
+    if (shell) {
+      shell->DestroyFramesFor(mBoundElement, &destroyedFramesFor);
+    }
+    MOZ_ASSERT(!mBoundElement->GetPrimaryFrame());
+
     // Get the binding.
     bool ready = false;
     nsXBLService::GetInstance()->BindingReady(mBoundElement, mBindingURI, &ready);
     if (!ready)
       return;
 
     // If |mBoundElement| is (in addition to having binding |mBinding|)
     // also a descendant of another element with binding |mBinding|,
     // then we might have just constructed it due to the
     // notification of its parent.  (We can know about both if the
     // binding loads were triggered from the DOM rather than frame
     // construction.)  So we have to check both whether the element
     // has a primary frame and whether it's in the undisplayed map
     // before sending a ContentInserted notification, or bad things
     // will happen.
-    nsIPresShell *shell = doc->GetShell();
+    MOZ_ASSERT(shell == doc->GetShell());
     if (shell) {
       nsIFrame* childFrame = mBoundElement->GetPrimaryFrame();
       if (!childFrame) {
         // Check to see if it's in the undisplayed content map.
         nsStyleContext* sc =
           shell->FrameManager()->GetUndisplayedContent(mBoundElement);
 
         if (!sc) {
-          shell->RecreateFramesFor(mBoundElement);
+          shell->CreateFramesFor(destroyedFramesFor);
         }
       }
     }
   }
 
   nsXBLBindingRequest(nsIURI* aURI, nsIContent* aBoundElement)
     : mBindingURI(aURI),
       mBoundElement(aBoundElement)
--- a/extensions/spellcheck/src/mozInlineSpellWordUtil.cpp
+++ b/extensions/spellcheck/src/mozInlineSpellWordUtil.cpp
@@ -662,17 +662,17 @@ mozInlineSpellWordUtil::MapDOMPositionTo
 namespace {
 
 template<class T>
 class FirstLargerOffset
 {
   int32_t mSoftTextOffset;
 
 public:
-  FirstLargerOffset(int32_t aSoftTextOffset) : mSoftTextOffset(aSoftTextOffset) {}
+  explicit FirstLargerOffset(int32_t aSoftTextOffset) : mSoftTextOffset(aSoftTextOffset) {}
   int operator()(const T& t) const {
   // We want the first larger offset, so never return 0 (which would
   // short-circuit evaluation before finding the last such offset).
     return mSoftTextOffset < t.mSoftTextOffset ? -1 : 1;
   }
 };
 
 template<class T>
--- a/gfx/layers/composite/ContainerLayerComposite.cpp
+++ b/gfx/layers/composite/ContainerLayerComposite.cpp
@@ -61,60 +61,16 @@ LayerHasCheckerboardingAPZC(Layer* aLaye
       }
       return true;
     }
     break;
   }
   return false;
 }
 
-/**
- * Returns a rectangle of content painted opaquely by aLayer. Very consertative;
- * bails by returning an empty rect in any tricky situations.
- */
-static nsIntRect
-GetOpaqueRect(Layer* aLayer)
-{
-  nsIntRect result;
-  gfx::Matrix matrix;
-  bool is2D = aLayer->AsLayerComposite()->GetShadowTransform().Is2D(&matrix);
-
-  // Just bail if there's anything difficult to handle.
-  if (!is2D || aLayer->GetMaskLayer() ||
-    aLayer->GetIsFixedPosition() ||
-    aLayer->GetIsStickyPosition() ||
-    aLayer->GetEffectiveOpacity() != 1.0f ||
-    matrix.HasNonIntegerTranslation()) {
-    return result;
-  }
-
-  if (aLayer->GetContentFlags() & Layer::CONTENT_OPAQUE) {
-    result = aLayer->GetEffectiveVisibleRegion().GetLargestRectangle();
-  } else {
-    // Drill down into RefLayers because that's what we particularly care about;
-    // layer construction for aLayer will not have known about the opaqueness
-    // of any RefLayer subtrees.
-    RefLayer* refLayer = aLayer->AsRefLayer();
-    if (refLayer && refLayer->GetFirstChild()) {
-      result = GetOpaqueRect(refLayer->GetFirstChild());
-    }
-  }
-
-  // Translate our opaque region to cover the child
-  gfx::Point point = matrix.GetTranslation();
-  result.MoveBy(static_cast<int>(point.x), static_cast<int>(point.y));
-
-  const nsIntRect* clipRect = aLayer->GetEffectiveClipRect();
-  if (clipRect) {
-    result.IntersectRect(result, *clipRect);
-  }
-
-  return result;
-}
-
 static void DrawLayerInfo(const RenderTargetIntRect& aClipRect,
                           LayerManagerComposite* aManager,
                           Layer* aLayer)
 {
 
   if (aLayer->GetType() == Layer::LayerType::TYPE_CONTAINER) {
     // XXX - should figure out a way to render this, but for now this
     // is hard to do, since it will often get superimposed over the first
@@ -157,22 +113,20 @@ static void PrintUniformityInfo(Layer* a
   LayerTranslationPayload* payload = new LayerTranslationPayload(aLayer, translation);
   PROFILER_MARKER_PAYLOAD("LayerTranslation", payload);
 #endif
 }
 
 /* all of the per-layer prepared data we need to maintain */
 struct PreparedLayer
 {
-  PreparedLayer(LayerComposite *aLayer, RenderTargetIntRect aClipRect, bool aRestoreVisibleRegion, nsIntRegion &aVisibleRegion) :
-    mLayer(aLayer), mClipRect(aClipRect), mRestoreVisibleRegion(aRestoreVisibleRegion), mSavedVisibleRegion(aVisibleRegion) {}
+  PreparedLayer(LayerComposite *aLayer, RenderTargetIntRect aClipRect) :
+    mLayer(aLayer), mClipRect(aClipRect) {}
   LayerComposite* mLayer;
   RenderTargetIntRect mClipRect;
-  bool mRestoreVisibleRegion;
-  nsIntRegion mSavedVisibleRegion;
 };
 
 /* all of the prepared data that we need in RenderLayer() */
 struct PreparedData
 {
   RefPtr<CompositingRenderTarget> mTmpTarget;
   nsAutoTArray<PreparedLayer, 12> mLayers;
   bool mNeedsSurfaceCopy;
@@ -218,48 +172,18 @@ ContainerPrepare(ContainerT* aContainer,
         !quad.Intersects(compositor->ClipRectInLayersCoordinates(layerToRender->GetLayer(), clipRect)) &&
         !LayerHasCheckerboardingAPZC(layerToRender->GetLayer(), nullptr)) {
       CULLING_LOG("Sublayer %p is clipped entirely\n", layerToRender->GetLayer());
       continue;
     }
 
     CULLING_LOG("Preparing sublayer %p\n", layerToRender->GetLayer());
 
-    nsIntRegion savedVisibleRegion;
-    bool restoreVisibleRegion = false;
-    gfx::Matrix matrix;
-    bool is2D = layerToRender->GetLayer()->GetBaseTransform().Is2D(&matrix);
-    if (i + 1 < children.Length() &&
-        is2D && !matrix.HasNonIntegerTranslation()) {
-      LayerComposite* nextLayer = static_cast<LayerComposite*>(children.ElementAt(i + 1)->ImplData());
-      CULLING_LOG("Culling against %p\n", nextLayer->GetLayer());
-      nsIntRect nextLayerOpaqueRect;
-      if (nextLayer && nextLayer->GetLayer()) {
-        nextLayerOpaqueRect = GetOpaqueRect(nextLayer->GetLayer());
-        gfx::Point point = matrix.GetTranslation();
-        nextLayerOpaqueRect.MoveBy(static_cast<int>(-point.x), static_cast<int>(-point.y));
-        CULLING_LOG("  point %i, %i\n", static_cast<int>(-point.x), static_cast<int>(-point.y));
-        CULLING_LOG("  opaque rect %i, %i, %i, %i\n", nextLayerOpaqueRect.x, nextLayerOpaqueRect.y, nextLayerOpaqueRect.width, nextLayerOpaqueRect.height);
-      }
-      if (!nextLayerOpaqueRect.IsEmpty()) {
-        CULLING_LOG("  draw\n");
-        savedVisibleRegion = layerToRender->GetShadowVisibleRegion();
-        nsIntRegion visibleRegion;
-        visibleRegion.Sub(savedVisibleRegion, nextLayerOpaqueRect);
-        if (visibleRegion.IsEmpty()) {
-          continue;
-        }
-        layerToRender->SetShadowVisibleRegion(visibleRegion);
-        restoreVisibleRegion = true;
-      } else {
-        CULLING_LOG("  skip\n");
-      }
-    }
     layerToRender->Prepare(clipRect);
-    aContainer->mPrepared->mLayers.AppendElement(PreparedLayer(layerToRender, clipRect, restoreVisibleRegion, savedVisibleRegion));
+    aContainer->mPrepared->mLayers.AppendElement(PreparedLayer(layerToRender, clipRect));
   }
 
   CULLING_LOG("Preparing container layer %p\n", aContainer->GetLayer());
 
   /**
    * Setup our temporary surface for rendering the contents of this container.
    */
 
@@ -320,21 +244,16 @@ RenderLayers(ContainerT* aContainer,
         gfx::Rect fbRect(clearRect.x, clearRect.y, clearRect.width, clearRect.height);
         compositor->ClearRect(fbRect);
         layerToRender->SetClearRect(nsIntRect(0, 0, 0, 0));
       }
     } else {
       layerToRender->RenderLayer(RenderTargetPixel::ToUntyped(clipRect));
     }
 
-    if (preparedData.mRestoreVisibleRegion) {
-      // Restore the region in case it's not covered by opaque content next time
-      layerToRender->SetShadowVisibleRegion(preparedData.mSavedVisibleRegion);
-    }
-
     if (gfxPrefs::UniformityInfo()) {
       PrintUniformityInfo(layer);
     }
 
     if (gfxPrefs::DrawLayerInfo()) {
       DrawLayerInfo(clipRect, aManager, layer);
     }
 
--- a/gfx/layers/composite/LayerManagerComposite.cpp
+++ b/gfx/layers/composite/LayerManagerComposite.cpp
@@ -192,16 +192,65 @@ LayerManagerComposite::BeginTransactionW
   }
 
   mIsCompositorReady = true;
   mCompositor->SetTargetContext(aTarget, aRect);
   mTarget = aTarget;
   mTargetBounds = aRect;
 }
 
+void
+LayerManagerComposite::ApplyOcclusionCulling(Layer* aLayer, nsIntRegion& aOpaqueRegion)
+{
+  nsIntRegion localOpaque;
+  Matrix transform2d;
+  bool isTranslation = false;
+  // If aLayer has a simple transform (only an integer translation) then we
+  // can easily convert aOpaqueRegion into pre-transform coordinates and include
+  // that region.
+  if (aLayer->GetLocalTransform().Is2D(&transform2d)) {
+    if (transform2d.IsIntegerTranslation()) {
+      isTranslation = true;
+      localOpaque = aOpaqueRegion;
+      localOpaque.MoveBy(-transform2d._31, -transform2d._32);
+    }
+  }
+
+  // Subtract any areas that we know to be opaque from our
+  // visible region.
+  LayerComposite *composite = aLayer->AsLayerComposite();
+  if (!localOpaque.IsEmpty()) {
+    nsIntRegion visible = composite->GetShadowVisibleRegion();
+    visible.Sub(visible, localOpaque);
+    composite->SetShadowVisibleRegion(visible);
+  }
+
+  // Compute occlusions for our descendants (in front-to-back order) and allow them to
+  // contribute to localOpaque.
+  for (Layer* child = aLayer->GetLastChild(); child; child = child->GetPrevSibling()) {
+    ApplyOcclusionCulling(child, localOpaque);
+  }
+
+  // If we have a simple transform, then we can add our opaque area into
+  // aOpaqueRegion.
+  if (isTranslation &&
+      !aLayer->GetMaskLayer() &&
+      aLayer->GetLocalOpacity() == 1.0f) {
+    if (aLayer->GetContentFlags() & Layer::CONTENT_OPAQUE) {
+      localOpaque.Or(localOpaque, composite->GetShadowVisibleRegion());
+    }
+    localOpaque.MoveBy(transform2d._31, transform2d._32);
+    const nsIntRect* clip = aLayer->GetEffectiveClipRect();
+    if (clip) {
+      localOpaque.And(localOpaque, *clip);
+    }
+    aOpaqueRegion.Or(aOpaqueRegion, localOpaque);
+  }
+}
+
 bool
 LayerManagerComposite::EndEmptyTransaction(EndTransactionFlags aFlags)
 {
   NS_ASSERTION(mInTransaction, "Didn't call BeginTransaction?");
   if (!mRoot) {
     mInTransaction = false;
     mIsCompositorReady = false;
     return false;
@@ -252,16 +301,19 @@ LayerManagerComposite::EndTransaction(Dr
       // properties.
       mRoot->ApplyPendingUpdatesToSubtree();
     }
 
     // The results of our drawing always go directly into a pixel buffer,
     // so we don't need to pass any global transform here.
     mRoot->ComputeEffectiveTransforms(gfx::Matrix4x4());
 
+    nsIntRegion opaque;
+    ApplyOcclusionCulling(mRoot, opaque);
+
     Render();
     mGeometryChanged = false;
   } else {
     // Modified layer tree
     mGeometryChanged = true;
   }
 
   mCompositor->ClearTargetContext();
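
As a standalone illustration of the pass above (not part of the patch): the same front-to-back recursion over a toy region type, with masks, opacity, and clip rects omitted. Region here is just a set of grid cells, and Node stands in for Layer; all names are hypothetical.

#include <set>
#include <utility>
#include <vector>

using Region = std::set<std::pair<int,int>>;  // toy region: occupied cells

static Region Moved(const Region& aR, int aDx, int aDy) {
  Region out;
  for (const auto& c : aR) out.insert({c.first + aDx, c.second + aDy});
  return out;
}

struct Node {
  Region visible;              // shadow visible region
  bool opaque = false;         // CONTENT_OPAQUE
  bool intTranslation = true;  // transform is an integer translation
  int tx = 0, ty = 0;
  std::vector<Node*> children; // in paint (back-to-front) order
};

static void Occlude(Node* aNode, Region& aOpaque) {
  Region local;
  if (aNode->intTranslation) {
    // Convert the known-opaque region into this node's coordinate space.
    local = Moved(aOpaque, -aNode->tx, -aNode->ty);
  }
  // Cull whatever is already covered by content drawn in front of us.
  for (const auto& c : local) aNode->visible.erase(c);

  // Visit children front-to-back (reverse of paint order) so each child
  // is culled against everything drawn on top of it.
  for (auto it = aNode->children.rbegin(); it != aNode->children.rend(); ++it)
    Occlude(*it, local);

  if (aNode->intTranslation) {
    if (aNode->opaque) {
      // Contribute our own opaque area for layers behind us.
      local.insert(aNode->visible.begin(), aNode->visible.end());
    }
    Region back = Moved(local, aNode->tx, aNode->ty);
    aOpaque.insert(back.begin(), back.end());  // back into parent space
  }
}
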
--- a/gfx/layers/composite/LayerManagerComposite.h
+++ b/gfx/layers/composite/LayerManagerComposite.h
@@ -161,16 +161,23 @@ public:
   virtual bool AreComponentAlphaLayersEnabled() MOZ_OVERRIDE;
 
   virtual TemporaryRef<DrawTarget>
     CreateOptimalMaskDrawTarget(const IntSize &aSize) MOZ_OVERRIDE;
 
   virtual const char* Name() const MOZ_OVERRIDE { return ""; }
 
   /**
+   * Restricts the shadow visible region of layers that are covered with
+   * opaque content. aOpaqueRegion is the region already known to be covered
+   * with opaque content, in the post-transform coordinate space of aLayer.
+   */
+  void ApplyOcclusionCulling(Layer* aLayer, nsIntRegion& aOpaqueRegion);
+
+  /**
    * RAII helper class to add a mask effect with the compositable from aMaskLayer
    * to the EffectChain aEffect and notify the compositable when we are done.
    */
   class AutoAddMaskEffect
   {
   public:
     AutoAddMaskEffect(Layer* aMaskLayer,
                       EffectChain& aEffect,
--- a/gfx/layers/d3d11/CompositorD3D11.cpp
+++ b/gfx/layers/d3d11/CompositorD3D11.cpp
@@ -409,19 +409,18 @@ CompositorD3D11::CreateRenderTarget(cons
   if (aRect.width * aRect.height == 0) {
     return nullptr;
   }
 
   CD3D11_TEXTURE2D_DESC desc(DXGI_FORMAT_B8G8R8A8_UNORM, aRect.width, aRect.height, 1, 1,
                              D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET);
 
   RefPtr<ID3D11Texture2D> texture;
-  mDevice->CreateTexture2D(&desc, nullptr, byRef(texture));
-  NS_ASSERTION(texture, "Could not create texture");
-  if (!texture) {
+  HRESULT hr = mDevice->CreateTexture2D(&desc, nullptr, byRef(texture));
+  if (Failed(hr) || !texture) {
     return nullptr;
   }
 
   RefPtr<CompositingRenderTargetD3D11> rt = new CompositingRenderTargetD3D11(texture, aRect.TopLeft());
   rt->SetSize(IntSize(aRect.width, aRect.height));
 
   if (aInit == INIT_MODE_CLEAR) {
     FLOAT clear[] = { 0, 0, 0, 0 };
@@ -442,19 +441,19 @@ CompositorD3D11::CreateRenderTargetFromS
     return nullptr;
   }
 
   CD3D11_TEXTURE2D_DESC desc(DXGI_FORMAT_B8G8R8A8_UNORM,
                              aRect.width, aRect.height, 1, 1,
                              D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET);
 
   RefPtr<ID3D11Texture2D> texture;
-  mDevice->CreateTexture2D(&desc, nullptr, byRef(texture));
+  HRESULT hr = mDevice->CreateTexture2D(&desc, nullptr, byRef(texture));
   NS_ASSERTION(texture, "Could not create texture");
-  if (!texture) {
+  if (Failed(hr) || !texture) {
     return nullptr;
   }
 
   if (aSource) {
     const CompositingRenderTargetD3D11* sourceD3D11 =
       static_cast<const CompositingRenderTargetD3D11*>(aSource);
 
     D3D11_BOX srcBox;
@@ -596,17 +595,22 @@ CompositorD3D11::DrawQuad(const gfx::Rec
     TextureSourceD3D11* source = maskEffect->mMaskTexture->AsSourceD3D11();
 
     if (!source) {
       NS_WARNING("Missing texture source!");
       return;
     }
 
     RefPtr<ID3D11ShaderResourceView> view;
-    mDevice->CreateShaderResourceView(source->GetD3D11Texture(), nullptr, byRef(view));
+    HRESULT hr = mDevice->CreateShaderResourceView(source->GetD3D11Texture(), nullptr, byRef(view));
+    if (Failed(hr)) {
+      // XXX - There's a chance we won't be able to render anything; should we
+      // just crash release builds?
+      return;
+    }
 
     ID3D11ShaderResourceView* srView = view;
     mContext->PSSetShaderResources(3, 1, &srView);
 
     const gfx::Matrix4x4& maskTransform = maskEffect->mMaskTransform;
     NS_ASSERTION(maskTransform.Is2D(), "How did we end up with a 3D transform here?!");
     Rect bounds = Rect(Point(), Size(maskEffect->mSize));
 
@@ -649,17 +653,22 @@ CompositorD3D11::DrawQuad(const gfx::Rec
       if (!source) {
         NS_WARNING("Missing texture source!");
         return;
       }
 
       SetPSForEffect(aEffectChain.mPrimaryEffect, maskType, texturedEffect->mTexture->GetFormat());
 
       RefPtr<ID3D11ShaderResourceView> view;
-      mDevice->CreateShaderResourceView(source->GetD3D11Texture(), nullptr, byRef(view));
+      HRESULT hr = mDevice->CreateShaderResourceView(source->GetD3D11Texture(), nullptr, byRef(view));
+      if (Failed(hr)) {
+        // XXX - There's a chance we won't be able to render anything; should we
+        // just crash release builds?
+        return;
+      }
 
       ID3D11ShaderResourceView* srView = view;
       mContext->PSSetShaderResources(0, 1, &srView);
 
       if (!texturedEffect->mPremultiplied) {
         mContext->OMSetBlendState(mAttachments->mNonPremulBlendState, sBlendFactor, 0xFFFFFFFF);
         restoreBlendMode = true;
       }
@@ -690,23 +699,37 @@ CompositorD3D11::DrawQuad(const gfx::Rec
         // because of unsupported dimensions (we don't tile YCbCr textures).
         return;
       }
 
       TextureSourceD3D11* sourceY  = source->GetSubSource(Y)->AsSourceD3D11();
       TextureSourceD3D11* sourceCb = source->GetSubSource(Cb)->AsSourceD3D11();
       TextureSourceD3D11* sourceCr = source->GetSubSource(Cr)->AsSourceD3D11();
 
+      HRESULT hr;
+
       RefPtr<ID3D11ShaderResourceView> views[3];
-      mDevice->CreateShaderResourceView(sourceY->GetD3D11Texture(),
-                                        nullptr, byRef(views[0]));
-      mDevice->CreateShaderResourceView(sourceCb->GetD3D11Texture(),
-                                        nullptr, byRef(views[1]));
-      mDevice->CreateShaderResourceView(sourceCr->GetD3D11Texture(),
-                                        nullptr, byRef(views[2]));
+
+      hr = mDevice->CreateShaderResourceView(sourceY->GetD3D11Texture(),
+                                             nullptr, byRef(views[0]));
+      if (Failed(hr)) {
+        return;
+      }
+
+      hr = mDevice->CreateShaderResourceView(sourceCb->GetD3D11Texture(),
+                                             nullptr, byRef(views[1]));
+      if (Failed(hr)) {
+        return;
+      }
+
+      hr = mDevice->CreateShaderResourceView(sourceCr->GetD3D11Texture(),
+                                             nullptr, byRef(views[2]));
+      if (Failed(hr)) {
+        return;
+      }
 
       ID3D11ShaderResourceView* srViews[3] = { views[0], views[1], views[2] };
       mContext->PSSetShaderResources(0, 3, srViews);
     }
     break;
   case EffectTypes::COMPONENT_ALPHA:
     {
       MOZ_ASSERT(gfxPrefs::ComponentAlphaEnabled());
@@ -723,18 +746,27 @@ CompositorD3D11::DrawQuad(const gfx::Rec
       }
 
       SetPSForEffect(aEffectChain.mPrimaryEffect, maskType, effectComponentAlpha->mOnWhite->GetFormat());
 
       SetSamplerForFilter(effectComponentAlpha->mFilter);
 
       mVSConstants.textureCoords = effectComponentAlpha->mTextureCoords;
       RefPtr<ID3D11ShaderResourceView> views[2];
-      mDevice->CreateShaderResourceView(sourceOnBlack->GetD3D11Texture(), nullptr, byRef(views[0]));
-      mDevice->CreateShaderResourceView(sourceOnWhite->GetD3D11Texture(), nullptr, byRef(views[1]));
+
+      HRESULT hr;
+
+      hr = mDevice->CreateShaderResourceView(sourceOnBlack->GetD3D11Texture(), nullptr, byRef(views[0]));
+      if (Failed(hr)) {
+        return;
+      }
+      hr = mDevice->CreateShaderResourceView(sourceOnWhite->GetD3D11Texture(), nullptr, byRef(views[1]));
+      if (Failed(hr)) {
+        return;
+      }
 
       ID3D11ShaderResourceView* srViews[2] = { views[0], views[1] };
       mContext->PSSetShaderResources(0, 2, srViews);
 
       mContext->OMSetBlendState(mAttachments->mComponentBlendState, sBlendFactor, 0xFFFFFFFF);
       restoreBlendMode = true;
     }
     break;
@@ -900,17 +932,17 @@ CompositorD3D11::UpdateRenderTarget()
     return;
   }
 
   HRESULT hr;
 
   nsRefPtr<ID3D11Texture2D> backBuf;
 
   hr = mSwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)backBuf.StartAssignment());
-  if (FAILED(hr)) {
+  if (Failed(hr)) {
     return;
   }
 
   mDefaultRT = new CompositingRenderTargetD3D11(backBuf, IntPoint(0, 0));
   mDefaultRT->SetSize(mSize.ToIntSize());
 }
 
 bool
@@ -967,61 +999,32 @@ CompositorD3D11::CreateShaders()
                                   byRef(mAttachments->mRGBAShader[MaskType::Mask3d]));
   if (FAILED(hr)) {
     return false;
   }
 
   return true;
 }
 
-static
-bool ShouldRecoverFromMapFailure(HRESULT hr, ID3D11Device* device)
-{
-  // XXX - it would be nice to use gfxCriticalError, but it needs to
-  // be made to work off the main thread first.
-  if (SUCCEEDED(hr)) {
-    return true;
-  }
-  if (hr == DXGI_ERROR_DEVICE_REMOVED) {
-    switch (device->GetDeviceRemovedReason()) {
-      case DXGI_ERROR_DEVICE_HUNG:
-      case DXGI_ERROR_DEVICE_REMOVED:
-      case DXGI_ERROR_DEVICE_RESET:
-      case DXGI_ERROR_DRIVER_INTERNAL_ERROR:
-        return true;
-      case DXGI_ERROR_INVALID_CALL:
-      default:
-        return false;
-    }
-  }
-  return false;
-}
-
 bool
 CompositorD3D11::UpdateConstantBuffers()
 {
   HRESULT hr;
   D3D11_MAPPED_SUBRESOURCE resource;
 
   hr = mContext->Map(mAttachments->mVSConstantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource);
-  if (FAILED(hr)) {
-    if (ShouldRecoverFromMapFailure(hr, GetDevice())) {
-      return false;
-    }
-    MOZ_CRASH();
+  if (Failed(hr)) {
+    return false;
   }
   *(VertexShaderConstants*)resource.pData = mVSConstants;
   mContext->Unmap(mAttachments->mVSConstantBuffer, 0);
 
   hr = mContext->Map(mAttachments->mPSConstantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource);
-  if (FAILED(hr)) {
-    if (ShouldRecoverFromMapFailure(hr, GetDevice())) {
-      return false;
-    }
-    MOZ_CRASH();
+  if (Failed(hr)) {
+    return false;
   }
   *(PixelShaderConstants*)resource.pData = mPSConstants;
   mContext->Unmap(mAttachments->mPSConstantBuffer, 0);
 
   ID3D11Buffer *buffer = mAttachments->mVSConstantBuffer;
 
   mContext->VSSetConstantBuffers(0, 1, &buffer);
 
@@ -1077,10 +1080,51 @@ CompositorD3D11::PaintToTarget()
                                              SurfaceFormat::B8G8R8A8);
   mTarget->CopySurface(sourceSurface,
                        IntRect(0, 0, bbDesc.Width, bbDesc.Height),
                        IntPoint(-mTargetBounds.x, -mTargetBounds.y));
   mTarget->Flush();
   mContext->Unmap(readTexture, 0);
 }
 
+void
+CompositorD3D11::HandleError(HRESULT hr, Severity aSeverity)
+{
+  // XXX - It would be nice to use gfxCriticalError, but it needs to
+  // be made to work off the main thread first.
+  MOZ_ASSERT(aSeverity != DebugAssert);
+
+  if (aSeverity == Critical) {
+    MOZ_CRASH("Unrecoverable D3D11 error");
+  }
+
+  if (mDevice && hr == DXGI_ERROR_DEVICE_REMOVED) {
+    hr = mDevice->GetDeviceRemovedReason();
+  }
+
+  // Always crash if we are making invalid calls
+  if (hr == DXGI_ERROR_INVALID_CALL) {
+    MOZ_CRASH("Invalid D3D11 api call");
+  }
+
+  if (aSeverity == Recoverable) {
+    NS_WARNING("Encountered a recoverable D3D11 error");
+  }
+}
+
+bool
+CompositorD3D11::Failed(HRESULT hr, Severity aSeverity)
+{
+  if (FAILED(hr)) {
+    HandleError(hr, aSeverity);
+    return true;
+  }
+  return false;
+}
+
+bool
+CompositorD3D11::Succeeded(HRESULT hr, Severity aSeverity)
+{
+  return !Failed(hr, aSeverity);
+}
+
 }
 }
--- a/gfx/layers/d3d11/CompositorD3D11.h
+++ b/gfx/layers/d3d11/CompositorD3D11.h
@@ -138,16 +138,26 @@ public:
 
   virtual nsIWidget* GetWidget() const MOZ_OVERRIDE { return mWidget; }
 
   ID3D11Device* GetDevice() { return mDevice; }
 
   ID3D11DeviceContext* GetDC() { return mContext; }
 
 private:
+  enum Severity {
+    Recoverable,
+    DebugAssert,
+    Critical,
+  };
+
+  void HandleError(HRESULT hr, Severity aSeverity);
+  bool Failed(HRESULT hr, Severity aSeverity = DebugAssert);
+  bool Succeeded(HRESULT hr, Severity aSeverity = DebugAssert);
+
   // ensure mSize is up to date with respect to mWidget
   void EnsureSize();
   void VerifyBufferSize();
   void UpdateRenderTarget();
   bool CreateShaders();
   bool UpdateConstantBuffers();
   void SetSamplerForFilter(gfx::Filter aFilter);
   void SetPSForEffect(Effect *aEffect, MaskType aMaskType, gfx::SurfaceFormat aFormat);
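
The Failed/HandleError pattern introduced above, sketched in isolation (this is not the actual CompositorD3D11 code): HRESULT and the failure test are stubbed so the sketch stands alone without windows.h.

#include <cstdio>
#include <cstdlib>

using HRESULT = long;                                  // stand-in for the Windows type
static bool IsFailure(HRESULT hr) { return hr < 0; }   // FAILED() equivalent

enum Severity { Recoverable, DebugAssert, Critical };

static void HandleError(HRESULT hr, Severity aSeverity) {
  if (aSeverity == Critical) {
    std::abort();  // unrecoverable: crash hard
  }
  std::fprintf(stderr, "recoverable error 0x%lx\n", (unsigned long)hr);
}

// Returns true on failure so call sites can early-return instead of
// asserting or crashing at every D3D call.
static bool Failed(HRESULT hr, Severity aSeverity = DebugAssert) {
  if (IsFailure(hr)) {
    HandleError(hr, aSeverity);
    return true;
  }
  return false;
}

// Usage, mirroring the call sites in the patch:
//   HRESULT hr = device->CreateShaderResourceView(...);
//   if (Failed(hr)) return;   // skip this draw rather than crash
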
--- a/gfx/layers/ipc/CompositorParent.cpp
+++ b/gfx/layers/ipc/CompositorParent.cpp
@@ -787,16 +787,36 @@ CompositorParent::CompositeCallback(Time
   } else {
     mLastCompose = TimeStamp::Now();
   }
 
   mCurrentCompositeTask = nullptr;
   CompositeToTarget(nullptr);
 }
 
+// Go down the composite layer tree, setting properties to match their
+// content-side counterparts.
+static void
+SetShadowProperties(Layer* aLayer)
+{
+  // FIXME: Bug 717688 -- Do these updates in LayerTransactionParent::RecvUpdate.
+  LayerComposite* layerComposite = aLayer->AsLayerComposite();
+  // Set the layerComposite's base transform to the layer's base transform.
+  layerComposite->SetShadowTransform(aLayer->GetBaseTransform());
+  layerComposite->SetShadowTransformSetByAnimation(false);
+  layerComposite->SetShadowVisibleRegion(aLayer->GetVisibleRegion());
+  layerComposite->SetShadowClipRect(aLayer->GetClipRect());
+  layerComposite->SetShadowOpacity(aLayer->GetOpacity());
+
+  for (Layer* child = aLayer->GetFirstChild();
+      child; child = child->GetNextSibling()) {
+    SetShadowProperties(child);
+  }
+}
+
 void
 CompositorParent::CompositeToTarget(DrawTarget* aTarget, const nsIntRect* aRect)
 {
   profiler_tracing("Paint", "Composite", TRACING_INTERVAL_START);
   PROFILER_LABEL("CompositorParent", "Composite",
     js::ProfileEntry::Category::GRAPHICS);
 
   MOZ_ASSERT(IsInCompositorThread(),
@@ -819,16 +839,18 @@ CompositorParent::CompositeToTarget(Draw
   AutoResolveRefLayers resolve(mCompositionManager);
 
   if (aTarget) {
     mLayerManager->BeginTransactionWithDrawTarget(aTarget, *aRect);
   } else {
     mLayerManager->BeginTransaction();
   }
 
+  SetShadowProperties(mLayerManager->GetRoot());
+
   if (mForceCompositionTask && !mOverrideComposeReadiness) {
     if (mCompositionManager->ReadyForCompose()) {
       mForceCompositionTask->Cancel();
       mForceCompositionTask = nullptr;
     } else {
       return;
     }
   }
@@ -899,36 +921,16 @@ CompositorParent::ForceComposeToTarget(D
 bool
 CompositorParent::CanComposite()
 {
   return mLayerManager &&
          mLayerManager->GetRoot() &&
          !mPaused;
 }
 
-// Go down the composite layer tree, setting properties to match their
-// content-side counterparts.
-static void
-SetShadowProperties(Layer* aLayer)
-{
-  // FIXME: Bug 717688 -- Do these updates in LayerTransactionParent::RecvUpdate.
-  LayerComposite* layerComposite = aLayer->AsLayerComposite();
-  // Set the layerComposite's base transform to the layer's base transform.
-  layerComposite->SetShadowTransform(aLayer->GetBaseTransform());
-  layerComposite->SetShadowTransformSetByAnimation(false);
-  layerComposite->SetShadowVisibleRegion(aLayer->GetVisibleRegion());
-  layerComposite->SetShadowClipRect(aLayer->GetClipRect());
-  layerComposite->SetShadowOpacity(aLayer->GetOpacity());
-
-  for (Layer* child = aLayer->GetFirstChild();
-      child; child = child->GetNextSibling()) {
-    SetShadowProperties(child);
-  }
-}
-
 void
 CompositorParent::ScheduleRotationOnCompositorThread(const TargetConfig& aTargetConfig,
                                                      bool aIsFirstPaint)
 {
   MOZ_ASSERT(IsInCompositorThread());
 
   if (!aIsFirstPaint &&
       !mCompositionManager->IsFirstPaint() &&
--- a/gfx/layers/opengl/CompositorOGL.cpp
+++ b/gfx/layers/opengl/CompositorOGL.cpp
@@ -168,16 +168,19 @@ CompositorOGL::CleanupResources()
 
   ctx->fBindFramebuffer(LOCAL_GL_FRAMEBUFFER, 0);
 
   if (mQuadVBO) {
     ctx->fDeleteBuffers(1, &mQuadVBO);
     mQuadVBO = 0;
   }
 
+  mGLContext->MakeCurrent();
+  mContextStateTracker.DestroyOGL(mGLContext);
+
   // On the main thread the Widget will be destroyed soon and calling MakeCurrent
   // after that could cause a crash (at least with GLX, see bug 1059793), unless
   // context is marked as destroyed.
   // There may be some textures still alive that will try to call MakeCurrent on
   // the context so let's make sure it is marked destroyed now.
   mGLContext->MarkDestroyed();
 
   mGLContext = nullptr;
@@ -659,16 +662,18 @@ CompositorOGL::CreateRenderTargetFromSou
 void
 CompositorOGL::SetRenderTarget(CompositingRenderTarget *aSurface)
 {
   MOZ_ASSERT(aSurface);
   CompositingRenderTargetOGL* surface
     = static_cast<CompositingRenderTargetOGL*>(aSurface);
   if (mCurrentRenderTarget != surface) {
     mCurrentRenderTarget = surface;
+    mContextStateTracker.PopOGLSection(gl(), "Frame");
+    mContextStateTracker.PushOGLSection(gl(), "Frame");
     surface->BindRenderTarget();
   }
 }
 
 CompositingRenderTarget*
 CompositorOGL::GetCurrentRenderTarget() const
 {
   return mCurrentRenderTarget;
@@ -763,16 +768,18 @@ CompositorOGL::BeginFrame(const nsIntReg
 #if MOZ_WIDGET_ANDROID
   TexturePoolOGL::Fill(gl());
 #endif
 
   mCurrentRenderTarget =
     CompositingRenderTargetOGL::RenderTargetForWindow(this,
                                                       IntSize(width, height));
   mCurrentRenderTarget->BindRenderTarget();
+
+  mContextStateTracker.PushOGLSection(gl(), "Frame");
 #ifdef DEBUG
   mWindowRenderTarget = mCurrentRenderTarget;
 #endif
 
   // Default blend function implements "OVER"
   mGLContext->fBlendFuncSeparate(LOCAL_GL_ONE, LOCAL_GL_ONE_MINUS_SRC_ALPHA,
                                  LOCAL_GL_ONE, LOCAL_GL_ONE);
   mGLContext->fEnable(LOCAL_GL_BLEND);
@@ -1340,16 +1347,18 @@ CompositorOGL::EndFrame()
     }
     RefPtr<DrawTarget> target = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(IntSize(rect.width, rect.height), SurfaceFormat::B8G8R8A8);
     CopyToTarget(target, nsIntPoint(), Matrix());
 
     WriteSnapshotToDumpFile(this, target);
   }
 #endif
 
+  mContextStateTracker.PopOGLSection(gl(), "Frame");
+
   mFrameInProgress = false;
 
   if (mTarget) {
     CopyToTarget(mTarget, mTargetBounds.TopLeft(), Matrix());
     mGLContext->fBindBuffer(LOCAL_GL_ARRAY_BUFFER, 0);
     mCurrentRenderTarget = nullptr;
     return;
   }
--- a/gfx/layers/opengl/CompositorOGL.h
+++ b/gfx/layers/opengl/CompositorOGL.h
@@ -1,16 +1,17 @@
 /* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*-
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_GFX_COMPOSITOROGL_H
 #define MOZILLA_GFX_COMPOSITOROGL_H
 
+#include "ContextStateTracker.h"
 #include "gfx2DGlue.h"
 #include "GLContextTypes.h"             // for GLContext, etc
 #include "GLDefs.h"                     // for GLuint, LOCAL_GL_TEXTURE_2D, etc
 #include "OGLShaderProgram.h"           // for ShaderProgramOGL, etc
 #include "Units.h"                      // for ScreenPoint
 #include "mozilla/Assertions.h"         // for MOZ_ASSERT, etc
 #include "mozilla/Attributes.h"         // for MOZ_OVERRIDE, MOZ_FINAL
 #include "mozilla/RefPtr.h"             // for TemporaryRef, RefPtr
@@ -382,16 +383,18 @@ private:
    * pointing upwards, but the layers/compositor coordinate system has the
    * y-axis pointing downwards, for good reason as Web pages are typically
    * scrolled downwards. So, some flipping has to take place; FlippedY does it.
    */
   GLint FlipY(GLint y) const { return mHeight - y; }
 
   RefPtr<CompositorTexturePoolOGL> mTexturePool;
 
+  ContextStateTrackerOGL mContextStateTracker;
+
   bool mDestroyed;
 
   /**
    * Height of the OpenGL context's primary framebuffer in pixels. Used by
    * FlipY for the y-flipping calculation.
    */
   GLint mHeight;
 
new file mode 100644
--- /dev/null
+++ b/gfx/thebes/ContextStateTracker.cpp
@@ -0,0 +1,137 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ContextStateTracker.h"
+#include "GLContext.h"
+#ifdef MOZ_ENABLE_PROFILER_SPS
+#include "ProfilerMarkers.h"
+#endif
+
+namespace mozilla {
+
+void
+ContextStateTrackerOGL::PushOGLSection(GLContext* aGL, const char* aSectionName)
+{
+  if (!profiler_feature_active("gpu")) {
+    return;
+  }
+
+  if (!aGL->IsSupported(gl::GLFeature::query_objects)) {
+    return;
+  }
+
+  if (mSectionStack.Length() > 0) {
+    // We need to end the current query since we're starting a new section;
+    // the outer section is restored when the new one finishes.
+    aGL->fEndQuery(LOCAL_GL_TIME_ELAPSED);
+    Top().mCpuTimeEnd = TimeStamp::Now();
+  }
+
+  ContextState newSection(aSectionName);
+
+  GLuint queryObject;
+  aGL->fGenQueries(1, &queryObject);
+  newSection.mStartQueryHandle = queryObject;
+  newSection.mCpuTimeStart = TimeStamp::Now();
+
+  aGL->fBeginQuery(LOCAL_GL_TIME_ELAPSED_EXT, queryObject);
+
+  mSectionStack.AppendElement(newSection);
+}
+
+void
+ContextStateTrackerOGL::PopOGLSection(GLContext* aGL, const char* aSectionName)
+{
+  // We might have ignored a section start if we started profiling
+  // in the middle of a section. If so, we ignore this unmatched end.
+  if (mSectionStack.Length() == 0) {
+    return;
+  }
+
+  int i = mSectionStack.Length() - 1;
+  MOZ_ASSERT(strcmp(mSectionStack[i].mSectionName, aSectionName) == 0);
+  aGL->fEndQuery(LOCAL_GL_TIME_ELAPSED);
+  mSectionStack[i].mCpuTimeEnd = TimeStamp::Now();
+  mCompletedSections.AppendElement(mSectionStack[i]);
+  mSectionStack.RemoveElementAt(i);
+
+  if (i - 1 >= 0) {
+    const char* sectionToRestore = Top().mSectionName;
+
+    // We need to restore the outer section.
+    // We'll do this by completing this section and adding a new
+    // one with the same name.
+    mCompletedSections.AppendElement(Top());
+    mSectionStack.RemoveElementAt(i - 1);
+
+    ContextState newSection(sectionToRestore);
+
+    GLuint queryObject;
+    aGL->fGenQueries(1, &queryObject);
+    newSection.mStartQueryHandle = queryObject;
+    newSection.mCpuTimeStart = TimeStamp::Now();
+
+    aGL->fBeginQuery(LOCAL_GL_TIME_ELAPSED_EXT, queryObject);
+
+    mSectionStack.AppendElement(newSection);
+  }
+
+  Flush(aGL);
+}
+
+void
+ContextStateTrackerOGL::Flush(GLContext* aGL)
+{
+  TimeStamp now = TimeStamp::Now();
+
+  while (mCompletedSections.Length() != 0) {
+    // On Mac we see QUERY_RESULT_AVAILABLE cause a GL flush if we query it
+    // too early. For profiling we'd rather have the last 200ms of data
+    // missing than cause measurement distortions.
+    if (mCompletedSections[0].mCpuTimeEnd + TimeDuration::FromMilliseconds(200) > now) {
+      break;
+    }
+
+    GLuint handle = mCompletedSections[0].mStartQueryHandle;
+
+    // We've waited 200ms, so content rendering at > 20 FPS will be ready. We
+    // shouldn't see any flushes now.
+    GLuint returned = 0;
+    aGL->fGetQueryObjectuiv(handle, LOCAL_GL_QUERY_RESULT_AVAILABLE, &returned);
+
+    if (!returned) {
+      break;
+    }
+
+    GLuint gpuTime = 0;
+    aGL->fGetQueryObjectuiv(handle, LOCAL_GL_QUERY_RESULT, &gpuTime);
+
+    aGL->fDeleteQueries(1, &handle);
+
+#ifdef MOZ_ENABLE_PROFILER_SPS
+    PROFILER_MARKER_PAYLOAD("gpu_timer_query", new GPUMarkerPayload(
+      mCompletedSections[0].mCpuTimeStart,
+      mCompletedSections[0].mCpuTimeEnd,
+      0,
+      gpuTime
+    ));
+#endif
+
+    mCompletedSections.RemoveElementAt(0);
+  }
+}
+
+void
+ContextStateTrackerOGL::DestroyOGL(GLContext* aGL)
+{
+  while (mCompletedSections.Length() != 0) {
+    GLuint handle = (GLuint)mCompletedSections[0].mStartQueryHandle;
+    aGL->fDeleteQueries(1, &handle);
+    mCompletedSections.RemoveElementAt(0);
+  }
+}
+
+}
+
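A single GL_TIME_ELAPSED section in isolation, sketched against raw GL entry points rather than Mozilla's GLContext wrappers (this assumes a context exposing ARB_timer_query, with the query entry points resolved by the usual loader; GL_TIME_ELAPSED corresponds to LOCAL_GL_TIME_ELAPSED above). The non-blocking poll mirrors Flush().

#include <GL/gl.h>
#include <GL/glext.h>  // GL_TIME_ELAPSED, GL_QUERY_RESULT*, query prototypes

GLuint BeginSection() {
  GLuint q = 0;
  glGenQueries(1, &q);
  glBeginQuery(GL_TIME_ELAPSED, q);  // GPU starts timing here
  return q;
}

void EndSection() {
  glEndQuery(GL_TIME_ELAPSED);       // GPU stops timing here
}

// Poll later; returns true and fills aNanos once the result is ready.
// Never block on QUERY_RESULT_AVAILABLE: querying too eagerly can force a
// GL flush, which is exactly the distortion the 200ms delay above avoids.
bool TryCollect(GLuint q, GLuint* aNanos) {
  GLuint ready = 0;
  glGetQueryObjectuiv(q, GL_QUERY_RESULT_AVAILABLE, &ready);
  if (!ready) {
    return false;                    // try again next frame
  }
  glGetQueryObjectuiv(q, GL_QUERY_RESULT, aNanos);
  glDeleteQueries(1, &q);
  return true;
}
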
new file mode 100644
--- /dev/null
+++ b/gfx/thebes/ContextStateTracker.h
@@ -0,0 +1,83 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GFX_CONTEXTSTATETRACKER_H
+#define GFX_CONTEXTSTATETRACKER_H
+
+#include "GLTypes.h"
+#include "mozilla/TimeStamp.h"
+#include "nsTArray.h"
+#include <string.h>
+
+namespace mozilla {
+namespace gl {
+class GLContext;
+}
+
+/**
+ * This class tracks the state of the context for debugging and profiling.
+ * Each section pushes a new stack entry and must be matched by an end section.
+ * All nested sections must be ended before ending a parent section.
+ */
+class ContextStateTracker {
+public:
+  ContextStateTracker() {}
+
+private:
+
+  bool IsProfiling() { return true; }
+
+protected:
+  typedef GLuint TimerQueryHandle;
+
+  class ContextState {
+  public:
+    ContextState(const char* aSectionName)
+      : mSectionName(aSectionName)
+    {}
+
+    const char* mSectionName;
+    mozilla::TimeStamp mCpuTimeStart;
+    mozilla::TimeStamp mCpuTimeEnd;
+    TimerQueryHandle mStartQueryHandle;
+  };
+
+  ContextState& Top() {
+    MOZ_ASSERT(mSectionStack.Length());
+    return mSectionStack[mSectionStack.Length() - 1];
+  }
+
+  nsTArray<ContextState> mCompletedSections;
+  nsTArray<ContextState> mSectionStack;
+};
+
+/*
+class ID3D11DeviceContext;
+
+class ContextStateTrackerD3D11 MOZ_FINAL : public ContextStateTracker {
+public:
+  // TODO Implement me
+  void PushD3D11Section(ID3D11DeviceContext* aCtxt, const char* aSectionName) {}
+  void PopD3D11Section(ID3D11DeviceContext* aCtxt, const char* aSectionName) {}
+  void DestroyD3D11(ID3D11DeviceContext* aCtxt) {}
+
+private:
+  void Flush();
+};
+*/
+
+class ContextStateTrackerOGL MOZ_FINAL : public ContextStateTracker {
+  typedef mozilla::gl::GLContext GLContext;
+public:
+  void PushOGLSection(GLContext* aGL, const char* aSectionName);
+  void PopOGLSection(GLContext* aGL, const char* aSectionName);
+  void DestroyOGL(GLContext* aGL);
+private:
+  void Flush(GLContext* aGL);
+};
+
+}
+#endif
+
--- a/gfx/thebes/moz.build
+++ b/gfx/thebes/moz.build
@@ -1,15 +1,16 @@
 # -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 EXPORTS += [
+    'ContextStateTracker.h',
     'DrawMode.h',
     'gfx2DGlue.h',
     'gfx3DMatrix.h',
     'gfxAlphaRecovery.h',
     'gfxASurface.h',
     'gfxBaseSharedMemorySurface.h',
     'gfxBlur.h',
     'gfxColor.h',
@@ -191,16 +192,17 @@ elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'wi
 # Are we targeting x86 or x64?  If so, build gfxAlphaRecoverySSE2.cpp.
 if CONFIG['INTEL_ARCHITECTURE']:
     SOURCES += ['gfxAlphaRecoverySSE2.cpp']
     # The file uses SSE2 intrinsics, so it needs special compile flags on some
     # compilers.
     SOURCES['gfxAlphaRecoverySSE2.cpp'].flags += CONFIG['SSE2_FLAGS']
 
 SOURCES += [
+    'ContextStateTracker.cpp',
     # Includes mac system header conflicting with point/size,
     # and includes glxXlibSurface.h which drags in Xrender.h
     'gfxASurface.cpp',
     # on X11, gfxDrawable.cpp includes X headers for an old workaround which
     # we could consider removing soon (affects Ubuntus older than 10.04 LTS)
     # which currently prevent it from joining UNIFIED_SOURCES.
     'gfxDrawable.cpp',
     # gfxPlatform.cpp includes mac system header conflicting with point/size
--- a/ipc/chromium/src/chrome/common/ipc_channel_posix.cc
+++ b/ipc/chromium/src/chrome/common/ipc_channel_posix.cc
@@ -378,24 +378,30 @@ bool Channel::ChannelImpl::EnqueueHelloM
     Close();
     return false;
   }
 
   OutputQueuePush(msg.release());
   return true;
 }
 
-static void
-ClearAndShrink(std::string& s, size_t capacity)
+void Channel::ChannelImpl::ClearAndShrinkInputOverflowBuf()
 {
-  // This swap trick is the closest thing C++ has to a guaranteed way to
-  // shrink the capacity of a string.
-  std::string tmp;
-  tmp.reserve(capacity);
-  s.swap(tmp);
+  // If input_overflow_buf_ has grown, shrink it back to its normal size.
+  static size_t previousCapacityAfterClearing = 0;
+  if (input_overflow_buf_.capacity() > previousCapacityAfterClearing) {
+    // This swap trick is the closest thing C++ has to a guaranteed way
+    // to shrink the capacity of a string.
+    std::string tmp;
+    tmp.reserve(Channel::kReadBufferSize);
+    input_overflow_buf_.swap(tmp);
+    previousCapacityAfterClearing = input_overflow_buf_.capacity();
+  } else {
+    input_overflow_buf_.clear();
+  }
 }
 
 bool Channel::ChannelImpl::Connect() {
   if (mode_ == MODE_SERVER && uses_fifo_) {
     if (server_listen_pipe_ == -1) {
       return false;
     }
     MessageLoopForIO::current()->WatchFileDescriptor(
@@ -514,17 +520,17 @@ bool Channel::ChannelImpl::ProcessIncomi
     const char *end;
     if (input_overflow_buf_.empty()) {
       overflowp = NULL;
       p = input_buf_;
       end = p + bytes_read;
     } else {
       if (input_overflow_buf_.size() >
          static_cast<size_t>(kMaximumMessageSize - bytes_read)) {
-        ClearAndShrink(input_overflow_buf_, Channel::kReadBufferSize);
+        ClearAndShrinkInputOverflowBuf();
         CHROMIUM_LOG(ERROR) << "IPC message is too big";
         return false;
       }
       input_overflow_buf_.append(input_buf_, bytes_read);
       overflowp = p = input_overflow_buf_.data();
       end = p + input_overflow_buf_.size();
     }
 
@@ -623,17 +629,17 @@ bool Channel::ChannelImpl::ProcessIncomi
         }
         p = message_tail;
       } else {
         // Last message is partial.
         break;
       }
     }
     if (end == p) {
-      ClearAndShrink(input_overflow_buf_, Channel::kReadBufferSize);
+      ClearAndShrinkInputOverflowBuf();
     } else if (!overflowp) {
       // p is from input_buf_
       input_overflow_buf_.assign(p, end - p);
     } else if (p > overflowp) {
       // p is from input_overflow_buf_
       input_overflow_buf_.erase(0, p - overflowp);
     }
     input_overflow_fds_ = std::vector<int>(&fds[fds_i], &fds[num_fds]);
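
The swap trick referenced in the comment above, shown in isolation (this is not the patch's member function): shrink_to_fit() is only a non-binding request in C++11, so swapping with a freshly reserved string remains the portable way to actually release capacity.

#include <cassert>
#include <string>

void ClearAndShrink(std::string& s, size_t capacity) {
  std::string tmp;
  tmp.reserve(capacity);  // pre-size the replacement buffer
  s.swap(tmp);            // s takes the small buffer; the large one is
                          // destroyed with tmp at end of scope
}

int main() {
  std::string buf(1 << 20, 'x');  // ~1 MiB of data
  buf.clear();                    // size() == 0, capacity typically unchanged
  ClearAndShrink(buf, 4096);
  assert(buf.empty() && buf.capacity() >= 4096);
}
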
--- a/ipc/chromium/src/chrome/common/ipc_channel_posix.h
+++ b/ipc/chromium/src/chrome/common/ipc_channel_posix.h
@@ -54,16 +54,18 @@ class Channel::ChannelImpl : public Mess
  private:
   void Init(Mode mode, Listener* listener);
   bool CreatePipe(const std::wstring& channel_id, Mode mode);
   bool EnqueueHelloMessage();
 
   bool ProcessIncomingMessages();
   bool ProcessOutgoingMessages();
 
+  void ClearAndShrinkInputOverflowBuf();
+
   // MessageLoopForIO::Watcher implementation.
   virtual void OnFileCanReadWithoutBlocking(int fd);
   virtual void OnFileCanWriteWithoutBlocking(int fd);
 
 #if defined(OS_MACOSX)
   void CloseDescriptors(uint32_t pending_fd_id);
 #endif
 
--- a/js/ipc/JavaScriptBase.h
+++ b/js/ipc/JavaScriptBase.h
@@ -30,18 +30,20 @@ class JavaScriptBase : public WrapperOwn
     virtual ~JavaScriptBase() {}
 
     virtual void ActorDestroy(WrapperOwner::ActorDestroyReason why) {
         WrapperOwner::ActorDestroy(why);
     }
 
     /*** IPC handlers ***/
 
-    bool RecvPreventExtensions(const uint64_t &objId, ReturnStatus *rs) {
-        return Answer::RecvPreventExtensions(ObjectId::deserialize(objId), rs);
+    bool RecvPreventExtensions(const uint64_t &objId, ReturnStatus *rs,
+                               bool *succeeded) {
+        return Answer::RecvPreventExtensions(ObjectId::deserialize(objId), rs,
+                                             succeeded);
     }
     bool RecvGetPropertyDescriptor(const uint64_t &objId, const JSIDVariant &id,
                                      ReturnStatus *rs,
                                      PPropertyDescriptor *out) {
         return Answer::RecvGetPropertyDescriptor(ObjectId::deserialize(objId), id, rs, out);
     }
     bool RecvGetOwnPropertyDescriptor(const uint64_t &objId,
                                         const JSIDVariant &id,
@@ -126,18 +128,19 @@ class JavaScriptBase : public WrapperOwn
         return Answer::RecvDropObject(ObjectId::deserialize(objId));
     }
 
     /*** Dummy call handlers ***/
 
     bool SendDropObject(const ObjectId &objId) {
         return Base::SendDropObject(objId.serialize());
     }
-    bool SendPreventExtensions(const ObjectId &objId, ReturnStatus *rs) {
-        return Base::SendPreventExtensions(objId.serialize(), rs);
+    bool SendPreventExtensions(const ObjectId &objId, ReturnStatus *rs,
+                               bool *succeeded) {
+        return Base::SendPreventExtensions(objId.serialize(), rs, succeeded);
     }
     bool SendGetPropertyDescriptor(const ObjectId &objId, const JSIDVariant &id,
                                      ReturnStatus *rs,
                                      PPropertyDescriptor *out) {
         return Base::SendGetPropertyDescriptor(objId.serialize(), id, rs, out);
     }
     bool SendGetOwnPropertyDescriptor(const ObjectId &objId,
                                       const JSIDVariant &id,
--- a/js/ipc/PJavaScript.ipdl
+++ b/js/ipc/PJavaScript.ipdl
@@ -19,17 +19,17 @@ prio(normal upto high) sync protocol PJa
 {
     manager PContent or PContentBridge;
 
 both:
     // Sent when a CPOW has been finalized and table entries can be freed up.
     async DropObject(uint64_t objId);
 
     // These roughly map to the ProxyHandler hooks that CPOWs need.
-    prio(high) sync PreventExtensions(uint64_t objId) returns (ReturnStatus rs);
+    prio(high) sync PreventExtensions(uint64_t objId) returns (ReturnStatus rs, bool result);
     prio(high) sync GetPropertyDescriptor(uint64_t objId, JSIDVariant id) returns (ReturnStatus rs, PPropertyDescriptor result);
     prio(high) sync GetOwnPropertyDescriptor(uint64_t objId, JSIDVariant id) returns (ReturnStatus rs, PPropertyDescriptor result);
     prio(high) sync DefineProperty(uint64_t objId, JSIDVariant id, PPropertyDescriptor descriptor) returns (ReturnStatus rs);
     prio(high) sync Delete(uint64_t objId, JSIDVariant id) returns (ReturnStatus rs, bool successful);
 
     prio(high) sync Has(uint64_t objId, JSIDVariant id) returns (ReturnStatus rs, bool has);
     prio(high) sync HasOwn(uint64_t objId, JSIDVariant id) returns (ReturnStatus rs, bool has);
     prio(high) sync Get(uint64_t objId, ObjectVariant receiver, JSIDVariant id) returns (ReturnStatus rs, JSVariant result);
--- a/js/ipc/WrapperAnswer.cpp
+++ b/js/ipc/WrapperAnswer.cpp
@@ -53,27 +53,30 @@ WrapperAnswer::fail(JSContext *cx, Retur
 bool
 WrapperAnswer::ok(ReturnStatus *rs)
 {
     *rs = ReturnStatus(ReturnSuccess());
     return true;
 }
 
 bool
-WrapperAnswer::RecvPreventExtensions(const ObjectId &objId, ReturnStatus *rs)
+WrapperAnswer::RecvPreventExtensions(const ObjectId &objId, ReturnStatus *rs,
+                                     bool *succeeded)
 {
     AutoSafeJSContext cx;
     JSAutoRequest request(cx);
 
+    *succeeded = false;
+
     RootedObject obj(cx, findObjectById(cx, objId));
     if (!obj)
         return fail(cx, rs);
 
     JSAutoCompartment comp(cx, obj);
-    if (!JS_PreventExtensions(cx, obj))
+    if (!JS_PreventExtensions(cx, obj, succeeded))
         return fail(cx, rs);
 
     LOG("%s.preventExtensions()", ReceiverObj(objId));
 
     return ok(rs);
 }
 
 static void
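
The shape of this change, sketched generically (illustrative names, not the real CPOW API): the bool return keeps meaning "the call itself completed", while the new out-param carries the hook's own verdict, so an object refusing preventExtensions is no longer reported as an IPC error.

#include <cstdio>

// Illustrative stand-in for the remoted hook.
bool PreventExtensions(bool aObjectRefuses, bool* aSucceeded) {
  // ... a transport failure would `return false` here ...
  *aSucceeded = !aObjectRefuses;  // the operation's answer, not an error
  return true;                    // the call itself completed normally
}

int main() {
  bool ok = false;
  if (!PreventExtensions(/* aObjectRefuses = */ true, &ok)) {
    std::fprintf(stderr, "transport failure\n");
  } else {
    std::printf("preventExtensions %s\n", ok ? "succeeded" : "was refused");
  }
}
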
--- a/js/ipc/WrapperAnswer.h
+++ b/js/ipc/WrapperAnswer.h
@@ -13,17 +13,18 @@
 namespace mozilla {
 namespace jsipc {
 
 class WrapperAnswer : public virtual JavaScriptShared
 {
   public:
     explicit WrapperAnswer(JSRuntime *rt) : JavaScriptShared(rt) {}
 
-    bool RecvPreventExtensions(const ObjectId &objId, ReturnStatus *rs);
+    bool RecvPreventExtensions(const ObjectId &objId, ReturnStatus *rs,
+                               bool *succeeded);
     bool RecvGetPropertyDescriptor(const ObjectId &objId, const JSIDVariant &id,
                                    ReturnStatus *rs,
                                    PPropertyDescriptor *out);
     bool RecvGetOwnPropertyDescriptor(const ObjectId &objId,
                                       const JSIDVariant &id,
                                       ReturnStatus *rs,
                                       PPropertyDescriptor *out);
     bool RecvDefineProperty(const ObjectId &objId, const JSIDVariant &id,
--- a/js/ipc/WrapperOwner.cpp
+++ b/js/ipc/WrapperOwner.cpp
@@ -66,18 +66,18 @@ class CPOWProxyHandler : public BaseProx
     virtual bool getOwnPropertyDescriptor(JSContext *cx, HandleObject proxy, HandleId id,
                                           MutableHandle<JSPropertyDescriptor> desc) const MOZ_OVERRIDE;
     virtual bool defineProperty(JSContext *cx, HandleObject proxy, HandleId id,
                                 MutableHandle<JSPropertyDescriptor> desc) const MOZ_OVERRIDE;
     virtual bool ownPropertyKeys(JSContext *cx, HandleObject proxy,
                                  AutoIdVector &props) const MOZ_OVERRIDE;
     virtual bool delete_(JSContext *cx, HandleObject proxy, HandleId id, bool *bp) const MOZ_OVERRIDE;
     virtual bool enumerate(JSContext *cx, HandleObject proxy, AutoIdVector &props) const MOZ_OVERRIDE;
+    virtual bool preventExtensions(JSContext *cx, HandleObject proxy, bool *succeeded) const MOZ_OVERRIDE;
     virtual bool isExtensible(JSContext *cx, HandleObject proxy, bool *extensible) const MOZ_OVERRIDE;
-    virtual bool preventExtensions(JSContext *cx, HandleObject proxy) const MOZ_OVERRIDE;
     virtual bool has(JSContext *cx, HandleObject proxy, HandleId id, bool *bp) const MOZ_OVERRIDE;
     virtual bool get(JSContext *cx, HandleObject proxy, HandleObject receiver,
                      HandleId id, MutableHandleValue vp) const MOZ_OVERRIDE;
     virtual bool set(JSContext *cx, JS::HandleObject proxy, JS::HandleObject receiver,
                      JS::HandleId id, bool strict, JS::MutableHandleValue vp) const MOZ_OVERRIDE;
     virtual bool call(JSContext *cx, HandleObject proxy, const CallArgs &args) const MOZ_OVERRIDE;
     virtual bool construct(JSContext *cx, HandleObject proxy, const CallArgs &args) const MOZ_OVERRIDE;
 
@@ -108,36 +108,16 @@ const CPOWProxyHandler CPOWProxyHandler:
     WrapperOwner *owner = OwnerOf(proxy);                               \
     if (!owner->active()) {                                             \
         JS_ReportError(cx, "cannot use a CPOW whose process is gone");  \
         return false;                                                   \
     }                                                                   \
     return owner->call args;
 
 bool
-CPOWProxyHandler::preventExtensions(JSContext *cx, HandleObject proxy) const
-{
-    FORWARD(preventExtensions, (cx, proxy));
-}
-
-bool
-WrapperOwner::preventExtensions(JSContext *cx, HandleObject proxy)
-{
-    ObjectId objId = idOf(proxy);
-
-    ReturnStatus status;
-    if (!SendPreventExtensions(objId, &status))
-        return ipcfail(cx);
-
-    LOG_STACK();
-
-    return ok(cx, status);
-}
-
-bool
 CPOWProxyHandler::getPropertyDescriptor(JSContext *cx, HandleObject proxy, HandleId id,
                                         MutableHandle<JSPropertyDescriptor> desc) const
 {
     FORWARD(getPropertyDescriptor, (cx, proxy, id, desc));
 }
 
 bool
 WrapperOwner::getPropertyDescriptor(JSContext *cx, HandleObject proxy, HandleId id,
@@ -471,16 +451,36 @@ CPOWProxyHandler::getOwnEnumerableProper
 
 bool
 WrapperOwner::getOwnEnumerablePropertyKeys(JSContext *cx, HandleObject proxy, AutoIdVector &props)
 {
     return getPropertyKeys(cx, proxy, JSITER_OWNONLY, props);
 }
 
 bool
+CPOWProxyHandler::preventExtensions(JSContext *cx, HandleObject proxy, bool *succeeded) const
+{
+    FORWARD(preventExtensions, (cx, proxy, succeeded));
+}
+
+bool
+WrapperOwner::preventExtensions(JSContext *cx, HandleObject proxy, bool *succeeded)
+{
+    ObjectId objId = idOf(proxy);
+
+    ReturnStatus status;
+    if (!SendPreventExtensions(objId, &status, succeeded))
+        return ipcfail(cx);
+
+    LOG_STACK();
+
+    return ok(cx, status);
+}
+
+bool
 CPOWProxyHandler::isExtensible(JSContext *cx, HandleObject proxy, bool *extensible) const
 {
     FORWARD(isExtensible, (cx, proxy, extensible));
 }
 
 bool
 WrapperOwner::isExtensible(JSContext *cx, HandleObject proxy, bool *extensible)
 {
--- a/js/ipc/WrapperOwner.h
+++ b/js/ipc/WrapperOwner.h
@@ -35,18 +35,18 @@ class WrapperOwner : public virtual Java
     // (The traps should be in the same order like js/src/jsproxy.h)
     bool getOwnPropertyDescriptor(JSContext *cx, JS::HandleObject proxy, JS::HandleId id,
                                   JS::MutableHandle<JSPropertyDescriptor> desc);
     bool defineProperty(JSContext *cx, JS::HandleObject proxy, JS::HandleId id,
                         JS::MutableHandle<JSPropertyDescriptor> desc);
     bool ownPropertyKeys(JSContext *cx, JS::HandleObject proxy, JS::AutoIdVector &props);
     bool delete_(JSContext *cx, JS::HandleObject proxy, JS::HandleId id, bool *bp);
     bool enumerate(JSContext *cx, JS::HandleObject proxy, JS::AutoIdVector &props);
+    bool preventExtensions(JSContext *cx, JS::HandleObject proxy, bool *succeeded);
     bool isExtensible(JSContext *cx, JS::HandleObject proxy, bool *extensible);
-    bool preventExtensions(JSContext *cx, JS::HandleObject proxy);
     bool has(JSContext *cx, JS::HandleObject proxy, JS::HandleId id, bool *bp);
     bool get(JSContext *cx, JS::HandleObject proxy, JS::HandleObject receiver,
              JS::HandleId id, JS::MutableHandleValue vp);
     bool set(JSContext *cx, JS::HandleObject proxy, JS::HandleObject receiver,
              JS::HandleId id, bool strict, JS::MutableHandleValue vp);
     bool callOrConstruct(JSContext *cx, JS::HandleObject proxy, const JS::CallArgs &args,
                          bool construct);
 
@@ -101,17 +101,18 @@ class WrapperOwner : public virtual Java
     // Check whether a return status is okay, and if not, propagate its error.
     bool ok(JSContext *cx, const ReturnStatus &status);
 
     bool inactive_;
 
     /*** Dummy call handlers ***/
   public:
     virtual bool SendDropObject(const ObjectId &objId) = 0;
-    virtual bool SendPreventExtensions(const ObjectId &objId, ReturnStatus *rs) = 0;
+    virtual bool SendPreventExtensions(const ObjectId &objId, ReturnStatus *rs,
+                                       bool *succeeded) = 0;
     virtual bool SendGetPropertyDescriptor(const ObjectId &objId, const JSIDVariant &id,
                                            ReturnStatus *rs,
                                            PPropertyDescriptor *out) = 0;
     virtual bool SendGetOwnPropertyDescriptor(const ObjectId &objId,
                                               const JSIDVariant &id,
                                               ReturnStatus *rs,
                                               PPropertyDescriptor *out) = 0;
     virtual bool SendDefineProperty(const ObjectId &objId, const JSIDVariant &id,
--- a/js/src/asmjs/AsmJSValidate.cpp
+++ b/js/src/asmjs/AsmJSValidate.cpp
@@ -670,45 +670,16 @@ class RetType
           case Float32x4: return MIRType_Float32x4;
         }
         MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected return type");
     }
     bool operator==(RetType rhs) const { return which_ == rhs.which_; }
     bool operator!=(RetType rhs) const { return which_ != rhs.which_; }
 };
 
-// Represents the subset of Type that can be used as a return type of a builtin
-// Math function.
-class MathRetType
-{
-  public:
-    enum Which {
-        Double   = Type::Double,
-        Float    = Type::Float,
-        Floatish = Type::Floatish,
-        Signed   = Type::Signed,
-        Unsigned = Type::Unsigned
-    };
-
-  private:
-    Which which_;
-
-  public:
-    MathRetType() : which_(Which(-1)) {}
-    MOZ_IMPLICIT MathRetType(Which w) : which_(w) {}
-
-    Type toType() const {
-        return Type(Type::Which(which_));
-    }
-
-    Which which() const {
-        return which_;
-    }
-};
-
 namespace {
 
 // Represents the subset of Type that can be used as a variable or
 // argument's type. Note: AsmJSCoercion and VarType are kept separate to
 // make very clear the signed/int distinction: a coercion may explicitly sign
 // an *expression* but, when stored as a variable, this signedness information
 // is explicitly thrown away by the asm.js type system. E.g., in
 //
@@ -4403,17 +4374,17 @@ CheckAssign(FunctionCompiler &f, ParseNo
 
     if (lhs->getKind() == PNK_NAME)
         return CheckAssignName(f, lhs, rhs, def, type);
 
     return f.fail(assign, "left-hand side of assignment must be a variable or array access");
 }
 
 static bool
-CheckMathIMul(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetType *type)
+CheckMathIMul(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
 {
     if (CallArgListLength(call) != 2)
         return f.fail(call, "Math.imul must be passed 2 arguments");
 
     ParseNode *lhs = CallArgList(call);
     ParseNode *rhs = NextNode(lhs);
 
     MDefinition *lhsDef;
@@ -4427,143 +4398,141 @@ CheckMathIMul(FunctionCompiler &f, Parse
         return false;
 
     if (!lhsType.isIntish())
         return f.failf(lhs, "%s is not a subtype of intish", lhsType.toChars());
     if (!rhsType.isIntish())
         return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars());
 
     *def = f.mul(lhsDef, rhsDef, MIRType_Int32, MMul::Integer);
-    *type = MathRetType::Signed;
+    *type = Type::Signed;
     return true;
 }
 
 static bool
-CheckMathClz32(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetType *type)
+CheckMathClz32(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
 {
     if (CallArgListLength(call) != 1)
         return f.fail(call, "Math.clz32 must be passed 1 argument");
 
     ParseNode *arg = CallArgList(call);
 
     MDefinition *argDef;
     Type argType;
     if (!CheckExpr(f, arg, &argDef, &argType))
         return false;
 
     if (!argType.isIntish())
         return f.failf(arg, "%s is not a subtype of intish", argType.toChars());
 
     *def = f.unary<MClz>(argDef);
-    *type = MathRetType::Signed;
+    *type = Type::Fixnum;
     return true;
 }
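
The change above narrows Math.clz32's result from signed to fixnum. Since a
fixnum is a subtype of both signed and unsigned, the result can now be compared
either way without an explicit coercion. A minimal sketch, mirroring the tests
added to testMathLib.js later in this changeset (module and linking boilerplate
assumed):

    function m(stdlib) {
        "use asm";
        var clz32 = stdlib.Math.clz32;
        function f(i, j) {
            i = i|0;
            j = j|0;
            // fixnum validates against both signed (j|0) and unsigned (j>>>0)
            return (clz32(i) < (j>>>0))|0;
        }
        return f;
    }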
 
 static bool
-CheckMathAbs(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetType *type)
+CheckMathAbs(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
 {
     if (CallArgListLength(call) != 1)
         return f.fail(call, "Math.abs must be passed 1 argument");
 
     ParseNode *arg = CallArgList(call);
 
     MDefinition *argDef;
     Type argType;
     if (!CheckExpr(f, arg, &argDef, &argType))
         return false;
 
     if (argType.isSigned()) {
         *def = f.unary<MAbs>(argDef, MIRType_Int32);
-        *type = MathRetType::Unsigned;
+        *type = Type::Unsigned;
         return true;
     }
 
     if (argType.isMaybeDouble()) {
         *def = f.unary<MAbs>(argDef, MIRType_Double);
-        *type = MathRetType::Double;
+        *type = Type::Double;
         return true;
     }
 
     if (argType.isMaybeFloat()) {
         *def = f.unary<MAbs>(argDef, MIRType_Float32);
-        *type = MathRetType::Floatish;
+        *type = Type::Floatish;
         return true;
     }
 
     return f.failf(call, "%s is not a subtype of signed, float? or double?", argType.toChars());
 }
 
 static bool
-CheckMathSqrt(FunctionCompiler &f, ParseNode *call, MDefinition **def, MathRetType *type)
+CheckMathSqrt(FunctionCompiler &f, ParseNode *call, MDefinition **def, Type *type)
 {
     if (CallArgListLength(call) != 1)
         return f.fail(call, "Math.sqrt must be passed 1 argument");
 
     ParseNode *arg = CallArgList(call);
 
     MDefinition *argDef;
     Type argType;
     if (!CheckExpr(f, arg, &argDef, &argType))
         return false;
 
     if (argType.isMaybeDouble()) {
         *def = f.unary<MSqrt>(argDef, MIRType_Double);
-        *type = MathRetType::Double;
+        *type = Type::Double;
         return true;
     }
 
     if (argType.isMaybeFloat()) {
         *def = f.unary<MSqrt>(argDef, MIRType_Float32);
-        *type = MathRetType::Floatish;
+        *type = Type::Floatish;
         return true;
     }
 
     return f.failf(call, "%s is neither a subtype of double? nor float?", argType.toChars());
 }
 
 static bool
-CheckMathMinMax(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, bool isMax,
-                MathRetType *type)
+CheckMathMinMax(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, bool isMax, Type *type)
 {
     if (CallArgListLength(callNode) < 2)
         return f.fail(callNode, "Math.min/max must be passed at least 2 arguments");
 
     ParseNode *firstArg = CallArgList(callNode);
     MDefinition *firstDef;
     Type firstType;
     if (!CheckExpr(f, firstArg, &firstDef, &firstType))
         return false;
 
     if (firstType.isMaybeDouble()) {
-        *type = MathRetType::Double;
+        *type = Type::Double;
         firstType = Type::MaybeDouble;
     } else if (firstType.isMaybeFloat()) {
-        *type = MathRetType::Float;
+        *type = Type::Float;
         firstType = Type::MaybeFloat;
-    } else if (firstType.isInt()) {
-        *type = MathRetType::Signed;
-        firstType = Type::Int;
+    } else if (firstType.isSigned()) {
+        *type = Type::Signed;
+        firstType = Type::Signed;
     } else {
         return f.failf(firstArg, "%s is not a subtype of double?, float? or int",
                        firstType.toChars());
     }
 
-    MIRType opType = firstType.toMIRType();
     MDefinition *lastDef = firstDef;
     ParseNode *nextArg = NextNode(firstArg);
     for (unsigned i = 1; i < CallArgListLength(callNode); i++, nextArg = NextNode(nextArg)) {
         MDefinition *nextDef;
         Type nextType;
         if (!CheckExpr(f, nextArg, &nextDef, &nextType))
             return false;
 
         if (!(nextType <= firstType))
             return f.failf(nextArg, "%s is not a subtype of %s", nextType.toChars(), firstType.toChars());
 
-        lastDef = f.minMax(lastDef, nextDef, opType, isMax);
+        lastDef = f.minMax(lastDef, nextDef, firstType.toMIRType(), isMax);
     }
 
     *def = lastDef;
     return true;
 }
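
Note the switch from isInt() to isSigned() above: integer min/max now requires
each argument to be fully signed rather than merely int, so a bare int
expression must be coerced with |0 first. A sketch of the distinction,
mirroring the new negative test in testMathLib.js (min is assumed to be
imported as glob.Math.min in the module preamble):

    function f(x) {
        x = x|0;
        // return min(x, 1)|0;    // rejected: x has type int, not signed
        return min(x|0, 1)|0;     // accepted: x|0 is signed
    }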
 
 typedef bool (*CheckArgType)(FunctionCompiler &f, ParseNode *argNode, Type type);
 
@@ -4829,36 +4798,36 @@ CheckCoercionArg(FunctionCompiler &f, Pa
         MOZ_CRASH("not call coercions");
     }
 
     *type = retType.toType();
     return true;
 }
 
 static bool
-CheckMathFRound(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, MathRetType *type)
+CheckMathFRound(FunctionCompiler &f, ParseNode *callNode, MDefinition **def, Type *type)
 {
     if (CallArgListLength(callNode) != 1)
         return f.fail(callNode, "Math.fround must be passed 1 argument");
 
     ParseNode *argNode = CallArgList(callNode);
     MDefinition *argDef;
     Type argType;
     if (!CheckCoercionArg(f, argNode, AsmJS_FRound, &argDef, &argType))
         return false;
 
     MOZ_ASSERT(argType == Type::Float);
     *def = argDef;
-    *type = MathRetType::Float;
+    *type = Type::Float;
     return true;
 }
 
 static bool
 CheckMathBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSMathBuiltinFunction func,
-                     MDefinition **def, MathRetType *type)
+                     MDefinition **def, Type *type)
 {
     unsigned arity = 0;
     AsmJSImmKind doubleCallee, floatCallee;
     switch (func) {
       case AsmJSMathBuiltin_imul:   return CheckMathIMul(f, callNode, def, type);
       case AsmJSMathBuiltin_clz32:  return CheckMathClz32(f, callNode, def, type);
       case AsmJSMathBuiltin_abs:    return CheckMathAbs(f, callNode, def, type);
       case AsmJSMathBuiltin_sqrt:   return CheckMathSqrt(f, callNode, def, type);
@@ -4921,23 +4890,23 @@ CheckMathBuiltinCall(FunctionCompiler &f
     }
 
     f.finishCallArgs(&call);
 
     AsmJSImmKind callee = opIsDouble ? doubleCallee : floatCallee;
     if (!f.builtinCall(callee, call, varType.toMIRType(), def))
         return false;
 
-    *type = MathRetType(opIsDouble ? MathRetType::Double : MathRetType::Floatish);
+    *type = opIsDouble ? Type::Double : Type::Floatish;
     return true;
 }
 
 typedef Vector<MDefinition*, 4, SystemAllocPolicy> DefinitionVector;
 
-namespace {  
+namespace {
+// Include CheckSimdCallArgs in the unnamed namespace to avoid an MSVC name lookup bug.
 
 template<class CheckArgOp>
 static bool
 CheckSimdCallArgs(FunctionCompiler &f, ParseNode *call, unsigned expectedArity,
                   const CheckArgOp &checkArg, DefinitionVector *defs)
 {
     unsigned numArgs = CallArgListLength(call);
@@ -5343,24 +5312,18 @@ CheckSimdCtorCall(FunctionCompiler &f, P
 
 static bool
 CheckUncoercedCall(FunctionCompiler &f, ParseNode *expr, MDefinition **def, Type *type)
 {
     MOZ_ASSERT(expr->isKind(PNK_CALL));
 
     const ModuleCompiler::Global *global;
     if (IsCallToGlobal(f.m(), expr, &global)) {
-        if (global->isMathFunction()) {
-            MathRetType mathRetType;
-            if (!CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), def, &mathRetType))
-                return false;
-            *type = mathRetType.toType();
-            return true;
-        }
-
+        if (global->isMathFunction())
+            return CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), def, type);
         if (global->isSimdCtor())
             return CheckSimdCtorCall(f, expr, global, def, type);
         if (global->isSimdOperation())
             return CheckSimdOperationCall(f, expr, global, def, type);
     }
 
     return f.fail(expr, "all function calls must either be calls to standard lib math functions, "
                         "ignored (via f(); or comma-expression), coerced to signed (via f()|0), "
@@ -5430,21 +5393,21 @@ CoerceResult(FunctionCompiler &f, ParseN
 
     return true;
 }
 
 static bool
 CheckCoercedMathBuiltinCall(FunctionCompiler &f, ParseNode *callNode, AsmJSMathBuiltinFunction func,
                             RetType retType, MDefinition **def, Type *type)
 {
-    MDefinition *result;
-    MathRetType resultType;
-    if (!CheckMathBuiltinCall(f, callNode, func, &result, &resultType))
-        return false;
-    return CoerceResult(f, callNode, retType, result, resultType.toType(), def, type);
+    MDefinition *resultDef;
+    Type resultType;
+    if (!CheckMathBuiltinCall(f, callNode, func, &resultDef, &resultType))
+        return false;
+    return CoerceResult(f, callNode, retType, resultDef, resultType, def, type);
 }
 
 static bool
 CheckCoercedSimdCall(FunctionCompiler &f, ParseNode *call, const ModuleCompiler::Global *global,
                      RetType retType, MDefinition **def, Type *type)
 {
     if (global->isSimdCtor()) {
         if (!CheckSimdCtorCall(f, call, global, def, type))
new file mode 100644
--- /dev/null
+++ b/js/src/builtin/AtomicsObject.cpp
@@ -0,0 +1,1090 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * JS Atomics pseudo-module.
+ *
+ * See "Spec: JavaScript Shared Memory, Atomics, and Locks" for the
+ * full specification.
+ *
+ * In addition to what is specified there, we throw an Error object if
+ * the futex API hooks have not been installed on the runtime.
+ * Essentially that is an implementation error at a higher level.
+ *
+ *
+ * Note on the current implementation of atomic operations.
+ *
+ * The Mozilla atomics are not sufficient to implement these APIs
+ * because we need to support 8-bit, 16-bit, and 32-bit data: the
+ * Mozilla atomics only support 32-bit data.
+ *
+ * At the moment we include mozilla/Atomics.h, which will define
+ * MOZ_HAVE_CXX11_ATOMICS and include <atomic> if we have C++11
+ * atomics.
+ *
+ * If MOZ_HAVE_CXX11_ATOMICS is set we'll use C++11 atomics.
+ *
+ * Otherwise, if the compiler has them we'll fall back on gcc/Clang
+ * intrinsics.
+ *
+ * Otherwise, if we're on VC++2012, we'll use C++11 atomics even if
+ * MOZ_HAVE_CXX11_ATOMICS is not defined.  The compiler has the
+ * atomics but they are disabled in Mozilla due to a performance bug.
+ * That performance bug does not affect the Atomics code.  See
+ * mozilla/Atomics.h for further comments on that bug.
+ *
+ * Otherwise, if we're on VC++2010 or VC++2008, we'll emulate the
+ * gcc/Clang intrinsics with simple code below using the VC++
+ * intrinsics.  Like the VC++2012 solution, this is a stopgap, since
+ * we're about to start using VC++2013 anyway.
+ *
+ * If none of those options are available then the build must disable
+ * shared memory, or compilation will fail with a predictable error.
+ */
+
+#include "builtin/AtomicsObject.h"
+
+#include "mozilla/Atomics.h"
+
+#include "jsapi.h"
+#include "jsfriendapi.h"
+
+#include "vm/GlobalObject.h"
+#include "vm/SharedTypedArrayObject.h"
+#include "vm/TypedArrayObject.h"
+
+#include "jsobjinlines.h"
+
+using namespace js;
+
+#if defined(MOZ_HAVE_CXX11_ATOMICS)
+# define CXX11_ATOMICS
+#elif defined(__clang__) || defined(__GNUC__)
+# define GNU_ATOMICS
+#elif _MSC_VER >= 1700 && _MSC_VER < 1800
+// Visual Studio 2012
+# define CXX11_ATOMICS
+# include <atomic>
+#elif defined(_MSC_VER)
+// Visual Studio 2010
+# define GNU_ATOMICS
+static inline void
+__sync_synchronize()
+{
+# if JS_BITS_PER_WORD == 32
+    // If configured for SSE2+ we can use the MFENCE instruction, available
+    // through the _mm_mfence intrinsic.  But for non-SSE2 systems we have
+    // to do something else.  Linux uses "lock add [esp], 0", so why not?
+    __asm lock add [esp], 0;
+# else
+    _mm_mfence();
+# endif
+}
+
+# define MSC_CAS(T, U, cmpxchg) \
+    static inline T \
+    __sync_val_compare_and_swap(T *addr, T oldval, T newval) { \
+        return (T)cmpxchg((U volatile*)addr, (U)oldval, (U)newval); \
+    }
+
+MSC_CAS(int8_t, char, _InterlockedCompareExchange8)
+MSC_CAS(uint8_t, char, _InterlockedCompareExchange8)
+MSC_CAS(int16_t, short, _InterlockedCompareExchange16)
+MSC_CAS(uint16_t, short, _InterlockedCompareExchange16)
+MSC_CAS(int32_t, long, _InterlockedCompareExchange)
+MSC_CAS(uint32_t, long, _InterlockedCompareExchange)
+
+# define MSC_FETCHADDOP(T, U, xadd) \
+    static inline T \
+    __sync_fetch_and_add(T *addr, T val) { \
+        return (T)xadd((U volatile*)addr, (U)val); \
+    } \
+    static inline T \
+    __sync_fetch_and_sub(T *addr, T val) { \
+        return (T)xadd((U volatile*)addr, (U)-val); \
+    }
+
+MSC_FETCHADDOP(int8_t, char, _InterlockedExchangeAdd8)
+MSC_FETCHADDOP(uint8_t, char, _InterlockedExchangeAdd8)
+MSC_FETCHADDOP(int16_t, short, _InterlockedExchangeAdd16)
+MSC_FETCHADDOP(uint16_t, short, _InterlockedExchangeAdd16)
+MSC_FETCHADDOP(int32_t, long, _InterlockedExchangeAdd)
+MSC_FETCHADDOP(uint32_t, long, _InterlockedExchangeAdd)
+
+# define MSC_FETCHBITOP(T, U, andop, orop, xorop) \
+    static inline T \
+    __sync_fetch_and_and(T *addr, T val) { \
+        return (T)andop((U volatile*)addr, (U)val);  \
+    } \
+    static inline T \
+    __sync_fetch_and_or(T *addr, T val) { \
+        return (T)orop((U volatile*)addr, (U)val);  \
+    } \
+    static inline T \
+    __sync_fetch_and_xor(T *addr, T val) { \
+        return (T)xorop((U volatile*)addr, (U)val);  \
+    } \
+
+MSC_FETCHBITOP(int8_t, char, _InterlockedAnd8, _InterlockedOr8, _InterlockedXor8)
+MSC_FETCHBITOP(uint8_t, char, _InterlockedAnd8, _InterlockedOr8, _InterlockedXor8)
+MSC_FETCHBITOP(int16_t, short, _InterlockedAnd16, _InterlockedOr16, _InterlockedXor16)
+MSC_FETCHBITOP(uint16_t, short, _InterlockedAnd16, _InterlockedOr16, _InterlockedXor16)
+MSC_FETCHBITOP(int32_t, long,  _InterlockedAnd, _InterlockedOr, _InterlockedXor)
+MSC_FETCHBITOP(uint32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)
+
+# undef MSC_CAS
+# undef MSC_FETCHADDOP
+# undef MSC_FETCHBITOP
+
+#elif defined(ENABLE_SHARED_ARRAY_BUFFER)
+# error "Either disable JS shared memory or use a compiler that supports C++11 atomics or GCC/clang atomics"
+#endif
+
+const Class AtomicsObject::class_ = {
+    "Atomics",
+    JSCLASS_HAS_CACHED_PROTO(JSProto_Atomics),
+    JS_PropertyStub,
+    JS_DeletePropertyStub,
+    JS_PropertyStub,
+    JS_StrictPropertyStub,
+    JS_EnumerateStub,
+    JS_ResolveStub,
+    JS_ConvertStub,
+    nullptr,                 // finalize
+    nullptr,                 // call
+    nullptr,                 // hasInstance
+    nullptr,                 // construct
+    nullptr                  // trace
+};
+
+static bool
+ReportBadArrayType(JSContext *cx)
+{
+    JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_ATOMICS_BAD_ARRAY);
+    return false;
+}
+
+static bool
+ReportNoFutexes(JSContext *cx)
+{
+    JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_ATOMICS_NOT_INSTALLED);
+    return false;
+}
+
+static bool
+GetSharedTypedArray(JSContext *cx, HandleValue v,
+                    MutableHandle<SharedTypedArrayObject *> viewp)
+{
+    if (!v.isObject())
+        return ReportBadArrayType(cx);
+    if (!v.toObject().is<SharedTypedArrayObject>())
+        return ReportBadArrayType(cx);
+    viewp.set(&v.toObject().as<SharedTypedArrayObject>());
+    return true;
+}
+
+// Returns true so long as conversion of the index value succeeds; in that
+// case *inRange says whether the index is in range, and *offset holds the
+// index when it is.
+static bool
+GetSharedTypedArrayIndex(JSContext *cx, HandleValue v, Handle<SharedTypedArrayObject *> view,
+                         uint32_t* offset, bool* inRange)
+{
+    RootedId id(cx);
+    if (!ValueToId<CanGC>(cx, v, &id))
+        return false;
+    uint64_t index;
+    if (!IsTypedArrayIndex(id, &index) || index >= view->length()) {
+        *inRange = false;
+    } else {
+        *offset = (uint32_t)index;
+        *inRange = true;
+    }
+    return true;
+}
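
Out-of-range indices are deliberately not errors here: every caller below
checks *inRange and, when it is false, degrades to a bare memory fence. The
observable behavior, sketched (i32 is assumed to be an Int32Array view over a
SharedArrayBuffer):

    Atomics.load(i32, 0);             // in range: atomic read of element 0
    Atomics.load(i32, i32.length);    // out of range: fence only, returns undefined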
+
+void
+js::atomics_fullMemoryBarrier()
+{
+#if defined(CXX11_ATOMICS)
+    std::atomic_thread_fence(std::memory_order_seq_cst);
+#elif defined(GNU_ATOMICS)
+    __sync_synchronize();
+#endif
+}
+
+static bool
+atomics_fence_impl(JSContext *cx, MutableHandleValue r)
+{
+    atomics_fullMemoryBarrier();
+    r.setUndefined();
+    return true;
+}
+
+bool
+js::atomics_fence(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    return atomics_fence_impl(cx, args.rval());
+}
+
+bool
+js::atomics_compareExchange(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    HandleValue objv = args.get(0);
+    HandleValue idxv = args.get(1);
+    HandleValue oldv = args.get(2);
+    HandleValue newv = args.get(3);
+    MutableHandleValue r = args.rval();
+
+    Rooted<SharedTypedArrayObject *> view(cx, nullptr);
+    if (!GetSharedTypedArray(cx, objv, &view))
+        return false;
+    uint32_t offset;
+    bool inRange;
+    if (!GetSharedTypedArrayIndex(cx, idxv, view, &offset, &inRange))
+        return false;
+    int32_t oldCandidate;
+    if (!ToInt32(cx, oldv, &oldCandidate))
+        return false;
+    int32_t newCandidate;
+    if (!ToInt32(cx, newv, &newCandidate))
+        return false;
+
+    if (!inRange)
+        return atomics_fence_impl(cx, r);
+
+    // CAS always sets oldval to the old value of the cell.
+    // addr must be a T*, and oldval and newval should be variables of type T
+
+#if defined(CXX11_ATOMICS)
+# define CAS(T, addr, oldval, newval)                                    \
+    do {                                                                \
+        std::atomic_compare_exchange_strong(reinterpret_cast<std::atomic<T>*>(addr), &oldval, newval); \
+    } while(0)
+#elif defined(GNU_ATOMICS)
+# define CAS(T, addr, oldval, newval)                                    \
+    do {                                                                \
+        oldval = __sync_val_compare_and_swap(addr, (oldval), (newval)); \
+    } while(0)
+#else
+# define CAS(a, b, c, newval)  (void)newval
+#endif
+
+    switch (view->type()) {
+      case Scalar::Int8: {
+          int8_t oldval = (int8_t)oldCandidate;
+          int8_t newval = (int8_t)newCandidate;
+          CAS(int8_t, (int8_t*)view->viewData() + offset, oldval, newval);
+          r.setInt32(oldval);
+          return true;
+      }
+      case Scalar::Uint8: {
+          uint8_t oldval = (uint8_t)oldCandidate;
+          uint8_t newval = (uint8_t)newCandidate;
+          CAS(uint8_t, (uint8_t*)view->viewData() + offset, oldval, newval);
+          r.setInt32(oldval);
+          return true;
+      }
+      case Scalar::Uint8Clamped: {
+          uint8_t oldval = ClampIntForUint8Array(oldCandidate);
+          uint8_t newval = ClampIntForUint8Array(newCandidate);
+          CAS(uint8_t, (uint8_t*)view->viewData() + offset, oldval, newval);
+          r.setInt32(oldval);
+          return true;
+      }
+      case Scalar::Int16: {
+          int16_t oldval = (int16_t)oldCandidate;
+          int16_t newval = (int16_t)newCandidate;
+          CAS(int16_t, (int16_t*)view->viewData() + offset, oldval, newval);
+          r.setInt32(oldval);
+          return true;
+      }
+      case Scalar::Uint16: {
+          uint16_t oldval = (uint16_t)oldCandidate;
+          uint16_t newval = (uint16_t)newCandidate;
+          CAS(uint16_t, (uint16_t*)view->viewData() + offset, oldval, newval);
+          r.setInt32(oldval);
+          return true;
+      }
+      case Scalar::Int32: {
+          int32_t oldval = oldCandidate;
+          int32_t newval = newCandidate;
+          CAS(int32_t, (int32_t*)view->viewData() + offset, oldval, newval);
+          r.setInt32(oldval);
+          return true;
+      }
+      case Scalar::Uint32: {
+          uint32_t oldval = (uint32_t)oldCandidate;
+          uint32_t newval = (uint32_t)newCandidate;
+          CAS(uint32_t, (uint32_t*)view->viewData() + offset, oldval, newval);
+          r.setNumber((double)oldval);
+          return true;
+      }
+      default:
+        return ReportBadArrayType(cx);
+    }
+
+    // Do not undef CAS; it is used again by atomics_binop_impl below.
+}
+
+bool
+js::atomics_load(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    HandleValue objv = args.get(0);
+    HandleValue idxv = args.get(1);
+    MutableHandleValue r = args.rval();
+
+    Rooted<SharedTypedArrayObject *> view(cx, nullptr);
+    if (!GetSharedTypedArray(cx, objv, &view))
+        return false;
+    uint32_t offset;
+    bool inRange;
+    if (!GetSharedTypedArrayIndex(cx, idxv, view, &offset, &inRange))
+        return false;
+
+    if (!inRange)
+        return atomics_fence_impl(cx, r);
+
+    // LOAD sets v to the value of *addr
+    // addr must be a T*, and v must be a variable of type T
+
+#if defined(CXX11_ATOMICS)
+# define LOAD(T, addr, v)                                                \
+    do {                                                                \
+        v = std::atomic_load(reinterpret_cast<std::atomic<T>*>(addr));  \
+    } while(0)
+#elif defined(GNU_ATOMICS)
+# define LOAD(T, addr, v)                        \
+    do {                                        \
+        __sync_synchronize();                   \
+        v = *(addr);                            \
+        __sync_synchronize();                   \
+    } while(0)
+#else
+# define LOAD(a, b, v)  v = 0
+#endif
+
+    switch (view->type()) {
+      case Scalar::Uint8:
+      case Scalar::Uint8Clamped: {
+          uint8_t v;
+          LOAD(uint8_t, (uint8_t*)view->viewData() + offset, v);
+          r.setInt32(v);
+          return true;
+      }
+      case Scalar::Int8: {
+          int8_t v;
+          LOAD(int8_t, (int8_t*)view->viewData() + offset, v);
+          r.setInt32(v);
+          return true;
+      }
+      case Scalar::Int16: {
+          int16_t v;
+          LOAD(int16_t, (int16_t*)view->viewData() + offset, v);
+          r.setInt32(v);
+          return true;
+      }
+      case Scalar::Uint16: {
+          uint16_t v;
+          LOAD(uint16_t, (uint16_t*)view->viewData() + offset, v);
+          r.setInt32(v);
+          return true;
+      }
+      case Scalar::Int32: {
+          int32_t v;
+          LOAD(int32_t, (int32_t*)view->viewData() + offset, v);
+          r.setInt32(v);
+          return true;
+      }
+      case Scalar::Uint32: {
+          uint32_t v;
+          LOAD(uint32_t, (uint32_t*)view->viewData() + offset, v);
+          r.setNumber(v);
+          return true;
+      }
+      default:
+          return ReportBadArrayType(cx);
+    }
+
+#undef LOAD
+}
+
+bool
+js::atomics_store(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    HandleValue objv = args.get(0);
+    HandleValue idxv = args.get(1);
+    HandleValue valv = args.get(2);
+    MutableHandleValue r = args.rval();
+
+    Rooted<SharedTypedArrayObject *> view(cx, nullptr);
+    if (!GetSharedTypedArray(cx, objv, &view))
+        return false;
+    uint32_t offset;
+    bool inRange;
+    if (!GetSharedTypedArrayIndex(cx, idxv, view, &offset, &inRange))
+        return false;
+    int32_t numberValue;
+    if (!ToInt32(cx, valv, &numberValue))
+        return false;
+
+    if (!inRange) {
+        atomics_fullMemoryBarrier();
+        r.set(valv);
+        return true;
+    }
+
+    // STORE stores value in *addr
+    // addr must be a T*, and value should be of type T
+
+#if defined(CXX11_ATOMICS)
+# define STORE(T, addr, value)                                           \
+    do {                                                                \
+        std::atomic_store(reinterpret_cast<std::atomic<T>*>(addr), (T)value); \
+    } while(0)
+#elif defined(GNU_ATOMICS)
+# define STORE(T, addr, value)                   \
+    do {                                        \
+        __sync_synchronize();                   \
+        *(addr) = value;                        \
+        __sync_synchronize();                   \
+    } while(0)
+#else
+# define STORE(a, b, c)  (void)0
+#endif
+
+    switch (view->type()) {
+      case Scalar::Int8: {
+          int8_t value = (int8_t)numberValue;
+          STORE(int8_t, (int8_t*)view->viewData() + offset, value);
+          r.setInt32(value);
+          return true;
+      }
+      case Scalar::Uint8: {
+          uint8_t value = (uint8_t)numberValue;
+          STORE(uint8_t, (uint8_t*)view->viewData() + offset, value);
+          r.setInt32(value);
+          return true;
+      }
+      case Scalar::Uint8Clamped: {
+          uint8_t value = ClampIntForUint8Array(numberValue);
+          STORE(uint8_t, (uint8_t*)view->viewData() + offset, value);
+          r.setInt32(value);
+          return true;
+      }
+      case Scalar::Int16: {
+          int16_t value = (int16_t)numberValue;
+          STORE(int16_t, (int16_t*)view->viewData() + offset, value);
+          r.setInt32(value);
+          return true;
+      }
+      case Scalar::Uint16: {
+          uint16_t value = (uint16_t)numberValue;
+          STORE(uint16_t, (uint16_t*)view->viewData() + offset, value);
+          r.setInt32(value);
+          return true;
+      }
+      case Scalar::Int32: {
+          int32_t value = numberValue;
+          STORE(int32_t, (int32_t*)view->viewData() + offset, value);
+          r.setInt32(value);
+          return true;
+      }
+      case Scalar::Uint32: {
+          uint32_t value = (uint32_t)numberValue;
+          STORE(uint32_t, (uint32_t*)view->viewData() + offset, value);
+          r.setNumber((double)value);
+          return true;
+      }
+      default:
+        return ReportBadArrayType(cx);
+    }
+
+#undef STORE
+}
+
+template<typename T>
+static bool
+atomics_binop_impl(JSContext *cx, HandleValue objv, HandleValue idxv, HandleValue valv,
+                   MutableHandleValue r)
+{
+    Rooted<SharedTypedArrayObject *> view(cx, nullptr);
+    if (!GetSharedTypedArray(cx, objv, &view))
+        return false;
+    uint32_t offset;
+    bool inRange;
+    if (!GetSharedTypedArrayIndex(cx, idxv, view, &offset, &inRange))
+        return false;
+    int32_t numberValue;
+    if (!ToInt32(cx, valv, &numberValue))
+        return false;
+
+    if (!inRange)
+        return atomics_fence_impl(cx, r);
+
+    switch (view->type()) {
+      case Scalar::Int8: {
+          int8_t v = (int8_t)numberValue;
+          r.setInt32(T::operate((int8_t*)view->viewData() + offset, v));
+          return true;
+      }
+      case Scalar::Uint8: {
+          uint8_t v = (uint8_t)numberValue;
+          r.setInt32(T::operate((uint8_t*)view->viewData() + offset, v));
+          return true;
+      }
+      case Scalar::Uint8Clamped: {
+          // Spec says:
+          //  - clamp the input value
+          //  - perform the operation
+          //  - clamp the result
+          //  - store the result
+          // This requires a CAS loop.
+          int32_t value = ClampIntForUint8Array(numberValue);
+          uint8_t* loc = (uint8_t*)view->viewData() + offset;
+          for (;;) {
+              uint8_t old = *loc;
+              uint8_t result = (uint8_t)ClampIntForUint8Array(T::perform(old, value));
+              uint8_t tmp = old;  // tmp is overwritten by CAS
+              CAS(uint8_t, loc, tmp, result);
+              if (tmp == old) {
+                  r.setInt32(old);
+                  break;
+              }
+          }
+          return true;
+      }
+      case Scalar::Int16: {
+          int16_t v = (int16_t)numberValue;
+          r.setInt32(T::operate((int16_t*)view->viewData() + offset, v));
+          return true;
+      }
+      case Scalar::Uint16: {
+          uint16_t v = (uint16_t)numberValue;
+          r.setInt32(T::operate((uint16_t*)view->viewData() + offset, v));
+          return true;
+      }
+      case Scalar::Int32: {
+          int32_t v = numberValue;
+          r.setInt32(T::operate((int32_t*)view->viewData() + offset, v));
+          return true;
+      }
+      case Scalar::Uint32: {
+          uint32_t v = (uint32_t)numberValue;
+          r.setNumber((double)T::operate((uint32_t*)view->viewData() + offset, v));
+          return true;
+      }
+      default:
+        return ReportBadArrayType(cx);
+    }
+}
+
+#define INTEGRAL_TYPES_FOR_EACH(NAME, TRANSFORM) \
+    static int8_t operate(int8_t* addr, int8_t v) { return NAME(TRANSFORM(int8_t, addr), v); } \
+    static uint8_t operate(uint8_t* addr, uint8_t v) { return NAME(TRANSFORM(uint8_t, addr), v); } \
+    static int16_t operate(int16_t* addr, int16_t v) { return NAME(TRANSFORM(int16_t, addr), v); } \
+    static uint16_t operate(uint16_t* addr, uint16_t v) { return NAME(TRANSFORM(uint16_t, addr), v); } \
+    static int32_t operate(int32_t* addr, int32_t v) { return NAME(TRANSFORM(int32_t, addr), v); } \
+    static uint32_t operate(uint32_t* addr, uint32_t v) { return NAME(TRANSFORM(uint32_t, addr), v); }
+
+#define CAST_ATOMIC(t, v) reinterpret_cast<std::atomic<t>*>(v)
+#define DO_NOTHING(t, v) v
+#define ZERO(t, v) 0
+
+class do_add
+{
+public:
+#if defined(CXX11_ATOMICS)
+    INTEGRAL_TYPES_FOR_EACH(std::atomic_fetch_add, CAST_ATOMIC)
+#elif defined(GNU_ATOMICS)
+    INTEGRAL_TYPES_FOR_EACH(__sync_fetch_and_add, DO_NOTHING)
+#else
+    INTEGRAL_TYPES_FOR_EACH(ZERO, DO_NOTHING)
+#endif
+    static int32_t perform(int32_t x, int32_t y) { return x + y; }
+};
+
+bool
+js::atomics_add(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    return atomics_binop_impl<do_add>(cx, args.get(0), args.get(1), args.get(2), args.rval());
+}
+
+class do_sub
+{
+public:
+#if defined(CXX11_ATOMICS)
+    INTEGRAL_TYPES_FOR_EACH(std::atomic_fetch_sub, CAST_ATOMIC)
+#elif defined(GNU_ATOMICS)
+    INTEGRAL_TYPES_FOR_EACH(__sync_fetch_and_sub, DO_NOTHING)
+#else
+    INTEGRAL_TYPES_FOR_EACH(ZERO, DO_NOTHING)
+#endif
+    static int32_t perform(int32_t x, int32_t y) { return x - y; }
+};
+
+bool
+js::atomics_sub(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    return atomics_binop_impl<do_sub>(cx, args.get(0), args.get(1), args.get(2), args.rval());
+}
+
+class do_and
+{
+public:
+#if defined(CXX11_ATOMICS)
+    INTEGRAL_TYPES_FOR_EACH(std::atomic_fetch_and, CAST_ATOMIC)
+#elif defined(GNU_ATOMICS)
+    INTEGRAL_TYPES_FOR_EACH(__sync_fetch_and_and, DO_NOTHING)
+#else
+    INTEGRAL_TYPES_FOR_EACH(ZERO, DO_NOTHING)
+#endif
+    static int32_t perform(int32_t x, int32_t y) { return x & y; }
+};
+
+bool
+js::atomics_and(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    return atomics_binop_impl<do_and>(cx, args.get(0), args.get(1), args.get(2), args.rval());
+}
+
+class do_or
+{
+public:
+#if defined(CXX11_ATOMICS)
+    INTEGRAL_TYPES_FOR_EACH(std::atomic_fetch_or, CAST_ATOMIC)
+#elif defined(GNU_ATOMICS)
+    INTEGRAL_TYPES_FOR_EACH(__sync_fetch_and_or, DO_NOTHING)
+#else
+    INTEGRAL_TYPES_FOR_EACH(ZERO, DO_NOTHING)
+#endif
+    static int32_t perform(int32_t x, int32_t y) { return x | y; }
+};
+
+bool
+js::atomics_or(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    return atomics_binop_impl<do_or>(cx, args.get(0), args.get(1), args.get(2), args.rval());
+}
+
+class do_xor
+{
+public:
+#if defined(CXX11_ATOMICS)
+    INTEGRAL_TYPES_FOR_EACH(std::atomic_fetch_xor, CAST_ATOMIC)
+#elif defined(GNU_ATOMICS)
+    INTEGRAL_TYPES_FOR_EACH(__sync_fetch_and_xor, DO_NOTHING)
+#else
+    INTEGRAL_TYPES_FOR_EACH(ZERO, DO_NOTHING)
+#endif
+    static int32_t perform(int32_t x, int32_t y) { return x ^ y; }
+};
+
+bool
+js::atomics_xor(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    return atomics_binop_impl<do_xor>(cx, args.get(0), args.get(1), args.get(2), args.rval());
+}
+
+#undef INTEGRAL_TYPES_FOR_EACH
+#undef CAST_ATOMIC
+#undef DO_NOTHING
+#undef ZERO
+
+namespace js {
+
+// Represents one waiting worker.
+//
+// The type is declared opaque in SharedArrayObject.h.  Instances of
+// js::FutexWaiter are stack-allocated and linked onto a list across a
+// call to JS::PerRuntimeFutexAPI::wait().
+//
+// The 'waiters' field of the SharedArrayRawBuffer points to the highest
+// priority waiter in the list, and lower priority nodes are linked through
+// the 'lower_pri' field.  The 'back' field goes the other direction.
+// The list is circular, so the 'lower_pri' field of the lowest priority
+// node points to the first node in the list.  The list has no dedicated
+// header node.
+
+class FutexWaiter
+{
+  public:
+    FutexWaiter(uint32_t offset, JS::PerRuntimeFutexAPI *fx)
+      : offset(offset),
+        fx(fx),
+        lower_pri(nullptr),
+        back(nullptr)
+    {
+    }
+
+    bool        waiting;                // Set to true when the worker is on the list and not woken
+    uint32_t    offset;                 // int32 element index within the SharedArrayBuffer
+    JS::PerRuntimeFutexAPI *fx;         // Futex API of the waiting worker's runtime
+    FutexWaiter *lower_pri;             // Lower priority nodes in circular doubly-linked list of waiters
+    FutexWaiter *back;                  // Other direction
+};
+
+class AutoLockFutexAPI
+{
+    JS::PerRuntimeFutexAPI * const fx;
+  public:
+    AutoLockFutexAPI(JS::PerRuntimeFutexAPI *fx) : fx(fx) {
+        fx->lock();
+    }
+    ~AutoLockFutexAPI() {
+        fx->unlock();
+    }
+};
+
+} // namespace js
+
+bool
+js::atomics_futexWait(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    HandleValue objv = args.get(0);
+    HandleValue idxv = args.get(1);
+    HandleValue valv = args.get(2);
+    HandleValue timeoutv = args.get(3);
+    MutableHandleValue r = args.rval();
+
+    JS::PerRuntimeFutexAPI* fx = cx->runtime()->futexAPI_;
+    if (!fx)
+        return ReportNoFutexes(cx);
+
+    Rooted<SharedTypedArrayObject *> view(cx, nullptr);
+    if (!GetSharedTypedArray(cx, objv, &view))
+        return false;
+    if (view->type() != Scalar::Int32)
+        return ReportBadArrayType(cx);
+    uint32_t offset;
+    bool inRange;
+    if (!GetSharedTypedArrayIndex(cx, idxv, view, &offset, &inRange))
+        return false;
+    int32_t value;
+    if (!ToInt32(cx, valv, &value))
+        return false;
+    double timeout;
+    if (!ToInteger(cx, timeoutv, &timeout))
+        return false;
+    if (timeout < 0)
+        timeout = 0;
+
+    if (!inRange) {
+        atomics_fullMemoryBarrier();
+        r.setUndefined();
+        return true;
+    }
+
+    // This lock also protects the "waiters" field on SharedArrayRawBuffer,
+    // and it provides the necessary memory fence.
+    AutoLockFutexAPI lock(fx);
+
+    int32_t* addr = (int32_t*)view->viewData() + offset;
+    if (*addr != value) {
+        r.setInt32(AtomicsObject::FutexNotequal);
+        return true;
+    }
+
+    Rooted<SharedArrayBufferObject *> sab(cx, &view->buffer()->as<SharedArrayBufferObject>());
+    SharedArrayRawBuffer *sarb = sab->rawBufferObject();
+
+    FutexWaiter w(offset, fx);
+    w.waiting = true;
+    if (FutexWaiter *waiters = sarb->waiters()) {
+        w.lower_pri = waiters;
+        w.back = waiters->back;
+        waiters->back->lower_pri = &w;
+        waiters->back = &w;
+    } else {
+        w.lower_pri = w.back = &w;
+        sarb->setWaiters(&w);
+    }
+
+    bool retval = true;
+    switch (fx->wait(timeout)) {
+      case JS::PerRuntimeFutexAPI::Woken:
+        r.setInt32(AtomicsObject::FutexOK);
+        break;
+      case JS::PerRuntimeFutexAPI::Timedout:
+        r.setInt32(AtomicsObject::FutexTimedout);
+        break;
+      case JS::PerRuntimeFutexAPI::ErrorTooLong:
+        // This is a hack, but it's serviceable.
+        JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_ATOMICS_TOO_LONG);
+        retval = false;
+        break;
+      case JS::PerRuntimeFutexAPI::InterruptForTerminate:
+        // Throw an uncatchable exception.
+        JS_ClearPendingException(cx);
+        retval = false;
+        break;
+    }
+
+    if (w.lower_pri == &w) {
+        sarb->setWaiters(nullptr);
+    } else {
+        w.lower_pri->back = w.back;
+        w.back->lower_pri = w.lower_pri;
+        if (sarb->waiters() == &w)
+            sarb->setWaiters(w.lower_pri);
+    }
+    return retval;
+}
+
+bool
+js::atomics_futexWake(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    HandleValue objv = args.get(0);
+    HandleValue idxv = args.get(1);
+    HandleValue countv = args.get(2);
+    MutableHandleValue r = args.rval();
+
+    Rooted<SharedTypedArrayObject *> view(cx, nullptr);
+    if (!GetSharedTypedArray(cx, objv, &view))
+        return false;
+    if (view->type() != Scalar::Int32)
+        return ReportBadArrayType(cx);
+    uint32_t offset;
+    bool inRange;
+    if (!GetSharedTypedArrayIndex(cx, idxv, view, &offset, &inRange))
+        return false;
+    if (!inRange) {
+        atomics_fullMemoryBarrier();
+        r.setUndefined();
+        return true;
+    }
+    double count;
+    if (!ToInteger(cx, countv, &count))
+        return false;
+    if (count < 0)
+        count = 0;
+
+    JS::PerRuntimeFutexAPI* fx = cx->runtime()->futexAPI_;
+    if (!fx)
+        return ReportNoFutexes(cx);
+
+    AutoLockFutexAPI lock(fx);
+
+    Rooted<SharedArrayBufferObject *> sab(cx, &view->buffer()->as<SharedArrayBufferObject>());
+    SharedArrayRawBuffer *sarb = sab->rawBufferObject();
+    int32_t woken = 0;
+
+    FutexWaiter *waiters = sarb->waiters();
+    if (waiters && count > 0) {
+        FutexWaiter *iter = waiters;
+        do {
+            FutexWaiter *c = iter;
+            iter = iter->lower_pri;
+            if (c->offset != offset || !c->waiting)
+                continue;
+            c->fx->wake();
+            c->waiting = false;
+            ++woken;
+            --count;
+        } while (count > 0 && iter != waiters);
+    }
+
+    r.setInt32(woken);
+    return true;
+}
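
Together, futexWait and futexWake form a simple blocking handshake between
workers sharing a view. A sketch, with worker setup and message plumbing
assumed (i32 is a shared Int32Array; the return codes are the Atomics
constants installed below):

    // Worker A: block if cell 0 still holds 0.
    var result = Atomics.futexWait(i32, 0, 0, Number.POSITIVE_INFINITY);
    // result is Atomics.OK if woken, Atomics.NOTEQUAL if i32[0] != 0 on entry.

    // Worker B: publish a value, then wake at most one waiter on cell 0.
    Atomics.store(i32, 0, 1);
    Atomics.futexWake(i32, 0, 1);     // returns the number of workers woken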
+
+bool
+js::atomics_futexWakeOrRequeue(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    HandleValue objv = args.get(0);
+    HandleValue idx1v = args.get(1);
+    HandleValue countv = args.get(2);
+    HandleValue valv = args.get(3);
+    HandleValue idx2v = args.get(4);
+    MutableHandleValue r = args.rval();
+
+    Rooted<SharedTypedArrayObject *> view(cx, nullptr);
+    if (!GetSharedTypedArray(cx, objv, &view))
+        return false;
+    if (view->type() != Scalar::Int32)
+        return ReportBadArrayType(cx);
+    uint32_t offset1;
+    bool inRange1;
+    if (!GetSharedTypedArrayIndex(cx, idx1v, view, &offset1, &inRange1))
+        return false;
+    double count;
+    if (!ToInteger(cx, countv, &count))
+        return false;
+    if (count < 0)
+        count = 0;
+    int32_t value;
+    if (!ToInt32(cx, valv, &value))
+        return false;
+    uint32_t offset2;
+    bool inRange2;
+    if (!GetSharedTypedArrayIndex(cx, idx2v, view, &offset2, &inRange2))
+        return false;
+    if (!(inRange1 && inRange2)) {
+        atomics_fullMemoryBarrier();
+        r.setUndefined();
+        return true;
+    }
+
+    JS::PerRuntimeFutexAPI* fx = cx->runtime()->futexAPI_;
+    if (!fx)
+        return ReportNoFutexes(cx);
+
+    AutoLockFutexAPI lock(fx);
+
+    int32_t* addr = (int32_t*)view->viewData() + offset1;
+    if (*addr != value) {
+        r.setInt32(AtomicsObject::FutexNotequal);
+        return true;
+    }
+
+    Rooted<SharedArrayBufferObject *> sab(cx, &view->buffer()->as<SharedArrayBufferObject>());
+    SharedArrayRawBuffer *sarb = sab->rawBufferObject();
+
+    // Walk the list of waiters looking for those waiting on offset1.
+    // Wake some and requeue the others.  There may already be other
+    // waiters on offset2, so those that are requeued must be moved to
+    // the back of the list.  Offset1 may equal offset2.  The list's
+    // first node may change, and the list may be emptied out by the
+    // operation.
+
+    FutexWaiter *waiters = sarb->waiters();
+    if (!waiters) {
+        r.setInt32(0);
+        return true;
+    }
+
+    int32_t woken = 0;
+    FutexWaiter whead((uint32_t)-1, nullptr); // Header node for waiters
+    FutexWaiter *first = waiters;
+    FutexWaiter *last = waiters->back;
+    whead.lower_pri = first;
+    whead.back = last;
+    first->back = &whead;
+    last->lower_pri = &whead;
+
+    FutexWaiter rhead((uint32_t)-1, nullptr); // Header node for requeued
+    rhead.lower_pri = rhead.back = &rhead;
+
+    FutexWaiter *iter = whead.lower_pri;
+    while (iter != &whead) {
+        FutexWaiter *c = iter;
+        iter = iter->lower_pri;
+        if (!c->waiting || c->offset != offset1)
+            continue;
+        if (count > 0) {
+            c->fx->wake();
+            c->waiting = false;
+            ++woken;
+            --count;
+        } else {
+            c->offset = offset2;
+
+            // Remove the node from the waiters list.
+            c->back->lower_pri = c->lower_pri;
+            c->lower_pri->back = c->back;
+
+            // Insert the node at the back of the requeuers list.
+            c->lower_pri = &rhead;
+            c->back = rhead.back;
+            rhead.back->lower_pri = c;
+            rhead.back = c;
+        }
+    }
+
+    // If there are any requeuers, append them to the waiters.
+    if (rhead.lower_pri != &rhead) {
+        whead.back->lower_pri = rhead.lower_pri;
+        rhead.lower_pri->back = whead.back;
+
+        whead.back = rhead.back;
+        rhead.back->lower_pri = &whead;
+    }
+
+    // Make the final list and install it.
+    waiters = nullptr;
+    if (whead.lower_pri != &whead) {
+        whead.back->lower_pri = whead.lower_pri;
+        whead.lower_pri->back = whead.back;
+        waiters = whead.lower_pri;
+    }
+    sarb->setWaiters(waiters);
+
+    r.setInt32(woken);
+    return true;
+}
+
+const JSFunctionSpec AtomicsMethods[] = {
+    JS_FN("compareExchange",    atomics_compareExchange,    4,0),
+    JS_FN("load",               atomics_load,               2,0),
+    JS_FN("store",              atomics_store,              3,0),
+    JS_FN("fence",              atomics_fence,              0,0),
+    JS_FN("add",                atomics_add,                3,0),
+    JS_FN("sub",                atomics_sub,                3,0),
+    JS_FN("and",                atomics_and,                3,0),
+    JS_FN("or",                 atomics_or,                 3,0),
+    JS_FN("xor",                atomics_xor,                3,0),
+    JS_FN("futexWait",          atomics_futexWait,          4,0),
+    JS_FN("futexWake",          atomics_futexWake,          3,0),
+    JS_FN("futexWakeOrRequeue", atomics_futexWakeOrRequeue, 5,0),
+    JS_FS_END
+};
+
+static const JSConstDoubleSpec AtomicsConstants[] = {
+    {"OK",       AtomicsObject::FutexOK},
+    {"TIMEDOUT", AtomicsObject::FutexTimedout},
+    {"NOTEQUAL", AtomicsObject::FutexNotequal},
+    {0,          0}
+};
+
+JSObject *
+AtomicsObject::initClass(JSContext *cx, Handle<GlobalObject *> global)
+{
+    // Create Atomics Object.
+    RootedObject objProto(cx, global->getOrCreateObjectPrototype(cx));
+    if (!objProto)
+        return nullptr;
+    RootedObject Atomics(cx, NewObjectWithGivenProto(cx, &AtomicsObject::class_, objProto,
+                                                     global, SingletonObject));
+    if (!Atomics)
+        return nullptr;
+
+    if (!JS_DefineFunctions(cx, Atomics, AtomicsMethods))
+        return nullptr;
+    if (!JS_DefineConstDoubles(cx, Atomics, AtomicsConstants))
+        return nullptr;
+
+    RootedValue AtomicsValue(cx, ObjectValue(*Atomics));
+
+    // Everything is set up, install Atomics on the global object.
+    if (!JSObject::defineProperty(cx, global, cx->names().Atomics, AtomicsValue, nullptr, nullptr, 0))
+        return nullptr;
+
+    global->setConstructor(JSProto_Atomics, AtomicsValue);
+    return Atomics;
+}
+
+JSObject *
+js_InitAtomicsClass(JSContext *cx, HandleObject obj)
+{
+    MOZ_ASSERT(obj->is<GlobalObject>());
+    Rooted<GlobalObject *> global(cx, &obj->as<GlobalObject>());
+    return AtomicsObject::initClass(cx, global);
+}
+
+#undef CXX11_ATOMICS
+#undef GNU_ATOMICS
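
For orientation, the module defined above behaves as follows from script (a
sketch; i32 is assumed to be an Int32Array view over a SharedArrayBuffer):

    Atomics.store(i32, 0, 37);                 // returns 37; cell is 37
    Atomics.compareExchange(i32, 0, 37, 5);    // returns 37; cell is now 5
    Atomics.add(i32, 0, 1);                    // returns the old value 5; cell is 6
    Atomics.and(i32, 0, 3);                    // returns 6; cell is 2
    Atomics.fence();                           // full barrier; returns undefined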
new file mode 100644
--- /dev/null
+++ b/js/src/builtin/AtomicsObject.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef builtin_AtomicsObject_h
+#define builtin_AtomicsObject_h
+
+#include "jsobj.h"
+
+namespace js {
+
+class AtomicsObject : public JSObject
+{
+  public:
+    static const Class class_;
+    static JSObject* initClass(JSContext *cx, Handle<GlobalObject *> global);
+    static bool toString(JSContext *cx, unsigned int argc, jsval *vp);
+
+    static const int FutexOK = 0;
+
+    // The error values must be negative because APIs such as futexWakeOrRequeue
+    // return a value that is either the number of tasks woken or an error code.
+    static const int FutexNotequal = -1;
+    static const int FutexTimedout = -2;
+
+    // Internal signals; negative for the same reason.
+    static const int FutexInterrupted = -1000;
+};
+
+void atomics_fullMemoryBarrier();
+
+bool atomics_compareExchange(JSContext *cx, unsigned argc, Value *vp);
+bool atomics_load(JSContext *cx, unsigned argc, Value *vp);
+bool atomics_store(JSContext *cx, unsigned argc, Value *vp);
+bool atomics_fence(JSContext *cx, unsigned argc, Value *vp);
+bool atomics_add(JSContext *cx, unsigned argc, Value *vp);
+bool atomics_sub(JSContext *cx, unsigned argc, Value *vp);
+bool atomics_and(JSContext *cx, unsigned argc, Value *vp);
+bool atomics_or(JSContext *cx, unsigned argc, Value *vp);
+bool atomics_xor(JSContext *cx, unsigned argc, Value *vp);
+bool atomics_futexWait(JSContext *cx, unsigned argc, Value *vp);
+bool atomics_futexWake(JSContext *cx, unsigned argc, Value *vp);
+bool atomics_futexWakeOrRequeue(JSContext *cx, unsigned argc, Value *vp);
+
+}  /* namespace js */
+
+JSObject *
+js_InitAtomicsClass(JSContext *cx, js::HandleObject obj);
+
+#endif /* builtin_AtomicsObject_h */
--- a/js/src/builtin/Object.cpp
+++ b/js/src/builtin/Object.cpp
@@ -2,32 +2,33 @@
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "builtin/Object.h"
 
 #include "mozilla/ArrayUtils.h"
+#include "mozilla/UniquePtr.h"
 
 #include "jscntxt.h"
 
 #include "frontend/BytecodeCompiler.h"
 #include "vm/StringBuffer.h"
 
 #include "jsobjinlines.h"
 
 #include "vm/NativeObject-inl.h"
 
 using namespace js;
 using namespace js::types;
 
 using js::frontend::IsIdentifier;
 using mozilla::ArrayLength;
-
+using mozilla::UniquePtr;
 
 bool
 js::obj_construct(JSContext *cx, unsigned argc, Value *vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 
     RootedObject obj(cx, nullptr);
     if (args.length() > 0 && !args[0].isNullOrUndefined()) {
@@ -604,17 +605,22 @@ obj_setPrototypeOf(JSContext *cx, unsign
     RootedObject newProto(cx, args[1].toObjectOrNull());
 
     bool success;
     if (!JSObject::setProto(cx, obj, newProto, &success))
         return false;
 
     /* Step 7. */
     if (!success) {
-        JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_OBJECT_NOT_EXTENSIBLE, "object");
+        UniquePtr<char[], JS::FreePolicy> bytes(DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
+                                                                        args[0], NullPtr()));
+        if (!bytes)
+            return false;
+        JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_SETPROTOTYPEOF_FAIL,
+                             bytes.get());
         return false;
     }
 
     /* Step 8. */
     args.rval().set(args[0]);
     return true;
 }
 
@@ -1001,31 +1007,42 @@ obj_isExtensible(JSContext *cx, unsigned
         RootedObject obj(cx, &args.get(0).toObject());
         if (!JSObject::isExtensible(cx, obj, &extensible))
             return false;
     }
     args.rval().setBoolean(extensible);
     return true;
 }
 
-// ES6 draft rev27 (2014/08/24) 19.1.2.15 Object.preventExtensions(O)
+// ES6 20141014 draft 19.1.2.15 Object.preventExtensions(O)
 static bool
 obj_preventExtensions(JSContext *cx, unsigned argc, Value *vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     args.rval().set(args.get(0));
 
     // Step 1.
     if (!args.get(0).isObject())
         return true;
 
-    // Steps 2-5.
+    // Steps 2-3.
     RootedObject obj(cx, &args.get(0).toObject());
 
-    return JSObject::preventExtensions(cx, obj);
+    bool status;
+    if (!JSObject::preventExtensions(cx, obj, &status))
+        return false;
+
+    // Step 4.
+    if (!status) {
+        JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_CANT_CHANGE_EXTENSIBILITY);
+        return false;
+    }
+
+    // Step 5.
+    return true;
 }
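
The user-visible effect of this hunk: Object.preventExtensions no longer
silently returns its argument when the object refuses the operation; per the
cited ES6 draft it now throws. A sketch (the failure case requires an exotic
object whose [[PreventExtensions]] reports failure):

    var o = {};
    Object.preventExtensions(o);     // returns o; Object.isExtensible(o) === false
    o.x = 1;                         // ignored (TypeError in strict mode)
    // If [[PreventExtensions]] reports failure, the call now throws a
    // TypeError (JSMSG_CANT_CHANGE_EXTENSIBILITY) instead of returning o.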
 
 // ES6 draft rev27 (2014/08/24) 19.1.2.5 Object.freeze(O)
 static bool
 obj_freeze(JSContext *cx, unsigned argc, Value *vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     args.rval().set(args.get(0));
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -2073,16 +2073,35 @@ ByteSize(JSContext *cx, unsigned argc, V
     JS::ubi::Node node = args.get(0);
     if (node)
         args.rval().set(NumberValue(node.size(mallocSizeOf)));
     else
         args.rval().setUndefined();
     return true;
 }
 
+static bool
+SetImmutablePrototype(JSContext *cx, unsigned argc, Value *vp)
+{
+    CallArgs args = CallArgsFromVp(argc, vp);
+    if (!args.get(0).isObject()) {
+        JS_ReportError(cx, "setImmutablePrototype: object expected");
+        return false;
+    }
+
+    RootedObject obj(cx, &args[0].toObject());
+
+    bool succeeded;
+    if (!JSObject::setImmutablePrototype(cx, obj, &succeeded))
+        return false;
+
+    args.rval().setBoolean(succeeded);
+    return true;
+}
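
In the shell, the new testing function composes with the Object.setPrototypeOf
change earlier in this changeset. A sketch of the expected interaction
(assertEq is the shell assertion helper used in the jit-tests below):

    var o = {};
    assertEq(setImmutablePrototype(o), true);
    var threw = false;
    try { Object.setPrototypeOf(o, null); } catch (e) { threw = true; }
    assertEq(threw, true);   // setProto failed, so a TypeError was thrown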
+
 static const JSFunctionSpecWithHelp TestingFunctions[] = {
     JS_FN_HELP("gc", ::GC, 0, 0,
 "gc([obj] | 'compartment' [, 'shrinking'])",
 "  Run the garbage collector. When obj is given, GC only its compartment.\n"
 "  If 'compartment' is given, GC any compartments that were scheduled for\n"
 "  GC via schedulegc.\n"
 "  If 'shrinking' is passes as the optional second argument, perform a\n"
 "  shrinking GC rather than a normal GC."),
@@ -2395,16 +2414,24 @@ gc::ZealModeHelpText),
 "    options.locals - show local variables in each frame\n"
 "    options.thisprops - show the properties of the 'this' object of each frame\n"),
 
     JS_FN_HELP("byteSize", ByteSize, 1, 0,
 "byteSize(value)",
 "  Return the size in bytes occupied by |value|, or |undefined| if value\n"
 "  is not allocated in memory.\n"),
 
+    JS_FN_HELP("setImmutablePrototype", SetImmutablePrototype, 1, 0,
+"setImmutablePrototype(obj)",
+"  Try to make obj's [[Prototype]] immutable, such that subsequent attempts to\n"
+"  change it will fail.  Return true if obj's [[Prototype]] was successfully made\n"
+"  immutable (or if it already was immutable), false otherwise.  Throws in case\n"
+"  of internal error, or if the operation doesn't even make sense (for example,\n"
+"  because the object is a revoked proxy)."),
+
     JS_FS_HELP_END
 };
 
 static const JSPropertySpec TestingProperties[] = {
     JS_PSG("timesAccessed", TimesAccessed, 0),
     JS_PS_END
 };
 
--- a/js/src/gc/StoreBuffer.cpp
+++ b/js/src/gc/StoreBuffer.cpp
@@ -21,16 +21,20 @@ using mozilla::ReentrancyGuard;
 
 /*** Edges ***/
 
 void
 StoreBuffer::SlotsEdge::mark(JSTracer *trc)
 {
     NativeObject *obj = object();
 
+    // Beware JSObject::swap exchanging a native object for a non-native one.
+    if (!obj->isNative())
+        return;
+
     if (IsInsideNursery(obj))
         return;
 
     if (kind() == ElementKind) {
         int32_t initLen = obj->getDenseInitializedLength();
         int32_t clampedStart = Min(start_, initLen);
         int32_t clampedEnd = Min(start_ + count_, initLen);
         gc::MarkArraySlots(trc, clampedEnd - clampedStart,
--- a/js/src/jit-test/tests/asm.js/testMathLib.js
+++ b/js/src/jit-test/tests/asm.js/testMathLib.js
@@ -59,68 +59,73 @@ testUnary(asmLink(asmCompile('glob', USE
 
 var f = asmLink(asmCompile('glob', USE_ASM + 'var abs=glob.Math.abs; function f(i) { i=i|0; return abs(i|0)|0 } return f'), this);
 for (n of [-Math.pow(2,31)-1, -Math.pow(2,31), -Math.pow(2,31)+1, -1, 0, 1, Math.pow(2,31)-2, Math.pow(2,31)-1, Math.pow(2,31)])
     assertEq(f(n), Math.abs(n|0)|0);
 
 var f = asmLink(asmCompile('glob', USE_ASM + 'var clz32=glob.Math.clz32; function f(i) { i=i|0; return clz32(i)|0 } return f'), this);
 for (n of [0, 1, 2, 15, 16, Math.pow(2,31)-1, Math.pow(2,31), Math.pow(2,31)+1, Math.pow(2,32)-1, Math.pow(2,32), Math.pow(2,32)+1])
     assertEq(f(n), Math.clz32(n|0));
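+// The next two asserts check that clz32's int32 result can appear in both
+// signed (j|0) and unsigned (j>>>0) comparisons; clz32(1) is 31, so both
+// "31 < 30" comparisons below are false and the functions return 0.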
+assertEq(asmLink(asmCompile('glob', USE_ASM + 'var clz32=glob.Math.clz32; function f(i, j) { i=i|0;j=j|0; return (clz32(i) < (j|0))|0 } return f'), this)(0x1, 30), 0);
+assertEq(asmLink(asmCompile('glob', USE_ASM + 'var clz32=glob.Math.clz32; function f(i, j) { i=i|0;j=j|0; return (clz32(i) < (j>>>0))|0 } return f'), this)(0x1, 30), 0);
 
 var doubleNumbers = [NaN, Infinity, -Infinity, -10000, -3.4, -0, 0, 3.4, 10000];
 var floatNumbers = [];
 for (var x of doubleNumbers) floatNumbers.push(Math.fround(x));
-var intNumbers = [-10000, -3, -1, 0, 3, 10000];
+var intNumbers = [-Math.pow(2,31), -10000, -3, -1, 0, 3, 10000, Math.pow(2,31), Math.pow(2,31)+1];
 
 function testBinary(f, g, numbers) {
     for (n of numbers)
         for (o of numbers)
             assertEq(f(n,o), g(n,o));
 }
 
 assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var po=glob.Math.pow; function f(d,e) { d=+d;e=+e; return +po(d,e) } return f'), {Math:{pow:Math.sin}});
 assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var po=glob.Math.pow; function f(d,e) { d=+d;e=+e; return +po(d,e) } return f'), {Math:{pow:null}});
 testBinary(asmLink(asmCompile('glob', USE_ASM + 'var po=glob.Math.pow; function f(d,e) { d=+d;e=+e; return +po(d,e) } return f'), {Math:{pow:Math.pow}}), Math.pow, doubleNumbers);
 
 assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var at=glob.Math.atan2; function f(d,e) { d=+d;e=+e; return +at(d,e) } return f'), {Math:{atan2:Math.sin}});
 assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var at=glob.Math.atan2; function f(d,e) { d=+d;e=+e; return +at(d,e) } return f'), {Math:{atan2:null}});
 testBinary(asmLink(asmCompile('glob', USE_ASM + 'var at=glob.Math.atan2; function f(d,e) { d=+d;e=+e; return +at(d,e) } return f'), {Math:{atan2:Math.atan2}}), Math.atan2, doubleNumbers);
 
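+// coercedMin/coercedMax model asm.js int semantics for Math.min/Math.max:
+// every argument is coerced to int32 with |0 before the operation, which
+// is what the int-typed tests below now expect.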
+function coercedMin(...args) { for (var i = 0; i < args.length; i++) args[i] = args[i]|0; return Math.min(...args) }
+function coercedMax(...args) { for (var i = 0; i < args.length; i++) args[i] = args[i]|0; return Math.max(...args) }
 assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(d) { d=+d; return +min(d) } return f');
 assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + 'var i32=new glob.Int32Array(heap); var min=glob.Math.min; function f() { return min(i32[0], 5)|0 } return f');
 assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(x) { x=x|0; return min(3 + x, 5)|0 } return f');
 assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(x) { x=x|0; return min(5, 3 + x)|0 } return f');
+assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(x) { x=x|0; return min(x, 1)|0 } return f');
 
 assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e) { d=+d;e=+e; return +min(d,e) } return f'), {Math:{min:Math.sin}});
 assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e) { d=+d;e=+e; return +min(d,e) } return f'), {Math:{min:null}});
 testBinary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e) { d=+d;e=+e; return +min(d,e) } return f'), {Math:{min:Math.min}}), Math.min, doubleNumbers);
 testBinary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; var f32=glob.Math.fround; function f(d,e) { d=f32(d);e=f32(e); return f32(min(d,e)) } return f'), this), Math.min, floatNumbers);
-testBinary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e) { d=d|0;e=e|0; return min(d,e)|0} return f'), {Math:{min:Math.min}}), Math.min, intNumbers);
+testBinary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e) { d=d|0;e=e|0; return min(d|0,e|0)|0} return f'), {Math:{min:Math.min}}), coercedMin, intNumbers);
 
 assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e) { d=+d;e=+e; return +max(d,e) } return f'), {Math:{max:Math.sin}});
 assertAsmLinkFail(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e) { d=+d;e=+e; return +max(d,e) } return f'), {Math:{max:null}});
 testBinary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e) { d=+d;e=+e; return +max(d,e) } return f'), {Math:{max:Math.max}}), Math.max, doubleNumbers);
 testBinary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; var f32=glob.Math.fround; function f(d,e) { d=f32(d);e=f32(e); return f32(max(d,e)) } return f'), this), Math.max, floatNumbers);
-testBinary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e) { d=d|0;e=e|0; return max(d,e)|0} return f'), {Math:{max:Math.max}}), Math.max, intNumbers);
+testBinary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e) { d=d|0;e=e|0; return max(d|0,e|0)|0} return f'), {Math:{max:Math.max}}), coercedMax, intNumbers);
 
 function testTernary(f, g, numbers) {
     for (n of numbers)
         for (o of numbers)
             for (p of numbers)
                 assertEq(f(n,o,p), g(n,o,p));
 }
 
 assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e,g) { d=+d;e=+e;g=g|0; return +min(d,e,g) } return f');
 assertAsmTypeFail('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=d|0;e=e|0;g=+g; return max(d,e,g)|0 } return f');
 assertAsmTypeFail('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e,g) { d=+d;e=+e;g=+g; return min(d,e,g)|0 } return f');
-testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return +max(d,e,g) } return f'), {Math:{max:Math.max}}), Math.max, intNumbers);
-testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return max(d,e,g)|0 } return f'), {Math:{max:Math.max}}), Math.max, intNumbers);
+testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return +max(d|0,e|0,g|0) } return f'), {Math:{max:Math.max}}), coercedMax, intNumbers);
+testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return max(d|0,e|0,g|0)|0 } return f'), {Math:{max:Math.max}}), coercedMax, intNumbers);
 testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d,e,g) { d=+d;e=+e;g=+g; return +max(d,e,g) } return f'), {Math:{max:Math.max}}), Math.max, doubleNumbers);
 testTernary(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; var _=glob.Math.fround; function f(d,e,g) { d=_(d);e=_(e);g=_(g); return _(max(d,e,g)) } return f'), this), Math.max, floatNumbers);
-testTernary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return min(d,e,g)|0 } return f'), {Math:{min:Math.min}}), Math.min, intNumbers);
+testTernary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e,g) { d=d|0;e=e|0;g=g|0; return min(d|0,e|0,g|0)|0 } return f'), {Math:{min:Math.min}}), coercedMin, intNumbers);
 testTernary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d,e,g) { d=+d;e=+e;g=+g; return +min(d,e,g) } return f'), {Math:{min:Math.min}}), Math.min, doubleNumbers);
 testTernary(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; var _=glob.Math.fround; function f(d,e,g) { d=_(d);e=_(e);g=_(g); return _(min(d,e,g)) } return f'), this), Math.min, floatNumbers);
 
 // Implicit return coercions of math functions
 assertEq(asmLink(asmCompile('glob', USE_ASM + 'var im=glob.Math.imul; function f(i) { i=i|0; i = im(i,i); return i|0 } return f'), this)(3), 9);
 assertAsmTypeFail('glob', USE_ASM + 'var im=glob.Math.imul; function f(d) { d=+d; d = im(d, d) } return f');
 assertAsmTypeFail('glob', USE_ASM + FROUND + 'var im=glob.Math.imul; function f(d) { d=fround(d); d = im(d, d) } return f');
 
@@ -134,17 +139,17 @@ assertAsmTypeFail('glob', USE_ASM + 'var
 assertEq(asmLink(asmCompile('glob', USE_ASM + 'var sqrt=glob.Math.sqrt; function f(d) { d=+d; d = sqrt(d); return +d } return f'), this)(256), 16);
 assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var sqrt=glob.Math.sqrt; function f(d) { d=fround(d); d = fround(sqrt(d)); return +d } return f'), this)(13.37), Math.fround(Math.sqrt(Math.fround(13.37))));
 assertAsmTypeFail('glob', USE_ASM + FROUND + 'var sqrt=glob.Math.sqrt; function f(d) { d=fround(d); d = sqrt(d); return fround(d) } return f');
 assertAsmTypeFail('glob', USE_ASM + FROUND + 'var sqrt=glob.Math.sqrt; function f(d) { d=fround(d); d = sqrt(d); return d } return f');
 assertAsmTypeFail('glob', USE_ASM + 'var sqrt=glob.Math.sqrt; function f(n) { n=n|0; var d=0.; d = sqrt(n|0) } return f');
 assertAsmTypeFail('glob', USE_ASM + 'var sqrt=glob.Math.sqrt; function f(n) { n=n|0; var d=3.; n = sqrt(d)|0 } return f');
 
 assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d) { d=+d; d = min(d, 13.); return +d } return f'), this)(12), 12);
-assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d) { d=d|0; d = min(d, 11); return d|0 } return f'), this)(12), 11);
+assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d) { d=d|0; d = min(d|0, 11); return d|0 } return f'), this)(12), 11);
 assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var min=glob.Math.min; function f(d) { d=fround(d); d = min(d, fround(13.37)); return fround(d) } return f'), this)(14), Math.fround(13.37));
 
 assertEq(asmLink(asmCompile('glob', USE_ASM + 'var sin=glob.Math.sin; function f(d) { d=+d; d = sin(d); return +d } return f'), this)(Math.PI), Math.sin(Math.PI));
 assertAsmTypeFail('glob', USE_ASM + FROUND + 'var sin=glob.Math.sin; function f(d) { d=fround(d); d = sin(d) } return f');
 assertAsmTypeFail('glob', USE_ASM + 'var sin=glob.Math.sin; function f(d) { d=d|0; d = sin(d) } return f');
 
 assertEq(asmLink(asmCompile('glob', USE_ASM + 'var pow=glob.Math.pow; function f(d) { d=+d; d = pow(d,d); return +d } return f'), this)(3), 27);
 assertAsmTypeFail('glob', USE_ASM + FROUND + 'var pow=glob.Math.pow; function f(d) { d=fround(d); d = pow(d, d) } return f');
@@ -153,20 +158,20 @@ assertAsmTypeFail('glob', USE_ASM + 'var
 assertAsmTypeFail('glob', USE_ASM + 'var sin=glob.Math.sin; function f(d) { d=+d; var i=0; i = sin(d)|0; } return f');
 assertAsmTypeFail('glob', USE_ASM + 'var pow=glob.Math.pow; function f(d) { d=+d; var i=0; i = pow(d,d)|0; } return f');
 assertAsmTypeFail('glob', USE_ASM + 'var atan2=glob.Math.atan2; function f(d) { d=+d; var i=0; i = atan2(d,d)|0; } return f');
 assertAsmTypeFail('glob', USE_ASM + 'var sqrt=glob.Math.sqrt; function f(d) { d=+d; sqrt(d)|0; } return f');
 assertAsmTypeFail('glob', USE_ASM + 'var abs=glob.Math.abs; function f(d) { d=+d; abs(d)|0; } return f');
 
 assertEq(asmLink(asmCompile('glob', USE_ASM + 'var im=glob.Math.imul; function f(i) { i=i|0; var d=0.0; d = +im(i,i); return +d } return f'), this)(42), Math.imul(42, 42));
 assertEq(asmLink(asmCompile('glob', USE_ASM + 'var abs=glob.Math.abs; function f(i) { i=i|0; var d=0.0; d = +abs(i|0); return +d } return f'), this)(-42), 42);
-assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(i) { i=i|0; var d=0.0; d = +min(i, 0); return +d } return f'), this)(-42), -42);
-assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var min=glob.Math.min; function f(i) { i=i|0; var d=fround(0); d = fround(min(i, 0)); return +d } return f'), this)(-42), -42);
-assertEq(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(i) { i=i|0; var d=0.0; d = +max(i, 0); return +d } return f'), this)(-42), 0);
-assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var max=glob.Math.max; function f(i) { i=i|0; var d=fround(0); d = fround(max(i, 0)); return +d } return f'), this)(-42), 0);
+assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(i) { i=i|0; var d=0.0; d = +min(i|0, 0); return +d } return f'), this)(-42), -42);
+assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var min=glob.Math.min; function f(i) { i=i|0; var d=fround(0); d = fround(min(i|0, 0)); return +d } return f'), this)(-42), -42);
+assertEq(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(i) { i=i|0; var d=0.0; d = +max(i|0, 0); return +d } return f'), this)(-42), 0);
+assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var max=glob.Math.max; function f(i) { i=i|0; var d=fround(0); d = fround(max(i|0, 0)); return +d } return f'), this)(-42), 0);
 assertEq(asmLink(asmCompile('glob', USE_ASM + 'var min=glob.Math.min; function f(d) { d=+d; var i=0; i = ~~min(d, 0.)|0; return i|0 } return f'), this)(-42), -42);
 assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var min=glob.Math.min; function f(d) { d=fround(d); var i=0; i = ~~min(d, fround(0))|0; return i|0 } return f'), this)(-42), -42);
 assertEq(asmLink(asmCompile('glob', USE_ASM + 'var max=glob.Math.max; function f(d) { d=+d; var i=0; i = ~~max(d, 0.)|0; return i|0 } return f'), this)(-42), 0);
 assertEq(asmLink(asmCompile('glob', USE_ASM + FROUND + 'var max=glob.Math.max; function f(d) { d=fround(d); var i=0; i = ~~max(d, fround(0))|0; return i|0 } return f'), this)(-42), 0);
 
 assertEq(asmLink(asmCompile('glob', USE_ASM + 'var abs=glob.Math.abs; function f(i) { i=i|0; var d=0.0; return +d; +abs(i|0); return 3.0;} return f'), this)(-42), 0);
 
 assertAsmTypeFail('glob', USE_ASM + 'var tau=glob.Math.TAU; function f() {} return f');
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/atomics/basic-tests.js
@@ -0,0 +1,416 @@
+// Basic functional tests for the Atomics primitives.
+//
+// These do not test atomicity, just that calls, coercions, indexing,
+// and exception behavior all work as intended.
+//
+// These do not test the futex operations.
+
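+// A sketch of the compareExchange contract that testMethod/testFunction
+// exercise: Atomics.compareExchange(a, i, expected, replacement) always
+// returns the old value of a[i], and stores replacement only when the old
+// value equals expected.  For example:
+//
+//   Atomics.compareExchange(a, x, 0, 37)  // returns 0, a[x] becomes 37
+//   Atomics.compareExchange(a, x, 7, 8)   // returns 37, a[x] unchanged
+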
+var DEBUG = false;		// Set to true for useful printouts
+
+function dprint(...xs) {
+    if (!DEBUG)
+	return;
+    var s = "";
+    for ( var x in xs )
+	s += String(xs[x]);
+    print(s);
+}
+
+function testMethod(a, ...indices) {
+    dprint("Method: " + a.constructor.name);
+    var poison;
+    switch (a.BYTES_PER_ELEMENT) {
+    case 1: poison = 0x5A; break;
+    case 2: poison = 0x5A5A; break;
+    case 4: poison = 0x5A5A5A5A; break;
+    }
+    for ( var i=0 ; i < indices.length ; i++ ) {
+	var x = indices[i];
+	if (x > 0)
+	    a[x-1] = poison;
+	if (x < a.length-1)
+	    a[x+1] = poison;
+
+	// val = 0
+	assertEq(Atomics.compareExchange(a, x, 0, 37), 0);
+	// val = 37
+	assertEq(Atomics.compareExchange(a, x, 37, 5), 37);
+	// val = 5
+	assertEq(Atomics.compareExchange(a, x, 7, 8), 5); // i.e., this exchange should fail
+	// val = 5
+	assertEq(Atomics.compareExchange(a, x, 5, 9), 5);
+	// val = 9
+	assertEq(Atomics.compareExchange(a, x, 5, 0), 9); // should also fail
+
+	// val = 9
+	assertEq(Atomics.load(a, x), 9);
+	// val = 9
+	assertEq(Atomics.store(a, x, 14), 14); // What about coercion?
+	// val = 14
+	assertEq(Atomics.load(a, x), 14);
+	// val = 14
+	Atomics.store(a, x, 0);
+	// val = 0
+
+	Atomics.fence();
+
+	// val = 0
+	assertEq(Atomics.add(a, x, 3), 0);
+	// val = 3
+	assertEq(Atomics.sub(a, x, 2), 3);
+	// val = 1
+	assertEq(Atomics.or(a, x, 6), 1);
+	// val = 7
+	assertEq(Atomics.and(a, x, 14), 7);
+	// val = 6
+	assertEq(Atomics.xor(a, x, 5), 6);
+	// val = 3
+	assertEq(Atomics.load(a, x), 3);
+	// val = 3
+	Atomics.store(a, x, 0);
+	// val = 0
+
+	// Check adjacent elements were not affected
+	if (x > 0) {
+	    assertEq(a[x-1], poison);
+	    a[x-1] = 0;
+	}
+	if (x < a.length-1) {
+	    assertEq(a[x+1], poison);
+	    a[x+1] = 0;
+	}
+    }
+}
+
+function testFunction(a, ...indices) {
+    dprint("Function: " + a.constructor.name);
+    var poison;
+    switch (a.BYTES_PER_ELEMENT) {
+    case 1: poison = 0x5A; break;
+    case 2: poison = 0x5A5A; break;
+    case 4: poison = 0x5A5A5A5A; break;
+    }
+    for ( var i=0 ; i < indices.length ; i++ ) {
+	var x = indices[i];
+	if (x > 0)
+	    a[x-1] = poison;
+	if (x < a.length-1)
+	    a[x+1] = poison;
+
+	// val = 0
+	assertEq(gAtomics_compareExchange(a, x, 0, 37), 0);
+	// val = 37
+	assertEq(gAtomics_compareExchange(a, x, 37, 5), 37);
+	// val = 5
+	assertEq(gAtomics_compareExchange(a, x, 7, 8), 5); // i.e., this exchange should fail
+	// val = 5
+	assertEq(gAtomics_compareExchange(a, x, 5, 9), 5);
+	// val = 9
+	assertEq(gAtomics_compareExchange(a, x, 5, 0), 9); // should also fail
+
+	// val = 9
+	assertEq(gAtomics_load(a, x), 9);
+	// val = 9
+	assertEq(gAtomics_store(a, x, 14), 14); // What about coercion?
+	// val = 14
+	assertEq(gAtomics_load(a, x), 14);
+	// val = 14
+	gAtomics_store(a, x, 0);
+	// val = 0
+
+	gAtomics_fence();
+
+	// val = 0
+	assertEq(gAtomics_add(a, x, 3), 0);
+	// val = 3
+	assertEq(gAtomics_sub(a, x, 2), 3);
+	// val = 1
+	assertEq(gAtomics_or(a, x, 6), 1);
+	// val = 7
+	assertEq(gAtomics_and(a, x, 14), 7);
+	// val = 6
+	assertEq(gAtomics_xor(a, x, 5), 6);
+	// val = 3
+	assertEq(gAtomics_load(a, x), 3);
+	// val = 3
+	gAtomics_store(a, x, 0);
+	// val = 0
+
+	// Check adjacent elements were not affected
+	if (x > 0) {
+	    assertEq(a[x-1], poison);
+	    a[x-1] = 0;
+	}
+	if (x < a.length-1) {
+	    assertEq(a[x+1], poison);
+	    a[x+1] = 0;
+	}
+    }
+}
+
+function testTypeCAS(a) {
+    dprint("Type: " + a.constructor.name);
+
+    var thrown = false;
+    try {
+	Atomics.compareExchange([0], 0, 0, 1);
+    }
+    catch (e) {
+	thrown = true;
+	assertEq(e instanceof TypeError, true);
+    }
+    assertEq(thrown, true);
+
+    // All these variants should be OK
+    Atomics.compareExchange(a, 0, 0.7, 1.8);
+    Atomics.compareExchange(a, 0, "0", 1);
+    Atomics.compareExchange(a, 0, 0, "1");
+    Atomics.compareExchange(a, 0, 0);
+}
+
+function testTypeBinop(a, op) {
+    dprint("Type: " + a.constructor.name);
+
+    var thrown = false;
+    try {
+	op([0], 0, 1);
+    }
+    catch (e) {
+	thrown = true;
+	assertEq(e instanceof TypeError, true);
+    }
+    assertEq(thrown, true);
+
+    // These are all OK
+    op(a, 0, 0.7);
+    op(a, 0, "0");
+    op(a, 0);
+}
+
+function testRangeCAS(a) {
+    dprint("Range: " + a.constructor.name);
+
+    assertEq(Atomics.compareExchange(a, -1, 0, 1), undefined); // out of range => undefined, no effect
+    assertEq(a[0], 0);
+    a[0] = 0;
+
+    assertEq(Atomics.compareExchange(a, "hi", 0, 1), undefined); // invalid => undefined, no effect
+    assertEq(a[0], 0);
+    a[0] = 0;
+
+    assertEq(Atomics.compareExchange(a, a.length + 5, 0, 1), undefined); // out of range => undefined, no effect
+    assertEq(a[0], 0);
+}
+
+// Ad-hoc tests for extreme and out-of-range values.
+// None of these should throw.
+
+function testInt8Extremes(a) {
+    dprint("Int8 extremes");
+
+    a[10] = 0;
+    a[11] = 0;
+
+    Atomics.store(a, 10, 255);
+    assertEq(a[10], -1);
+    assertEq(Atomics.load(a, 10), -1);
+
+    Atomics.add(a, 10, 255); // should coerce to -1
+    assertEq(a[10], -2);
+    assertEq(Atomics.load(a, 10), -2);
+
+    Atomics.add(a, 10, -1);
+    assertEq(a[10], -3);
+    assertEq(Atomics.load(a, 10), -3);
+
+    Atomics.sub(a, 10, 255);	// should coerce to -1
+    assertEq(a[10], -2);
+    assertEq(Atomics.load(a, 10), -2);
+
+    Atomics.sub(a, 10, 256);	// should coerce to 0
+    assertEq(a[10], -2);
+    assertEq(Atomics.load(a, 10), -2);
+
+    Atomics.and(a, 10, -1);	// Preserve all
+    assertEq(a[10], -2);
+    assertEq(Atomics.load(a, 10), -2);
+
+    Atomics.and(a, 10, 256);	// Preserve none
+    assertEq(a[10], 0);
+    assertEq(Atomics.load(a, 10), 0);
+    
+    assertEq(a[11], 0);
+}
+
+function testUint8Extremes(a) {
+    dprint("Uint8 extremes");
+
+    a[10] = 0;
+    a[11] = 0;
+
+    Atomics.store(a, 10, 255);
+    assertEq(a[10], 255);
+    assertEq(Atomics.load(a, 10), 255);
+
+    Atomics.add(a, 10, 255);
+    assertEq(a[10], 254);
+    assertEq(Atomics.load(a, 10), 254);
+
+    Atomics.add(a, 10, -1);
+    assertEq(a[10], 253);
+    assertEq(Atomics.load(a, 10), 253);
+
+    Atomics.sub(a, 10, 255);
+    assertEq(a[10], 254);
+    assertEq(Atomics.load(a, 10), 254);
+
+    Atomics.and(a, 10, -1);	// Preserve all
+    assertEq(a[10], 254);
+    assertEq(Atomics.load(a, 10), 254);
+
+    Atomics.and(a, 10, 256);	// Preserve none
+    assertEq(a[10], 0);
+    assertEq(Atomics.load(a, 10), 0);
+    
+    assertEq(a[11], 0);
+}
+
+function testInt16Extremes(a) {
+    dprint("Int16 extremes");
+
+    a[10] = 0;
+    a[11] = 0;
+
+    Atomics.store(a, 10, 65535);
+    assertEq(a[10], -1);
+    assertEq(Atomics.load(a, 10), -1);
+
+    Atomics.add(a, 10, 65535); // should coerce to -1
+    assertEq(a[10], -2);
+    assertEq(Atomics.load(a, 10), -2);
+
+    Atomics.add(a, 10, -1);
+    assertEq(a[10], -3);
+    assertEq(Atomics.load(a, 10), -3);
+
+    Atomics.sub(a, 10, 65535);	// should coerce to -1
+    assertEq(a[10], -2);
+    assertEq(Atomics.load(a, 10), -2);
+
+    Atomics.sub(a, 10, 65536);	// should coerce to 0
+    assertEq(a[10], -2);
+    assertEq(Atomics.load(a, 10), -2);
+
+    Atomics.and(a, 10, -1);	// Preserve all
+    assertEq(a[10], -2);
+    assertEq(Atomics.load(a, 10), -2);
+
+    Atomics.and(a, 10, 65536);	// Preserve none
+    assertEq(a[10], 0);
+    assertEq(Atomics.load(a, 10), 0);
+
+    assertEq(a[11], 0);
+}
+
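+// Atomics.add returns the previous value of the cell, so summing its
+// results over the whole array must reproduce the sum of the initial
+// contents.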
+function testUint32(a) {
+    var k = 0;
+    for ( var i=0 ; i < 20 ; i++ ) {
+	a[i] = i+5;
+	k += a[i];
+    }
+
+    var sum = 0;
+    for ( var i=0 ; i < 20 ; i++ )
+	sum += Atomics.add(a, i, 1);
+
+    assertEq(sum, k);
+}
+
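+// Detect endianness by aliasing 16-bit and 8-bit views on the same
+// two-byte buffer and checking which byte receives the stored value.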
+function isLittleEndian() {
+    var buf = new ArrayBuffer(2);
+    var word = new Int16Array(buf);
+    var bytes = new Int8Array(buf);
+    word[0] = 37;
+    return bytes[0] == 37;
+}
+
+function runTests() {
+    var is_little = isLittleEndian();
+
+    // Currently the SharedArrayBuffer needs to be a multiple of 4K bytes in size.
+    var sab = new SharedArrayBuffer(4096);
+
+    // Test that two arrays created on the same storage alias
+    var t1 = new SharedInt8Array(sab);
+    var t2 = new SharedUint16Array(sab);
+
+    assertEq(t1[0], 0);
+    assertEq(t2[0], 0);
+    t1[0] = 37;
+    if (is_little)
+	assertEq(t2[0], 37);
+    else
+	assertEq(t2[0], 37 << 8); // on big-endian, byte 0 is the high byte of the 16-bit lane
+    t1[0] = 0;
+
+    // Test that invoking as Atomics.whatever() works on correct arguments
+    testMethod(new SharedInt8Array(sab), 0, 42, 4095);
+    testMethod(new SharedUint8Array(sab), 0, 42, 4095);
+    testMethod(new SharedUint8ClampedArray(sab), 0, 42, 4095);
+    testMethod(new SharedInt16Array(sab), 0, 42, 2047);
+    testMethod(new SharedUint16Array(sab), 0, 42, 2047);
+    testMethod(new SharedInt32Array(sab), 0, 42, 1023);
+    testMethod(new SharedUint32Array(sab), 0, 42, 1023);
+
+    // Test that invoking as v = Atomics.whatever; v() works on correct arguments
+    gAtomics_compareExchange = Atomics.compareExchange;
+    gAtomics_load = Atomics.load;
+    gAtomics_store = Atomics.store;
+    gAtomics_fence = Atomics.fence;
+    gAtomics_add = Atomics.add;
+    gAtomics_sub = Atomics.sub;
+    gAtomics_and = Atomics.and;
+    gAtomics_or = Atomics.or;
+    gAtomics_xor = Atomics.xor;
+
+    testFunction(new SharedInt8Array(sab), 0, 42, 4095);
+    testFunction(new SharedUint8Array(sab), 0, 42, 4095);
+    testFunction(new SharedUint8ClampedArray(sab), 0, 42, 4095);
+    testFunction(new SharedInt16Array(sab), 0, 42, 2047);
+    testFunction(new SharedUint16Array(sab), 0, 42, 2047);
+    testFunction(new SharedInt32Array(sab), 0, 42, 1023);
+    testFunction(new SharedUint32Array(sab), 0, 42, 1023);
+
+    // Test various range and type conditions
+    var v8 = new SharedInt8Array(sab);
+    var v32 = new SharedInt32Array(sab);
+
+    testTypeCAS(v8);
+    testTypeCAS(v32);
+
+    testTypeBinop(v8, Atomics.add);
+    testTypeBinop(v8, Atomics.sub);
+    testTypeBinop(v8, Atomics.and);
+    testTypeBinop(v8, Atomics.or);
+    testTypeBinop(v8, Atomics.xor);
+
+    testTypeBinop(v32, Atomics.add);
+    testTypeBinop(v32, Atomics.sub);
+    testTypeBinop(v32, Atomics.and);
+    testTypeBinop(v32, Atomics.or);
+    testTypeBinop(v32, Atomics.xor);
+
+    // Test out-of-range references
+    testRangeCAS(v8);
+    testRangeCAS(v32);
+
+    // Test extreme values
+    testInt8Extremes(new SharedInt8Array(sab));
+    testUint8Extremes(new SharedUint8Array(sab));
+    testInt16Extremes(new SharedInt16Array(sab));
+    testUint32(new SharedUint32Array(sab));
+}
+
+if (this.Atomics && this.SharedArrayBuffer && this.SharedInt32Array)
+    runTests();
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/atomics/inline-add.js
@@ -0,0 +1,31 @@
+// |jit-test| slow;
+//
+// This is intended to be run manually with IONFLAGS=logs and
+// postprocessing by iongraph to verify manually (by inspecting the
+// MIR) that:
+//
+//  - the add operation is inlined as it should be
+//  - loads and stores are not moved across the add
+//
+// Be sure to run with --ion-eager --ion-offthread-compile=off.
+
+function add(ta) {
+    var x = ta[0];
+    Atomics.add(ta, 86, 6);
+    var y = ta[1];
+    var z = y + 1;
+    var w = x + z;
+    return w;
+}
+
+if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedInt32Array)
+    quit(0);
+
+var sab = new SharedArrayBuffer(4096);
+var ia = new SharedInt32Array(sab);
+for ( var i=0, limit=ia.length ; i < limit ; i++ )
+    ia[i] = 37;
+var v = 0;
+for ( var i=0 ; i < 1000 ; i++ )
+    v += add(ia);
+//print(v);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/atomics/inline-add2.js
@@ -0,0 +1,31 @@
+// |jit-test| slow;
+//
+// Like inline-add, but with SharedUint32Array, which is a special
+// case because a uint32 value need not fit in an int32 and may be
+// representable only as a (double) Number.
+// All this tests is that the Uint32 path is being triggered.
+//
+// This is intended to be run manually with IONFLAGS=logs and
+// postprocessing by iongraph to verify manually (by inspecting the
+// MIR) that:
+//
+//  - the add operation is inlined as it should be, with
+//    a return type 'Double'
+//  - loads and stores are not moved across the add
+//
+// Be sure to run with --ion-eager --ion-offthread-compile=off.
+
+function add(ta) {
+    return Atomics.add(ta, 86, 6);
+}
+
+if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedUint32Array)
+    quit(0);
+
+var sab = new SharedArrayBuffer(4096);
+var ia = new SharedUint32Array(sab);
+for ( var i=0, limit=ia.length ; i < limit ; i++ )
+    ia[i] = 0xdeadbeef;		// Important: Not an int32-capable value
+var v = 0;
+for ( var i=0 ; i < 1000 ; i++ )
+    v += add(ia);
+//print(v);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/atomics/inline-cmpxchg.js
@@ -0,0 +1,31 @@
+// |jit-test| slow;
+//
+// This is intended to be run manually with IONFLAGS=logs and
+// postprocessing by iongraph to verify manually (by inspecting the
+// MIR) that:
+//
+//  - the cmpxchg operation is inlined as it should be
+//  - loads and stores are not moved across the cmpxchg
+//
+// Be sure to run with --ion-eager --ion-offthread-compile=off.
+
+function cmpxchg(ta) {
+    var x = ta[0];
+    Atomics.compareExchange(ta, 86, 37, 42);
+    var y = ta[1];
+    var z = y + 1;
+    var w = x + z;
+    return w;
+}
+
+if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedInt32Array)
+    quit(0);
+
+var sab = new SharedArrayBuffer(4096);
+var ia = new SharedInt32Array(sab);
+for ( var i=0, limit=ia.length ; i < limit ; i++ )
+    ia[i] = 37;
+var v = 0;
+for ( var i=0 ; i < 1000 ; i++ )
+    v += cmpxchg(ia);
+//print(v);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/atomics/inline-fence.js
@@ -0,0 +1,31 @@
+// |jit-test| slow;
+//
+// This is intended to be run manually with IONFLAGS=logs and
+// postprocessing by iongraph to verify manually (by inspecting the
+// MIR) that:
+//
+//  - the fence operation is inlined as it should be
+//  - loads and stores are not moved across the fence
+//
+// Be sure to run with --ion-eager --ion-offthread-compile=off.
+
+function fence(ta) {
+    var x = ta[0];
+    Atomics.fence();
+    var y = ta[1];
+    var z = y + 1;
+    var w = x + z;
+    return w;
+}
+
+if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedInt32Array)
+    quit(0);
+
+var sab = new SharedArrayBuffer(4096);
+var ia = new SharedInt32Array(sab);
+for ( var i=0, limit=ia.length ; i < limit ; i++ )
+    ia[i] = 37;
+var v = 0;
+for ( var i=0 ; i < 1000 ; i++ )
+    v += fence(ia);
+//print(v);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/ion/bug1085298.js
@@ -0,0 +1,7 @@
+function f(x, y) {
+        return (y | 0 && x ? y | 0 : 0)
+}
+m = [1]
+assertEq(f(m[0], m[0]), 1)
+assertEq(f(m[1], m[0]), 0)
+assertEq(f(m[2], m[0]), 0)
new file mode 100644
--- /dev/null
+++ b/js/src/jit/AtomicOp.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AtomicOp_h
+#define jit_AtomicOp_h
+
+namespace js {
+namespace jit {
+
+// Types of atomic operation, shared by MIR and LIR.
+
+enum AtomicOp {
+    AtomicFetchAddOp,
+    AtomicFetchSubOp,
+    AtomicFetchAndOp,
+    AtomicFetchOrOp,
+    AtomicFetchXorOp
+};
+
+// Memory barrier types, shared by MIR and LIR.
+//
+// MembarSynchronizing is here because some platforms can make the
+// distinction (DSB vs DMB on ARM, SYNC vs parameterized SYNC on MIPS)
+// but there's been no reason to use it yet.
+
+enum MemoryBarrierBits {
+    MembarLoadLoad = 1,
+    MembarLoadStore = 2,
+    MembarStoreStore = 4,
+    MembarStoreLoad = 8,
+
+    MembarSynchronizing = 16,
+
+    // For validity testing
+    MembarAllbits = 31,
+};
+
+// Standard barrier bits for a full barrier.
+static const int MembarFull = MembarLoadLoad|MembarLoadStore|MembarStoreLoad|MembarStoreStore;
+
+// Standard sets of barrier bits for atomic loads and stores.
+// See http://gee.cs.oswego.edu/dl/jmm/cookbook.html for more.
+static const int MembarBeforeLoad = 0;
+static const int MembarAfterLoad = MembarLoadLoad|MembarLoadStore;
+static const int MembarBeforeStore = MembarStoreStore;
+static const int MembarAfterStore = MembarStoreLoad;
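+
+// Example (see visitLoadTypedArrayElement in Lowering.cpp): a barriered
+// typed-array load is bracketed by LMemoryBarrier(MembarBeforeLoad) and
+// LMemoryBarrier(MembarAfterLoad), which with the constants above means
+// no barrier bits before the load and LoadLoad|LoadStore after it.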
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_AtomicOp_h */
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -6155,19 +6155,17 @@ JitCode *
 JitRuntime::generateLazyLinkStub(JSContext *cx)
 {
     MacroAssembler masm(cx);
 
     Label call;
     GeneralRegisterSet regs = GeneralRegisterSet::Volatile();
     Register temp0 = regs.takeAny();
 
-    uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS);
-    masm.Push(Imm32(descriptor));
-    masm.call(&call);
+    masm.callWithExitFrame(&call);
     masm.jump(ReturnReg);
 
     masm.bind(&call);
     masm.enterExitFrame();
     masm.setupUnalignedABICall(1, temp0);
     masm.loadJSContext(temp0);
     masm.passABIArg(temp0);
     masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, LazyLinkTopActivation));
@@ -8959,16 +8957,78 @@ CodeGenerator::visitStoreTypedArrayEleme
     }
     if (guardLength)
         masm.bind(&skip);
 
     return true;
 }
 
 bool
+CodeGenerator::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement *lir)
+{
+    Register elements = ToRegister(lir->elements());
+    AnyRegister output = ToAnyRegister(lir->output());
+    Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+    MOZ_ASSERT(lir->oldval()->isRegister());
+    MOZ_ASSERT(lir->newval()->isRegister());
+
+    Register oldval = ToRegister(lir->oldval());
+    Register newval = ToRegister(lir->newval());
+
+    Scalar::Type arrayType = lir->mir()->arrayType();
+    int width = Scalar::byteSize(arrayType);
+
+    if (lir->index()->isConstant()) {
+        Address dest(elements, ToInt32(lir->index()) * width);
+        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+    } else {
+        BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+    }
+
+    return true;
+}
+
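+// Helper for visitAtomicTypedArrayElementBinop below: dispatch on whether
+// the value operand is a constant or lives in a register, since
+// atomicBinopToTypedIntArray is instantiated for both Imm32 and Register.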
+template <typename T>
+static inline void
+AtomicBinopToTypedArray(MacroAssembler &masm, AtomicOp op,
+                        Scalar::Type arrayType, const LAllocation *value, const T &mem,
+                        Register temp1, Register temp2, AnyRegister output)
+{
+    if (value->isConstant())
+        masm.atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, temp1, temp2, output);
+    else
+        masm.atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, temp1, temp2, output);
+}
+
+bool
+CodeGenerator::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop *lir)
+{
+    AnyRegister output = ToAnyRegister(lir->output());
+    Register elements = ToRegister(lir->elements());
+    Register temp1 = lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
+    Register temp2 = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
+    const LAllocation* value = lir->value();
+
+    Scalar::Type arrayType = lir->mir()->arrayType();
+    int width = Scalar::byteSize(arrayType);
+
+    if (lir->index()->isConstant()) {
+        Address mem(elements, ToInt32(lir->index()) * width);
+        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
+    } else {
+        BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
+    }
+
+    return true;
+}
+
+bool
 CodeGenerator::visitClampIToUint8(LClampIToUint8 *lir)
 {
     Register output = ToRegister(lir->output());
     MOZ_ASSERT(output == ToRegister(lir->input()));
     masm.clampIntToUint8(output);
     return true;
 }
 
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -260,16 +260,18 @@ class CodeGenerator : public CodeGenerat
     bool visitArrayPushV(LArrayPushV *lir);
     bool visitArrayPushT(LArrayPushT *lir);
     bool visitArrayConcat(LArrayConcat *lir);
     bool visitArrayJoin(LArrayJoin *lir);
     bool visitLoadTypedArrayElement(LLoadTypedArrayElement *lir);
     bool visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole *lir);
     bool visitStoreTypedArrayElement(LStoreTypedArrayElement *lir);
     bool visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole *lir);
+    bool visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement *lir);
+    bool visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop *lir);
     bool visitClampIToUint8(LClampIToUint8 *lir);
     bool visitClampDToUint8(LClampDToUint8 *lir);
     bool visitClampVToUint8(LClampVToUint8 *lir);
     bool visitCallIteratorStart(LCallIteratorStart *lir);
     bool visitIteratorStart(LIteratorStart *lir);
     bool visitIteratorMore(LIteratorMore *lir);
     bool visitIsNoIterAndBranch(LIsNoIterAndBranch *lir);
     bool visitIteratorEnd(LIteratorEnd *lir);
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -1869,19 +1869,16 @@ AttachFinishedCompilations(JSContext *cx
                 builder = testBuilder;
                 HelperThreadState().remove(finished, &i);
                 break;
             }
         }
         if (!builder)
             break;
 
-// TODO bug 1047346: Enable lazy linking for other architectures again by
-//                   fixing the lazy link stub.
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
         // Try to defer linking if the script is on the stack, to postpone
         // invalidating them.
         if (builder->info().executionMode() == SequentialExecution &&
             builder->script()->hasIonScript())
         {
             bool onStack = false;
             for (JitActivationIterator iter(cx->runtime()); !iter.done(); ++iter) {
                 for (JitFrameIterator it(iter); !it.done(); ++it) {
@@ -1901,17 +1898,16 @@ AttachFinishedCompilations(JSContext *cx
             }
 
             if (onStack) {
                 builder->script()->setPendingIonBuilder(cx, builder);
                 HelperThreadState().ionLazyLinkList().insertFront(builder);
                 continue;
             }
         }
-#endif
 
         if (CodeGenerator *codegen = builder->backgroundCodegen()) {
             RootedScript script(cx, builder->script());
             IonContext ictx(cx, &builder->alloc());
             AutoTraceLog logScript(logger, TraceLogCreateTextId(logger, script));
             AutoTraceLog logLink(logger, TraceLogger::IonLinking);
 
             // Root the assembler until the builder is finished below. As it
--- a/js/src/jit/IonBuilder.h
+++ b/js/src/jit/IonBuilder.h
@@ -722,16 +722,23 @@ class IonBuilder
     InliningStatus inlineStrFromCharCode(CallInfo &callInfo);
     InliningStatus inlineStrCharAt(CallInfo &callInfo);
     InliningStatus inlineStrReplace(CallInfo &callInfo);
 
     // RegExp natives.
     InliningStatus inlineRegExpExec(CallInfo &callInfo);
     InliningStatus inlineRegExpTest(CallInfo &callInfo);
 
+    // Atomics natives.
+    InliningStatus inlineAtomicsCompareExchange(CallInfo &callInfo);
+    InliningStatus inlineAtomicsLoad(CallInfo &callInfo);
+    InliningStatus inlineAtomicsStore(CallInfo &callInfo);
+    InliningStatus inlineAtomicsFence(CallInfo &callInfo);
+    InliningStatus inlineAtomicsBinop(CallInfo &callInfo, JSFunction *target);
+
     // Array intrinsics.
     InliningStatus inlineUnsafePutElements(CallInfo &callInfo);
     bool inlineUnsafeSetDenseArrayElement(CallInfo &callInfo, uint32_t base);
     bool inlineUnsafeSetTypedArrayElement(CallInfo &callInfo, uint32_t base,
                                           ScalarTypeDescr::Type arrayType);
     bool inlineUnsafeSetTypedObjectArrayElement(CallInfo &callInfo, uint32_t base,
                                                 ScalarTypeDescr::Type arrayType);
     InliningStatus inlineNewDenseArray(CallInfo &callInfo);
@@ -786,16 +793,19 @@ class IonBuilder
 
     // Inlining helpers.
     bool inlineGenericFallback(JSFunction *target, CallInfo &callInfo, MBasicBlock *dispatchBlock,
                                bool clonedAtCallsite);
     bool inlineTypeObjectFallback(CallInfo &callInfo, MBasicBlock *dispatchBlock,
                                   MTypeObjectDispatch *dispatch, MGetPropertyCache *cache,
                                   MBasicBlock **fallbackTarget);
 
+    bool atomicsMeetsPreconditions(CallInfo &callInfo, Scalar::Type *arrayElementType);
+    void atomicsCheckBounds(CallInfo &callInfo, MInstruction **elements, MDefinition **index);
+
     bool testNeedsArgumentCheck(JSFunction *target, CallInfo &callInfo);
 
     MDefinition *makeCallsiteClone(JSFunction *target, MDefinition *fun);
     MCall *makeCallHelper(JSFunction *target, CallInfo &callInfo, bool cloneAtCallsite);
     bool makeCall(JSFunction *target, CallInfo &callInfo, bool cloneAtCallsite);
 
     MDefinition *patchInlinedReturn(CallInfo &callInfo, MBasicBlock *exit, MBasicBlock *bottom);
     MDefinition *patchInlinedReturns(CallInfo &callInfo, MIRGraphReturns &returns,
--- a/js/src/jit/IonMacroAssembler.cpp
+++ b/js/src/jit/IonMacroAssembler.cpp
@@ -6,16 +6,17 @@
 
 #include "jit/IonMacroAssembler.h"
 
 #include "jsinfer.h"
 #include "jsprf.h"
 
 #include "builtin/TypedObject.h"
 #include "gc/GCTrace.h"
+#include "jit/AtomicOp.h"
 #include "jit/Bailouts.h"
 #include "jit/BaselineFrame.h"
 #include "jit/BaselineIC.h"
 #include "jit/BaselineJIT.h"
 #include "jit/Lowering.h"
 #include "jit/MIR.h"
 #include "jit/ParallelFunctions.h"
 #include "vm/ForkJoin.h"
@@ -392,16 +393,221 @@ MacroAssembler::loadFromTypedArray(Scala
     }
 }
 
 template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address &src, const ValueOperand &dest,
                                                  bool allowDouble, Register temp, Label *fail);
 template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex &src, const ValueOperand &dest,
                                                  bool allowDouble, Register temp, Label *fail);
 
+template<typename T>
+void
+MacroAssembler::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T &mem,
+                                               Register oldval, Register newval,
+                                               Register temp, AnyRegister output)
+{
+    switch (arrayType) {
+      case Scalar::Int8:
+        compareExchange8SignExtend(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Uint8:
+        compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Uint8Clamped:
+        compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Int16:
+        compareExchange16SignExtend(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Uint16:
+        compareExchange16ZeroExtend(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Int32:
+        compareExchange32(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Uint32:
+        // At the moment, the code in MCallOptimize.cpp requires the output
+        // type to be double for uint32 arrays.  See bug 1077305.
+        MOZ_ASSERT(output.isFloat());
+        compareExchange32(mem, oldval, newval, temp);
+        convertUInt32ToDouble(temp, output.fpu());
+        break;
+      default:
+        MOZ_CRASH("Invalid typed array type");
+    }
+}
+
+template void
+MacroAssembler::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address &mem,
+                                               Register oldval, Register newval, Register temp,
+                                               AnyRegister output);
+template void
+MacroAssembler::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex &mem,
+                                               Register oldval, Register newval, Register temp,
+                                               AnyRegister output);
+
+template<typename S, typename T>
+void
+MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S &value,
+                                           const T &mem, Register temp1, Register temp2, AnyRegister output)
+{
+    // Uint8Clamped is explicitly not supported here
+    switch (arrayType) {
+      case Scalar::Int8:
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint8:
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int16:
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint16:
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int32:
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor32(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint32:
+        // At the moment, the code in MCallOptimize.cpp requires the output
+        // type to be double for uint32 arrays.  See bug 1077305.
+        MOZ_ASSERT(output.isFloat());
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd32(value, mem, InvalidReg, temp1);
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub32(value, mem, InvalidReg, temp1);
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd32(value, mem, temp2, temp1);
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr32(value, mem, temp2, temp1);
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor32(value, mem, temp2, temp1);
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        convertUInt32ToDouble(temp1, output.fpu());
+        break;
+      default:
+        MOZ_CRASH("Invalid typed array type");
+    }
+}
+
+template void
+MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                           const Imm32 &value, const Address &mem,
+                                           Register temp1, Register temp2, AnyRegister output);
+template void
+MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                           const Imm32 &value, const BaseIndex &mem,
+                                           Register temp1, Register temp2, AnyRegister output);
+template void
+MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                           const Register &value, const Address &mem,
+                                           Register temp1, Register temp2, AnyRegister output);
+template void
+MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                           const Register &value, const BaseIndex &mem,
+                                           Register temp1, Register temp2, AnyRegister output);
+
 // Inlined version of gc::CheckAllocatorState that checks the bare essentials
 // and bails for anything that cannot be handled with our jit allocators.
 void
 MacroAssembler::checkAllocatorState(Label *fail)
 {
     // Don't execute the inline path if we are tracing allocations.
     if (js::gc::TraceEnabled())
         jump(fail);
--- a/js/src/jit/IonMacroAssembler.h
+++ b/js/src/jit/IonMacroAssembler.h
@@ -17,16 +17,17 @@
 # include "jit/arm/MacroAssembler-arm.h"
 #elif defined(JS_CODEGEN_MIPS)
 # include "jit/mips/MacroAssembler-mips.h"
 #elif defined(JS_CODEGEN_NONE)
 # include "jit/none/MacroAssembler-none.h"
 #else
 # error "Unknown architecture!"
 #endif
+#include "jit/AtomicOp.h"
 #include "jit/IonInstrumentation.h"
 #include "jit/JitCompartment.h"
 #include "jit/VMFunctions.h"
 #include "vm/ProxyObject.h"
 #include "vm/Shape.h"
 
 #ifdef IS_LITTLE_ENDIAN
 #define IMM32_16ADJ(X) X << 16
@@ -733,16 +734,24 @@ class MacroAssembler : public MacroAssem
           case Scalar::Uint32:
             store32(value, dest);
             break;
           default:
             MOZ_CRASH("Invalid typed array type");
         }
     }
 
+    template<typename T>
+    void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T &mem, Register oldval, Register newval,
+                                        Register temp, AnyRegister output);
+
+    template<typename S, typename T>
+    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S &value,
+                                    const T &mem, Register temp1, Register temp2, AnyRegister output);
+
     void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex &dest);
     void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address &dest);
 
     Register extractString(const Address &address, Register scratch) {
         return extractObject(address, scratch);
     }
     Register extractString(const ValueOperand &value, Register scratch) {
         return extractObject(value, scratch);
@@ -939,16 +948,25 @@ class MacroAssembler : public MacroAssem
         leaveSPSFrame();
         MacroAssemblerSpecific::callIon(callee);
         uint32_t ret = currentOffset();
         reenterSPSFrame();
         return ret;
     }
 
     // see above comment for what is returned
+    uint32_t callWithExitFrame(Label *target) {
+        leaveSPSFrame();
+        MacroAssemblerSpecific::callWithExitFrame(target);
+        uint32_t ret = currentOffset();
+        reenterSPSFrame();
+        return ret;
+    }
+
+    // see above comment for what is returned
     uint32_t callWithExitFrame(JitCode *target) {
         leaveSPSFrame();
         MacroAssemblerSpecific::callWithExitFrame(target);
         uint32_t ret = currentOffset();
         reenterSPSFrame();
         return ret;
     }
 
--- a/js/src/jit/LIR-Common.h
+++ b/js/src/jit/LIR-Common.h
@@ -4866,16 +4866,90 @@ class LStoreTypedArrayElementStatic : pu
     const LAllocation *ptr() {
         return getOperand(0);
     }
     const LAllocation *value() {
         return getOperand(1);
     }
 };
 
+class LCompareExchangeTypedArrayElement : public LInstructionHelper<1, 4, 1>
+{
+  public:
+    LIR_HEADER(CompareExchangeTypedArrayElement)
+
+    LCompareExchangeTypedArrayElement(const LAllocation &elements, const LAllocation &index,
+                                      const LAllocation &oldval, const LAllocation &newval,
+                                      const LDefinition &temp)
+    {
+        setOperand(0, elements);
+        setOperand(1, index);
+        setOperand(2, oldval);
+        setOperand(3, newval);
+        setTemp(0, temp);
+    }
+
+    const LAllocation *elements() {
+        return getOperand(0);
+    }
+    const LAllocation *index() {
+        return getOperand(1);
+    }
+    const LAllocation *oldval() {
+        return getOperand(2);
+    }
+    const LAllocation *newval() {
+        return getOperand(3);
+    }
+    const LDefinition *temp() {
+        return getTemp(0);
+    }
+
+    const MCompareExchangeTypedArrayElement *mir() const {
+        return mir_->toCompareExchangeTypedArrayElement();
+    }
+};
+
+class LAtomicTypedArrayElementBinop : public LInstructionHelper<1, 3, 2>
+{
+  public:
+    LIR_HEADER(AtomicTypedArrayElementBinop)
+
+    LAtomicTypedArrayElementBinop(const LAllocation &elements, const LAllocation &index,
+                                  const LAllocation &value, const LDefinition &temp1,
+                                  const LDefinition &temp2)
+    {
+        setOperand(0, elements);
+        setOperand(1, index);
+        setOperand(2, value);
+        setTemp(0, temp1);
+        setTemp(1, temp2);
+    }
+
+    const LAllocation *elements() {
+        return getOperand(0);
+    }
+    const LAllocation *index() {
+        return getOperand(1);
+    }
+    const LAllocation *value() {
+        return getOperand(2);
+    }
+    const LDefinition *temp1() {
+        return getTemp(0);
+    }
+    const LDefinition *temp2() {
+        return getTemp(1);
+    }
+
+    const MAtomicTypedArrayElementBinop *mir() const {
+        return mir_->toAtomicTypedArrayElementBinop();
+    }
+};
+
 class LEffectiveAddress : public LInstructionHelper<1, 2, 0>
 {
   public:
     LIR_HEADER(EffectiveAddress);
 
     LEffectiveAddress(const LAllocation &base, const LAllocation &index) {
         setOperand(0, base);
         setOperand(1, index);
@@ -6623,12 +6697,36 @@ class LThrowUninitializedLexical : publi
   public:
     LIR_HEADER(ThrowUninitializedLexical)
 
     MLexicalCheck *mir() {
         return mir_->toLexicalCheck();
     }
 };
 
+class LMemoryBarrier : public LInstructionHelper<0, 0, 0>
+{
+  private:
+    const int type_;
+
+  public:
+    LIR_HEADER(MemoryBarrier)
+
+    // The parameter 'type' is a bitwise 'or' of the barrier types needed;
+    // see AtomicOp.h.
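+    // (For instance, Lowering.cpp creates LMemoryBarrier(MembarBeforeLoad)
+    // and LMemoryBarrier(MembarAfterLoad) around a barriered typed-array
+    // load.)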
+    explicit LMemoryBarrier(int type) : type_(type)
+    {
+        MOZ_ASSERT((type_ & ~MembarAllbits) == 0);
+    }
+
+    int type() const {
+        return type_;
+    }
+
+    const MMemoryBarrier *mir() const {
+        return mir_->toMemoryBarrier();
+    }
+};
+
 } // namespace jit
 } // namespace js
 
 #endif /* jit_LIR_Common_h */
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -653,17 +653,18 @@ class LNode
     virtual void setDef(size_t index, const LDefinition &def) = 0;
 
     // Returns information about operands.
     virtual size_t numOperands() const = 0;
     virtual LAllocation *getOperand(size_t index) = 0;
     virtual void setOperand(size_t index, const LAllocation &a) = 0;
 
     // Returns information about temporary registers needed. Each temporary
-    // register is an LUse with a TEMPORARY policy, or a fixed register.
+    // register is an LDefinition with a fixed or virtual register and
+    // either GENERAL, FLOAT32, or DOUBLE type.
     virtual size_t numTemps() const = 0;
     virtual LDefinition *getTemp(size_t index) = 0;
     virtual void setTemp(size_t index, const LDefinition &a) = 0;
 
     // Returns the number of successors of this instruction, if it is a control
     // transfer instruction, or zero otherwise.
     virtual size_t numSuccessors() const = 0;
     virtual MBasicBlock *getSuccessor(size_t i) const = 0;
--- a/js/src/jit/LOpcodes.h
+++ b/js/src/jit/LOpcodes.h
@@ -229,16 +229,18 @@
     _(StoreElementHoleV)            \
     _(StoreElementHoleT)            \
     _(LoadTypedArrayElement)        \
     _(LoadTypedArrayElementHole)    \
     _(LoadTypedArrayElementStatic)  \
     _(StoreTypedArrayElement)       \
     _(StoreTypedArrayElementHole)   \
     _(StoreTypedArrayElementStatic) \
+    _(CompareExchangeTypedArrayElement) \
+    _(AtomicTypedArrayElementBinop) \
     _(EffectiveAddress)             \
     _(ClampIToUint8)                \
     _(ClampDToUint8)                \
     _(ClampVToUint8)                \
     _(LoadFixedSlotV)               \
     _(LoadFixedSlotT)               \
     _(StoreFixedSlotV)              \
     _(StoreFixedSlotT)              \
@@ -322,16 +324,17 @@
     _(AsmJSLoadFFIFunc)             \
     _(AsmJSParameter)               \
     _(AsmJSReturn)                  \
     _(AsmJSVoidReturn)              \
     _(AsmJSPassStackArg)            \
     _(AsmJSCall)                    \
     _(InterruptCheckPar)            \
     _(RecompileCheck)               \
+    _(MemoryBarrier)                \
     _(AssertRangeI)                 \
     _(AssertRangeD)                 \
     _(AssertRangeF)                 \
     _(AssertRangeV)                 \
     _(LexicalCheck)                 \
     _(ThrowUninitializedLexical)
 
 #if defined(JS_CODEGEN_X86)
--- a/js/src/jit/LinearScan.cpp
+++ b/js/src/jit/LinearScan.cpp
@@ -206,17 +206,17 @@ LinearScanAllocator::allocateRegisters()
 
 /*
  * This function iterates over control flow edges in the function and resolves
  * conflicts wherein two predecessors of a block have different allocations
  * for a virtual register than the block itself. It also turns phis into moves.
  *
  * The algorithm is based on the one published in "Linear Scan Register
  * Allocation on SSA Form" by C. Wimmer et al., for which the full citation
- * appears above.
+ * appears in LiveRangeAllocator.cpp.
  */
 bool
 LinearScanAllocator::resolveControlFlow()
 {
     for (size_t i = 0; i < graph.numBlocks(); i++) {
         if (mir->shouldCancel("LSRA Resolve Control Flow (main loop)"))
             return false;
 
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -2854,20 +2854,32 @@ LIRGenerator::visitLoadTypedArrayElement
 
     MOZ_ASSERT(IsNumberType(ins->type()));
 
     // We need a temp register for Uint32Array with known double result.
     LDefinition tempDef = LDefinition::BogusTemp();
     if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
         tempDef = temp();
 
+    if (ins->requiresMemoryBarrier()) {
+        LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarBeforeLoad);
+        if (!add(fence, ins))
+            return false;
+    }
     LLoadTypedArrayElement *lir = new(alloc()) LLoadTypedArrayElement(elements, index, tempDef);
     if (ins->fallible() && !assignSnapshot(lir, Bailout_Overflow))
         return false;
-    return define(lir, ins);
+    if (!define(lir, ins))
+        return false;
+    if (ins->requiresMemoryBarrier()) {
+        LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarAfterLoad);
+        if (!add(fence, ins))
+            return false;
+    }
+    return true;
 }
 
 bool
 LIRGenerator::visitClampToUint8(MClampToUint8 *ins)
 {
     MDefinition *in = ins->input();
 
     switch (in->type()) {
@@ -2941,17 +2953,34 @@ LIRGenerator::visitStoreTypedArrayElemen
     LAllocation index = useRegisterOrConstant(ins->index());
     LAllocation value;
 
     // For byte arrays, the value has to be in a byte register on x86.
     if (ins->isByteArray())
         value = useByteOpRegisterOrNonDoubleConstant(ins->value());
     else
         value = useRegisterOrNonDoubleConstant(ins->value());
-    return add(new(alloc()) LStoreTypedArrayElement(elements, index, value), ins);
+
+    // Optimization opportunity for atomics: on some platforms there
+    // is a store instruction that incorporates the necessary
+    // barriers, and we could use that instead of separate barrier and
+    // store instructions.  See bug 1077027.
+    if (ins->requiresMemoryBarrier()) {
+        LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarBeforeStore);
+        if (!add(fence, ins))
+            return false;
+    }
+    if (!add(new(alloc()) LStoreTypedArrayElement(elements, index, value), ins))
+        return false;
+    if (ins->requiresMemoryBarrier()) {
+        LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarAfterStore);
+        if (!add(fence, ins))
+            return false;
+    }
+    return true;
 }
 
 bool
 LIRGenerator::visitStoreTypedArrayElementHole(MStoreTypedArrayElementHole *ins)
 {
     MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
     MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
     MOZ_ASSERT(ins->length()->type() == MIRType_Int32);
@@ -3709,16 +3738,23 @@ LIRGenerator::visitRecompileCheck(MRecom
 {
     LRecompileCheck *lir = new(alloc()) LRecompileCheck(temp());
     if (!add(lir, ins))
         return false;
     return assignSafepoint(lir, ins);
 }
 
 bool
+LIRGenerator::visitMemoryBarrier(MMemoryBarrier *ins)
+{
+    LMemoryBarrier *lir = new(alloc()) LMemoryBarrier(ins->type());
+    return add(lir, ins);
+}
+
+bool
 LIRGenerator::visitSimdConstant(MSimdConstant *ins)
 {
     MOZ_ASSERT(IsSimdType(ins->type()));
 
     if (ins->type() == MIRType_Int32x4)
         return define(new(alloc()) LInt32x4(), ins);
     if (ins->type() == MIRType_Float32x4)
         return define(new(alloc()) LFloat32x4(), ins);
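For reference, a sketch of the LIR sequence the barriered paths above produce
(instruction names as defined in this patch; layout illustrative only):

    MemoryBarrier(MembarBeforeLoad)
    LoadTypedArrayElement elements, index
    MemoryBarrier(MembarAfterLoad)

Barriered stores are bracketed the same way with MembarBeforeStore and
MembarAfterStore.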
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -264,16 +264,17 @@ class LIRGenerator : public LIRGenerator
     bool visitAsmJSReturn(MAsmJSReturn *ins);
     bool visitAsmJSVoidReturn(MAsmJSVoidReturn *ins);
     bool visitAsmJSPassStackArg(MAsmJSPassStackArg *ins);
     bool visitAsmJSCall(MAsmJSCall *ins);
     bool visitSetDOMProperty(MSetDOMProperty *ins);
     bool visitGetDOMProperty(MGetDOMProperty *ins);
     bool visitGetDOMMember(MGetDOMMember *ins);
     bool visitRecompileCheck(MRecompileCheck *ins);
+    bool visitMemoryBarrier(MMemoryBarrier *ins);
     bool visitSimdExtractElement(MSimdExtractElement *ins);
     bool visitSimdInsertElement(MSimdInsertElement *ins);
     bool visitSimdSignMask(MSimdSignMask *ins);
     bool visitSimdSwizzle(MSimdSwizzle *ins);
     bool visitSimdShuffle(MSimdShuffle *ins);
     bool visitSimdUnaryArith(MSimdUnaryArith *ins);
     bool visitSimdBinaryComp(MSimdBinaryComp *ins);
     bool visitSimdBinaryArith(MSimdBinaryArith *ins);
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -1,16 +1,17 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jsmath.h"
 
+#include "builtin/AtomicsObject.h"
 #include "builtin/TestingFunctions.h"
 #include "builtin/TypedObject.h"
 #include "jit/BaselineInspector.h"
 #include "jit/IonBuilder.h"
 #include "jit/Lowering.h"
 #include "jit/MIR.h"
 #include "jit/MIRGraph.h"
 #include "vm/ArgumentsObject.h"
@@ -29,16 +30,34 @@ IonBuilder::InliningStatus
 IonBuilder::inlineNativeCall(CallInfo &callInfo, JSFunction *target)
 {
     MOZ_ASSERT(target->isNative());
     JSNative native = target->native();
 
     if (!optimizationInfo().inlineNative())
         return InliningStatus_NotInlined;
 
+    // Atomic natives.
+    if (native == atomics_compareExchange)
+        return inlineAtomicsCompareExchange(callInfo);
+    if (native == atomics_load)
+        return inlineAtomicsLoad(callInfo);
+    if (native == atomics_store)
+        return inlineAtomicsStore(callInfo);
+    if (native == atomics_fence)
+        return inlineAtomicsFence(callInfo);
+    if (native == atomics_add ||
+        native == atomics_sub ||
+        native == atomics_and ||
+        native == atomics_or ||
+        native == atomics_xor)
+    {
+        return inlineAtomicsBinop(callInfo, target);
+    }
+
     // Array natives.
     if (native == js_Array)
         return inlineArray(callInfo);
     if (native == js::array_pop)
         return inlineArrayPopShift(callInfo, MArrayPopShift::Pop);
     if (native == js::array_shift)
         return inlineArrayPopShift(callInfo, MArrayPopShift::Shift);
     if (native == js::array_push)
@@ -2231,16 +2250,235 @@ IonBuilder::inlineBoundFunction(CallInfo
 
     if (!makeCall(scriptedTarget, callInfo, false))
         return InliningStatus_Error;
 
     return InliningStatus_Inlined;
 }
 
 IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsCompareExchange(CallInfo &callInfo)
+{
+    if (callInfo.argc() != 4 || callInfo.constructing())
+        return InliningStatus_NotInlined;
+
+    Scalar::Type arrayType;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+        return InliningStatus_NotInlined;
+
+    MDefinition *oldval = callInfo.getArg(2);
+    if (!(oldval->type() == MIRType_Int32 || oldval->type() == MIRType_Double))
+        return InliningStatus_NotInlined;
+
+    MDefinition *newval = callInfo.getArg(3);
+    if (!(newval->type() == MIRType_Int32 || newval->type() == MIRType_Double))
+        return InliningStatus_NotInlined;
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    MInstruction *elements;
+    MDefinition *index;
+    atomicsCheckBounds(callInfo, &elements, &index);
+
+    MDefinition *oldvalToWrite = oldval;
+    if (oldval->type() == MIRType_Double) {
+        oldvalToWrite = MTruncateToInt32::New(alloc(), oldval);
+        current->add(oldvalToWrite->toInstruction());
+    }
+
+    MDefinition *newvalToWrite = newval;
+    if (newval->type() == MIRType_Double) {
+        newvalToWrite = MTruncateToInt32::New(alloc(), newval);
+        current->add(newvalToWrite->toInstruction());
+    }
+
+    MCompareExchangeTypedArrayElement *cas =
+        MCompareExchangeTypedArrayElement::New(alloc(), elements, index, arrayType,
+                                               oldvalToWrite, newvalToWrite);
+    cas->setResultType(getInlineReturnType());
+    current->add(cas);
+    current->push(cas);
+
+    return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsLoad(CallInfo &callInfo)
+{
+    if (callInfo.argc() != 2 || callInfo.constructing())
+        return InliningStatus_NotInlined;
+
+    Scalar::Type arrayType;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+        return InliningStatus_NotInlined;
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    MInstruction *elements;
+    MDefinition *index;
+    atomicsCheckBounds(callInfo, &elements, &index);
+
+    MLoadTypedArrayElement *load =
+        MLoadTypedArrayElement::New(alloc(), elements, index, arrayType,
+                                    DoesRequireMemoryBarrier);
+    load->setResultType(getInlineReturnType());
+    current->add(load);
+    current->push(load);
+
+    return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsStore(CallInfo &callInfo)
+{
+    if (callInfo.argc() != 3 || callInfo.constructing())
+        return InliningStatus_NotInlined;
+
+    Scalar::Type arrayType;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+        return InliningStatus_NotInlined;
+
+    MDefinition *value = callInfo.getArg(2);
+    if (!(value->type() == MIRType_Int32 || value->type() == MIRType_Double))
+        return InliningStatus_NotInlined;
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    MInstruction *elements;
+    MDefinition *index;
+    atomicsCheckBounds(callInfo, &elements, &index);
+
+    MDefinition *toWrite = value;
+    if (value->type() == MIRType_Double) {
+        toWrite = MTruncateToInt32::New(alloc(), value);
+        current->add(toWrite->toInstruction());
+    }
+    MStoreTypedArrayElement *store =
+        MStoreTypedArrayElement::New(alloc(), elements, index, toWrite, arrayType,
+                                     DoesRequireMemoryBarrier);
+    current->add(store);
+    current->push(value);
+
+    return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsFence(CallInfo &callInfo)
+{
+    if (callInfo.argc() != 0 || callInfo.constructing())
+        return InliningStatus_NotInlined;
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    MMemoryBarrier *fence = MMemoryBarrier::New(alloc());
+    current->add(fence);
+    pushConstant(UndefinedValue());
+
+    return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsBinop(CallInfo &callInfo, JSFunction *target)
+{
+    if (callInfo.argc() != 3 || callInfo.constructing())
+        return InliningStatus_NotInlined;
+
+    Scalar::Type arrayType;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+        return InliningStatus_NotInlined;
+
+    MDefinition *value = callInfo.getArg(2);
+    if (!(value->type() == MIRType_Int32 || value->type() == MIRType_Double))
+        return InliningStatus_NotInlined;
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    MInstruction *elements;
+    MDefinition *index;
+    atomicsCheckBounds(callInfo, &elements, &index);
+
+    JSNative native = target->native();
+    AtomicOp k = AtomicFetchAddOp;
+    if (native == atomics_add)
+        k = AtomicFetchAddOp;
+    else if (native == atomics_sub)
+        k = AtomicFetchSubOp;
+    else if (native == atomics_and)
+        k = AtomicFetchAndOp;
+    else if (native == atomics_or)
+        k = AtomicFetchOrOp;
+    else if (native == atomics_xor)
+        k = AtomicFetchXorOp;
+    else
+        MOZ_CRASH("Bad atomic operation");
+
+    MDefinition *toWrite = value;
+    if (value->type() == MIRType_Double) {
+        toWrite = MTruncateToInt32::New(alloc(), value);
+        current->add(toWrite->toInstruction());
+    }
+    MAtomicTypedArrayElementBinop *binop =
+        MAtomicTypedArrayElementBinop::New(alloc(), k, elements, index, arrayType, toWrite);
+    binop->setResultType(getInlineReturnType());
+    current->add(binop);
+    current->push(binop);
+
+    return InliningStatus_Inlined;
+}
+
+bool
+IonBuilder::atomicsMeetsPreconditions(CallInfo &callInfo, Scalar::Type *arrayType)
+{
+    if (callInfo.getArg(0)->type() != MIRType_Object)
+        return false;
+
+    if (callInfo.getArg(1)->type() != MIRType_Int32)
+        return false;
+
+    // Ensure that the first argument is a valid SharedTypedArray.
+    //
+    // Then check both that the element type is something we can
+    // optimize and that the return type is suitable for that element
+    // type.
+
+    types::TemporaryTypeSet *arg0Types = callInfo.getArg(0)->resultTypeSet();
+    if (!arg0Types)
+        return false;
+
+    *arrayType = arg0Types->getSharedTypedArrayType();
+    switch (*arrayType) {
+      case Scalar::Int8:
+      case Scalar::Uint8:
+      case Scalar::Int16:
+      case Scalar::Uint16:
+      case Scalar::Int32:
+        return getInlineReturnType() == MIRType_Int32;
+      case Scalar::Uint32:
+        // Bug 1077305: it would be attractive to allow inlining even
+        // if the inline return type is Int32, which it will frequently
+        // be.
+        return getInlineReturnType() == MIRType_Double;
+      default:
+        // Excludes floating types and Uint8Clamped
+        return false;
+    }
+}
+
+void
+IonBuilder::atomicsCheckBounds(CallInfo &callInfo, MInstruction **elements, MDefinition **index)
+{
+    // Perform bounds checking and extract the elements vector.
+    MDefinition *obj = callInfo.getArg(0);
+    MInstruction *length = nullptr;
+    *index = callInfo.getArg(1);
+    *elements = nullptr;
+    addTypedArrayLengthAndData(obj, DoBoundsCheck, index, &length, elements);
+}
+
+IonBuilder::InliningStatus
 IonBuilder::inlineIsConstructing(CallInfo &callInfo)
 {
     MOZ_ASSERT(!callInfo.constructing());
     MOZ_ASSERT(callInfo.argc() == 0);
     MOZ_ASSERT(script()->functionNonDelazifying(),
                "isConstructing() should only be called in function scripts");
 
     if (getInlineReturnType() != MIRType_Boolean)
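A standalone sketch of the element-type / return-type rule that
atomicsMeetsPreconditions enforces; the enums below merely mirror Scalar::Type
and MIRType for illustration and are not the engine's types:

    enum class Elem { Int8, Uint8, Int16, Uint16, Int32, Uint32,
                      Float32, Float64, Uint8Clamped };
    enum class Ret  { Int32, Double, Other };

    static bool admissible(Elem e, Ret r) {
        switch (e) {
          case Elem::Int8: case Elem::Uint8:
          case Elem::Int16: case Elem::Uint16:
          case Elem::Int32:
            return r == Ret::Int32;   // int-sized elements must be typed as Int32
          case Elem::Uint32:
            return r == Ret::Double;  // bug 1077305: Int32 would often suffice
          default:
            return false;             // floating types and Uint8Clamped: not inlined
        }
    }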
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -1260,24 +1260,24 @@ MPhi::foldsTernary()
 
     MBasicBlock *pred = block()->immediateDominator();
     if (!pred || !pred->lastIns()->isTest())
         return nullptr;
 
     MTest *test = pred->lastIns()->toTest();
 
     // True branch may only dominate one edge of MPhi.
-    if (test->ifTrue()->dominates(block()->getPredecessor(0)) &&
+    if (test->ifTrue()->dominates(block()->getPredecessor(0)) ==
         test->ifTrue()->dominates(block()->getPredecessor(1)))
     {
         return nullptr;
     }
 
     // False branch may only dominate one edge of MPhi.
-    if (test->ifFalse()->dominates(block()->getPredecessor(0)) &&
+    if (test->ifFalse()->dominates(block()->getPredecessor(0)) ==
         test->ifFalse()->dominates(block()->getPredecessor(1)))
     {
         return nullptr;
     }
 
     // True and false branch must dominate different edges of MPhi.
     if (test->ifTrue()->dominates(block()->getPredecessor(0)) ==
         test->ifFalse()->dominates(block()->getPredecessor(0)))
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -10,16 +10,17 @@
  */
 
 #ifndef jit_MIR_h
 #define jit_MIR_h
 
 #include "mozilla/Array.h"
 #include "mozilla/DebugOnly.h"
 
+#include "jit/AtomicOp.h"
 #include "jit/FixedList.h"
 #include "jit/InlineList.h"
 #include "jit/IonAllocPolicy.h"
 #include "jit/IonMacroAssembler.h"
 #include "jit/MOpcodes.h"
 #include "jit/TypedObjectPrediction.h"
 #include "jit/TypePolicy.h"
 #include "vm/ArrayObject.h"
@@ -8032,59 +8033,85 @@ class MArrayJoin
         return true;
     }
     virtual AliasSet getAliasSet() const {
         return AliasSet::Load(AliasSet::Element | AliasSet::ObjectFields);
     }
     MDefinition *foldsTo(TempAllocator &alloc);
 };
 
+// See the comment block above MMemoryBarrier, later in this file.
+
+enum MemoryBarrierRequirement
+{
+    DoesNotRequireMemoryBarrier,
+    DoesRequireMemoryBarrier
+};
+
+// Also see the comment block above MMemoryBarrier, later in this file.
+
 class MLoadTypedArrayElement
   : public MBinaryInstruction
 {
     Scalar::Type arrayType_;
+    bool requiresBarrier_;
 
     MLoadTypedArrayElement(MDefinition *elements, MDefinition *index,
-                           Scalar::Type arrayType)
-      : MBinaryInstruction(elements, index), arrayType_(arrayType)
+                           Scalar::Type arrayType, MemoryBarrierRequirement requiresBarrier)
+      : MBinaryInstruction(elements, index),
+        arrayType_(arrayType),
+        requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier)
     {
         setResultType(MIRType_Value);
-        setMovable();
+        if (requiresBarrier_)
+            setGuard();         // Not removable or movable
+        else
+            setMovable();
         MOZ_ASSERT(elements->type() == MIRType_Elements);
         MOZ_ASSERT(index->type() == MIRType_Int32);
         MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::TypeMax);
     }
 
   public:
     INSTRUCTION_HEADER(LoadTypedArrayElement)
 
     static MLoadTypedArrayElement *New(TempAllocator &alloc, MDefinition *elements, MDefinition *index,
-                                       Scalar::Type arrayType)
-    {
-        return new(alloc) MLoadTypedArrayElement(elements, index, arrayType);
+                                       Scalar::Type arrayType,
+                                       MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier)
+    {
+        return new(alloc) MLoadTypedArrayElement(elements, index, arrayType, requiresBarrier);
     }
 
     Scalar::Type arrayType() const {
         return arrayType_;
     }
     bool fallible() const {
         // Bailout if the result does not fit in an int32.
         return arrayType_ == Scalar::Uint32 && type() == MIRType_Int32;
     }
+    bool requiresMemoryBarrier() const {
+        return requiresBarrier_;
+    }
     MDefinition *elements() const {
         return getOperand(0);
     }
     MDefinition *index() const {
         return getOperand(1);
     }
     AliasSet getAliasSet() const {
+        // When a barrier is needed, make the instruction effectful by
+        // giving it a "store" effect.
+        if (requiresBarrier_)
+            return AliasSet::Store(AliasSet::TypedArrayElement);
         return AliasSet::Load(AliasSet::TypedArrayElement);
     }
 
     bool congruentTo(const MDefinition *ins) const {
+        if (requiresBarrier_)
+            return false;
         if (!ins->isLoadTypedArrayElement())
             return false;
         const MLoadTypedArrayElement *other = ins->toLoadTypedArrayElement();
         if (arrayType_ != other->arrayType_)
             return false;
         return congruentIfOperandsEqual(other);
     }
 
@@ -8209,37 +8236,46 @@ class MLoadTypedArrayElementStatic
     bool canProduceFloat32() const { return viewType() == Scalar::Float32; }
 };
 
 class MStoreTypedArrayElement
   : public MTernaryInstruction,
     public StoreTypedArrayPolicy::Data
 {
     Scalar::Type arrayType_;
+    bool requiresBarrier_;
 
     // See note in MStoreElementCommon.
     bool racy_;
 
     MStoreTypedArrayElement(MDefinition *elements, MDefinition *index, MDefinition *value,
-                            Scalar::Type arrayType)
-      : MTernaryInstruction(elements, index, value), arrayType_(arrayType), racy_(false)
-    {
-        setMovable();
+                            Scalar::Type arrayType, MemoryBarrierRequirement requiresBarrier)
+      : MTernaryInstruction(elements, index, value),
+        arrayType_(arrayType),
+        requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
+        racy_(false)
+    {
+        if (requiresBarrier_)
+            setGuard();         // Not removable or movable
+        else
+            setMovable();
         MOZ_ASSERT(elements->type() == MIRType_Elements);
         MOZ_ASSERT(index->type() == MIRType_Int32);
         MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::TypeMax);
     }
 
   public:
     INSTRUCTION_HEADER(StoreTypedArrayElement)
 
     static MStoreTypedArrayElement *New(TempAllocator &alloc, MDefinition *elements, MDefinition *index,
-                                        MDefinition *value, Scalar::Type arrayType)
-    {
-        return new(alloc) MStoreTypedArrayElement(elements, index, value, arrayType);
+                                        MDefinition *value, Scalar::Type arrayType,
+                                        MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier)
+    {
+        return new(alloc) MStoreTypedArrayElement(elements, index, value, arrayType,
+                                                  requiresBarrier);
     }
 
     Scalar::Type arrayType() const {
         return arrayType_;
     }
     bool isByteArray() const {
         return arrayType_ == Scalar::Int8 ||
                arrayType_ == Scalar::Uint8 ||
@@ -8256,16 +8292,19 @@ class MStoreTypedArrayElement
         return getOperand(1);
     }
     MDefinition *value() const {
         return getOperand(2);
     }
     AliasSet getAliasSet() const {
         return AliasSet::Store(AliasSet::TypedArrayElement);
     }
+    bool requiresMemoryBarrier() const {
+        return requiresBarrier_;
+    }
     bool racy() const {
         return racy_;
     }
     void setRacy() {
         racy_ = true;
     }
     TruncateKind operandTruncateKind(size_t index) const;
 
@@ -11447,16 +11486,169 @@ class MRecompileCheck : public MNullaryI
         return increaseWarmUpCounter_;
     }
 
     AliasSet getAliasSet() const {
         return AliasSet::None();
     }
 };
 
+// All barriered operations - MMemoryBarrier, MCompareExchangeTypedArrayElement,
+// and MAtomicTypedArrayElementBinop, as well as MLoadTypedArrayElement and
+// MStoreTypedArrayElement when they are marked as requiring a memory barrier - have
+// the following attributes:
+//
+// - Not movable
+// - Not removable
+// - Not congruent with any other instruction
+// - Effectful (they alias every TypedArray store)
+//
+// The intended effect of those constraints is to prevent all loads
+// and stores preceding the barriered operation from being moved to
+// after the barriered operation, and vice versa, and to prevent the
+// barriered operation from being removed or hoisted.
+
+class MMemoryBarrier
+  : public MNullaryInstruction
+{
+    // The type is a combination of the memory barrier types in AtomicOp.h.
+    const int type_;
+
+    explicit MMemoryBarrier(int type)
+      : type_(type)
+    {
+        MOZ_ASSERT((type_ & ~MembarAllbits) == 0);
+        setGuard();             // Not removable
+    }
+
+  public:
+    INSTRUCTION_HEADER(MemoryBarrier);
+
+    static MMemoryBarrier *New(TempAllocator &alloc, int type = MembarFull) {
+        return new(alloc) MMemoryBarrier(type);
+    }
+    int type() const {
+        return type_;
+    }
+
+    AliasSet getAliasSet() const {
+        return AliasSet::Store(AliasSet::TypedArrayElement);
+    }
+};
+
+class MCompareExchangeTypedArrayElement
+  : public MAryInstruction<4>,
+    public MixPolicy< MixPolicy<ObjectPolicy<0>, IntPolicy<1> >, MixPolicy<IntPolicy<2>, IntPolicy<3> > >
+{
+    Scalar::Type arrayType_;
+
+    explicit MCompareExchangeTypedArrayElement(MDefinition *elements, MDefinition *index,
+                                               Scalar::Type arrayType, MDefinition *oldval,
+                                               MDefinition *newval)
+      : arrayType_(arrayType)
+    {
+        initOperand(0, elements);
+        initOperand(1, index);
+        initOperand(2, oldval);
+        initOperand(3, newval);
+        setGuard();             // Not removable
+    }
+
+  public:
+    INSTRUCTION_HEADER(CompareExchangeTypedArrayElement);
+
+    static MCompareExchangeTypedArrayElement *New(TempAllocator &alloc, MDefinition *elements,
+                                                  MDefinition *index, Scalar::Type arrayType,
+                                                  MDefinition *oldval, MDefinition *newval)
+    {
+        return new(alloc) MCompareExchangeTypedArrayElement(elements, index, arrayType, oldval, newval);
+    }
+    bool isByteArray() const {
+        return (arrayType_ == Scalar::Int8 ||
+                arrayType_ == Scalar::Uint8 ||
+                arrayType_ == Scalar::Uint8Clamped);
+    }
+    MDefinition *elements() {
+        return getOperand(0);
+    }
+    MDefinition *index() {
+        return getOperand(1);
+    }
+    MDefinition *oldval() {
+        return getOperand(2);
+    }
+    int oldvalOperand() {
+        return 2;
+    }
+    MDefinition *newval() {
+        return getOperand(3);
+    }
+    Scalar::Type arrayType() const {
+        return arrayType_;
+    }
+    AliasSet getAliasSet() const {
+        return AliasSet::Store(AliasSet::TypedArrayElement);
+    }
+};
+
+class MAtomicTypedArrayElementBinop
+    : public MAryInstruction<3>,
+      public Mix3Policy< ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2> >
+{
+  private:
+    AtomicOp op_;
+    Scalar::Type arrayType_;
+
+  protected:
+    explicit MAtomicTypedArrayElementBinop(AtomicOp op, MDefinition *elements, MDefinition *index,
+                                           Scalar::Type arrayType, MDefinition *value)
+      : op_(op),
+        arrayType_(arrayType)
+    {
+        initOperand(0, elements);
+        initOperand(1, index);
+        initOperand(2, value);
+        setGuard();             // Not removable
+    }
+
+  public:
+    INSTRUCTION_HEADER(AtomicTypedArrayElementBinop);
+
+    static MAtomicTypedArrayElementBinop *New(TempAllocator &alloc, AtomicOp op,
+                                              MDefinition *elements, MDefinition *index,
+                                              Scalar::Type arrayType, MDefinition *value)
+    {
+        return new(alloc) MAtomicTypedArrayElementBinop(op, elements, index, arrayType, value);
+    }
+
+    bool isByteArray() const {
+        return (arrayType_ == Scalar::Int8 ||
+                arrayType_ == Scalar::Uint8 ||
+                arrayType_ == Scalar::Uint8Clamped);
+    }
+    AtomicOp operation() const {
+        return op_;
+    }
+    Scalar::Type arrayType() const {
+        return arrayType_;
+    }
+    MDefinition *elements() {
+        return getOperand(0);
+    }
+    MDefinition *index() {
+        return getOperand(1);
+    }
+    MDefinition *value() {
+        return getOperand(2);
+    }
+    AliasSet getAliasSet() const {
+        return AliasSet::Store(AliasSet::TypedArrayElement);
+    }
+};
+
 class MAsmJSNeg : public MUnaryInstruction
 {
     MAsmJSNeg(MDefinition *op, MIRType type)
       : MUnaryInstruction(op)
     {
         setResultType(type);
         setMovable();
     }
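As a consequence of the attributes listed above MMemoryBarrier, a barriered
load is never folded with an identical one; a sketch in pseudo-MIR:

    v1 = LoadTypedArrayElement elements, index  [barriered]
    v2 = LoadTypedArrayElement elements, index  [barriered]
    // congruentTo() returns false whenever requiresBarrier_ is set,
    // so GVN keeps both loads and LICM hoists neither.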
--- a/js/src/jit/MOpcodes.h
+++ b/js/src/jit/MOpcodes.h
@@ -179,16 +179,18 @@ namespace jit {
     _(ArrayConcat)                                                          \
     _(ArrayJoin)                                                            \
     _(LoadTypedArrayElement)                                                \
     _(LoadTypedArrayElementHole)                                            \
     _(LoadTypedArrayElementStatic)                                          \
     _(StoreTypedArrayElement)                                               \
     _(StoreTypedArrayElementHole)                                           \
     _(StoreTypedArrayElementStatic)                                         \
+    _(CompareExchangeTypedArrayElement)                                     \
+    _(AtomicTypedArrayElementBinop)                                         \
     _(EffectiveAddress)                                                     \
     _(ClampToUint8)                                                         \
     _(LoadFixedSlot)                                                        \
     _(StoreFixedSlot)                                                       \
     _(CallGetProperty)                                                      \
     _(GetNameCache)                                                         \
     _(CallGetIntrinsicValue)                                                \
     _(CallsiteCloneCache)                                                   \
@@ -246,16 +248,17 @@ namespace jit {
     _(NewDerivedTypedObject)                                                \
     _(LambdaPar)                                                            \
     _(RestPar)                                                              \
     _(ForkJoinContext)                                                      \
     _(ForkJoinGetSlice)                                                     \
     _(GuardThreadExclusive)                                                 \
     _(InterruptCheckPar)                                                    \
     _(RecompileCheck)                                                       \
+    _(MemoryBarrier)                                                        \
     _(UnknownValue)                                                         \
     _(LexicalCheck)                                                         \
     _(ThrowUninitializedLexical)
 
 // Forward declarations of MIR types.
 #define FORWARD_DECLARE(op) class M##op;
  MIR_OPCODE_LIST(FORWARD_DECLARE)
 #undef FORWARD_DECLARE
--- a/js/src/jit/ParallelSafetyAnalysis.cpp
+++ b/js/src/jit/ParallelSafetyAnalysis.cpp
@@ -343,21 +343,24 @@ class ParallelSafetyVisitor : public MDe
     UNSAFE_OP(AsmJSLoadFuncPtr)
     UNSAFE_OP(AsmJSLoadFFIFunc)
     UNSAFE_OP(AsmJSReturn)
     UNSAFE_OP(AsmJSVoidReturn)
     UNSAFE_OP(AsmJSPassStackArg)
     UNSAFE_OP(AsmJSParameter)
     UNSAFE_OP(AsmJSCall)
     DROP_OP(RecompileCheck)
+    UNSAFE_OP(CompareExchangeTypedArrayElement)
+    UNSAFE_OP(AtomicTypedArrayElementBinop)
+    UNSAFE_OP(MemoryBarrier)
     UNSAFE_OP(UnknownValue)
     UNSAFE_OP(LexicalCheck)
     UNSAFE_OP(ThrowUninitializedLexical)
 
-    // It looks like this could easily be made safe:
+    // It looks like these could easily be made safe:
     UNSAFE_OP(ConvertElementsToDoubles)
     UNSAFE_OP(MaybeCopyElementsForWrite)
 };
 
 static void
 TransplantResumePoint(MInstruction *oldInstruction, MInstruction *replacementInstruction)
 {
     MOZ_ASSERT(!oldInstruction->isDiscarded());
--- a/js/src/jit/TypePolicy.cpp
+++ b/js/src/jit/TypePolicy.cpp
@@ -419,16 +419,17 @@ IntPolicy<Op>::staticAdjustInputs(TempAl
     def->replaceOperand(Op, replace);
 
     return replace->typePolicy()->adjustInputs(alloc, replace);
 }
 
 template bool IntPolicy<0>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
 template bool IntPolicy<1>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
 template bool IntPolicy<2>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
+template bool IntPolicy<3>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
 
 template <unsigned Op>
 bool
 ConvertToInt32Policy<Op>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def)
 {
     MDefinition *in = def->getOperand(Op);
     if (in->type() == MIRType_Int32)
         return true;
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2242,8 +2242,29 @@ CodeGeneratorARM::visitForkJoinGetSlice(
     MOZ_CRASH("NYI");
 }
 
 JitCode *
 JitRuntime::generateForkJoinGetSliceStub(JSContext *cx)
 {
     MOZ_CRASH("NYI");
 }
+
+void
+CodeGeneratorARM::memoryBarrier(int barrier)
+{
+    // On ARMv6 the optional argument (BarrierST, etc.) is ignored.
+    if (barrier == (MembarStoreStore|MembarSynchronizing))
+        masm.ma_dsb(masm.BarrierST);
+    else if (barrier & MembarSynchronizing)
+        masm.ma_dsb();
+    else if (barrier == MembarStoreStore)
+        masm.ma_dmb(masm.BarrierST);
+    else if (barrier)
+        masm.ma_dmb();
+}
+
+bool
+CodeGeneratorARM::visitMemoryBarrier(LMemoryBarrier *ins)
+{
+    memoryBarrier(ins->type());
+    return true;
+}
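The mapping implemented by memoryBarrier above, restated as a table:

    barrier bits                              instruction emitted
    MembarStoreStore | MembarSynchronizing    dsb st
    anything else with MembarSynchronizing    dsb
    MembarStoreStore alone                    dmb st
    any other nonzero bits                    dmb
    0                                         (nothing)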
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -170,16 +170,18 @@ class CodeGeneratorARM : public CodeGene
     // Functions for LTestVAndBranch.
     Register splitTagForTest(const ValueOperand &value);
 
     bool divICommon(MDiv *mir, Register lhs, Register rhs, Register output, LSnapshot *snapshot,
                     Label &done);
     bool modICommon(MMod *mir, Register lhs, Register rhs, Register output, LSnapshot *snapshot,
                     Label &done);
 
+    void memoryBarrier(int barrier);
+
   public:
     CodeGeneratorARM(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm);
 
   public:
     bool visitBox(LBox *box);
     bool visitBoxFloatingPoint(LBoxFloatingPoint *box);
     bool visitUnbox(LUnbox *unbox);
     bool visitValue(LValue *value);
@@ -201,16 +203,18 @@ class CodeGeneratorARM : public CodeGene
     bool visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins);
     bool visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins);
     bool visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins);
     bool visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins);
     bool visitAsmJSPassStackArg(LAsmJSPassStackArg *ins);
 
     bool visitForkJoinGetSlice(LForkJoinGetSlice *ins);
 
+    bool visitMemoryBarrier(LMemoryBarrier *ins);
+
     bool generateInvalidateEpilogue();
 
   protected:
     bool visitEffectiveAddress(LEffectiveAddress *ins);
     bool visitUDiv(LUDiv *ins);
     bool visitUMod(LUMod *ins);
     bool visitSoftUDivOrMod(LSoftUDivOrMod *ins);
 
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -565,9 +565,74 @@ LIRGeneratorARM::visitSimdSplatX4(MSimdS
 }
 
 bool
 LIRGeneratorARM::visitSimdValueX4(MSimdValueX4 *ins)
 {
     MOZ_CRASH("NYI");
 }
 
-//__aeabi_uidiv
+bool
+LIRGeneratorARM::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins)
+{
+    MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+    MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+    MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+    MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
+    MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
+
+    const LUse elements = useRegister(ins->elements());
+    const LAllocation index = useRegisterOrConstant(ins->index());
+
+    // For most operations we don't need any temps because there are
+    // enough scratch registers.  tempDef2 is never needed on ARM.
+    //
+    // For a Uint32Array with a known double result we need a temp for
+    // the intermediate output, this is tempDef1.
+    //
+    // Optimization opportunity (bug 1077317): We can do better by
+    // allowing 'value' to remain as an imm32 if it is small enough to
+    // fit in an instruction.
+
+    LDefinition tempDef1 = LDefinition::BogusTemp();
+    LDefinition tempDef2 = LDefinition::BogusTemp();
+
+    const LAllocation value = useRegister(ins->value());
+    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+        tempDef1 = temp();
+
+    LAtomicTypedArrayElementBinop *lir =
+        new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, tempDef1, tempDef2);
+
+    return define(lir, ins);
+}
+
+bool
+LIRGeneratorARM::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins)
+{
+    MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+    MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+    MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
+    MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
+
+    const LUse elements = useRegister(ins->elements());
+    const LAllocation index = useRegisterOrConstant(ins->index());
+
+    // If the target is a floating register then we need a temp at the
+    // CodeGenerator level for creating the result.
+    //
+    // Optimization opportunity (bug 1077317): We could do better by
+    // allowing oldval to remain an immediate, if it is small enough
+    // to fit in an instruction.
+
+    const LAllocation newval = useRegister(ins->newval());
+    const LAllocation oldval = useRegister(ins->oldval());
+    LDefinition tempDef = LDefinition::BogusTemp();
+    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+        tempDef = temp();
+
+    LCompareExchangeTypedArrayElement *lir =
+        new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, tempDef);
+
+    return define(lir, ins);
+}
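Both lowerings above share one temp rule; a minimal restatement (a sketch, not
additional patch code):

    // A temp is needed only when a Uint32 element produces a floating result,
    // since the fetched uint32 must be converted to double via a scratch register.
    bool needsTemp = ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type());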
--- a/js/src/jit/arm/Lowering-arm.h
+++ b/js/src/jit/arm/Lowering-arm.h
@@ -101,16 +101,18 @@ class LIRGeneratorARM : public LIRGenera
     bool visitAsmJSLoadHeap(MAsmJSLoadHeap *ins);
     bool visitAsmJSStoreHeap(MAsmJSStoreHeap *ins);
     bool visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins);
     bool visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins);
     bool visitForkJoinGetSlice(MForkJoinGetSlice *ins);
     bool visitSimdTernaryBitwise(MSimdTernaryBitwise *ins);
     bool visitSimdSplatX4(MSimdSplatX4 *ins);
     bool visitSimdValueX4(MSimdValueX4 *ins);
+    bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
+    bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
 };
 
 typedef LIRGeneratorARM LIRGeneratorSpecific;
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_arm_Lowering_arm_h */
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -1800,16 +1800,25 @@ MacroAssemblerARMCompat::buildOOLFakeExi
 
     Push(Imm32(descriptor)); // descriptor_
     Push(ImmPtr(fakeReturnAddr));
 
     return true;
 }
 
 void
+MacroAssemblerARMCompat::callWithExitFrame(Label *target)
+{
+    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
+    Push(Imm32(descriptor)); // descriptor
+
+    ma_callIonHalfPush(target);
+}
+
+void
 MacroAssemblerARMCompat::callWithExitFrame(JitCode *target)
 {
     uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
     Push(Imm32(descriptor)); // descriptor
 
     addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
     RelocStyle rs;
     if (HasMOVWT())
@@ -3715,16 +3724,27 @@ MacroAssemblerARM::ma_callIonHalfPush(co
     // the stack before the call; when we return, the pc is popped and the stack
     // is restored to its unaligned state.
     AutoForbidPools afp(this, 2);
     ma_push(pc);
     as_blx(r);
 }
 
 void
+MacroAssemblerARM::ma_callIonHalfPush(Label *label)
+{
+    // The stack is unaligned by 4 bytes. We push the pc to the stack to align
+    // the stack before the call; when we return, the pc is popped and the stack
+    // is restored to its unaligned state.
+    AutoForbidPools afp(this, 2);
+    ma_push(pc);
+    as_bl(label, Always);
+}
+
+void
 MacroAssemblerARM::ma_call(ImmPtr dest)
 {
     RelocStyle rs;
     if (HasMOVWT())
         rs = L_MOVWT;
     else
         rs = L_LDR;
 
@@ -4685,9 +4705,285 @@ MacroAssemblerARMCompat::branchValueIsNu
     Label done;
 
     branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);
     branchPtrInNurseryRange(cond, value.payloadReg(), temp, label);
 
     bind(&done);
 }
 
+namespace js {
+namespace jit {
+
+template<>
+Register
+MacroAssemblerARMCompat::computePointer<BaseIndex>(const BaseIndex &src, Register r)
+{
+    Register base = src.base;
+    Register index = src.index;
+    uint32_t scale = Imm32::ShiftOf(src.scale).value;
+    int32_t offset = src.offset;
+    as_add(r, base, lsl(index, scale));
+    if (offset != 0)
+        ma_add(r, Imm32(offset), r);
+    return r;
+}
+
+template<>
+Register
+MacroAssemblerARMCompat::computePointer<Address>(const Address &src, Register r)
+{
+    if (src.offset == 0)
+        return src.base;
+    ma_add(src.base, Imm32(src.offset), r);
+    return r;
+}
+
+} // namespace jit
+} // namespace js
+
+template<typename T>
+void
+MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend, const T &mem,
+                                         Register oldval, Register newval, Register output)
+{
+    // If LDREXB/H and STREXB/H are not available we use the
+    // word-width operations with read-modify-write.  That does not
+    // abstract well, so we fork.
+    //
+    // Bug 1077321: We may further optimize for ARMv8 here.
+    if (nbytes < 4 && !HasLDSTREXBHD())
+        compareExchangeARMv6(nbytes, signExtend, mem, oldval, newval, output);
+    else
+        compareExchangeARMv7(nbytes, signExtend, mem, oldval, newval, output);
+}
+
+// General algorithm:
+//
+//     ...    ptr, <addr>         ; compute address of item
+//     dmb
+// L0  ldrex* output, [ptr]
+//     sxt*   output, output, 0   ; sign-extend if applicable
+//     *xt*   tmp, oldval, 0      ; sign-extend or zero-extend if applicable
+//     cmp    output, tmp
+//     bne    L1                  ; failed - values are different
+//     strex* tmp, newval, [ptr]
+//     cmp    tmp, 1
+//     beq    L0                  ; failed - location is dirty, retry
+// L1  dmb
+//
+// Discussion here:  http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html.
+// However note that that discussion uses 'isb' as the trailing fence.
+// I've not quite figured out why, and I've gone with dmb here which
+// is safe.  Also see the LLVM source, which uses 'dmb ish' generally.
+// (Apple's Swift CPU apparently handles ish in a non-default, faster
+// way.)
+
+template<typename T>
+void
+MacroAssemblerARMCompat::compareExchangeARMv7(int nbytes, bool signExtend, const T &mem,
+                                              Register oldval, Register newval, Register output)
+{
+    Label Lagain;
+    Label Ldone;
+    ma_dmb();
+    Register ptr = computePointer(mem, secondScratchReg_);
+    bind(&Lagain);
+    switch (nbytes) {
+      case 1:
+        as_ldrexb(output, ptr);
+        if (signExtend) {
+            as_sxtb(output, output, 0);
+            as_sxtb(ScratchRegister, oldval, 0);
+        } else {
+            as_uxtb(ScratchRegister, oldval, 0);
+        }
+        break;
+      case 2:
+        as_ldrexh(output, ptr);
+        if (signExtend) {
+            as_sxth(output, output, 0);
+            as_sxth(ScratchRegister, oldval, 0);
+        } else {
+            as_uxth(ScratchRegister, oldval, 0);
+        }
+        break;
+      case 4:
+        MOZ_ASSERT(!signExtend);
+        as_ldrex(output, ptr);
+        break;
+    }
+    if (nbytes < 4)
+        as_cmp(output, O2Reg(ScratchRegister));
+    else
+        as_cmp(output, O2Reg(oldval));
+    as_b(&Ldone, NotEqual);
+    switch (nbytes) {
+      case 1:
+        as_strexb(ScratchRegister, newval, ptr);
+        break;
+      case 2:
+        as_strexh(ScratchRegister, newval, ptr);
+        break;
+      case 4:
+        as_strex(ScratchRegister, newval, ptr);
+        break;
+    }
+    as_cmp(ScratchRegister, Imm8(1));
+    as_b(&Lagain, Equal);
+    bind(&Ldone);
+    ma_dmb();
+}
+
+template<typename T>
+void
+MacroAssemblerARMCompat::compareExchangeARMv6(int nbytes, bool signExtend, const T &mem,
+                                              Register oldval, Register newval, Register output)
+{
+    // Bug 1077318: Must use read-modify-write with LDREX / STREX.
+    MOZ_ASSERT(nbytes == 1 || nbytes == 2);
+    MOZ_CRASH("NYI");
+}
+
+template void
+js::jit::MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend,
+                                                  const Address &address, Register oldval,
+                                                  Register newval, Register output);
+template void
+js::jit::MacroAssemblerARMCompat::compareExchange(int nbytes, bool signExtend,
+                                                  const BaseIndex &address, Register oldval,
+                                                  Register newval, Register output);
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32 &value,
+                                       const T &mem, Register temp, Register output)
+{
+    // The Imm32 value case is not needed yet because lowering always
+    // forces the value into a register at present (bug 1077317).  But
+    // the method must be present for the platform-independent code to
+    // link.
+    MOZ_CRASH("Feature NYI");
+}
+
+// General algorithm:
+//
+//     ...    ptr, <addr>         ; compute address of item
+//     dmb
+// L0  ldrex* output, [ptr]
+//     sxt*   output, output, 0   ; sign-extend if applicable
+//     OP     tmp, output, value  ; compute value to store
+//     strex* tmp, tmp, [ptr]
+//     cmp    tmp, 1
+//     beq    L0                  ; failed - location is dirty, retry
+//     dmb                        ; ordering barrier required
+//
+// Also see notes above at compareExchange re the barrier strategy.
+//
+// Observe that the value being combined with the memory element need
+// not be sign-extended, because no OP makes use of bits to the left
+// of the element's width; neither the output nor the bits stored are
+// affected by those upper bits.
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
+                                       const Register &value, const T &mem, Register temp,
+                                       Register output)
+{
+    // Fork for non-word operations on ARMv6.
+    //
+    // Bug 1077321: We may further optimize for ARMv8 here.
+    if (nbytes < 4 && !HasLDSTREXBHD())
+        atomicFetchOpARMv6(nbytes, signExtend, op, value, mem, temp, output);
+    else {
+        MOZ_ASSERT(temp == InvalidReg);
+        atomicFetchOpARMv7(nbytes, signExtend, op, value, mem, output);
+    }
+}
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicFetchOpARMv7(int nbytes, bool signExtend, AtomicOp op,
+                                            const Register &value, const T &mem, Register output)
+{
+    Label Lagain;
+    Register ptr = computePointer(mem, secondScratchReg_);
+    ma_dmb();
+    bind(&Lagain);
+    switch (nbytes) {
+      case 1:
+        as_ldrexb(output, ptr);
+        if (signExtend)
+            as_sxtb(output, output, 0);
+        break;
+      case 2:
+        as_ldrexh(output, ptr);
+        if (signExtend)
+            as_sxth(output, output, 0);
+        break;
+      case 4:
+        MOZ_ASSERT(!signExtend);
+        as_ldrex(output, ptr);
+        break;
+    }
+    switch (op) {
+      case AtomicFetchAddOp:
+        as_add(ScratchRegister, output, O2Reg(value));
+        break;
+      case AtomicFetchSubOp:
+        as_sub(ScratchRegister, output, O2Reg(value));
+        break;
+      case AtomicFetchAndOp:
+        as_and(ScratchRegister, output, O2Reg(value));
+        break;
+      case AtomicFetchOrOp:
+        as_orr(ScratchRegister, output, O2Reg(value));
+        break;
+      case AtomicFetchXorOp:
+        as_eor(ScratchRegister, output, O2Reg(value));
+        break;
+    }
+    switch (nbytes) {
+      case 1:
+        as_strexb(ScratchRegister, ScratchRegister, ptr);
+        break;
+      case 2:
+        as_strexh(ScratchRegister, ScratchRegister, ptr);
+        break;
+      case 4:
+        as_strex(ScratchRegister, ScratchRegister, ptr);
+        break;
+    }
+    as_cmp(ScratchRegister, Imm8(1));
+    as_b(&Lagain, Equal);
+    ma_dmb();
+}
+
+template<typename T>
+void
+MacroAssemblerARMCompat::atomicFetchOpARMv6(int nbytes, bool signExtend, AtomicOp op,
+                                            const Register &value, const T &mem, Register temp,
+                                            Register output)
+{
+    // Bug 1077318: Must use read-modify-write with LDREX / STREX.
+    MOZ_ASSERT(nbytes == 1 || nbytes == 2);
+    MOZ_CRASH("NYI");
+}
+
+template void
+js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
+                                                const Imm32 &value, const Address &mem,
+                                                Register temp, Register output);
+template void
+js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
+                                                const Imm32 &value, const BaseIndex &mem,
+                                                Register temp, Register output);
+template void
+js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
+                                                const Register &value, const Address &mem,
+                                                Register temp, Register output);
+template void
+js::jit::MacroAssemblerARMCompat::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op,
+                                                const Register &value, const BaseIndex &mem,
+                                                Register temp, Register output);
+
 #endif
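A hypothetical call site for the new public wrappers (register and address
operands illustrative only):

    // 32-bit CAS on mem[base + 0]; output receives the value observed in memory.
    masm.compareExchange32(Address(base, 0), oldval, newval, output);

    // Fetch-and-add on a sign-extended byte element; on the ARMv7 path the
    // temp must be InvalidReg, matching the assertion in atomicFetchOp above.
    masm.atomicFetchAdd8SignExtend(valueReg, Address(base, 0), InvalidReg, output);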
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -7,16 +7,17 @@
 #ifndef jit_arm_MacroAssembler_arm_h
 #define jit_arm_MacroAssembler_arm_h
 
 #include "mozilla/DebugOnly.h"
 
 #include "jsopcode.h"
 
 #include "jit/arm/Assembler-arm.h"
+#include "jit/AtomicOp.h"
 #include "jit/IonCaches.h"
 #include "jit/IonFrames.h"
 #include "jit/MoveResolver.h"
 
 using mozilla::DebugOnly;
 
 namespace js {
 namespace jit {
@@ -401,16 +402,19 @@ class MacroAssemblerARM : public Assembl
     // Calls an Ion function, assumes that the stack is untouched (8 byte
     // aligned).
     void ma_callIon(const Register reg);
     // Calls an Ion function, assuming that sp has already been decremented.
     void ma_callIonNoPush(const Register reg);
     // Calls an ion function, assuming that the stack is currently not 8 byte
     // aligned.
     void ma_callIonHalfPush(const Register reg);
+    // Calls an ion function, assuming that the stack is currently not 8 byte
+    // aligned.
+    void ma_callIonHalfPush(Label *label);
 
     void ma_call(ImmPtr dest);
 
     // Float registers can only be loaded/stored in continuous runs when using
     // vstm/vldm. This function breaks set into continuous runs and loads/stores
     // them at [rm]. rm will be modified and left in a state logically suitable
     // for the next load/store. Returns the offset from [dm] for the logical
     // next load/store.
@@ -1270,16 +1274,17 @@ class MacroAssemblerARMCompat : public M
     void setFramePushed(uint32_t framePushed) {
         framePushed_ = framePushed;
     }
 
     // Builds an exit frame on the stack, with a return address to an internal
     // non-function. Returns offset to be passed to markSafepointAt().
     bool buildFakeExitFrame(Register scratch, uint32_t *offset);
 
+    void callWithExitFrame(Label *target);
     void callWithExitFrame(JitCode *target);
     void callWithExitFrame(JitCode *target, Register dynStack);
 
     // Makes an Ion call using the only two methods that it is sane for
     // independent code to make a call.
     void callIon(Register callee);
     void callIonFromAsmJS(Register callee);
 
@@ -1415,16 +1420,182 @@ class MacroAssemblerARMCompat : public M
     }
     void storeFloat32(FloatRegister src, BaseIndex addr) {
         // Harder cases not handled yet.
         MOZ_ASSERT(addr.offset == 0);
         uint32_t scale = Imm32::ShiftOf(addr.scale).value;
         ma_vstr(VFPRegister(src).singleOverlay(), addr.base, addr.index, scale);
     }
 
+  private:
+    template<typename T>
+    Register computePointer(const T &src, Register r);
+
+    template<typename T>
+    void compareExchangeARMv6(int nbytes, bool signExtend, const T &mem, Register oldval,
+                              Register newval, Register output);
+
+    template<typename T>
+    void compareExchangeARMv7(int nbytes, bool signExtend, const T &mem, Register oldval,
+                              Register newval, Register output);
+
+    template<typename T>
+    void compareExchange(int nbytes, bool signExtend, const T &address, Register oldval,
+                         Register newval, Register output);
+
+    template<typename T>
+    void atomicFetchOpARMv6(int nbytes, bool signExtend, AtomicOp op, const Register &value,
+                            const T &mem, Register temp, Register output);
+
+    template<typename T>
+    void atomicFetchOpARMv7(int nbytes, bool signExtend, AtomicOp op, const Register &value,
+                            const T &mem, Register output);
+
+    template<typename T>
+    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32 &value,
+                       const T &address, Register temp, Register output);
+
+    template<typename T>
+    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register &value,
+                       const T &address, Register temp, Register output);
+
+  public:
+    // T in {Address,BaseIndex}
+    // S in {Imm32,Register}
+
+    template<typename T>
+    void compareExchange8SignExtend(const T &mem, Register oldval, Register newval, Register output)
+    {
+        compareExchange(1, true, mem, oldval, newval, output);
+    }
+    template<typename T>
+    void compareExchange8ZeroExtend(const T &mem, Register oldval, Register newval, Register output)
+    {
+        compareExchange(1, false, mem, oldval, newval, output);
+    }
+    template<typename T>
+    void compareExchange16SignExtend(const T &mem, Register oldval, Register newval, Register output)
+    {
+        compareExchange(2, true, mem, oldval, newval, output);
+    }
+    template<typename T>
+    void compareExchange16ZeroExtend(const T &mem, Register oldval, Register newval, Register output)
+    {
+        compareExchange(2, false, mem, oldval, newval, output);
+    }
+    template<typename T>
+    void compareExchange32(const T &mem, Register oldval, Register newval, Register output)  {
+        compareExchange(4, false, mem, oldval, newval, output);
+    }
+
+    template<typename T, typename S>
+    void atomicFetchAdd8SignExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchAdd8ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchAdd16SignExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchAdd16ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchAdd32(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, temp, output);
+    }
+
+    template<typename T, typename S>
+    void atomicFetchSub8SignExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchSub8ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchSub16SignExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchSub16ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchSub32(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, temp, output);
+    }
+
+    template<typename T, typename S>
+    void atomicFetchAnd8SignExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchAnd8ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchAnd16SignExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchAnd16ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchAnd32(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, temp, output);
+    }
+
+    template<typename T, typename S>
+    void atomicFetchOr8SignExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchOr8ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchOr16SignExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchOr16ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchOr32(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, temp, output);
+    }
+
+    template<typename T, typename S>
+    void atomicFetchXor8SignExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchXor8ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchXor16SignExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchXor16ZeroExtend(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, temp, output);
+    }
+    template<typename T, typename S>
+    void atomicFetchXor32(const S &value, const T &mem, Register temp, Register output) {
+        atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, temp, output);
+    }
+
     void clampIntToUint8(Register reg) {
        // Look at (reg >> 8): if it is 0, then reg shouldn't be clamped;
        // if it is <0, then we want to clamp to 0; otherwise, clamp to 255.
         as_mov(ScratchRegister, asr(reg, 8), SetCond);
         ma_mov(Imm32(0xff), reg, NoSetCond, NotEqual);
         ma_mov(Imm32(0), reg, NoSetCond, Signed);
     }
 
--- a/js/src/jit/mips/MacroAssembler-mips.cpp
+++ b/js/src/jit/mips/MacroAssembler-mips.cpp
@@ -919,42 +919,53 @@ MacroAssemblerMIPS::ma_b(Address addr, I
 
 void
 MacroAssemblerMIPS::ma_b(Label *label, JumpKind jumpKind)
 {
     branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
 }
 
 void
-MacroAssemblerMIPS::ma_bal(Label *label, JumpKind jumpKind)
-{
-    branchWithCode(getBranchCode(BranchIsCall), label, jumpKind);
+MacroAssemblerMIPS::ma_bal(Label *label, DelaySlotFill delaySlotFill)
+{
+    if (label->bound()) {
+        // Generate the long jump for calls because return address has to be
+        // the address after the reserved block.
+        addLongJump(nextOffset());
+        ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+        as_jalr(ScratchRegister);
+        if (delaySlotFill == FillDelaySlot)
+            as_nop();
+        return;
+    }
+
+    // Second word holds a pointer to the next branch in label's chain.
+    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    // Make the whole branch continuous in the buffer.
+    m_buffer.ensureSpace(4 * sizeof(uint32_t));
+
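+    // At most four words follow: the branch, the chain pointer, a slot
+    // reserved for the long jump, and (optionally) the delay-slot nop.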
+    BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+    writeInst(nextInChain);
+    label->use(bo.getOffset());
+    // Leave space for long jump.
+    as_nop();
+    if (delaySlotFill == FillDelaySlot)
+        as_nop();
 }
 
 void
 MacroAssemblerMIPS::branchWithCode(InstImm code, Label *label, JumpKind jumpKind)
 {
-    InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+    MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
     InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
 
     if (label->bound()) {
         int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
 
-        // Generate the long jump for calls because return address has to be
-        // the address after the reserved block.
-        if (code.encode() == inst_bgezal.encode()) {
-            MOZ_ASSERT(jumpKind != ShortJump);
-            // Handle long call
-            addLongJump(nextOffset());
-            ma_liPatchable(ScratchRegister, Imm32(label->offset()));
-            as_jalr(ScratchRegister);
-            as_nop();
-            return;
-        }
-
         if (BOffImm16::IsInRange(offset))
             jumpKind = ShortJump;
 
         if (jumpKind == ShortJump) {
             MOZ_ASSERT(BOffImm16::IsInRange(offset));
             code.setBOffImm16(BOffImm16(offset));
             writeInst(code.encode());
             as_nop();
@@ -992,18 +1003,17 @@ MacroAssemblerMIPS::branchWithCode(InstI
         // Indicate that this is a short jump with offset 4.
         code.setBOffImm16(BOffImm16(4));
         BufferOffset bo = writeInst(code.encode());
         writeInst(nextInChain);
         label->use(bo.getOffset());
         return;
     }
 
-    bool conditional = (code.encode() != inst_bgezal.encode() &&
-                        code.encode() != inst_beq.encode());
+    bool conditional = code.encode() != inst_beq.encode();
 
     // Make the whole branch continuous in the buffer.
     m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));
 
     BufferOffset bo = writeInst(code.encode());
     writeInst(nextInChain);
     label->use(bo.getOffset());
     // Leave space for potential long jump.
@@ -1485,16 +1495,25 @@ MacroAssemblerMIPSCompat::buildOOLFakeEx
 
     Push(Imm32(descriptor)); // descriptor_
     Push(ImmPtr(fakeReturnAddr));
 
     return true;
 }
 
 void
+MacroAssemblerMIPSCompat::callWithExitFrame(Label *target)
+{
+    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
+    Push(Imm32(descriptor)); // descriptor
+
+    ma_callIonHalfPush(target);
+}
+
+void
 MacroAssemblerMIPSCompat::callWithExitFrame(JitCode *target)
 {
     uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
     Push(Imm32(descriptor)); // descriptor
 
     addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
     ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
     ma_callIonHalfPush(ScratchRegister);
@@ -3083,16 +3102,27 @@ void
 MacroAssemblerMIPS::ma_callIonHalfPush(const Register r)
 {
     // This is a MIPS hack to push return address during jalr delay slot.
     as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
     as_jalr(r);
     as_sw(ra, StackPointer, 0);
 }
 
+// This macroinstruction calls the ion code and pushes the return address to
+// the stack in the case when the stack is not aligned.
+void
+MacroAssemblerMIPS::ma_callIonHalfPush(Label *label)
+{
+    // This is a MIPS hack to push the return address during the branch
+    // delay slot: the bal leaves its delay slot unfilled so the sw of ra
+    // can be scheduled into it.
+    as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
+    ma_bal(label, DontFillDelaySlot);
+    as_sw(ra, StackPointer, 0);
+}
+
 void
 MacroAssemblerMIPS::ma_call(ImmPtr dest)
 {
     ma_liPatchable(CallReg, dest);
     as_jalr(CallReg);
     as_nop();
 }
 
--- a/js/src/jit/mips/MacroAssembler-mips.h
+++ b/js/src/jit/mips/MacroAssembler-mips.h
@@ -32,16 +32,22 @@ enum LoadStoreExtension
 };
 
 enum JumpKind
 {
     LongJump = 0,
     ShortJump = 1
 };
 
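+// Whether a branch-and-link should fill its delay slot with a nop
+// (FillDelaySlot) or leave the slot for the caller to schedule a useful
+// instruction into, as ma_callIonHalfPush does with the store of ra.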
+enum DelaySlotFill
+{
+    DontFillDelaySlot = 0,
+    FillDelaySlot = 1
+};
+
 struct ImmTag : public Imm32
 {
     ImmTag(JSValueTag mask)
       : Imm32(int32_t(mask))
     { }
 };
 
 struct ImmType : public ImmTag
@@ -228,17 +234,17 @@ class MacroAssemblerMIPS : public Assemb
     void ma_b(Address addr, Imm32 imm, Label *l, Condition c, JumpKind jumpKind = LongJump);
     void ma_b(Address addr, Register rhs, Label *l, Condition c, JumpKind jumpKind = LongJump) {
         MOZ_ASSERT(rhs != ScratchRegister);
         ma_lw(ScratchRegister, addr);
         ma_b(ScratchRegister, rhs, l, c, jumpKind);
     }
 
     void ma_b(Label *l, JumpKind jumpKind = LongJump);
-    void ma_bal(Label *l, JumpKind jumpKind = LongJump);
+    void ma_bal(Label *l, DelaySlotFill delaySlotFill = FillDelaySlot);
 
     // fp instructions
     void ma_lis(FloatRegister dest, float value);
     void ma_lid(FloatRegister dest, double value);
     void ma_liNegZero(FloatRegister dest);
 
     void ma_mv(FloatRegister src, ValueOperand dest);
     void ma_mv(ValueOperand src, FloatRegister dest);
@@ -972,16 +978,17 @@ public:
     void setFramePushed(uint32_t framePushed) {
         framePushed_ = framePushed;
     }
 
     // Builds an exit frame on the stack, with a return address to an internal
     // non-function. Returns offset to be passed to markSafepointAt().
     bool buildFakeExitFrame(Register scratch, uint32_t *offset);
 
+    void callWithExitFrame(Label *target);
     void callWithExitFrame(JitCode *target);
     void callWithExitFrame(JitCode *target, Register dynStack);
 
     // Makes an Ion call using the only two methods that it is sane for
     // indep code to make a call
     void callIon(Register callee);
     void callIonFromAsmJS(Register callee);
 
--- a/js/src/jit/none/Lowering-none.h
+++ b/js/src/jit/none/Lowering-none.h
@@ -73,16 +73,18 @@ class LIRGeneratorNone : public LIRGener
     bool visitGuardObjectType(MGuardObjectType *ins) { MOZ_CRASH(); }
     bool visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble *ins) { MOZ_CRASH(); }
     bool visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32 *ins) { MOZ_CRASH(); }
     bool visitAsmJSLoadHeap(MAsmJSLoadHeap *ins) { MOZ_CRASH(); }
     bool visitAsmJSStoreHeap(MAsmJSStoreHeap *ins) { MOZ_CRASH(); }
     bool visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins) { MOZ_CRASH(); }
     bool visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins) { MOZ_CRASH(); }
     bool visitForkJoinGetSlice(MForkJoinGetSlice *ins) { MOZ_CRASH(); }
+    bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins) { MOZ_CRASH(); }
+    bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins) { MOZ_CRASH(); }
 
     LTableSwitch *newLTableSwitch(LAllocation, LDefinition, MTableSwitch *) { MOZ_CRASH(); }
     LTableSwitchV *newLTableSwitchV(MTableSwitch *) { MOZ_CRASH(); }
     bool visitSimdTernaryBitwise(MSimdTernaryBitwise *ins) { MOZ_CRASH(); }
     bool visitSimdSplatX4(MSimdSplatX4 *ins) { MOZ_CRASH(); }
     bool visitSimdValueX4(MSimdValueX4 *lir) { MOZ_CRASH(); }
 };
 
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -180,16 +180,17 @@ class MacroAssemblerNone : public Assemb
     template <typename T> void call(T) { MOZ_CRASH(); }
     template <typename T, typename S> void call(T, S) { MOZ_CRASH(); }
     template <typename T> void callWithABI(T, MoveOp::Type v = MoveOp::GENERAL) { MOZ_CRASH(); }
 
     void setupAlignedABICall(uint32_t) { MOZ_CRASH(); }
     void setupUnalignedABICall(uint32_t, Register) { MOZ_CRASH(); }
     template <typename T> void passABIArg(T, MoveOp::Type v = MoveOp::GENERAL) { MOZ_CRASH(); }
 
+    void callWithExitFrame(Label *) { MOZ_CRASH(); }
     void callWithExitFrame(JitCode *) { MOZ_CRASH(); }
     void callWithExitFrame(JitCode *, Register) { MOZ_CRASH(); }
 
     void callIon(Register callee) { MOZ_CRASH(); }
     void callIonFromAsmJS(Register callee) { MOZ_CRASH(); }
 
     void nop() { MOZ_CRASH(); }
     void breakpoint() { MOZ_CRASH(); }
@@ -291,16 +292,47 @@ class MacroAssemblerNone : public Assemb
     template <typename T, typename S> void storeUnalignedInt32x4(T, S) { MOZ_CRASH(); }
     template <typename T, typename S> void storeAlignedFloat32x4(T, S) { MOZ_CRASH(); }
     template <typename T, typename S> void storeUnalignedFloat32x4(T, S) { MOZ_CRASH(); }
     template <typename T, typename S> void store8(T, S) { MOZ_CRASH(); }
     template <typename T, typename S> void store16(T, S) { MOZ_CRASH(); }
 
     template <typename T> void computeEffectiveAddress(T, Register) { MOZ_CRASH(); }
 
+    template <typename T> void compareExchange8SignExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+    template <typename T> void compareExchange8ZeroExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+    template <typename T> void compareExchange16SignExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+    template <typename T> void compareExchange16ZeroExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+    template <typename T> void compareExchange32(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAdd8SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAdd8ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAdd16SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAdd16ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAdd32(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchSub8SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchSub8ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchSub16SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchSub16ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchSub32(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAnd8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAnd8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAnd16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAnd16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAnd32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchOr8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchOr8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchOr16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchOr16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchOr32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchXor8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchXor8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchXor16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchXor16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchXor32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+
     void clampIntToUint8(Register) { MOZ_CRASH(); }
 
     Register splitTagForTest(ValueOperand) { MOZ_CRASH(); }
 
     template <typename T> void branchTestUndefined(Condition, T, Label *) { MOZ_CRASH(); }
     template <typename T> void branchTestInt32(Condition, T, Label *) { MOZ_CRASH(); }
     template <typename T> void branchTestBoolean(Condition, T, Label *) { MOZ_CRASH(); }
     template <typename T> void branchTestDouble(Condition, T, Label *) { MOZ_CRASH(); }
--- a/js/src/jit/shared/Assembler-x86-shared.h
+++ b/js/src/jit/shared/Assembler-x86-shared.h
@@ -624,28 +624,46 @@ class AssemblerX86Shared : public Assemb
             break;
           case Operand::MEM_SCALE:
             masm.movzbl_mr(src.disp(), src.base(), src.index(), src.scale(), dest.code());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
+    void movsbl(Register src, Register dest) {
+        masm.movsbl_rr(src.code(), dest.code());
+    }
     void movsbl(const Operand &src, Register dest) {
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movsbl_mr(src.disp(), src.base(), dest.code());
             break;
           case Operand::MEM_SCALE:
             masm.movsbl_mr(src.disp(), src.base(), src.index(), src.scale(), dest.code());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
+    void movb(const Operand &src, Register dest) {
+        switch (src.kind()) {
+          case Operand::MEM_REG_DISP:
+            masm.movb_mr(src.disp(), src.base(), dest.code());
+            break;
+          case Operand::MEM_SCALE:
+            masm.movb_mr(src.disp(), src.base(), src.index(), src.scale(), dest.code());
+            break;
+          default:
+            MOZ_CRASH("unexpected operand kind");
+        }
+    }
+    void movb(Imm32 src, Register dest) {
+        masm.movb_i8r(src.value & 255, dest.code());
+    }
     void movb(Register src, const Operand &dest) {
         switch (dest.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movb_rm(src.code(), dest.disp(), dest.base());
             break;
           case Operand::MEM_SCALE:
             masm.movb_rm(src.code(), dest.disp(), dest.base(), dest.index(), dest.scale());
             break;
@@ -678,16 +696,24 @@ class AssemblerX86Shared : public Assemb
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
     void movzwl(Register src, Register dest) {
         masm.movzwl_rr(src.code(), dest.code());
     }
+    void movw(const Operand &src, Register dest) {
+        masm.prefix_16_for_32();
+        movl(src, dest);
+    }
+    void movw(Imm32 src, Register dest) {
+        masm.prefix_16_for_32();
+        movl(src, dest);
+    }
     void movw(Register src, const Operand &dest) {
         switch (dest.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movw_rm(src.code(), dest.disp(), dest.base());
             break;
           case Operand::MEM_SCALE:
             masm.movw_rm(src.code(), dest.disp(), dest.base(), dest.index(), dest.scale());
             break;
@@ -702,16 +728,19 @@ class AssemblerX86Shared : public Assemb
             break;
           case Operand::MEM_SCALE:
             masm.movw_i16m(src.value, dest.disp(), dest.base(), dest.index(), dest.scale());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
+    void movswl(Register src, Register dest) {
+        masm.movswl_rr(src.code(), dest.code());
+    }
     void movswl(const Operand &src, Register dest) {
         switch (src.kind()) {
           case Operand::MEM_REG_DISP:
             masm.movswl_mr(src.disp(), src.base(), dest.code());
             break;
           case Operand::MEM_SCALE:
             masm.movswl_mr(src.disp(), src.base(), src.index(), src.scale(), dest.code());
             break;
@@ -916,19 +945,17 @@ class AssemblerX86Shared : public Assemb
             MOZ_CRASH("unexpected operand kind");
         }
     }
 
     void breakpoint() {
         masm.int3();
     }
 
-#ifdef DEBUG
     static bool HasSSE2() { return CPUInfo::IsSSE2Present(); }
-#endif
     static bool HasSSE3() { return CPUInfo::IsSSE3Present(); }
     static bool HasSSE41() { return CPUInfo::IsSSE41Present(); }
     static bool SupportsFloatingPoint() { return CPUInfo::IsSSE2Present(); }
     static bool SupportsSimd() { return CPUInfo::IsSSE2Present(); }
 
     // The below cmpl methods switch the lhs and rhs when it invokes the
     // macroassembler to conform with intel standard.  When calling this
     // function put the left operand on the left as you would expect.
@@ -1055,16 +1082,22 @@ class AssemblerX86Shared : public Assemb
             break;
           case Operand::MEM_ADDRESS32:
             masm.addl_im(imm.value, op.address());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
+    // Note: lock_addl() is used as a memory barrier on non-SSE2 systems.
+    // Do not optimize it away or replace it with XADDL or similar.
+    void lock_addl(Imm32 imm, const Operand &op) {
+        masm.prefix_lock();
+        addl(imm, op);
+    }
     void subl(Imm32 imm, Register dest) {
         masm.subl_ir(imm.value, dest.code());
     }
     void subl(Imm32 imm, const Operand &op) {
         switch (op.kind()) {
           case Operand::REG:
             masm.subl_ir(imm.value, op.reg());
             break;
@@ -1306,34 +1339,79 @@ class AssemblerX86Shared : public Assemb
             MOZ_CRASH("unexpected operand kind");
         }
     }
     void lock_decl(const Operand &op) {
         masm.prefix_lock();
         decl(op);
     }
 
-    void lock_cmpxchg32(Register src, const Operand &op) {
+    void lock_cmpxchg8(Register src, const Operand &mem) {
+        masm.prefix_lock();
+        switch (mem.kind()) {
+          case Operand::MEM_REG_DISP:
+            masm.cmpxchg8(src.code(), mem.disp(), mem.base());
+            break;
+          case Operand::MEM_SCALE:
+            masm.cmpxchg8(src.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
+            break;
+          default:
+            MOZ_CRASH("unexpected operand kind");
+        }
+    }
+    void lock_cmpxchg16(Register src, const Operand &mem) {
         masm.prefix_lock();
-        switch (op.kind()) {
+        switch (mem.kind()) {
           case Operand::MEM_REG_DISP:
-            masm.cmpxchg32(src.code(), op.disp(), op.base());
+            masm.cmpxchg16(src.code(), mem.disp(), mem.base());
+            break;
+          case Operand::MEM_SCALE:
+            masm.cmpxchg16(src.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
+            break;
+          default:
+            MOZ_CRASH("unexpected operand kind");
+        }
+    }
+    void lock_cmpxchg32(Register src, const Operand &mem) {
+        masm.prefix_lock();
+        switch (mem.kind()) {
+          case Operand::MEM_REG_DISP:
+            masm.cmpxchg32(src.code(), mem.disp(), mem.base());
+            break;
+          case Operand::MEM_SCALE:
+            masm.cmpxchg32(src.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
 
-    void xaddl(Register srcdest, const Operand &mem) {
+    void lock_xaddb(Register srcdest, const Operand &mem) {
         switch (mem.kind()) {
           case Operand::MEM_REG_DISP:
-            masm.xaddl_rm(srcdest.code(), mem.disp(), mem.base());
+            masm.lock_xaddb_rm(srcdest.code(), mem.disp(), mem.base());
             break;
           case Operand::MEM_SCALE:
-            masm.xaddl_rm(srcdest.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
+            masm.lock_xaddb_rm(srcdest.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
+            break;
+          default:
+            MOZ_CRASH("unexpected operand kind");
+        }
+    }
+    void lock_xaddw(Register srcdest, const Operand &mem) {
+        masm.prefix_16_for_32();
+        lock_xaddl(srcdest, mem);
+    }
+    void lock_xaddl(Register srcdest, const Operand &mem) {
+        switch (mem.kind()) {
+          case Operand::MEM_REG_DISP:
+            masm.lock_xaddl_rm(srcdest.code(), mem.disp(), mem.base());
+            break;
+          case Operand::MEM_SCALE:
+            masm.lock_xaddl_rm(srcdest.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
             break;
           default:
             MOZ_CRASH("unexpected operand kind");
         }
     }
 
     void push(const Imm32 imm) {
         masm.push_i32(imm.value);
--- a/js/src/jit/shared/BaseAssembler-x86-shared.h
+++ b/js/src/jit/shared/BaseAssembler-x86-shared.h
@@ -211,26 +211,31 @@ public:
         RoundToNearest = 0x0,
         RoundDown      = 0x1,
         RoundUp        = 0x2,
         RoundToZero    = 0x3
     } RoundingMode;
 
 private:
     typedef enum {
+        OP_ADD_EbGb                     = 0x00,
         OP_ADD_EvGv                     = 0x01,
         OP_ADD_GvEv                     = 0x03,
+        OP_OR_EbGb                      = 0x08,
         OP_OR_EvGv                      = 0x09,
         OP_OR_GvEv                      = 0x0B,
         OP_2BYTE_ESCAPE                 = 0x0F,
+        OP_AND_EbGb                     = 0x20,
         OP_AND_EvGv                     = 0x21,
         OP_AND_GvEv                     = 0x23,
+        OP_SUB_EbGb                     = 0x28,
         OP_SUB_EvGv                     = 0x29,
         OP_SUB_GvEv                     = 0x2B,
         PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E,
+        OP_XOR_EbGb                     = 0x30,
         OP_XOR_EvGv                     = 0x31,
         OP_XOR_GvEv                     = 0x33,
         OP_CMP_EvGv                     = 0x39,
         OP_CMP_GvEv                     = 0x3B,
         OP_CMP_EAXIv                    = 0x3D,
 #ifdef JS_CODEGEN_X64
         PRE_REX                         = 0x40,
 #endif
@@ -250,16 +255,17 @@ private:
         OP_GROUP1_EbIb                  = 0x80,
         OP_GROUP1_EvIz                  = 0x81,
         OP_GROUP1_EvIb                  = 0x83,
         OP_TEST_EbGb                    = 0x84,
         OP_TEST_EvGv                    = 0x85,
         OP_XCHG_EvGv                    = 0x87,
         OP_MOV_EbGv                     = 0x88,
         OP_MOV_EvGv                     = 0x89,
+        OP_MOV_GvEb                     = 0x8A,
         OP_MOV_GvEv                     = 0x8B,
         OP_LEA                          = 0x8D,
         OP_GROUP1A_Ev                   = 0x8F,
         OP_NOP                          = 0x90,
         OP_PUSHFLAGS                    = 0x9C,
         OP_POPFLAGS                     = 0x9D,
         OP_CDQ                          = 0x99,
         OP_MOV_EAXOv                    = 0xA1,
+        OP_MOV_EbIb                     = 0xB0,
@@ -344,23 +350,26 @@ private:
         OP2_PSRLD_UdqIb     = 0x72,
         OP2_PSRLDQ_Vd       = 0x73,
         OP2_PCMPEQW         = 0x75,
         OP2_PCMPEQD_VdqWdq  = 0x76,
         OP2_MOVD_EdVd       = 0x7E,
         OP2_MOVDQ_WdqVdq    = 0x7F,
         OP2_JCC_rel32       = 0x80,
         OP_SETCC            = 0x90,
+        OP_FENCE            = 0xAE,
         OP2_IMUL_GvEv       = 0xAF,
+        OP2_CMPXCHG_GvEb    = 0xB0,
         OP2_CMPXCHG_GvEw    = 0xB1,
         OP2_BSR_GvEv        = 0xBD,
         OP2_MOVSX_GvEb      = 0xBE,
         OP2_MOVSX_GvEw      = 0xBF,
         OP2_MOVZX_GvEb      = 0xB6,
         OP2_MOVZX_GvEw      = 0xB7,
+        OP2_XADD_EbGb       = 0xC0,
         OP2_XADD_EvGv       = 0xC1,
         OP2_CMPPS_VpsWps    = 0xC2,
         OP2_PEXTRW_GdUdIb   = 0xC5,
         OP2_SHUFPS_VpsWpsIb = 0xC6,
         OP2_PSRLD_VdqWdq    = 0xD2,
         OP2_PSRAD_VdqWdq    = 0xE2,
         OP2_PXORDQ_VdqWdq   = 0xEF,
         OP2_PSLLD_VdqWdq    = 0xF2,
@@ -678,25 +687,42 @@ public:
             m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
             m_formatter.immediate8(imm);
         } else {
             m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
             m_formatter.immediate32(imm);
         }
     }
 
-    void xaddl_rm(RegisterID srcdest, int offset, RegisterID base)
+    void lock_xaddb_rm(RegisterID srcdest, int offset, RegisterID base)
+    {
+        spew("lock xaddl %s, %s0x%x(%s)",
+            nameIReg(1, srcdest), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
+        m_formatter.oneByteOp(PRE_LOCK);
+        m_formatter.twoByteOp(OP2_XADD_EbGb, srcdest, base, offset);
+    }
+
+    void lock_xaddb_rm(RegisterID srcdest, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        spew("lock xaddl %s, %s0x%x(%s,%s,%d)",
+            nameIReg(1, srcdest), PRETTY_PRINT_OFFSET(offset),
+            nameIReg(base), nameIReg(index), 1<<scale);
+        m_formatter.oneByteOp(PRE_LOCK);
+        m_formatter.twoByteOp(OP2_XADD_EbGb, srcdest, base, index, scale, offset);
+    }
+
+    void lock_xaddl_rm(RegisterID srcdest, int offset, RegisterID base)
     {
         spew("lock xaddl %s, %s0x%x(%s)",
             nameIReg(4,srcdest), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.oneByteOp(PRE_LOCK);
         m_formatter.twoByteOp(OP2_XADD_EvGv, srcdest, base, offset);
     }
 
-    void xaddl_rm(RegisterID srcdest, int offset, RegisterID base, RegisterID index, int scale)
+    void lock_xaddl_rm(RegisterID srcdest, int offset, RegisterID base, RegisterID index, int scale)
     {
         spew("lock xaddl %s, %s0x%x(%s,%s,%d)",
             nameIReg(4, srcdest), PRETTY_PRINT_OFFSET(offset),
             nameIReg(base), nameIReg(index), 1<<scale);
         m_formatter.oneByteOp(PRE_LOCK);
         m_formatter.twoByteOp(OP2_XADD_EvGv, srcdest, base, index, scale, offset);
     }
 
@@ -1422,37 +1448,76 @@ public:
     }
 
     void prefix_lock()
     {
         spew("lock");
         m_formatter.oneByteOp(PRE_LOCK);
     }
 
+    void prefix_16_for_32()
+    {
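+        // Emit the 0x66 operand-size prefix so the following 32-bit-form
+        // instruction operates on 16-bit data (used by movw, lock_xaddw,
+        // and cmpxchg16).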
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+    }
+
     void incl_m32(int offset, RegisterID base)
     {
         spew("incl       %s0x%x(%s)", PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_INC, base, offset);
     }
 
     void decl_m32(int offset, RegisterID base)
     {
         spew("decl       %s0x%x(%s)", PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_DEC, base, offset);
     }
 
+    // Note that CMPXCHG performs comparison against REG = %al/%ax/%eax.
+    // If %REG == [%base+offset], then %src -> [%base+offset].
+    // Otherwise, [%base+offset] -> %REG.
+    // For the 8-bit operations src must also be an 8-bit register.
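+    // A compare-and-swap loop therefore need not reload the memory operand:
+    // on failure the observed value is already in the accumulator.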
+
+    void cmpxchg8(RegisterID src, int offset, RegisterID base)
+    {
+        spew("cmpxchg8    %s, %s0x%x(%s)",
+             nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
+        m_formatter.twoByteOp(OP2_CMPXCHG_GvEb, src, base, offset);
+    }
+    void cmpxchg8(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        spew("cmpxchg8    %s, %s0x%x(%s,%s,%d)",
+             nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(index), 1<<scale);
+        m_formatter.twoByteOp(OP2_CMPXCHG_GvEb, src, base, index, scale, offset);
+    }
+    void cmpxchg16(RegisterID src, int offset, RegisterID base)
+    {
+        spew("cmpxchg16    %s, %s0x%x(%s)",
+             nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, src, base, offset);
+    }
+    void cmpxchg16(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        spew("cmpxchg16    %s, %s0x%x(%s,%s,%d)",
+             nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(index), 1<<scale);
+        m_formatter.prefix(PRE_OPERAND_SIZE);
+        m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, src, base, index, scale, offset);
+    }
     void cmpxchg32(RegisterID src, int offset, RegisterID base)
     {
-        // Note that 32-bit CMPXCHG performs comparison against %eax.
-        // If %eax == [%base+offset], then %src -> [%base+offset].
-        // Otherwise, [%base+offset] -> %eax.
-        spew("cmpxchg    %s, %s0x%x(%s)",
+        spew("cmpxchg32    %s, %s0x%x(%s)",
              nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, src, base, offset);
     }
+    void cmpxchg32(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+    {
+        spew("cmpxchg32    %s, %s0x%x(%s,%s,%d)",
+             nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(index), 1<<scale);
+        m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, src, base, index, scale, offset);
+    }
 
 
     // Comparisons:
 
     void cmpl_rr(RegisterID src, RegisterID dst)
     {
         spew("cmpl       %s, %s",
              nameIReg(4, src), nameIReg(4, dst));
@@ -1980,16 +2045,24 @@ public:
     void movl_i32r(int imm, RegisterID dst)
     {
         spew("movl       $0x%x, %s",
              imm, nameIReg(4, dst));
         m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
         m_formatter.immediate32(imm);
     }
 
+    void movb_i8r(int imm, RegisterID reg)
+    {
+        spew("movb       $0x%x, %s",
+             imm, nameIReg(1, reg));
+        m_formatter.oneByteOp(OP_MOV_EbIb, reg);
+        m_formatter.immediate8(imm);
+    }
+
     void movb_i8m(int imm, int offset, RegisterID base)
     {
         spew("movb       $0x%x, %s0x%x(%s)",
              imm, PRETTY_PRINT_OFFSET(offset), nameIReg(base));
         m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
         m_formatter.immediate8(imm);
     }
 
@@ -2270,16 +2343,30 @@ public:
 
     void movb_rm(RegisterID src, const void* addr)
     {
         spew("movb       %s, %p",
              nameIReg(1, src), addr);
         m_formatter.oneByteOp8(OP_MOV_EbGv, src, addr);
     }
 
+    void movb_mr(int offset, RegisterID base, RegisterID dst)
+    {
+        spew("movb       %s0x%x(%s), %s",
+             PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(1, dst));
+        m_formatter.oneByteOp(OP_MOV_GvEb, dst, base, offset);
+    }
+
+    void movb_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        spew("movb       %d(%s,%s,%d), %s",
+             offset, nameIReg(base), nameIReg(index), 1<<scale, nameIReg(1, dst));
+        m_formatter.oneByteOp(OP_MOV_GvEb, dst, base, index, scale, offset);
+    }
+
     void movzbl_mr(int offset, RegisterID base, RegisterID dst)
     {
         spew("movzbl     %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(4, dst));
         m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
     }
 
     void movzbl_mr_disp32(int offset, RegisterID base, RegisterID dst)
@@ -2298,16 +2385,23 @@ public:
 
     void movzbl_mr(const void* addr, RegisterID dst)
     {
         spew("movzbl     %p, %s",
              addr, nameIReg(dst));
         m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, addr);
     }
 
+    void movsbl_rr(RegisterID src, RegisterID dst)
+    {
+        spew("movsbl     %s, %s",
+             nameIReg(1,src), nameIReg(4,dst));
+        m_formatter.twoByteOp8_movx(OP2_MOVSX_GvEb, dst, src);
+    }
+
     void movsbl_mr(int offset, RegisterID base, RegisterID dst)
     {
         spew("movsbl     %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(4, dst));
         m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
     }
 
     void movsbl_mr_disp32(int offset, RegisterID base, RegisterID dst)
@@ -2361,16 +2455,23 @@ public:
 
     void movzwl_mr(const void* addr, RegisterID dst)
     {
         spew("movzwl     %p, %s",
              addr, nameIReg(4, dst));
         m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, addr);
     }
 
+    void movswl_rr(RegisterID src, RegisterID dst)
+    {
+        spew("movswl     %s, %s",
+             nameIReg(2, src), nameIReg(4, dst));
+        m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, src);
+    }
+
     void movswl_mr(int offset, RegisterID base, RegisterID dst)
     {
         spew("movswl     %s0x%x(%s), %s",
              PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(4, dst));
         m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
     }
 
     void movswl_mr_disp32(int offset, RegisterID base, RegisterID dst)
@@ -3898,16 +3999,21 @@ public:
 
     void popa()
     {
         spew("popa");
         m_formatter.oneByteOp(OP_POPA);
     }
 #endif
 
+    void mfence() {
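+        // MFENCE is encoded as 0F AE /6 (hence OP_FENCE with reg field 6).
+        // It is an SSE2 instruction; see storeLoadFence() for the fallback.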
+        spew("mfence");
+        m_formatter.twoByteOp(OP_FENCE, (int)6, (RegisterID)0);
+    }
+
     // Assembler admin methods:
 
     JmpDst label()
     {
         JmpDst r = JmpDst(m_formatter.size());
         spew("#label     ((%d))", r.m_offset);
         return r;
     }
--- a/js/src/jit/shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.cpp
@@ -3076,10 +3076,18 @@ JitRuntime::generateForkJoinGetSliceStub
 
 #ifdef JS_ION_PERF
     writePerfSpewerJitCodeProfile(code, "ForkJoinGetSliceStub");
 #endif
 
     return code;
 }
 
+bool
+CodeGeneratorX86Shared::visitMemoryBarrier(LMemoryBarrier *ins)
+{
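+    // x86's memory model only permits store-load reordering, so a fence is
+    // needed only when the barrier includes MembarStoreLoad.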
+    if (ins->type() & MembarStoreLoad)
+        masm.storeLoadFence();
+    return true;
+}
+
 } // namespace jit
 } // namespace js
--- a/js/src/jit/shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/shared/CodeGenerator-x86-shared.h
@@ -192,16 +192,17 @@ class CodeGeneratorX86Shared : public Co
     virtual bool visitRound(LRound *lir);
     virtual bool visitRoundF(LRoundF *lir);
     virtual bool visitGuardShape(LGuardShape *guard);
     virtual bool visitGuardObjectType(LGuardObjectType *guard);
     virtual bool visitGuardClass(LGuardClass *guard);
     virtual bool visitEffectiveAddress(LEffectiveAddress *ins);
     virtual bool visitUDivOrMod(LUDivOrMod *ins);
     virtual bool visitAsmJSPassStackArg(LAsmJSPassStackArg *ins);
+    virtual bool visitMemoryBarrier(LMemoryBarrier *ins);
 
     bool visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds *ool);
 
     bool visitForkJoinGetSlice(LForkJoinGetSlice *ins);
 
     bool visitNegI(LNegI *lir);
     bool visitNegD(LNegD *lir);
     bool visitNegF(LNegF *lir);
--- a/js/src/jit/shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/shared/Lowering-x86-shared.cpp
@@ -345,16 +345,155 @@ LIRGeneratorX86Shared::visitForkJoinGetS
                           tempFixed(eax),
                           tempFixed(edx),
                           tempFixed(ForkJoinGetSliceReg_temp0),
                           tempFixed(ForkJoinGetSliceReg_temp1));
     return defineFixed(lir, ins, LAllocation(AnyRegister(ForkJoinGetSliceReg_output)));
 }
 
 bool
+LIRGeneratorX86Shared::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins)
+{
+    MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+    MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+    MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
+    MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
+
+    const LUse elements = useRegister(ins->elements());
+    const LAllocation index = useRegisterOrConstant(ins->index());
+
+    // Register allocation:
+    //
+    // If the target is an integer register then the target must be
+    // eax.
+    //
+    // If the target is a floating register then we need a temp at the
+    // lower level; that temp must be eax.
+    //
+    // oldval must be in a register.
+    //
+    // newval will need to be in a register.  If the source is a byte
+    // array then the newval must be a register that has a byte size:
+    // ebx, ecx, or edx, since eax is taken for the output in this
+    // case.
+    //
+    // Bug #1077036 describes some optimization opportunities.
+
+    bool fixedOutput = false;
+    LDefinition tempDef = LDefinition::BogusTemp();
+    LAllocation newval;
+    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+        tempDef = tempFixed(eax);
+        newval = useRegister(ins->newval());
+    } else {
+        fixedOutput = true;
+        if (ins->isByteArray())
+            newval = useFixed(ins->newval(), ebx);
+        else
+            newval = useRegister(ins->newval());
+    }
+
+    // A register allocator limitation precludes 'useRegisterAtStart()' here.
+    const LAllocation oldval = useRegister(ins->oldval());
+
+    LCompareExchangeTypedArrayElement *lir =
+        new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, tempDef);
+
+    return fixedOutput ? defineFixed(lir, ins, LAllocation(AnyRegister(eax))) : define(lir, ins);
+}
+
+bool
+LIRGeneratorX86Shared::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins)
+{
+    MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+    MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+    MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+    MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
+    MOZ_ASSERT(ins->index()->type() == MIRType_Int32);
+
+    const LUse elements = useRegister(ins->elements());
+    const LAllocation index = useRegisterOrConstant(ins->index());
+
+    // Register allocation:
+    //
+    // For ADD and SUB we'll use XADD:
+    //
+    //    movl       src, output
+    //    lock xaddl output, mem
+    //
+    // For the 8-bit variants XADD needs a byte register for the
+    // output only.
+    //
+    // For AND/OR/XOR we need to use a CMPXCHG loop:
+    //
+    //    movl          *mem, eax
+    // L: mov           eax, temp
+    //    andl          src, temp
+    //    lock cmpxchg  temp, mem  ; reads eax also
+    //    jnz           L
+    //    ; result in eax
+    //
+    // Note the placement of L, cmpxchg will update eax with *mem if
+    // *mem does not have the expected value, so reloading it at the
+    // top of the loop is redundant.
+    //
+    // If the array is not a uint32 array then:
+    //  - eax should be the output (one result of the cmpxchg)
+    //  - there is a temp, which must have a byte register if
+    //    the array has 1-byte elements
+    //
+    // If the array is a uint32 array then:
+    //  - eax is the first temp
+    //  - we also need a second temp
+    //
+    // For simplicity we force the 'value' into a byte register if the
+    // array has 1-byte elements, though that could be worked around.
+    //
+    // For simplicity we also choose fixed byte registers even when
+    // any available byte register would have been OK.
+    //
+    // There are optimization opportunities:
+    //  - when the result is unused, Bug #1077014.
+    //  - better register allocation and instruction selection, Bug #1077036.
+
+    bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
+    bool fixedOutput = true;
+    LDefinition tempDef1 = LDefinition::BogusTemp();
+    LDefinition tempDef2 = LDefinition::BogusTemp();
+    LAllocation value;
+
+    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+        value = useRegister(ins->value());
+        fixedOutput = false;
+        if (bitOp) {
+            tempDef1 = tempFixed(eax);
+            tempDef2 = temp();
+        } else {
+            tempDef1 = temp();
+        }
+    } else if (ins->isByteArray()) {
+        value = useFixed(ins->value(), ebx);
+        if (bitOp)
+            tempDef1 = tempFixed(ecx);
+    } else {
+        value = useRegister(ins->value());
+        if (bitOp)
+            tempDef1 = temp();
+    }
+
+    LAtomicTypedArrayElementBinop *lir =
+        new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, tempDef1, tempDef2);
+
+    return fixedOutput ? defineFixed(lir, ins, LAllocation(AnyRegister(eax))) : define(lir, ins);
+}
+
+bool
 LIRGeneratorX86Shared::visitSimdTernaryBitwise(MSimdTernaryBitwise *ins)
 {
     MOZ_ASSERT(IsSimdType(ins->type()));
 
     if (ins->type() == MIRType_Int32x4 || ins->type() == MIRType_Float32x4) {
         LSimdSelect *lins = new(alloc()) LSimdSelect;
 
         // This must be useRegisterAtStart() because it is destroyed.
--- a/js/src/jit/shared/Lowering-x86-shared.h
+++ b/js/src/jit/shared/Lowering-x86-shared.h
@@ -50,14 +50,16 @@ class LIRGeneratorX86Shared : public LIR
     bool lowerConstantDouble(double d, MInstruction *ins);
     bool lowerConstantFloat32(float d, MInstruction *ins);
     bool lowerTruncateDToInt32(MTruncateToInt32 *ins);
     bool lowerTruncateFToInt32(MTruncateToInt32 *ins);
     bool visitForkJoinGetSlice(MForkJoinGetSlice *ins);
     bool visitSimdTernaryBitwise(MSimdTernaryBitwise *ins);
     bool visitSimdSplatX4(MSimdSplatX4 *ins);
     bool visitSimdValueX4(MSimdValueX4 *ins);
+    bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
+    bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_shared_Lowering_x86_shared_h */
--- a/js/src/jit/shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/shared/MacroAssembler-x86-shared.cpp
@@ -161,16 +161,24 @@ MacroAssemblerX86Shared::buildFakeExitFr
     bind(cl.src());
     *offset = currentOffset();
 
     MOZ_ASSERT(framePushed() == initialDepth + IonExitFrameLayout::Size());
     return addCodeLabel(cl);
 }
 
 void
+MacroAssemblerX86Shared::callWithExitFrame(Label *target)
+{
+    uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
+    Push(Imm32(descriptor));
+    call(target);
+}
+
+void
 MacroAssemblerX86Shared::callWithExitFrame(JitCode *target)
 {
     uint32_t descriptor = MakeFrameDescriptor(framePushed(), JitFrame_IonJS);
     Push(Imm32(descriptor));
     call(target);
 }
 
 void
--- a/js/src/jit/shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/shared/MacroAssembler-x86-shared.h
@@ -188,20 +188,303 @@ class MacroAssemblerX86Shared : public A
         notl(reg);
     }
     void atomic_inc32(const Operand &addr) {
         lock_incl(addr);
     }
     void atomic_dec32(const Operand &addr) {
         lock_decl(addr);
     }
-    void atomic_cmpxchg32(Register src, const Operand &addr, Register dest) {
+    void atomic_cmpxchg8(Register newval, const Operand &addr, Register oldval_and_result) {
+        // %eax must be explicitly provided for calling clarity.
+        MOZ_ASSERT(oldval_and_result.code() == X86Registers::eax);
+        lock_cmpxchg8(newval, addr);
+    }
+    void atomic_cmpxchg16(Register newval, const Operand &addr, Register oldval_and_result) {
+        // %eax must be explicitly provided for calling clarity.
+        MOZ_ASSERT(oldval_and_result.code() == X86Registers::eax);
+        lock_cmpxchg16(newval, addr);
+    }
+    void atomic_cmpxchg32(Register newval, const Operand &addr, Register oldval_and_result) {
         // %eax must be explicitly provided for calling clarity.
-        MOZ_ASSERT(dest.code() == X86Registers::eax);
-        lock_cmpxchg32(src, addr);
+        MOZ_ASSERT(oldval_and_result.code() == X86Registers::eax);
+        lock_cmpxchg32(newval, addr);
+    }
+
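+    // Fetch-and-add/sub are implemented with LOCK XADD, which atomically
+    // stores src+*mem to *mem and leaves the old *mem in the source
+    // register; src is therefore first moved into output below.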
+    template <typename T>
+    void atomicFetchAdd8SignExtend(Register src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(output == eax);
+        if (src != output)
+            movl(src, output);
+        lock_xaddb(output, Operand(mem));
+        movsbl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchAdd8ZeroExtend(Register src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(output == eax);
+        MOZ_ASSERT(temp == InvalidReg);
+        if (src != output)
+            movl(src, output);
+        lock_xaddb(output, Operand(mem));
+        movzbl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchAdd8SignExtend(Imm32 src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(output == eax);
+        MOZ_ASSERT(temp == InvalidReg);
+        movb(src, output);
+        lock_xaddb(output, Operand(mem));
+        movsbl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchAdd8ZeroExtend(Imm32 src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(output == eax);
+        MOZ_ASSERT(temp == InvalidReg);
+        movb(src, output);
+        lock_xaddb(output, Operand(mem));
+        movzbl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchAdd16SignExtend(Register src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        if (src != output)
+            movl(src, output);
+        lock_xaddw(output, Operand(mem));
+        movswl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchAdd16ZeroExtend(Register src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        if (src != output)
+            movl(src, output);
+        lock_xaddw(output, Operand(mem));
+        movzwl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchAdd16SignExtend(Imm32 src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        movl(src, output);
+        lock_xaddw(output, Operand(mem));
+        movswl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchAdd16ZeroExtend(Imm32 src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        movl(src, output);
+        lock_xaddw(output, Operand(mem));
+        movzwl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchAdd32(Register src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        if (src != output)
+            movl(src, output);
+        lock_xaddl(output, Operand(mem));
+    }
+
+    template <typename T>
+    void atomicFetchAdd32(Imm32 src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        movl(src, output);
+        lock_xaddl(output, Operand(mem));
+    }
+
+    template <typename T>
+    void atomicFetchSub8SignExtend(Register src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(output == eax);
+        MOZ_ASSERT(temp == InvalidReg);
+        if (src != output)
+            movl(src, output);
+        negl(output);
+        lock_xaddb(output, Operand(mem));
+        movsbl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchSub8ZeroExtend(Register src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(output == eax);
+        MOZ_ASSERT(temp == InvalidReg);
+        if (src != output)
+            movl(src, output);
+        negl(output);
+        lock_xaddb(output, Operand(mem));
+        movzbl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchSub8SignExtend(Imm32 src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(output == eax);
+        MOZ_ASSERT(temp == InvalidReg);
+        movb(Imm32(-src.value), output);
+        lock_xaddb(output, Operand(mem));
+        movsbl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchSub8ZeroExtend(Imm32 src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(output == eax);
+        MOZ_ASSERT(temp == InvalidReg);
+        movb(Imm32(-src.value), output);
+        lock_xaddb(output, Operand(mem));
+        movzbl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchSub16SignExtend(Register src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        if (src != output)
+            movl(src, output);
+        negl(output);
+        lock_xaddw(output, Operand(mem));
+        movswl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchSub16ZeroExtend(Register src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        if (src != output)
+            movl(src, output);
+        negl(output);
+        lock_xaddw(output, Operand(mem));
+        movzwl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchSub16SignExtend(Imm32 src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        movl(Imm32(-src.value), output);
+        lock_xaddw(output, Operand(mem));
+        movswl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchSub16ZeroExtend(Imm32 src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        movl(Imm32(-src.value), output);
+        lock_xaddw(output, Operand(mem));
+        movzwl(output, output);
+    }
+
+    template <typename T>
+    void atomicFetchSub32(Register src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        if (src != output)
+            movl(src, output);
+        negl(output);
+        lock_xaddl(output, Operand(mem));
+    }
+
+    template <typename T>
+    void atomicFetchSub32(Imm32 src, const T &mem, Register temp, Register output) {
+        MOZ_ASSERT(temp == InvalidReg);
+        movl(Imm32(-src.value), output);
+        lock_xaddl(output, Operand(mem));
+    }
+
+    // CMPXCHG-loop body for the fetch-and-op bit operations (AND/OR/XOR).
+    // Requires output == eax.  temp receives OP(oldval, src) each iteration;
+    // LOCK CMPXCHG retries until the swap succeeds, leaving the old value
+    // in eax.
+#define ATOMIC_BITOP_BODY(LOAD, OP, LOCK_CMPXCHG)        \
+        MOZ_ASSERT(output == eax); \
+        LOAD(Operand(mem), eax);  \
+        Label again;              \
+        bind(&again);             \
+        movl(eax, temp);          \
+        OP(src, temp);            \
+        LOCK_CMPXCHG(temp, Operand(mem)); \
+        j(NonZero, &again);
+
+    template <typename S, typename T>
+    void atomicFetchAnd8SignExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movb, andl, lock_cmpxchg8)
+        movsbl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchAnd8ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movb, andl, lock_cmpxchg8)
+        movzbl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchAnd16SignExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movw, andl, lock_cmpxchg16)
+        movswl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchAnd16ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movw, andl, lock_cmpxchg16)
+        movzwl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchAnd32(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movl, andl, lock_cmpxchg32)
+    }
+
+    template <typename S, typename T>
+    void atomicFetchOr8SignExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movb, orl, lock_cmpxchg8)
+        movsbl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchOr8ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movb, orl, lock_cmpxchg8)
+        movzbl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchOr16SignExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movw, orl, lock_cmpxchg16)
+        movswl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchOr16ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movw, orl, lock_cmpxchg16)
+        movzwl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchOr32(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movl, orl, lock_cmpxchg32)
+    }
+
+    template <typename S, typename T>
+    void atomicFetchXor8SignExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movb, xorl, lock_cmpxchg8)
+        movsbl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchXor8ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movb, xorl, lock_cmpxchg8)
+        movzbl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchXor16SignExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movw, xorl, lock_cmpxchg16)
+        movswl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchXor16ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movw, xorl, lock_cmpxchg16)
+        movzwl(eax, eax);
+    }
+    template <typename S, typename T>
+    void atomicFetchXor32(const S &src, const T &mem, Register temp, Register output) {
+        ATOMIC_BITOP_BODY(movl, xorl, lock_cmpxchg32)
+    }
+
+#undef ATOMIC_BITOP_BODY
+
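[editor's note] ATOMIC_BITOP_BODY is the standard compare-and-swap retry loop: load the current value into eax, compute old OP src in a temporary, and attempt LOCK CMPXCHG; if another thread intervened, CMPXCHG clears ZF and refreshes eax with the observed value, and the j(NonZero) branch retries. The same shape in portable C++, as a sketch (fetchAnd32 is an illustrative name):

    #include <atomic>
    #include <cstdint>

    // Sketch of the retry loop ATOMIC_BITOP_BODY expands to, for the AND case.
    static uint32_t fetchAnd32(std::atomic<uint32_t> &mem, uint32_t src) {
        uint32_t old = mem.load();                    // movl (mem), %eax
        // On failure compare_exchange_weak reloads 'old' from memory, just as
        // a failed cmpxchg refreshes eax before the branch back to 'again'.
        while (!mem.compare_exchange_weak(old, old & src,
                                          std::memory_order_seq_cst)) {
        }
        return old;  // the value before the operation, as the callers expect
    }
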
+    void storeLoadFence() {
+        // This implementation follows Linux.
+        if (HasSSE2())
+            masm.mfence();
+        else
+            lock_addl(Imm32(0), Operand(Address(esp, 0)));
     }
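[editor's note] storeLoadFence() follows the Linux kernel's approach: MFENCE where SSE2 guarantees it exists, otherwise a LOCK-prefixed add of zero to a stack location, which is also a full barrier on x86. The portable C++ equivalent of the request being made is a sequentially consistent fence; a rough sketch:

    #include <atomic>

    // Approximate portable analogue of storeLoadFence(): a full barrier that
    // orders earlier stores before later loads.  On x86 compilers lower this
    // to mfence or a locked read-modify-write, matching the two paths above.
    static void storeLoadFence() {
        std::atomic_thread_fence(std::memory_order_seq_cst);
    }
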
 
     void branch16(Condition cond, Register lhs, Register rhs, Label *label) {
         cmpw(lhs, rhs);
         j(cond, label);
     }
     void branch32(Condition cond, const Operand &lhs, Register rhs, Label *label) {
         cmpl(lhs, rhs);
@@ -357,26 +640,60 @@ class MacroAssemblerX86Shared : public A
     }
     void load8SignExtend(const BaseIndex &src, Register dest) {
         movsbl(Operand(src), dest);
     }
     template <typename S, typename T>
     void store8(const S &src, const T &dest) {
         movb(src, Operand(dest));
     }
+    template <typename T>
+    void compareExchange8ZeroExtend(const T &mem, Register oldval, Register newval, Register output) {
+        MOZ_ASSERT(output == eax);
+        MOZ_ASSERT(newval == ebx || newval == ecx || newval == edx);
+        if (oldval != output)
+            movl(oldval, output);
+        lock_cmpxchg8(newval, Operand(mem));
+        movzbl(output, output);
+    }
+    template <typename T>
+    void compareExchange8SignExtend(const T &mem, Register oldval, Register newval, Register output) {
+        MOZ_ASSERT(output == eax);
+        MOZ_ASSERT(newval == ebx || newval == ecx || newval == edx);
+        if (oldval != output)
+            movl(oldval, output);
+        lock_cmpxchg8(newval, Operand(mem));
+        movsbl(output, output);
+    }
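[editor's note] Both compareExchange8 variants pin output to eax because CMPXCHG hard-codes the accumulator: the expected value goes in through eax, and the value actually observed in memory comes back out, whether or not the exchange happened; the byte form additionally needs newval in a register with a low-8-bit encoding (ebx/ecx/edx here, since eax is taken). A hedged C++ sketch of that contract (the name compareExchange8 below is illustrative):

    #include <atomic>
    #include <cstdint>

    // Returns the value observed in memory; it equals 'oldval' exactly when
    // the exchange succeeded -- the same result cmpxchg leaves in eax.
    static uint8_t compareExchange8(std::atomic<uint8_t> &mem,
                                    uint8_t oldval, uint8_t newval) {
        uint8_t observed = oldval;
        mem.compare_exchange_strong(observed, newval,
                                    std::memory_order_seq_cst);
        return observed;  // zero- or sign-extension is the caller's next step
    }
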
     void load16ZeroExtend(const Address &src, Register dest) {
         movzwl(Operand(src), dest);
     }
     void load16ZeroExtend(const BaseIndex &src, Register dest) {
         movzwl(Operand(src), dest);
     }
     template <typename S, typename T>
     void store16(const S &src, const T &dest) {
         movw(src, Operand(dest));
     }
+    template <typename T>
+    void compareExchange16ZeroExtend(const T &mem, Register oldval, Register newval, Register output) {
+        MOZ_ASSERT(output == eax);
+        if (oldval != output)
+            movl(oldval, output);
+        lock_cmpxchg16(newval, Operand(mem));
+        movzwl(output, output);
+    }
+    template <typename T>
+    void compareExchange16SignExtend(const T &mem, Register oldval, Register newval, Register output) {
+        MOZ_ASSERT(output == eax);
+        if (oldval != output)
+            movl(oldval, output);
+        lock_cmpxchg16(newval, Operand(mem));
+        movswl(output, output);
+    }
     void load16SignExtend(const Address &src, Register dest) {
         movswl(Operand(src), dest);
     }
     void load16SignExtend(const BaseIndex &src, Register dest) {
         movswl(Operand(src), dest);
     }
     void load32(const Address &address, Register dest) {
         movl(Operand(address), dest);
@@ -386,16 +703,23 @@ class MacroAssemblerX86Shared : public A
     }
     void load32(const Operand &src, Register dest) {
         movl(src, dest);
     }
     template <typename S, typename T>
     void store32(const S &src, const T &dest) {
         movl(src, Operand(dest));
     }
+    template <typename T>
+    void compareExchange32(const T &mem, Register oldval, Register newval, Register output) {
+        MOZ_ASSERT(output == eax);
+        if (oldval != output)
+            movl(oldval, output);
+        lock_cmpxchg32(newval, Operand(mem));
+    }
     template <typename S, typename T>
     void store32_NoSecondScratch(const S &src, const T &dest) {
         store32(src, dest);
     }
     void loadDouble(const Address &src, FloatRegister dest) {
         movsd(src, dest);
     }
     void loadDouble(const BaseIndex &src, FloatRegister dest) {
@@ -847,16 +1171,17 @@ class MacroAssemblerX86Shared : public A
     template <typename T>
     void computeEffectiveAddress(const T &address, Register dest) {
         lea(Operand(address), dest);
     }
 
     // Builds an exit frame on the stack, with a return address to an internal
     // non-function. Returns offset to be passed to markSafepointAt().
     bool buildFakeExitFrame(Register scratch, uint32_t *offset);
+    void callWithExitFrame(Label *target);
     void callWithExitFrame(JitCode *target);
 
     void call(const CallSiteDesc &desc, Label *label) {
         call(label);
         append(desc, currentOffset(), framePushed_);
     }
     void call(const CallSiteDesc &desc, Register reg) {
         call(reg);
--- a/js/src/js.msg
+++ b/js/src/js.msg
@@ -445,8 +445,13 @@ MSG_DEF(JSMSG_PAR_ARRAY_SCATTER_CONFLICT
 
 // Reflect
 MSG_DEF(JSMSG_BAD_PARSE_NODE,          0, JSEXN_INTERNALERR, "bad parse node")
 
 // Symbol
 MSG_DEF(JSMSG_BAD_SYMBOL,              1, JSEXN_TYPEERR, "{0} is not a well-known @@-symbol")
 MSG_DEF(JSMSG_SYMBOL_TO_STRING,        0, JSEXN_TYPEERR, "can't convert symbol to string")
 MSG_DEF(JSMSG_SYMBOL_TO_NUMBER,        0, JSEXN_TYPEERR, "can't convert symbol to number")
+
+// Atomics and futexes
+MSG_DEF(JSMSG_ATOMICS_NOT_INSTALLED,     0, JSEXN_ERR, "futex support is not installed")
+MSG_DEF(JSMSG_ATOMICS_BAD_ARRAY,         0, JSEXN_TYPEERR, "invalid array type for the operation")
+MSG_DEF(JSMSG_ATOMICS_TOO_LONG,          0, JSEXN_RANGEERR, "timeout value too large")
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -41,16 +41,17 @@
 #include "jstypes.h"
 #include "jsutil.h"
 #include "jswatchpoint.h"
 #include "jsweakmap.h"
 #include "jswrapper.h"
 #include "prmjtime.h"
 
 #include "asmjs/AsmJSLink.h"
+#include "builtin/AtomicsObject.h"
 #include "builtin/Eval.h"
 #include "builtin/Intl.h"
 #include "builtin/MapObject.h"
 #include "builtin/RegExp.h"
 #include "builtin/SymbolObject.h"
 #ifdef ENABLE_BINARYDATA
 #include "builtin/SIMD.h"
 #include "builtin/TypedObject.h"
@@ -667,16 +668,28 @@ JS_GetRuntimePrivate(JSRuntime *rt)
 }
 
 JS_PUBLIC_API(void)
 JS_SetRuntimePrivate(JSRuntime *rt, void *data)
 {
     rt->data = data;
 }
 
+JS_PUBLIC_API(JS::PerRuntimeFutexAPI *)
+JS::GetRuntimeFutexAPI(JSRuntime *rt)
+{
+    return rt->futexAPI_;
+}
+
+JS_PUBLIC_API(void)
+JS::SetRuntimeFutexAPI(JSRuntime *rt, JS::PerRuntimeFutexAPI *fx)
+{
+    rt->futexAPI_ = fx;
+}
+
 static void
 StartRequest(JSContext *cx)
 {
     JSRuntime *rt = cx->runtime();
     MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
 
     if (rt->requestDepth) {
         rt->requestDepth++;
@@ -1204,16 +1217,17 @@ static const JSStdName builtin_property_
     { EAGER_ATOM(encodeURIComponent), JSProto_String },
 #if JS_HAS_UNEVAL
     { EAGER_ATOM(uneval), JSProto_String },
 #endif
 #ifdef ENABLE_BINARYDATA
     { EAGER_ATOM(SIMD), JSProto_SIMD },
     { EAGER_ATOM(TypedObject), JSProto_TypedObject },
 #endif
+    { EAGER_ATOM(Atomics), JSProto_Atomics },
 
     { 0, JSProto_LIMIT }
 };
 
 #undef EAGER_ATOM
 
 JS_PUBLIC_API(bool)
 JS_ResolveStandardClass(JSContext *cx, HandleObject obj, HandleId id, bool *resolved)
@@ -6453,19 +6467,19 @@ JS_DecodeInterpretedFunction(JSContext *
     XDRDecoder decoder(cx, data, length);
     RootedFunction funobj(cx);
     if (!decoder.codeFunction(&funobj))
         return nullptr;
     return funobj;
 }
 
 JS_PUBLIC_API(bool)
-JS_PreventExtensions(JSContext *cx, JS::HandleObject obj)
-{
-    return JSObject::preventExtensions(cx, obj);
+JS_PreventExtensions(JSContext *cx, JS::HandleObject obj, bool *succeeded)
+{
+    return JSObject::preventExtensions(cx, obj, succeeded);
 }
 
 JS_PUBLIC_API(void)
 JS::SetAsmJSCacheOps(JSRuntime *rt, const JS::AsmJSCacheOps *ops)
 {
     rt->asmJSCacheOps = *ops;
 }
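[editor's note] The revised JS_PreventExtensions separates a thrown error (false return, exception pending) from a refused request (*succeeded set to false with no exception), matching the two failure modes of the ES6 [[PreventExtensions]] internal method. A sketch of the calling convention the new signature implies (hypothetical caller, not code from this patch):

    #include "jsapi.h"

    // Hypothetical helper demonstrating the two-channel result.
    static bool MakeNonExtensible(JSContext *cx, JS::HandleObject obj) {
        bool succeeded;
        if (!JS_PreventExtensions(cx, obj, &succeeded))
            return false;   // hard error; an exception is already pending
        if (!succeeded) {
            // No exception: the object (e.g. a proxy) declined the request;
            // the caller decides whether to report that as an error.
            return false;
        }
        return true;
    }
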
 
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -608,16 +608,71 @@ class HandleValueArray
     const Value *begin() const { return elements_; }
 
     HandleValue operator[](size_t i) const {
         MOZ_ASSERT(i < length_);
         return HandleValue::fromMarkedLocation(&elements_[i]);
     }
 };
 
+// Container for futex methods, used to implement the Atomics primitives.
+//
+// Client code calls JS::SetRuntimeFutexAPI to install an instance of a
+// subclass of PerRuntimeFutexAPI on the runtime.  Implementations
+// of the Atomics primitives will use that object, if it is present
+// (and fail, if not).
+//
+// The implementation may differ among clients; for example, a worker and the
+// main window event thread may install different objects.
+
+class PerRuntimeFutexAPI
+{
+  public:
+    virtual ~PerRuntimeFutexAPI() {}
+
+    // Acquire the GLOBAL lock for all futex resources in all domains.
+    virtual void lock() = 0;
+
+    // Release the GLOBAL lock.
+    virtual void unlock() = 0;
+
+    // Return true iff the calling thread is a worker thread.  This must be
+    // used to guard calls to wait().  The lock need not be held.
+    virtual bool isOnWorkerThread() = 0;
+
+    enum WakeResult {
+        Woken,                  // Woken by futexWake
+        Timedout,               // Woken by timeout
+        InterruptForTerminate,  // Woken by a request to terminate the worker
+        ErrorTooLong            // Timeout value exceeds an implementation limit (for now)
+    };
+
+    // Block the thread.
+    //
+    // The lock must be held around this call, see lock() and unlock().
+    virtual WakeResult wait(double timeout_ns) = 0;
+
+    // Wake the thread represented by this PerRuntimeFutexAPI.
+    //
+    // The lock must be held around this call, see lock() and unlock().
+    // Since the sleeping thread also needs that lock to wake up, the
+    // thread will not actually wake up until the caller of wake()
+    // releases the lock.
+    virtual void wake() = 0;
+};
+
+JS_PUBLIC_API(JS::PerRuntimeFutexAPI *)
+GetRuntimeFutexAPI(JSRuntime *rt);
+
+// Transfers ownership of fx to rt; if rt's futexAPI field is non-null when rt
+// is deleted, then rt's destructor will delete that value.  If fx is null in
+// this call, ownership of any held non-null value is transferred away from rt.
+JS_PUBLIC_API(void)
+SetRuntimeFutexAPI(JSRuntime *rt, JS::PerRuntimeFutexAPI *fx);
+
 }  /* namespace JS */
 
 /************************************************************************/
 
 struct JSFreeOp {
   private:
     JSRuntime   *runtime_;
 
@@ -2789,18 +2844,25 @@ extern JS_PUBLIC_API(bool)
 JS_DeepFreezeObject(JSContext *cx, JS::Handle<JSObject*> obj);
 
 /*
  * Freezes an object; see ES5's Object.freeze(obj) method.
  */
 extern JS_PUBLIC_API(bool)
 JS_FreezeObject(JSContext *cx, JS::Handle<JSObject*> obj);
 
-extern JS_PUBLIC_API(bool)
-JS_PreventExtensions(JSContext *cx, JS::HandleObject obj);
+/*
+ * Attempt to make |obj| non-extensible.  If an error occurs while making the
+ * attempt, return false (with a pending exception set, depending upon the
+ * nature of the error).  If no error occurs, return true with |*succeeded| set
+ * to indicate whether the attempt successfully set the [[Extensible]] property
+ * to false.
+ */
+extern JS_PUBLIC_API(bool)
+JS_PreventExtensions(JSContext *cx, JS::HandleObject obj, bool *succeeded);
 
 extern JS_PUBLIC_API(JSObject *)
 JS_New(JSContext *cx, JS::HandleObject ctor, const JS::HandleValueArray& args);
 
 extern JS_PUBLIC_API(JSObject *)
 JS_DefineObject(JSContext *cx, JS::HandleObject obj, const char *name,
                 const JSClass *clasp = nullptr, JS::HandleObject proto = JS::NullPtr(),
                 unsigned attrs = 0);
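[editor's note] The PerRuntimeFutexAPI contract is lock, check, then wait with the lock held: because wake() is called under the same lock the sleeper must reacquire, a wakeup posted between the check and the wait cannot be lost. One plausible embedder implementation over a process-wide mutex and a per-runtime condition variable, purely as an illustration (no such class exists in this patch, and InterruptForTerminate handling is elided):

    #include <chrono>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>
    #include "jsapi.h"

    // Hypothetical embedder-side implementation of JS::PerRuntimeFutexAPI.
    class SimpleFutexAPI : public JS::PerRuntimeFutexAPI {
        static std::mutex lock_;        // backs the GLOBAL lock of the contract
        std::condition_variable cond_;
        bool woken_ = false;

      public:
        void lock()   { lock_.lock(); }
        void unlock() { lock_.unlock(); }
        bool isOnWorkerThread() { return true; }  // assumes worker-only install

        WakeResult wait(double timeout_ns) {
            // The caller already holds the lock; adopt it so the condition
            // variable can atomically release and reacquire it.
            std::unique_lock<std::mutex> guard(lock_, std::adopt_lock);
            bool signaled = cond_.wait_for(
                guard, std::chrono::nanoseconds(int64_t(timeout_ns)),
                [this] { return woken_; });
            woken_ = false;
            guard.release();  // leave the lock held for the caller, per contract
            return signaled ? Woken : Timedout;
        }

        void wake() {
            // Caller holds the lock, so the sleeper cannot miss this wakeup.
            woken_ = true;
            cond_.notify_one();
        }
    };

    std::mutex SimpleFutexAPI::lock_;

An embedder would then install one such object per runtime with JS::SetRuntimeFutexAPI(rt, new SimpleFutexAPI()), after which the runtime owns it as described in the comment above.
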
--- a/js/src/jscompartment.h
+++ b/js/src/jscompartment.h
@@ -67,32 +67,51 @@ struct CrossCompartmentKey
         DebuggerObject,
         DebuggerEnvironment
     };
 
     Kind kind;
     JSObject *debugger;
     js::gc::Cell *wrapped;
 
-    CrossCompartmentKey()
-      : kind(ObjectWrapper), debugger(nullptr), wrapped(nullptr) {}
     explicit CrossCompartmentKey(JSObject *wrapped)
-      : kind(ObjectWrapper), debugger(nullptr), wrapped(wrapped) {}
+      : kind(ObjectWrapper), debugger(nullptr), wrapped(wrapped)
+    {
+        MOZ_RELEASE_ASSERT(wrapped);
+    }
     explicit CrossCompartmentKey(JSString *wrapped)
-      : kind(StringWrapper), debugger(nullptr), wrapped(wrapped) {}
-    explicit CrossCompartmentKey(Value wrapped)
-      : kind(wrapped.isString() ? StringWrapper : ObjectWrapper),
+      : kind(StringWrapper), debugger(nullptr), wrapped(wrapped)
+    {
+        MOZ_RELEASE_ASSERT(wrapped);
+    }
+    explicit CrossCompartmentKey(Value wrappedArg)
+      : kind(wrappedArg.isString() ? StringWrapper : ObjectWrapper),
+        debugger(nullptr),
+        wrapped((js::gc::Cell *)wrappedArg.toGCThing())
+    {
+        MOZ_RELEASE_ASSERT(wrappedArg.isString() || wrappedArg.isObject());
+        MOZ_RELEASE_ASSERT(wrapped);
+    }
+    explicit CrossCompartmentKey(const RootedValue &wrappedArg)
+      : kind(wrappedArg.get().isString() ? StringWrapper : ObjectWrapper),
         debugger(nullptr),
-        wrapped((js::gc::Cell *)wrapped.toGCThing()) {}
-    explicit CrossCompartmentKey(const RootedValue &wrapped)
-      : kind(wrapped.get().isString() ? StringWrapper : ObjectWrapper),
-        debugger(nullptr),
-        wrapped((js::gc::Cell *)wrapped.get().toGCThing()) {}
+        wrapped((js::gc::Cell *)wrappedArg.get().toGCThing())
+    {
+        MOZ_RELEASE_ASSERT(wrappedArg.isString() || wrappedArg.isObject());
+        MOZ_RELEASE_ASSERT(wrapped);
+    }
     CrossCompartmentKey(Kind kind, JSObject *dbg, js::gc::Cell *wrapped)
-      : kind(kind), debugger(dbg), wrapped(wrapped) {}
+      : kind(kind), debugger(dbg), wrapped(wrapped)
+    {
+        MOZ_RELEASE_ASSERT(dbg);
+        MOZ_RELEASE_ASSERT(wrapped);
+    }
+
+  private:
+    CrossCompartmentKey() MOZ_DELETE;
 };
 
 struct WrapperHasher : public DefaultHasher<CrossCompartmentKey>
 {
     static HashNumber hash(const CrossCompartmentKey &key) {
         MOZ_ASSERT(!IsPoisonedPtr(key.wrapped));
         return uint32_t(uintptr_t(key.wrapped)) | uint32_t(key.kind);
     }
--- a/js/src/jsfun.cpp
+++ b/js/src/jsfun.cpp
@@ -863,20 +863,27 @@ CreateFunctionPrototype(JSContext *cx, J
     // "callee" and "caller" accessors on strict mode arguments objects.  (The
     // spec also uses this for "arguments" and "caller" on various functions,
     // but we're experimenting with implementing them using accessors on
     // |Function.prototype| right now.)
     RootedObject tte(cx, NewObjectWithGivenProto(cx, &JSFunction::class_, functionProto, self,
                                                  SingletonObject));
     if (!tte)
         return nullptr;
+
+    bool succeeded;
     RootedFunction throwTypeError(cx, NewFunction(cx, tte, ThrowTypeError, 0,
                                                   JSFunction::NATIVE_FUN, self, js::NullPtr()));
-    if (!throwTypeError || !JSObject::preventExtensions(cx, throwTypeError))
+    if (!throwTypeError || !JSObject::preventExtensions(cx, throwTypeError, &succeeded))
         return nullptr;
+    if (!succeeded) {
+        JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_CANT_CHANGE_EXTENSIBILITY);
+        return nullptr;
+    }
+
     self->setThrowTypeError(throwTypeError);
 
     return functionProto;
 }
 
 const Class JSFunction::class_ = {
     js_Function_str,
     JSCLASS_NEW_RESOLVE | JSCLASS_IMPLEMENTS_BARRIERS |
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -1224,18 +1224,23 @@ JSObject::getSealedOrFrozenAttributes(un
 }
 
 /* static */ bool
 JSObject::sealOrFreeze(JSContext *cx, HandleObject obj, ImmutabilityType it)
 {
     assertSameCompartment(cx, obj);
     MOZ_ASSERT(it == SEAL || it == FREEZE);
 
-    if (!JSObject::preventExtensions(cx, obj))
+    bool succeeded;
+    if (!JSObject::preventExtensions(cx, obj, &succeeded))
         return false;
+    if (!succeeded) {
+        JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_CANT_CHANGE_EXTENSIBILITY);
+        return false;
+    }
 
     AutoIdVector props(cx);
     if (!GetPropertyKeys(cx, obj, JSITER_HIDDEN | JSITER_OWNONLY | JSITER_SYMBOLS, &props))
         return false;
 
     /* preventExtensions must sparsify dense objects, so we can assign to holes without checks. */
     MOZ_ASSERT_IF(obj->isNative(), obj->as<NativeObject>().getDenseCapacity() == 0);
 
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -347,23 +347,63 @@ class JSObject : public js::gc::Cell
      * 3. JSObject::getProto(cx, obj, &proto) computes the proto of an object.
      *    If obj is a proxy and the proto is lazy, this code may allocate or
      *    GC in order to compute the proto. Currently, it will not run JS code.
      */
 
     js::TaggedProto getTaggedProto() const {
         return type_->proto();
     }
+
     bool hasTenuredProto() const;
 
     bool uninlinedIsProxy() const;
+
     JSObject *getProto() const {
         MOZ_ASSERT(!uninlinedIsProxy());
         return getTaggedProto().toObjectOrNull();
     }
+
+    // Normal objects and a subset of proxies have uninteresting [[Prototype]].
+    // For such objects the [[Prototype]] is just a value returned when needed
+    // for accesses, or modified in response to requests.  These objects store
+    // the [[Prototype]] directly within |obj->type_|.
+    //
+    // Proxies that don't have such a simple [[Prototype]] instead have a
+    // "lazy" [[Prototype]].  Accessing the [[Prototype]] of such an object
+    // requires going through the proxy handler {get,set}PrototypeOf and
+    // setImmutablePrototype methods.  This is most commonly useful for proxies
+    // that are wrappers around other objects.  If the [[Prototype]] of the
+    // underlying object changes, the [[Prototype]] of the wrapper must also
+    // simultaneously change.  We implement this by having the handler methods
+    // simply delegate to the wrapped object, forwarding its response to the
+    // caller.
+    //
+    // This method returns true if this object has a non-simple [[Prototype]]
+    // as described above, or false otherwise.
+    bool hasLazyPrototype() const {
+        bool lazy = getTaggedProto().isLazy();
+        MOZ_ASSERT_IF(lazy, uninlinedIsProxy());
+        return lazy;
+    }
+
+    // True iff this object's [[Prototype]] is immutable.  Must not be called
+    // on proxies with lazy [[Prototype]]!
+    bool nonLazyPrototypeIsImmutable() const {
+        MOZ_ASSERT(!hasLazyPrototype());
+        return lastProperty()->hasObjectFlag(js::BaseShape::IMMUTABLE_PROTOTYPE);
+    }
+
+    // Attempt to make |obj|'s [[Prototype]] immutable, such that subsequently
+    // trying to change it will not work.  If an internal error occurred,
+    // returns false.  Otherwise, |*succeeded| is set to true iff |obj|'s
+    // [[Prototype]] is now immutable.
+    static bool
+    setImmutablePrototype(js::ExclusiveContext *cx, JS::HandleObject obj, bool *succeeded);
+
     static inline bool getProto(JSContext *cx, js::HandleObject obj,
                                 js::MutableHandleObject protop);
     // Returns false on error, success of operation in outparam.
     static inline bool setProto(JSContext *cx, JS::HandleObject obj,
                                 JS::HandleObject proto, bool *succeeded);
 
     // uninlinedSetType() is the same as setType(), but not inlined.
     inline void setType(js::types::TypeObject *newType);
@@ -469,20 +509,21 @@ class JSObject : public js::gc::Cell
     // all as friends.
     bool nonProxyIsExtensible() const {
         MOZ_ASSERT(!uninlinedIsProxy());
 
         // [[Extensible]] for ordinary non-proxy objects is an object flag.
         return !lastProperty()->hasObjectFlag(js::BaseShape::NOT_EXTENSIBLE);
     }
 
-    // Attempt to change the [[Extensible]] bit on |obj| to false.  Callers
-    // must ensure that |obj| is currently extensible before calling this!
+    // Attempt to change the [[Extensible]] bit on |obj| to false.  Indicate
+    // success or failure through the |*succeeded| outparam, or actual error
+    // through the return value.
     static bool
-    preventExtensions(JSContext *cx, js::HandleObject obj);
+    preventExtensions(JSContext *cx, js::HandleObject obj, bool *succeeded);
 
   private:
     enum ImmutabilityType { SEAL, FREEZE };
 
     /*
      * The guts of Object.seal (ES5 15.2.3.8) and Object.freeze (ES5 15.2.3.9): mark the
      * object as non-extensible, and adjust each property's attributes appropriately: each
      * property becomes non-configurable, and if |freeze|, data properties become
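[editor's note] setImmutablePrototype follows the same two-channel convention as preventExtensions: false means an internal error with an exception pending, while *succeeded reports whether the [[Prototype]] is now pinned. A hypothetical use, sketched against this patch's API (assuming the engine-internal headers, e.g. jsobj.h):

    // Hypothetical helper: pin obj's [[Prototype]]; later setProto calls
    // will then report failure via their own *succeeded outparam rather
    // than by throwing.
    static bool PinPrototype(js::ExclusiveContext *cx, JS::HandleObject obj) {
        bool succeeded;
        if (!JSObject::setImmutablePrototype(cx, obj, &succeeded))
            return false;    // internal error, exception pending
        return succeeded;    // false: this object cannot pin its [[Prototype]]
    }
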
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -176,22 +176,33 @@ JSObject::getProto(JSContext *cx, js::Ha
         protop.set(obj->getTaggedProto().toObjectOrNull());
         return true;
     }
 }
 
 /* static */ inline bool
 JSObject::setProto(JSContext *cx, JS::HandleObject obj, JS::HandleObject proto, bool *succeeded)
 {
-    /* Proxies live in their own little world. */
-    if (obj->getTaggedProto().isLazy()) {
+    /*
+     * If |obj| has a "lazy" [[Prototype]], it is 1) a proxy 2) whose handler's
+     * {get,set}PrototypeOf and setImmutablePrototype methods mediate access to
+     * |obj.[[Prototype]]|.  The Proxy subsystem is responsible for responding
+     * to such attempts.
+     */
+    if (obj->hasLazyPrototype()) {
         MOZ_ASSERT(obj->is<js::ProxyObject>());
         return js::Proxy::setPrototypeOf(cx, obj, proto, succeeded);
     }
 
+    /* Disallow mutation of immutable [[Prototype]]s. */
+    if (obj->nonLazyPrototypeIsImmutable()) {
+        *succeeded = false;
+        return true;
+    }
+
     /*
      * Disallow mutating the [[Prototype]] on ArrayBuffer objects, which
      * due to their complicated delegate-object shenanigans can't easily
      * have a mutable [[Prototype]].
      */
     if (obj->is<js::ArrayBufferObject>()) {
         JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_SETPROTOTYPEOF_FAIL,
                              "incompatible ArrayBuffer");
--- a/js/src/jsprototypes.h
+++ b/js/src/jsprototypes.h
@@ -117,12 +117,13 @@ IF_SAB(real,imaginary)(SharedUint8Array,
 IF_SAB(real,imaginary)(SharedInt16Array,        45,     js_InitViaClassSpec,       SHARED_TYPED_ARRAY_CLASP(Int16)) \
 IF_SAB(real,imaginary)(SharedUint16Array,       46,     js_InitViaClassSpec,       SHARED_TYPED_ARRAY_CLASP(Uint16)) \
 IF_SAB(real,imaginary)(SharedInt32Array,        47,     js_InitViaClassSpec,       SHARED_TYPED_ARRAY_CLASP(Int32)) \
 IF_SAB(real,imaginary)(SharedUint32Array,       48,     js_InitViaClassSpec,       SHARED_TYPED_ARRAY_CLASP(Uint32)) \
 IF_SAB(real,imaginary)(SharedFloat32Array,      49,     js_InitViaClassSpec,       SHARED_TYPED_ARRAY_CLASP(Float32)) \
 IF_SAB(real,imaginary)(SharedFloat64Array,      50,     js_InitViaClassSpec,       SHARED_TYPED_ARRAY_CLASP(Float64)) \
 IF_SAB(real,imaginary)(SharedUint8ClampedArray, 51,     js_InitViaClassSpec,       SHARED_TYPED_ARRAY_CLASP(Uint8Clamped)) \
     real(TypedArray,            52,      js_InitViaClassSpec,      &js::TypedArrayObject::sharedTypedArrayPrototypeClass) \
+IF_SAB(real,imaginary)(Atomics,                 53,     js_InitAtomicsClass, OCLASP(Atomics)) \
 
 #define JS_FOR_EACH_PROTOTYPE(macro) JS_FOR_PROTOTYPES(macro,macro)
 
 #endif /* jsprototypes_h */
--- a/js/src/jsproxy.h
+++ b/js/src/jsproxy.h
@@ -251,28 +251,32 @@ class JS_FRIEND_API(BaseProxyHandler)
     virtual bool getOwnPropertyDescriptor(JSContext *cx, HandleObject proxy, HandleId id,
                                           MutableHandle<JSPropertyDescriptor> desc) const = 0;
     virtual bool defineProperty(JSContext *cx, HandleObject proxy, HandleId id,
                                 MutableHandle<JSPropertyDescriptor> desc) const = 0;
     virtual bool ownPropertyKeys(JSContext *cx, HandleObject proxy,
                                  AutoIdVector &props) const = 0;
     virtual bool delete_(JSContext *cx, HandleObject proxy, HandleId id, bool *bp) const = 0;
     virtual bool enumerate(JSContext *cx, HandleObject proxy, AutoIdVector &props) const = 0;
-    virtual bool isExtensible(JSContext *cx, HandleObject proxy, bool *extensible) const = 0;
-    virtual bool preventExtensions(JSContext *cx, HandleObject proxy) const = 0;
 
     /*
      * These methods are standard, but the engine does not normally call them.
      * They're opt-in. See "Proxy prototype chains" above.
      *
      * getPrototypeOf() crashes if called. setPrototypeOf() throws a TypeError.
      */
     virtual bool getPrototypeOf(JSContext *cx, HandleObject proxy, MutableHandleObject protop) const;
     virtual bool setPrototypeOf(JSContext *cx, HandleObject proxy, HandleObject proto, bool *bp) const;
 
+    /* Non-standard but conceptual kin to {g,s}etPrototypeOf, so lives here. */
+    virtual bool setImmutablePrototype(JSContext *cx, HandleObject proxy, bool *succeeded) const;
+
+    virtual bool preventExtensions(JSContext *cx, HandleObject proxy, bool *succeeded) const = 0;
+    virtual bool isExtensible(JSContext *cx, HandleObject proxy, bool *extensible) const = 0;
+
     /*
      * These standard internal methods are implemented, as a convenience, so
      * that ProxyHandler subclasses don't have to provide every single method.
      *
      * The base-class implementations work by calling getPropertyDescriptor().
      * They do not follow any standard. When in doubt, override them.
      */
     virtual bool has(JSContext *cx, HandleObject proxy, HandleId id, bool *bp) const;
@@ -360,22 +364,24 @@ class JS_PUBLIC_API(DirectProxyHandler) 
     virtual bool defineProperty(JSContext *cx, HandleObject proxy, HandleId id,
                                 MutableHandle<JSPropertyDescriptor> desc) const MOZ_OVERRIDE;
     virtual bool ownPropertyKeys(JSContext *cx, HandleObject proxy,
                                  AutoIdVector &props) const MOZ_OVERRIDE;
     virtual bool delete_(JSContext *cx, HandleObject proxy, HandleId id,
                          bool *bp) const MOZ_OVERRIDE;
     virtual bool enumerate(JSContext *cx, HandleObject proxy,
                            AutoIdVector &props) const MOZ_OVERRIDE;
-    virtual bool isExtensible(JSContext *cx, HandleObject proxy, bool *extensible) const MOZ_OVERRIDE;
-    virtual bool preventExtensions(JSContext *cx, HandleObject proxy) const MOZ_OVERRIDE;
     virtual bool getPrototypeOf(JSContext *cx, HandleObject proxy,
                                 MutableHandleObject protop) const MOZ_OVERRIDE;
     virtual bool setPrototypeOf(JSContext *cx, HandleObject proxy, HandleObject proto,
                                 bool *bp) const MOZ_OVERRIDE;
+    virtual bool setImmutablePrototype(JSContext *cx, HandleObject proxy,
+                                       bool *succeeded) const MOZ_OVERRIDE;
+    virtual bool preventExtensions(JSContext *cx, HandleObject proxy, bool *succeeded) const MOZ_OVERRIDE;
+    virtual bool isExtensible(JSContext *cx, HandleObject proxy, bool *extensible) const MOZ_OVERRIDE;
     virtual bool has(JSContext *cx, HandleObject proxy, HandleId id,
                      bool *bp) const MOZ_OVERRIDE;
     virtual bool get(JSContext *cx, HandleObject proxy, HandleObject receiver,
                      HandleId id, MutableHandleValue vp) const MOZ_OVERRIDE;
     virtual bool set(JSContext *cx, HandleObject proxy, HandleObject receiver,
                      HandleId id, bool strict, MutableHandleValue vp) const MOZ_OVERRIDE;
     virtual bool call(JSContext *cx, HandleObject proxy, const CallArgs &args) const MOZ_OVERRIDE;
     virtual bool construct(JSContext *cx, HandleObject proxy, const CallArgs &args) const MOZ_OVERRIDE;
--- a/js/src/jswrapper.h
+++ b/js/src/jswrapper.h
@@ -117,22 +117,24 @@ class JS_FRIEND_API(CrossCompartmentWrap
     virtual bool getOwnPropertyDescriptor(JSContext *cx, HandleObject wrapper, HandleId id,
                                           MutableHandle<JSPropertyDescriptor> desc) const MOZ_OVERRIDE;
     virtual bool defineProperty(JSContext *cx, HandleObject wrapper, HandleId id,
                                 MutableHandle<JSPropertyDescriptor> desc) const MOZ_OVERRIDE;
     virtual bool ownPropertyKeys(JSContext *cx, HandleObject wrapper,
                                  AutoIdVector &props) const MOZ_OVERRIDE;
     virtual bool delete_(JSContext *cx, HandleObject wrapper, HandleId id, bool *bp) const MOZ_OVERRIDE;
     virtual bool enumerate(JSContext *cx, HandleObject wrapper, AutoIdVector &props) const MOZ_OVERRIDE;
-    virtual bool isExtensible(JSContext *cx, HandleObject wrapper, bool *extensible) const MOZ_OVERRIDE;
-    virtual bool preventExtensions(JSContext *cx, HandleObject wrapper) const MOZ_OVERRIDE;
     virtual bool getPrototypeOf(JSContext *cx, HandleObject proxy,
                                 MutableHandleObject protop) const MOZ_OVERRIDE;
     virtual bool setPrototypeOf(JSContext *cx, HandleObject proxy, HandleObject proto,
                                 bool *bp) const MOZ_OVERRIDE;
+    virtual bool setImmutablePrototype(JSContext *cx, HandleObject proxy,
+                                       bool *succeeded) const MOZ_OVERRIDE;
+    virtual bool preventExtensions(JSContext *cx, HandleObject wrapper, bool *succeeded) const MOZ_OVERRIDE;
+    virtual bool isExtensible(JSContext *cx, HandleObject wrapper, bool *extensible) const MOZ_OVERRIDE;
     virtual bool has(JSContext *cx, HandleObject wrapper, HandleId id, bool *bp) const MOZ_OVERRIDE;
     virtual bool get(JSContext *cx, HandleObject wrapper, HandleObject receiver,
                      HandleId id, MutableHandleValue vp) const MOZ_OVERRIDE;
     virtual bool set(JSContext *cx, HandleObject wrapper, HandleObject receiver,
                      HandleId id, bool strict, MutableHandleValue vp) const MOZ_OVERRIDE;
     virtual bool call(JSContext *cx, HandleObject wrapper, const CallArgs &args) const MOZ_OVERRIDE;
     virtual bool construct(JSContext *cx, HandleObject wrapper, const CallArgs &args) const MOZ_OVERRIDE;
 
@@ -178,19 +180,20 @@ class JS_FRIEND_API(SecurityWrapper) : p
     { }
 
     virtual bool enter(JSContext *cx, HandleObject wrapper, HandleId id, Wrapper::Action act,
                        bool *bp) const MOZ_OVERRIDE;
 
     virtual bool defineProperty(JSContext *cx, HandleObject wrapper, HandleId id,
                                 MutableHandle<JSPropertyDescriptor> desc) const MOZ_OVERRIDE;
     virtual bool isExtensible(JSContext *cx, HandleObject wrapper, bool *extensible) const MOZ_OVERRIDE;
-    virtual bool preventExtensions(JSContext *cx, HandleObject wrapper) const MOZ_OVERRIDE;
+    virtual bool preventExtensions(JSContext *cx, HandleObject wrapper, bool *succeeded) const MOZ_OVERRIDE;
     virtual bool setPrototypeOf(JSContext *cx, HandleObject proxy, HandleObject proto,
                                 bool *bp) const MOZ_OVERRIDE;
+    virtual bool setImmutablePrototype(JSContext *cx, HandleObject proxy, bool *succeeded) const MOZ_OVERRIDE;
 
     virtual bool nativeCall(JSContext *cx, IsAcceptableThis test, NativeImpl impl,
                             CallArgs args) const MOZ_OVERRIDE;
     virtual bool objectClassIs(HandleObject obj, ESClassValue classValue,
                                JSContext *cx) const MOZ_OVERRIDE;
     virtual bool regexp_toShared(JSContext *cx, HandleObject proxy, RegExpGuard *g) const MOZ_OVERRIDE;
     virtual bool boxedValue_unbox(JSContext *cx, HandleObject proxy, MutableHandleValue vp) const MOZ_OVERRIDE;
     virtual bool defaultValue(JSContext *cx, HandleObject wrapper, JSType hint,
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -94,16 +94,17 @@ EXPORTS.js += [
 ]
 
 UNIFIED_SOURCES += [
     'asmjs/AsmJSFrameIterator.cpp',
     'asmjs/AsmJSLink.cpp',
     'asmjs/AsmJSModule.cpp',
     'asmjs/AsmJSSignalHandlers.cpp',
     'asmjs/AsmJSValidate.cpp',
+    'builtin/AtomicsObject.cpp',
     'builtin/Eval.cpp',
     'builtin/Intl.cpp',
     'builtin/MapObject.cpp',
     'builtin/Object.cpp',
     'builtin/Profilers.cpp',
     'builtin/SIMD.cpp',
     'builtin/SymbolObject.cpp',
     'builtin/TestingFunctions.cpp',
--- a/js/src/proxy/BaseProxyHandler.cpp
+++ b/js/src/proxy/BaseProxyHandler.cpp
@@ -316,16 +316,23 @@ BaseProxyHandler::setPrototypeOf(JSConte
     // This keeps us away from the footgun of having the first proto set opt
     // you out of having dynamic protos altogether.
     JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_SETPROTOTYPEOF_FAIL,
                          "incompatible Proxy");
     return false;
 }
 
 bool
+BaseProxyHandler::setImmutablePrototype(JSContext *cx, HandleObject proxy, bool *succeeded) const
+{
+    *succeeded = false;
+    return true;
+}
+
+bool
 BaseProxyHandler::watch(JSContext *cx, HandleObject proxy, HandleId id, HandleObject callable) const
 {
     JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_CANT_WATCH,
                          proxy->getClass()->name);
     return false;
 }
 
 bool
--- a/js/src/proxy/CrossCompartmentWrapper.cpp
+++ b/js/src/proxy/CrossCompartmentWrapper.cpp
@@ -23,34 +23,16 @@ using namespace js;
             ok = (pre) && (op);                                 \
         }                                                       \
         return ok && (post);                                    \
     JS_END_MACRO
 
 #define NOTHING (true)
 
 bool
-CrossCompartmentWrapper::isExtensible(JSContext *cx, HandleObject wrapper, bool *extensible) const
-{
-    PIERCE(cx, wrapper,
-           NOTHING,
-           Wrapper::isExtensible(cx, wrapper, extensible),
-           NOTHING);
-}
-
-bool
-CrossCompartmentWrapper::preventExtensions(JSContext *cx, HandleObject wrapper) const
-{
-    PIERCE(cx, wrapper,
-           NOTHING,
-           Wrapper::preventExtensions(cx, wrapper),
-           NOTHING);
-}
-
-bool
 CrossCompartmentWrapper::getPropertyDescriptor(JSContext *cx, HandleObject wrapper, HandleId id,
                                                MutableHandle<PropertyDescriptor> desc) const
 {
     PIERCE(cx, wrapper,
            NOTHING,
            Wrapper::getPropertyDescriptor(cx, wrapper, id, desc),
            cx->compartment()->wrap(cx, desc));
 }
@@ -100,16 +82,71 @@ CrossCompartmentWrapper::enumerate(JSCon
 {
     PIERCE(cx, wrapper,
            NOTHING,
            Wrapper::enumerate(cx, wrapper, props),
            NOTHING);
 }
 
 bool
+CrossCompartmentWrapper::getPrototypeOf(JSContext *cx, HandleObject wrapper,
+                                        MutableHandleObject protop) const
+{
+    {
+        RootedObject wrapped(cx, wrappedObject(wrapper));
+        AutoCompartment call(cx, wrapped);
+        if (!JSObject::getProto(cx, wrapped, protop))
+            return false;
+        if (protop)
+            protop->setDelegate(cx);
+    }
+
+    return cx->compartment()->wrap(cx, protop);
+}
+
+bool
+CrossCompartmentWrapper::setPrototypeOf(JSContext *cx, HandleObject wrapper,
+                                        HandleObject proto, bool *bp) const
+{
+    RootedObject protoCopy(cx, proto);
+    PIERCE(cx, wrapper,
+           cx->compartment()->wrap(cx, &protoCopy),
+           Wrapper::setPrototypeOf(cx, wrapper, protoCopy, bp),
+           NOTHING);
+}
+
+bool
+CrossCompartmentWrapper::setImmutablePrototype(JSContext *cx, HandleObject wrapper, bool *succeeded) const
+{
+    PIERCE(cx, wrapper,
+           NOTHING,
+           Wrapper::setImmutablePrototype(cx, wrapper, succeeded),
+           NOTHING);
+}
+
+bool
+CrossCompartmentWrapper::preventExtensions(JSContext *cx, HandleObject wrapper,
+                                           bool *succeeded) const
+{
+    PIERCE(cx, wrapper,
+           NOTHING,
+           Wrapper::preventExtensions(cx, wrapper, succeeded),
+           NOTHING);
+}
+
+bool
+CrossCompartmentWrapper::isExtensible(JSContext *cx, HandleObject wrapper, bool *extensible) const
+{
+    PIERCE(cx, wrapper,
+           NOTHING,
+           Wrapper::isExtensible(cx, wrapper, extensible),
+           NOTHING);
+}
+
+bool
 CrossCompartmentWrapper::has(JSContext *cx, HandleObject wrapper, HandleId id, bool *bp) const
 {
     PIERCE(cx, wrapper,
            NOTHING,
            Wrapper::has(cx, wrapper, id, bp),
            NOTHING);
 }
 
@@ -399,43 +436,16 @@ CrossCompartmentWrapper::defaultValue(JS
                                       MutableHandleValue vp) const
 {
     PIERCE(cx, wrapper,
            NOTHING,
            Wrapper::defaultValue(cx, wrapper, hint, vp),
            cx->compartment()->wrap(cx, vp));
 }
 
-bool
-CrossCompartmentWrapper::getPrototypeOf(JSContext *cx, HandleObject wrapper,
-                                        MutableHandleObject protop) const
-{
-    {
-        RootedObject wrapped(cx, wrappedObject(wrapper));
-        AutoCompartment call(cx, wrapped);
-        if (!JSObject::getProto(cx, wrapped, protop))
-            return false;
-        if (protop)
-            protop->setDelegate(cx);
-    }
-
-    return cx->compartment()->wrap(cx, protop);
-}
-
-bool
-CrossCompartmentWrapper::setPrototypeOf(JSContext *cx, HandleObject wrapper,
-                                        HandleObject proto, bool *bp) const
-{
-    RootedObject protoCopy(cx, proto);
-    PIERCE(cx, wrapper,
-           cx->compartment()->wrap(cx, &protoCopy),
-           Wrapper::setPrototypeOf(cx, wrapper, protoCopy, bp),
-           NOTHING);
-}
-
 const CrossCompartmentWrapper CrossCompartmentWrapper::singleton(0u);
 
 bool
 js::IsCrossCompartmentWrapper(JSObject *obj)
 {
     return IsWrapper(obj) &&
            !!(Wrapper::wrapperHandler(obj)->flags() & Wrapper::CROSS_COMPARTMENT);
 }
--- a/js/src/proxy/DeadObjectProxy.cpp
+++ b/js/src/proxy/DeadObjectProxy.cpp
@@ -10,32 +10,16 @@
 #include "jsfun.h" // XXXefaust Bug 1064662
 
 #include "vm/ProxyObject.h"
 
 using namespace js;
 using namespace js::gc;
 
 bool
-DeadObjectProxy::isExtensible(JSContext *cx, HandleObject proxy, bool *extensible) const
-{
-    // This is kind of meaningless, but dead-object semantics aside,
-    // [[Extensible]] always being true is consistent with other proxy types.
-    *extensible = true;
-    return true;
-}
-
-bool
-DeadObjectProxy::preventExtensions(JSContext *cx, HandleObject proxy) const
-{
-    JS_ReportErrorNumber(cx, js_GetErrorMessage, nullptr, JSMSG_DEAD_OBJECT);
-    return false;
-}
-
-bool
 DeadObjectProxy::getPropertyDescriptor(JSContext *cx, HandleObject wrapper, HandleId id,
                                        MutableHandle<PropertyDescriptor> desc) const
 {