Merge mozilla-inbound to mozilla-central. a=merge
authorDorel Luca <dluca@mozilla.com>
Wed, 07 Feb 2018 11:58:17 +0200
changeset 402693 4fe6f6560083f8c8257282bef1d4e0ced9d1b975
parent 402692 2bd611e4debb29ff8dea29a90190f2bead634ba6 (current diff)
parent 402669 6f8bd3a6369d71200fec3041aab53eb9791d8583 (diff)
child 402694 65ad8e133d36d17e6f282ee79e8336a36caf8206
child 402718 874e75899ed29eff8dd6ceaa8969673ee8b76f2a
child 402730 ea00596eb02e86a919c6734a2307ff118a01d257
push id99624
push userdluca@mozilla.com
push dateWed, 07 Feb 2018 10:21:23 +0000
treeherdermozilla-inbound@65ad8e133d36 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersmerge
milestone60.0a1
first release with
nightly linux32
4fe6f6560083 / 60.0a1 / 20180207100355 / files
nightly linux64
4fe6f6560083 / 60.0a1 / 20180207100355 / files
nightly mac
4fe6f6560083 / 60.0a1 / 20180207100355 / files
nightly win32
4fe6f6560083 / 60.0a1 / 20180207100355 / files
nightly win64
4fe6f6560083 / 60.0a1 / 20180207100355 / files
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
releases
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge mozilla-inbound to mozilla-central. a=merge
dom/svg/test/test_SVGUnitTypes.html
dom/webidl/SVGUnitTypeValues.webidl
--- a/accessible/base/XULMap.h
+++ b/accessible/base/XULMap.h
@@ -1,7 +1,24 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-XULMAP(image, New_MaybeImageOrToolbarButtonAccessible)
-XULMAP(statusbar, New_StatusBarAccessible)
-XULMAP(menuseparator, New_MenuSeparator)
+XULMAP_TYPE(menuseparator, XULMenuSeparatorAccessible)
+XULMAP_TYPE(statusbar, XULStatusBarAccessible)
+
+XULMAP(
+  image,
+  [](nsIContent* aContent, Accessible* aContext) -> Accessible* {
+    if (aContent->IsElement() &&
+        aContent->AsElement()->HasAttr(kNameSpaceID_None, nsGkAtoms::onclick)) {
+      return new XULToolbarButtonAccessible(aContent, aContext->Document());
+    }
+
+    // Don't include nameless images in accessible tree.
+    if (!aContent->IsElement() ||
+        !aContent->AsElement()->HasAttr(kNameSpaceID_None, nsGkAtoms::tooltiptext)) {
+      return nullptr;
+    }
+
+    return new ImageAccessibleWrap(aContent, aContext->Document());
+  }
+)
--- a/accessible/base/nsAccessibilityService.cpp
+++ b/accessible/base/nsAccessibilityService.cpp
@@ -267,43 +267,16 @@ New_HTMLTableHeaderCellIfScope(nsIConten
 {
   if (aContext->IsTableRow() && aContext->GetContent() == aContent->GetParent() &&
       aContent->IsElement() &&
       aContent->AsElement()->HasAttr(kNameSpaceID_None, nsGkAtoms::scope))
     return new HTMLTableHeaderCellAccessibleWrap(aContent, aContext->Document());
   return nullptr;
 }
 
-#ifdef MOZ_XUL
-static Accessible*
-New_MaybeImageOrToolbarButtonAccessible(nsIContent* aContent,
-                                        Accessible* aContext)
-{
-  if (aContent->IsElement() &&
-      aContent->AsElement()->HasAttr(kNameSpaceID_None, nsGkAtoms::onclick)) {
-    return new XULToolbarButtonAccessible(aContent, aContext->Document());
-  }
-
-  // Don't include nameless images in accessible tree.
-  if (!aContent->IsElement() ||
-      !aContent->AsElement()->HasAttr(kNameSpaceID_None, nsGkAtoms::tooltiptext)) {
-    return nullptr;
-  }
-
-  return new ImageAccessibleWrap(aContent, aContext->Document());
-}
-static Accessible*
-New_MenuSeparator(nsIContent* aContent, Accessible* aContext)
-  { return new XULMenuSeparatorAccessible(aContent, aContext->Document()); }
-
-static Accessible*
-New_StatusBarAccessible(nsIContent* aContent, Accessible* aContext)
-  { return new XULStatusBarAccessible(aContent, aContext->Document()); }
-#endif
-
 /**
  * Cached value of the PREF_ACCESSIBILITY_FORCE_DISABLED preference.
  */
 static int32_t sPlatformDisabledState = 0;
 
 ////////////////////////////////////////////////////////////////////////////////
 // Markup maps array.
 
@@ -314,50 +287,59 @@ static int32_t sPlatformDisabledState = 
   { &nsGkAtoms::name, nullptr, &nsGkAtoms::DOMAttrName }
 
 #define AttrFromDOMIf(name, DOMAttrName, DOMAttrValue) \
   { &nsGkAtoms::name, nullptr,  &nsGkAtoms::DOMAttrName, &nsGkAtoms::DOMAttrValue }
 
 #define MARKUPMAP(atom, new_func, r, ... ) \
   { &nsGkAtoms::atom, new_func, static_cast<a11y::role>(r), { __VA_ARGS__ } },
 
-static const MarkupMapInfo sMarkupMapList[] = {
+static const HTMLMarkupMapInfo sHTMLMarkupMapList[] = {
   #include "MarkupMap.h"
 };
 
+#undef MARKUPMAP
+
 #ifdef MOZ_XUL
 #define XULMAP(atom, new_func) \
   { &nsGkAtoms::atom, new_func },
 
-static const XULMarkupMapInfo sXULMapList[] = {
+#define XULMAP_TYPE(atom, new_type) \
+XULMAP( \
+  atom, \
+  [](nsIContent* aContent, Accessible* aContext) -> Accessible* { \
+    return new new_type(aContent, aContext->Document()); \
+  } \
+)
+
+static const XULMarkupMapInfo sXULMarkupMapList[] = {
   #include "XULMap.h"
 };
+
+#undef XULMAP_TYPE
+#undef XULMAP
 #endif
 
 #undef Attr
 #undef AttrFromDOM
 #undef AttrFromDOMIf
-#undef MARKUPMAP
-#ifdef MOZ_XUL
-#undef XULMAP
-#endif
 
 ////////////////////////////////////////////////////////////////////////////////
 // nsAccessibilityService
 ////////////////////////////////////////////////////////////////////////////////
 
 nsAccessibilityService *nsAccessibilityService::gAccessibilityService = nullptr;
 ApplicationAccessible* nsAccessibilityService::gApplicationAccessible = nullptr;
 xpcAccessibleApplication* nsAccessibilityService::gXPCApplicationAccessible = nullptr;
 uint32_t nsAccessibilityService::gConsumers = 0;
 
 nsAccessibilityService::nsAccessibilityService() :
-  DocManager(), FocusManager(), mMarkupMaps(ArrayLength(sMarkupMapList))
+  DocManager(), FocusManager(), mHTMLMarkupMap(ArrayLength(sHTMLMarkupMapList))
 #ifdef MOZ_XUL
-  , mXULMarkupMaps(ArrayLength(sXULMapList))
+  , mXULMarkupMap(ArrayLength(sXULMarkupMapList))
 #endif
 {
 }
 
 nsAccessibilityService::~nsAccessibilityService()
 {
   NS_ASSERTION(IsShutdown(), "Accessibility wasn't shutdown!");
   gAccessibilityService = nullptr;
@@ -1165,18 +1147,18 @@ nsAccessibilityService::CreateAccessible
     bool isARIATablePart = roleMapEntry &&
       (roleMapEntry->accTypes & (eTableCell | eTableRow | eTable));
 
     if (!isARIATablePart ||
         frame->AccessibleType() == eHTMLTableCellType ||
         frame->AccessibleType() == eHTMLTableRowType ||
         frame->AccessibleType() == eHTMLTableType) {
       // Prefer to use markup to decide if and what kind of accessible to create,
-      const MarkupMapInfo* markupMap =
-        mMarkupMaps.Get(content->NodeInfo()->NameAtom());
+      const HTMLMarkupMapInfo* markupMap =
+        mHTMLMarkupMap.Get(content->NodeInfo()->NameAtom());
       if (markupMap && markupMap->new_func)
         newAcc = markupMap->new_func(content, aContext);
 
       if (!newAcc) // try by frame accessible type.
         newAcc = CreateAccessibleByFrameType(frame, content, aContext);
     }
 
     // In case of ARIA grid or table use table-specific classes if it's not
@@ -1229,17 +1211,17 @@ nsAccessibilityService::CreateAccessible
 
         return nullptr;
       }
     }
 
 #ifdef MOZ_XUL
     // Prefer to use XUL to decide if and what kind of accessible to create.
     const XULMarkupMapInfo* xulMap =
-      mXULMarkupMaps.Get(content->NodeInfo()->NameAtom());
+      mXULMarkupMap.Get(content->NodeInfo()->NameAtom());
     if (xulMap && xulMap->new_func) {
       newAcc = xulMap->new_func(content, aContext);
     }
 #endif
 
     // XBL bindings may use @role attribute to point the accessible type
     // they belong to.
     if (!newAcc) {
@@ -1266,18 +1248,18 @@ nsAccessibilityService::CreateAccessible
         // polyline and image. A 'use' and 'text' graphic elements require
         // special support.
         newAcc = new EnumRoleAccessible<roles::GRAPHIC>(content, document);
       } else if (content->IsSVGElement(nsGkAtoms::svg)) {
         newAcc = new EnumRoleAccessible<roles::DIAGRAM>(content, document);
       }
 
     } else if (content->IsMathMLElement()) {
-      const MarkupMapInfo* markupMap =
-        mMarkupMaps.Get(content->NodeInfo()->NameAtom());
+      const HTMLMarkupMapInfo* markupMap =
+        mHTMLMarkupMap.Get(content->NodeInfo()->NameAtom());
       if (markupMap && markupMap->new_func)
         newAcc = markupMap->new_func(content, aContext);
 
       // Fall back to text when encountering Content MathML.
       if (!newAcc && !content->IsAnyOfMathMLElements(nsGkAtoms::annotation_,
                                                      nsGkAtoms::annotation_xml_,
                                                      nsGkAtoms::mpadded_,
                                                      nsGkAtoms::mphantom_,
@@ -1345,22 +1327,22 @@ nsAccessibilityService::Init()
   // Subscribe to EventListenerService.
   nsCOMPtr<nsIEventListenerService> eventListenerService =
     do_GetService("@mozilla.org/eventlistenerservice;1");
   if (!eventListenerService)
     return false;
 
   eventListenerService->AddListenerChangeListener(this);
 
-  for (uint32_t i = 0; i < ArrayLength(sMarkupMapList); i++)
-    mMarkupMaps.Put(*sMarkupMapList[i].tag, &sMarkupMapList[i]);
+  for (uint32_t i = 0; i < ArrayLength(sHTMLMarkupMapList); i++)
+    mHTMLMarkupMap.Put(*sHTMLMarkupMapList[i].tag, &sHTMLMarkupMapList[i]);
 
 #ifdef MOZ_XUL
-  for (uint32_t i = 0; i < ArrayLength(sXULMapList); i++)
-    mXULMarkupMaps.Put(*sXULMapList[i].tag, &sXULMapList[i]);
+  for (uint32_t i = 0; i < ArrayLength(sXULMarkupMapList); i++)
+    mXULMarkupMap.Put(*sXULMarkupMapList[i].tag, &sXULMarkupMapList[i]);
 #endif
 
 #ifdef A11Y_LOG
   logging::CheckEnv();
 #endif
 
   gAccessibilityService = this;
   NS_ADDREF(gAccessibilityService); // will release in Shutdown()
@@ -1760,18 +1742,18 @@ nsAccessibilityService::CreateAccessible
 
   return newAcc.forget();
 }
 
 void
 nsAccessibilityService::MarkupAttributes(const nsIContent* aContent,
                                          nsIPersistentProperties* aAttributes) const
 {
-  const mozilla::a11y::MarkupMapInfo* markupMap =
-    mMarkupMaps.Get(aContent->NodeInfo()->NameAtom());
+  const mozilla::a11y::HTMLMarkupMapInfo* markupMap =
+    mHTMLMarkupMap.Get(aContent->NodeInfo()->NameAtom());
   if (!markupMap)
     return;
 
   for (uint32_t i = 0; i < ArrayLength(markupMap->attrs); i++) {
     const MarkupAttrInfo* info = markupMap->attrs + i;
     if (!info->name)
       break;
 
--- a/accessible/base/nsAccessibilityService.h
+++ b/accessible/base/nsAccessibilityService.h
@@ -56,17 +56,17 @@ typedef Accessible* (New_Accessible)(nsI
 struct MarkupAttrInfo {
   nsStaticAtom** name;
   nsStaticAtom** value;
 
   nsStaticAtom** DOMAttrName;
   nsStaticAtom** DOMAttrValue;
 };
 
-struct MarkupMapInfo {
+struct HTMLMarkupMapInfo {
   nsStaticAtom** tag;
   New_Accessible* new_func;
   a11y::role role;
   MarkupAttrInfo attrs[4];
 };
 
 #ifdef MOZ_XUL
 struct XULMarkupMapInfo {
@@ -238,18 +238,18 @@ public:
    * @param  aIsSubtreeHidden  [out, optional] indicates whether the node's
    *                             frame and its subtree is hidden
    */
   Accessible* CreateAccessible(nsINode* aNode, Accessible* aContext,
                                bool* aIsSubtreeHidden = nullptr);
 
   mozilla::a11y::role MarkupRole(const nsIContent* aContent) const
   {
-    const mozilla::a11y::MarkupMapInfo* markupMap =
-      mMarkupMaps.Get(aContent->NodeInfo()->NameAtom());
+    const mozilla::a11y::HTMLMarkupMapInfo* markupMap =
+      mHTMLMarkupMap.Get(aContent->NodeInfo()->NameAtom());
     return markupMap ? markupMap->role : mozilla::a11y::roles::NOTHING;
   }
 
   /**
    * Set the object attribute defined by markup for the given element.
    */
   void MarkupAttributes(const nsIContent* aContent,
                         nsIPersistentProperties* aAttributes) const;
@@ -343,19 +343,19 @@ private:
   static mozilla::a11y::ApplicationAccessible* gApplicationAccessible;
   static mozilla::a11y::xpcAccessibleApplication* gXPCApplicationAccessible;
 
   /**
    * Contains a set of accessibility service consumers.
    */
   static uint32_t gConsumers;
 
-  nsDataHashtable<nsPtrHashKey<const nsAtom>, const mozilla::a11y::MarkupMapInfo*> mMarkupMaps;
+  nsDataHashtable<nsPtrHashKey<const nsAtom>, const mozilla::a11y::HTMLMarkupMapInfo*> mHTMLMarkupMap;
 #ifdef MOZ_XUL
-  nsDataHashtable<nsPtrHashKey<const nsAtom>, const mozilla::a11y::XULMarkupMapInfo*> mXULMarkupMaps;
+  nsDataHashtable<nsPtrHashKey<const nsAtom>, const mozilla::a11y::XULMarkupMapInfo*> mXULMarkupMap;
 #endif
 
   friend nsAccessibilityService* GetAccService();
   friend nsAccessibilityService* GetOrCreateAccService(uint32_t);
   friend void MaybeShutdownAccService(uint32_t);
   friend void mozilla::a11y::PrefChanged(const char*, void*);
   friend mozilla::a11y::FocusManager* mozilla::a11y::FocusMgr();
   friend mozilla::a11y::SelectionManager* mozilla::a11y::SelectionMgr();
--- a/devtools/client/debugger/test/mochitest/browser_dbg_breakpoints-actual-location.js
+++ b/devtools/client/debugger/test/mochitest/browser_dbg_breakpoints-actual-location.js
@@ -28,26 +28,26 @@ function test() {
 
     Task.spawn(function* () {
       yield waitForSourceAndCaretAndScopes(gPanel, "-02.js", 1);
 
       is(queries.getBreakpoints(gController.getState()).length, 0,
          "There are no breakpoints in the editor");
 
       const response = yield actions.addBreakpoint({
-        actor: gSources.selectedValue, line: 4
+        actor: gSources.selectedValue, line: 5
       });
 
       ok(response.actualLocation, "has an actualLocation");
       is(response.actualLocation.line, 6, "moved to line 6");
 
       is(queries.getBreakpoints(gController.getState()).length, 1,
          "There is only one breakpoint in the editor");
 
-      ok(!queries.getBreakpoint(gController.getState(), { actor: gSources.selectedValue, line: 4 }),
+      ok(!queries.getBreakpoint(gController.getState(), { actor: gSources.selectedValue, line: 5 }),
          "There isn't any breakpoint added on an invalid line.");
       ok(queries.getBreakpoint(gController.getState(), { actor: gSources.selectedValue, line: 6 }),
          "There isn't any breakpoint added on an invalid line.");
 
       resumeDebuggerThenCloseAndFinish(gPanel);
     });
 
     callInTab(gTab, "firstCall");
--- a/devtools/server/tests/unit/test_blackboxing-01.js
+++ b/devtools/server/tests/unit/test_blackboxing-01.js
@@ -123,18 +123,16 @@ function evalCode() {
 }
 
 const runTest = Task.async(function* (onSteppedLocation, onDebuggerStatementFrames) {
   let packet = yield executeOnNextTickAndWaitForPause(gDebuggee.runTest,
                                                       gClient);
   Assert.equal(packet.why.type, "breakpoint");
 
   yield stepIn(gClient, gThreadClient);
-  yield stepIn(gClient, gThreadClient);
-  yield stepIn(gClient, gThreadClient);
 
   const location = yield getCurrentLocation();
   onSteppedLocation(location);
 
   packet = yield resumeAndWaitForPause(gClient, gThreadClient);
   Assert.equal(packet.why.type, "debuggerStatement");
 
   let { frames } = yield getFrames(gThreadClient, 0, 100);
--- a/docshell/base/nsDocShell.cpp
+++ b/docshell/base/nsDocShell.cpp
@@ -6406,19 +6406,27 @@ nsDocShell::ForceRefreshURI(nsIURI* aURI
    */
   loadInfo->SetSendReferrer(false);
 
   /* for most refreshes the current URI is an appropriate
    * internal referrer
    */
   loadInfo->SetReferrer(mCurrentURI);
 
-  /* Don't ever "guess" on which principal to use to avoid picking
-   * the current principal.
-   */
+  // Set the triggering principal to aPrincipal if available, or current
+  // document's principal otherwise.
+  nsCOMPtr<nsIPrincipal> principal = aPrincipal;
+  if (!principal) {
+    nsCOMPtr<nsIDocument> doc = GetDocument();
+    if (!doc) {
+      return NS_ERROR_FAILURE;
+    }
+    principal = doc->NodePrincipal();
+  }
+  loadInfo->SetTriggeringPrincipal(principal);
   loadInfo->SetPrincipalIsExplicit(true);
 
   /* Check if this META refresh causes a redirection
    * to another site.
    */
   bool equalUri = false;
   nsresult rv = aURI->Equals(mCurrentURI, &equalUri);
   if (NS_SUCCEEDED(rv) && (!equalUri) && aMetaRefresh &&
@@ -6436,23 +6444,16 @@ nsDocShell::ForceRefreshURI(nsIURI* aURI
     GetReferringURI(getter_AddRefs(internalReferrer));
     if (internalReferrer) {
       loadInfo->SetReferrer(internalReferrer);
     }
   } else {
     loadInfo->SetLoadType(nsIDocShellLoadInfo::loadRefresh);
   }
 
-  // If the principal is null, the refresh will have a triggeringPrincipal
-  // derived from the referrer URI, or will be set to the system principal
-  // if there is no refererrer. See LoadURI()
-  if (aPrincipal) {
-    loadInfo->SetTriggeringPrincipal(aPrincipal);
-  }
-
   /*
    * LoadURI(...) will cancel all refresh timers... This causes the
    * Timer and its refreshData instance to be released...
    */
   LoadURI(aURI, loadInfo, nsIWebNavigation::LOAD_FLAGS_DISALLOW_INHERIT_PRINCIPAL, true);
 
   return NS_OK;
 }
--- a/docshell/base/nsIRefreshURI.idl
+++ b/docshell/base/nsIRefreshURI.idl
@@ -14,36 +14,34 @@ interface nsIURI;
 interface nsIRefreshURI : nsISupports {
     /**
       * Load a uri after waiting for aMillis milliseconds. If the docshell
       * is busy loading a page currently, the refresh request will be
       * queued and executed when the current load finishes. 
       *
       * @param aUri The uri to refresh.
       * @param aPrincipal The triggeringPrincipal for the refresh load
-      *   May be null, in which case a principal will be built based on the
-      *   referrer URI of the previous docshell load, or will use the system
-      *   principal when there is no referrer.
+      *   May be null, in which case the principal of the current document
+      *   will be applied.
       * @param aMillis The number of milliseconds to wait.
       * @param aRepeat Flag to indicate if the uri is to be 
       *                repeatedly refreshed every aMillis milliseconds.
       * @param aMetaRefresh Flag to indicate if this is a Meta refresh.
       */
     void refreshURI(in nsIURI aURI, in nsIPrincipal aPrincipal,
                     in long aMillis, in boolean aRepeat,
                     in boolean aMetaRefresh);
 
     /**
       * Loads a URI immediately as if it were a refresh.
       *
       * @param aURI The URI to refresh.
       * @param aPrincipal The triggeringPrincipal for the refresh load
-      *   May be null, in which case a principal will be built based on the
-      *   referrer URI of the previous docshell load, or will use the system
-      *   principal when there is no referrer.
+      *   May be null, in which case the principal of the current document
+      *   will be applied.
       * @param aMillis The number of milliseconds by which this refresh would
       *                be delayed if it were not being forced.
       * @param aMetaRefresh Flag to indicate if this is a meta refresh.
       */
     void forceRefreshURI(in nsIURI aURI, in nsIPrincipal aPrincipal,
                          in long aMillis, in boolean aMetaRefresh);
 
     /**
@@ -63,19 +61,18 @@ interface nsIRefreshURI : nsISupports {
     /**
       * Parses the passed in header string and sets up a refreshURI if
       * a "refresh" header is found. If docshell is busy loading a page 
       * currently, the request will be queued and executed when 
       * the current page finishes loading. 
       *
       * @param aBaseURI base URI to resolve refresh uri with.
       * @param aPrincipal The triggeringPrincipal for the refresh load
-      *   May be null, in which case a principal will be built based on the
-      *   referrer URI of the previous docshell load, or will use the system
-      *   principal when there is no referrer.
+      *   May be null, in which case the principal of the current document
+      *   will be applied.
       * @param aHeader  The meta refresh header string.
       */
     void setupRefreshURIFromHeader(in nsIURI aBaseURI,
                                    in nsIPrincipal principal,
                                    in ACString aHeader);
 
     /**
       * Cancels all timer loads.
--- a/dom/base/nsDocument.cpp
+++ b/dom/base/nsDocument.cpp
@@ -8811,16 +8811,17 @@ nsDocument::OnPageHide(bool aPersisted,
       PageUnloadingEventTimeStamp timeStamp(this);
       DispatchPageTransition(target, NS_LITERAL_STRING("pagehide"), aPersisted);
     }
   }
 
   mVisible = false;
 
   UpdateVisibilityState();
+
   EnumerateExternalResources(NotifyPageHide, &aPersisted);
   EnumerateActivityObservers(NotifyActivityChanged, nullptr);
 
   ClearPendingFullscreenRequests(this);
   if (GetFullscreenElement()) {
     // If this document was fullscreen, we should exit fullscreen in this
     // doctree branch. This ensures that if the user navigates while in
     // fullscreen mode we don't leave its still visible ancestor documents
--- a/dom/base/test/unit/test_isequalnode.js
+++ b/dom/base/test/unit/test_isequalnode.js
@@ -1,13 +1,15 @@
 /* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+Components.utils.importGlobalProperties(["NodeFilter"]);
+
 function run_test()
 {
   /*
    * NOTE: [i] is not allowed in this test, since it's done via classinfo and
    * we don't have that in xpcshell; the workaround is item(i).  Suck.
    */
   init();
 
@@ -368,20 +370,20 @@ function test_isEqualNode_null()
   }
 }
 
 function test_isEqualNode_wholeDoc()
 {
   doc = ParseFile("isequalnode_data.xml");
   var doc2 = ParseFile("isequalnode_data.xml");
   var tw1 =
-    doc.createTreeWalker(doc, 0xFFFFFFFF /* NodeFilter.SHOW_ALL */,
+    doc.createTreeWalker(doc, NodeFilter.SHOW_ALL,
                          null);
   var tw2 =
-    doc2.createTreeWalker(doc2, 0xFFFFFFFF /* NodeFilter.SHOW_ALL */,
+    doc2.createTreeWalker(doc2, NodeFilter.SHOW_ALL,
                           null);
   do {
     check_eq_nodes(tw1.currentNode, tw2.currentNode);
     tw1.nextNode();
   } while(tw2.nextNode());
 }
 
 // UTILITY FUNCTIONS
--- a/dom/base/test/unit/test_range.js
+++ b/dom/base/test/unit/test_range.js
@@ -1,26 +1,28 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+Components.utils.importGlobalProperties(["NodeFilter"]);
+
 const C_i = Components.interfaces;
 
 const UNORDERED_TYPE = C_i.nsIDOMXPathResult.ANY_UNORDERED_NODE_TYPE;
 
 /**
  * Determine if the data node has only ignorable white-space.
  *
  * @return NodeFilter.FILTER_SKIP if it does.
  * @return NodeFilter.FILTER_ACCEPT otherwise.
  */
 function isWhitespace(aNode) {
   return ((/\S/).test(aNode.nodeValue)) ?
-         3 /* NodeFilter.FILTER_SKIP */ :
-         1 /* NodeFilter.FILTER_ACCEPT */;
+         NodeFilter.FILTER_SKIP :
+         NodeFilter.FILTER_ACCEPT;
 }
 
 /**
  * Create a DocumentFragment with cloned children equaling a node's children.
  *
  * @param aNode The node to copy from.
  *
  * @return DocumentFragment node.
@@ -77,56 +79,56 @@ function evalXPathInDocumentFragment(aCo
   var childIndex = 1;
   var bracketIndex = prefix.indexOf("[");
   if (bracketIndex != -1) {
     childIndex = Number(prefix.substring(bracketIndex + 1, prefix.indexOf("]")));
     Assert.ok(childIndex > 0);
     prefix = prefix.substr(0, bracketIndex);
   }
 
-  var targetType = 1 /* NodeFilter.SHOW_ELEMENT */;
+  var targetType = NodeFilter.SHOW_ELEMENT;
   var targetNodeName = prefix;
   if (prefix.indexOf("processing-instruction(") == 0) {
-    targetType = 0x40 /* NodeFilter.SHOW_PROCESSING_INSTRUCTION */;
+    targetType = NodeFilter.SHOW_PROCESSING_INSTRUCTION;
     targetNodeName = prefix.substring(prefix.indexOf("(") + 2, prefix.indexOf(")") - 1);
   }
   switch (prefix) {
     case "text()":
-      targetType = 4 | 8 /* NodeFilter.SHOW_TEXT | NodeFilter.SHOW_CDATA_SECTION*/;
+      targetType = NodeFilter.SHOW_TEXT | NodeFilter.SHOW_CDATA_SECTION;
       targetNodeName = null;
       break;
     case "comment()":
-      targetType = 0x80 /* NodeFilter.SHOW_COMMENT */;
+      targetType = NodeFilter.SHOW_COMMENT;
       targetNodeName = null;
       break;
     case "node()":
-      targetType = 0xFFFFFFFF /* NodeFilter.SHOW_ALL */;
+      targetType = NodeFilter.SHOW_ALL;
       targetNodeName = null;
   }
 
   var filter = {
     count: 0,
 
     // NodeFilter
     acceptNode: function acceptNode(aNode) {
       if (aNode.parentNode != aContextNode) {
         // Don't bother looking at kids either.
-        return 2 /* NodeFilter.FILTER_REJECT */;
+        return NodeFilter.FILTER_REJECT;
       }
 
       if (targetNodeName && targetNodeName != aNode.nodeName) {
-        return 3 /* NodeFilter.FILTER_SKIP */;
+        return NodeFilter.FILTER_SKIP;
       }
 
       this.count++;
       if (this.count != childIndex) {
-        return 3 /* NodeFilter.FILTER_SKIP */;
+        return NodeFilter.FILTER_SKIP;
       }
 
-      return 1 /* NodeFilter.FILTER_ACCEPT */;
+      return NodeFilter.FILTER_ACCEPT;
     }
   };
 
   // Look for the node matching the step from the document fragment.
   var walker = aContextNode.ownerDocument.createTreeWalker(
                  aContextNode,
                  targetType,
                  filter);
@@ -177,18 +179,18 @@ function getParsedDocument(aPath) {
 
 function processParsedDocument(doc) {
   Assert.ok(doc.documentElement.localName != "parsererror");
   Assert.ok(doc instanceof C_i.nsIDOMXPathEvaluator);
   Assert.ok(doc instanceof C_i.nsIDOMDocument);
 
   // Clean out whitespace.
   var walker = doc.createTreeWalker(doc,
-                                    4 | 8 /* NodeFilter.SHOW_TEXT |
-					     NodeFilter.SHOW_CDATA_SECTION */,
+                                    NodeFilter.SHOW_TEXT |
+                                    NodeFilter.SHOW_CDATA_SECTION,
                                     isWhitespace);
   while (walker.nextNode()) {
     var parent = walker.currentNode.parentNode;
     parent.removeChild(walker.currentNode);
     walker.currentNode = parent;
   }
 
   // Clean out mandatory splits between nodes.
@@ -272,18 +274,18 @@ function do_extract_test(doc) {
       Assert.ok(extractFrag.isEqualNode(cutFragment));
     } else {
       Assert.equal(extractFrag.firstChild, null);
     }
     Assert.ok(baseFrag.isEqualNode(resultFrag));
 
     dump("Ensure the original nodes weren't extracted - test " + i + "\n\n");
     var walker = doc.createTreeWalker(baseFrag,
-				      0xFFFFFFFF /* NodeFilter.SHOW_ALL */,
-				      null);
+                                      NodeFilter.SHOW_ALL,
+                                      null);
     var foundStart = false;
     var foundEnd = false;
     do {
       if (walker.currentNode == startContainer) {
         foundStart = true;
       }
 
       if (walker.currentNode == endContainer) {
@@ -305,17 +307,17 @@ function do_extract_test(doc) {
     baseRange = getRange(baseSource, baseFrag);
     var startContainer = baseRange.startContainer;
     var endContainer = baseRange.endContainer;
     baseRange.deleteContents();
     Assert.ok(baseFrag.isEqualNode(resultFrag));
 
     dump("Ensure the original nodes weren't deleted - test " + i + "\n\n");
     walker = doc.createTreeWalker(baseFrag,
-                                  0xFFFFFFFF /* NodeFilter.SHOW_ALL */,
+                                  NodeFilter.SHOW_ALL,
                                   null);
     foundStart = false;
     foundEnd = false;
     do {
       if (walker.currentNode == startContainer) {
         foundStart = true;
       }
 
--- a/dom/base/test/unit/test_treewalker.js
+++ b/dom/base/test/unit/test_treewalker.js
@@ -1,26 +1,28 @@
 /* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+Components.utils.importGlobalProperties(["NodeFilter"]);
+
 function run_test()
 {
   test_treeWalker_currentNode();
 }
 
 // TEST CODE
 
 function test_treeWalker_currentNode()
 {
   var XHTMLDocString = '<html xmlns="http://www.w3.org/1999/xhtml">';
   XHTMLDocString += '<body><input/>input</body></html>';
 
   var doc = ParseXML(XHTMLDocString);
 
   var body = doc.getElementsByTagName("body")[0];
-  var filter = 1 | 4 /* NodeFilter.SHOW_ELEMENT | NodeFilter.SHOW_TEXT */;
+  var filter = NodeFilter.SHOW_ELEMENT | NodeFilter.SHOW_TEXT;
   var walker = doc.createTreeWalker(body, filter, null);
   walker.currentNode = body.firstChild;
   walker.nextNode();
 }
 
--- a/dom/html/HTMLMediaElement.cpp
+++ b/dom/html/HTMLMediaElement.cpp
@@ -19,17 +19,16 @@
 #include "mozilla/Sprintf.h"
 
 #include "AutoplayPolicy.h"
 #include "base/basictypes.h"
 #include "nsIDOMHTMLMediaElement.h"
 #include "TimeRanges.h"
 #include "nsGenericHTMLElement.h"
 #include "nsAttrValueInlines.h"
-#include "nsDocShellLoadTypes.h"
 #include "nsPresContext.h"
 #include "nsIClassOfService.h"
 #include "nsIPresShell.h"
 #include "nsGkAtoms.h"
 #include "nsSize.h"
 #include "nsIFrame.h"
 #include "nsIDocument.h"
 #include "nsIDOMDocument.h"
@@ -3838,36 +3837,32 @@ HTMLMediaElement::AddMediaElementToURITa
   }
   MediaElementSetForURI* entry = gElementTable->PutEntry(mLoadingSrc);
   entry->mElements.AppendElement(this);
   NS_ASSERTION(MediaElementTableCount(this, mLoadingSrc) == 1,
     "Should have a single entry for element in element table after addition");
 }
 
 void
-HTMLMediaElement::RemoveMediaElementFromURITable(bool aFroceClearEntry)
+HTMLMediaElement::RemoveMediaElementFromURITable()
 {
   if (!mDecoder || !mLoadingSrc || !gElementTable) {
     return;
   }
   MediaElementSetForURI* entry = gElementTable->GetEntry(mLoadingSrc);
   if (!entry) {
     return;
   }
-  if (aFroceClearEntry) {
+  entry->mElements.RemoveElement(this);
+  if (entry->mElements.IsEmpty()) {
     gElementTable->RemoveEntry(entry);
-  } else {
-    entry->mElements.RemoveElement(this);
-    if (entry->mElements.IsEmpty()) {
-      gElementTable->RemoveEntry(entry);
-    }
-  }
-  if (gElementTable->Count() == 0) {
-    delete gElementTable;
-    gElementTable = nullptr;
+    if (gElementTable->Count() == 0) {
+      delete gElementTable;
+      gElementTable = nullptr;
+    }
   }
   NS_ASSERTION(MediaElementTableCount(this, mLoadingSrc) == 0,
     "After remove, should no longer have an entry in element table");
 }
 
 HTMLMediaElement*
 HTMLMediaElement::LookupMediaElementURITable(nsIURI* aURI)
 {
@@ -3949,116 +3944,16 @@ private:
   }
   // Guaranteed to be valid by HTMLMediaElement.
   HTMLMediaElement* mWeak = nullptr;
   Phase mPhase = Phase::Init;
 };
 
 NS_IMPL_ISUPPORTS(HTMLMediaElement::ShutdownObserver, nsIObserver)
 
-class HTMLMediaElement::ForceReloadListener : public nsIWebProgressListener
-                                            , public nsSupportsWeakReference
-{
-public:
-  NS_DECL_ISUPPORTS
-
-  void Subscribe(HTMLMediaElement* aPtr, nsIWebProgress* aWebProgress)
-  {
-    MOZ_DIAGNOSTIC_ASSERT(!mWeak);
-    MOZ_DIAGNOSTIC_ASSERT(aWebProgress);
-    mWeak = aPtr;
-    aWebProgress->AddProgressListener(this,
-                                      nsIWebProgress::NOTIFY_STATE_NETWORK);
-  }
-  void Unsubscribe(nsIWebProgress* aWebProgress)
-  {
-    MOZ_DIAGNOSTIC_ASSERT(mWeak);
-    mWeak = nullptr;
-    if (aWebProgress) {
-      aWebProgress->RemoveProgressListener(this);
-    }
-  }
-
-  NS_IMETHODIMP OnStateChange(nsIWebProgress* aWebProgress,
-                              nsIRequest* aRequest,
-                              uint32_t aProgressStateFlags,
-                              nsresult aStatus) override
-  {
-    MOZ_DIAGNOSTIC_ASSERT(mWeak);
-    if ((aProgressStateFlags & STATE_IS_NETWORK) &&
-        (aProgressStateFlags & STATE_START)) {
-      // Query the LoadType to see if it's a ctrl+F5.
-      nsCOMPtr<nsIDocShell> shell(do_QueryInterface(aWebProgress));
-      if (shell) {
-        uint32_t loadType;
-        shell->GetLoadType(&loadType);
-        if (LOAD_RELOAD_BYPASS_PROXY_AND_CACHE == loadType && mWeak->mDecoder) {
-          mWeak->RemoveMediaElementFromURITable(true);
-          mWeak->ShutdownDecoder();
-        }
-      }
-    }
-    return NS_OK;
-  }
-
-  NS_IMETHODIMP
-  OnProgressChange(nsIWebProgress* aProgress,
-                   nsIRequest* aRequest,
-                   int32_t aCurSelfProgress,
-                   int32_t aMaxSelfProgress,
-                   int32_t aCurTotalProgress,
-                   int32_t aMaxTotalProgress) override
-  {
-    NS_NOTREACHED("notification excluded in AddProgressListener(...)");
-    return NS_OK;
-  }
-
-  NS_IMETHODIMP
-  OnLocationChange(nsIWebProgress* aWebProgress,
-                   nsIRequest* aRequest,
-                   nsIURI* aLocation,
-                   uint32_t aFlags) override
-  {
-    NS_NOTREACHED("notification excluded in AddProgressListener(...)");
-    return NS_OK;
-  }
-
-  NS_IMETHODIMP
-  OnStatusChange(nsIWebProgress* aWebProgress,
-                 nsIRequest* aRequest,
-                 nsresult aStatus,
-                 const char16_t* aMessage) override
-  {
-    NS_NOTREACHED("notification excluded in AddProgressListener(...)");
-    return NS_OK;
-  }
-
-  NS_IMETHODIMP
-  OnSecurityChange(nsIWebProgress* aWebProgress,
-                   nsIRequest* aRequest,
-                   uint32_t aState) override
-  {
-    NS_NOTREACHED("notification excluded in AddProgressListener(...)");
-    return NS_OK;
-  }
-
-protected:
-  virtual ~ForceReloadListener()
-  {
-    MOZ_DIAGNOSTIC_ASSERT(!mWeak);
-  }
-
-private:
-  HTMLMediaElement* mWeak = nullptr;
-};
-
-NS_IMPL_ISUPPORTS(HTMLMediaElement::ForceReloadListener,
-                  nsIWebProgressListener,
-                  nsISupportsWeakReference)
-
 HTMLMediaElement::HTMLMediaElement(already_AddRefed<mozilla::dom::NodeInfo>& aNodeInfo)
   : nsGenericHTMLElement(aNodeInfo),
     mMainThreadEventTarget(OwnerDoc()->EventTargetFor(TaskCategory::Other)),
     mAbstractMainThread(OwnerDoc()->AbstractMainThreadFor(TaskCategory::Other)),
     mSrcStreamTracksAvailable(false),
     mSrcStreamPausedCurrentTime(-1),
     mShutdownObserver(new ShutdownObserver),
     mSourcePointer(nullptr),
@@ -4123,43 +4018,24 @@ HTMLMediaElement::HTMLMediaElement(alrea
 
   double defaultVolume = Preferences::GetFloat("media.default_volume", 1.0);
   SetVolume(defaultVolume, rv);
 
   RegisterActivityObserver();
   NotifyOwnerDocumentActivityChanged();
 
   mShutdownObserver->Subscribe(this);
-  nsIDocShell* docShell = OwnerDoc()->GetDocShell();
-  if (docShell) {
-    nsCOMPtr<nsIDocShellTreeItem> root;
-    docShell->GetSameTypeRootTreeItem(getter_AddRefs(root));
-    nsCOMPtr<nsIWebProgress> webProgress = do_GetInterface(root);
-    if (webProgress) {
-      mForceReloadListener = new ForceReloadListener();
-      mForceReloadListener->Subscribe(this, webProgress);
-    }
-  }
 }
 
 HTMLMediaElement::~HTMLMediaElement()
 {
   NS_ASSERTION(!mHasSelfReference,
                "How can we be destroyed if we're still holding a self reference?");
+
   mShutdownObserver->Unsubscribe();
-  nsIDocShell* docShell = OwnerDoc()->GetDocShell();
-  nsCOMPtr<nsIWebProgress> webProgress;
-  if (docShell) {
-    nsCOMPtr<nsIDocShellTreeItem> root;
-    docShell->GetSameTypeRootTreeItem(getter_AddRefs(root));
-    webProgress = do_GetInterface(root);
-  }
-  if (mForceReloadListener) {
-    mForceReloadListener->Unsubscribe(webProgress);
-  }
 
   if (mVideoFrameContainer) {
     mVideoFrameContainer->ForgetElement();
   }
   UnregisterActivityObserver();
 
   mSetCDMRequest.DisconnectIfExists();
   if (mDecoder) {
--- a/dom/html/HTMLMediaElement.h
+++ b/dom/html/HTMLMediaElement.h
@@ -808,17 +808,16 @@ protected:
   class ChannelLoader;
   class ErrorSink;
   class MediaLoadListener;
   class MediaStreamTracksAvailableCallback;
   class MediaStreamTrackListener;
   class StreamListener;
   class StreamSizeListener;
   class ShutdownObserver;
-  class ForceReloadListener;
 
   MediaDecoderOwner::NextFrameStatus NextFrameStatus();
 
   void SetDecoder(MediaDecoder* aDecoder);
 
   class WakeLockBoolWrapper {
   public:
     WakeLockBoolWrapper(bool aVal, HTMLMediaElement& aOuter)
@@ -985,17 +984,17 @@ protected:
 
   /**
    * Call this after setting up mLoadingSrc and mDecoder.
    */
   void AddMediaElementToURITable();
   /**
    * Call this before modifying mLoadingSrc.
    */
-  void RemoveMediaElementFromURITable(bool aFroceClearEntry = false);
+  void RemoveMediaElementFromURITable();
   /**
    * Call this to find a media element with the same NodePrincipal and mLoadingSrc
    * set to aURI, and with a decoder on which Load() has been called.
    */
   HTMLMediaElement* LookupMediaElementURITable(nsIURI* aURI);
 
   /**
    * Shutdown and clear mDecoder and maintain associated invariants.
@@ -1398,17 +1397,16 @@ protected:
   RefPtr<StreamListener> mMediaStreamListener;
   // Holds a reference to the size-getting MediaStreamListener attached to
   // mSrcStream.
   RefPtr<StreamSizeListener> mMediaStreamSizeListener;
   // The selected video stream track which contained mMediaStreamSizeListener.
   RefPtr<VideoStreamTrack> mSelectedVideoStreamTrack;
 
   const RefPtr<ShutdownObserver> mShutdownObserver;
-  RefPtr<ForceReloadListener> mForceReloadListener;
 
   // Holds a reference to the MediaSource, if any, referenced by the src
   // attribute on the media element.
   RefPtr<MediaSource> mSrcMediaSource;
 
   // Holds a reference to the MediaSource supplying data for playback.  This
   // may either match mSrcMediaSource or come from Source element children.
   // This is set when and only when mLoadingSrc corresponds to an object url
--- a/dom/svg/test/mochitest.ini
+++ b/dom/svg/test/mochitest.ini
@@ -78,17 +78,16 @@ skip-if = android_version == '18' # bug 
 [test_SVG_namespace_ids.html]
 [test_SVGNumberList.xhtml]
 [test_SVGPathSegList.xhtml]
 [test_SVGPointList.xhtml]
 [test_SVGStringList.xhtml]
 [test_SVGStyleElement.xhtml]
 [test_SVGTransformListAddition.xhtml]
 [test_SVGTransformList.xhtml]
-[test_SVGUnitTypes.html]
 [test_SVGxxxListIndexing.xhtml]
 [test_SVGxxxList.xhtml]
 [test_switch.xhtml]
 [test_tabindex.html]
 [test_tearoff_with_cc.html]
 support-files = tearoff_with_cc_helper.html
 [test_text_2.html]
 [test_text_dirty.html]
deleted file mode 100644
--- a/dom/svg/test/test_SVGUnitTypes.html
+++ /dev/null
@@ -1,41 +0,0 @@
-<!DOCTYPE html>
-<html xmlns="http://www.w3.org/1999/xhtml">
-<!--
-https://bugzilla.mozilla.org/show_bug.cgi?id=366697
--->
-<head>
-<title>Test for Bug 842201</title>
-  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
-</head>
-<body>
-<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=842201">Mozilla Bug 842201</a>
-<p id="display"></p>
-<div id="content" style="display: none"></div>
-
-<pre id="test">
-<script class="testbody" type="application/javascript">
-SimpleTest.waitForExplicitFinish();
-
-function runTest()
-{
-  is(SVGPatternElement.SVG_UNIT_TYPE_OBJECTBOUNDINGBOX, 2,
-     "Pattern should implement SVGUnitTypes values");
-  is(SVGFilterElement.SVG_UNIT_TYPE_OBJECTBOUNDINGBOX, 2,
-     "Filter should implement SVGUnitTypes values");
-  is(SVGMaskElement.SVG_UNIT_TYPE_OBJECTBOUNDINGBOX, 2,
-     "Mask should implement SVGUnitTypes values");
-  is(SVGClipPathElement.SVG_UNIT_TYPE_OBJECTBOUNDINGBOX, 2,
-     "ClipPath should implement SVGUnitTypes values");
-  is(SVGLinearGradientElement.SVG_UNIT_TYPE_OBJECTBOUNDINGBOX, 2,
-     "LinearGradient should implement SVGUnitTypes values");
-  is(SVGRadialGradientElement.SVG_UNIT_TYPE_OBJECTBOUNDINGBOX, 2,
-     "RadialGradient should implement SVGUnitTypes values");
-  SimpleTest.finish();
-}
-
-window.addEventListener("load", runTest);
-</script>
-</pre>
-</body>
-</html>
--- a/dom/webidl/SVGClipPathElement.webidl
+++ b/dom/webidl/SVGClipPathElement.webidl
@@ -12,10 +12,8 @@
 
 interface SVGClipPathElement : SVGElement {
   [Constant]
   readonly attribute SVGAnimatedEnumeration clipPathUnits;
   [Constant]
   readonly attribute SVGAnimatedTransformList transform;
 };
 
-SVGClipPathElement implements SVGUnitTypeValues;
-
--- a/dom/webidl/SVGFilterElement.webidl
+++ b/dom/webidl/SVGFilterElement.webidl
@@ -23,10 +23,9 @@ interface SVGFilterElement : SVGElement 
   readonly attribute SVGAnimatedLength width;
   [Constant]
   readonly attribute SVGAnimatedLength height;
 
   // ImageData apply(ImageData source);
 };
 
 SVGFilterElement implements SVGURIReference;
-SVGFilterElement implements SVGUnitTypeValues;
 
--- a/dom/webidl/SVGGradientElement.webidl
+++ b/dom/webidl/SVGGradientElement.webidl
@@ -22,9 +22,8 @@ interface SVGGradientElement : SVGElemen
   readonly attribute SVGAnimatedEnumeration gradientUnits;
   [Constant]
   readonly attribute SVGAnimatedTransformList gradientTransform;
   [Constant]
   readonly attribute SVGAnimatedEnumeration spreadMethod;
 };
 
 SVGGradientElement implements SVGURIReference;
-SVGGradientElement implements SVGUnitTypeValues;
--- a/dom/webidl/SVGMaskElement.webidl
+++ b/dom/webidl/SVGMaskElement.webidl
@@ -25,10 +25,8 @@ interface SVGMaskElement : SVGElement {
   [Constant]
   readonly attribute SVGAnimatedLength y;
   [Constant]
   readonly attribute SVGAnimatedLength width;
   [Constant]
   readonly attribute SVGAnimatedLength height;
 };
 
-SVGMaskElement implements SVGUnitTypeValues;
-
--- a/dom/webidl/SVGPatternElement.webidl
+++ b/dom/webidl/SVGPatternElement.webidl
@@ -24,9 +24,8 @@ interface SVGPatternElement : SVGElement
   [Constant]
   readonly attribute SVGAnimatedLength width;
   [Constant]
   readonly attribute SVGAnimatedLength height;
 };
 
 SVGPatternElement implements SVGFitToViewBox;
 SVGPatternElement implements SVGURIReference;
-SVGPatternElement implements SVGUnitTypeValues;
deleted file mode 100644
--- a/dom/webidl/SVGUnitTypeValues.webidl
+++ /dev/null
@@ -1,19 +0,0 @@
-/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this file,
- * You can obtain one at http://mozilla.org/MPL/2.0/.
- *
- * The origin of this IDL file is
- * https://svgwg.org/svg2-draft/
- *
- * Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
- * liability, trademark and document use rules apply.
- */
-
-[NoInterfaceObject]
-interface SVGUnitTypeValues {
-  // Unit Types
-  const unsigned short SVG_UNIT_TYPE_UNKNOWN = 0;
-  const unsigned short SVG_UNIT_TYPE_USERSPACEONUSE = 1;
-  const unsigned short SVG_UNIT_TYPE_OBJECTBOUNDINGBOX = 2;
-};
--- a/dom/webidl/SVGUnitTypes.webidl
+++ b/dom/webidl/SVGUnitTypes.webidl
@@ -6,11 +6,14 @@
  * The origin of this IDL file is
  * https://svgwg.org/svg2-draft/
  *
  * Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
  * liability, trademark and document use rules apply.
  */
 
 interface SVGUnitTypes {
+  // Unit Types
+  const unsigned short SVG_UNIT_TYPE_UNKNOWN = 0;
+  const unsigned short SVG_UNIT_TYPE_USERSPACEONUSE = 1;
+  const unsigned short SVG_UNIT_TYPE_OBJECTBOUNDINGBOX = 2;
 };
 
-SVGUnitTypes implements SVGUnitTypeValues;
--- a/dom/webidl/moz.build
+++ b/dom/webidl/moz.build
@@ -878,17 +878,16 @@ WEBIDL_FILES = [
     'SVGTextElement.webidl',
     'SVGTextPathElement.webidl',
     'SVGTextPositioningElement.webidl',
     'SVGTitleElement.webidl',
     'SVGTransform.webidl',
     'SVGTransformList.webidl',
     'SVGTSpanElement.webidl',
     'SVGUnitTypes.webidl',
-    'SVGUnitTypeValues.webidl',
     'SVGURIReference.webidl',
     'SVGUseElement.webidl',
     'SVGViewElement.webidl',
     'SVGZoomAndPan.webidl',
     'SVGZoomAndPanValues.webidl',
     'TCPServerSocket.webidl',
     'TCPServerSocketEvent.webidl',
     'TCPSocket.webidl',
--- a/js/src/frontend/BytecodeEmitter.cpp
+++ b/js/src/frontend/BytecodeEmitter.cpp
@@ -60,17 +60,26 @@ class BreakableControl;
 class LabelControl;
 class LoopControl;
 class ForOfLoopControl;
 class TryFinallyControl;
 
 static bool
 ParseNodeRequiresSpecialLineNumberNotes(ParseNode* pn)
 {
-    return pn->getKind() == ParseNodeKind::While || pn->getKind() == ParseNodeKind::For;
+    // The few node types listed below are exceptions to the usual
+    // location-source-note-emitting code in BytecodeEmitter::emitTree().
+    // Single-line `while` loops and C-style `for` loops require careful
+    // handling to avoid strange stepping behavior.
+    // Functions usually shouldn't have location information (bug 1431202).
+
+    ParseNodeKind kind = pn->getKind();
+    return kind == ParseNodeKind::While ||
+           kind == ParseNodeKind::For ||
+           kind == ParseNodeKind::Function;
 }
 
 // A cache that tracks superfluous TDZ checks.
 //
 // Each basic block should have a TDZCheckCache in scope. Some NestableControl
 // subclasses contain a TDZCheckCache.
 class BytecodeEmitter::TDZCheckCache : public Nestable<BytecodeEmitter::TDZCheckCache>
 {
@@ -2492,17 +2501,17 @@ bool
 BytecodeEmitter::emitCheckIsCallable(CheckIsCallableKind kind)
 {
     return emit2(JSOP_CHECKISCALLABLE, uint8_t(kind));
 }
 
 static inline unsigned
 LengthOfSetLine(unsigned line)
 {
-    return 1 /* SN_SETLINE */ + (line > SN_4BYTE_OFFSET_MASK ? 4 : 1);
+    return 1 /* SRC_SETLINE */ + (line > SN_4BYTE_OFFSET_MASK ? 4 : 1);
 }
 
 /* Updates line number notes, not column notes. */
 bool
 BytecodeEmitter::updateLineNumberNotes(uint32_t offset)
 {
     TokenStreamAnyChars* ts = &parser.tokenStream();
     bool onThisLine;
@@ -7679,21 +7688,19 @@ BytecodeEmitter::emitFor(ParseNode* pn, 
 MOZ_NEVER_INLINE bool
 BytecodeEmitter::emitFunction(ParseNode* pn, bool needsProto)
 {
     FunctionBox* funbox = pn->pn_funbox;
     RootedFunction fun(cx, funbox->function());
     RootedAtom name(cx, fun->explicitName());
     MOZ_ASSERT_IF(fun->isInterpretedLazy(), fun->lazyScript());
 
-    /*
-     * Set the |wasEmitted| flag in the funbox once the function has been
-     * emitted. Function definitions that need hoisting to the top of the
-     * function will be seen by emitFunction in two places.
-     */
+    // Set the |wasEmitted| flag in the funbox once the function has been
+    // emitted. Function definitions that need hoisting to the top of the
+    // function will be seen by emitFunction in two places.
     if (funbox->wasEmitted) {
         // Annex B block-scoped functions are hoisted like any other
         // block-scoped function to the top of their scope. When their
         // definitions are seen for the second time, we need to emit the
         // assignment that assigns the function to the outer 'var' binding.
         if (funbox->isAnnexB) {
             auto emitRhs = [&name](BytecodeEmitter* bce, const NameLocation&, bool) {
                 // The RHS is the value of the lexically bound name in the
@@ -7730,22 +7737,20 @@ BytecodeEmitter::emitFunction(ParseNode*
 
         MOZ_ASSERT_IF(fun->hasScript(), fun->nonLazyScript());
         MOZ_ASSERT(pn->functionIsHoisted());
         return true;
     }
 
     funbox->wasEmitted = true;
 
-    /*
-     * Mark as singletons any function which will only be executed once, or
-     * which is inner to a lambda we only expect to run once. In the latter
-     * case, if the lambda runs multiple times then CloneFunctionObject will
-     * make a deep clone of its contents.
-     */
+    // Mark as singletons any function which will only be executed once, or
+    // which is inner to a lambda we only expect to run once. In the latter
+    // case, if the lambda runs multiple times then CloneFunctionObject will
+    // make a deep clone of its contents.
     if (fun->isInterpreted()) {
         bool singleton = checkRunOnceContext();
         if (!JSFunction::setTypeForScriptedFunction(cx, fun, singleton))
             return false;
 
         SharedContext* outersc = sc;
         if (fun->isInterpretedLazy()) {
             // We need to update the static scope chain regardless of whether
@@ -7791,22 +7796,22 @@ BytecodeEmitter::emitFunction(ParseNode*
         }
 
         if (outersc->isFunctionBox())
             outersc->asFunctionBox()->setHasInnerFunctions();
     } else {
         MOZ_ASSERT(IsAsmJSModule(fun));
     }
 
-    /* Make the function object a literal in the outer script's pool. */
+    // Make the function object a literal in the outer script's pool.
     unsigned index = objectList.add(pn->pn_funbox);
 
-    /* Non-hoisted functions simply emit their respective op. */
+    // Non-hoisted functions simply emit their respective op.
     if (!pn->functionIsHoisted()) {
-        /* JSOP_LAMBDA_ARROW is always preceded by a new.target */
+        // JSOP_LAMBDA_ARROW is always preceded by a new.target
         MOZ_ASSERT(fun->isArrow() == (pn->getOp() == JSOP_LAMBDA_ARROW));
         if (funbox->isAsync()) {
             MOZ_ASSERT(!needsProto);
             return emitAsyncWrapper(index, funbox->needsHomeObject(), fun->isArrow(),
                                     fun->isGenerator());
         }
 
         if (fun->isArrow()) {
@@ -7825,16 +7830,18 @@ BytecodeEmitter::emitFunction(ParseNode*
         }
 
         if (pn->getOp() == JSOP_DEFFUN) {
             if (!emitIndex32(JSOP_LAMBDA, index))
                 return false;
             return emit1(JSOP_DEFFUN);
         }
 
+        // This is a FunctionExpression, ArrowFunctionExpression, or class
+        // constructor. Emit the single instruction (without location info).
         return emitIndex32(pn->getOp(), index);
     }
 
     MOZ_ASSERT(!needsProto);
 
     bool topLevelFunction;
     if (sc->isFunctionBox() || (sc->isEvalContext() && sc->strict())) {
         // No nested functions inside other functions are top-level.
@@ -7867,18 +7874,16 @@ BytecodeEmitter::emitFunction(ParseNode*
                     return false;
                 }
             } else {
                 if (!emitIndex32(JSOP_LAMBDA, index))
                     return false;
             }
             if (!emit1(JSOP_DEFFUN))
                 return false;
-            if (!updateSourceCoordNotes(pn->pn_pos.begin))
-                return false;
             switchToMain();
         }
     } else {
         // For functions nested within functions and blocks, make a lambda and
         // initialize the binding name of the function in the current scope.
 
         bool isAsync = funbox->isAsync();
         bool isGenerator = funbox->isGenerator();
@@ -11172,17 +11177,17 @@ BytecodeEmitter::finishTakingSrcNotes(ui
                 delta = Min(offset, SN_XDELTA_MASK);
                 sn = main.notes.begin();
             }
         }
     }
 
     // The prologue count might have changed, so we can't reuse prologueCount.
     // The + 1 is to account for the final SN_MAKE_TERMINATOR that is appended
-    // when the notes are copied to their final destination by CopySrcNotes.
+    // when the notes are copied to their final destination by copySrcNotes.
     *out = prologue.notes.length() + main.notes.length() + 1;
     return true;
 }
 
 void
 BytecodeEmitter::copySrcNotes(jssrcnote* destination, uint32_t nsrcnotes)
 {
     unsigned prologueCount = prologue.notes.length();
--- a/js/src/frontend/BytecodeEmitter.h
+++ b/js/src/frontend/BytecodeEmitter.h
@@ -160,18 +160,25 @@ struct JumpList {
     // Add a jump instruction to the list.
     void push(jsbytecode* code, ptrdiff_t jumpOffset);
 
     // Patch all jump instructions in this list to jump to `target`.  This
     // clobbers the list.
     void patchAll(jsbytecode* code, JumpTarget target);
 };
 
+// Used to control whether JSOP_CALL_IGNORES_RV is emitted for function calls.
 enum class ValueUsage {
+    // Assume the value of the current expression may be used. This is always
+    // correct but prohibits JSOP_CALL_IGNORES_RV.
     WantValue,
+
+    // Pass this when emitting an expression if the expression's value is
+    // definitely unused by later instructions. You must make sure the next
+    // instruction is JSOP_POP, a jump to a JSOP_POP, or something similar.
     IgnoreValue
 };
 
 struct MOZ_STACK_CLASS BytecodeEmitter
 {
     class TDZCheckCache;
     class NestableControl;
     class EmitterScope;
--- a/js/src/jit-test/tests/atomics/basic-tests.js
+++ b/js/src/jit-test/tests/atomics/basic-tests.js
@@ -481,17 +481,17 @@ function runTests() {
     var t2 = new Uint16Array(sab);
 
     assertEq(t1[0], 0);
     assertEq(t2[0], 0);
     t1[0] = 37;
     if (is_little)
 	assertEq(t2[0], 37);
     else
-	assertEq(t2[0], 37 << 16);
+	assertEq(t2[0], 37 << 8);
     t1[0] = 0;
 
     // Test that invoking as Atomics.whatever() works, on correct arguments.
     CLONE(testMethod)(new Int8Array(sab), 0, 42, 4095);
     CLONE(testMethod)(new Uint8Array(sab), 0, 42, 4095);
     CLONE(testMethod)(new Int16Array(sab), 0, 42, 2047);
     CLONE(testMethod)(new Uint16Array(sab), 0, 42, 2047);
     CLONE(testMethod)(new Int32Array(sab), 0, 42, 1023);
--- a/js/src/jit-test/tests/debug/Frame-onStep-12.js
+++ b/js/src/jit-test/tests/debug/Frame-onStep-12.js
@@ -1,32 +1,34 @@
+// Check that stepping doesn't make it look like unreachable code is running.
+
 // Because our script source notes record only those bytecode offsets
 // at which source positions change, the default behavior in the
 // absence of a source note is to attribute a bytecode instruction to
 // the same source location as the preceding instruction. When control
 // flows from the preceding bytecode to the one we're emitting, that's
 // usually plausible. But successors in the bytecode stream are not
 // necessarily successors in the control flow graph. If the preceding
 // bytecode was a back edge of a loop, or the jump at the end of a
 // 'then' clause, its source position can be completely unrelated to
 // that of its successor.
-
+//
 // We try to avoid showing such nonsense offsets to the user by
 // requiring breakpoints and single-stepping to stop only at a line's
 // entry points, as reported by Debugger.Script.prototype.getLineOffsets;
 // and then ensuring that those entry points are all offsets mentioned
 // explicitly in the source notes, and hence deliberately attributed
 // to the given bytecode.
-
+//
 // This bit of JavaScript compiles to bytecode ending in a branch
 // instruction whose source position is the body of an unreachable
 // loop. The first instruction of the bytecode we emit following it
 // will inherit this nonsense position, if we have not explicitly
 // emitted a source note for said instruction.
-
+//
 // This test steps across such code and verifies that control never
 // appears to enter the unreachable loop.
 
 var bitOfCode = `debugger;                    // +0
                  if(false) {                  // +1
                    for(var b=0; b<0; b++) {   // +2
                       c = 2                   // +3
                     }                         // +4
--- a/js/src/jit-test/tests/debug/Frame-onStep-17.js
+++ b/js/src/jit-test/tests/debug/Frame-onStep-17.js
@@ -1,8 +1,10 @@
+// Test how stepping interacts with for-in/of statements.
+
 var g = newGlobal();
 var dbg = new Debugger;
 var gw = dbg.addDebuggee(g);
 var log;
 var previous;
 
 dbg.onDebuggerStatement = function (frame) {
   let debugLine = frame.script.getOffsetLocation(frame.offset).lineNumber;
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/debug/Frame-onStep-19.js
@@ -0,0 +1,63 @@
+// Stepping should ignore nested function declarations.
+
+// Nested functions are hoisted to the top of the function body,
+// so technically the first thing that happens when you call the outer function
+// is that each inner function is created and bound to a local variable.
+// But users don't actually want to see that happen when they're stepping.
+// It's super confusing.
+
+function runTest(script, expected) {
+    let g = newGlobal();
+    g.eval(script);
+
+    let dbg = new Debugger(g);
+    let log = [];
+    dbg.onEnterFrame = frame => {
+        let previousLine = undefined;
+        frame.onStep = function() {
+            let line = this.script.getOffsetLocation(this.offset).lineNumber;
+            if (line != previousLine) {
+                log.push(line);
+                previousLine = line;
+            }
+        };
+
+        // Now disable this hook so that we step over function calls, not into them.
+        dbg.onEnterFrame = undefined;
+    };
+
+    g.f();
+
+    assertEq(log.join(","), expected.join(","));
+}
+
+runTest(
+    `\
+      var f = (function() {      // line 1
+        let x = 1;               // line 2
+        funcb("funcb");          // line 3
+        function funcb(msg) {    // line 4
+          console.log(msg)
+        }
+      });                        // line 7
+    `,
+    [2, 3, 7]);
+
+// Stopping at the ClassDeclaration on line 8 is fine. For that matter,
+// stopping on line 5 wouldn't be so bad if we did it after line 3 and before
+// line 8; alas, the actual order of execution is 5, 2, 3, 8... which is too
+// confusing.
+runTest(
+    `\
+      function f() {    //  1
+        var x = 0;      //  2
+        a();            //  3
+
+        function a() {  //  5
+          x += 1;       //  6
+        }               //  7
+        class Car {}    //  8
+        return x;       //  9
+      }                 // 10
+    `,
+    [2, 3, 8, 9, 10]);
--- a/js/src/jit/AtomicOperations.h
+++ b/js/src/jit/AtomicOperations.h
@@ -336,17 +336,23 @@ AtomicOperations::isLockfreeJS(int32_t s
 // x86 the primitives here will be for x86, not for ARM, while the JIT emits ARM
 // code.  Our ARM simulator solves that the easy way: by using these primitives
 // to implement its atomic operations.  For other simulators there may need to
 // be special cases below to provide simulator-compatible primitives, for
 // example, for our ARM64 simulator the primitives could in principle
 // participate in the memory exclusivity monitors implemented by the simulator.
 // Such a solution is likely to be difficult.
 
-#if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86)
+#if defined(JS_SIMULATOR_MIPS32)
+# if defined(__clang__) || defined(__GNUC__)
+#  include "jit/mips-shared/AtomicOperations-mips-shared.h"
+# else
+#  error "No AtomicOperations support for this platform+compiler combination"
+# endif
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86)
 # if defined(__clang__) || defined(__GNUC__)
 #  include "jit/x86-shared/AtomicOperations-x86-shared-gcc.h"
 # elif defined(_MSC_VER)
 #  include "jit/x86-shared/AtomicOperations-x86-shared-msvc.h"
 # else
 #  error "No AtomicOperations support for this platform+compiler combination"
 # endif
 #elif defined(__arm__)
--- a/js/src/jit/CacheIR.cpp
+++ b/js/src/jit/CacheIR.cpp
@@ -281,17 +281,17 @@ GetPropIRGenerator::tryAttachIdempotentS
     RootedId id(cx_, NameToId(idVal_.toString()->asAtom().asPropertyName()));
 
     ValOperandId valId(writer.setInputOperandId(0));
     ObjOperandId objId = writer.guardIsObject(valId);
     if (tryAttachNative(obj, objId, id))
         return true;
 
     // Object lengths are supported only if int32 results are allowed.
-    if ((resultFlags_ & GetPropertyResultFlags::AllowInt32) && tryAttachObjectLength(obj, objId, id))
+    if (tryAttachObjectLength(obj, objId, id))
         return true;
 
     // Also support native data properties on DOMProxy prototypes.
     if (GetProxyStubType(cx_, obj, id) == ProxyStubType::DOMUnshadowed)
         return tryAttachDOMProxyUnshadowed(obj, objId, id);
 
     return false;
 }
@@ -1428,16 +1428,19 @@ GetPropIRGenerator::tryAttachTypedObject
 }
 
 bool
 GetPropIRGenerator::tryAttachObjectLength(HandleObject obj, ObjOperandId objId, HandleId id)
 {
     if (!JSID_IS_ATOM(id, cx_->names().length))
         return false;
 
+    if (!(resultFlags_ & GetPropertyResultFlags::AllowInt32))
+        return false;
+
     if (obj->is<ArrayObject>()) {
         // Make sure int32 is added to the TypeSet before we attach a stub, so
         // the stub can return int32 values without monitoring the result.
         if (obj->as<ArrayObject>().length() > INT32_MAX)
             return false;
 
         maybeEmitIdGuard(id);
         writer.guardClass(objId, GuardClassKind::Array);
@@ -1692,16 +1695,19 @@ GetPropIRGenerator::tryAttachMagicArgume
 
 bool
 GetPropIRGenerator::tryAttachArgumentsObjectArg(HandleObject obj, ObjOperandId objId,
                                                 uint32_t index, Int32OperandId indexId)
 {
     if (!obj->is<ArgumentsObject>() || obj->as<ArgumentsObject>().hasOverriddenElement())
         return false;
 
+    if (!(resultFlags_ & GetPropertyResultFlags::Monitored))
+        return false;
+
     if (obj->is<MappedArgumentsObject>()) {
         writer.guardClass(objId, GuardClassKind::MappedArguments);
     } else {
         MOZ_ASSERT(obj->is<UnmappedArgumentsObject>());
         writer.guardClass(objId, GuardClassKind::UnmappedArguments);
     }
 
     writer.loadArgumentsObjectArgResult(objId, indexId);
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -3258,121 +3258,16 @@ MacroAssembler::emitPreBarrierFastPath(J
 #else
 # error "Unknown architecture"
 #endif
 
     // No barrier is needed if the bit is set, |word & mask != 0|.
     branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
 }
 
-// ========================================================================
-// JS atomic operations.
-
-template<typename T>
-static void
-CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
-                  const T& mem, Register oldval, Register newval, Register temp, AnyRegister output)
-{
-    if (arrayType == Scalar::Uint32) {
-        masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
-        masm.convertUInt32ToDouble(temp, output.fpu());
-    } else {
-        masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
-    }
-}
-
-void
-MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
-                                  const Address& mem, Register oldval, Register newval,
-                                  Register temp, AnyRegister output)
-{
-    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
-}
-
-void
-MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
-                                  const BaseIndex& mem, Register oldval, Register newval,
-                                  Register temp, AnyRegister output)
-{
-    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
-}
-
-template<typename T>
-static void
-AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
-                 const T& mem, Register value, Register temp, AnyRegister output)
-{
-    if (arrayType == Scalar::Uint32) {
-        masm.atomicExchange(arrayType, sync, mem, value, temp);
-        masm.convertUInt32ToDouble(temp, output.fpu());
-    } else {
-        masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
-    }
-}
-
-void
-MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
-                                 const Address& mem, Register value, Register temp,
-                                 AnyRegister output)
-{
-    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
-}
-
-void
-MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
-                                 const BaseIndex& mem, Register value, Register temp,
-                                 AnyRegister output)
-{
-    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
-}
-
-template<typename T>
-static void
-AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
-                AtomicOp op, Register value, const T& mem, Register temp1, Register temp2,
-                AnyRegister output)
-{
-    if (arrayType == Scalar::Uint32) {
-        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
-        masm.convertUInt32ToDouble(temp1, output.fpu());
-    } else {
-        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
-    }
-}
-
-void
-MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
-                                Register value, const Address& mem, Register temp1, Register temp2,
-                                AnyRegister output)
-{
-    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
-}
-
-void
-MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
-                                Register value, const BaseIndex& mem, Register temp1, Register temp2,
-                                AnyRegister output)
-{
-    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
-}
-
-void
-MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
-                                 Register value, const BaseIndex& mem, Register temp)
-{
-    atomicEffectOp(arrayType, sync, op, value, mem, temp);
-}
-
-void
-MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
-                                 Register value, const Address& mem, Register temp)
-{
-    atomicEffectOp(arrayType, sync, op, value, mem, temp);
-}
-
 //}}} check_macroassembler_style
 
 void
 MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
     memoryBarrier(sync.barrierBefore);
 }
 
 void
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1589,51 +1589,79 @@ class MacroAssembler : public MacroAssem
     // see further below.
     //
     // Temp registers must be defined unless otherwise noted in the per-function
     // constraints.
 
     // 8-bit, 16-bit, and 32-bit wide operations.
     //
     // The 8-bit and 16-bit operations zero-extend or sign-extend the result to
-    // 32 bits, according to `type`.  On 64-bit systems, the upper 32 bits of
-    // the result will be zero.
+    // 32 bits, according to `type`. On 64-bit systems, the upper 32 bits of the
+    // result will be zero on some platforms (eg, on x64) and will be the sign
+    // extension of the lower bits on other platforms (eg, MIPS).
 
     // CompareExchange with memory.  Return the value that was in memory,
     // whether we wrote or not.
     //
     // x86-shared: `output` must be eax.
+    // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
+    // and 16-bit wide operations.
 
     void compareExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
                          Register expected, Register replacement, Register output)
         DEFINED_ON(arm, arm64, x86_shared);
 
     void compareExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
                          Register expected, Register replacement, Register output)
         DEFINED_ON(arm, arm64, x86_shared);
 
+
+    void compareExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
+                         Register expected, Register replacement, Register valueTemp,
+                         Register offsetTemp, Register maskTemp, Register output)
+        DEFINED_ON(mips_shared);
+
+    void compareExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
+                         Register expected, Register replacement, Register valueTemp,
+                         Register offsetTemp, Register maskTemp, Register output)
+        DEFINED_ON(mips_shared);
+
     // Exchange with memory.  Return the value initially in memory.
+    // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
+    // and 16-bit wide operations.
 
     void atomicExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
                         Register value, Register output)
         DEFINED_ON(arm, arm64, x86_shared);
 
     void atomicExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
                         Register value, Register output)
         DEFINED_ON(arm, arm64, x86_shared);
 
+    void atomicExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
+                        Register value, Register valueTemp, Register offsetTemp, Register maskTemp,
+                        Register output)
+        DEFINED_ON(mips_shared);
+
+    void atomicExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
+                        Register value, Register valueTemp, Register offsetTemp, Register maskTemp,
+                        Register output)
+        DEFINED_ON(mips_shared);
+
     // Read-modify-write with memory.  Return the value in memory before the
     // operation.
     //
     // x86-shared:
     //   For 8-bit operations, `value` and `output` must have a byte subregister.
     //   For Add and Sub, `temp` must be invalid.
     //   For And, Or, and Xor, `output` must be eax and `temp` must have a byte subregister.
     //
     // ARM: Registers `value` and `output` must differ.
+    // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
+    // and 16-bit wide operations; `value` and `output` must differ.
 
     void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
                        Register value, const Address& mem, Register temp, Register output)
         DEFINED_ON(arm, arm64, x86_shared);
 
     void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
                        Imm32 value, const Address& mem, Register temp, Register output)
         DEFINED_ON(x86_shared);
@@ -1641,17 +1669,29 @@ class MacroAssembler : public MacroAssem
     void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
                        Register value, const BaseIndex& mem, Register temp, Register output)
         DEFINED_ON(arm, arm64, x86_shared);
 
     void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
                        Imm32 value, const BaseIndex& mem, Register temp, Register output)
         DEFINED_ON(x86_shared);
 
+    void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
+                       Register value, const Address& mem, Register valueTemp,
+                       Register offsetTemp, Register maskTemp, Register output)
+        DEFINED_ON(mips_shared);
+
+    void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
+                       Register value, const BaseIndex& mem, Register valueTemp,
+                       Register offsetTemp, Register maskTemp, Register output)
+        DEFINED_ON(mips_shared);
+
     // Read-modify-write with memory.  Return no value.
+    // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
+    // and 16-bit wide operations.
 
     void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
                         const Address& mem, Register temp)
         DEFINED_ON(arm, arm64, x86_shared);
 
     void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Imm32 value,
                         const Address& mem, Register temp)
         DEFINED_ON(x86_shared);
@@ -1659,67 +1699,77 @@ class MacroAssembler : public MacroAssem
     void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
                         const BaseIndex& mem, Register temp)
         DEFINED_ON(arm, arm64, x86_shared);
 
     void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Imm32 value,
                         const BaseIndex& mem, Register temp)
         DEFINED_ON(x86_shared);
 
+
+    void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
+                    const Address& mem, Register valueTemp, Register offsetTemp, Register maskTemp)
+        DEFINED_ON(mips_shared);
+
+    void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
+                    const BaseIndex& mem, Register valueTemp, Register offsetTemp, Register maskTemp)
+        DEFINED_ON(mips_shared);
+
     // 64-bit wide operations.
 
     // 64-bit atomic load.  On 64-bit systems, use regular wasm load with
     // Synchronization::Load, not this method.
     //
     // x86: `temp` must be ecx:ebx; `output` must be edx:eax.
     // ARM: `temp` should be invalid; `output` must be (even,odd) pair.
+    // MIPS32: `temp` should be invalid.
 
     void atomicLoad64(const Synchronization& sync, const Address& mem, Register64 temp,
                       Register64 output)
-        DEFINED_ON(arm, x86);
+        DEFINED_ON(arm, mips32, x86);
 
     void atomicLoad64(const Synchronization& sync, const BaseIndex& mem, Register64 temp,
                       Register64 output)
-        DEFINED_ON(arm, x86);
+        DEFINED_ON(arm, mips32, x86);
 
     // x86: `expected` must be the same as `output`, and must be edx:eax
     // x86: `replacement` must be ecx:ebx
     // x64: `output` must be rax.
     // ARM: Registers must be distinct; `replacement` and `output` must be (even,odd) pairs.
+    // MIPS: Registers must be distinct.
 
     void compareExchange64(const Synchronization& sync, const Address& mem, Register64 expected,
-                           Register64 replacement, Register64 output)
-        DEFINED_ON(arm, arm64, x64, x86);
+                           Register64 replacement, Register64 output) PER_ARCH;
 
     void compareExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 expected,
-                           Register64 replacement, Register64 output)
-        DEFINED_ON(arm, arm64, x64, x86);
+                           Register64 replacement, Register64 output) PER_ARCH;
 
     // x86: `value` must be ecx:ebx; `output` must be edx:eax.
     // ARM: Registers must be distinct; `value` and `output` must be (even,odd) pairs.
+    // MIPS: Registers must be distinct.
 
     void atomicExchange64(const Synchronization& sync, const Address& mem, Register64 value,
-                          Register64 output)
-        DEFINED_ON(arm, arm64, x64, x86);
+                          Register64 output) PER_ARCH;
 
     void atomicExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 value,
-                          Register64 output)
-        DEFINED_ON(arm, arm64, x64, x86);
+                          Register64 output) PER_ARCH;
 
     // x86: `output` must be edx:eax, `temp` must be ecx:ebx.
     // x64: For And, Or, and Xor `output` must be rax.
     // ARM: Registers must be distinct; `temp` and `output` must be (even,odd) pairs.
+    // MIPS: Registers must be distinct.
+    // MIPS32: `temp` should be invalid.
 
     void atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
                          const Address& mem, Register64 temp, Register64 output)
-        DEFINED_ON(arm, arm64, x64);
+        DEFINED_ON(arm, arm64, mips32, mips64, x64);
 
     void atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
                          const BaseIndex& mem, Register64 temp, Register64 output)
-        DEFINED_ON(arm, arm64, x64);
+        DEFINED_ON(arm, arm64, mips32, mips64, x64);
 
     void atomicFetchOp64(const Synchronization& sync, AtomicOp op, const Address& value,
                          const Address& mem, Register64 temp, Register64 output)
         DEFINED_ON(x86);
 
     void atomicFetchOp64(const Synchronization& sync, AtomicOp op, const Address& value,
                          const BaseIndex& mem, Register64 temp, Register64 output)
         DEFINED_ON(x86);
@@ -1743,60 +1793,113 @@ class MacroAssembler : public MacroAssem
     // Otherwise `output` must be a GPR and `temp`/`temp2` should be InvalidReg.
     // (`temp1` must always be valid.)
     //
     // For additional register constraints, see the primitive 32-bit operations
     // above.
 
     void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
                            Register expected, Register replacement, Register temp,
-                           AnyRegister output);
+                           AnyRegister output)
+        DEFINED_ON(arm, arm64, x86_shared);
 
     void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
                            const BaseIndex& mem, Register expected, Register replacement,
-                           Register temp, AnyRegister output);
+                           Register temp, AnyRegister output)
+        DEFINED_ON(arm, arm64, x86_shared);
+
+    void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
+                           Register expected, Register replacement, Register valueTemp,
+                           Register offsetTemp, Register maskTemp, Register temp,
+                           AnyRegister output)
+        DEFINED_ON(mips_shared);
+
+    void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const BaseIndex& mem,
+                           Register expected, Register replacement, Register valueTemp,
+                           Register offsetTemp, Register maskTemp, Register temp,
+                           AnyRegister output)
+        DEFINED_ON(mips_shared);
 
     void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
-                          Register value, Register temp, AnyRegister output);
+                          Register value, Register temp, AnyRegister output)
+        DEFINED_ON(arm, arm64, x86_shared);
 
     void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const BaseIndex& mem,
-                          Register value, Register temp, AnyRegister output);
+                          Register value, Register temp, AnyRegister output)
+        DEFINED_ON(arm, arm64, x86_shared);
+
+    void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
+                          Register value, Register valueTemp, Register offsetTemp,
+                          Register maskTemp, Register temp, AnyRegister output)
+        DEFINED_ON(mips_shared);
+
+    void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const BaseIndex& mem,
+                          Register value, Register valueTemp, Register offsetTemp,
+                          Register maskTemp, Register temp, AnyRegister output)
+        DEFINED_ON(mips_shared);
+
 
     void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
                          Register value, const Address& mem, Register temp1, Register temp2,
-                         AnyRegister output);
+                         AnyRegister output)
+        DEFINED_ON(arm, arm64, x86_shared);
 
     void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
                          Register value, const BaseIndex& mem, Register temp1, Register temp2,
-                         AnyRegister output);
+                         AnyRegister output)
+        DEFINED_ON(arm, arm64, x86_shared);
 
     void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
                          Imm32 value, const Address& mem, Register temp1, Register temp2,
                          AnyRegister output)
         DEFINED_ON(x86_shared);
 
     void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
                          Imm32 value, const BaseIndex& mem, Register temp1, Register temp2,
                          AnyRegister output)
         DEFINED_ON(x86_shared);
 
-    void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
-                          Register value, const Address& mem, Register temp);
+    void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                         Register value, const Address& mem, Register valueTemp,
+                         Register offsetTemp, Register maskTemp, Register temp,
+                         AnyRegister output)
+        DEFINED_ON(mips_shared);
+
+    void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                         Register value, const BaseIndex& mem, Register valueTemp,
+                         Register offsetTemp, Register maskTemp, Register temp,
+                         AnyRegister output)
+        DEFINED_ON(mips_shared);
 
     void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
-                          Register value, const BaseIndex& mem, Register temp);
+                          Register value, const Address& mem, Register temp)
+        DEFINED_ON(arm, arm64, x86_shared);
+
+    void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                          Register value, const BaseIndex& mem, Register temp)
+        DEFINED_ON(arm, arm64, x86_shared);
 
     void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
                           Imm32 value, const Address& mem, Register temp)
         DEFINED_ON(x86_shared);
 
     void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
                           Imm32 value, const BaseIndex& mem, Register temp)
         DEFINED_ON(x86_shared);
 
+    void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                          Register value, const Address& mem, Register valueTemp,
+                          Register offsetTemp, Register maskTemp)
+        DEFINED_ON(mips_shared);
+
+    void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                          Register value, const BaseIndex& mem, Register valueTemp,
+                          Register offsetTemp, Register maskTemp)
+        DEFINED_ON(mips_shared);
+
     //}}} check_macroassembler_decl_style
   public:
 
     // Emits a test of a value against all types in a TypeSet. A scratch
     // register is required.
     template <typename Source>
     void guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind, Register scratch, Label* miss);
 
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -5625,16 +5625,121 @@ MacroAssembler::atomicFetchOp64(const Sy
 void
 MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
                                 const BaseIndex& mem, Register64 temp, Register64 output)
 {
     AtomicFetchOp64(*this, sync, op, value, mem, temp, output);
 }
 
 // ========================================================================
+// JS atomic operations.
+
+template<typename T>
+static void
+CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                  const T& mem, Register oldval, Register newval, Register temp, AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
+        masm.convertUInt32ToDouble(temp, output.fpu());
+    } else {
+        masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
+    }
+}
+
+void
+MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                  const Address& mem, Register oldval, Register newval,
+                                  Register temp, AnyRegister output)
+{
+    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+void
+MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                  const BaseIndex& mem, Register oldval, Register newval,
+                                  Register temp, AnyRegister output)
+{
+    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+template<typename T>
+static void
+AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                 const T& mem, Register value, Register temp, AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.atomicExchange(arrayType, sync, mem, value, temp);
+        masm.convertUInt32ToDouble(temp, output.fpu());
+    } else {
+        masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
+    }
+}
+
+void
+MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                 const Address& mem, Register value, Register temp,
+                                 AnyRegister output)
+{
+    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+void
+MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                 const BaseIndex& mem, Register value, Register temp,
+                                 AnyRegister output)
+{
+    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+template<typename T>
+static void
+AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                AtomicOp op, Register value, const T& mem, Register temp1, Register temp2,
+                AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
+        masm.convertUInt32ToDouble(temp1, output.fpu());
+    } else {
+        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
+    }
+}
+
+void
+MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                Register value, const Address& mem, Register temp1, Register temp2,
+                                AnyRegister output)
+{
+    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void
+MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                Register value, const BaseIndex& mem, Register temp1, Register temp2,
+                                AnyRegister output)
+{
+    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Register value, const BaseIndex& mem, Register temp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, temp);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Register value, const Address& mem, Register temp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, temp);
+}
+
+// ========================================================================
 // Convert floating point.
 
 bool
 MacroAssembler::convertUInt64ToDoubleNeedsTemp()
 {
     return false;
 }
 
--- a/js/src/jit/arm64/MacroAssembler-arm64.cpp
+++ b/js/src/jit/arm64/MacroAssembler-arm64.cpp
@@ -1086,12 +1086,117 @@ MacroAssembler::atomicFetchOp64(const Sy
 
 void
 MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value, const BaseIndex& mem,
                                 Register64 temp, Register64 output)
 {
     MOZ_CRASH("NYI");
 }
 
+// ========================================================================
+// JS atomic operations.
+
+template<typename T>
+static void
+CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                  const T& mem, Register oldval, Register newval, Register temp, AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
+        masm.convertUInt32ToDouble(temp, output.fpu());
+    } else {
+        masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
+    }
+}
+
+void
+MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                  const Address& mem, Register oldval, Register newval,
+                                  Register temp, AnyRegister output)
+{
+    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+void
+MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                  const BaseIndex& mem, Register oldval, Register newval,
+                                  Register temp, AnyRegister output)
+{
+    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+template<typename T>
+static void
+AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                 const T& mem, Register value, Register temp, AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.atomicExchange(arrayType, sync, mem, value, temp);
+        masm.convertUInt32ToDouble(temp, output.fpu());
+    } else {
+        masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
+    }
+}
+
+void
+MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                 const Address& mem, Register value, Register temp,
+                                 AnyRegister output)
+{
+    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+void
+MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                 const BaseIndex& mem, Register value, Register temp,
+                                 AnyRegister output)
+{
+    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+template<typename T>
+static void
+AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                AtomicOp op, Register value, const T& mem, Register temp1, Register temp2,
+                AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
+        masm.convertUInt32ToDouble(temp1, output.fpu());
+    } else {
+        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
+    }
+}
+
+void
+MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                Register value, const Address& mem, Register temp1, Register temp2,
+                                AnyRegister output)
+{
+    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void
+MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                Register value, const BaseIndex& mem, Register temp1, Register temp2,
+                                AnyRegister output)
+{
+    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Register value, const BaseIndex& mem, Register temp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, temp);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Register value, const Address& mem, Register temp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, temp);
+}
+
 //}}} check_macroassembler_style
 
 } // namespace jit
 } // namespace js
--- a/js/src/jit/mips-shared/Assembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
@@ -819,16 +819,23 @@ AssemblerMIPSShared::as_lwr(Register rd,
 BufferOffset
 AssemblerMIPSShared::as_ll(Register rd, Register rs, int16_t off)
 {
     spew("ll     %3s, (0x%x)%2s", rd.name(), off, rs.name());
     return writeInst(InstImm(op_ll, rs, rd, Imm16(off)).encode());
 }
 
 BufferOffset
+AssemblerMIPSShared::as_lld(Register rd, Register rs, int16_t off)
+{
+    spew("lld     %3s, (0x%x)%2s", rd.name(), off, rs.name());
+    return writeInst(InstImm(op_lld, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
 AssemblerMIPSShared::as_ld(Register rd, Register rs, int16_t off)
 {
     spew("ld     %3s, (0x%x)%2s", rd.name(), off, rs.name());
     return writeInst(InstImm(op_ld, rs, rd, Imm16(off)).encode());
 }
 
 BufferOffset
 AssemblerMIPSShared::as_ldl(Register rd, Register rs, int16_t off)
@@ -882,16 +889,24 @@ AssemblerMIPSShared::as_swr(Register rd,
 BufferOffset
 AssemblerMIPSShared::as_sc(Register rd, Register rs, int16_t off)
 {
     spew("sc     %3s, (0x%x)%2s", rd.name(), off, rs.name());
     return writeInst(InstImm(op_sc, rs, rd, Imm16(off)).encode());
 }
 
 BufferOffset
+AssemblerMIPSShared::as_scd(Register rd, Register rs, int16_t off)
+{
+    spew("scd     %3s, (0x%x)%2s", rd.name(), off, rs.name());
+    return writeInst(InstImm(op_scd, rs, rd, Imm16(off)).encode());
+}
+
+
+BufferOffset
 AssemblerMIPSShared::as_sd(Register rd, Register rs, int16_t off)
 {
     spew("sd     %3s, (0x%x)%2s", rd.name(), off, rs.name());
     return writeInst(InstImm(op_sd, rs, rd, Imm16(off)).encode());
 }
 
 BufferOffset
 AssemblerMIPSShared::as_sdl(Register rd, Register rs, int16_t off)
@@ -1921,18 +1936,16 @@ AssemblerMIPSShared::as_break(uint32_t c
     spew("break %d", code);
     writeInst(op_special | code << FunctionBits | ff_break);
 }
 
 void
 AssemblerMIPSShared::as_sync(uint32_t stype)
 {
     MOZ_ASSERT(stype <= 31);
-    if (isLoongson())
-        stype = 0;
     spew("sync %d", stype);
     writeInst(InstReg(op_special, zero, zero, zero, stype, ff_sync).encode());
 }
 
 // This just stomps over memory with 32 bits of raw data. Its purpose is to
 // overwrite the call of JITed code with 32 bits worth of an offset. This will
 // is only meant to function on code that has been invalidated, so it should
 // be totally safe. Since that instruction will never be executed again, a
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -286,23 +286,25 @@ enum Opcode {
     op_sw       = 43 << OpcodeShift,
     op_sdl      = 44 << OpcodeShift,
     op_sdr      = 45 << OpcodeShift,
     op_swr      = 46 << OpcodeShift,
 
     op_ll       = 48 << OpcodeShift,
     op_lwc1     = 49 << OpcodeShift,
     op_lwc2     = 50 << OpcodeShift,
+    op_lld      = 52 << OpcodeShift,
     op_ldc1     = 53 << OpcodeShift,
     op_ldc2     = 54 << OpcodeShift,
     op_ld       = 55 << OpcodeShift,
 
     op_sc       = 56 << OpcodeShift,
     op_swc1     = 57 << OpcodeShift,
     op_swc2     = 58 << OpcodeShift,
+    op_scd      = 60 << OpcodeShift,
     op_sdc1     = 61 << OpcodeShift,
     op_sdc2     = 62 << OpcodeShift,
     op_sd       = 63 << OpcodeShift,
 };
 
 enum RSField {
     rs_zero  = 0 << RSShift,
     // cop1 encoding of RS field.
@@ -1059,25 +1061,27 @@ class AssemblerMIPSShared : public Assem
     BufferOffset as_lbu(Register rd, Register rs, int16_t off);
     BufferOffset as_lh(Register rd, Register rs, int16_t off);
     BufferOffset as_lhu(Register rd, Register rs, int16_t off);
     BufferOffset as_lw(Register rd, Register rs, int16_t off);
     BufferOffset as_lwu(Register rd, Register rs, int16_t off);
     BufferOffset as_lwl(Register rd, Register rs, int16_t off);
     BufferOffset as_lwr(Register rd, Register rs, int16_t off);
     BufferOffset as_ll(Register rd, Register rs, int16_t off);
+    BufferOffset as_lld(Register rd, Register rs, int16_t off);
     BufferOffset as_ld(Register rd, Register rs, int16_t off);
     BufferOffset as_ldl(Register rd, Register rs, int16_t off);
     BufferOffset as_ldr(Register rd, Register rs, int16_t off);
     BufferOffset as_sb(Register rd, Register rs, int16_t off);
     BufferOffset as_sh(Register rd, Register rs, int16_t off);
     BufferOffset as_sw(Register rd, Register rs, int16_t off);
     BufferOffset as_swl(Register rd, Register rs, int16_t off);
     BufferOffset as_swr(Register rd, Register rs, int16_t off);
     BufferOffset as_sc(Register rd, Register rs, int16_t off);
+    BufferOffset as_scd(Register rd, Register rs, int16_t off);
     BufferOffset as_sd(Register rd, Register rs, int16_t off);
     BufferOffset as_sdl(Register rd, Register rs, int16_t off);
     BufferOffset as_sdr(Register rd, Register rs, int16_t off);
 
     // Loongson-specific load and store instructions
     BufferOffset as_gslbx(Register rd, Register rs, Register ri, int16_t off);
     BufferOffset as_gssbx(Register rd, Register rs, Register ri, int16_t off);
     BufferOffset as_gslhx(Register rd, Register rs, Register ri, int16_t off);
--- a/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
+++ b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
@@ -1,146 +1,416 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 /* For documentation, see jit/AtomicOperations.h */
 
-// NOTE, this file is *not* used with the MIPS simulator, only when compiling
-// for actual MIPS hardware.  The simulators get the files that are appropriate
-// for the hardware the simulator is running on.  See the comments before the
-// #include nest at the bottom of jit/AtomicOperations.h for more information.
-
 // NOTE, MIPS32 unlike MIPS64 doesn't provide hardware support for lock-free
 // 64-bit atomics. We lie down below about 8-byte atomics being always lock-
-// free in order to support wasm jit. It is necessary to link with -latomic to
-// get the 64-bit atomic intrinsics on MIPS32.
+// free in order to support wasm jit. The 64-bit atomics for MIPS32 do not use
+// the __atomic intrinsics and therefore do not rely on -latomic.
+// Access to a specific 64-bit variable in memory is protected by an AddressLock
+// whose instance is shared between jit and AtomicOperations.
 
 #ifndef jit_mips_shared_AtomicOperations_mips_shared_h
 #define jit_mips_shared_AtomicOperations_mips_shared_h
 
 #include "mozilla/Assertions.h"
 #include "mozilla/Types.h"
 
+#include "builtin/AtomicsObject.h"
 #include "vm/ArrayBufferObject.h"
 
 #if !defined(__clang__) && !defined(__GNUC__)
 # error "This file only for gcc-compatible compilers"
 #endif
 
+#if defined(JS_SIMULATOR_MIPS32) && !defined(__i386__)
+# error "The MIPS32 simulator atomics assume x86"
+#endif
+
+namespace js { namespace jit {
+
+#if defined(JS_CODEGEN_MIPS32)
+
+struct AddressLock
+{
+  public:
+    void acquire();
+    void release();
+  private:
+    uint32_t spinlock;
+};
+
+static_assert(sizeof(AddressLock) == sizeof(uint32_t),
+              "AddressLock must be 4 bytes for it to be consumed by jit");
+
+// For now use a single global AddressLock.
+static AddressLock gAtomic64Lock;
+
+struct MOZ_RAII AddressGuard
+{
+  explicit AddressGuard(void* addr)
+  {
+    gAtomic64Lock.acquire();
+  }
+
+  ~AddressGuard() {
+    gAtomic64Lock.release();
+  }
+};
+
+#endif
+
+} }
+
 inline bool
 js::jit::AtomicOperations::hasAtomic8()
 {
     return true;
 }
 
 inline bool
 js::jit::AtomicOperations::isLockfree8()
 {
     MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
     MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
     MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
-# if _MIPS_SIM == _ABI64
+# if defined(JS_64BIT)
     MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
 # endif
     return true;
 }
 
 inline void
 js::jit::AtomicOperations::fenceSeqCst()
 {
     __atomic_thread_fence(__ATOMIC_SEQ_CST);
 }
 
 template<typename T>
 inline T
 js::jit::AtomicOperations::loadSeqCst(T* addr)
 {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+    static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
     T v;
     __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
     return v;
 }
 
+namespace js { namespace jit {
+
+#if defined(JS_CODEGEN_MIPS32)
+
+template<>
+inline int64_t
+js::jit::AtomicOperations::loadSeqCst(int64_t* addr)
+{
+    AddressGuard guard(addr);
+    return *addr;
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::loadSeqCst(uint64_t* addr)
+{
+    AddressGuard guard(addr);
+    return *addr;
+}
+
+#endif
+
+} }
+
 template<typename T>
 inline void
 js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
 {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+    static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
     __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
 }
 
+namespace js { namespace jit {
+
+#if defined(JS_CODEGEN_MIPS32)
+
+template<>
+inline void
+js::jit::AtomicOperations::storeSeqCst(int64_t* addr, int64_t val)
+{
+    AddressGuard guard(addr);
+    *addr = val;
+}
+
+template<>
+inline void
+js::jit::AtomicOperations::storeSeqCst(uint64_t* addr, uint64_t val)
+{
+    AddressGuard guard(addr);
+    *addr = val;
+}
+
+#endif
+
+} }
+
 template<typename T>
 inline T
 js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
 {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+    static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
     __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
     return oldval;
 }
 
+namespace js { namespace jit {
+
+#if defined(JS_CODEGEN_MIPS32)
+
+template<>
+inline int64_t
+js::jit::AtomicOperations::compareExchangeSeqCst(int64_t* addr, int64_t oldval, int64_t newval)
+{
+    AddressGuard guard(addr);
+    int64_t val = *addr;
+    if (val == oldval)
+        *addr = newval;
+    return val;
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::compareExchangeSeqCst(uint64_t* addr, uint64_t oldval, uint64_t newval)
+{
+    AddressGuard guard(addr);
+    uint64_t val = *addr;
+    if (val == oldval)
+        *addr = newval;
+    return val;
+}
+
+#endif
+
+} }
+
 template<typename T>
 inline T
 js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
 {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+    static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
     return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
 }
 
+namespace js { namespace jit {
+
+#if defined(JS_CODEGEN_MIPS32)
+
+template<>
+inline int64_t
+js::jit::AtomicOperations::fetchAddSeqCst(int64_t* addr, int64_t val)
+{
+    AddressGuard guard(addr);
+    int64_t old = *addr;
+    *addr = old + val;
+    return old;
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::fetchAddSeqCst(uint64_t* addr, uint64_t val)
+{
+    AddressGuard guard(addr);
+    uint64_t old = *addr;
+    *addr = old + val;
+    return old;
+}
+
+#endif
+
+} }
+
 template<typename T>
 inline T
 js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
 {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+    static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
     return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
 }
 
+namespace js { namespace jit {
+
+#if defined(JS_CODEGEN_MIPS32)
+
+template<>
+inline int64_t
+js::jit::AtomicOperations::fetchSubSeqCst(int64_t* addr, int64_t val)
+{
+    AddressGuard guard(addr);
+    int64_t old = *addr;
+    *addr = old - val;
+    return old;
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::fetchSubSeqCst(uint64_t* addr, uint64_t val)
+{
+    AddressGuard guard(addr);
+    uint64_t old = *addr;
+    *addr = old - val;
+    return old;
+}
+
+#endif
+
+} }
+
 template<typename T>
 inline T
 js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
 {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+    static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
     return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
 }
 
+
+namespace js { namespace jit {
+
+#if defined(JS_CODEGEN_MIPS32)
+
+template<>
+inline int64_t
+js::jit::AtomicOperations::fetchAndSeqCst(int64_t* addr, int64_t val)
+{
+    AddressGuard guard(addr);
+    int64_t old = *addr;
+    *addr = old & val;
+    return old;
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::fetchAndSeqCst(uint64_t* addr, uint64_t val)
+{
+    AddressGuard guard(addr);
+    uint64_t old = *addr;
+    *addr = old & val;
+    return old;
+}
+
+#endif
+
+} }
+
 template<typename T>
 inline T
 js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
 {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+    static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
     return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
 }
 
+namespace js { namespace jit {
+
+#if defined(JS_CODEGEN_MIPS32)
+
+template<>
+inline int64_t
+js::jit::AtomicOperations::fetchOrSeqCst(int64_t* addr, int64_t val)
+{
+    AddressGuard guard(addr);
+    int64_t old = *addr;
+    *addr = old | val;
+    return old;
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::fetchOrSeqCst(uint64_t* addr, uint64_t val)
+{
+    AddressGuard guard(addr);
+    uint64_t old = *addr;
+    *addr = old | val;
+    return old;
+}
+
+#endif
+
+} }
+
 template<typename T>
 inline T
 js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
 {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+    static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
     return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
 
 }
 
+namespace js { namespace jit {
+
+#if defined(JS_CODEGEN_MIPS32)
+
+template<>
+inline int64_t
+js::jit::AtomicOperations::fetchXorSeqCst(int64_t* addr, int64_t val)
+{
+    AddressGuard guard(addr);
+    int64_t old = *addr;
+    *addr = old ^ val;
+    return old;
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::fetchXorSeqCst(uint64_t* addr, uint64_t val)
+{
+    AddressGuard guard(addr);
+    uint64_t old = *addr;
+    *addr = old ^ val;
+    return old;
+}
+
+#endif
+
+} }
+
 template<typename T>
 inline T
 js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
 {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-    if (__atomic_always_lock_free(sizeof(T), 0)) {
-        T v;
-        __atomic_load(addr, &v, __ATOMIC_RELAXED);
-        return v;
-    } else {
-        return *addr;
-    }
+    static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
+    T v;
+    __atomic_load(addr, &v, __ATOMIC_RELAXED);
+    return v;
 }
 
 namespace js { namespace jit {
 
+#if defined(JS_CODEGEN_MIPS32)
+
+template<>
+inline int64_t
+js::jit::AtomicOperations::loadSafeWhenRacy(int64_t* addr)
+{
+    return *addr;
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::loadSafeWhenRacy(uint64_t* addr)
+{
+    return *addr;
+}
+
+#endif
+
 template<>
 inline uint8_clamped
 js::jit::AtomicOperations::loadSafeWhenRacy(uint8_clamped* addr)
 {
     uint8_t v;
     __atomic_load(&addr->val, &v, __ATOMIC_RELAXED);
     return uint8_clamped(v);
 }
@@ -160,26 +430,40 @@ js::jit::AtomicOperations::loadSafeWhenR
 }
 
 } }
 
 template<typename T>
 inline void
 js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
 {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
-    if (__atomic_always_lock_free(sizeof(T), 0)) {
-        __atomic_store(addr, &val, __ATOMIC_RELAXED);
-    } else {
-        *addr = val;
-    }
+    static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
+    __atomic_store(addr, &val, __ATOMIC_RELAXED);
 }
 
 namespace js { namespace jit {
 
+#if defined(JS_CODEGEN_MIPS32)
+
+template<>
+inline void
+js::jit::AtomicOperations::storeSafeWhenRacy(int64_t* addr, int64_t val)
+{
+    *addr = val;
+}
+
+template<>
+inline void
+js::jit::AtomicOperations::storeSafeWhenRacy(uint64_t* addr, uint64_t val)
+{
+    *addr = val;
+}
+
+#endif
+
 template<>
 inline void
 js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr, uint8_clamped val)
 {
     __atomic_store(&addr->val, &val.val, __ATOMIC_RELAXED);
 }
 
 template<>
@@ -211,15 +495,66 @@ js::jit::AtomicOperations::memmoveSafeWh
 {
     ::memmove(dest, src, nbytes);
 }
 
 template<typename T>
 inline T
 js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
 {
-    static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+    static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only");
     T v;
     __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
     return v;
 }
 
+namespace js { namespace jit {
+
+#if defined(JS_CODEGEN_MIPS32)
+
+template<>
+inline int64_t
+js::jit::AtomicOperations::exchangeSeqCst(int64_t* addr, int64_t val)
+{
+    AddressGuard guard(addr);
+    int64_t old = *addr;
+    *addr = val;
+    return old;
+}
+
+template<>
+inline uint64_t
+js::jit::AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val)
+{
+    AddressGuard guard(addr);
+    uint64_t old = *addr;
+    *addr = val;
+    return old;
+}
+
+#endif
+
+} }
+
+#if defined(JS_CODEGEN_MIPS32)
+
+inline void
+js::jit::AddressLock::acquire()
+{
+    uint32_t zero = 0;
+    uint32_t one = 1;
+    while (!__atomic_compare_exchange(&spinlock, &zero, &one, true, __ATOMIC_SEQ_CST,
+          __ATOMIC_SEQ_CST))
+    {
+        zero = 0;
+    }
+}
+
+inline void
+js::jit::AddressLock::release()
+{
+    uint32_t zero = 0;
+    __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
+}
+
+#endif
+
 #endif // jit_mips_shared_AtomicOperations_mips_shared_h
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -2210,106 +2210,83 @@ CodeGeneratorMIPSShared::visitAsmJSStore
     masm.bind(&outOfRange);
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmCompareExchangeHeap(LWasmCompareExchangeHeap* ins)
 {
     MWasmCompareExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->access().type();
-    const LAllocation* ptr = ins->ptr();
-    Register ptrReg = ToRegister(ptr);
-    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+    Register ptrReg = ToRegister(ins->ptr());
+    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
-    Register valueTemp = ToRegister(ins->valueTemp());
-    Register offsetTemp = ToRegister(ins->offsetTemp());
-    Register maskTemp = ToRegister(ins->maskTemp());
-
-    masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
-                                        srcAddr, oldval, newval, InvalidReg,
-                                        valueTemp, offsetTemp, maskTemp,
-                                        ToAnyRegister(ins->output()));
+    Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+    Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+    Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+    masm.compareExchange(vt, Synchronization::Full(), srcAddr, oldval, newval, valueTemp,
+                         offsetTemp, maskTemp, ToRegister(ins->output()));
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins)
 {
     MWasmAtomicExchangeHeap* mir = ins->mir();
     Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
-    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    Register valueTemp = ToRegister(ins->valueTemp());
-    Register offsetTemp = ToRegister(ins->offsetTemp());
-    Register maskTemp = ToRegister(ins->maskTemp());
-
-    masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
-                                       srcAddr, value, InvalidReg, valueTemp,
-                                       offsetTemp, maskTemp, ToAnyRegister(ins->output()));
+    Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+    Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+    Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+    masm.atomicExchange(vt, Synchronization::Full(), srcAddr, value, valueTemp, offsetTemp,
+                        maskTemp, ToRegister(ins->output()));
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins)
 {
     MOZ_ASSERT(ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     MWasmAtomicBinopHeap* mir = ins->mir();
     Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
-    Register flagTemp = ToRegister(ins->flagTemp());
-    Register valueTemp = ToRegister(ins->valueTemp());
-    Register offsetTemp = ToRegister(ins->offsetTemp());
-    Register maskTemp = ToRegister(ins->maskTemp());
-    const LAllocation* value = ins->value();
-    AtomicOp op = mir->operation();
-
-    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
-
-    if (value->isConstant())
-        atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
-                                   Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
-                                   valueTemp, offsetTemp, maskTemp,
-                                   ToAnyRegister(ins->output()));
-    else
-        atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
-                                   ToRegister(value), srcAddr, flagTemp, InvalidReg,
-                                   valueTemp, offsetTemp, maskTemp,
-                                   ToAnyRegister(ins->output()));
+    Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+    Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+    Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+
+    masm.atomicFetchOp(vt, Synchronization::Full(), mir->operation(), ToRegister(ins->value()),
+                       srcAddr, valueTemp, offsetTemp, maskTemp, ToRegister(ins->output()));
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmAtomicBinopHeapForEffect(LWasmAtomicBinopHeapForEffect* ins)
 {
     MOZ_ASSERT(!ins->mir()->hasUses());
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     MWasmAtomicBinopHeap* mir = ins->mir();
     Scalar::Type vt = mir->access().type();
     Register ptrReg = ToRegister(ins->ptr());
-    Register flagTemp = ToRegister(ins->flagTemp());
-    Register valueTemp = ToRegister(ins->valueTemp());
-    Register offsetTemp = ToRegister(ins->offsetTemp());
-    Register maskTemp = ToRegister(ins->maskTemp());
-    const LAllocation* value = ins->value();
-    AtomicOp op = mir->operation();
-
-    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
-
-    if (value->isConstant())
-        atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp,
-                                   valueTemp, offsetTemp, maskTemp);
-    else
-        atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp,
-                                   valueTemp, offsetTemp, maskTemp);
+    Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+    Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+    Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+    BaseIndex srcAddr(HeapReg, ptrReg, TimesOne, mir->access().offset());
+    masm.atomicEffectOp(vt, Synchronization::Full(), mir->operation(), ToRegister(ins->value()),
+                        srcAddr, valueTemp, offsetTemp, maskTemp);
 }
 
 void
 CodeGeneratorMIPSShared::visitWasmStackArg(LWasmStackArg* ins)
 {
     const MWasmStackArg* mir = ins->mir();
     if (ins->arg()->isConstant()) {
         masm.storePtr(ImmWord(ToInt32(ins->arg())), Address(StackPointer, mir->spOffset()));
@@ -2480,427 +2457,167 @@ void
 CodeGeneratorMIPSShared::visitNegF(LNegF* ins)
 {
     FloatRegister input = ToFloatRegister(ins->input());
     FloatRegister output = ToFloatRegister(ins->output());
 
     masm.as_negs(output, input);
 }
 
-template<typename S, typename T>
-void
-CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                                    const S& value, const T& mem, Register flagTemp,
-                                                    Register outTemp, Register valueTemp,
-                                                    Register offsetTemp, Register maskTemp,
-                                                    AnyRegister output)
-{
-    MOZ_ASSERT(flagTemp != InvalidReg);
-    MOZ_ASSERT_IF(arrayType == Scalar::Uint32, outTemp != InvalidReg);
-
-    switch (arrayType) {
-      case Scalar::Int8:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Uint8:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int16:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Uint16:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int32:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Uint32:
-        // At the moment, the code in MCallOptimize.cpp requires the output
-        // type to be double for uint32 arrays.  See bug 1077305.
-        MOZ_ASSERT(output.isFloat());
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicFetchAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicFetchSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicFetchAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicFetchOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicFetchXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        masm.convertUInt32ToDouble(outTemp, output.fpu());
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                                    const Imm32& value, const Address& mem,
-                                                    Register flagTemp, Register outTemp,
-                                                    Register valueTemp, Register offsetTemp,
-                                                    Register maskTemp, AnyRegister output);
-template void
-CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                                    const Imm32& value, const BaseIndex& mem,
-                                                    Register flagTemp, Register outTemp,
-                                                    Register valueTemp, Register offsetTemp,
-                                                    Register maskTemp, AnyRegister output);
-template void
-CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                                    const Register& value, const Address& mem,
-                                                    Register flagTemp, Register outTemp,
-                                                    Register valueTemp, Register offsetTemp,
-                                                    Register maskTemp, AnyRegister output);
-template void
-CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                                    const Register& value, const BaseIndex& mem,
-                                                    Register flagTemp, Register outTemp,
-                                                    Register valueTemp, Register offsetTemp,
-                                                    Register maskTemp, AnyRegister output);
-
-// Binary operation for effect, result discarded.
-template<typename S, typename T>
-void
-CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
-                                                    const T& mem, Register flagTemp, Register valueTemp,
-                                                    Register offsetTemp, Register maskTemp)
-{
-    MOZ_ASSERT(flagTemp != InvalidReg);
-
-    switch (arrayType) {
-      case Scalar::Int8:
-      case Scalar::Uint8:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicAdd8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicSub8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicAnd8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicOr8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicXor8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int16:
-      case Scalar::Uint16:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicAdd16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicSub16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicAnd16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicOr16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicXor16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int32:
-      case Scalar::Uint32:
-        switch (op) {
-          case AtomicFetchAddOp:
-            masm.atomicAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchSubOp:
-            masm.atomicSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchAndOp:
-            masm.atomicAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchOrOp:
-            masm.atomicOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          case AtomicFetchXorOp:
-            masm.atomicXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                                    const Imm32& value, const Address& mem,
-                                                    Register flagTemp, Register valueTemp,
-                                                    Register offsetTemp, Register maskTemp);
-template void
-CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                                    const Imm32& value, const BaseIndex& mem,
-                                                    Register flagTemp, Register valueTemp,
-                                                    Register offsetTemp, Register maskTemp);
-template void
-CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                                    const Register& value, const Address& mem,
-                                                    Register flagTemp, Register valueTemp,
-                                                    Register offsetTemp, Register maskTemp);
-template void
-CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                                    const Register& value, const BaseIndex& mem,
-                                                    Register flagTemp, Register valueTemp,
-                                                    Register offsetTemp, Register maskTemp);
-
-
 void
 CodeGeneratorMIPSShared::visitWasmAddOffset(LWasmAddOffset* lir)
 {
     MWasmAddOffset* mir = lir->mir();
     Register base = ToRegister(lir->base());
     Register out = ToRegister(lir->output());
 
     masm.ma_addTestCarry(out, base, Imm32(mir->offset()), oldTrap(mir, wasm::Trap::OutOfBounds));
 }
 
-template <typename T>
-static inline void
-AtomicBinopToTypedArray(CodeGeneratorMIPSShared* cg, AtomicOp op,
-                        Scalar::Type arrayType, const LAllocation* value, const T& mem,
-                        Register flagTemp, Register outTemp, Register valueTemp,
-                        Register offsetTemp, Register maskTemp, AnyRegister output)
-{
-    if (value->isConstant())
-        cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, flagTemp, outTemp,
-                                       valueTemp, offsetTemp, maskTemp, output);
-    else
-        cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, flagTemp, outTemp,
-                                       valueTemp, offsetTemp, maskTemp, output);
-}
 
 void
 CodeGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
 {
     MOZ_ASSERT(lir->mir()->hasUses());
 
     AnyRegister output = ToAnyRegister(lir->output());
     Register elements = ToRegister(lir->elements());
-    Register flagTemp = ToRegister(lir->temp1());
-    Register outTemp = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
-    Register valueTemp = ToRegister(lir->valueTemp());
-    Register offsetTemp = ToRegister(lir->offsetTemp());
-    Register maskTemp = ToRegister(lir->maskTemp());
-    const LAllocation* value = lir->value();
+    Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
+    Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+    Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+    Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+    Register value = ToRegister(lir->value());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
     int width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address mem(elements, ToInt32(lir->index()) * width);
-        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp,
-                                valueTemp, offsetTemp, maskTemp, output);
+        masm.atomicFetchOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
+                             mem, valueTemp, offsetTemp, maskTemp, outTemp, output);
     } else {
         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp,
-                                valueTemp, offsetTemp, maskTemp, output);
+        masm.atomicFetchOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
+                             mem, valueTemp, offsetTemp, maskTemp, outTemp, output);
     }
 }
 
-template <typename T>
-static inline void
-AtomicBinopToTypedArray(CodeGeneratorMIPSShared* cg, AtomicOp op, Scalar::Type arrayType,
-                        const LAllocation* value, const T& mem, Register flagTemp,
-                        Register valueTemp, Register offsetTemp, Register maskTemp)
-{
-    if (value->isConstant())
-        cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem,
-                                       flagTemp, valueTemp, offsetTemp, maskTemp);
-    else
-        cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem,
-                                       flagTemp, valueTemp, offsetTemp, maskTemp);
-}
-
 void
 CodeGeneratorMIPSShared::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
 {
     MOZ_ASSERT(!lir->mir()->hasUses());
 
     Register elements = ToRegister(lir->elements());
-    Register flagTemp = ToRegister(lir->flagTemp());
-    Register valueTemp = ToRegister(lir->valueTemp());
-    Register offsetTemp = ToRegister(lir->offsetTemp());
-    Register maskTemp = ToRegister(lir->maskTemp());
-    const LAllocation* value = lir->value();
+    Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+    Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+    Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+    Register value = ToRegister(lir->value());
     Scalar::Type arrayType = lir->mir()->arrayType();
     int width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address mem(elements, ToInt32(lir->index()) * width);
-        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem,
-                                flagTemp, valueTemp, offsetTemp, maskTemp);
+        masm.atomicEffectOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
+                             mem, valueTemp, offsetTemp, maskTemp);
     } else {
         BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem,
-                                flagTemp, valueTemp, offsetTemp, maskTemp);
+        masm.atomicEffectOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value,
+                             mem, valueTemp, offsetTemp, maskTemp);
     }
 }
 
 void
 CodeGeneratorMIPSShared::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir)
 {
     Register elements = ToRegister(lir->elements());
     AnyRegister output = ToAnyRegister(lir->output());
-    Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+    Register outTemp = ToTempRegisterOrInvalid(lir->temp());
 
     Register oldval = ToRegister(lir->oldval());
     Register newval = ToRegister(lir->newval());
-    Register valueTemp = ToRegister(lir->valueTemp());
-    Register offsetTemp = ToRegister(lir->offsetTemp());
-    Register maskTemp = ToRegister(lir->maskTemp());
+    Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+    Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+    Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
     int width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
-        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp,
-                                            valueTemp, offsetTemp, maskTemp, output);
+        masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval,
+                               valueTemp, offsetTemp, maskTemp, outTemp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp,
-                                            valueTemp, offsetTemp, maskTemp, output);
+        masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval,
+                               valueTemp, offsetTemp, maskTemp, outTemp, output);
     }
 }
 
 void
 CodeGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir)
 {
     Register elements = ToRegister(lir->elements());
     AnyRegister output = ToAnyRegister(lir->output());
-    Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+    Register outTemp = ToTempRegisterOrInvalid(lir->temp());
 
     Register value = ToRegister(lir->value());
-    Register valueTemp = ToRegister(lir->valueTemp());
-    Register offsetTemp = ToRegister(lir->offsetTemp());
-    Register maskTemp = ToRegister(lir->maskTemp());
+    Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+    Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+    Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
 
     Scalar::Type arrayType = lir->mir()->arrayType();
     int width = Scalar::byteSize(arrayType);
 
     if (lir->index()->isConstant()) {
         Address dest(elements, ToInt32(lir->index()) * width);
-        masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp,
-                                           valueTemp, offsetTemp, maskTemp, output);
+        masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, valueTemp,
+                              offsetTemp, maskTemp, outTemp, output);
     } else {
         BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp,
-                                           valueTemp, offsetTemp, maskTemp, output);
+        masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, valueTemp,
+                              offsetTemp, maskTemp, outTemp, output);
     }
 }
+
+
+void
+CodeGeneratorMIPSShared::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir)
+{
+    Register ptr = ToRegister(lir->ptr());
+    Register64 oldValue = ToRegister64(lir->oldValue());
+    Register64 newValue = ToRegister64(lir->newValue());
+    Register64 output = ToOutRegister64(lir);
+    uint32_t offset = lir->mir()->access().offset();
+
+    BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+    masm.compareExchange64(Synchronization::Full(), addr, oldValue, newValue, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir)
+{
+    Register ptr = ToRegister(lir->ptr());
+    Register64 value = ToRegister64(lir->value());
+    Register64 output = ToOutRegister64(lir);
+    uint32_t offset = lir->mir()->access().offset();
+
+    BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+    masm.atomicExchange64(Synchronization::Full(), addr, value, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir)
+{
+    Register ptr = ToRegister(lir->ptr());
+    Register64 value = ToRegister64(lir->value());
+    Register64 output = ToOutRegister64(lir);
+#ifdef JS_CODEGEN_MIPS32
+    Register64 temp(ToRegister(lir->getTemp(0)), ToRegister(lir->getTemp(1)));
+#else
+    Register64 temp(ToRegister(lir->getTemp(0)));
+#endif
+    uint32_t offset = lir->mir()->access().offset();
+
+    BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+
+    masm.atomicFetchOp64(Synchronization::Full(), lir->mir()->operation(), value, addr, temp,
+                         output);
+}
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
@@ -246,16 +246,20 @@ class CodeGeneratorMIPSShared : public C
                                     AnyRegister output);
 
     // Generating no result.
     template<typename S, typename T>
     void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
                                     const T& mem, Register flagTemp, Register valueTemp,
                                     Register offsetTemp, Register maskTemp);
 
+    void visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir);
+    void visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir);
+    void visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir);
+
   protected:
     void visitEffectiveAddress(LEffectiveAddress* ins);
     void visitUDivOrMod(LUDivOrMod* ins);
 
   public:
     // Unimplemented SIMD instructions
     void visitSimdSplatX4(LSimdSplatX4* lir) { MOZ_CRASH("NYI"); }
     void visitSimd128Int(LSimd128Int* ins) { MOZ_CRASH("NYI"); }
--- a/js/src/jit/mips-shared/LIR-mips-shared.h
+++ b/js/src/jit/mips-shared/LIR-mips-shared.h
@@ -383,12 +383,84 @@ class LWasmUnalignedStoreI64 : public de
     {
         setInt64Operand(1, value);
     }
     const LInt64Allocation value() {
         return getInt64Operand(ValueIndex);
     }
 };
 
+class LWasmCompareExchangeI64 : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES + INT64_PIECES, 0>
+{
+  public:
+    LIR_HEADER(WasmCompareExchangeI64);
+
+    LWasmCompareExchangeI64(const LAllocation& ptr, const LInt64Allocation& oldValue, const LInt64Allocation& newValue)
+    {
+        setOperand(0, ptr);
+        setInt64Operand(1, oldValue);
+        setInt64Operand(1 + INT64_PIECES, newValue);
+    }
+
+    const LAllocation* ptr() {
+        return getOperand(0);
+    }
+    const LInt64Allocation oldValue() {
+        return getInt64Operand(1);
+    }
+    const LInt64Allocation newValue() {
+        return getInt64Operand(1 + INT64_PIECES);
+    }
+    const MWasmCompareExchangeHeap* mir() const {
+        return mir_->toWasmCompareExchangeHeap();
+    }
+};
+
+class LWasmAtomicExchangeI64 : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 0>
+{
+  public:
+    LIR_HEADER(WasmAtomicExchangeI64);
+
+    LWasmAtomicExchangeI64(const LAllocation& ptr, const LInt64Allocation& value)
+    {
+        setOperand(0, ptr);
+        setInt64Operand(1, value);
+    }
+
+    const LAllocation* ptr() {
+        return getOperand(0);
+    }
+    const LInt64Allocation value() {
+        return getInt64Operand(1);
+    }
+    const MWasmAtomicExchangeHeap* mir() const {
+        return mir_->toWasmAtomicExchangeHeap();
+    }
+};
+
+class LWasmAtomicBinopI64 : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 2>
+{
+  public:
+    LIR_HEADER(WasmAtomicBinopI64);
+
+    LWasmAtomicBinopI64(const LAllocation& ptr, const LInt64Allocation& value)
+    {
+        setOperand(0, ptr);
+        setInt64Operand(1, value);
+    }
+
+    const LAllocation* ptr() {
+        return getOperand(0);
+    }
+    const LInt64Allocation value() {
+        return getInt64Operand(1);
+    }
+
+    const MWasmAtomicBinopHeap* mir() const {
+        return mir_->toWasmAtomicBinopHeap();
+    }
+};
+
+
 } // namespace jit
 } // namespace js
 
 #endif /* jit_mips_shared_LIR_mips_shared_h */
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -359,16 +359,24 @@ LIRGeneratorMIPSShared::visitWasmLoad(MW
         if (ins->access().offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         define(lir, ins);
         return;
     }
 
     if (ins->type() == MIRType::Int64) {
+
+#ifdef JS_CODEGEN_MIPS32
+        if(ins->access().isAtomic()) {
+            auto* lir = new(alloc()) LWasmAtomicLoadI64(ptr);
+            defineInt64(lir, ins);
+            return;
+        }
+#endif
         auto* lir = new(alloc()) LWasmLoadI64(ptr);
         if (ins->access().offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         defineInt64(lir, ins);
         return;
     }
 
@@ -381,19 +389,19 @@ LIRGeneratorMIPSShared::visitWasmLoad(MW
 
 void
 LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     MDefinition* value = ins->value();
-    LAllocation baseAlloc = useRegisterAtStart(base);
 
     if (IsUnaligned(ins->access())) {
+        LAllocation baseAlloc = useRegisterAtStart(base);
         if (ins->access().type() == Scalar::Int64) {
             LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
             auto* lir = new(alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
             if (ins->access().offset())
                 lir->setTemp(0, tempCopy(base, 0));
 
             add(lir, ins);
             return;
@@ -404,25 +412,36 @@ LIRGeneratorMIPSShared::visitWasmStore(M
         if (ins->access().offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         add(lir, ins);
         return;
     }
 
     if (ins->access().type() == Scalar::Int64) {
+
+#ifdef JS_CODEGEN_MIPS32
+        if(ins->access().isAtomic()) {
+            auto* lir = new(alloc()) LWasmAtomicStoreI64(useRegister(base), useInt64Register(value), temp());
+            add(lir, ins);
+            return;
+        }
+#endif
+
+        LAllocation baseAlloc = useRegisterAtStart(base);
         LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
         auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
         if (ins->access().offset())
             lir->setTemp(0, tempCopy(base, 0));
 
         add(lir, ins);
         return;
     }
 
+    LAllocation baseAlloc = useRegisterAtStart(base);
     LAllocation valueAlloc = useRegisterAtStart(value);
     auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
     if (ins->access().offset())
         lir->setTemp(0, tempCopy(base, 0));
 
     add(lir, ins);
 }
 
@@ -573,24 +592,34 @@ LIRGeneratorMIPSShared::visitCompareExch
     const LUse elements = useRegister(ins->elements());
     const LAllocation index = useRegisterOrConstant(ins->index());
 
     // If the target is a floating register then we need a temp at the
     // CodeGenerator level for creating the result.
 
     const LAllocation newval = useRegister(ins->newval());
     const LAllocation oldval = useRegister(ins->oldval());
-    LDefinition uint32Temp = LDefinition::BogusTemp();
+
+    LDefinition outTemp = LDefinition::BogusTemp();
+    LDefinition valueTemp = LDefinition::BogusTemp();
+    LDefinition offsetTemp = LDefinition::BogusTemp();
+    LDefinition maskTemp = LDefinition::BogusTemp();
+
     if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
-        uint32Temp = temp();
+        outTemp = temp();
+
+    if (Scalar::byteSize(ins->arrayType()) < 4) {
+        valueTemp = temp();
+        offsetTemp = temp();
+        maskTemp = temp();
+    }
 
     LCompareExchangeTypedArrayElement* lir =
-        new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, uint32Temp,
-                                                       /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
-                                                       /* maskTemp= */ temp());
+        new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, outTemp,
+                                                      valueTemp, offsetTemp, maskTemp);
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
 {
     MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
@@ -600,100 +629,139 @@ LIRGeneratorMIPSShared::visitAtomicExcha
 
     const LUse elements = useRegister(ins->elements());
     const LAllocation index = useRegisterOrConstant(ins->index());
 
     // If the target is a floating register then we need a temp at the
     // CodeGenerator level for creating the result.
 
     const LAllocation value = useRegister(ins->value());
-    LDefinition uint32Temp = LDefinition::BogusTemp();
+    LDefinition outTemp = LDefinition::BogusTemp();
+    LDefinition valueTemp = LDefinition::BogusTemp();
+    LDefinition offsetTemp = LDefinition::BogusTemp();
+    LDefinition maskTemp = LDefinition::BogusTemp();
+
     if (ins->arrayType() == Scalar::Uint32) {
         MOZ_ASSERT(ins->type() == MIRType::Double);
-        uint32Temp = temp();
+        outTemp = temp();
+    }
+
+    if (Scalar::byteSize(ins->arrayType()) < 4) {
+        valueTemp = temp();
+        offsetTemp = temp();
+        maskTemp = temp();
     }
 
     LAtomicExchangeTypedArrayElement* lir =
-        new(alloc()) LAtomicExchangeTypedArrayElement(elements, index, value, uint32Temp,
-                                                      /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
-                                                      /* maskTemp= */ temp());
+        new(alloc()) LAtomicExchangeTypedArrayElement(elements, index, value, outTemp,
+                                                      valueTemp, offsetTemp, maskTemp);
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins)
 {
-    MOZ_ASSERT(ins->access().type() < Scalar::Float32);
-    MOZ_ASSERT(ins->access().offset() == 0);
+    MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+
+    if (ins->access().type() == Scalar::Int64) {
+        auto* lir = new(alloc()) LWasmCompareExchangeI64(useRegister(ins->base()),
+                                                         useInt64Register(ins->oldValue()),
+                                                         useInt64Register(ins->newValue()));
+        defineInt64(lir, ins);
+        return;
+    }
 
-    MDefinition* base = ins->base();
-    MOZ_ASSERT(base->type() == MIRType::Int32);
+    LDefinition valueTemp = LDefinition::BogusTemp();
+    LDefinition offsetTemp = LDefinition::BogusTemp();
+    LDefinition maskTemp = LDefinition::BogusTemp();
+
+    if (ins->access().byteSize() < 4) {
+        valueTemp = temp();
+        offsetTemp = temp();
+        maskTemp = temp();
+    }
 
     LWasmCompareExchangeHeap* lir =
-        new(alloc()) LWasmCompareExchangeHeap(useRegister(base),
+        new(alloc()) LWasmCompareExchangeHeap(useRegister(ins->base()),
                                               useRegister(ins->oldValue()),
                                               useRegister(ins->newValue()),
-                                              /* valueTemp= */ temp(),
-                                              /* offsetTemp= */ temp(),
-                                              /* maskTemp= */ temp());
+                                              valueTemp, offsetTemp, maskTemp);
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins)
 {
     MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
-    MOZ_ASSERT(ins->access().offset() == 0);
+
+    if (ins->access().type() == Scalar::Int64) {
+        auto* lir = new(alloc()) LWasmAtomicExchangeI64(useRegister(ins->base()),
+                                                        useInt64Register(ins->value()));
+        defineInt64(lir, ins);
+        return;
+    }
 
-    const LAllocation base = useRegister(ins->base());
-    const LAllocation value = useRegister(ins->value());
+    LDefinition valueTemp = LDefinition::BogusTemp();
+    LDefinition offsetTemp = LDefinition::BogusTemp();
+    LDefinition maskTemp = LDefinition::BogusTemp();
 
-    // The output may not be used but will be clobbered regardless,
-    // so ignore the case where we're not using the value and just
-    // use the output register as a temp.
+    if (ins->access().byteSize() < 4) {
+        valueTemp = temp();
+        offsetTemp = temp();
+        maskTemp = temp();
+    }
 
     LWasmAtomicExchangeHeap* lir =
-        new(alloc()) LWasmAtomicExchangeHeap(base, value,
-                                             /* valueTemp= */ temp(),
-                                             /* offsetTemp= */ temp(),
-                                             /* maskTemp= */ temp());
+        new(alloc()) LWasmAtomicExchangeHeap(useRegister(ins->base()),
+                                             useRegister(ins->value()),
+                                             valueTemp, offsetTemp, maskTemp);
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins)
 {
-    MOZ_ASSERT(ins->access().type() < Scalar::Float32);
-    MOZ_ASSERT(ins->access().offset() == 0);
+    MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
 
-    MDefinition* base = ins->base();
-    MOZ_ASSERT(base->type() == MIRType::Int32);
+    if (ins->access().type() == Scalar::Int64) {
+        auto* lir = new(alloc()) LWasmAtomicBinopI64(useRegister(ins->base()),
+                                                     useInt64Register(ins->value()));
+        lir->setTemp(0, temp());
+#ifdef JS_CODEGEN_MIPS32
+        lir->setTemp(1, temp());
+#endif
+        defineInt64(lir, ins);
+        return;
+    }
+
+    LDefinition valueTemp = LDefinition::BogusTemp();
+    LDefinition offsetTemp = LDefinition::BogusTemp();
+    LDefinition maskTemp = LDefinition::BogusTemp();
+
+    if (ins->access().byteSize() < 4) {
+        valueTemp = temp();
+        offsetTemp = temp();
+        maskTemp = temp();
+    }
 
     if (!ins->hasUses()) {
         LWasmAtomicBinopHeapForEffect* lir =
-            new(alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base),
+            new(alloc()) LWasmAtomicBinopHeapForEffect(useRegister(ins->base()),
                                                        useRegister(ins->value()),
-                                                       /* flagTemp= */ temp(),
-                                                       /* valueTemp= */ temp(),
-                                                       /* offsetTemp= */ temp(),
-                                                       /* maskTemp= */ temp());
+                                                       valueTemp, offsetTemp, maskTemp);
         add(lir, ins);
         return;
     }
 
     LWasmAtomicBinopHeap* lir =
-        new(alloc()) LWasmAtomicBinopHeap(useRegister(base),
+        new(alloc()) LWasmAtomicBinopHeap(useRegister(ins->base()),
                                           useRegister(ins->value()),
-                                          /* temp= */ LDefinition::BogusTemp(),
-                                          /* flagTemp= */ temp(),
-                                          /* valueTemp= */ temp(),
-                                          /* offsetTemp= */ temp(),
-                                          /* maskTemp= */ temp());
+                                          valueTemp, offsetTemp, maskTemp);
 
     define(lir, ins);
 }
 
 void
 LIRGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
 {
     MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
@@ -702,42 +770,45 @@ LIRGeneratorMIPSShared::visitAtomicTyped
 
     MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
     MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
 
     const LUse elements = useRegister(ins->elements());
     const LAllocation index = useRegisterOrConstant(ins->index());
     const LAllocation value = useRegister(ins->value());
 
+    LDefinition valueTemp = LDefinition::BogusTemp();
+    LDefinition offsetTemp = LDefinition::BogusTemp();
+    LDefinition maskTemp = LDefinition::BogusTemp();
+
+    if (Scalar::byteSize(ins->arrayType()) < 4) {
+        valueTemp = temp();
+        offsetTemp = temp();
+        maskTemp = temp();
+    }
+
     if (!ins->hasUses()) {
         LAtomicTypedArrayElementBinopForEffect* lir =
             new(alloc()) LAtomicTypedArrayElementBinopForEffect(elements, index, value,
-                                                                /* flagTemp= */ temp(),
-                                                                /* valueTemp= */ temp(),
-                                                                /* offsetTemp= */ temp(),
-                                                                /* maskTemp= */ temp());
+                                                                valueTemp, offsetTemp, maskTemp);
         add(lir, ins);
         return;
     }
 
     // For a Uint32Array with a known double result we need a temp for
     // the intermediate output.
 
-    LDefinition flagTemp = temp();
     LDefinition outTemp = LDefinition::BogusTemp();
 
     if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
         outTemp = temp();
 
-    // On mips, map flagTemp to temp1 and outTemp to temp2, at least for now.
-
     LAtomicTypedArrayElementBinop* lir =
-        new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, flagTemp, outTemp,
-                                                   /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
-                                                   /* maskTemp= */ temp());
+        new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, outTemp,
+                                                   valueTemp, offsetTemp, maskTemp);
     define(lir, ins);
 }
 
 
 void
 LIRGeneratorMIPSShared::visitCopySign(MCopySign* ins)
 {
     MDefinition* lhs = ins->lhs();
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -1005,24 +1005,17 @@ void
 MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
 {
     MOZ_CRASH("NYI");
 }
 
 void
 MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
 {
-    if (barrier == MembarLoadLoad)
-        as_sync(19);
-    else if (barrier == MembarStoreStore)
-        as_sync(4);
-    else if (barrier & MembarSynchronizing)
-        as_sync();
-    else if (barrier)
-        as_sync(16);
+    as_sync();
 }
 
 // ===============================================================
 // Clamping functions.
 
 void
 MacroAssembler::clampIntToUint8(Register reg)
 {
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -1312,280 +1312,16 @@ MacroAssemblerMIPSShared::asMasm()
 }
 
 const MacroAssembler&
 MacroAssemblerMIPSShared::asMasm() const
 {
     return *static_cast<const MacroAssembler*>(this);
 }
 
-void
-MacroAssemblerMIPSShared::atomicEffectOpMIPSr2(int nbytes, AtomicOp op,
-                                               const Register& value, const Register& addr,
-                                               Register flagTemp, Register valueTemp,
-                                               Register offsetTemp, Register maskTemp)
-{
-    atomicFetchOpMIPSr2(nbytes, false, op, value, addr, flagTemp,
-                        valueTemp, offsetTemp, maskTemp, InvalidReg);
-}
-
-void
-MacroAssemblerMIPSShared::atomicFetchOpMIPSr2(int nbytes, bool signExtend, AtomicOp op, const Register& value,
-                                              const Register& addr, Register flagTemp, Register valueTemp,
-                                              Register offsetTemp, Register maskTemp, Register output)
-{
-    Label again;
-
-    as_andi(offsetTemp, addr, 3);
-    asMasm().subPtr(offsetTemp, addr);
-    as_sll(offsetTemp, offsetTemp, 3);
-    ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
-    as_sllv(maskTemp, maskTemp, offsetTemp);
-
-    bind(&again);
-
-    as_sync(16);
-
-    as_ll(flagTemp, addr, 0);
-
-    as_sllv(valueTemp, value, offsetTemp);
-    if (output != InvalidReg) {
-        as_and(output, flagTemp, maskTemp);
-        as_srlv(output, output, offsetTemp);
-        if (signExtend) {
-            switch (nbytes) {
-            case 1:
-                ma_seb(output, output);
-                break;
-            case 2:
-                ma_seh(output, output);
-                break;
-            case 4:
-                break;
-            default:
-                MOZ_CRASH("NYI");
-            }
-        }
-    }
-
-    switch (op) {
-    case AtomicFetchAddOp:
-        as_addu(valueTemp, flagTemp, valueTemp);
-        break;
-    case AtomicFetchSubOp:
-        as_subu(valueTemp, flagTemp, valueTemp);
-        break;
-    case AtomicFetchAndOp:
-        as_and(valueTemp, flagTemp, valueTemp);
-        break;
-    case AtomicFetchOrOp:
-        as_or(valueTemp, flagTemp, valueTemp);
-        break;
-    case AtomicFetchXorOp:
-        as_xor(valueTemp, flagTemp, valueTemp);
-        break;
-    default:
-        MOZ_CRASH("NYI");
-    }
-
-    as_and(valueTemp, valueTemp, maskTemp);
-    as_or(flagTemp, flagTemp, maskTemp);
-    as_xor(flagTemp, flagTemp, maskTemp);
-    as_or(flagTemp, flagTemp, valueTemp);
-
-    as_sc(flagTemp, addr, 0);
-
-    ma_b(flagTemp, flagTemp, &again, Zero, ShortJump);
-
-    as_sync(0);
-}
-
-void
-MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
-                                         const Address& address, Register flagTemp,
-                                         Register valueTemp, Register offsetTemp, Register maskTemp)
-{
-    ma_li(SecondScratchReg, value);
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    atomicEffectOpMIPSr2(nbytes, op, SecondScratchReg, ScratchRegister,
-                         flagTemp, valueTemp, offsetTemp, maskTemp);
-}
-
-void
-MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
-                                         const BaseIndex& address, Register flagTemp,
-                                         Register valueTemp, Register offsetTemp, Register maskTemp)
-{
-    ma_li(SecondScratchReg, value);
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    atomicEffectOpMIPSr2(nbytes, op, SecondScratchReg, ScratchRegister,
-                         flagTemp, valueTemp, offsetTemp, maskTemp);
-}
-
-void
-MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
-                                         const Address& address, Register flagTemp,
-                                         Register valueTemp, Register offsetTemp, Register maskTemp)
-{
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    atomicEffectOpMIPSr2(nbytes, op, value, ScratchRegister,
-                         flagTemp, valueTemp, offsetTemp, maskTemp);
-}
-
-void
-MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
-                                         const BaseIndex& address, Register flagTemp,
-                                         Register valueTemp, Register offsetTemp, Register maskTemp)
-{
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    atomicEffectOpMIPSr2(nbytes, op, value, ScratchRegister,
-                         flagTemp, valueTemp, offsetTemp, maskTemp);
-}
-
-void
-MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
-                                        const Address& address, Register flagTemp, Register valueTemp,
-                                        Register offsetTemp, Register maskTemp, Register output)
-{
-    ma_li(SecondScratchReg, value);
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    atomicFetchOpMIPSr2(nbytes, signExtend, op, SecondScratchReg, ScratchRegister,
-                        flagTemp, valueTemp, offsetTemp, maskTemp, output);
-}
-
-void
-MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
-                                        const BaseIndex& address, Register flagTemp, Register valueTemp,
-                                        Register offsetTemp, Register maskTemp, Register output)
-{
-    ma_li(SecondScratchReg, value);
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    atomicFetchOpMIPSr2(nbytes, signExtend, op, SecondScratchReg, ScratchRegister,
-                        flagTemp, valueTemp, offsetTemp, maskTemp, output);
-}
-
-void
-MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
-                                        const Address& address, Register flagTemp, Register valueTemp,
-                                        Register offsetTemp, Register maskTemp, Register output)
-{
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    atomicFetchOpMIPSr2(nbytes, signExtend, op, value, ScratchRegister,
-                        flagTemp, valueTemp, offsetTemp, maskTemp, output);
-}
-
-void
-MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
-                                        const BaseIndex& address, Register flagTemp, Register valueTemp,
-                                        Register offsetTemp, Register maskTemp, Register output)
-{
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    atomicFetchOpMIPSr2(nbytes, signExtend, op, value, ScratchRegister,
-                        flagTemp, valueTemp, offsetTemp, maskTemp, output);
-}
-
-void
-MacroAssemblerMIPSShared::compareExchangeMIPSr2(int nbytes, bool signExtend, const Register& addr,
-                                                Register oldval, Register newval, Register flagTemp,
-                                                Register valueTemp, Register offsetTemp, Register maskTemp,
-                                                Register output)
-{
-    Label again, end;
-
-    as_andi(offsetTemp, addr, 3);
-    asMasm().subPtr(offsetTemp, addr);
-    as_sll(offsetTemp, offsetTemp, 3);
-    ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
-    as_sllv(maskTemp, maskTemp, offsetTemp);
-
-    bind(&again);
-
-    as_sync(16);
-
-    as_ll(flagTemp, addr, 0);
-
-    as_and(output, flagTemp, maskTemp);
-    // If oldval is valid register, do compareExchange
-    if (InvalidReg != oldval) {
-        as_sllv(valueTemp, oldval, offsetTemp);
-        as_and(valueTemp, valueTemp, maskTemp);
-        ma_b(output, valueTemp, &end, NotEqual, ShortJump);
-    }
-
-    as_sllv(valueTemp, newval, offsetTemp);
-    as_and(valueTemp, valueTemp, maskTemp);
-    as_or(flagTemp, flagTemp, maskTemp);
-    as_xor(flagTemp, flagTemp, maskTemp);
-    as_or(flagTemp, flagTemp, valueTemp);
-
-    as_sc(flagTemp, addr, 0);
-
-    ma_b(flagTemp, flagTemp, &again, Zero, ShortJump);
-
-    as_sync(0);
-
-    bind(&end);
-
-    as_srlv(output, output, offsetTemp);
-    if (signExtend) {
-        switch (nbytes) {
-        case 1:
-            ma_seb(output, output);
-            break;
-        case 2:
-            ma_seh(output, output);
-            break;
-        case 4:
-            break;
-        default:
-            MOZ_CRASH("NYI");
-        }
-    }
-}
-
-void
-MacroAssemblerMIPSShared::compareExchange(int nbytes, bool signExtend, const Address& address,
-                                          Register oldval, Register newval, Register valueTemp,
-                                          Register offsetTemp, Register maskTemp, Register output)
-{
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, oldval, newval, SecondScratchReg,
-                          valueTemp, offsetTemp, maskTemp, output);
-}
-
-void
-MacroAssemblerMIPSShared::compareExchange(int nbytes, bool signExtend, const BaseIndex& address,
-                                          Register oldval, Register newval, Register valueTemp,
-                                          Register offsetTemp, Register maskTemp, Register output)
-{
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, oldval, newval, SecondScratchReg,
-                          valueTemp, offsetTemp, maskTemp, output);
-}
-
-void
-MacroAssemblerMIPSShared::atomicExchange(int nbytes, bool signExtend, const Address& address,
-                                         Register value, Register valueTemp, Register offsetTemp,
-                                         Register maskTemp, Register output)
-{
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, InvalidReg, value, SecondScratchReg,
-                          valueTemp, offsetTemp, maskTemp, output);
-}
-
-void
-MacroAssemblerMIPSShared::atomicExchange(int nbytes, bool signExtend, const BaseIndex& address,
-                                         Register value, Register valueTemp, Register offsetTemp,
-                                         Register maskTemp, Register output)
-{
-    asMasm().computeEffectiveAddress(address, ScratchRegister);
-    compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, InvalidReg, value, SecondScratchReg,
-                          valueTemp, offsetTemp, maskTemp, output);
-}
-
 //{{{ check_macroassembler_style
 // ===============================================================
 // MacroAssembler high-level usage.
 
 void
 MacroAssembler::flush()
 {
 }
@@ -1899,9 +1635,653 @@ MacroAssembler::wasmTruncateFloat32ToInt
 {
     as_truncws(ScratchFloat32Reg, input);
     as_cfc1(ScratchRegister, Assembler::FCSR);
     moveFromFloat32(ScratchFloat32Reg, output);
     ma_ext(ScratchRegister, ScratchRegister, 6, 1);
     ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
 }
 
-//}}} check_macroassembler_style
+// ========================================================================
+// Primitive atomic operations.
+
+template<typename T>
+static void
+CompareExchange(MacroAssembler& masm, Scalar::Type type, const Synchronization& sync, const T& mem,
+                Register oldval, Register newval, Register valueTemp, Register offsetTemp,
+                Register maskTemp, Register output)
+{
+    bool signExtend = Scalar::isSignedIntType(type);
+    unsigned nbytes = Scalar::byteSize(type);
+
+    switch (nbytes) {
+        case 1:
+        case 2:
+            break;
+        case 4:
+            MOZ_ASSERT(valueTemp == InvalidReg);
+            MOZ_ASSERT(offsetTemp == InvalidReg);
+            MOZ_ASSERT(maskTemp == InvalidReg);
+            break;
+        default:
+            MOZ_CRASH();
+    }
+
+    Label again, end;
+
+    masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+    if (nbytes == 4) {
+
+        masm.memoryBarrierBefore(sync);
+        masm.bind(&again);
+
+        masm.as_ll(output, SecondScratchReg, 0);
+        masm.ma_b(output, oldval, &end, Assembler::NotEqual, ShortJump);
+        masm.ma_move(ScratchRegister, newval);
+        masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+        masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
+
+        masm.memoryBarrierAfter(sync);
+        masm.bind(&end);
+
+        return;
+    }
+
+    masm.as_andi(offsetTemp, SecondScratchReg, 3);
+    masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN
+    masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+    masm.as_sll(offsetTemp, offsetTemp, 3);
+    masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+    masm.as_sllv(maskTemp, maskTemp, offsetTemp);
+    masm.as_nor(maskTemp, zero, maskTemp);
+
+    masm.memoryBarrierBefore(sync);
+
+    masm.bind(&again);
+
+    masm.as_ll(ScratchRegister, SecondScratchReg, 0);
+
+    masm.as_srlv(output, ScratchRegister, offsetTemp);
+
+    switch (nbytes) {
+        case 1:
+            if (signExtend) {
+                masm.ma_seb(valueTemp, oldval);
+                masm.ma_seb(output, output);
+            } else {
+                masm.as_andi(valueTemp, oldval, 0xff);
+                masm.as_andi(output, output, 0xff);
+            }
+            break;
+        case 2:
+            if (signExtend) {
+                masm.ma_seh(valueTemp, oldval);
+                masm.ma_seh(output, output);
+            } else {
+                masm.as_andi(valueTemp, oldval, 0xffff);
+                masm.as_andi(output, output, 0xffff);
+            }
+            break;
+    }
+
+    masm.ma_b(output, valueTemp, &end, Assembler::NotEqual, ShortJump);
+
+    masm.as_sllv(valueTemp, newval, offsetTemp);
+    masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
+    masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
+
+    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+
+    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
+
+    masm.memoryBarrierAfter(sync);
+
+    masm.bind(&end);
+
+}
+
+
+void
+MacroAssembler::compareExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
+                                Register oldval, Register newval, Register valueTemp,
+                                Register offsetTemp, Register maskTemp, Register output)
+{
+    CompareExchange(*this, type, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
+                    output);
+}
+
+void
+MacroAssembler::compareExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
+                                Register oldval, Register newval, Register valueTemp,
+                                Register offsetTemp, Register maskTemp, Register output)
+{
+    CompareExchange(*this, type, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
+                    output);
+}
+
+
+template<typename T>
+static void
+AtomicExchange(MacroAssembler& masm, Scalar::Type type, const Synchronization& sync, const T& mem,
+               Register value, Register valueTemp, Register offsetTemp, Register maskTemp,
+               Register output)
+{
+    bool signExtend = Scalar::isSignedIntType(type);
+    unsigned nbytes = Scalar::byteSize(type);
+
+    switch (nbytes) {
+        case 1:
+        case 2:
+            break;
+        case 4:
+            MOZ_ASSERT(valueTemp == InvalidReg);
+            MOZ_ASSERT(offsetTemp == InvalidReg);
+            MOZ_ASSERT(maskTemp == InvalidReg);
+            break;
+        default:
+            MOZ_CRASH();
+    }
+
+    Label again;
+
+    masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+    if (nbytes == 4) {
+
+        masm.memoryBarrierBefore(sync);
+        masm.bind(&again);
+
+        masm.as_ll(output, SecondScratchReg, 0);
+        masm.ma_move(ScratchRegister, value);
+        masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+        masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
+
+        masm.memoryBarrierAfter(sync);
+
+        return;
+    }
+
+    masm.as_andi(offsetTemp, SecondScratchReg, 3);
+    masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN
+    masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+    masm.as_sll(offsetTemp, offsetTemp, 3);
+    masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+    masm.as_sllv(maskTemp, maskTemp, offsetTemp);
+    masm.as_nor(maskTemp, zero, maskTemp);
+    switch (nbytes) {
+        case 1:
+            masm.as_andi(valueTemp, value, 0xff);
+            break;
+        case 2:
+            masm.as_andi(valueTemp, value, 0xffff);
+            break;
+    }
+    masm.as_sllv(valueTemp, valueTemp, offsetTemp);
+
+    masm.memoryBarrierBefore(sync);
+
+    masm.bind(&again);
+
+    masm.as_ll(output, SecondScratchReg, 0);
+    masm.as_and(ScratchRegister, output, maskTemp);
+    masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
+
+    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+
+    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
+
+    masm.as_srlv(output, output, offsetTemp);
+
+    switch (nbytes) {
+        case 1:
+            if (signExtend) {
+                masm.ma_seb(output, output);
+            } else {
+                masm.as_andi(output, output, 0xff);
+            }
+            break;
+        case 2:
+            if (signExtend) {
+                masm.ma_seh(output, output);
+            } else {
+                masm.as_andi(output, output, 0xffff);
+            }
+            break;
+    }
+
+    masm.memoryBarrierAfter(sync);
+}
+
+
+void
+MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
+                               Register value, Register valueTemp, Register offsetTemp,
+                               Register maskTemp, Register output)
+{
+    AtomicExchange(*this, type, sync, mem, value, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssembler::atomicExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
+                               Register value, Register valueTemp, Register offsetTemp,
+                               Register maskTemp, Register output)
+{
+    AtomicExchange(*this, type, sync, mem, value, valueTemp, offsetTemp, maskTemp, output);
+}
+
+
+template<typename T>
+static void
+AtomicFetchOp(MacroAssembler& masm, Scalar::Type type, const Synchronization& sync,
+              AtomicOp op, const T& mem, Register value, Register valueTemp,
+              Register offsetTemp, Register maskTemp, Register output)
+{
+    bool signExtend = Scalar::isSignedIntType(type);
+    unsigned nbytes = Scalar::byteSize(type);
+
+    switch (nbytes) {
+        case 1:
+        case 2:
+            break;
+        case 4:
+            MOZ_ASSERT(valueTemp == InvalidReg);
+            MOZ_ASSERT(offsetTemp == InvalidReg);
+            MOZ_ASSERT(maskTemp == InvalidReg);
+            break;
+        default:
+            MOZ_CRASH();
+    }
+
+    Label again;
+
+    masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+    if (nbytes == 4) {
+
+        masm.memoryBarrierBefore(sync);
+        masm.bind(&again);
+
+        masm.as_ll(output, SecondScratchReg, 0);
+
+        switch (op) {
+        case AtomicFetchAddOp:
+            masm.as_addu(ScratchRegister, output, value);
+            break;
+        case AtomicFetchSubOp:
+            masm.as_subu(ScratchRegister, output, value);
+            break;
+        case AtomicFetchAndOp:
+            masm.as_and(ScratchRegister, output, value);
+            break;
+        case AtomicFetchOrOp:
+            masm.as_or(ScratchRegister, output, value);
+            break;
+        case AtomicFetchXorOp:
+            masm.as_xor(ScratchRegister, output, value);
+            break;
+        default:
+            MOZ_CRASH();
+        }
+
+        masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+        masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
+
+        masm.memoryBarrierAfter(sync);
+
+        return;
+    }
+
+
+    masm.as_andi(offsetTemp, SecondScratchReg, 3);
+    masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN
+    masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+    masm.as_sll(offsetTemp, offsetTemp, 3);
+    masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+    masm.as_sllv(maskTemp, maskTemp, offsetTemp);
+    masm.as_nor(maskTemp, zero, maskTemp);
+
+    masm.memoryBarrierBefore(sync);
+
+    masm.bind(&again);
+
+    masm.as_ll(ScratchRegister, SecondScratchReg, 0);
+    masm.as_srlv(output, ScratchRegister, offsetTemp);
+
+    switch (op) {
+        case AtomicFetchAddOp:
+            masm.as_addu(valueTemp, output, value);
+            break;
+        case AtomicFetchSubOp:
+            masm.as_subu(valueTemp, output, value);
+            break;
+        case AtomicFetchAndOp:
+            masm.as_and(valueTemp, output, value);
+            break;
+        case AtomicFetchOrOp:
+            masm.as_or(valueTemp, output, value);
+            break;
+        case AtomicFetchXorOp:
+            masm.as_xor(valueTemp, output, value);
+            break;
+        default:
+            MOZ_CRASH();
+    }
+
+    switch (nbytes) {
+        case 1:
+            masm.as_andi(valueTemp, valueTemp, 0xff);
+            break;
+        case 2:
+            masm.as_andi(valueTemp, valueTemp, 0xffff);
+            break;
+    }
+
+    masm.as_sllv(valueTemp, valueTemp, offsetTemp);
+
+    masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
+    masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
+
+    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+
+    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
+
+    switch (nbytes) {
+        case 1:
+            if (signExtend) {
+                masm.ma_seb(output, output);
+            } else {
+                masm.as_andi(output, output, 0xff);
+            }
+            break;
+        case 2:
+            if (signExtend) {
+                masm.ma_seh(output, output);
+            } else {
+                masm.as_andi(output, output, 0xffff);
+            }
+            break;
+    }
+
+    masm.memoryBarrierAfter(sync);
+}
+
+void
+MacroAssembler::atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
+                              Register value, const Address& mem, Register valueTemp,
+                              Register offsetTemp, Register maskTemp, Register output)
+{
+    AtomicFetchOp(*this, type, sync, op, mem, value, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssembler::atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
+                              Register value, const BaseIndex& mem, Register valueTemp,
+                              Register offsetTemp, Register maskTemp, Register output)
+{
+    AtomicFetchOp(*this, type, sync, op, mem, value, valueTemp, offsetTemp, maskTemp, output);
+}
+
+template<typename T>
+static void
+AtomicEffectOp(MacroAssembler& masm, Scalar::Type type, const Synchronization& sync, AtomicOp op,
+        const T& mem, Register value, Register valueTemp, Register offsetTemp, Register maskTemp)
+{
+    unsigned nbytes = Scalar::byteSize(type);
+
+    switch (nbytes) {
+        case 1:
+        case 2:
+            break;
+        case 4:
+            MOZ_ASSERT(valueTemp == InvalidReg);
+            MOZ_ASSERT(offsetTemp == InvalidReg);
+            MOZ_ASSERT(maskTemp == InvalidReg);
+            break;
+        default:
+            MOZ_CRASH();
+    }
+
+    Label again;
+
+    masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+    if (nbytes == 4) {
+
+        masm.memoryBarrierBefore(sync);
+        masm.bind(&again);
+
+        masm.as_ll(ScratchRegister, SecondScratchReg, 0);
+
+        switch (op) {
+        case AtomicFetchAddOp:
+            masm.as_addu(ScratchRegister, ScratchRegister, value);
+            break;
+        case AtomicFetchSubOp:
+            masm.as_subu(ScratchRegister, ScratchRegister, value);
+            break;
+        case AtomicFetchAndOp:
+            masm.as_and(ScratchRegister, ScratchRegister, value);
+            break;
+        case AtomicFetchOrOp:
+            masm.as_or(ScratchRegister, ScratchRegister, value);
+            break;
+        case AtomicFetchXorOp:
+            masm.as_xor(ScratchRegister, ScratchRegister, value);
+            break;
+        default:
+            MOZ_CRASH();
+        }
+
+        masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+        masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
+
+        masm.memoryBarrierAfter(sync);
+
+        return;
+    }
+
+    masm.as_andi(offsetTemp, SecondScratchReg, 3);
+    masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN
+    masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+    masm.as_sll(offsetTemp, offsetTemp, 3);
+    masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+    masm.as_sllv(maskTemp, maskTemp, offsetTemp);
+    masm.as_nor(maskTemp, zero, maskTemp);
+
+    masm.memoryBarrierBefore(sync);
+
+    masm.bind(&again);
+
+    masm.as_ll(ScratchRegister, SecondScratchReg, 0);
+    masm.as_srlv(valueTemp, ScratchRegister, offsetTemp);
+
+    switch (op) {
+        case AtomicFetchAddOp:
+            masm.as_addu(valueTemp, valueTemp, value);
+            break;
+        case AtomicFetchSubOp:
+            masm.as_subu(valueTemp, valueTemp, value);
+            break;
+        case AtomicFetchAndOp:
+            masm.as_and(valueTemp, valueTemp, value);
+            break;
+        case AtomicFetchOrOp:
+            masm.as_or(valueTemp, valueTemp, value);
+            break;
+        case AtomicFetchXorOp:
+            masm.as_xor(valueTemp, valueTemp, value);
+            break;
+        default:
+            MOZ_CRASH();
+    }
+
+    switch (nbytes) {
+        case 1:
+            masm.as_andi(valueTemp, valueTemp, 0xff);
+            break;
+        case 2:
+            masm.as_andi(valueTemp, valueTemp, 0xffff);
+            break;
+    }
+
+    masm.as_sllv(valueTemp, valueTemp, offsetTemp);
+
+    masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
+    masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
+
+    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+
+    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero, ShortJump);
+
+    masm.memoryBarrierAfter(sync);
+}
+
+
+void
+MacroAssembler::atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
+                               Register value, const Address& mem, Register valueTemp,
+                               Register offsetTemp, Register maskTemp)
+{
+    AtomicEffectOp(*this, type, sync, op, mem, value, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+MacroAssembler::atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
+                               Register value, const BaseIndex& mem, Register valueTemp,
+                               Register offsetTemp, Register maskTemp)
+{
+    AtomicEffectOp(*this, type, sync, op, mem, value, valueTemp, offsetTemp, maskTemp);
+}
+
+// ========================================================================
+// JS atomic operations.
+
+template<typename T>
+static void
+CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                  const T& mem, Register oldval, Register newval, Register valueTemp,
+                  Register offsetTemp, Register maskTemp, Register temp, AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
+                             temp);
+        masm.convertUInt32ToDouble(temp, output.fpu());
+    } else {
+        masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
+                             output.gpr());
+    }
+}
+
+void
+MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                  const Address& mem, Register oldval, Register newval,
+                                  Register valueTemp, Register offsetTemp, Register maskTemp,
+                                  Register temp, AnyRegister output)
+{
+    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
+                      temp, output);
+}
+
+void
+MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                  const BaseIndex& mem, Register oldval, Register newval,
+                                  Register valueTemp, Register offsetTemp, Register maskTemp,
+                                  Register temp, AnyRegister output)
+{
+    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp, offsetTemp, maskTemp,
+                      temp, output);
+}
+
+template<typename T>
+static void
+AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                 const T& mem, Register value, Register valueTemp,
+                 Register offsetTemp, Register maskTemp, Register temp, AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp, maskTemp, temp);
+        masm.convertUInt32ToDouble(temp, output.fpu());
+    } else {
+        masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp, maskTemp,
+                            output.gpr());
+    }
+}
+
+void
+MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                 const Address& mem, Register value, Register valueTemp,
+                                 Register offsetTemp, Register maskTemp, Register temp,
+                                 AnyRegister output)
+{
+    AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp, maskTemp, temp,
+                     output);
+}
+
+void
+MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                 const BaseIndex& mem, Register value, Register valueTemp,
+                                 Register offsetTemp, Register maskTemp, Register temp,
+                                 AnyRegister output)
+{
+    AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp, maskTemp, temp, output);
+}
+
+template<typename T>
+static void
+AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                AtomicOp op, Register value, const T& mem, Register valueTemp,
+                Register offsetTemp, Register maskTemp, Register temp,
+                AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp, temp);
+        masm.convertUInt32ToDouble(temp, output.fpu());
+    } else {
+        masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp,
+                           output.gpr());
+    }
+}
+
+void
+MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                Register value, const Address& mem, Register valueTemp,
+                                Register offsetTemp, Register maskTemp, Register temp,
+                                AnyRegister output)
+{
+    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp, temp,
+                    output);
+}
+
+void
+MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                Register value, const BaseIndex& mem, Register valueTemp,
+                                Register offsetTemp, Register maskTemp, Register temp,
+                                AnyRegister output)
+{
+    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp, temp,
+                    output);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Register value, const BaseIndex& mem, Register valueTemp,
+                                 Register offsetTemp, Register maskTemp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Register value, const Address& mem, Register valueTemp,
+                                 Register offsetTemp, Register maskTemp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp, maskTemp);
+}
+
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
@@ -211,61 +211,14 @@ class MacroAssemblerMIPSShared : public 
     void moveFromFloat32(FloatRegister src, Register dest) {
         as_mfc1(dest, src);
     }
 
     // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
     // Handle NaN specially if handleNaN is true.
     void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
     void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
-
-  private:
-    void atomicEffectOpMIPSr2(int nbytes, AtomicOp op, const Register& value, const Register& addr,
-                              Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
-    void atomicFetchOpMIPSr2(int nbytes, bool signExtend, AtomicOp op, const Register& value, const Register& addr,
-                             Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp,
-                             Register output);
-    void compareExchangeMIPSr2(int nbytes, bool signExtend, const Register& addr, Register oldval,
-                               Register newval, Register flagTemp, Register valueTemp, Register offsetTemp,
-                               Register maskTemp, Register output);
-
-  protected:
-    void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const Address& address,
-                        Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
-    void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const BaseIndex& address,
-                        Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
-    void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const Address& address,
-                        Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
-    void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const BaseIndex& address,
-                        Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
-
-    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
-                       const Address& address, Register flagTemp, Register valueTemp,
-                       Register offsetTemp, Register maskTemp, Register output);
-    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
-                       const BaseIndex& address, Register flagTemp, Register valueTemp,
-                       Register offsetTemp, Register maskTemp, Register output);
-    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
-                       const Address& address, Register flagTemp, Register valueTemp,
-                       Register offsetTemp, Register maskTemp, Register output);
-    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
-                       const BaseIndex& address, Register flagTemp, Register valueTemp,
-                       Register offsetTemp, Register maskTemp, Register output);
-
-    void compareExchange(int nbytes, bool signExtend, const Address& address, Register oldval,
-                         Register newval, Register valueTemp, Register offsetTemp, Register maskTemp,
-                         Register output);
-    void compareExchange(int nbytes, bool signExtend, const BaseIndex& address, Register oldval,
-                         Register newval, Register valueTemp, Register offsetTemp, Register maskTemp,
-                         Register output);
-
-    void atomicExchange(int nbytes, bool signExtend, const Address& address, Register value,
-                        Register valueTemp, Register offsetTemp, Register maskTemp,
-                        Register output);
-    void atomicExchange(int nbytes, bool signExtend, const BaseIndex& address, Register value,
-                        Register valueTemp, Register offsetTemp, Register maskTemp,
-                        Register output);
 };
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_mips_shared_MacroAssembler_mips_shared_h */
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -817,8 +817,33 @@ CodeGeneratorMIPS::visitTestI64AndBranch
 
 void
 CodeGeneratorMIPS::setReturnDoubleRegs(LiveRegisterSet* regs)
 {
     MOZ_ASSERT(ReturnFloat32Reg.code_ == ReturnDoubleReg.code_);
     regs->add(ReturnFloat32Reg);
     regs->add(ReturnDoubleReg);
 }
+
+void
+CodeGeneratorMIPS::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* lir)
+{
+    Register ptr = ToRegister(lir->ptr());
+    Register64 output = ToOutRegister64(lir);
+    uint32_t offset = lir->mir()->access().offset();
+
+    BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+
+    masm.atomicLoad64(Synchronization::Full(), addr, Register64::Invalid(), output);
+}
+
+void
+CodeGeneratorMIPS::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* lir)
+{
+    Register ptr = ToRegister(lir->ptr());
+    Register64 value = ToRegister64(lir->value());
+    Register tmp = ToRegister(lir->tmp());
+    uint32_t offset = lir->mir()->access().offset();
+
+    BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+
+    masm.atomicStore64(addr, tmp, value);
+}
--- a/js/src/jit/mips32/CodeGenerator-mips32.h
+++ b/js/src/jit/mips32/CodeGenerator-mips32.h
@@ -80,16 +80,18 @@ class CodeGeneratorMIPS : public CodeGen
       : CodeGeneratorMIPSShared(gen, graph, masm)
     { }
 
   public:
     void visitBox(LBox* box);
     void visitBoxFloatingPoint(LBoxFloatingPoint* box);
     void visitUnbox(LUnbox* unbox);
     void setReturnDoubleRegs(LiveRegisterSet* regs);
+    void visitWasmAtomicLoadI64(LWasmAtomicLoadI64* lir);
+    void visitWasmAtomicStoreI64(LWasmAtomicStoreI64* lir);
 };
 
 typedef CodeGeneratorMIPS CodeGeneratorSpecific;
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_mips32_CodeGenerator_mips32_h */
--- a/js/src/jit/mips32/LIR-mips32.h
+++ b/js/src/jit/mips32/LIR-mips32.h
@@ -172,13 +172,56 @@ class LInt64ToFloatingPoint : public LCa
         setInt64Operand(0, in);
     }
 
     MInt64ToFloatingPoint* mir() const {
         return mir_->toInt64ToFloatingPoint();
     }
 };
 
+class LWasmAtomicLoadI64 : public LInstructionHelper<INT64_PIECES, 1, 0>
+{
+  public:
+    LIR_HEADER(WasmAtomicLoadI64);
+
+    LWasmAtomicLoadI64(const LAllocation& ptr)
+    {
+        setOperand(0, ptr);
+    }
+
+    const LAllocation* ptr() {
+        return getOperand(0);
+    }
+    const MWasmLoad* mir() const {
+        return mir_->toWasmLoad();
+    }
+};
+
+class LWasmAtomicStoreI64 : public LInstructionHelper<0, 1 + INT64_PIECES, 1>
+{
+  public:
+    LIR_HEADER(WasmAtomicStoreI64);
+
+    LWasmAtomicStoreI64(const LAllocation& ptr, const LInt64Allocation& value, const LDefinition& tmp)
+    {
+        setOperand(0, ptr);
+        setInt64Operand(1, value);
+        setTemp(0, tmp);
+    }
+
+    const LAllocation* ptr() {
+        return getOperand(0);
+    }
+    const LInt64Allocation value() {
+        return getInt64Operand(1);
+    }
+    const LDefinition* tmp() {
+        return getTemp(0);
+    }
+    const MWasmStore* mir() const {
+        return mir_->toWasmStore();
+    }
+};
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_mips32_LIR_mips32_h */
--- a/js/src/jit/mips32/LOpcodes-mips32.h
+++ b/js/src/jit/mips32/LOpcodes-mips32.h
@@ -15,11 +15,16 @@
     _(UDivOrMod)                \
     _(DivOrModI64)              \
     _(UDivOrModI64)             \
     _(WasmUnalignedLoad)        \
     _(WasmUnalignedStore)       \
     _(WasmUnalignedLoadI64)     \
     _(WasmUnalignedStoreI64)    \
     _(WasmTruncateToInt64)      \
-    _(Int64ToFloatingPoint)
+    _(Int64ToFloatingPoint)     \
+    _(WasmCompareExchangeI64)   \
+    _(WasmAtomicExchangeI64)    \
+    _(WasmAtomicBinopI64)       \
+    _(WasmAtomicLoadI64)        \
+    _(WasmAtomicStoreI64)
 
 #endif // jit_mips32_LOpcodes_mips32_h__
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -1937,109 +1937,16 @@ MacroAssemblerMIPSCompat::handleFailureW
     // FP; SP is pointing to the unwound return address to the wasm entry, so
     // we can just ret().
     bind(&wasm);
     loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), FramePointer);
     loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
     ret();
 }
 
-template<typename T>
-void
-MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
-                                                         Register oldval, Register newval,
-                                                         Register temp, Register valueTemp,
-                                                         Register offsetTemp, Register maskTemp,
-                                                         AnyRegister output)
-{
-    switch (arrayType) {
-      case Scalar::Int8:
-        compareExchange8SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint8:
-        compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Int16:
-        compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint16:
-        compareExchange16ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Int32:
-        compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint32:
-        // At the moment, the code in MCallOptimize.cpp requires the output
-        // type to be double for uint32 arrays.  See bug 1077305.
-        MOZ_ASSERT(output.isFloat());
-        compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, temp);
-        convertUInt32ToDouble(temp, output.fpu());
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
-                                                         Register oldval, Register newval, Register temp,
-                                                         Register valueTemp, Register offsetTemp, Register maskTemp,
-                                                         AnyRegister output);
-template void
-MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
-                                                         Register oldval, Register newval, Register temp,
-                                                         Register valueTemp, Register offsetTemp, Register maskTemp,
-                                                         AnyRegister output);
-
-template<typename T>
-void
-MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
-                                                        Register value, Register temp, Register valueTemp,
-                                                        Register offsetTemp, Register maskTemp,
-                                                        AnyRegister output)
-{
-    switch (arrayType) {
-      case Scalar::Int8:
-        atomicExchange8SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint8:
-        atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Int16:
-        atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint16:
-        atomicExchange16ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Int32:
-        atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint32:
-        // At the moment, the code in MCallOptimize.cpp requires the output
-        // type to be double for uint32 arrays.  See bug 1077305.
-        MOZ_ASSERT(output.isFloat());
-        atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, temp);
-        convertUInt32ToDouble(temp, output.fpu());
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
-                                                        Register value, Register temp, Register valueTemp,
-                                                        Register offsetTemp, Register maskTemp,
-                                                        AnyRegister output);
-template void
-MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
-                                                        Register value, Register temp, Register valueTemp,
-                                                        Register offsetTemp, Register maskTemp,
-                                                        AnyRegister output);
-
 CodeOffset
 MacroAssemblerMIPSCompat::toggledJump(Label* label)
 {
     CodeOffset ret(nextOffset().getOffset());
     ma_b(label);
     return ret;
 }
 
@@ -2482,16 +2389,224 @@ MacroAssembler::wasmTruncateFloat32ToUIn
     ma_or(output, ScratchRegister);
     ma_b(&done);
     bind(&simple);
     as_truncws(ScratchDoubleReg, input);
     moveFromFloat32(ScratchDoubleReg, output);
     bind(&done);
 }
 
+static void
+EnterAtomic64Region(MacroAssembler& masm, Register addr, Register spinlock, Register scratch)
+{
+    masm.movePtr(wasm::SymbolicAddress::js_jit_gAtomic64Lock, spinlock);
+    masm.as_lbu(zero, addr, 7); // Force memory trap on invalid access before we enter the spinlock.
+
+    Label tryLock;
+
+    masm.memoryBarrier(MembarFull);
+
+    masm.bind(&tryLock);
+
+    masm.as_ll(scratch, spinlock, 0);
+    masm.ma_b(scratch, scratch, &tryLock, Assembler::NonZero, ShortJump);
+    masm.ma_li(scratch, Imm32(1));
+    masm.as_sc(scratch, spinlock, 0);
+    masm.ma_b(scratch, scratch, &tryLock, Assembler::Zero, ShortJump);
+
+    masm.memoryBarrier(MembarFull);
+}
+
+static void
+ExitAtomic64Region(MacroAssembler& masm, Register spinlock)
+{
+    masm.memoryBarrier(MembarFull);
+    masm.as_sw(zero, spinlock, 0);
+    masm.memoryBarrier(MembarFull);
+}
+
+template <typename T>
+static void
+AtomicLoad64(MacroAssembler& masm, const T& mem, Register64 temp, Register64 output)
+{
+    MOZ_ASSERT(temp.low == InvalidReg && temp.high == InvalidReg);
+
+    masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+    EnterAtomic64Region(masm, /* addr= */ SecondScratchReg, /* spinlock= */ ScratchRegister,
+                        /* scratch= */ output.low);
+
+    masm.load64(Address(SecondScratchReg, 0), output);
+
+    ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
+}
+
+void
+MacroAssembler::atomicLoad64(const Synchronization&, const Address& mem, Register64 temp,
+                             Register64 output)
+{
+    AtomicLoad64(*this, mem, temp, output);
+}
+
+void
+MacroAssembler::atomicLoad64(const Synchronization&, const BaseIndex& mem, Register64 temp,
+                             Register64 output)
+{
+    AtomicLoad64(*this, mem, temp, output);
+}
+
+template<typename T>
+void
+MacroAssemblerMIPSCompat::atomicStore64(const T& mem, Register temp, Register64 value)
+{
+    computeEffectiveAddress(mem, SecondScratchReg);
+
+    EnterAtomic64Region(asMasm(), /* addr= */ SecondScratchReg, /* spinlock= */ ScratchRegister,
+                        /* scratch= */ temp);
+
+    store64(value, Address(SecondScratchReg, 0));
+
+    ExitAtomic64Region(asMasm(), /* spinlock= */ ScratchRegister);
+}
+
+template void
+MacroAssemblerMIPSCompat::atomicStore64(const Address& mem, Register temp, Register64 value);
+template void
+MacroAssemblerMIPSCompat::atomicStore64(const BaseIndex& mem, Register temp, Register64 value);
+
+template <typename T>
+static void
+CompareExchange64(MacroAssembler& masm, const T& mem, Register64 expect, Register64 replace,
+                  Register64 output)
+{
+    MOZ_ASSERT(output != expect);
+    MOZ_ASSERT(output != replace);
+
+    Label exit;
+
+    masm.computeEffectiveAddress(mem, SecondScratchReg);
+    Address addr(SecondScratchReg, 0);
+
+    EnterAtomic64Region(masm, /* addr= */ SecondScratchReg, /* spinlock= */ ScratchRegister,
+                        /* scratch= */ output.low);
+    masm.load64(addr, output);
+
+    masm.ma_b(output.low, expect.low, &exit, Assembler::NotEqual, ShortJump);
+    masm.ma_b(output.high, expect.high, &exit, Assembler::NotEqual, ShortJump);
+    masm.store64(replace, addr);
+    masm.bind(&exit);
+    ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
+}
+
+
+void
+MacroAssembler::compareExchange64(const Synchronization&, const Address& mem, Register64 expect,
+                                  Register64 replace, Register64 output)
+{
+    CompareExchange64(*this, mem, expect, replace, output);
+}
+
+void
+MacroAssembler::compareExchange64(const Synchronization&, const BaseIndex& mem, Register64 expect,
+                                  Register64 replace, Register64 output)
+{
+    CompareExchange64(*this, mem, expect, replace, output);
+}
+
+
+template <typename T>
+static void
+AtomicExchange64(MacroAssembler& masm, const T& mem, Register64 src, Register64 output)
+{
+    masm.computeEffectiveAddress(mem, SecondScratchReg);
+    Address addr(SecondScratchReg, 0);
+
+    EnterAtomic64Region(masm, /* addr= */ SecondScratchReg, /* spinlock= */ ScratchRegister,
+                        /* scratch= */ output.low);
+
+    masm.load64(addr, output);
+    masm.store64(src, addr);
+
+    ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
+}
+
+
+void
+MacroAssembler::atomicExchange64(const Synchronization&, const Address& mem, Register64 src,
+                                 Register64 output)
+{
+    AtomicExchange64(*this, mem, src, output);
+}
+
+void
+MacroAssembler::atomicExchange64(const Synchronization&, const BaseIndex& mem, Register64 src,
+                                 Register64 output)
+{
+    AtomicExchange64(*this, mem, src, output);
+}
+
+template<typename T>
+static void
+AtomicFetchOp64(MacroAssembler& masm, AtomicOp op, Register64 value, const T& mem,
+                Register64 temp, Register64 output)
+{
+    masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+    EnterAtomic64Region(masm, /* addr= */ SecondScratchReg, /* spinlock= */ ScratchRegister,
+                        /* scratch= */ output.low);
+
+    masm.load64(Address(SecondScratchReg, 0), output);
+
+    switch (op) {
+      case AtomicFetchAddOp:
+        masm.as_addu(temp.low, output.low, value.low);
+        masm.as_sltu(temp.high, temp.low, output.low);
+        masm.as_addu(temp.high, temp.high, output.high);
+        masm.as_addu(temp.high, temp.high, value.high);
+        break;
+      case AtomicFetchSubOp:
+        masm.as_sltu(temp.high, output.low, value.low);
+        masm.as_subu(temp.high, output.high, temp.high);
+        masm.as_subu(temp.low, output.low, value.low);
+        masm.as_subu(temp.high, temp.high, value.high);
+        break;
+      case AtomicFetchAndOp:
+        masm.as_and(temp.low, output.low, value.low);
+        masm.as_and(temp.high, output.high, value.high);
+        break;
+      case AtomicFetchOrOp:
+        masm.as_or(temp.low, output.low, value.low);
+        masm.as_or(temp.high, output.high, value.high);
+        break;
+      case AtomicFetchXorOp:
+        masm.as_xor(temp.low, output.low, value.low);
+        masm.as_xor(temp.high, output.high, value.high);
+        break;
+      default:
+        MOZ_CRASH();
+    }
+
+    masm.store64(temp, Address(SecondScratchReg, 0));
+
+    ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
+}
+
+void
+MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op, Register64 value,
+                                const Address& mem, Register64 temp, Register64 output)
+{
+    AtomicFetchOp64(*this, op, value, mem, temp, output);
+}
+
+void
+MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op, Register64 value,
+                                const BaseIndex& mem, Register64 temp, Register64 output)
+{
+    AtomicFetchOp64(*this, op, value, mem, temp, output);
+}
 // ========================================================================
 // Convert floating point.
 
 static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;
 
 bool
 MacroAssembler::convertUInt64ToDoubleNeedsTemp()
 {
@@ -2505,8 +2620,8 @@ MacroAssembler::convertUInt64ToDouble(Re
     convertUInt32ToDouble(src.high, dest);
     loadConstantDouble(TO_DOUBLE_HIGH_SCALE, ScratchDoubleReg);
     mulDouble(ScratchDoubleReg, dest);
     convertUInt32ToDouble(src.low, ScratchDoubleReg);
     addDouble(ScratchDoubleReg, dest);
 }
 
 //}}} check_macroassembler_style
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -421,16 +421,17 @@ class MacroAssemblerMIPSCompat : public 
   protected:
     Operand ToType(Operand base);
     Address ToType(Address base) {
         return ToType(Operand(base)).toAddress();
     }
 
     uint32_t getType(const Value& val);
     void moveData(const Value& val, Register data);
+
   public:
     void moveValue(const Value& val, Register type, Register data);
 
     CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr);
     CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr);
 
     void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
         if (dest.isFloat())
@@ -547,339 +548,26 @@ class MacroAssemblerMIPSCompat : public 
     void storePayload(Register src, Address dest);
     void storePayload(const Value& val, const BaseIndex& dest);
     void storePayload(Register src, const BaseIndex& dest);
     void storeTypeTag(ImmTag tag, Address dest);
     void storeTypeTag(ImmTag tag, const BaseIndex& dest);
 
     void handleFailureWithHandlerTail(void* handler, Label* profilerExitTail);
 
+    template <typename T>
+    void atomicStore64(const T& mem, Register temp, Register64 value);
+
+
     /////////////////////////////////////////////////////////////////
     // Common interface.
     /////////////////////////////////////////////////////////////////
   public:
     // The following functions are exposed for use in platform-shared code.
 
-    template<typename T>
-    void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
-                                    Register offsetTemp, Register maskTemp, Register output)
-    {
-        compareExchange(1, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
-                                    Register offsetTemp, Register maskTemp, Register output)
-    {
-        compareExchange(1, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
-                                     Register offsetTemp, Register maskTemp, Register output)
-    {
-        compareExchange(2, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
-                                     Register offsetTemp, Register maskTemp, Register output)
-    {
-        compareExchange(2, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void compareExchange32(const T& mem, Register oldval, Register newval, Register valueTemp,
-                           Register offsetTemp, Register maskTemp, Register output)
-    {
-        compareExchange(4, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
-    }
-
-    template<typename T>
-    void atomicExchange8SignExtend(const T& mem, Register value, Register valueTemp,
-                          Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicExchange(1, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void atomicExchange8ZeroExtend(const T& mem, Register value, Register valueTemp,
-                          Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicExchange(1, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void atomicExchange16SignExtend(const T& mem, Register value, Register valueTemp,
-                          Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicExchange(2, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void atomicExchange16ZeroExtend(const T& mem, Register value, Register valueTemp,
-                          Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicExchange(2, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void atomicExchange32(const T& mem, Register value, Register valueTemp,
-                          Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicExchange(4, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd32(const S& value, const T& mem, Register flagTemp,
-                          Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template <typename T, typename S>
-    void atomicAdd8(const T& value, const S& mem, Register flagTemp,
-                    Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(1, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicAdd16(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(2, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicAdd32(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(4, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchSub8SignExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub16SignExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub32(const S& value, const T& mem, Register flagTemp,
-                          Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template <typename T, typename S>
-    void atomicSub8(const T& value, const S& mem, Register flagTemp,
-                    Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(1, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicSub16(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(2, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicSub32(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(4, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd32(const S& value, const T& mem, Register flagTemp,
-                          Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template <typename T, typename S>
-    void atomicAnd8(const T& value, const S& mem, Register flagTemp,
-                    Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(1, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicAnd16(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(2, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicAnd32(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(4, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchOr8SignExtend(const S& value, const T& mem, Register flagTemp,
-                                  Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                  Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr16SignExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr32(const S& value, const T& mem, Register flagTemp,
-                         Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template <typename T, typename S>
-    void atomicOr8(const T& value, const S& mem, Register flagTemp,
-                   Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(1, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicOr16(const T& value, const S& mem, Register flagTemp,
-                    Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(2, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicOr32(const T& value, const S& mem, Register flagTemp,
-                    Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(4, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchXor8SignExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor16SignExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor32(const S& value, const T& mem, Register flagTemp,
-                          Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template <typename T, typename S>
-    void atomicXor8(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(1, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicXor16(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(2, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicXor32(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(4, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-
-    template<typename T>
-    void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
-                                        Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
-                                        AnyRegister output);
-
-    template<typename T>
-    void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
-                                       Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
-                                       AnyRegister output);
-
     inline void incrementInt32Value(const Address& addr);
 
     void move32(Imm32 imm, Register dest);
     void move32(Register src, Register dest);
 
     void movePtr(Register src, Register dest);
     void movePtr(ImmWord imm, Register dest);
     void movePtr(ImmPtr imm, Register dest);
@@ -1021,137 +709,22 @@ class MacroAssemblerMIPSCompat : public 
     void alignStackPointer();
     void restoreStackPointer();
     static void calculateAlignedStackPointer(void** stackPointer);
 
     // If source is a double, load it into dest. If source is int32,
     // convert it to double. Else, branch to failure.
     void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
 
-    template<typename T>
-    void atomicFetchAdd8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchSub8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchAnd8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchOr8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchXor8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval,
-                                    Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchAdd16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchSub16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchAnd16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchOr16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchXor16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval,
-                                     Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchAdd32(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchSub32(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchAnd32(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchOr32(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchXor32(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void compareExchange32(const T& mem, Register oldval, Register newval, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T> void atomicExchange32(const T& mem, Register value, Register output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicFetchAdd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicFetchSub64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicFetchAnd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicFetchOr64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicFetchXor64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicExchange64(const T& mem, Register64 src, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void compareExchange64(const T& mem, Register64 expect, Register64 replace, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicLoad64(const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-
   protected:
     bool buildOOLFakeExitFrame(void* fakeReturnAddr);
 
+    void enterAtomic64Region(Register addr, Register spinlock, Register tmp);
+    void exitAtomic64Region(Register spinlock);
+
   public:
     CodeOffset labelForPatch() {
         return CodeOffset(nextOffset().getOffset());
     }
 
     void lea(Operand addr, Register dest) {
         ma_addu(dest, addr.baseReg(), Imm32(addr.disp()));
     }
--- a/js/src/jit/mips32/Simulator-mips32.cpp
+++ b/js/src/jit/mips32/Simulator-mips32.cpp
@@ -1920,16 +1920,20 @@ Simulator::writeB(uint32_t addr, int8_t 
     LLBit_ = false;
     *ptr = value;
 }
 
 int
 Simulator::loadLinkedW(uint32_t addr, SimInstruction* instr)
 {
     if ((addr & kPointerAlignmentMask) == 0) {
+
+        if (handleWasmFault(addr, 1))
+            return -1;
+
         volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
         int32_t value = *ptr;
         lastLLValue_ = value;
         LLAddr_ = addr;
         // Note that any memory write or "external" interrupt should reset this value to false.
         LLBit_ = true;
         return value;
     }
@@ -2018,16 +2022,20 @@ typedef int64_t (*Prototype_General4)(in
 typedef int64_t (*Prototype_General5)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
                                       int32_t arg4);
 typedef int64_t (*Prototype_General6)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
                                       int32_t arg4, int32_t arg5);
 typedef int64_t (*Prototype_General7)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
                                       int32_t arg4, int32_t arg5, int32_t arg6);
 typedef int64_t (*Prototype_General8)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
                                       int32_t arg4, int32_t arg5, int32_t arg6, int32_t arg7);
+typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int32_t arg0, int32_t arg1, int32_t arg2,
+                                                        int64_t arg3);
+typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int32_t arg0, int32_t arg1, int64_t arg2,
+                                                      int64_t arg3);
 
 typedef double (*Prototype_Double_None)();
 typedef double (*Prototype_Double_Double)(double arg0);
 typedef double (*Prototype_Double_Int)(int32_t arg0);
 typedef int32_t (*Prototype_Int_Double)(double arg0);
 typedef int64_t (*Prototype_Int64_Double)(double arg0);
 typedef int32_t (*Prototype_Int_DoubleIntInt)(double arg0, int32_t arg1, int32_t arg2);
 typedef int32_t (*Prototype_Int_IntDoubleIntInt)(int32_t arg0, double arg1, int32_t arg2,
@@ -2041,16 +2049,23 @@ typedef double (*Prototype_Double_IntInt
 typedef double (*Prototype_Double_IntDouble)(int32_t arg0, double arg1);
 typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
 typedef int32_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);
 
 typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2);
 typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1,
                                                             double arg2, double arg3);
 
+static int64_t
+MakeInt64(int32_t first, int32_t second)
+{
+    // Little-endian order.
+    return ((int64_t)second << 32) | (uint32_t)first;
+}
+
 // Software interrupt instructions are used by the simulator to call into C++.
 void
 Simulator::softwareInterrupt(SimInstruction* instr)
 {
     int32_t func = instr->functionFieldRaw();
     uint32_t code = (func == ff_break) ? instr->bits(25, 6) : -1;
 
     // We first check if we met a call_rt_redirected.
@@ -2149,16 +2164,31 @@ Simulator::softwareInterrupt(SimInstruct
             double dval0, dval1;
             int32_t ival;
             getFpArgs(&dval0, &dval1, &ival);
             Prototype_Int_Double target = reinterpret_cast<Prototype_Int_Double>(external);
             int32_t res = target(dval0);
             setRegister(v0, res);
             break;
           }
+          case Args_Int_GeneralGeneralGeneralInt64: {
+            Prototype_GeneralGeneralGeneralInt64 target =
+                reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
+            // The int64 arg is not split across register and stack
+            int64_t result = target(arg0, arg1, arg2, MakeInt64(arg4, arg5));
+            setCallResult(result);
+            break;
+          }
+          case Args_Int_GeneralGeneralInt64Int64: {
+            Prototype_GeneralGeneralInt64Int64 target =
+                reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
+            int64_t result = target(arg0, arg1, MakeInt64(arg2, arg3), MakeInt64(arg4, arg5));
+            setCallResult(result);
+            break;
+          }
           case Args_Int64_Double: {
             double dval0, dval1;
             int32_t ival;
             getFpArgs(&dval0, &dval1, &ival);
             Prototype_Int64_Double target = reinterpret_cast<Prototype_Int64_Double>(external);
             int64_t result = target(dval0);
             setCallResult(result);
             break;
--- a/js/src/jit/mips32/Simulator-mips32.h
+++ b/js/src/jit/mips32/Simulator-mips32.h
@@ -151,16 +151,18 @@ class Simulator {
     static Simulator* Create(JSContext* cx);
 
     static void Destroy(Simulator* simulator);
 
     // Constructor/destructor are for internal use only; use the static methods above.
     Simulator();
     ~Simulator();
 
+    static bool supportsAtomics() { return true; }
+
     // The currently executing Simulator instance. Potentially there can be one
     // for each native thread.
     static Simulator* Current();
 
     static inline uintptr_t StackLimit() {
         return Simulator::Current()->stackLimit();
     }
 
--- a/js/src/jit/mips64/CodeGenerator-mips64.cpp
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -477,18 +477,16 @@ CodeGeneratorMIPS64::visitWasmUnalignedL
 }
 
 template <typename T>
 void
 CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
 {
     const MWasmStore* mir = lir->mir();
 
-    MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
-
     uint32_t offset = mir->access().offset();
     MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptr());
 
     // Maybe add the offset.
     if (offset) {
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
--- a/js/src/jit/mips64/LOpcodes-mips64.h
+++ b/js/src/jit/mips64/LOpcodes-mips64.h
@@ -14,11 +14,14 @@
     _(DivOrModI64)              \
     _(UDivOrMod)                \
     _(UDivOrModI64)             \
     _(WasmUnalignedLoad)        \
     _(WasmUnalignedStore)       \
     _(WasmUnalignedLoadI64)     \
     _(WasmUnalignedStoreI64)    \
     _(WasmTruncateToInt64)      \
-    _(Int64ToFloatingPoint)
+    _(Int64ToFloatingPoint)     \
+    _(WasmCompareExchangeI64)   \
+    _(WasmAtomicExchangeI64)    \
+    _(WasmAtomicBinopI64)       \
 
 #endif // jit_mips64_LOpcodes_mips64_h__
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -2051,109 +2051,16 @@ MacroAssemblerMIPS64Compat::handleFailur
     // FP; SP is pointing to the unwound return address to the wasm entry, so
     // we can just ret().
     bind(&wasm);
     loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), FramePointer);
     loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
     ret();
 }
 
-template<typename T>
-void
-MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
-                                                           Register oldval, Register newval,
-                                                           Register temp, Register valueTemp,
-                                                           Register offsetTemp, Register maskTemp,
-                                                           AnyRegister output)
-{
-    switch (arrayType) {
-      case Scalar::Int8:
-        compareExchange8SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint8:
-        compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Int16:
-        compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint16:
-        compareExchange16ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Int32:
-        compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint32:
-        // At the moment, the code in MCallOptimize.cpp requires the output
-        // type to be double for uint32 arrays.  See bug 1077305.
-        MOZ_ASSERT(output.isFloat());
-        compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, temp);
-        convertUInt32ToDouble(temp, output.fpu());
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
-                                                           Register oldval, Register newval, Register temp,
-                                                           Register valueTemp, Register offsetTemp, Register maskTemp,
-                                                           AnyRegister output);
-template void
-MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
-                                                           Register oldval, Register newval, Register temp,
-                                                           Register valueTemp, Register offsetTemp, Register maskTemp,
-                                                           AnyRegister output);
-
-template<typename T>
-void
-MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
-                                                          Register value, Register temp, Register valueTemp,
-                                                          Register offsetTemp, Register maskTemp,
-                                                          AnyRegister output)
-{
-    switch (arrayType) {
-      case Scalar::Int8:
-        atomicExchange8SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint8:
-        atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Int16:
-        atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint16:
-        atomicExchange16ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Int32:
-        atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
-        break;
-      case Scalar::Uint32:
-        // At the moment, the code in MCallOptimize.cpp requires the output
-        // type to be double for uint32 arrays.  See bug 1077305.
-        MOZ_ASSERT(output.isFloat());
-        atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, temp);
-        convertUInt32ToDouble(temp, output.fpu());
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
-                                                          Register value, Register temp, Register valueTemp,
-                                                          Register offsetTemp, Register maskTemp,
-                                                          AnyRegister output);
-template void
-MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
-                                                          Register value, Register temp, Register valueTemp,
-                                                          Register offsetTemp, Register maskTemp,
-                                                          AnyRegister output);
-
 CodeOffset
 MacroAssemblerMIPS64Compat::toggledJump(Label* label)
 {
     CodeOffset ret(nextOffset().getOffset());
     ma_b(label);
     return ret;
 }
 
@@ -2533,16 +2440,145 @@ MacroAssembler::wasmTruncateFloat32ToUIn
     as_cfc1(ScratchRegister, Assembler::FCSR);
     ma_ext(ScratchRegister, ScratchRegister, 6, 1);
     ma_or(ScratchRegister, output);
     moveFromFloat32(ScratchDoubleReg, output);
     ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
 
 }
 
+template <typename T>
+static void
+CompareExchange64(MacroAssembler& masm, const Synchronization& sync, const T& mem,
+                  Register64 expect, Register64 replace, Register64 output)
+{
+    masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+    Label tryAgain;
+    Label exit;
+
+    masm.memoryBarrierBefore(sync);
+
+    masm.bind(&tryAgain);
+
+    masm.as_lld(output.reg, SecondScratchReg, 0);
+    masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
+    masm.movePtr(replace.reg, ScratchRegister);
+    masm.as_scd(ScratchRegister, SecondScratchReg, 0);
+    masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero, ShortJump);
+
+    masm.memoryBarrierAfter(sync);
+
+    masm.bind(&exit);
+}
+
+void
+MacroAssembler::compareExchange64(const Synchronization& sync, const Address& mem,
+                                  Register64 expect, Register64 replace, Register64 output)
+{
+    CompareExchange64(*this, sync, mem, expect, replace, output);
+}
+
+void
+MacroAssembler::compareExchange64(const Synchronization& sync, const BaseIndex& mem,
+                                  Register64 expect, Register64 replace, Register64 output)
+{
+    CompareExchange64(*this, sync, mem, expect, replace, output);
+}
+
+template <typename T>
+static void
+AtomicExchange64(MacroAssembler& masm, const Synchronization& sync, const T& mem,
+                 Register64 src, Register64 output)
+{
+    masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+    Label tryAgain;
+
+    masm.memoryBarrierBefore(sync);
+
+    masm.bind(&tryAgain);
+
+    masm.as_lld(output.reg, SecondScratchReg, 0);
+    masm.movePtr(src.reg, ScratchRegister);
+    masm.as_scd(ScratchRegister, SecondScratchReg, 0);
+    masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero, ShortJump);
+
+    masm.memoryBarrierAfter(sync);
+}
+
+void
+MacroAssembler::atomicExchange64(const Synchronization& sync, const Address& mem, Register64 src,
+                                 Register64 output)
+{
+    AtomicExchange64(*this, sync, mem, src, output);
+}
+
+void
+MacroAssembler::atomicExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 src,
+                                 Register64 output)
+{
+    AtomicExchange64(*this, sync, mem, src, output);
+}
+
+template<typename T>
+static void
+AtomicFetchOp64(MacroAssembler& masm, const Synchronization& sync, AtomicOp op, Register64 value,
+                const T& mem, Register64 temp, Register64 output)
+{
+    masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+    Label tryAgain;
+
+    masm.memoryBarrierBefore(sync);
+
+    masm.bind(&tryAgain);
+
+    masm.as_lld(output.reg, SecondScratchReg, 0);
+
+    switch(op) {
+      case AtomicFetchAddOp:
+        masm.as_daddu(temp.reg, output.reg, value.reg);
+        break;
+      case AtomicFetchSubOp:
+        masm.as_dsubu(temp.reg, output.reg, value.reg);
+        break;
+      case AtomicFetchAndOp:
+        masm.as_and(temp.reg, output.reg, value.reg);
+        break;
+      case AtomicFetchOrOp:
+        masm.as_or(temp.reg, output.reg, value.reg);
+        break;
+      case AtomicFetchXorOp:
+        masm.as_xor(temp.reg, output.reg, value.reg);
+        break;
+      default:
+        MOZ_CRASH();
+    }
+
+    masm.as_scd(temp.reg, SecondScratchReg, 0);
+    masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::Zero, ShortJump);
+
+    masm.memoryBarrierAfter(sync);
+}
+
+void
+MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
+                                const Address& mem, Register64 temp, Register64 output)
+{
+    AtomicFetchOp64(*this, sync, op, value, mem, temp, output);
+}
+
+void
+MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
+                                const BaseIndex& mem, Register64 temp, Register64 output)
+{
+    AtomicFetchOp64(*this, sync, op, value, mem, temp, output);
+}
+
 // ========================================================================
 // Convert floating point.
 
 void
 MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest)
 {
     as_dmtc1(src.reg, dest);
     as_cvtdl(dest, dest);
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -585,336 +585,16 @@ class MacroAssemblerMIPS64Compat : publi
     void handleFailureWithHandlerTail(void* handler, Label* profilerExitTail);
 
     /////////////////////////////////////////////////////////////////
     // Common interface.
     /////////////////////////////////////////////////////////////////
   public:
     // The following functions are exposed for use in platform-shared code.
 
-    // TODO: These are no longer used in platform code.
-  private:
-    template<typename T>
-    void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
-                                    Register offsetTemp, Register maskTemp, Register output)
-    {
-        compareExchange(1, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
-                                    Register offsetTemp, Register maskTemp, Register output)
-    {
-        compareExchange(1, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
-                                     Register offsetTemp, Register maskTemp, Register output)
-    {
-        compareExchange(2, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
-                                     Register offsetTemp, Register maskTemp, Register output)
-    {
-        compareExchange(2, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void compareExchange32(const T& mem, Register oldval, Register newval, Register valueTemp,
-                           Register offsetTemp, Register maskTemp, Register output)
-    {
-        compareExchange(4, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
-    }
-
-    template<typename T>
-    void atomicExchange8SignExtend(const T& mem, Register value, Register valueTemp,
-                          Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicExchange(1, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void atomicExchange8ZeroExtend(const T& mem, Register value, Register valueTemp,
-                          Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicExchange(1, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void atomicExchange16SignExtend(const T& mem, Register value, Register valueTemp,
-                          Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicExchange(2, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void atomicExchange16ZeroExtend(const T& mem, Register value, Register valueTemp,
-                          Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicExchange(2, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T>
-    void atomicExchange32(const T& mem, Register value, Register valueTemp,
-                          Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicExchange(4, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAdd32(const S& value, const T& mem, Register flagTemp,
-                          Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template <typename T, typename S>
-    void atomicAdd8(const T& value, const S& mem, Register flagTemp,
-                    Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(1, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicAdd16(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(2, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicAdd32(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(4, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchSub8SignExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub16SignExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchSub32(const S& value, const T& mem, Register flagTemp,
-                          Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template <typename T, typename S>
-    void atomicSub8(const T& value, const S& mem, Register flagTemp,
-                    Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(1, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicSub16(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(2, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicSub32(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(4, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchAnd32(const S& value, const T& mem, Register flagTemp,
-                          Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template <typename T, typename S>
-    void atomicAnd8(const T& value, const S& mem, Register flagTemp,
-                    Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(1, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicAnd16(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(2, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicAnd32(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(4, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchOr8SignExtend(const S& value, const T& mem, Register flagTemp,
-                                  Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                  Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr16SignExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchOr32(const S& value, const T& mem, Register flagTemp,
-                         Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template <typename T, typename S>
-    void atomicOr8(const T& value, const S& mem, Register flagTemp,
-                   Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(1, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicOr16(const T& value, const S& mem, Register flagTemp,
-                    Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(2, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicOr32(const T& value, const S& mem, Register flagTemp,
-                    Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(4, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-
-    template<typename T, typename S>
-    void atomicFetchXor8SignExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                   Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor16SignExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register flagTemp,
-                                    Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template<typename T, typename S>
-    void atomicFetchXor32(const S& value, const T& mem, Register flagTemp,
-                          Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
-    {
-        atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
-    }
-    template <typename T, typename S>
-    void atomicXor8(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(1, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicXor16(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(2, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-    template <typename T, typename S>
-    void atomicXor32(const T& value, const S& mem, Register flagTemp,
-                     Register valueTemp, Register offsetTemp, Register maskTemp)
-    {
-        atomicEffectOp(4, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
-    }
-
-  public:
-    template<typename T>
-    void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
-                                        Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
-                                        AnyRegister output);
-
-    template<typename T>
-    void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
-                                       Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
-                                       AnyRegister output);
-
     inline void incrementInt32Value(const Address& addr);
 
     void move32(Imm32 imm, Register dest);
     void move32(Register src, Register dest);
 
     void movePtr(Register src, Register dest);
     void movePtr(ImmWord imm, Register dest);
     void movePtr(ImmPtr imm, Register dest);
@@ -1062,134 +742,16 @@ class MacroAssemblerMIPS64Compat : publi
 
     void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs, Register dest);
 
     void cmp64Set(Assembler::Condition cond, Register lhs, Imm32 rhs, Register dest)
     {
         ma_cmp_set(dest, lhs, rhs, cond);
     }
 
-    template<typename T>
-    void atomicFetchAdd8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchSub8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchAnd8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchOr8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchXor8ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval,
-                                    Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicExchange8ZeroExtend(const T& mem, Register value, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchAdd16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchSub16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchAnd16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchOr16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchXor16ZeroExtend(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval,
-                                     Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicExchange16ZeroExtend(const T& mem, Register value, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchAdd32(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchSub32(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchAnd32(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchOr32(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void atomicFetchXor32(Register value, const T& mem, Register temp, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T>
-    void compareExchange32(const T& mem, Register oldval, Register newval, Register output) {
-        MOZ_CRASH();
-    }
-    template<typename T> void atomicExchange32(const T& mem, Register value, Register output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicFetchAdd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicFetchSub64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicFetchAnd64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicFetchOr64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicFetchXor64(Register64 value, const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicExchange64(const T& mem, Register64 src, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void compareExchange64(const T& mem, Register64 expect, Register64 replace, Register64 output) {
-        MOZ_CRASH();
-    }
-    template <typename T>
-    void atomicLoad64(const T& mem, Register64 temp, Register64 output) {
-        MOZ_CRASH();
-    }
-
   protected:
     bool buildOOLFakeExitFrame(void* fakeReturnAddr);
 
   public:
     CodeOffset labelForPatch() {
         return CodeOffset(nextOffset().getOffset());
     }
 
--- a/js/src/jit/mips64/Simulator-mips64.cpp
+++ b/js/src/jit/mips64/Simulator-mips64.cpp
@@ -32,16 +32,17 @@
 #include "mozilla/Casting.h"
 #include "mozilla/FloatingPoint.h"
 #include "mozilla/IntegerPrintfMacros.h"
 #include "mozilla/Likely.h"
 #include "mozilla/MathAlgorithms.h"
 
 #include <float.h>
 
+#include "jit/AtomicOperations.h"
 #include "jit/mips64/Assembler-mips64.h"
 #include "threading/LockGuard.h"
 #include "vm/Runtime.h"
 #include "wasm/WasmInstance.h"
 #include "wasm/WasmSignalHandlers.h"
 
 #define I8(v)   static_cast<int8_t>(v)
 #define I16(v)  static_cast<int16_t>(v)
@@ -460,25 +461,27 @@ SimInstruction::instructionType() const
       case op_lbu:
       case op_lh:
       case op_lhu:
       case op_lw:
       case op_lwu:
       case op_lwl:
       case op_lwr:
       case op_ll:
+      case op_lld:
       case op_ld:
       case op_ldl:
       case op_ldr:
       case op_sb:
       case op_sh:
       case op_sw:
       case op_swl:
       case op_swr:
       case op_sc:
+      case op_scd:
       case op_sd:
       case op_sdl:
       case op_sdr:
       case op_lwc1:
       case op_ldc1:
       case op_swc1:
       case op_sdc1:
         return kImmediateType;
@@ -1146,20 +1149,20 @@ Simulator::setLastDebuggerInput(char* in
 }
 
 static CachePage*
 GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache, void* page)
 {
     SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
     if (p)
         return p->value();
-
+    AutoEnterOOMUnsafeRegion oomUnsafe;
     CachePage* new_page = js_new<CachePage>();
-    if (!i_cache.add(p, page, new_page))
-        return nullptr;
+    if (!new_page || !i_cache.add(p, page, new_page))
+         oomUnsafe.crash("Simulator CachePage");
     return new_page;
 }
 
 // Flush from start up to and not including start + size.
 static void
 FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache, intptr_t start, int size)
 {
     MOZ_ASSERT(size <= CachePage::kPageSize);
@@ -1278,16 +1281,19 @@ Simulator::Simulator()
 
     // Set up architecture state.
     // All registers are initialized to zero to start with.
     for (int i = 0; i < Register::kNumSimuRegisters; i++)
         registers_[i] = 0;
     for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++)
         FPUregisters_[i] = 0;
     FCSR_ = 0;
+    LLBit_ = false;
+    LLAddr_ = 0;
+    lastLLValue_ = 0;
 
     // The ra and pc are initialized to a known bad value that will cause an
     // access violation if the simulator ever tries to execute it.
     registers_[pc] = bad_ra;
     registers_[ra] = bad_ra;
 
     for (int i = 0; i < kNumExceptions; i++)
         exceptions[i] = 0;
@@ -1353,21 +1359,20 @@ class Redirection
         Redirection* current = SimulatorProcess::redirection();
         for (; current != nullptr; current = current->next_) {
             if (current->nativeFunction_ == nativeFunction) {
                 MOZ_ASSERT(current->type() == type);
                 return current;
             }
         }
 
+        AutoEnterOOMUnsafeRegion oomUnsafe;
         Redirection* redir = (Redirection*)js_malloc(sizeof(Redirection));
         if (!redir) {
-            MOZ_ReportAssertionFailure("[unhandlable oom] Simulator redirection",
-                                       __FILE__, __LINE__);
-            MOZ_CRASH();
+            oomUnsafe.crash("Simulator redirection");
         }
         new(redir) Redirection(nativeFunction, type);
         return redir;
     }
 
     static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
         uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
         uint8_t* addrOfRedirection = addrOfSwi - offsetof(Redirection, swiInstruction_);
@@ -1610,23 +1615,93 @@ int64_t
 Simulator::get_pc() const
 {
     return registers_[pc];
 }
 
 void
 Simulator::startInterrupt(JitActivation* activation)
 {
-    MOZ_CRASH("NIY");
+    JS::ProfilingFrameIterator::RegisterState state;
+    state.pc = (void*) get_pc();
+    state.fp = (void*) getRegister(fp);
+    state.sp = (void*) getRegister(sp);
+    state.lr = (void*) getRegister(ra);
+    activation->startWasmInterrupt(state);
 }
 
+// The signal handler only redirects the PC to the interrupt stub when the PC is
+// in function code. However, this guard is racy for the simulator since the
+// signal handler samples PC in the middle of simulating an instruction and thus
+// the current PC may have advanced once since the signal handler's guard. So we
+// re-check here.
 void
 Simulator::handleWasmInterrupt()
 {
-    MOZ_CRASH("NIY");
+    if (!wasm::CodeExists)
+        return;
+
+    void* pc = (void*)get_pc();
+    void* fp = (void*)getRegister(Register::fp);
+
+    JitActivation* activation = TlsContext.get()->activation()->asJit();
+    const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
+    if (!segment || !segment->containsCodePC(pc))
+        return;
+
+    // fp can be null during the prologue/epilogue of the entry function.
+    if (!fp)
+        return;
+
+    startInterrupt(activation);
+    set_pc(int64_t(segment->interruptCode()));
+}
+
+// WebAssembly memories contain an extra region of guard pages (see
+// WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses
+// using a signal handler that redirects PC to a stub that safely reports an
+// error. However, if the handler is hit by the simulator, the PC is in C++ code
+// and cannot be redirected. Therefore, we must avoid hitting the handler by
+// redirecting in the simulator before the real handler would have been hit.
+bool
+Simulator::handleWasmFault(uint64_t addr, unsigned numBytes)
+{
+    if (!wasm::CodeExists)
+        return false;
+
+    JSContext* cx = TlsContext.get();
+    if (!cx->activation() || !cx->activation()->isJit())
+        return false;
+    JitActivation* act = cx->activation()->asJit();
+
+    void* pc = reinterpret_cast<void*>(get_pc());
+    uint8_t* fp = reinterpret_cast<uint8_t*>(getRegister(Register::fp));
+
+    const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc);
+    if (!segment)
+        return false;
+
+    wasm::Instance* instance = wasm::LookupFaultingInstance(*segment, pc, fp);
+    if (!instance || !instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
+        return false;
+
+    LLBit_ = false;
+
+    const wasm::MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
+    if (!memoryAccess) {
+        startInterrupt(act);
+        if (!instance->code().containsCodePC(pc))
+            MOZ_CRASH("Cannot map PC to trap handler");
+        set_pc(int64_t(segment->outOfBoundsCode()));
+        return true;
+    }
+
+    MOZ_ASSERT(memoryAccess->hasTrapOutOfLineCode());
+    set_pc(int64_t(memoryAccess->trapOutOfLineCode(segment->base())));
+    return true;
 }
 
 bool
 Simulator::handleWasmTrapFault()
 {
     if (!wasm::CodeExists)
         return false;
 
@@ -1647,244 +1722,368 @@ Simulator::handleWasmTrapFault()
     if (!segment->code().lookupTrap(pc, &trap, &bytecode))
         return false;
 
     act->startWasmTrap(trap, bytecode.offset, pc, fp);
     set_pc(int64_t(segment->trapCode()));
     return true;
 }
 
-// The MIPS cannot do unaligned reads and writes.  On some MIPS platforms an
-// interrupt is caused.  On others it does a funky rotation thing.  For now we
-// simply disallow unaligned reads, but at some point we may want to move to
-// emulating the rotate behaviour.  Note that simulator runs have the runtime
-// system running directly on the host system and only generated code is
-// executed in the simulator.  Since the host is typically IA32 we will not
-// get the correct MIPS-like behaviour on unaligned accesses.
+// MIPS memory instructions (except lw(d)l/r , sw(d)l/r) trap on unaligned memory
+// access enabling the OS to handle them via trap-and-emulate.
+// Note that simulator runs have the runtime system running directly on the host
+// system and only generated code is executed in the simulator.
+// Since the host is typically IA32 it will not trap on unaligned memory access.
+// We assume that executing correct generated code will not produce unaligned
+// memory access, so we explicitly check for address alignment and trap.
+// Note that trapping does not occur when executing wasm code, which requires that
+// unaligned memory access provides correct result.
 
 uint8_t
 Simulator::readBU(uint64_t addr, SimInstruction* instr)
 {
+    if (handleWasmFault(addr, 1))
+        return 0xff;
+
     uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
     return* ptr;
 }
 
 int8_t
 Simulator::readB(uint64_t addr, SimInstruction* instr)
 {
+    if (handleWasmFault(addr, 1))
+        return -1;
+
     int8_t* ptr = reinterpret_cast<int8_t*>(addr);
     return* ptr;
 }
 
 void
 Simulator::writeB(uint64_t addr, uint8_t value, SimInstruction* instr)
 {
+    if (handleWasmFault(addr, 1))
+        return;
+
     uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
     *ptr = value;
 }
 
 void
 Simulator::writeB(uint64_t addr, int8_t value, SimInstruction* instr)
 {
+    if (handleWasmFault(addr, 1))
+        return;
+
     int8_t* ptr = reinterpret_cast<int8_t*>(addr);
     *ptr = value;
 }
 
 uint16_t
 Simulator::readHU(uint64_t addr, SimInstruction* instr)
 {
-    if ((addr & 1) == 0) {
+    if (handleWasmFault(addr, 2))
+        return 0xffff;
+
+    if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
         return *ptr;
     }
     printf("Unaligned unsigned halfword read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
     return 0;
 }
 
 int16_t
 Simulator::readH(uint64_t addr, SimInstruction* instr)
 {
-    if ((addr & 1) == 0) {
+    if (handleWasmFault(addr, 2))
+        return -1;
+
+    if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         int16_t* ptr = reinterpret_cast<int16_t*>(addr);
         return *ptr;
     }
     printf("Unaligned signed halfword read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
     return 0;
 }
 
 void
 Simulator::writeH(uint64_t addr, uint16_t value, SimInstruction* instr)
 {
-    if ((addr & 1) == 0) {
+    if (handleWasmFault(addr, 2))
+        return;
+
+    if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+        LLBit_ = false;
         *ptr = value;
         return;
     }
     printf("Unaligned unsigned halfword write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
 }
 
 void
 Simulator::writeH(uint64_t addr, int16_t value, SimInstruction* instr)
 {
-    if ((addr & 1) == 0) {
+    if (handleWasmFault(addr, 2))
+        return;
+
+    if ((addr & 1) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+        LLBit_ = false;
         *ptr = value;
         return;
     }
     printf("Unaligned halfword write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
 }
 
 uint32_t
 Simulator::readWU(uint64_t addr, SimInstruction* instr)
 {
-    if (addr < 0x400) {
-        // This has to be a NULL-dereference, drop into debugger.
-        printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
-               addr, reinterpret_cast<intptr_t>(instr));
-        MOZ_CRASH();
-    }
-    if ((addr & 3) == 0) {
+    if (handleWasmFault(addr, 4))
+        return -1;
+
+    if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
         return *ptr;
     }
     printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
     return 0;
 }
 
 int32_t
 Simulator::readW(uint64_t addr, SimInstruction* instr)
 {
-    if (addr < 0x400) {
-        // This has to be a NULL-dereference, drop into debugger.
-        printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
-               addr, reinterpret_cast<intptr_t>(instr));
-        MOZ_CRASH();
-    }
-    if ((addr & 3) == 0) {
+    if (handleWasmFault(addr, 4))
+        return -1;
+
+    if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         int32_t* ptr = reinterpret_cast<int32_t*>(addr);
         return *ptr;
     }
     printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
     return 0;
 }
 
 void
 Simulator::writeW(uint64_t addr, uint32_t value, SimInstruction* instr)
 {
-    if (addr < 0x400) {
-        // This has to be a NULL-dereference, drop into debugger.
-        printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
-               addr, reinterpret_cast<intptr_t>(instr));
-        MOZ_CRASH();
-    }
-    if ((addr & 3) == 0) {
+    if (handleWasmFault(addr, 4))
+        return;
+
+    if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+        LLBit_ = false;
         *ptr = value;
         return;
     }
     printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
 }
 
 void
 Simulator::writeW(uint64_t addr, int32_t value, SimInstruction* instr)
 {
-    if (addr < 0x400) {
-        // This has to be a NULL-dereference, drop into debugger.
-        printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
-               addr, reinterpret_cast<intptr_t>(instr));
-        MOZ_CRASH();
-    }
-    if ((addr & 3) == 0) {
+    if (handleWasmFault(addr, 4))
+        return;
+
+    if ((addr & 3) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+        LLBit_ = false;
         *ptr = value;
         return;
     }
     printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
 }
 
 int64_t
 Simulator::readDW(uint64_t addr, SimInstruction* instr)
 {
-    if (addr < 0x400) {
-        // This has to be a NULL-dereference, drop into debugger.
-        printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
-               addr, reinterpret_cast<intptr_t>(instr));
-        MOZ_CRASH();
-    }
-    if ((addr & kPointerAlignmentMask) == 0) {
-        int64_t* ptr = reinterpret_cast<int64_t*>(addr);
-        return* ptr;
+    if (handleWasmFault(addr, 8))
+        return -1;
+
+    if ((addr & kPointerAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
+        intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+        return *ptr;
     }
     printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
     return 0;
 }
 
 void
 Simulator::writeDW(uint64_t addr, int64_t value, SimInstruction* instr)
 {
-    if (addr < 0x400) {
-        // This has to be a NULL-dereference, drop into debugger.
-        printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
-               addr, reinterpret_cast<intptr_t>(instr));
-        MOZ_CRASH();
-    }
-    if ((addr & kPointerAlignmentMask) == 0) {
+    if (handleWasmFault(addr, 8))
+        return;
+
+    if ((addr & kPointerAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+        LLBit_ = false;
         *ptr = value;
         return;
     }
     printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
 }
 
 double
 Simulator::readD(uint64_t addr, SimInstruction* instr)
 {
-    if ((addr & kDoubleAlignmentMask) == 0) {
+    if (handleWasmFault(addr, 8))
+        return NAN;
+
+    if ((addr & kDoubleAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         double* ptr = reinterpret_cast<double*>(addr);
         return *ptr;
     }
     printf("Unaligned (double) read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
     return 0;
 }
 
 void
 Simulator::writeD(uint64_t addr, double value, SimInstruction* instr)
 {
-    if ((addr & kDoubleAlignmentMask) == 0) {
+    if (handleWasmFault(addr, 8))
+        return;
+
+    if ((addr & kDoubleAlignmentMask) == 0 || wasm::InCompiledCode(reinterpret_cast<void *>(get_pc()))) {
         double* ptr = reinterpret_cast<double*>(addr);
+        LLBit_ = false;
         *ptr = value;
         return;
     }
     printf("Unaligned (double) write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
            addr, reinterpret_cast<intptr_t>(instr));
     MOZ_CRASH();
 }
 
+int
+Simulator::loadLinkedW(uint64_t addr, SimInstruction* instr)
+{
+    if ((addr & 3) == 0) {
+
+        if (handleWasmFault(addr, 4))
+            return -1;
+
+        volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
+        int32_t value = *ptr;
+        lastLLValue_ = value;
+        LLAddr_ = addr;
+        // Note that any memory write or "external" interrupt should reset this value to false.
+        LLBit_ = true;
+        return value;
+    }
+    printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+           addr, reinterpret_cast<intptr_t>(instr));
+    MOZ_CRASH();
+    return 0;
+}
+
+int
+Simulator::storeConditionalW(uint64_t addr, int value, SimInstruction* instr)
+{
+    // Correct behavior in this case, as defined by architecture, is to just return 0,
+    // but there is no point in allowing that. It is certainly an indicator of a bug.
+    if (addr != LLAddr_) {
+        printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR ", expected: 0x%016" PRIx64 "\n",
+               addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+        MOZ_CRASH();
+    }
+
+    if ((addr & 3) == 0) {
+        SharedMem<int32_t*> ptr = SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+
+        if (!LLBit_) {
+            return 0;
+        }
+
+        LLBit_ = false;
+        LLAddr_ = 0;
+        int32_t expected = int32_t(lastLLValue_);
+        int32_t old = AtomicOperations::compareExchangeSeqCst(ptr, expected, int32_t(value));
+        return (old == expected) ? 1:0;
+    }
+    printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+           addr, reinterpret_cast<intptr_t>(instr));
+    MOZ_CRASH();
+    return 0;
+}
+
+int64_t
+Simulator::loadLinkedD(uint64_t addr, SimInstruction* instr)
+{
+    if ((addr & kPointerAlignmentMask) == 0) {
+
+        if (handleWasmFault(addr, 8))
+            return -1;
+
+        volatile int64_t* ptr = reinterpret_cast<volatile int64_t*>(addr);
+        int64_t value = *ptr;
+        lastLLValue_ = value;
+        LLAddr_ = addr;
+        // Note that any memory write or "external" interrupt should reset this value to false.
+        LLBit_ = true;
+        return value;
+    }
+    printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+           addr, reinterpret_cast<intptr_t>(instr));
+    MOZ_CRASH();
+    return 0;
+}
+
+int
+Simulator::storeConditionalD(uint64_t addr, int64_t value, SimInstruction* instr)
+{
+    // Correct behavior in this case, as defined by architecture, is to just return 0,
+    // but there is no point in allowing that. It is certainly an indicator of a bug.
+    if (addr != LLAddr_) {
+        printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR ", expected: 0x%016" PRIx64 "\n",
+               addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+        MOZ_CRASH();
+    }
+
+    if ((addr & kPointerAlignmentMask) == 0) {
+        SharedMem<int64_t*> ptr = SharedMem<int64_t*>::shared(reinterpret_cast<int64_t*>(addr));
+
+        if (!LLBit_) {
+            return 0;
+        }
+
+        LLBit_ = false;
+        LLAddr_ = 0;
+        int64_t expected = lastLLValue_;
+        int64_t old = AtomicOperations::compareExchangeSeqCst(ptr, expected, int64_t(value));
+        return (old == expected) ? 1:0;
+    }
+    printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+           addr, reinterpret_cast<intptr_t>(instr));
+    MOZ_CRASH();
+    return 0;
+}
+
 uintptr_t
 Simulator::stackLimit() const
 {
     return stackLimit_;
 }
 
 uintptr_t*
 Simulator::addressOfStackLimit()
@@ -1927,25 +2126,30 @@ typedef int64_t (*Prototype_General4)(in
 typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
                                       int64_t arg4);
 typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
                                       int64_t arg4, int64_t arg5);
 typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
                                       int64_t arg4, int64_t arg5, int64_t arg6);
 typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
                                       int64_t arg4, int64_t arg5, int64_t arg6, int64_t arg7);
-
+typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int64_t arg0, int64_t arg1, int64_t arg2,
+                                                        int64_t arg3);
+typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int64_t arg0, int64_t arg1, int64_t arg2,
+                                                      int64_t arg3);
 typedef double (*Prototype_Double_None)();
 typedef double (*Prototype_Double_Double)(double arg0);
 typedef double (*Prototype_Double_Int)(int64_t arg0);
 typedef int64_t (*Prototype_Int_Double)(double arg0);
 typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, int64_t arg1, int64_t arg2);
 typedef int64_t (*Prototype_Int_IntDoubleIntInt)(int64_t arg0, double arg1, int64_t arg2,
                                                  int64_t arg3);
 typedef float (*Prototype_Float32_Float32)(float arg0);
+typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
+typedef float (*Prototype_Float32_IntInt)(int arg0, int arg1);
 
 typedef double (*Prototype_DoubleInt)(double arg0, int64_t arg1);
 typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1);
 typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
 typedef int64_t (*Prototype_Int_IntDouble)(int64_t arg0, double arg1);
 
 typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2);
 typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1,
@@ -2003,16 +2207,19 @@ Simulator::softwareInterrupt(SimInstruct
             Prototype_General2 target = reinterpret_cast<Prototype_General2>(external);
             int64_t result = target(arg0, arg1);
             setCallResult(result);
             break;
           }
           case Args_General3: {
             Prototype_General3 target = reinterpret_cast<Prototype_General3>(external);
             int64_t result = target(arg0, arg1, arg2);
+            if(external == intptr_t(&js::wasm::Instance::wake)) {
+                result = int32_t(result);
+            }
             setCallResult(result);
             break;
           }
           case Args_General4: {
             Prototype_General4 target = reinterpret_cast<Prototype_General4>(external);
             int64_t result = target(arg0, arg1, arg2, arg3);
             setCallResult(result);
             break;
@@ -2052,16 +2259,36 @@ Simulator::softwareInterrupt(SimInstruct
           }
           case Args_Int_Double: {
             double dval0 = getFpuRegisterDouble(12);
             Prototype_Int_Double target = reinterpret_cast<Prototype_Int_Double>(external);
             int64_t res = target(dval0);
             setRegister(v0, res);
             break;
           }
+          case Args_Int_GeneralGeneralGeneralInt64: {
+            Prototype_GeneralGeneralGeneralInt64 target =
+                reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
+            int64_t result = target(arg0, arg1, arg2, arg3);
+            if(external == intptr_t(&js::wasm::Instance::wait_i32)) {
+                result = int32_t(result);
+            }
+            setCallResult(result);
+            break;
+          }
+          case Args_Int_GeneralGeneralInt64Int64: {
+            Prototype_GeneralGeneralInt64Int64 target =
+                reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
+            int64_t result = target(arg0, arg1, arg2, arg3);
+            if(external == intptr_t(&js::wasm::Instance::wait_i64)) {
+                result = int32_t(result);
+            }
+            setCallResult(result);
+            break;
+          }
           case Args_Int_DoubleIntInt: {
             double dval = getFpuRegisterDouble(12);
             Prototype_Int_DoubleIntInt target = reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
             int64_t res = target(dval, arg1, arg2);
             setRegister(v0, res);
             break;
           }
           case Args_Int_IntDoubleIntInt: {
@@ -2081,16 +2308,32 @@ Simulator::softwareInterrupt(SimInstruct
           case Args_Float32_Float32: {
             float fval0;
             fval0 = getFpuRegisterFloat(12);
             Prototype_Float32_Float32 target = reinterpret_cast<Prototype_Float32_Float32>(external);
             float fresult = target(fval0);
             setCallResultFloat(fresult);
             break;
           }
+          case Args_Float32_Float32Float32: {
+            float fval0;
+            float fval1;
+            fval0 = getFpuRegisterFloat(12);
+            fval1 = getFpuRegisterFloat(13);
+            Prototype_Float32_Float32Float32 target = reinterpret_cast<Prototype_Float32_Float32Float32>(external);
+            float fresult = target(fval0, fval1);
+            setCallResultFloat(fresult);
+            break;
+          }
+          case Args_Float32_IntInt: {
+            Prototype_Float32_IntInt target = reinterpret_cast<Prototype_Float32_IntInt>(external);
+            float fresult = target(arg0, arg1);
+            setCallResultFloat(fresult);
+            break;
+          }
           case Args_Double_Int: {
             Prototype_Double_Int target = reinterpret_cast<Prototype_Double_Int>(external);
             double dresult = target(arg0);
             setCallResultDouble(dresult);
             break;
           }
           case Args_Double_DoubleInt: {
             double dval0 = getFpuRegisterDouble(12);
@@ -2929,16 +3172,31 @@ Simulator::decodeTypeRegister(SimInstruc
               case ff_ceil_l_fmt:  // Mips64r2 instruction.
                 i64 = I64(std::ceil(fs_value));
                 setFpuRegister(fd_reg, i64);
                 break;
               case ff_cvt_ps_s:
               case ff_c_f_fmt:
                 MOZ_CRASH();
                 break;
+              case ff_movf_fmt:
+                if (testFCSRBit(fcsr_cc)) {
+                  setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+                }
+                break;
+              case ff_movz_fmt:
+                if (rt == 0) {
+                  setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+                }
+                break;
+              case ff_movn_fmt:
+                if (rt != 0) {
+                  setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+                }
+                break;
               default:
                 MOZ_CRASH();
             }
             break;
           case rs_d:
             double dt_value, ds_value;
             ds_value = getFpuRegisterDouble(fs_reg);
             dt_value = getFpuRegisterDouble(ft_reg);
@@ -3106,17 +3364,18 @@ Simulator::decodeTypeRegister(SimInstruc
             break;
           case rs_l:
             switch (instr->functionFieldRaw()) {
               case ff_cvt_d_fmt:  // Mips64r2 instruction.
                 i64 = getFpuRegister(fs_reg);
                 setFpuRegisterDouble(fd_reg, static_cast<double>(i64));
                 break;
               case ff_cvt_s_fmt:
-                MOZ_CRASH();
+                i64 = getFpuRegister(fs_reg);
+                setFpuRegisterFloat(fd_reg, static_cast<float>(i64));
                 break;
               default:
                 MOZ_CRASH();
             }
             break;
           case rs_ps:
             break;
           default:
@@ -3473,17 +3732,21 @@ Simulator::decodeTypeImmediate(SimInstru
         addr = rs + se_imm16 - al_offset;
         alu_out = readW(addr, instr);
         alu_out = U32(alu_out) >> al_offset * 8;
         alu_out |= rt & mask;
         break;
       }
       case op_ll:
         addr = rs + se_imm16;
-        alu_out = readW(addr, instr);
+        alu_out = loadLinkedW(addr, instr);
+        break;
+      case op_lld:
+        addr = rs + se_imm16;
+        alu_out = loadLinkedD(addr, instr);
         break;
       case op_ld:
         addr = rs + se_imm16;
         alu_out = readDW(addr, instr);
         break;
       case op_ldl: {
         // al_offset is offset of the effective address within an aligned word.
         uint8_t al_offset = (rs + se_imm16) & 7;
@@ -3530,16 +3793,19 @@ Simulator::decodeTypeImmediate(SimInstru
         addr = rs + se_imm16 - al_offset;
         mem_value = readW(addr, instr);
         mem_value = (rt << al_offset * 8) | (mem_value & mask);
         break;
       }
       case op_sc:
         addr = rs + se_imm16;
         break;
+      case op_scd:
+        addr = rs + se_imm16;
+        break;
       case op_sd:
         addr = rs + se_imm16;
         break;
       case op_sdl: {
         uint8_t al_offset = (rs + se_imm16) & 7;
         uint8_t byte_shift = 7 - al_offset;
         uint64_t mask = byte_shift ? (~0ul << (al_offset + 1) * 8) : 0;
         addr = rs + se_imm16 - al_offset;
@@ -3611,16 +3877,17 @@ Simulator::decodeTypeImmediate(SimInstru
       case op_lb:
       case op_lhu:
       case op_lh:
       case op_lwu:
       case op_lw:
       case op_lwl:
       case op_lwr:
       case op_ll:
+      case op_lld:
       case op_ld:
       case op_ldl:
       case op_ldr:
         setRegister(rt_reg, alu_out);
         break;
       case op_sb:
         writeB(addr, I8(rt), instr);
         break;
@@ -3632,18 +3899,20 @@ Simulator::decodeTypeImmediate(SimInstru
         break;
       case op_swl:
         writeW(addr, I32(mem_value), instr);
         break;
       case op_swr:
         writeW(addr, I32(mem_value), instr);
         break;
       case op_sc:
-        writeW(addr, I32(rt), instr);
-        setRegister(rt_reg, 1);
+        setRegister(rt_reg, storeConditionalW(addr, I32(rt), instr));
+        break;
+      case op_scd:
+        setRegister(rt_reg, storeConditionalD(addr, rt, instr));
         break;
       case op_sd:
         writeDW(addr, rt, instr);
         break;
       case op_sdl:
         writeDW(addr, mem_value, instr);
         break;
       case op_sdr:
--- a/js/src/jit/mips64/Simulator-mips64.h
+++ b/js/src/jit/mips64/Simulator-mips64.h
@@ -155,16 +155,18 @@ class Simulator {
     static Simulator* Create(JSContext* cx);
 
     static void Destroy(Simulator* simulator);
 
     // Constructor/destructor are for internal use only; use the static methods above.
     Simulator();
     ~Simulator();
 
+    static bool supportsAtomics() { return true; }
+
     // The currently executing Simulator instance. Potentially there can be one
     // for each native thread.
     static Simulator* Current();
 
     static inline uintptr_t StackLimit() {
         return Simulator::Current()->stackLimit();
     }
 
@@ -271,16 +273,22 @@ class Simulator {
     inline int64_t readDW(uint64_t addr, SimInstruction* instr);
     inline int64_t readDWL(uint64_t addr, SimInstruction* instr);
     inline int64_t readDWR(uint64_t addr, SimInstruction* instr);
     inline void writeDW(uint64_t addr, int64_t value, SimInstruction* instr);
 
     inline double readD(uint64_t addr, SimInstruction* instr);
     inline void writeD(uint64_t addr, double value, SimInstruction* instr);
 
+    inline int32_t loadLinkedW(uint64_t addr, SimInstruction* instr);
+    inline int storeConditionalW(uint64_t addr, int32_t value, SimInstruction* instr);
+
+    inline int64_t loadLinkedD(uint64_t addr, SimInstruction* instr);
+    inline int storeConditionalD(uint64_t addr, int64_t value, SimInstruction* instr);
+
     // Helper function for decodeTypeRegister.
     void configureTypeRegister(SimInstruction* instr,
                                int64_t& alu_out,
                                __int128& i128hilo,
                                unsigned __int128& u128hilo,
                                int64_t& next_pc,
                                int32_t& return_addr_reg,
                                bool& do_interrupt);
@@ -303,16 +311,18 @@ class Simulator {
     void disableStop(uint32_t code);
     void increaseStopCounter(uint32_t code);
     void printStopInfo(uint32_t code);
 
     // Handle a wasm interrupt triggered by an async signal handler.
     void handleWasmInterrupt();
     void startInterrupt(JitActivation* act);
 
+    // Handle any wasm faults, returning true if the fault was handled.
+    bool handleWasmFault(uint64_t addr, unsigned numBytes);
     bool handleWasmTrapFault();
 
     // Executes one instruction.
     void instructionDecode(SimInstruction* instr);
     // Execute one instruction placed in a branch delay slot.
     void branchDelayInstructionDecode(SimInstruction* instr);
 
   public:
@@ -345,16 +355,20 @@ class Simulator {
     // Architecture state.
     // Registers.
     int64_t registers_[kNumSimuRegisters];
     // Coprocessor Registers.
     int64_t FPUregisters_[kNumFPURegisters];
     // FPU control register.
     uint32_t FCSR_;
 
+    bool LLBit_;
+    uintptr_t LLAddr_;
+    int64_t lastLLValue_;
+
     // Simulator support.
     char* stack_;
     uintptr_t stackLimit_;
     bool pc_modified_;
     int64_t icount_;
     int64_t break_count_;
 
     // wasm async interrupt support
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -6403,26 +6403,28 @@ class LAtomicIsLockFree : public LInstru
     }
 };
 
 class LCompareExchangeTypedArrayElement : public LInstructionHelper<1, 4, 4>
 {
   public:
     LIR_HEADER(CompareExchangeTypedArrayElement)
 
+    // ARM, ARM64, x86, x64
     LCompareExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
                                       const LAllocation& oldval, const LAllocation& newval,
                                       const LDefinition& temp)
     {
         setOperand(0, elements);
         setOperand(1, index);
         setOperand(2, oldval);
         setOperand(3, newval);
         setTemp(0, temp);
     }
+    // MIPS32, MIPS64
     LCompareExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
                                       const LAllocation& oldval, const LAllocation& newval,
                                       const LDefinition& temp, const LDefinition& valueTemp,
                                       const LDefinition& offsetTemp, const LDefinition& maskTemp)
     {
         setOperand(0, elements);
         setOperand(1, index);
         setOperand(2, oldval);
@@ -6465,24 +6467,26 @@ class LCompareExchangeTypedArrayElement 
     }
 };
 
 class LAtomicExchangeTypedArrayElement : public LInstructionHelper<1, 3, 4>
 {
   public:
     LIR_HEADER(AtomicExchangeTypedArrayElement)
 
+    // ARM, ARM64, x86, x64
     LAtomicExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
                                      const LAllocation& value, const LDefinition& temp)
     {
         setOperand(0, elements);
         setOperand(1, index);
         setOperand(2, value);
         setTemp(0, temp);
     }
+    // MIPS32, MIPS64
     LAtomicExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
                                      const LAllocation& value, const LDefinition& temp,
                                      const LDefinition& valueTemp, const LDefinition& offsetTemp,
                                      const LDefinition& maskTemp)
     {
         setOperand(0, elements);
         setOperand(1, index);
         setOperand(2, value);
@@ -6523,35 +6527,37 @@ class LAtomicExchangeTypedArrayElement :
 
 class LAtomicTypedArrayElementBinop : public LInstructionHelper<1, 3, 5>
 {
   public:
     LIR_HEADER(AtomicTypedArrayElementBinop)
 
     static const int32_t valueOp = 2;
 
+    // ARM, ARM64, x86, x64
     LAtomicTypedArrayElementBinop(const LAllocation& elements, const LAllocation& index,
                                   const LAllocation& value, const LDefinition& temp1,
                                   const LDefinition& temp2)
     {
         setOperand(0, elements);
         setOperand(1, index);
         setOperand(2, value);
         setTemp(0, temp1);
         setTemp(1, temp2);
     }
+    // MIPS32, MIPS64
     LAtomicTypedArrayElementBinop(const LAllocation& elements, const LAllocation& index,
-                                  const LAllocation& value, const LDefinition& temp1,
-                                  const LDefinition& temp2, const LDefinition& valueTemp,
-                                  const LDefinition& offsetTemp, const LDefinition& maskTemp)
+                                  const LAllocation& value, const LDefinition& temp2,
+                                  const LDefinition& valueTemp, const LDefinition& offsetTemp,
+                                  const LDefinition& maskTemp)
     {
         setOperand(0, elements);
         setOperand(1, index);
         setOperand(2, value);
-        setTemp(0, temp1);
+        setTemp(0, LDefinition::BogusTemp());
         setTemp(1, temp2);
         setTemp(2, valueTemp);
         setTemp(3, offsetTemp);
         setTemp(4, maskTemp);
     }
 
     const LAllocation* elements() {
         return getOperand(0);
@@ -6587,34 +6593,35 @@ class LAtomicTypedArrayElementBinop : pu
 };
 
 // Atomic binary operation where the result is discarded.
 class LAtomicTypedArrayElementBinopForEffect : public LInstructionHelper<0, 3, 4>
 {
   public:
     LIR_HEADER(AtomicTypedArrayElementBinopForEffect)
 
+    // ARM, ARM64, x86, x64
     LAtomicTypedArrayElementBinopForEffect(const LAllocation& elements, const LAllocation& index,
                                            const LAllocation& value,
                                            const LDefinition& flagTemp = LDefinition::BogusTemp())
     {
         setOperand(0, elements);
         setOperand(1, index);
         setOperand(2, value);
         setTemp(0, flagTemp);
     }
+    // MIPS32, MIPS64
     LAtomicTypedArrayElementBinopForEffect(const LAllocation& elements, const LAllocation& index,
-                                           const LAllocation& value, const LDefinition& flagTemp,
-                                           const LDefinition& valueTemp, const LDefinition& offsetTemp,
-                                           const LDefinition& maskTemp)
+                                           const LAllocation& value, const LDefinition& valueTemp,
+                                           const LDefinition& offsetTemp, const LDefinition& maskTemp)
     {
         setOperand(0, elements);
         setOperand(1, index);
         setOperand(2, value);
-        setTemp(0, flagTemp);
+        setTemp(0, LDefinition::BogusTemp());
         setTemp(1, valueTemp);
         setTemp(2, offsetTemp);
         setTemp(3, maskTemp);
     }
 
     const LAllocation* elements() {
         return getOperand(0);
     }
@@ -8725,26 +8732,25 @@ class LWasmAtomicBinopHeap : public LIns
         setOperand(1, value);
         setOperand(2, memoryBase);
         setTemp(0, temp);
         setTemp(1, LDefinition::BogusTemp());
         setTemp(2, flagTemp);
     }
     // MIPS32, MIPS64
     LWasmAtomicBinopHeap(const LAllocation& ptr, const LAllocation& value,
-                         const LDefinition& temp, const LDefinition& flagTemp,
                          const LDefinition& valueTemp, const LDefinition& offsetTemp,
                          const LDefinition& maskTemp)
     {
         setOperand(0, ptr);
         setOperand(1, value);
         setOperand(2, LAllocation());
-        setTemp(0, temp);
+        setTemp(0, LDefinition::BogusTemp());
         setTemp(1, LDefinition::BogusTemp());
-        setTemp(2, flagTemp);
+        setTemp(2, LDefinition::BogusTemp());
         setTemp(3, valueTemp);
         setTemp(4, offsetTemp);
         setTemp(5, maskTemp);
     }
     const LAllocation* ptr() {
         return getOperand(0);
     }
     const LAllocation* value() {
@@ -8799,24 +8805,24 @@ class LWasmAtomicBinopHeapForEffect : pu
         setOperand(0, ptr);
         setOperand(1, value);
         setOperand(2, memoryBase);
         setTemp(0, LDefinition::BogusTemp());
         setTemp(1, flagTemp);
     }
     // MIPS32, MIPS64
     LWasmAtomicBinopHeapForEffect(const LAllocation& ptr, const LAllocation& value,
-                                  const LDefinition& flagTemp, const LDefinition& valueTemp,
-                                  const LDefinition& offsetTemp, const LDefinition& maskTemp)
+                                  const LDefinition& valueTemp, const LDefinition& offsetTemp,
+                                  const LDefinition& maskTemp)
     {
         setOperand(0, ptr);
         setOperand(1, value);
         setOperand(2, LAllocation());
         setTemp(0, LDefinition::BogusTemp());
-        setTemp(1, flagTemp);
+        setTemp(1, LDefinition::BogusTemp());
         setTemp(2, valueTemp);
         setTemp(3, offsetTemp);
         setTemp(4, maskTemp);
     }
     const LAllocation* ptr() {
         return getOperand(0);
     }
     const LAllocation* value() {
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -1147,16 +1147,132 @@ MacroAssembler::atomicEffectOp(Scalar::T
     AtomicEffectOp(*this, arrayType, op, value, mem);
 }
 
 // ========================================================================
 // JS atomic operations.
 
 template<typename T>
 static void
+CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                  const T& mem, Register oldval, Register newval, Register temp, AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.compareExchange(arrayType, sync, mem, oldval, newval, temp);
+        masm.convertUInt32ToDouble(temp, output.fpu());
+    } else {
+        masm.compareExchange(arrayType, sync, mem, oldval, newval, output.gpr());
+    }
+}
+
+void
+MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                  const Address& mem, Register oldval, Register newval,
+                                  Register temp, AnyRegister output)
+{
+    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+void
+MacroAssembler::compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                  const BaseIndex& mem, Register oldval, Register newval,
+                                  Register temp, AnyRegister output)
+{
+    CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, temp, output);
+}
+
+template<typename T>
+static void
+AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                 const T& mem, Register value, Register temp, AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.atomicExchange(arrayType, sync, mem, value, temp);
+        masm.convertUInt32ToDouble(temp, output.fpu());
+    } else {
+        masm.atomicExchange(arrayType, sync, mem, value, output.gpr());
+    }
+}
+
+void
+MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                 const Address& mem, Register value, Register temp,
+                                 AnyRegister output)
+{
+    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+void
+MacroAssembler::atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
+                                 const BaseIndex& mem, Register value, Register temp,
+                                 AnyRegister output)
+{
+    AtomicExchangeJS(*this, arrayType, sync, mem, value, temp, output);
+}
+
+template<typename T>
+static void
+AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
+                AtomicOp op, Register value, const T& mem, Register temp1, Register temp2,
+                AnyRegister output)
+{
+    if (arrayType == Scalar::Uint32) {
+        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
+        masm.convertUInt32ToDouble(temp1, output.fpu());
+    } else {
+        masm.atomicFetchOp(arrayType, sync, op, value, mem, temp1, output.gpr());
+    }
+}
+
+void
+MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                Register value, const Address& mem, Register temp1, Register temp2,
+                                AnyRegister output)
+{
+    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void
+MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                Register value, const BaseIndex& mem, Register temp1, Register temp2,
+                                AnyRegister output)
+{
+    AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Register value, const BaseIndex& mem, Register temp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, temp);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Register value, const Address& mem, Register temp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, temp);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Imm32 value, const Address& mem, Register temp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, temp);
+}
+
+void
+MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
+                                 Imm32 value, const BaseIndex& mem, Register temp)
+{
+    atomicEffectOp(arrayType, sync, op, value, mem, temp);
+}
+
+template<typename T>
+static void
 AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType, const Synchronization& sync,
                 AtomicOp op, Imm32 value, const T& mem, Register temp1, Register temp2,
                 AnyRegister output)
 {
     if (arrayType == Scalar::Uint32) {
         masm.atomicFetchOp(arrayType, sync, op, value, mem, temp2, temp1);
         masm.convertUInt32ToDouble(temp1, output.fpu());
     } else {
@@ -1175,23 +1291,9 @@ MacroAssembler::atomicFetchOpJS(Scalar::
 void
 MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
                                 Imm32 value, const BaseIndex& mem, Register temp1, Register temp2,
                                 AnyRegister output)
 {
     AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, temp1, temp2, output);
 }
 
-void
-MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
-                                 Imm32 value, const Address& mem, Register temp)
-{
-    atomicEffectOp(arrayType, sync, op, value, mem, temp);
-}
-
-void
-MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
-                                 Imm32 value, const BaseIndex& mem, Register temp)
-{
-    atomicEffectOp(arrayType, sync, op, value, mem, temp);
-}
-
 //}}} check_macroassembler_style
--- a/js/src/wasm/WasmBuiltins.cpp
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -19,16 +19,17 @@
 #include "wasm/WasmBuiltins.h"
 
 #include "mozilla/Atomics.h"
 #include "mozilla/BinarySearch.h"
 
 #include "fdlibm.h"
 #include "jslibmath.h"
 
+#include "jit/AtomicOperations.h"
 #include "jit/InlinableNatives.h"
 #include "jit/MacroAssembler.h"
 #include "threading/Mutex.h"
 #include "wasm/WasmInstance.h"
 #include "wasm/WasmStubs.h"
 
 #include "vm/Debugger-inl.h"
 #include "vm/Stack-inl.h"
@@ -588,18 +589,22 @@ AddressOf(SymbolicAddress imm, ABIFuncti
         return FuncCast(Instance::currentMemory_i32, *abiType);
       case SymbolicAddress::WaitI32:
         *abiType = Args_Int_GeneralGeneralGeneralInt64;
         return FuncCast(Instance::wait_i32, *abiType);
       case SymbolicAddress::WaitI64:
         *abiType = Args_Int_GeneralGeneralInt64Int64;
         return FuncCast(Instance::wait_i64, *abiType);
       case SymbolicAddress::Wake:
-        *abiType = Args_General2;
+        *abiType = Args_General3;
         return FuncCast(Instance::wake, *abiType);
+#if defined(JS_CODEGEN_MIPS32)
+      case SymbolicAddress::js_jit_gAtomic64Lock:
+        return &js::jit::gAtomic64Lock;
+#endif
       case SymbolicAddress::Limit:
         break;
     }
 
     MOZ_CRASH("Bad SymbolicAddress");
 }
 
 bool
@@ -616,16 +621,19 @@ wasm::NeedsBuiltinThunk(SymbolicAddress 
       case SymbolicAddress::ReportOutOfBounds:        // GenerateOutOfBoundsExit
       case SymbolicAddress::ReportUnalignedAccess:    // GeneratesUnalignedExit
       case SymbolicAddress::CallImport_Void:          // GenerateImportInterpExit
       case SymbolicAddress::CallImport_I32:
       case SymbolicAddress::CallImport_I64:
       case SymbolicAddress::CallImport_F64:
       case SymbolicAddress::CoerceInPlace_ToInt32:    // GenerateImportJitExit
       case SymbolicAddress::CoerceInPlace_ToNumber:
+#if defined(JS_CODEGEN_MIPS32)
+      case SymbolicAddress::js_jit_gAtomic64Lock:
+#endif
         return false;
       case SymbolicAddress::ToInt32:
       case SymbolicAddress::DivI64:
       case SymbolicAddress::UDivI64:
       case SymbolicAddress::ModI64:
       case SymbolicAddress::UModI64:
       case SymbolicAddress::TruncateDoubleToUint64:
       case SymbolicAddress::TruncateDoubleToInt64:
--- a/js/src/wasm/WasmFrameIter.cpp
+++ b/js/src/wasm/WasmFrameIter.cpp
@@ -1034,16 +1034,20 @@ ThunkedNativeToDescription(SymbolicAddre
       case SymbolicAddress::CurrentMemory:
         return "call to native current_memory (in wasm)";
       case SymbolicAddress::WaitI32:
         return "call to native i32.wait (in wasm)";
       case SymbolicAddress::WaitI64:
         return "call to native i64.wait (in wasm)";
       case SymbolicAddress::Wake:
         return "call to native wake (in wasm)";
+#if defined(JS_CODEGEN_MIPS32)
+      case SymbolicAddress::js_jit_gAtomic64Lock:
+        MOZ_CRASH();
+#endif
       case SymbolicAddress::Limit:
         break;
     }
     return "?";
 }
 
 const char*
 ProfilingFrameIterator::label() const
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -1383,16 +1383,19 @@ enum class SymbolicAddress
     Uint64ToDouble,
     Int64ToFloat32,
     Int64ToDouble,
     GrowMemory,
     CurrentMemory,
     WaitI32,
     WaitI64,
     Wake,
+#if defined(JS_CODEGEN_MIPS32)
+    js_jit_gAtomic64Lock,
+#endif
     Limit
 };
 
 bool
 IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode);
 
 // Assumptions captures ambient state that must be the same when compiling and
 // deserializing a module for the compiled code to be valid. If it's not, then
--- a/js/xpconnect/src/Sandbox.cpp
+++ b/js/xpconnect/src/Sandbox.cpp
@@ -36,16 +36,17 @@
 #include "mozilla/dom/DirectoryBinding.h"
 #include "mozilla/dom/DOMPrefs.h"
 #include "mozilla/dom/IndexedDatabaseManager.h"
 #include "mozilla/dom/Fetch.h"
 #include "mozilla/dom/FileBinding.h"
 #include "mozilla/dom/InspectorUtilsBinding.h"
 #include "mozilla/dom/MessageChannelBinding.h"
 #include "mozilla/dom/MessagePortBinding.h"
+#include "mozilla/dom/NodeFilterBinding.h"
 #include "mozilla/dom/PromiseBinding.h"
 #include "mozilla/dom/RequestBinding.h"
 #include "mozilla/dom/ResponseBinding.h"
 #ifdef MOZ_WEBRTC
 #include "mozilla/dom/RTCIdentityProviderRegistrar.h"
 #endif
 #include "mozilla/dom/FileReaderBinding.h"
 #include "mozilla/dom/ScriptSettings.h"
@@ -922,60 +923,62 @@ xpc::GlobalProperties::Parse(JSContext* 
         if (!nameValue.isString()) {
             JS_ReportErrorASCII(cx, "Property names must be strings");
             return false;
         }
         RootedString nameStr(cx, nameValue.toString());
         JSAutoByteString name;
         if (!name.encodeUtf8(cx, nameStr))
             return false;
-        if (!strcmp(name.ptr(), "CSS")) {
+        if (!strcmp(name.ptr(), "Blob")) {
+            Blob = true;
+        } else if (!strcmp(name.ptr(), "ChromeUtils")) {
+            ChromeUtils = true;
+        } else if (!strcmp(name.ptr(), "CSS")) {
             CSS = true;
         } else if (!strcmp(name.ptr(), "CSSRule")) {
             CSSRule = true;
-        } else if (!strcmp(name.ptr(), "indexedDB")) {
-            indexedDB = true;
-        } else if (!strcmp(name.ptr(), "XMLHttpRequest")) {
-            XMLHttpRequest = true;
+        } else if (!strcmp(name.ptr(), "Directory")) {
+            Directory = true;
+        } else if (!strcmp(name.ptr(), "File")) {
+            File = true;
+        } else if (!strcmp(name.ptr(), "FileReader")) {
+            FileReader = true;
+        } else if (!strcmp(name.ptr(), "InspectorUtils")) {
+            InspectorUtils = true;
+        } else if (!strcmp(name.ptr(), "MessageChannel")) {
+            MessageChannel = true;
+        } else if (!strcmp(name.ptr(), "NodeFilter")) {
+            NodeFilter = true;
+        } else if (!strcmp(name.ptr(), "TextDecoder")) {
+            TextDecoder = true;
         } else if (!strcmp(name.ptr(), "TextEncoder")) {
             TextEncoder = true;
-        } else if (!strcmp(name.ptr(), "TextDecoder")) {
-            TextDecoder = true;
         } else if (!strcmp(name.ptr(), "URL")) {
             URL = true;
         } else if (!strcmp(name.ptr(), "URLSearchParams")) {
             URLSearchParams = true;
+        } else if (!strcmp(name.ptr(), "XMLHttpRequest")) {
+            XMLHttpRequest = true;
         } else if (!strcmp(name.ptr(), "atob")) {
             atob = true;
         } else if (!strcmp(name.ptr(), "btoa")) {
             btoa = true;
-        } else if (!strcmp(name.ptr(), "Blob")) {
-            Blob = true;
-        } else if (!strcmp(name.ptr(), "Directory")) {
-            Directory = true;
-        } else if (!strcmp(name.ptr(), "File")) {
-            File = true;
+        } else if (!strcmp(name.ptr(), "caches")) {
+            caches = true;
         } else if (!strcmp(name.ptr(), "crypto")) {
             crypto = true;
+        } else if (!strcmp(name.ptr(), "fetch")) {
+            fetch = true;
+        } else if (!strcmp(name.ptr(), "indexedDB")) {
+            indexedDB = true;
 #ifdef MOZ_WEBRTC
         } else if (!strcmp(name.ptr(), "rtcIdentityProvider")) {
             rtcIdentityProvider = true;
 #endif
-        } else if (!strcmp(name.ptr(), "fetch")) {
-            fetch = true;
-        } else if (!strcmp(name.ptr(), "caches")) {
-            caches = true;
-        } else if (!strcmp(name.ptr(), "FileReader")) {
-            fileReader = true;
-        } else if (!strcmp(name.ptr(), "MessageChannel")) {
-            messageChannel = true;
-        } else if (!strcmp(name.ptr(), "InspectorUtils")) {
-            inspectorUtils = true;
-        } else if (!strcmp(name.ptr(), "ChromeUtils")) {
-            ChromeUtils = true;
         } else {
             JS_ReportErrorUTF8(cx, "Unknown property name: %s", name.ptr());
             return false;
         }
     }
     return true;
 }
 
@@ -983,91 +986,94 @@ bool
 xpc::GlobalProperties::Define(JSContext* cx, JS::HandleObject obj)
 {
     MOZ_ASSERT(js::GetContextCompartment(cx) == js::GetObjectCompartment(obj));
     // Properties will be exposed to System automatically but not to Sandboxes
     // if |[Exposed=System]| is specified.
     // This function holds common properties not exposed automatically but able
     // to be requested either in |Cu.importGlobalProperties| or
     // |wantGlobalProperties| of a sandbox.
+    if (Blob &&
+        !dom::BlobBinding::GetConstructorObject(cx))
+        return false;
+
+    if (ChromeUtils && !dom::ChromeUtilsBinding::GetConstructorObject(cx))
+        return false;
+
     if (CSS && !dom::CSSBinding::GetConstructorObject(cx))
         return false;
 
     if (CSSRule && !dom::CSSRuleBinding::GetConstructorObject(cx))
         return false;
 
-    if (XMLHttpRequest &&
-        !dom::XMLHttpRequestBinding::GetConstructorObject(cx))
+    if (Directory &&
+        !dom::DirectoryBinding::GetConstructorObject(cx))
+        return false;
+
+    if (File &&
+        !dom::FileBinding::GetConstructorObject(cx))
+        return false;
+
+    if (FileReader && !dom::FileReaderBinding::GetConstructorObject(cx))
+        return false;
+
+    if (InspectorUtils &&
+        !dom::InspectorUtilsBinding::GetConstructorObject(cx))
+        return false;
+
+    if (MessageChannel &&
+        (!dom::MessageChannelBinding::GetConstructorObject(cx) ||
+         !dom::MessagePortBinding::GetConstructorObject(cx)))
+        return false;
+
+    if (NodeFilter && !dom::NodeFilterBinding::GetConstructorObject(cx))
+        return false;
+
+    if (TextDecoder &&
+        !dom::TextDecoderBinding::GetConstructorObject(cx))
         return false;
 
     if (TextEncoder &&
         !dom::TextEncoderBinding::GetConstructorObject(cx))
         return false;
 
-    if (TextDecoder &&
-        !dom::TextDecoderBinding::GetConstructorObject(cx))
-        return false;
-
     if (URL &&
         !dom::URLBinding::GetConstructorObject(cx))
         return false;
 
     if (URLSearchParams &&
         !dom::URLSearchParamsBinding::GetConstructorObject(cx))
         return false;
 
+    if (XMLHttpRequest &&
+        !dom::XMLHttpRequestBinding::GetConstructorObject(cx))
+        return false;
+
     if (atob &&
         !JS_DefineFunction(cx, obj, "atob", Atob, 1, 0))
         return false;
 
     if (btoa &&
         !JS_DefineFunction(cx, obj, "btoa", Btoa, 1, 0))
         return false;
 
-    if (Blob &&
-        !dom::BlobBinding::GetConstructorObject(cx))
-        return false;
-
-    if (Directory &&
-        !dom::DirectoryBinding::GetConstructorObject(cx))
-        return false;
-
-    if (File &&
-        !dom::FileBinding::GetConstructorObject(cx))
+    if (caches && !dom::cache::CacheStorage::DefineCaches(cx, obj))
         return false;
 
     if (crypto && !SandboxCreateCrypto(cx, obj))
         return false;
 
+    if (fetch && !SandboxCreateFetch(cx, obj))
+        return false;
+
 #ifdef MOZ_WEBRTC
     if (rtcIdentityProvider && !SandboxCreateRTCIdentityProvider(cx, obj))
         return false;
 #endif
 
-    if (fetch && !SandboxCreateFetch(cx, obj))
-        return false;
-
-    if (caches && !dom::cache::CacheStorage::DefineCaches(cx, obj))
-        return false;
-
-    if (fileReader && !dom::FileReaderBinding::GetConstructorObject(cx))
-        return false;
-
-    if (messageChannel &&
-        (!dom::MessageChannelBinding::GetConstructorObject(cx) ||
-         !dom::MessagePortBinding::GetConstructorObject(cx)))
-        return false;
-
-    if (inspectorUtils &&
-        !dom::InspectorUtilsBinding::GetConstructorObject(cx))
-        return false;
-
-    if (ChromeUtils && !dom::ChromeUtilsBinding::GetConstructorObject(cx))
-        return false;
-
     return true;
 }
 
 bool
 xpc::GlobalProperties::DefineInXPCComponents(JSContext* cx, JS::HandleObject obj)
 {
     if (indexedDB &&
         !IndexedDatabaseManager::DefineIndexedDB(cx, obj))
--- a/js/xpconnect/src/xpcprivate.h
+++ b/js/xpconnect/src/xpcprivate.h
@@ -2670,37 +2670,42 @@ ThrowAndFail(nsresult errNum, JSContext*
 struct GlobalProperties {
     GlobalProperties() {
       mozilla::PodZero(this);
 
     }
     bool Parse(JSContext* cx, JS::HandleObject obj);
     bool DefineInXPCComponents(JSContext* cx, JS::HandleObject obj);
     bool DefineInSandbox(JSContext* cx, JS::HandleObject obj);
+
+    // Interface objects we can expose.
+    bool Blob : 1;
+    bool ChromeUtils : 1;
     bool CSS : 1;
     bool CSSRule : 1;
-    bool indexedDB : 1;
-    bool XMLHttpRequest : 1;
+    bool Directory : 1;
+    bool File : 1;
+    bool FileReader: 1;
+    bool InspectorUtils : 1;
+    bool MessageChannel: 1;
+    bool NodeFilter : 1;
     bool TextDecoder : 1;
     bool TextEncoder : 1;
     bool URL : 1;
     bool URLSearchParams : 1;
+    bool XMLHttpRequest : 1;
+
+    // Ad-hoc property names we implement.
     bool atob : 1;
     bool btoa : 1;
-    bool Blob : 1;
-    bool Directory : 1;
-    bool File : 1;
+    bool caches : 1;
     bool crypto : 1;
-    bool rtcIdentityProvider : 1;
     bool fetch : 1;
-    bool caches : 1;
-    bool fileReader: 1;
-    bool messageChannel: 1;
-    bool ChromeUtils : 1;
-    bool inspectorUtils : 1;
+    bool indexedDB : 1;
+    bool rtcIdentityProvider : 1;
 private:
     bool Define(JSContext* cx, JS::HandleObject obj);
 };
 
 // Infallible.
 already_AddRefed<nsIXPCComponents_utils_Sandbox>
 NewSandboxConstructor();
 
--- a/modules/libpref/Preferences.cpp
+++ b/modules/libpref/Preferences.cpp
@@ -416,16 +416,18 @@ public:
       aResult = mUserValue.mStringVal;
     }
 
     return NS_OK;
   }
 
   void ToDomPref(dom::Pref* aDomPref)
   {
+    MOZ_ASSERT(XRE_IsParentProcess());
+
     aDomPref->name() = mName;
 
     aDomPref->isLocked() = mIsLocked;
 
     if (mHasDefaultValue) {
       aDomPref->defaultValue() = dom::PrefValue();
       mDefaultValue.ToDomPrefValue(Type(),
                                    &aDomPref->defaultValue().get_PrefValue());
@@ -444,16 +446,17 @@ public:
                  dom::MaybePrefValue::Tnull_t ||
                aDomPref->userValue().type() == dom::MaybePrefValue::Tnull_t ||
                (aDomPref->defaultValue().get_PrefValue().type() ==
                 aDomPref->userValue().get_PrefValue().type()));
   }
 
   void FromDomPref(const dom::Pref& aDomPref, bool* aValueChanged)
   {
+    MOZ_ASSERT(!XRE_IsParentProcess());
     MOZ_ASSERT(strcmp(mName, aDomPref.name().get()) == 0);
 
     mIsLocked = aDomPref.isLocked();
 
     const dom::MaybePrefValue& defaultValue = aDomPref.defaultValue();
     bool defaultValueChanged = false;
     if (defaultValue.type() == dom::MaybePrefValue::TPrefValue) {
       PrefValue value;
@@ -487,16 +490,18 @@ public:
 
     if (userValueChanged || (defaultValueChanged && !mHasUserValue)) {
       *aValueChanged = true;
     }
   }
 
   bool HasAdvisablySizedValues()
   {
+    MOZ_ASSERT(XRE_IsParentProcess());
+
     if (!IsTypeString()) {
       return true;
     }
 
     const char* stringVal;
     if (mHasDefaultValue) {
       stringVal = mDefaultValue.mStringVal;
       if (strlen(stringVal) > MAX_ADVISABLE_PREF_LENGTH) {
@@ -704,16 +709,18 @@ pref_HashTableLookup(const char* aPrefNa
 static void
 NotifyCallbacks(const char* aPrefName);
 
 #define PREF_HASHTABLE_INITIAL_LENGTH 1024
 
 static PrefSaveData
 pref_savePrefs()
 {
+  MOZ_ASSERT(NS_IsMainThread());
+
   PrefSaveData savedPrefs(gHashTable->EntryCount());
 
   for (auto iter = gHashTable->Iter(); !iter.Done(); iter.Next()) {
     auto pref = static_cast<Pref*>(iter.Get());
 
     nsAutoCString prefValueStr;
     if (!pref->UserValueToStringForSaving(prefValueStr)) {
       continue;
@@ -1401,37 +1408,17 @@ nsPrefBranch::GetRoot(nsACString& aRoot)
 }
 
 NS_IMETHODIMP
 nsPrefBranch::GetPrefType(const char* aPrefName, int32_t* aRetVal)
 {
   NS_ENSURE_ARG(aPrefName);
 
   const PrefName& prefName = GetPrefName(aPrefName);
-  Pref* pref;
-  if (gHashTable && (pref = pref_HashTableLookup(prefName.get()))) {
-    switch (pref->Type()) {
-      case PrefType::String:
-        *aRetVal = PREF_STRING;
-        break;
-
-      case PrefType::Int:
-        *aRetVal = PREF_INT;
-        break;
-
-      case PrefType::Bool:
-        *aRetVal = PREF_BOOL;
-        break;
-
-      default:
-        MOZ_CRASH();
-    }
-  } else {
-    *aRetVal = PREF_INVALID;
-  }
+  *aRetVal = Preferences::GetType(prefName.get());
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsPrefBranch::GetBoolPrefWithDefault(const char* aPrefName,
                                      bool aDefaultValue,
                                      uint8_t aArgc,
                                      bool* aRetVal)
@@ -1976,16 +1963,18 @@ nsPrefBranch::GetChildList(const char* a
   int32_t numPrefs;
   int32_t dwIndex;
   AutoTArray<nsCString, 32> prefArray;
 
   NS_ENSURE_ARG(aStartingAt);
   NS_ENSURE_ARG_POINTER(aCount);
   NS_ENSURE_ARG_POINTER(aChildArray);
 
+  MOZ_ASSERT(NS_IsMainThread());
+
   *aChildArray = nullptr;
   *aCount = 0;
 
   // This will contain a list of all the pref name strings. Allocated on the
   // stack for speed.
 
   const PrefName& parent = GetPrefName(aStartingAt);
   size_t parentLen = parent.Length();
@@ -3074,18 +3063,16 @@ Preferences::ReadUserPrefsFromFile(nsIFi
   return openPrefFile(aFile);
 }
 
 NS_IMETHODIMP
 Preferences::ResetPrefs()
 {
   ENSURE_PARENT_PROCESS("Preferences::ResetPrefs", "all prefs");
 
-  NotifyServiceObservers(NS_PREFSERVICE_RESET_TOPIC_ID);
-
   gHashTable->ClearAndPrepareForLength(PREF_HASHTABLE_INITIAL_LENGTH);
   gPrefNameArena.Clear();
 
   return InitInitialObjects().isOk() ? NS_OK : NS_ERROR_FAILURE;
 }
 
 NS_IMETHODIMP
 Preferences::ResetUserPrefs()
@@ -3218,16 +3205,17 @@ Preferences::GetPreference(dom::Pref* aD
     pref->ToDomPref(aDomPref);
   }
 }
 
 void
 Preferences::GetPreferences(InfallibleTArray<dom::Pref>* aDomPrefs)
 {
   MOZ_ASSERT(XRE_IsParentProcess());
+  MOZ_ASSERT(NS_IsMainThread());
 
   aDomPrefs->SetCapacity(gHashTable->EntryCount());
   for (auto iter = gHashTable->Iter(); !iter.Done(); iter.Next()) {
     auto pref = static_cast<Pref*>(iter.Get());
 
     if (pref->HasAdvisablySizedValues()) {
       dom::Pref* setting = aDomPrefs->AppendElement();
       pref->ToDomPref(setting);
@@ -3810,18 +3798,17 @@ Preferences::InitInitialObjects()
     }
   }
 
 #ifdef MOZ_WIDGET_ANDROID
   // Set up the correct default for toolkit.telemetry.enabled. If this build
   // has MOZ_TELEMETRY_ON_BY_DEFAULT *or* we're on the beta channel, telemetry
   // is on by default, otherwise not. This is necessary so that beta users who
   // are testing final release builds don't flipflop defaults.
-  if (Preferences::GetType(kTelemetryPref, PrefValueKind::Default) ==
-      nsIPrefBranch::PREF_INVALID) {
+  if (Preferences::GetType(kTelemetryPref) == nsIPrefBranch::PREF_INVALID) {
     bool prerelease = false;
 #ifdef MOZ_TELEMETRY_ON_BY_DEFAULT
     prerelease = true;
 #else
     nsAutoCString prefValue;
     Preferences::GetCString(kChannelPref, prefValue, PrefValueKind::Default);
     if (prefValue.EqualsLiteral("beta")) {
       prerelease = true;
@@ -4153,23 +4140,38 @@ Preferences::HasUserValue(const char* aP
 {
   NS_ENSURE_TRUE(InitStaticMembers(), false);
 
   Pref* pref = pref_HashTableLookup(aPrefName);
   return pref && pref->HasUserValue();
 }
 
 /* static */ int32_t
-Preferences::GetType(const char* aPrefName, PrefValueKind aKind)
+Preferences::GetType(const char* aPrefName)
 {
   NS_ENSURE_TRUE(InitStaticMembers(), nsIPrefBranch::PREF_INVALID);
-  int32_t result;
-  return NS_SUCCEEDED(GetRootBranch(aKind)->GetPrefType(aPrefName, &result))
-           ? result
-           : nsIPrefBranch::PREF_INVALID;
+
+  Pref* pref;
+  if (!gHashTable || !(pref = pref_HashTableLookup(aPrefName))) {
+    return PREF_INVALID;
+  }
+
+  switch (pref->Type()) {
+    case PrefType::String:
+      return PREF_STRING;
+
+    case PrefType::Int:
+      return PREF_INT;
+
+    case PrefType::Bool:
+      return PREF_BOOL;
+
+    default:
+      MOZ_CRASH();
+  }
 }
 
 /* static */ nsresult
 Preferences::AddStrongObserver(nsIObserver* aObserver, const char* aPref)
 {
   MOZ_ASSERT(aObserver);
   NS_ENSURE_TRUE(InitStaticMembers(), NS_ERROR_NOT_AVAILABLE);
   return sPreferences->mRootBranch->AddObserver(aPref, aObserver, false);
--- a/modules/libpref/Preferences.h
+++ b/modules/libpref/Preferences.h
@@ -87,18 +87,17 @@ public:
   static nsIPrefBranch* GetRootBranch(PrefValueKind aKind = PrefValueKind::User)
   {
     NS_ENSURE_TRUE(InitStaticMembers(), nullptr);
     return (aKind == PrefValueKind::Default) ? sPreferences->mDefaultRootBranch
                                              : sPreferences->mRootBranch;
   }
 
   // Gets the type of the pref.
-  static int32_t GetType(const char* aPrefName,
-                         PrefValueKind aKind = PrefValueKind::User);
+  static int32_t GetType(const char* aPrefName);
 
   // Fallible value getters.
   static nsresult GetBool(const char* aPrefName,
                           bool* aResult,
                           PrefValueKind aKind = PrefValueKind::User);
   static nsresult GetInt(const char* aPrefName,
                          int32_t* aResult,
                          PrefValueKind aKind = PrefValueKind::User);
--- a/modules/libpref/nsIPrefService.idl
+++ b/modules/libpref/nsIPrefService.idl
@@ -1,25 +1,16 @@
 /* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsISupports.idl"
 #include "nsIPrefBranch.idl"
 
-%{C++
-struct PrefTuple;
-#include "nsTArrayForwardDeclare.h"
-%}
-
-[ptr] native nsPreferencesArrayPtr(nsTArray<PrefTuple>);
-[ptr] native nsPreferencePtr(PrefTuple);
-[ptr] native nsPreferencePtrConst(const PrefTuple);
-
 interface nsIFile;
 
 /**
  * The nsIPrefService interface is the main entry point into the back end
  * preferences management library. The preference service is directly
  * responsible for the management of the preferences files and also facilitates
  * access to the preference branch object which allows the direct manipulation
  * of the preferences themselves.
@@ -135,20 +126,14 @@ interface nsIPrefService : nsISupports
 #define NS_PREFSERVICE_CONTRACTID "@mozilla.org/preferences-service;1"
 
 /**
  * Notification sent before reading the default user preferences files.
  */
 #define NS_PREFSERVICE_READ_TOPIC_ID "prefservice:before-read-userprefs"
 
 /**
- * Notification sent when resetPrefs has been called, but before the actual
- * reset process occurs.
- */
-#define NS_PREFSERVICE_RESET_TOPIC_ID "prefservice:before-reset"
-
-/**
  * Notification sent when after reading app-provided default
  * preferences, but before user profile override defaults are loaded.
  */
 #define NS_PREFSERVICE_APPDEFAULTS_TOPIC_ID "prefservice:after-app-defaults"
 
 %}
--- a/testing/mochitest/browser-test.js
+++ b/testing/mochitest/browser-test.js
@@ -7,16 +7,18 @@ var gSaveInstrumentationData = null;
 ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm");
 ChromeUtils.import("resource://gre/modules/Task.jsm");
 ChromeUtils.import("resource://gre/modules/AppConstants.jsm");
 ChromeUtils.import("resource://gre/modules/Services.jsm");
 
 ChromeUtils.defineModuleGetter(this, "ContentSearch",
   "resource:///modules/ContentSearch.jsm");
 
+Cu.importGlobalProperties(["NodeFilter"]);
+
 const SIMPLETEST_OVERRIDES =
   ["ok", "is", "isnot", "todo", "todo_is", "todo_isnot", "info", "expectAssertions", "requestCompleteLog"];
 
 // non-android is bootstrapped by marionette
 if (Services.appinfo.OS == 'Android') {
   window.addEventListener("load", function() {
     window.addEventListener("MozAfterPaint", function() {
       setTimeout(testInit, 0);
@@ -281,17 +283,17 @@ function takeInstrumentation() {
 
   // An iterator over an element and all of its descendants
   function* elementDescendants(element) {
     let walker = Cc["@mozilla.org/inspector/deep-tree-walker;1"].
                  createInstance(Ci.inIDeepTreeWalker);
     walker.showAnonymousContent = true;
     walker.showSubDocuments = false;
     walker.showDocumentsAsNodes = false;
-    walker.init(element, 1 /* NodeFilter.SHOW_ELEMENT */);
+    walker.init(element, NodeFilter.SHOW_ELEMENT);
 
     yield element;
     while (walker.nextNode()) {
       if (walker.currentNode instanceof Element) {
         yield walker.currentNode;
       }
     }
   }
--- a/testing/specialpowers/content/specialpowersAPI.js
+++ b/testing/specialpowers/content/specialpowersAPI.js
@@ -21,17 +21,17 @@ ChromeUtils.import("resource://gre/modul
 ChromeUtils.import("resource://gre/modules/NetUtil.jsm");
 
 // We're loaded with "this" not set to the global in some cases, so we
 // have to play some games to get at the global object here.  Normally
 // we'd try "this" from a function called with undefined this value,
 // but this whole file is in strict mode.  So instead fall back on
 // returning "this" from indirect eval, which returns the global.
 if (!(function() { var e = eval; return e("this"); })().File) { // eslint-disable-line no-eval
-    Cu.importGlobalProperties(["File", "InspectorUtils"]);
+    Cu.importGlobalProperties(["File", "InspectorUtils", "NodeFilter"]);
 }
 
 // Allow stuff from this scope to be accessed from non-privileged scopes. This
 // would crash if used outside of automation.
 Cu.forcePermissiveCOWs();
 
 function SpecialPowersAPI() {
   this._consoleListeners = [];
@@ -2186,17 +2186,17 @@ SpecialPowersAPI.prototype = {
     return this._pu;
   },
 
   createDOMWalker(node, showAnonymousContent) {
     node = unwrapIfWrapped(node);
     let walker = Cc["@mozilla.org/inspector/deep-tree-walker;1"].
                  createInstance(Ci.inIDeepTreeWalker);
     walker.showAnonymousContent = showAnonymousContent;
-    walker.init(node.ownerDocument, 0xFFFFFFFF /* NodeFilter.SHOW_ALL */);
+    walker.init(node.ownerDocument, NodeFilter.SHOW_ALL);
     walker.currentNode = node;
     return {
       get firstChild() {
         return wrapIfUnwrapped(walker.firstChild());
       },
       get lastChild() {
         return wrapIfUnwrapped(walker.lastChild());
       },
--- a/testing/web-platform/meta/svg/historical.html.ini
+++ b/testing/web-platform/meta/svg/historical.html.ini
@@ -18,31 +18,16 @@
     expected: FAIL
 
   [SVGSVGElement.prototype.useCurrentView must be removed]
     expected: FAIL
 
   [SVGViewElement.prototype.viewTarget must be removed]
     expected: FAIL
 
-  [SVGClipPathElement must not implement SVGUnitTypes]
-    expected: FAIL
-
-  [SVGFilterElement must not implement SVGUnitTypes]
-    expected: FAIL
-
-  [SVGGradientElement must not implement SVGUnitTypes]
-    expected: FAIL
-
-  [SVGMaskElement must not implement SVGUnitTypes]
-    expected: FAIL
-
-  [SVGPatternElement must not implement SVGUnitTypes]
-    expected: FAIL
-
   [SVGSVGElement.prototype.pixelUnitToMillimeterX must be removed]
     expected: FAIL
 
   [SVGSVGElement.prototype.pixelUnitToMillimeterY must be removed]
     expected: FAIL
 
   [SVGSVGElement.prototype.screenPixelToMillimeterX must be removed]
     expected: FAIL
--- a/toolkit/components/viewsource/content/viewSource-content.js
+++ b/toolkit/components/viewsource/content/viewSource-content.js
@@ -5,16 +5,18 @@
 /* eslint-env mozilla/frame-script */
 
 ChromeUtils.import("resource://gre/modules/Services.jsm");
 ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm");
 
 ChromeUtils.defineModuleGetter(this, "DeferredTask",
   "resource://gre/modules/DeferredTask.jsm");
 
+Cu.importGlobalProperties(["NodeFilter"]);
+
 const NS_XHTML = "http://www.w3.org/1999/xhtml";
 const BUNDLE_URL = "chrome://global/locale/viewSource.properties";
 
 // These are markers used to delimit the selection during processing. They
 // are removed from the final rendering.
 // We use noncharacter Unicode codepoints to minimize the risk of clashing
 // with anything that might legitimately be present in the document.
 // U+FDD0..FDEF <noncharacters>
@@ -533,17 +535,17 @@ var ViewSourceContent = {
     // id attributes in the format <pre id="line123">, meaning that
     // the first line in the pre element is number 123.
     // However, in the plain text case, there is only one <pre> without an id,
     // so assume line 1.
     let curLine = pre.id ? parseInt(pre.id.substring(4)) : 1;
 
     // Walk through each of the text nodes and count newlines.
     let treewalker = content.document
-        .createTreeWalker(pre, 4 /* NodeFilter.SHOW_TEXT */, null);
+        .createTreeWalker(pre, NodeFilter.SHOW_TEXT, null);
 
     // The column number of the first character in the current text node.
     let firstCol = 1;
 
     let found = false;
     for (let textNode = treewalker.firstChild();
          textNode && !found;
          textNode = treewalker.nextNode()) {
--- a/toolkit/content/widgets/radio.xml
+++ b/toolkit/content/widgets/radio.xml
@@ -244,17 +244,17 @@
           var doc = this.ownerDocument;
 
           if (this.hasChildNodes()) {
             // Don't store the collected child nodes immediately,
             // collecting the child nodes could trigger constructors
             // which would blow away our list.
 
             var iterator = doc.createTreeWalker(this,
-						1 /* NodeFilter.SHOW_ELEMENT */,
+                                                NodeFilter.SHOW_ELEMENT,
                                                 this._filterRadioGroup);
             while (iterator.nextNode())
               radioChildren.push(iterator.currentNode);
             return this._radioChildren = radioChildren;
           }
 
           // We don't have child nodes.
           const XUL_NS = "http://www.mozilla.org/keymaster/"