merge mozilla-inbound to mozilla-central a=merge
author: Carsten "Tomcat" Book <cbook@mozilla.com>
date: Mon, 16 Jan 2017 16:35:14 +0100
changeset: 357588 d4d3a7b6d57e06891f158ccb4bebf507a8d55d32
parent: 357567 aeda115ca5c37723ec703a2574204952001e40db (current diff)
parent: 357587 ee2a1ad506f08f363d32cc54d3cd4db000ac829d (diff)
child: 357600 70db86990f0df9766d1eb582bd7668a1dd6983bb
child: 357617 586bc089769d01abc91343cb8900bfa7a6923c88
push id: 10621
push user: jlund@mozilla.com
push date: Mon, 23 Jan 2017 16:02:43 +0000
treeherder: mozilla-aurora@dca7b42e6c67
reviewers: merge
milestone: 53.0a1
description: merge mozilla-inbound to mozilla-central a=merge
files:
devtools/client/debugger/new/bundle.js
devtools/client/debugger/new/styles.css
--- a/browser/base/content/test/general/browser_parsable_css.js
+++ b/browser/base/content/test/general/browser_parsable_css.js
@@ -9,17 +9,17 @@
  * matching the offending error. If an object has multiple regex criteria, they
  * ALL need to match an error in order for that error not to cause a test
  * failure. */
 let whitelist = [
   // CodeMirror is imported as-is, see bug 1004423.
   {sourceName: /codemirror\.css$/i,
    isFromDevTools: true},
   // The debugger uses cross-browser CSS.
-  {sourceName: /devtools\/client\/debugger\/new\/styles.css/i,
+  {sourceName: /devtools\/client\/debugger\/new\/debugger.css/i,
    isFromDevTools: true},
   // PDFjs is futureproofing its pseudoselectors, and those rules are dropped.
   {sourceName: /web\/viewer\.css$/i,
    errorMessage: /Unknown pseudo-class.*(fullscreen|selection)/i,
    isFromDevTools: false},
   // PDFjs rules needed for compat with other UAs.
   {sourceName: /web\/viewer\.css$/i,
    errorMessage: /Unknown property.*appearance/i,
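The comment above spells out AND semantics: when a whitelist entry carries several regex criteria, all of them must match the same error for it to be ignored. A minimal illustrative sketch of that matching rule, transliterated to C++ (WhitelistEntry and CssError are hypothetical stand-ins for the test's JS objects; the test itself stays JS):

#include <regex>
#include <string>

struct CssError { std::string sourceName, errorMessage; };

struct WhitelistEntry {
    std::regex sourceName;
    std::regex errorMessage;
    bool hasErrorMessage = false;  // entries may omit this criterion
};

// An error is whitelisted only if EVERY criterion on the entry matches.
bool whitelisted(const WhitelistEntry& e, const CssError& err) {
    if (!std::regex_search(err.sourceName, e.sourceName))
        return false;
    if (e.hasErrorMessage && !std::regex_search(err.errorMessage, e.errorMessage))
        return false;
    return true;
}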
--- a/browser/modules/FormSubmitObserver.jsm
+++ b/browser/modules/FormSubmitObserver.jsm
@@ -96,50 +96,58 @@ FormSubmitObserver.prototype =
   notifyInvalidSubmit(aFormElement, aInvalidElements) {
     // We are going to handle invalid form submission attempt by focusing the
     // first invalid element and show the corresponding validation message in a
     // panel attached to the element.
     if (!aInvalidElements.length) {
       return;
     }
 
-    // Ensure that this is the FormSubmitObserver associated with the
-    // element / window this notification is about.
-    let element = aInvalidElements.queryElementAt(0, Ci.nsISupports);
-    if (this._content != element.ownerGlobal.top.document.defaultView) {
-      return;
-    }
+    // Show a validation message on the first focusable element.
+    for (let i = 0; i < aInvalidElements.length; i++) {
+      // Ensure that this is the FormSubmitObserver associated with the
+      // element / window this notification is about.
+      let element = aInvalidElements.queryElementAt(i, Ci.nsISupports);
+      if (this._content != element.ownerGlobal.top.document.defaultView) {
+        return;
+      }
 
-    if (!(element instanceof HTMLInputElement ||
-          element instanceof HTMLTextAreaElement ||
-          element instanceof HTMLSelectElement ||
-          element instanceof HTMLButtonElement)) {
-      return;
-    }
+      if (!(element instanceof HTMLInputElement ||
+            element instanceof HTMLTextAreaElement ||
+            element instanceof HTMLSelectElement ||
+            element instanceof HTMLButtonElement)) {
+        continue;
+      }
 
-    // Update validation message before showing notification
-    this._validationMessage = element.validationMessage;
+      if (!Services.focus.elementIsFocusable(element, 0)) {
+        continue;
+      }
 
-    // Don't connect up to the same element more than once.
-    if (this._element == element) {
-      this._showPopup(element);
-      return;
-    }
-    this._element = element;
+      // Update validation message before showing notification
+      this._validationMessage = element.validationMessage;
 
-    element.focus();
+      // Don't connect up to the same element more than once.
+      if (this._element == element) {
+        this._showPopup(element);
+        break;
+      }
+      this._element = element;
 
-    // Watch for input changes which may change the validation message.
-    element.addEventListener("input", this, false);
+      element.focus();
+
+      // Watch for input changes which may change the validation message.
+      element.addEventListener("input", this, false);
 
-    // Watch for focus changes so we can disconnect our listeners and
-    // hide the popup.
-    element.addEventListener("blur", this, false);
+      // Watch for focus changes so we can disconnect our listeners and
+      // hide the popup.
+      element.addEventListener("blur", this, false);
 
-    this._showPopup(element);
+      this._showPopup(element);
+      break;
+    }
   },
 
   /*
    * Internal
    */
 
   /*
    * Handles input changes on the form element we've associated a popup
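The rewritten notifyInvalidSubmit above walks all invalid elements and anchors the popup to the first one that is both a supported form control and focusable, skipping hidden fields; the new browser_bug1319078.js test below covers the display:none and visibility:hidden cases. A schematic of that selection (C++ sketch; Element and its fields are hypothetical, this is not the JSM logic verbatim):

#include <vector>

struct Element {
    bool supportedControl;  // input/textarea/select/button
    bool focusable;         // false for display:none, visibility:hidden, ...
};

// Return the first invalid element the popup can attach to, or nullptr,
// mirroring the loop's continue/break structure.
Element* firstFocusableInvalid(const std::vector<Element*>& invalid) {
    for (Element* e : invalid) {
        if (!e->supportedControl)
            continue;
        if (!e->focusable)
            continue;
        return e;
    }
    return nullptr;
}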
--- a/browser/modules/test/browser.ini
+++ b/browser/modules/test/browser.ini
@@ -36,8 +36,9 @@ support-files =
   usageTelemetrySearchSuggestions.xml
 [browser_UsageTelemetry_searchbar.js]
 support-files =
   usageTelemetrySearchSuggestions.sjs
   usageTelemetrySearchSuggestions.xml
 [browser_UsageTelemetry_content.js]
 [browser_UsageTelemetry_content_aboutHome.js]
 [browser_urlBar_zoom.js]
+[browser_bug1319078.js]
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/browser/modules/test/browser_bug1319078.js
@@ -0,0 +1,49 @@
+"use strict";
+
+var gInvalidFormPopup = document.getElementById('invalid-form-popup');
+
+function checkPopupHide() {
+  ok(gInvalidFormPopup.state != 'showing' && gInvalidFormPopup.state != 'open',
+     "[Test " + testId + "] The invalid form popup should not be shown");
+}
+
+var testId = 0;
+
+function incrementTest() {
+  testId++;
+  info("Starting next part of test");
+}
+
+/**
+ * In this test, we check that no popup appears if the element display is none.
+ */
+add_task(function* () {
+  ok(gInvalidFormPopup,
+     "The browser should have a popup to show when a form is invalid");
+
+  incrementTest();
+  let testPage =
+    'data:text/html,' +
+    '<form target="t"><input type="url"  placeholder="url" value="http://" style="display: none;"><input id="s" type="button" value="check"></form>';
+  let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, testPage);
+  yield BrowserTestUtils.synthesizeMouse("#s", 0, 0, {}, gBrowser.selectedBrowser);
+
+  checkPopupHide();
+  yield BrowserTestUtils.removeTab(tab);
+});
+
+/**
+ * In this test, we check that no popup appears if the element visibility is hidden.
+ */
+add_task(function* () {
+  incrementTest();
+  let testPage =
+    'data:text/html,' +
+    '<form target="t"><input type="url"  placeholder="url" value="http://" style="visibility: hidden;"><input id="s" type="button" value="check"></form>';
+  let tab = yield BrowserTestUtils.openNewForegroundTab(gBrowser, testPage);
+  yield BrowserTestUtils.synthesizeMouse("#s", 0, 0, {}, gBrowser.selectedBrowser);
+
+  checkPopupHide();
+  yield BrowserTestUtils.removeTab(tab);
+});
+
rename from devtools/client/debugger/new/styles.css
rename to devtools/client/debugger/new/debugger.css
rename from devtools/client/debugger/new/bundle.js
rename to devtools/client/debugger/new/debugger.js
--- a/devtools/client/debugger/new/index.html
+++ b/devtools/client/debugger/new/index.html
@@ -8,24 +8,24 @@
           type="text/css"
           href="chrome://devtools/content/sourceeditor/codemirror/lib/codemirror.css" />
     <link rel="stylesheet"
           type="text/css"
           href="chrome://devtools/content/sourceeditor/codemirror/addon/dialog/dialog.css" />
     <link rel="stylesheet"
           type="text/css"
           href="chrome://devtools/content/sourceeditor/codemirror/mozilla.css" />
-    <link rel="stylesheet" type="text/css" href="resource://devtools/client/debugger/new/styles.css" />
+    <link rel="stylesheet" type="text/css" href="resource://devtools/client/debugger/new/debugger.css" />
   </head>
   <body>
     <div id="mount"></div>
     <script type="application/javascript;version=1.8"
             src="chrome://devtools/content/shared/theme-switching.js"></script>
     <script type="text/javascript">
       const { BrowserLoader } = Components.utils.import("resource://devtools/client/shared/browser-loader.js", {});
       const { require: devtoolsRequire } = BrowserLoader({
         baseURI: "resource://devtools/client/debugger/new/",
         window,
       });
     </script>
-    <script type="text/javascript" src="resource://devtools/client/debugger/new/bundle.js"></script>
+    <script type="text/javascript" src="resource://devtools/client/debugger/new/debugger.js"></script>
   </body>
 </html>
--- a/devtools/client/debugger/new/moz.build
+++ b/devtools/client/debugger/new/moz.build
@@ -1,12 +1,12 @@
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 DevToolsModules(
-    'bundle.js',
+    'debugger.css',
+    'debugger.js',
     'panel.js',
     'pretty-print-worker.js',
-    'source-map-worker.js',
-    'styles.css'
+    'source-map-worker.js'
 )
--- a/dom/file/nsHostObjectProtocolHandler.h
+++ b/dom/file/nsHostObjectProtocolHandler.h
@@ -92,22 +92,16 @@ private:
 };
 
 class nsBlobProtocolHandler : public nsHostObjectProtocolHandler
 {
 public:
   NS_IMETHOD GetScheme(nsACString &result) override;
 };
 
-class nsMediaSourceProtocolHandler : public nsHostObjectProtocolHandler
-{
-public:
-  NS_IMETHOD GetScheme(nsACString &result) override;
-};
-
 class nsFontTableProtocolHandler : public nsHostObjectProtocolHandler
 {
 public:
   NS_IMETHOD GetScheme(nsACString &result);
   NS_IMETHOD NewURI(const nsACString & aSpec, const char * aOriginCharset, nsIURI *aBaseURI, nsIURI * *_retval);
 };
 
 bool IsBlobURI(nsIURI* aUri);
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -524,30 +524,37 @@ AudioContext::DecodeAudioData(const Arra
   RefPtr<DecodeErrorCallback> failureCallback;
   RefPtr<DecodeSuccessCallback> successCallback;
   if (aFailureCallback.WasPassed()) {
     failureCallback = &aFailureCallback.Value();
   }
   if (aSuccessCallback.WasPassed()) {
     successCallback = &aSuccessCallback.Value();
   }
-  RefPtr<WebAudioDecodeJob> job(
+  UniquePtr<WebAudioDecodeJob> job(
     new WebAudioDecodeJob(contentType, this,
                           promise, successCallback, failureCallback));
   AsyncDecodeWebAudio(contentType.get(), data, length, *job);
   // Transfer the ownership to mDecodeJobs
-  mDecodeJobs.AppendElement(job.forget());
+  mDecodeJobs.AppendElement(Move(job));
 
   return promise.forget();
 }
 
 void
 AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob)
 {
-  mDecodeJobs.RemoveElement(aDecodeJob);
+  // Since UniquePtr doesn't provide an operator== which allows you to compare
+  // against raw pointers, we need to iterate manually.
+  for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) {
+    if (mDecodeJobs[i].get() == aDecodeJob) {
+      mDecodeJobs.RemoveElementAt(i);
+      break;
+    }
+  }
 }
 
 void
 AudioContext::RegisterActiveNode(AudioNode* aNode)
 {
   if (!mIsShutDown) {
     mActiveNodes.PutEntry(aNode);
   }
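With mDecodeJobs now holding UniquePtr instead of RefPtr, RemoveFromDecodeQueue has to match by raw pointer by hand, as the comment in the hunk above says. The same pattern in a standalone sketch (std::vector and std::unique_ptr stand in for nsTArray and mozilla::UniquePtr; Job is a hypothetical type):

#include <algorithm>
#include <memory>
#include <vector>

struct Job {};  // stand-in for WebAudioDecodeJob

std::vector<std::unique_ptr<Job>> jobs;  // analogous to mDecodeJobs

// Remove (and thereby destroy) the entry owning `raw`, comparing the
// smart pointer's .get() against the raw pointer, as the patched loop does.
void removeJob(Job* raw) {
    auto it = std::find_if(jobs.begin(), jobs.end(),
                           [raw](const std::unique_ptr<Job>& j) { return j.get() == raw; });
    if (it != jobs.end())
        jobs.erase(it);
}

Like the patch, this stops at the first match; erasing destroys the job, so callers must not touch aDecodeJob afterwards.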
--- a/dom/media/webaudio/AudioContext.h
+++ b/dom/media/webaudio/AudioContext.h
@@ -8,16 +8,17 @@
 #define AudioContext_h_
 
 #include "mozilla/dom/AudioChannelBinding.h"
 #include "MediaBufferDecoder.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/DOMEventTargetHelper.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/dom/TypedArray.h"
+#include "mozilla/UniquePtr.h"
 #include "nsCOMPtr.h"
 #include "nsCycleCollectionParticipant.h"
 #include "nsHashKeys.h"
 #include "nsTHashtable.h"
 #include "js/TypeDecls.h"
 #include "nsIMemoryReporter.h"
 
 // X11 has a #define for CurrentTime. Unbelievable :-(.
@@ -343,17 +344,17 @@ private:
   // MediaStreams for a given context, on the MediasStreamGraph side.
   const AudioContextId mId;
   // Note that it's important for mSampleRate to be initialized before
   // mDestination, as mDestination's constructor needs to access it!
   const float mSampleRate;
   AudioContextState mAudioContextState;
   RefPtr<AudioDestinationNode> mDestination;
   RefPtr<AudioListener> mListener;
-  nsTArray<RefPtr<WebAudioDecodeJob> > mDecodeJobs;
+  nsTArray<UniquePtr<WebAudioDecodeJob> > mDecodeJobs;
   // This array is used to keep the suspend/resume/close promises alive until
   // they are resolved, so we can safely pass them across threads.
   nsTArray<RefPtr<Promise>> mPromiseGripArray;
   // See RegisterActiveNode.  These will keep the AudioContext alive while it
   // is rendering and the window remains alive.
   nsTHashtable<nsRefPtrHashKey<AudioNode> > mActiveNodes;
   // Raw (non-owning) references to all AudioNodes for this AudioContext.
   nsTHashtable<nsPtrHashKey<AudioNode> > mAllNodes;
--- a/dom/media/webaudio/MediaBufferDecoder.cpp
+++ b/dom/media/webaudio/MediaBufferDecoder.cpp
@@ -29,37 +29,16 @@
 #include "mozilla/Telemetry.h"
 #include "nsPrintfCString.h"
 #include "GMPCrashHelper.h"
 
 namespace mozilla {
 
 extern LazyLogModule gMediaDecoderLog;
 
-NS_IMPL_CYCLE_COLLECTION_CLASS(WebAudioDecodeJob)
-
-NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(WebAudioDecodeJob)
-  NS_IMPL_CYCLE_COLLECTION_UNLINK(mContext)
-  NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutput)
-  NS_IMPL_CYCLE_COLLECTION_UNLINK(mSuccessCallback)
-  NS_IMPL_CYCLE_COLLECTION_UNLINK(mFailureCallback)
-NS_IMPL_CYCLE_COLLECTION_UNLINK_END
-
-NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(WebAudioDecodeJob)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mContext)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutput)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSuccessCallback)
-  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mFailureCallback)
-NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
-
-NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(WebAudioDecodeJob)
-NS_IMPL_CYCLE_COLLECTION_TRACE_END
-NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(WebAudioDecodeJob, AddRef)
-NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(WebAudioDecodeJob, Release)
-
 using namespace dom;
 
 class ReportResultTask final : public Runnable
 {
 public:
   ReportResultTask(WebAudioDecodeJob& aDecodeJob,
                    WebAudioDecodeJob::ResultFn aFunction,
                    WebAudioDecodeJob::ErrorCode aErrorCode)
--- a/dom/media/webaudio/MediaBufferDecoder.h
+++ b/dom/media/webaudio/MediaBufferDecoder.h
@@ -30,19 +30,17 @@ struct WebAudioDecodeJob final
 {
   // You may omit both the success and failure callback, or you must pass both.
   // The callbacks are only necessary for asynchronous operation.
   WebAudioDecodeJob(const nsACString& aContentType,
                     dom::AudioContext* aContext,
                     dom::Promise* aPromise,
                     dom::DecodeSuccessCallback* aSuccessCallback = nullptr,
                     dom::DecodeErrorCallback* aFailureCallback = nullptr);
-
-  NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(WebAudioDecodeJob)
-  NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(WebAudioDecodeJob)
+  ~WebAudioDecodeJob();
 
   enum ErrorCode {
     NoError,
     UnknownContent,
     UnknownError,
     InvalidContent,
     NoAudio
   };
@@ -60,19 +58,16 @@ struct WebAudioDecodeJob final
   nsCString mContentType;
   uint32_t mWriteIndex;
   RefPtr<dom::AudioContext> mContext;
   RefPtr<dom::Promise> mPromise;
   RefPtr<dom::DecodeSuccessCallback> mSuccessCallback;
   RefPtr<dom::DecodeErrorCallback> mFailureCallback; // can be null
   RefPtr<dom::AudioBuffer> mOutput;
   RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
-
-private:
-  ~WebAudioDecodeJob();
 };
 
 void AsyncDecodeWebAudio(const char* aContentType, uint8_t* aBuffer,
                          uint32_t aLength, WebAudioDecodeJob& aDecodeJob);
 
 } // namespace mozilla
 
 #endif
--- a/js/src/builtin/ModuleObject.cpp
+++ b/js/src/builtin/ModuleObject.cpp
@@ -1135,76 +1135,76 @@ bool
 ModuleBuilder::processExport(frontend::ParseNode* pn)
 {
     MOZ_ASSERT(pn->isKind(PNK_EXPORT) || pn->isKind(PNK_EXPORT_DEFAULT));
     MOZ_ASSERT(pn->getArity() == (pn->isKind(PNK_EXPORT) ? PN_UNARY : PN_BINARY));
 
     bool isDefault = pn->getKind() == PNK_EXPORT_DEFAULT;
     ParseNode* kid = isDefault ? pn->pn_left : pn->pn_kid;
 
+    if (isDefault && pn->pn_right) {
+        // This is an export default containing an expression.
+        RootedAtom localName(cx_, cx_->names().starDefaultStar);
+        RootedAtom exportName(cx_, cx_->names().default_);
+        return appendExportEntry(exportName, localName);
+    }
+
     switch (kid->getKind()) {
       case PNK_EXPORT_SPEC_LIST:
         MOZ_ASSERT(!isDefault);
         for (ParseNode* spec = kid->pn_head; spec; spec = spec->pn_next) {
             MOZ_ASSERT(spec->isKind(PNK_EXPORT_SPEC));
             RootedAtom localName(cx_, spec->pn_left->pn_atom);
             RootedAtom exportName(cx_, spec->pn_right->pn_atom);
             if (!appendExportEntry(exportName, localName))
                 return false;
         }
         break;
 
       case PNK_CLASS: {
-          const ClassNode& cls = kid->as<ClassNode>();
-          MOZ_ASSERT(cls.names());
-          RootedAtom localName(cx_, cls.names()->innerBinding()->pn_atom);
-          RootedAtom exportName(cx_, isDefault ? cx_->names().default_ : localName.get());
-          if (!appendExportEntry(exportName, localName))
-              return false;
-          break;
+        const ClassNode& cls = kid->as<ClassNode>();
+        MOZ_ASSERT(cls.names());
+        RootedAtom localName(cx_, cls.names()->innerBinding()->pn_atom);
+        RootedAtom exportName(cx_, isDefault ? cx_->names().default_ : localName.get());
+        if (!appendExportEntry(exportName, localName))
+            return false;
+        break;
       }
 
       case PNK_VAR:
       case PNK_CONST:
       case PNK_LET: {
-          MOZ_ASSERT(kid->isArity(PN_LIST));
-          for (ParseNode* var = kid->pn_head; var; var = var->pn_next) {
-              if (var->isKind(PNK_ASSIGN))
-                  var = var->pn_left;
-              MOZ_ASSERT(var->isKind(PNK_NAME));
-              RootedAtom localName(cx_, var->pn_atom);
-              RootedAtom exportName(cx_, isDefault ? cx_->names().default_ : localName.get());
-              if (!appendExportEntry(exportName, localName))
-                  return false;
-          }
-          break;
+        MOZ_ASSERT(kid->isArity(PN_LIST));
+        for (ParseNode* var = kid->pn_head; var; var = var->pn_next) {
+            if (var->isKind(PNK_ASSIGN))
+                var = var->pn_left;
+            MOZ_ASSERT(var->isKind(PNK_NAME));
+            RootedAtom localName(cx_, var->pn_atom);
+            RootedAtom exportName(cx_, isDefault ? cx_->names().default_ : localName.get());
+            if (!appendExportEntry(exportName, localName))
+                return false;
+        }
+        break;
       }
 
       case PNK_FUNCTION: {
-          RootedFunction func(cx_, kid->pn_funbox->function());
-          if (!func->isArrow()) {
-              RootedAtom localName(cx_, func->explicitName());
-              RootedAtom exportName(cx_, isDefault ? cx_->names().default_ : localName.get());
-              MOZ_ASSERT_IF(isDefault, localName);
-              if (!appendExportEntry(exportName, localName))
-                  return false;
-              break;
-          }
-      }
-
-      MOZ_FALLTHROUGH; // Arrow functions are handled below.
-
-      default:
-        MOZ_ASSERT(isDefault);
-        RootedAtom localName(cx_, cx_->names().starDefaultStar);
-        RootedAtom exportName(cx_, cx_->names().default_);
+        RootedFunction func(cx_, kid->pn_funbox->function());
+        MOZ_ASSERT(!func->isArrow());
+        RootedAtom localName(cx_, func->explicitName());
+        RootedAtom exportName(cx_, isDefault ? cx_->names().default_ : localName.get());
+        MOZ_ASSERT_IF(isDefault, localName);
         if (!appendExportEntry(exportName, localName))
             return false;
         break;
+      }
+
+      default:
+        MOZ_CRASH("Unexpected parse node");
     }
+
     return true;
 }
 
 bool
 ModuleBuilder::processExportFrom(frontend::ParseNode* pn)
 {
     MOZ_ASSERT(pn->isKind(PNK_EXPORT_FROM));
     MOZ_ASSERT(pn->isArity(PN_BINARY));
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/modules/bug-1320993.js
@@ -0,0 +1,2 @@
+parseModule("export default (class {})");
+parseModule("export default (class A {})");
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1372,16 +1372,52 @@ class MacroAssembler : public MacroAssem
     // wasm::MemoryAccessVector (there can be multiple when i64 is involved).
     // On x64, only some asm.js accesses need a wasm::MemoryAccess so the caller
     // is responsible for doing this instead.
     void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out) DEFINED_ON(x86, x64);
     void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out) DEFINED_ON(x86, x64);
     void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64);
     void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr) DEFINED_ON(x86);
 
+    // For all the ARM wasmLoad and wasmStore functions, `ptr` MUST equal
+    // `ptrScratch`, and that register will be updated based on conditions
+    // listed below (where it is only mentioned as `ptr`).
+
+    // `ptr` will be updated if access.offset() != 0 or access.type() == Scalar::Int64.
+    void wasmLoad(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch, AnyRegister output) DEFINED_ON(arm);
+    void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch, Register64 output) DEFINED_ON(arm);
+    void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Register ptr, Register ptrScratch) DEFINED_ON(arm);
+    void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Register ptr, Register ptrScratch) DEFINED_ON(arm);
+
+    // `ptr` will always be updated.
+    void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
+                           Register output, Register tmp) DEFINED_ON(arm);
+
+    // `ptr` will always be updated and `tmp1` is always needed.  `tmp2` is
+    // needed for Float32; `tmp2` and `tmp3` are needed for Float64.  Temps must
+    // be Invalid when they are not needed.
+    void wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
+                             FloatRegister output, Register tmp1, Register tmp2, Register tmp3) DEFINED_ON(arm);
+
+    // `ptr` will always be updated.
+    void wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
+                              Register64 output, Register tmp) DEFINED_ON(arm);
+
+    // `ptr` and `value` will always be updated.
+    void wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value, Register ptr, Register ptrScratch)
+        DEFINED_ON(arm);
+
+    // `ptr` will always be updated.
+    void wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access, FloatRegister floatValue, Register ptr,
+                              Register ptrScratch, Register tmp) DEFINED_ON(arm);
+
+    // `ptr` will always be updated.
+    void wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Register ptr, Register ptrScratch,
+                               Register tmp) DEFINED_ON(arm);
+
     // wasm specific methods, used in both the wasm baseline compiler and ion.
     void wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64, arm);
     void wasmTruncateDoubleToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared, arm);
     void outOfLineWasmTruncateDoubleToInt32(FloatRegister input, bool isUnsigned, wasm::TrapOffset off, Label* rejoin) DEFINED_ON(x86_shared);
 
     void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64, arm);
     void wasmTruncateFloat32ToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared, arm);
     void outOfLineWasmTruncateFloat32ToInt32(FloatRegister input, bool isUnsigned, wasm::TrapOffset off, Label* rejoin) DEFINED_ON(x86_shared);
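Per the ARM contract spelled out above, callers pass the same register for `ptr` and `ptrScratch` and must treat it as potentially updated. The ARM code generator later in this patch follows exactly that shape; in sketch form (names as in CodeGenerator-arm.cpp below, an excerpt rather than standalone code):

// ptrCopy() supplies a writable register when the callee will update it
// (nonzero offset or an Int64 access); it is then passed twice, once as
// `ptr` and once as `ptrScratch`.
Register ptr = ToRegister(lir->ptrCopy());
if (resultType == MIRType::Int64)
    masm.wasmLoadI64(mir->access(), ptr, ptr, ToOutRegister64(lir));
else
    masm.wasmLoad(mir->access(), ptr, ptr, ToAnyRegister(lir->output()));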
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2361,79 +2361,30 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAs
     }
 }
 
 template <typename T>
 void
 CodeGeneratorARM::emitWasmLoad(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
-
-    uint32_t offset = mir->access().offset();
-    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
-
-    Register ptr = ToRegister(lir->ptr());
-    Scalar::Type type = mir->access().type();
-
-    // Maybe add the offset.
-    if (offset || type == Scalar::Int64) {
-        ScratchRegisterScope scratch(masm);
-        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-        if (offset)
-            masm.ma_add(Imm32(offset), ptrPlusOffset, scratch);
-        ptr = ptrPlusOffset;
+    MIRType resultType = mir->type();
+    Register ptr;
+
+    if (mir->access().offset() || mir->access().type() == Scalar::Int64) {
+        ptr = ToRegister(lir->ptrCopy());
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+        ptr = ToRegister(lir->ptr());
     }
 
-    bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
-                    type == Scalar::Int64;
-    unsigned byteSize = mir->access().byteSize();
-
-    masm.memoryBarrier(mir->access().barrierBefore());
-
-    BufferOffset load;
-    if (mir->type() == MIRType::Int64) {
-        Register64 output = ToOutRegister64(lir);
-        if (type == Scalar::Int64) {
-            MOZ_ASSERT(INT64LOW_OFFSET == 0);
-
-            load = masm.ma_dataTransferN(IsLoad, 32, /* signed = */ false, HeapReg, ptr, output.low);
-            masm.append(mir->access(), load.getOffset(), masm.framePushed());
-
-            masm.as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
-
-            load = masm.ma_dataTransferN(IsLoad, 32, isSigned, HeapReg, ptr, output.high);
-            masm.append(mir->access(), load.getOffset(), masm.framePushed());
-        } else {
-            load = masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.low);
-            masm.append(mir->access(), load.getOffset(), masm.framePushed());
-
-            if (isSigned)
-                masm.ma_asr(Imm32(31), output.low, output.high);
-            else
-                masm.ma_mov(Imm32(0), output.high);
-        }
-    } else {
-        AnyRegister output = ToAnyRegister(lir->output());
-        bool isFloat = output.isFloat();
-        if (isFloat) {
-            MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle());
-            ScratchRegisterScope scratch(masm);
-            masm.ma_add(HeapReg, ptr, scratch);
-
-            load = masm.ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), output.fpu());
-            masm.append(mir->access(), load.getOffset(), masm.framePushed());
-        } else {
-            load = masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
-            masm.append(mir->access(), load.getOffset(), masm.framePushed());
-        }
-    }
-
-    masm.memoryBarrier(mir->access().barrierAfter());
+    if (resultType == MIRType::Int64)
+        masm.wasmLoadI64(mir->access(), ptr, ptr, ToOutRegister64(lir));
+    else
+        masm.wasmLoad(mir->access(), ptr, ptr, ToAnyRegister(lir->output()));
 }
 
 void
 CodeGeneratorARM::visitWasmLoad(LWasmLoad* lir)
 {
     emitWasmLoad(lir);
 }
 
@@ -2443,81 +2394,32 @@ CodeGeneratorARM::visitWasmLoadI64(LWasm
     emitWasmLoad(lir);
 }
 
 template<typename T>
 void
 CodeGeneratorARM::emitWasmUnalignedLoad(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
-
-    uint32_t offset = mir->access().offset();
-    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+    MIRType resultType = mir->type();
 
     Register ptr = ToRegister(lir->ptrCopy());
-    if (offset) {
-        ScratchRegisterScope scratch(masm);
-        masm.ma_add(Imm32(offset), ptr, scratch);
+    Register tmp1 = ToRegister(lir->getTemp(1));
+
+    if (resultType == MIRType::Int64) {
+        masm.wasmUnalignedLoadI64(mir->access(), ptr, ptr, ToOutRegister64(lir), tmp1);
+    } else if (IsFloatingPointType(resultType)) {
+        Register tmp2(ToRegister(lir->getTemp(2)));
+        Register tmp3(Register::Invalid());
+        if (mir->access().byteSize() == 8)
+            tmp3 = ToRegister(lir->getTemp(3));
+        masm.wasmUnalignedLoadFP(mir->access(), ptr, ptr, ToFloatRegister(lir->output()), tmp1, tmp2, tmp3);
+    } else {
+        masm.wasmUnalignedLoad(mir->access(), ptr, ptr, ToRegister(lir->output()), tmp1);
     }
-
-    // Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
-    masm.ma_add(HeapReg, ptr);
-
-    unsigned byteSize = mir->access().byteSize();
-    Scalar::Type type = mir->access().type();
-    bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
-                    type == Scalar::Int64;
-
-    MIRType mirType = mir->type();
-
-    Register tmp = ToRegister(lir->getTemp(1));
-
-    Register low;
-    if (IsFloatingPointType(mirType))
-        low = ToRegister(lir->getTemp(2));
-    else if (mirType == MIRType::Int64)
-        low = ToOutRegister64(lir).low;
-    else
-        low = ToRegister(lir->output());
-
-    MOZ_ASSERT(low != tmp);
-    MOZ_ASSERT(low != ptr);
-
-    masm.memoryBarrier(mir->access().barrierBefore());
-
-    masm.emitUnalignedLoad(isSigned, Min(byteSize, 4u), ptr, tmp, low);
-
-    if (IsFloatingPointType(mirType)) {
-        FloatRegister output = ToFloatRegister(lir->output());
-        if (byteSize == 4) {
-            MOZ_ASSERT(output.isSingle());
-            masm.ma_vxfer(low, output);
-        } else {
-            MOZ_ASSERT(byteSize == 8);
-            MOZ_ASSERT(output.isDouble());
-            Register high = ToRegister(lir->getTemp(3));
-            masm.emitUnalignedLoad(/* signed */ false, 4, ptr, tmp, high, /* offset */ 4);
-            masm.ma_vxfer(low, high, output);
-        }
-    } else if (mirType == MIRType::Int64) {
-        Register64 output = ToOutRegister64(lir);
-        if (type == Scalar::Int64) {
-            MOZ_ASSERT(byteSize == 8);
-            masm.emitUnalignedLoad(isSigned, 4, ptr, tmp, output.high, /* offset */ 4);
-        } else {
-            MOZ_ASSERT(byteSize <= 4);
-            // Propagate sign.
-            if (isSigned)
-                masm.ma_asr(Imm32(31), output.low, output.high);
-            else
-                masm.ma_mov(Imm32(0), output.high);
-        }
-    }
-
-    masm.memoryBarrier(mir->access().barrierAfter());
 }
 
 void
 CodeGeneratorARM::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir)
 {
     emitWasmUnalignedLoad(lir);
 }
 
@@ -2540,70 +2442,32 @@ CodeGeneratorARM::visitWasmAddOffset(LWa
     masm.ma_b(trap(mir, wasm::Trap::OutOfBounds), Assembler::CarrySet);
 }
 
 template <typename T>
 void
 CodeGeneratorARM::emitWasmStore(T* lir)
 {
     const MWasmStore* mir = lir->mir();
-
-    uint32_t offset = mir->access().offset();
-    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
-
-    Register ptr = ToRegister(lir->ptr());
-    unsigned byteSize = mir->access().byteSize();
-    Scalar::Type type = mir->access().type();
+    Scalar::Type accessType = mir->access().type();
+    Register ptr;
 
     // Maybe add the offset.
-    if (offset || type == Scalar::Int64) {
-        ScratchRegisterScope scratch(masm);
-        Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-        if (offset)
-            masm.ma_add(Imm32(offset), ptrPlusOffset, scratch);
-        ptr = ptrPlusOffset;
+    if (mir->access().offset() || accessType == Scalar::Int64) {
+        ptr = ToRegister(lir->ptrCopy());
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+        ptr = ToRegister(lir->ptr());
     }
 
-    masm.memoryBarrier(mir->access().barrierBefore());
-
-    BufferOffset store;
-    if (type == Scalar::Int64) {
-        MOZ_ASSERT(INT64LOW_OFFSET == 0);
-
-        Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
-
-        store = masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ false, HeapReg, ptr, value.low);
-        masm.append(mir->access(), store.getOffset(), masm.framePushed());
-
-        masm.as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
-
-        store = masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ true, HeapReg, ptr, value.high);
-        masm.append(mir->access(), store.getOffset(), masm.framePushed());
-    } else {
-        AnyRegister value = ToAnyRegister(lir->getOperand(lir->ValueIndex));
-        if (value.isFloat()) {
-            ScratchRegisterScope scratch(masm);
-            FloatRegister val = value.fpu();
-            MOZ_ASSERT((byteSize == 4) == val.isSingle());
-            masm.ma_add(HeapReg, ptr, scratch);
-
-            store = masm.ma_vstr(val, Operand(Address(scratch, 0)).toVFPAddr());
-            masm.append(mir->access(), store.getOffset(), masm.framePushed());
-        } else {
-            bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // see AsmJSStoreHeap;
-            Register val = value.gpr();
-
-            store = masm.ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val);
-            masm.append(mir->access(), store.getOffset(), masm.framePushed());
-        }
-    }
-
-    masm.memoryBarrier(mir->access().barrierAfter());
+    if (accessType == Scalar::Int64)
+        masm.wasmStoreI64(mir->access(), ToRegister64(lir->getInt64Operand(lir->ValueIndex)),
+                          ptr, ptr);
+    else
+        masm.wasmStore(mir->access(), ToAnyRegister(lir->getOperand(lir->ValueIndex)), ptr, ptr);
 }
 
 void
 CodeGeneratorARM::visitWasmStore(LWasmStore* lir)
 {
     emitWasmStore(lir);
 }
 
@@ -2613,61 +2477,30 @@ CodeGeneratorARM::visitWasmStoreI64(LWas
     emitWasmStore(lir);
 }
 
 template<typename T>
 void
 CodeGeneratorARM::emitWasmUnalignedStore(T* lir)
 {
     const MWasmStore* mir = lir->mir();
-
-    uint32_t offset = mir->access().offset();
-    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+    Scalar::Type accessType = mir->access().type();
 
     Register ptr = ToRegister(lir->ptrCopy());
-    if (offset) {
-        ScratchRegisterScope scratch(masm);
-        masm.ma_add(Imm32(offset), ptr, scratch);
-    }
-
-    // Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
-    masm.ma_add(HeapReg, ptr);
-
-    MIRType mirType = mir->value()->type();
-
-    masm.memoryBarrier(mir->access().barrierAfter());
-
-    Register val = ToRegister(lir->valueHelper());
-    if (IsFloatingPointType(mirType)) {
-        masm.ma_vxfer(ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex)), val);
-    } else if (mirType == MIRType::Int64) {
-        Register64 input = ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex));
-        if (input.low != val)
-            masm.ma_mov(input.low, val);
+    Register valOrTmp = ToRegister(lir->valueHelper());
+    if (accessType == Scalar::Int64) {
+        masm.wasmUnalignedStoreI64(mir->access(),
+                                   ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex)),
+                                   ptr, ptr, valOrTmp);
+    } else if (accessType == Scalar::Float32 || accessType == Scalar::Float64) {
+        FloatRegister value = ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex));
+        masm.wasmUnalignedStoreFP(mir->access(), value, ptr, ptr, valOrTmp);
+    } else {
+        masm.wasmUnalignedStore(mir->access(), valOrTmp, ptr, ptr);
     }
-
-    unsigned byteSize = mir->access().byteSize();
-    masm.emitUnalignedStore(Min(byteSize, 4u), ptr, val);
-
-    if (byteSize > 4) {
-        // It's a double or an int64 load.
-        // Load the high 32 bits when counter == 4.
-        if (IsFloatingPointType(mirType)) {
-            FloatRegister fp = ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex));
-            MOZ_ASSERT(fp.isDouble());
-            ScratchRegisterScope scratch(masm);
-            masm.ma_vxfer(fp, scratch, val);
-        } else {
-            MOZ_ASSERT(mirType == MIRType::Int64);
-            masm.ma_mov(ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex)).high, val);
-        }
-        masm.emitUnalignedStore(4, ptr, val, /* offset */ 4);
-    }
-
-    masm.memoryBarrier(mir->access().barrierBefore());
 }
 
 void
 CodeGeneratorARM::visitWasmUnalignedStore(LWasmUnalignedStore* lir)
 {
     emitWasmUnalignedStore(lir);
 }
 
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -5431,16 +5431,90 @@ MacroAssembler::wasmTruncateFloat32ToUIn
 }
 
 void
 MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input, Register output, Label* oolEntry)
 {
     wasmTruncateToInt32(input, output, MIRType::Float32, /* isUnsigned= */ false, oolEntry);
 }
 
+void
+MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
+                         AnyRegister output)
+{
+    wasmLoadImpl(access, ptr, ptrScratch, output, Register64::Invalid());
+}
+
+void
+MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
+                            Register64 output)
+{
+    wasmLoadImpl(access, ptr, ptrScratch, AnyRegister(), output);
+}
+
+void
+MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Register ptr,
+                          Register ptrScratch)
+{
+    wasmStoreImpl(access, value, Register64::Invalid(), ptr, ptrScratch);
+}
+
+void
+MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Register ptr,
+                             Register ptrScratch)
+{
+    wasmStoreImpl(access, AnyRegister(), value, ptr, ptrScratch);
+}
+
+void
+MacroAssembler::wasmUnalignedLoad(const wasm::MemoryAccessDesc& access, Register ptr,
+                                  Register ptrScratch, Register output, Register tmp)
+{
+    wasmUnalignedLoadImpl(access, ptr, ptrScratch, AnyRegister(output), Register64::Invalid(), tmp,
+                          Register::Invalid(), Register::Invalid());
+}
+
+void
+MacroAssembler::wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access, Register ptr,
+                                    Register ptrScratch, FloatRegister outFP, Register tmp1,
+                                    Register tmp2, Register tmp3)
+{
+    wasmUnalignedLoadImpl(access, ptr, ptrScratch, AnyRegister(outFP), Register64::Invalid(),
+                          tmp1, tmp2, tmp3);
+}
+
+void
+MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access, Register ptr,
+                                     Register ptrScratch, Register64 out64, Register tmp)
+{
+    wasmUnalignedLoadImpl(access, ptr, ptrScratch, AnyRegister(), out64, tmp, Register::Invalid(),
+                          Register::Invalid());
+}
+
+void
+MacroAssembler::wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value,
+                                   Register ptr, Register ptrScratch)
+{
+    wasmUnalignedStoreImpl(access, FloatRegister(), Register64::Invalid(), ptr, ptrScratch, value);
+}
+
+void
+MacroAssembler::wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access, FloatRegister floatVal,
+                                     Register ptr, Register ptrScratch, Register tmp)
+{
+    wasmUnalignedStoreImpl(access, floatVal, Register64::Invalid(), ptr, ptrScratch, tmp);
+}
+
+void
+MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access, Register64 val64,
+                                      Register ptr, Register ptrScratch, Register tmp)
+{
+    wasmUnalignedStoreImpl(access, FloatRegister(), val64, ptr, ptrScratch, tmp);
+}
+
 //}}} check_macroassembler_style
 
 void
 MacroAssemblerARM::wasmTruncateToInt32(FloatRegister input, Register output, MIRType fromType,
                                        bool isUnsigned, Label* oolEntry)
 {
     // vcvt* converts NaN into 0, so check for NaNs here.
     {
@@ -5576,16 +5650,258 @@ MacroAssemblerARM::outOfLineWasmTruncate
                                  asMasm().framePushed()));
 
     bind(&inputIsNaN);
     asMasm().jump(wasm::TrapDesc(trapOffset, wasm::Trap::InvalidConversionToInteger,
                                  asMasm().framePushed()));
 }
 
 void
+MacroAssemblerARM::wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register ptr,
+                                Register ptrScratch, AnyRegister output, Register64 out64)
+{
+    MOZ_ASSERT(ptr == ptrScratch);
+
+    uint32_t offset = access.offset();
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+    Scalar::Type type = access.type();
+
+    // Maybe add the offset.
+    if (offset || type == Scalar::Int64) {
+        ScratchRegisterScope scratch(asMasm());
+        if (offset)
+            ma_add(Imm32(offset), ptr, scratch);
+    }
+
+    bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
+                    type == Scalar::Int64;
+    unsigned byteSize = access.byteSize();
+
+    asMasm().memoryBarrier(access.barrierBefore());
+
+    uint32_t framePushed = asMasm().framePushed();
+    BufferOffset load;
+    if (out64 != Register64::Invalid()) {
+        if (type == Scalar::Int64) {
+            MOZ_ASSERT(INT64LOW_OFFSET == 0);
+
+            load = ma_dataTransferN(IsLoad, 32, /* signed = */ false, HeapReg, ptr, out64.low);
+            append(access, load.getOffset(), framePushed);
+
+            as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
+
+            load = ma_dataTransferN(IsLoad, 32, isSigned, HeapReg, ptr, out64.high);
+            append(access, load.getOffset(), framePushed);
+        } else {
+            load = ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, out64.low);
+            append(access, load.getOffset(), framePushed);
+
+            if (isSigned)
+                ma_asr(Imm32(31), out64.low, out64.high);
+            else
+                ma_mov(Imm32(0), out64.high);
+        }
+    } else {
+        bool isFloat = output.isFloat();
+        if (isFloat) {
+            MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle());
+            ScratchRegisterScope scratch(asMasm());
+            ma_add(HeapReg, ptr, scratch);
+
+            load = ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), output.fpu());
+            append(access, load.getOffset(), framePushed);
+        } else {
+            load = ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
+            append(access, load.getOffset(), framePushed);
+        }
+    }
+
+    asMasm().memoryBarrier(access.barrierAfter());
+}
+
+void
+MacroAssemblerARM::wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value,
+                                 Register64 val64, Register ptr, Register ptrScratch)
+{
+    MOZ_ASSERT(ptr == ptrScratch);
+
+    uint32_t offset = access.offset();
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+    unsigned byteSize = access.byteSize();
+    Scalar::Type type = access.type();
+
+    // Maybe add the offset.
+    if (offset || type == Scalar::Int64) {
+        ScratchRegisterScope scratch(asMasm());
+        if (offset)
+            ma_add(Imm32(offset), ptr, scratch);
+    }
+
+    asMasm().memoryBarrier(access.barrierBefore());
+
+    uint32_t framePushed = asMasm().framePushed();
+
+    BufferOffset store;
+    if (type == Scalar::Int64) {
+        MOZ_ASSERT(INT64LOW_OFFSET == 0);
+
+        store = ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ false, HeapReg, ptr, val64.low);
+        append(access, store.getOffset(), framePushed);
+
+        as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
+
+        store = ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ true, HeapReg, ptr, val64.high);
+        append(access, store.getOffset(), framePushed);
+    } else {
+        if (value.isFloat()) {
+            ScratchRegisterScope scratch(asMasm());
+            FloatRegister val = value.fpu();
+            MOZ_ASSERT((byteSize == 4) == val.isSingle());
+            ma_add(HeapReg, ptr, scratch);
+
+            store = ma_vstr(val, Operand(Address(scratch, 0)).toVFPAddr());
+            append(access, store.getOffset(), framePushed);
+        } else {
+            bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // see AsmJSStoreHeap;
+            Register val = value.gpr();
+
+            store = ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val);
+            append(access, store.getOffset(), framePushed);
+        }
+    }
+
+    asMasm().memoryBarrier(access.barrierAfter());
+}
+
+void
+MacroAssemblerARM::wasmUnalignedLoadImpl(const wasm::MemoryAccessDesc& access, Register ptr,
+                                         Register ptrScratch, AnyRegister outAny, Register64 out64,
+                                         Register tmp, Register tmp2, Register tmp3)
+{
+    MOZ_ASSERT(ptr == ptrScratch);
+    MOZ_ASSERT_IF(access.type() != Scalar::Float32 && access.type() != Scalar::Float64,
+                  tmp2 == Register::Invalid() && tmp3 == Register::Invalid());
+    MOZ_ASSERT_IF(access.type() == Scalar::Float32,
+                  tmp2 != Register::Invalid() && tmp3 == Register::Invalid());
+    MOZ_ASSERT_IF(access.type() == Scalar::Float64,
+                  tmp2 != Register::Invalid() && tmp3 != Register::Invalid());
+
+    uint32_t offset = access.offset();
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+    if (offset) {
+        ScratchRegisterScope scratch(asMasm());
+        ma_add(Imm32(offset), ptr, scratch);
+    }
+
+    // Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
+    ma_add(HeapReg, ptr);
+
+    unsigned byteSize = access.byteSize();
+    Scalar::Type type = access.type();
+    bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
+                    type == Scalar::Int64;
+
+    Register low;
+    if (out64 != Register64::Invalid())
+        low = out64.low;
+    else if (outAny.isFloat())
+        low = tmp2;
+    else
+        low = outAny.gpr();
+
+    MOZ_ASSERT(low != tmp);
+    MOZ_ASSERT(low != ptr);
+
+    asMasm().memoryBarrier(access.barrierBefore());
+
+    emitUnalignedLoad(isSigned, Min(byteSize, 4u), ptr, tmp, low);
+
+    if (out64 != Register64::Invalid()) {
+        if (type == Scalar::Int64) {
+            MOZ_ASSERT(byteSize == 8);
+            emitUnalignedLoad(isSigned, 4, ptr, tmp, out64.high, /* offset */ 4);
+        } else {
+            MOZ_ASSERT(byteSize <= 4);
+            // Propagate sign.
+            if (isSigned)
+                ma_asr(Imm32(31), out64.low, out64.high);
+            else
+                ma_mov(Imm32(0), out64.high);
+        }
+    } else if (outAny.isFloat()) {
+        FloatRegister output = outAny.fpu();
+        if (byteSize == 4) {
+            MOZ_ASSERT(output.isSingle());
+            ma_vxfer(low, output);
+        } else {
+            MOZ_ASSERT(byteSize == 8);
+            MOZ_ASSERT(output.isDouble());
+            Register high = tmp3;
+            emitUnalignedLoad(/* signed */ false, 4, ptr, tmp, high, /* offset */ 4);
+            ma_vxfer(low, high, output);
+        }
+    }
+
+    asMasm().memoryBarrier(access.barrierAfter());
+}
+
+void
+MacroAssemblerARM::wasmUnalignedStoreImpl(const wasm::MemoryAccessDesc& access, FloatRegister floatValue,
+                                          Register64 val64, Register ptr, Register ptrScratch, Register tmp)
+{
+    MOZ_ASSERT(ptr == ptrScratch);
+    // They can't both be valid, but they can both be invalid.
+    MOZ_ASSERT_IF(!floatValue.isInvalid(), val64 == Register64::Invalid());
+    MOZ_ASSERT_IF(val64 != Register64::Invalid(), floatValue.isInvalid());
+
+    uint32_t offset = access.offset();
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+    unsigned byteSize = access.byteSize();
+
+    if (offset) {
+        ScratchRegisterScope scratch(asMasm());
+        ma_add(Imm32(offset), ptr, scratch);
+    }
+
+    // Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
+    ma_add(HeapReg, ptr);
+
+    asMasm().memoryBarrier(access.barrierBefore());
+
+    if (val64 != Register64::Invalid()) {
+        if (val64.low != tmp)
+            ma_mov(val64.low, tmp);
+    } else if (!floatValue.isInvalid()) {
+        ma_vxfer(floatValue, tmp);
+    }
+    // Otherwise, tmp has the integer value to store.
+
+    emitUnalignedStore(Min(byteSize, 4u), ptr, tmp);
+
+    if (byteSize > 4) {
+        if (val64 != Register64::Invalid()) {
+            if (val64.high != tmp)
+                ma_mov(val64.high, tmp);
+        } else {
+            MOZ_ASSERT(!floatValue.isInvalid());
+            MOZ_ASSERT(floatValue.isDouble());
+            ScratchRegisterScope scratch(asMasm());
+            ma_vxfer(floatValue, scratch, tmp);
+        }
+        emitUnalignedStore(4, ptr, tmp, /* offset */ 4);
+    }
+
+    asMasm().memoryBarrier(access.barrierAfter());
+}
+
+void
 MacroAssemblerARM::emitUnalignedLoad(bool isSigned, unsigned byteSize, Register ptr, Register tmp,
                                      Register dest, unsigned offset)
 {
     // Preconditions.
     MOZ_ASSERT(ptr != tmp);
     MOZ_ASSERT(ptr != dest);
     MOZ_ASSERT(tmp != dest);
     MOZ_ASSERT(byteSize <= 4);
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -450,31 +450,51 @@ class MacroAssemblerARM : public Assembl
         }
         if (mode == DB) {
             return transferMultipleByRunsImpl
                 <FloatRegisterBackwardIterator>(set, ls, rm, mode, -1);
         }
         MOZ_CRASH("Invalid data transfer addressing mode");
     }
 
+    // `outAny` is valid if and only if `out64` == Register64::Invalid().
+    void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
+                      AnyRegister outAny, Register64 out64);
+
+    // `valAny` is valid if and only if `val64` == Register64::Invalid().
+    void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister valAny, Register64 val64,
+                       Register ptr, Register ptrScratch);
+
+  protected:
+    // `outAny` is valid if and only if `out64` == Register64::Invalid().
+    void wasmUnalignedLoadImpl(const wasm::MemoryAccessDesc& access, Register ptr, Register ptrScratch,
+                               AnyRegister outAny, Register64 out64, Register tmp1, Register tmp2,
+                               Register tmp3);
+
+    // The value to be stored is in `floatValue` (if not invalid), `val64` (if not invalid),
+    // or in `valOrTmp` (if `floatValue` and `val64` are both invalid).  Note `valOrTmp` must
+    // always be valid.
+    void wasmUnalignedStoreImpl(const wasm::MemoryAccessDesc& access, FloatRegister floatValue,
+                                Register64 val64, Register ptr, Register ptrScratch, Register valOrTmp);
+
+  private:
     // Loads `byteSize` bytes, byte by byte, by reading from ptr[offset],
     // applying the indicated signedness (defined by isSigned).
     // - all three registers must be different.
     // - tmp and dest will get clobbered, ptr will remain intact.
     // - byteSize can be up to 4 bytes and no more (GPR are 32 bits on ARM).
     void emitUnalignedLoad(bool isSigned, unsigned byteSize, Register ptr, Register tmp,
                            Register dest, unsigned offset = 0);
 
     // Ditto, for a store. Note stores don't care about signedness.
     // - the two registers must be different.
     // - val will get clobbered, ptr will remain intact.
     // - byteSize can be up to 4 bytes and no more (GPR are 32 bits on ARM).
     void emitUnalignedStore(unsigned byteSize, Register ptr, Register val, unsigned offset = 0);
 
-private:
     // Implementation for transferMultipleByRuns so we can use different
     // iterators for forward/backward traversals. The sign argument should be 1
     // if we traverse forwards, -1 if we traverse backwards.
     template<typename RegisterIterator> int32_t
     transferMultipleByRunsImpl(FloatRegisterSet set, LoadStore ls,
                                Register rm, DTMMode mode, int32_t sign)
     {
         MOZ_ASSERT(sign == 1 || sign == -1);
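emitUnalignedLoad above assembles wide values from single-byte loads (byteSize <= 4 per call; double and Int64 accesses issue a second call for the high word at offset 4). A standalone sketch of the decomposition and sign handling it implements, assuming little-endian byte order (readByte is a hypothetical stand-in for one generated byte load):

#include <cstdint>

// Stand-in for one generated single-byte load of ptr[offset + i].
extern uint8_t readByte(const uint8_t* ptr, unsigned offset);

// Assemble a possibly unaligned little-endian value from byteSize <= 4
// byte loads, sign-extending the result when requested.
uint32_t unalignedLoad(const uint8_t* ptr, unsigned byteSize,
                       bool isSigned, unsigned offset = 0) {
    uint32_t value = 0;
    for (unsigned i = 0; i < byteSize; i++)
        value |= uint32_t(readByte(ptr, offset + i)) << (8 * i);
    if (isSigned && byteSize < 4) {
        unsigned shift = 32 - 8 * byteSize;
        value = uint32_t(int32_t(value << shift) >> shift);  // sign-extend
    }
    return value;
}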
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -3413,38 +3413,38 @@ class BaseCompiler
         if (access->offset() >= OffsetGuardLimit) {
             masm.branchAdd32(Assembler::CarrySet, Imm32(access->offset()), ptr,
                              trap(Trap::OutOfBounds));
             access->clearOffset();
         }
     }
 
     // This is the temp register passed as the last argument to load()
-    MOZ_MUST_USE size_t loadStoreTemps(MemoryAccessDesc& access) {
+    MOZ_MUST_USE size_t loadTemps(MemoryAccessDesc& access) {
 #if defined(JS_CODEGEN_ARM)
         if (access.isUnaligned()) {
             switch (access.type()) {
               case Scalar::Float32:
-                return 1;
+                return 2;
               case Scalar::Float64:
-                return 2;
+                return 3;
               default:
-                break;
+                return 1;
             }
         }
         return 0;
 #else
         return 0;
 #endif
     }
 
     // ptr and dest may be the same iff dest is I32.
     // This may destroy ptr even if ptr and dest are not the same.
-    MOZ_MUST_USE bool load(MemoryAccessDesc& access, RegI32 ptr, bool omitBoundsCheck,
-                           AnyReg dest, RegI32 tmp1, RegI32 tmp2)
+    MOZ_MUST_USE bool load(MemoryAccessDesc& access, RegI32 ptr, bool omitBoundsCheck, AnyReg dest,
+                           RegI32 tmp1, RegI32 tmp2, RegI32 tmp3)
     {
         checkOffset(&access, ptr);
 
         OutOfLineCode* ool = nullptr;
 #ifndef WASM_HUGE_MEMORY
         if (!omitBoundsCheck) {
             if (access.isPlainAsmJS()) {
                 ool = new (alloc_) AsmJSLoadOOB(access.type(), dest.any());
@@ -3476,64 +3476,61 @@ class BaseCompiler
             AnyRegister out = byteRegConflict ? AnyRegister(ScratchRegX86) : dest.any();
 
             masm.wasmLoad(access, srcAddr, out);
 
             if (byteRegConflict)
                 masm.mov(ScratchRegX86, dest.i32());
         }
 #elif defined(JS_CODEGEN_ARM)
-        if (access.offset() != 0)
-            masm.add32(Imm32(access.offset()), ptr);
-
-        bool isSigned = true;
-        switch (access.type()) {
-          case Scalar::Uint8:
-          case Scalar::Uint16:
-          case Scalar::Uint32: {
-            isSigned = false;
-            MOZ_FALLTHROUGH;
-          case Scalar::Int8:
-          case Scalar::Int16:
-          case Scalar::Int32:
-            Register rt = dest.tag == AnyReg::I64 ? dest.i64().low : dest.i32();
-            loadI32(access, isSigned, ptr, rt);
-            if (dest.tag == AnyReg::I64) {
-                if (isSigned)
-                    masm.ma_asr(Imm32(31), rt, dest.i64().high);
-                else
-                    masm.move32(Imm32(0), dest.i64().high);
+        if (access.isUnaligned()) {
+            switch (dest.tag) {
+              case AnyReg::I64:
+                masm.wasmUnalignedLoadI64(access, ptr, ptr, dest.i64(), tmp1);
+                break;
+              case AnyReg::F32:
+                masm.wasmUnalignedLoadFP(access, ptr, ptr, dest.f32(), tmp1, tmp2, Register::Invalid());
+                break;
+              case AnyReg::F64:
+                masm.wasmUnalignedLoadFP(access, ptr, ptr, dest.f64(), tmp1, tmp2, tmp3);
+                break;
+              default:
+                masm.wasmUnalignedLoad(access, ptr, ptr, dest.i32(), tmp1);
+                break;
             }
-            break;
-          }
-          case Scalar::Int64:
-            loadI64(access, ptr, dest.i64());
-            break;
-          case Scalar::Float32:
-            loadF32(access, ptr, dest.f32(), tmp1);
-            break;
-          case Scalar::Float64:
-            loadF64(access, ptr, dest.f64(), tmp1, tmp2);
-            break;
-          default:
-            MOZ_CRASH("Compiler bug: unexpected array type");
+        } else {
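+            // Aligned access: a single MacroAssembler load.  ptr is passed
+            // twice because it also serves as the pointer scratch, which is
+            // why load() may clobber it (see the header comment above).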
+            if (dest.tag == AnyReg::I64)
+                masm.wasmLoadI64(access, ptr, ptr, dest.i64());
+            else
+                masm.wasmLoad(access, ptr, ptr, dest.any());
         }
 #else
         MOZ_CRASH("BaseCompiler platform hook: load");
 #endif
 
         if (ool)
             masm.bind(ool->rejoin());
         return true;
     }
 
+    MOZ_MUST_USE size_t storeTemps(MemoryAccessDesc& access) {
+#if defined(JS_CODEGEN_ARM)
+        if (access.isUnaligned()) {
+            // See comment in store() about how this temp could be avoided for
+            // unaligned i8/i16/i32 stores with some restructuring elsewhere.
+            return 1;
+        }
+#endif
+        return 0;
+    }
+
     // ptr and src must not be the same register.
-    // This may destroy ptr.
-    MOZ_MUST_USE bool store(MemoryAccessDesc access, RegI32 ptr, bool omitBoundsCheck,
-                            AnyReg src, RegI32 tmp1, RegI32 tmp2)
+    // This may destroy ptr but will not destroy src.
+    MOZ_MUST_USE bool store(MemoryAccessDesc access, RegI32 ptr, bool omitBoundsCheck, AnyReg src,
+                            RegI32 tmp)
     {
         checkOffset(&access, ptr);
 
         Label rejoin;
 #ifndef WASM_HUGE_MEMORY
         if (!omitBoundsCheck) {
             if (access.isPlainAsmJS())
                 masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr, &rejoin);
@@ -3566,182 +3563,57 @@ class BaseCompiler
                 value = AnyRegister(ScratchRegX86);
             } else {
                 value = src.any();
             }
 
             masm.wasmStore(access, value, dstAddr);
         }
 #elif defined(JS_CODEGEN_ARM)
-        if (access.offset() != 0)
-            masm.add32(Imm32(access.offset()), ptr);
-
-        switch (access.type()) {
-          case Scalar::Uint8:
-            MOZ_FALLTHROUGH;
-          case Scalar::Uint16:
-            MOZ_FALLTHROUGH;
-          case Scalar::Int8:
-            MOZ_FALLTHROUGH;
-          case Scalar::Int16:
-            MOZ_FALLTHROUGH;
-          case Scalar::Int32:
-            MOZ_FALLTHROUGH;
-          case Scalar::Uint32: {
-            Register rt = src.tag == AnyReg::I64 ? src.i64().low : src.i32();
-            storeI32(access, ptr, rt);
-            break;
-          }
-          case Scalar::Int64:
-            storeI64(access, ptr, src.i64());
-            break;
-          case Scalar::Float32:
-            storeF32(access, ptr, src.f32(), tmp1);
-            break;
-          case Scalar::Float64:
-            storeF64(access, ptr, src.f64(), tmp1, tmp2);
-            break;
-          default:
-            MOZ_CRASH("Compiler bug: unexpected array type");
+        if (access.isUnaligned()) {
+            // TODO / OPTIMIZE (bug 1331264): We perform the copy on the i32
+            // path (and allocate the temp for the copy) because we will destroy
+            // the value in the temp.  We could avoid the copy and the temp if
+            // the caller would instead preserve src when it needs to return its
+            // value as a result (for teeStore).  If unaligned accesses are
+            // common it will be worthwhile to make that change, but there's no
+            // evidence yet that they will be common.
+            switch (src.tag) {
+              case AnyReg::I64:
+                masm.wasmUnalignedStoreI64(access, src.i64(), ptr, ptr, tmp);
+                break;
+              case AnyReg::F32:
+                masm.wasmUnalignedStoreFP(access, src.f32(), ptr, ptr, tmp);
+                break;
+              case AnyReg::F64:
+                masm.wasmUnalignedStoreFP(access, src.f64(), ptr, ptr, tmp);
+                break;
+              default:
+                moveI32(src.i32(), tmp);
+                masm.wasmUnalignedStore(access, tmp, ptr, ptr);
+                break;
+            }
+        } else {
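+            // Aligned access.  A narrowing i64 store (i64.store8/16/32)
+            // writes only the low word of the value.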
+            if (access.type() == Scalar::Int64)
+                masm.wasmStoreI64(access, src.i64(), ptr, ptr);
+            else if (src.tag == AnyReg::I64)
+                masm.wasmStore(access, AnyRegister(src.i64().low), ptr, ptr);
+            else
+                masm.wasmStore(access, src.any(), ptr, ptr);
         }
 #else
         MOZ_CRASH("BaseCompiler platform hook: store");
 #endif
 
         if (rejoin.used())
             masm.bind(&rejoin);
 
         return true;
     }
 
-#ifdef JS_CODEGEN_ARM
-    void
-    loadI32(MemoryAccessDesc access, bool isSigned, RegI32 ptr, Register rt) {
-        if (access.byteSize() > 1 && access.isUnaligned()) {
-            masm.add32(HeapReg, ptr);
-            SecondScratchRegisterScope scratch(*this);
-            masm.emitUnalignedLoad(isSigned, access.byteSize(), ptr, scratch, rt, 0);
-        } else {
-            BufferOffset ld =
-                masm.ma_dataTransferN(js::jit::IsLoad, BitSize(access.byteSize()*8),
-                                      isSigned, HeapReg, ptr, rt, Offset, Assembler::Always);
-            masm.append(access, ld.getOffset(), masm.framePushed());
-        }
-    }
-
-    void
-    storeI32(MemoryAccessDesc access, RegI32 ptr, Register rt) {
-        if (access.byteSize() > 1 && access.isUnaligned()) {
-            masm.add32(HeapReg, ptr);
-            masm.emitUnalignedStore(access.byteSize(), ptr, rt, 0);
-        } else {
-            BufferOffset st =
-                masm.ma_dataTransferN(js::jit::IsStore, BitSize(access.byteSize()*8),
-                                      IsSigned(false), ptr, HeapReg, rt, Offset,
-                                      Assembler::Always);
-            masm.append(access, st.getOffset(), masm.framePushed());
-        }
-    }
-
-    void
-    loadI64(MemoryAccessDesc access, RegI32 ptr, RegI64 dest) {
-        if (access.isUnaligned()) {
-            masm.add32(HeapReg, ptr);
-            SecondScratchRegisterScope scratch(*this);
-            masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr, scratch, dest.low,
-                                   0);
-            masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr, scratch, dest.high,
-                                   4);
-        } else {
-            BufferOffset ld;
-            ld = masm.ma_dataTransferN(js::jit::IsLoad, BitSize(32), IsSigned(false), HeapReg,
-                                       ptr, dest.low, Offset, Assembler::Always);
-            masm.append(access, ld.getOffset(), masm.framePushed());
-            masm.add32(Imm32(4), ptr);
-            ld = masm.ma_dataTransferN(js::jit::IsLoad, BitSize(32), IsSigned(false), HeapReg,
-                                       ptr, dest.high, Offset, Assembler::Always);
-            masm.append(access, ld.getOffset(), masm.framePushed());
-        }
-    }
-
-    void
-    storeI64(MemoryAccessDesc access, RegI32 ptr, RegI64 src) {
-        if (access.isUnaligned()) {
-            masm.add32(HeapReg, ptr);
-            masm.emitUnalignedStore(ByteSize(4), ptr, src.low, 0);
-            masm.emitUnalignedStore(ByteSize(4), ptr, src.high, 4);
-        } else {
-            BufferOffset st;
-            st = masm.ma_dataTransferN(js::jit::IsStore, BitSize(32), IsSigned(false), HeapReg,
-                                       ptr, src.low, Offset, Assembler::Always);
-            masm.append(access, st.getOffset(), masm.framePushed());
-            masm.add32(Imm32(4), ptr);
-            st = masm.ma_dataTransferN(js::jit::IsStore, BitSize(32), IsSigned(false), HeapReg,
-                                       ptr, src.high, Offset, Assembler::Always);
-            masm.append(access, st.getOffset(), masm.framePushed());
-        }
-    }
-
-    void
-    loadF32(MemoryAccessDesc access, RegI32 ptr, RegF32 dest, RegI32 tmp1) {
-        masm.add32(HeapReg, ptr);
-        if (access.isUnaligned()) {
-            SecondScratchRegisterScope scratch(*this);
-            masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr, scratch, tmp1, 0);
-            masm.ma_vxfer(tmp1, dest);
-        } else {
-            BufferOffset ld = masm.ma_vldr(VFPAddr(ptr, VFPOffImm(0)), dest,
-                                           Assembler::Always);
-            masm.append(access, ld.getOffset(), masm.framePushed());
-        }
-    }
-
-    void
-    storeF32(MemoryAccessDesc access, RegI32 ptr, RegF32 src, RegI32 tmp1) {
-        masm.add32(HeapReg, ptr);
-        if (access.isUnaligned()) {
-            masm.ma_vxfer(src, tmp1);
-            masm.emitUnalignedStore(ByteSize(4), ptr, tmp1, 0);
-        } else {
-            BufferOffset st =
-                masm.ma_vstr(src, VFPAddr(ptr, VFPOffImm(0)), Assembler::Always);
-            masm.append(access, st.getOffset(), masm.framePushed());
-        }
-    }
-
-    void
-    loadF64(MemoryAccessDesc access, RegI32 ptr, RegF64 dest, RegI32 tmp1, RegI32 tmp2) {
-        masm.add32(HeapReg, ptr);
-        if (access.isUnaligned()) {
-            SecondScratchRegisterScope scratch(*this);
-            masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr, scratch, tmp1, 0);
-            masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr, scratch, tmp2, 4);
-            masm.ma_vxfer(tmp1, tmp2, dest);
-        } else {
-            BufferOffset ld = masm.ma_vldr(VFPAddr(ptr, VFPOffImm(0)), dest,
-                                           Assembler::Always);
-            masm.append(access, ld.getOffset(), masm.framePushed());
-        }
-    }
-
-    void
-    storeF64(MemoryAccessDesc access, RegI32 ptr, RegF64 src, RegI32 tmp1, RegI32 tmp2) {
-        masm.add32(HeapReg, ptr);
-        if (access.isUnaligned()) {
-            masm.ma_vxfer(src, tmp1, tmp2);
-            masm.emitUnalignedStore(ByteSize(4), ptr, tmp1, 0);
-            masm.emitUnalignedStore(ByteSize(4), ptr, tmp2, 4);
-        } else {
-            BufferOffset st =
-                masm.ma_vstr(src, VFPAddr(ptr, VFPOffImm(0)), Assembler::Always);
-            masm.append(access, st.getOffset(), masm.framePushed());
-        }
-    }
-#endif // JS_CODEGEN_ARM
-
     ////////////////////////////////////////////////////////////
 
     // Generally speaking, ABOVE this point there should be no value
     // stack manipulation (calls to popI32 etc).
 
     // Generally speaking, BELOW this point there should be no
     // platform dependencies.  We make an exception for x86 register
     // targeting, which is not too hard to keep clean.
@@ -6663,29 +6535,30 @@ BaseCompiler::emitLoad(ValType type, Sca
         return false;
 
     if (deadCode_)
         return true;
 
     bool omitBoundsCheck = false;
     MemoryAccessDesc access(viewType, addr.align, addr.offset, trapIfNotAsmJS());
 
-    size_t temps = loadStoreTemps(access);
+    size_t temps = loadTemps(access);
     RegI32 tmp1 = temps >= 1 ? needI32() : invalidI32();
     RegI32 tmp2 = temps >= 2 ? needI32() : invalidI32();
+    RegI32 tmp3 = temps >= 3 ? needI32() : invalidI32();
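+    // (temps is nonzero only for ARM unaligned accesses; see loadTemps().)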
 
     switch (type) {
       case ValType::I32: {
         RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
 #ifdef JS_CODEGEN_ARM
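+        // The unaligned path assembles the result while ptr is still live,
+        // so the value register must be distinct from the pointer.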
         RegI32 rv = access.isUnaligned() ? needI32() : rp;
 #else
         RegI32 rv = rp;
 #endif
-        if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+        if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2, tmp3))
             return false;
         pushI32(rv);
         if (rp != rv)
             freeI32(rp);
         break;
       }
       case ValType::I64: {
         RegI64 rv;
@@ -6693,114 +6566,113 @@ BaseCompiler::emitLoad(ValType type, Sca
 #ifdef JS_CODEGEN_X86
         rv = abiReturnRegI64;
         needI64(rv);
         rp = popMemoryAccess(&access, &omitBoundsCheck);
 #else
         rp = popMemoryAccess(&access, &omitBoundsCheck);
         rv = needI64();
 #endif
-        if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+        if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2, tmp3))
             return false;
         pushI64(rv);
         freeI32(rp);
         break;
       }
       case ValType::F32: {
         RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
         RegF32 rv = needF32();
-        if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+        if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2, tmp3))
             return false;
         pushF32(rv);
         freeI32(rp);
         break;
       }
       case ValType::F64: {
         RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
         RegF64 rv = needF64();
-        if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+        if (!load(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2, tmp3))
             return false;
         pushF64(rv);
         freeI32(rp);
         break;
       }
       default:
         MOZ_CRASH("load type");
         break;
     }
 
     if (temps >= 1)
         freeI32(tmp1);
     if (temps >= 2)
         freeI32(tmp2);
+    if (temps >= 3)
+        freeI32(tmp3);
 
     return true;
 }
 
 template<bool isStore>
 bool
 BaseCompiler::emitStoreOrTeeStore(ValType resultType, Scalar::Type viewType,
                                   LinearMemoryAddress<Nothing> addr)
 {
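+    // For a plain store the value operand is consumed; for a tee-store it is
+    // pushed back on the stack as the result (freeOrPush* below).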
     if (deadCode_)
         return true;
 
     bool omitBoundsCheck = false;
     MemoryAccessDesc access(viewType, addr.align, addr.offset, trapIfNotAsmJS());
 
-    size_t temps = loadStoreTemps(access);
+    size_t temps = storeTemps(access);
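+    // (At most one temp is needed, for ARM unaligned stores; see storeTemps().)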
     RegI32 tmp1 = temps >= 1 ? needI32() : invalidI32();
-    RegI32 tmp2 = temps >= 2 ? needI32() : invalidI32();
 
     switch (resultType) {
       case ValType::I32: {
         RegI32 rv = popI32();
         RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-        if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+        if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1))
             return false;
         freeI32(rp);
         freeOrPushI32<isStore>(rv);
         break;
       }
       case ValType::I64: {
         RegI64 rv = popI64();
         RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-        if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+        if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1))
             return false;
         freeI32(rp);
         freeOrPushI64<isStore>(rv);
         break;
       }
       case ValType::F32: {
         RegF32 rv = popF32();
         RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-        if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+        if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1))
             return false;
         freeI32(rp);
         freeOrPushF32<isStore>(rv);
         break;
       }
       case ValType::F64: {
         RegF64 rv = popF64();
         RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-        if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1, tmp2))
+        if (!store(access, rp, omitBoundsCheck, AnyReg(rv), tmp1))
             return false;
         freeI32(rp);
         freeOrPushF64<isStore>(rv);
         break;
       }
       default:
         MOZ_CRASH("store type");
         break;
     }
 
     if (temps >= 1)
         freeI32(tmp1);
-    if (temps >= 2)
-        freeI32(tmp2);
 
     return true;
 }
 
 bool
 BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
 {
     LinearMemoryAddress<Nothing> addr;
@@ -6985,49 +6857,46 @@ BaseCompiler::emitTeeStoreWithCoercion(V
         return true;
 
     // TODO / OPTIMIZE (bug 1316831): Disable bounds checking on constant
     // accesses below the minimum heap length.
 
     bool omitBoundsCheck = false;
     MemoryAccessDesc access(viewType, addr.align, addr.offset, trapIfNotAsmJS());
 
-    size_t temps = loadStoreTemps(access);
+    size_t temps = storeTemps(access);
     RegI32 tmp1 = temps >= 1 ? needI32() : invalidI32();
-    RegI32 tmp2 = temps >= 2 ? needI32() : invalidI32();
 
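+    // A tee-store with coercion converts the value for the store but leaves
+    // the original, unconverted value on the stack as the result.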
     if (resultType == ValType::F32 && viewType == Scalar::Float64) {
         RegF32 rv = popF32();
         RegF64 rw = needF64();
         masm.convertFloat32ToDouble(rv, rw);
         RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-        if (!store(access, rp, omitBoundsCheck, AnyReg(rw), tmp1, tmp2))
+        if (!store(access, rp, omitBoundsCheck, AnyReg(rw), tmp1))
             return false;
         pushF32(rv);
         freeI32(rp);
         freeF64(rw);
     }
     else if (resultType == ValType::F64 && viewType == Scalar::Float32) {
         RegF64 rv = popF64();
         RegF32 rw = needF32();
         masm.convertDoubleToFloat32(rv, rw);
         RegI32 rp = popMemoryAccess(&access, &omitBoundsCheck);
-        if (!store(access, rp, omitBoundsCheck, AnyReg(rw), tmp1, tmp2))
+        if (!store(access, rp, omitBoundsCheck, AnyReg(rw), tmp1))
             return false;
         pushF64(rv);
         freeI32(rp);
         freeF32(rw);
     }
     else
         MOZ_CRASH("unexpected coerced store");
 
     if (temps >= 1)
         freeI32(tmp1);
-    if (temps >= 2)
-        freeI32(tmp2);
 
     return true;
 }
 
 bool
 BaseCompiler::emitGrowMemory()
 {
     uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
new file mode 100644
--- /dev/null
+++ b/layout/generic/crashtests/742602.html
@@ -0,0 +1,6 @@
+<!DOCTYPE html>
+<html style="direction: rtl; border-left: medium solid;">
+<body onload="document.documentElement.style.borderWidth = '109472098330px';">
+<div style="float: right;"><option style="display: -moz-box;">I</option>0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0</div>
+</body>
+</html>
--- a/layout/generic/crashtests/crashtests.list
+++ b/layout/generic/crashtests/crashtests.list
@@ -475,16 +475,17 @@ load 723108.html
 load 724235.html
 skip-if(Android&&isDebugBuild) load 724978.xhtml # bug 1263300 - slow
 skip-if(Android&&isDebugBuild) load 730559.html # bug 1245634 - slow
 load 734777.html
 load 737313-1.html
 load 737313-2.html
 load 737313-3.html
 test-pref(font.size.inflation.emPerLine,15) load 740199-1.xhtml
+test-pref(font.size.inflation.emPerLine,15) load 742602.html
 load 747688.html
 load 750066.html
 load 757413.xhtml
 load 757413-2.html
 load 762764-1.html
 load 762902.html
 load 765409.html
 asserts(0-200) load 765621.html # bug 703550
new file mode 100644
--- /dev/null
+++ b/layout/mathml/crashtests/767251.xhtml
@@ -0,0 +1,1 @@
+<html xmlns="http://www.w3.org/1999/xhtml" dir="rtl"><tr><span><math xmlns="http://www.w3.org/1998/Math/MathML"/><tr>n</tr></span></tr></html>
--- a/layout/mathml/crashtests/crashtests.list
+++ b/layout/mathml/crashtests/crashtests.list
@@ -53,14 +53,15 @@ load 463763-2.xhtml
 load 476547-1.xhtml
 load 477740-1.xhtml
 load 541620-1.xhtml
 load 557474-1.html
 load 654928-1.html
 load 655451-1.xhtml
 load 713606-1.html
 load 716349-1.html
+load 767251.xhtml
 load 848725-1.html
 load 848725-2.html
 load 947557-1.html
 load 973322-1.xhtml
 load 1028521-1.xhtml
 load 1061027.html
--- a/layout/style/test/test_clip-path_polygon.html
+++ b/layout/style/test/test_clip-path_polygon.html
@@ -1,25 +1,26 @@
+<!DOCTYPE html>
 <html>
 <head>
 <style>
 body {padding: 0;margin:0;}
 div {
   width:  200px;
   height: 200px;
   position: fixed;
   top: 50px;
   left: 50px;
-  margin: 50;
-  padding: 50;
+  margin: 50px;
+  padding: 50px;
   border: 50px solid red;
   transform-origin: 0 0;
   transform: translate(50px, 50px) scale(0.5);
   background-color: green;
-  clip-path: polygon(0 0, 200px 0, 0 200px) content-box;*/
+  clip-path: polygon(0 0, 200px 0, 0 200px) content-box;
 }
 </style>
 <title>clip-path with polygon() hit test</title>
 <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
 <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
 </head>
 <div id="a"></div>
 <p style="margin-top: 110px">
@@ -33,9 +34,9 @@ if (SpecialPowers.getBoolPref("layout.cs
   isnot(a, document.elementFromPoint(301, 200), "a shouldn't be found");
   isnot(a, document.elementFromPoint(200, 301), "a shouldn't be found");
 }
 is(a, document.elementFromPoint(200, 200), "a should be found");
 is(a, document.elementFromPoint(299, 200), "a should be found");
 is(a, document.elementFromPoint(200, 299), "a should be found");
 is(a, document.elementFromPoint(250, 250), "a should be found");
 </script>
-</html>
\ No newline at end of file
+</html>
new file mode 100644
--- /dev/null
+++ b/layout/svg/crashtests/606914.xhtml
@@ -0,0 +1,1 @@
+<html xmlns="http://www.w3.org/1999/xhtml" style="display: table; position: absolute; left: 2305843009213694000pc; bottom: 2452284pc; padding: 9931442138140%; border-bottom-right-radius: 1152921504606847000pc;">X</html>
new file mode 100644
--- /dev/null
+++ b/layout/svg/crashtests/743469.svg
@@ -0,0 +1,5 @@
+<svg xmlns="http://www.w3.org/2000/svg" style="border-width: 51703084143745256mm; border-left-style: dashed; border-top-left-radius: 3%; border-top-style: dashed; border-right-style: solid; border-image-outset: 10;">
+<script>
+document.elementFromPoint(20, 20);
+</script>
+</svg>
--- a/layout/svg/crashtests/crashtests.list
+++ b/layout/svg/crashtests/crashtests.list
@@ -100,16 +100,17 @@ load 515288-1.html
 load 522394-1.svg
 load 522394-2.svg
 load 522394-3.svg
 load 566216-1.svg
 load 587336-1.html
 load 590291-1.svg
 load 601999-1.html
 load 605626-1.svg
+asserts(2) load 606914.xhtml # bug 606914
 asserts-if(stylo,2) load 610594-1.html # bug 1324669
 load 610954-1.html
 load 612662-1.svg
 load 612662-2.svg
 load 612736-1.svg
 load 612736-2.svg
 load 614367-1.svg
 load 620034-1.html
@@ -128,16 +129,17 @@ load 693424-1.svg
 load 709920-1.svg
 load 709920-2.svg
 load 713413-1.svg
 load 722003-1.svg
 load 725918-1.svg
 load 732836-1.svg
 load 740627-1.svg
 load 740627-2.svg
+load 743469.svg
 load 757704-1.svg
 load 757718-1.svg
 load 757751-1.svg
 load 767056-1.svg
 asserts-if(stylo,3) load 767535-1.xhtml # bug 1324669
 asserts-if(stylo,2) load 768087-1.html # bug 1324669
 load 768351.svg
 load 778492-1.svg
--- a/mobile/android/base/java/org/mozilla/gecko/icons/loader/IconGenerator.java
+++ b/mobile/android/base/java/org/mozilla/gecko/icons/loader/IconGenerator.java
@@ -16,16 +16,17 @@ import android.net.Uri;
 import android.support.annotation.NonNull;
 import android.support.annotation.VisibleForTesting;
 import android.text.TextUtils;
 import android.util.TypedValue;
 
 import org.mozilla.gecko.R;
 import org.mozilla.gecko.icons.IconRequest;
 import org.mozilla.gecko.icons.IconResponse;
+import org.mozilla.gecko.util.StringUtils;
 
 /**
  * This loader will generate an icon in case no icon could be loaded. In order to do so this needs
  * to be the last loader that will be tried.
  */
 public class IconGenerator implements IconLoader {
     // Mozilla's Visual Design Colour Palette
     // http://firefoxux.github.io/StyleGuide/#/visualDesign/colours
@@ -38,24 +39,16 @@ public class IconGenerator implements Ic
             0xFF01bdad,
             0xFF0996f8,
             0xFF02538b,
             0xFF1f386e,
             0xFF7a2f7a,
             0xFFea385e,
     };
 
-    // List of common prefixes of host names. Those prefixes will be striped before a prepresentative
-    // character for an URL is determined.
-    private static final String[] COMMON_PREFIXES = {
-            "www.",
-            "m.",
-            "mobile.",
-    };
-
     private static final int TEXT_SIZE_DP = 12;
     @Override
     public IconResponse load(IconRequest request) {
         if (request.getIconCount() > 1) {
             // There are still other icons to try. We will only generate an icon if there's only one
             // icon left and all previous loaders have failed (assuming this is the last one).
             return null;
         }
@@ -151,18 +144,14 @@ public class IconGenerator implements Ic
             snippet = uri.getPath();
         }
 
         if (TextUtils.isEmpty(snippet)) {
             // If we still have no snippet then just return the question mark
             return "?";
         }
 
-        // Strip common prefixes that we do not want to use to determine the representative character
-        for (String prefix : COMMON_PREFIXES) {
-            if (snippet.startsWith(prefix)) {
-                snippet = snippet.substring(prefix.length());
-            }
-        }
+        // Strip common prefixes that we do not want to use to determine the representative character.
+        snippet = StringUtils.stripCommonSubdomains(snippet);
 
         return snippet;
     }
 }