merge mozilla-inbound to mozilla-central a=merge
author      Carsten "Tomcat" Book <cbook@mozilla.com>
date        Thu, 08 Sep 2016 12:01:17 +0200
changeset   313183  938ce16be25f9c551c19ef8938e8717ed3d41ff5
parent      313145  331524df5cab6383f44d55d777b7cededdbc12c9 (current diff)
parent      313182  eeaebaf00e95a0e82732a5291a0f2e9e8cbb533e (diff)
child       313199  d00937cdf44d7cb55fc00b86f80c05b163b97115
child       313215  a3a5d19b2d0539c66e787e0f2f86c539fb1f6fd8
child       313241  542b38d5eb900afb47e2f9c07cb47300bddef870
push id     30673
push user   cbook@mozilla.com
push date   Thu, 08 Sep 2016 10:01:33 +0000
treeherder  mozilla-central@938ce16be25f
reviewers   merge
milestone   51.0a1
merge mozilla-inbound to mozilla-central a=merge
devtools/client/themes/common.css
testing/web-platform/meta/XMLHttpRequest/send-after-setting-document-domain.htm.ini
testing/web-platform/meta/XMLHttpRequest/send-authentication-basic-cors-not-enabled.htm.ini
testing/web-platform/meta/XMLHttpRequest/send-non-same-origin.sub.htm.ini
testing/web-platform/meta/XMLHttpRequest/send-redirect-bogus-sync.htm.ini
testing/web-platform/meta/XMLHttpRequest/send-redirect-infinite-sync.htm.ini
testing/web-platform/meta/XMLHttpRequest/xmlhttprequest-network-error-sync.htm.ini
testing/web-platform/tests/html/semantics/embedded-content/the-iframe-element/iframe_sandbox_popups_escaping.html
testing/web-platform/tests/html/semantics/embedded-content/the-iframe-element/iframe_sandbox_popups_helper.html
testing/web-platform/tests/html/semantics/embedded-content/the-iframe-element/iframe_sandbox_popups_nonescaping.html
toolkit/components/telemetry/Histograms.json
--- a/browser/base/content/abouthome/aboutHome.js
+++ b/browser/base/content/abouthome/aboutHome.js
@@ -268,35 +268,35 @@ function loadSnippets()
   let lastUpdate = gSnippetsMap.get("snippets-last-update");
   let updateURL = document.documentElement.getAttribute("snippetsURL");
   let shouldUpdate = !lastUpdate ||
                      Date.now() - lastUpdate > SNIPPETS_UPDATE_INTERVAL_MS;
   if (updateURL && shouldUpdate) {
     // Try to update from network.
     let xhr = new XMLHttpRequest();
     xhr.timeout = 5000;
-    try {
-      xhr.open("GET", updateURL, true);
-    } catch (ex) {
-      showSnippets();
-      loadCompleted();
-      return;
-    }
     // Even if the fetch should fail we don't want to spam the server, so we
     // set the last update time regardless of the result.  Will retry tomorrow.
     gSnippetsMap.set("snippets-last-update", Date.now());
     xhr.onloadend = function (event) {
       if (xhr.status == 200) {
         gSnippetsMap.set("snippets", xhr.responseText);
         gSnippetsMap.set("snippets-cached-version", currentVersion);
       }
       showSnippets();
       loadCompleted();
     };
-    xhr.send(null);
+    try {
+      xhr.open("GET", updateURL, true);
+      xhr.send(null);
+    } catch (ex) {
+      showSnippets();
+      loadCompleted();
+      return;
+    }
   } else {
     showSnippets();
     loadCompleted();
   }
 }
 
 /**
  * Shows locally cached remote snippets, or default ones when not available.
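A minimal sketch of the XHR pattern this merge applies across several callers (aboutHome.js, Experiments.jsm, jsb.js, and others): open() and send() now share one try/catch, because channel-creation failures are reported from send() rather than thrown by open(). The helper names below are illustrative, not from the patch.

  let xhr = new XMLHttpRequest();
  xhr.onloadend = () => handleResult(xhr);  // fires for load, error and abort alike
  try {
    // open() no longer throws for a channel that cannot be created;
    // the failure surfaces from send() instead, so both calls share one try.
    xhr.open("GET", updateURL, true);
    xhr.send(null);
  } catch (ex) {
    handleFailure(ex);  // e.g. fall back to cached data
  }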
--- a/browser/base/content/newtab/newTab.css
+++ b/browser/base/content/newtab/newTab.css
@@ -17,18 +17,18 @@ body {
   display: -moz-box;
   position: relative;
   -moz-box-flex: 1;
   -moz-user-focus: normal;
   -moz-box-orient: vertical;
 }
 
 input {
-  font: message-box !important;
-  font-size: 16px !important;
+  font: message-box;
+  font-size: 16px;
 }
 
 input[type=button] {
   cursor: pointer;
 }
 
 /* UNDO */
 #newtab-undo-container {
--- a/browser/base/content/newtab/newTab.xhtml
+++ b/browser/base/content/newtab/newTab.xhtml
@@ -79,13 +79,15 @@
     <div id="newtab-horizontal-margin">
       <div class="newtab-side-margin"/>
       <div id="newtab-grid">
       </div>
       <div class="newtab-side-margin"/>
     </div>
     <div id="newtab-margin-bottom"/>
   </div>
-  <input id="newtab-customize-button" type="button" dir="&locale.dir;" title="&newtab.customize.title;"/>
+  <input id="newtab-customize-button" type="button" dir="&locale.dir;"
+         value="&#x2699;"
+         title="&newtab.customize.title;"/>
 </body>
 <script type="text/javascript;version=1.8" src="chrome://browser/content/contentSearchUI.js"/>
 <script type="text/javascript;version=1.8" src="chrome://browser/content/newtab/newTab.js"/>
 </html>
--- a/browser/components/originattributes/test/browser/browser.ini
+++ b/browser/components/originattributes/test/browser/browser.ini
@@ -14,11 +14,14 @@ support-files =
   test_firstParty.html
   test_firstParty_cookie.html
   test_firstParty_html_redirect.html
   test_firstParty_http_redirect.html
   test_firstParty_http_redirect.html^headers^
   test_firstParty_iframe_http_redirect.html
   test_firstParty_postMessage.html
   window.html
+  worker_blobify.js
+  worker_deblobify.js
 
 [browser_firstPartyIsolation.js]
 [browser_localStorageIsolation.js]
+[browser_blobURLIsolation.js]
new file mode 100644
--- /dev/null
+++ b/browser/components/originattributes/test/browser/browser_blobURLIsolation.js
@@ -0,0 +1,97 @@
+/**
+ * Bug 1264573 - A test case for blob url isolation.
+ */
+
+const TEST_PAGE = "http://mochi.test:8888/browser/browser/components/" +
+                  "originattributes/test/browser/file_firstPartyBasic.html";
+const SCRIPT_WORKER_BLOBIFY = "worker_blobify.js";
+const SCRIPT_WORKER_DEBLOBIFY = "worker_deblobify.js";
+
+function page_blobify(browser, input) {
+  return ContentTask.spawn(browser, input, function(input) {
+    return { blobURL: content.URL.createObjectURL(new content.Blob([input])) };
+  });
+}
+
+function page_deblobify(browser, blobURL) {
+  return ContentTask.spawn(browser, blobURL, function* (blobURL) {
+    if ("error" in blobURL) {
+      return blobURL;
+    }
+    blobURL = blobURL.blobURL;
+
+    function blobURLtoBlob(blobURL) {
+      return new content.Promise(function (resolve) {
+        let xhr = new content.XMLHttpRequest();
+        xhr.open("GET", blobURL, true);
+        xhr.onload = function () {
+          resolve(xhr.response);
+        };
+        xhr.onerror = function () {
+          resolve("xhr error");
+        };
+        xhr.responseType = "blob";
+        xhr.send();
+      });
+    }
+
+    function blobToString(blob) {
+      return new content.Promise(function (resolve) {
+        let fileReader = new content.FileReader();
+        fileReader.onload = function () {
+          resolve(fileReader.result);
+        };
+        fileReader.readAsText(blob);
+      });
+    }
+
+    let blob = yield blobURLtoBlob(blobURL);
+    if (blob == "xhr error") {
+      return "xhr error";
+    }
+
+    return yield blobToString(blob);
+  });
+}
+
+function workerIO(browser, scriptFile, message) {
+  return ContentTask.spawn(browser, {scriptFile, message}, function* (args) {
+    let worker = new content.Worker(args.scriptFile);
+    let promise = new content.Promise(function(resolve) {
+      let listenFunction = function(event) {
+        worker.removeEventListener("message", listenFunction, false);
+        worker.terminate();
+        resolve(event.data);
+      };
+      worker.addEventListener("message", listenFunction, false);
+    });
+    worker.postMessage(args.message);
+    return yield promise;
+  });
+}
+
+let worker_blobify = (browser, input) => workerIO(browser, SCRIPT_WORKER_BLOBIFY, input);
+let worker_deblobify = (browser, blobURL) => workerIO(browser, SCRIPT_WORKER_DEBLOBIFY, blobURL);
+
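+// doTest() returns a closure that the isolation framework calls once per tab:
+// the first call (tab A) blobifies a random string and returns that string;
+// the second call (tab B) deblobifies the URL created in tab A and returns
+// whatever it can read, so the two results only match when isolation is off.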
+function doTest(blobify, deblobify) {
+  let blobURL = null;
+  return function* (browser) {
+    if (blobURL === null) {
+      let input = Math.random().toString();
+      blobURL = yield blobify(browser, input);
+      return input;
+    }
+    let result = yield deblobify(browser, blobURL);
+    blobURL = null;
+    return result;
+  }
+}
+
+let tests = [];
+for (let blobify of [page_blobify, worker_blobify]) {
+  for (let deblobify of [page_deblobify, worker_deblobify]) {
+    tests.push(doTest(blobify, deblobify));
+  }
+}
+
+IsolationTestTools.runTests(TEST_PAGE, tests);
--- a/browser/components/originattributes/test/browser/head.js
+++ b/browser/components/originattributes/test/browser/head.js
@@ -254,41 +254,45 @@ this.IsolationTestTools = {
   /**
    * Run isolation tests. The framework will run tests with standard combinations
    * of prefs and tab settings, and checks whether the isolation is working.
    *
    * @param aURL
    *    The URL of the page that will be tested or an object contains 'url',
    *    the tested page, 'firstFrameSetting' for the frame setting of the first
    *    tab, and 'secondFrameSetting' for the second tab.
-   * @param aGetResultFunc
-   *    A function which is responsible for returning the isolation result back
-   *    to the framework for further checking. This function will be provided
-   *    the browser object of the tab, that allows modifying or fetching results
-   *    from the page content.
+   * @param aGetResultFuncs
+   *    An array of functions, or a single function, responsible for returning
+   *    the isolation result back to the framework for further checking.
+   *    Each of these functions is given the browser object of the tab, which
+   *    allows modifying the page content or fetching results from it.
    * @param aCompareResultFunc
   *    An optional function which allows changing how the framework checks
   *    results. It is given a boolean indicating whether isolation is on or
   *    off, plus the two results, and should return a boolean telling whether
   *    isolation is working. If this function is not given, the framework
   *    takes care of the check by itself.
    */
-  runTests(aURL, aGetResultFunc, aCompareResultFunc) {
+  runTests(aURL, aGetResultFuncs, aCompareResultFunc) {
     let pageURL;
     let firstFrameSetting;
     let secondFrameSetting;
 
     if (typeof aURL === "string") {
       pageURL = aURL;
     } else if (typeof aURL === "object") {
       pageURL = aURL.url;
       firstFrameSetting = aURL.firstFrameSetting;
       secondFrameSetting = aURL.secondFrameSetting;
     }
 
+    if (!Array.isArray(aGetResultFuncs)) {
+      aGetResultFuncs = [aGetResultFuncs];
+    }
+
     let tabSettings = [
                         { firstPartyDomain: "http://example.com", userContextId: 1},
                         { firstPartyDomain: "http://example.org", userContextId: 2}
                       ];
 
     this._add_task(function* (aMode) {
       let tabSettingA = 0;
 
@@ -298,37 +302,39 @@ this.IsolationTestTools = {
                                                         pageURL,
                                                         tabSettings[tabSettingA],
                                                         firstFrameSetting);
         let tabInfoB = yield IsolationTestTools._addTab(aMode,
                                                         pageURL,
                                                         tabSettings[tabSettingB],
                                                         secondFrameSetting);
 
-        // Fetch results from tabs.
-        let resultA = yield aGetResultFunc(tabInfoA.browser);
-        let resultB = yield aGetResultFunc(tabInfoB.browser);
+        for (let getResultFunc of aGetResultFuncs) {
+          // Fetch results from tabs.
+          let resultA = yield getResultFunc(tabInfoA.browser);
+          let resultB = yield getResultFunc(tabInfoB.browser);
+
+          // Compare results.
+          let result = false;
+          let shouldIsolate = (aMode !== TEST_MODE_NO_ISOLATION) &&
+                              tabSettingA !== tabSettingB;
+          if (aCompareResultFunc) {
+            result = yield aCompareResultFunc(shouldIsolate, resultA, resultB);
+          } else {
+            result = shouldIsolate ? resultA !== resultB :
+                                     resultA === resultB;
+          }
+
+          let msg = `Testing ${TEST_MODE_NAMES[aMode]} for ` +
+            `isolation ${shouldIsolate ? "on" : "off"} with TabSettingA ` +
+            `${tabSettingA} and tabSettingB ${tabSettingB}` +
+            `, resultA = ${resultA}, resultB = ${resultB}`;
+
+          ok(result, msg);
+        }
 
         // Close Tabs.
         yield BrowserTestUtils.removeTab(tabInfoA.tab);
         yield BrowserTestUtils.removeTab(tabInfoB.tab);
-
-        // Compare results.
-        let result = false;
-        let shouldIsolate = (aMode !== TEST_MODE_NO_ISOLATION) &&
-                            tabSettingA !== tabSettingB;
-        if (aCompareResultFunc) {
-          result = yield aCompareResultFunc(shouldIsolate, resultA, resultB);
-        } else {
-          result = shouldIsolate ? resultA !== resultB :
-                                   resultA === resultB;
-        }
-
-        let msg = `Testing ${TEST_MODE_NAMES[aMode]} for ` +
-                  `isolation ${shouldIsolate ? "on" : "off"} with TabSettingA ` +
-                  `${tabSettingA} and tabSettingB ${tabSettingB}`;
-
-        ok(result, msg);
       }
-
     });
   }
 };
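For reference, a hypothetical sketch of how the extended runTests() signature is meant to be used: a single result function still works, while an array runs every probe against the same pair of tabs before they are closed. The probe names and page URL below are illustrative; the real caller added in this merge is browser_blobURLIsolation.js above.

  // Each probe receives the tab's browser object and returns a comparable value.
  let getCookie = browser =>
    ContentTask.spawn(browser, null, () => content.document.cookie);
  let getStorageItem = browser =>
    ContentTask.spawn(browser, null, () => content.localStorage.getItem("key"));

  IsolationTestTools.runTests("http://example.net/some_test_page.html",
                              [getCookie, getStorageItem]);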
new file mode 100644
--- /dev/null
+++ b/browser/components/originattributes/test/browser/worker_blobify.js
@@ -0,0 +1,11 @@
+// Wait for a string to be posted to this worker.
+// Create a blob containing this string, and then
+// post back a blob URL pointing to the blob.
+self.addEventListener("message", function (e) {
+  try {
+    var blobURL = URL.createObjectURL(new Blob([e.data]));
+    postMessage({ blobURL });
+  } catch (e) {
+    postMessage({ error: e.message });
+  }
+}, false);
new file mode 100644
--- /dev/null
+++ b/browser/components/originattributes/test/browser/worker_deblobify.js
@@ -0,0 +1,31 @@
+// Wait for a blob URL to be posted to this worker.
+// Obtain the blob, and read the string contained in it.
+// Post back the string.
+
+var postStringInBlob = function (blobObject) {
+  var fileReader = new FileReaderSync();
+  var result = fileReader.readAsText(blobObject);
+  postMessage(result);
+};
+
+self.addEventListener("message", function (e) {
+  if ("error" in e.data) {
+    postMessage(e.data);
+    return;
+  }
+  var blobURL = e.data.blobURL,
+      xhr = new XMLHttpRequest();
+  try {
+    xhr.open("GET", blobURL, true);
+    xhr.onload = function () {
+      postStringInBlob(xhr.response);
+    };
+    xhr.onerror = function () {
+      postMessage({ error: "xhr error" });
+    };
+    xhr.responseType = "blob";
+    xhr.send();
+  } catch (e) {
+    postMessage({ error: e.message });
+  }
+}, false);
--- a/browser/experiments/Experiments.jsm
+++ b/browser/experiments/Experiments.jsm
@@ -935,22 +935,16 @@ Experiments.Experiments.prototype = {
 
   /*
    * Helper function to make HTTP GET requests. Returns a promise that is resolved with
    * the responseText when the request is complete.
    */
   _httpGetRequest: function (url) {
     this._log.trace("httpGetRequest(" + url + ")");
     let xhr = Cc["@mozilla.org/xmlextras/xmlhttprequest;1"].createInstance(Ci.nsIXMLHttpRequest);
-    try {
-      xhr.open("GET", url);
-    } catch (e) {
-      this._log.error("httpGetRequest() - Error opening request to " + url + ": " + e);
-      return Promise.reject(new Error("Experiments - Error opening XHR for " + url));
-    }
 
     this._networkRequest = xhr;
     let deferred = Promise.defer();
 
     let log = this._log;
     let errorhandler = (evt) => {
       log.error("httpGetRequest::onError() - Error making request to " + url + ": " + evt.type);
       deferred.reject(new Error("Experiments - XHR error for " + url + " - " + evt.type));
@@ -967,22 +961,29 @@ Experiments.Experiments.prototype = {
         this._networkRequest = null;
         return;
       }
 
       deferred.resolve(xhr.responseText);
       this._networkRequest = null;
     };
 
-    if (xhr.channel instanceof Ci.nsISupportsPriority) {
-      xhr.channel.priority = Ci.nsISupportsPriority.PRIORITY_LOWEST;
+    try {
+      xhr.open("GET", url);
+
+      if (xhr.channel instanceof Ci.nsISupportsPriority) {
+        xhr.channel.priority = Ci.nsISupportsPriority.PRIORITY_LOWEST;
+      }
+
+      xhr.timeout = MANIFEST_FETCH_TIMEOUT_MSEC;
+      xhr.send(null);
+    } catch (e) {
+      this._log.error("httpGetRequest() - Error opening request to " + url + ": " + e);
+      return Promise.reject(new Error("Experiments - Error opening XHR for " + url));
     }
-
-    xhr.timeout = MANIFEST_FETCH_TIMEOUT_MSEC;
-    xhr.send(null);
     return deferred.promise;
   },
 
   /*
    * Path of the cache file we use in the profile.
    */
   get _cacheFilePath() {
     return OS.Path.join(OS.Constants.Path.profileDir, FILE_CACHE);
--- a/browser/extensions/pocket/content/panels/css/saved.css
+++ b/browser/extensions/pocket/content/panels/css/saved.css
@@ -401,17 +401,17 @@
     float: left;
     line-height: normal;
     height: auto;
     min-height: 0;
     min-width: 5em;
     padding: 3px 2px 1px;
     text-transform: none;
 }
-.pkt_ext_containersaved .pkt_ext_tag_input_wrapper input::-moz-placeholder {
+.pkt_ext_containersaved .pkt_ext_tag_input_wrapper input::placeholder {
     color: #a9a9a9;
     letter-spacing: normal;
     text-transform: none;
 }
 .pkt_ext_containersaved .input_disabled {
     cursor: default;
     opacity: 0.5;
 }
--- a/browser/themes/osx/browser.css
+++ b/browser/themes/osx/browser.css
@@ -2325,17 +2325,17 @@ html|span.ac-emphasize-text-url {
   }
 
   #editBookmarkPanel .expander-up > .button-box > .button-icon,
   #editBookmarkPanel .expander-down > .button-box > .button-icon {
     width: 9px;
   }
 }
 
-#editBMPanel_tagsField > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::-moz-placeholder {
+#editBMPanel_tagsField > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::placeholder {
   opacity: 1.0;
   color: #bbb;
 }
 
 .editBMPanel_rowLabel {
   text-align: end;
 }
 
--- a/browser/themes/shared/newtab/newTab.inc.css
+++ b/browser/themes/shared/newtab/newTab.inc.css
@@ -80,16 +80,23 @@
   border: none;
 }
 
 .newtab-customize {
   height: 28px;
   width: 28px;
 }
 
+#newtab-customize-button {
+  font-size: 28px;
+  padding: 0;
+  /* only display the text label when CSS backgrounds are disabled (e.g. in high contrast mode) */
+  color: transparent;
+}
+
 #newtab-customize-button:-moz-any(:hover, :active, [active]) {
   background-image: -moz-image-rect(url(chrome://browser/skin/newtab/controls.svg), 0, 64, 32, 32);
   background-color: #FFFFFF;
   border: solid 1px #CCCCCC;
   border-radius: 2px;
 }
 
 /* CELLS */
--- a/browser/themes/windows/browser.css
+++ b/browser/themes/windows/browser.css
@@ -1352,18 +1352,18 @@ toolbar[brighttext] #close-button {
   /* let urlbar-back-button-clip-path clip the urlbar's right side for RTL */
   transform: scaleX(-1);
 }
 
 @conditionalForwardWithUrlbar@:-moz-locale-dir(rtl) {
   -moz-box-direction: reverse;
 }
 
-html|*.urlbar-input:-moz-lwtheme::-moz-placeholder,
-.searchbar-textbox:-moz-lwtheme > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::-moz-placeholder {
+html|*.urlbar-input:-moz-lwtheme::placeholder,
+.searchbar-textbox:-moz-lwtheme > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::placeholder {
   opacity: 1.0;
   color: #777;
 }
 
 #urlbar-container {
   -moz-box-align: center;
 }
 
--- a/devtools/client/styleeditor/test/sourcemap-css/test-bootstrap-scss.css
+++ b/devtools/client/styleeditor/test/sourcemap-css/test-bootstrap-scss.css
@@ -1849,17 +1849,17 @@ output {
   border: 1px solid #ccc;
   border-radius: 4px;
   box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
   transition: border-color ease-in-out 0.15s, box-shadow ease-in-out 0.15s; }
   .form-control:focus {
     border-color: #66afe9;
     outline: 0;
     box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(102, 175, 233, 0.6); }
-  .form-control::-moz-placeholder {
+  .form-control::placeholder {
     color: #999999;
     opacity: 1; }
   .form-control:-ms-input-placeholder {
     color: #999999; }
   .form-control::-webkit-input-placeholder {
     color: #999999; }
   .form-control[disabled], .form-control[readonly], fieldset[disabled] .form-control {
     cursor: not-allowed;
@@ -4505,9 +4505,9 @@ body {
   .header, .marketing, .footer {
     padding-left: 0;
     padding-right: 0; }
   .header {
     margin-bottom: 30px; }
   .jumbotron {
     border-bottom: 300; } }
 
-/*# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibWFpbi5jc3MiLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiIiwic291cmNlcyI6WyJtYWluLnNjc3MiXSwic291cmNlc0NvbnRlbnQiOlsiJGljb24tZm9udC1wYXRoOiBcIi4uL2Jvd2VyX2NvbXBvbmVudHMvYm9vdHN0cmFwLXNhc3Mtb2ZmaWNpYWwvdmVuZG9yL2Fzc2V0cy9mb250cy9ib290c3RyYXAvXCI7XG5cbi8vIGJvd2VyOnNjc3NcbkBpbXBvcnQgXCIuLi9ib3dlcl9jb21wb25lbnRzL2Jvb3RzdHJhcC1zYXNzLW9mZmljaWFsL3ZlbmRvci9hc3NldHMvc3R5bGVzaGVldHMvYm9vdHN0cmFwLnNjc3NcIjtcbi8vIGVuZGJvd2VyXG5cbi5icm93c2VoYXBweSB7XG4gICAgbWFyZ2luOiAwLjJlbSAwO1xuICAgIGJhY2tncm91bmQ6ICNjY2M7XG4gICAgY29sb3I6ICMwMDA7XG4gICAgcGFkZGluZzogMC4yZW0gMDtcbn1cblxuLyogU3BhY2Ugb3V0IGNvbnRlbnQgYSBiaXQgKi9cbmJvZHkge1xuICAgIHBhZGRpbmctdG9wOiAyMHB4O1xuICAgIHBhZGRpbmctYm90dG9tOiAyMHB4O1xufVxuXG4vKiBFdmVyeXRoaW5nIGJ1dCB0aGUganVtYm90cm9uIGdldHMgc2lkZSBzcGFjaW5nIGZvciBtb2JpbGUgZmlyc3Qgdmlld3MgKi9cbi5oZWFkZXIsXG4ubWFya2V0aW5nLFxuLmZvb3RlciB7XG4gICAgcGFkZGluZy1sZWZ0OiAxNXB4O1xuICAgIHBhZGRpbmctcmlnaHQ6IDE1cHg7XG59XG5cbi8qIEN1c3RvbSBwYWdlIGhlYWRlciAqL1xuLmhlYWRlciB7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG5cbiAgICAvKiBNYWtlIHRoZSBtYXN0aGVhZCBoZWFkaW5nIHRoZSBzYW1lIGhlaWdodCBhcyB0aGUgbmF2aWdhdGlvbiAqL1xuICAgIGgzIHtcbiAgICAgICAgbWFyZ2luLXRvcDogMDtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMDtcbiAgICAgICAgbGluZS1oZWlnaHQ6IDQwcHg7XG4gICAgICAgIHBhZGRpbmctYm90dG9tOiAxOXB4O1xuICAgIH1cbn1cblxuLyogQ3VzdG9tIHBhZ2UgZm9vdGVyICovXG4uZm9vdGVyIHtcbiAgICBwYWRkaW5nLXRvcDogMTlweDtcbiAgICBjb2xvcjogIzc3NztcbiAgICBib3JkZXItdG9wOiAxcHggc29saWQgI2U1ZTVlNTtcbn1cblxuLmNvbnRhaW5lci1uYXJyb3cgPiBociB7XG4gICAgbWFyZ2luOiAzMHB4IDA7XG59XG5cbi8qIE1haW4gbWFya2V0aW5nIG1lc3NhZ2UgYW5kIHNpZ24gdXAgYnV0dG9uICovXG4uanVtYm90cm9uIHtcbiAgICB0ZXh0LWFsaWduOiBjZW50ZXI7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG4gICAgLmJ0biB7XG4gICAgICAgIGZvbnQtc2l6ZTogMjFweDtcbiAgICAgICAgcGFkZGluZzogMTRweCAyNHB4O1xuICAgIH1cbn1cblxuLyogU3VwcG9ydGluZyBtYXJrZXRpbmcgY29udGVudCAqL1xuLm1hcmtldGluZyB7XG4gICAgbWFyZ2luOiA0MHB4IDA7XG4gICAgcCArIGg0IHtcbiAgICAgICAgbWFyZ2luLXRvcDogMjhweDtcbiAgICB9XG59XG5cbi8qIFJlc3BvbnNpdmU6IFBvcnRyYWl0IHRhYmxldHMgYW5kIHVwICovXG5AbWVkaWEgc2NyZWVuIGFuZCAobWluLXdpZHRoOiA3NjhweCkge1xuICAgIC5jb250YWluZXIge1xuICAgICAgICBtYXgtd2lkdGg6IDczMHB4O1xuICAgIH1cblxuICAgIC8qIFJlbW92ZSB0aGUgcGFkZGluZyB3ZSBzZXQgZWFybGllciAqL1xuICAgIC5oZWFkZXIsXG4gICAgLm1hcmtldGluZyxcbiAgICAuZm9vdGVyIHtcbiAgICAgICAgcGFkZGluZy1sZWZ0OiAwO1xuICAgICAgICBwYWRkaW5nLXJpZ2h0OiAwO1xuICAgIH1cblxuICAgIC8qIFNwYWNlIG91dCB0aGUgbWFzdGhlYWQgKi9cbiAgICAuaGVhZGVyIHtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMzBweDtcbiAgICB9XG5cbiAgICAvKiBSZW1vdmUgdGhlIGJvdHRvbSBib3JkZXIgb24gdGhlIGp1bWJvdHJvbiBmb3IgdmlzdWFsIGVmZmVjdCAqL1xuICAgIC5qdW1ib3Ryb24ge1xuICAgICAgICBib3JkZXItYm90dG9tOiAzMDA7XG4gICAgfVxufVxuXG4vLyB0aGlzIGlzIGEgY29tbWVudC4uLlxuIl0sInNvdXJjZVJvb3QiOiIvc291cmNlLyJ9 */
\ No newline at end of file
+/*# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibWFpbi5jc3MiLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiIiwic291cmNlcyI6WyJtYWluLnNjc3MiXSwic291cmNlc0NvbnRlbnQiOlsiJGljb24tZm9udC1wYXRoOiBcIi4uL2Jvd2VyX2NvbXBvbmVudHMvYm9vdHN0cmFwLXNhc3Mtb2ZmaWNpYWwvdmVuZG9yL2Fzc2V0cy9mb250cy9ib290c3RyYXAvXCI7XG5cbi8vIGJvd2VyOnNjc3NcbkBpbXBvcnQgXCIuLi9ib3dlcl9jb21wb25lbnRzL2Jvb3RzdHJhcC1zYXNzLW9mZmljaWFsL3ZlbmRvci9hc3NldHMvc3R5bGVzaGVldHMvYm9vdHN0cmFwLnNjc3NcIjtcbi8vIGVuZGJvd2VyXG5cbi5icm93c2VoYXBweSB7XG4gICAgbWFyZ2luOiAwLjJlbSAwO1xuICAgIGJhY2tncm91bmQ6ICNjY2M7XG4gICAgY29sb3I6ICMwMDA7XG4gICAgcGFkZGluZzogMC4yZW0gMDtcbn1cblxuLyogU3BhY2Ugb3V0IGNvbnRlbnQgYSBiaXQgKi9cbmJvZHkge1xuICAgIHBhZGRpbmctdG9wOiAyMHB4O1xuICAgIHBhZGRpbmctYm90dG9tOiAyMHB4O1xufVxuXG4vKiBFdmVyeXRoaW5nIGJ1dCB0aGUganVtYm90cm9uIGdldHMgc2lkZSBzcGFjaW5nIGZvciBtb2JpbGUgZmlyc3Qgdmlld3MgKi9cbi5oZWFkZXIsXG4ubWFya2V0aW5nLFxuLmZvb3RlciB7XG4gICAgcGFkZGluZy1sZWZ0OiAxNXB4O1xuICAgIHBhZGRpbmctcmlnaHQ6IDE1cHg7XG59XG5cbi8qIEN1c3RvbSBwYWdlIGhlYWRlciAqL1xuLmhlYWRlciB7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG5cbiAgICAvKiBNYWtlIHRoZSBtYXN0aGVhZCBoZWFkaW5nIHRoZSBzYW1lIGhlaWdodCBhcyB0aGUgbmF2aWdhdGlvbiAqL1xuICAgIGgzIHtcbiAgICAgICAgbWFyZ2luLXRvcDogMDtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMDtcbiAgICAgICAgbGluZS1oZWlnaHQ6IDQwcHg7XG4gICAgICAgIHBhZGRpbmctYm90dG9tOiAxOXB4O1xuICAgIH1cbn1cblxuLyogQ3VzdG9tIHBhZ2UgZm9vdGVyICovXG4uZm9vdGVyIHtcbiAgICBwYWRkaW5nLXRvcDogMTlweDtcbiAgICBjb2xvcjogIzc3NztcbiAgICBib3JkZXItdG9wOiAxcHggc29saWQgI2U1ZTVlNTtcbn1cblxuLmNvbnRhaW5lci1uYXJyb3cgPiBociB7XG4gICAgbWFyZ2luOiAzMHB4IDA7XG59XG5cbi8qIE1haW4gbWFya2V0aW5nIG1lc3NhZ2UgYW5kIHNpZ24gdXAgYnV0dG9uICovXG4uanVtYm90cm9uIHtcbiAgICB0ZXh0LWFsaWduOiBjZW50ZXI7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG4gICAgLmJ0biB7XG4gICAgICAgIGZvbnQtc2l6ZTogMjFweDtcbiAgICAgICAgcGFkZGluZzogMTRweCAyNHB4O1xuICAgIH1cbn1cblxuLyogU3VwcG9ydGluZyBtYXJrZXRpbmcgY29udGVudCAqL1xuLm1hcmtldGluZyB7XG4gICAgbWFyZ2luOiA0MHB4IDA7XG4gICAgcCArIGg0IHtcbiAgICAgICAgbWFyZ2luLXRvcDogMjhweDtcbiAgICB9XG59XG5cbi8qIFJlc3BvbnNpdmU6IFBvcnRyYWl0IHRhYmxldHMgYW5kIHVwICovXG5AbWVkaWEgc2NyZWVuIGFuZCAobWluLXdpZHRoOiA3NjhweCkge1xuICAgIC5jb250YWluZXIge1xuICAgICAgICBtYXgtd2lkdGg6IDczMHB4O1xuICAgIH1cblxuICAgIC8qIFJlbW92ZSB0aGUgcGFkZGluZyB3ZSBzZXQgZWFybGllciAqL1xuICAgIC5oZWFkZXIsXG4gICAgLm1hcmtldGluZyxcbiAgICAuZm9vdGVyIHtcbiAgICAgICAgcGFkZGluZy1sZWZ0OiAwO1xuICAgICAgICBwYWRkaW5nLXJpZ2h0OiAwO1xuICAgIH1cblxuICAgIC8qIFNwYWNlIG91dCB0aGUgbWFzdGhlYWQgKi9cbiAgICAuaGVhZGVyIHtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMzBweDtcbiAgICB9XG5cbiAgICAvKiBSZW1vdmUgdGhlIGJvdHRvbSBib3JkZXIgb24gdGhlIGp1bWJvdHJvbiBmb3IgdmlzdWFsIGVmZmVjdCAqL1xuICAgIC5qdW1ib3Ryb24ge1xuICAgICAgICBib3JkZXItYm90dG9tOiAzMDA7XG4gICAgfVxufVxuXG4vLyB0aGlzIGlzIGEgY29tbWVudC4uLlxuIl0sInNvdXJjZVJvb3QiOiIvc291cmNlLyJ9 */
--- a/devtools/client/themes/common.css
+++ b/devtools/client/themes/common.css
@@ -551,18 +551,18 @@ checkbox:-moz-focusring {
   background-position: calc(100% - 8px) center;
 }
 
 .devtools-searchinput > .textbox-input-box > .textbox-search-icons > .textbox-search-icon,
 .devtools-filterinput > .textbox-input-box > .textbox-search-icons > .textbox-search-icon {
   visibility: hidden;
 }
 
-.devtools-searchinput .textbox-input::-moz-placeholder,
-.devtools-filterinput .textbox-input::-moz-placeholder {
+.devtools-searchinput .textbox-input::placeholder,
+.devtools-filterinput .textbox-input::placeholder {
   font-style: normal;
 }
 
 .devtools-plaininput {
   border-color: transparent;
   background-color: transparent;
 }
 
--- a/devtools/client/webide/modules/app-validator.js
+++ b/devtools/client/webide/modules/app-validator.js
@@ -67,22 +67,22 @@ AppValidator.checkManifest = function (m
   let deferred = promise.defer();
   let error;
 
   let req = new XMLHttpRequest();
   req.overrideMimeType("text/plain");
 
   try {
     req.open("GET", manifestURL, true);
+    req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING;
   } catch (e) {
     error = strings.formatStringFromName("validator.invalidManifestURL", [manifestURL], 1);
     deferred.reject(error);
     return deferred.promise;
   }
-  req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING;
 
   req.onload = function () {
     let manifest = null;
     try {
       manifest = JSON.parse(req.responseText);
     } catch (e) {
       error = strings.formatStringFromName("validator.invalidManifestJSON", [e, manifestURL], 2);
       deferred.reject(error);
@@ -223,22 +223,22 @@ AppValidator.prototype.validateLaunchPat
     deferred.resolve();
     return deferred.promise;
   }
 
   let req = new XMLHttpRequest();
   req.overrideMimeType("text/plain");
   try {
     req.open("HEAD", indexURL, true);
+    req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING;
   } catch (e) {
     this.error(strings.formatStringFromName("validator.accessFailedLaunchPath", [indexURL], 1));
     deferred.resolve();
     return deferred.promise;
   }
-  req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING;
   req.onload = () => {
     if (req.status >= 400)
       this.error(strings.formatStringFromName("validator.accessFailedLaunchPathBadHttpCode", [indexURL, req.status], 2));
     deferred.resolve();
   };
   req.onerror = () => {
     this.error(strings.formatStringFromName("validator.accessFailedLaunchPath", [indexURL], 1));
     deferred.resolve();
--- a/devtools/shared/css-properties-db.js
+++ b/devtools/shared/css-properties-db.js
@@ -58,17 +58,17 @@ exports.ANGLE_TAKING_FUNCTIONS = ["linea
 
 var domUtils = Cc["@mozilla.org/inspector/dom-utils;1"].getService(Ci.inIDOMUtils);
 var json = JSON.stringify(domUtils.getCSSPseudoElementNames(), null, 0);
 copy(`exports.PSEUDO_ELEMENTS = ${json};\n`);
 console.log("The PSEUDO_ELEMENTS have been copied to your clipboard.")
  */
 
 /*eslint-disable */
-exports.PSEUDO_ELEMENTS = [":after",":before",":backdrop",":first-letter",":first-line",":-moz-selection",":-moz-focus-inner",":-moz-focus-outer",":-moz-list-bullet",":-moz-list-number",":-moz-math-anonymous",":-moz-progress-bar",":-moz-range-track",":-moz-range-progress",":-moz-range-thumb",":-moz-meter-bar",":-moz-placeholder",":-moz-color-swatch"];
+exports.PSEUDO_ELEMENTS = [":after",":before",":backdrop",":first-letter",":first-line",":-moz-selection",":-moz-focus-inner",":-moz-focus-outer",":-moz-list-bullet",":-moz-list-number",":-moz-math-anonymous",":-moz-progress-bar",":-moz-range-track",":-moz-range-progress",":-moz-range-thumb",":-moz-meter-bar",":-moz-placeholder",":placeholder",":-moz-color-swatch"];
 /*eslint-enable */
 
 /**
  * A list of CSS Properties and their various characteristics. This is used on the
  * client-side when the CssPropertiesActor is not found, or when the client and server
  * are the same version. A single property takes the form:
  *
  *  "animation": {
--- a/devtools/shared/gcli/commands/jsb.js
+++ b/devtools/shared/gcli/commands/jsb.js
@@ -101,35 +101,34 @@ exports.items = [
         jslint_happy: args.jslintHappy,
         brace_style: args.braceStyle,
         space_before_conditional: !args.noSpaceBeforeConditional,
         unescape_strings: args.unescapeStrings
       };
 
       let xhr = new XMLHttpRequest();
 
-      try {
-        xhr.open("GET", args.url, true);
-      } catch(e) {
-        return l10n.lookup("jsbInvalidURL");
-      }
-
       let deferred = context.defer();
 
       xhr.onreadystatechange = function() {
         if (xhr.readyState == 4) {
           if (xhr.status == 200 || xhr.status == 0) {
             let result = beautify.js(xhr.responseText, opts);
 
             ScratchpadManager.openScratchpad({text: result});
 
             deferred.resolve();
           } else {
             deferred.reject("Unable to load page to beautify: " + args.url + " " +
                             xhr.status + " " + xhr.statusText);
           }
         };
       }
-      xhr.send(null);
+      try {
+        xhr.open("GET", args.url, true);
+        xhr.send(null);
+      } catch(e) {
+        return l10n.lookup("jsbInvalidURL");
+      }
       return deferred.promise;
     }
   }
 ];
--- a/dom/base/DOMException.cpp
+++ b/dom/base/DOMException.cpp
@@ -196,17 +196,16 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_END
 NS_IMPL_CI_INTERFACE_GETTER(Exception, nsIXPCException)
 
 Exception::Exception(const nsACString& aMessage,
                      nsresult aResult,
                      const nsACString& aName,
                      nsIStackFrame *aLocation,
                      nsISupports *aData)
 : mResult(NS_OK),
-  mLineNumber(0),
   mInitialized(false),
   mHoldingJSVal(false)
 {
   // A little hack... The nsIGenericModule nsIClassInfo scheme relies on there
   // having been at least one instance made via the factory. Otherwise, the
   // shared factory/classinsance object never gets created and our QI getter
   // for our instance's pointer to our nsIClassInfo will always return null.
   // This is bad because it means that wrapped exceptions will never have a
@@ -218,17 +217,16 @@ Exception::Exception(const nsACString& a
     sEverMadeOneFromFactory = true;
   }
 
   Initialize(aMessage, aResult, aName, aLocation, aData);
 }
 
 Exception::Exception()
   : mResult(NS_OK),
-    mLineNumber(-1),
     mInitialized(false),
     mHoldingJSVal(false)
 {
 }
 
 Exception::~Exception()
 {
   if (mHoldingJSVal) {
@@ -311,34 +309,34 @@ NS_IMETHODIMP
 Exception::GetFilename(JSContext* aCx, nsAString& aFilename)
 {
   NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED);
 
   if (mLocation) {
     return mLocation->GetFilename(aCx, aFilename);
   }
 
-  aFilename.Assign(mFilename);
+  aFilename.Truncate();
   return NS_OK;
 }
 
 NS_IMETHODIMP
 Exception::GetLineNumber(JSContext* aCx, uint32_t *aLineNumber)
 {
   NS_ENSURE_ARG_POINTER(aLineNumber);
   NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED);
 
   if (mLocation) {
     int32_t lineno;
     nsresult rv = mLocation->GetLineNumber(aCx, &lineno);
     *aLineNumber = lineno;
     return rv;
   }
 
-  *aLineNumber = mLineNumber;
+  *aLineNumber = 0;
   return NS_OK;
 }
 
 NS_IMETHODIMP
 Exception::GetColumnNumber(uint32_t* aColumnNumber)
 {
   NS_ENSURE_ARG_POINTER(aColumnNumber);
   NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED);
@@ -479,17 +477,17 @@ Exception::LineNumber(JSContext* aCx) co
   if (mLocation) {
     int32_t lineno;
     if (NS_SUCCEEDED(mLocation->GetLineNumber(aCx, &lineno))) {
       return lineno;
     }
     return 0;
   }
 
-  return mLineNumber;
+  return 0;
 }
 
 uint32_t
 Exception::ColumnNumber() const
 {
   return 0;
 }
 
--- a/dom/base/DOMException.h
+++ b/dom/base/DOMException.h
@@ -131,18 +131,16 @@ protected:
     }
   }
 
   nsCString       mMessage;
   nsresult        mResult;
   nsCString       mName;
   nsCOMPtr<nsIStackFrame> mLocation;
   nsCOMPtr<nsISupports> mData;
-  nsString        mFilename;
-  int             mLineNumber;
   bool            mInitialized;
 
   bool mHoldingJSVal;
   JS::Heap<JS::Value> mThrownJSVal;
 
 private:
   static bool sEverMadeOneFromFactory;
 };
--- a/dom/base/WebSocket.cpp
+++ b/dom/base/WebSocket.cpp
@@ -10,17 +10,16 @@
 
 #include "jsapi.h"
 #include "jsfriendapi.h"
 #include "mozilla/DOMEventTargetHelper.h"
 #include "mozilla/net/WebSocketChannel.h"
 #include "mozilla/dom/File.h"
 #include "mozilla/dom/MessageEvent.h"
 #include "mozilla/dom/MessageEventBinding.h"
-#include "mozilla/dom/nsCSPService.h"
 #include "mozilla/dom/nsCSPContext.h"
 #include "mozilla/dom/nsCSPUtils.h"
 #include "mozilla/dom/ScriptSettings.h"
 #include "mozilla/dom/WorkerPrivate.h"
 #include "mozilla/dom/WorkerRunnable.h"
 #include "mozilla/dom/WorkerScope.h"
 #include "nsAutoPtr.h"
 #include "nsGlobalWindow.h"
@@ -1563,37 +1562,38 @@ WebSocketImpl::Init(JSContext* aCx,
       // We crash here because we are sure that mURI is a valid URI, so either we
       // are OOM'ing or something else bad is happening.
       if (NS_WARN_IF(NS_FAILED(rv))) {
         MOZ_CRASH();
       }
     }
 
     // The 'real' nsHttpChannel of the websocket gets opened in the parent.
-    // Since we don't serialize the CSP within child and parent we have to
-    // perform the CSP check here instead of AsyncOpen2().
+    // Since we don't serialize the CSP between the child and the parent, nor
+    // the loading context, we have to perform the content policy checks here
+    // instead of in AsyncOpen2().
     // Please note that websockets can't follow redirects, hence there is no
     // need to perform a CSP check after redirects.
-    nsCOMPtr<nsIContentPolicy> cspService = do_GetService(CSPSERVICE_CONTRACTID);
-    int16_t shouldLoad = nsIContentPolicy::REJECT_REQUEST;
-    aRv = cspService->ShouldLoad(nsIContentPolicy::TYPE_WEBSOCKET,
-                                 uri,
-                                 nullptr, // aRequestOrigin not used within CSP
-                                 originDoc,
-                                 EmptyCString(), // aMimeTypeGuess
-                                 nullptr, // aExtra
-                                 aPrincipal,
-                                 &shouldLoad);
+    int16_t shouldLoad = nsIContentPolicy::ACCEPT;
+    aRv = NS_CheckContentLoadPolicy(nsIContentPolicy::TYPE_WEBSOCKET,
+                                    uri,
+                                    aPrincipal,
+                                    originDoc,
+                                    EmptyCString(),
+                                    nullptr,
+                                    &shouldLoad,
+                                    nsContentUtils::GetContentPolicy(),
+                                    nsContentUtils::GetSecurityManager());
 
     if (NS_WARN_IF(aRv.Failed())) {
       return;
     }
 
     if (NS_CP_REJECTED(shouldLoad)) {
-      // Disallowed by CSP
+      // Disallowed by content policy
       aRv.Throw(NS_ERROR_CONTENT_BLOCKED);
       return;
     }
   }
 
   // Potentially the page uses the CSP directive 'upgrade-insecure-requests'.
   // In such a case we have to upgrade ws: to wss: and also update mSecure
   // to reflect that upgrade. Please note that we can not upgrade from ws:
--- a/dom/base/test/test_blobURL_expiring.html
+++ b/dom/base/test/test_blobURL_expiring.html
@@ -19,19 +19,19 @@ onmessage = function(e) {
     xhr.onload = function() {
       is(xhr.response, "123", "Response matches!");
       resolve();
     }
   })).then(function() {
     document.body.removeChild(iframe);
   }).then(function() {
     var xhr = new XMLHttpRequest();
-
+    xhr.open("GET", blobURL);
     try {
-      xhr.open("GET", blobURL);
+      xhr.send();
       ok(false, "The URL should be done!");
     } catch(e) {
       ok(true, "The URL should be done!");
     }
 
     SimpleTest.finish();
   });
 }
--- a/dom/base/test/test_blob_fragment_and_query.html
+++ b/dom/base/test/test_blob_fragment_and_query.html
@@ -29,30 +29,30 @@ function runTest() {
   var url = URL.createObjectURL(blob);
   ok(url, "We have a URI");
 
   var test = tests.shift();
 
   URL.revokeObjectURL(url + test.part);
 
   var xhr = new XMLHttpRequest();
-  try {
   xhr.open('GET', url + test.part);
-  } catch(e) {
-    ok(test.revoke, "This should fail!");
-    runTest();
-    return;
-  }
 
   xhr.onload = function() {
     is(xhr.responseText, 'hello world', 'URL: ' + url + test.part);
     runTest();
   }
 
-  xhr.send();
+  try {
+    xhr.send();
+  } catch(e) {
+    ok(test.revoke, "This should fail!");
+    runTest();
+    return;
+  }
   ok(!test.revoke, "This should succeed!");
 }
 
 SimpleTest.waitForExplicitFinish();
 runTest();
 
   </script>
 </body>
--- a/dom/plugins/ipc/PPluginModule.ipdl
+++ b/dom/plugins/ipc/PPluginModule.ipdl
@@ -152,12 +152,14 @@ parent:
 
   async Profile(nsCString aProfile);
 
   // Answers to request about site data
   async ReturnClearSiteData(NPError aRv, uint64_t aCallbackId);
 
   async ReturnSitesWithData(nsCString[] aSites, uint64_t aCallbackId);
 
+  intr GetKeyState(int32_t aVirtKey)
+    returns (int16_t aState);
 };
 
 } // namespace plugins
 } // namespace mozilla
--- a/dom/plugins/ipc/PluginModuleChild.cpp
+++ b/dom/plugins/ipc/PluginModuleChild.cpp
@@ -85,16 +85,19 @@ static CreateFileAPtr sCreateFileAStub =
 
 // Used with fix for flash fullscreen window losing focus.
 static bool gDelayFlashFocusReplyUntilEval = false;
 // Used to fix GetWindowInfo problems with internal flash settings dialogs
 static WindowsDllInterceptor sUser32Intercept;
 typedef BOOL (WINAPI *GetWindowInfoPtr)(HWND hwnd, PWINDOWINFO pwi);
 static GetWindowInfoPtr sGetWindowInfoPtrStub = nullptr;
 static HWND sBrowserHwnd = nullptr;
+// The sandboxed plugin process doesn't get current key states, so we need to
+// fetch them from the chrome process.
+typedef SHORT (WINAPI *GetKeyStatePtr)(int);
+static GetKeyStatePtr sGetKeyStatePtrStub = nullptr;
 #endif
 
 /* static */
 PluginModuleChild*
 PluginModuleChild::CreateForContentProcess(mozilla::ipc::Transport* aTransport,
                                            base::ProcessId aOtherPid)
 {
     PluginModuleChild* child = new PluginModuleChild(false);
@@ -2061,16 +2064,30 @@ PMCGetWindowInfoHook(HWND hWnd, PWINDOWI
 // its internal settings window. Post removing sub widgets for tabs, touch
   // this up so they get the rect they expect.
   // XXX potentially tie this to a specific major version?
   BOOL result = sGetWindowInfoPtrStub(hWnd, pwi);
   if (sBrowserHwnd && sBrowserHwnd == hWnd)
       pwi->rcWindow = pwi->rcClient;
   return result;
 }
+
+// static
+SHORT WINAPI
+PMCGetKeyState(int aVirtKey)
+{
+    PluginModuleChild* chromeInstance = PluginModuleChild::GetChrome();
+    if (chromeInstance) {
+        int16_t ret = 0;
+        if (chromeInstance->CallGetKeyState(aVirtKey, &ret)) {
+          return ret;
+        }
+    }
+    return sGetKeyStatePtrStub(aVirtKey);
+}
 #endif
 
 PPluginInstanceChild*
 PluginModuleChild::AllocPPluginInstanceChild(const nsCString& aMimeType,
                                              const uint16_t& aMode,
                                              const InfallibleTArray<nsCString>& aNames,
                                              const InfallibleTArray<nsCString>& aValues)
 {
@@ -2081,22 +2098,28 @@ PluginModuleChild::AllocPPluginInstanceC
     // allocates an instance on its own. Make sure it gets the latest copy
     // of quirks once we have them. Also note, with process-per-tab, we may
     // have multiple PluginModuleChilds in the same plugin process, so only
     // initialize this once in gChromeInstance, which is a singleton.
     GetChrome()->InitQuirksModes(aMimeType);
     mQuirks = GetChrome()->mQuirks;
 
 #ifdef XP_WIN
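+    // The interceptor is now initialized unconditionally, since either the
+    // GetWindowInfo hook or the new GetKeyState hook below may need it.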
+    sUser32Intercept.Init("user32.dll");
     if ((mQuirks & QUIRK_FLASH_HOOK_GETWINDOWINFO) &&
         !sGetWindowInfoPtrStub) {
-        sUser32Intercept.Init("user32.dll");
         sUser32Intercept.AddHook("GetWindowInfo", reinterpret_cast<intptr_t>(PMCGetWindowInfoHook),
                                  (void**) &sGetWindowInfoPtrStub);
     }
+
+    if ((mQuirks & QUIRK_FLASH_HOOK_GETKEYSTATE) &&
+        !sGetKeyStatePtrStub) {
+        sUser32Intercept.AddHook("GetKeyState", reinterpret_cast<intptr_t>(PMCGetKeyState),
+                                 (void**) &sGetKeyStatePtrStub);
+    }
 #endif
 
     return new PluginInstanceChild(&mFunctions, aMimeType, aMode, aNames,
                                    aValues);
 }
 
 void
 PluginModuleChild::InitQuirksModes(const nsCString& aMimeType)
--- a/dom/plugins/ipc/PluginModuleParent.cpp
+++ b/dom/plugins/ipc/PluginModuleParent.cpp
@@ -3360,9 +3360,25 @@ PluginModuleChromeParent::RecvProfile(co
     }
 
     mProfile = aProfile;
     mGatherer->GatheredOOPProfile();
 #endif
     return true;
 }
 
-
+bool
+PluginModuleParent::AnswerGetKeyState(const int32_t& aVirtKey, int16_t* aRet)
+{
+    return false;
+}
+
+bool
+PluginModuleChromeParent::AnswerGetKeyState(const int32_t& aVirtKey,
+                                            int16_t* aRet)
+{
+#if defined(XP_WIN)
+    *aRet = ::GetKeyState(aVirtKey);
+    return true;
+#else
+    return PluginModuleParent::AnswerGetKeyState(aVirtKey, aRet);
+#endif
+}
--- a/dom/plugins/ipc/PluginModuleParent.h
+++ b/dom/plugins/ipc/PluginModuleParent.h
@@ -203,16 +203,18 @@ protected:
     static void TimeoutChanged(const char* aPref, void* aModule);
 
     virtual void UpdatePluginTimeout() {}
 
     virtual bool RecvNotifyContentModuleDestroyed() override { return true; }
 
     virtual bool RecvProfile(const nsCString& aProfile) override { return true; }
 
+    virtual bool AnswerGetKeyState(const int32_t& aVirtKey, int16_t* aRet) override;
+
     virtual bool RecvReturnClearSiteData(const NPError& aRv,
                                          const uint64_t& aCallbackId) override;
 
     virtual bool RecvReturnSitesWithData(nsTArray<nsCString>&& aSites,
                                          const uint64_t& aCallbackId) override;
 
     void SetPluginFuncs(NPPluginFuncs* aFuncs);
 
@@ -493,16 +495,19 @@ class PluginModuleChromeParent
     void GatheredAsyncProfile(nsIProfileSaveEvent* aSaveEvent);
     void StartProfiler(nsIProfilerStartParams* aParams);
     void StopProfiler();
 #endif
 
     virtual bool
     RecvProfile(const nsCString& aProfile) override;
 
+    virtual bool
+    AnswerGetKeyState(const int32_t& aVirtKey, int16_t* aRet) override;
+
 private:
     virtual void
     EnteredCxxStack() override;
 
     void
     ExitedCxxStack() override;
 
     mozilla::ipc::IProtocol* GetInvokingProtocol();
--- a/dom/plugins/ipc/PluginQuirks.cpp
+++ b/dom/plugins/ipc/PluginQuirks.cpp
@@ -30,16 +30,19 @@ int GetQuirksFromMimeTypeAndFilename(con
         quirks |= QUIRK_FLASH_RETURN_EMPTY_DOCUMENT_ORIGIN;
 #ifdef OS_WIN
         quirks |= QUIRK_WINLESS_TRACKPOPUP_HOOK;
         quirks |= QUIRK_FLASH_THROTTLE_WMUSER_EVENTS;
         quirks |= QUIRK_FLASH_HOOK_SETLONGPTR;
         quirks |= QUIRK_FLASH_HOOK_GETWINDOWINFO;
         quirks |= QUIRK_FLASH_FIXUP_MOUSE_CAPTURE;
         quirks |= QUIRK_WINLESS_HOOK_IME;
+#if defined(_M_X64) || defined(__x86_64__)
+        quirks |= QUIRK_FLASH_HOOK_GETKEYSTATE;
+#endif
 #endif
     }
 
 #ifdef OS_WIN
     // QuickTime plugin usually loaded with audio/mpeg mimetype
     NS_NAMED_LITERAL_CSTRING(quicktime, "npqtplugin");
     if (FindInReadable(quicktime, aPluginFilename)) {
         quirks |= QUIRK_QUICKTIME_AVOID_SETWINDOW;
--- a/dom/plugins/ipc/PluginQuirks.h
+++ b/dom/plugins/ipc/PluginQuirks.h
@@ -50,16 +50,18 @@ enum PluginQuirks {
   // Work around a Flash bug where it fails to check the error code of a
   // NPN_GetValue(NPNVdocumentOrigin) call before trying to dereference
   // its char* output.
   QUIRK_FLASH_RETURN_EMPTY_DOCUMENT_ORIGIN        = 1 << 10,
   // Win: Addresses a Unity bug with mouse capture.
   QUIRK_UNITY_FIXUP_MOUSE_CAPTURE                 = 1 << 11,
   // Win: Hook IMM32 API to handle IME event on windowless plugin
   QUIRK_WINLESS_HOOK_IME                          = 1 << 12,
+  // Win: Hook GetKeyState to get the keyboard state in the sandboxed process
+  QUIRK_FLASH_HOOK_GETKEYSTATE                    = 1 << 13,
 };
 
 int GetQuirksFromMimeTypeAndFilename(const nsCString& aMimeType,
                                      const nsCString& aPluginFilename);
 
 } /* namespace plugins */
 } /* namespace mozilla */
 
--- a/dom/system/NetworkGeolocationProvider.js
+++ b/dom/system/NetworkGeolocationProvider.js
@@ -492,25 +492,25 @@ WifiGeoPositionProvider.prototype = {
 
     let xhr = Components.classes["@mozilla.org/xmlextras/xmlhttprequest;1"]
                         .createInstance(Ci.nsIXMLHttpRequest);
 
     this.notifyListener("locationUpdatePending");
 
     try {
       xhr.open("POST", url, true);
+      xhr.channel.loadFlags = Ci.nsIChannel.LOAD_ANONYMOUS;
     } catch (e) {
       this.notifyListener("notifyError",
                           [POSITION_UNAVAILABLE]);
       return;
     }
     xhr.setRequestHeader("Content-Type", "application/json; charset=UTF-8");
     xhr.responseType = "json";
     xhr.mozBackgroundRequest = true;
-    xhr.channel.loadFlags = Ci.nsIChannel.LOAD_ANONYMOUS;
     xhr.timeout = Services.prefs.getIntPref("geo.wifi.xhr.timeout");
     xhr.ontimeout = (function() {
       LOG("Location request XHR timed out.")
       this.notifyListener("notifyError",
                           [POSITION_UNAVAILABLE]);
     }).bind(this);
     xhr.onerror = (function() {
       this.notifyListener("notifyError",
--- a/dom/xhr/XMLHttpRequestMainThread.cpp
+++ b/dom/xhr/XMLHttpRequestMainThread.cpp
@@ -1521,21 +1521,21 @@ XMLHttpRequestMainThread::OpenInternal(c
   mAuthorRequestHeaders.Clear();
   ResetResponse();
 
   // Gecko-specific
   mFlagHadUploadListenersOnSend = false;
   mFlagAborted = false;
   mFlagTimedOut = false;
 
-  // The channel should really be created on send(), but we have a chrome-only
-  // XHR.channel API which necessitates creating the channel now, while doing
-  // the rest of the channel-setup later at send-time.
-  rv = CreateChannel();
-  NS_ENSURE_SUCCESS(rv, rv);
+  // Per spec we should only create the channel on send(), but we have internal
+  // code that relies on the channel being created now, and that code is not
+  // always IsSystemXHR(). However, we're not supposed to throw channel-creation
+  // errors during open(), so we silently ignore those here.
+  CreateChannel();
 
   // Step 12
   if (mState != State::opened) {
     mState = State::opened;
     FireReadystatechangeEvent();
   }
 
   return NS_OK;
@@ -2627,17 +2627,17 @@ XMLHttpRequestMainThread::InitiateFetch(
     // ref to us to be extra safe.
     mChannel->SetNotificationCallbacks(mNotificationCallbacks);
     mChannel = nullptr;
 
     mErrorLoad = true;
 
     // Per spec, we throw on sync errors, but not async.
     if (mFlagSynchronous) {
-      return rv;
+      return NS_ERROR_DOM_NETWORK_ERR;
     }
   }
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
 XMLHttpRequestMainThread::Send(nsIVariant* aVariant)
@@ -2719,29 +2719,35 @@ XMLHttpRequestMainThread::Send(nsIVarian
   return SendInternal(&body);
 }
 
 nsresult
 XMLHttpRequestMainThread::SendInternal(const RequestBodyBase* aBody)
 {
   NS_ENSURE_TRUE(mPrincipal, NS_ERROR_NOT_INITIALIZED);
 
-  PopulateNetworkInterfaceId();
+  // Steps 1 and 2
+  if (mState != State::opened || mFlagSend) {
+    return NS_ERROR_DOM_INVALID_STATE_ERR;
+  }
 
   nsresult rv = CheckInnerWindowCorrectness();
   if (NS_FAILED(rv)) {
     return NS_ERROR_DOM_INVALID_STATE_ERR;
   }
 
-  if (mState != State::opened || // Step 1
-      mFlagSend || // Step 2
-      !mChannel) { // Gecko-specific
-    return NS_ERROR_DOM_INVALID_STATE_ERR;
+  // If open() failed to create the channel, then throw a network error
+  // as per spec. We really should create the channel here in send(), but
+  // we have internal code relying on the channel being created in open().
+  if (!mChannel) {
+    return NS_ERROR_DOM_NETWORK_ERR;
   }
 
+  PopulateNetworkInterfaceId();
+
   // XXX We should probably send a warning to the JS console
   //     if there are no event listeners set and we are doing
   //     an asynchronous call.
 
   mUploadTransferred = 0;
   mUploadTotal = 0;
   // By default we don't have any upload, so mark upload complete.
   mUploadComplete = true;
@@ -2888,17 +2894,17 @@ XMLHttpRequestMainThread::SendInternal(c
       DispatchProgressEvent(mUpload, ProgressEventType::loadstart,
                             0, mUploadTotal);
     }
   }
 
   if (!mChannel) {
     // Per spec, silently fail on async request failures; throw for sync.
     if (mFlagSynchronous) {
-      return NS_ERROR_FAILURE;
+      return NS_ERROR_DOM_NETWORK_ERR;
     } else {
       // Defer the actual sending of async events just in case listeners
       // are attached after the send() method is called.
       NS_DispatchToCurrentThread(
         NewRunnableMethod<ProgressEventType>(this,
           &XMLHttpRequestMainThread::CloseRequestWithError,
           ProgressEventType::error));
       return NS_OK;
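An illustrative snippet of the web-observable effect of the XMLHttpRequestMainThread changes, in line with the updated tests elsewhere in this merge (test_blobURL_expiring.html, test_blob_fragment_and_query.html): failures that used to throw from open() now surface from send(). The revoked blob URL here is just an example of a request whose channel cannot be created.

  let url = URL.createObjectURL(new Blob(["hello"]));
  URL.revokeObjectURL(url);

  let xhr = new XMLHttpRequest();
  xhr.open("GET", url);   // no longer throws even though the URL is dead
  try {
    xhr.send();           // the failure is reported here instead
  } catch (e) {
    console.log("send() threw: " + e.name);
  }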
--- a/editor/reftests/xul/placeholder-reset.css
+++ b/editor/reftests/xul/placeholder-reset.css
@@ -1,8 +1,8 @@
 @namespace url('http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul');
 @namespace html url('http://www.w3.org/1999/xhtml');
 
 /* We need to have a non-transparent placeholder so we can test it. */
-html|input::-moz-placeholder {
+html|input::placeholder {
   opacity: 1.0;
   color: graytext;
-}
\ No newline at end of file
+}
--- a/embedding/components/windowwatcher/nsWindowWatcher.cpp
+++ b/embedding/components/windowwatcher/nsWindowWatcher.cpp
@@ -1106,18 +1106,19 @@ nsWindowWatcher::OpenWindowInternal(mozI
   nsCOMPtr<nsIPrincipal> subjectPrincipal =
     nsContentUtils::GetCurrentJSContext() ? nsContentUtils::SubjectPrincipal() :
                                             nullptr;
 
   if (windowIsNew) {
     auto* docShell = static_cast<nsDocShell*>(newDocShell.get());
 
     // If this is not a chrome docShell, we apply originAttributes from the
-    // subjectPrincipal.
+    // subjectPrincipal, unless it's an expanded principal.
     if (subjectPrincipal &&
+        !subjectPrincipal->GetIsExpandedPrincipal() &&
         docShell->ItemType() != nsIDocShellTreeItem::typeChrome) {
       DocShellOriginAttributes attrs;
       attrs.InheritFromDocToChildDocShell(BasePrincipal::Cast(subjectPrincipal)->OriginAttributesRef());
 
       docShell->SetOriginAttributes(attrs);
     }
 
     // Now set the opener principal on the new window.  Note that we need to do
--- a/gfx/2d/BezierUtils.cpp
+++ b/gfx/2d/BezierUtils.cpp
@@ -324,13 +324,16 @@ CalculateDistanceToEllipticArc(const Poi
   Float B = a * b + c * d;
   Float C = a * a + c * c - 1;
 
   Float S = sqrt(B * B - A * C);
 
   Float n1 = (- B + S) / A;
   Float n2 = (- B - S) / A;
 
+  MOZ_ASSERT(n1 >= 0);
+  MOZ_ASSERT(n2 >= 0);
+
   return n1 < n2 ? n1 : n2;
 }
 
 } // namespace gfx
 } // namespace mozilla
--- a/gfx/layers/LayerTreeInvalidation.cpp
+++ b/gfx/layers/LayerTreeInvalidation.cpp
@@ -157,16 +157,21 @@ struct LayerPropertiesBase : public Laye
   {
     MOZ_COUNT_CTOR(LayerPropertiesBase);
   }
   ~LayerPropertiesBase()
   {
     MOZ_COUNT_DTOR(LayerPropertiesBase);
   }
 
+protected:
+  LayerPropertiesBase(const LayerPropertiesBase& a) = delete;
+  LayerPropertiesBase& operator=(const LayerPropertiesBase& a) = delete;
+
+public:
   virtual nsIntRegion ComputeDifferences(Layer* aRoot,
                                          NotifySubDocInvalidationFunc aCallback,
                                          bool* aGeometryChanged);
 
   virtual void MoveBy(const IntPoint& aOffset);
 
   nsIntRegion ComputeChange(NotifySubDocInvalidationFunc aCallback,
                             bool& aGeometryChanged)
@@ -277,16 +282,21 @@ struct ContainerLayerProperties : public
     , mPreYScale(aLayer->GetPreYScale())
   {
     for (Layer* child = aLayer->GetFirstChild(); child; child = child->GetNextSibling()) {
       child->CheckCanary();
       mChildren.AppendElement(Move(CloneLayerTreePropertiesInternal(child)));
     }
   }
 
+protected:
+  ContainerLayerProperties(const ContainerLayerProperties& a) = delete;
+  ContainerLayerProperties& operator=(const ContainerLayerProperties& a) = delete;
+
+public:
   nsIntRegion ComputeChangeInternal(NotifySubDocInvalidationFunc aCallback,
                                     bool& aGeometryChanged) override
   {
     // Make sure we got our virtual call right
     mSubtypeCanary.Check();
     ContainerLayer* container = mLayer->AsContainerLayer();
     nsIntRegion invalidOfLayer; // Invalid regions of this layer.
     nsIntRegion result;         // Invalid regions for children only.
@@ -434,16 +444,21 @@ struct ContainerLayerProperties : public
 struct ColorLayerProperties : public LayerPropertiesBase
 {
   explicit ColorLayerProperties(ColorLayer *aLayer)
     : LayerPropertiesBase(aLayer)
     , mColor(aLayer->GetColor())
     , mBounds(aLayer->GetBounds())
   { }
 
+protected:
+  ColorLayerProperties(const ColorLayerProperties& a) = delete;
+  ColorLayerProperties& operator=(const ColorLayerProperties& a) = delete;
+
+public:
   virtual nsIntRegion ComputeChangeInternal(NotifySubDocInvalidationFunc aCallback,
                                             bool& aGeometryChanged)
   {
     ColorLayer* color = static_cast<ColorLayer*>(mLayer.get());
 
     if (mColor != color->GetColor()) {
       aGeometryChanged = true;
       return NewTransformedBounds();
--- a/gfx/layers/LayerTreeInvalidation.h
+++ b/gfx/layers/LayerTreeInvalidation.h
@@ -27,16 +27,23 @@ typedef void (*NotifySubDocInvalidationF
                                              const nsIntRegion& aRegion);
 
 /**
  * A set of cached layer properties (including those of child layers),
  * used for comparing differences in layer trees.
  */
 struct LayerProperties
 {
+protected:
+  LayerProperties() {}
+
+  LayerProperties(const LayerProperties& a) = delete;
+  LayerProperties& operator=(const LayerProperties& a) = delete;
+
+public:
   virtual ~LayerProperties() {}
 
   /**
    * Copies the current layer tree properties into
    * a new LayerProperties object.
    *
    * @param Layer tree to copy, or nullptr if we have no 
    * initial layer tree.
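
The change applied to LayerProperties and its subclasses above is the standard C++11 non-copyable idiom: the copy constructor and copy assignment operator are declared deleted (and kept protected alongside a protected default constructor) so accidental copies fail to compile while derived classes still construct normally. A minimal sketch of the idiom in isolation (type names are illustrative):

struct NonCopyableBase
{
protected:
  NonCopyableBase() {}

  NonCopyableBase(const NonCopyableBase&) = delete;
  NonCopyableBase& operator=(const NonCopyableBase&) = delete;

public:
  virtual ~NonCopyableBase() {}
};

struct Derived : NonCopyableBase {};

// Derived a;
// Derived b = a;   // compile error: copy constructor is deleted
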
--- a/image/decoders/nsPNGDecoder.cpp
+++ b/image/decoders/nsPNGDecoder.cpp
@@ -185,17 +185,18 @@ nsPNGDecoder::PostHasTransparencyIfNeede
 // CreateFrame() is used for both simple and animated images.
 nsresult
 nsPNGDecoder::CreateFrame(const FrameInfo& aFrameInfo)
 {
   MOZ_ASSERT(HasSize());
   MOZ_ASSERT(!IsMetadataDecode());
 
   // Check if we have transparency, and send notifications if needed.
-  auto transparency = GetTransparencyType(aFrameInfo.mFormat, aFrameInfo.mFrameRect);
+  auto transparency = GetTransparencyType(aFrameInfo.mFormat,
+                                          aFrameInfo.mFrameRect);
   PostHasTransparencyIfNeeded(transparency);
   SurfaceFormat format = transparency == TransparencyType::eNone
                        ? SurfaceFormat::B8G8R8X8
                        : SurfaceFormat::B8G8R8A8;
 
   // Make sure there's no animation or padding if we're downscaling.
   MOZ_ASSERT_IF(Size() != OutputSize(), mNumFrames == 0);
   MOZ_ASSERT_IF(Size() != OutputSize(), !GetImageMetadata().HasAnimation());
@@ -681,23 +682,25 @@ nsPNGDecoder::info_callback(png_structp 
       MOZ_ASSERT_UNREACHABLE("Doing downscale-during-decode "
                              "for an animated image?");
       png_error(decoder->mPNG, "Invalid downscale attempt"); // Abort decode.
     }
   }
 #endif
 
   if (decoder->IsMetadataDecode()) {
-    // If we are animated then the first frame rect is either: 1) the whole image
-    // if the IDAT chunk is part of the animation 2) the frame rect of the first
-    // fDAT chunk otherwise. If we are not animated then we want to make sure to
-    // call PostHasTransparency in the metadata decode if we need to. So it's okay
-    // to pass IntRect(0, 0, width, height) here for animated images; they will
-    // call with the proper first frame rect in the full decode.
-    auto transparency = decoder->GetTransparencyType(decoder->format, frameRect);
+    // If we are animated then the first frame rect is either:
+    // 1) the whole image if the IDAT chunk is part of the animation
+    // 2) the frame rect of the first fDAT chunk otherwise.
+    // If we are not animated then we want to make sure to call
+    // PostHasTransparency in the metadata decode if we need to. So it's
+    // okay to pass IntRect(0, 0, width, height) here for animated images;
+    // they will call with the proper first frame rect in the full decode.
+    auto transparency = decoder->GetTransparencyType(decoder->format,
+                                                     frameRect);
     decoder->PostHasTransparencyIfNeeded(transparency);
 
     // We have the metadata we're looking for, so stop here, before we allocate
     // buffers below.
     return decoder->DoTerminate(png_ptr, TerminalState::SUCCESS);
   }
 
 #ifdef PNG_APNG_SUPPORTED
@@ -752,17 +755,18 @@ nsPNGDecoder::PostInvalidationIfNeeded()
   PostInvalidation(invalidRect->mInputSpaceRect,
                    Some(invalidRect->mOutputSpaceRect));
 }
 
 static NextPixel<uint32_t>
 PackRGBPixelAndAdvance(uint8_t*& aRawPixelInOut)
 {
   const uint32_t pixel =
-    gfxPackedPixel(0xFF, aRawPixelInOut[0], aRawPixelInOut[1], aRawPixelInOut[2]);
+    gfxPackedPixel(0xFF, aRawPixelInOut[0], aRawPixelInOut[1],
+                   aRawPixelInOut[2]);
   aRawPixelInOut += 3;
   return AsVariant(pixel);
 }
 
 static NextPixel<uint32_t>
 PackRGBAPixelAndAdvance(uint8_t*& aRawPixelInOut)
 {
   const uint32_t pixel =
@@ -826,17 +830,18 @@ nsPNGDecoder::row_callback(png_structp p
     // Advance to the next pass. We may have to do this multiple times because
     // libpng will skip passes if the image is so small that no pixels have
     // changed on a given pass, but ADAM7InterpolatingFilter needs to be reset
     // once for every pass to perform interpolation properly.
     decoder->mPipe.ResetToFirstRow();
     decoder->mPass++;
   }
 
-  const png_uint_32 height = static_cast<png_uint_32>(decoder->mFrameRect.height);
+  const png_uint_32 height =
+    static_cast<png_uint_32>(decoder->mFrameRect.height);
 
   if (row_num >= height) {
     // Bail if we receive extra rows. This is especially important because if we
     // didn't, we might overflow the deinterlacing buffer.
     MOZ_ASSERT_UNREACHABLE("libpng producing extra rows?");
     return;
   }
 
@@ -931,17 +936,18 @@ nsPNGDecoder::DoTerminate(png_structp aP
 
 void
 nsPNGDecoder::DoYield(png_structp aPNGStruct)
 {
   // Pause data processing. png_process_data_pause() returns how many bytes of
   // the data that was passed to png_process_data() have not been consumed yet.
   // We use this information to tell StreamingLexer where to place us in the
   // input stream when we come back from the yield.
-  png_size_t pendingBytes = png_process_data_pause(aPNGStruct, /* save = */ false);
+  png_size_t pendingBytes = png_process_data_pause(aPNGStruct,
+                                                   /* save = */ false);
 
   MOZ_ASSERT(pendingBytes < mLastChunkLength);
   size_t consumedBytes = mLastChunkLength - min(pendingBytes, mLastChunkLength);
 
   mNextTransition =
     Transition::ContinueUnbufferedAfterYield(State::PNG_DATA, consumedBytes);
 }
 
@@ -973,20 +979,22 @@ nsPNGDecoder::frame_info_callback(png_st
   const IntRect frameRect(png_get_next_frame_x_offset(png_ptr, decoder->mInfo),
                           png_get_next_frame_y_offset(png_ptr, decoder->mInfo),
                           png_get_next_frame_width(png_ptr, decoder->mInfo),
                           png_get_next_frame_height(png_ptr, decoder->mInfo));
   const bool isInterlaced = bool(decoder->interlacebuf);
 
 #ifndef MOZ_EMBEDDED_LIBPNG
   // if using system library, check frame_width and height against 0
-  if (frameRect.width == 0)
+  if (frameRect.width == 0) {
     png_error(png_ptr, "Frame width must not be 0");
-  if (frameRect.height == 0)
+  }
+  if (frameRect.height == 0) {
     png_error(png_ptr, "Frame height must not be 0");
+  }
 #endif
 
   const FrameInfo info { decoder->format, frameRect, isInterlaced };
 
   // If the previous frame was hidden, skip the yield (which will mislead the
   // caller, who will think the previous frame was real) and just allocate the
   // new frame here.
   if (previousFrameWasHidden) {
@@ -1063,17 +1071,17 @@ nsPNGDecoder::SpeedHistogram() const
 bool
 nsPNGDecoder::IsValidICO() const
 {
   // Only 32-bit RGBA PNGs are valid ICO resources; see here:
   //   http://blogs.msdn.com/b/oldnewthing/archive/2010/10/22/10079192.aspx
 
   // If there are errors in the call to png_get_IHDR, the error_callback in
   // nsPNGDecoder.cpp is called.  In this error callback we do a longjmp, so
-  // we need to save the jump buffer here. Oterwise we'll end up without a
+  // we need to save the jump buffer here. Otherwise we'll end up without a
   // proper callstack.
   if (setjmp(png_jmpbuf(mPNG))) {
     // We got here from a longjmp call indirectly from png_get_IHDR
     return false;
   }
 
   png_uint_32
       png_width,  // Unused
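
The DoYield bookkeeping above converts libpng's "bytes not yet consumed" into "bytes consumed", which is what StreamingLexer needs to resume at the right position in the last chunk. A short worked example of that arithmetic, with made-up values:

#include <algorithm>
#include <cassert>
#include <cstddef>

int main()
{
  // If the last chunk passed to png_process_data() was 4096 bytes and
  // png_process_data_pause() reports 1000 bytes still pending, then 3096
  // bytes were consumed, so the lexer resumes 3096 bytes into that chunk.
  size_t lastChunkLength = 4096;   // assumed chunk size
  size_t pendingBytes = 1000;      // assumed value returned by libpng
  size_t consumedBytes = lastChunkLength - std::min(pendingBytes, lastChunkLength);
  assert(consumedBytes == 3096);
  return 0;
}
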
--- a/js/src/asmjs/AsmJS.cpp
+++ b/js/src/asmjs/AsmJS.cpp
@@ -61,16 +61,32 @@ using mozilla::Move;
 using mozilla::PodCopy;
 using mozilla::PodEqual;
 using mozilla::PodZero;
 using mozilla::PositiveInfinity;
 using JS::AsmJSOption;
 using JS::GenericNaN;
 
 /*****************************************************************************/
+
+// The asm.js valid heap lengths are precisely the WASM valid heap lengths for ARM
+// that are greater than or equal to MinHeapLength.
+static const size_t MinHeapLength = PageSize;
+
+static uint32_t
+RoundUpToNextValidAsmJSHeapLength(uint32_t length)
+{
+    if (length <= MinHeapLength)
+        return MinHeapLength;
+
+    return wasm::RoundUpToNextValidARMImmediate(length);
+}
+
+
+/*****************************************************************************/
 // asm.js module object
 
 // The asm.js spec recognizes this set of builtin Math functions.
 enum AsmJSMathBuiltinFunction
 {
     AsmJSMathBuiltin_sin, AsmJSMathBuiltin_cos, AsmJSMathBuiltin_tan,
     AsmJSMathBuiltin_asin, AsmJSMathBuiltin_acos, AsmJSMathBuiltin_atan,
     AsmJSMathBuiltin_ceil, AsmJSMathBuiltin_floor, AsmJSMathBuiltin_exp,
@@ -265,16 +281,17 @@ enum class CacheResult
 // AsmJSMetadata is built incrementally by ModuleValidator and then shared
 // immutably between AsmJSModules.
 
 struct AsmJSMetadataCacheablePod
 {
     uint32_t                numFFIs;
     uint32_t                srcLength;
     uint32_t                srcLengthWithRightBrace;
+    bool                    usesSimd;
 
     AsmJSMetadataCacheablePod() { PodZero(this); }
 };
 
 struct js::AsmJSMetadata : Metadata, AsmJSMetadataCacheablePod
 {
     AsmJSGlobalVector       asmJSGlobals;
     AsmJSImportVector       asmJSImports;
@@ -1585,16 +1602,17 @@ class MOZ_STACK_CLASS ModuleValidator
     LifoAlloc             validationLifo_;
     FuncVector            functions_;
     FuncPtrTableVector    funcPtrTables_;
     GlobalMap             globalMap_;
     SigMap                sigMap_;
     ImportMap             importMap_;
     ArrayViewVector       arrayViews_;
     bool                  atomicsPresent_;
+    bool                  simdPresent_;
 
     // State used to build the AsmJSModule in finish():
     ModuleGenerator       mg_;
     MutableAsmJSMetadata  asmJSMetadata_;
 
     // Error reporting:
     UniqueChars           errorString_;
     uint32_t              errorOffset_;
@@ -1664,16 +1682,17 @@ class MOZ_STACK_CLASS ModuleValidator
         validationLifo_(VALIDATION_LIFO_DEFAULT_CHUNK_SIZE),
         functions_(cx),
         funcPtrTables_(cx),
         globalMap_(cx),
         sigMap_(cx),
         importMap_(cx),
         arrayViews_(cx),
         atomicsPresent_(false),
+        simdPresent_(false),
         mg_(ImportVector()),
         errorString_(nullptr),
         errorOffset_(UINT32_MAX),
         errorOverRecursed_(false)
     {}
 
     ~ModuleValidator() {
         if (errorString_) {
@@ -1985,32 +2004,36 @@ class MOZ_STACK_CLASS ModuleValidator
         if (!globalMap_.putNew(var, global))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::AtomicsBuiltinFunction, Move(fieldChars));
         g.pod.u.atomicsBuiltinFunc_ = func;
         return asmJSMetadata_->asmJSGlobals.append(Move(g));
     }
     bool addSimdCtor(PropertyName* var, SimdType type, PropertyName* field) {
+        simdPresent_ = true;
+
         UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
         if (!fieldChars)
             return false;
 
         Global* global = validationLifo_.new_<Global>(Global::SimdCtor);
         if (!global)
             return false;
         global->u.simdCtorType_ = type;
         if (!globalMap_.putNew(var, global))
             return false;
 
         AsmJSGlobal g(AsmJSGlobal::SimdCtor, Move(fieldChars));
         g.pod.u.simdCtorType_ = type;
         return asmJSMetadata_->asmJSGlobals.append(Move(g));
     }
     bool addSimdOperation(PropertyName* var, SimdType type, SimdOperation op, PropertyName* field) {
+        simdPresent_ = true;
+
         UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
         if (!fieldChars)
             return false;
 
         Global* global = validationLifo_.new_<Global>(Global::SimdOp);
         if (!global)
             return false;
         global->u.simdOp.type_ = type;
@@ -2279,16 +2302,18 @@ class MOZ_STACK_CLASS ModuleValidator
     }
     bool finishFunctionBodies() {
         return mg_.finishFuncDefs();
     }
     SharedModule finish() {
         if (!arrayViews_.empty())
             mg_.initMemoryUsage(atomicsPresent_ ? MemoryUsage::Shared : MemoryUsage::Unshared);
 
+        asmJSMetadata_->usesSimd = simdPresent_;
+
         MOZ_ASSERT(asmJSMetadata_->asmJSFuncNames.empty());
         for (const Func* func : functions_) {
             CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func->name());
             if (!funcName || !asmJSMetadata_->asmJSFuncNames.emplaceBack(Move(funcName)))
                 return nullptr;
         }
 
         uint32_t endBeforeCurly = tokenStream().currentToken().pos.end;
@@ -5559,35 +5584,37 @@ CheckSimdOperationCall(FunctionValidator
       case SimdOperation::Fn_swizzle:
         return CheckSimdSwizzle(f, call, opType, type);
       case SimdOperation::Fn_shuffle:
         return CheckSimdShuffle(f, call, opType, type);
 
       case SimdOperation::Fn_load:
       case SimdOperation::Fn_load1:
       case SimdOperation::Fn_load2:
-      case SimdOperation::Fn_load3:
         return CheckSimdLoad(f, call, opType, op, type);
       case SimdOperation::Fn_store:
       case SimdOperation::Fn_store1:
       case SimdOperation::Fn_store2:
-      case SimdOperation::Fn_store3:
         return CheckSimdStore(f, call, opType, op, type);
 
       case SimdOperation::Fn_select:
         return CheckSimdSelect(f, call, opType, type);
 
       case SimdOperation::Fn_splat:
         return CheckSimdSplat(f, call, opType, type);
 
       case SimdOperation::Fn_allTrue:
         return CheckSimdAllTrue(f, call, opType, type);
       case SimdOperation::Fn_anyTrue:
         return CheckSimdAnyTrue(f, call, opType, type);
 
+      case SimdOperation::Fn_load3:
+      case SimdOperation::Fn_store3:
+        return f.fail(call, "asm.js does not support 3-element SIMD loads or stores");
+
       case SimdOperation::Constructor:
         MOZ_CRASH("constructors are handled in CheckSimdCtorCall");
       case SimdOperation::Fn_fromFloat64x2Bits:
         MOZ_CRASH("NYI");
     }
     MOZ_CRASH("unexpected simd operation in CheckSimdOperationCall");
 }
 
@@ -7805,19 +7832,31 @@ CheckBuffer(JSContext* cx, const AsmJSMe
                         memoryLength,
                         metadata.minMemoryLength));
         if (!msg)
             return false;
         return LinkFail(cx, msg.get());
     }
 
     if (buffer->is<ArrayBufferObject>()) {
-        Rooted<ArrayBufferObject*> abheap(cx, &buffer->as<ArrayBufferObject>());
-        if (!ArrayBufferObject::prepareForAsmJS(cx, abheap))
+        // On 64-bit, bounds checks are statically removed so the huge guard
+        // region is always necessary. On 32-bit, allocating a guard page
+        // requires reallocating the incoming ArrayBuffer which could trigger
+        // OOM. Thus, only ask for a guard page when SIMD is used since SIMD
+        // allows unaligned memory access (see MaxMemoryAccessSize comment).
+#ifdef WASM_HUGE_MEMORY
+        bool needGuard = true;
+#else
+        bool needGuard = metadata.usesSimd;
+#endif
+        Rooted<ArrayBufferObject*> arrayBuffer(cx, &buffer->as<ArrayBufferObject>());
+        if (!ArrayBufferObject::prepareForAsmJS(cx, arrayBuffer, needGuard))
             return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
+
+        MOZ_ASSERT(arrayBuffer->isAsmJSMalloced() || arrayBuffer->isWasmMapped());
     } else {
         if (!buffer->as<SharedArrayBufferObject>().isPreparedForAsmJS())
             return LinkFail(cx, "SharedArrayBuffer must be created with wasm test mode enabled");
     }
 
     return true;
 }
 
@@ -8822,29 +8861,16 @@ js::AsmJSFunctionToString(JSContext* cx,
             return nullptr;
         if (!out.append(src))
             return nullptr;
     }
 
     return out.finishString();
 }
 
-// The asm.js valid heap lengths are precisely the WASM valid heap lengths for ARM
-// greater or equal to MinHeapLength
-static const size_t MinHeapLength = PageSize;
-
 bool
 js::IsValidAsmJSHeapLength(uint32_t length)
 {
     if (length < MinHeapLength)
         return false;
 
-    return wasm::IsValidARMLengthImmediate(length);
-}
-
-uint32_t
-js::RoundUpToNextValidAsmJSHeapLength(uint32_t length)
-{
-    if (length <= MinHeapLength)
-        return MinHeapLength;
-
-    return wasm::RoundUpToNextValidARMLengthImmediate(length);
-}
+    return wasm::IsValidARMImmediate(length);
+}
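
The new needGuard logic in CheckBuffer boils down to: with WASM_HUGE_MEMORY (64-bit) a guard region is always requested because bounds checks are statically removed, while on 32-bit a guard page is requested only when the module uses SIMD, since adding the guard may require reallocating the incoming ArrayBuffer and can therefore OOM. A sketch of that decision in isolation (the helper is hypothetical; usesSimd is the new AsmJSMetadataCacheablePod field set in finish()):

// Hypothetical helper summarizing the guard-page decision in CheckBuffer().
static bool NeedsGuardPage(bool usesSimd)
{
#ifdef WASM_HUGE_MEMORY
    return true;        // 64-bit: guard region replaces explicit bounds checks
#else
    return usesSimd;    // 32-bit: only SIMD's unaligned accesses need the guard
#endif
}
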
--- a/js/src/asmjs/AsmJS.h
+++ b/js/src/asmjs/AsmJS.h
@@ -79,14 +79,11 @@ AsmJSFunctionToString(JSContext* cx, Han
 extern JSString*
 AsmJSModuleToString(JSContext* cx, HandleFunction fun, bool addParenToLambda);
 
 // asm.js heap:
 
 extern bool
 IsValidAsmJSHeapLength(uint32_t length);
 
-extern uint32_t
-RoundUpToNextValidAsmJSHeapLength(uint32_t length);
-
 } // namespace js
 
 #endif // asmjs_asmjs_h
--- a/js/src/asmjs/WasmBaselineCompile.cpp
+++ b/js/src/asmjs/WasmBaselineCompile.cpp
@@ -2899,134 +2899,37 @@ class BaseCompiler
         MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarF64");
 #endif
     }
 
     //////////////////////////////////////////////////////////////////////
     //
     // Heap access.
 
-    // TODO / CLEANUP - cloned from MIRGraph.cpp, should share.
-
-    bool needsBoundsCheckBranch(const MWasmMemoryAccess& access) const {
-        // A heap access needs a bounds-check branch if we're not relying on signal
-        // handlers to catch errors, and if it's not proven to be within bounds.
-        // We use signal-handlers on x64, but on x86 there isn't enough address
-        // space for a guard region.  Also, on x64 the atomic loads and stores
-        // can't (yet) use the signal handlers.
-
-#ifdef WASM_HUGE_MEMORY
-        return false;
-#else
-        return access.needsBoundsCheck();
-#endif
-    }
-
-    bool throwOnOutOfBounds(const MWasmMemoryAccess& access) {
-        return !isCompilingAsmJS();
-    }
-
-    // For asm.js code only: If we have a non-zero offset, it's possible that
-    // |ptr| itself is out of bounds, while adding the offset computes an
-    // in-bounds address. To catch this case, we need a second branch, which we
-    // emit out of line since it's unlikely to be needed in normal programs.
-    // For this, we'll generate an OffsetBoundsCheck OOL stub.
-
-    bool needsOffsetBoundsCheck(const MWasmMemoryAccess& access) const {
-        return isCompilingAsmJS() && access.offset() != 0;
-    }
-
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
-    class OffsetBoundsCheck : public OutOfLineCode
-    {
-        Label* maybeOutOfBounds;
-        Register ptrReg;
-        int32_t offset;
-
-      public:
-        OffsetBoundsCheck(Label* maybeOutOfBounds, Register ptrReg, int32_t offset)
-          : maybeOutOfBounds(maybeOutOfBounds),
-            ptrReg(ptrReg),
-            offset(offset)
-        {}
-
-        void generate(MacroAssembler& masm) {
-            // asm.js code only:
-            //
-            // The access is heap[ptr + offset]. The inline code checks that
-            // ptr < heap.length - offset. We get here when that fails. We need to check
-            // for the case where ptr + offset >= 0, in which case the access is still
-            // in bounds.
-
-            MOZ_ASSERT(offset != 0,
-                       "An access without a constant offset doesn't need a separate "
-                       "OffsetBoundsCheck");
-            masm.cmp32(ptrReg, Imm32(-uint32_t(offset)));
-            if (maybeOutOfBounds)
-                masm.j(Assembler::Below, maybeOutOfBounds);
-            else
-                masm.j(Assembler::Below, wasm::JumpTarget::OutOfBounds);
-
-# ifdef JS_CODEGEN_X64
-            // In order to get the offset to wrap properly, we must sign-extend the
-            // pointer to 32-bits. We'll zero out the sign extension immediately
-            // after the access to restore asm.js invariants.
-            masm.movslq(ptrReg, ptrReg);
-# endif
-
-            masm.jmp(rejoin());
-        }
-    };
-
-    // CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch()
-
-    MOZ_MUST_USE
-    bool emitBoundsCheckBranch(const MWasmMemoryAccess& access, RegI32 ptr, Label* maybeFail) {
-        Label* pass = nullptr;
-
-        if (needsOffsetBoundsCheck(access)) {
-            auto* oolCheck = new(alloc_) OffsetBoundsCheck(maybeFail, ptr.reg, access.offset());
-            maybeFail = oolCheck->entry();
-            pass = oolCheck->rejoin();
-            if (!addOutOfLineCode(oolCheck))
-                return false;
-        }
-
-        // The bounds check is a comparison with an immediate value. The asm.js
-        // module linking process will add the length of the heap to the immediate
-        // field, so -access->endOffset() will turn into
-        // (heapLength - access->endOffset()), allowing us to test whether the end
-        // of the access is beyond the end of the heap.
-        MOZ_ASSERT(access.endOffset() >= 1,
-                   "need to subtract 1 to use JAE, see also AssemblerX86Shared::UpdateBoundsCheck");
-
-        uint32_t cmpOffset = masm.cmp32WithPatch(ptr.reg, Imm32(1 - access.endOffset())).offset();
-        if (maybeFail)
-            masm.j(Assembler::AboveOrEqual, maybeFail);
-        else
-            masm.j(Assembler::AboveOrEqual, wasm::JumpTarget::OutOfBounds);
-
-        if (pass)
-            masm.bind(pass);
-
-        masm.append(wasm::BoundsCheck(cmpOffset));
-        return true;
-    }
-
-    class OutOfLineLoadTypedArrayOOB : public OutOfLineCode
+    // Return true only for real asm.js (HEAP[i>>2]|0) accesses which have the
+    // peculiar property of not throwing on out-of-bounds. Everything else
+    // (wasm, SIMD.js, Atomics) throws on out-of-bounds.
+    bool isAsmJSAccess(const MWasmMemoryAccess& access) {
+        return isCompilingAsmJS() && !access.isSimdAccess() && !access.isAtomicAccess();
+    }
+
+#ifndef WASM_HUGE_MEMORY
+    class AsmJSLoadOOB : public OutOfLineCode
     {
         Scalar::Type viewType;
         AnyRegister dest;
+
       public:
-        OutOfLineLoadTypedArrayOOB(Scalar::Type viewType, AnyRegister dest)
+        AsmJSLoadOOB(Scalar::Type viewType, AnyRegister dest)
           : viewType(viewType),
             dest(dest)
         {}
 
         void generate(MacroAssembler& masm) {
+#if defined(JS_CODEGEN_X86)
             switch (viewType) {
               case Scalar::Float32x4:
               case Scalar::Int32x4:
               case Scalar::Int8x16:
               case Scalar::Int16x8:
               case Scalar::MaxTypedArrayViewType:
                 MOZ_CRASH("unexpected array type");
               case Scalar::Float32:
@@ -3043,252 +2946,126 @@ class BaseCompiler
               case Scalar::Uint32:
               case Scalar::Uint8Clamped:
                 masm.movePtr(ImmWord(0), dest.gpr());
                 break;
               case Scalar::Int64:
                 MOZ_CRASH("unexpected array type");
             }
             masm.jump(rejoin());
+#else
+            Unused << viewType;
+            Unused << dest;
+            MOZ_CRASH("Compiler bug: Unexpected platform.");
+#endif
         }
     };
-
-    MOZ_MUST_USE
-    bool maybeEmitLoadBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr, AnyRegister dest,
-                                  OutOfLineCode** ool)
-    {
-        *ool = nullptr;
-        if (!needsBoundsCheckBranch(access))
-            return true;
-
-        if (throwOnOutOfBounds(access))
-            return emitBoundsCheckBranch(access, ptr, nullptr);
-
-        // TODO / MEMORY: We'll allocate *a lot* of these OOL objects,
-        // thus risking OOM on a platform that is already
-        // memory-constrained.  We could opt to allocate this path
-        // in-line instead.
-        *ool = new (alloc_) OutOfLineLoadTypedArrayOOB(access.accessType(), dest);
-        if (!addOutOfLineCode(*ool))
-            return false;
-
-        return emitBoundsCheckBranch(access, ptr, (*ool)->entry());
-    }
-
+#endif
+
+  private:
+    void checkOffset(MWasmMemoryAccess* access, RegI32 ptr) {
+        if (access->offset() >= OffsetGuardLimit) {
+            masm.branchAdd32(Assembler::CarrySet,
+                             Imm32(access->offset()), ptr.reg,
+                             JumpTarget::OutOfBounds);
+            access->clearOffset();
+        }
+    }
+
+  public:
     MOZ_MUST_USE
-    bool maybeEmitStoreBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr, Label** rejoin) {
-        *rejoin = nullptr;
-        if (!needsBoundsCheckBranch(access))
-            return true;
-
-        if (throwOnOutOfBounds(access))
-            return emitBoundsCheckBranch(access, ptr, nullptr);
-
-        *rejoin = newLabel();
-        if (!*rejoin)
-            return false;
-
-        return emitBoundsCheckBranch(access, ptr, *rejoin);
-    }
-
-    void cleanupAfterBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr) {
-# ifdef JS_CODEGEN_X64
-        if (needsOffsetBoundsCheck(access)) {
-            // Zero out the high 32 bits, in case the OffsetBoundsCheck code had to
-            // sign-extend (movslq) the pointer value to get wraparound to work.
-            masm.movl(ptr.reg, ptr.reg);
-        }
-# endif
-    }
-
-    MOZ_MUST_USE
-    bool loadHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg dest) {
-        if (access.offset() > INT32_MAX) {
-            masm.jump(wasm::JumpTarget::OutOfBounds);
-            return true;
-        }
+    bool load(MWasmMemoryAccess access, RegI32 ptr, AnyReg dest) {
+        checkOffset(&access, ptr);
 
         OutOfLineCode* ool = nullptr;
-        if (!maybeEmitLoadBoundsCheck(access, ptr, dest.any(), &ool))
-            return false;
+#ifndef WASM_HUGE_MEMORY
+        if (isAsmJSAccess(access)) {
+            ool = new (alloc_) AsmJSLoadOOB(access.accessType(), dest.any());
+            if (!addOutOfLineCode(ool))
+                return false;
+
+            masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, ool->entry());
+        } else {
+            masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, JumpTarget::OutOfBounds);
+        }
+#endif
 
 # if defined(JS_CODEGEN_X64)
         Operand srcAddr(HeapReg, ptr.reg, TimesOne, access.offset());
 
         uint32_t before = masm.size();
-        if (dest.tag == AnyReg::I64) {
-            Register out = dest.i64().reg.reg;
-            switch (access.accessType()) {
-              case Scalar::Int8:      masm.movsbq(srcAddr, out); break;
-              case Scalar::Uint8:     masm.movzbq(srcAddr, out); break;
-              case Scalar::Int16:     masm.movswq(srcAddr, out); break;
-              case Scalar::Uint16:    masm.movzwq(srcAddr, out); break;
-              case Scalar::Int32:     masm.movslq(srcAddr, out); break;
-              // Int32 to int64 moves zero-extend by default.
-              case Scalar::Uint32:    masm.movl(srcAddr, out); break;
-              case Scalar::Int64:     masm.movq(srcAddr, out); break;
-              default:
-                MOZ_CRASH("Compiler bug: Unexpected array type in int64 load");
-            }
-        } else {
-            switch (access.accessType()) {
-              case Scalar::Int8:      masm.movsbl(srcAddr, dest.i32().reg); break;
-              case Scalar::Uint8:     masm.movzbl(srcAddr, dest.i32().reg); break;
-              case Scalar::Int16:     masm.movswl(srcAddr, dest.i32().reg); break;
-              case Scalar::Uint16:    masm.movzwl(srcAddr, dest.i32().reg); break;
-              case Scalar::Int32:
-              case Scalar::Uint32:    masm.movl(srcAddr, dest.i32().reg); break;
-              case Scalar::Float32:   masm.loadFloat32(srcAddr, dest.f32().reg); break;
-              case Scalar::Float64:   masm.loadDouble(srcAddr, dest.f64().reg); break;
-              default:
-                MOZ_CRASH("Compiler bug: Unexpected array type");
-            }
-        }
-
-        if (isCompilingAsmJS())
-            masm.append(MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset));
-        // TODO: call verifyHeapAccessDisassembly somehow
+        if (dest.tag == AnyReg::I64)
+            masm.wasmLoadI64(access.accessType(), srcAddr, dest.i64().reg);
+        else
+            masm.wasmLoad(access.accessType(), 0, srcAddr, dest.any());
+
+        if (isAsmJSAccess(access))
+            masm.append(MemoryAccess(before));
 # elif defined(JS_CODEGEN_X86)
         Operand srcAddr(ptr.reg, access.offset());
 
-        if (dest.tag == AnyReg::I64)
-            MOZ_CRASH("Not implemented: I64 support");
-
-        bool mustMove = access.byteSize() == 1 && !singleByteRegs_.has(dest.i32().reg);
-        switch (access.accessType()) {
-          case Scalar::Int8:
-          case Scalar::Uint8: {
-            Register rd = mustMove ? ScratchRegX86 : dest.i32().reg;
-            if (access.accessType() == Scalar::Int8)
-                masm.movsblWithPatch(srcAddr, rd);
-            else
-                masm.movzblWithPatch(srcAddr, rd);
-            break;
-          }
-          case Scalar::Int16:     masm.movswlWithPatch(srcAddr, dest.i32().reg); break;
-          case Scalar::Uint16:    masm.movzwlWithPatch(srcAddr, dest.i32().reg); break;
-          case Scalar::Int32:
-          case Scalar::Uint32:    masm.movlWithPatch(srcAddr, dest.i32().reg); break;
-          case Scalar::Float32:   masm.vmovssWithPatch(srcAddr, dest.f32().reg); break;
-          case Scalar::Float64:   masm.vmovsdWithPatch(srcAddr, dest.f64().reg); break;
-          default:
-            MOZ_CRASH("Compiler bug: Unexpected array type");
-        }
-        uint32_t after = masm.size();
-        if (mustMove)
+        bool byteRegConflict = access.byteSize() == 1 && !singleByteRegs_.has(dest.i32().reg);
+        AnyRegister out = byteRegConflict ? AnyRegister(ScratchRegX86) : dest.any();
+
+        masm.wasmLoad(access.accessType(), 0, srcAddr, out);
+
+        if (byteRegConflict)
             masm.mov(ScratchRegX86, dest.i32().reg);
-
-        masm.append(wasm::MemoryAccess(after));
-        // TODO: call verifyHeapAccessDisassembly somehow
 # else
         MOZ_CRASH("Compiler bug: Unexpected platform.");
 # endif
 
-        if (ool) {
-            cleanupAfterBoundsCheck(access, ptr);
+        if (ool)
             masm.bind(ool->rejoin());
-        }
         return true;
     }
 
     MOZ_MUST_USE
-    bool storeHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg src) {
-        if (access.offset() > INT32_MAX) {
-            masm.jump(wasm::JumpTarget::OutOfBounds);
-            return true;
-        }
-
-        Label* rejoin = nullptr;
-        if (!maybeEmitStoreBoundsCheck(access, ptr, &rejoin))
-            return false;
-
+    bool store(MWasmMemoryAccess access, RegI32 ptr, AnyReg src) {
+        checkOffset(&access, ptr);
+
+        Label rejoin;
+#ifndef WASM_HUGE_MEMORY
+        if (isAsmJSAccess(access))
+            masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, &rejoin);
+        else
+            masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, JumpTarget::OutOfBounds);
+#endif
+
+        // Emit the store
 # if defined(JS_CODEGEN_X64)
         Operand dstAddr(HeapReg, ptr.reg, TimesOne, access.offset());
 
-        Register intReg = Register::Invalid();
-        if (src.tag == AnyReg::I32)
-            intReg = src.i32().reg;
-        else if (src.tag == AnyReg::I64)
-            intReg = src.i64().reg.reg;
-
         uint32_t before = masm.size();
-        switch (access.accessType()) {
-          case Scalar::Int8:
-          case Scalar::Uint8:        masm.movb(intReg, dstAddr); break;
-          case Scalar::Int16:
-          case Scalar::Uint16:       masm.movw(intReg, dstAddr); break;
-          case Scalar::Int32:
-          case Scalar::Uint32:       masm.movl(intReg, dstAddr); break;
-          case Scalar::Int64:        masm.movq(intReg, dstAddr); break;
-          case Scalar::Float32:      masm.storeFloat32(src.f32().reg, dstAddr); break;
-          case Scalar::Float64:      masm.storeDouble(src.f64().reg, dstAddr); break;
-          default:
-            MOZ_CRASH("Compiler bug: Unexpected array type");
-        }
+        masm.wasmStore(access.accessType(), 0, src.any(), dstAddr);
 
         if (isCompilingAsmJS())
-            masm.append(MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset));
-        // TODO: call verifyHeapAccessDisassembly somehow
+            masm.append(MemoryAccess(before));
 # elif defined(JS_CODEGEN_X86)
         Operand dstAddr(ptr.reg, access.offset());
 
-        if (src.tag == AnyReg::I64)
-            MOZ_CRASH("Not implemented: I64 support");
-
-        bool didMove = false;
+        AnyRegister value;
         if (access.byteSize() == 1 && !singleByteRegs_.has(src.i32().reg)) {
-            didMove = true;
             masm.mov(src.i32().reg, ScratchRegX86);
+            value = AnyRegister(ScratchRegX86);
+        } else {
+            value = src.any();
         }
-        switch (access.accessType()) {
-          case Scalar::Int8:
-          case Scalar::Uint8: {
-            Register rs = src.i32().reg;
-            Register rt = didMove ? ScratchRegX86 : rs;
-            masm.movbWithPatch(rt, dstAddr);
-            break;
-          }
-          case Scalar::Int16:
-          case Scalar::Uint16:       masm.movwWithPatch(src.i32().reg, dstAddr); break;
-          case Scalar::Int32:
-          case Scalar::Uint32:       masm.movlWithPatch(src.i32().reg, dstAddr); break;
-          case Scalar::Float32:      masm.vmovssWithPatch(src.f32().reg, dstAddr); break;
-          case Scalar::Float64:      masm.vmovsdWithPatch(src.f64().reg, dstAddr); break;
-          default:
-              MOZ_CRASH("Compiler bug: Unexpected array type");
-        }
-        uint32_t after = masm.size();
-
-        masm.append(wasm::MemoryAccess(after));
-        // TODO: call verifyHeapAccessDisassembly somehow
+
+        masm.wasmStore(access.accessType(), 0, value, dstAddr);
 # else
         MOZ_CRASH("Compiler bug: unexpected platform");
 # endif
 
-        if (rejoin) {
-            cleanupAfterBoundsCheck(access, ptr);
-            masm.bind(rejoin);
-        }
+        if (rejoin.used())
+            masm.bind(&rejoin);
+
         return true;
     }
 
-#else
-
-    MOZ_MUST_USE
-    bool loadHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg dest) {
-        MOZ_CRASH("BaseCompiler platform hook: loadHeap");
-    }
-
-    MOZ_MUST_USE
-    bool storeHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg src) {
-        MOZ_CRASH("BaseCompiler platform hook: storeHeap");
-    }
-
-#endif
-
     ////////////////////////////////////////////////////////////
 
     // Generally speaking, ABOVE this point there should be no value
     // stack manipulation (calls to popI32 etc).
 
     // Generally speaking, BELOW this point there should be no
     // platform dependencies.  We make an exception for x86 register
     // targeting, which is not too hard to keep clean.
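
checkOffset() above encodes the new offset-folding rule for the baseline compiler: a constant offset below OffsetGuardLimit can stay folded into the addressing mode because the guard region absorbs any overshoot, while a larger offset is added to the pointer up front, branching to JumpTarget::OutOfBounds if the 32-bit addition carries. A simplified sketch of that rule (the limit value is a stand-in, not the real constant):

#include <cstdint>

const uint32_t kOffsetGuardLimit = 4096;   // stand-in for OffsetGuardLimit

// Mirrors masm.branchAdd32(Assembler::CarrySet, ...): returns false where the
// generated code would branch to JumpTarget::OutOfBounds.
bool AddOffsetChecked(uint32_t ptr, uint32_t offset, uint32_t* out)
{
    if (offset < kOffsetGuardLimit) {
        *out = ptr;        // small offsets stay folded into the access itself
        return true;
    }
    uint64_t sum = uint64_t(ptr) + uint64_t(offset);
    if (sum > UINT32_MAX)
        return false;      // 32-bit add would carry: out of bounds
    *out = uint32_t(sum);
    return true;
}
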
@@ -5727,50 +5504,50 @@ BaseCompiler::emitLoad(ValType type, Sca
     // TODO / OPTIMIZE: Disable bounds checking on constant accesses
     // below the minimum heap length.
 
     MWasmMemoryAccess access(viewType, addr.align, addr.offset);
 
     switch (type) {
       case ValType::I32: {
         RegI32 rp = popI32();
-        if (!loadHeap(access, rp, AnyReg(rp)))
+        if (!load(access, rp, AnyReg(rp)))
             return false;
         pushI32(rp);
         break;
       }
       case ValType::I64: {
         RegI32 rp = popI32();
         RegI64 rv = needI64();
-        if (!loadHeap(access, rp, AnyReg(rv)))
+        if (!load(access, rp, AnyReg(rv)))
             return false;
         pushI64(rv);
         freeI32(rp);
         break;
       }
       case ValType::F32: {
         RegI32 rp = popI32();
         RegF32 rv = needF32();
-        if (!loadHeap(access, rp, AnyReg(rv)))
+        if (!load(access, rp, AnyReg(rv)))
             return false;
         pushF32(rv);
         freeI32(rp);
         break;
       }
       case ValType::F64: {
         RegI32 rp = popI32();
         RegF64 rv = needF64();
-        if (!loadHeap(access, rp, AnyReg(rv)))
+        if (!load(access, rp, AnyReg(rv)))
             return false;
         pushF64(rv);
         freeI32(rp);
         break;
       }
       default:
-        MOZ_CRASH("loadHeap type");
+        MOZ_CRASH("load type");
         break;
     }
     return true;
 }
 
 bool
 BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
 {
@@ -5786,51 +5563,51 @@ BaseCompiler::emitStore(ValType resultTy
     // below the minimum heap length.
 
     MWasmMemoryAccess access(viewType, addr.align, addr.offset);
 
     switch (resultType) {
       case ValType::I32: {
         RegI32 rp, rv;
         pop2xI32(&rp, &rv);
-        if (!storeHeap(access, rp, AnyReg(rv)))
+        if (!store(access, rp, AnyReg(rv)))
             return false;
         freeI32(rp);
         pushI32(rv);
         break;
       }
       case ValType::I64: {
         RegI64 rv = popI64();
         RegI32 rp = popI32();
-        if (!storeHeap(access, rp, AnyReg(rv)))
+        if (!store(access, rp, AnyReg(rv)))
             return false;
         freeI32(rp);
         pushI64(rv);
         break;
       }
       case ValType::F32: {
         RegF32 rv = popF32();
         RegI32 rp = popI32();
-        if (!storeHeap(access, rp, AnyReg(rv)))
+        if (!store(access, rp, AnyReg(rv)))
             return false;
         freeI32(rp);
         pushF32(rv);
         break;
       }
       case ValType::F64: {
         RegF64 rv = popF64();
         RegI32 rp = popI32();
-        if (!storeHeap(access, rp, AnyReg(rv)))
+        if (!store(access, rp, AnyReg(rv)))
             return false;
         freeI32(rp);
         pushF64(rv);
         break;
       }
       default:
-        MOZ_CRASH("storeHeap type");
+        MOZ_CRASH("store type");
         break;
     }
     return true;
 }
 
 bool
 BaseCompiler::emitSelect()
 {
@@ -6075,28 +5852,28 @@ BaseCompiler::emitStoreWithCoercion(ValT
 
     MWasmMemoryAccess access(viewType, addr.align, addr.offset);
 
     if (resultType == ValType::F32 && viewType == Scalar::Float64) {
         RegF32 rv = popF32();
         RegF64 rw = needF64();
         masm.convertFloat32ToDouble(rv.reg, rw.reg);
         RegI32 rp = popI32();
-        if (!storeHeap(access, rp, AnyReg(rw)))
+        if (!store(access, rp, AnyReg(rw)))
             return false;
         pushF32(rv);
         freeI32(rp);
         freeF64(rw);
     }
     else if (resultType == ValType::F64 && viewType == Scalar::Float32) {
         RegF64 rv = popF64();
         RegF32 rw = needF32();
         masm.convertDoubleToFloat32(rv.reg, rw.reg);
         RegI32 rp = popI32();
-        if (!storeHeap(access, rp, AnyReg(rw)))
+        if (!store(access, rp, AnyReg(rw)))
             return false;
         pushF64(rv);
         freeI32(rp);
         freeF32(rw);
     }
     else
         MOZ_CRASH("unexpected coerced store");
 
--- a/js/src/asmjs/WasmCode.cpp
+++ b/js/src/asmjs/WasmCode.cpp
@@ -101,35 +101,44 @@ StaticallyLink(CodeSegment& cs, const Li
 
     // These constants are logically part of the code:
 
     *(double*)(cs.globalData() + NaN64GlobalDataOffset) = GenericNaN();
     *(float*)(cs.globalData() + NaN32GlobalDataOffset) = GenericNaN();
 }
 
 static void
-SpecializeToMemory(CodeSegment& cs, const Metadata& metadata, HandleWasmMemoryObject memory)
+SpecializeToMemory(uint8_t* prevMemoryBase, CodeSegment& cs, const Metadata& metadata,
+                   ArrayBufferObjectMaybeShared& buffer)
 {
-    if (!metadata.boundsChecks.empty()) {
-        uint32_t length = memory->buffer().wasmBoundsCheckLimit();
-        MOZ_RELEASE_ASSERT(length == LegalizeMapLength(length));
-        MOZ_RELEASE_ASSERT(length >= memory->buffer().wasmActualByteLength());
+#ifdef WASM_HUGE_MEMORY
+    MOZ_RELEASE_ASSERT(metadata.boundsChecks.empty());
+    MOZ_RELEASE_ASSERT(metadata.isAsmJS() || metadata.memoryAccesses.empty());
+#else
+    uint32_t limit = buffer.wasmBoundsCheckLimit();
+    MOZ_RELEASE_ASSERT(IsValidBoundsCheckImmediate(limit));
 
-        for (const BoundsCheck& check : metadata.boundsChecks)
-            Assembler::UpdateBoundsCheck(check.patchAt(cs.base()), length);
-    }
+    for (const BoundsCheck& check : metadata.boundsChecks)
+        MacroAssembler::wasmPatchBoundsCheck(check.patchAt(cs.base()), limit);
+#endif
 
 #if defined(JS_CODEGEN_X86)
-    uint8_t* base = memory->buffer().dataPointerEither().unwrap();
-    for (const MemoryAccess& access : metadata.memoryAccesses) {
-        // Patch memory pointer immediate.
-        void* addr = access.patchMemoryPtrImmAt(cs.base());
-        uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
-        MOZ_ASSERT(disp <= INT32_MAX);
-        X86Encoding::SetPointer(addr, (void*)(base + disp));
+    uint8_t* memoryBase = buffer.dataPointerEither().unwrap(/* code patching */);
+    if (prevMemoryBase != memoryBase) {
+        for (const MemoryAccess& access : metadata.memoryAccesses) {
+            void* patchAt = access.patchMemoryPtrImmAt(cs.base());
+
+            uint8_t* prevImm = (uint8_t*)X86Encoding::GetPointer(patchAt);
+            MOZ_ASSERT(prevImm >= prevMemoryBase);
+
+            uint32_t offset = prevImm - prevMemoryBase;
+            MOZ_ASSERT(offset <= INT32_MAX);
+
+            X86Encoding::SetPointer(patchAt, memoryBase + offset);
+        }
     }
 #endif
 }
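
On x86 the heap base is baked into each memory access as an absolute immediate, so after a moving grow SpecializeToMemory re-bases every patched immediate: it recovers the access's offset by subtracting the previous base and then adds the new base. A tiny worked example of that arithmetic with made-up addresses:

#include <cstdint>

int main()
{
    // Assumed addresses, purely illustrative.
    uint8_t* prevMemoryBase = reinterpret_cast<uint8_t*>(0x1000);  // old buffer base
    uint8_t* memoryBase     = reinterpret_cast<uint8_t*>(0x9000);  // buffer base after grow
    uint8_t* prevImm        = reinterpret_cast<uint8_t*>(0x1010);  // immediate read back from code

    uint32_t offset = uint32_t(prevImm - prevMemoryBase);  // 0x10 into the heap
    uint8_t* newImm = memoryBase + offset;                 // 0x9010, written back via SetPointer
    return newImm == reinterpret_cast<uint8_t*>(0x9010) ? 0 : 1;
}
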
 
 static bool
 SendCodeRangesToProfiler(JSContext* cx, CodeSegment& cs, const Bytes& bytecode,
                          const Metadata& metadata)
 {
@@ -227,17 +236,17 @@ CodeSegment::create(JSContext* cx,
     {
         JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
         AutoFlushICache afc("CodeSegment::create");
         AutoFlushICache::setRange(uintptr_t(codeBase), cs->codeLength());
 
         memcpy(codeBase, bytecode.begin(), bytecode.length());
         StaticallyLink(*cs, linkData, cx);
         if (memory)
-            SpecializeToMemory(*cs, metadata, memory);
+            SpecializeToMemory(nullptr, *cs, metadata, memory->buffer());
     }
 
     if (!ExecutableAllocator::makeExecutable(codeBase, cs->codeLength())) {
         ReportOutOfMemory(cx);
         return nullptr;
     }
 
     if (!SendCodeRangesToProfiler(cx, *cs, bytecode, metadata))
@@ -253,16 +262,26 @@ CodeSegment::~CodeSegment()
 
     MOZ_ASSERT(wasmCodeAllocations > 0);
     wasmCodeAllocations--;
 
     MOZ_ASSERT(totalLength() > 0);
     DeallocateExecutableMemory(bytes_, totalLength(), gc::SystemPageSize());
 }
 
+void
+CodeSegment::onMovingGrow(uint8_t* prevMemoryBase, const Metadata& metadata, ArrayBufferObject& buffer)
+{
+    AutoWritableJitCode awjc(base(), codeLength());
+    AutoFlushICache afc("CodeSegment::onMovingGrow");
+    AutoFlushICache::setRange(uintptr_t(base()), codeLength());
+
+    SpecializeToMemory(prevMemoryBase, *this, metadata, buffer);
+}
+
 size_t
 FuncDefExport::serializedSize() const
 {
     return sig_.serializedSize() +
            sizeof(pod);
 }
 
 uint8_t*
--- a/js/src/asmjs/WasmCode.h
+++ b/js/src/asmjs/WasmCode.h
@@ -95,16 +95,21 @@ class CodeSegment
     // enter/exit.
 
     bool containsFunctionPC(const void* pc) const {
         return pc >= base() && pc < (base() + functionCodeLength_);
     }
     bool containsCodePC(const void* pc) const {
         return pc >= base() && pc < (base() + codeLength_);
     }
+
+    // onMovingGrow must be called if the memory passed to 'create' performs a
+    // moving grow operation.
+
+    void onMovingGrow(uint8_t* prevMemoryBase, const Metadata& metadata, ArrayBufferObject& buffer);
 };
 
 // ShareableBytes is a ref-counted vector of bytes which are incrementally built
 // during compilation and then immutably shared.
 
 struct ShareableBytes : ShareableBase<ShareableBytes>
 {
     // Vector is 'final', so instead make Vector a member and add boilerplate.
@@ -512,16 +517,17 @@ class Code
     CacheableCharsVector     funcLabels_;
     bool                     profilingEnabled_;
 
   public:
     Code(UniqueCodeSegment segment,
          const Metadata& metadata,
          const ShareableBytes* maybeBytecode);
 
+    CodeSegment& segment() { return *segment_; }
     const CodeSegment& segment() const { return *segment_; }
     const Metadata& metadata() const { return *metadata_; }
 
     // Frame iterator support:
 
     const CallSite* lookupCallSite(void* returnAddress) const;
     const CodeRange* lookupRange(void* pc) const;
 #ifdef WASM_HUGE_MEMORY
--- a/js/src/asmjs/WasmInstance.cpp
+++ b/js/src/asmjs/WasmInstance.cpp
@@ -287,52 +287,16 @@ Instance::growMemory_i32(Instance* insta
 }
 
 /* static */ uint32_t
 Instance::currentMemory_i32(Instance* instance)
 {
     return instance->currentMemory();
 }
 
-uint32_t
-Instance::growMemory(uint32_t delta)
-{
-    MOZ_RELEASE_ASSERT(memory_);
-
-    // Using uint64_t to avoid worrying about overflows in safety comp.
-    uint64_t curNumPages = currentMemory();
-    uint64_t newNumPages = curNumPages + (uint64_t) delta;
-
-    if (metadata().maxMemoryLength) {
-        ArrayBufferObject &buf = memory_->buffer().as<ArrayBufferObject>();
-        // Guaranteed by instantiateMemory
-        MOZ_RELEASE_ASSERT(buf.wasmMaxSize() && buf.wasmMaxSize() <= metadata().maxMemoryLength);
-
-        if (newNumPages * wasm::PageSize > buf.wasmMaxSize().value())
-            return (uint32_t) -1;
-
-        // Try to grow the memory
-        if (!buf.growForWasm(delta))
-            return (uint32_t) -1;
-    } else {
-        return -1; // TODO: implement grow_memory w/o max when we add realloc
-    }
-
-    return curNumPages;
-}
-
-uint32_t
-Instance::currentMemory()
-{
-    MOZ_RELEASE_ASSERT(memory_);
-    uint32_t curMemByteLen = memory_->buffer().wasmActualByteLength();
-    MOZ_ASSERT(curMemByteLen % wasm::PageSize == 0);
-    return curMemByteLen / wasm::PageSize;
-}
-
 Instance::Instance(JSContext* cx,
                    Handle<WasmInstanceObject*> object,
                    UniqueCode code,
                    HandleWasmMemoryObject memory,
                    SharedTableVector&& tables,
                    Handle<FunctionVector> funcImports,
                    const ValVector& globalImports)
   : compartment_(cx->compartment()),
@@ -406,16 +370,19 @@ Instance::Instance(JSContext* cx,
 
     for (size_t i = 0; i < tables_.length(); i++)
         *addressOfTableBase(i) = tables_[i]->base();
 }
 
 bool
 Instance::init(JSContext* cx)
 {
+    if (memory_ && memory_->movingGrowable() && !memory_->addMovingGrowObserver(cx, object_))
+        return false;
+
     if (!metadata().sigIds.empty()) {
         ExclusiveData<SigIdSet>::Guard lockedSigIdSet = sigIdSet.lock();
 
         if (!lockedSigIdSet->ensureInitialized(cx))
             return false;
 
         for (const SigWithId& sig : metadata().sigIds) {
             const void* sigId;
@@ -585,16 +552,19 @@ WasmInstanceObject*
 Instance::object() const
 {
     return object_;
 }
 
 bool
 Instance::callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args)
 {
+    // If there has been a moving grow, this Instance should have been notified.
+    MOZ_RELEASE_ASSERT(!memory_ || tlsData_.memoryBase == memory_->buffer().dataPointerEither());
+
     if (!cx->compartment()->wasm.ensureProfilingState(cx))
         return false;
 
     const FuncDefExport& func = metadata().lookupFuncDefExport(funcDefIndex);
 
     // The calling convention for an external call into wasm is to pass an
     // array of 16-byte values where each value contains either a coerced int32
     // (in the low word), a double value (in the low dword) or a SIMD vector
@@ -801,16 +771,43 @@ Instance::callExport(JSContext* cx, uint
     }
 
     if (retObj)
         args.rval().set(ObjectValue(*retObj));
 
     return true;
 }
 
+uint32_t
+Instance::currentMemory()
+{
+    MOZ_RELEASE_ASSERT(memory_);
+    uint32_t byteLength = memory_->buffer().wasmActualByteLength();
+    MOZ_ASSERT(byteLength % wasm::PageSize == 0);
+    return byteLength / wasm::PageSize;
+}
+
+uint32_t
+Instance::growMemory(uint32_t delta)
+{
+    MOZ_ASSERT(!isAsmJS());
+    uint32_t ret = memory_->grow(delta);
+    MOZ_RELEASE_ASSERT(tlsData_.memoryBase == memory_->buffer().dataPointerEither());
+    return ret;
+}
+
+void
+Instance::onMovingGrow(uint8_t* prevMemoryBase)
+{
+    MOZ_ASSERT(!isAsmJS());
+    ArrayBufferObject& buffer = memory_->buffer().as<ArrayBufferObject>();
+    tlsData_.memoryBase = buffer.dataPointer();
+    code_->segment().onMovingGrow(prevMemoryBase, metadata(), buffer);
+}
+
 void
 Instance::deoptimizeImportExit(uint32_t funcImportIndex)
 {
     const FuncImport& fi = metadata().funcImports[funcImportIndex];
     FuncImportTls& import = funcImportTls(fi);
     import.code = codeBase() + fi.interpExitCodeOffset();
     import.baselineScript = nullptr;
 }
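
currentMemory() and growMemory() now deal in wasm pages: the buffer's byte length is asserted to be page-aligned and divided by wasm::PageSize, and growMemory() delegates to the memory object and then re-checks that tlsData_.memoryBase was refreshed by the onMovingGrow notification. A small sketch of the page/byte conversion, assuming the standard 64 KiB wasm page size:

#include <cassert>
#include <cstdint>

const uint32_t kPageSize = 64 * 1024;   // stands in for wasm::PageSize

uint32_t PagesFromByteLength(uint32_t byteLength)
{
    assert(byteLength % kPageSize == 0);   // memory grows in whole pages
    return byteLength / kPageSize;         // e.g. 1 MiB -> 16 pages
}
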
--- a/js/src/asmjs/WasmInstance.h
+++ b/js/src/asmjs/WasmInstance.h
@@ -53,26 +53,27 @@ class Instance
     // Import call slow paths which are called directly from wasm code.
     friend void* AddressOf(SymbolicAddress, ExclusiveContext*);
     static int32_t callImport_void(Instance*, int32_t, int32_t, uint64_t*);
     static int32_t callImport_i32(Instance*, int32_t, int32_t, uint64_t*);
     static int32_t callImport_i64(Instance*, int32_t, int32_t, uint64_t*);
     static int32_t callImport_f64(Instance*, int32_t, int32_t, uint64_t*);
     static uint32_t growMemory_i32(Instance* instance, uint32_t delta);
     static uint32_t currentMemory_i32(Instance* instance);
-
     bool callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, const uint64_t* argv,
                     MutableHandleValue rval);
-    uint32_t growMemory(uint32_t delta);
-    uint32_t currentMemory();
 
     // Only WasmInstanceObject can call the private trace function.
     friend class js::WasmInstanceObject;
     void tracePrivate(JSTracer* trc);
 
+    // Only WasmMemoryObject can call the private onMovingGrow notification.
+    friend class js::WasmMemoryObject;
+    void onMovingGrow(uint8_t* prevMemoryBase);
+
   public:
     Instance(JSContext* cx,
              HandleWasmInstanceObject object,
              UniqueCode code,
              HandleWasmMemoryObject memory,
              SharedTableVector&& tables,
              Handle<FunctionVector> funcImports,
              const ValVector& globalImports);
@@ -101,23 +102,33 @@ class Instance
 
     WasmInstanceObject* object() const;
 
     // Execute the given export given the JS call arguments, storing the return
     // value in args.rval.
 
     MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args);
 
+    // These methods implement their respective wasm operator but may also be
+    // called via the Memory JS API.
+
+    uint32_t currentMemory();
+    uint32_t growMemory(uint32_t delta);
+
     // Initially, calls to imports in wasm code call out through the generic
     // callImport method. If the imported callee gets JIT compiled and the types
     // match up, callImport will patch the code to instead call through a thunk
     // directly into the JIT code. If the JIT code is released, the Instance must
     // be notified so it can go back to the generic callImport.
 
     void deoptimizeImportExit(uint32_t funcImportIndex);
+
+    // Called by simulators to check whether accessing 'numBytes' starting at
+    // 'addr' would trigger a fault and be safely handled by signal handlers.
+
     bool memoryAccessWouldFault(uint8_t* addr, unsigned numBytes);
 
     // See Code::ensureProfilingState comment.
 
     MOZ_MUST_USE bool ensureProfilingState(JSContext* cx, bool enabled);
 
     // about:memory reporting:
 
--- a/js/src/asmjs/WasmIonCompile.cpp
+++ b/js/src/asmjs/WasmIonCompile.cpp
@@ -697,127 +697,108 @@ class FunctionCompiler
     void assign(unsigned slot, MDefinition* def)
     {
         if (inDeadCode())
             return;
         curBlock_->setSlot(info().localSlot(slot), def);
     }
 
   private:
-    // False means we're sure to be out-of-bounds after this bounds check.
-    bool maybeAddBoundsCheck(MDefinition* base, const MWasmMemoryAccess& access)
+    void checkOffsetAndBounds(MWasmMemoryAccess* access, MDefinition** base)
     {
-        if (access.offset() > uint32_t(INT32_MAX)) {
-            curBlock_->end(MWasmTrap::New(alloc(), Trap::OutOfBounds));
-            curBlock_ = nullptr;
-            return false;
+        // If the offset is bigger than the guard region, a separate instruction
+        // is necessary to add the offset to the base and check for overflow.
+        if (access->offset() >= OffsetGuardLimit || !JitOptions.wasmFoldOffsets) {
+            auto* ins = MWasmAddOffset::New(alloc(), *base, access->offset());
+            curBlock_->add(ins);
+
+            *base = ins;
+            access->clearOffset();
         }
 
 #ifndef WASM_HUGE_MEMORY
-        curBlock_->add(MWasmBoundsCheck::New(alloc(), base, access));
+        curBlock_->add(MWasmBoundsCheck::New(alloc(), *base));
 #endif
-        return true;
     }
 
-    MDefinition* loadHeapPrivate(MDefinition* base, const MWasmMemoryAccess& access,
-                                 bool isInt64 = false)
+    // Return true only for real asm.js (HEAP[i>>2]|0) accesses which have the
+    // peculiar property of not throwing on out-of-bounds. Everything else
+    // (wasm, SIMD.js, Atomics) throws on out-of-bounds.
+    bool isAsmJSAccess(const MWasmMemoryAccess& access) {
+        return mg().isAsmJS() && !access.isSimdAccess() && !access.isAtomicAccess();
+    }
+
+  public:
+    MDefinition* load(MDefinition* base, MWasmMemoryAccess access, ValType result)
     {
         if (inDeadCode())
             return nullptr;
 
         MInstruction* load = nullptr;
-        if (mg().isAsmJS()) {
-            load = MAsmJSLoadHeap::New(alloc(), base, access);
+        if (isAsmJSAccess(access)) {
+            MOZ_ASSERT(access.offset() == 0);
+            load = MAsmJSLoadHeap::New(alloc(), base, access.accessType());
         } else {
-            if (!maybeAddBoundsCheck(base, access))
-                return nullptr;
-            load = MWasmLoad::New(alloc(), base, access, isInt64);
+            checkOffsetAndBounds(&access, &base);
+            load = MWasmLoad::New(alloc(), base, access, ToMIRType(result));
         }
 
         curBlock_->add(load);
         return load;
     }
 
-    void storeHeapPrivate(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
+    void store(MDefinition* base, MWasmMemoryAccess access, MDefinition* v)
     {
         if (inDeadCode())
             return;
 
         MInstruction* store = nullptr;
-        if (mg().isAsmJS()) {
-            store = MAsmJSStoreHeap::New(alloc(), base, access, v);
+        if (isAsmJSAccess(access)) {
+            MOZ_ASSERT(access.offset() == 0);
+            store = MAsmJSStoreHeap::New(alloc(), base, access.accessType(), v);
         } else {
-            if (!maybeAddBoundsCheck(base, access))
-                return;
+            checkOffsetAndBounds(&access, &base);
             store = MWasmStore::New(alloc(), base, access, v);
         }
 
         curBlock_->add(store);
     }
 
-  public:
-    MDefinition* loadHeap(MDefinition* base, const MWasmMemoryAccess& access, bool isInt64)
-    {
-        MOZ_ASSERT(!Scalar::isSimdType(access.accessType()), "SIMD loads should use loadSimdHeap");
-        return loadHeapPrivate(base, access, isInt64);
-    }
-    MDefinition* loadSimdHeap(MDefinition* base, const MWasmMemoryAccess& access)
-    {
-        MOZ_ASSERT(Scalar::isSimdType(access.accessType()), "non-SIMD loads should use loadHeap");
-        return loadHeapPrivate(base, access);
-    }
-    MDefinition* loadAtomicHeap(MDefinition* base, const MWasmMemoryAccess& access)
-    {
-        return loadHeapPrivate(base, access);
-    }
-
-    void storeHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
-    {
-        MOZ_ASSERT(!Scalar::isSimdType(access.accessType()), "SIMD store should use storeSimdHeap");
-        storeHeapPrivate(base, access, v);
-    }
-    void storeSimdHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
-    {
-        MOZ_ASSERT(Scalar::isSimdType(access.accessType()), "non-SIMD stores should use storeHeap");
-        storeHeapPrivate(base, access, v);
-    }
-    void storeAtomicHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
-    {
-        storeHeapPrivate(base, access, v);
-    }
-
-    MDefinition* atomicCompareExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
+    MDefinition* atomicCompareExchangeHeap(MDefinition* base, MWasmMemoryAccess access,
                                            MDefinition* oldv, MDefinition* newv)
     {
         if (inDeadCode())
             return nullptr;
 
+        checkOffsetAndBounds(&access, &base);
         auto* cas = MAsmJSCompareExchangeHeap::New(alloc(), base, access, oldv, newv, tlsPointer_);
         curBlock_->add(cas);
         return cas;
     }
 
-    MDefinition* atomicExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access,
+    MDefinition* atomicExchangeHeap(MDefinition* base, MWasmMemoryAccess access,
                                     MDefinition* value)
     {
         if (inDeadCode())
             return nullptr;
 
+        checkOffsetAndBounds(&access, &base);
         auto* cas = MAsmJSAtomicExchangeHeap::New(alloc(), base, access, value, tlsPointer_);
         curBlock_->add(cas);
         return cas;
     }
 
     MDefinition* atomicBinopHeap(js::jit::AtomicOp op,
-                                 MDefinition* base, const MWasmMemoryAccess& access,
+                                 MDefinition* base, MWasmMemoryAccess access,
                                  MDefinition* v)
     {
         if (inDeadCode())
             return nullptr;
 
+        checkOffsetAndBounds(&access, &base);
         auto* binop = MAsmJSAtomicBinopHeap::New(alloc(), op, base, access, v, tlsPointer_);
         curBlock_->add(binop);
         return binop;
     }
 
     MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst, MIRType type)
     {
         if (inDeadCode())
@@ -1108,17 +1089,18 @@ class FunctionCompiler
         if (inDeadCode()) {
             *def = nullptr;
             return true;
         }
 
         CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Register);
         auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(alloc(), desc, builtin,
                                                             call.instanceArg_, call.regArgs_,
-                                                            ToMIRType(ret), call.spIncrement_);
+                                                            ToMIRType(ret), call.spIncrement_,
+                                                            call.tlsStackOffset_);
         if (!ins)
             return false;
 
         curBlock_->add(ins);
         *def = ins;
         return true;
     }
 
@@ -2346,30 +2328,30 @@ EmitSelect(FunctionCompiler& f)
 static bool
 EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType)
 {
     LinearMemoryAddress<MDefinition*> addr;
     if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr))
         return false;
 
     MWasmMemoryAccess access(viewType, addr.align, addr.offset);
-    f.iter().setResult(f.loadHeap(addr.base, access, type == ValType::I64));
+    f.iter().setResult(f.load(addr.base, access, type));
     return true;
 }
 
 static bool
 EmitStore(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
 {
     LinearMemoryAddress<MDefinition*> addr;
     MDefinition* value;
     if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value))
         return false;
 
     MWasmMemoryAccess access(viewType, addr.align, addr.offset);
-    f.storeHeap(addr.base, access, value);
+    f.store(addr.base, access, value);
     return true;
 }
 
 static bool
 EmitStoreWithCoercion(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
 {
     LinearMemoryAddress<MDefinition*> addr;
     MDefinition* value;
@@ -2379,17 +2361,17 @@ EmitStoreWithCoercion(FunctionCompiler& 
     if (resultType == ValType::F32 && viewType == Scalar::Float64)
         value = f.unary<MToDouble>(value);
     else if (resultType == ValType::F64 && viewType == Scalar::Float32)
         value = f.unary<MToFloat32>(value);
     else
         MOZ_CRASH("unexpected coerced store");
 
     MWasmMemoryAccess access(viewType, addr.align, addr.offset);
-    f.storeHeap(addr.base, access, value);
+    f.store(addr.base, access, value);
     return true;
 }
 
 static bool
 EmitUnaryMathBuiltinCall(FunctionCompiler& f, uint32_t callOffset, SymbolicAddress callee,
                          ValType operandType)
 {
     uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset);
@@ -2453,32 +2435,32 @@ EmitAtomicsLoad(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     if (!f.iter().readAtomicLoad(&addr, &viewType))
         return false;
 
     MWasmMemoryAccess access(viewType, addr.align, addr.offset, 0,
                              MembarBeforeLoad, MembarAfterLoad);
-    f.iter().setResult(f.loadAtomicHeap(addr.base, access));
+    f.iter().setResult(f.load(addr.base, access, ValType::I32));
     return true;
 }
 
 static bool
 EmitAtomicsStore(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
     Scalar::Type viewType;
     MDefinition* value;
     if (!f.iter().readAtomicStore(&addr, &viewType, &value))
         return false;
 
     MWasmMemoryAccess access(viewType, addr.align, addr.offset, 0,
                              MembarBeforeStore, MembarAfterStore);
-    f.storeAtomicHeap(addr.base, access, value);
+    f.store(addr.base, access, value);
     f.iter().setResult(value);
     return true;
 }
 
 static bool
 EmitAtomicsBinOp(FunctionCompiler& f)
 {
     LinearMemoryAddress<MDefinition*> addr;
@@ -2735,17 +2717,17 @@ EmitSimdLoad(FunctionCompiler& f, ValTyp
     if (!numElems)
         numElems = defaultNumElems;
 
     LinearMemoryAddress<MDefinition*> addr;
     if (!f.iter().readLoad(resultType, Scalar::byteSize(viewType), &addr))
         return false;
 
     MWasmMemoryAccess access(viewType, addr.align, addr.offset, numElems);
-    f.iter().setResult(f.loadSimdHeap(addr.base, access));
+    f.iter().setResult(f.load(addr.base, access, resultType));
     return true;
 }
 
 static bool
 EmitSimdStore(FunctionCompiler& f, ValType resultType, unsigned numElems)
 {
     unsigned defaultNumElems;
     Scalar::Type viewType = SimdExprTypeToViewType(resultType, &defaultNumElems);
@@ -2754,17 +2736,17 @@ EmitSimdStore(FunctionCompiler& f, ValTy
         numElems = defaultNumElems;
 
     LinearMemoryAddress<MDefinition*> addr;
     MDefinition* value;
     if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value))
         return false;
 
     MWasmMemoryAccess access(viewType, addr.align, addr.offset, numElems);
-    f.storeSimdHeap(addr.base, access, value);
+    f.store(addr.base, access, value);
     return true;
 }
 
 static bool
 EmitSimdSelect(FunctionCompiler& f, ValType simdType)
 {
     MDefinition* trueValue;
     MDefinition* falseValue;
@@ -2934,26 +2916,22 @@ EmitSimdOp(FunctionCompiler& f, ValType 
       case SimdOperation::Fn_shuffle:
         return EmitSimdShuffle(f, type);
       case SimdOperation::Fn_load:
         return EmitSimdLoad(f, type, 0);
       case SimdOperation::Fn_load1:
         return EmitSimdLoad(f, type, 1);
       case SimdOperation::Fn_load2:
         return EmitSimdLoad(f, type, 2);
-      case SimdOperation::Fn_load3:
-        return EmitSimdLoad(f, type, 3);
       case SimdOperation::Fn_store:
         return EmitSimdStore(f, type, 0);
       case SimdOperation::Fn_store1:
         return EmitSimdStore(f, type, 1);
       case SimdOperation::Fn_store2:
         return EmitSimdStore(f, type, 2);
-      case SimdOperation::Fn_store3:
-        return EmitSimdStore(f, type, 3);
       case SimdOperation::Fn_allTrue:
         return EmitSimdAllTrue(f, type);
       case SimdOperation::Fn_anyTrue:
         return EmitSimdAnyTrue(f, type);
       case SimdOperation::Fn_abs:
       case SimdOperation::Fn_neg:
       case SimdOperation::Fn_not:
       case SimdOperation::Fn_sqrt:
@@ -2997,16 +2975,18 @@ EmitSimdOp(FunctionCompiler& f, ValType 
       case SimdOperation::Fn_fromUint16x8Bits:
       case SimdOperation::Fn_fromInt16x8Bits:
         return EmitSimdBitcast(f, ValType::I16x8, type);
       case SimdOperation::Fn_fromInt32x4Bits:
       case SimdOperation::Fn_fromUint32x4Bits:
         return EmitSimdBitcast(f, ValType::I32x4, type);
       case SimdOperation::Fn_fromFloat32x4Bits:
         return EmitSimdBitcast(f, ValType::F32x4, type);
+      case SimdOperation::Fn_load3:
+      case SimdOperation::Fn_store3:
       case SimdOperation::Fn_fromFloat64x2Bits:
         MOZ_CRASH("NYI");
     }
     MOZ_CRASH("unexpected opcode");
 }
 
 static bool
 EmitGrowMemory(FunctionCompiler& f, uint32_t callOffset)
@@ -3022,17 +3002,20 @@ EmitGrowMemory(FunctionCompiler& f, uint
 
     MDefinition* delta;
     if (!f.iter().readUnary(ValType::I32, &delta))
         return false;
 
     if (!f.passArg(delta, ValType::I32, &args))
         return false;
 
-    f.finishCall(&args, PassTls::False, InterModule::False);
+    // As a short-cut, pretend this is an inter-module call so that any pinned
+    // heap pointer will be reloaded after the call. This hack will go away once
+    // we can stop pinning registers.
+    f.finishCall(&args, PassTls::True, InterModule::True);
 
     MDefinition* ret;
     if (!f.builtinInstanceMethodCall(SymbolicAddress::GrowMemory, args, ValType::I32, &ret))
         return false;
 
     f.iter().setResult(ret);
     return true;
 }
--- a/js/src/asmjs/WasmJS.cpp
+++ b/js/src/asmjs/WasmJS.cpp
@@ -13,16 +13,17 @@
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
 #include "asmjs/WasmJS.h"
 
+#include "mozilla/CheckedInt.h"
 #include "mozilla/Maybe.h"
 
 #include "asmjs/WasmCompile.h"
 #include "asmjs/WasmInstance.h"
 #include "asmjs/WasmModule.h"
 #include "asmjs/WasmSignalHandlers.h"
 #include "builtin/Promise.h"
 #include "jit/JitOptions.h"
@@ -30,16 +31,17 @@
 
 #include "jsobjinlines.h"
 
 #include "vm/NativeObject-inl.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace js::wasm;
+using mozilla::CheckedInt;
 using mozilla::Nothing;
 
 bool
 wasm::HasCompilerSupport(ExclusiveContext* cx)
 {
     if (!cx->jitSupportsFloatingPoint())
         return false;
 
@@ -705,33 +707,56 @@ wasm::ExportedFunctionToDefinitionIndex(
     MOZ_ASSERT(IsExportedFunction(fun));
     const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_FUNC_DEF_INDEX_SLOT);
     return v.toInt32();
 }
 
 // ============================================================================
 // WebAssembly.Memory class and methods
 
+const ClassOps WasmMemoryObject::classOps_ =
+{
+    nullptr, /* addProperty */
+    nullptr, /* delProperty */
+    nullptr, /* getProperty */
+    nullptr, /* setProperty */
+    nullptr, /* enumerate */
+    nullptr, /* resolve */
+    nullptr, /* mayResolve */
+    WasmMemoryObject::finalize
+};
+
 const Class WasmMemoryObject::class_ =
 {
     "WebAssembly.Memory",
     JSCLASS_DELAY_METADATA_BUILDER |
-    JSCLASS_HAS_RESERVED_SLOTS(WasmMemoryObject::RESERVED_SLOTS)
+    JSCLASS_HAS_RESERVED_SLOTS(WasmMemoryObject::RESERVED_SLOTS) |
+    JSCLASS_FOREGROUND_FINALIZE,
+    &WasmMemoryObject::classOps_
 };
 
+/* static */ void
+WasmMemoryObject::finalize(FreeOp* fop, JSObject* obj)
+{
+    WasmMemoryObject& memory = obj->as<WasmMemoryObject>();
+    if (memory.hasObservers())
+        fop->delete_(&memory.observers());
+}
+
 /* static */ WasmMemoryObject*
 WasmMemoryObject::create(ExclusiveContext* cx, HandleArrayBufferObjectMaybeShared buffer,
                          HandleObject proto)
 {
     AutoSetNewObjectMetadata metadata(cx);
     auto* obj = NewObjectWithGivenProto<WasmMemoryObject>(cx, proto);
     if (!obj)
         return nullptr;
 
     obj->initReservedSlot(BUFFER_SLOT, ObjectValue(*buffer));
+    MOZ_ASSERT(!obj->hasObservers());
     return obj;
 }
 
 /* static */ bool
 WasmMemoryObject::construct(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
 
@@ -835,16 +860,114 @@ const JSFunctionSpec WasmMemoryObject::m
 { JS_FS_END };
 
 ArrayBufferObjectMaybeShared&
 WasmMemoryObject::buffer() const
 {
     return getReservedSlot(BUFFER_SLOT).toObject().as<ArrayBufferObjectMaybeShared>();
 }
 
+bool
+WasmMemoryObject::hasObservers() const
+{
+    return !getReservedSlot(OBSERVERS_SLOT).isUndefined();
+}
+
+WasmMemoryObject::WeakInstanceSet&
+WasmMemoryObject::observers() const
+{
+    MOZ_ASSERT(hasObservers());
+    return *reinterpret_cast<WeakInstanceSet*>(getReservedSlot(OBSERVERS_SLOT).toPrivate());
+}
+
+WasmMemoryObject::WeakInstanceSet*
+WasmMemoryObject::getOrCreateObservers(JSContext* cx)
+{
+    if (!hasObservers()) {
+        auto observers = MakeUnique<WeakInstanceSet>(cx->zone(), InstanceSet());
+        if (!observers || !observers->init()) {
+            ReportOutOfMemory(cx);
+            return nullptr;
+        }
+
+        setReservedSlot(OBSERVERS_SLOT, PrivateValue(observers.release()));
+    }
+
+    return &observers();
+}
+
+bool
+WasmMemoryObject::movingGrowable() const
+{
+#ifdef WASM_HUGE_MEMORY
+    return false;
+#else
+    return !buffer().wasmMaxSize();
+#endif
+}
+
+bool
+WasmMemoryObject::addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance)
+{
+    MOZ_ASSERT(movingGrowable());
+
+    WeakInstanceSet* observers = getOrCreateObservers(cx);
+    if (!observers)
+        return false;
+
+    if (!observers->putNew(instance)) {
+        ReportOutOfMemory(cx);
+        return false;
+    }
+
+    return true;
+}
+
+uint32_t
+WasmMemoryObject::grow(uint32_t delta)
+{
+    ArrayBufferObject& buf = buffer().as<ArrayBufferObject>();
+
+    MOZ_ASSERT(buf.wasmActualByteLength() % PageSize == 0);
+    uint32_t oldNumPages = buf.wasmActualByteLength() / PageSize;
+
+    CheckedInt<uint32_t> newSize = oldNumPages;
+    newSize += delta;
+    newSize *= PageSize;
+    if (!newSize.isValid())
+        return -1;
+
+    if (Maybe<uint32_t> maxSize = buf.wasmMaxSize()) {
+        if (newSize.value() > maxSize.value())
+            return -1;
+
+        if (!buf.wasmGrowToSizeInPlace(newSize.value()))
+            return -1;
+    } else {
+#ifdef WASM_HUGE_MEMORY
+        if (!buf.wasmGrowToSizeInPlace(newSize.value()))
+            return -1;
+#else
+        MOZ_ASSERT(movingGrowable());
+
+        uint8_t* prevMemoryBase = buf.dataPointer();
+
+        if (!buf.wasmMovingGrowToSize(newSize.value()))
+            return -1;
+
+        if (hasObservers()) {
+            for (InstanceSet::Range r = observers().all(); !r.empty(); r.popFront())
+                r.front()->instance().onMovingGrow(prevMemoryBase);
+        }
+#endif
+    }
+
+    return oldNumPages;
+}
+
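The page arithmetic in grow() leans on mozilla::CheckedInt so that a byte size that overflows uint32_t is rejected rather than silently wrapping. A minimal sketch of that pattern (hypothetical standalone helper, illustrative only):

    #include "mozilla/CheckedInt.h"
    #include <stdint.h>

    static const uint32_t PageSize = 64 * 1024;

    // Returns the grown byte length, or 0 when (oldPages + delta) * PageSize
    // does not fit in uint32_t (grow() above signals that case with -1).
    static uint32_t
    GrownByteLength(uint32_t oldPages, uint32_t delta)
    {
        mozilla::CheckedInt<uint32_t> newSize = oldPages;
        newSize += delta;       // checked add of the page counts
        newSize *= PageSize;    // checked multiply into bytes
        return newSize.isValid() ? newSize.value() : 0;
    }

    // GrownByteLength(1, 1) == 131072, while GrownByteLength(65535, 2)
    // overflows uint32_t and yields 0.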
 // ============================================================================
 // WebAssembly.Table class and methods
 
 const ClassOps WasmTableObject::classOps_ =
 {
     nullptr, /* addProperty */
     nullptr, /* delProperty */
     nullptr, /* getProperty */
--- a/js/src/asmjs/WasmJS.h
+++ b/js/src/asmjs/WasmJS.h
@@ -155,28 +155,43 @@ class WasmInstanceObject : public Native
 };
 
 // The class of WebAssembly.Memory. A WasmMemoryObject references an ArrayBuffer
 // or SharedArrayBuffer object which owns the actual memory.
 
 class WasmMemoryObject : public NativeObject
 {
     static const unsigned BUFFER_SLOT = 0;
+    static const unsigned OBSERVERS_SLOT = 1;
     static const ClassOps classOps_;
+    static void finalize(FreeOp* fop, JSObject* obj);
+
+    using InstanceSet = GCHashSet<ReadBarrieredWasmInstanceObject,
+                                  MovableCellHasher<ReadBarrieredWasmInstanceObject>,
+                                  SystemAllocPolicy>;
+    using WeakInstanceSet = JS::WeakCache<InstanceSet>;
+    bool hasObservers() const;
+    WeakInstanceSet& observers() const;
+    WeakInstanceSet* getOrCreateObservers(JSContext* cx);
+
   public:
-    static const unsigned RESERVED_SLOTS = 1;
+    static const unsigned RESERVED_SLOTS = 2;
     static const Class class_;
     static const JSPropertySpec properties[];
     static const JSFunctionSpec methods[];
     static bool construct(JSContext*, unsigned, Value*);
 
     static WasmMemoryObject* create(ExclusiveContext* cx,
                                     Handle<ArrayBufferObjectMaybeShared*> buffer,
                                     HandleObject proto);
     ArrayBufferObjectMaybeShared& buffer() const;
+
+    bool movingGrowable() const;
+    bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance);
+    uint32_t grow(uint32_t delta);
 };
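A rough sketch of how these three hooks fit together (call-site names are abbreviated and hypothetical; the real wiring lives in the instantiation and grow paths): an instance whose memory may move when grown registers itself as an observer, and a later grow() notifies every registered instance so it can pick up the new heap base.

    // At instantiation time (sketch):
    if (memoryObj->movingGrowable()) {
        if (!memoryObj->addMovingGrowObserver(cx, instanceObj))
            return false;                      // OOM already reported
    }

    // When the memory is grown (sketch):
    uint32_t oldPages = memoryObj->grow(delta);
    if (oldPages == uint32_t(-1))
        return failure();                      // overflow or past the maximum
    // If the buffer had to move, grow() has already called onMovingGrow()
    // on every observing instance.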
 
 // The class of WebAssembly.Table. A WasmTableObject holds a refcount on a
 // wasm::Table, allowing a Table to be shared between multiple Instances
 // (eventually between multiple threads).
 
 class WasmTableObject : public NativeObject
 {
--- a/js/src/asmjs/WasmModule.cpp
+++ b/js/src/asmjs/WasmModule.cpp
@@ -571,20 +571,20 @@ Module::instantiateMemory(JSContext* cx,
                            buffer->as<ArrayBufferObject>().isWasm());
 
         uint32_t actualLength = buffer->wasmActualByteLength();
         if (actualLength < declaredMin || actualLength > declaredMax.valueOr(UINT32_MAX)) {
             JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMP_SIZE, "Memory");
             return false;
         }
 
-        // For asm.js maxMemoryLength doesn't play a role since we can't grow memory.
-        // For wasm we require that either both memory and module don't specify a max size
-        // OR that the memory's max size is less than the modules.
-        if (!metadata_->isAsmJS()) {
+        if (metadata_->isAsmJS()) {
+            MOZ_ASSERT(IsValidAsmJSHeapLength(actualLength));
+            MOZ_ASSERT(actualLength == buffer->wasmMaxSize().value());
+        } else {
             Maybe<uint32_t> actualMax = buffer->as<ArrayBufferObject>().wasmMaxSize();
             if (declaredMax.isSome() != actualMax.isSome() || declaredMax < actualMax) {
                 JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMP_SIZE, "Memory");
                 return false;
             }
         }
     } else {
         MOZ_ASSERT(!metadata_->isAsmJS());
--- a/js/src/asmjs/WasmSignalHandlers.cpp
+++ b/js/src/asmjs/WasmSignalHandlers.cpp
@@ -596,21 +596,34 @@ ComputeAccessAddress(EMULATOR_CONTEXT* c
                             AddressOfGPRegisterSlot(context, address.index()));
         MOZ_ASSERT(address.scale() < 32, "address shift overflow");
         result += index * (uintptr_t(1) << address.scale());
     }
 
     return reinterpret_cast<uint8_t*>(result);
 }
 
-MOZ_COLD static uint8_t*
-EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
-                  const MemoryAccess* memoryAccess, const Instance& instance)
+MOZ_COLD static bool
+HugeMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
+                 const Instance& instance, uint8_t** ppc)
 {
     MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(pc));
+
+    // On WASM_HUGE_MEMORY platforms, wasm::MemoryAccess is only created for
+    // asm.js loads and stores since they unfortunately do not simply throw on
+    // out-of-bounds. Everything else (WebAssembly and experimental
+    // SIMD/Atomics) throws.
+
+    const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc);
+    if (!memoryAccess) {
+        *ppc = instance.codeSegment().outOfBoundsCode();
+        return true;
+    }
+
+    MOZ_RELEASE_ASSERT(instance.isAsmJS());
     MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - instance.codeBase()));
 
     // Disassemble the instruction which caused the trap so that we can extract
     // information about it and decide what to do.
     Disassembler::HeapAccess access;
     uint8_t* end = Disassembler::DisassembleHeapAccess(pc, &access);
     const Disassembler::ComplexAddress& address = access.address();
     MOZ_RELEASE_ASSERT(end > pc);
@@ -646,50 +659,36 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
                        "Access begins outside the asm.js heap");
     MOZ_RELEASE_ASSERT(accessAddress + access.size() <= instance.memoryBase() +
                        instance.memoryMappedSize(),
                        "Access extends beyond the asm.js heap guard region");
     MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.memoryBase() +
                        instance.memoryLength(),
                        "Computed access address is not actually out of bounds");
 
-    // Wasm loads/stores don't wrap offsets at all, so hitting the guard page
-    // means we are out of bounds in any cases.
-    if (!memoryAccess->wrapOffset()) {
-        MOZ_ASSERT(memoryAccess->throwOnOOB());
-        return instance.codeSegment().outOfBoundsCode();
-    }
-
     // The basic sandbox model is that all heap accesses are a heap base
     // register plus an index, and the index is always computed with 32-bit
     // operations, so we know it can only be 4 GiB off of the heap base.
     //
     // However, we wish to support the optimization of folding immediates
     // and scaled indices into addresses, and any address arithmetic we fold
     // gets done at full pointer width, so it doesn't get properly wrapped.
-    // We support this by extending MappedSize to the greatest size that could
-    // be reached by such an unwrapped address, and then when we arrive here in
-    // the signal handler for such an access, we compute the fully wrapped
-    // address, and perform the load or store on it.
+    // We support this by extending HugeMappedSize to the greatest size that
+    // could be reached by such an unwrapped address, and then when we arrive
+    // here in the signal handler for such an access, we compute the fully
+    // wrapped address, and perform the load or store on it.
     //
     // Taking a signal is really slow, but in theory programs really shouldn't
     // be hitting this anyway.
     intptr_t unwrappedOffset = accessAddress - instance.memoryBase().unwrap(/* for value */);
     uint32_t wrappedOffset = uint32_t(unwrappedOffset);
     size_t size = access.size();
     MOZ_RELEASE_ASSERT(wrappedOffset + size > wrappedOffset);
     bool inBounds = wrappedOffset + size < instance.memoryLength();
 
-    // If this is storing Z of an XYZ, check whether X is also in bounds, so
-    // that we don't store anything before throwing.
-    MOZ_RELEASE_ASSERT(unwrappedOffset > memoryAccess->offsetWithinWholeSimdVector());
-    uint32_t wrappedBaseOffset = uint32_t(unwrappedOffset - memoryAccess->offsetWithinWholeSimdVector());
-    if (wrappedBaseOffset >= instance.memoryLength())
-        inBounds = false;
-
     if (inBounds) {
         // We now know that this is an access that is actually in bounds when
         // properly wrapped. Complete the load or store with the wrapped
         // address.
         SharedMem<uint8_t*> wrappedAddress = instance.memoryBase() + wrappedOffset;
         MOZ_RELEASE_ASSERT(wrappedAddress >= instance.memoryBase());
         MOZ_RELEASE_ASSERT(wrappedAddress + size > wrappedAddress);
         MOZ_RELEASE_ASSERT(wrappedAddress + size <= instance.memoryBase() + instance.memoryLength());
@@ -706,20 +705,16 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
           case Disassembler::HeapAccess::LoadSext64:
             MOZ_CRASH("no int64 accesses in asm.js");
           case Disassembler::HeapAccess::Unknown:
             MOZ_CRASH("Failed to disassemble instruction");
         }
     } else {
         // We now know that this is an out-of-bounds access made by an asm.js
         // load/store that we should handle.
-
-        if (memoryAccess->throwOnOOB())
-            return instance.codeSegment().outOfBoundsCode();
-
         switch (access.kind()) {
           case Disassembler::HeapAccess::Load:
           case Disassembler::HeapAccess::LoadSext32:
             // Assign the JS-defined result value to the destination register
             // (ToInt32(undefined) or ToNumber(undefined), determined by the
             // type of the destination register). Very conveniently, we can
             // infer the type from the register class, since all SIMD accesses
             // throw on out of bounds (see above), so the only types using FP
@@ -731,17 +726,18 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont
             break;
           case Disassembler::HeapAccess::LoadSext64:
             MOZ_CRASH("no int64 accesses in asm.js");
           case Disassembler::HeapAccess::Unknown:
             MOZ_CRASH("Failed to disassemble instruction");
         }
     }
 
-    return end;
+    *ppc = end;
+    return true;
 }
 #endif // JS_CODEGEN_X64
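A concrete trace of the wraparound recovery above, with illustrative numbers:

    //   memoryLength    = 0x10000 (a 64 KiB asm.js heap)
    //   faulting access = memoryBase + 0xFFFFFFF0 + 0x20
    //                     (32-bit index 0xFFFFFFF0 plus a folded constant 0x20,
    //                      added at full pointer width, so it does not wrap)
    //   unwrappedOffset = 0x100000010, which lands in the huge guard region
    //   wrappedOffset   = uint32_t(0x100000010) = 0x10
    //   0x10 plus the access size is below memoryLength, so the handler redoes
    //   the access at memoryBase + 0x10 and resumes after the faulting
    //   instruction.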
 
 MOZ_COLD static bool
 IsHeapAccessAddress(const Instance &instance, uint8_t* faultingAddress)
 {
     size_t accessLimit = instance.memoryMappedSize();
 
@@ -798,25 +794,21 @@ HandleFault(PEXCEPTION_POINTERS exceptio
         // always the logically-faulting pc). Fortunately, we can detect this
         // case and silence the exception ourselves (the exception will
         // retrigger after the interrupt jumps back to resumePC).
         return pc == instance->codeSegment().interruptCode() &&
                instance->codeSegment().containsFunctionPC(activation->resumePC());
     }
 
 #ifdef WASM_HUGE_MEMORY
-    const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
-    if (!memoryAccess)
-        *ppc = instance->codeSegment().outOfBoundsCode();
-    else
-        *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
+    return HugeMemoryAccess(context, pc, faultingAddress, *instance, ppc);
 #else
     *ppc = instance->codeSegment().outOfBoundsCode();
+    return true;
 #endif
-    return true;
 }
 
 static LONG WINAPI
 AsmJSFaultHandler(LPEXCEPTION_POINTERS exception)
 {
     if (HandleFault(exception))
         return EXCEPTION_CONTINUE_EXECUTION;
 
@@ -929,21 +921,18 @@ HandleMachException(JSRuntime* rt, const
     uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(request.body.code[1]);
 
     // This check isn't necessary, but, since we can, check anyway to make
     // sure we aren't covering up a real bug.
     if (!IsHeapAccessAddress(*instance, faultingAddress))
         return false;
 
 #ifdef WASM_HUGE_MEMORY
-    const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
-    if (!memoryAccess)
-        *ppc = instance->codeSegment().outOfBoundsCode();
-    else
-        *ppc = EmulateHeapAccess(&context, pc, faultingAddress, memoryAccess, *instance);
+    if (!HugeMemoryAccess(&context, pc, faultingAddress, *instance, ppc))
+        return false;
 #else
     *ppc = instance->codeSegment().outOfBoundsCode();
 #endif
 
     // Update the thread state with the new pc and register values.
     kret = thread_set_state(rtThread, float_state, (thread_state_t)&context.float_, float_state_count);
     if (kret != KERN_SUCCESS)
         return false;
@@ -1148,34 +1137,28 @@ HandleFault(int signum, siginfo_t* info,
     uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(info->si_addr);
 
     // This check isn't necessary, but, since we can, check anyway to make
     // sure we aren't covering up a real bug.
     if (!IsHeapAccessAddress(*instance, faultingAddress))
         return false;
 
 #ifdef WASM_HUGE_MEMORY
-    MOZ_RELEASE_ASSERT(signal == Signal::SegFault);
-    const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
-    if (!memoryAccess)
-        *ppc = instance->codeSegment().outOfBoundsCode();
-    else
-        *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
+    return HugeMemoryAccess(context, pc, faultingAddress, *instance, ppc);
 #elif defined(JS_CODEGEN_ARM)
     MOZ_RELEASE_ASSERT(signal == Signal::BusError || signal == Signal::SegFault);
     if (signal == Signal::BusError)
         *ppc = instance->codeSegment().unalignedAccessCode();
     else
         *ppc = instance->codeSegment().outOfBoundsCode();
+    return true;
 #else
-    MOZ_RELEASE_ASSERT(signal == Signal::SegFault);
     *ppc = instance->codeSegment().outOfBoundsCode();
+    return true;
 #endif
-
-    return true;
 }
 
 static struct sigaction sPrevSEGVHandler;
 static struct sigaction sPrevSIGBUSHandler;
 
 template<Signal signal>
 static void
 AsmJSFaultHandler(int signum, siginfo_t* info, void* context)
--- a/js/src/asmjs/WasmTypes.cpp
+++ b/js/src/asmjs/WasmTypes.cpp
@@ -619,55 +619,67 @@ Assumptions::sizeOfExcludingThis(MallocS
 }
 
 //  Heap length on ARM should fit in an ARM immediate. We approximate the set
 //  of valid ARM immediates with the predicate:
 //    2^n for n in [16, 24)
 //  or
 //    2^24 * n for n >= 1.
 bool
-wasm::IsValidARMLengthImmediate(uint32_t length)
+wasm::IsValidARMImmediate(uint32_t i)
 {
-    bool valid = (IsPowerOfTwo(length) ||
-                  (length & 0x00ffffff) == 0);
+    bool valid = (IsPowerOfTwo(i) ||
+                  (i & 0x00ffffff) == 0);
 
-    MOZ_ASSERT_IF(valid, length % PageSize == 0);
+    MOZ_ASSERT_IF(valid, i % PageSize == 0);
 
     return valid;
 }
 
 uint32_t
-wasm::RoundUpToNextValidARMLengthImmediate(uint32_t length)
+wasm::RoundUpToNextValidARMImmediate(uint32_t i)
 {
-    MOZ_ASSERT(length <= 0xff000000);
+    MOZ_ASSERT(i <= 0xff000000);
+
+    if (i <= 16 * 1024 * 1024)
+        i = i ? mozilla::RoundUpPow2(i) : 0;
+    else
+        i = (i + 0x00ffffff) & ~0x00ffffff;
+
+    MOZ_ASSERT(IsValidARMImmediate(i));
 
-    if (length <= 16 * 1024 * 1024)
-        length = length ? mozilla::RoundUpPow2(length) : 0;
-    else
-        length = (length + 0x00ffffff) & ~0x00ffffff;
+    return i;
+}
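Some concrete values for the predicate and the rounding helper (illustrative only):

    //   IsValidARMImmediate(0x00800000)            == true   (2^23)
    //   IsValidARMImmediate(0x03000000)            == true   (3 * 2^24)
    //   IsValidARMImmediate(0x00500000)            == false  (5 MiB is neither form)
    //   RoundUpToNextValidARMImmediate(0x00500000) == 0x00800000  (next power of two)
    //   RoundUpToNextValidARMImmediate(0x01400000) == 0x02000000  (next multiple of 2^24)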
+
+#ifndef WASM_HUGE_MEMORY
 
-    MOZ_ASSERT(IsValidARMLengthImmediate(length));
-
-    return length;
+bool
+wasm::IsValidBoundsCheckImmediate(uint32_t i)
+{
+#ifdef JS_CODEGEN_ARM
+    return IsValidARMImmediate(i);
+#else
+    return true;
+#endif
 }
 
 size_t
-wasm::LegalizeMapLength(size_t requestedSize)
+wasm::ComputeMappedSize(uint32_t maxSize)
 {
-#ifdef WASM_HUGE_MEMORY
-    // On 64-bit platforms just give us a 4G guard region
-    return wasm::MappedSize;
-#else
-    uint32_t res = requestedSize;
+    MOZ_ASSERT(maxSize % PageSize == 0);
 
-    // On 32-bit platforms clamp down to 1GB
-    uint32_t MaxMappedSize = (1 << 30);
-    res = Min(res, MaxMappedSize);
+    // It is the bounds-check limit, not the mapped size, that gets baked into
+    // code. Thus round up the maxSize to the next valid immediate value
+    // *before* adding in the guard page.
 
 # ifdef JS_CODEGEN_ARM
-    // On Arm round so that it fits in a single instruction
-    res = RoundUpToNextValidARMLengthImmediate(res);
-    MOZ_RELEASE_ASSERT(res <= MaxMappedSize);
+    uint32_t boundsCheckLimit = RoundUpToNextValidARMImmediate(maxSize);
+# else
+    uint32_t boundsCheckLimit = maxSize;
 # endif
+    MOZ_ASSERT(IsValidBoundsCheckImmediate(boundsCheckLimit));
 
-    return res;
-#endif
+    MOZ_ASSERT(boundsCheckLimit % gc::SystemPageSize() == 0);
+    MOZ_ASSERT(GuardSize % gc::SystemPageSize() == 0);
+    return boundsCheckLimit + GuardSize;
 }
+
+#endif  // WASM_HUGE_MEMORY
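Worked example of the rounding order in ComputeMappedSize (ARM case, with GuardSize equal to one 64 KiB page; numbers are illustrative):

    //   maxSize          = 0x00500000 (5 MiB, a multiple of PageSize)
    //   boundsCheckLimit = RoundUpToNextValidARMImmediate(0x00500000) = 0x00800000
    //   mappedSize       = boundsCheckLimit + GuardSize = 0x00810000
    // On non-ARM (!WASM_HUGE_MEMORY) platforms the limit is not rounded, so the
    // result is simply maxSize + GuardSize; in both cases
    // boundsCheckLimit == mappedSize - GuardSize, as the header requires.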
--- a/js/src/asmjs/WasmTypes.h
+++ b/js/src/asmjs/WasmTypes.h
@@ -786,105 +786,16 @@ class CallSiteAndTarget : public CallSit
     static const uint32_t NOT_DEFINITION = UINT32_MAX;
 
     bool isDefinition() const { return funcDefIndex_ != NOT_DEFINITION; }
     uint32_t funcDefIndex() const { MOZ_ASSERT(isDefinition()); return funcDefIndex_; }
 };
 
 typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
 
-// Metadata for a bounds check that may need patching later.
-
-class BoundsCheck
-{
-  public:
-    BoundsCheck() = default;
-
-    explicit BoundsCheck(uint32_t cmpOffset)
-      : cmpOffset_(cmpOffset)
-    { }
-
-    uint8_t* patchAt(uint8_t* code) const { return code + cmpOffset_; }
-    void offsetBy(uint32_t offset) { cmpOffset_ += offset; }
-
-  private:
-    uint32_t cmpOffset_; // absolute offset of the comparison
-};
-
-// Summarizes a heap access made by wasm code that needs to be patched later
-// and/or looked up by the wasm signal handlers. Different architectures need
-// to know different things (x64: intruction offset, wrapping and failure
-// behavior, ARM: nothing, x86: offset of end of instruction (heap length to
-// patch is last 4 bytes of instruction)).
-#if defined(JS_CODEGEN_X86)
-class MemoryAccess
-{
-    uint32_t nextInsOffset_;
-
-  public:
-    MemoryAccess() = default;
-
-    explicit MemoryAccess(uint32_t nextInsOffset)
-      : nextInsOffset_(nextInsOffset)
-    { }
-
-    void* patchMemoryPtrImmAt(uint8_t* code) const { return code + nextInsOffset_; }
-    void offsetBy(uint32_t offset) { nextInsOffset_ += offset; }
-};
-#elif defined(JS_CODEGEN_X64)
-class MemoryAccess
-{
-    uint32_t insnOffset_;
-    uint8_t offsetWithinWholeSimdVector_; // if is this e.g. the Z of an XYZ
-    bool throwOnOOB_;                     // should we throw on OOB?
-    bool wrapOffset_;                     // should we wrap the offset on OOB?
-
-  public:
-    enum OutOfBoundsBehavior {
-        Throw,
-        CarryOn,
-    };
-    enum WrappingBehavior {
-        WrapOffset,
-        DontWrapOffset,
-    };
-
-    MemoryAccess() = default;
-
-    MemoryAccess(uint32_t insnOffset, OutOfBoundsBehavior onOOB, WrappingBehavior onWrap,
-                 uint32_t offsetWithinWholeSimdVector = 0)
-      : insnOffset_(insnOffset),
-        offsetWithinWholeSimdVector_(offsetWithinWholeSimdVector),
-        throwOnOOB_(onOOB == OutOfBoundsBehavior::Throw),
-        wrapOffset_(onWrap == WrappingBehavior::WrapOffset)
-    {
-        MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector, "fits in uint8");
-    }
-
-    uint32_t insnOffset() const { return insnOffset_; }
-    uint32_t offsetWithinWholeSimdVector() const { return offsetWithinWholeSimdVector_; }
-    bool throwOnOOB() const { return throwOnOOB_; }
-    bool wrapOffset() const { return wrapOffset_; }
-
-    void offsetBy(uint32_t offset) { insnOffset_ += offset; }
-};
-#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
-      defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
-      defined(JS_CODEGEN_NONE)
-// Nothing! We don't patch or emulate memory accesses on these platforms.
-class MemoryAccess {
-  public:
-    void offsetBy(uint32_t) { MOZ_CRASH(); }
-    uint32_t insnOffset() const { MOZ_CRASH(); }
-};
-#endif
-
-WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector)
-WASM_DECLARE_POD_VECTOR(BoundsCheck, BoundsCheckVector)
-
 // A wasm::SymbolicAddress represents a pointer to a well-known function or
 // object that is embedded in wasm code. Since wasm code is serialized and
 // later deserialized into a different address space, symbolic addresses must be
 // used for *all* pointers into the address space. The MacroAssembler records a
 // list of all SymbolicAddresses and the offsets of their use in the code for
 // later patching during static linking.
 
 enum class SymbolicAddress
@@ -1254,31 +1165,163 @@ struct ExternalTableElem
     //    WasmTableCallSigReg holds the signature id.
     void* code;
 
     // The pointer to the callee's instance's TlsData. This must be loaded into
     // WasmTlsReg before calling 'code'.
     TlsData* tls;
 };
 
-// Constants:
+// Because ARM has a fixed-width instruction encoding, ARM can only express a
+// limited subset of immediates (in a single instruction).
+
+extern bool
+IsValidARMImmediate(uint32_t i);
+
+extern uint32_t
+RoundUpToNextValidARMImmediate(uint32_t i);
 
 // The WebAssembly spec hard-codes the virtual page size to be 64KiB and
-// requires linear memory to always be a multiple of 64KiB.
+// requires the size of linear memory to always be a multiple of 64KiB.
+
 static const unsigned PageSize = 64 * 1024;
 
+// Bounds checks always compare the base of the memory access with the bounds
+// check limit. If the memory access is unaligned, this means that, even if the
+// bounds check succeeds, a few bytes of the access can extend past the end of
+// memory. To guard against this, extra space is included in the guard region to
+// catch the overflow. MaxMemoryAccessSize is a conservative approximation of
+// the maximum guard space needed to catch all unaligned overflows.
+
+static const unsigned MaxMemoryAccessSize = sizeof(Val);
+
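For example (illustrative numbers): with a bounds check limit of 0x10000, an unaligned 8-byte store at base 0xFFFD passes the check, since 0xFFFD is below the limit, yet it touches bytes 0xFFFD through 0x10004. The guard space past the limit exists so that such trailing bytes fault inside reserved address space instead of reading or writing unrelated memory.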
 #ifdef JS_CODEGEN_X64
-#define WASM_HUGE_MEMORY
-static const uint64_t Uint32Range = uint64_t(UINT32_MAX) + 1;
-static const uint64_t MappedSize = 2 * Uint32Range + PageSize;
+
+// All other code should use WASM_HUGE_MEMORY instead of JS_CODEGEN_X64 so that
+// it is easy to use the huge-mapping optimization for other 64-bit platforms in
+// the future.
+# define WASM_HUGE_MEMORY
+
+// On WASM_HUGE_MEMORY platforms, every asm.js or WebAssembly memory
+// unconditionally allocates a huge region of virtual memory of size
+// wasm::HugeMappedSize. This allows all memory resizing to work without
+// reallocation and provides enough guard space for all offsets to be folded
+// into memory accesses.
+
+static const uint64_t IndexRange = uint64_t(UINT32_MAX) + 1;
+static const uint64_t OffsetGuardLimit = uint64_t(INT32_MAX) + 1;
+static const uint64_t UnalignedGuardPage = PageSize;
+static const uint64_t HugeMappedSize = IndexRange + OffsetGuardLimit + UnalignedGuardPage;
+
+static_assert(MaxMemoryAccessSize <= UnalignedGuardPage, "rounded up to static page size");
+
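Spelled out, the per-memory reservation on these platforms is:

    //   HugeMappedSize = IndexRange + OffsetGuardLimit + UnalignedGuardPage
    //                  = 2^32       + 2^31             + 2^16
    //   i.e. 4 GiB for any 32-bit index, 2 GiB for folded constant offsets, and
    //   one extra page for unaligned overhang: roughly 6 GiB of reserved
    //   virtual address space, of which only the memory's current length is
    //   actually accessible.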
+#else // !WASM_HUGE_MEMORY
+
+// On !WASM_HUGE_MEMORY platforms:
+//  - To avoid OOM in ArrayBuffer::prepareForAsmJS, asm.js continues to use the
+//    original ArrayBuffer allocation which has no guard region at all.
+//  - For WebAssembly memories, an additional GuardSize is mapped after the
+//    accessible region of the memory to catch folded (base+offset) accesses
+//    where `offset < OffsetGuardLimit` as well as the overflow from unaligned
+//    accesses, as described above for MaxMemoryAccessSize.
+
+static const size_t OffsetGuardLimit = PageSize - MaxMemoryAccessSize;
+static const size_t GuardSize = PageSize;
+
+// Return whether the given immediate satisfies the constraints of the platform
+// (viz., on ARM, that it satisfies IsValidARMImmediate).
+
+extern bool
+IsValidBoundsCheckImmediate(uint32_t i);
+
+// For a given WebAssembly/asm.js max size, return the number of bytes to
+// map, which will necessarily be a multiple of the system page size and greater
+// than maxSize. For a returned mappedSize:
+//   boundsCheckLimit = mappedSize - GuardSize
+//   IsValidBoundsCheckImmediate(boundsCheckLimit)
+
+extern size_t
+ComputeMappedSize(uint32_t maxSize);
+
+#endif // WASM_HUGE_MEMORY
+
+// Metadata for bounds check instructions that are patched at runtime with the
+// appropriate bounds check limit. On WASM_HUGE_MEMORY platforms, no
+// BoundsCheck is created for wasm (or SIMD/Atomic) accesses: the signal
+// handler catches everything. On !WASM_HUGE_MEMORY, a BoundsCheck is created
+// for each memory access (except when statically eliminated by optimizations)
+// so that the limit can be patched in as an immediate. This requires that the
+// bounds check limit satisfies IsValidBoundsCheckImmediate.
+
+class BoundsCheck
+{
+  public:
+    BoundsCheck() = default;
+
+    explicit BoundsCheck(uint32_t cmpOffset)
+      : cmpOffset_(cmpOffset)
+    { }
+
+    uint8_t* patchAt(uint8_t* code) const { return code + cmpOffset_; }
+    void offsetBy(uint32_t offset) { cmpOffset_ += offset; }
+
+  private:
+    uint32_t cmpOffset_;
+};
+
+// Metadata for memory accesses. On WASM_HUGE_MEMORY platforms, only
+// (non-SIMD/Atomic) asm.js loads and stores create a MemoryAccess so that the
+// signal handler can implement the semantically-correct wraparound logic; the
+// rest simply redirect to the out-of-bounds stub in the signal handler. On x86,
+// the base address of memory is baked into each memory access instruction so
+// the MemoryAccess records the location of each for patching. On all other
+// platforms, no MemoryAccess is created.
+
+#ifdef WASM_HUGE_MEMORY
+class MemoryAccess
+{
+    uint32_t insnOffset_;
+
+  public:
+    MemoryAccess() = default;
+    explicit MemoryAccess(uint32_t insnOffset)
+      : insnOffset_(insnOffset)
+    {}
+
+    uint32_t insnOffset() const { return insnOffset_; }
+
+    void offsetBy(uint32_t offset) { insnOffset_ += offset; }
+};
+#elif defined(JS_CODEGEN_X86)
+class MemoryAccess
+{
+    uint32_t nextInsOffset_;
+
+  public:
+    MemoryAccess() = default;
+    explicit MemoryAccess(uint32_t nextInsOffset)
+      : nextInsOffset_(nextInsOffset)
+    { }
+
+    void* patchMemoryPtrImmAt(uint8_t* code) const { return code + nextInsOffset_; }
+    void offsetBy(uint32_t offset) { nextInsOffset_ += offset; }
+};
+#else
+class MemoryAccess {
+  public:
+    MemoryAccess() { MOZ_CRASH(); }
+    void offsetBy(uint32_t) { MOZ_CRASH(); }
+    uint32_t insnOffset() const { MOZ_CRASH(); }
+};
 #endif
 
-bool IsValidARMLengthImmediate(uint32_t length);
-uint32_t RoundUpToNextValidARMLengthImmediate(uint32_t length);
-size_t LegalizeMapLength(size_t requestedSize);
+WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector)
+WASM_DECLARE_POD_VECTOR(BoundsCheck, BoundsCheckVector)
+
+// Constants:
 
 static const unsigned NaN64GlobalDataOffset       = 0;
 static const unsigned NaN32GlobalDataOffset       = NaN64GlobalDataOffset + sizeof(double);
 static const unsigned InitialGlobalDataBytes      = NaN32GlobalDataOffset + sizeof(float);
 
 static const unsigned MaxSigs                     =        4 * 1024;
 static const unsigned MaxFuncs                    =      512 * 1024;
 static const unsigned MaxGlobals                  =        4 * 1024;
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -240,24 +240,20 @@ GetBuildConfiguration(JSContext* cx, uns
 #ifdef EXPOSE_INTL_API
     value = BooleanValue(true);
 #else
     value = BooleanValue(false);
 #endif
     if (!JS_SetProperty(cx, info, "intl-api", value))
         return false;
 
-#if defined(XP_WIN)
-    value = BooleanValue(false);
-#elif defined(SOLARIS)
+#if defined(SOLARIS)
     value = BooleanValue(false);
-#elif defined(XP_UNIX)
+#else
     value = BooleanValue(true);
-#else
-    value = BooleanValue(false);
 #endif
     if (!JS_SetProperty(cx, info, "mapped-array-buffer", value))
         return false;
 
 #ifdef MOZ_MEMORY
     value = BooleanValue(true);
 #else
     value = BooleanValue(false);
--- a/js/src/frontend/Parser.cpp
+++ b/js/src/frontend/Parser.cpp
@@ -1033,17 +1033,54 @@ Parser<ParseHandler>::tryDeclareVar(Hand
     //   { { let x; } var x; }
 
     for (ParseContext::Scope* scope = pc->innermostScope();
          scope != pc->varScope().enclosing();
          scope = scope->enclosing())
     {
         if (AddDeclaredNamePtr p = scope->lookupDeclaredNameForAdd(name)) {
             DeclarationKind declaredKind = p->value()->kind();
-            if (!DeclarationKindIsVar(declaredKind) && !DeclarationKindIsParameter(declaredKind)) {
+            if (DeclarationKindIsVar(declaredKind)) {
+                // Any vars that are redeclared as body-level functions must
+                // be recorded as body-level functions.
+                //
+                // In the case of global and eval scripts, GlobalDeclaration-
+                // Instantiation [1] and EvalDeclarationInstantiation [2]
+                // check for the declarability of global var and function
+                // bindings via CanDeclareVar [3] and CanDeclareGlobal-
+                // Function [4]. CanDeclareGlobalFunction is strictly more
+                // restrictive than CanDeclareGlobalVar, so record the more
+                // restrictive kind. These semantics are implemented in
+                // CheckCanDeclareGlobalBinding.
+                //
+                // For a var previously declared as ForOfVar, this previous
+                // DeclarationKind is used only to check whether the
+                // 'arguments' binding should be declared. Since body-level
+                // functions shadow 'arguments' [5], it is correct to alter
+                // the kind to BodyLevelFunction. See
+                // declareFunctionArgumentsObject.
+                //
+                // For a var previously declared as
+                // VarForAnnexBLexicalFunction, this previous DeclarationKind
+                // is used so that vars synthesized solely for Annex B.3.3 may
+                // be removed if an early error would occur. If a synthesized
+                // Annex B.3.3 var has the same name as a body-level function,
+                // this is not a redeclaration, and indeed, because the
+                // body-level function binds the name, this name should not be
+                // removed should a redeclaration occur in the future. Thus it
+                // is also correct to alter the kind to BodyLevelFunction.
+                //
+                // [1] ES 15.1.11
+                // [2] ES 18.2.1.3
+                // [3] ES 8.1.1.4.15
+                // [4] ES 8.1.1.4.16
+                // [5] ES 9.2.12
+                if (kind == DeclarationKind::BodyLevelFunction)
+                    p->value()->alterKind(kind);
+            } else if (!DeclarationKindIsParameter(declaredKind)) {
                 // Annex B.3.5 allows redeclaring simple (non-destructured)
                 // catch parameters with var declarations, except when it
                 // appears in a for-of.
                 bool annexB35Allowance = declaredKind == DeclarationKind::SimpleCatchParameter &&
                                          kind != DeclarationKind::ForOfVar;
 
                 // Annex B.3.3 allows redeclaring functions in the same block.
                 bool annexB33Allowance = declaredKind == DeclarationKind::LexicalFunction &&
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -275,67 +275,71 @@ size_t
 GetPageFaultCount()
 {
     PROCESS_MEMORY_COUNTERS pmc;
     if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
         return 0;
     return pmc.PageFaultCount;
 }
 
-// On Windows the minimum size for a mapping is the allocation granularity
-// (64KiB in practice), so mapping very small buffers is potentially wasteful.
 void*
 AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
 {
-    // The allocation granularity must be a whole multiple of the alignment and
-    // the caller must request an aligned offset to satisfy Windows' and the
-    // caller's alignment requirements.
+    MOZ_ASSERT(length && alignment);
+
+    // The allocation granularity and the requested offset
+    // must both be divisible by the requested alignment.
     if (allocGranularity % alignment != 0 || offset % alignment != 0)
         return nullptr;
 
-    // Make sure file exists and do sanity check for offset and size.
     HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
-    MOZ_ASSERT(hFile != INVALID_HANDLE_VALUE);
 
-    uint32_t fSizeHgh;
-    uint32_t fSizeLow = GetFileSize(hFile, LPDWORD(&fSizeHgh));
-    if (fSizeLow == INVALID_FILE_SIZE && GetLastError() != NO_ERROR)
-        return nullptr;
-
-    uint64_t fSize = (uint64_t(fSizeHgh) << 32) + fSizeLow;
-    if (offset >= size_t(fSize) || length == 0 || length > fSize - offset)
-        return nullptr;
-
-    uint64_t mapSize = length + offset;
-    HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, mapSize >> 32, mapSize, nullptr);
+    // This call will fail if the file does not exist, which is what we want.
+    HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
     if (!hMap)
         return nullptr;
 
-    // MapViewOfFile requires the offset to be a whole multiple of the
-    // allocation granularity.
-    size_t alignOffset = offset - (offset % allocGranularity);
-    size_t alignLength = length + (offset % allocGranularity);
-    void* map = MapViewOfFile(hMap, FILE_MAP_COPY, 0, alignOffset, alignLength);
+    size_t alignedOffset = offset - (offset % allocGranularity);
+    size_t alignedLength = length + (offset % allocGranularity);
+
+    DWORD offsetH = uint32_t(uint64_t(alignedOffset) >> 32);
+    DWORD offsetL = uint32_t(alignedOffset);
+
+    // If the offset or length is out of bounds, this call will fail.
+    uint8_t* map = static_cast<uint8_t*>(MapViewOfFile(hMap, FILE_MAP_COPY, offsetH,
+                                                       offsetL, alignedLength));
+
+    // This just decreases the file mapping object's internal reference count;
+    // it won't actually be destroyed until we unmap the associated view.
     CloseHandle(hMap);
+
     if (!map)
         return nullptr;
 
-    return reinterpret_cast<void*>(uintptr_t(map) + (offset - alignOffset));
+#ifdef DEBUG
+    // Zero out data before and after the desired mapping to catch errors early.
+    if (offset != alignedOffset)
+        memset(map, 0, offset - alignedOffset);
+    if (alignedLength % pageSize)
+        memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
+#endif
+
+    return map + (offset - alignedOffset);
 }
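To make the offset bookkeeping concrete (illustrative numbers, assuming the usual 64 KiB allocation granularity and an alignment of 16):

    //   offset        = 0x12340, length = 0x1000
    //   alignedOffset = 0x12340 - (0x12340 % 0x10000) = 0x10000
    //   alignedLength = 0x1000  + (0x12340 % 0x10000) = 0x3340
    //   MapViewOfFile maps file bytes [0x10000, 0x13340); the caller receives
    //   map + 0x2340, which points at the requested offset 0x12340.
    //   DeallocateMappedContent later recovers the view base by rounding the
    //   pointer back down to the allocation granularity.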
 
 void
 DeallocateMappedContent(void* p, size_t /*length*/)
 {
     if (!p)
         return;
 
     // Calculate the address originally returned by MapViewOfFile.
-    // This is required because AllocateMappedContent returns a pointer that
-    // might be offset into the view, necessitated by the requirement that the
-    // beginning of a view must be aligned with the allocation granularity.
+    // This is needed because AllocateMappedContent returns a pointer
+    // that might be offset from the view, as the beginning of a
+    // view must be aligned with the allocation granularity.
     uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
     MOZ_ALWAYS_TRUE(UnmapViewOfFile(reinterpret_cast<void*>(map)));
 }
 
 #  else // Various APIs are unavailable.
 
 void*
 MapAlignedPages(size_t size, size_t alignment)
@@ -771,71 +775,60 @@ GetPageFaultCount()
     if (err)
         return 0;
     return usage.ru_majflt;
 }
 
 void*
 AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
 {
-#define NEED_PAGE_ALIGNED 0
-    size_t pa_start; // Page aligned starting
-    size_t pa_end; // Page aligned ending
-    size_t pa_size; // Total page aligned size
-    struct stat st;
-    uint8_t* buf;
+    MOZ_ASSERT(length && alignment);
 
-    // Make sure file exists and do sanity check for offset and size.
-    if (fstat(fd, &st) < 0 || offset >= (size_t) st.st_size ||
-        length == 0 || length > (size_t) st.st_size - offset)
+    // The allocation granularity and the requested offset
+    // must both be divisible by the requested alignment.
+    if (allocGranularity % alignment != 0 || offset % alignment != 0)
         return nullptr;
 
-    // Check for minimal alignment requirement.
-#if NEED_PAGE_ALIGNED
-    alignment = std::max(alignment, pageSize);
-#endif
-    if (offset & (alignment - 1))
+    // Sanity check the offset and size, as mmap does not do this for us.
+    struct stat st;
+    if (fstat(fd, &st) || offset >= uint64_t(st.st_size) || length > uint64_t(st.st_size) - offset)
         return nullptr;
 
-    // Page aligned starting of the offset.
-    pa_start = offset & ~(pageSize - 1);
-    // Calculate page aligned ending by adding one page to the page aligned
-    // starting of data end position(offset + length - 1).
-    pa_end = ((offset + length - 1) & ~(pageSize - 1)) + pageSize;
-    pa_size = pa_end - pa_start;
+    size_t alignedOffset = offset - (offset % allocGranularity);
+    size_t alignedLength = length + (offset % allocGranularity);
 
-    // Ask for a continuous memory location.
-    buf = (uint8_t*) MapMemory(pa_size);
-    if (!buf)
+    uint8_t* map = static_cast<uint8_t*>(MapMemory(alignedLength, PROT_READ | PROT_WRITE,
+                                                   MAP_PRIVATE, fd, alignedOffset));
+    if (!map)
         return nullptr;
 
-    buf = (uint8_t*) MapMemoryAt(buf, pa_size, PROT_READ | PROT_WRITE,
-                                  MAP_PRIVATE | MAP_FIXED, fd, pa_start);
-    if (!buf)
-        return nullptr;
+#ifdef DEBUG
+    // Zero out data before and after the desired mapping to catch errors early.
+    if (offset != alignedOffset)
+        memset(map, 0, offset - alignedOffset);
+    if (alignedLength % pageSize)
+        memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
+#endif
 
-    // Reset the data before target file, which we don't need to see.
-    memset(buf, 0, offset - pa_start);
-
-    // Reset the data after target file, which we don't need to see.
-    memset(buf + (offset - pa_start) + length, 0, pa_end - (offset + length));
-
-    return buf + (offset - pa_start);
+    return map + (offset - alignedOffset);
 }
 
 void
 DeallocateMappedContent(void* p, size_t length)
 {
-    void* pa_start; // Page aligned starting
-    size_t total_size; // Total allocated size
+    if (!p)
+        return;
 
-    pa_start = (void*)(uintptr_t(p) & ~(pageSize - 1));
-    total_size = ((uintptr_t(p) + length) & ~(pageSize - 1)) + pageSize - uintptr_t(pa_start);
-    if (munmap(pa_start, total_size))
-        MOZ_ASSERT(errno == ENOMEM);
+    // Calculate the address originally returned by mmap.
+    // This is needed because AllocateMappedContent returns a pointer
+    // that might be offset from the mapping, as the beginning of a
+    // mapping must be aligned with the allocation granularity.
+    uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
+    size_t alignedLength = length + (uintptr_t(p) % allocGranularity);
+    UnmapPages(reinterpret_cast<void*>(map), alignedLength);
 }
 
 #else
 #error "Memory mapping functions are not defined for your OS."
 #endif
 
 void
 ProtectPages(void* p, size_t size)
--- a/js/src/jit-test/tests/asm.js/bug1219954.js
+++ b/js/src/jit-test/tests/asm.js/bug1219954.js
@@ -1,8 +1,9 @@
+// |jit-test| slow
 "use strict";
 
 if (!('oomTest' in this))
     quit();
 
 let g = (function() {
     "use asm";
     function f() {}
--- a/js/src/jit-test/tests/asm.js/testBug1255954.js
+++ b/js/src/jit-test/tests/asm.js/testBug1255954.js
@@ -1,8 +1,10 @@
+// |jit-test| slow
+
 const USE_ASM = '"use asm";';
 if (!('oomTest' in this))
     quit();
 function asmCompile() {
     var f = Function.apply(null, arguments);
 }
 oomTest(() => {
     try {
--- a/js/src/jit-test/tests/asm.js/testSIMD-load-store.js
+++ b/js/src/jit-test/tests/asm.js/testSIMD-load-store.js
@@ -1,13 +1,16 @@
-// |jit-test| test-also-noasmjs
+// |jit-test|
 load(libdir + "asm.js");
 load(libdir + "simd.js");
 load(libdir + "asserts.js");
 
+// Avoid pathological --ion-eager compile times due to bails in loops
+setJitCompilerOption('ion.warmup.trigger', 1000000);
+
 // Set to true to see more JS debugging spew
 const DEBUG = false;
 
 if (!isSimdAvailable() || typeof SIMD === 'undefined') {
     DEBUG && print("won't run tests as simd extensions aren't activated yet");
     quit(0);
 }
 
@@ -268,53 +271,43 @@ for (var i = 0; i < SIZE; i++)
 function MakeCodeFor(typeName) {
     return `
     "use asm";
     var type = glob.SIMD.${typeName};
     var c = type.check;
 
     var l1 = type.load1;
     var l2 = type.load2;
-    var l3 = type.load3;
 
     var s1 = type.store1;
     var s2 = type.store2;
-    var s3 = type.store3;
 
     var u8 = new glob.Uint8Array(heap);
 
     function load1(i) { i=i|0; return l1(u8, i); }
     function load2(i) { i=i|0; return l2(u8, i); }
-    function load3(i) { i=i|0; return l3(u8, i); }
 
     function loadCst1() { return l1(u8, 41 << 2); }
     function loadCst2() { return l2(u8, 41 << 2); }
-    function loadCst3() { return l3(u8, 41 << 2); }
 
     function store1(i, x) { i=i|0; x=c(x); return s1(u8, i, x); }
     function store2(i, x) { i=i|0; x=c(x); return s2(u8, i, x); }
-    function store3(i, x) { i=i|0; x=c(x); return s3(u8, i, x); }
 
     function storeCst1(x) { x=c(x); return s1(u8, 41 << 2, x); }
     function storeCst2(x) { x=c(x); return s2(u8, 41 << 2, x); }
-    function storeCst3(x) { x=c(x); return s3(u8, 41 << 2, x); }
 
     return {
         load1: load1,
         load2: load2,
-        load3: load3,
         loadCst1: loadCst1,
         loadCst2: loadCst2,
-        loadCst3: loadCst3,
         store1: store1,
         store2: store2,
-        store3: store3,
         storeCst1: storeCst1,
         storeCst2: storeCst2,
-        storeCst3: storeCst3,
     }
 `;
 }
 
 var SIZE = 0x10000;
 
 function TestPartialLoads(m, typedArray, x, y, z, w) {
     // Fill array with predictable values
@@ -324,53 +317,44 @@ function TestPartialLoads(m, typedArray,
         typedArray[i + 2] = z(i);
         typedArray[i + 3] = w(i);
     }
 
     // Test correct loads
     var i = 0, j = 0; // i in elems, j in bytes
     assertEqX4(m.load1(j), [x(i), 0, 0, 0]);
     assertEqX4(m.load2(j), [x(i), y(i), 0, 0]);
-    assertEqX4(m.load3(j), [x(i), y(i), z(i), 0]);
 
     j += 4;
     assertEqX4(m.load1(j), [y(i), 0, 0, 0]);
     assertEqX4(m.load2(j), [y(i), z(i), 0, 0]);
-    assertEqX4(m.load3(j), [y(i), z(i), w(i), 0]);
 
     j += 4;
     assertEqX4(m.load1(j), [z(i), 0, 0, 0]);
     assertEqX4(m.load2(j), [z(i), w(i), 0, 0]);
-    assertEqX4(m.load3(j), [z(i), w(i), x(i+4), 0]);
 
     j += 4;
     assertEqX4(m.load1(j), [w(i), 0, 0, 0]);
     assertEqX4(m.load2(j), [w(i), x(i+4), 0, 0]);
-    assertEqX4(m.load3(j), [w(i), x(i+4), y(i+4), 0]);
 
     j += 4;
     i += 4;
     assertEqX4(m.load1(j), [x(i), 0, 0, 0]);
     assertEqX4(m.load2(j), [x(i), y(i), 0, 0]);
-    assertEqX4(m.load3(j), [x(i), y(i), z(i), 0]);
 
     // Test loads with constant indexes (41)
     assertEqX4(m.loadCst1(), [y(40), 0, 0, 0]);
     assertEqX4(m.loadCst2(), [y(40), z(40), 0, 0]);
-    assertEqX4(m.loadCst3(), [y(40), z(40), w(40), 0]);
 
     // Test limit and OOB accesses
     assertEqX4(m.load1((SIZE - 1) << 2), [w(SIZE - 4), 0, 0, 0]);
     assertThrowsInstanceOf(() => m.load1(((SIZE - 1) << 2) + 1), RangeError);
 
     assertEqX4(m.load2((SIZE - 2) << 2), [z(SIZE - 4), w(SIZE - 4), 0, 0]);
     assertThrowsInstanceOf(() => m.load2(((SIZE - 2) << 2) + 1), RangeError);
-
-    assertEqX4(m.load3((SIZE - 3) << 2), [y(SIZE - 4), z(SIZE - 4), w(SIZE - 4), 0]);
-    assertThrowsInstanceOf(() => m.load3(((SIZE - 3) << 2) + 1), RangeError);
 }
 
 // Partial stores
 function TestPartialStores(m, typedArray, typeName, x, y, z, w) {
     var val = SIMD[typeName](x, y, z, w);
 
     function Reset() {
         for (var i = 0; i < SIZE; i++)
@@ -394,28 +378,16 @@ function TestPartialStores(m, typedArray
         CheckNotModified(0, i >> 2);
         assertEq(typedArray[i >> 2], x);
         assertEq(typedArray[(i >> 2) + 1], y);
         CheckNotModified((i >> 2) + 2, SIZE);
         typedArray[i >> 2] = (i >> 2) + 1;
         typedArray[(i >> 2) + 1] = (i >> 2) + 2;
     }
 
-    function TestStore3(i) {
-        m.store3(i, val);
-        CheckNotModified(0, i >> 2);
-        assertEq(typedArray[i >> 2], x);
-        assertEq(typedArray[(i >> 2) + 1], y);
-        assertEq(typedArray[(i >> 2) + 2], z);
-        CheckNotModified((i >> 2) + 3, SIZE);
-        typedArray[i >> 2] = (i >> 2) + 1;
-        typedArray[(i >> 2) + 1] = (i >> 2) + 2;
-        typedArray[(i >> 2) + 2] = (i >> 2) + 3;
-    }
-
     function TestOOBStore(f) {
         assertThrowsInstanceOf(f, RangeError);
         CheckNotModified(0, SIZE);
     }
 
     Reset();
 
     TestStore1(0);
@@ -435,52 +407,30 @@ function TestPartialStores(m, typedArray
     TestStore2(3 << 2);
     TestStore2(1337 << 2);
 
     var i = (SIZE - 2) << 2;
     TestStore2(i);
     TestOOBStore(() => m.store2(i + 1, val));
     TestOOBStore(() => m.store2(-1, val));
 
-    TestStore3(0);
-    TestStore3(1 << 2);
-    TestStore3(2 << 2);
-    TestStore3(3 << 2);
-    TestStore3(1337 << 2);
-
-    var i = (SIZE - 3) << 2;
-    TestStore3(i);
-    TestOOBStore(() => m.store3(i + 1, val));
-    TestOOBStore(() => m.store3(-1, val));
-    TestOOBStore(() => m.store3(-9, val));
-
     // Constant indexes (41)
     m.storeCst1(val);
     CheckNotModified(0, 41);
     assertEq(typedArray[41], x);
     CheckNotModified(42, SIZE);
     typedArray[41] = 42;
 
     m.storeCst2(val);
     CheckNotModified(0, 41);
     assertEq(typedArray[41], x);
     assertEq(typedArray[42], y);
     CheckNotModified(43, SIZE);
     typedArray[41] = 42;
     typedArray[42] = 43;
-
-    m.storeCst3(val);
-    CheckNotModified(0, 41);
-    assertEq(typedArray[41], x);
-    assertEq(typedArray[42], y);
-    assertEq(typedArray[43], z);
-    CheckNotModified(44, SIZE);
-    typedArray[41] = 42;
-    typedArray[42] = 43;
-    typedArray[43] = 44;
 }
 
 var f32 = new Float32Array(SIZE);
 var mFloat32x4 = asmLink(asmCompile('glob', 'ffi', 'heap', MakeCodeFor('Float32x4')), this, null, f32.buffer);
 
 TestPartialLoads(mFloat32x4, f32,
             (i) => i + 1,
             (i) => Math.fround(13.37),
--- a/js/src/jit-test/tests/asm.js/testSIMD.js
+++ b/js/src/jit-test/tests/asm.js/testSIMD.js
@@ -1,13 +1,16 @@
 load(libdir + "asm.js");
 load(libdir + "simd.js");
 load(libdir + "asserts.js");
 var heap = new ArrayBuffer(0x10000);
 
+// Avoid pathological --ion-eager compile times due to bails in loops
+setJitCompilerOption('ion.warmup.trigger', 1000000);
+
 // Set to true to see more JS debugging spew
 const DEBUG = false;
 
 if (!isSimdAvailable() || typeof SIMD === 'undefined') {
     DEBUG && print("won't run tests as simd extensions aren't activated yet");
     quit(0);
 }
 
--- a/js/src/jit-test/tests/asm.js/testZOOB.js
+++ b/js/src/jit-test/tests/asm.js/testZOOB.js
@@ -1,15 +1,12 @@
-// |jit-test| test-also-noasmjs
+// |jit-test|
 load(libdir + "asm.js");
 load(libdir + "asserts.js");
 
-setIonCheckGraphCoherency(false);
-setCachingEnabled(false);
-
 var ab = new ArrayBuffer(BUF_MIN);
 
 // Compute a set of interesting indices.
 indices = [0]
 for (var i of [4,1024,BUF_MIN,Math.pow(2,30),Math.pow(2,31),Math.pow(2,32),Math.pow(2,33)]) {
     for (var j of [-2,-1,0,1,2]) {
         for (var k of [1,-1])
             indices.push((i+j)*k);
@@ -99,105 +96,83 @@ function testSimdX4(ctor, shift, scale, 
     var arr = new ctor(ab);
 
     var c = asmCompile('glob', 'imp', 'b',
                        USE_ASM +
                        'var arr=new glob.' + ctor.name + '(b); ' +
                        'var SIMD_' + simdName + ' = glob.SIMD.' + simdName + '; ' +
                        'var SIMD_' + simdName + '_check = SIMD_' + simdName + '.check; ' +
                        'var SIMD_' + simdName + '_load = SIMD_' + simdName + '.load; ' +
-                       'var SIMD_' + simdName + '_load3 = SIMD_' + simdName + '.load3; ' +
                        'var SIMD_' + simdName + '_load2 = SIMD_' + simdName + '.load2; ' +
                        'var SIMD_' + simdName + '_load1 = SIMD_' + simdName + '.load1; ' +
                        'var SIMD_' + simdName + '_store = SIMD_' + simdName + '.store; ' +
-                       'var SIMD_' + simdName + '_store3 = SIMD_' + simdName + '.store3; ' +
                        'var SIMD_' + simdName + '_store2 = SIMD_' + simdName + '.store2; ' +
                        'var SIMD_' + simdName + '_store1 = SIMD_' + simdName + '.store1; ' +
                        'function load(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
-                       'function load3(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load3(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
                        'function load2(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
                        'function load1(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
                        'function store(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
-                       'function store3(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store3(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
                        'function store2(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
                        'function store1(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
-                       'return { load: load, load3: load3, load2: load2, load1: load1, store: store, store3: store3, store2 : store2, store1 : store1 }');
+                       'return { load: load, load2: load2, load1: load1, store: store, store2 : store2, store1 : store1 }');
     var f = asmLink(c, this, null, ab);
 
     for (var i of indices) {
         var index = ((i<<scale)+disp)>>shift;
 
-        var v, v3, v2, v1;
-        var t = false, t3 = false, t2 = false, t1 = false;
+        var v, v2, v1;
+        var t = false, t2 = false, t1 = false;
         try { v = simdCtor.load(arr, index); }
         catch (e) {
             assertEq(e instanceof RangeError, true);
             t = true;
         }
-        try { v3 = simdCtor.load3(arr, index); }
-        catch (e) {
-            assertEq(e instanceof RangeError, true);
-            t3 = true;
-        }
         try { v2 = simdCtor.load2(arr, index); }
         catch (e) {
             assertEq(e instanceof RangeError, true);
             t2 = true;
         }
         try { v1 = simdCtor.load1(arr, index); }
         catch (e) {
             assertEq(e instanceof RangeError, true);
             t1 = true;
         }
 
         // Loads
-        var l, l3, l2, l1;
-        var r = false, r3 = false, r2 = false, r1 = false;
+        var l, l2, l1;
+        var r = false, r2 = false, r1 = false;
         try { l = f.load(i); }
         catch (e) {
             assertEq(e instanceof RangeError, true);
             r = true;
         }
-        try { l3 = f.load3(i); }
-        catch (e) {
-            assertEq(e instanceof RangeError, true);
-            r3 = true;
-        }
         try { l2 = f.load2(i); }
         catch (e) {
             assertEq(e instanceof RangeError, true);
             r2 = true;
         }
         try { l1 = f.load1(i); }
         catch (e) {
             assertEq(e instanceof RangeError, true);
             r1 = true;
         }
         assertEq(t, r);
-        assertEq(t3, r3);
         assertEq(t2, r2);
         assertEq(t1, r1);
         if (!t) assertEqX4(v, l);
-        if (!t3) assertEqX4(v3, l3);
         if (!t2) assertEqX4(v2, l2);
         if (!t1) assertEqX4(v1, l1);
 
         // Stores
         if (!t) {
             simdCtor.store(arr, index, simdCtor.neg(v));
             f.store(i, v);
             assertEqX4(simdCtor.load(arr, index), v);
         } else
             assertThrowsInstanceOf(() => f.store(i, simdCtor()), RangeError);
-        if (!t3) {
-            simdCtor.store3(arr, index, simdCtor.neg(v3));
-            f.store3(i, v3);
-            assertEqX4(simdCtor.load3(arr, index), v3);
-        } else
-            assertThrowsInstanceOf(() => f.store3(i, simdCtor()), RangeError);
         if (!t2) {
             simdCtor.store2(arr, index, simdCtor.neg(v2));
             f.store2(i, v2);
             assertEqX4(simdCtor.load2(arr, index), v2);
         } else
             assertThrowsInstanceOf(() => f.store2(i, simdCtor()), RangeError);
         if (!t1) {
             simdCtor.store1(arr, index, simdCtor.neg(v1));
@@ -234,11 +209,19 @@ test(testInt, Int8Array, 0);
 test(testInt, Uint8Array, 0);
 test(testInt, Int16Array, 1);
 test(testInt, Uint16Array, 1);
 test(testInt, Int32Array, 2);
 test(testInt, Uint32Array, 2);
 test(testFloat32, Float32Array, 2);
 test(testFloat64, Float64Array, 3);
 if (typeof SIMD !== 'undefined' && isSimdAvailable()) {
+    // Avoid pathological --ion-eager compile times due to bails in loops
+    setJitCompilerOption('ion.warmup.trigger', 1000000);
+
+    // Use a fresh ArrayBuffer so prepareForAsmJS can allocate a guard page
+    // which SIMD.js needs. Since the original ArrayBuffer was prepared for
+    // asm.js that didn't use SIMD.js, it has no guard page (on 32-bit).
+    ab = new ArrayBuffer(BUF_MIN);
+
     test(testInt32x4, Uint8Array, 0);
     test(testFloat32x4, Uint8Array, 0);
 }
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/debug/bug1300517.js
@@ -0,0 +1,12 @@
+// |jit-test| error: ReferenceError
+g = newGlobal();
+g.log *= "";
+Debugger(g).onDebuggerStatement = frame => frame.eval("log += this.Math.toString();");
+let forceException = g.eval(`
+    (class extends class {} {
+        constructor() {
+            debugger;
+        }
+    })
+`);
+new forceException;
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/debug/bug1300528.js
@@ -0,0 +1,34 @@
+load(libdir + "asserts.js");
+
+if (helperThreadCount() === 0)
+  quit(0);
+
+function BigInteger(a, b, c) {}
+function montConvert(x) {
+    var r = new BigInteger(null);
+    return r;
+}
+var ba = new Array();
+a = new BigInteger(ba);
+g = montConvert(a);
+var lfGlobal = newGlobal();
+for (lfLocal in this) {
+    if (!(lfLocal in lfGlobal)) {
+        lfGlobal[lfLocal] = this[lfLocal];
+    }
+}
+lfGlobal.offThreadCompileScript(`
+  var dbg = new Debugger(g);
+  dbg.onEnterFrame = function (frame) {
+    var frameThis = frame.this;
+  }
+`);
+lfGlobal.runOffThreadScript();
+assertThrowsInstanceOf(test, ReferenceError);
+function test() {
+    function check(fun, msg, todo) {
+        success = fun();
+    }
+    check(() => Object.getPrototypeOf(view) == Object.getPrototypeOf(simple));
+    typeof this;
+}
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/big-resize.js
@@ -0,0 +1,10 @@
+load(libdir + "wasm.js");
+
+assertEq(new WebAssembly.Instance(new WebAssembly.Module(textToBinary(`(module
+    (memory 1 32768)
+    (func $test (result i32)
+        (if (i32.eq (grow_memory (i32.const 16384)) (i32.const -1)) (return (i32.const 42)))
+        (i32.store (i32.const 1073807356) (i32.const 42))
+        (i32.load (i32.const 1073807356)))
+    (export "test" $test)
+)`))).exports.test(), 42);
--- a/js/src/jit-test/tests/wasm/memory.js
+++ b/js/src/jit-test/tests/wasm/memory.js
@@ -143,24 +143,20 @@ for (let [type, ext] of [
     ['i64', '32'],
     ['f32', ''],
     ['f64', ''],
 ])
 {
     assertErrorMessage(() => badStoreModule(type, ext), TypeError, /can't touch memory/);
 }
 
-for (var ind = 0; ind < 1; ind++) {
-    /*
-     * TODO: wasm.explicit-bounds-check option is being deprecated. We will be adding a
-     * new option that treats all offset as "non-foldable". When that is added trigger
-     * it here when ind == 1.
-    if (ind == 1)
-        setJitCompilerOption('wasm.explicit-bounds-checks', 1);
-    */
+assertEq(getJitCompilerOptions()['wasm.fold-offsets'], 1);
+
+for (var foldOffsets = 0; foldOffsets <= 1; foldOffsets++) {
+    setJitCompilerOption('wasm.fold-offsets', foldOffsets);
 
     testLoad('i32', '', 0, 0, 0, 0x03020100);
     testLoad('i32', '', 1, 0, 1, 0x04030201);
     testLoad('i32', '', 0, 4, 0, 0x07060504);
     testLoad('i32', '', 1, 3, 4, 0x07060504);
     testLoad('f32', '', 0, 0, 0, 3.820471434542632e-37);
     testLoad('f32', '', 1, 0, 1, 1.539989614439558e-36);
     testLoad('f32', '', 0, 4, 0, 1.0082513512365273e-34);
@@ -394,8 +390,10 @@ for (var ind = 0; ind < 1; ind++) {
 
             testStore('i64', '', lastValidIndex, offset, align, '0x1234567887654321');
             testStoreOOB('i64', '', lastValidIndex + 1, offset, align, '0x1234567887654321');
         }
 
         setJitCompilerOption('wasm.test-mode', 0);
     }
 }
+
+setJitCompilerOption('wasm.fold-offsets', 1);
--- a/js/src/jit-test/tests/wasm/regress/oom-eval.js
+++ b/js/src/jit-test/tests/wasm/regress/oom-eval.js
@@ -1,9 +1,9 @@
-// |jit-test| allow-oom
+// |jit-test| slow; allow-oom
 
 if (typeof oomTest !== 'function' || !wasmIsSupported()) {
     print('Missing oomTest or wasm support in wasm/regress/oom-eval');
     quit();
 }
 
 function foo() {
   var g = newGlobal();
--- a/js/src/jit-test/tests/wasm/regress/oom-init.js
+++ b/js/src/jit-test/tests/wasm/regress/oom-init.js
@@ -1,9 +1,9 @@
-// |jit-test| allow-oom
+// |jit-test| slow; allow-oom
 
 if (typeof oomTest !== 'function' || !wasmIsSupported())
     quit();
 
 Object.getOwnPropertyNames(this);
 s = newGlobal();
 evalcx("\
     /x/;\
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/resizing.js
@@ -0,0 +1,77 @@
+// |jit-test| test-also-wasm-baseline
+load(libdir + "wasm.js");
+
+const Module = WebAssembly.Module;
+const Instance = WebAssembly.Instance;
+const Table = WebAssembly.Table;
+const Memory = WebAssembly.Memory;
+
+// Test for stale heap pointers after resize
+
+// Grow directly from builtin call:
+assertEq(evalText(`(module
+    (memory 1)
+    (func $test (result i32)
+        (i32.store (i32.const 0) (i32.const 1))
+        (i32.store (i32.const 65532) (i32.const 10))
+        (grow_memory (i32.const 99))
+        (i32.store (i32.const 6553596) (i32.const 100))
+        (i32.add
+            (i32.load (i32.const 0))
+            (i32.add
+                (i32.load (i32.const 65532))
+                (i32.load (i32.const 6553596)))))
+    (export "test" $test)
+)`).exports.test(), 111);
+
+// Grow during call_import:
+var exports = evalText(`(module
+    (import $imp "a" "imp")
+    (memory 1)
+    (func $grow (grow_memory (i32.const 99)))
+    (export "grow" $grow)
+    (func $test (result i32)
+        (i32.store (i32.const 0) (i32.const 1))
+        (i32.store (i32.const 65532) (i32.const 10))
+        (call $imp)
+        (i32.store (i32.const 6553596) (i32.const 100))
+        (i32.add
+            (i32.load (i32.const 0))
+            (i32.add
+                (i32.load (i32.const 65532))
+                (i32.load (i32.const 6553596)))))
+    (export "test" $test)
+)`, {a:{imp() { exports.grow() }}}).exports;
+
+setJitCompilerOption("baseline.warmup.trigger", 2);
+setJitCompilerOption("ion.warmup.trigger", 4);
+for (var i = 0; i < 10; i++)
+    assertEq(exports.test(), 111);
+
+// Grow during call_indirect:
+var mem = new Memory({initial:1});
+var tbl = new Table({initial:1, element:"anyfunc"});
+var exports1 = evalText(`(module
+    (import "a" "mem" (memory 1))
+    (func $grow
+        (i32.store (i32.const 65532) (i32.const 10))
+        (grow_memory (i32.const 99))
+        (i32.store (i32.const 6553596) (i32.const 100)))
+    (export "grow" $grow)
+)`, {a:{mem}}).exports;
+var exports2 = evalText(`(module
+    (import "a" "tbl" (table 1))
+    (import "a" "mem" (memory 1))
+    (type $v2v (func))
+    (func $test (result i32)
+        (i32.store (i32.const 0) (i32.const 1))
+        (call_indirect $v2v (i32.const 0))
+        (i32.add
+            (i32.load (i32.const 0))
+            (i32.add
+                (i32.load (i32.const 65532))
+                (i32.load (i32.const 6553596)))))
+    (export "test" $test)
+)`, {a:{tbl, mem}}).exports;
+tbl.set(0, exports1.grow);
+assertEq(exports2.test(), 111);
--- a/js/src/jit-test/tests/wasm/spec/memory_trap.wast
+++ b/js/src/jit-test/tests/wasm/spec/memory_trap.wast
@@ -11,26 +11,26 @@
     )
 
     (export "load" $load)
     (func $load (param $i i32) (result i32)
       (i32.load (i32.add (call $addr_limit) (get_local $i)))
     )
 
     (export "grow_memory" $grow_memory)
-    (func $grow_memory (param i32)
+    (func $grow_memory (param i32) (result i32)
       (grow_memory (get_local 0))
     )
 )
 
 (assert_return (invoke "store" (i32.const -4) (i32.const 42)) (i32.const 42))
 (assert_return (invoke "load" (i32.const -4)) (i32.const 42))
 (assert_trap (invoke "store" (i32.const -3) (i32.const 13)) "out of bounds memory access")
 (assert_trap (invoke "load" (i32.const -3)) "out of bounds memory access")
 (assert_trap (invoke "store" (i32.const -2) (i32.const 13)) "out of bounds memory access")
 (assert_trap (invoke "load" (i32.const -2)) "out of bounds memory access")
 (assert_trap (invoke "store" (i32.const -1) (i32.const 13)) "out of bounds memory access")
 (assert_trap (invoke "load" (i32.const -1)) "out of bounds memory access")
 (assert_trap (invoke "store" (i32.const 0) (i32.const 13)) "out of bounds memory access")
 (assert_trap (invoke "load" (i32.const 0)) "out of bounds memory access")
 (assert_trap (invoke "store" (i32.const 0x80000000) (i32.const 13)) "out of bounds memory access")
 (assert_trap (invoke "load" (i32.const 0x80000000)) "out of bounds memory access")
-(assert_trap (invoke "grow_memory" (i32.const 0x80000000)) "memory size exceeds implementation limit")
+(assert_return (invoke "grow_memory" (i32.const 0x80000000)) (i32.const -1))
--- a/js/src/jit-test/tests/wasm/spec/memory_trap.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/memory_trap.wast.js
@@ -1,4 +1,2 @@
 // |jit-test| test-also-wasm-baseline
-// TODO current_memory opcode + traps on OOB
-quit();
 var importedArgs = ['memory_trap.wast']; load(scriptdir + '../spec.js');
--- a/js/src/jit-test/tests/wasm/spec/resizing.wast.js
+++ b/js/src/jit-test/tests/wasm/spec/resizing.wast.js
@@ -1,4 +1,2 @@
 // |jit-test| test-also-wasm-baseline
-// TODO memory resizing (you don't say)
-quit();
 var importedArgs = ['resizing.wast']; load(scriptdir + '../spec.js');
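The two spec-test re-enables above rely on the new grow_memory semantics: an oversized request returns -1 instead of trapping. A minimal standalone sketch, assuming the textToBinary helper from libdir/wasm.js used in big-resize.js (the module text here is hypothetical, not part of the patch):

load(libdir + "wasm.js");
// Growing past the declared maximum (2 pages) fails with -1 rather than trapping.
assertEq(new WebAssembly.Instance(new WebAssembly.Module(textToBinary(`(module
    (memory 1 2)
    (func $grow (result i32) (grow_memory (i32.const 4)))
    (export "grow" $grow)
)`))).exports.grow(), -1);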
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -11610,16 +11610,37 @@ CodeGenerator::visitInterruptCheck(LInte
 
 void
 CodeGenerator::visitWasmTrap(LWasmTrap* lir)
 {
     MOZ_ASSERT(gen->compilingAsmJS());
     masm.jump(wasm::JumpTarget(lir->mir()->trap()));
 }
 
+void
+CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins)
+{
+    const MWasmBoundsCheck* mir = ins->mir();
+    Register ptr = ToRegister(ins->ptr());
+
+    if (mir->isRedundant()) {
+        // For better test coverage, inject debug assertions that redundant
+        // bounds checks really are redundant.
+#ifdef DEBUG
+        Label ok;
+        masm.wasmBoundsCheck(Assembler::Below, ptr, &ok);
+        masm.assumeUnreachable("Redundant bounds check failed!");
+        masm.bind(&ok);
+#endif
+        return;
+    }
+
+    masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr, wasm::JumpTarget::OutOfBounds);
+}
+
 typedef bool (*RecompileFn)(JSContext*);
 static const VMFunction RecompileFnInfo = FunctionInfo<RecompileFn>(Recompile, "Recompile");
 
 typedef bool (*ForcedRecompileFn)(JSContext*);
 static const VMFunction ForcedRecompileFnInfo =
     FunctionInfo<ForcedRecompileFn>(ForcedRecompile, "ForcedRecompile");
 
 void
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -414,16 +414,17 @@ class CodeGenerator final : public CodeG
     void visitAssertResultV(LAssertResultV* ins);
     void visitAssertResultT(LAssertResultT* ins);
     void emitAssertResultV(const ValueOperand output, const TemporaryTypeSet* typeset);
     void emitAssertObjectOrStringResult(Register input, MIRType type, const TemporaryTypeSet* typeset);
 
     void visitInterruptCheck(LInterruptCheck* lir);
     void visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit* ins);
     void visitWasmTrap(LWasmTrap* lir);
+    void visitWasmBoundsCheck(LWasmBoundsCheck* ins);
     void visitRecompileCheck(LRecompileCheck* ins);
     void visitRotate(LRotate* ins);
 
     void visitRandom(LRandom* ins);
     void visitSignExtend(LSignExtend* ins);
 
 #ifdef DEBUG
     void emitDebugForceBailing(LInstruction* lir);
--- a/js/src/jit/EffectiveAddressAnalysis.cpp
+++ b/js/src/jit/EffectiveAddressAnalysis.cpp
@@ -95,46 +95,43 @@ AnalyzeLsh(TempAllocator& alloc, MLsh* l
     if (base->isRecoveredOnBailout())
         return;
 
     MEffectiveAddress* eaddr = MEffectiveAddress::New(alloc, base, index, scale, displacement);
     last->replaceAllUsesWith(eaddr);
     last->block()->insertAfter(last, eaddr);
 }
 
-template<typename MWasmMemoryAccessType>
+template<typename AsmJSMemoryAccess>
 bool
-EffectiveAddressAnalysis::tryAddDisplacement(MWasmMemoryAccessType* ins, int32_t o)
+EffectiveAddressAnalysis::tryAddDisplacement(AsmJSMemoryAccess* ins, int32_t o)
 {
+#ifdef WASM_HUGE_MEMORY
     // Compute the new offset. Check for overflow.
     uint32_t oldOffset = ins->offset();
     uint32_t newOffset = oldOffset + o;
     if (o < 0 ? (newOffset >= oldOffset) : (newOffset < oldOffset))
         return false;
 
-    // Compute the new offset to the end of the access. Check for overflow
-    // here also.
-    uint32_t newEnd = newOffset + ins->byteSize();
-    if (newEnd < newOffset)
-        return false;
-
-    // Determine the range of valid offsets which can be folded into this
-    // instruction and check whether our computed offset is within that range.
-    size_t range = mir_->foldableOffsetRange(ins);
-    if (size_t(newEnd) > range)
+    // The offset must ultimately be written into the offset immediate of a load
+    // or store instruction, so don't allow folding if the offset is bigger.
+    if (newOffset >= wasm::OffsetGuardLimit)
         return false;
 
     // Everything checks out. This is the new offset.
     ins->setOffset(newOffset);
     return true;
+#else
+    return false;
+#endif
 }
 
-template<typename MWasmMemoryAccessType>
+template<typename AsmJSMemoryAccess>
 void
-EffectiveAddressAnalysis::analyzeAsmHeapAccess(MWasmMemoryAccessType* ins)
+EffectiveAddressAnalysis::analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins)
 {
     MDefinition* base = ins->base();
 
     if (base->isConstant()) {
         // Look for heap[i] where i is a constant offset, and fold the offset.
         // By doing the folding now, we simplify the task of codegen; the offset
         // is always the address mode immediate. This also allows it to avoid
         // a situation where the sum of a constant pointer value and a non-zero
@@ -193,15 +190,15 @@ EffectiveAddressAnalysis::analyze()
 
             // Note that we don't check for MAsmJSCompareExchangeHeap
             // or MAsmJSAtomicBinopHeap, because the backend and the OOB
             // mechanism don't support non-zero offsets for them yet
             // (TODO bug 1254935).
             if (i->isLsh())
                 AnalyzeLsh(graph_.alloc(), i->toLsh());
             else if (i->isAsmJSLoadHeap())
-                analyzeAsmHeapAccess(i->toAsmJSLoadHeap());
+                analyzeAsmJSHeapAccess(i->toAsmJSLoadHeap());
             else if (i->isAsmJSStoreHeap())
-                analyzeAsmHeapAccess(i->toAsmJSStoreHeap());
+                analyzeAsmJSHeapAccess(i->toAsmJSStoreHeap());
         }
     }
     return true;
 }
--- a/js/src/jit/EffectiveAddressAnalysis.h
+++ b/js/src/jit/EffectiveAddressAnalysis.h
@@ -14,21 +14,21 @@ namespace jit {
 
 class MIRGraph;
 
 class EffectiveAddressAnalysis
 {
     MIRGenerator* mir_;
     MIRGraph& graph_;
 
-    template<typename MWasmMemoryAccessType>
-    MOZ_MUST_USE bool tryAddDisplacement(MWasmMemoryAccessType* ins, int32_t o);
+    template <typename AsmJSMemoryAccess>
+    MOZ_MUST_USE bool tryAddDisplacement(AsmJSMemoryAccess* ins, int32_t o);
 
-    template<typename MWasmMemoryAccessType>
-    void analyzeAsmHeapAccess(MWasmMemoryAccessType* ins);
+    template <typename AsmJSMemoryAccess>
+    void analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins);
 
   public:
     EffectiveAddressAnalysis(MIRGenerator* mir, MIRGraph& graph)
       : mir_(mir), graph_(graph)
     {}
 
     MOZ_MUST_USE bool analyze();
 };
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -219,16 +219,20 @@ DefaultJitOptions::DefaultJitOptions()
     }
 
     // Toggles whether unboxed plain objects can be created by the VM.
     SET_DEFAULT(disableUnboxedObjects, false);
 
     // Test whether wasm int64 / double NaN bits testing is enabled.
     SET_DEFAULT(wasmTestMode, false);
 
+    // Toggles the optimization whereby offsets are folded into loads and not
+    // included in the bounds check.
+    SET_DEFAULT(wasmFoldOffsets, true);
+
     // Determines whether we suppress using signal handlers
     // for interrupting jit-ed code. This is used only for testing.
     SET_DEFAULT(ionInterruptWithoutSignals, false);
 }
 
 bool
 DefaultJitOptions::isSmallFunction(JSScript* script) const
 {
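The new wasmFoldOffsets flag is exposed to the shell as the 'wasm.fold-offsets' JIT compiler option; a minimal sketch of driving it from a jit-test, assuming only the setJitCompilerOption and getJitCompilerOptions shell helpers already used in memory.js above (the loop body is hypothetical):

// Verify the default, then run the same assertions with folding off and on.
assertEq(getJitCompilerOptions()['wasm.fold-offsets'], 1);
for (var fold = 0; fold <= 1; fold++) {
    setJitCompilerOption('wasm.fold-offsets', fold);
    // ... wasm load/store assertions would go here ...
}
setJitCompilerOption('wasm.fold-offsets', 1);  // restore the default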
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -65,16 +65,17 @@ struct DefaultJitOptions
     bool disableSharedStubs;
     bool disableSincos;
     bool disableSink;
     bool eagerCompilation;
     bool forceInlineCaches;
     bool limitScriptSize;
     bool osr;
     bool wasmTestMode;
+    bool wasmFoldOffsets;
     bool ionInterruptWithoutSignals;
     uint32_t baselineWarmUpThreshold;
     uint32_t exceptionBailoutThreshold;
     uint32_t frequentBailoutThreshold;
     uint32_t maxStackArgs;
     uint32_t osrPcMismatchesBeforeRecompile;
     uint32_t smallFunctionMaxBytecodeLength_;
     uint32_t jumpThreshold;
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -4120,16 +4120,39 @@ void
 LIRGenerator::visitHasClass(MHasClass* ins)
 {
     MOZ_ASSERT(ins->object()->type() == MIRType::Object);
     MOZ_ASSERT(ins->type() == MIRType::Boolean);
     define(new(alloc()) LHasClass(useRegister(ins->object())), ins);
 }
 
 void
+LIRGenerator::visitWasmAddOffset(MWasmAddOffset* ins)
+{
+    MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+    MOZ_ASSERT(ins->type() == MIRType::Int32);
+    define(new(alloc()) LWasmAddOffset(useRegisterAtStart(ins->base())), ins);
+}
+
+void
+LIRGenerator::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
+{
+#ifndef DEBUG
+    if (ins->isRedundant())
+        return;
+#endif
+
+    MDefinition* input = ins->input();
+    MOZ_ASSERT(input->type() == MIRType::Int32);
+
+    auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(input));
+    add(lir, ins);
+}
+
+void
 LIRGenerator::visitWasmLoadGlobalVar(MWasmLoadGlobalVar* ins)
 {
     if (ins->type() == MIRType::Int64)
         defineInt64(new(alloc()) LWasmLoadGlobalVarI64, ins);
     else
         define(new(alloc()) LWasmLoadGlobalVar, ins);
 }
 
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -277,16 +277,18 @@ class LIRGenerator : public LIRGenerator
     void visitIn(MIn* ins);
     void visitInArray(MInArray* ins);
     void visitInstanceOf(MInstanceOf* ins);
     void visitCallInstanceOf(MCallInstanceOf* ins);
     void visitIsCallable(MIsCallable* ins);
     void visitIsConstructor(MIsConstructor* ins);
     void visitIsObject(MIsObject* ins);
     void visitHasClass(MHasClass* ins);
+    void visitWasmAddOffset(MWasmAddOffset* ins);
+    void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
     void visitWasmLoadGlobalVar(MWasmLoadGlobalVar* ins);
     void visitWasmStoreGlobalVar(MWasmStoreGlobalVar* ins);
     void visitAsmJSParameter(MAsmJSParameter* ins);
     void visitAsmJSReturn(MAsmJSReturn* ins);
     void visitAsmJSVoidReturn(MAsmJSVoidReturn* ins);
     void visitAsmJSPassStackArg(MAsmJSPassStackArg* ins);
     void visitWasmCall(MWasmCall* ins);
     void visitSetDOMProperty(MSetDOMProperty* ins);
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -1,16 +1,17 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
  * vim: set ts=8 sts=4 et sw=4 tw=99:
  * This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "jit/MIR.h"
 
+#include "mozilla/CheckedInt.h"
 #include "mozilla/FloatingPoint.h"
 #include "mozilla/IntegerPrintfMacros.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/SizePrintfMacros.h"
 
 #include <ctype.h>
 
 #include "jslibmath.h"
@@ -29,16 +30,17 @@
 #include "jsobjinlines.h"
 #include "jsscriptinlines.h"
 
 using namespace js;
 using namespace js::jit;
 
 using JS::ToInt32;
 
+using mozilla::CheckedInt;
 using mozilla::NumbersAreIdentical;
 using mozilla::IsFloat32Representable;
 using mozilla::IsNaN;
 using mozilla::Maybe;
 using mozilla::DebugOnly;
 
 #ifdef DEBUG
 size_t MUse::index() const
@@ -4939,16 +4941,34 @@ MDefinition*
 MLoadFixedSlotAndUnbox::foldsTo(TempAllocator& alloc)
 {
     if (MDefinition* def = foldsToStore(alloc))
         return def;
 
     return this;
 }
 
+MDefinition*
+MWasmAddOffset::foldsTo(TempAllocator& alloc)
+{
+    MDefinition* baseArg = base();
+    if (!baseArg->isConstant())
+        return this;
+
+    MOZ_ASSERT(baseArg->type() == MIRType::Int32);
+    CheckedInt<uint32_t> ptr = baseArg->toConstant()->toInt32();
+
+    ptr += offset();
+
+    if (!ptr.isValid())
+        return this;
+
+    return MConstant::New(alloc, Int32Value(ptr.value()));
+}
+
 MDefinition::AliasType
 MAsmJSLoadHeap::mightAlias(const MDefinition* def) const
 {
     if (def->isAsmJSStoreHeap()) {
         const MAsmJSStoreHeap* store = def->toAsmJSStoreHeap();
         if (store->accessType() != accessType())
             return AliasType::MayAlias;
         if (!base()->isConstant() || !store->base()->isConstant())
@@ -5420,25 +5440,26 @@ MWasmCall::New(TempAllocator& alloc, con
 
 MWasmCall*
 MWasmCall::NewBuiltinInstanceMethodCall(TempAllocator& alloc,
                                         const wasm::CallSiteDesc& desc,
                                         const wasm::SymbolicAddress builtin,
                                         const ABIArg& instanceArg,
                                         const Args& args,
                                         MIRType resultType,
-                                        uint32_t spIncrement)
+                                        uint32_t spIncrement,
+                                        uint32_t tlsStackOffset)
 {
     auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin);
     MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, resultType, spIncrement,
-                                     MWasmCall::DontSaveTls, nullptr);
-
+                                     tlsStackOffset, nullptr);
     if (!call)
         return nullptr;
-    MOZ_ASSERT(instanceArg != ABIArg()); // instanceArg must be initialized.
+
+    MOZ_ASSERT(instanceArg != ABIArg());
     call->instanceArg_ = instanceArg;
     return call;
 }
 
 void
 MSqrt::trySpecializeFloat32(TempAllocator& alloc) {
     if (!input()->canProduceFloat32() || !CheckUsesAreFloat32Consumers(this)) {
         if (input()->type() == MIRType::Float32)
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -13219,120 +13219,130 @@ class MAsmJSNeg
 
   public:
     INSTRUCTION_HEADER(AsmJSNeg)
     static MAsmJSNeg* NewAsmJS(TempAllocator& alloc, MDefinition* op, MIRType type) {
         return new(alloc) MAsmJSNeg(op, type);
     }
 };
 
+class MWasmBoundsCheck
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    bool redundant_;
+
+    explicit MWasmBoundsCheck(MDefinition* index)
+      : MUnaryInstruction(index),
+        redundant_(false)
+    {
+        setGuard(); // Effectful: throws for OOB.
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmBoundsCheck)
+    TRIVIAL_NEW_WRAPPERS
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    bool isRedundant() const {
+        return redundant_;
+    }
+
+    void setRedundant(bool val) {
+        redundant_ = val;
+    }
+};
+
+class MWasmAddOffset
+  : public MUnaryInstruction,
+    public NoTypePolicy::Data
+{
+    uint32_t offset_;
+
+    MWasmAddOffset(MDefinition* base, uint32_t offset)
+      : MUnaryInstruction(base),
+        offset_(offset)
+    {
+        setGuard();
+        setResultType(MIRType::Int32);
+    }
+
+  public:
+    INSTRUCTION_HEADER(WasmAddOffset)
+    TRIVIAL_NEW_WRAPPERS
+    NAMED_OPERANDS((0, base))
+
+    MDefinition* foldsTo(TempAllocator& alloc) override;
+
+    AliasSet getAliasSet() const override {
+        return AliasSet::None();
+    }
+
+    uint32_t offset() const {
+        return offset_;
+    }
+};
+
 class MWasmMemoryAccess
 {
     uint32_t offset_;
     uint32_t align_;
     Scalar::Type accessType_ : 8;
-    bool needsBoundsCheck_;
     unsigned numSimdElems_;
     MemoryBarrierBits barrierBefore_;
     MemoryBarrierBits barrierAfter_;
 
   public:
     explicit MWasmMemoryAccess(Scalar::Type accessType, uint32_t align, uint32_t offset,
                                unsigned numSimdElems = 0,
                                MemoryBarrierBits barrierBefore = MembarNobits,
                                MemoryBarrierBits barrierAfter = MembarNobits)
       : offset_(offset),
         align_(align),
         accessType_(accessType),
-        needsBoundsCheck_(true),
         numSimdElems_(numSimdElems),
         barrierBefore_(barrierBefore),
         barrierAfter_(barrierAfter)
     {
         MOZ_ASSERT(numSimdElems <= ScalarTypeToLength(accessType));
         MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
     }
 
     uint32_t offset() const { return offset_; }
-    uint32_t endOffset() const { return offset() + byteSize(); }
     uint32_t align() const { return align_; }
     Scalar::Type accessType() const { return accessType_; }
     unsigned byteSize() const {
         return Scalar::isSimdType(accessType())
                ? Scalar::scalarByteSize(accessType()) * numSimdElems()
                : TypedArrayElemSize(accessType());
     }
-    bool needsBoundsCheck() const { return needsBoundsCheck_; }
-    unsigned numSimdElems() const { MOZ_ASSERT(Scalar::isSimdType(accessType_)); return numSimdElems_; }
+    unsigned numSimdElems() const { return numSimdElems_; }
     MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
     MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
     bool isAtomicAccess() const { return (barrierBefore_ | barrierAfter_) != MembarNobits; }
+    bool isSimdAccess() const { return Scalar::isSimdType(accessType_); }
     bool isUnaligned() const { return align() && align() < byteSize(); }
 
-    void removeBoundsCheck() { needsBoundsCheck_ = false; }
-    void setOffset(uint32_t o) { offset_ = o; }
-};
-
-class MWasmBoundsCheck
-  : public MUnaryInstruction,
-    public MWasmMemoryAccess,
-    public NoTypePolicy::Data
-{
-    bool redundant_;
-
-    explicit MWasmBoundsCheck(MDefinition* index, const MWasmMemoryAccess& access)
-      : MUnaryInstruction(index),
-        MWasmMemoryAccess(access),
-        redundant_(false)
-    {
-        setMovable();
-        setGuard(); // Effectful: throws for OOB.
-    }
-
-  public:
-    INSTRUCTION_HEADER(WasmBoundsCheck)
-    TRIVIAL_NEW_WRAPPERS
-
-    bool congruentTo(const MDefinition* ins) const override {
-        if (!congruentIfOperandsEqual(ins))
-            return false;
-        const MWasmBoundsCheck* other = ins->toWasmBoundsCheck();
-        return accessType() == other->accessType() &&
-               offset() == other->offset() &&
-               align() == other->align();
-    }
-
-    AliasSet getAliasSet() const override {
-        return AliasSet::None();
-    }
-
-    bool isRedundant() const {
-        return redundant_;
-    }
-
-    void setRedundant(bool val) {
-        redundant_ = val;
-    }
+    void clearOffset() { offset_ = 0; }
 };
 
 class MWasmLoad
   : public MUnaryInstruction,
     public MWasmMemoryAccess,
     public NoTypePolicy::Data
 {
-    MWasmLoad(MDefinition* base, const MWasmMemoryAccess& access, bool isInt64)
+    MWasmLoad(MDefinition* base, const MWasmMemoryAccess& access, MIRType resultType)
       : MUnaryInstruction(base),
         MWasmMemoryAccess(access)
     {
         setGuard();
-        MOZ_ASSERT(access.accessType() != Scalar::Uint8Clamped, "unexpected load heap in wasm");
-        if (isInt64)
-            setResultType(MIRType::Int64);
-        else
-            setResultType(ScalarTypeToMIRType(access.accessType()));
+        setResultType(resultType);
     }
 
   public:
     INSTRUCTION_HEADER(WasmLoad)
     TRIVIAL_NEW_WRAPPERS
     NAMED_OPERANDS((0, base))
 
     AliasSet getAliasSet() const override {
@@ -13361,64 +13371,77 @@ class MWasmStore
     TRIVIAL_NEW_WRAPPERS
     NAMED_OPERANDS((0, base), (1, value))
 
     AliasSet getAliasSet() const override {
         return AliasSet::Store(AliasSet::AsmJSHeap);
     }
 };
 
+class MAsmJSMemoryAccess
+{
+    uint32_t offset_;
+    Scalar::Type accessType_;
+    bool needsBoundsCheck_;
+
+  public:
+    explicit MAsmJSMemoryAccess(Scalar::Type accessType)
+      : offset_(0),
+        accessType_(accessType),
+        needsBoundsCheck_(true)
+    {
+        MOZ_ASSERT(accessType != Scalar::Uint8Clamped);
+        MOZ_ASSERT(!Scalar::isSimdType(accessType));
+    }
+
+    uint32_t offset() const { return offset_; }
+    uint32_t endOffset() const { return offset() + byteSize(); }
+    Scalar::Type accessType() const { return accessType_; }
+    unsigned byteSize() const { return TypedArrayElemSize(accessType()); }
+    bool needsBoundsCheck() const { return needsBoundsCheck_; }
+
+    void removeBoundsCheck() { needsBoundsCheck_ = false; }
+    void setOffset(uint32_t o) { offset_ = o; }
+};
+
 class MAsmJSLoadHeap
   : public MUnaryInstruction,
-    public MWasmMemoryAccess,
+    public MAsmJSMemoryAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSLoadHeap(MDefinition* base, const MWasmMemoryAccess& access)
+    MAsmJSLoadHeap(MDefinition* base, Scalar::Type accessType)
       : MUnaryInstruction(base),
-        MWasmMemoryAccess(access)
-    {
-        if (access.barrierBefore() | access.barrierAfter())
-            setGuard(); // Not removable
-        else
-            setMovable();
-
-        MOZ_ASSERT(access.accessType() != Scalar::Uint8Clamped, "unexpected load heap in asm.js");
-        setResultType(ScalarTypeToMIRType(access.accessType()));
+        MAsmJSMemoryAccess(accessType)
+    {
+        setResultType(ScalarTypeToMIRType(accessType));
     }
 
   public:
     INSTRUCTION_HEADER(AsmJSLoadHeap)
     TRIVIAL_NEW_WRAPPERS
 
     MDefinition* base() const { return getOperand(0); }
     void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); }
 
     bool congruentTo(const MDefinition* ins) const override;
     AliasSet getAliasSet() const override {
-        // When a barrier is needed make the instruction effectful by
-        // giving it a "store" effect.
-        if (isAtomicAccess())
-            return AliasSet::Store(AliasSet::AsmJSHeap);
         return AliasSet::Load(AliasSet::AsmJSHeap);
     }
     AliasType mightAlias(const MDefinition* def) const override;
 };
 
 class MAsmJSStoreHeap
   : public MBinaryInstruction,
-    public MWasmMemoryAccess,
+    public MAsmJSMemoryAccess,
     public NoTypePolicy::Data
 {
-    MAsmJSStoreHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v)
+    MAsmJSStoreHeap(MDefinition* base, Scalar::Type accessType, MDefinition* v)
       : MBinaryInstruction(base, v),
-        MWasmMemoryAccess(access)
-    {
-        if (access.barrierBefore() | access.barrierAfter())
-            setGuard(); // Not removable
-    }
+        MAsmJSMemoryAccess(accessType)
+    {}
 
   public:
     INSTRUCTION_HEADER(AsmJSStoreHeap)
     TRIVIAL_NEW_WRAPPERS
 
     MDefinition* base() const { return getOperand(0); }
     void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); }
     MDefinition* value() const { return getOperand(1); }
@@ -13660,28 +13683,33 @@ class MWasmCall final
         AnyRegister reg;
         MDefinition* def;
         Arg(AnyRegister reg, MDefinition* def) : reg(reg), def(def) {}
     };
     typedef Vector<Arg, 8, SystemAllocPolicy> Args;
 
     static const uint32_t DontSaveTls = UINT32_MAX;
 
-    static MWasmCall* New(TempAllocator& alloc, const wasm::CallSiteDesc& desc,
-                          const wasm::CalleeDesc& callee, const Args& args, MIRType resultType,
-                          uint32_t spIncrement, uint32_t tlsStackOffset,
+    static MWasmCall* New(TempAllocator& alloc,
+                          const wasm::CallSiteDesc& desc,
+                          const wasm::CalleeDesc& callee,
+                          const Args& args,
+                          MIRType resultType,
+                          uint32_t spIncrement,
+                          uint32_t tlsStackOffset,
                           MDefinition* tableIndex = nullptr);
 
     static MWasmCall* NewBuiltinInstanceMethodCall(TempAllocator& alloc,
                                                    const wasm::CallSiteDesc& desc,
                                                    const wasm::SymbolicAddress builtin,
                                                    const ABIArg& instanceArg,
                                                    const Args& args,
                                                    MIRType resultType,
-                                                   uint32_t spIncrement);
+                                                   uint32_t spIncrement,
+                                                   uint32_t tlsStackOffset);
 
     size_t numArgs() const {
         return argRegs_.length();
     }
     AnyRegister registerForArg(size_t index) const {
         MOZ_ASSERT(index < numArgs());
         return argRegs_[index];
     }
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -209,19 +209,16 @@ class MIRGenerator
 
   public:
     AsmJSPerfSpewer& perfSpewer() { return asmJSPerfSpewer_; }
 #endif
 
   public:
     const JitCompileOptions options;
 
-    bool needsBoundsCheckBranch(const MWasmMemoryAccess* access) const;
-    size_t foldableOffsetRange(const MWasmMemoryAccess* access) const;
-
   private:
     GraphSpewer gs_;
 
   public:
     GraphSpewer& graphSpewer() {
         return gs_;
     }
 };
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -100,62 +100,16 @@ MIRGenerator::addAbortedPreliminaryGroup
         if (group == abortedPreliminaryGroups_[i])
             return;
     }
     AutoEnterOOMUnsafeRegion oomUnsafe;
     if (!abortedPreliminaryGroups_.append(group))
         oomUnsafe.crash("addAbortedPreliminaryGroup");
 }
 
-bool
-MIRGenerator::needsBoundsCheckBranch(const MWasmMemoryAccess* access) const
-{
-    // A heap access needs a bounds-check branch if we're not relying on signal
-    // handlers to catch errors, and if it's not proven to be within bounds.
-    // We use signal-handlers on x64, but on x86 there isn't enough address
-    // space for a guard region.  Also, on x64 the atomic loads and stores
-    // can't (yet) use the signal handlers.
-#ifdef WASM_HUGE_MEMORY
-    return false;
-#else
-    return access->needsBoundsCheck();
-#endif
-}
-
-size_t
-MIRGenerator::foldableOffsetRange(const MWasmMemoryAccess* access) const
-{
-    // This determines whether it's ok to fold up to WasmImmediateRange
-    // offsets, instead of just WasmCheckedImmediateRange.
-
-    static_assert(WasmCheckedImmediateRange <= WasmImmediateRange,
-                  "WasmImmediateRange should be the size of an unconstrained "
-                  "address immediate");
-
-#ifdef WASM_HUGE_MEMORY
-    static_assert(wasm::Uint32Range + WasmImmediateRange + sizeof(wasm::Val) < wasm::MappedSize,
-                  "When using signal handlers for bounds checking, a uint32 is added to the base "
-                  "address followed by an immediate in the range [0, WasmImmediateRange). An "
-                  "unaligned access (whose size is conservatively approximated by wasm::Val) may "
-                  "spill over, so ensure a space at the end.");
-    return WasmImmediateRange;
-#else
-    // On 32-bit platforms, if we've proven the access is in bounds after
-    // 32-bit wrapping, we can fold full offsets because they're added with
-    // 32-bit arithmetic.
-    if (sizeof(intptr_t) == sizeof(int32_t) && !access->needsBoundsCheck())
-        return WasmImmediateRange;
-
-    // Otherwise, only allow the checked size. This is always less than the
-    // minimum heap length, and allows explicit bounds checks to fold in the
-    // offset without overflow.
-    return WasmCheckedImmediateRange;
-#endif
-}
-
 void
 MIRGraph::addBlock(MBasicBlock* block)
 {
     MOZ_ASSERT(block);
     block->setId(blockIdGen_++);
     blocks_.pushBack(block);
     numBlocks_++;
 }
--- a/js/src/jit/MOpcodes.h
+++ b/js/src/jit/MOpcodes.h
@@ -266,16 +266,17 @@ namespace jit {
     _(GetDOMMember)                                                         \
     _(SetDOMProperty)                                                       \
     _(IsConstructor)                                                        \
     _(IsCallable)                                                           \
     _(IsObject)                                                             \
     _(HasClass)                                                             \
     _(CopySign)                                                             \
     _(WasmBoundsCheck)                                                      \
+    _(WasmAddOffset)                                                        \
     _(WasmLoad)                                                             \
     _(WasmStore)                                                            \
     _(WasmTrap)                                                             \
     _(WasmTruncateToInt32)                                                  \
     _(AsmJSNeg)                                                             \
     _(AsmJSUnsignedToDouble)                                                \
     _(AsmJSUnsignedToFloat32)                                               \
     _(AsmJSLoadHeap)                                                        \
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1035,18 +1035,18 @@ class MacroAssembler : public MacroAssem
     inline void branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
                              Label* label) PER_SHARED_ARCH;
 
     inline void branchDoubleNotInInt64Range(Address src, Register temp, Label* fail);
     inline void branchDoubleNotInUInt64Range(Address src, Register temp, Label* fail);
     inline void branchFloat32NotInInt64Range(Address src, Register temp, Label* fail);
     inline void branchFloat32NotInUInt64Range(Address src, Register temp, Label* fail);
 
-    template <typename T>
-    inline void branchAdd32(Condition cond, T src, Register dest, Label* label) PER_SHARED_ARCH;
+    template <typename T, typename L>
+    inline void branchAdd32(Condition cond, T src, Register dest, L label) PER_SHARED_ARCH;
     template <typename T>
     inline void branchSub32(Condition cond, T src, Register dest, Label* label) PER_SHARED_ARCH;
 
     inline void decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
 
     template <class L>
     inline void branchTest32(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
     template <class L>
@@ -1309,16 +1309,37 @@ class MacroAssembler : public MacroAssem
         DEFINED_ON(x86, x64);
     inline void truncateDoubleToInt64(Address src, Address dest, Register temp)
         DEFINED_ON(x86_shared);
     inline void truncateDoubleToUInt64(Address src, Address dest, Register temp,
                                        FloatRegister floatTemp)
         DEFINED_ON(x86, x64);
 
   public:
+    // ========================================================================
+    // wasm support
+
+    // Emit a bounds check against the (dynamically-patched) wasm bounds check
+    // limit, jumping to 'label' if 'cond' holds.
+    template <class L>
+    inline void wasmBoundsCheck(Condition cond, Register index, L label) PER_ARCH;
+
+    // Called after compilation completes to patch the given limit into the
+    // given instruction's immediate.
+    static inline void wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) PER_ARCH;
+
+    // On x86, each instruction adds its own wasm::MemoryAccess entries to the
+    // wasm::MemoryAccessVector (there can be multiple when i64 is involved).
+    // On x64, only some asm.js accesses need a wasm::MemoryAccess, so the
+    // caller is responsible for adding it instead.
+    void wasmLoad(Scalar::Type type, unsigned numSimdElems, Operand srcAddr, AnyRegister out) DEFINED_ON(x86, x64);
+    void wasmLoadI64(Scalar::Type type, Operand srcAddr, Register64 out) DEFINED_ON(x86, x64);
+    void wasmStore(Scalar::Type type, unsigned numSimdElems, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64);
+    void wasmStoreI64(Register64 value, Operand dstAddr) DEFINED_ON(x86);
+
     // wasm specific methods, used in both the wasm baseline compiler and ion.
     void wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64);
     void wasmTruncateDoubleToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared);
     void outOfLineWasmTruncateDoubleToInt32(FloatRegister input, bool isUnsigned, Label* rejoin) DEFINED_ON(x86_shared);
 
     void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64);
     void wasmTruncateFloat32ToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared);
     void outOfLineWasmTruncateFloat32ToInt32(FloatRegister input, bool isUnsigned, Label* rejoin) DEFINED_ON(x86_shared);
--- a/js/src/jit/WasmBCE.cpp
+++ b/js/src/jit/WasmBCE.cpp
@@ -6,95 +6,83 @@
 #include "jit/WasmBCE.h"
 #include "jit/MIRGenerator.h"
 #include "jit/MIRGraph.h"
 
 using namespace js;
 using namespace js::jit;
 using namespace mozilla;
 
-struct DefAndOffset {
-    MDefinition* loc;
-    uint32_t endOffset;
-};
-
-typedef js::HashMap<uint32_t, DefAndOffset, DefaultHasher<uint32_t>, SystemAllocPolicy>
+typedef js::HashMap<uint32_t, MDefinition*, DefaultHasher<uint32_t>, SystemAllocPolicy>
     LastSeenMap;
 
 // The Wasm Bounds Check Elimination (BCE) pass looks for bounds checks
 // on SSA values that have already been checked. (in the same block or in a
 // dominating block). These bounds checks are redundant and thus eliminated.
 //
 // Note: This is safe in the presence of dynamic memory sizes as long as they
 // can ONLY GROW. If we allow SHRINKING the heap, this pass should be
 // RECONSIDERED.
 //
 // TODO (dbounov): Are there a lot of cases where there is no single dominating
 // check, but a set of checks that together dominate a redundant check?
 //
 // TODO (dbounov): Generalize to constant additions relative to one base
-bool jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph) {
+bool
+jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph)
+{
     // Map for dominating block where a given definition was checked
     LastSeenMap lastSeen;
     if (!lastSeen.init())
         return false;
 
     for (ReversePostorderIterator bIter(graph.rpoBegin()); bIter != graph.rpoEnd(); bIter++) {
         MBasicBlock* block = *bIter;
         for (MDefinitionIterator dIter(block); dIter;) {
             MDefinition* def = *dIter++;
 
             switch (def->op()) {
               case MDefinition::Op_WasmBoundsCheck: {
                 MWasmBoundsCheck* bc = def->toWasmBoundsCheck();
                 MDefinition* addr = def->getOperand(0);
-                LastSeenMap::Ptr checkPtr = lastSeen.lookup(addr->id());
 
-                if (checkPtr &&
-                    checkPtr->value().endOffset >= bc->endOffset() &&
-                    checkPtr->value().loc->block()->dominates(block)) {
-                    // Address already checked. Discard current check
+                LastSeenMap::AddPtr checkPtr = lastSeen.lookupForAdd(addr->id());
+                if (checkPtr && checkPtr->value()->block()->dominates(block)) {
                     bc->setRedundant(true);
                 } else {
-                    DefAndOffset defOff = { def, bc->endOffset() };
-                    // Address not previously checked - remember current check
-                    if (!lastSeen.put(addr->id(), defOff))
+                    if (!lastSeen.add(checkPtr, addr->id(), def))
                         return false;
                 }
                 break;
               }
               case MDefinition::Op_Phi: {
                 MPhi* phi = def->toPhi();
                 bool phiChecked = true;
-                uint32_t off = UINT32_MAX;
 
                 MOZ_ASSERT(phi->numOperands() > 0);
 
                 // If all incoming values to a phi node are safe (i.e. have a
                 // check that dominates this block) then we can consider this
                 // phi node checked.
                 //
                 // Note that any phi that is part of a cycle
                 // will not be "safe" since the value coming on the backedge
                 // cannot be in lastSeen because its block hasn't been traversed yet.
                 for (int i = 0, nOps = phi->numOperands(); i < nOps; i++) {
                     MDefinition* src = phi->getOperand(i);
+
                     LastSeenMap::Ptr checkPtr = lastSeen.lookup(src->id());
-
-                    if (!checkPtr || !checkPtr->value().loc->block()->dominates(block)) {
+                    if (!checkPtr || !checkPtr->value()->block()->dominates(block)) {
                         phiChecked = false;
                         break;
-                    } else {
-                        off = Min(off, checkPtr->value().endOffset);
                     }
                 }
 
                 if (phiChecked) {
-                    DefAndOffset defOff = { def, off };
-                    if (!lastSeen.put(def->id(), defOff))
+                    if (!lastSeen.put(def->id(), def))
                         return false;
                 }
 
                 break;
               }
               default:
                 break;
             }
--- a/js/src/jit/arm/Architecture-arm.h
+++ b/js/src/jit/arm/Architecture-arm.h
@@ -645,18 +645,12 @@ static inline bool UseHardFpABI()
 #endif
 }
 #endif
 
 // In order to handle SoftFp ABI calls, we need to be able to express that we
 // have ABIArg which are represented by pair of general purpose registers.
 #define JS_CODEGEN_REGISTER_PAIR 1
 
-// See MIRGenerator::foldableOffsetRange for more info.
-// TODO: Implement this for ARM. Note that it requires Codegen to respect the
-// offset field of AsmJSHeapAccess.
-static const size_t WasmCheckedImmediateRange = 0;
-static const size_t WasmImmediateRange = 0;
-
 } // namespace jit
 } // namespace js
 
 #endif /* jit_arm_Architecture_arm_h */
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -3376,37 +3376,16 @@ Assembler::BailoutTableStart(uint8_t* co
 {
     Instruction* inst = (Instruction*)code;
     // Skip a pool with an artificial guard or NOP fill.
     inst = inst->skipPool();
     MOZ_ASSERT(inst->is<InstBLImm>());
     return (uint8_t*) inst;
 }
 
-void
-Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
-{
-    Instruction* inst = (Instruction*) patchAt;
-    MOZ_ASSERT(inst->is<InstCMP>());
-    InstCMP* cmp = inst->as<InstCMP>();
-
-    Register index;
-    cmp->extractOp1(&index);
-
-    MOZ_ASSERT(cmp->extractOp2().isImm8());
-
-    Imm8 imm8 = Imm8(heapLength);
-    MOZ_ASSERT(!imm8.invalid);
-
-    *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);
-    // NOTE: we don't update the Auto Flush Cache!  this function is currently
-    // only called from within ModuleGenerator::finish, which does that
-    // for us. Don't call this!
-}
-
 InstructionIterator::InstructionIterator(Instruction* i_)
   : i(i_)
 {
     // Work around pools with an artificial pool guard and around nop-fill.
     i = i->skipPool();
 }
 
 uint32_t Assembler::NopFill = 0;
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -1963,17 +1963,16 @@ class Assembler : public AssemblerShared
     static void ToggleToJmp(CodeLocationLabel inst_);
     static void ToggleToCmp(CodeLocationLabel inst_);
 
     static uint8_t* BailoutTableStart(uint8_t* code);
 
     static size_t ToggledCallSize(uint8_t* code);
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
 
-    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
     void processCodeLabels(uint8_t* rawCode);
 
     bool bailed() {
         return m_buffer.bail();
     }
 
     void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
                                      const Disassembler::HeapAccess& heapAccess)
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -2235,138 +2235,113 @@ CodeGeneratorARM::visitWasmCallI64(LWasm
 {
     emitWasmCall(ins);
 }
 
 void
 CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
 {
     const MAsmJSLoadHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+
+    const LAllocation* ptr = ins->ptr();
+
     bool isSigned;
     int size;
     bool isFloat = false;
     switch (mir->accessType()) {
       case Scalar::Int8:    isSigned = true;  size =  8; break;
       case Scalar::Uint8:   isSigned = false; size =  8; break;
       case Scalar::Int16:   isSigned = true;  size = 16; break;
       case Scalar::Uint16:  isSigned = false; size = 16; break;
       case Scalar::Int32:
       case Scalar::Uint32:  isSigned = true;  size = 32; break;
       case Scalar::Float64: isFloat = true;   size = 64; break;
       case Scalar::Float32: isFloat = true;   size = 32; break;
       default: MOZ_CRASH("unexpected array type");
     }
 
-    memoryBarrier(mir->barrierBefore());
-
-    const LAllocation* ptr = ins->ptr();
-
     if (ptr->isConstant()) {
         MOZ_ASSERT(!mir->needsBoundsCheck());
         int32_t ptrImm = ptr->toConstant()->toInt32();
         MOZ_ASSERT(ptrImm >= 0);
         if (isFloat) {
             VFPRegister vd(ToFloatRegister(ins->output()));
             if (size == 32)
                 masm.ma_vldr(Address(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always);
             else
                 masm.ma_vldr(Address(HeapReg, ptrImm), vd, Assembler::Always);
         }  else {
             masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
                                   ToRegister(ins->output()), Offset, Assembler::Always);
         }
     } else {
         Register ptrReg = ToRegister(ptr);
-        if (isFloat)
-            masm.ma_loadHeapAsmJS(ptrReg, size, mir->needsBoundsCheck(),
-                                  /*faultOnOOB=*/false, ToFloatRegister(ins->output()));
-        else
-            masm.ma_loadHeapAsmJS(ptrReg, size, isSigned, mir->needsBoundsCheck(),
-                                  mir->isAtomicAccess(), ToRegister(ins->output()));
-    }
-
-    memoryBarrier(mir->barrierAfter());
-}
-
-void
-CodeGeneratorARM::visitWasmBoundsCheck(LWasmBoundsCheck* ins)
-{
-    MWasmBoundsCheck* mir = ins->mir();
-
-    MOZ_ASSERT(mir->offset() <= INT32_MAX);
-
-    if (!mir->isRedundant()) {
-        // No guarantee that heapBase + endOffset can be properly encoded in
-        // the cmp immediate in ma_BoundsCheck, so use an explicit add instead.
-        uint32_t endOffset = mir->endOffset();
-
-        Register ptr = ToRegister(ins->ptr());
-
-        ScratchRegisterScope ptrPlusOffset(masm);
-        masm.move32(Imm32(endOffset), ptrPlusOffset);
-        masm.ma_add(ptr, ptrPlusOffset, SetCC);
-
-        // Detect unsigned overflow by checking the carry bit.
-        masm.as_b(wasm::JumpTarget::OutOfBounds, Assembler::CarrySet);
-
-        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrPlusOffset).getOffset();
-        masm.append(wasm::BoundsCheck(cmpOffset));
-        masm.as_b(wasm::JumpTarget::OutOfBounds, Assembler::Above);
-    } else {
-#ifdef DEBUG
-        Label ok1, ok2;
-        uint32_t endOffset = mir->endOffset();
-
-        Register ptr = ToRegister(ins->ptr());
-
-        ScratchRegisterScope ptrPlusOffset(masm);
-        masm.move32(Imm32(endOffset), ptrPlusOffset);
-        masm.ma_add(ptr, ptrPlusOffset, SetCC);
-
-        // Detect unsigned overflow by checking the carry bit.
-        masm.as_b(&ok1, Assembler::CarryClear);
-        masm.assumeUnreachable("Redundant bounds check failed!");
-        masm.bind(&ok1);
-
-        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrPlusOffset).getOffset();
-        masm.append(wasm::BoundsCheck(cmpOffset));
-        masm.as_b(&ok2, Assembler::BelowOrEqual);
-        masm.assumeUnreachable("Redundant bounds check failed!");
-        masm.bind(&ok2);
-#endif
+        if (isFloat) {
+            FloatRegister output = ToFloatRegister(ins->output());
+            if (size == 32)
+                output = output.singleOverlay();
+
+            Assembler::Condition cond = Assembler::Always;
+            if (mir->needsBoundsCheck()) {
+                BufferOffset cmp = masm.as_cmp(ptrReg, Imm8(0));
+                masm.append(wasm::BoundsCheck(cmp.getOffset()));
+
+                size_t nanOffset = size == 32 ? wasm::NaN32GlobalDataOffset : wasm::NaN64GlobalDataOffset;
+                masm.ma_vldr(Address(GlobalReg, nanOffset - AsmJSGlobalRegBias), output,
+                             Assembler::AboveOrEqual);
+                cond = Assembler::Below;
+            }
+
+            masm.ma_vldr(output, HeapReg, ptrReg, 0, cond);
+        } else {
+            Register output = ToRegister(ins->output());
+
+            Assembler::Condition cond = Assembler::Always;
+            if (mir->needsBoundsCheck()) {
+                uint32_t cmpOffset = masm.as_cmp(ptrReg, Imm8(0)).getOffset();
+                masm.append(wasm::BoundsCheck(cmpOffset));
+
+                masm.ma_mov(Imm32(0), output, Assembler::AboveOrEqual);
+                cond = Assembler::Below;
+            }
+
+            masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, Offset, cond);
+        }
     }
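     // Note: an out-of-bounds asm.js load does not trap here. The conditional
     // loads above instead produce NaN for float accesses and 0 for integer
     // accesses, which is the required asm.js behaviour for OOB reads.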
 }
 
 template <typename T>
 void
 CodeGeneratorARM::emitWasmLoad(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
 
-    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");
-
     uint32_t offset = mir->offset();
-    MOZ_ASSERT(offset <= INT32_MAX);
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptr());
     Scalar::Type type = mir->accessType();
 
     // Maybe add the offset.
     if (offset || type == Scalar::Int64) {
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-        masm.ma_add(Imm32(offset), ptrPlusOffset);
+        if (offset)
+            masm.ma_add(Imm32(offset), ptrPlusOffset);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
     bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 ||
                     type == Scalar::Int64;
     unsigned byteSize = mir->byteSize();
 
+    memoryBarrier(mir->barrierBefore());
+
     if (mir->type() == MIRType::Int64) {
         Register64 output = ToOutRegister64(lir);
         if (type == Scalar::Int64) {
             MOZ_ASSERT(INT64LOW_OFFSET == 0);
             masm.ma_dataTransferN(IsLoad, 32, /* signed = */ false, HeapReg, ptr, output.low);
             masm.ma_add(Imm32(INT64HIGH_OFFSET), ptr);
             masm.ma_dataTransferN(IsLoad, 32, isSigned, HeapReg, ptr, output.high);
         } else {
@@ -2383,16 +2358,18 @@ CodeGeneratorARM::emitWasmLoad(T* lir)
             MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle());
             ScratchRegisterScope scratch(masm);
             masm.ma_add(HeapReg, ptr, scratch);
             masm.ma_vldr(Address(scratch, 0), output.fpu());
         } else {
             masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
         }
     }
+
+    memoryBarrier(mir->barrierAfter());
 }
 
 void
 CodeGeneratorARM::visitWasmLoad(LWasmLoad* lir)
 {
     emitWasmLoad(lir);
 }
 
@@ -2403,20 +2380,18 @@ CodeGeneratorARM::visitWasmLoadI64(LWasm
 }
 
 template<typename T>
 void
 CodeGeneratorARM::emitWasmUnalignedLoad(T* lir)
 {
     const MWasmLoad* mir = lir->mir();
 
-    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");
-
     uint32_t offset = mir->offset();
-    MOZ_ASSERT(offset <= INT32_MAX);
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptrCopy());
     if (offset)
         masm.ma_add(Imm32(offset), ptr);
 
     // Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
     masm.ma_add(HeapReg, ptr);
 
@@ -2435,16 +2410,18 @@ CodeGeneratorARM::emitWasmUnalignedLoad(
     else if (mirType == MIRType::Int64)
         low = ToOutRegister64(lir).low;
     else
         low = ToRegister(lir->output());
 
     MOZ_ASSERT(low != tmp);
     MOZ_ASSERT(low != ptr);
 
+    memoryBarrier(mir->barrierBefore());
+
     masm.emitUnalignedLoad(isSigned, Min(byteSize, 4u), ptr, tmp, low);
 
     if (IsFloatingPointType(mirType)) {
         FloatRegister output = ToFloatRegister(lir->output());
         if (byteSize == 4) {
             MOZ_ASSERT(output.isSingle());
             masm.ma_vxfer(low, output);
         } else {
@@ -2463,54 +2440,68 @@ CodeGeneratorARM::emitWasmUnalignedLoad(
             MOZ_ASSERT(byteSize <= 4);
             // Propagate sign.
             if (isSigned)
                 masm.ma_asr(Imm32(31), output.low, output.high);
             else
                 masm.ma_mov(Imm32(0), output.high);
         }
     }
+
+    memoryBarrier(mir->barrierAfter());
 }
 
 void
 CodeGeneratorARM::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir)
 {
     emitWasmUnalignedLoad(lir);
 }
 
 void
 CodeGeneratorARM::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir)
 {
     emitWasmUnalignedLoad(lir);
 }
 
+void
+CodeGeneratorARM::visitWasmAddOffset(LWasmAddOffset* lir)
+{
+    MWasmAddOffset* mir = lir->mir();
+    Register base = ToRegister(lir->base());
+    Register out = ToRegister(lir->output());
+
+    masm.ma_add(base, Imm32(mir->offset()), out, SetCC);
+    masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::CarrySet);
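+
+    // A minimal illustration (values hypothetical): with base = 0xFFFFFF00 and
+    // offset = 0x200 the 32-bit add wraps around to 0x100 and sets the carry
+    // flag, so we branch to the OutOfBounds trap instead of continuing with a
+    // wrapped pointer.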
+}
+
 template <typename T>
 void
 CodeGeneratorARM::emitWasmStore(T* lir)
 {
     const MWasmStore* mir = lir->mir();
 
-    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");
-
     uint32_t offset = mir->offset();
-    MOZ_ASSERT(offset <= INT32_MAX);
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptr());
     unsigned byteSize = mir->byteSize();
     Scalar::Type type = mir->accessType();
 
     // Maybe add the offset.
     if (offset || type == Scalar::Int64) {
         Register ptrPlusOffset = ToRegister(lir->ptrCopy());
-        masm.ma_add(Imm32(offset), ptrPlusOffset);
+        if (offset)
+            masm.ma_add(Imm32(offset), ptrPlusOffset);
         ptr = ptrPlusOffset;
     } else {
         MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
     }
 
+    memoryBarrier(mir->barrierBefore());
+
     if (type == Scalar::Int64) {
         MOZ_ASSERT(INT64LOW_OFFSET == 0);
 
         Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
         masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ false, HeapReg, ptr, value.low);
         masm.ma_add(Imm32(INT64HIGH_OFFSET), ptr);
         masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ true, HeapReg, ptr, value.high);
     } else {
@@ -2522,16 +2513,18 @@ CodeGeneratorARM::emitWasmStore(T* lir)
             masm.ma_add(HeapReg, ptr, scratch);
             masm.ma_vstr(val, Address(scratch, 0));
         } else {
             bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // see AsmJSStoreHeap;
             Register val = value.gpr();
             masm.ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val);
         }
     }
+
+    memoryBarrier(mir->barrierAfter());
 }
 
 void
 CodeGeneratorARM::visitWasmStore(LWasmStore* lir)
 {
     emitWasmStore(lir);
 }
 
@@ -2542,30 +2535,30 @@ CodeGeneratorARM::visitWasmStoreI64(LWas
 }
 
 template<typename T>
 void
 CodeGeneratorARM::emitWasmUnalignedStore(T* lir)
 {
     const MWasmStore* mir = lir->mir();
 
-    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");
-
     uint32_t offset = mir->offset();
-    MOZ_ASSERT(offset <= INT32_MAX);
+    MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
 
     Register ptr = ToRegister(lir->ptrCopy());
     if (offset)
         masm.ma_add(Imm32(offset), ptr);
 
     // Add HeapReg to ptr, so we can use base+index addressing in the byte stores.
     masm.ma_add(HeapReg, ptr);
 
     MIRType mirType = mir->value()->type();
 
+    memoryBarrier(mir->barrierBefore());
+
     Register val = ToRegister(lir->valueHelper());
     if (IsFloatingPointType(mirType)) {
         masm.ma_vxfer(ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex)), val);
     } else if (mirType == MIRType::Int64) {
         Register64 input = ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex));
         if (input.low != val)
             masm.ma_mov(input.low, val);
     }
@@ -2581,17 +2574,19 @@ CodeGeneratorARM::emitWasmUnalignedStore
             MOZ_ASSERT(fp.isDouble());
             ScratchRegisterScope scratch(masm);
             masm.ma_vxfer(fp, scratch, val);
         } else {
             MOZ_ASSERT(mirType == MIRType::Int64);
             masm.ma_mov(ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex)).high, val);
         }
         masm.emitUnalignedStore(4, ptr, val, /* offset */ 4);
-     }
+    }
+
+    memoryBarrier(mir->barrierAfter());
 }
 
 void
 CodeGeneratorARM::visitWasmUnalignedStore(LWasmUnalignedStore* lir)
 {
     emitWasmUnalignedStore(lir);
 }
 
@@ -2600,35 +2595,35 @@ CodeGeneratorARM::visitWasmUnalignedStor
 {
     emitWasmUnalignedStore(lir);
 }
 
 void
 CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+
+    const LAllocation* ptr = ins->ptr();
+
     bool isSigned;
     int size;
     bool isFloat = false;
     switch (mir->accessType()) {
       case Scalar::Int8:
       case Scalar::Uint8:   isSigned = false; size = 8; break;
       case Scalar::Int16:
       case Scalar::Uint16:  isSigned = false; size = 16; break;
       case Scalar::Int32:
       case Scalar::Uint32:  isSigned = true;  size = 32; break;
       case Scalar::Float64: isFloat  = true;  size = 64; break;
       case Scalar::Float32: isFloat = true;   size = 32; break;
       default: MOZ_CRASH("unexpected array type");
     }
 
-    memoryBarrier(mir->barrierBefore());
-
-    const LAllocation* ptr = ins->ptr();
-
     if (ptr->isConstant()) {
         MOZ_ASSERT(!mir->needsBoundsCheck());
         int32_t ptrImm = ptr->toConstant()->toInt32();
         MOZ_ASSERT(ptrImm >= 0);
         if (isFloat) {
             VFPRegister vd(ToFloatRegister(ins->value()));
             Address addr(HeapReg, ptrImm);
             if (size == 32)
@@ -2636,54 +2631,64 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LA
             else
                 masm.storeDouble(vd, addr);
         } else {
             masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm),
                                   ToRegister(ins->value()), Offset, Assembler::Always);
         }
     } else {
         Register ptrReg = ToRegister(ptr);
-        if (isFloat)
-            masm.ma_storeHeapAsmJS(ptrReg, size, mir->needsBoundsCheck(), /*faultOnOOB=*/false,
-                                   ToFloatRegister(ins->value()));
-        else
-            masm.ma_storeHeapAsmJS(ptrReg, size, isSigned, mir->needsBoundsCheck(),
-                                   mir->isAtomicAccess(), ToRegister(ins->value()));
+
+        Assembler::Condition cond = Assembler::Always;
+        if (mir->needsBoundsCheck()) {
+            BufferOffset cmp = masm.as_cmp(ptrReg, Imm8(0));
+            masm.append(wasm::BoundsCheck(cmp.getOffset()));
+
+            cond = Assembler::Below;
+        }
+
+        if (isFloat) {
+            FloatRegister value = ToFloatRegister(ins->value());
+            if (size == 32)
+                value = value.singleOverlay();
+
+            masm.ma_vstr(value, HeapReg, ptrReg, 0, 0, cond);
+        } else {
+            Register value = ToRegister(ins->value());
+            masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, Offset, cond);
+        }
     }
-
-    memoryBarrier(mir->barrierAfter());
 }
 
 void
 CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+
     Scalar::Type vt = mir->accessType();
     const LAllocation* ptr = ins->ptr();
     Register ptrReg = ToRegister(ptr);
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
 
-    if (mir->needsBoundsCheck()) {
-        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
-        masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-        masm.append(wasm::BoundsCheck(cmpOffset));
-    }
     masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                         srcAddr, oldval, newval, InvalidReg,
                                         ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorARM::visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout* ins)
 {
     const MAsmJSCompareExchangeHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+
     Register ptr = ToRegister(ins->ptr());
     Register oldval = ToRegister(ins->oldval());
     Register newval = ToRegister(ins->newval());
     Register tls = ToRegister(ins->tls());
     Register instance = ToRegister(ins->getTemp(0));
     Register viewType = ToRegister(ins->getTemp(1));
 
     MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
@@ -2699,36 +2704,34 @@ CodeGeneratorARM::visitAsmJSCompareExcha
     masm.passABIArg(newval);
     masm.callWithABI(wasm::SymbolicAddress::AtomicCmpXchg);
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+
     Scalar::Type vt = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    if (mir->needsBoundsCheck()) {
-        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
-        masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-        masm.append(wasm::BoundsCheck(cmpOffset));
-    }
-
     masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                        srcAddr, value, InvalidReg, ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* ins)
 {
     const MAsmJSAtomicExchangeHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+
     Register ptr = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     Register tls = ToRegister(ins->tls());
     Register instance = ToRegister(ins->getTemp(0));
     Register viewType = ToRegister(ins->getTemp(1));
 
     MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg);
 
@@ -2741,75 +2744,68 @@ CodeGeneratorARM::visitAsmJSAtomicExchan
     masm.passABIArg(ptr);
     masm.passABIArg(value);
     masm.callWithABI(wasm::SymbolicAddress::AtomicXchg);
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
-    MOZ_ASSERT(ins->mir()->hasUses());
-    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
-
     MAsmJSAtomicBinopHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->hasUses());
+
     Scalar::Type vt = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
     Register flagTemp = ToRegister(ins->flagTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
+    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
 
-    if (mir->needsBoundsCheck()) {
-        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
-        masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-        masm.append(wasm::BoundsCheck(cmpOffset));
-    }
-
-    if (value->isConstant())
+    if (value->isConstant()) {
         atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                    Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
                                    ToAnyRegister(ins->output()));
-    else
+    } else {
         atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
                                    ToRegister(value), srcAddr, flagTemp, InvalidReg,
                                    ToAnyRegister(ins->output()));
+    }
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
-    MOZ_ASSERT(!ins->mir()->hasUses());
-    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
-
     MAsmJSAtomicBinopHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(!mir->hasUses());
+
     Scalar::Type vt = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
     Register flagTemp = ToRegister(ins->flagTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
+    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
     BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
 
-    if (mir->needsBoundsCheck()) {
-        uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset();
-        masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-        masm.append(wasm::BoundsCheck(cmpOffset));
-    }
-
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp);
     else
         atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp);
 }
 
 void
 CodeGeneratorARM::visitAsmJSAtomicBinopCallout(LAsmJSAtomicBinopCallout* ins)
 {
     const MAsmJSAtomicBinopHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+
     Register ptr = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     Register tls = ToRegister(ins->tls());
     Register instance = ToRegister(ins->getTemp(0));
     Register viewType = ToRegister(ins->getTemp(1));
 
     masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance);
     masm.move32(Imm32(mir->accessType()), viewType);
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -235,21 +235,21 @@ class CodeGeneratorARM : public CodeGene
     void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
     void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir);
     void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir);
     void visitAsmSelect(LAsmSelect* ins);
     void visitAsmReinterpret(LAsmReinterpret* ins);
     void emitWasmCall(LWasmCallBase* ins);
     void visitWasmCall(LWasmCall* ins);
     void visitWasmCallI64(LWasmCallI64* ins);
-    void visitWasmBoundsCheck(LWasmBoundsCheck* ins);
     void visitWasmLoad(LWasmLoad* ins);
     void visitWasmLoadI64(LWasmLoadI64* ins);
     void visitWasmUnalignedLoad(LWasmUnalignedLoad* ins);
     void visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* ins);
+    void visitWasmAddOffset(LWasmAddOffset* ins);
     void visitWasmStore(LWasmStore* ins);
     void visitWasmStoreI64(LWasmStoreI64* ins);
     void visitWasmUnalignedStore(LWasmUnalignedStore* ins);
     void visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* ins);
     void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins);
     void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
     void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins);
     void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -604,27 +604,16 @@ void
 LIRGeneratorARM::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
 {
     MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
     LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
     define(lir, ins);
 }
 
 void
-LIRGeneratorARM::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
-{
-    MDefinition* input = ins->input();
-    MOZ_ASSERT(input->type() == MIRType::Int32);
-
-    LAllocation baseAlloc = useRegisterAtStart(input);
-    auto* lir = new(alloc()) LWasmBoundsCheck(baseAlloc);
-    add(lir, ins);
-}
-
-void
 LIRGeneratorARM::visitWasmLoad(MWasmLoad* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation ptr = useRegisterAtStart(base);
 
     if (ins->isUnaligned()) {
@@ -718,19 +707,19 @@ LIRGeneratorARM::visitWasmStore(MWasmSto
 
 void
 LIRGeneratorARM::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
 {
     MOZ_ASSERT(ins->offset() == 0);
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
-    LAllocation baseAlloc;
 
     // For the ARM it is best to keep the 'base' in a register if a bounds check is needed.
+    LAllocation baseAlloc;
     if (base->isConstant() && !ins->needsBoundsCheck()) {
         // A bounds check is only skipped for a positive index.
         MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
         baseAlloc = LAllocation(base->toConstant());
     } else {
         baseAlloc = useRegisterAtStart(base);
     }
 
--- a/js/src/jit/arm/Lowering-arm.h
+++ b/js/src/jit/arm/Lowering-arm.h
@@ -100,17 +100,16 @@ class LIRGeneratorARM : public LIRGenera
     void visitUnbox(MUnbox* unbox);
     void visitReturn(MReturn* ret);
     void lowerPhi(MPhi* phi);
     void visitGuardShape(MGuardShape* ins);
     void visitGuardObjectGroup(MGuardObjectGroup* ins);
     void visitAsmSelect(MAsmSelect* ins);
     void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins);
     void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins);
-    void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
     void visitWasmLoad(MWasmLoad* ins);
     void visitWasmStore(MWasmStore* ins);
     void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
     void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
     void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
     void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
     void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
--- a/js/src/jit/arm/MacroAssembler-arm-inl.h
+++ b/js/src/jit/arm/MacroAssembler-arm-inl.h
@@ -1445,22 +1445,22 @@ MacroAssembler::branchTruncateDoubleToIn
 
     ma_vcvt_F64_I32(src, scratchSIntReg);
     ma_vxfer(scratchSIntReg, dest);
     ma_cmp(dest, Imm32(0x7fffffff));
     ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
     ma_b(fail, Assembler::Equal);
 }
 
-template <typename T>
+template <typename T, typename L>
 void
-MacroAssembler::branchAdd32(Condition cond, T src, Register dest, Label* label)
+MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label)
 {
     add32(src, dest);
-    j(cond, label);
+    as_b(label, cond);
 }
 
 template <typename T>
 void
 MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* label)
 {
     ma_sub(src, dest, SetCC);
     j(cond, label);
@@ -1966,16 +1966,48 @@ MacroAssembler::clampIntToUint8(Register
     // Look at (reg >> 8): if it is 0, reg doesn't need clamping; if it is
     // < 0, clamp to 0; otherwise clamp to 255.
     ScratchRegisterScope scratch(*this);
     as_mov(scratch, asr(reg, 8), SetCC);
     ma_mov(Imm32(0xff), reg, NotEqual);
     ma_mov(Imm32(0), reg, Signed);
 }
 
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+    BufferOffset bo = as_cmp(index, Imm8(0));
+    append(wasm::BoundsCheck(bo.getOffset()));
+
+    as_b(label, cond);
+}
+
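+// The cmp emitted by wasmBoundsCheck above uses a placeholder immediate of 0;
+// the recorded wasm::BoundsCheck offset lets later linking/patching code
+// rewrite that immediate to the real heap limit via wasmPatchBoundsCheck. A
+// sketch of a hypothetical caller (names illustrative only, not the actual
+// linking code):
+//
+//   for (const wasm::BoundsCheck& check : boundsChecks)
+//       masm.wasmPatchBoundsCheck(codeBase + check.offset(), heapLength);
+//
+// The limit must be encodable as an ARM Imm8 rotated immediate; the
+// MOZ_RELEASE_ASSERT below enforces that.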
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+    Instruction* inst = (Instruction*) patchAt;
+    MOZ_ASSERT(inst->is<InstCMP>());
+    InstCMP* cmp = inst->as<InstCMP>();
+
+    Register index;
+    cmp->extractOp1(&index);
+
+    MOZ_ASSERT(cmp->extractOp2().isImm8());
+
+    Imm8 imm8 = Imm8(limit);
+    MOZ_RELEASE_ASSERT(!imm8.invalid);
+
+    *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always);
+    // Don't call Auto Flush Cache; the wasm caller has done this for us.
+}
+
 //}}} check_macroassembler_style
 // ===============================================================
 
 void
 MacroAssemblerARMCompat::incrementInt32Value(const Address& addr)
 {
     asMasm().add32(Imm32(1), ToPayload(addr));
 }
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -2126,65 +2126,16 @@ MacroAssemblerARMCompat::loadFloat32(con
     uint32_t scale = Imm32::ShiftOf(src.scale).value;
     int32_t offset = src.offset;
 
     ScratchRegisterScope scratch(asMasm());
     as_add(scratch, base, lsl(index, scale));
     ma_vldr(Address(scratch, offset), VFPRegister(dest).singleOverlay());
 }
 
-
-void
-MacroAssemblerARMCompat::ma_loadHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck,
-                                          bool faultOnOOB, FloatRegister output)
-{
-    if (size == 32)
-        output = output.singleOverlay();
-
-    if (!needsBoundsCheck) {
-        ma_vldr(output, HeapReg, ptrReg, 0, Assembler::Always);
-    } else {
-        uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset();
-        append(wasm::BoundsCheck(cmpOffset));
-
-        if (faultOnOOB) {
-            ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-        }
-        else {
-            size_t nanOffset =
-                size == 32 ? wasm::NaN32GlobalDataOffset : wasm::NaN64GlobalDataOffset;
-            ma_vldr(Address(GlobalReg, nanOffset - AsmJSGlobalRegBias), output,
-                    Assembler::AboveOrEqual);
-        }
-        ma_vldr(output, HeapReg, ptrReg, 0, Assembler::Below);
-    }
-}
-
-void
-MacroAssemblerARMCompat::ma_loadHeapAsmJS(Register ptrReg, int size, bool isSigned,
-                                          bool needsBoundsCheck, bool faultOnOOB,
-                                          Register output)
-{
-    if (!needsBoundsCheck) {
-        ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, Offset,
-                         Assembler::Always);
-        return;
-    }
-
-    uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset();
-    append(wasm::BoundsCheck(cmpOffset));
-
-    if (faultOnOOB)
-        ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-    else
-        ma_mov(Imm32(0), output, Assembler::AboveOrEqual);
-
-    ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, Offset, Assembler::Below);
-}
-
 void
 MacroAssemblerARMCompat::store8(Imm32 imm, const Address& address)
 {
     AutoRegisterScope scratch2(asMasm(), secondScratchReg_);
     ma_mov(imm, scratch2);
     store8(scratch2, address);
 }
 
@@ -2351,61 +2302,16 @@ MacroAssemblerARMCompat::storePtr(Regist
 void
 MacroAssemblerARMCompat::storePtr(Register src, AbsoluteAddress dest)
 {
     ScratchRegisterScope scratch(asMasm());
     movePtr(ImmWord(uintptr_t(dest.addr)), scratch);
     storePtr(src, Address(scratch, 0));
 }
 
-void
-MacroAssemblerARMCompat::ma_storeHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck,
-                                           bool faultOnOOB, FloatRegister value)
-{
-    if (!needsBoundsCheck) {
-        BaseIndex addr(HeapReg, ptrReg, TimesOne, 0);
-        if (size == 32)
-            asMasm().storeFloat32(value, addr);
-        else
-            asMasm().storeDouble(value, addr);
-    } else {
-        uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset();
-        append(wasm::BoundsCheck(cmpOffset));
-
-        if (faultOnOOB)
-            ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-
-        if (size == 32)
-            value = value.singleOverlay();
-
-        ma_vstr(value, HeapReg, ptrReg, 0, 0, Assembler::Below);
-    }
-}
-
-void
-MacroAssemblerARMCompat::ma_storeHeapAsmJS(Register ptrReg, int size, bool isSigned,
-                                           bool needsBoundsCheck, bool faultOnOOB,
-                                           Register value)
-{
-    if (!needsBoundsCheck) {
-        ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, Offset,
-                         Assembler::Always);
-        return;
-    }
-
-    uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset();
-    append(wasm::BoundsCheck(cmpOffset));
-
-    if (faultOnOOB)
-        ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
-
-    ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, Offset,
-                     Assembler::Below);
-}
-
 // Note: this function clobbers the input register.
 void
 MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
 {
     if (HasVFPv3()) {
         Label notSplit;
         {
             ScratchDoubleScope scratchDouble(*this);
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -976,21 +976,16 @@ class MacroAssemblerARMCompat : public M
 
     // Load a float value into a register, then expand it to a double.
     void loadFloatAsDouble(const Address& addr, FloatRegister dest);
     void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
 
     void loadFloat32(const Address& addr, FloatRegister dest);
     void loadFloat32(const BaseIndex& src, FloatRegister dest);
 
-    void ma_loadHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck, bool faultOnOOB,
-                          FloatRegister output);
-    void ma_loadHeapAsmJS(Register ptrReg, int size, bool isSigned, bool needsBoundsCheck,
-                          bool faultOnOOB, Register output);
-
     void store8(Register src, const Address& address);
     void store8(Imm32 imm, const Address& address);
     void store8(Register src, const BaseIndex& address);
     void store8(Imm32 imm, const BaseIndex& address);
 
     void store16(Register src, const Address& address);
     void store16(Imm32 imm, const Address& address);
     void store16(Register src, const BaseIndex& address);
@@ -1017,21 +1012,16 @@ class MacroAssemblerARMCompat : public M
     template <typename T> void storePtr(ImmGCPtr imm, T address);
     void storePtr(Register src, const Address& address);
     void storePtr(Register src, const BaseIndex& address);
     void storePtr(Register src, AbsoluteAddress dest);
     void moveDouble(FloatRegister src, FloatRegister dest, Condition cc = Always) {
         ma_vmov(src, dest, cc);
     }
 
-    void ma_storeHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck, bool faultOnOOB,
-                           FloatRegister value);
-    void ma_storeHeapAsmJS(Register ptrReg, int size, bool isSigned, bool needsBoundsCheck,
-                           bool faultOnOOB, Register value);
-
   private:
     template<typename T>
     Register computePointer(const T& src, Register r);
 
     template<typename T>
     void compareExchangeARMv6(int nbytes, bool signExtend, const T& mem, Register oldval,
                               Register newval, Register output);
 
@@ -1439,19 +1429,16 @@ class MacroAssemblerARMCompat : public M
     void abiret() {
         as_bx(lr);
     }
 
     void ma_storeImm(Imm32 c, const Address& dest) {
         ma_mov(c, lr);
         ma_str(lr, dest);
     }
-    BufferOffset ma_BoundsCheck(Register bounded) {
-        return as_cmp(bounded, Imm8(0));
-    }
 
     void moveFloat32(FloatRegister src, FloatRegister dest, Condition cc = Always) {
         as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
     }
 
     void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
         loadPtr(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), dest);
     }
--- a/js/src/jit/arm64/Architecture-arm64.h
+++ b/js/src/jit/arm64/Architecture-arm64.h
@@ -451,15 +451,12 @@ hasUnaliasedDouble()
 // ARM prior to ARMv8 also has doubles that alias multiple floats.
 // Again, ARMv8 is in the clear.
 inline bool
 hasMultiAlias()
 {
     return false;
 }
 
-static const size_t WasmCheckedImmediateRange = 0;
-static const size_t WasmImmediateRange = 0;
-
 } // namespace jit
 } // namespace js
 
 #endif // jit_arm64_Architecture_arm64_h
--- a/js/src/jit/arm64/Assembler-arm64.cpp
+++ b/js/src/jit/arm64/Assembler-arm64.cpp
@@ -632,30 +632,16 @@ Assembler::FixupNurseryObjects(JSContext
 
 void
 Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
 {
     MOZ_CRASH("PatchInstructionImmediate()");
 }
 
 void
-Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
-{
-    Instruction* inst = (Instruction*) patchAt;
-    int32_t mask = ~(heapLength - 1);
-    unsigned n, imm_s, imm_r;
-    if (!IsImmLogical(mask, 32, &n, &imm_s, &imm_r))
-        MOZ_CRASH("Could not encode immediate!?");
-
-    inst->SetImmR(imm_r);
-    inst->SetImmS(imm_s);
-    inst->SetBitN(n);
-}
-
-void
 Assembler::retarget(Label* label, Label* target)
 {
     if (label->used()) {
         if (target->bound()) {
             bind(label, BufferOffset(target));
         } else if (target->used()) {
             // The target is not bound but used. Prepend label's branch list
             // onto target's.
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -372,18 +372,16 @@ class Assembler : public vixl::Assembler
             return reinterpret_cast<Instruction*>(&ldr);
         }
     };
 
     // Offset of the patchable target for the given entry.
     static const size_t OffsetOfJumpTableEntryPointer = 8;
 
   public:
-    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
-
     void writeCodePointer(AbsoluteLabel* absoluteLabel) {
         MOZ_ASSERT(!absoluteLabel->bound());
         uintptr_t x = LabelBase::INVALID_OFFSET;
         BufferOffset off = EmitData(&x, sizeof(uintptr_t));
 
         // The x86/x64 makes general use of AbsoluteLabel and weaves a linked list
         // of uses of an AbsoluteLabel through the assembly. ARM only uses labels
         // for the case statements of switch jump tables. Thus, for simplicity, we
--- a/js/src/jit/arm64/Lowering-arm64.cpp
+++ b/js/src/jit/arm64/Lowering-arm64.cpp
@@ -334,22 +334,16 @@ LIRGeneratorARM64::visitRandom(MRandom* 
 
 void
 LIRGeneratorARM64::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
 {
     MOZ_CRASH("NY");
 }
 
 void
-LIRGeneratorARM64::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
-{
-    MOZ_CRASH("NY");
-}
-
-void
 LIRGeneratorARM64::visitWasmLoad(MWasmLoad* ins)
 {
     MOZ_CRASH("NY");
 }
 
 void
 LIRGeneratorARM64::visitWasmStore(MWasmStore* ins)
 {
--- a/js/src/jit/arm64/Lowering-arm64.h
+++ b/js/src/jit/arm64/Lowering-arm64.h
@@ -112,17 +112,16 @@ class LIRGeneratorARM64 : public LIRGene
     void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
     void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
     void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
     void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
     void visitSubstr(MSubstr* ins);
     void visitRandom(MRandom* ins);
     void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
-    void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
     void visitWasmLoad(MWasmLoad* ins);
     void visitWasmStore(MWasmStore* ins);
     void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
     void visitCopySign(MCopySign* ins);
     void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
 };
 
 typedef LIRGeneratorARM64 LIRGeneratorSpecific;
--- a/js/src/jit/arm64/MacroAssembler-arm64-inl.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h
@@ -1005,22 +1005,22 @@ MacroAssembler::branchTruncateDoubleMayb
 }
 
 void
 MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
 {
     convertDoubleToInt32(src, dest, fail);
 }
 
-template <typename T>
+template <typename T, typename L>
 void
-MacroAssembler::branchAdd32(Condition cond, T src, Register dest, Label* label)
+MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label)
 {
     adds32(src, dest);
-    branch(cond, label);
+    B(label, cond);
 }
 
 template <typename T>
 void
 MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* label)
 {
     subs32(src, dest);
     branch(cond, label);
@@ -1540,16 +1540,32 @@ MacroAssembler::clampIntToUint8(Register
     MOZ_ASSERT(!scratch32.Is(reg32));
 
     Cmp(reg32, Operand(reg32, vixl::UXTB));
     Csel(reg32, reg32, vixl::wzr, Assembler::GreaterThanOrEqual);
     Mov(scratch32, Operand(0xff));
     Csel(reg32, reg32, scratch32, Assembler::LessThanOrEqual);
 }
 
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+    MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+    MOZ_CRASH("NYI");
+}
+
 //}}} check_macroassembler_style
 // ===============================================================
 
 template <typename T>
 void
 MacroAssemblerCompat::addToStackPtr(T t)
 {
     asMasm().addPtr(t, getStackPointer());
--- a/js/src/jit/arm64/vixl/Assembler-vixl.h
+++ b/js/src/jit/arm64/vixl/Assembler-vixl.h
@@ -853,17 +853,19 @@ class Assembler : public MozBaseAssemble
 
 #define COPYENUM(v) static const Condition v = vixl::v
 #define COPYENUM_(v) static const Condition v = vixl::v##_
   COPYENUM(Equal);
   COPYENUM(Zero);
   COPYENUM(NotEqual);
   COPYENUM(NonZero);
   COPYENUM(AboveOrEqual);
+  COPYENUM(CarrySet);
   COPYENUM(Below);
+  COPYENUM(CarryClear);
   COPYENUM(Signed);
   COPYENUM(NotSigned);
   COPYENUM(Overflow);
   COPYENUM(NoOverflow);
   COPYENUM(Above);
   COPYENUM(BelowOrEqual);
   COPYENUM_(GreaterThanOrEqual);
   COPYENUM_(LessThan);
--- a/js/src/jit/arm64/vixl/Constants-vixl.h
+++ b/js/src/jit/arm64/vixl/Constants-vixl.h
@@ -229,18 +229,18 @@ enum Condition {
 
   // Aliases.
   hs = cs,  // C set            Unsigned higher or same.
   lo = cc,  // C clear          Unsigned lower.
 
   // Mozilla expanded aliases.
   Equal = 0, Zero = 0,
   NotEqual = 1, NonZero = 1,
-  AboveOrEqual = 2,
-  Below = 3,
+  AboveOrEqual = 2, CarrySet = 2,
+  Below = 3, CarryClear = 3,
   Signed = 4,
   NotSigned = 5,
   Overflow = 6,
   NoOverflow = 7,
   Above = 8,
   BelowOrEqual = 9,
   GreaterThanOrEqual_ = 10,
   LessThan_ = 11,
--- a/js/src/jit/mips-shared/Architecture-mips-shared.h
+++ b/js/src/jit/mips-shared/Architecture-mips-shared.h
@@ -327,18 +327,12 @@ hasUnaliasedDouble() {
 // On MIPS, fn-double aliases both fn-float32 and fn+1-float32, so if you need
 // to convert a float32 to a double as a temporary, you need a temporary
 // double register.
 inline bool
 hasMultiAlias() {
     return true;
 }
 
-// See MIRGenerator::foldableOffsetRange for more info.
-// TODO: Implement this for MIPS. Note that it requires Codegen to respect the
-// offset field of AsmJSHeapAccess.
-static const size_t WasmCheckedImmediateRange = 0;
-static const size_t WasmImmediateRange = 0;
-
 } // namespace jit
 } // namespace js
 
 #endif /* jit_mips_shared_Architecture_mips_shared_h */
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1656,38 +1656,16 @@ CodeGeneratorMIPSShared::visitWasmCall(L
 
 void
 CodeGeneratorMIPSShared::visitWasmCallI64(LWasmCallI64* ins)
 {
     emitWasmCallBase(ins);
 }
 
 void
-CodeGeneratorMIPSShared::visitWasmBoundsCheck(LWasmBoundsCheck* ins)
-{
-    MWasmBoundsCheck* mir = ins->mir();
-
-    uint32_t offset = mir->offset();
-    MOZ_ASSERT(offset <= INT32_MAX);
-
-    uint32_t endOffset = mir->endOffset();
-    Register ptr = ToRegister(ins->ptr());
-
-    masm.move32(Imm32(endOffset), SecondScratchReg);
-    masm.addPtr(ptr, SecondScratchReg);
-
-    // Detect unsigned overflow.
-    masm.ma_b(SecondScratchReg, ptr, wasm::JumpTarget::OutOfBounds, Assembler::LessThan);
-
-    BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
-    masm.ma_b(SecondScratchReg, ScratchRegister, wasm::JumpTarget::OutOfBounds, Assembler::Above);
-    masm.append(wasm::BoundsCheck(bo.getOffset()));
-}
-
-void
 CodeGeneratorMIPSShared::visitWasmLoad(LWasmLoad* lir)
 {
     const MWasmLoad* mir = lir->mir();
 
     MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");
 
     uint32_t offset = mir->offset();
     MOZ_ASSERT(offset <= INT32_MAX);
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
@@ -191,17 +191,16 @@ class CodeGeneratorMIPSShared : public C
 
     void visitNegI(LNegI* lir);
     void visitNegD(LNegD* lir);
     void visitNegF(LNegF* lir);
     void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
     void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
     void visitWasmCall(LWasmCall* ins);
     void visitWasmCallI64(LWasmCallI64* ins);
-    void visitWasmBoundsCheck(LWasmBoundsCheck* ins);
     void visitWasmLoad(LWasmLoad* ins);
     void visitWasmStore(LWasmStore* ins);
     void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
     void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
     void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
     void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
     void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
     void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -289,27 +289,16 @@ LIRGeneratorMIPSShared::visitAsmJSNeg(MA
         define(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
     } else {
         MOZ_ASSERT(ins->type() == MIRType::Double);
         define(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
     }
 }
 
 void
-LIRGeneratorMIPSShared::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
-{
-    if (!gen->needsBoundsCheckBranch(ins))
-        return;
-
-    MDefinition* index = ins->input();
-    auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(index));
-    add(lir, ins);
-}
-
-void
 LIRGeneratorMIPSShared::visitWasmLoad(MWasmLoad* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
 #ifdef JS_CODEGEN_MIPS64
     if (ins->type() == MIRType::Int64) {
         auto* lir = new(alloc()) LWasmLoadI64(useRegisterAtStart(base));
--- a/js/src/jit/mips-shared/Lowering-mips-shared.h
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.h
@@ -71,17 +71,16 @@ class LIRGeneratorMIPSShared : public LI
     void lowerModI(MMod* mod);
     void lowerDivI64(MDiv* div);
     void lowerModI64(MMod* mod);
     void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
     void lowerUDiv(MDiv* div);
     void lowerUMod(MMod* mod);
     void visitPowHalf(MPowHalf* ins);
     void visitAsmJSNeg(MAsmJSNeg* ins);
-    void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
     void visitWasmLoad(MWasmLoad* ins);
     void visitWasmStore(MWasmStore* ins);
     void visitAsmSelect(MAsmSelect* ins);
 
     LTableSwitch* newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
                                   MTableSwitch* ins);
     LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
 
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -618,19 +618,19 @@ MacroAssembler::branchTruncateDoubleMayb
 }
 
 void
 MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
 {
     convertDoubleToInt32(src, dest, fail);
 }
 
-template <typename T>
+template <typename T, typename L>
 void
-MacroAssembler::branchAdd32(Condition cond, T src, Register dest, Label* overflow)
+MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L overflow)
 {
     switch (cond) {
       case Overflow:
         ma_addTestOverflow(dest, dest, src, overflow);
         break;
       default:
         MOZ_CRASH("NYI");
     }
--- a/js/src/jit/mips32/Assembler-mips32.cpp
+++ b/js/src/jit/mips32/Assembler-mips32.cpp
@@ -478,19 +478,8 @@ Assembler::ToggleCall(CodeLocationLabel 
         *i2 = jalr;
     } else {
         InstNOP nop;
         *i2 = nop;
     }
 
     AutoFlushICache::flush(uintptr_t(i2), 4);
 }
-
-void
-Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
-{
-    Instruction* inst = (Instruction*) patchAt;
-    InstImm* i0 = (InstImm*) inst;
-    InstImm* i1 = (InstImm*) i0->next();
-
-    // Replace with new value
-    Assembler::UpdateLuiOriValue(i0, i1, heapLength);
-}
--- a/js/src/jit/mips32/Assembler-mips32.h
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -167,18 +167,16 @@ class Assembler : public AssemblerMIPSSh
                                         ImmPtr expectedValue);
     static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                         PatchedImmPtr expectedValue);
 
     static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
     static uint32_t ExtractInstructionImmediate(uint8_t* code);
 
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
-
-    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
 }; // Assembler
 
 static const uint32_t NumIntArgRegs = 4;
 
 static inline bool
 GetIntArgReg(uint32_t usedArgSlots, Register* out)
 {
     if (usedArgSlots < NumIntArgRegs) {
--- a/js/src/jit/mips32/MacroAssembler-mips32-inl.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
@@ -447,16 +447,37 @@ MacroAssembler::storeUncanonicalizedFloa
 }
 void
 MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
 {
     MOZ_ASSERT(addr.offset == 0);
     ma_ss(src, addr);
 }
 
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+    MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+    Instruction* inst = (Instruction*) patchAt;
+    InstImm* i0 = (InstImm*) inst;
+    InstImm* i1 = (InstImm*) i0->next();
+
+    // Replace with new value
+    Assembler::UpdateLuiOriValue(i0, i1, limit);
+}
+
 //}}} check_macroassembler_style
 // ===============================================================
 
 void
 MacroAssemblerMIPSCompat::incrementInt32Value(const Address& addr)
 {
     asMasm().add32(Imm32(1), ToPayload(addr));
 }
--- a/js/src/jit/mips64/Assembler-mips64.cpp
+++ b/js/src/jit/mips64/Assembler-mips64.cpp
@@ -523,15 +523,8 @@ Assembler::ToggleCall(CodeLocationLabel 
         *i4 = jalr;
     } else {
         InstNOP nop;
         *i4 = nop;
     }
 
     AutoFlushICache::flush(uintptr_t(i4), sizeof(uint32_t));
 }
-
-void
-Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength)
-{
-    // Replace with new value
-    Assembler::UpdateLoad64Value((Instruction*) patchAt, heapLength);
-}
--- a/js/src/jit/mips64/Assembler-mips64.h
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -169,18 +169,16 @@ class Assembler : public AssemblerMIPSSh
                                         ImmPtr expectedValue);
     static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                         PatchedImmPtr expectedValue);
 
     static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
     static uint64_t ExtractInstructionImmediate(uint8_t* code);
 
     static void ToggleCall(CodeLocationLabel inst_, bool enabled);
-
-    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength);
 }; // Assembler
 
 static const uint32_t NumIntArgRegs = 8;
 static const uint32_t NumFloatArgRegs = NumIntArgRegs;
 
 static inline bool
 GetIntArgReg(uint32_t usedArgSlots, Register* out)
 {
--- a/js/src/jit/mips64/MacroAssembler-mips64-inl.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64-inl.h
@@ -404,16 +404,33 @@ MacroAssembler::storeUncanonicalizedFloa
 }
 void
 MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
 {
     MOZ_ASSERT(addr.offset == 0);
     ma_ss(src, addr);
 }
 
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+    MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+    // Replace with new value
+    Assembler::UpdateLoad64Value((Instruction*) patchAt, limit);
+}
+
 //}}} check_macroassembler_style
 // ===============================================================
 
 void
 MacroAssemblerMIPS64Compat::incrementInt32Value(const Address& addr)
 {
     asMasm().add32(Imm32(1), addr);
 }
--- a/js/src/jit/none/Architecture-none.h
+++ b/js/src/jit/none/Architecture-none.h
@@ -146,15 +146,12 @@ inline bool hasMultiAlias() { MOZ_CRASH(
 static const uint32_t ShadowStackSpace = 0;
 static const uint32_t JumpImmediateRange = INT32_MAX;
 
 #ifdef JS_NUNBOX32
 static const int32_t NUNBOX32_TYPE_OFFSET = 4;
 static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
 #endif
 
-static const size_t WasmCheckedImmediateRange = 0;
-static const size_t WasmImmediateRange = 0;
-
 } // namespace jit
 } // namespace js
 
 #endif /* jit_none_Architecture_none_h */
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -142,18 +142,16 @@ class Assembler : public AssemblerShared
     static void PatchWrite_NearCall(CodeLocationLabel, CodeLocationLabel) { MOZ_CRASH(); }
     static uint32_t PatchWrite_NearCallSize() { MOZ_CRASH(); }
     static void PatchInstructionImmediate(uint8_t*, PatchedImmPtr) { MOZ_CRASH(); }
 
     static void ToggleToJmp(CodeLocationLabel) { MOZ_CRASH(); }
     static void ToggleToCmp(CodeLocationLabel) { MOZ_CRASH(); }
     static void ToggleCall(CodeLocationLabel, bool) { MOZ_CRASH(); }
 
-    static void UpdateBoundsCheck(uint8_t*, uint32_t) { MOZ_CRASH(); }
-
     static uintptr_t GetPointer(uint8_t*) { MOZ_CRASH(); }
 
     void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
                                      const Disassembler::HeapAccess& heapAccess)
     {
         MOZ_CRASH();
     }
 };
--- a/js/src/jit/shared/CodeGenerator-shared-inl.h
+++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
@@ -342,32 +342,27 @@ CodeGeneratorShared::restoreLiveVolatile
     LSafepoint* safepoint = ins->safepoint();
     LiveRegisterSet regs;
     regs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(), RegisterSet::Volatile());
     masm.PopRegsInMask(regs);
 }
 
 void
 CodeGeneratorShared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad,
-                                                 bool isInt64, Scalar::Type type, unsigned numElems,
-                                                 const Operand& mem, LAllocation alloc)
+                                                 Scalar::Type type, Operand mem, LAllocation alloc)
 {
 #ifdef DEBUG
     using namespace Disassembler;
 
     Disassembler::HeapAccess::Kind kind = isLoad ? HeapAccess::Load : HeapAccess::Store;
     switch (type) {
       case Scalar::Int8:
       case Scalar::Int16:
         if (kind == HeapAccess::Load)
-            kind = isInt64 ? HeapAccess::LoadSext64 : HeapAccess::LoadSext32;
-        break;
-      case Scalar::Int32:
-        if (isInt64 && kind == HeapAccess::Load)
-            kind = HeapAccess::LoadSext64;
+            kind = HeapAccess::LoadSext32;
         break;
       default:
         break;
     }
 
     OtherOperand op;
     switch (type) {
       case Scalar::Int8:
@@ -376,17 +371,17 @@ CodeGeneratorShared::verifyHeapAccessDis
       case Scalar::Uint16:
       case Scalar::Int32:
       case Scalar::Uint32:
         if (!alloc.isConstant()) {
             op = OtherOperand(ToRegister(alloc).encoding());
         } else {
             // x86 doesn't allow encoding an imm64 to memory move; the value
             // is wrapped anyways.
-            int32_t i = isInt64 ? int32_t(ToInt64(&alloc)) : ToInt32(&alloc);
+            int32_t i = ToInt32(&alloc);
 
             // Sign-extend the immediate value out to 32 bits. We do this even
             // for unsigned element types so that we match what the disassembly
             // code does, as it doesn't know about signedness of stores.
             unsigned shift = 32 - TypedArrayElemSize(type) * 8;
             i = i << shift >> shift;
             op = OtherOperand(i);
         }
@@ -403,38 +398,33 @@ CodeGeneratorShared::verifyHeapAccessDis
       case Scalar::Int32x4:
         op = OtherOperand(ToFloatRegister(alloc).encoding());
         break;
       case Scalar::Uint8Clamped:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("Unexpected array type");
     }
 
-    size_t size = Scalar::isSimdType(type)
-                  ? Scalar::scalarByteSize(type) * numElems
-                  : TypedArrayElemSize(type);
-    masm.verifyHeapAccessDisassembly(begin, end,
-                                     HeapAccess(kind, size, ComplexAddress(mem), op));
+    HeapAccess access(kind, TypedArrayElemSize(type), ComplexAddress(mem), op);
+    masm.verifyHeapAccessDisassembly(begin, end, access);
 #endif
 }
 
 void
-CodeGeneratorShared::verifyLoadDisassembly(uint32_t begin, uint32_t end, bool isInt64,
-                                           Scalar::Type type, unsigned numElems, const Operand& mem,
-                                           LAllocation alloc)
+CodeGeneratorShared::verifyLoadDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+                                           Operand mem, LAllocation alloc)
 {
-    verifyHeapAccessDisassembly(begin, end, true, isInt64, type, numElems, mem, alloc);
+    verifyHeapAccessDisassembly(begin, end, true, type, mem, alloc);
 }
 
 void
-CodeGeneratorShared::verifyStoreDisassembly(uint32_t begin, uint32_t end, bool isInt64,
-                                            Scalar::Type type, unsigned numElems,
-                                            const Operand& mem, LAllocation alloc)
+CodeGeneratorShared::verifyStoreDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+                                            Operand mem, LAllocation alloc)
 {
-    verifyHeapAccessDisassembly(begin, end, false, isInt64, type, numElems, mem, alloc);
+    verifyHeapAccessDisassembly(begin, end, false, type, mem, alloc);
 }
 
 inline bool
 CodeGeneratorShared::isGlobalObject(JSObject* object)
 {
     // Calling object->is<GlobalObject>() is racy because this relies on
     // checking the group and this can be changed while we are compiling off the
     // main thread.
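The shift pair in verifyHeapAccessDisassembly (i = i << shift >> shift) truncates a store constant to the element width and sign-extends it back to 32 bits, so the checker compares the same bytes the store will actually write regardless of the element's signedness. A standalone sketch of the same arithmetic, with the element size passed explicitly instead of via TypedArrayElemSize:

#include <cassert>
#include <cstdint>

// Sign-extend the low elemSize bytes of v, mirroring the codegen's shift pair.
static int32_t SignExtendLow(int32_t v, unsigned elemSize) {
    unsigned shift = 32 - elemSize * 8;
    // Route the left shift through uint32_t to avoid shifting a negative value.
    return int32_t(uint32_t(v) << shift) >> shift;
}

int main() {
    assert(SignExtendLow(0x80, 1) == -128);       // an Int8/Uint8 store of 0x80
    assert(SignExtendLow(0x1234ffff, 2) == -1);   // only the low 16 bits survive
    assert(SignExtendLow(-5, 4) == -5);           // full-width values are unchanged
    return 0;
}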
--- a/js/src/jit/shared/CodeGenerator-shared.h
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -541,25 +541,24 @@ class CodeGeneratorShared : public LElem
     void emitTracelogIonStop() {
 #ifdef JS_TRACE_LOGGING
         emitTracelogStopEvent(TraceLogger_IonMonkey);
         emitTracelogScriptStop();
 #endif
     }
 
   protected:
-    inline void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad, bool isInt64,
-                                            Scalar::Type type, unsigned numElems,
-                                            const Operand& mem, LAllocation alloc);
+    inline void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad,
+                                            Scalar::Type type, Operand mem, LAllocation alloc);
 
   public:
-    inline void verifyLoadDisassembly(uint32_t begin, uint32_t end, bool isInt64, Scalar::Type type,
-                                      unsigned numElems, const Operand& mem, LAllocation alloc);
-    inline void verifyStoreDisassembly(uint32_t begin, uint32_t end, bool isInt64, Scalar::Type type,
-                                       unsigned numElems, const Operand& mem, LAllocation alloc);
+    inline void verifyLoadDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+                                      Operand mem, LAllocation alloc);
+    inline void verifyStoreDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+                                       Operand mem, LAllocation alloc);
 
     bool isGlobalObject(JSObject* object);
 };
 
 // An out-of-line path is generated at the end of the function.
 class OutOfLineCode : public TempObject
 {
     Label entry_;
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -7834,16 +7834,31 @@ class LAsmSelectI64 : public LAsmSelectB
     const LInt64Allocation falseExpr() {
         return getInt64Operand(FalseExprIndex);
     }
     const LAllocation* condExpr() {
         return getOperand(CondIndex);
     }
 };
 
+class LWasmAddOffset : public LInstructionHelper<1, 1, 0>
+{
+  public:
+    LIR_HEADER(WasmAddOffset);
+    explicit LWasmAddOffset(const LAllocation& base) {
+        setOperand(0, base);
+    }
+    MWasmAddOffset* mir() const {
+        return mir_->toWasmAddOffset();
+    }
+    const LAllocation* base() {
+        return getOperand(0);
+    }
+};
+
 class LWasmBoundsCheck : public LInstructionHelper<0, 1, 0>
 {
   public:
     LIR_HEADER(WasmBoundsCheck);
     explicit LWasmBoundsCheck(const LAllocation& ptr) {
         setOperand(0, ptr);
     }
     MWasmBoundsCheck* mir() const {
--- a/js/src/jit/shared/LOpcodes-shared.h
+++ b/js/src/jit/shared/LOpcodes-shared.h
@@ -383,21 +383,22 @@
     _(CallDOMNative)                \
     _(IsCallable)                   \
     _(IsConstructor)                \
     _(IsObject)                     \
     _(IsObjectAndBranch)            \
     _(HasClass)                     \
     _(AsmSelect)                    \
     _(AsmSelectI64)                 \
+    _(WasmBoundsCheck)              \
+    _(WasmAddOffset)                \
     _(WasmLoad)                     \
     _(WasmLoadI64)                  \
     _(WasmStore)                    \
     _(WasmStoreI64)                 \
-    _(WasmBoundsCheck)              \
     _(WasmLoadGlobalVar)            \
     _(WasmLoadGlobalVarI64)         \
     _(WasmStoreGlobalVar)           \
     _(WasmStoreGlobalVarI64)        \
     _(AsmJSLoadHeap)                \
     _(AsmJSStoreHeap)               \
     _(AsmJSParameter)               \
     _(AsmJSParameterI64)            \
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -408,149 +408,71 @@ CodeGeneratorX64::visitWasmCallI64(LWasm
 void
 CodeGeneratorX64::memoryBarrier(MemoryBarrierBits barrier)
 {
     if (barrier & MembarStoreLoad)
         masm.storeLoadFence();
 }
 
 void
-CodeGeneratorX64::loadSimd(Scalar::Type type, unsigned numElems, const Operand& srcAddr,
-                           FloatRegister out)
+CodeGeneratorX64::wasmStore(Scalar::Type type, unsigned numSimdElems, const LAllocation* value,
+                            Operand dstAddr)
 {
-    switch (type) {
-      case Scalar::Float32x4: {
-        switch (numElems) {
-          // In memory-to-register mode, movss zeroes out the high lanes.
-          case 1: masm.loadFloat32(srcAddr, out); break;
-          // See comment above, which also applies to movsd.
-          case 2: masm.loadDouble(srcAddr, out); break;
-          case 4: masm.loadUnalignedSimd128Float(srcAddr, out); break;
-          default: MOZ_CRASH("unexpected size for partial load");
-        }
-        break;
-      }
-      case Scalar::Int32x4: {
-        switch (numElems) {
-          // In memory-to-register mode, movd zeroes out the high lanes.
-          case 1: masm.vmovd(srcAddr, out); break;
-          // See comment above, which also applies to movq.
-          case 2: masm.vmovq(srcAddr, out); break;
-          case 4: masm.loadUnalignedSimd128Int(srcAddr, out); break;
-          default: MOZ_CRASH("unexpected size for partial load");
+    if (value->isConstant()) {
+        const MConstant* mir = value->toConstant();
+        Imm32 cst = Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64());
+        switch (type) {
+          case Scalar::Int8:
+          case Scalar::Uint8:
+            masm.movb(cst, dstAddr);
+            break;
+          case Scalar::Int16:
+          case Scalar::Uint16:
+            masm.movw(cst, dstAddr);
+            break;
+          case Scalar::Int32:
+          case Scalar::Uint32:
+            masm.movl(cst, dstAddr);
+            break;
+          case Scalar::Int64:
+          case Scalar::Float32:
+          case Scalar::Float64:
+          case Scalar::Float32x4:
+          case Scalar::Int8x16:
+          case Scalar::Int16x8:
+          case Scalar::Int32x4:
+          case Scalar::Uint8Clamped:
+          case Scalar::MaxTypedArrayViewType:
+            MOZ_CRASH("unexpected array type");
         }
-        break;
-      }
-      case Scalar::Int8x16:
-        MOZ_ASSERT(numElems == 16, "unexpected partial load");
-        masm.loadUnalignedSimd128Int(srcAddr, out);
-        break;
-      case Scalar::Int16x8:
-        MOZ_ASSERT(numElems == 8, "unexpected partial load");
-        masm.loadUnalignedSimd128Int(srcAddr, out);
-        break;
-      case Scalar::Int8:
-      case Scalar::Uint8:
-      case Scalar::Int16:
-      case Scalar::Uint16:
-      case Scalar::Int32:
-      case Scalar::Uint32:
-      case Scalar::Int64:
-      case Scalar::Float32:
-      case Scalar::Float64:
-      case Scalar::Uint8Clamped:
-      case Scalar::MaxTypedArrayViewType:
-        MOZ_CRASH("should only handle SIMD types");
-    }
-}
-
-static wasm::MemoryAccess
-AsmJSMemoryAccess(uint32_t before, wasm::MemoryAccess::OutOfBoundsBehavior throwBehavior,
-                  uint32_t offsetWithinWholeSimdVector = 0)
-{
-    return wasm::MemoryAccess(before, throwBehavior, wasm::MemoryAccess::WrapOffset,
-                              offsetWithinWholeSimdVector);
-}
-
-void
-CodeGeneratorX64::load(Scalar::Type type, const Operand& srcAddr, AnyRegister out)
-{
-    switch (type) {
-      case Scalar::Int8:      masm.movsbl(srcAddr, out.gpr()); break;
-      case Scalar::Uint8:     masm.movzbl(srcAddr, out.gpr()); break;
-      case Scalar::Int16:     masm.movswl(srcAddr, out.gpr()); break;
-      case Scalar::Uint16:    masm.movzwl(srcAddr, out.gpr()); break;
-      case Scalar::Int32:
-      case Scalar::Uint32:    masm.movl(srcAddr, out.gpr()); break;
-      case Scalar::Float32:   masm.loadFloat32(srcAddr, out.fpu()); break;
-      case Scalar::Float64:   masm.loadDouble(srcAddr, out.fpu()); break;
-      case Scalar::Float32x4:
-      case Scalar::Int8x16:
-      case Scalar::Int16x8:
-      case Scalar::Int32x4:
-        MOZ_CRASH("SIMD loads should be handled in emitSimdLoad");
-      case Scalar::Int64:
-        MOZ_CRASH("int64 loads must use load64");
-      case Scalar::Uint8Clamped:
-      case Scalar::MaxTypedArrayViewType:
-        MOZ_CRASH("unexpected array type");
-    }
-}
-
-void
-CodeGeneratorX64::loadI64(Scalar::Type type, const Operand& srcAddr, Register64 out)
-{
-    switch (type) {
-      case Scalar::Int8:      masm.movsbq(srcAddr, out.reg); break;
-      case Scalar::Uint8:     masm.movzbq(srcAddr, out.reg); break;
-      case Scalar::Int16:     masm.movswq(srcAddr, out.reg); break;
-      case Scalar::Uint16:    masm.movzwq(srcAddr, out.reg); break;
-      case Scalar::Int32:     masm.movslq(srcAddr, out.reg); break;
-      // Int32 to int64 moves zero-extend by default.
-      case Scalar::Uint32:    masm.movl(srcAddr, out.reg);   break;
-      case Scalar::Int64:     masm.movq(srcAddr, out.reg);   break;
-      case Scalar::Float32:
-      case Scalar::Float64:
-      case Scalar::Float32x4:
-      case Scalar::Int8x16:
-      case Scalar::Int16x8:
-      case Scalar::Int32x4:
-        MOZ_CRASH("non-int64 loads should use load()");
-      case Scalar::Uint8Clamped:
-      case Scalar::MaxTypedArrayViewType:
-        MOZ_CRASH("unexpected array type");
+    } else {
+        masm.wasmStore(type, numSimdElems, ToAnyRegister(value), dstAddr);
     }
 }
 
 template <typename T>
 void
 CodeGeneratorX64::emitWasmLoad(T* ins)
 {
     const MWasmLoad* mir = ins->mir();
-    bool isInt64 = mir->type() == MIRType::Int64;
-
-    Scalar::Type accessType = mir->accessType();
-    MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI");
-    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");
-    MOZ_ASSERT(mir->offset() <= INT32_MAX);
+    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
 
     const LAllocation* ptr = ins->ptr();
     Operand srcAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
-    uint32_t before = masm.size();
-    if (isInt64)
-        loadI64(accessType, srcAddr, ToOutRegister64(ins));
+    memoryBarrier(mir->barrierBefore());
+
+    if (mir->type() == MIRType::Int64)
+        masm.wasmLoadI64(mir->accessType(), srcAddr, ToOutRegister64(ins));
     else
-        load(accessType, srcAddr, ToAnyRegister(ins->output()));
-    uint32_t after = masm.size();
+        masm.wasmLoad(mir->accessType(), mir->numSimdElems(), srcAddr, ToAnyRegister(ins->output()));
 
-    verifyLoadDisassembly(before, after, isInt64, accessType, /* numElems */ 0, srcAddr,
-                          *ins->output()->output());
+    memoryBarrier(mir->barrierAfter());
 }
 
 void
 CodeGeneratorX64::visitWasmLoad(LWasmLoad* ins)
 {
     emitWasmLoad(ins);
 }
 
@@ -560,463 +482,184 @@ CodeGeneratorX64::visitWasmLoadI64(LWasm
     emitWasmLoad(ins);
 }
 
 template <typename T>
 void
 CodeGeneratorX64::emitWasmStore(T* ins)
 {
     const MWasmStore* mir = ins->mir();
-
-    Scalar::Type accessType = mir->accessType();
-    MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI");
-    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");
-    MOZ_ASSERT(mir->offset() <= INT32_MAX);
+    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
 
     const LAllocation* value = ins->getOperand(ins->ValueIndex);
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
-    uint32_t before = masm.size();
-    store(accessType, value, dstAddr);
-    uint32_t after = masm.size();
-
-    verifyStoreDisassembly(before, after, mir->value()->type() == MIRType::Int64,
-                           accessType, /* numElems */ 0, dstAddr, *value);
+    memoryBarrier(mir->barrierBefore());
+    wasmStore(mir->accessType(), mir->numSimdElems(), value, dstAddr);
+    memoryBarrier(mir->barrierAfter());
 }
 
 void
 CodeGeneratorX64::visitWasmStore(LWasmStore* ins)
 {
     emitWasmStore(ins);
 }
 
 void
 CodeGeneratorX64::visitWasmStoreI64(LWasmStoreI64* ins)
 {
     emitWasmStore(ins);
 }
 
 void
-CodeGeneratorX64::emitSimdLoad(LAsmJSLoadHeap* ins)
-{
-    const MAsmJSLoadHeap* mir = ins->mir();
-    Scalar::Type type = mir->accessType();
-    FloatRegister out = ToFloatRegister(ins->output());
-    const LAllocation* ptr = ins->ptr();
-    Operand srcAddr = ptr->isBogus()
-                      ? Operand(HeapReg, mir->offset())
-                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
-
-    bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
-
-    unsigned numElems = mir->numSimdElems();
-    if (numElems == 3) {
-        MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
-
-        Operand srcAddrZ =
-            ptr->isBogus()
-            ? Operand(HeapReg, 2 * sizeof(float) + mir->offset())
-            : Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float) + mir->offset());
-
-        // Load XY
-        uint32_t before = masm.size();
-        loadSimd(type, 2, srcAddr, out);
-        uint32_t after = masm.size();
-        verifyLoadDisassembly(before, after, /* isInt64 */ false, type, 2, srcAddr,
-                              *ins->output()->output());
-        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
-
-        // Load Z (W is zeroed)
-        // This is still in bounds, as we've checked with a manual bounds check
-        // or we had enough space for sure when removing the bounds check.
-        before = after;
-        loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg);
-        after = masm.size();
-        verifyLoadDisassembly(before, after, /* isInt64 */ false, type, 1, srcAddrZ,
-                              LFloatReg(ScratchSimd128Reg));
-        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw, 8));
-
-        // Move ZW atop XY
-        masm.vmovlhps(ScratchSimd128Reg, out, out);
-    } else {
-        uint32_t before = masm.size();
-        loadSimd(type, numElems, srcAddr, out);
-        uint32_t after = masm.size();
-        verifyLoadDisassembly(before, after, /* isInt64 */ true, type, numElems, srcAddr,
-                              *ins->output()->output());
-        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
-    }
-
-    if (hasBoundsCheck)
-        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
-}
-
-void
 CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
 {
     const MAsmJSLoadHeap* mir = ins->mir();
-    Scalar::Type accessType = mir->accessType();
-
-    if (Scalar::isSimdType(accessType))
-        return emitSimdLoad(ins);
+    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
 
     const LAllocation* ptr = ins->ptr();
     const LDefinition* out = ins->output();
+
+    Scalar::Type accessType = mir->accessType();
+    MOZ_ASSERT(!Scalar::isSimdType(accessType));
+
     Operand srcAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
-    memoryBarrier(mir->barrierBefore());
-
-    OutOfLineLoadTypedArrayOutOfBounds* ool;
-    DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);
-
     uint32_t before = masm.size();
-    load(accessType, srcAddr, ToAnyRegister(out));
+    masm.wasmLoad(accessType, 0, srcAddr, ToAnyRegister(out));
     uint32_t after = masm.size();
 
-    verifyLoadDisassembly(before, after, /* isInt64 */ false, accessType, 0, srcAddr,
-                          *out->output());
-
-    if (ool) {
-        MOZ_ASSERT(hasBoundsCheck);
-        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
-        masm.bind(ool->rejoin());
-    }
-
-    memoryBarrier(mir->barrierAfter());
-
-    // We cannot emulate atomic accesses currently.
-    masm.append(AsmJSMemoryAccess(before, (mir->isAtomicAccess() ?
-                                           wasm::MemoryAccess::Throw :
-                                           wasm::MemoryAccess::CarryOn)));
-}
-
-void
-CodeGeneratorX64::store(Scalar::Type type, const LAllocation* value, const Operand& dstAddr)
-{
-    if (value->isConstant()) {
-        const MConstant* mir = value->toConstant();
-        Imm32 cst = Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64());
-        switch (type) {
-          case Scalar::Int8:
-          case Scalar::Uint8:        masm.movb(cst, dstAddr); break;
-          case Scalar::Int16:
-          case Scalar::Uint16:       masm.movw(cst, dstAddr); break;
-          case Scalar::Int32:
-          case Scalar::Uint32:       masm.movl(cst, dstAddr); break;
-          case Scalar::Int64:
-          case Scalar::Float32:
-          case Scalar::Float64:
-          case Scalar::Float32x4:
-          case Scalar::Int8x16:
-          case Scalar::Int16x8:
-          case Scalar::Int32x4:
-          case Scalar::Uint8Clamped:
-          case Scalar::MaxTypedArrayViewType:
-            MOZ_CRASH("unexpected array type");
-        }
-    } else {
-        switch (type) {
-          case Scalar::Int8:
-          case Scalar::Uint8:
-            masm.movb(ToRegister(value), dstAddr);
-            break;
-          case Scalar::Int16:
-          case Scalar::Uint16:
-            masm.movw(ToRegister(value), dstAddr);
-            break;
-          case Scalar::Int32:
-          case Scalar::Uint32:
-            masm.movl(ToRegister(value), dstAddr);
-            break;
-          case Scalar::Int64:
-            masm.movq(ToRegister(value), dstAddr);
-            break;
-          case Scalar::Float32:
-            masm.storeUncanonicalizedFloat32(ToFloatRegister(value), dstAddr);
-            break;
-          case Scalar::Float64:
-            masm.storeUncanonicalizedDouble(ToFloatRegister(value), dstAddr);
-            break;
-          case Scalar::Float32x4:
-          case Scalar::Int8x16:
-          case Scalar::Int16x8:
-          case Scalar::Int32x4:
-            MOZ_CRASH("SIMD stores must be handled in emitSimdStore");
-          case Scalar::Uint8Clamped:
-          case Scalar::MaxTypedArrayViewType:
-            MOZ_CRASH("unexpected array type");
-        }
-    }
-}
-
-void
-CodeGeneratorX64::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in,
-                            const Operand& dstAddr)
-{
-    switch (type) {
-      case Scalar::Float32x4: {
-        switch (numElems) {
-          // In memory-to-register mode, movss zeroes out the high lanes.
-          case 1: masm.storeUncanonicalizedFloat32(in, dstAddr); break;
-          // See comment above, which also applies to movsd.
-          case 2: masm.storeUncanonicalizedDouble(in, dstAddr); break;
-          case 4: masm.storeUnalignedSimd128Float(in, dstAddr); break;
-          default: MOZ_CRASH("unexpected size for partial load");
-        }
-        break;
-      }
-      case Scalar::Int32x4: {
-        switch (numElems) {
-          // In memory-to-register mode, movd zeroes out the high lanes.
-          case 1: masm.vmovd(in, dstAddr); break;
-          // See comment above, which also applies to movq.
-          case 2: masm.vmovq(in, dstAddr); break;
-          case 4: masm.storeUnalignedSimd128Int(in, dstAddr); break;
-          default: MOZ_CRASH("unexpected size for partial load");
-        }
-        break;
-      }
-      case Scalar::Int8x16:
-        MOZ_ASSERT(numElems == 16, "unexpected partial store");
-        masm.storeUnalignedSimd128Int(in, dstAddr);
-        break;
-      case Scalar::Int16x8:
-        MOZ_ASSERT(numElems == 8, "unexpected partial store");
-        masm.storeUnalignedSimd128Int(in, dstAddr);
-        break;
-      case Scalar::Int8:
-      case Scalar::Uint8:
-      case Scalar::Int16:
-      case Scalar::Uint16:
-      case Scalar::Int32:
-      case Scalar::Uint32:
-      case Scalar::Int64:
-      case Scalar::Float32:
-      case Scalar::Float64:
-      case Scalar::Uint8Clamped:
-      case Scalar::MaxTypedArrayViewType:
-        MOZ_CRASH("should only handle SIMD types");
-    }
-}
-
-void
-CodeGeneratorX64::emitSimdStore(LAsmJSStoreHeap* ins)
-{
-    const MAsmJSStoreHeap* mir = ins->mir();
-    Scalar::Type type = mir->accessType();
-    FloatRegister in = ToFloatRegister(ins->value());
-    const LAllocation* ptr = ins->ptr();
-    Operand dstAddr = ptr->isBogus()
-                      ? Operand(HeapReg, mir->offset())
-                      : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
-
-    bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
-
-    unsigned numElems = mir->numSimdElems();
-    if (numElems == 3) {
-        MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
-
-        Operand dstAddrZ =
-            ptr->isBogus()
-            ? Operand(HeapReg, 2 * sizeof(float) + mir->offset())
-            : Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float) + mir->offset());
-
-        // It's possible that the Z could be out of bounds when the XY is in
-        // bounds. To avoid storing the XY before the exception is thrown, we
-        // store the Z first, and record its offset in the MemoryAccess so
-        // that the signal handler knows to check the bounds of the full
-        // access, rather than just the Z.
-        masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg);
-        uint32_t before = masm.size();
-        storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ);
-        uint32_t after = masm.size();
-        verifyStoreDisassembly(before, after, /* int64 */ false, type, 1, dstAddrZ,
-                               LFloatReg(ScratchSimd128Reg));
-        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw, 8));
-
-        // Store XY
-        before = after;
-        storeSimd(type, 2, in, dstAddr);
-        after = masm.size();
-        verifyStoreDisassembly(before, after, /* int64 */ false, type, 2, dstAddr,
-                               *ins->value());
-        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
-    } else {
-        uint32_t before = masm.size();
-        storeSimd(type, numElems, in, dstAddr);
-        uint32_t after = masm.size();
-        verifyStoreDisassembly(before, after, /* int64 */ false, type, numElems, dstAddr,
-                               *ins->value());
-        masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw));
-    }
-
-    if (hasBoundsCheck)
-        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
+    verifyLoadDisassembly(before, after, accessType, srcAddr, *out->output());
+    masm.append(wasm::MemoryAccess(before));
 }
 
 void
 CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
+
+    const LAllocation* ptr = ins->ptr();
+    const LAllocation* value = ins->value();
+
     Scalar::Type accessType = mir->accessType();
-    const LAllocation* value = ins->value();
+    MOZ_ASSERT(!Scalar::isSimdType(accessType));
 
     canonicalizeIfDeterministic(accessType, value);
 
-    if (Scalar::isSimdType(accessType))
-        return emitSimdStore(ins);
-
-    const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
                       ? Operand(HeapReg, mir->offset())
                       : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
 
-    memoryBarrier(mir->barrierBefore());
-
-    Label* rejoin;
-    DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin);
-
     uint32_t before = masm.size();
-    store(accessType, value, dstAddr);
+    wasmStore(accessType, 0, value, dstAddr);
     uint32_t after = masm.size();
 
-    verifyStoreDisassembly(before, after, /* int64 */ false, accessType, 0, dstAddr, *value);
-
-    if (rejoin) {
-        MOZ_ASSERT(hasBoundsCheck);
-        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
-        masm.bind(rejoin);
-    }
-
-    memoryBarrier(mir->barrierAfter());
-
-    // See comment in visitAsmJSLoadHeap
-    masm.append(AsmJSMemoryAccess(before, (mir->isAtomicAccess() ?
-                                           wasm::MemoryAccess::Throw :
-                                           wasm::MemoryAccess::CarryOn)));
+    verifyStoreDisassembly(before, after, accessType, dstAddr, *value);
+    masm.append(wasm::MemoryAccess(before));
 }
 
 void
 CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
-    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
-
     MAsmJSCompareExchangeHeap* mir = ins->mir();
-    Scalar::Type accessType = mir->accessType();
+    MOZ_ASSERT(mir->offset() == 0);
 
     Register ptr = ToRegister(ins->ptr());
-    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
+    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    // Note that we can't use the same machinery as normal asm.js loads/stores
-    // since signal-handler bounds checking is not yet implemented for atomic
-    // accesses.
-    maybeEmitWasmBoundsCheckBranch(mir, ptr);
+    Scalar::Type accessType = mir->accessType();
+    BaseIndex srcAddr(HeapReg, ptr, TimesOne);
 
     masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                         srcAddr,
                                         oldval,
                                         newval,
                                         InvalidReg,
                                         ToAnyRegister(ins->output()));
-    MOZ_ASSERT(mir->offset() == 0,
-               "The AsmJS signal handler doesn't yet support emulating "
-               "atomic accesses in the case of a fault from an unwrapped offset");
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
-    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
-    MOZ_ASSERT(ins->mir()->accessType() <= Scalar::Uint32);
-
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
-    Scalar::Type accessType = mir->accessType();
+    MOZ_ASSERT(mir->offset() == 0);
 
     Register ptr = ToRegister(ins->ptr());
-    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
     Register value = ToRegister(ins->value());
+    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    // See comment in visitAsmJSCompareExchangeHeap.
-    maybeEmitWasmBoundsCheckBranch(mir, ptr);
+    Scalar::Type accessType = mir->accessType();
+    MOZ_ASSERT(accessType <= Scalar::Uint32);
+
+    BaseIndex srcAddr(HeapReg, ptr, TimesOne);
 
     masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        srcAddr,
                                        value,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
-    MOZ_ASSERT(mir->offset() == 0,
-               "The AsmJS signal handler doesn't yet support emulating "
-               "atomic accesses in the case of a fault from an unwrapped offset");
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
-    MOZ_ASSERT(ins->mir()->hasUses());
-    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
-
     MAsmJSAtomicBinopHeap* mir = ins->mir();
-    Scalar::Type accessType = mir->accessType();
-    accessType = accessType == Scalar::Uint32 ? Scalar::Int32 : accessType;
-    AtomicOp op = mir->operation();
+    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(mir->hasUses());
 
     Register ptr = ToRegister(ins->ptr());
+    const LAllocation* value = ins->value();
     Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
-    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
-
-    const LAllocation* value = ins->value();
+    AnyRegister output = ToAnyRegister(ins->output());
+    MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    // See comment in visitAsmJSCompareExchangeHeap.
-    maybeEmitWasmBoundsCheckBranch(mir, ptr);
+    Scalar::Type accessType = mir->accessType();
+    if (accessType == Scalar::Uint32)
+        accessType = Scalar::Int32;
 
-    AnyRegister output = ToAnyRegister(ins->output());
+    AtomicOp op = mir->operation();
+    BaseIndex srcAddr(HeapReg, ptr, TimesOne);
+
     if (value->isConstant()) {
         atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
                                    output);
     } else {
         atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr, temp, InvalidReg,
                                    output);
     }
-
-    MOZ_ASSERT(mir->offset() == 0,
-               "The AsmJS signal handler doesn't yet support emulating "
-               "atomic accesses in the case of a fault from an unwrapped offset");
 }
 
 void
 CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
-    MOZ_ASSERT(!ins->mir()->hasUses());
+    MAsmJSAtomicBinopHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(!mir->hasUses());
+
+    Register ptr = ToRegister(ins->ptr());
+    const LAllocation* value = ins->value();
     MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
 
-    MAsmJSAtomicBinopHeap* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
     AtomicOp op = mir->operation();
 
-    Register ptr = ToRegister(ins->ptr());
-    BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset());
-    const LAllocation* value = ins->value();
-
-    // See comment in visitAsmJSCompareExchangeHeap.
-    maybeEmitWasmBoundsCheckBranch(mir, ptr);
+    BaseIndex srcAddr(HeapReg, ptr, TimesOne);
 
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
     else
         atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
-    MOZ_ASSERT(mir->offset() == 0,
-               "The AsmJS signal handler doesn't yet support emulating "
-               "atomic accesses in the case of a fault from an unwrapped offset");
 }
 
 void
 CodeGeneratorX64::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
 {
     MWasmLoadGlobalVar* mir = ins->mir();
 
     MIRType type = mir->type();
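The new MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit) checks capture why this code generator no longer emits explicit bounds-check branches: assuming the x64 heap mapping reserves the full 4 GiB addressable by a 32-bit index plus a guard region of at least OffsetGuardLimit bytes, any access at HeapReg + index + offset with offset below that limit still falls inside the reserved mapping, so an out-of-bounds access faults in a guard page and the signal handler turns it into a trap. A small sketch of the bound, with illustrative constants rather than the engine's real values:

#include <cassert>
#include <cstdint>

int main() {
    const uint64_t heapMapping = uint64_t(1) << 32;  // 4 GiB reachable by a 32-bit index
    const uint64_t guardLimit  = 32 * 1024 * 1024;   // stand-in for OffsetGuardLimit
    const uint64_t reserved    = heapMapping + guardLimit;

    // Worst case: the largest 32-bit index combined with the largest allowed offset.
    uint64_t index  = UINT32_MAX;
    uint64_t offset = guardLimit - 1;
    assert(index + offset < reserved);  // still inside the reserved, guarded region
    // (The engine's actual limit additionally leaves headroom for the access width.)
    return 0;
}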
--- a/js/src/jit/x64/CodeGenerator-x64.h
+++ b/js/src/jit/x64/CodeGenerator-x64.h
@@ -22,31 +22,20 @@ class CodeGeneratorX64 : public CodeGene
     ValueOperand ToValue(LInstruction* ins, size_t pos);
     ValueOperand ToOutValue(LInstruction* ins);
     ValueOperand ToTempValue(LInstruction* ins, size_t pos);
 
     void storeUnboxedValue(const LAllocation* value, MIRType valueType,
                            Operand dest, MIRType slotType);
     void memoryBarrier(MemoryBarrierBits barrier);
 
-    void load(Scalar::Type type, const Operand& srcAddr, AnyRegister out);
-    void loadI64(Scalar::Type type, const Operand& srcAddr, Register64 out);
-
-    void store(Scalar::Type type, const LAllocation* value, const Operand& dstAddr);
-
-    void loadSimd(Scalar::Type type, unsigned numElems, const Operand& srcAddr, FloatRegister out);
-    void storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in, const Operand& dstAddr);
+    void wasmStore(Scalar::Type type, unsigned numSimdElems, const LAllocation* value, Operand dstAddr);
+    template <typename T> void emitWasmLoad(T* ins);
+    template <typename T> void emitWasmStore(T* ins);
 
-    void emitSimdLoad(LAsmJSLoadHeap* ins);
-    void emitSimdStore(LAsmJSStoreHeap* ins);
-
-    template <typename T>
-    void emitWasmLoad(T* ins);
-    template <typename T>
-    void emitWasmStore(T* ins);
   public:
     CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
 
   public:
     void visitValue(LValue* value);
     void visitBox(LBox* box);
     void visitUnbox(LUnbox* unbox);
     void visitCompareB(LCompareB* lir);
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -189,16 +189,31 @@ void
 LIRGeneratorX64::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
 {
     MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
     LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()));
     define(lir, ins);
 }
 
 void
+LIRGeneratorX64::visitWasmLoad(MWasmLoad* ins)
+{
+    if (ins->type() != MIRType::Int64) {
+        lowerWasmLoad(ins);
+        return;
+    }
+
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType::Int32);
+
+    auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
+    defineInt64(lir, ins);
+}
+
+void
 LIRGeneratorX64::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     MDefinition* value = ins->value();
     LAllocation valueAlloc;
     switch (ins->accessType()) {
@@ -236,54 +251,44 @@ LIRGeneratorX64::visitWasmStore(MWasmSto
 }
 
 void
 LIRGeneratorX64::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
-    // For simplicity, require a register if we're going to emit a bounds-check
-    // branch, so that we don't have special cases for constants.
-    LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins)
-                            ? useRegisterAtStart(base)
-                            : useRegisterOrZeroAtStart(base);
-
-    define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
+    define(new(alloc()) LAsmJSLoadHeap(useRegisterOrZeroAtStart(base)), ins);
 }
 
 void
 LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
-    // For simplicity, require a register if we're going to emit a bounds-check
-    // branch, so that we don't have special cases for constants.
-    LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins)
-                            ? useRegisterAtStart(base)
-                            : useRegisterOrZeroAtStart(base);
-
     LAsmJSStoreHeap* lir = nullptr;  // initialize to silence GCC warning
     switch (ins->accessType()) {
       case Scalar::Int8:
       case Scalar::Uint8:
       case Scalar::Int16:
       case Scalar::Uint16:
       case Scalar::Int32:
       case Scalar::Uint32:
-        lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterOrConstantAtStart(ins->value()));
+        lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
+                                           useRegisterOrConstantAtStart(ins->value()));
         break;
       case Scalar::Float32:
       case Scalar::Float64:
       case Scalar::Float32x4:
       case Scalar::Int8x16:
       case Scalar::Int16x8:
       case Scalar::Int32x4:
-        lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()));
+        lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
+                                           useRegisterAtStart(ins->value()));
         break;
       case Scalar::Int64:
       case Scalar::Uint8Clamped:
       case Scalar::MaxTypedArrayViewType:
         MOZ_CRASH("unexpected array type");
     }
     add(lir, ins);
 }
@@ -459,31 +464,16 @@ LIRGeneratorX64::lowerUModI64(MMod* mod)
 {
     LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(mod->lhs()),
                                                     useRegister(mod->rhs()),
                                                     tempFixed(rax));
     defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
 }
 
 void
-LIRGeneratorX64::visitWasmLoad(MWasmLoad* ins)
-{
-    if (ins->type() != MIRType::Int64) {
-        lowerWasmLoad(ins);
-        return;
-    }
-
-    MDefinition* base = ins->base();
-    MOZ_ASSERT(base->type() == MIRType::Int32);
-
-    auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
-    defineInt64(lir, ins);
-}
-
-void
 LIRGeneratorX64::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
 {
     MDefinition* opd = ins->input();
     MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
 
     LDefinition maybeTemp = ins->isUnsigned() ? tempDouble() : LDefinition::BogusTemp();
     defineInt64(new(alloc()) LWasmTruncateToInt64(useRegister(opd), maybeTemp), ins);
 }
--- a/js/src/jit/x64/Lowering-x64.h
+++ b/js/src/jit/x64/Lowering-x64.h
@@ -44,30 +44,30 @@ class LIRGeneratorX64 : public LIRGenera
     bool needTempForPostBarrier() { return true; }
 
     void lowerDivI64(MDiv* div);
     void lowerModI64(MMod* mod);
     void lowerUDivI64(MDiv* div);
     void lowerUModI64(MMod* mod);
 
   public:
-    void visitWasmLoad(MWasmLoad* ins);
     void visitBox(MBox* box);
     void visitUnbox(MUnbox* unbox);
     void visitReturn(MReturn* ret);
     void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
     void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
     void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
     void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins);
     void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins);
     void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
     void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
     void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
     void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
     void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
+    void visitWasmLoad(MWasmLoad* ins);
     void visitWasmStore(MWasmStore* ins);
     void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
     void visitSubstr(MSubstr* ins);
     void visitRandom(MRandom* ins);
     void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
     void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
     void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
 };
--- a/js/src/jit/x64/MacroAssembler-x64-inl.h
+++ b/js/src/jit/x64/MacroAssembler-x64-inl.h
@@ -794,16 +794,32 @@ MacroAssembler::truncateDoubleToUInt64(A
 
     loadPtr(dest, temp);
     or64(Imm64(0x8000000000000000), Register64(temp));
     storePtr(temp, dest);
 
     bind(&done);
 }
 
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+    MOZ_CRASH("x64 should never emit a bounds check");
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+    MOZ_CRASH("x64 should never emit a bounds check");
+}
+
 //}}} check_macroassembler_style
 // ===============================================================
 
 void
 MacroAssemblerX64::incrementInt32Value(const Address& addr)
 {
     asMasm().addPtr(Imm32(1), addr);
 }
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -624,16 +624,180 @@ MacroAssembler::storeUnboxedValue(Consta
 
 template void
 MacroAssembler::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const Address& dest,
                                   MIRType slotType);
 template void
 MacroAssembler::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const BaseIndex& dest,
                                   MIRType slotType);
 
+// ========================================================================
+// wasm support
+
+void
+MacroAssembler::wasmLoad(Scalar::Type type, unsigned numSimdElems, Operand srcAddr, AnyRegister out)
+{
+    switch (type) {
+      case Scalar::Int8:
+        movsbl(srcAddr, out.gpr());
+        break;
+      case Scalar::Uint8:
+        movzbl(srcAddr, out.gpr());
+        break;
+      case Scalar::Int16:
+        movswl(srcAddr, out.gpr());
+        break;
+      case Scalar::Uint16:
+        movzwl(srcAddr, out.gpr());
+        break;
+      case Scalar::Int32:
+      case Scalar::Uint32:
+        movl(srcAddr, out.gpr());
+        break;
+      case Scalar::Float32:
+        loadFloat32(srcAddr, out.fpu());
+        break;
+      case Scalar::Float64:
+        loadDouble(srcAddr, out.fpu());
+        break;
+      case Scalar::Float32x4:
+        switch (numSimdElems) {
+          // In memory-to-register mode, movss zeroes out the high lanes.
+          case 1: loadFloat32(srcAddr, out.fpu()); break;
+          // See comment above, which also applies to movsd.
+          case 2: loadDouble(srcAddr, out.fpu()); break;
+          case 4: loadUnalignedSimd128Float(srcAddr, out.fpu()); break;
+          default: MOZ_CRASH("unexpected size for partial load");
+        }
+        break;
+      case Scalar::Int32x4:
+        switch (numSimdElems) {
+          // In memory-to-register mode, movd zeroes out the high lanes.
+          case 1: vmovd(srcAddr, out.fpu()); break;
+          // See comment above, which also applies to movq.
+          case 2: vmovq(srcAddr, out.fpu()); break;
+          case 4: loadUnalignedSimd128Int(srcAddr, out.fpu()); break;
+          default: MOZ_CRASH("unexpected size for partial load");
+        }
+        break;
+      case Scalar::Int8x16:
+        MOZ_ASSERT(numSimdElems == 16, "unexpected partial load");
+        loadUnalignedSimd128Int(srcAddr, out.fpu());
+        break;
+      case Scalar::Int16x8:
+        MOZ_ASSERT(numSimdElems == 8, "unexpected partial load");
+        loadUnalignedSimd128Int(srcAddr, out.fpu());
+        break;
+      case Scalar::Int64:
+        MOZ_CRASH("int64 loads must use load64");
+      case Scalar::Uint8Clamped:
+      case Scalar::MaxTypedArrayViewType:
+        MOZ_CRASH("unexpected array type");
+    }
+}
+
+void
+MacroAssembler::wasmLoadI64(Scalar::Type type, Operand srcAddr, Register64 out)
+{
+    switch (type) {
+      case Scalar::Int8:
+        movsbq(srcAddr, out.reg);
+        break;
+      case Scalar::Uint8:
+        movzbq(srcAddr, out.reg);
+        break;
+      case Scalar::Int16:
+        movswq(srcAddr, out.reg);
+        break;
+      case Scalar::Uint16:
+        movzwq(srcAddr, out.reg);
+        break;
+      case Scalar::Int32:
+        movslq(srcAddr, out.reg);
+        break;
+      // Int32 to int64 moves zero-extend by default.
+      case Scalar::Uint32:
+        movl(srcAddr, out.reg);
+        break;
+      case Scalar::Int64:
+        movq(srcAddr, out.reg);
+        break;
+      case Scalar::Float32:
+      case Scalar::Float64:
+      case Scalar::Float32x4:
+      case Scalar::Int8x16:
+      case Scalar::Int16x8:
+      case Scalar::Int32x4:
+        MOZ_CRASH("non-int64 loads should use load()");
+      case Scalar::Uint8Clamped:
+      case Scalar::MaxTypedArrayViewType:
+        MOZ_CRASH("unexpected array type");
+    }
+}
+
+void
+MacroAssembler::wasmStore(Scalar::Type type, unsigned numSimdElems, AnyRegister value,
+                          Operand dstAddr)
+{
+    switch (type) {
+      case Scalar::Int8:
+      case Scalar::Uint8:
+        movb(value.gpr(), dstAddr);
+        break;
+      case Scalar::Int16:
+      case Scalar::Uint16:
+        movw(value.gpr(), dstAddr);
+        break;
+      case Scalar::Int32:
+      case Scalar::Uint32:
+        movl(value.gpr(), dstAddr);
+        break;
+      case Scalar::Int64:
+        movq(value.gpr(), dstAddr);
+        break;
+      case Scalar::Float32:
+        storeUncanonicalizedFloat32(value.fpu(), dstAddr);
+        break;
+      case Scalar::Float64:
+        storeUncanonicalizedDouble(value.fpu(), dstAddr);
+        break;
+      case Scalar::Float32x4:
+        switch (numSimdElems) {
+          // In memory-to-register mode, movss zeroes out the high lanes.
+          case 1: storeUncanonicalizedFloat32(value.fpu(), dstAddr); break;
+          // See comment above, which also applies to movsd.
+          case 2: storeUncanonicalizedDouble(value.fpu(), dstAddr); break;
+          case 4: storeUnalignedSimd128Float(value.fpu(), dstAddr); break;
+          default: MOZ_CRASH("unexpected size for partial load");
+        }
+        break;
+      case Scalar::Int32x4:
+        switch (numSimdElems) {
+          // In memory-to-register mode, movd zeroes out the high lanes.
+          case 1: vmovd(value.fpu(), dstAddr); break;
+          // See comment above, which also applies to movq.
+          case 2: vmovq(value.fpu(), dstAddr); break;
+          case 4: storeUnalignedSimd128Int(value.fpu(), dstAddr); break;
+          default: MOZ_CRASH("unexpected size for partial load");
+        }
+        break;
+      case Scalar::Int8x16:
+        MOZ_ASSERT(numSimdElems == 16, "unexpected partial store");
+        storeUnalignedSimd128Int(value.fpu(), dstAddr);
+        break;
+      case Scalar::Int16x8:
+        MOZ_ASSERT(numSimdElems == 8, "unexpected partial store");
+        storeUnalignedSimd128Int(value.fpu(), dstAddr);
+        break;
+      case Scalar::Uint8Clamped:
+      case Scalar::MaxTypedArrayViewType:
+        MOZ_CRASH("unexpected array type");
+    }
+}
+
 void
 MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry)
 {
     vcvttsd2sq(input, output);
 
     // Check that the result is in the uint32_t range.
     ScratchRegisterScope scratch(*this);
     move32(Imm32(0xffffffff), scratch);
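wasmLoadI64 above leans on an x64 detail called out in its comment: a 32-bit destination register is zero-extended implicitly, so Scalar::Uint32 can use a plain movl while Scalar::Int32 needs movslq to sign-extend. The same distinction in portable C++, as a sanity check on what the two instruction choices produce:

#include <cassert>
#include <cstdint>

int main() {
    uint32_t bits = 0xfffffffe;                      // the 32-bit value in wasm memory

    int64_t  signExtended = int64_t(int32_t(bits));  // what movslq yields for Int32
    uint64_t zeroExtended = uint64_t(bits);          // what movl into a 64-bit register yields for Uint32

    assert(signExtended == -2);
    assert(zeroExtended == 0xfffffffeu);
    return 0;
}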
--- a/js/src/jit/x86-shared/Architecture-x86-shared.h
+++ b/js/src/jit/x86-shared/Architecture-x86-shared.h
@@ -452,16 +452,12 @@ hasUnaliasedDouble()
 // On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32
 // to a double as a temporary, you need a temporary double register.
 inline bool
 hasMultiAlias()
 {
     return false;
 }
 
-// See MIRGenerator::foldableOffsetRange for more info.
-static const size_t WasmCheckedImmediateRange = 4096;
-static const size_t WasmImmediateRange = UINT32_C(0x80000000);
-
 } // namespace jit
 } // namespace js
 
 #endif /* jit_x86_shared_Architecture_x86_h */
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -299,16 +299,18 @@ class AssemblerX86Shared : public Assemb
         AboveOrEqual = X86Encoding::ConditionAE,
         Below = X86Encoding::ConditionB,
         BelowOrEqual = X86Encoding::ConditionBE,
         GreaterThan = X86Encoding::ConditionG,
         GreaterThanOrEqual = X86Encoding::ConditionGE,
         LessThan = X86Encoding::ConditionL,
         LessThanOrEqual = X86Encoding::ConditionLE,
         Overflow = X86Encoding::ConditionO,
+        CarrySet = X86Encoding::ConditionC,
+        CarryClear = X86Encoding::ConditionNC,
         Signed = X86Encoding::ConditionS,
         NotSigned = X86Encoding::ConditionNS,
         Zero = X86Encoding::ConditionE,
         NonZero = X86Encoding::ConditionNE,
         Parity = X86Encoding::ConditionP,
         NoParity = X86Encoding::ConditionNP
     };
 
@@ -1076,44 +1078,16 @@ class AssemblerX86Shared : public Assemb
     }
     static void patchTwoByteNopToJump(uint8_t* jump, uint8_t* target) {
         X86Encoding::BaseAssembler::patchTwoByteNopToJump(jump, target);
     }
     static void patchJumpToTwoByteNop(uint8_t* jump) {
         X86Encoding::BaseAssembler::patchJumpToTwoByteNop(jump);
     }
 
-    static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength) {
-        // On x64, even with signal handling being used for most bounds checks,
-        // there may be atomic operations that depend on explicit checks. All
-        // accesses that have been recorded are the only ones that need bound
-        // checks.
-        //
-        // An access is out-of-bounds iff
-        //          ptr + offset + data-type-byte-size > heapLength
-        //     i.e  ptr + offset + data-type-byte-size - 1 >= heapLength
-        //     i.e. ptr >= heapLength - data-type-byte-size - offset + 1.
-        //
-        // before := data-type-byte-size + offset - 1
-        uint32_t before = reinterpret_cast<uint32_t*>(patchAt)[-1];
-        uint32_t after = before + heapLength;
-
-        // If the computed index `before` already is out of bounds,
-        // we need to make sure the bounds check will fail all the time.
-        // For bounds checks, the sequence of instructions we use is:
-        //      cmp(ptrReg, #before)
-        //      jae(OutOfBounds)
-        // so replace the cmp immediate with 0.
-        if (after > heapLength)
-            after = 0;
-
-        MOZ_ASSERT_IF(after, int32_t(after) >= int32_t(before));
-        reinterpret_cast<uint32_t*>(patchAt)[-1] = after;
-    }
-
     void breakpoint() {
         masm.int3();
     }
 
     static bool HasSSE2() { return CPUInfo::IsSSE2Present(); }
     static bool HasSSE3() { return CPUInfo::IsSSE3Present(); }
     static bool HasSSSE3() { return CPUInfo::IsSSSE3Present(); }
     static bool HasSSE41() { return CPUInfo::IsSSE41Present(); }
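The new CarrySet/CarryClear conditions exist so the shared code generator (see visitWasmAddOffset later in this diff) can fold a wasm pointer's base + offset addition and its overflow check into a single add32 followed by a carry-flag branch: on x86 the carry flag is set exactly when a 32-bit unsigned add wraps. The equivalent check in portable C++:

#include <cassert>
#include <cstdint>

// Carry out of a 32-bit unsigned add: the wrapped sum is smaller than an operand.
static bool AddCarries32(uint32_t base, uint32_t offset, uint32_t* sum) {
    *sum = base + offset;  // wraps modulo 2^32, like add32
    return *sum < base;    // mirrors the CPU's carry flag
}

int main() {
    uint32_t sum;
    assert(!AddCarries32(100, 28, &sum) && sum == 128);
    assert(AddCarries32(0xfffffff0u, 0x20, &sum));  // wraps: the branch to OutOfBounds is taken
    return 0;
}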
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -419,92 +419,26 @@ CodeGeneratorX86Shared::visitOutOfLineLo
         Register destReg = ool->dest().gpr();
         masm.mov(ImmWord(0), destReg);
         break;
     }
     masm.jmp(ool->rejoin());
 }
 
 void
-CodeGeneratorX86Shared::visitOffsetBoundsCheck(OffsetBoundsCheck* oolCheck)
-{
-    // The access is heap[ptr + offset]. The inline code checks that
-    // ptr < heap.length - offset. We get here when that fails. We need to check
-    // for the case where ptr + offset >= 0, in which case the access is still
-    // in bounds.
-    MOZ_ASSERT(oolCheck->offset() != 0,
-               "An access without a constant offset doesn't need a separate OffsetBoundsCheck");
-    masm.cmp32(oolCheck->ptrReg(), Imm32(-uint32_t(oolCheck->offset())));
-    if (oolCheck->maybeOutOfBounds())
-        masm.j(Assembler::Below, oolCheck->maybeOutOfBounds());
-    else
-        masm.j(Assembler::Below, wasm::JumpTarget::OutOfBounds);
-
-#ifdef JS_CODEGEN_X64
-    // In order to get the offset to wrap properly, we must sign-extend the
-    // pointer to 32-bits. We'll zero out the sign extension immediately
-    // after the access to restore asm.js invariants.
-    masm.movslq(oolCheck->ptrReg(), oolCheck->ptrReg());
-#endif
-
-    masm.jmp(oolCheck->rejoin());
-}
-
-void
-CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access,
-                                                   const MInstruction* mir,
-                                                   Register ptr, Label* maybeFail)
+CodeGeneratorX86Shared::visitWasmAddOffset(LWasmAddOffset* lir)
 {
-    // Emit a bounds-checking branch for |access|.
-
-    MOZ_ASSERT(gen->needsBoundsCheckBranch(access));
-
-    Label* pass = nullptr;
-
-    // If we have a non-zero offset, it's possible that |ptr| itself is out of
-    // bounds, while adding the offset computes an in-bounds address. To catch
-    // this case, we need a second branch, which we emit out of line since it's
-    // unlikely to be needed in normal programs.
-    if (access->offset() != 0) {
-        auto oolCheck = new(alloc()) OffsetBoundsCheck(maybeFail, ptr, access->offset());
-        maybeFail = oolCheck->entry();
-        pass = oolCheck->rejoin();
-        addOutOfLineCode(oolCheck, mir);
-    }
-
-    // The bounds check is a comparison with an immediate value. The asm.js
-    // module linking process will add the length of the heap to the immediate
-    // field, so -access->endOffset() will turn into
-    // (heapLength - access->endOffset()), allowing us to test whether the end
-    // of the access is beyond the end of the heap.
-    MOZ_ASSERT(access->endOffset() >= 1,
-               "need to subtract 1 to use JAE, see also AssemblerX86Shared::UpdateBoundsCheck");
-
-    uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(1 - access->endOffset())).offset();
-    if (maybeFail)
-        masm.j(Assembler::AboveOrEqual, maybeFail);
-    else
-        masm.j(Assembler::AboveOrEqual, wasm::JumpTarget::OutOfBounds);
-
-    if (pass)
-        masm.bind(pass);
-
-    masm.append(wasm::BoundsCheck(cmpOffset));
-}
-
-void
-CodeGeneratorX86Shared::visitWasmBoundsCheck(LWasmBoundsCheck* ins)
-{
-    const MWasmBoundsCheck* mir = ins->mir();
-
-    MOZ_ASSERT(gen->needsBoundsCheckBranch(mir));
-    MOZ_ASSERT(mir->offset() <= INT32_MAX);
-
-    Register ptrReg = ToRegister(ins->ptr());
-    maybeEmitWasmBoundsCheckBranch(mir, ptrReg, mir->isRedundant());
+    MWasmAddOffset* mir = lir->mir();
+    Register base = ToRegister(lir->base());
+    Register out = ToRegister(lir->output());
+
+    if (base != out)
+        masm.move32(base, out);
+    masm.add32(Imm32(mir->offset()), out);
+    masm.j(Assembler::CarrySet, wasm::JumpTarget::OutOfBounds);
 }
 
 void
 CodeGeneratorX86Shared::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir)
 {
     FloatRegister input = ToFloatRegister(lir->input());
     Register output = ToRegister(lir->output());
 
@@ -532,112 +466,16 @@ CodeGeneratorX86Shared::visitWasmTruncat
     else if (inputType == MIRType::Float32)
         masm.wasmTruncateFloat32ToInt32(input, output, oolEntry);
     else
         MOZ_CRASH("unexpected type");
 
     masm.bind(ool->rejoin());
 }
 
-void
-CodeGeneratorX86Shared::maybeEmitWasmBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr,
-                                                       bool redundant)
-{
-    if (!gen->needsBoundsCheckBranch(mir))
-        return;
-
-    MOZ_ASSERT(mir->endOffset() >= 1,
-               "need to subtract 1 to use JAE, see also AssemblerX86Shared::UpdateBoundsCheck");
-
-    // TODO: See 1287224 Unify MWasmBoundsCheck::redunant_ and needsBoundsCheck
-    if (!redundant) {
-        uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(1 - mir->endOffset())).offset();
-        masm.j(Assembler::AboveOrEqual, wasm::JumpTarget::OutOfBounds);
-        masm.append(wasm::BoundsCheck(cmpOffset));
-    } else {
-#ifdef DEBUG
-        Label ok;
-        uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(1 - mir->endOffset())).offset();
-        masm.j(Assembler::Below, &ok);
-        masm.assumeUnreachable("Redundant bounds check failed!");
-        masm.bind(&ok);
-        masm.append(wasm::BoundsCheck(cmpOffset));
-#endif
-    }
-}
-
-bool
-CodeGeneratorX86Shared::maybeEmitThrowingAsmJSBoundsCheck(const MWasmMemoryAccess* access,
-                                                          const MInstruction* mir,
-                                                          const LAllocation* ptr)
-{
-    if (!gen->needsBoundsCheckBranch(access))
-        return false;
-
-    emitAsmJSBoundsCheckBranch(access, mir, ToRegister(ptr), nullptr);
-    return true;
-}
-
-bool
-CodeGeneratorX86Shared::maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins,
-                                                      OutOfLineLoadTypedArrayOutOfBounds** ool)
-{
-    MOZ_ASSERT(!Scalar::isSimdType(mir->accessType()));
-    *ool = nullptr;
-
-    if (!gen->needsBoundsCheckBranch(mir))
-        return false;
-
-    Label* rejoin = nullptr;
-    if (!mir->isAtomicAccess()) {
-        *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(ins->output()),
-                                                               mir->accessType());
-        addOutOfLineCode(*ool, mir);
-        rejoin = (*ool)->entry();
-    }
-
-    emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), rejoin);
-    return true;
-}
-
-bool
-CodeGeneratorX86Shared::maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins,
-                                                       Label** rejoin)
-{
-    MOZ_ASSERT(!Scalar::isSimdType(mir->accessType()));
-
-    *rejoin = nullptr;
-    if (!gen->needsBoundsCheckBranch(mir))
-        return false;
-
-    if (!mir->isAtomicAccess())
-        *rejoin = alloc().lifoAlloc()->newInfallible<Label>();
-
-    emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), *rejoin);
-    return true;
-}
-
-void
-CodeGeneratorX86Shared::cleanupAfterAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access,
-                                                           Register ptr)
-{
-    // Clean up after performing a heap access checked by a branch.
-
-    MOZ_ASSERT(gen->needsBoundsCheckBranch(access));
-
-#ifdef JS_CODEGEN_X64
-    // If the offset is 0, we don't use an OffsetBoundsCheck.
-    if (access->offset() != 0) {
-        // Zero out the high 32 bits, in case the OffsetBoundsCheck code had to
-        // sign-extend (movslq) the pointer value to get wraparound to work.
-        masm.movl(ptr, ptr);
-    }
-#endif
-}
-
 bool
 CodeGeneratorX86Shared::generateOutOfLineCode()
 {
     if (!CodeGeneratorShared::generateOutOfLineCode())
         return false;
 
     if (deoptLabel_.used()) {
         // All non-table-based bailouts will go here.
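
The new visitWasmAddOffset above replaces the OffsetBoundsCheck machinery: the constant offset is folded into the pointer with a 32-bit add, and a wrapped sum (carry set) branches straight to the shared wasm OutOfBounds target. A plain C++ analogue of that check, under the assumption that wraparound should simply trap; addOffsetOrTrap is a made-up name for illustration:

#include <cstdint>
#include <stdexcept>

// Mirrors the move32/add32/j(CarrySet) sequence: compute base + offset in
// 32 bits and treat unsigned wraparound as an out-of-bounds access.
uint32_t addOffsetOrTrap(uint32_t base, uint32_t offset)
{
    uint32_t effective = base + offset;   // add32(Imm32(offset), out)
    if (effective < base)                 // the add carried: the sum wrapped past 2^32
        throw std::runtime_error("wasm out of bounds");  // j(CarrySet, OutOfBounds)
    return effective;
}

Once the effective index is known not to have wrapped, the later load or store only needs a single length check, which is what lets the separate out-of-line OffsetBoundsCheck path above be deleted.
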
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
@@ -45,35 +45,16 @@ class CodeGeneratorX86Shared : public Co
 
         AnyRegister dest() const { return dest_; }
         Scalar::Type viewType() const { return viewType_; }
         void accept(CodeGeneratorX86Shared* codegen) {
             codegen->visitOutOfLineLoadTypedArrayOutOfBounds(this);
         }
     };
 
-    // Additional bounds checking for heap accesses with constant offsets.
-    class OffsetBoundsCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
-    {
-        Label* maybeOutOfBounds_;
-        Register ptrReg_;
-        int32_t offset_;
-      public:
-        OffsetBoundsCheck(Label* maybeOutOfBounds, Register ptrReg, int32_t offset)
-          : maybeOutOfBounds_(maybeOutOfBounds), ptrReg_(ptrReg), offset_(offset)
-        {}
-
-        Label* maybeOutOfBounds() const { return maybeOutOfBounds_; }
-        Register ptrReg() const { return ptrReg_; }
-        int32_t offset() const { return offset_; }
-        void accept(CodeGeneratorX86Shared* codegen) {
-            codegen->visitOffsetBoundsCheck(this);
-        }
-    };
-
     // Additional bounds check for vector Float to Int conversion, when the
     // undefined pattern is seen. Might imply a bailout.
     class OutOfLineSimdFloatToIntCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
     {
         Register temp_;
         FloatRegister input_;
         LInstruction* ins_;
 
@@ -86,39 +67,17 @@ class CodeGeneratorX86Shared : public Co
         FloatRegister input() const { return input_; }
         LInstruction* ins() const { return ins_; }
 
         void accept(CodeGeneratorX86Shared* codegen) {
             codegen->visitOutOfLineSimdFloatToIntCheck(this);
         }
     };
 
-  private:
-    void emitAsmJSBoundsCheckBranch(const MWasmMemoryAccess* mir, const MInstruction* ins,
-                                    Register ptr, Label* fail);
-
-  protected:
-    void maybeEmitWasmBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr,
-                                        bool redundant = false);
-
   public:
-    // For SIMD and atomic loads and stores (which throw on out-of-bounds):
-    bool maybeEmitThrowingAsmJSBoundsCheck(const MWasmMemoryAccess* mir, const MInstruction* ins,
-                                           const LAllocation* ptr);
-
-    // For asm.js plain and atomic loads that possibly require a bounds check:
-    bool maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins,
-                                       OutOfLineLoadTypedArrayOutOfBounds** ool);
-
-    // For asm.js plain and atomic stores that possibly require a bounds check:
-    bool maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins,
-                                        Label** rejoin);
-
-    void cleanupAfterAsmJSBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr);
-
     NonAssertingLabel deoptLabel_;
 
     Operand ToOperand(const LAllocation& a);
     Operand ToOperand(const LAllocation* a);
     Operand ToOperand(const LDefinition* def);
 
 #ifdef JS_PUNBOX64
     Operand ToOperandOrRegister64(const LInt64Allocation input);
@@ -279,29 +238,28 @@ class CodeGeneratorX86Shared : public Co
     virtual void visitGuardClass(LGuardClass* guard);
     virtual void visitEffectiveAddress(LEffectiveAddress* ins);
     virtual void visitUDivOrMod(LUDivOrMod* ins);
     virtual void visitUDivOrModConstant(LUDivOrModConstant *ins);
     virtual void visitAsmJSPassStackArg(LAsmJSPassStackArg* ins);
     virtual void visitAsmJSPassStackArgI64(LAsmJSPassStackArgI64* ins);
     virtual void visitAsmSelect(LAsmSelect* ins);
     virtual void visitAsmReinterpret(LAsmReinterpret* lir);
-    virtual void visitWasmBoundsCheck(LWasmBoundsCheck* ins);
     virtual void visitMemoryBarrier(LMemoryBarrier* ins);
+    virtual void visitWasmAddOffset(LWasmAddOffset* lir);
     virtual void visitWasmTruncateToInt32(LWasmTruncateToInt32* lir);
     virtual void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
     virtual void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
     virtual void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir);
     virtual void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir);
     virtual void visitCopySignD(LCopySignD* lir);
     virtual void visitCopySignF(LCopySignF* lir);
     virtual void visitRotateI64(LRotateI64* lir);
 
     void visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds* ool);
-    void visitOffsetBoundsCheck(OffsetBoundsCheck* oolCheck);
 
     void visitNegI(LNegI* lir);
     void visitNegD(LNegD* lir);
     void visitNegF(LNegF* lir);
 
     void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
 
     // SIMD operators
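
The header above keeps the existing out-of-line code pattern (OutOfLineLoadTypedArrayOutOfBounds, OutOfLineSimdFloatToIntCheck): each slow path is a small object that carries its operands and double-dispatches back into the code generator through accept(). A stand-in sketch of that shape, with illustrative names rather than the real SpiderMonkey hierarchy:

#include <iostream>

class MiniCodegen;

// Base class for a deferred slow path; accept() dispatches back to the
// code generator that knows how to emit it.
struct OutOfLineSnippet {
    virtual ~OutOfLineSnippet() = default;
    virtual void accept(MiniCodegen* cg) = 0;
};

// A slow path that simply zeroes its destination register.
struct OutOfLineZeroResult : OutOfLineSnippet {
    int destReg;
    explicit OutOfLineZeroResult(int dest) : destReg(dest) {}
    void accept(MiniCodegen* cg) override;
};

class MiniCodegen {
  public:
    void visitOutOfLineZeroResult(OutOfLineZeroResult* ool) {
        std::cout << "mov $0, r" << ool->destReg << "\n";
    }
};

void OutOfLineZeroResult::accept(MiniCodegen* cg) {
    cg->visitOutOfLineZeroResult(this);
}
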
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -322,27 +322,16 @@ LIRGeneratorX86Shared::visitAsmJSNeg(MAs
         defineReuseInput(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins, 0);
         break;
       default:
         MOZ_CRASH();
     }
 }
 
 void
-LIRGeneratorX86Shared::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
-{
-    if (!gen->needsBoundsCheckBranch(ins))
-        return;
-
-    MDefinition* index = ins->input();
-    auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(index));
-    add(lir, ins);
-}
-
-void
 LIRGeneratorX86Shared::lowerWasmLoad(MWasmLoad* ins)
 {
     MOZ_ASSERT(ins->type() != MIRType::Int64);
 
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     auto* lir = new(alloc()) LWasmLoad(useRegisterOrZeroAtStart(base));
--- a/js/src/jit/x86-shared/Lowering-x86-shared.h
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.h
@@ -41,17 +41,16 @@ class LIRGeneratorX86Shared : public LIR
                      MDefinition* rhs);
     void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
                          MDefinition* lhs, MDefinition* rhs);
     void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
                          MDefinition* lhs, MDefinition* rhs);
     void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
                                  MDefinition* lhs, MDefinition* rhs);
     void visitAsmJSNeg(MAsmJSNeg* ins);
-    void visitWasmBoundsCheck(MWasmBoundsCheck* ins);
     void lowerWasmLoad(MWasmLoad* ins);
     void visitAsmSelect(MAsmSelect* ins);
     void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
     void lowerDivI(MDiv* div);
     void lowerModI(MMod* mod);
     void lowerUDiv(MDiv* div);
     void lowerUMod(MMod* mod);
     void lowerUrshD(MUrsh* mir);
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -635,19 +635,19 @@ MacroAssembler::branchDouble(DoubleCondi
         j(Parity, label);
         return;
     }
 
     MOZ_ASSERT(!(cond & DoubleConditionBitSpecial));
     j(ConditionFromDoubleCondition(cond), label);
 }
 
-template <typename T>
+template <typename T, typename L>
 void
-MacroAssembler::branchAdd32(Condition cond, T src, Register dest, Label* label)
+MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label)
 {
     addl(src, dest);
     j(cond, label);
 }
 
 template <typename T>
 void
 MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* label)
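
branchAdd32 gains a second template parameter so the same add-and-branch helper can be aimed either at a local Label or at a process-wide wasm jump target such as OutOfBounds, which is what the new visitWasmAddOffset needs. A self-contained sketch of that overloading pattern; LocalLabel, JumpTarget and branchAddCarry are stand-ins, not the real MacroAssembler types:

#include <cstdint>
#include <iostream>

struct LocalLabel { const char* name; };
enum class JumpTarget { OutOfBounds };

void jumpTo(const LocalLabel* l) { std::cout << "jmp " << l->name << "\n"; }
void jumpTo(JumpTarget)          { std::cout << "jmp wasm OutOfBounds stub\n"; }

// Add `src` into `dest` and branch to `label` if the 32-bit addition carried,
// whichever label type the caller passes (the analogue of j(cond, label)).
template <typename L>
void branchAddCarry(uint32_t src, uint32_t& dest, L label)
{
    uint32_t before = dest;
    dest += src;
    if (dest < before)
        jumpTo(label);
}

int main()
{
    uint32_t acc = 0xFFFFFFF0u;
    LocalLabel overflow{"overflow"};
    branchAddCarry(0x20u, acc, &overflow);               // local label overload
    acc = 0xFFFFFFF0u;
    branchAddCarry(0x20u, acc, JumpTarget::OutOfBounds); // jump-target overload
}
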
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -252,149 +252,59 @@ CodeGeneratorX86::visitAsmJSUInt32ToFloa
     if (input != temp)
         masm.mov(input, temp);
 
     // Beware: convertUInt32ToFloat32 clobbers input.
     masm.convertUInt32ToFloat32(temp, output);
 }
 
 void
-CodeGeneratorX86::load(Scalar::Type accessType, const Operand& srcAddr, const LDefinition* out)
-{
-    switch (accessType) {
-      case Scalar::Int8:         masm.movsblWithPatch(srcAddr, ToRegister(out)); break;
-      case Scalar::Uint8Clamped:
-      case Scalar::Uint8:        masm.movzblWithPatch(srcAddr, ToRegister(out)); break;
-      case Scalar::Int16:        masm.movswlWithPatch(srcAddr, ToRegister(out)); break;
-      case Scalar::Uint16:       masm.movzwlWithPatch(srcAddr, ToRegister(out)); break;
-      case Scalar::Int32:
-      case Scalar::Uint32:       masm.movlWithPatch(srcAddr, ToRegister(out)); break;
-      case Scalar::Float32:      masm.vmovssWithPatch(srcAddr, ToFloatRegister(out)); break;
-      case Scalar::Float64:      masm.vmovsdWithPatch(srcAddr, ToFloatRegister(out)); break;
-      case Scalar::Float32x4:
-      case Scalar::Int8x16:
-      case Scalar::Int16x8:
-      case Scalar::Int32x4:      MOZ_CRASH("SIMD load should be handled in their own function");
-      case Scalar::Int64:        MOZ_CRASH("should be handled in a separate function (2 regs)");
-      case Scalar::MaxTypedArrayViewType: MOZ_CRASH("unexpected type");
-    }
-    masm.append(wasm::MemoryAccess(masm.size()));
-}
-
-void
-CodeGeneratorX86::loadI64(Scalar::Type type, const Operand& srcAddr, Register64 out)
-{
-    switch (type) {
-      case Scalar::Int8:
-        MOZ_ASSERT(out == Register64(edx, eax));
-        masm.movsblWithPatch(srcAddr, out.low);
-        masm.append(wasm::MemoryAccess(masm.size()));
-        masm.cdq();
-        break;
-      case Scalar::Uint8:
-        masm.movzblWithPatch(srcAddr, out.low);
-        masm.append(wasm::MemoryAccess(masm.size()));
-        masm.xorl(out.high, out.high);
-        break;
-      case Scalar::Int16:
-        MOZ_ASSERT(out == Register64(edx, eax));
-        masm.movswlWithPatch(srcAddr, out.low);
-        masm.append(wasm::MemoryAccess(masm.size()));
-        masm.cdq();
-        break;
-      case Scalar::Uint16:
-        masm.movzwlWithPatch(srcAddr, out.low);
-        masm.append(wasm::MemoryAccess(masm.size()));
-        masm.xorl(out.high, out.high);
-        break;
-      case Scalar::Int32:
-        MOZ_ASSERT(out == Register64(edx, eax));
-        masm.movlWithPatch(srcAddr, out.low);
-        masm.append(wasm::MemoryAccess(masm.size()));
-        masm.cdq();
-        break;
-      case Scalar::Uint32:
-        masm.movlWithPatch(srcAddr, out.low);
-        masm.append(wasm::MemoryAccess(masm.size()));
-        masm.xorl(out.high, out.high);
-        break;
-      case Scalar::Int64:
-        if (srcAddr.kind() == Operand::MEM_ADDRESS32) {
-            Operand low(PatchedAbsoluteAddress(uint32_t(srcAddr.address()) + INT64LOW_OFFSET));
-            Operand high(PatchedAbsoluteAddress(uint32_t(srcAddr.address()) + INT64HIGH_OFFSET));
-
-            masm.movlWithPatch(low, out.low);
-            masm.append(wasm::MemoryAccess(masm.size()));
-            masm.movlWithPatch(high, out.high);
-            masm.append(wasm::MemoryAccess(masm.size()));
-        } else {
-            MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP);
-            Address addr = srcAddr.toAddress();
-            Operand low(addr.base, addr.offset + INT64LOW_OFFSET);
-            Operand high(addr.base, addr.offset + INT64HIGH_OFFSET);
-
-            if (addr.base != out.low) {
-                masm.movlWithPatch(low, out.low);
-                masm.append(wasm::MemoryAccess(masm.size()));
-                masm.movlWithPatch(high, out.high);
-                masm.append(wasm::MemoryAccess(masm.size()));
-            } else {
-                MOZ_ASSERT(addr.base != out.high);
-                masm.movlWithPatch(high, out.high);
-                masm.append(wasm::MemoryAccess(masm.size()));
-                masm.movlWithPatch(low, out.low);
-                masm.append(wasm::MemoryAccess(masm.size()));
-            }
-        }
-        break;
-      case Scalar::Float32:
-      case Scalar::Float64:
-      case Scalar::Float32x4:
-      case Scalar::Int8x16:
-      case Scalar::Int16x8:
-      case Scalar::Int32x4:
-        MOZ_CRASH("non-int64 loads should use load()");
-      case Scalar::Uint8Clamped:
-      case Scalar::MaxTypedArrayViewType:
-        MOZ_CRASH("unexpected array type");
-    }
-}
-
-void
 CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
 {
     const MLoadTypedArrayElementStatic* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
     MOZ_ASSERT_IF(accessType == Scalar::Float32, mir->type() == MIRType::Float32);
 
     Register ptr = ToRegister(ins->ptr());
-    const LDefinition* out = ins->output();
+    AnyRegister out = ToAnyRegister(ins->output());
     OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
     uint32_t offset = mir->offset();
 
     if (mir->needsBoundsCheck()) {
         MOZ_ASSERT(offset == 0);
         if (!mir->fallible()) {
-            ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
+            ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
             addOutOfLineCode(ool, ins->mir());
         }
 
         masm.cmpPtr(ptr, ImmWord(mir->length()));
         if (ool)
             masm.j(Assembler::AboveOrEqual, ool->entry());
         else
             bailoutIf(Assembler::AboveOrEqual, ins->snapshot());
     }
 
     Operand srcAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
-    load(accessType, srcAddr, out);
+    switch (accessType) {
+      case Scalar::Int8:         masm.movsblWithPatch(srcAddr, out.gpr()); break;
+      case Scalar::Uint8:        masm.movzblWithPatch(srcAddr, out.gpr()); break;
+      case Scalar::Int16:        masm.movswlWithPatch(srcAddr, out.gpr()); break;
+      case Scalar::Uint16:       masm.movzwlWithPatch(srcAddr, out.gpr()); break;
+      case Scalar::Int32:
+      case Scalar::Uint32:       masm.movlWithPatch(srcAddr, out.gpr()); break;
+      case Scalar::Float32:      masm.vmovssWithPatch(srcAddr, out.fpu()); break;
+      case Scalar::Float64:      masm.vmovsdWithPatch(srcAddr, out.fpu()); break;
+      default:                   MOZ_CRASH("Unexpected type");
+    }
+
     if (accessType == Scalar::Float64)
-        masm.canonicalizeDouble(ToFloatRegister(out));
+        masm.canonicalizeDouble(out.fpu());
     if (accessType == Scalar::Float32)
-        masm.canonicalizeFloat(ToFloatRegister(out));
+        masm.canonicalizeFloat(out.fpu());
+
     if (ool)
         masm.bind(ool->rejoin());
 }
 
 void
 CodeGeneratorX86::emitWasmCall(LWasmCallBase* ins)
 {
     MWasmCall* mir = ins->mir();
@@ -433,87 +343,37 @@ CodeGeneratorX86::visitWasmCallI64(LWasm
 
 void
 CodeGeneratorX86::memoryBarrier(MemoryBarrierBits barrier)
 {
     if (barrier & MembarStoreLoad)
         masm.storeLoadFence();
 }
 
-void
-CodeGeneratorX86::loadSimd(Scalar::Type type, unsigned numElems, const Operand& srcAddr,
-                           FloatRegister out)
-{
-    switch (type) {
-      case Scalar::Float32x4: {
-        switch (numElems) {
-          // In memory-to-register mode, movss zeroes out the high lanes.
-          case 1: masm.vmovssWithPatch(srcAddr, out); break;
-          // See comment above, which also applies to movsd.
-          case 2: masm.vmovsdWithPatch(srcAddr, out); break;
-          case 4: masm.vmovupsWithPatch(srcAddr, out); break;
-          default: MOZ_CRASH("unexpected size for partial load");
-        }
-        break;
-      }
-      case Scalar::Int32x4: {
-        switch (numElems) {
-          // In memory-to-register mode, movd zeroes out the high lanes.
-          case 1: masm.vmovdWithPatch(srcAddr, out); break;
-          // See comment above, which also applies to movq.
-          case 2: masm.vmovqWithPatch(srcAddr, out); break;
-          case 4: masm.vmovdquWithPatch(srcAddr, out); break;
-          default: MOZ_CRASH("unexpected size for partial load");
-        }
-        break;
-      }
-      case Scalar::Int8x16:
-        MOZ_ASSERT(numElems == 16, "unexpected partial load");
-        masm.vmovdquWithPatch(srcAddr, out);
-        break;
-      case Scalar::Int16x8:
-        MOZ_ASSERT(numElems == 8, "unexpected partial load");
-        masm.vmovdquWithPatch(srcAddr, out);
-        break;
-      case Scalar::Int8:
-      case Scalar::Uint8:
-      case Scalar::Int16:
-      case Scalar::Uint16:
-      case Scalar::Int32:
-      case Scalar::Uint32:
-      case Scalar::Int64:
-      case Scalar::Float32:
-      case Scalar::Float64:
-      case Scalar::Uint8Clamped:
-      case Scalar::MaxTypedArrayViewType:
-        MOZ_CRASH("should only handle SIMD types");
-    }
-    masm.append(wasm::MemoryAccess(masm.size()));
-}
-
 template <typename T>
 void
 CodeGeneratorX86::emitWasmLoad(T* ins)
 {
     const MWasmLoad* mir = ins->mir();
-
-    Scalar::Type accessType = mir->accessType();
-    MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI");
-    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");
-    MOZ_ASSERT(mir->offset() <= INT32_MAX);
+    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
 
     const LAllocation* ptr = ins->ptr();
+
     Operand srcAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress(mir->offset()))
                       : Operand(ToRegister(ptr), mir->offset());
 
+    memoryBarrier(mir->barrierBefore());
+
     if (mir->type() == MIRType::Int64)
-        loadI64(accessType, srcAddr, ToOutRegister64(ins));
+        masm.wasmLoadI64(mir->accessType(), srcAddr, ToOutRegister64(ins));
     else
-        load(accessType, srcAddr, ins->output());
+        masm.wasmLoad(mir->accessType(), mir->numSimdElems(), srcAddr, ToAnyRegister(ins->output()));
+
+    memoryBarrier(mir->barrierAfter());
 }
 
 void
 CodeGeneratorX86::visitWasmLoad(LWasmLoad* ins)
 {
     emitWasmLoad(ins);
 }
 
@@ -523,416 +383,228 @@ CodeGeneratorX86::visitWasmLoadI64(LWasm
     emitWasmLoad(ins);
 }
 
 template <typename T>
 void
 CodeGeneratorX86::emitWasmStore(T* ins)
 {
     const MWasmStore* mir = ins->mir();
-
-    Scalar::Type accessType = mir->accessType();
-    MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI");
-    MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI");
-    MOZ_ASSERT(mir->offset() <= INT32_MAX);
+    MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit);
 
     const LAllocation* ptr = ins->ptr();
     Operand dstAddr = ptr->isBogus()
                       ? Operand(PatchedAbsoluteAddress(mir->offset()))
                       : Operand(ToRegister(ptr), mir->offset());
 
-    if (accessType == Scalar::Int64)
-        storeI64(accessType, ins->getInt64Operand(LWasmStoreI64::ValueIndex), dstAddr);
-    else
-        store(accessType, ins->getOperand(LWasmStore::ValueIndex), dstAddr);
+    memoryBarrier(mir->barrierBefore());
+
+    if (mir->accessType() == Scalar::Int64) {
+        Register64 value = ToRegister64(ins->getInt64Operand(LWasmStoreI64::ValueIndex));
+        masm.wasmStoreI64(value, dstAddr);
+    } else {
+        AnyRegister value = ToAnyRegister(ins->getOperand(LWasmStore::ValueIndex));
+        masm.wasmStore(mir->accessType(), mir->numSimdElems(), value, dstAddr);
+    }
+
+    memoryBarrier(mir->barrierAfter());
 }
 
-
 void
 CodeGeneratorX86::visitWasmStore(LWasmStore* ins)
 {
     emitWasmStore(ins);
 }
 
 void
 CodeGeneratorX86::visitWasmStoreI64(LWasmStoreI64* ins)
 {
     emitWasmStore(ins);
 }
 
 void
-CodeGeneratorX86::emitSimdLoad(LAsmJSLoadHeap* ins)
-{
-    const MAsmJSLoadHeap* mir = ins->mir();
-    Scalar::Type type = mir->accessType();
-    FloatRegister out = ToFloatRegister(ins->output());
-    const LAllocation* ptr = ins->ptr();
-    Operand srcAddr = ptr->isBogus()
-                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
-                      : Operand(ToRegister(ptr), mir->offset());
-
-    bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
-
-    unsigned numElems = mir->numSimdElems();
-    if (numElems == 3) {
-        MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
-
-        Operand srcAddrZ =
-            ptr->isBogus()
-            ? Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset()))
-            : Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());
-
-        // Load XY
-        loadSimd(type, 2, srcAddr, out);
-
-        // Load Z (W is zeroed)
-        // This is still in bounds, as we've checked with a manual bounds check
-        // or we had enough space for sure when removing the bounds check.
-        loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg);
-
-        // Move ZW atop XY
-        masm.vmovlhps(ScratchSimd128Reg, out, out);
-    } else {
-        loadSimd(type, numElems, srcAddr, out);
-    }
-
-    if (hasBoundsCheck)
-        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
-}
-
-void
 CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
 {
     const MAsmJSLoadHeap* mir = ins->mir();
-    Scalar::Type accessType = mir->accessType();
-
-    if (Scalar::isSimdType(accessType))
-        return emitSimdLoad(ins);
+    MOZ_ASSERT(mir->offset() == 0);
 
     const LAllocation* ptr = ins->ptr();
-    const LDefinition* out = ins->output();
-    Operand srcAddr = ptr->isBogus()
-                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
-                      : Operand(ToRegister(ptr), mir->offset());
+    AnyRegister out = ToAnyRegister(ins->output());
 
-    memoryBarrier(mir->barrierBefore());
+    Scalar::Type accessType = mir->accessType();
+    MOZ_ASSERT(!Scalar::isSimdType(accessType));
 
-    OutOfLineLoadTypedArrayOutOfBounds* ool;
-    DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool);
-
-    load(accessType, srcAddr, out);
+    OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
+    if (mir->needsBoundsCheck()) {
+        ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
+        addOutOfLineCode(ool, mir);
 
-    if (ool) {
-        MOZ_ASSERT(hasBoundsCheck);
-        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
-        masm.bind(ool->rejoin());
+        masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), ool->entry());
     }
 
-    memoryBarrier(mir->barrierAfter());
-}
-
-void
-CodeGeneratorX86::store(Scalar::Type accessType, const LAllocation* value, const Operand& dstAddr)
-{
-    switch (accessType) {
-      case Scalar::Int8:
-      case Scalar::Uint8Clamped:
-      case Scalar::Uint8:
-        masm.movbWithPatch(ToRegister(value), dstAddr);
-        break;
-
-      case Scalar::Int16:
-      case Scalar::Uint16:
-        masm.movwWithPatch(ToRegister(value), dstAddr);
-        break;
-
-      case Scalar::Int32:
-      case Scalar::Uint32:
-        masm.movlWithPatch(ToRegister(value), dstAddr);
-        break;
-
-      case Scalar::Float32:
-        masm.vmovssWithPatch(ToFloatRegister(value), dstAddr);
-        break;
-
-      case Scalar::Float64:
-        masm.vmovsdWithPatch(ToFloatRegister(value), dstAddr);
-        break;
-
-      case Scalar::Int64:
-        MOZ_CRASH("Should be handled in storeI64.");
-
-      case Scalar::Float32x4:
-      case Scalar::Int8x16:
-      case Scalar::Int16x8:
-      case Scalar::Int32x4:
-        MOZ_CRASH("SIMD stores should be handled in emitSimdStore");
+    Operand srcAddr = ptr->isBogus()
+                      ? Operand(PatchedAbsoluteAddress())
+                      : Operand(ToRegister(ptr), 0);
 
-      case Scalar::MaxTypedArrayViewType:
-        MOZ_CRASH("unexpected type");
-    }
-    masm.append(wasm::MemoryAccess(masm.size()));
-}
-
-void
-CodeGeneratorX86::storeI64(Scalar::Type accessType, const LInt64Allocation value,
-                           const Operand& dstAddr)
-{
-    Register64 input = ToRegister64(value);
-    MOZ_ASSERT(accessType == Scalar::Int64);
-    if (dstAddr.kind() == Operand::MEM_ADDRESS32) {
-        Operand low(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64LOW_OFFSET));
-        Operand high(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64HIGH_OFFSET));
+    masm.wasmLoad(accessType, 0, srcAddr, out);
 
-        masm.movlWithPatch(input.low, low);
-        masm.append(wasm::MemoryAccess(masm.size()));
-        masm.movlWithPatch(input.high, high);
-        masm.append(wasm::MemoryAccess(masm.size()));
-    } else {
-        MOZ_ASSERT(dstAddr.kind() == Operand::MEM_REG_DISP);
-        Address addr = dstAddr.toAddress();
-        Operand low(addr.base, addr.offset + INT64LOW_OFFSET);
-        Operand high(addr.base, addr.offset + INT64HIGH_OFFSET);
-
-        if (addr.base != input.low) {
-            masm.movlWithPatch(input.low, low);
-            masm.append(wasm::MemoryAccess(masm.size()));
-            masm.movlWithPatch(input.high, high);
-            masm.append(wasm::MemoryAccess(masm.size()));
-        } else {
-            MOZ_ASSERT(addr.base != input.high);
-            masm.movlWithPatch(input.high, high);
-            masm.append(wasm::MemoryAccess(masm.size()));
-            masm.movlWithPatch(input.low, low);
-            masm.append(wasm::MemoryAccess(masm.size()));
-        }
-    }
+    if (ool)
+        masm.bind(ool->rejoin());
 }
 
 void
 CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
 {
     MStoreTypedArrayElementStatic* mir = ins->mir();
     Scalar::Type accessType = mir->accessType();
     Register ptr = ToRegister(ins->ptr());
     const LAllocation* value = ins->value();
 
     canonicalizeIfDeterministic(accessType, value);
 
     uint32_t offset = mir->offset();
-    if (!mir->needsBoundsCheck()) {
-        Operand dstAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
-        store(accessType, value, dstAddr);
-        return;
+    MOZ_ASSERT_IF(mir->needsBoundsCheck(), offset == 0);
+
+    Label rejoin;
+    if (mir->needsBoundsCheck()) {
+        MOZ_ASSERT(offset == 0);
+        masm.cmpPtr(ptr, ImmWord(mir->length()));
+        masm.j(Assembler::AboveOrEqual, &rejoin);
     }
 
-    MOZ_ASSERT(offset == 0);
-    masm.cmpPtr(ptr, ImmWord(mir->length()));
-    Label rejoin;
-    masm.j(Assembler::AboveOrEqual, &rejoin);
-
-    Operand dstAddr(ptr, int32_t(mir->base().asValue()));
-    store(accessType, value, dstAddr);
-    masm.bind(&rejoin);
-}
-
-void
-CodeGeneratorX86::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in,
-                            const Operand& dstAddr)
-{
-    switch (type) {
-      case Scalar::Float32x4: {
-        switch (numElems) {
-          // In memory-to-register mode, movss zeroes out the high lanes.
-          case 1: masm.vmovssWithPatch(in, dstAddr); break;
-          // See comment above, which also applies to movsd.
-          case 2: masm.vmovsdWithPatch(in, dstAddr); break;
-          case 4: masm.vmovupsWithPatch(in, dstAddr); break;
-          default: MOZ_CRASH("unexpected size for partial load");
-        }
+    Operand dstAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
+    switch (accessType) {
+      case Scalar::Int8:
+      case Scalar::Uint8Clamped:
+      case Scalar::Uint8:
+        masm.movbWithPatch(ToRegister(value), dstAddr);
         break;
-      }
-      case Scalar::Int32x4: {
-        switch (numElems) {
-          // In memory-to-register mode, movd zeroes out the high lanes.
-          case 1: masm.vmovdWithPatch(in, dstAddr); break;
-          // See comment above, which also applies to movsd.
-          case 2: masm.vmovqWithPatch(in, dstAddr); break;
-          case 4: masm.vmovdquWithPatch(in, dstAddr); break;
-          default: MOZ_CRASH("unexpected size for partial load");
-        }
-        break;
-      }
-      case Scalar::Int8x16:
-        MOZ_ASSERT(numElems == 16, "unexpected partial store");
-        masm.vmovdquWithPatch(in, dstAddr);
-        break;
-      case Scalar::Int16x8:
-        MOZ_ASSERT(numElems == 8, "unexpected partial store");
-        masm.vmovdquWithPatch(in, dstAddr);
-        break;
-      case Scalar::Int8:
-      case Scalar::Uint8:
       case Scalar::Int16:
       case Scalar::Uint16:
+        masm.movwWithPatch(ToRegister(value), dstAddr);
+        break;
       case Scalar::Int32:
       case Scalar::Uint32:
-      case Scalar::Int64:
+        masm.movlWithPatch(ToRegister(value), dstAddr);
+        break;
       case Scalar::Float32:
+        masm.vmovssWithPatch(ToFloatRegister(value), dstAddr);
+        break;
       case Scalar::Float64:
-      case Scalar::Uint8Clamped:
-      case Scalar::MaxTypedArrayViewType:
-        MOZ_CRASH("should only handle SIMD types");
-    }
-    masm.append(wasm::MemoryAccess(masm.size()));
-}
-
-void
-CodeGeneratorX86::emitSimdStore(LAsmJSStoreHeap* ins)
-{
-    const MAsmJSStoreHeap* mir = ins->mir();
-    Scalar::Type type = mir->accessType();
-    FloatRegister in = ToFloatRegister(ins->value());
-    const LAllocation* ptr = ins->ptr();
-    Operand dstAddr = ptr->isBogus()
-                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
-                      : Operand(ToRegister(ptr), mir->offset());
-
-    bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr);
-
-    unsigned numElems = mir->numSimdElems();
-    if (numElems == 3) {
-        MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
-
-        Operand dstAddrZ =
-            ptr->isBogus()
-            ? Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset()))
-            : Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());
-
-        // Store XY
-        storeSimd(type, 2, in, dstAddr);
-
-        masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg);
-
-        // Store Z (W is zeroed)
-        // This is still in bounds, as we've checked with a manual bounds check
-        // or we had enough space for sure when removing the bounds check.
-        storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ);
-    } else {
-        storeSimd(type, numElems, in, dstAddr);
+        masm.vmovsdWithPatch(ToFloatRegister(value), dstAddr);
+        break;
+      default:
+        MOZ_CRASH("unexpected type");
     }
 
-    if (hasBoundsCheck)
-        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
+    if (rejoin.used())
+        masm.bind(&rejoin);
 }
 
 void
 CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
 {
     const MAsmJSStoreHeap* mir = ins->mir();
-    Scalar::Type accessType = mir->accessType();
-    const LAllocation* value = ins->value();
-
-    canonicalizeIfDeterministic(accessType, value);
-
-    if (Scalar::isSimdType(accessType))
-        return emitSimdStore(ins);
+    MOZ_ASSERT(mir->offset() == 0);
 
     const LAllocation* ptr = ins->ptr();
-    Operand dstAddr = ptr->isBogus()
-                      ? Operand(PatchedAbsoluteAddress(mir->offset()))
-                      : Operand(ToRegister(ptr), mir->offset());
+    const LAllocation* value = ins->value();
 
-    memoryBarrier(mir->barrierBefore());
-
-    Label* rejoin;
-    DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin);
+    Scalar::Type accessType = mir->accessType();
+    MOZ_ASSERT(!Scalar::isSimdType(accessType));
+    canonicalizeIfDeterministic(accessType, value);
 
-    store(accessType, value, dstAddr);
+    Operand dstAddr = ptr->isBogus()
+                      ? Operand(PatchedAbsoluteAddress())
+                      : Operand(ToRegister(ptr), 0);
 
-    if (rejoin) {
-        MOZ_ASSERT(hasBoundsCheck);
-        cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
-        masm.bind(rejoin);
-    }
+    Label rejoin;
+    if (mir->needsBoundsCheck())
+        masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), &rejoin);
 
-    memoryBarrier(mir->barrierAfter());
+    masm.wasmStore(accessType, 0, ToAnyRegister(value), dstAddr);
+
+    if (rejoin.used())
+        masm.bind(&rejoin);
 }
 
 // Perform bounds checking on the access if necessary; if it fails,
 // jump to out-of-line code that throws.  If the bounds check passes,
 // set up the heap address in addrTemp.
 
 void
-CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg,
-                                            const MWasmMemoryAccess* mir)
+CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg)
 {
-    maybeEmitWasmBoundsCheckBranch(mir, ptrReg);
-
     // Add in the actual heap pointer explicitly, to avoid opening up
     // the abstraction that is atomicBinopToTypedIntArray at this time.
     masm.movl(ptrReg, addrTemp);
-    masm.addlWithPatch(Imm32(mir->offset()), addrTemp);
+    masm.addlWithPatch(Imm32(0), addrTemp);
     masm.append(wasm::MemoryAccess(masm.size()));
 }
 
 void
 CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
 {
     MAsmJSCompareExchangeHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+
     Scalar::Type accessType = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
     Register oldval = ToRegister(ins->oldValue());
     Register newval = ToRegister(ins->newValue());
     Register addrTemp = ToRegister(ins->addrTemp());
 
-    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir);
+    asmJSAtomicComputeAddress(addrTemp, ptrReg);
 
-    Address memAddr(addrTemp, mir->offset());
+    Address memAddr(addrTemp, 0);
     masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                         memAddr,
                                         oldval,
                                         newval,
                                         InvalidReg,
                                         ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorX86::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
 {
     MAsmJSAtomicExchangeHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+
     Scalar::Type accessType = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
     Register value = ToRegister(ins->value());
     Register addrTemp = ToRegister(ins->addrTemp());
 
-    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir);
+    asmJSAtomicComputeAddress(addrTemp, ptrReg);
 
-    Address memAddr(addrTemp, mir->offset());
+    Address memAddr(addrTemp, 0);
     masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                        memAddr,
                                        value,
                                        InvalidReg,
                                        ToAnyRegister(ins->output()));
 }
 
 void
 CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 {
     MAsmJSAtomicBinopHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+
     Scalar::Type accessType = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
     Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
     Register addrTemp = ToRegister(ins->addrTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
-    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir);
+    asmJSAtomicComputeAddress(addrTemp, ptrReg);
 
-    Address memAddr(addrTemp, mir->offset());
+    Address memAddr(addrTemp, 0);
     if (value->isConstant()) {
         atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
                                    Imm32(ToInt32(value)),
                                    memAddr,
                                    temp,
                                    InvalidReg,
                                    ToAnyRegister(ins->output()));
     } else {
@@ -944,27 +616,28 @@ CodeGeneratorX86::visitAsmJSAtomicBinopH
                                    ToAnyRegister(ins->output()));
     }
 }
 
 void
 CodeGeneratorX86::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
 {
     MAsmJSAtomicBinopHeap* mir = ins->mir();
+    MOZ_ASSERT(mir->offset() == 0);
+    MOZ_ASSERT(!mir->hasUses());
+
     Scalar::Type accessType = mir->accessType();
     Register ptrReg = ToRegister(ins->ptr());
     Register addrTemp = ToRegister(ins->addrTemp());
     const LAllocation* value = ins->value();
     AtomicOp op = mir->operation();
 
-    MOZ_ASSERT(!mir->hasUses());
+    asmJSAtomicComputeAddress(addrTemp, ptrReg);
 
-    asmJSAtomicComputeAddress(addrTemp, ptrReg, mir);
-
-    Address memAddr(addrTemp, mir->offset());
+    Address memAddr(addrTemp, 0);
     if (value->isConstant())
         atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), memAddr);
     else
         atomicBinopToTypedIntArray(op, accessType, ToRegister(value), memAddr);
 }
 
 void
 CodeGeneratorX86::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
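
In the rewritten load and store paths above, an asm.js heap access is guarded by a single wasmBoundsCheck against the patched heap length, and when a plain, non-atomic load fails the check it rejoins with a zeroed integer result via OutOfLineLoadTypedArrayOutOfBounds rather than trapping. An interpreter-style sketch of that load behaviour, assuming a byte vector standing in for the linked heap base and length; asmJSLoadU32 is an illustrative name:

#include <cstdint>
#include <cstring>
#include <vector>

// Unsigned length check first; an out-of-bounds plain load yields 0, which is
// what the out-of-line path materializes before rejoining the main code.
uint32_t asmJSLoadU32(const std::vector<uint8_t>& heap, uint32_t ptr)
{
    if (heap.size() < sizeof(uint32_t) || ptr > heap.size() - sizeof(uint32_t))
        return 0;                                          // OOL path: zeroed result
    uint32_t value;
    std::memcpy(&value, heap.data() + ptr, sizeof(value)); // heapBase + ptr
    return value;
}

Stores take the symmetric path: a failed check simply skips the store and falls through to the rejoin label, as visitAsmJSStoreHeap above shows.
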
--- a/js/src/jit/x86/CodeGenerator-x86.h
+++ b/js/src/jit/x86/CodeGenerator-x86.h
@@ -23,33 +23,20 @@ class CodeGeneratorX86 : public CodeGene
         return this;
     }
 
   protected:
     ValueOperand ToValue(LInstruction* ins, size_t pos);
     ValueOperand ToOutValue(LInstruction* ins);
     ValueOperand ToTempValue(LInstruction* ins, size_t pos);
 
-    void load(Scalar::Type vt, const Operand& srcAddr, const LDefinition* out);
-    void loadI64(Scalar::Type vt, const Operand& srcAddr, const Register64 out);
-    void store(Scalar::Type vt, const LAllocation* value, const Operand& dstAddr);
-    void storeI64(Scalar::Type vt, const LInt64Allocation value, const Operand& dstAddr);
-
-    void loadSimd(Scalar::Type type, unsigned numElems, const Operand& srcAddr, FloatRegister out);
-    void emitSimdLoad(LAsmJSLoadHeap* ins);
-
-    void storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in, const Operand& dstAddr);
-    void emitSimdStore(LAsmJSStoreHeap* ins);
-
     void memoryBarrier(MemoryBarrierBits barrier);
 
-    template <typename T>
-    void emitWasmLoad(T* ins);
-    template <typename T>
-    void emitWasmStore(T* ins);
+    template <typename T> void emitWasmLoad(T* ins);
+    template <typename T> void emitWasmStore(T* ins);
 
   public:
     CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
 
   public:
     void visitBox(LBox* box);
     void visitBoxFloatingPoint(LBoxFloatingPoint* box);
     void visitUnbox(LUnbox* unbox);
@@ -97,18 +84,17 @@ class CodeGeneratorX86 : public CodeGene
     void visitClzI64(LClzI64* lir);
     void visitCtzI64(LCtzI64* lir);
     void visitNotI64(LNotI64* lir);
     void visitWasmTruncateToInt64(LWasmTruncateToInt64* lir);
     void visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir);
     void visitTestI64AndBranch(LTestI64AndBranch* lir);
 
   private:
-    void asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg,
-                                   const MWasmMemoryAccess* access);
+    void asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg);
 };
 
 typedef CodeGeneratorX86 CodeGeneratorSpecific;
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_x86_CodeGenerator_x86_h */
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -269,16 +269,40 @@ void
 LIRGeneratorX86::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins)
 {
     MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
     LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()), temp());
     define(lir, ins);
 }
 
 void
+LIRGeneratorX86::visitWasmLoad(MWasmLoad* ins)
+{
+    if (ins->type() != MIRType::Int64) {
+        lowerWasmLoad(ins);
+        return;
+    }
+
+    MDefinition* base = ins->base();
+    MOZ_ASSERT(base->type() == MIRType::Int32);
+
+    auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base));
+
+    Scalar::Type accessType = ins->accessType();
+    if (accessType == Scalar::Int8 || accessType == Scalar::Int16 || accessType == Scalar::Int32) {
+        // We use cdq to sign-extend the result and cdq demands these registers.
+        defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(edx)),
+                                                    LAllocation(AnyRegister(eax))));
+        return;
+    }
+
+    defineInt64(lir, ins);
+}
+
+void
 LIRGeneratorX86::visitWasmStore(MWasmStore* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     LAllocation baseAlloc = useRegisterOrZeroAtStart(base);
 
     LAllocation valueAlloc;
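
The visitWasmLoad lowering added above pins the int64 result to edx:eax for the sign-extending access types because the generated code uses cdq, which broadcasts the sign bit of eax into edx to form the 64-bit value. A plain C++ equivalent of that widening step; signExtendToPair is a made-up helper for illustration:

#include <cstdint>
#include <utility>

// What cdq computes: the high half is all copies of the low half's sign bit,
// so {high, low} together hold the sign-extended 64-bit value in edx:eax.
std::pair<uint32_t, uint32_t> signExtendToPair(int32_t low)
{
    uint32_t high = low < 0 ? 0xFFFFFFFFu : 0u;  // edx after cdq
    return { high, uint32_t(low) };              // {edx, eax}
}
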
@@ -316,32 +340,32 @@ LIRGeneratorX86::visitWasmStore(MWasmSto
 void
 LIRGeneratorX86::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     // For simplicity, require a register if we're going to emit a bounds-check
     // branch, so that we don't have special cases for constants.
-    LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins)
+    LAllocation baseAlloc = ins->needsBoundsCheck()
                             ? useRegisterAtStart(base)
                             : useRegisterOrZeroAtStart(base);
 
     define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
 }
 
 void
 LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
 {
     MDefinition* base = ins->base();
     MOZ_ASSERT(base->type() == MIRType::Int32);
 
     // For simplicity, require a register if we're going to emit a bounds-check
     // branch, so that we don't have special cases for constants.
-    LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins)
+    LAllocation baseAlloc = ins->needsBoundsCheck()
                             ? useRegisterAtStart(base)
                             : useRegisterOrZeroAtStart(base);
 
     LAsmJSStoreHeap* lir = nullptr;
     switch (ins->accessType()) {
       case Scalar::Int8: case Scalar::Uint8:
         // See comment for LIRGeneratorX86::useByteOpRegister.
         lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useFixed(ins->value(), eax));
@@ -567,40 +591,16 @@ void
 LIRGeneratorX86::lowerUModI64(MMod* mod)
 {
     LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
                                                     useInt64RegisterAtStart(mod->rhs()));
     defineReturn(lir, mod);
 }
 
 void
-LIRGeneratorX86::visitWasmLoad(MWasmLoad* ins)
-{
-    if (ins->type() != MIRType::Int64) {
-        lowerWasmLoad(ins);
-        return;