| author | Carsten "Tomcat" Book <cbook@mozilla.com> |
| Thu, 08 Sep 2016 12:01:17 +0200 | |
| changeset 313183 | 938ce16be25f9c551c19ef8938e8717ed3d41ff5 |
| parent 313145 | 331524df5cab6383f44d55d777b7cededdbc12c9 (current diff) |
| parent 313182 | eeaebaf00e95a0e82732a5291a0f2e9e8cbb533e (diff) |
| child 313199 | d00937cdf44d7cb55fc00b86f80c05b163b97115 |
| child 313215 | a3a5d19b2d0539c66e787e0f2f86c539fb1f6fd8 |
| child 313241 | 542b38d5eb900afb47e2f9c07cb47300bddef870 |
| push id | 30673 |
| push user | cbook@mozilla.com |
| push date | Thu, 08 Sep 2016 10:01:33 +0000 |
| treeherder | mozilla-central@938ce16be25f [default view] [failures only] |
| perfherder | [talos] [build metrics] [platform microbench] (compared to previous push) |
| reviewers | merge |
| milestone | 51.0a1 |
| first release with | nightly linux32
938ce16be25f
/
51.0a1
/
20160908030434
/
files
nightly linux64
938ce16be25f
/
51.0a1
/
20160908030434
/
files
nightly mac
938ce16be25f
/
51.0a1
/
20160908030434
/
files
nightly win32
938ce16be25f
/
51.0a1
/
20160908030434
/
files
nightly win64
938ce16be25f
/
51.0a1
/
20160908030434
/
files
|
| last release without | nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
|
| releases | nightly linux32
51.0a1
/
20160908030434
/
pushlog to previous
nightly linux64
51.0a1
/
20160908030434
/
pushlog to previous
nightly mac
51.0a1
/
20160908030434
/
pushlog to previous
nightly win32
51.0a1
/
20160908030434
/
pushlog to previous
nightly win64
51.0a1
/
20160908030434
/
pushlog to previous
|
--- a/browser/base/content/abouthome/aboutHome.js +++ b/browser/base/content/abouthome/aboutHome.js @@ -268,35 +268,35 @@ function loadSnippets() let lastUpdate = gSnippetsMap.get("snippets-last-update"); let updateURL = document.documentElement.getAttribute("snippetsURL"); let shouldUpdate = !lastUpdate || Date.now() - lastUpdate > SNIPPETS_UPDATE_INTERVAL_MS; if (updateURL && shouldUpdate) { // Try to update from network. let xhr = new XMLHttpRequest(); xhr.timeout = 5000; - try { - xhr.open("GET", updateURL, true); - } catch (ex) { - showSnippets(); - loadCompleted(); - return; - } // Even if fetching should fail we don't want to spam the server, thus // set the last update time regardless its results. Will retry tomorrow. gSnippetsMap.set("snippets-last-update", Date.now()); xhr.onloadend = function (event) { if (xhr.status == 200) { gSnippetsMap.set("snippets", xhr.responseText); gSnippetsMap.set("snippets-cached-version", currentVersion); } showSnippets(); loadCompleted(); }; - xhr.send(null); + try { + xhr.open("GET", updateURL, true); + xhr.send(null); + } catch (ex) { + showSnippets(); + loadCompleted(); + return; + } } else { showSnippets(); loadCompleted(); } } /** * Shows locally cached remote snippets, or default ones when not available.
--- a/browser/base/content/newtab/newTab.css +++ b/browser/base/content/newtab/newTab.css @@ -17,18 +17,18 @@ body { display: -moz-box; position: relative; -moz-box-flex: 1; -moz-user-focus: normal; -moz-box-orient: vertical; } input { - font: message-box !important; - font-size: 16px !important; + font: message-box; + font-size: 16px; } input[type=button] { cursor: pointer; } /* UNDO */ #newtab-undo-container {
--- a/browser/base/content/newtab/newTab.xhtml +++ b/browser/base/content/newtab/newTab.xhtml @@ -79,13 +79,15 @@ <div id="newtab-horizontal-margin"> <div class="newtab-side-margin"/> <div id="newtab-grid"> </div> <div class="newtab-side-margin"/> </div> <div id="newtab-margin-bottom"/> </div> - <input id="newtab-customize-button" type="button" dir="&locale.dir;" title="&newtab.customize.title;"/> + <input id="newtab-customize-button" type="button" dir="&locale.dir;" + value="⚙" + title="&newtab.customize.title;"/> </body> <script type="text/javascript;version=1.8" src="chrome://browser/content/contentSearchUI.js"/> <script type="text/javascript;version=1.8" src="chrome://browser/content/newtab/newTab.js"/> </html>
--- a/browser/components/originattributes/test/browser/browser.ini +++ b/browser/components/originattributes/test/browser/browser.ini @@ -14,11 +14,14 @@ support-files = test_firstParty.html test_firstParty_cookie.html test_firstParty_html_redirect.html test_firstParty_http_redirect.html test_firstParty_http_redirect.html^headers^ test_firstParty_iframe_http_redirect.html test_firstParty_postMessage.html window.html + worker_blobify.js + worker_deblobify.js [browser_firstPartyIsolation.js] [browser_localStorageIsolation.js] +[browser_blobURLIsolation.js]
new file mode 100644 --- /dev/null +++ b/browser/components/originattributes/test/browser/browser_blobURLIsolation.js @@ -0,0 +1,97 @@ +/** + * Bug 1264573 - A test case for blob url isolation. + */ + +const TEST_PAGE = "http://mochi.test:8888/browser/browser/components/" + + "originattributes/test/browser/file_firstPartyBasic.html"; +const SCRIPT_WORKER_BLOBIFY = "worker_blobify.js"; +const SCRIPT_WORKER_DEBLOBIFY = "worker_deblobify.js"; + +function page_blobify(browser, input) { + return ContentTask.spawn(browser, input, function(input) { + return { blobURL: content.URL.createObjectURL(new content.Blob([input])) }; + }); +} + +function page_deblobify(browser, blobURL) { + return ContentTask.spawn(browser, blobURL, function* (blobURL) { + if ("error" in blobURL) { + return blobURL; + } + blobURL = blobURL.blobURL; + + function blobURLtoBlob(blobURL) { + return new content.Promise(function (resolve) { + let xhr = new content.XMLHttpRequest(); + xhr.open("GET", blobURL, true); + xhr.onload = function () { + resolve(xhr.response); + }; + xhr.onerror = function () { + resolve("xhr error"); + }; + xhr.responseType = "blob"; + xhr.send(); + }); + } + + function blobToString(blob) { + return new content.Promise(function (resolve) { + let fileReader = new content.FileReader(); + fileReader.onload = function () { + resolve(fileReader.result); + }; + fileReader.readAsText(blob); + }); + } + + let blob = yield blobURLtoBlob(blobURL); + if (blob == "xhr error") { + return "xhr error"; + } + + return yield blobToString(blob); + }); +} + +function workerIO(browser, scriptFile, message) { + return ContentTask.spawn(browser, {scriptFile, message}, function* (args) { + let worker = new content.Worker(args.scriptFile); + let promise = new content.Promise(function(resolve) { + let listenFunction = function(event) { + worker.removeEventListener("message", listenFunction, false); + worker.terminate(); + resolve(event.data); + }; + worker.addEventListener("message", listenFunction, 
false); + }); + worker.postMessage(args.message); + return yield promise; + }); +} + +let worker_blobify = (browser, input) => workerIO(browser, SCRIPT_WORKER_BLOBIFY, input); +let worker_deblobify = (browser, blobURL) => workerIO(browser, SCRIPT_WORKER_DEBLOBIFY, blobURL); + +function doTest(blobify, deblobify) { + let blobURL = null; + return function* (browser) { + if (blobURL === null) { + let input = Math.random().toString(); + blobURL = yield blobify(browser, input); + return input; + } + let result = yield deblobify(browser, blobURL); + blobURL = null; + return result; + } +} + +let tests = []; +for (let blobify of [page_blobify, worker_blobify]) { + for (let deblobify of [page_deblobify, worker_deblobify]) { + tests.push(doTest(blobify, deblobify)); + } +} + +IsolationTestTools.runTests(TEST_PAGE, tests);
--- a/browser/components/originattributes/test/browser/head.js +++ b/browser/components/originattributes/test/browser/head.js @@ -254,41 +254,45 @@ this.IsolationTestTools = { /** * Run isolation tests. The framework will run tests with standard combinations * of prefs and tab settings, and checks whether the isolation is working. * * @param aURL * The URL of the page that will be tested or an object contains 'url', * the tested page, 'firstFrameSetting' for the frame setting of the first * tab, and 'secondFrameSetting' for the second tab. - * @param aGetResultFunc - * A function which is responsible for returning the isolation result back - * to the framework for further checking. This function will be provided - * the browser object of the tab, that allows modifying or fetching results - * from the page content. + * @param aGetResultFuncs + * An array of functions or a single function which are responsible for + * returning the isolation result back to the framework for further checking. + * Each of these functions will be provided the browser object of the tab, + * that allows modifying or fetchings results from the page content. * @param aCompareResultFunc * An optional function which allows modifying the way how does framework * check results. This function will be provided a boolean to indicate * the isolation is no or off and two results. This function should return * a boolean to tell that whether isolation is working. If this function * is not given, the framework will take case checking by itself. 
*/ - runTests(aURL, aGetResultFunc, aCompareResultFunc) { + runTests(aURL, aGetResultFuncs, aCompareResultFunc) { let pageURL; let firstFrameSetting; let secondFrameSetting; if (typeof aURL === "string") { pageURL = aURL; } else if (typeof aURL === "object") { pageURL = aURL.url; firstFrameSetting = aURL.firstFrameSetting; secondFrameSetting = aURL.secondFrameSetting; } + if (!Array.isArray(aGetResultFuncs)) { + aGetResultFuncs = [aGetResultFuncs]; + } + let tabSettings = [ { firstPartyDomain: "http://example.com", userContextId: 1}, { firstPartyDomain: "http://example.org", userContextId: 2} ]; this._add_task(function* (aMode) { let tabSettingA = 0; @@ -298,37 +302,39 @@ this.IsolationTestTools = { pageURL, tabSettings[tabSettingA], firstFrameSetting); let tabInfoB = yield IsolationTestTools._addTab(aMode, pageURL, tabSettings[tabSettingB], secondFrameSetting); - // Fetch results from tabs. - let resultA = yield aGetResultFunc(tabInfoA.browser); - let resultB = yield aGetResultFunc(tabInfoB.browser); + for (let getResultFunc of aGetResultFuncs) { + // Fetch results from tabs. + let resultA = yield getResultFunc(tabInfoA.browser); + let resultB = yield getResultFunc(tabInfoB.browser); + + // Compare results. + let result = false; + let shouldIsolate = (aMode !== TEST_MODE_NO_ISOLATION) && + tabSettingA !== tabSettingB; + if (aCompareResultFunc) { + result = yield aCompareResultFunc(shouldIsolate, resultA, resultB); + } else { + result = shouldIsolate ? resultA !== resultB : + resultA === resultB; + } + + let msg = `Testing ${TEST_MODE_NAMES[aMode]} for ` + + `isolation ${shouldIsolate ? "on" : "off"} with TabSettingA ` + + `${tabSettingA} and tabSettingB ${tabSettingB}` + + `, resultA = ${resultA}, resultB = ${resultB}`; + + ok(result, msg); + } // Close Tabs. yield BrowserTestUtils.removeTab(tabInfoA.tab); yield BrowserTestUtils.removeTab(tabInfoB.tab); - - // Compare results. 
- let result = false; - let shouldIsolate = (aMode !== TEST_MODE_NO_ISOLATION) && - tabSettingA !== tabSettingB; - if (aCompareResultFunc) { - result = yield aCompareResultFunc(shouldIsolate, resultA, resultB); - } else { - result = shouldIsolate ? resultA !== resultB : - resultA === resultB; - } - - let msg = `Testing ${TEST_MODE_NAMES[aMode]} for ` + - `isolation ${shouldIsolate ? "on" : "off"} with TabSettingA ` + - `${tabSettingA} and tabSettingB ${tabSettingB}`; - - ok(result, msg); } - }); } };
new file mode 100644 --- /dev/null +++ b/browser/components/originattributes/test/browser/worker_blobify.js @@ -0,0 +1,11 @@ +// Wait for a string to be posted to this worker. +// Create a blob containing this string, and then +// post back a blob URL pointing to the blob. +self.addEventListener("message", function (e) { + try { + var blobURL = URL.createObjectURL(new Blob([e.data])); + postMessage({ blobURL }); + } catch (e) { + postMessage({ error: e.message }); + } +}, false);
new file mode 100644 --- /dev/null +++ b/browser/components/originattributes/test/browser/worker_deblobify.js @@ -0,0 +1,31 @@ +// Wait for a blob URL to be posted to this worker. +// Obtain the blob, and read the string contained in it. +// Post back the string. + +var postStringInBlob = function (blobObject) { + var fileReader = new FileReaderSync(); + var result = fileReader.readAsText(blobObject); + postMessage(result); +}; + +self.addEventListener("message", function (e) { + if ("error" in e.data) { + postMessage(e.data); + return; + } + var blobURL = e.data.blobURL, + xhr = new XMLHttpRequest(); + try { + xhr.open("GET", blobURL, true); + xhr.onload = function () { + postStringInBlob(xhr.response); + }; + xhr.onerror = function () { + postMessage({ error: "xhr error" }); + }; + xhr.responseType = "blob"; + xhr.send(); + } catch (e) { + postMessage({ error: e.message }); + } +}, false);
--- a/browser/experiments/Experiments.jsm +++ b/browser/experiments/Experiments.jsm @@ -935,22 +935,16 @@ Experiments.Experiments.prototype = { /* * Helper function to make HTTP GET requests. Returns a promise that is resolved with * the responseText when the request is complete. */ _httpGetRequest: function (url) { this._log.trace("httpGetRequest(" + url + ")"); let xhr = Cc["@mozilla.org/xmlextras/xmlhttprequest;1"].createInstance(Ci.nsIXMLHttpRequest); - try { - xhr.open("GET", url); - } catch (e) { - this._log.error("httpGetRequest() - Error opening request to " + url + ": " + e); - return Promise.reject(new Error("Experiments - Error opening XHR for " + url)); - } this._networkRequest = xhr; let deferred = Promise.defer(); let log = this._log; let errorhandler = (evt) => { log.error("httpGetRequest::onError() - Error making request to " + url + ": " + evt.type); deferred.reject(new Error("Experiments - XHR error for " + url + " - " + evt.type)); @@ -967,22 +961,29 @@ Experiments.Experiments.prototype = { this._networkRequest = null; return; } deferred.resolve(xhr.responseText); this._networkRequest = null; }; - if (xhr.channel instanceof Ci.nsISupportsPriority) { - xhr.channel.priority = Ci.nsISupportsPriority.PRIORITY_LOWEST; + try { + xhr.open("GET", url); + + if (xhr.channel instanceof Ci.nsISupportsPriority) { + xhr.channel.priority = Ci.nsISupportsPriority.PRIORITY_LOWEST; + } + + xhr.timeout = MANIFEST_FETCH_TIMEOUT_MSEC; + xhr.send(null); + } catch (e) { + this._log.error("httpGetRequest() - Error opening request to " + url + ": " + e); + return Promise.reject(new Error("Experiments - Error opening XHR for " + url)); } - - xhr.timeout = MANIFEST_FETCH_TIMEOUT_MSEC; - xhr.send(null); return deferred.promise; }, /* * Path of the cache file we use in the profile. */ get _cacheFilePath() { return OS.Path.join(OS.Constants.Path.profileDir, FILE_CACHE);
--- a/browser/extensions/pocket/content/panels/css/saved.css +++ b/browser/extensions/pocket/content/panels/css/saved.css @@ -401,17 +401,17 @@ float: left; line-height: normal; height: auto; min-height: 0; min-width: 5em; padding: 3px 2px 1px; text-transform: none; } -.pkt_ext_containersaved .pkt_ext_tag_input_wrapper input::-moz-placeholder { +.pkt_ext_containersaved .pkt_ext_tag_input_wrapper input::placeholder { color: #a9a9a9; letter-spacing: normal; text-transform: none; } .pkt_ext_containersaved .input_disabled { cursor: default; opacity: 0.5; }
--- a/browser/themes/osx/browser.css +++ b/browser/themes/osx/browser.css @@ -2325,17 +2325,17 @@ html|span.ac-emphasize-text-url { } #editBookmarkPanel .expander-up > .button-box > .button-icon, #editBookmarkPanel .expander-down > .button-box > .button-icon { width: 9px; } } -#editBMPanel_tagsField > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::-moz-placeholder { +#editBMPanel_tagsField > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::placeholder { opacity: 1.0; color: #bbb; } .editBMPanel_rowLabel { text-align: end; }
--- a/browser/themes/shared/newtab/newTab.inc.css +++ b/browser/themes/shared/newtab/newTab.inc.css @@ -80,16 +80,23 @@ border: none; } .newtab-customize { height: 28px; width: 28px; } +#newtab-customize-button { + font-size: 28px; + padding: 0; + /* only display the text label when CSS backgrounds are disabled (e.g. in high contrast mode) */ + color: transparent; +} + #newtab-customize-button:-moz-any(:hover, :active, [active]) { background-image: -moz-image-rect(url(chrome://browser/skin/newtab/controls.svg), 0, 64, 32, 32); background-color: #FFFFFF; border: solid 1px #CCCCCC; border-radius: 2px; } /* CELLS */
--- a/browser/themes/windows/browser.css +++ b/browser/themes/windows/browser.css @@ -1352,18 +1352,18 @@ toolbar[brighttext] #close-button { /* let urlbar-back-button-clip-path clip the urlbar's right side for RTL */ transform: scaleX(-1); } @conditionalForwardWithUrlbar@:-moz-locale-dir(rtl) { -moz-box-direction: reverse; } -html|*.urlbar-input:-moz-lwtheme::-moz-placeholder, -.searchbar-textbox:-moz-lwtheme > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::-moz-placeholder { +html|*.urlbar-input:-moz-lwtheme::placeholder, +.searchbar-textbox:-moz-lwtheme > .autocomplete-textbox-container > .textbox-input-box > html|*.textbox-input::placeholder { opacity: 1.0; color: #777; } #urlbar-container { -moz-box-align: center; }
--- a/devtools/client/styleeditor/test/sourcemap-css/test-bootstrap-scss.css +++ b/devtools/client/styleeditor/test/sourcemap-css/test-bootstrap-scss.css @@ -1849,17 +1849,17 @@ output { border: 1px solid #ccc; border-radius: 4px; box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); transition: border-color ease-in-out 0.15s, box-shadow ease-in-out 0.15s; } .form-control:focus { border-color: #66afe9; outline: 0; box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(102, 175, 233, 0.6); } - .form-control::-moz-placeholder { + .form-control::placeholder { color: #999999; opacity: 1; } .form-control:-ms-input-placeholder { color: #999999; } .form-control::-webkit-input-placeholder { color: #999999; } .form-control[disabled], .form-control[readonly], fieldset[disabled] .form-control { cursor: not-allowed; @@ -4505,9 +4505,9 @@ body { .header, .marketing, .footer { padding-left: 0; padding-right: 0; } .header { margin-bottom: 30px; } .jumbotron { border-bottom: 300; } } -/*# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibWFpbi5jc3MiLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiIiwic291cmNlcyI6WyJtYWluLnNjc3MiXSwic291cmNlc0NvbnRlbnQiOlsiJGljb24tZm9udC1wYXRoOiBcIi4uL2Jvd2VyX2NvbXBvbmVudHMvYm9vdHN0cmFwLXNhc3Mtb2ZmaWNpYWwvdmVuZG9yL2Fzc2V0cy9mb250cy9ib290c3RyYXAvXCI7XG5cbi8vIGJvd2VyOnNjc3NcbkBpbXBvcnQgXCIuLi9ib3dlcl9jb21wb25lbnRzL2Jvb3RzdHJhcC1zYXNzLW9mZmljaWFsL3ZlbmRvci9hc3NldHMvc3R5bGVzaGVldHMvYm9vdHN0cmFwLnNjc3NcIjtcbi8vIGVuZGJvd2VyXG5cbi5icm93c2VoYXBweSB7XG4gICAgbWFyZ2luOiAwLjJlbSAwO1xuICAgIGJhY2tncm91bmQ6ICNjY2M7XG4gICAgY29sb3I6ICMwMDA7XG4gICAgcGFkZGluZzogMC4yZW0gMDtcbn1cblxuLyogU3BhY2Ugb3V0IGNvbnRlbnQgYSBiaXQgKi9cbmJvZHkge1xuICAgIHBhZGRpbmctdG9wOiAyMHB4O1xuICAgIHBhZGRpbmctYm90dG9tOiAyMHB4O1xufVxuXG4vKiBFdmVyeXRoaW5nIGJ1dCB0aGUganVtYm90cm9uIGdldHMgc2lkZSBzcGFjaW5nIGZvciBtb2JpbGUgZmlyc3Qgdmlld3MgKi9cbi5oZWFkZXIsXG4ubWFya2V0aW5nLFxuLmZvb3RlciB7XG4gICAgcGFkZGluZy1sZWZ0OiAxNXB4O1xuICAgIHBhZGRpbmctcmlnaHQ6IDE1cHg7XG59XG5cbi8qIEN1c3RvbSBwYWdlIGhlYWRlciAqL1
xuLmhlYWRlciB7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG5cbiAgICAvKiBNYWtlIHRoZSBtYXN0aGVhZCBoZWFkaW5nIHRoZSBzYW1lIGhlaWdodCBhcyB0aGUgbmF2aWdhdGlvbiAqL1xuICAgIGgzIHtcbiAgICAgICAgbWFyZ2luLXRvcDogMDtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMDtcbiAgICAgICAgbGluZS1oZWlnaHQ6IDQwcHg7XG4gICAgICAgIHBhZGRpbmctYm90dG9tOiAxOXB4O1xuICAgIH1cbn1cblxuLyogQ3VzdG9tIHBhZ2UgZm9vdGVyICovXG4uZm9vdGVyIHtcbiAgICBwYWRkaW5nLXRvcDogMTlweDtcbiAgICBjb2xvcjogIzc3NztcbiAgICBib3JkZXItdG9wOiAxcHggc29saWQgI2U1ZTVlNTtcbn1cblxuLmNvbnRhaW5lci1uYXJyb3cgPiBociB7XG4gICAgbWFyZ2luOiAzMHB4IDA7XG59XG5cbi8qIE1haW4gbWFya2V0aW5nIG1lc3NhZ2UgYW5kIHNpZ24gdXAgYnV0dG9uICovXG4uanVtYm90cm9uIHtcbiAgICB0ZXh0LWFsaWduOiBjZW50ZXI7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG4gICAgLmJ0biB7XG4gICAgICAgIGZvbnQtc2l6ZTogMjFweDtcbiAgICAgICAgcGFkZGluZzogMTRweCAyNHB4O1xuICAgIH1cbn1cblxuLyogU3VwcG9ydGluZyBtYXJrZXRpbmcgY29udGVudCAqL1xuLm1hcmtldGluZyB7XG4gICAgbWFyZ2luOiA0MHB4IDA7XG4gICAgcCArIGg0IHtcbiAgICAgICAgbWFyZ2luLXRvcDogMjhweDtcbiAgICB9XG59XG5cbi8qIFJlc3BvbnNpdmU6IFBvcnRyYWl0IHRhYmxldHMgYW5kIHVwICovXG5AbWVkaWEgc2NyZWVuIGFuZCAobWluLXdpZHRoOiA3NjhweCkge1xuICAgIC5jb250YWluZXIge1xuICAgICAgICBtYXgtd2lkdGg6IDczMHB4O1xuICAgIH1cblxuICAgIC8qIFJlbW92ZSB0aGUgcGFkZGluZyB3ZSBzZXQgZWFybGllciAqL1xuICAgIC5oZWFkZXIsXG4gICAgLm1hcmtldGluZyxcbiAgICAuZm9vdGVyIHtcbiAgICAgICAgcGFkZGluZy1sZWZ0OiAwO1xuICAgICAgICBwYWRkaW5nLXJpZ2h0OiAwO1xuICAgIH1cblxuICAgIC8qIFNwYWNlIG91dCB0aGUgbWFzdGhlYWQgKi9cbiAgICAuaGVhZGVyIHtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMzBweDtcbiAgICB9XG5cbiAgICAvKiBSZW1vdmUgdGhlIGJvdHRvbSBib3JkZXIgb24gdGhlIGp1bWJvdHJvbiBmb3IgdmlzdWFsIGVmZmVjdCAqL1xuICAgIC5qdW1ib3Ryb24ge1xuICAgICAgICBib3JkZXItYm90dG9tOiAzMDA7XG4gICAgfVxufVxuXG4vLyB0aGlzIGlzIGEgY29tbWVudC4uLlxuIl0sInNvdXJjZVJvb3QiOiIvc291cmNlLyJ9 */ \ No newline at end of file +/*# 
sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibWFpbi5jc3MiLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiIiwic291cmNlcyI6WyJtYWluLnNjc3MiXSwic291cmNlc0NvbnRlbnQiOlsiJGljb24tZm9udC1wYXRoOiBcIi4uL2Jvd2VyX2NvbXBvbmVudHMvYm9vdHN0cmFwLXNhc3Mtb2ZmaWNpYWwvdmVuZG9yL2Fzc2V0cy9mb250cy9ib290c3RyYXAvXCI7XG5cbi8vIGJvd2VyOnNjc3NcbkBpbXBvcnQgXCIuLi9ib3dlcl9jb21wb25lbnRzL2Jvb3RzdHJhcC1zYXNzLW9mZmljaWFsL3ZlbmRvci9hc3NldHMvc3R5bGVzaGVldHMvYm9vdHN0cmFwLnNjc3NcIjtcbi8vIGVuZGJvd2VyXG5cbi5icm93c2VoYXBweSB7XG4gICAgbWFyZ2luOiAwLjJlbSAwO1xuICAgIGJhY2tncm91bmQ6ICNjY2M7XG4gICAgY29sb3I6ICMwMDA7XG4gICAgcGFkZGluZzogMC4yZW0gMDtcbn1cblxuLyogU3BhY2Ugb3V0IGNvbnRlbnQgYSBiaXQgKi9cbmJvZHkge1xuICAgIHBhZGRpbmctdG9wOiAyMHB4O1xuICAgIHBhZGRpbmctYm90dG9tOiAyMHB4O1xufVxuXG4vKiBFdmVyeXRoaW5nIGJ1dCB0aGUganVtYm90cm9uIGdldHMgc2lkZSBzcGFjaW5nIGZvciBtb2JpbGUgZmlyc3Qgdmlld3MgKi9cbi5oZWFkZXIsXG4ubWFya2V0aW5nLFxuLmZvb3RlciB7XG4gICAgcGFkZGluZy1sZWZ0OiAxNXB4O1xuICAgIHBhZGRpbmctcmlnaHQ6IDE1cHg7XG59XG5cbi8qIEN1c3RvbSBwYWdlIGhlYWRlciAqL1xuLmhlYWRlciB7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG5cbiAgICAvKiBNYWtlIHRoZSBtYXN0aGVhZCBoZWFkaW5nIHRoZSBzYW1lIGhlaWdodCBhcyB0aGUgbmF2aWdhdGlvbiAqL1xuICAgIGgzIHtcbiAgICAgICAgbWFyZ2luLXRvcDogMDtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMDtcbiAgICAgICAgbGluZS1oZWlnaHQ6IDQwcHg7XG4gICAgICAgIHBhZGRpbmctYm90dG9tOiAxOXB4O1xuICAgIH1cbn1cblxuLyogQ3VzdG9tIHBhZ2UgZm9vdGVyICovXG4uZm9vdGVyIHtcbiAgICBwYWRkaW5nLXRvcDogMTlweDtcbiAgICBjb2xvcjogIzc3NztcbiAgICBib3JkZXItdG9wOiAxcHggc29saWQgI2U1ZTVlNTtcbn1cblxuLmNvbnRhaW5lci1uYXJyb3cgPiBociB7XG4gICAgbWFyZ2luOiAzMHB4IDA7XG59XG5cbi8qIE1haW4gbWFya2V0aW5nIG1lc3NhZ2UgYW5kIHNpZ24gdXAgYnV0dG9uICovXG4uanVtYm90cm9uIHtcbiAgICB0ZXh0LWFsaWduOiBjZW50ZXI7XG4gICAgYm9yZGVyLWJvdHRvbTogMXB4IHNvbGlkICNlNWU1ZTU7XG4gICAgLmJ0biB7XG4gICAgICAgIGZvbnQtc2l6ZTogMjFweDtcbiAgICAgICAgcGFkZGluZzogMTRweCAyNHB4O1xuICAgIH1cbn1cblxuLyogU3VwcG9ydGluZyBtYXJrZXRpbmcgY29udGVudCAqL1xuLm1hcmtldGluZyB7XG4gICAgbWFyZ2luOiA0MHB4IDA7XG4gICAgcCArIGg0IHtcbiAgICAgICAgbWFyZ2luLXRvcDogMjhweD
tcbiAgICB9XG59XG5cbi8qIFJlc3BvbnNpdmU6IFBvcnRyYWl0IHRhYmxldHMgYW5kIHVwICovXG5AbWVkaWEgc2NyZWVuIGFuZCAobWluLXdpZHRoOiA3NjhweCkge1xuICAgIC5jb250YWluZXIge1xuICAgICAgICBtYXgtd2lkdGg6IDczMHB4O1xuICAgIH1cblxuICAgIC8qIFJlbW92ZSB0aGUgcGFkZGluZyB3ZSBzZXQgZWFybGllciAqL1xuICAgIC5oZWFkZXIsXG4gICAgLm1hcmtldGluZyxcbiAgICAuZm9vdGVyIHtcbiAgICAgICAgcGFkZGluZy1sZWZ0OiAwO1xuICAgICAgICBwYWRkaW5nLXJpZ2h0OiAwO1xuICAgIH1cblxuICAgIC8qIFNwYWNlIG91dCB0aGUgbWFzdGhlYWQgKi9cbiAgICAuaGVhZGVyIHtcbiAgICAgICAgbWFyZ2luLWJvdHRvbTogMzBweDtcbiAgICB9XG5cbiAgICAvKiBSZW1vdmUgdGhlIGJvdHRvbSBib3JkZXIgb24gdGhlIGp1bWJvdHJvbiBmb3IgdmlzdWFsIGVmZmVjdCAqL1xuICAgIC5qdW1ib3Ryb24ge1xuICAgICAgICBib3JkZXItYm90dG9tOiAzMDA7XG4gICAgfVxufVxuXG4vLyB0aGlzIGlzIGEgY29tbWVudC4uLlxuIl0sInNvdXJjZVJvb3QiOiIvc291cmNlLyJ9 */
--- a/devtools/client/themes/common.css +++ b/devtools/client/themes/common.css @@ -551,18 +551,18 @@ checkbox:-moz-focusring { background-position: calc(100% - 8px) center; } .devtools-searchinput > .textbox-input-box > .textbox-search-icons > .textbox-search-icon, .devtools-filterinput > .textbox-input-box > .textbox-search-icons > .textbox-search-icon { visibility: hidden; } -.devtools-searchinput .textbox-input::-moz-placeholder, -.devtools-filterinput .textbox-input::-moz-placeholder { +.devtools-searchinput .textbox-input::placeholder, +.devtools-filterinput .textbox-input::placeholder { font-style: normal; } .devtools-plaininput { border-color: transparent; background-color: transparent; }
--- a/devtools/client/webide/modules/app-validator.js +++ b/devtools/client/webide/modules/app-validator.js @@ -67,22 +67,22 @@ AppValidator.checkManifest = function (m let deferred = promise.defer(); let error; let req = new XMLHttpRequest(); req.overrideMimeType("text/plain"); try { req.open("GET", manifestURL, true); + req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING; } catch (e) { error = strings.formatStringFromName("validator.invalidManifestURL", [manifestURL], 1); deferred.reject(error); return deferred.promise; } - req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING; req.onload = function () { let manifest = null; try { manifest = JSON.parse(req.responseText); } catch (e) { error = strings.formatStringFromName("validator.invalidManifestJSON", [e, manifestURL], 2); deferred.reject(error); @@ -223,22 +223,22 @@ AppValidator.prototype.validateLaunchPat deferred.resolve(); return deferred.promise; } let req = new XMLHttpRequest(); req.overrideMimeType("text/plain"); try { req.open("HEAD", indexURL, true); + req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING; } catch (e) { this.error(strings.formatStringFromName("validator.accessFailedLaunchPath", [indexURL], 1)); deferred.resolve(); return deferred.promise; } - req.channel.loadFlags |= Ci.nsIRequest.LOAD_BYPASS_CACHE | Ci.nsIRequest.INHIBIT_CACHING; req.onload = () => { if (req.status >= 400) this.error(strings.formatStringFromName("validator.accessFailedLaunchPathBadHttpCode", [indexURL, req.status], 2)); deferred.resolve(); }; req.onerror = () => { this.error(strings.formatStringFromName("validator.accessFailedLaunchPath", [indexURL], 1)); deferred.resolve();
--- a/devtools/shared/css-properties-db.js +++ b/devtools/shared/css-properties-db.js @@ -58,17 +58,17 @@ exports.ANGLE_TAKING_FUNCTIONS = ["linea var domUtils = Cc["@mozilla.org/inspector/dom-utils;1"].getService(Ci.inIDOMUtils); var json = JSON.stringify(domUtils.getCSSPseudoElementNames(), null, 0); copy(`exports.PSEUDO_ELEMENTS = ${json};\n`); console.log("The PSEUDO_ELEMENTS have been copied to your clipboard.") */ /*eslint-disable */ -exports.PSEUDO_ELEMENTS = [":after",":before",":backdrop",":first-letter",":first-line",":-moz-selection",":-moz-focus-inner",":-moz-focus-outer",":-moz-list-bullet",":-moz-list-number",":-moz-math-anonymous",":-moz-progress-bar",":-moz-range-track",":-moz-range-progress",":-moz-range-thumb",":-moz-meter-bar",":-moz-placeholder",":-moz-color-swatch"]; +exports.PSEUDO_ELEMENTS = [":after",":before",":backdrop",":first-letter",":first-line",":-moz-selection",":-moz-focus-inner",":-moz-focus-outer",":-moz-list-bullet",":-moz-list-number",":-moz-math-anonymous",":-moz-progress-bar",":-moz-range-track",":-moz-range-progress",":-moz-range-thumb",":-moz-meter-bar",":-moz-placeholder",":placeholder",":-moz-color-swatch"]; /*eslint-enable */ /** * A list of CSS Properties and their various characteristics. This is used on the * client-side when the CssPropertiesActor is not found, or when the client and server * are the same version. A single property takes the form: * * "animation": {
--- a/devtools/shared/gcli/commands/jsb.js +++ b/devtools/shared/gcli/commands/jsb.js @@ -101,35 +101,34 @@ exports.items = [ jslint_happy: args.jslintHappy, brace_style: args.braceStyle, space_before_conditional: !args.noSpaceBeforeConditional, unescape_strings: args.unescapeStrings }; let xhr = new XMLHttpRequest(); - try { - xhr.open("GET", args.url, true); - } catch(e) { - return l10n.lookup("jsbInvalidURL"); - } - let deferred = context.defer(); xhr.onreadystatechange = function() { if (xhr.readyState == 4) { if (xhr.status == 200 || xhr.status == 0) { let result = beautify.js(xhr.responseText, opts); ScratchpadManager.openScratchpad({text: result}); deferred.resolve(); } else { deferred.reject("Unable to load page to beautify: " + args.url + " " + xhr.status + " " + xhr.statusText); } }; } - xhr.send(null); + try { + xhr.open("GET", args.url, true); + xhr.send(null); + } catch(e) { + return l10n.lookup("jsbInvalidURL"); + } return deferred.promise; } } ];
--- a/dom/base/DOMException.cpp +++ b/dom/base/DOMException.cpp @@ -196,17 +196,16 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_END NS_IMPL_CI_INTERFACE_GETTER(Exception, nsIXPCException) Exception::Exception(const nsACString& aMessage, nsresult aResult, const nsACString& aName, nsIStackFrame *aLocation, nsISupports *aData) : mResult(NS_OK), - mLineNumber(0), mInitialized(false), mHoldingJSVal(false) { // A little hack... The nsIGenericModule nsIClassInfo scheme relies on there // having been at least one instance made via the factory. Otherwise, the // shared factory/classinsance object never gets created and our QI getter // for our instance's pointer to our nsIClassInfo will always return null. // This is bad because it means that wrapped exceptions will never have a @@ -218,17 +217,16 @@ Exception::Exception(const nsACString& a sEverMadeOneFromFactory = true; } Initialize(aMessage, aResult, aName, aLocation, aData); } Exception::Exception() : mResult(NS_OK), - mLineNumber(-1), mInitialized(false), mHoldingJSVal(false) { } Exception::~Exception() { if (mHoldingJSVal) { @@ -311,34 +309,34 @@ NS_IMETHODIMP Exception::GetFilename(JSContext* aCx, nsAString& aFilename) { NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED); if (mLocation) { return mLocation->GetFilename(aCx, aFilename); } - aFilename.Assign(mFilename); + aFilename.Truncate(); return NS_OK; } NS_IMETHODIMP Exception::GetLineNumber(JSContext* aCx, uint32_t *aLineNumber) { NS_ENSURE_ARG_POINTER(aLineNumber); NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED); if (mLocation) { int32_t lineno; nsresult rv = mLocation->GetLineNumber(aCx, &lineno); *aLineNumber = lineno; return rv; } - *aLineNumber = mLineNumber; + *aLineNumber = 0; return NS_OK; } NS_IMETHODIMP Exception::GetColumnNumber(uint32_t* aColumnNumber) { NS_ENSURE_ARG_POINTER(aColumnNumber); NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_INITIALIZED); @@ -479,17 +477,17 @@ Exception::LineNumber(JSContext* aCx) co if (mLocation) { int32_t lineno; if 
(NS_SUCCEEDED(mLocation->GetLineNumber(aCx, &lineno))) { return lineno; } return 0; } - return mLineNumber; + return 0; } uint32_t Exception::ColumnNumber() const { return 0; }
--- a/dom/base/DOMException.h +++ b/dom/base/DOMException.h @@ -131,18 +131,16 @@ protected: } } nsCString mMessage; nsresult mResult; nsCString mName; nsCOMPtr<nsIStackFrame> mLocation; nsCOMPtr<nsISupports> mData; - nsString mFilename; - int mLineNumber; bool mInitialized; bool mHoldingJSVal; JS::Heap<JS::Value> mThrownJSVal; private: static bool sEverMadeOneFromFactory; };
--- a/dom/base/WebSocket.cpp +++ b/dom/base/WebSocket.cpp @@ -10,17 +10,16 @@ #include "jsapi.h" #include "jsfriendapi.h" #include "mozilla/DOMEventTargetHelper.h" #include "mozilla/net/WebSocketChannel.h" #include "mozilla/dom/File.h" #include "mozilla/dom/MessageEvent.h" #include "mozilla/dom/MessageEventBinding.h" -#include "mozilla/dom/nsCSPService.h" #include "mozilla/dom/nsCSPContext.h" #include "mozilla/dom/nsCSPUtils.h" #include "mozilla/dom/ScriptSettings.h" #include "mozilla/dom/WorkerPrivate.h" #include "mozilla/dom/WorkerRunnable.h" #include "mozilla/dom/WorkerScope.h" #include "nsAutoPtr.h" #include "nsGlobalWindow.h" @@ -1563,37 +1562,38 @@ WebSocketImpl::Init(JSContext* aCx, // We crash here because we are sure that mURI is a valid URI, so either we // are OOM'ing or something else bad is happening. if (NS_WARN_IF(NS_FAILED(rv))) { MOZ_CRASH(); } } // The 'real' nsHttpChannel of the websocket gets opened in the parent. - // Since we don't serialize the CSP within child and parent we have to - // perform the CSP check here instead of AsyncOpen2(). + // Since we don't serialize the CSP within child and parent and also not + // the context, we have to perform content policy checks here instead of + // AsyncOpen2(). // Please note that websockets can't follow redirects, hence there is no // need to perform a CSP check after redirects. 
- nsCOMPtr<nsIContentPolicy> cspService = do_GetService(CSPSERVICE_CONTRACTID); - int16_t shouldLoad = nsIContentPolicy::REJECT_REQUEST; - aRv = cspService->ShouldLoad(nsIContentPolicy::TYPE_WEBSOCKET, - uri, - nullptr, // aRequestOrigin not used within CSP - originDoc, - EmptyCString(), // aMimeTypeGuess - nullptr, // aExtra - aPrincipal, - &shouldLoad); + int16_t shouldLoad = nsIContentPolicy::ACCEPT; + aRv = NS_CheckContentLoadPolicy(nsIContentPolicy::TYPE_WEBSOCKET, + uri, + aPrincipal, + originDoc, + EmptyCString(), + nullptr, + &shouldLoad, + nsContentUtils::GetContentPolicy(), + nsContentUtils::GetSecurityManager()); if (NS_WARN_IF(aRv.Failed())) { return; } if (NS_CP_REJECTED(shouldLoad)) { - // Disallowed by CSP + // Disallowed by content policy aRv.Throw(NS_ERROR_CONTENT_BLOCKED); return; } } // Potentially the page uses the CSP directive 'upgrade-insecure-requests'. // In such a case we have to upgrade ws: to wss: and also update mSecure // to reflect that upgrade. Please note that we can not upgrade from ws:
--- a/dom/base/test/test_blobURL_expiring.html +++ b/dom/base/test/test_blobURL_expiring.html @@ -19,19 +19,19 @@ onmessage = function(e) { xhr.onload = function() { is(xhr.response, "123", "Response matches!"); resolve(); } })).then(function() { document.body.removeChild(iframe); }).then(function() { var xhr = new XMLHttpRequest(); - + xhr.open("GET", blobURL); try { - xhr.open("GET", blobURL); + xhr.send(); ok(false, "The URL should be done!"); } catch(e) { ok(true, "The URL should be done!"); } SimpleTest.finish(); }); }
--- a/dom/base/test/test_blob_fragment_and_query.html +++ b/dom/base/test/test_blob_fragment_and_query.html @@ -29,30 +29,30 @@ function runTest() { var url = URL.createObjectURL(blob); ok(url, "We have a URI"); var test = tests.shift(); URL.revokeObjectURL(url + test.part); var xhr = new XMLHttpRequest(); - try { xhr.open('GET', url + test.part); - } catch(e) { - ok(test.revoke, "This should fail!"); - runTest(); - return; - } xhr.onload = function() { is(xhr.responseText, 'hello world', 'URL: ' + url + test.part); runTest(); } - xhr.send(); + try { + xhr.send(); + } catch(e) { + ok(test.revoke, "This should fail!"); + runTest(); + return; + } ok(!test.revoke, "This should succeed!"); } SimpleTest.waitForExplicitFinish(); runTest(); </script> </body>
--- a/dom/plugins/ipc/PPluginModule.ipdl +++ b/dom/plugins/ipc/PPluginModule.ipdl @@ -152,12 +152,14 @@ parent: async Profile(nsCString aProfile); // Answers to request about site data async ReturnClearSiteData(NPError aRv, uint64_t aCallbackId); async ReturnSitesWithData(nsCString[] aSites, uint64_t aCallbackId); + intr GetKeyState(int32_t aVirtKey) + returns (int16_t aState); }; } // namespace plugins } // namespace mozilla
--- a/dom/plugins/ipc/PluginModuleChild.cpp +++ b/dom/plugins/ipc/PluginModuleChild.cpp @@ -85,16 +85,19 @@ static CreateFileAPtr sCreateFileAStub = // Used with fix for flash fullscreen window loosing focus. static bool gDelayFlashFocusReplyUntilEval = false; // Used to fix GetWindowInfo problems with internal flash settings dialogs static WindowsDllInterceptor sUser32Intercept; typedef BOOL (WINAPI *GetWindowInfoPtr)(HWND hwnd, PWINDOWINFO pwi); static GetWindowInfoPtr sGetWindowInfoPtrStub = nullptr; static HWND sBrowserHwnd = nullptr; +// sandbox process doesn't get current key states. So we need get it on chrome. +typedef SHORT (WINAPI *GetKeyStatePtr)(int); +static GetKeyStatePtr sGetKeyStatePtrStub = nullptr; #endif /* static */ PluginModuleChild* PluginModuleChild::CreateForContentProcess(mozilla::ipc::Transport* aTransport, base::ProcessId aOtherPid) { PluginModuleChild* child = new PluginModuleChild(false); @@ -2061,16 +2064,30 @@ PMCGetWindowInfoHook(HWND hWnd, PWINDOWI // it's internal settings window. Post removing sub widgets for tabs, touch // this up so they get the rect they expect. // XXX potentially tie this to a specific major version? BOOL result = sGetWindowInfoPtrStub(hWnd, pwi); if (sBrowserHwnd && sBrowserHwnd == hWnd) pwi->rcWindow = pwi->rcClient; return result; } + +// static +SHORT WINAPI +PMCGetKeyState(int aVirtKey) +{ + PluginModuleChild* chromeInstance = PluginModuleChild::GetChrome(); + if (chromeInstance) { + int16_t ret = 0; + if (chromeInstance->CallGetKeyState(aVirtKey, &ret)) { + return ret; + } + } + return sGetKeyStatePtrStub(aVirtKey); +} #endif PPluginInstanceChild* PluginModuleChild::AllocPPluginInstanceChild(const nsCString& aMimeType, const uint16_t& aMode, const InfallibleTArray<nsCString>& aNames, const InfallibleTArray<nsCString>& aValues) { @@ -2081,22 +2098,28 @@ PluginModuleChild::AllocPPluginInstanceC // allocates an instance on its own. Make sure it gets the latest copy // of quirks once we have them. 
Also note, with process-per-tab, we may // have multiple PluginModuleChilds in the same plugin process, so only // initialize this once in gChromeInstance, which is a singleton. GetChrome()->InitQuirksModes(aMimeType); mQuirks = GetChrome()->mQuirks; #ifdef XP_WIN + sUser32Intercept.Init("user32.dll"); if ((mQuirks & QUIRK_FLASH_HOOK_GETWINDOWINFO) && !sGetWindowInfoPtrStub) { - sUser32Intercept.Init("user32.dll"); sUser32Intercept.AddHook("GetWindowInfo", reinterpret_cast<intptr_t>(PMCGetWindowInfoHook), (void**) &sGetWindowInfoPtrStub); } + + if ((mQuirks & QUIRK_FLASH_HOOK_GETKEYSTATE) && + !sGetKeyStatePtrStub) { + sUser32Intercept.AddHook("GetKeyState", reinterpret_cast<intptr_t>(PMCGetKeyState), + (void**) &sGetKeyStatePtrStub); + } #endif return new PluginInstanceChild(&mFunctions, aMimeType, aMode, aNames, aValues); } void PluginModuleChild::InitQuirksModes(const nsCString& aMimeType)
--- a/dom/plugins/ipc/PluginModuleParent.cpp +++ b/dom/plugins/ipc/PluginModuleParent.cpp @@ -3360,9 +3360,25 @@ PluginModuleChromeParent::RecvProfile(co } mProfile = aProfile; mGatherer->GatheredOOPProfile(); #endif return true; } - +bool +PluginModuleParent::AnswerGetKeyState(const int32_t& aVirtKey, int16_t* aRet) +{ + return false; +} + +bool +PluginModuleChromeParent::AnswerGetKeyState(const int32_t& aVirtKey, + int16_t* aRet) +{ +#if defined(XP_WIN) + *aRet = ::GetKeyState(aVirtKey); + return true; +#else + return PluginModuleParent::AnswerGetKeyState(aVirtKey, aRet); +#endif +}
--- a/dom/plugins/ipc/PluginModuleParent.h +++ b/dom/plugins/ipc/PluginModuleParent.h @@ -203,16 +203,18 @@ protected: static void TimeoutChanged(const char* aPref, void* aModule); virtual void UpdatePluginTimeout() {} virtual bool RecvNotifyContentModuleDestroyed() override { return true; } virtual bool RecvProfile(const nsCString& aProfile) override { return true; } + virtual bool AnswerGetKeyState(const int32_t& aVirtKey, int16_t* aRet) override; + virtual bool RecvReturnClearSiteData(const NPError& aRv, const uint64_t& aCallbackId) override; virtual bool RecvReturnSitesWithData(nsTArray<nsCString>&& aSites, const uint64_t& aCallbackId) override; void SetPluginFuncs(NPPluginFuncs* aFuncs); @@ -493,16 +495,19 @@ class PluginModuleChromeParent void GatheredAsyncProfile(nsIProfileSaveEvent* aSaveEvent); void StartProfiler(nsIProfilerStartParams* aParams); void StopProfiler(); #endif virtual bool RecvProfile(const nsCString& aProfile) override; + virtual bool + AnswerGetKeyState(const int32_t& aVirtKey, int16_t* aRet) override; + private: virtual void EnteredCxxStack() override; void ExitedCxxStack() override; mozilla::ipc::IProtocol* GetInvokingProtocol();
--- a/dom/plugins/ipc/PluginQuirks.cpp +++ b/dom/plugins/ipc/PluginQuirks.cpp @@ -30,16 +30,19 @@ int GetQuirksFromMimeTypeAndFilename(con quirks |= QUIRK_FLASH_RETURN_EMPTY_DOCUMENT_ORIGIN; #ifdef OS_WIN quirks |= QUIRK_WINLESS_TRACKPOPUP_HOOK; quirks |= QUIRK_FLASH_THROTTLE_WMUSER_EVENTS; quirks |= QUIRK_FLASH_HOOK_SETLONGPTR; quirks |= QUIRK_FLASH_HOOK_GETWINDOWINFO; quirks |= QUIRK_FLASH_FIXUP_MOUSE_CAPTURE; quirks |= QUIRK_WINLESS_HOOK_IME; +#if defined(_M_X64) || defined(__x86_64__) + quirks |= QUIRK_FLASH_HOOK_GETKEYSTATE; +#endif #endif } #ifdef OS_WIN // QuickTime plugin usually loaded with audio/mpeg mimetype NS_NAMED_LITERAL_CSTRING(quicktime, "npqtplugin"); if (FindInReadable(quicktime, aPluginFilename)) { quirks |= QUIRK_QUICKTIME_AVOID_SETWINDOW;
--- a/dom/plugins/ipc/PluginQuirks.h +++ b/dom/plugins/ipc/PluginQuirks.h @@ -50,16 +50,18 @@ enum PluginQuirks { // Work around a Flash bug where it fails to check the error code of a // NPN_GetValue(NPNVdocumentOrigin) call before trying to dereference // its char* output. QUIRK_FLASH_RETURN_EMPTY_DOCUMENT_ORIGIN = 1 << 10, // Win: Addresses a Unity bug with mouse capture. QUIRK_UNITY_FIXUP_MOUSE_CAPTURE = 1 << 11, // Win: Hook IMM32 API to handle IME event on windowless plugin QUIRK_WINLESS_HOOK_IME = 1 << 12, + // Win: Hook GetKeyState to get keyboard state on sandbox process + QUIRK_FLASH_HOOK_GETKEYSTATE = 1 << 13, }; int GetQuirksFromMimeTypeAndFilename(const nsCString& aMimeType, const nsCString& aPluginFilename); } /* namespace plugins */ } /* namespace mozilla */
--- a/dom/system/NetworkGeolocationProvider.js +++ b/dom/system/NetworkGeolocationProvider.js @@ -492,25 +492,25 @@ WifiGeoPositionProvider.prototype = { let xhr = Components.classes["@mozilla.org/xmlextras/xmlhttprequest;1"] .createInstance(Ci.nsIXMLHttpRequest); this.notifyListener("locationUpdatePending"); try { xhr.open("POST", url, true); + xhr.channel.loadFlags = Ci.nsIChannel.LOAD_ANONYMOUS; } catch (e) { this.notifyListener("notifyError", [POSITION_UNAVAILABLE]); return; } xhr.setRequestHeader("Content-Type", "application/json; charset=UTF-8"); xhr.responseType = "json"; xhr.mozBackgroundRequest = true; - xhr.channel.loadFlags = Ci.nsIChannel.LOAD_ANONYMOUS; xhr.timeout = Services.prefs.getIntPref("geo.wifi.xhr.timeout"); xhr.ontimeout = (function() { LOG("Location request XHR timed out.") this.notifyListener("notifyError", [POSITION_UNAVAILABLE]); }).bind(this); xhr.onerror = (function() { this.notifyListener("notifyError",
--- a/dom/xhr/XMLHttpRequestMainThread.cpp +++ b/dom/xhr/XMLHttpRequestMainThread.cpp @@ -1521,21 +1521,21 @@ XMLHttpRequestMainThread::OpenInternal(c mAuthorRequestHeaders.Clear(); ResetResponse(); // Gecko-specific mFlagHadUploadListenersOnSend = false; mFlagAborted = false; mFlagTimedOut = false; - // The channel should really be created on send(), but we have a chrome-only - // XHR.channel API which necessitates creating the channel now, while doing - // the rest of the channel-setup later at send-time. - rv = CreateChannel(); - NS_ENSURE_SUCCESS(rv, rv); + // Per spec we should only create the channel on send(), but we have internal + // code that relies on the channel being created now, and that code is not + // always IsSystemXHR(). However, we're not supposed to throw channel-creation + // errors during open(), so we silently ignore those here. + CreateChannel(); // Step 12 if (mState != State::opened) { mState = State::opened; FireReadystatechangeEvent(); } return NS_OK; @@ -2627,17 +2627,17 @@ XMLHttpRequestMainThread::InitiateFetch( // ref to us to be extra safe. mChannel->SetNotificationCallbacks(mNotificationCallbacks); mChannel = nullptr; mErrorLoad = true; // Per spec, we throw on sync errors, but not async. 
if (mFlagSynchronous) { - return rv; + return NS_ERROR_DOM_NETWORK_ERR; } } return NS_OK; } NS_IMETHODIMP XMLHttpRequestMainThread::Send(nsIVariant* aVariant) @@ -2719,29 +2719,35 @@ XMLHttpRequestMainThread::Send(nsIVarian return SendInternal(&body); } nsresult XMLHttpRequestMainThread::SendInternal(const RequestBodyBase* aBody) { NS_ENSURE_TRUE(mPrincipal, NS_ERROR_NOT_INITIALIZED); - PopulateNetworkInterfaceId(); + // Steps 1 and 2 + if (mState != State::opened || mFlagSend) { + return NS_ERROR_DOM_INVALID_STATE_ERR; + } nsresult rv = CheckInnerWindowCorrectness(); if (NS_FAILED(rv)) { return NS_ERROR_DOM_INVALID_STATE_ERR; } - if (mState != State::opened || // Step 1 - mFlagSend || // Step 2 - !mChannel) { // Gecko-specific - return NS_ERROR_DOM_INVALID_STATE_ERR; + // If open() failed to create the channel, then throw a network error + // as per spec. We really should create the channel here in send(), but + // we have internal code relying on the channel being created in open(). + if (!mChannel) { + return NS_ERROR_DOM_NETWORK_ERR; } + PopulateNetworkInterfaceId(); + // XXX We should probably send a warning to the JS console // if there are no event listeners set and we are doing // an asynchronous call. mUploadTransferred = 0; mUploadTotal = 0; // By default we don't have any upload, so mark upload complete. mUploadComplete = true; @@ -2888,17 +2894,17 @@ XMLHttpRequestMainThread::SendInternal(c DispatchProgressEvent(mUpload, ProgressEventType::loadstart, 0, mUploadTotal); } } if (!mChannel) { // Per spec, silently fail on async request failures; throw for sync. if (mFlagSynchronous) { - return NS_ERROR_FAILURE; + return NS_ERROR_DOM_NETWORK_ERR; } else { // Defer the actual sending of async events just in case listeners // are attached after the send() method is called. NS_DispatchToCurrentThread( NewRunnableMethod<ProgressEventType>(this, &XMLHttpRequestMainThread::CloseRequestWithError, ProgressEventType::error)); return NS_OK;
--- a/editor/reftests/xul/placeholder-reset.css +++ b/editor/reftests/xul/placeholder-reset.css @@ -1,8 +1,8 @@ @namespace url('http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul'); @namespace html url('http://www.w3.org/1999/xhtml'); /* We need to have a non-transparent placeholder so we can test it. */ -html|input::-moz-placeholder { +html|input::placeholder { opacity: 1.0; color: graytext; -} \ No newline at end of file +}
--- a/embedding/components/windowwatcher/nsWindowWatcher.cpp +++ b/embedding/components/windowwatcher/nsWindowWatcher.cpp @@ -1106,18 +1106,19 @@ nsWindowWatcher::OpenWindowInternal(mozI nsCOMPtr<nsIPrincipal> subjectPrincipal = nsContentUtils::GetCurrentJSContext() ? nsContentUtils::SubjectPrincipal() : nullptr; if (windowIsNew) { auto* docShell = static_cast<nsDocShell*>(newDocShell.get()); // If this is not a chrome docShell, we apply originAttributes from the - // subjectPrincipal. + // subjectPrincipal unless if it's an expanded principal. if (subjectPrincipal && + !subjectPrincipal->GetIsExpandedPrincipal() && docShell->ItemType() != nsIDocShellTreeItem::typeChrome) { DocShellOriginAttributes attrs; attrs.InheritFromDocToChildDocShell(BasePrincipal::Cast(subjectPrincipal)->OriginAttributesRef()); docShell->SetOriginAttributes(attrs); } // Now set the opener principal on the new window. Note that we need to do
--- a/gfx/2d/BezierUtils.cpp +++ b/gfx/2d/BezierUtils.cpp @@ -324,13 +324,16 @@ CalculateDistanceToEllipticArc(const Poi Float B = a * b + c * d; Float C = a * a + c * c - 1; Float S = sqrt(B * B - A * C); Float n1 = (- B + S) / A; Float n2 = (- B - S) / A; + MOZ_ASSERT(n1 >= 0); + MOZ_ASSERT(n2 >= 0); + return n1 < n2 ? n1 : n2; } } // namespace gfx } // namespace mozilla
--- a/gfx/layers/LayerTreeInvalidation.cpp +++ b/gfx/layers/LayerTreeInvalidation.cpp @@ -157,16 +157,21 @@ struct LayerPropertiesBase : public Laye { MOZ_COUNT_CTOR(LayerPropertiesBase); } ~LayerPropertiesBase() { MOZ_COUNT_DTOR(LayerPropertiesBase); } +protected: + LayerPropertiesBase(const LayerPropertiesBase& a) = delete; + LayerPropertiesBase& operator=(const LayerPropertiesBase& a) = delete; + +public: virtual nsIntRegion ComputeDifferences(Layer* aRoot, NotifySubDocInvalidationFunc aCallback, bool* aGeometryChanged); virtual void MoveBy(const IntPoint& aOffset); nsIntRegion ComputeChange(NotifySubDocInvalidationFunc aCallback, bool& aGeometryChanged) @@ -277,16 +282,21 @@ struct ContainerLayerProperties : public , mPreYScale(aLayer->GetPreYScale()) { for (Layer* child = aLayer->GetFirstChild(); child; child = child->GetNextSibling()) { child->CheckCanary(); mChildren.AppendElement(Move(CloneLayerTreePropertiesInternal(child))); } } +protected: + ContainerLayerProperties(const ContainerLayerProperties& a) = delete; + ContainerLayerProperties& operator=(const ContainerLayerProperties& a) = delete; + +public: nsIntRegion ComputeChangeInternal(NotifySubDocInvalidationFunc aCallback, bool& aGeometryChanged) override { // Make sure we got our virtual call right mSubtypeCanary.Check(); ContainerLayer* container = mLayer->AsContainerLayer(); nsIntRegion invalidOfLayer; // Invalid regions of this layer. nsIntRegion result; // Invliad regions for children only. 
@@ -434,16 +444,21 @@ struct ContainerLayerProperties : public struct ColorLayerProperties : public LayerPropertiesBase { explicit ColorLayerProperties(ColorLayer *aLayer) : LayerPropertiesBase(aLayer) , mColor(aLayer->GetColor()) , mBounds(aLayer->GetBounds()) { } +protected: + ColorLayerProperties(const ColorLayerProperties& a) = delete; + ColorLayerProperties& operator=(const ColorLayerProperties& a) = delete; + +public: virtual nsIntRegion ComputeChangeInternal(NotifySubDocInvalidationFunc aCallback, bool& aGeometryChanged) { ColorLayer* color = static_cast<ColorLayer*>(mLayer.get()); if (mColor != color->GetColor()) { aGeometryChanged = true; return NewTransformedBounds();
--- a/gfx/layers/LayerTreeInvalidation.h +++ b/gfx/layers/LayerTreeInvalidation.h @@ -27,16 +27,23 @@ typedef void (*NotifySubDocInvalidationF const nsIntRegion& aRegion); /** * A set of cached layer properties (including those of child layers), * used for comparing differences in layer trees. */ struct LayerProperties { +protected: + LayerProperties() {} + + LayerProperties(const LayerProperties& a) = delete; + LayerProperties& operator=(const LayerProperties& a) = delete; + +public: virtual ~LayerProperties() {} /** * Copies the current layer tree properties into * a new LayerProperties object. * * @param Layer tree to copy, or nullptr if we have no * initial layer tree.
--- a/image/decoders/nsPNGDecoder.cpp +++ b/image/decoders/nsPNGDecoder.cpp @@ -185,17 +185,18 @@ nsPNGDecoder::PostHasTransparencyIfNeede // CreateFrame() is used for both simple and animated images. nsresult nsPNGDecoder::CreateFrame(const FrameInfo& aFrameInfo) { MOZ_ASSERT(HasSize()); MOZ_ASSERT(!IsMetadataDecode()); // Check if we have transparency, and send notifications if needed. - auto transparency = GetTransparencyType(aFrameInfo.mFormat, aFrameInfo.mFrameRect); + auto transparency = GetTransparencyType(aFrameInfo.mFormat, + aFrameInfo.mFrameRect); PostHasTransparencyIfNeeded(transparency); SurfaceFormat format = transparency == TransparencyType::eNone ? SurfaceFormat::B8G8R8X8 : SurfaceFormat::B8G8R8A8; // Make sure there's no animation or padding if we're downscaling. MOZ_ASSERT_IF(Size() != OutputSize(), mNumFrames == 0); MOZ_ASSERT_IF(Size() != OutputSize(), !GetImageMetadata().HasAnimation()); @@ -681,23 +682,25 @@ nsPNGDecoder::info_callback(png_structp MOZ_ASSERT_UNREACHABLE("Doing downscale-during-decode " "for an animated image?"); png_error(decoder->mPNG, "Invalid downscale attempt"); // Abort decode. } } #endif if (decoder->IsMetadataDecode()) { - // If we are animated then the first frame rect is either: 1) the whole image - // if the IDAT chunk is part of the animation 2) the frame rect of the first - // fDAT chunk otherwise. If we are not animated then we want to make sure to - // call PostHasTransparency in the metadata decode if we need to. So it's okay - // to pass IntRect(0, 0, width, height) here for animated images; they will - // call with the proper first frame rect in the full decode. - auto transparency = decoder->GetTransparencyType(decoder->format, frameRect); + // If we are animated then the first frame rect is either: + // 1) the whole image if the IDAT chunk is part of the animation + // 2) the frame rect of the first fDAT chunk otherwise. 
+ // If we are not animated then we want to make sure to call + // PostHasTransparency in the metadata decode if we need to. So it's + // okay to pass IntRect(0, 0, width, height) here for animated images; + // they will call with the proper first frame rect in the full decode. + auto transparency = decoder->GetTransparencyType(decoder->format, + frameRect); decoder->PostHasTransparencyIfNeeded(transparency); // We have the metadata we're looking for, so stop here, before we allocate // buffers below. return decoder->DoTerminate(png_ptr, TerminalState::SUCCESS); } #ifdef PNG_APNG_SUPPORTED @@ -752,17 +755,18 @@ nsPNGDecoder::PostInvalidationIfNeeded() PostInvalidation(invalidRect->mInputSpaceRect, Some(invalidRect->mOutputSpaceRect)); } static NextPixel<uint32_t> PackRGBPixelAndAdvance(uint8_t*& aRawPixelInOut) { const uint32_t pixel = - gfxPackedPixel(0xFF, aRawPixelInOut[0], aRawPixelInOut[1], aRawPixelInOut[2]); + gfxPackedPixel(0xFF, aRawPixelInOut[0], aRawPixelInOut[1], + aRawPixelInOut[2]); aRawPixelInOut += 3; return AsVariant(pixel); } static NextPixel<uint32_t> PackRGBAPixelAndAdvance(uint8_t*& aRawPixelInOut) { const uint32_t pixel = @@ -826,17 +830,18 @@ nsPNGDecoder::row_callback(png_structp p // Advance to the next pass. We may have to do this multiple times because // libpng will skip passes if the image is so small that no pixels have // changed on a given pass, but ADAM7InterpolatingFilter needs to be reset // once for every pass to perform interpolation properly. decoder->mPipe.ResetToFirstRow(); decoder->mPass++; } - const png_uint_32 height = static_cast<png_uint_32>(decoder->mFrameRect.height); + const png_uint_32 height = + static_cast<png_uint_32>(decoder->mFrameRect.height); if (row_num >= height) { // Bail if we receive extra rows. This is especially important because if we // didn't, we might overflow the deinterlacing buffer. 
MOZ_ASSERT_UNREACHABLE("libpng producing extra rows?"); return; } @@ -931,17 +936,18 @@ nsPNGDecoder::DoTerminate(png_structp aP void nsPNGDecoder::DoYield(png_structp aPNGStruct) { // Pause data processing. png_process_data_pause() returns how many bytes of // the data that was passed to png_process_data() have not been consumed yet. // We use this information to tell StreamingLexer where to place us in the // input stream when we come back from the yield. - png_size_t pendingBytes = png_process_data_pause(aPNGStruct, /* save = */ false); + png_size_t pendingBytes = png_process_data_pause(aPNGStruct, + /* save = */ false); MOZ_ASSERT(pendingBytes < mLastChunkLength); size_t consumedBytes = mLastChunkLength - min(pendingBytes, mLastChunkLength); mNextTransition = Transition::ContinueUnbufferedAfterYield(State::PNG_DATA, consumedBytes); } @@ -973,20 +979,22 @@ nsPNGDecoder::frame_info_callback(png_st const IntRect frameRect(png_get_next_frame_x_offset(png_ptr, decoder->mInfo), png_get_next_frame_y_offset(png_ptr, decoder->mInfo), png_get_next_frame_width(png_ptr, decoder->mInfo), png_get_next_frame_height(png_ptr, decoder->mInfo)); const bool isInterlaced = bool(decoder->interlacebuf); #ifndef MOZ_EMBEDDED_LIBPNG // if using system library, check frame_width and height against 0 - if (frameRect.width == 0) + if (frameRect.width == 0) { png_error(png_ptr, "Frame width must not be 0"); - if (frameRect.height == 0) + } + if (frameRect.height == 0) { png_error(png_ptr, "Frame height must not be 0"); + } #endif const FrameInfo info { decoder->format, frameRect, isInterlaced }; // If the previous frame was hidden, skip the yield (which will mislead the // caller, who will think the previous frame was real) and just allocate the // new frame here. 
if (previousFrameWasHidden) { @@ -1063,17 +1071,17 @@ nsPNGDecoder::SpeedHistogram() const bool nsPNGDecoder::IsValidICO() const { // Only 32-bit RGBA PNGs are valid ICO resources; see here: // http://blogs.msdn.com/b/oldnewthing/archive/2010/10/22/10079192.aspx // If there are errors in the call to png_get_IHDR, the error_callback in // nsPNGDecoder.cpp is called. In this error callback we do a longjmp, so - // we need to save the jump buffer here. Oterwise we'll end up without a + // we need to save the jump buffer here. Otherwise we'll end up without a // proper callstack. if (setjmp(png_jmpbuf(mPNG))) { // We got here from a longjmp call indirectly from png_get_IHDR return false; } png_uint_32 png_width, // Unused
--- a/js/src/asmjs/AsmJS.cpp +++ b/js/src/asmjs/AsmJS.cpp @@ -61,16 +61,32 @@ using mozilla::Move; using mozilla::PodCopy; using mozilla::PodEqual; using mozilla::PodZero; using mozilla::PositiveInfinity; using JS::AsmJSOption; using JS::GenericNaN; /*****************************************************************************/ + +// The asm.js valid heap lengths are precisely the WASM valid heap lengths for ARM +// greater or equal to MinHeapLength +static const size_t MinHeapLength = PageSize; + +static uint32_t +RoundUpToNextValidAsmJSHeapLength(uint32_t length) +{ + if (length <= MinHeapLength) + return MinHeapLength; + + return wasm::RoundUpToNextValidARMImmediate(length); +} + + +/*****************************************************************************/ // asm.js module object // The asm.js spec recognizes this set of builtin Math functions. enum AsmJSMathBuiltinFunction { AsmJSMathBuiltin_sin, AsmJSMathBuiltin_cos, AsmJSMathBuiltin_tan, AsmJSMathBuiltin_asin, AsmJSMathBuiltin_acos, AsmJSMathBuiltin_atan, AsmJSMathBuiltin_ceil, AsmJSMathBuiltin_floor, AsmJSMathBuiltin_exp, @@ -265,16 +281,17 @@ enum class CacheResult // AsmJSMetadata is built incrementally by ModuleValidator and then shared // immutably between AsmJSModules. 
struct AsmJSMetadataCacheablePod { uint32_t numFFIs; uint32_t srcLength; uint32_t srcLengthWithRightBrace; + bool usesSimd; AsmJSMetadataCacheablePod() { PodZero(this); } }; struct js::AsmJSMetadata : Metadata, AsmJSMetadataCacheablePod { AsmJSGlobalVector asmJSGlobals; AsmJSImportVector asmJSImports; @@ -1585,16 +1602,17 @@ class MOZ_STACK_CLASS ModuleValidator LifoAlloc validationLifo_; FuncVector functions_; FuncPtrTableVector funcPtrTables_; GlobalMap globalMap_; SigMap sigMap_; ImportMap importMap_; ArrayViewVector arrayViews_; bool atomicsPresent_; + bool simdPresent_; // State used to build the AsmJSModule in finish(): ModuleGenerator mg_; MutableAsmJSMetadata asmJSMetadata_; // Error reporting: UniqueChars errorString_; uint32_t errorOffset_; @@ -1664,16 +1682,17 @@ class MOZ_STACK_CLASS ModuleValidator validationLifo_(VALIDATION_LIFO_DEFAULT_CHUNK_SIZE), functions_(cx), funcPtrTables_(cx), globalMap_(cx), sigMap_(cx), importMap_(cx), arrayViews_(cx), atomicsPresent_(false), + simdPresent_(false), mg_(ImportVector()), errorString_(nullptr), errorOffset_(UINT32_MAX), errorOverRecursed_(false) {} ~ModuleValidator() { if (errorString_) { @@ -1985,32 +2004,36 @@ class MOZ_STACK_CLASS ModuleValidator if (!globalMap_.putNew(var, global)) return false; AsmJSGlobal g(AsmJSGlobal::AtomicsBuiltinFunction, Move(fieldChars)); g.pod.u.atomicsBuiltinFunc_ = func; return asmJSMetadata_->asmJSGlobals.append(Move(g)); } bool addSimdCtor(PropertyName* var, SimdType type, PropertyName* field) { + simdPresent_ = true; + UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field); if (!fieldChars) return false; Global* global = validationLifo_.new_<Global>(Global::SimdCtor); if (!global) return false; global->u.simdCtorType_ = type; if (!globalMap_.putNew(var, global)) return false; AsmJSGlobal g(AsmJSGlobal::SimdCtor, Move(fieldChars)); g.pod.u.simdCtorType_ = type; return asmJSMetadata_->asmJSGlobals.append(Move(g)); } bool addSimdOperation(PropertyName* var, SimdType type, 
SimdOperation op, PropertyName* field) { + simdPresent_ = true; + UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field); if (!fieldChars) return false; Global* global = validationLifo_.new_<Global>(Global::SimdOp); if (!global) return false; global->u.simdOp.type_ = type; @@ -2279,16 +2302,18 @@ class MOZ_STACK_CLASS ModuleValidator } bool finishFunctionBodies() { return mg_.finishFuncDefs(); } SharedModule finish() { if (!arrayViews_.empty()) mg_.initMemoryUsage(atomicsPresent_ ? MemoryUsage::Shared : MemoryUsage::Unshared); + asmJSMetadata_->usesSimd = simdPresent_; + MOZ_ASSERT(asmJSMetadata_->asmJSFuncNames.empty()); for (const Func* func : functions_) { CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func->name()); if (!funcName || !asmJSMetadata_->asmJSFuncNames.emplaceBack(Move(funcName))) return nullptr; } uint32_t endBeforeCurly = tokenStream().currentToken().pos.end; @@ -5559,35 +5584,37 @@ CheckSimdOperationCall(FunctionValidator case SimdOperation::Fn_swizzle: return CheckSimdSwizzle(f, call, opType, type); case SimdOperation::Fn_shuffle: return CheckSimdShuffle(f, call, opType, type); case SimdOperation::Fn_load: case SimdOperation::Fn_load1: case SimdOperation::Fn_load2: - case SimdOperation::Fn_load3: return CheckSimdLoad(f, call, opType, op, type); case SimdOperation::Fn_store: case SimdOperation::Fn_store1: case SimdOperation::Fn_store2: - case SimdOperation::Fn_store3: return CheckSimdStore(f, call, opType, op, type); case SimdOperation::Fn_select: return CheckSimdSelect(f, call, opType, type); case SimdOperation::Fn_splat: return CheckSimdSplat(f, call, opType, type); case SimdOperation::Fn_allTrue: return CheckSimdAllTrue(f, call, opType, type); case SimdOperation::Fn_anyTrue: return CheckSimdAnyTrue(f, call, opType, type); + case SimdOperation::Fn_load3: + case SimdOperation::Fn_store3: + return f.fail(call, "asm.js does not support 3-element SIMD loads or stores"); + case SimdOperation::Constructor: MOZ_CRASH("constructors are 
handled in CheckSimdCtorCall"); case SimdOperation::Fn_fromFloat64x2Bits: MOZ_CRASH("NYI"); } MOZ_CRASH("unexpected simd operation in CheckSimdOperationCall"); } @@ -7805,19 +7832,31 @@ CheckBuffer(JSContext* cx, const AsmJSMe memoryLength, metadata.minMemoryLength)); if (!msg) return false; return LinkFail(cx, msg.get()); } if (buffer->is<ArrayBufferObject>()) { - Rooted<ArrayBufferObject*> abheap(cx, &buffer->as<ArrayBufferObject>()); - if (!ArrayBufferObject::prepareForAsmJS(cx, abheap)) + // On 64-bit, bounds checks are statically removed so the huge guard + // region is always necessary. On 32-bit, allocating a guard page + // requires reallocating the incoming ArrayBuffer which could trigger + // OOM. Thus, only ask for a guard page when SIMD is used since SIMD + // allows unaligned memory access (see MaxMemoryAccessSize comment); +#ifdef WASM_HUGE_MEMORY + bool needGuard = true; +#else + bool needGuard = metadata.usesSimd; +#endif + Rooted<ArrayBufferObject*> arrayBuffer(cx, &buffer->as<ArrayBufferObject>()); + if (!ArrayBufferObject::prepareForAsmJS(cx, arrayBuffer, needGuard)) return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use"); + + MOZ_ASSERT(arrayBuffer->isAsmJSMalloced() || arrayBuffer->isWasmMapped()); } else { if (!buffer->as<SharedArrayBufferObject>().isPreparedForAsmJS()) return LinkFail(cx, "SharedArrayBuffer must be created with wasm test mode enabled"); } return true; } @@ -8822,29 +8861,16 @@ js::AsmJSFunctionToString(JSContext* cx, return nullptr; if (!out.append(src)) return nullptr; } return out.finishString(); } -// The asm.js valid heap lengths are precisely the WASM valid heap lengths for ARM -// greater or equal to MinHeapLength -static const size_t MinHeapLength = PageSize; - bool js::IsValidAsmJSHeapLength(uint32_t length) { if (length < MinHeapLength) return false; - return wasm::IsValidARMLengthImmediate(length); -} - -uint32_t -js::RoundUpToNextValidAsmJSHeapLength(uint32_t length) -{ - if (length <= MinHeapLength) - 
return MinHeapLength; - - return wasm::RoundUpToNextValidARMLengthImmediate(length); -} + return wasm::IsValidARMImmediate(length); +}
--- a/js/src/asmjs/AsmJS.h +++ b/js/src/asmjs/AsmJS.h @@ -79,14 +79,11 @@ AsmJSFunctionToString(JSContext* cx, Han extern JSString* AsmJSModuleToString(JSContext* cx, HandleFunction fun, bool addParenToLambda); // asm.js heap: extern bool IsValidAsmJSHeapLength(uint32_t length); -extern uint32_t -RoundUpToNextValidAsmJSHeapLength(uint32_t length); - } // namespace js #endif // asmjs_asmjs_h
--- a/js/src/asmjs/WasmBaselineCompile.cpp +++ b/js/src/asmjs/WasmBaselineCompile.cpp @@ -2899,134 +2899,37 @@ class BaseCompiler MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarF64"); #endif } ////////////////////////////////////////////////////////////////////// // // Heap access. - // TODO / CLEANUP - cloned from MIRGraph.cpp, should share. - - bool needsBoundsCheckBranch(const MWasmMemoryAccess& access) const { - // A heap access needs a bounds-check branch if we're not relying on signal - // handlers to catch errors, and if it's not proven to be within bounds. - // We use signal-handlers on x64, but on x86 there isn't enough address - // space for a guard region. Also, on x64 the atomic loads and stores - // can't (yet) use the signal handlers. - -#ifdef WASM_HUGE_MEMORY - return false; -#else - return access.needsBoundsCheck(); -#endif - } - - bool throwOnOutOfBounds(const MWasmMemoryAccess& access) { - return !isCompilingAsmJS(); - } - - // For asm.js code only: If we have a non-zero offset, it's possible that - // |ptr| itself is out of bounds, while adding the offset computes an - // in-bounds address. To catch this case, we need a second branch, which we - // emit out of line since it's unlikely to be needed in normal programs. - // For this, we'll generate an OffsetBoundsCheck OOL stub. - - bool needsOffsetBoundsCheck(const MWasmMemoryAccess& access) const { - return isCompilingAsmJS() && access.offset() != 0; - } - -#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) - class OffsetBoundsCheck : public OutOfLineCode - { - Label* maybeOutOfBounds; - Register ptrReg; - int32_t offset; - - public: - OffsetBoundsCheck(Label* maybeOutOfBounds, Register ptrReg, int32_t offset) - : maybeOutOfBounds(maybeOutOfBounds), - ptrReg(ptrReg), - offset(offset) - {} - - void generate(MacroAssembler& masm) { - // asm.js code only: - // - // The access is heap[ptr + offset]. The inline code checks that - // ptr < heap.length - offset. We get here when that fails. 
We need to check - // for the case where ptr + offset >= 0, in which case the access is still - // in bounds. - - MOZ_ASSERT(offset != 0, - "An access without a constant offset doesn't need a separate " - "OffsetBoundsCheck"); - masm.cmp32(ptrReg, Imm32(-uint32_t(offset))); - if (maybeOutOfBounds) - masm.j(Assembler::Below, maybeOutOfBounds); - else - masm.j(Assembler::Below, wasm::JumpTarget::OutOfBounds); - -# ifdef JS_CODEGEN_X64 - // In order to get the offset to wrap properly, we must sign-extend the - // pointer to 32-bits. We'll zero out the sign extension immediately - // after the access to restore asm.js invariants. - masm.movslq(ptrReg, ptrReg); -# endif - - masm.jmp(rejoin()); - } - }; - - // CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch() - - MOZ_MUST_USE - bool emitBoundsCheckBranch(const MWasmMemoryAccess& access, RegI32 ptr, Label* maybeFail) { - Label* pass = nullptr; - - if (needsOffsetBoundsCheck(access)) { - auto* oolCheck = new(alloc_) OffsetBoundsCheck(maybeFail, ptr.reg, access.offset()); - maybeFail = oolCheck->entry(); - pass = oolCheck->rejoin(); - if (!addOutOfLineCode(oolCheck)) - return false; - } - - // The bounds check is a comparison with an immediate value. The asm.js - // module linking process will add the length of the heap to the immediate - // field, so -access->endOffset() will turn into - // (heapLength - access->endOffset()), allowing us to test whether the end - // of the access is beyond the end of the heap. 
- MOZ_ASSERT(access.endOffset() >= 1, - "need to subtract 1 to use JAE, see also AssemblerX86Shared::UpdateBoundsCheck"); - - uint32_t cmpOffset = masm.cmp32WithPatch(ptr.reg, Imm32(1 - access.endOffset())).offset(); - if (maybeFail) - masm.j(Assembler::AboveOrEqual, maybeFail); - else - masm.j(Assembler::AboveOrEqual, wasm::JumpTarget::OutOfBounds); - - if (pass) - masm.bind(pass); - - masm.append(wasm::BoundsCheck(cmpOffset)); - return true; - } - - class OutOfLineLoadTypedArrayOOB : public OutOfLineCode + // Return true only for real asm.js (HEAP[i>>2]|0) accesses which have the + // peculiar property of not throwing on out-of-bounds. Everything else + // (wasm, SIMD.js, Atomics) throws on out-of-bounds. + bool isAsmJSAccess(const MWasmMemoryAccess& access) { + return isCompilingAsmJS() && !access.isSimdAccess() && !access.isAtomicAccess(); + } + +#ifndef WASM_HUGE_MEMORY + class AsmJSLoadOOB : public OutOfLineCode { Scalar::Type viewType; AnyRegister dest; + public: - OutOfLineLoadTypedArrayOOB(Scalar::Type viewType, AnyRegister dest) + AsmJSLoadOOB(Scalar::Type viewType, AnyRegister dest) : viewType(viewType), dest(dest) {} void generate(MacroAssembler& masm) { +#if defined(JS_CODEGEN_X86) switch (viewType) { case Scalar::Float32x4: case Scalar::Int32x4: case Scalar::Int8x16: case Scalar::Int16x8: case Scalar::MaxTypedArrayViewType: MOZ_CRASH("unexpected array type"); case Scalar::Float32: @@ -3043,252 +2946,126 @@ class BaseCompiler case Scalar::Uint32: case Scalar::Uint8Clamped: masm.movePtr(ImmWord(0), dest.gpr()); break; case Scalar::Int64: MOZ_CRASH("unexpected array type"); } masm.jump(rejoin()); +#else + Unused << viewType; + Unused << dest; + MOZ_CRASH("Compiler bug: Unexpected platform."); +#endif } }; - - MOZ_MUST_USE - bool maybeEmitLoadBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr, AnyRegister dest, - OutOfLineCode** ool) - { - *ool = nullptr; - if (!needsBoundsCheckBranch(access)) - return true; - - if (throwOnOutOfBounds(access)) - 
return emitBoundsCheckBranch(access, ptr, nullptr); - - // TODO / MEMORY: We'll allocate *a lot* of these OOL objects, - // thus risking OOM on a platform that is already - // memory-constrained. We could opt to allocate this path - // in-line instead. - *ool = new (alloc_) OutOfLineLoadTypedArrayOOB(access.accessType(), dest); - if (!addOutOfLineCode(*ool)) - return false; - - return emitBoundsCheckBranch(access, ptr, (*ool)->entry()); - } - +#endif + + private: + void checkOffset(MWasmMemoryAccess* access, RegI32 ptr) { + if (access->offset() >= OffsetGuardLimit) { + masm.branchAdd32(Assembler::CarrySet, + Imm32(access->offset()), ptr.reg, + JumpTarget::OutOfBounds); + access->clearOffset(); + } + } + + public: MOZ_MUST_USE - bool maybeEmitStoreBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr, Label** rejoin) { - *rejoin = nullptr; - if (!needsBoundsCheckBranch(access)) - return true; - - if (throwOnOutOfBounds(access)) - return emitBoundsCheckBranch(access, ptr, nullptr); - - *rejoin = newLabel(); - if (!*rejoin) - return false; - - return emitBoundsCheckBranch(access, ptr, *rejoin); - } - - void cleanupAfterBoundsCheck(const MWasmMemoryAccess& access, RegI32 ptr) { -# ifdef JS_CODEGEN_X64 - if (needsOffsetBoundsCheck(access)) { - // Zero out the high 32 bits, in case the OffsetBoundsCheck code had to - // sign-extend (movslq) the pointer value to get wraparound to work. 
- masm.movl(ptr.reg, ptr.reg); - } -# endif - } - - MOZ_MUST_USE - bool loadHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg dest) { - if (access.offset() > INT32_MAX) { - masm.jump(wasm::JumpTarget::OutOfBounds); - return true; - } + bool load(MWasmMemoryAccess access, RegI32 ptr, AnyReg dest) { + checkOffset(&access, ptr); OutOfLineCode* ool = nullptr; - if (!maybeEmitLoadBoundsCheck(access, ptr, dest.any(), &ool)) - return false; +#ifndef WASM_HUGE_MEMORY + if (isAsmJSAccess(access)) { + ool = new (alloc_) AsmJSLoadOOB(access.accessType(), dest.any()); + if (!addOutOfLineCode(ool)) + return false; + + masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, ool->entry()); + } else { + masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, JumpTarget::OutOfBounds); + } +#endif # if defined(JS_CODEGEN_X64) Operand srcAddr(HeapReg, ptr.reg, TimesOne, access.offset()); uint32_t before = masm.size(); - if (dest.tag == AnyReg::I64) { - Register out = dest.i64().reg.reg; - switch (access.accessType()) { - case Scalar::Int8: masm.movsbq(srcAddr, out); break; - case Scalar::Uint8: masm.movzbq(srcAddr, out); break; - case Scalar::Int16: masm.movswq(srcAddr, out); break; - case Scalar::Uint16: masm.movzwq(srcAddr, out); break; - case Scalar::Int32: masm.movslq(srcAddr, out); break; - // Int32 to int64 moves zero-extend by default. 
- case Scalar::Uint32: masm.movl(srcAddr, out); break; - case Scalar::Int64: masm.movq(srcAddr, out); break; - default: - MOZ_CRASH("Compiler bug: Unexpected array type in int64 load"); - } - } else { - switch (access.accessType()) { - case Scalar::Int8: masm.movsbl(srcAddr, dest.i32().reg); break; - case Scalar::Uint8: masm.movzbl(srcAddr, dest.i32().reg); break; - case Scalar::Int16: masm.movswl(srcAddr, dest.i32().reg); break; - case Scalar::Uint16: masm.movzwl(srcAddr, dest.i32().reg); break; - case Scalar::Int32: - case Scalar::Uint32: masm.movl(srcAddr, dest.i32().reg); break; - case Scalar::Float32: masm.loadFloat32(srcAddr, dest.f32().reg); break; - case Scalar::Float64: masm.loadDouble(srcAddr, dest.f64().reg); break; - default: - MOZ_CRASH("Compiler bug: Unexpected array type"); - } - } - - if (isCompilingAsmJS()) - masm.append(MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset)); - // TODO: call verifyHeapAccessDisassembly somehow + if (dest.tag == AnyReg::I64) + masm.wasmLoadI64(access.accessType(), srcAddr, dest.i64().reg); + else + masm.wasmLoad(access.accessType(), 0, srcAddr, dest.any()); + + if (isAsmJSAccess(access)) + masm.append(MemoryAccess(before)); # elif defined(JS_CODEGEN_X86) Operand srcAddr(ptr.reg, access.offset()); - if (dest.tag == AnyReg::I64) - MOZ_CRASH("Not implemented: I64 support"); - - bool mustMove = access.byteSize() == 1 && !singleByteRegs_.has(dest.i32().reg); - switch (access.accessType()) { - case Scalar::Int8: - case Scalar::Uint8: { - Register rd = mustMove ? 
ScratchRegX86 : dest.i32().reg; - if (access.accessType() == Scalar::Int8) - masm.movsblWithPatch(srcAddr, rd); - else - masm.movzblWithPatch(srcAddr, rd); - break; - } - case Scalar::Int16: masm.movswlWithPatch(srcAddr, dest.i32().reg); break; - case Scalar::Uint16: masm.movzwlWithPatch(srcAddr, dest.i32().reg); break; - case Scalar::Int32: - case Scalar::Uint32: masm.movlWithPatch(srcAddr, dest.i32().reg); break; - case Scalar::Float32: masm.vmovssWithPatch(srcAddr, dest.f32().reg); break; - case Scalar::Float64: masm.vmovsdWithPatch(srcAddr, dest.f64().reg); break; - default: - MOZ_CRASH("Compiler bug: Unexpected array type"); - } - uint32_t after = masm.size(); - if (mustMove) + bool byteRegConflict = access.byteSize() == 1 && !singleByteRegs_.has(dest.i32().reg); + AnyRegister out = byteRegConflict ? AnyRegister(ScratchRegX86) : dest.any(); + + masm.wasmLoad(access.accessType(), 0, srcAddr, out); + + if (byteRegConflict) masm.mov(ScratchRegX86, dest.i32().reg); - - masm.append(wasm::MemoryAccess(after)); - // TODO: call verifyHeapAccessDisassembly somehow # else MOZ_CRASH("Compiler bug: Unexpected platform."); # endif - if (ool) { - cleanupAfterBoundsCheck(access, ptr); + if (ool) masm.bind(ool->rejoin()); - } return true; } MOZ_MUST_USE - bool storeHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg src) { - if (access.offset() > INT32_MAX) { - masm.jump(wasm::JumpTarget::OutOfBounds); - return true; - } - - Label* rejoin = nullptr; - if (!maybeEmitStoreBoundsCheck(access, ptr, &rejoin)) - return false; - + bool store(MWasmMemoryAccess access, RegI32 ptr, AnyReg src) { + checkOffset(&access, ptr); + + Label rejoin; +#ifndef WASM_HUGE_MEMORY + if (isAsmJSAccess(access)) + masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, &rejoin); + else + masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, JumpTarget::OutOfBounds); +#endif + + // Emit the store # if defined(JS_CODEGEN_X64) Operand dstAddr(HeapReg, ptr.reg, TimesOne, access.offset()); - Register 
intReg = Register::Invalid(); - if (src.tag == AnyReg::I32) - intReg = src.i32().reg; - else if (src.tag == AnyReg::I64) - intReg = src.i64().reg.reg; - uint32_t before = masm.size(); - switch (access.accessType()) { - case Scalar::Int8: - case Scalar::Uint8: masm.movb(intReg, dstAddr); break; - case Scalar::Int16: - case Scalar::Uint16: masm.movw(intReg, dstAddr); break; - case Scalar::Int32: - case Scalar::Uint32: masm.movl(intReg, dstAddr); break; - case Scalar::Int64: masm.movq(intReg, dstAddr); break; - case Scalar::Float32: masm.storeFloat32(src.f32().reg, dstAddr); break; - case Scalar::Float64: masm.storeDouble(src.f64().reg, dstAddr); break; - default: - MOZ_CRASH("Compiler bug: Unexpected array type"); - } + masm.wasmStore(access.accessType(), 0, src.any(), dstAddr); if (isCompilingAsmJS()) - masm.append(MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset)); - // TODO: call verifyHeapAccessDisassembly somehow + masm.append(MemoryAccess(before)); # elif defined(JS_CODEGEN_X86) Operand dstAddr(ptr.reg, access.offset()); - if (src.tag == AnyReg::I64) - MOZ_CRASH("Not implemented: I64 support"); - - bool didMove = false; + AnyRegister value; if (access.byteSize() == 1 && !singleByteRegs_.has(src.i32().reg)) { - didMove = true; masm.mov(src.i32().reg, ScratchRegX86); + value = AnyRegister(ScratchRegX86); + } else { + value = src.any(); } - switch (access.accessType()) { - case Scalar::Int8: - case Scalar::Uint8: { - Register rs = src.i32().reg; - Register rt = didMove ? 
ScratchRegX86 : rs; - masm.movbWithPatch(rt, dstAddr); - break; - } - case Scalar::Int16: - case Scalar::Uint16: masm.movwWithPatch(src.i32().reg, dstAddr); break; - case Scalar::Int32: - case Scalar::Uint32: masm.movlWithPatch(src.i32().reg, dstAddr); break; - case Scalar::Float32: masm.vmovssWithPatch(src.f32().reg, dstAddr); break; - case Scalar::Float64: masm.vmovsdWithPatch(src.f64().reg, dstAddr); break; - default: - MOZ_CRASH("Compiler bug: Unexpected array type"); - } - uint32_t after = masm.size(); - - masm.append(wasm::MemoryAccess(after)); - // TODO: call verifyHeapAccessDisassembly somehow + + masm.wasmStore(access.accessType(), 0, value, dstAddr); # else MOZ_CRASH("Compiler bug: unexpected platform"); # endif - if (rejoin) { - cleanupAfterBoundsCheck(access, ptr); - masm.bind(rejoin); - } + if (rejoin.used()) + masm.bind(&rejoin); + return true; } -#else - - MOZ_MUST_USE - bool loadHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg dest) { - MOZ_CRASH("BaseCompiler platform hook: loadHeap"); - } - - MOZ_MUST_USE - bool storeHeap(const MWasmMemoryAccess& access, RegI32 ptr, AnyReg src) { - MOZ_CRASH("BaseCompiler platform hook: storeHeap"); - } - -#endif - //////////////////////////////////////////////////////////// // Generally speaking, ABOVE this point there should be no value // stack manipulation (calls to popI32 etc). // Generally speaking, BELOW this point there should be no // platform dependencies. We make an exception for x86 register // targeting, which is not too hard to keep clean. @@ -5727,50 +5504,50 @@ BaseCompiler::emitLoad(ValType type, Sca // TODO / OPTIMIZE: Disable bounds checking on constant accesses // below the minimum heap length. 
MWasmMemoryAccess access(viewType, addr.align, addr.offset); switch (type) { case ValType::I32: { RegI32 rp = popI32(); - if (!loadHeap(access, rp, AnyReg(rp))) + if (!load(access, rp, AnyReg(rp))) return false; pushI32(rp); break; } case ValType::I64: { RegI32 rp = popI32(); RegI64 rv = needI64(); - if (!loadHeap(access, rp, AnyReg(rv))) + if (!load(access, rp, AnyReg(rv))) return false; pushI64(rv); freeI32(rp); break; } case ValType::F32: { RegI32 rp = popI32(); RegF32 rv = needF32(); - if (!loadHeap(access, rp, AnyReg(rv))) + if (!load(access, rp, AnyReg(rv))) return false; pushF32(rv); freeI32(rp); break; } case ValType::F64: { RegI32 rp = popI32(); RegF64 rv = needF64(); - if (!loadHeap(access, rp, AnyReg(rv))) + if (!load(access, rp, AnyReg(rv))) return false; pushF64(rv); freeI32(rp); break; } default: - MOZ_CRASH("loadHeap type"); + MOZ_CRASH("load type"); break; } return true; } bool BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType) { @@ -5786,51 +5563,51 @@ BaseCompiler::emitStore(ValType resultTy // below the minimum heap length. 
MWasmMemoryAccess access(viewType, addr.align, addr.offset); switch (resultType) { case ValType::I32: { RegI32 rp, rv; pop2xI32(&rp, &rv); - if (!storeHeap(access, rp, AnyReg(rv))) + if (!store(access, rp, AnyReg(rv))) return false; freeI32(rp); pushI32(rv); break; } case ValType::I64: { RegI64 rv = popI64(); RegI32 rp = popI32(); - if (!storeHeap(access, rp, AnyReg(rv))) + if (!store(access, rp, AnyReg(rv))) return false; freeI32(rp); pushI64(rv); break; } case ValType::F32: { RegF32 rv = popF32(); RegI32 rp = popI32(); - if (!storeHeap(access, rp, AnyReg(rv))) + if (!store(access, rp, AnyReg(rv))) return false; freeI32(rp); pushF32(rv); break; } case ValType::F64: { RegF64 rv = popF64(); RegI32 rp = popI32(); - if (!storeHeap(access, rp, AnyReg(rv))) + if (!store(access, rp, AnyReg(rv))) return false; freeI32(rp); pushF64(rv); break; } default: - MOZ_CRASH("storeHeap type"); + MOZ_CRASH("store type"); break; } return true; } bool BaseCompiler::emitSelect() { @@ -6075,28 +5852,28 @@ BaseCompiler::emitStoreWithCoercion(ValT MWasmMemoryAccess access(viewType, addr.align, addr.offset); if (resultType == ValType::F32 && viewType == Scalar::Float64) { RegF32 rv = popF32(); RegF64 rw = needF64(); masm.convertFloat32ToDouble(rv.reg, rw.reg); RegI32 rp = popI32(); - if (!storeHeap(access, rp, AnyReg(rw))) + if (!store(access, rp, AnyReg(rw))) return false; pushF32(rv); freeI32(rp); freeF64(rw); } else if (resultType == ValType::F64 && viewType == Scalar::Float32) { RegF64 rv = popF64(); RegF32 rw = needF32(); masm.convertDoubleToFloat32(rv.reg, rw.reg); RegI32 rp = popI32(); - if (!storeHeap(access, rp, AnyReg(rw))) + if (!store(access, rp, AnyReg(rw))) return false; pushF64(rv); freeI32(rp); freeF32(rw); } else MOZ_CRASH("unexpected coerced store");
--- a/js/src/asmjs/WasmCode.cpp +++ b/js/src/asmjs/WasmCode.cpp @@ -101,35 +101,44 @@ StaticallyLink(CodeSegment& cs, const Li // These constants are logically part of the code: *(double*)(cs.globalData() + NaN64GlobalDataOffset) = GenericNaN(); *(float*)(cs.globalData() + NaN32GlobalDataOffset) = GenericNaN(); } static void -SpecializeToMemory(CodeSegment& cs, const Metadata& metadata, HandleWasmMemoryObject memory) +SpecializeToMemory(uint8_t* prevMemoryBase, CodeSegment& cs, const Metadata& metadata, + ArrayBufferObjectMaybeShared& buffer) { - if (!metadata.boundsChecks.empty()) { - uint32_t length = memory->buffer().wasmBoundsCheckLimit(); - MOZ_RELEASE_ASSERT(length == LegalizeMapLength(length)); - MOZ_RELEASE_ASSERT(length >= memory->buffer().wasmActualByteLength()); +#ifdef WASM_HUGE_MEMORY + MOZ_RELEASE_ASSERT(metadata.boundsChecks.empty()); + MOZ_RELEASE_ASSERT(metadata.isAsmJS() || metadata.memoryAccesses.empty()); +#else + uint32_t limit = buffer.wasmBoundsCheckLimit(); + MOZ_RELEASE_ASSERT(IsValidBoundsCheckImmediate(limit)); - for (const BoundsCheck& check : metadata.boundsChecks) - Assembler::UpdateBoundsCheck(check.patchAt(cs.base()), length); - } + for (const BoundsCheck& check : metadata.boundsChecks) + MacroAssembler::wasmPatchBoundsCheck(check.patchAt(cs.base()), limit); +#endif #if defined(JS_CODEGEN_X86) - uint8_t* base = memory->buffer().dataPointerEither().unwrap(); - for (const MemoryAccess& access : metadata.memoryAccesses) { - // Patch memory pointer immediate. 
- void* addr = access.patchMemoryPtrImmAt(cs.base()); - uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr)); - MOZ_ASSERT(disp <= INT32_MAX); - X86Encoding::SetPointer(addr, (void*)(base + disp)); + uint8_t* memoryBase = buffer.dataPointerEither().unwrap(/* code patching */); + if (prevMemoryBase != memoryBase) { + for (const MemoryAccess& access : metadata.memoryAccesses) { + void* patchAt = access.patchMemoryPtrImmAt(cs.base()); + + uint8_t* prevImm = (uint8_t*)X86Encoding::GetPointer(patchAt); + MOZ_ASSERT(prevImm >= prevMemoryBase); + + uint32_t offset = prevImm - prevMemoryBase; + MOZ_ASSERT(offset <= INT32_MAX); + + X86Encoding::SetPointer(patchAt, memoryBase + offset); + } } #endif } static bool SendCodeRangesToProfiler(JSContext* cx, CodeSegment& cs, const Bytes& bytecode, const Metadata& metadata) { @@ -227,17 +236,17 @@ CodeSegment::create(JSContext* cx, { JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread())); AutoFlushICache afc("CodeSegment::create"); AutoFlushICache::setRange(uintptr_t(codeBase), cs->codeLength()); memcpy(codeBase, bytecode.begin(), bytecode.length()); StaticallyLink(*cs, linkData, cx); if (memory) - SpecializeToMemory(*cs, metadata, memory); + SpecializeToMemory(nullptr, *cs, metadata, memory->buffer()); } if (!ExecutableAllocator::makeExecutable(codeBase, cs->codeLength())) { ReportOutOfMemory(cx); return nullptr; } if (!SendCodeRangesToProfiler(cx, *cs, bytecode, metadata)) @@ -253,16 +262,26 @@ CodeSegment::~CodeSegment() MOZ_ASSERT(wasmCodeAllocations > 0); wasmCodeAllocations--; MOZ_ASSERT(totalLength() > 0); DeallocateExecutableMemory(bytes_, totalLength(), gc::SystemPageSize()); } +void +CodeSegment::onMovingGrow(uint8_t* prevMemoryBase, const Metadata& metadata, ArrayBufferObject& buffer) +{ + AutoWritableJitCode awjc(base(), codeLength()); + AutoFlushICache afc("CodeSegment::onMovingGrow"); + AutoFlushICache::setRange(uintptr_t(base()), codeLength()); + + 
SpecializeToMemory(prevMemoryBase, *this, metadata, buffer); +} + size_t FuncDefExport::serializedSize() const { return sig_.serializedSize() + sizeof(pod); } uint8_t*
--- a/js/src/asmjs/WasmCode.h +++ b/js/src/asmjs/WasmCode.h @@ -95,16 +95,21 @@ class CodeSegment // enter/exit. bool containsFunctionPC(const void* pc) const { return pc >= base() && pc < (base() + functionCodeLength_); } bool containsCodePC(const void* pc) const { return pc >= base() && pc < (base() + codeLength_); } + + // onMovingGrow must be called if the memory passed to 'create' performs a + // moving grow operation. + + void onMovingGrow(uint8_t* prevMemoryBase, const Metadata& metadata, ArrayBufferObject& buffer); }; // ShareableBytes is a ref-counted vector of bytes which are incrementally built // during compilation and then immutably shared. struct ShareableBytes : ShareableBase<ShareableBytes> { // Vector is 'final', so instead make Vector a member and add boilerplate. @@ -512,16 +517,17 @@ class Code CacheableCharsVector funcLabels_; bool profilingEnabled_; public: Code(UniqueCodeSegment segment, const Metadata& metadata, const ShareableBytes* maybeBytecode); + CodeSegment& segment() { return *segment_; } const CodeSegment& segment() const { return *segment_; } const Metadata& metadata() const { return *metadata_; } // Frame iterator support: const CallSite* lookupCallSite(void* returnAddress) const; const CodeRange* lookupRange(void* pc) const; #ifdef WASM_HUGE_MEMORY
--- a/js/src/asmjs/WasmInstance.cpp +++ b/js/src/asmjs/WasmInstance.cpp @@ -287,52 +287,16 @@ Instance::growMemory_i32(Instance* insta } /* static */ uint32_t Instance::currentMemory_i32(Instance* instance) { return instance->currentMemory(); } -uint32_t -Instance::growMemory(uint32_t delta) -{ - MOZ_RELEASE_ASSERT(memory_); - - // Using uint64_t to avoid worrying about overflows in safety comp. - uint64_t curNumPages = currentMemory(); - uint64_t newNumPages = curNumPages + (uint64_t) delta; - - if (metadata().maxMemoryLength) { - ArrayBufferObject &buf = memory_->buffer().as<ArrayBufferObject>(); - // Guaranteed by instantiateMemory - MOZ_RELEASE_ASSERT(buf.wasmMaxSize() && buf.wasmMaxSize() <= metadata().maxMemoryLength); - - if (newNumPages * wasm::PageSize > buf.wasmMaxSize().value()) - return (uint32_t) -1; - - // Try to grow the memory - if (!buf.growForWasm(delta)) - return (uint32_t) -1; - } else { - return -1; // TODO: implement grow_memory w/o max when we add realloc - } - - return curNumPages; -} - -uint32_t -Instance::currentMemory() -{ - MOZ_RELEASE_ASSERT(memory_); - uint32_t curMemByteLen = memory_->buffer().wasmActualByteLength(); - MOZ_ASSERT(curMemByteLen % wasm::PageSize == 0); - return curMemByteLen / wasm::PageSize; -} - Instance::Instance(JSContext* cx, Handle<WasmInstanceObject*> object, UniqueCode code, HandleWasmMemoryObject memory, SharedTableVector&& tables, Handle<FunctionVector> funcImports, const ValVector& globalImports) : compartment_(cx->compartment()), @@ -406,16 +370,19 @@ Instance::Instance(JSContext* cx, for (size_t i = 0; i < tables_.length(); i++) *addressOfTableBase(i) = tables_[i]->base(); } bool Instance::init(JSContext* cx) { + if (memory_ && memory_->movingGrowable() && !memory_->addMovingGrowObserver(cx, object_)) + return false; + if (!metadata().sigIds.empty()) { ExclusiveData<SigIdSet>::Guard lockedSigIdSet = sigIdSet.lock(); if (!lockedSigIdSet->ensureInitialized(cx)) return false; for (const SigWithId& sig : 
metadata().sigIds) { const void* sigId; @@ -585,16 +552,19 @@ WasmInstanceObject* Instance::object() const { return object_; } bool Instance::callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args) { + // If there has been a moving grow, this Instance should have been notified. + MOZ_RELEASE_ASSERT(!memory_ || tlsData_.memoryBase == memory_->buffer().dataPointerEither()); + if (!cx->compartment()->wasm.ensureProfilingState(cx)) return false; const FuncDefExport& func = metadata().lookupFuncDefExport(funcDefIndex); // The calling convention for an external call into wasm is to pass an // array of 16-byte values where each value contains either a coerced int32 // (in the low word), a double value (in the low dword) or a SIMD vector @@ -801,16 +771,43 @@ Instance::callExport(JSContext* cx, uint } if (retObj) args.rval().set(ObjectValue(*retObj)); return true; } +uint32_t +Instance::currentMemory() +{ + MOZ_RELEASE_ASSERT(memory_); + uint32_t byteLength = memory_->buffer().wasmActualByteLength(); + MOZ_ASSERT(byteLength % wasm::PageSize == 0); + return byteLength / wasm::PageSize; +} + +uint32_t +Instance::growMemory(uint32_t delta) +{ + MOZ_ASSERT(!isAsmJS()); + uint32_t ret = memory_->grow(delta); + MOZ_RELEASE_ASSERT(tlsData_.memoryBase == memory_->buffer().dataPointerEither()); + return ret; +} + +void +Instance::onMovingGrow(uint8_t* prevMemoryBase) +{ + MOZ_ASSERT(!isAsmJS()); + ArrayBufferObject& buffer = memory_->buffer().as<ArrayBufferObject>(); + tlsData_.memoryBase = buffer.dataPointer(); + code_->segment().onMovingGrow(prevMemoryBase, metadata(), buffer); +} + void Instance::deoptimizeImportExit(uint32_t funcImportIndex) { const FuncImport& fi = metadata().funcImports[funcImportIndex]; FuncImportTls& import = funcImportTls(fi); import.code = codeBase() + fi.interpExitCodeOffset(); import.baselineScript = nullptr; }
--- a/js/src/asmjs/WasmInstance.h +++ b/js/src/asmjs/WasmInstance.h @@ -53,26 +53,27 @@ class Instance // Import call slow paths which are called directly from wasm code. friend void* AddressOf(SymbolicAddress, ExclusiveContext*); static int32_t callImport_void(Instance*, int32_t, int32_t, uint64_t*); static int32_t callImport_i32(Instance*, int32_t, int32_t, uint64_t*); static int32_t callImport_i64(Instance*, int32_t, int32_t, uint64_t*); static int32_t callImport_f64(Instance*, int32_t, int32_t, uint64_t*); static uint32_t growMemory_i32(Instance* instance, uint32_t delta); static uint32_t currentMemory_i32(Instance* instance); - bool callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, const uint64_t* argv, MutableHandleValue rval); - uint32_t growMemory(uint32_t delta); - uint32_t currentMemory(); // Only WasmInstanceObject can call the private trace function. friend class js::WasmInstanceObject; void tracePrivate(JSTracer* trc); + // Only WasmMemoryObject can call the private onMovingGrow notification. + friend class js::WasmMemoryObject; + void onMovingGrow(uint8_t* prevMemoryBase); + public: Instance(JSContext* cx, HandleWasmInstanceObject object, UniqueCode code, HandleWasmMemoryObject memory, SharedTableVector&& tables, Handle<FunctionVector> funcImports, const ValVector& globalImports); @@ -101,23 +102,33 @@ class Instance WasmInstanceObject* object() const; // Execute the given export given the JS call arguments, storing the return // value in args.rval. MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args); + // These methods implement their respective wasm operator but may also be + // called via the Memory JS API. + + uint32_t currentMemory(); + uint32_t growMemory(uint32_t delta); + // Initially, calls to imports in wasm code call out through the generic // callImport method. 
If the imported callee gets JIT compiled and the types // match up, callImport will patch the code to instead call through a thunk // directly into the JIT code. If the JIT code is released, the Instance must // be notified so it can go back to the generic callImport. void deoptimizeImportExit(uint32_t funcImportIndex); + + // Called by simulators to check whether accessing 'numBytes' starting at + // 'addr' would trigger a fault and be safely handled by signal handlers. + bool memoryAccessWouldFault(uint8_t* addr, unsigned numBytes); // See Code::ensureProfilingState comment. MOZ_MUST_USE bool ensureProfilingState(JSContext* cx, bool enabled); // about:memory reporting:
--- a/js/src/asmjs/WasmIonCompile.cpp +++ b/js/src/asmjs/WasmIonCompile.cpp @@ -697,127 +697,108 @@ class FunctionCompiler void assign(unsigned slot, MDefinition* def) { if (inDeadCode()) return; curBlock_->setSlot(info().localSlot(slot), def); } private: - // False means we're sure to be out-of-bounds after this bounds check. - bool maybeAddBoundsCheck(MDefinition* base, const MWasmMemoryAccess& access) + void checkOffsetAndBounds(MWasmMemoryAccess* access, MDefinition** base) { - if (access.offset() > uint32_t(INT32_MAX)) { - curBlock_->end(MWasmTrap::New(alloc(), Trap::OutOfBounds)); - curBlock_ = nullptr; - return false; + // If the offset is bigger than the guard region, a separate instruction + // is necessary to add the offset to the base and check for overflow. + if (access->offset() >= OffsetGuardLimit || !JitOptions.wasmFoldOffsets) { + auto* ins = MWasmAddOffset::New(alloc(), *base, access->offset()); + curBlock_->add(ins); + + *base = ins; + access->clearOffset(); } #ifndef WASM_HUGE_MEMORY - curBlock_->add(MWasmBoundsCheck::New(alloc(), base, access)); + curBlock_->add(MWasmBoundsCheck::New(alloc(), *base)); #endif - return true; } - MDefinition* loadHeapPrivate(MDefinition* base, const MWasmMemoryAccess& access, - bool isInt64 = false) + // Return true only for real asm.js (HEAP[i>>2]|0) accesses which have the + // peculiar property of not throwing on out-of-bounds. Everything else + // (wasm, SIMD.js, Atomics) throws on out-of-bounds. 
+ bool isAsmJSAccess(const MWasmMemoryAccess& access) { + return mg().isAsmJS() && !access.isSimdAccess() && !access.isAtomicAccess(); + } + + public: + MDefinition* load(MDefinition* base, MWasmMemoryAccess access, ValType result) { if (inDeadCode()) return nullptr; MInstruction* load = nullptr; - if (mg().isAsmJS()) { - load = MAsmJSLoadHeap::New(alloc(), base, access); + if (isAsmJSAccess(access)) { + MOZ_ASSERT(access.offset() == 0); + load = MAsmJSLoadHeap::New(alloc(), base, access.accessType()); } else { - if (!maybeAddBoundsCheck(base, access)) - return nullptr; - load = MWasmLoad::New(alloc(), base, access, isInt64); + checkOffsetAndBounds(&access, &base); + load = MWasmLoad::New(alloc(), base, access, ToMIRType(result)); } curBlock_->add(load); return load; } - void storeHeapPrivate(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v) + void store(MDefinition* base, MWasmMemoryAccess access, MDefinition* v) { if (inDeadCode()) return; MInstruction* store = nullptr; - if (mg().isAsmJS()) { - store = MAsmJSStoreHeap::New(alloc(), base, access, v); + if (isAsmJSAccess(access)) { + MOZ_ASSERT(access.offset() == 0); + store = MAsmJSStoreHeap::New(alloc(), base, access.accessType(), v); } else { - if (!maybeAddBoundsCheck(base, access)) - return; + checkOffsetAndBounds(&access, &base); store = MWasmStore::New(alloc(), base, access, v); } curBlock_->add(store); } - public: - MDefinition* loadHeap(MDefinition* base, const MWasmMemoryAccess& access, bool isInt64) - { - MOZ_ASSERT(!Scalar::isSimdType(access.accessType()), "SIMD loads should use loadSimdHeap"); - return loadHeapPrivate(base, access, isInt64); - } - MDefinition* loadSimdHeap(MDefinition* base, const MWasmMemoryAccess& access) - { - MOZ_ASSERT(Scalar::isSimdType(access.accessType()), "non-SIMD loads should use loadHeap"); - return loadHeapPrivate(base, access); - } - MDefinition* loadAtomicHeap(MDefinition* base, const MWasmMemoryAccess& access) - { - return loadHeapPrivate(base, 
access); - } - - void storeHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v) - { - MOZ_ASSERT(!Scalar::isSimdType(access.accessType()), "SIMD store should use storeSimdHeap"); - storeHeapPrivate(base, access, v); - } - void storeSimdHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v) - { - MOZ_ASSERT(Scalar::isSimdType(access.accessType()), "non-SIMD stores should use storeHeap"); - storeHeapPrivate(base, access, v); - } - void storeAtomicHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v) - { - storeHeapPrivate(base, access, v); - } - - MDefinition* atomicCompareExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access, + MDefinition* atomicCompareExchangeHeap(MDefinition* base, MWasmMemoryAccess access, MDefinition* oldv, MDefinition* newv) { if (inDeadCode()) return nullptr; + checkOffsetAndBounds(&access, &base); auto* cas = MAsmJSCompareExchangeHeap::New(alloc(), base, access, oldv, newv, tlsPointer_); curBlock_->add(cas); return cas; } - MDefinition* atomicExchangeHeap(MDefinition* base, const MWasmMemoryAccess& access, + MDefinition* atomicExchangeHeap(MDefinition* base, MWasmMemoryAccess access, MDefinition* value) { if (inDeadCode()) return nullptr; + checkOffsetAndBounds(&access, &base); auto* cas = MAsmJSAtomicExchangeHeap::New(alloc(), base, access, value, tlsPointer_); curBlock_->add(cas); return cas; } MDefinition* atomicBinopHeap(js::jit::AtomicOp op, - MDefinition* base, const MWasmMemoryAccess& access, + MDefinition* base, MWasmMemoryAccess access, MDefinition* v) { if (inDeadCode()) return nullptr; + checkOffsetAndBounds(&access, &base); auto* binop = MAsmJSAtomicBinopHeap::New(alloc(), op, base, access, v, tlsPointer_); curBlock_->add(binop); return binop; } MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst, MIRType type) { if (inDeadCode()) @@ -1108,17 +1089,18 @@ class FunctionCompiler if (inDeadCode()) { *def = nullptr; return true; } CallSiteDesc 
desc(call.lineOrBytecode_, CallSiteDesc::Register); auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(alloc(), desc, builtin, call.instanceArg_, call.regArgs_, - ToMIRType(ret), call.spIncrement_); + ToMIRType(ret), call.spIncrement_, + call.tlsStackOffset_); if (!ins) return false; curBlock_->add(ins); *def = ins; return true; } @@ -2346,30 +2328,30 @@ EmitSelect(FunctionCompiler& f) static bool EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType) { LinearMemoryAddress<MDefinition*> addr; if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr)) return false; MWasmMemoryAccess access(viewType, addr.align, addr.offset); - f.iter().setResult(f.loadHeap(addr.base, access, type == ValType::I64)); + f.iter().setResult(f.load(addr.base, access, type)); return true; } static bool EmitStore(FunctionCompiler& f, ValType resultType, Scalar::Type viewType) { LinearMemoryAddress<MDefinition*> addr; MDefinition* value; if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value)) return false; MWasmMemoryAccess access(viewType, addr.align, addr.offset); - f.storeHeap(addr.base, access, value); + f.store(addr.base, access, value); return true; } static bool EmitStoreWithCoercion(FunctionCompiler& f, ValType resultType, Scalar::Type viewType) { LinearMemoryAddress<MDefinition*> addr; MDefinition* value; @@ -2379,17 +2361,17 @@ EmitStoreWithCoercion(FunctionCompiler& if (resultType == ValType::F32 && viewType == Scalar::Float64) value = f.unary<MToDouble>(value); else if (resultType == ValType::F64 && viewType == Scalar::Float32) value = f.unary<MToFloat32>(value); else MOZ_CRASH("unexpected coerced store"); MWasmMemoryAccess access(viewType, addr.align, addr.offset); - f.storeHeap(addr.base, access, value); + f.store(addr.base, access, value); return true; } static bool EmitUnaryMathBuiltinCall(FunctionCompiler& f, uint32_t callOffset, SymbolicAddress callee, ValType operandType) { uint32_t lineOrBytecode = 
f.readCallSiteLineOrBytecode(callOffset); @@ -2453,32 +2435,32 @@ EmitAtomicsLoad(FunctionCompiler& f) { LinearMemoryAddress<MDefinition*> addr; Scalar::Type viewType; if (!f.iter().readAtomicLoad(&addr, &viewType)) return false; MWasmMemoryAccess access(viewType, addr.align, addr.offset, 0, MembarBeforeLoad, MembarAfterLoad); - f.iter().setResult(f.loadAtomicHeap(addr.base, access)); + f.iter().setResult(f.load(addr.base, access, ValType::I32)); return true; } static bool EmitAtomicsStore(FunctionCompiler& f) { LinearMemoryAddress<MDefinition*> addr; Scalar::Type viewType; MDefinition* value; if (!f.iter().readAtomicStore(&addr, &viewType, &value)) return false; MWasmMemoryAccess access(viewType, addr.align, addr.offset, 0, MembarBeforeStore, MembarAfterStore); - f.storeAtomicHeap(addr.base, access, value); + f.store(addr.base, access, value); f.iter().setResult(value); return true; } static bool EmitAtomicsBinOp(FunctionCompiler& f) { LinearMemoryAddress<MDefinition*> addr; @@ -2735,17 +2717,17 @@ EmitSimdLoad(FunctionCompiler& f, ValTyp if (!numElems) numElems = defaultNumElems; LinearMemoryAddress<MDefinition*> addr; if (!f.iter().readLoad(resultType, Scalar::byteSize(viewType), &addr)) return false; MWasmMemoryAccess access(viewType, addr.align, addr.offset, numElems); - f.iter().setResult(f.loadSimdHeap(addr.base, access)); + f.iter().setResult(f.load(addr.base, access, resultType)); return true; } static bool EmitSimdStore(FunctionCompiler& f, ValType resultType, unsigned numElems) { unsigned defaultNumElems; Scalar::Type viewType = SimdExprTypeToViewType(resultType, &defaultNumElems); @@ -2754,17 +2736,17 @@ EmitSimdStore(FunctionCompiler& f, ValTy numElems = defaultNumElems; LinearMemoryAddress<MDefinition*> addr; MDefinition* value; if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value)) return false; MWasmMemoryAccess access(viewType, addr.align, addr.offset, numElems); - f.storeSimdHeap(addr.base, access, value); + 
f.store(addr.base, access, value); return true; } static bool EmitSimdSelect(FunctionCompiler& f, ValType simdType) { MDefinition* trueValue; MDefinition* falseValue; @@ -2934,26 +2916,22 @@ EmitSimdOp(FunctionCompiler& f, ValType case SimdOperation::Fn_shuffle: return EmitSimdShuffle(f, type); case SimdOperation::Fn_load: return EmitSimdLoad(f, type, 0); case SimdOperation::Fn_load1: return EmitSimdLoad(f, type, 1); case SimdOperation::Fn_load2: return EmitSimdLoad(f, type, 2); - case SimdOperation::Fn_load3: - return EmitSimdLoad(f, type, 3); case SimdOperation::Fn_store: return EmitSimdStore(f, type, 0); case SimdOperation::Fn_store1: return EmitSimdStore(f, type, 1); case SimdOperation::Fn_store2: return EmitSimdStore(f, type, 2); - case SimdOperation::Fn_store3: - return EmitSimdStore(f, type, 3); case SimdOperation::Fn_allTrue: return EmitSimdAllTrue(f, type); case SimdOperation::Fn_anyTrue: return EmitSimdAnyTrue(f, type); case SimdOperation::Fn_abs: case SimdOperation::Fn_neg: case SimdOperation::Fn_not: case SimdOperation::Fn_sqrt: @@ -2997,16 +2975,18 @@ EmitSimdOp(FunctionCompiler& f, ValType case SimdOperation::Fn_fromUint16x8Bits: case SimdOperation::Fn_fromInt16x8Bits: return EmitSimdBitcast(f, ValType::I16x8, type); case SimdOperation::Fn_fromInt32x4Bits: case SimdOperation::Fn_fromUint32x4Bits: return EmitSimdBitcast(f, ValType::I32x4, type); case SimdOperation::Fn_fromFloat32x4Bits: return EmitSimdBitcast(f, ValType::F32x4, type); + case SimdOperation::Fn_load3: + case SimdOperation::Fn_store3: case SimdOperation::Fn_fromFloat64x2Bits: MOZ_CRASH("NYI"); } MOZ_CRASH("unexpected opcode"); } static bool EmitGrowMemory(FunctionCompiler& f, uint32_t callOffset) @@ -3022,17 +3002,20 @@ EmitGrowMemory(FunctionCompiler& f, uint MDefinition* delta; if (!f.iter().readUnary(ValType::I32, &delta)) return false; if (!f.passArg(delta, ValType::I32, &args)) return false; - f.finishCall(&args, PassTls::False, InterModule::False); + // As a short-cut, pretend this 
is an inter-module call so that any pinned + // heap pointer will be reloaded after the call. This hack will go away once + // we can stop pinning registers. + f.finishCall(&args, PassTls::True, InterModule::True); MDefinition* ret; if (!f.builtinInstanceMethodCall(SymbolicAddress::GrowMemory, args, ValType::I32, &ret)) return false; f.iter().setResult(ret); return true; }
--- a/js/src/asmjs/WasmJS.cpp +++ b/js/src/asmjs/WasmJS.cpp @@ -13,16 +13,17 @@ * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "asmjs/WasmJS.h" +#include "mozilla/CheckedInt.h" #include "mozilla/Maybe.h" #include "asmjs/WasmCompile.h" #include "asmjs/WasmInstance.h" #include "asmjs/WasmModule.h" #include "asmjs/WasmSignalHandlers.h" #include "builtin/Promise.h" #include "jit/JitOptions.h" @@ -30,16 +31,17 @@ #include "jsobjinlines.h" #include "vm/NativeObject-inl.h" using namespace js; using namespace js::jit; using namespace js::wasm; +using mozilla::CheckedInt; using mozilla::Nothing; bool wasm::HasCompilerSupport(ExclusiveContext* cx) { if (!cx->jitSupportsFloatingPoint()) return false; @@ -705,33 +707,56 @@ wasm::ExportedFunctionToDefinitionIndex( MOZ_ASSERT(IsExportedFunction(fun)); const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_FUNC_DEF_INDEX_SLOT); return v.toInt32(); } // ============================================================================ // WebAssembly.Memory class and methods +const ClassOps WasmMemoryObject::classOps_ = +{ + nullptr, /* addProperty */ + nullptr, /* delProperty */ + nullptr, /* getProperty */ + nullptr, /* setProperty */ + nullptr, /* enumerate */ + nullptr, /* resolve */ + nullptr, /* mayResolve */ + WasmMemoryObject::finalize +}; + const Class WasmMemoryObject::class_ = { "WebAssembly.Memory", JSCLASS_DELAY_METADATA_BUILDER | - JSCLASS_HAS_RESERVED_SLOTS(WasmMemoryObject::RESERVED_SLOTS) + JSCLASS_HAS_RESERVED_SLOTS(WasmMemoryObject::RESERVED_SLOTS) | + JSCLASS_FOREGROUND_FINALIZE, + &WasmMemoryObject::classOps_ }; +/* static */ void +WasmMemoryObject::finalize(FreeOp* fop, JSObject* obj) +{ + WasmMemoryObject& memory = obj->as<WasmMemoryObject>(); + if (memory.hasObservers()) + 
fop->delete_(&memory.observers()); +} + /* static */ WasmMemoryObject* WasmMemoryObject::create(ExclusiveContext* cx, HandleArrayBufferObjectMaybeShared buffer, HandleObject proto) { AutoSetNewObjectMetadata metadata(cx); auto* obj = NewObjectWithGivenProto<WasmMemoryObject>(cx, proto); if (!obj) return nullptr; obj->initReservedSlot(BUFFER_SLOT, ObjectValue(*buffer)); + MOZ_ASSERT(!obj->hasObservers()); return obj; } /* static */ bool WasmMemoryObject::construct(JSContext* cx, unsigned argc, Value* vp) { CallArgs args = CallArgsFromVp(argc, vp); @@ -835,16 +860,114 @@ const JSFunctionSpec WasmMemoryObject::m { JS_FS_END }; ArrayBufferObjectMaybeShared& WasmMemoryObject::buffer() const { return getReservedSlot(BUFFER_SLOT).toObject().as<ArrayBufferObjectMaybeShared>(); } +bool +WasmMemoryObject::hasObservers() const +{ + return !getReservedSlot(OBSERVERS_SLOT).isUndefined(); +} + +WasmMemoryObject::WeakInstanceSet& +WasmMemoryObject::observers() const +{ + MOZ_ASSERT(hasObservers()); + return *reinterpret_cast<WeakInstanceSet*>(getReservedSlot(OBSERVERS_SLOT).toPrivate()); +} + +WasmMemoryObject::WeakInstanceSet* +WasmMemoryObject::getOrCreateObservers(JSContext* cx) +{ + if (!hasObservers()) { + auto observers = MakeUnique<WeakInstanceSet>(cx->zone(), InstanceSet()); + if (!observers || !observers->init()) { + ReportOutOfMemory(cx); + return nullptr; + } + + setReservedSlot(OBSERVERS_SLOT, PrivateValue(observers.release())); + } + + return &observers(); +} + +bool +WasmMemoryObject::movingGrowable() const +{ +#ifdef WASM_HUGE_MEMORY + return false; +#else + return !buffer().wasmMaxSize(); +#endif +} + +bool +WasmMemoryObject::addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance) +{ + MOZ_ASSERT(movingGrowable()); + + WeakInstanceSet* observers = getOrCreateObservers(cx); + if (!observers) + return false; + + if (!observers->putNew(instance)) { + ReportOutOfMemory(cx); + return false; + } + + return true; +} + +uint32_t +WasmMemoryObject::grow(uint32_t 
delta) +{ + ArrayBufferObject &buf = buffer().as<ArrayBufferObject>(); + + MOZ_ASSERT(buf.wasmActualByteLength() % PageSize == 0); + uint32_t oldNumPages = buf.wasmActualByteLength() / PageSize; + + CheckedInt<uint32_t> newSize = oldNumPages; + newSize += delta; + newSize *= PageSize; + if (!newSize.isValid()) + return -1; + + if (Maybe<uint32_t> maxSize = buf.wasmMaxSize()) { + if (newSize.value() > maxSize.value()) + return -1; + + if (!buf.wasmGrowToSizeInPlace(newSize.value())) + return -1; + } else { +#ifdef WASM_HUGE_MEMORY + if (!buf.wasmGrowToSizeInPlace(newSize.value())) + return -1; +#else + MOZ_ASSERT(movingGrowable()); + + uint8_t* prevMemoryBase = buf.dataPointer(); + + if (!buf.wasmMovingGrowToSize(newSize.value())) + return -1; + + if (hasObservers()) { + for (InstanceSet::Range r = observers().all(); !r.empty(); r.popFront()) + r.front()->instance().onMovingGrow(prevMemoryBase); + } +#endif + } + + return oldNumPages; +} + // ============================================================================ // WebAssembly.Table class and methods const ClassOps WasmTableObject::classOps_ = { nullptr, /* addProperty */ nullptr, /* delProperty */ nullptr, /* getProperty */
--- a/js/src/asmjs/WasmJS.h +++ b/js/src/asmjs/WasmJS.h @@ -155,28 +155,43 @@ class WasmInstanceObject : public Native }; // The class of WebAssembly.Memory. A WasmMemoryObject references an ArrayBuffer // or SharedArrayBuffer object which owns the actual memory. class WasmMemoryObject : public NativeObject { static const unsigned BUFFER_SLOT = 0; + static const unsigned OBSERVERS_SLOT = 1; static const ClassOps classOps_; + static void finalize(FreeOp* fop, JSObject* obj); + + using InstanceSet = GCHashSet<ReadBarrieredWasmInstanceObject, + MovableCellHasher<ReadBarrieredWasmInstanceObject>, + SystemAllocPolicy>; + using WeakInstanceSet = JS::WeakCache<InstanceSet>; + bool hasObservers() const; + WeakInstanceSet& observers() const; + WeakInstanceSet* getOrCreateObservers(JSContext* cx); + public: - static const unsigned RESERVED_SLOTS = 1; + static const unsigned RESERVED_SLOTS = 2; static const Class class_; static const JSPropertySpec properties[]; static const JSFunctionSpec methods[]; static bool construct(JSContext*, unsigned, Value*); static WasmMemoryObject* create(ExclusiveContext* cx, Handle<ArrayBufferObjectMaybeShared*> buffer, HandleObject proto); ArrayBufferObjectMaybeShared& buffer() const; + + bool movingGrowable() const; + bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance); + uint32_t grow(uint32_t delta); }; // The class of WebAssembly.Table. A WasmTableObject holds a refcount on a // wasm::Table, allowing a Table to be shared between multiple Instances // (eventually between multiple threads). class WasmTableObject : public NativeObject {
--- a/js/src/asmjs/WasmModule.cpp +++ b/js/src/asmjs/WasmModule.cpp @@ -571,20 +571,20 @@ Module::instantiateMemory(JSContext* cx, buffer->as<ArrayBufferObject>().isWasm()); uint32_t actualLength = buffer->wasmActualByteLength(); if (actualLength < declaredMin || actualLength > declaredMax.valueOr(UINT32_MAX)) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMP_SIZE, "Memory"); return false; } - // For asm.js maxMemoryLength doesn't play a role since we can't grow memory. - // For wasm we require that either both memory and module don't specify a max size - // OR that the memory's max size is less than the modules. - if (!metadata_->isAsmJS()) { + if (metadata_->isAsmJS()) { + MOZ_ASSERT(IsValidAsmJSHeapLength(actualLength)); + MOZ_ASSERT(actualLength == buffer->wasmMaxSize().value()); + } else { Maybe<uint32_t> actualMax = buffer->as<ArrayBufferObject>().wasmMaxSize(); if (declaredMax.isSome() != actualMax.isSome() || declaredMax < actualMax) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMP_SIZE, "Memory"); return false; } } } else { MOZ_ASSERT(!metadata_->isAsmJS());
--- a/js/src/asmjs/WasmSignalHandlers.cpp +++ b/js/src/asmjs/WasmSignalHandlers.cpp @@ -596,21 +596,34 @@ ComputeAccessAddress(EMULATOR_CONTEXT* c AddressOfGPRegisterSlot(context, address.index())); MOZ_ASSERT(address.scale() < 32, "address shift overflow"); result += index * (uintptr_t(1) << address.scale()); } return reinterpret_cast<uint8_t*>(result); } -MOZ_COLD static uint8_t* -EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress, - const MemoryAccess* memoryAccess, const Instance& instance) +MOZ_COLD static bool +HugeMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress, + const Instance& instance, uint8_t** ppc) { MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(pc)); + + // On WASM_HUGE_MEMORY platforms, wasm::MemoryAccess is only created for + // asm.js loads and stores since they unfortunately do not simply throw on + // out-of-bounds. Everything else (WebAssembly and experimental + // SIMD/Atomics) throws. + + const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc); + if (!memoryAccess) { + *ppc = instance.codeSegment().outOfBoundsCode(); + return true; + } + + MOZ_RELEASE_ASSERT(instance.isAsmJS()); MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - instance.codeBase())); // Disassemble the instruction which caused the trap so that we can extract // information about it and decide what to do. 
Disassembler::HeapAccess access; uint8_t* end = Disassembler::DisassembleHeapAccess(pc, &access); const Disassembler::ComplexAddress& address = access.address(); MOZ_RELEASE_ASSERT(end > pc); @@ -646,50 +659,36 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont "Access begins outside the asm.js heap"); MOZ_RELEASE_ASSERT(accessAddress + access.size() <= instance.memoryBase() + instance.memoryMappedSize(), "Access extends beyond the asm.js heap guard region"); MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.memoryBase() + instance.memoryLength(), "Computed access address is not actually out of bounds"); - // Wasm loads/stores don't wrap offsets at all, so hitting the guard page - // means we are out of bounds in any cases. - if (!memoryAccess->wrapOffset()) { - MOZ_ASSERT(memoryAccess->throwOnOOB()); - return instance.codeSegment().outOfBoundsCode(); - } - // The basic sandbox model is that all heap accesses are a heap base // register plus an index, and the index is always computed with 32-bit // operations, so we know it can only be 4 GiB off of the heap base. // // However, we wish to support the optimization of folding immediates // and scaled indices into addresses, and any address arithmetic we fold // gets done at full pointer width, so it doesn't get properly wrapped. - // We support this by extending MappedSize to the greatest size that could - // be reached by such an unwrapped address, and then when we arrive here in - // the signal handler for such an access, we compute the fully wrapped - // address, and perform the load or store on it. + // We support this by extending HugeMappedSize to the greatest size that + // could be reached by such an unwrapped address, and then when we arrive + // here in the signal handler for such an access, we compute the fully + // wrapped address, and perform the load or store on it. // // Taking a signal is really slow, but in theory programs really shouldn't // be hitting this anyway. 
intptr_t unwrappedOffset = accessAddress - instance.memoryBase().unwrap(/* for value */); uint32_t wrappedOffset = uint32_t(unwrappedOffset); size_t size = access.size(); MOZ_RELEASE_ASSERT(wrappedOffset + size > wrappedOffset); bool inBounds = wrappedOffset + size < instance.memoryLength(); - // If this is storing Z of an XYZ, check whether X is also in bounds, so - // that we don't store anything before throwing. - MOZ_RELEASE_ASSERT(unwrappedOffset > memoryAccess->offsetWithinWholeSimdVector()); - uint32_t wrappedBaseOffset = uint32_t(unwrappedOffset - memoryAccess->offsetWithinWholeSimdVector()); - if (wrappedBaseOffset >= instance.memoryLength()) - inBounds = false; - if (inBounds) { // We now know that this is an access that is actually in bounds when // properly wrapped. Complete the load or store with the wrapped // address. SharedMem<uint8_t*> wrappedAddress = instance.memoryBase() + wrappedOffset; MOZ_RELEASE_ASSERT(wrappedAddress >= instance.memoryBase()); MOZ_RELEASE_ASSERT(wrappedAddress + size > wrappedAddress); MOZ_RELEASE_ASSERT(wrappedAddress + size <= instance.memoryBase() + instance.memoryLength()); @@ -706,20 +705,16 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont case Disassembler::HeapAccess::LoadSext64: MOZ_CRASH("no int64 accesses in asm.js"); case Disassembler::HeapAccess::Unknown: MOZ_CRASH("Failed to disassemble instruction"); } } else { // We now know that this is an out-of-bounds access made by an asm.js // load/store that we should handle. - - if (memoryAccess->throwOnOOB()) - return instance.codeSegment().outOfBoundsCode(); - switch (access.kind()) { case Disassembler::HeapAccess::Load: case Disassembler::HeapAccess::LoadSext32: // Assign the JS-defined result value to the destination register // (ToInt32(undefined) or ToNumber(undefined), determined by the // type of the destination register). 
Very conveniently, we can // infer the type from the register class, since all SIMD accesses // throw on out of bounds (see above), so the only types using FP @@ -731,17 +726,18 @@ EmulateHeapAccess(EMULATOR_CONTEXT* cont break; case Disassembler::HeapAccess::LoadSext64: MOZ_CRASH("no int64 accesses in asm.js"); case Disassembler::HeapAccess::Unknown: MOZ_CRASH("Failed to disassemble instruction"); } } - return end; + *ppc = end; + return true; } #endif // JS_CODEGEN_X64 MOZ_COLD static bool IsHeapAccessAddress(const Instance &instance, uint8_t* faultingAddress) { size_t accessLimit = instance.memoryMappedSize(); @@ -798,25 +794,21 @@ HandleFault(PEXCEPTION_POINTERS exceptio // always the logically-faulting pc). Fortunately, we can detect this // case and silence the exception ourselves (the exception will // retrigger after the interrupt jumps back to resumePC). return pc == instance->codeSegment().interruptCode() && instance->codeSegment().containsFunctionPC(activation->resumePC()); } #ifdef WASM_HUGE_MEMORY - const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc); - if (!memoryAccess) - *ppc = instance->codeSegment().outOfBoundsCode(); - else - *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance); + return HugeMemoryAccess(context, pc, faultingAddress, *instance, ppc); #else *ppc = instance->codeSegment().outOfBoundsCode(); + return true; #endif - return true; } static LONG WINAPI AsmJSFaultHandler(LPEXCEPTION_POINTERS exception) { if (HandleFault(exception)) return EXCEPTION_CONTINUE_EXECUTION; @@ -929,21 +921,18 @@ HandleMachException(JSRuntime* rt, const uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(request.body.code[1]); // This check isn't necessary, but, since we can, check anyway to make // sure we aren't covering up a real bug. 
if (!IsHeapAccessAddress(*instance, faultingAddress)) return false; #ifdef WASM_HUGE_MEMORY - const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc); - if (!memoryAccess) - *ppc = instance->codeSegment().outOfBoundsCode(); - else - *ppc = EmulateHeapAccess(&context, pc, faultingAddress, memoryAccess, *instance); + if (!HugeMemoryAccess(&context, pc, faultingAddress, *instance, ppc)) + return false; #else *ppc = instance->codeSegment().outOfBoundsCode(); #endif // Update the thread state with the new pc and register values. kret = thread_set_state(rtThread, float_state, (thread_state_t)&context.float_, float_state_count); if (kret != KERN_SUCCESS) return false; @@ -1148,34 +1137,28 @@ HandleFault(int signum, siginfo_t* info, uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(info->si_addr); // This check isn't necessary, but, since we can, check anyway to make // sure we aren't covering up a real bug. if (!IsHeapAccessAddress(*instance, faultingAddress)) return false; #ifdef WASM_HUGE_MEMORY - MOZ_RELEASE_ASSERT(signal == Signal::SegFault); - const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc); - if (!memoryAccess) - *ppc = instance->codeSegment().outOfBoundsCode(); - else - *ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance); + return HugeMemoryAccess(context, pc, faultingAddress, *instance, ppc); #elif defined(JS_CODEGEN_ARM) MOZ_RELEASE_ASSERT(signal == Signal::BusError || signal == Signal::SegFault); if (signal == Signal::BusError) *ppc = instance->codeSegment().unalignedAccessCode(); else *ppc = instance->codeSegment().outOfBoundsCode(); + return true; #else - MOZ_RELEASE_ASSERT(signal == Signal::SegFault); *ppc = instance->codeSegment().outOfBoundsCode(); + return true; #endif - - return true; } static struct sigaction sPrevSEGVHandler; static struct sigaction sPrevSIGBUSHandler; template<Signal signal> static void AsmJSFaultHandler(int signum, siginfo_t* info, void* context)
--- a/js/src/asmjs/WasmTypes.cpp +++ b/js/src/asmjs/WasmTypes.cpp @@ -619,55 +619,67 @@ Assumptions::sizeOfExcludingThis(MallocS } // Heap length on ARM should fit in an ARM immediate. We approximate the set // of valid ARM immediates with the predicate: // 2^n for n in [16, 24) // or // 2^24 * n for n >= 1. bool -wasm::IsValidARMLengthImmediate(uint32_t length) +wasm::IsValidARMImmediate(uint32_t i) { - bool valid = (IsPowerOfTwo(length) || - (length & 0x00ffffff) == 0); + bool valid = (IsPowerOfTwo(i) || + (i & 0x00ffffff) == 0); - MOZ_ASSERT_IF(valid, length % PageSize == 0); + MOZ_ASSERT_IF(valid, i % PageSize == 0); return valid; } uint32_t -wasm::RoundUpToNextValidARMLengthImmediate(uint32_t length) +wasm::RoundUpToNextValidARMImmediate(uint32_t i) { - MOZ_ASSERT(length <= 0xff000000); + MOZ_ASSERT(i <= 0xff000000); + + if (i <= 16 * 1024 * 1024) + i = i ? mozilla::RoundUpPow2(i) : 0; + else + i = (i + 0x00ffffff) & ~0x00ffffff; + + MOZ_ASSERT(IsValidARMImmediate(i)); - if (length <= 16 * 1024 * 1024) - length = length ? mozilla::RoundUpPow2(length) : 0; - else - length = (length + 0x00ffffff) & ~0x00ffffff; + return i; +} + +#ifndef WASM_HUGE_MEMORY - MOZ_ASSERT(IsValidARMLengthImmediate(length)); - - return length; +bool +wasm::IsValidBoundsCheckImmediate(uint32_t i) +{ +#ifdef JS_CODEGEN_ARM + return IsValidARMImmediate(i); +#else + return true; +#endif } size_t -wasm::LegalizeMapLength(size_t requestedSize) +wasm::ComputeMappedSize(uint32_t maxSize) { -#ifdef WASM_HUGE_MEMORY - // On 64-bit platforms just give us a 4G guard region - return wasm::MappedSize; -#else - uint32_t res = requestedSize; + MOZ_ASSERT(maxSize % PageSize == 0); - // On 32-bit platforms clamp down to 1GB - uint32_t MaxMappedSize = (1 << 30); - res = Min(res, MaxMappedSize); + // It is the bounds-check limit, not the mapped size, that gets baked into + // code. Thus round up the maxSize to the next valid immediate value + // *before* adding in the guard page. 
# ifdef JS_CODEGEN_ARM - // On Arm round so that it fits in a single instruction - res = RoundUpToNextValidARMLengthImmediate(res); - MOZ_RELEASE_ASSERT(res <= MaxMappedSize); + uint32_t boundsCheckLimit = RoundUpToNextValidARMImmediate(maxSize); +# else + uint32_t boundsCheckLimit = maxSize; # endif + MOZ_ASSERT(IsValidBoundsCheckImmediate(boundsCheckLimit)); - return res; -#endif + MOZ_ASSERT(boundsCheckLimit % gc::SystemPageSize() == 0); + MOZ_ASSERT(GuardSize % gc::SystemPageSize() == 0); + return boundsCheckLimit + GuardSize; } + +#endif // WASM_HUGE_MEMORY
--- a/js/src/asmjs/WasmTypes.h +++ b/js/src/asmjs/WasmTypes.h @@ -786,105 +786,16 @@ class CallSiteAndTarget : public CallSit static const uint32_t NOT_DEFINITION = UINT32_MAX; bool isDefinition() const { return funcDefIndex_ != NOT_DEFINITION; } uint32_t funcDefIndex() const { MOZ_ASSERT(isDefinition()); return funcDefIndex_; } }; typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector; -// Metadata for a bounds check that may need patching later. - -class BoundsCheck -{ - public: - BoundsCheck() = default; - - explicit BoundsCheck(uint32_t cmpOffset) - : cmpOffset_(cmpOffset) - { } - - uint8_t* patchAt(uint8_t* code) const { return code + cmpOffset_; } - void offsetBy(uint32_t offset) { cmpOffset_ += offset; } - - private: - uint32_t cmpOffset_; // absolute offset of the comparison -}; - -// Summarizes a heap access made by wasm code that needs to be patched later -// and/or looked up by the wasm signal handlers. Different architectures need -// to know different things (x64: intruction offset, wrapping and failure -// behavior, ARM: nothing, x86: offset of end of instruction (heap length to -// patch is last 4 bytes of instruction)). -#if defined(JS_CODEGEN_X86) -class MemoryAccess -{ - uint32_t nextInsOffset_; - - public: - MemoryAccess() = default; - - explicit MemoryAccess(uint32_t nextInsOffset) - : nextInsOffset_(nextInsOffset) - { } - - void* patchMemoryPtrImmAt(uint8_t* code) const { return code + nextInsOffset_; } - void offsetBy(uint32_t offset) { nextInsOffset_ += offset; } -}; -#elif defined(JS_CODEGEN_X64) -class MemoryAccess -{ - uint32_t insnOffset_; - uint8_t offsetWithinWholeSimdVector_; // if is this e.g. the Z of an XYZ - bool throwOnOOB_; // should we throw on OOB? - bool wrapOffset_; // should we wrap the offset on OOB? 
- - public: - enum OutOfBoundsBehavior { - Throw, - CarryOn, - }; - enum WrappingBehavior { - WrapOffset, - DontWrapOffset, - }; - - MemoryAccess() = default; - - MemoryAccess(uint32_t insnOffset, OutOfBoundsBehavior onOOB, WrappingBehavior onWrap, - uint32_t offsetWithinWholeSimdVector = 0) - : insnOffset_(insnOffset), - offsetWithinWholeSimdVector_(offsetWithinWholeSimdVector), - throwOnOOB_(onOOB == OutOfBoundsBehavior::Throw), - wrapOffset_(onWrap == WrappingBehavior::WrapOffset) - { - MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector, "fits in uint8"); - } - - uint32_t insnOffset() const { return insnOffset_; } - uint32_t offsetWithinWholeSimdVector() const { return offsetWithinWholeSimdVector_; } - bool throwOnOOB() const { return throwOnOOB_; } - bool wrapOffset() const { return wrapOffset_; } - - void offsetBy(uint32_t offset) { insnOffset_ += offset; } -}; -#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \ - defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ - defined(JS_CODEGEN_NONE) -// Nothing! We don't patch or emulate memory accesses on these platforms. -class MemoryAccess { - public: - void offsetBy(uint32_t) { MOZ_CRASH(); } - uint32_t insnOffset() const { MOZ_CRASH(); } -}; -#endif - -WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector) -WASM_DECLARE_POD_VECTOR(BoundsCheck, BoundsCheckVector) - // A wasm::SymbolicAddress represents a pointer to a well-known function or // object that is embedded in wasm code. Since wasm code is serialized and // later deserialized into a different address space, symbolic addresses must be // used for *all* pointers into the address space. The MacroAssembler records a // list of all SymbolicAddresses and the offsets of their use in the code for // later patching during static linking. enum class SymbolicAddress @@ -1254,31 +1165,163 @@ struct ExternalTableElem // WasmTableCallSigReg holds the signature id. 
void* code; // The pointer to the callee's instance's TlsData. This must be loaded into // WasmTlsReg before calling 'code'. TlsData* tls; }; -// Constants: +// Because ARM has a fixed-width instruction encoding, ARM can only express a +// limited subset of immediates (in a single instruction). + +extern bool +IsValidARMImmediate(uint32_t i); + +extern uint32_t +RoundUpToNextValidARMImmediate(uint32_t i); // The WebAssembly spec hard-codes the virtual page size to be 64KiB and -// requires linear memory to always be a multiple of 64KiB. +// requires the size of linear memory to always be a multiple of 64KiB. + static const unsigned PageSize = 64 * 1024; +// Bounds checks always compare the base of the memory access with the bounds +// check limit. If the memory access is unaligned, this means that, even if the +// bounds check succeeds, a few bytes of the access can extend past the end of +// memory. To guard against this, extra space is included in the guard region to +// catch the overflow. MaxMemoryAccessSize is a conservative approximation of +// the maximum guard space needed to catch all unaligned overflows. + +static const unsigned MaxMemoryAccessSize = sizeof(Val); + #ifdef JS_CODEGEN_X64 -#define WASM_HUGE_MEMORY -static const uint64_t Uint32Range = uint64_t(UINT32_MAX) + 1; -static const uint64_t MappedSize = 2 * Uint32Range + PageSize; + +// All other code should use WASM_HUGE_MEMORY instead of JS_CODEGEN_X64 so that +// it is easy to use the huge-mapping optimization for other 64-bit platforms in +// the future. +# define WASM_HUGE_MEMORY + +// On WASM_HUGE_MEMORY platforms, every asm.js or WebAssembly memory +// unconditionally allocates a huge region of virtual memory of size +// wasm::HugeMappedSize. This allows all memory resizing to work without +// reallocation and provides enough guard space for all offsets to be folded +// into memory accesses. 
+ +static const uint64_t IndexRange = uint64_t(UINT32_MAX) + 1; +static const uint64_t OffsetGuardLimit = uint64_t(INT32_MAX) + 1; +static const uint64_t UnalignedGuardPage = PageSize; +static const uint64_t HugeMappedSize = IndexRange + OffsetGuardLimit + UnalignedGuardPage; + +static_assert(MaxMemoryAccessSize <= UnalignedGuardPage, "rounded up to static page size"); + +#else // !WASM_HUGE_MEMORY + +// On !WASM_HUGE_MEMORY platforms: +// - To avoid OOM in ArrayBuffer::prepareForAsmJS, asm.js continues to use the +// original ArrayBuffer allocation which has no guard region at all. +// - For WebAssembly memories, an additional GuardSize is mapped after the +// accessible region of the memory to catch folded (base+offset) accesses +// where `offset < OffsetGuardLimit` as well as the overflow from unaligned +// accesses, as described above for MaxMemoryAccessSize. + +static const size_t OffsetGuardLimit = PageSize - MaxMemoryAccessSize; +static const size_t GuardSize = PageSize; + +// Return whether the given immediate satisfies the constraints of the platform +// (viz. that, on ARM, IsValidARMImmediate). + +extern bool +IsValidBoundsCheckImmediate(uint32_t i); + +// For a given WebAssembly/asm.js max size, return the number of bytes to +// map which will necessarily be a multiple of the system page size and greater +// than maxSize. For a returned mappedSize: +// boundsCheckLimit = mappedSize - GuardSize +// IsValidBoundsCheckImmediate(boundsCheckLimit) + +extern size_t +ComputeMappedSize(uint32_t maxSize); + +#endif // WASM_HUGE_MEMORY + +// Metadata for bounds check instructions that are patched at runtime with the +// appropriate bounds check limit. On WASM_HUGE_MEMORY platforms for wasm (and +// SIMD/Atomic) bounds checks, no BoundsCheck is created: the signal handler +// catches everything. 
On !WASM_HUGE_MEMORY, a BoundsCheck is created for each +// memory access (except when statically eliminated by optimizations) so that +// the length can be patched in as an immediate. This requires that the bounds +// check limit IsValidBoundsCheckImmediate. + +class BoundsCheck +{ + public: + BoundsCheck() = default; + + explicit BoundsCheck(uint32_t cmpOffset) + : cmpOffset_(cmpOffset) + { } + + uint8_t* patchAt(uint8_t* code) const { return code + cmpOffset_; } + void offsetBy(uint32_t offset) { cmpOffset_ += offset; } + + private: + uint32_t cmpOffset_; +}; + +// Metadata for memory accesses. On WASM_HUGE_MEMORY platforms, only +// (non-SIMD/Atomic) asm.js loads and stores create a MemoryAccess so that the +// signal handler can implement the semantically-correct wraparound logic; the +// rest simply redirect to the out-of-bounds stub in the signal handler. On x86, +// the base address of memory is baked into each memory access instruction so +// the MemoryAccess records the location of each for patching. On all other +// platforms, no MemoryAccess is created. 
+ +#ifdef WASM_HUGE_MEMORY +class MemoryAccess +{ + uint32_t insnOffset_; + + public: + MemoryAccess() = default; + explicit MemoryAccess(uint32_t insnOffset) + : insnOffset_(insnOffset) + {} + + uint32_t insnOffset() const { return insnOffset_; } + + void offsetBy(uint32_t offset) { insnOffset_ += offset; } +}; +#elif defined(JS_CODEGEN_X86) +class MemoryAccess +{ + uint32_t nextInsOffset_; + + public: + MemoryAccess() = default; + explicit MemoryAccess(uint32_t nextInsOffset) + : nextInsOffset_(nextInsOffset) + { } + + void* patchMemoryPtrImmAt(uint8_t* code) const { return code + nextInsOffset_; } + void offsetBy(uint32_t offset) { nextInsOffset_ += offset; } +}; +#else +class MemoryAccess { + public: + MemoryAccess() { MOZ_CRASH(); } + void offsetBy(uint32_t) { MOZ_CRASH(); } + uint32_t insnOffset() const { MOZ_CRASH(); } +}; #endif -bool IsValidARMLengthImmediate(uint32_t length); -uint32_t RoundUpToNextValidARMLengthImmediate(uint32_t length); -size_t LegalizeMapLength(size_t requestedSize); +WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector) +WASM_DECLARE_POD_VECTOR(BoundsCheck, BoundsCheckVector) + +// Constants: static const unsigned NaN64GlobalDataOffset = 0; static const unsigned NaN32GlobalDataOffset = NaN64GlobalDataOffset + sizeof(double); static const unsigned InitialGlobalDataBytes = NaN32GlobalDataOffset + sizeof(float); static const unsigned MaxSigs = 4 * 1024; static const unsigned MaxFuncs = 512 * 1024; static const unsigned MaxGlobals = 4 * 1024;
--- a/js/src/builtin/TestingFunctions.cpp +++ b/js/src/builtin/TestingFunctions.cpp @@ -240,24 +240,20 @@ GetBuildConfiguration(JSContext* cx, uns #ifdef EXPOSE_INTL_API value = BooleanValue(true); #else value = BooleanValue(false); #endif if (!JS_SetProperty(cx, info, "intl-api", value)) return false; -#if defined(XP_WIN) - value = BooleanValue(false); -#elif defined(SOLARIS) +#if defined(SOLARIS) value = BooleanValue(false); -#elif defined(XP_UNIX) +#else value = BooleanValue(true); -#else - value = BooleanValue(false); #endif if (!JS_SetProperty(cx, info, "mapped-array-buffer", value)) return false; #ifdef MOZ_MEMORY value = BooleanValue(true); #else value = BooleanValue(false);
--- a/js/src/frontend/Parser.cpp +++ b/js/src/frontend/Parser.cpp @@ -1033,17 +1033,54 @@ Parser<ParseHandler>::tryDeclareVar(Hand // { { let x; } var x; } for (ParseContext::Scope* scope = pc->innermostScope(); scope != pc->varScope().enclosing(); scope = scope->enclosing()) { if (AddDeclaredNamePtr p = scope->lookupDeclaredNameForAdd(name)) { DeclarationKind declaredKind = p->value()->kind(); - if (!DeclarationKindIsVar(declaredKind) && !DeclarationKindIsParameter(declaredKind)) { + if (DeclarationKindIsVar(declaredKind)) { + // Any vars that are redeclared as body-level functions must + // be recorded as body-level functions. + // + // In the case of global and eval scripts, GlobalDeclaration- + // Instantiation [1] and EvalDeclarationInstantiation [2] + // check for the declarability of global var and function + // bindings via CanDeclareVar [3] and CanDeclareGlobal- + // Function [4]. CanDeclareGlobalFunction is strictly more + // restrictive than CanDeclareGlobalVar, so record the more + // restrictive kind. These semantics are implemented in + // CheckCanDeclareGlobalBinding. + // + // For a var previously declared as ForOfVar, this previous + // DeclarationKind is used only to check for if the + // 'arguments' binding should be declared. Since body-level + // functions shadow 'arguments' [5], it is correct to alter + // the kind to BodyLevelFunction. See + // declareFunctionArgumentsObject. + // + // For a var previously declared as + // VarForAnnexBLexicalFunction, this previous DeclarationKind + // is used so that vars synthesized solely for Annex B.3.3 may + // be removed if an early error would occur. If a synthesized + // Annex B.3.3 var has the same name as a body-level function, + // this is not a redeclaration, and indeed, because the + // body-level function binds the name, this name should not be + // removed should a redeclaration occur in the future. Thus it + // is also correct to alter the kind to BodyLevelFunction. 
+ // + // [1] ES 15.1.11 + // [2] ES 18.2.1.3 + // [3] ES 8.1.1.4.15 + // [4] ES 8.1.1.4.16 + // [5] ES 9.2.12 + if (kind == DeclarationKind::BodyLevelFunction) + p->value()->alterKind(kind); + } else if (!DeclarationKindIsParameter(declaredKind)) { // Annex B.3.5 allows redeclaring simple (non-destructured) // catch parameters with var declarations, except when it // appears in a for-of. bool annexB35Allowance = declaredKind == DeclarationKind::SimpleCatchParameter && kind != DeclarationKind::ForOfVar; // Annex B.3.3 allows redeclaring functions in the same block. bool annexB33Allowance = declaredKind == DeclarationKind::LexicalFunction &&
--- a/js/src/gc/Memory.cpp +++ b/js/src/gc/Memory.cpp @@ -275,67 +275,71 @@ size_t GetPageFaultCount() { PROCESS_MEMORY_COUNTERS pmc; if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) return 0; return pmc.PageFaultCount; } -// On Windows the minimum size for a mapping is the allocation granularity -// (64KiB in practice), so mapping very small buffers is potentially wasteful. void* AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment) { - // The allocation granularity must be a whole multiple of the alignment and - // the caller must request an aligned offset to satisfy Windows' and the - // caller's alignment requirements. + MOZ_ASSERT(length && alignment); + + // The allocation granularity and the requested offset + // must both be divisible by the requested alignment. if (allocGranularity % alignment != 0 || offset % alignment != 0) return nullptr; - // Make sure file exists and do sanity check for offset and size. HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd)); - MOZ_ASSERT(hFile != INVALID_HANDLE_VALUE); - uint32_t fSizeHgh; - uint32_t fSizeLow = GetFileSize(hFile, LPDWORD(&fSizeHgh)); - if (fSizeLow == INVALID_FILE_SIZE && GetLastError() != NO_ERROR) - return nullptr; - - uint64_t fSize = (uint64_t(fSizeHgh) << 32) + fSizeLow; - if (offset >= size_t(fSize) || length == 0 || length > fSize - offset) - return nullptr; - - uint64_t mapSize = length + offset; - HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, mapSize >> 32, mapSize, nullptr); + // This call will fail if the file does not exist, which is what we want. + HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr); if (!hMap) return nullptr; - // MapViewOfFile requires the offset to be a whole multiple of the - // allocation granularity. 
- size_t alignOffset = offset - (offset % allocGranularity); - size_t alignLength = length + (offset % allocGranularity); - void* map = MapViewOfFile(hMap, FILE_MAP_COPY, 0, alignOffset, alignLength); + size_t alignedOffset = offset - (offset % allocGranularity); + size_t alignedLength = length + (offset % allocGranularity); + + DWORD offsetH = uint32_t(uint64_t(alignedOffset) >> 32); + DWORD offsetL = uint32_t(alignedOffset); + + // If the offset or length are out of bounds, this call will fail. + uint8_t* map = static_cast<uint8_t*>(MapViewOfFile(hMap, FILE_MAP_COPY, offsetH, + offsetL, alignedLength)); + + // This just decreases the file mapping object's internal reference count; + // it won't actually be destroyed until we unmap the associated view. CloseHandle(hMap); + if (!map) return nullptr; - return reinterpret_cast<void*>(uintptr_t(map) + (offset - alignOffset)); +#ifdef DEBUG + // Zero out data before and after the desired mapping to catch errors early. + if (offset != alignedOffset) + memset(map, 0, offset - alignedOffset); + if (alignedLength % pageSize) + memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize)); +#endif + + return map + (offset - alignedOffset); } void DeallocateMappedContent(void* p, size_t /*length*/) { if (!p) return; // Calculate the address originally returned by MapViewOfFile. - // This is required because AllocateMappedContent returns a pointer that - // might be offset into the view, necessitated by the requirement that the - // beginning of a view must be aligned with the allocation granularity. + // This is needed because AllocateMappedContent returns a pointer + // that might be offset from the view, as the beginning of a + // view must be aligned with the allocation granularity. uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity); MOZ_ALWAYS_TRUE(UnmapViewOfFile(reinterpret_cast<void*>(map))); } # else // Various APIs are unavailable. 
void* MapAlignedPages(size_t size, size_t alignment) @@ -771,71 +775,60 @@ GetPageFaultCount() if (err) return 0; return usage.ru_majflt; } void* AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment) { -#define NEED_PAGE_ALIGNED 0 - size_t pa_start; // Page aligned starting - size_t pa_end; // Page aligned ending - size_t pa_size; // Total page aligned size - struct stat st; - uint8_t* buf; + MOZ_ASSERT(length && alignment); - // Make sure file exists and do sanity check for offset and size. - if (fstat(fd, &st) < 0 || offset >= (size_t) st.st_size || - length == 0 || length > (size_t) st.st_size - offset) + // The allocation granularity and the requested offset + // must both be divisible by the requested alignment. + if (allocGranularity % alignment != 0 || offset % alignment != 0) return nullptr; - // Check for minimal alignment requirement. -#if NEED_PAGE_ALIGNED - alignment = std::max(alignment, pageSize); -#endif - if (offset & (alignment - 1)) + // Sanity check the offset and size, as mmap does not do this for us. + struct stat st; + if (fstat(fd, &st) || offset >= uint64_t(st.st_size) || length > uint64_t(st.st_size) - offset) return nullptr; - // Page aligned starting of the offset. - pa_start = offset & ~(pageSize - 1); - // Calculate page aligned ending by adding one page to the page aligned - // starting of data end position(offset + length - 1). - pa_end = ((offset + length - 1) & ~(pageSize - 1)) + pageSize; - pa_size = pa_end - pa_start; + size_t alignedOffset = offset - (offset % allocGranularity); + size_t alignedLength = length + (offset % allocGranularity); - // Ask for a continuous memory location. 
- buf = (uint8_t*) MapMemory(pa_size); - if (!buf) + uint8_t* map = static_cast<uint8_t*>(MapMemory(alignedLength, PROT_READ | PROT_WRITE, + MAP_PRIVATE, fd, alignedOffset)); + if (!map) return nullptr; - buf = (uint8_t*) MapMemoryAt(buf, pa_size, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_FIXED, fd, pa_start); - if (!buf) - return nullptr; +#ifdef DEBUG + // Zero out data before and after the desired mapping to catch errors early. + if (offset != alignedOffset) + memset(map, 0, offset - alignedOffset); + if (alignedLength % pageSize) + memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize)); +#endif - // Reset the data before target file, which we don't need to see. - memset(buf, 0, offset - pa_start); - - // Reset the data after target file, which we don't need to see. - memset(buf + (offset - pa_start) + length, 0, pa_end - (offset + length)); - - return buf + (offset - pa_start); + return map + (offset - alignedOffset); } void DeallocateMappedContent(void* p, size_t length) { - void* pa_start; // Page aligned starting - size_t total_size; // Total allocated size + if (!p) + return; - pa_start = (void*)(uintptr_t(p) & ~(pageSize - 1)); - total_size = ((uintptr_t(p) + length) & ~(pageSize - 1)) + pageSize - uintptr_t(pa_start); - if (munmap(pa_start, total_size)) - MOZ_ASSERT(errno == ENOMEM); + // Calculate the address originally returned by mmap. + // This is needed because AllocateMappedContent returns a pointer + // that might be offset from the mapping, as the beginning of a + // mapping must be aligned with the allocation granularity. + uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity); + size_t alignedLength = length + (uintptr_t(p) % allocGranularity); + UnmapPages(reinterpret_cast<void*>(map), alignedLength); } #else #error "Memory mapping functions are not defined for your OS." #endif void ProtectPages(void* p, size_t size)
--- a/js/src/jit-test/tests/asm.js/bug1219954.js +++ b/js/src/jit-test/tests/asm.js/bug1219954.js @@ -1,8 +1,9 @@ +// |jit-test| slow "use strict"; if (!('oomTest' in this)) quit(); let g = (function() { "use asm"; function f() {}
--- a/js/src/jit-test/tests/asm.js/testBug1255954.js +++ b/js/src/jit-test/tests/asm.js/testBug1255954.js @@ -1,8 +1,10 @@ +// |jit-test| slow + const USE_ASM = '"use asm";'; if (!('oomTest' in this)) quit(); function asmCompile() { var f = Function.apply(null, arguments); } oomTest(() => { try {
--- a/js/src/jit-test/tests/asm.js/testSIMD-load-store.js +++ b/js/src/jit-test/tests/asm.js/testSIMD-load-store.js @@ -1,13 +1,16 @@ -// |jit-test| test-also-noasmjs +// |jit-test| load(libdir + "asm.js"); load(libdir + "simd.js"); load(libdir + "asserts.js"); +// Avoid pathological --ion-eager compile times due to bails in loops +setJitCompilerOption('ion.warmup.trigger', 1000000); + // Set to true to see more JS debugging spew const DEBUG = false; if (!isSimdAvailable() || typeof SIMD === 'undefined') { DEBUG && print("won't run tests as simd extensions aren't activated yet"); quit(0); } @@ -268,53 +271,43 @@ for (var i = 0; i < SIZE; i++) function MakeCodeFor(typeName) { return ` "use asm"; var type = glob.SIMD.${typeName}; var c = type.check; var l1 = type.load1; var l2 = type.load2; - var l3 = type.load3; var s1 = type.store1; var s2 = type.store2; - var s3 = type.store3; var u8 = new glob.Uint8Array(heap); function load1(i) { i=i|0; return l1(u8, i); } function load2(i) { i=i|0; return l2(u8, i); } - function load3(i) { i=i|0; return l3(u8, i); } function loadCst1() { return l1(u8, 41 << 2); } function loadCst2() { return l2(u8, 41 << 2); } - function loadCst3() { return l3(u8, 41 << 2); } function store1(i, x) { i=i|0; x=c(x); return s1(u8, i, x); } function store2(i, x) { i=i|0; x=c(x); return s2(u8, i, x); } - function store3(i, x) { i=i|0; x=c(x); return s3(u8, i, x); } function storeCst1(x) { x=c(x); return s1(u8, 41 << 2, x); } function storeCst2(x) { x=c(x); return s2(u8, 41 << 2, x); } - function storeCst3(x) { x=c(x); return s3(u8, 41 << 2, x); } return { load1: load1, load2: load2, - load3: load3, loadCst1: loadCst1, loadCst2: loadCst2, - loadCst3: loadCst3, store1: store1, store2: store2, - store3: store3, storeCst1: storeCst1, storeCst2: storeCst2, - storeCst3: storeCst3, } `; } var SIZE = 0x10000; function TestPartialLoads(m, typedArray, x, y, z, w) { // Fill array with predictable values @@ -324,53 +317,44 @@ function TestPartialLoads(m, 
typedArray, typedArray[i + 2] = z(i); typedArray[i + 3] = w(i); } // Test correct loads var i = 0, j = 0; // i in elems, j in bytes assertEqX4(m.load1(j), [x(i), 0, 0, 0]); assertEqX4(m.load2(j), [x(i), y(i), 0, 0]); - assertEqX4(m.load3(j), [x(i), y(i), z(i), 0]); j += 4; assertEqX4(m.load1(j), [y(i), 0, 0, 0]); assertEqX4(m.load2(j), [y(i), z(i), 0, 0]); - assertEqX4(m.load3(j), [y(i), z(i), w(i), 0]); j += 4; assertEqX4(m.load1(j), [z(i), 0, 0, 0]); assertEqX4(m.load2(j), [z(i), w(i), 0, 0]); - assertEqX4(m.load3(j), [z(i), w(i), x(i+4), 0]); j += 4; assertEqX4(m.load1(j), [w(i), 0, 0, 0]); assertEqX4(m.load2(j), [w(i), x(i+4), 0, 0]); - assertEqX4(m.load3(j), [w(i), x(i+4), y(i+4), 0]); j += 4; i += 4; assertEqX4(m.load1(j), [x(i), 0, 0, 0]); assertEqX4(m.load2(j), [x(i), y(i), 0, 0]); - assertEqX4(m.load3(j), [x(i), y(i), z(i), 0]); // Test loads with constant indexes (41) assertEqX4(m.loadCst1(), [y(40), 0, 0, 0]); assertEqX4(m.loadCst2(), [y(40), z(40), 0, 0]); - assertEqX4(m.loadCst3(), [y(40), z(40), w(40), 0]); // Test limit and OOB accesses assertEqX4(m.load1((SIZE - 1) << 2), [w(SIZE - 4), 0, 0, 0]); assertThrowsInstanceOf(() => m.load1(((SIZE - 1) << 2) + 1), RangeError); assertEqX4(m.load2((SIZE - 2) << 2), [z(SIZE - 4), w(SIZE - 4), 0, 0]); assertThrowsInstanceOf(() => m.load2(((SIZE - 2) << 2) + 1), RangeError); - - assertEqX4(m.load3((SIZE - 3) << 2), [y(SIZE - 4), z(SIZE - 4), w(SIZE - 4), 0]); - assertThrowsInstanceOf(() => m.load3(((SIZE - 3) << 2) + 1), RangeError); } // Partial stores function TestPartialStores(m, typedArray, typeName, x, y, z, w) { var val = SIMD[typeName](x, y, z, w); function Reset() { for (var i = 0; i < SIZE; i++) @@ -394,28 +378,16 @@ function TestPartialStores(m, typedArray CheckNotModified(0, i >> 2); assertEq(typedArray[i >> 2], x); assertEq(typedArray[(i >> 2) + 1], y); CheckNotModified((i >> 2) + 2, SIZE); typedArray[i >> 2] = (i >> 2) + 1; typedArray[(i >> 2) + 1] = (i >> 2) + 2; } - function TestStore3(i) { - 
m.store3(i, val); - CheckNotModified(0, i >> 2); - assertEq(typedArray[i >> 2], x); - assertEq(typedArray[(i >> 2) + 1], y); - assertEq(typedArray[(i >> 2) + 2], z); - CheckNotModified((i >> 2) + 3, SIZE); - typedArray[i >> 2] = (i >> 2) + 1; - typedArray[(i >> 2) + 1] = (i >> 2) + 2; - typedArray[(i >> 2) + 2] = (i >> 2) + 3; - } - function TestOOBStore(f) { assertThrowsInstanceOf(f, RangeError); CheckNotModified(0, SIZE); } Reset(); TestStore1(0); @@ -435,52 +407,30 @@ function TestPartialStores(m, typedArray TestStore2(3 << 2); TestStore2(1337 << 2); var i = (SIZE - 2) << 2; TestStore2(i); TestOOBStore(() => m.store2(i + 1, val)); TestOOBStore(() => m.store2(-1, val)); - TestStore3(0); - TestStore3(1 << 2); - TestStore3(2 << 2); - TestStore3(3 << 2); - TestStore3(1337 << 2); - - var i = (SIZE - 3) << 2; - TestStore3(i); - TestOOBStore(() => m.store3(i + 1, val)); - TestOOBStore(() => m.store3(-1, val)); - TestOOBStore(() => m.store3(-9, val)); - // Constant indexes (41) m.storeCst1(val); CheckNotModified(0, 41); assertEq(typedArray[41], x); CheckNotModified(42, SIZE); typedArray[41] = 42; m.storeCst2(val); CheckNotModified(0, 41); assertEq(typedArray[41], x); assertEq(typedArray[42], y); CheckNotModified(43, SIZE); typedArray[41] = 42; typedArray[42] = 43; - - m.storeCst3(val); - CheckNotModified(0, 41); - assertEq(typedArray[41], x); - assertEq(typedArray[42], y); - assertEq(typedArray[43], z); - CheckNotModified(44, SIZE); - typedArray[41] = 42; - typedArray[42] = 43; - typedArray[43] = 44; } var f32 = new Float32Array(SIZE); var mFloat32x4 = asmLink(asmCompile('glob', 'ffi', 'heap', MakeCodeFor('Float32x4')), this, null, f32.buffer); TestPartialLoads(mFloat32x4, f32, (i) => i + 1, (i) => Math.fround(13.37),
--- a/js/src/jit-test/tests/asm.js/testSIMD.js +++ b/js/src/jit-test/tests/asm.js/testSIMD.js @@ -1,13 +1,16 @@ load(libdir + "asm.js"); load(libdir + "simd.js"); load(libdir + "asserts.js"); var heap = new ArrayBuffer(0x10000); +// Avoid pathological --ion-eager compile times due to bails in loops +setJitCompilerOption('ion.warmup.trigger', 1000000); + // Set to true to see more JS debugging spew const DEBUG = false; if (!isSimdAvailable() || typeof SIMD === 'undefined') { DEBUG && print("won't run tests as simd extensions aren't activated yet"); quit(0); }
--- a/js/src/jit-test/tests/asm.js/testZOOB.js +++ b/js/src/jit-test/tests/asm.js/testZOOB.js @@ -1,15 +1,12 @@ -// |jit-test| test-also-noasmjs +// |jit-test| load(libdir + "asm.js"); load(libdir + "asserts.js"); -setIonCheckGraphCoherency(false); -setCachingEnabled(false); - var ab = new ArrayBuffer(BUF_MIN); // Compute a set of interesting indices. indices = [0] for (var i of [4,1024,BUF_MIN,Math.pow(2,30),Math.pow(2,31),Math.pow(2,32),Math.pow(2,33)]) { for (var j of [-2,-1,0,1,2]) { for (var k of [1,-1]) indices.push((i+j)*k); @@ -99,105 +96,83 @@ function testSimdX4(ctor, shift, scale, var arr = new ctor(ab); var c = asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.' + ctor.name + '(b); ' + 'var SIMD_' + simdName + ' = glob.SIMD.' + simdName + '; ' + 'var SIMD_' + simdName + '_check = SIMD_' + simdName + '.check; ' + 'var SIMD_' + simdName + '_load = SIMD_' + simdName + '.load; ' + - 'var SIMD_' + simdName + '_load3 = SIMD_' + simdName + '.load3; ' + 'var SIMD_' + simdName + '_load2 = SIMD_' + simdName + '.load2; ' + 'var SIMD_' + simdName + '_load1 = SIMD_' + simdName + '.load1; ' + 'var SIMD_' + simdName + '_store = SIMD_' + simdName + '.store; ' + - 'var SIMD_' + simdName + '_store3 = SIMD_' + simdName + '.store3; ' + 'var SIMD_' + simdName + '_store2 = SIMD_' + simdName + '.store2; ' + 'var SIMD_' + simdName + '_store1 = SIMD_' + simdName + '.store1; ' + 'function load(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' + - 'function load3(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load3(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' + 'function load2(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' + 'function load1(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) 
} ' + 'function store(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' + - 'function store3(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store3(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' + 'function store2(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' + 'function store1(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' + - 'return { load: load, load3: load3, load2: load2, load1: load1, store: store, store3: store3, store2 : store2, store1 : store1 }'); + 'return { load: load, load2: load2, load1: load1, store: store, store2 : store2, store1 : store1 }'); var f = asmLink(c, this, null, ab); for (var i of indices) { var index = ((i<<scale)+disp)>>shift; - var v, v3, v2, v1; - var t = false, t3 = false, t2 = false, t1 = false; + var v, v2, v1; + var t = false, t2 = false, t1 = false; try { v = simdCtor.load(arr, index); } catch (e) { assertEq(e instanceof RangeError, true); t = true; } - try { v3 = simdCtor.load3(arr, index); } - catch (e) { - assertEq(e instanceof RangeError, true); - t3 = true; - } try { v2 = simdCtor.load2(arr, index); } catch (e) { assertEq(e instanceof RangeError, true); t2 = true; } try { v1 = simdCtor.load1(arr, index); } catch (e) { assertEq(e instanceof RangeError, true); t1 = true; } // Loads - var l, l3, l2, l1; - var r = false, r3 = false, r2 = false, r1 = false; + var l, l2, l1; + var r = false, r2 = false, r1 = false; try { l = f.load(i); } catch (e) { assertEq(e instanceof RangeError, true); r = true; } - try { l3 = f.load3(i); } - catch (e) { - assertEq(e instanceof RangeError, true); - r3 = true; - } try { l2 = f.load2(i); } catch (e) { assertEq(e instanceof RangeError, true); r2 = true; } try { l1 = f.load1(i); } catch (e) { 
assertEq(e instanceof RangeError, true); r1 = true; } assertEq(t, r); - assertEq(t3, r3); assertEq(t2, r2); assertEq(t1, r1); if (!t) assertEqX4(v, l); - if (!t3) assertEqX4(v3, l3); if (!t2) assertEqX4(v2, l2); if (!t1) assertEqX4(v1, l1); // Stores if (!t) { simdCtor.store(arr, index, simdCtor.neg(v)); f.store(i, v); assertEqX4(simdCtor.load(arr, index), v); } else assertThrowsInstanceOf(() => f.store(i, simdCtor()), RangeError); - if (!t3) { - simdCtor.store3(arr, index, simdCtor.neg(v3)); - f.store3(i, v3); - assertEqX4(simdCtor.load3(arr, index), v3); - } else - assertThrowsInstanceOf(() => f.store3(i, simdCtor()), RangeError); if (!t2) { simdCtor.store2(arr, index, simdCtor.neg(v2)); f.store2(i, v2); assertEqX4(simdCtor.load2(arr, index), v2); } else assertThrowsInstanceOf(() => f.store2(i, simdCtor()), RangeError); if (!t1) { simdCtor.store1(arr, index, simdCtor.neg(v1)); @@ -234,11 +209,19 @@ test(testInt, Int8Array, 0); test(testInt, Uint8Array, 0); test(testInt, Int16Array, 1); test(testInt, Uint16Array, 1); test(testInt, Int32Array, 2); test(testInt, Uint32Array, 2); test(testFloat32, Float32Array, 2); test(testFloat64, Float64Array, 3); if (typeof SIMD !== 'undefined' && isSimdAvailable()) { + // Avoid pathological --ion-eager compile times due to bails in loops + setJitCompilerOption('ion.warmup.trigger', 1000000); + + // Use a fresh ArrayBuffer so prepareForAsmJS can allocated a guard page + // which SIMD.js needs. Since the original ArrayBuffer was prepared for + // asm.js that didn't use SIMD.js, it has no guard page (on 32-bit). + ab = new ArrayBuffer(BUF_MIN); + test(testInt32x4, Uint8Array, 0); test(testFloat32x4, Uint8Array, 0); }
new file mode 100644 --- /dev/null +++ b/js/src/jit-test/tests/debug/bug1300517.js @@ -0,0 +1,12 @@ +// |jit-test| error: ReferenceError +g = newGlobal(); +g.log *= ""; +Debugger(g).onDebuggerStatement = frame => frame.eval("log += this.Math.toString();"); +let forceException = g.eval(` + (class extends class {} { + constructor() { + debugger; + } + }) +`); +new forceException;
new file mode 100644 --- /dev/null +++ b/js/src/jit-test/tests/debug/bug1300528.js @@ -0,0 +1,34 @@ +load(libdir + "asserts.js"); + +if (helperThreadCount() === 0) + quit(0); + +function BigInteger(a, b, c) {} +function montConvert(x) { + var r = new BigInteger(null); + return r; +} +var ba = new Array(); +a = new BigInteger(ba); +g = montConvert(a); +var lfGlobal = newGlobal(); +for (lfLocal in this) { + if (!(lfLocal in lfGlobal)) { + lfGlobal[lfLocal] = this[lfLocal]; + } +} +lfGlobal.offThreadCompileScript(` + var dbg = new Debugger(g); + dbg.onEnterFrame = function (frame) { + var frameThis = frame.this; + } +`); +lfGlobal.runOffThreadScript(); +assertThrowsInstanceOf(test, ReferenceError); +function test() { + function check(fun, msg, todo) { + success = fun(); + } + check(() => Object.getPrototypeOf(view) == Object.getPrototypeOf(simple)); + typeof this; +}
new file mode 100644 --- /dev/null +++ b/js/src/jit-test/tests/wasm/big-resize.js @@ -0,0 +1,10 @@ +load(libdir + "wasm.js"); + +assertEq(new WebAssembly.Instance(new WebAssembly.Module(textToBinary(`(module + (memory 1 32768) + (func $test (result i32) + (if (i32.eq (grow_memory (i32.const 16384)) (i32.const -1)) (return (i32.const 42))) + (i32.store (i32.const 1073807356) (i32.const 42)) + (i32.load (i32.const 1073807356))) + (export "test" $test) +)`))).exports.test(), 42);
--- a/js/src/jit-test/tests/wasm/memory.js +++ b/js/src/jit-test/tests/wasm/memory.js @@ -143,24 +143,20 @@ for (let [type, ext] of [ ['i64', '32'], ['f32', ''], ['f64', ''], ]) { assertErrorMessage(() => badStoreModule(type, ext), TypeError, /can't touch memory/); } -for (var ind = 0; ind < 1; ind++) { - /* - * TODO: wasm.explicit-bounds-check option is being deprecated. We will be adding a - * new option that treats all offset as "non-foldable". When that is added trigger - * it here when ind == 1. - if (ind == 1) - setJitCompilerOption('wasm.explicit-bounds-checks', 1); - */ +assertEq(getJitCompilerOptions()['wasm.fold-offsets'], 1); + +for (var foldOffsets = 0; foldOffsets <= 1; foldOffsets++) { + setJitCompilerOption('wasm.fold-offsets', foldOffsets); testLoad('i32', '', 0, 0, 0, 0x03020100); testLoad('i32', '', 1, 0, 1, 0x04030201); testLoad('i32', '', 0, 4, 0, 0x07060504); testLoad('i32', '', 1, 3, 4, 0x07060504); testLoad('f32', '', 0, 0, 0, 3.820471434542632e-37); testLoad('f32', '', 1, 0, 1, 1.539989614439558e-36); testLoad('f32', '', 0, 4, 0, 1.0082513512365273e-34); @@ -394,8 +390,10 @@ for (var ind = 0; ind < 1; ind++) { testStore('i64', '', lastValidIndex, offset, align, '0x1234567887654321'); testStoreOOB('i64', '', lastValidIndex + 1, offset, align, '0x1234567887654321'); } setJitCompilerOption('wasm.test-mode', 0); } } + +setJitCompilerOption('wasm.fold-offsets', 1);
--- a/js/src/jit-test/tests/wasm/regress/oom-eval.js +++ b/js/src/jit-test/tests/wasm/regress/oom-eval.js @@ -1,9 +1,9 @@ -// |jit-test| allow-oom +// |jit-test| slow; allow-oom if (typeof oomTest !== 'function' || !wasmIsSupported()) { print('Missing oomTest or wasm support in wasm/regress/oom-eval'); quit(); } function foo() { var g = newGlobal();
--- a/js/src/jit-test/tests/wasm/regress/oom-init.js +++ b/js/src/jit-test/tests/wasm/regress/oom-init.js @@ -1,9 +1,9 @@ -// |jit-test| allow-oom +// |jit-test| slow; allow-oom if (typeof oomTest !== 'function' || !wasmIsSupported()) quit(); Object.getOwnPropertyNames(this); s = newGlobal(); evalcx("\ /x/;\
new file mode 100644 --- /dev/null +++ b/js/src/jit-test/tests/wasm/resizing.js @@ -0,0 +1,77 @@ +// |jit-test| test-also-wasm-baseline +load(libdir + "wasm.js"); + +const Module = WebAssembly.Module; +const Instance = WebAssembly.Instance; +const Table = WebAssembly.Table; +const Memory = WebAssembly.Memory; + +// Test for stale heap pointers after resize + +// Grow directly from builtin call: +assertEq(evalText(`(module + (memory 1) + (func $test (result i32) + (i32.store (i32.const 0) (i32.const 1)) + (i32.store (i32.const 65532) (i32.const 10)) + (grow_memory (i32.const 99)) + (i32.store (i32.const 6553596) (i32.const 100)) + (i32.add + (i32.load (i32.const 0)) + (i32.add + (i32.load (i32.const 65532)) + (i32.load (i32.const 6553596))))) + (export "test" $test) +)`).exports.test(), 111); + +// Grow during call_import: +var exports = evalText(`(module + (import $imp "a" "imp") + (memory 1) + (func $grow (grow_memory (i32.const 99))) + (export "grow" $grow) + (func $test (result i32) + (i32.store (i32.const 0) (i32.const 1)) + (i32.store (i32.const 65532) (i32.const 10)) + (call $imp) + (i32.store (i32.const 6553596) (i32.const 100)) + (i32.add + (i32.load (i32.const 0)) + (i32.add + (i32.load (i32.const 65532)) + (i32.load (i32.const 6553596))))) + (export "test" $test) +)`, {a:{imp() { exports.grow() }}}).exports; + +setJitCompilerOption("baseline.warmup.trigger", 2); +setJitCompilerOption("ion.warmup.trigger", 4); +for (var i = 0; i < 10; i++) + assertEq(exports.test(), 111); + +// Grow during call_indirect: +var mem = new Memory({initial:1}); +var tbl = new Table({initial:1, element:"anyfunc"}); +var exports1 = evalText(`(module + (import "a" "mem" (memory 1)) + (func $grow + (i32.store (i32.const 65532) (i32.const 10)) + (grow_memory (i32.const 99)) + (i32.store (i32.const 6553596) (i32.const 100))) + (export "grow" $grow) +)`, {a:{mem}}).exports; +var exports2 = evalText(`(module + (import "a" "tbl" (table 1)) + (import "a" "mem" (memory 1)) + (type $v2v 
(func)) + (func $test (result i32) + (i32.store (i32.const 0) (i32.const 1)) + (call_indirect $v2v (i32.const 0)) + (i32.add + (i32.load (i32.const 0)) + (i32.add + (i32.load (i32.const 65532)) + (i32.load (i32.const 6553596))))) + (export "test" $test) +)`, {a:{tbl, mem}}).exports; +tbl.set(0, exports1.grow); +assertEq(exports2.test(), 111);
--- a/js/src/jit-test/tests/wasm/spec/memory_trap.wast +++ b/js/src/jit-test/tests/wasm/spec/memory_trap.wast @@ -11,26 +11,26 @@ ) (export "load" $load) (func $load (param $i i32) (result i32) (i32.load (i32.add (call $addr_limit) (get_local $i))) ) (export "grow_memory" $grow_memory) - (func $grow_memory (param i32) + (func $grow_memory (param i32) (result i32) (grow_memory (get_local 0)) ) ) (assert_return (invoke "store" (i32.const -4) (i32.const 42)) (i32.const 42)) (assert_return (invoke "load" (i32.const -4)) (i32.const 42)) (assert_trap (invoke "store" (i32.const -3) (i32.const 13)) "out of bounds memory access") (assert_trap (invoke "load" (i32.const -3)) "out of bounds memory access") (assert_trap (invoke "store" (i32.const -2) (i32.const 13)) "out of bounds memory access") (assert_trap (invoke "load" (i32.const -2)) "out of bounds memory access") (assert_trap (invoke "store" (i32.const -1) (i32.const 13)) "out of bounds memory access") (assert_trap (invoke "load" (i32.const -1)) "out of bounds memory access") (assert_trap (invoke "store" (i32.const 0) (i32.const 13)) "out of bounds memory access") (assert_trap (invoke "load" (i32.const 0)) "out of bounds memory access") (assert_trap (invoke "store" (i32.const 0x80000000) (i32.const 13)) "out of bounds memory access") (assert_trap (invoke "load" (i32.const 0x80000000)) "out of bounds memory access") -(assert_trap (invoke "grow_memory" (i32.const 0x80000000)) "memory size exceeds implementation limit") +(assert_return (invoke "grow_memory" (i32.const 0x80000000)) (i32.const -1))
--- a/js/src/jit-test/tests/wasm/spec/memory_trap.wast.js +++ b/js/src/jit-test/tests/wasm/spec/memory_trap.wast.js @@ -1,4 +1,2 @@ // |jit-test| test-also-wasm-baseline -// TODO current_memory opcode + traps on OOB -quit(); var importedArgs = ['memory_trap.wast']; load(scriptdir + '../spec.js');
--- a/js/src/jit-test/tests/wasm/spec/resizing.wast.js +++ b/js/src/jit-test/tests/wasm/spec/resizing.wast.js @@ -1,4 +1,2 @@ // |jit-test| test-also-wasm-baseline -// TODO memory resizing (you don't say) -quit(); var importedArgs = ['resizing.wast']; load(scriptdir + '../spec.js');
--- a/js/src/jit/CodeGenerator.cpp +++ b/js/src/jit/CodeGenerator.cpp @@ -11610,16 +11610,37 @@ CodeGenerator::visitInterruptCheck(LInte void CodeGenerator::visitWasmTrap(LWasmTrap* lir) { MOZ_ASSERT(gen->compilingAsmJS()); masm.jump(wasm::JumpTarget(lir->mir()->trap())); } +void +CodeGenerator::visitWasmBoundsCheck(LWasmBoundsCheck* ins) +{ + const MWasmBoundsCheck* mir = ins->mir(); + Register ptr = ToRegister(ins->ptr()); + + if (mir->isRedundant()) { + // For better test coverage, inject debug assertions that redundant + // bounds checks really are redundant. +#ifdef DEBUG + Label ok; + masm.wasmBoundsCheck(Assembler::Below, ptr, &ok); + masm.assumeUnreachable("Redundant bounds check failed!"); + masm.bind(&ok); +#endif + return; + } + + masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr, wasm::JumpTarget::OutOfBounds); +} + typedef bool (*RecompileFn)(JSContext*); static const VMFunction RecompileFnInfo = FunctionInfo<RecompileFn>(Recompile, "Recompile"); typedef bool (*ForcedRecompileFn)(JSContext*); static const VMFunction ForcedRecompileFnInfo = FunctionInfo<ForcedRecompileFn>(ForcedRecompile, "ForcedRecompile"); void
--- a/js/src/jit/CodeGenerator.h +++ b/js/src/jit/CodeGenerator.h @@ -414,16 +414,17 @@ class CodeGenerator final : public CodeG void visitAssertResultV(LAssertResultV* ins); void visitAssertResultT(LAssertResultT* ins); void emitAssertResultV(const ValueOperand output, const TemporaryTypeSet* typeset); void emitAssertObjectOrStringResult(Register input, MIRType type, const TemporaryTypeSet* typeset); void visitInterruptCheck(LInterruptCheck* lir); void visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit* ins); void visitWasmTrap(LWasmTrap* lir); + void visitWasmBoundsCheck(LWasmBoundsCheck* ins); void visitRecompileCheck(LRecompileCheck* ins); void visitRotate(LRotate* ins); void visitRandom(LRandom* ins); void visitSignExtend(LSignExtend* ins); #ifdef DEBUG void emitDebugForceBailing(LInstruction* lir);
--- a/js/src/jit/EffectiveAddressAnalysis.cpp +++ b/js/src/jit/EffectiveAddressAnalysis.cpp @@ -95,46 +95,43 @@ AnalyzeLsh(TempAllocator& alloc, MLsh* l if (base->isRecoveredOnBailout()) return; MEffectiveAddress* eaddr = MEffectiveAddress::New(alloc, base, index, scale, displacement); last->replaceAllUsesWith(eaddr); last->block()->insertAfter(last, eaddr); } -template<typename MWasmMemoryAccessType> +template<typename AsmJSMemoryAccess> bool -EffectiveAddressAnalysis::tryAddDisplacement(MWasmMemoryAccessType* ins, int32_t o) +EffectiveAddressAnalysis::tryAddDisplacement(AsmJSMemoryAccess* ins, int32_t o) { +#ifdef WASM_HUGE_MEMORY // Compute the new offset. Check for overflow. uint32_t oldOffset = ins->offset(); uint32_t newOffset = oldOffset + o; if (o < 0 ? (newOffset >= oldOffset) : (newOffset < oldOffset)) return false; - // Compute the new offset to the end of the access. Check for overflow - // here also. - uint32_t newEnd = newOffset + ins->byteSize(); - if (newEnd < newOffset) - return false; - - // Determine the range of valid offsets which can be folded into this - // instruction and check whether our computed offset is within that range. - size_t range = mir_->foldableOffsetRange(ins); - if (size_t(newEnd) > range) + // The offset must ultimately be written into the offset immediate of a load + // or store instruction so don't allow folding of the offset is bigger. + if (newOffset >= wasm::OffsetGuardLimit) return false; // Everything checks out. This is the new offset. ins->setOffset(newOffset); return true; +#else + return false; +#endif } -template<typename MWasmMemoryAccessType> +template<typename AsmJSMemoryAccess> void -EffectiveAddressAnalysis::analyzeAsmHeapAccess(MWasmMemoryAccessType* ins) +EffectiveAddressAnalysis::analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins) { MDefinition* base = ins->base(); if (base->isConstant()) { // Look for heap[i] where i is a constant offset, and fold the offset. 
// By doing the folding now, we simplify the task of codegen; the offset // is always the address mode immediate. This also allows it to avoid // a situation where the sum of a constant pointer value and a non-zero @@ -193,15 +190,15 @@ EffectiveAddressAnalysis::analyze() // Note that we don't check for MAsmJSCompareExchangeHeap // or MAsmJSAtomicBinopHeap, because the backend and the OOB // mechanism don't support non-zero offsets for them yet // (TODO bug 1254935). if (i->isLsh()) AnalyzeLsh(graph_.alloc(), i->toLsh()); else if (i->isAsmJSLoadHeap()) - analyzeAsmHeapAccess(i->toAsmJSLoadHeap()); + analyzeAsmJSHeapAccess(i->toAsmJSLoadHeap()); else if (i->isAsmJSStoreHeap()) - analyzeAsmHeapAccess(i->toAsmJSStoreHeap()); + analyzeAsmJSHeapAccess(i->toAsmJSStoreHeap()); } } return true; }
--- a/js/src/jit/EffectiveAddressAnalysis.h +++ b/js/src/jit/EffectiveAddressAnalysis.h @@ -14,21 +14,21 @@ namespace jit { class MIRGraph; class EffectiveAddressAnalysis { MIRGenerator* mir_; MIRGraph& graph_; - template<typename MWasmMemoryAccessType> - MOZ_MUST_USE bool tryAddDisplacement(MWasmMemoryAccessType* ins, int32_t o); + template <typename AsmJSMemoryAccess> + MOZ_MUST_USE bool tryAddDisplacement(AsmJSMemoryAccess* ins, int32_t o); - template<typename MWasmMemoryAccessType> - void analyzeAsmHeapAccess(MWasmMemoryAccessType* ins); + template <typename AsmJSMemoryAccess> + void analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins); public: EffectiveAddressAnalysis(MIRGenerator* mir, MIRGraph& graph) : mir_(mir), graph_(graph) {} MOZ_MUST_USE bool analyze(); };
--- a/js/src/jit/JitOptions.cpp +++ b/js/src/jit/JitOptions.cpp @@ -219,16 +219,20 @@ DefaultJitOptions::DefaultJitOptions() } // Toggles whether unboxed plain objects can be created by the VM. SET_DEFAULT(disableUnboxedObjects, false); // Test whether wasm int64 / double NaN bits testing is enabled. SET_DEFAULT(wasmTestMode, false); + // Toggles the optimization whereby offsets are folded into loads and not + // included in the bounds check. + SET_DEFAULT(wasmFoldOffsets, true); + // Determines whether we suppress using signal handlers // for interrupting jit-ed code. This is used only for testing. SET_DEFAULT(ionInterruptWithoutSignals, false); } bool DefaultJitOptions::isSmallFunction(JSScript* script) const {
--- a/js/src/jit/JitOptions.h +++ b/js/src/jit/JitOptions.h @@ -65,16 +65,17 @@ struct DefaultJitOptions bool disableSharedStubs; bool disableSincos; bool disableSink; bool eagerCompilation; bool forceInlineCaches; bool limitScriptSize; bool osr; bool wasmTestMode; + bool wasmFoldOffsets; bool ionInterruptWithoutSignals; uint32_t baselineWarmUpThreshold; uint32_t exceptionBailoutThreshold; uint32_t frequentBailoutThreshold; uint32_t maxStackArgs; uint32_t osrPcMismatchesBeforeRecompile; uint32_t smallFunctionMaxBytecodeLength_; uint32_t jumpThreshold;
--- a/js/src/jit/Lowering.cpp +++ b/js/src/jit/Lowering.cpp @@ -4120,16 +4120,39 @@ void LIRGenerator::visitHasClass(MHasClass* ins) { MOZ_ASSERT(ins->object()->type() == MIRType::Object); MOZ_ASSERT(ins->type() == MIRType::Boolean); define(new(alloc()) LHasClass(useRegister(ins->object())), ins); } void +LIRGenerator::visitWasmAddOffset(MWasmAddOffset* ins) +{ + MOZ_ASSERT(ins->base()->type() == MIRType::Int32); + MOZ_ASSERT(ins->type() == MIRType::Int32); + define(new(alloc()) LWasmAddOffset(useRegisterAtStart(ins->base())), ins); +} + +void +LIRGenerator::visitWasmBoundsCheck(MWasmBoundsCheck* ins) +{ +#ifndef DEBUG + if (ins->isRedundant()) + return; +#endif + + MDefinition* input = ins->input(); + MOZ_ASSERT(input->type() == MIRType::Int32); + + auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(input)); + add(lir, ins); +} + +void LIRGenerator::visitWasmLoadGlobalVar(MWasmLoadGlobalVar* ins) { if (ins->type() == MIRType::Int64) defineInt64(new(alloc()) LWasmLoadGlobalVarI64, ins); else define(new(alloc()) LWasmLoadGlobalVar, ins); }
--- a/js/src/jit/Lowering.h +++ b/js/src/jit/Lowering.h @@ -277,16 +277,18 @@ class LIRGenerator : public LIRGenerator void visitIn(MIn* ins); void visitInArray(MInArray* ins); void visitInstanceOf(MInstanceOf* ins); void visitCallInstanceOf(MCallInstanceOf* ins); void visitIsCallable(MIsCallable* ins); void visitIsConstructor(MIsConstructor* ins); void visitIsObject(MIsObject* ins); void visitHasClass(MHasClass* ins); + void visitWasmAddOffset(MWasmAddOffset* ins); + void visitWasmBoundsCheck(MWasmBoundsCheck* ins); void visitWasmLoadGlobalVar(MWasmLoadGlobalVar* ins); void visitWasmStoreGlobalVar(MWasmStoreGlobalVar* ins); void visitAsmJSParameter(MAsmJSParameter* ins); void visitAsmJSReturn(MAsmJSReturn* ins); void visitAsmJSVoidReturn(MAsmJSVoidReturn* ins); void visitAsmJSPassStackArg(MAsmJSPassStackArg* ins); void visitWasmCall(MWasmCall* ins); void visitSetDOMProperty(MSetDOMProperty* ins);
--- a/js/src/jit/MIR.cpp +++ b/js/src/jit/MIR.cpp @@ -1,16 +1,17 @@ /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- * vim: set ts=8 sts=4 et sw=4 tw=99: * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "jit/MIR.h" +#include "mozilla/CheckedInt.h" #include "mozilla/FloatingPoint.h" #include "mozilla/IntegerPrintfMacros.h" #include "mozilla/MathAlgorithms.h" #include "mozilla/SizePrintfMacros.h" #include <ctype.h> #include "jslibmath.h" @@ -29,16 +30,17 @@ #include "jsobjinlines.h" #include "jsscriptinlines.h" using namespace js; using namespace js::jit; using JS::ToInt32; +using mozilla::CheckedInt; using mozilla::NumbersAreIdentical; using mozilla::IsFloat32Representable; using mozilla::IsNaN; using mozilla::Maybe; using mozilla::DebugOnly; #ifdef DEBUG size_t MUse::index() const @@ -4939,16 +4941,34 @@ MDefinition* MLoadFixedSlotAndUnbox::foldsTo(TempAllocator& alloc) { if (MDefinition* def = foldsToStore(alloc)) return def; return this; } +MDefinition* +MWasmAddOffset::foldsTo(TempAllocator& alloc) +{ + MDefinition* baseArg = base(); + if (!baseArg->isConstant()) + return this; + + MOZ_ASSERT(baseArg->type() == MIRType::Int32); + CheckedInt<uint32_t> ptr = baseArg->toConstant()->toInt32(); + + ptr += offset(); + + if (!ptr.isValid()) + return this; + + return MConstant::New(alloc, Int32Value(ptr.value())); +} + MDefinition::AliasType MAsmJSLoadHeap::mightAlias(const MDefinition* def) const { if (def->isAsmJSStoreHeap()) { const MAsmJSStoreHeap* store = def->toAsmJSStoreHeap(); if (store->accessType() != accessType()) return AliasType::MayAlias; if (!base()->isConstant() || !store->base()->isConstant()) @@ -5420,25 +5440,26 @@ MWasmCall::New(TempAllocator& alloc, con MWasmCall* MWasmCall::NewBuiltinInstanceMethodCall(TempAllocator& alloc, const wasm::CallSiteDesc& desc, const 
wasm::SymbolicAddress builtin, const ABIArg& instanceArg, const Args& args, MIRType resultType, - uint32_t spIncrement) + uint32_t spIncrement, + uint32_t tlsStackOffset) { auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin); MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, resultType, spIncrement, - MWasmCall::DontSaveTls, nullptr); - + tlsStackOffset, nullptr); if (!call) return nullptr; - MOZ_ASSERT(instanceArg != ABIArg()); // instanceArg must be initialized. + + MOZ_ASSERT(instanceArg != ABIArg()); call->instanceArg_ = instanceArg; return call; } void MSqrt::trySpecializeFloat32(TempAllocator& alloc) { if (!input()->canProduceFloat32() || !CheckUsesAreFloat32Consumers(this)) { if (input()->type() == MIRType::Float32)
--- a/js/src/jit/MIR.h +++ b/js/src/jit/MIR.h @@ -13219,120 +13219,130 @@ class MAsmJSNeg public: INSTRUCTION_HEADER(AsmJSNeg) static MAsmJSNeg* NewAsmJS(TempAllocator& alloc, MDefinition* op, MIRType type) { return new(alloc) MAsmJSNeg(op, type); } }; +class MWasmBoundsCheck + : public MUnaryInstruction, + public NoTypePolicy::Data +{ + bool redundant_; + + explicit MWasmBoundsCheck(MDefinition* index) + : MUnaryInstruction(index), + redundant_(false) + { + setGuard(); // Effectful: throws for OOB. + } + + public: + INSTRUCTION_HEADER(WasmBoundsCheck) + TRIVIAL_NEW_WRAPPERS + + AliasSet getAliasSet() const override { + return AliasSet::None(); + } + + bool isRedundant() const { + return redundant_; + } + + void setRedundant(bool val) { + redundant_ = val; + } +}; + +class MWasmAddOffset + : public MUnaryInstruction, + public NoTypePolicy::Data +{ + uint32_t offset_; + + MWasmAddOffset(MDefinition* base, uint32_t offset) + : MUnaryInstruction(base), + offset_(offset) + { + setGuard(); + setResultType(MIRType::Int32); + } + + public: + INSTRUCTION_HEADER(WasmAddOffset) + TRIVIAL_NEW_WRAPPERS + NAMED_OPERANDS((0, base)) + + MDefinition* foldsTo(TempAllocator& alloc) override; + + AliasSet getAliasSet() const override { + return AliasSet::None(); + } + + uint32_t offset() const { + return offset_; + } +}; + class MWasmMemoryAccess { uint32_t offset_; uint32_t align_; Scalar::Type accessType_ : 8; - bool needsBoundsCheck_; unsigned numSimdElems_; MemoryBarrierBits barrierBefore_; MemoryBarrierBits barrierAfter_; public: explicit MWasmMemoryAccess(Scalar::Type accessType, uint32_t align, uint32_t offset, unsigned numSimdElems = 0, MemoryBarrierBits barrierBefore = MembarNobits, MemoryBarrierBits barrierAfter = MembarNobits) : offset_(offset), align_(align), accessType_(accessType), - needsBoundsCheck_(true), numSimdElems_(numSimdElems), barrierBefore_(barrierBefore), barrierAfter_(barrierAfter) { MOZ_ASSERT(numSimdElems <= ScalarTypeToLength(accessType)); 
MOZ_ASSERT(mozilla::IsPowerOfTwo(align)); } uint32_t offset() const { return offset_; } - uint32_t endOffset() const { return offset() + byteSize(); } uint32_t align() const { return align_; } Scalar::Type accessType() const { return accessType_; } unsigned byteSize() const { return Scalar::isSimdType(accessType()) ? Scalar::scalarByteSize(accessType()) * numSimdElems() : TypedArrayElemSize(accessType()); } - bool needsBoundsCheck() const { return needsBoundsCheck_; } - unsigned numSimdElems() const { MOZ_ASSERT(Scalar::isSimdType(accessType_)); return numSimdElems_; } + unsigned numSimdElems() const { return numSimdElems_; } MemoryBarrierBits barrierBefore() const { return barrierBefore_; } MemoryBarrierBits barrierAfter() const { return barrierAfter_; } bool isAtomicAccess() const { return (barrierBefore_ | barrierAfter_) != MembarNobits; } + bool isSimdAccess() const { return Scalar::isSimdType(accessType_); } bool isUnaligned() const { return align() && align() < byteSize(); } - void removeBoundsCheck() { needsBoundsCheck_ = false; } - void setOffset(uint32_t o) { offset_ = o; } -}; - -class MWasmBoundsCheck - : public MUnaryInstruction, - public MWasmMemoryAccess, - public NoTypePolicy::Data -{ - bool redundant_; - - explicit MWasmBoundsCheck(MDefinition* index, const MWasmMemoryAccess& access) - : MUnaryInstruction(index), - MWasmMemoryAccess(access), - redundant_(false) - { - setMovable(); - setGuard(); // Effectful: throws for OOB. 
- } - - public: - INSTRUCTION_HEADER(WasmBoundsCheck) - TRIVIAL_NEW_WRAPPERS - - bool congruentTo(const MDefinition* ins) const override { - if (!congruentIfOperandsEqual(ins)) - return false; - const MWasmBoundsCheck* other = ins->toWasmBoundsCheck(); - return accessType() == other->accessType() && - offset() == other->offset() && - align() == other->align(); - } - - AliasSet getAliasSet() const override { - return AliasSet::None(); - } - - bool isRedundant() const { - return redundant_; - } - - void setRedundant(bool val) { - redundant_ = val; - } + void clearOffset() { offset_ = 0; } }; class MWasmLoad : public MUnaryInstruction, public MWasmMemoryAccess, public NoTypePolicy::Data { - MWasmLoad(MDefinition* base, const MWasmMemoryAccess& access, bool isInt64) + MWasmLoad(MDefinition* base, const MWasmMemoryAccess& access, MIRType resultType) : MUnaryInstruction(base), MWasmMemoryAccess(access) { setGuard(); - MOZ_ASSERT(access.accessType() != Scalar::Uint8Clamped, "unexpected load heap in wasm"); - if (isInt64) - setResultType(MIRType::Int64); - else - setResultType(ScalarTypeToMIRType(access.accessType())); + setResultType(resultType); } public: INSTRUCTION_HEADER(WasmLoad) TRIVIAL_NEW_WRAPPERS NAMED_OPERANDS((0, base)) AliasSet getAliasSet() const override { @@ -13361,64 +13371,77 @@ class MWasmStore TRIVIAL_NEW_WRAPPERS NAMED_OPERANDS((0, base), (1, value)) AliasSet getAliasSet() const override { return AliasSet::Store(AliasSet::AsmJSHeap); } }; +class MAsmJSMemoryAccess +{ + uint32_t offset_; + Scalar::Type accessType_; + bool needsBoundsCheck_; + + public: + explicit MAsmJSMemoryAccess(Scalar::Type accessType) + : offset_(0), + accessType_(accessType), + needsBoundsCheck_(true) + { + MOZ_ASSERT(accessType != Scalar::Uint8Clamped); + MOZ_ASSERT(!Scalar::isSimdType(accessType)); + } + + uint32_t offset() const { return offset_; } + uint32_t endOffset() const { return offset() + byteSize(); } + Scalar::Type accessType() const { return accessType_; } + unsigned 
byteSize() const { return TypedArrayElemSize(accessType()); } + bool needsBoundsCheck() const { return needsBoundsCheck_; } + + void removeBoundsCheck() { needsBoundsCheck_ = false; } + void setOffset(uint32_t o) { offset_ = o; } +}; + class MAsmJSLoadHeap : public MUnaryInstruction, - public MWasmMemoryAccess, + public MAsmJSMemoryAccess, public NoTypePolicy::Data { - MAsmJSLoadHeap(MDefinition* base, const MWasmMemoryAccess& access) + MAsmJSLoadHeap(MDefinition* base, Scalar::Type accessType) : MUnaryInstruction(base), - MWasmMemoryAccess(access) - { - if (access.barrierBefore() | access.barrierAfter()) - setGuard(); // Not removable - else - setMovable(); - - MOZ_ASSERT(access.accessType() != Scalar::Uint8Clamped, "unexpected load heap in asm.js"); - setResultType(ScalarTypeToMIRType(access.accessType())); + MAsmJSMemoryAccess(accessType) + { + setResultType(ScalarTypeToMIRType(accessType)); } public: INSTRUCTION_HEADER(AsmJSLoadHeap) TRIVIAL_NEW_WRAPPERS MDefinition* base() const { return getOperand(0); } void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); } bool congruentTo(const MDefinition* ins) const override; AliasSet getAliasSet() const override { - // When a barrier is needed make the instruction effectful by - // giving it a "store" effect. 
- if (isAtomicAccess()) - return AliasSet::Store(AliasSet::AsmJSHeap); return AliasSet::Load(AliasSet::AsmJSHeap); } AliasType mightAlias(const MDefinition* def) const override; }; class MAsmJSStoreHeap : public MBinaryInstruction, - public MWasmMemoryAccess, + public MAsmJSMemoryAccess, public NoTypePolicy::Data { - MAsmJSStoreHeap(MDefinition* base, const MWasmMemoryAccess& access, MDefinition* v) + MAsmJSStoreHeap(MDefinition* base, Scalar::Type accessType, MDefinition* v) : MBinaryInstruction(base, v), - MWasmMemoryAccess(access) - { - if (access.barrierBefore() | access.barrierAfter()) - setGuard(); // Not removable - } + MAsmJSMemoryAccess(accessType) + {} public: INSTRUCTION_HEADER(AsmJSStoreHeap) TRIVIAL_NEW_WRAPPERS MDefinition* base() const { return getOperand(0); } void replaceBase(MDefinition* newBase) { replaceOperand(0, newBase); } MDefinition* value() const { return getOperand(1); } @@ -13660,28 +13683,33 @@ class MWasmCall final AnyRegister reg; MDefinition* def; Arg(AnyRegister reg, MDefinition* def) : reg(reg), def(def) {} }; typedef Vector<Arg, 8, SystemAllocPolicy> Args; static const uint32_t DontSaveTls = UINT32_MAX; - static MWasmCall* New(TempAllocator& alloc, const wasm::CallSiteDesc& desc, - const wasm::CalleeDesc& callee, const Args& args, MIRType resultType, - uint32_t spIncrement, uint32_t tlsStackOffset, + static MWasmCall* New(TempAllocator& alloc, + const wasm::CallSiteDesc& desc, + const wasm::CalleeDesc& callee, + const Args& args, + MIRType resultType, + uint32_t spIncrement, + uint32_t tlsStackOffset, MDefinition* tableIndex = nullptr); static MWasmCall* NewBuiltinInstanceMethodCall(TempAllocator& alloc, const wasm::CallSiteDesc& desc, const wasm::SymbolicAddress builtin, const ABIArg& instanceArg, const Args& args, MIRType resultType, - uint32_t spIncrement); + uint32_t spIncrement, + uint32_t tlsStackOffset); size_t numArgs() const { return argRegs_.length(); } AnyRegister registerForArg(size_t index) const { MOZ_ASSERT(index < 
numArgs()); return argRegs_[index]; }
--- a/js/src/jit/MIRGenerator.h +++ b/js/src/jit/MIRGenerator.h @@ -209,19 +209,16 @@ class MIRGenerator public: AsmJSPerfSpewer& perfSpewer() { return asmJSPerfSpewer_; } #endif public: const JitCompileOptions options; - bool needsBoundsCheckBranch(const MWasmMemoryAccess* access) const; - size_t foldableOffsetRange(const MWasmMemoryAccess* access) const; - private: GraphSpewer gs_; public: GraphSpewer& graphSpewer() { return gs_; } };
--- a/js/src/jit/MIRGraph.cpp +++ b/js/src/jit/MIRGraph.cpp @@ -100,62 +100,16 @@ MIRGenerator::addAbortedPreliminaryGroup if (group == abortedPreliminaryGroups_[i]) return; } AutoEnterOOMUnsafeRegion oomUnsafe; if (!abortedPreliminaryGroups_.append(group)) oomUnsafe.crash("addAbortedPreliminaryGroup"); } -bool -MIRGenerator::needsBoundsCheckBranch(const MWasmMemoryAccess* access) const -{ - // A heap access needs a bounds-check branch if we're not relying on signal - // handlers to catch errors, and if it's not proven to be within bounds. - // We use signal-handlers on x64, but on x86 there isn't enough address - // space for a guard region. Also, on x64 the atomic loads and stores - // can't (yet) use the signal handlers. -#ifdef WASM_HUGE_MEMORY - return false; -#else - return access->needsBoundsCheck(); -#endif -} - -size_t -MIRGenerator::foldableOffsetRange(const MWasmMemoryAccess* access) const -{ - // This determines whether it's ok to fold up to WasmImmediateRange - // offsets, instead of just WasmCheckedImmediateRange. - - static_assert(WasmCheckedImmediateRange <= WasmImmediateRange, - "WasmImmediateRange should be the size of an unconstrained " - "address immediate"); - -#ifdef WASM_HUGE_MEMORY - static_assert(wasm::Uint32Range + WasmImmediateRange + sizeof(wasm::Val) < wasm::MappedSize, - "When using signal handlers for bounds checking, a uint32 is added to the base " - "address followed by an immediate in the range [0, WasmImmediateRange). An " - "unaligned access (whose size is conservatively approximated by wasm::Val) may " - "spill over, so ensure a space at the end."); - return WasmImmediateRange; -#else - // On 32-bit platforms, if we've proven the access is in bounds after - // 32-bit wrapping, we can fold full offsets because they're added with - // 32-bit arithmetic. - if (sizeof(intptr_t) == sizeof(int32_t) && !access->needsBoundsCheck()) - return WasmImmediateRange; - - // Otherwise, only allow the checked size. 
This is always less than the - // minimum heap length, and allows explicit bounds checks to fold in the - // offset without overflow. - return WasmCheckedImmediateRange; -#endif -} - void MIRGraph::addBlock(MBasicBlock* block) { MOZ_ASSERT(block); block->setId(blockIdGen_++); blocks_.pushBack(block); numBlocks_++; }
--- a/js/src/jit/MOpcodes.h +++ b/js/src/jit/MOpcodes.h @@ -266,16 +266,17 @@ namespace jit { _(GetDOMMember) \ _(SetDOMProperty) \ _(IsConstructor) \ _(IsCallable) \ _(IsObject) \ _(HasClass) \ _(CopySign) \ _(WasmBoundsCheck) \ + _(WasmAddOffset) \ _(WasmLoad) \ _(WasmStore) \ _(WasmTrap) \ _(WasmTruncateToInt32) \ _(AsmJSNeg) \ _(AsmJSUnsignedToDouble) \ _(AsmJSUnsignedToFloat32) \ _(AsmJSLoadHeap) \
--- a/js/src/jit/MacroAssembler.h +++ b/js/src/jit/MacroAssembler.h @@ -1035,18 +1035,18 @@ class MacroAssembler : public MacroAssem inline void branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs, Label* label) PER_SHARED_ARCH; inline void branchDoubleNotInInt64Range(Address src, Register temp, Label* fail); inline void branchDoubleNotInUInt64Range(Address src, Register temp, Label* fail); inline void branchFloat32NotInInt64Range(Address src, Register temp, Label* fail); inline void branchFloat32NotInUInt64Range(Address src, Register temp, Label* fail); - template <typename T> - inline void branchAdd32(Condition cond, T src, Register dest, Label* label) PER_SHARED_ARCH; + template <typename T, typename L> + inline void branchAdd32(Condition cond, T src, Register dest, L label) PER_SHARED_ARCH; template <typename T> inline void branchSub32(Condition cond, T src, Register dest, Label* label) PER_SHARED_ARCH; inline void decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH; template <class L> inline void branchTest32(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH; template <class L> @@ -1309,16 +1309,37 @@ class MacroAssembler : public MacroAssem DEFINED_ON(x86, x64); inline void truncateDoubleToInt64(Address src, Address dest, Register temp) DEFINED_ON(x86_shared); inline void truncateDoubleToUInt64(Address src, Address dest, Register temp, FloatRegister floatTemp) DEFINED_ON(x86, x64); public: + // ======================================================================== + // wasm support + + // Emit a bounds check against the (dynamically-patched) wasm bounds check + // limit, jumping to 'label' if 'cond' holds. + template <class L> + inline void wasmBoundsCheck(Condition cond, Register index, L label) PER_ARCH; + + // Called after compilation completes to patch the given limit into the + // given instruction's immediate. 
+ static inline void wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) PER_ARCH; + + // On x86, each instruction adds its own wasm::MemoryAccess's to the + // wasm::MemoryAccessVector (there can be multiple when i64 is involved). + // On x64, only some asm.js accesses need a wasm::MemoryAccess so the caller + // is responsible for doing this instead. + void wasmLoad(Scalar::Type type, unsigned numSimdElems, Operand srcAddr, AnyRegister out) DEFINED_ON(x86, x64); + void wasmLoadI64(Scalar::Type type, Operand srcAddr, Register64 out) DEFINED_ON(x86, x64); + void wasmStore(Scalar::Type type, unsigned numSimdElems, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64); + void wasmStoreI64(Register64 value, Operand dstAddr) DEFINED_ON(x86); + // wasm specific methods, used in both the wasm baseline compiler and ion. void wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64); void wasmTruncateDoubleToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared); void outOfLineWasmTruncateDoubleToInt32(FloatRegister input, bool isUnsigned, Label* rejoin) DEFINED_ON(x86_shared); void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64); void wasmTruncateFloat32ToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared); void outOfLineWasmTruncateFloat32ToInt32(FloatRegister input, bool isUnsigned, Label* rejoin) DEFINED_ON(x86_shared);
--- a/js/src/jit/WasmBCE.cpp +++ b/js/src/jit/WasmBCE.cpp @@ -6,95 +6,83 @@ #include "jit/WasmBCE.h" #include "jit/MIRGenerator.h" #include "jit/MIRGraph.h" using namespace js; using namespace js::jit; using namespace mozilla; -struct DefAndOffset { - MDefinition* loc; - uint32_t endOffset; -}; - -typedef js::HashMap<uint32_t, DefAndOffset, DefaultHasher<uint32_t>, SystemAllocPolicy> +typedef js::HashMap<uint32_t, MDefinition*, DefaultHasher<uint32_t>, SystemAllocPolicy> LastSeenMap; // The Wasm Bounds Check Elimination (BCE) pass looks for bounds checks // on SSA values that have already been checked. (in the same block or in a // dominating block). These bounds checks are redundant and thus eliminated. // // Note: This is safe in the presense of dynamic memory sizes as long as they // can ONLY GROW. If we allow SHRINKING the heap, this pass should be // RECONSIDERED. // // TODO (dbounov): Are there a lot of cases where there is no single dominating // check, but a set of checks that together dominate a redundant check? // // TODO (dbounov): Generalize to constant additions relative to one base -bool jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph) { +bool +jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph) +{ // Map for dominating block where a given definition was checked LastSeenMap lastSeen; if (!lastSeen.init()) return false; for (ReversePostorderIterator bIter(graph.rpoBegin()); bIter != graph.rpoEnd(); bIter++) { MBasicBlock* block = *bIter; for (MDefinitionIterator dIter(block); dIter;) { MDefinition* def = *dIter++; switch (def->op()) { case MDefinition::Op_WasmBoundsCheck: { MWasmBoundsCheck* bc = def->toWasmBoundsCheck(); MDefinition* addr = def->getOperand(0); - LastSeenMap::Ptr checkPtr = lastSeen.lookup(addr->id()); - if (checkPtr && - checkPtr->value().endOffset >= bc->endOffset() && - checkPtr->value().loc->block()->dominates(block)) { - // Address already checked. 
Discard current check + LastSeenMap::AddPtr checkPtr = lastSeen.lookupForAdd(addr->id()); + if (checkPtr && checkPtr->value()->block()->dominates(block)) { bc->setRedundant(true); } else { - DefAndOffset defOff = { def, bc->endOffset() }; - // Address not previously checked - remember current check - if (!lastSeen.put(addr->id(), defOff)) + if (!lastSeen.add(checkPtr, addr->id(), def)) return false; } break; } case MDefinition::Op_Phi: { MPhi* phi = def->toPhi(); bool phiChecked = true; - uint32_t off = UINT32_MAX; MOZ_ASSERT(phi->numOperands() > 0); // If all incoming values to a phi node are safe (i.e. have a // check that dominates this block) then we can consider this // phi node checked. // // Note that any phi that is part of a cycle // will not be "safe" since the value coming on the backedge // cannot be in lastSeen because its block hasn't been traversed yet. for (int i = 0, nOps = phi->numOperands(); i < nOps; i++) { MDefinition* src = phi->getOperand(i); + LastSeenMap::Ptr checkPtr = lastSeen.lookup(src->id()); - - if (!checkPtr || !checkPtr->value().loc->block()->dominates(block)) { + if (!checkPtr || !checkPtr->value()->block()->dominates(block)) { phiChecked = false; break; - } else { - off = Min(off, checkPtr->value().endOffset); } } if (phiChecked) { - DefAndOffset defOff = { def, off }; - if (!lastSeen.put(def->id(), defOff)) + if (!lastSeen.put(def->id(), def)) return false; } break; } default: break; }
--- a/js/src/jit/arm/Architecture-arm.h +++ b/js/src/jit/arm/Architecture-arm.h @@ -645,18 +645,12 @@ static inline bool UseHardFpABI() #endif } #endif // In order to handle SoftFp ABI calls, we need to be able to express that we // have ABIArg which are represented by pair of general purpose registers. #define JS_CODEGEN_REGISTER_PAIR 1 -// See MIRGenerator::foldableOffsetRange for more info. -// TODO: Implement this for ARM. Note that it requires Codegen to respect the -// offset field of AsmJSHeapAccess. -static const size_t WasmCheckedImmediateRange = 0; -static const size_t WasmImmediateRange = 0; - } // namespace jit } // namespace js #endif /* jit_arm_Architecture_arm_h */
--- a/js/src/jit/arm/Assembler-arm.cpp +++ b/js/src/jit/arm/Assembler-arm.cpp @@ -3376,37 +3376,16 @@ Assembler::BailoutTableStart(uint8_t* co { Instruction* inst = (Instruction*)code; // Skip a pool with an artificial guard or NOP fill. inst = inst->skipPool(); MOZ_ASSERT(inst->is<InstBLImm>()); return (uint8_t*) inst; } -void -Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength) -{ - Instruction* inst = (Instruction*) patchAt; - MOZ_ASSERT(inst->is<InstCMP>()); - InstCMP* cmp = inst->as<InstCMP>(); - - Register index; - cmp->extractOp1(&index); - - MOZ_ASSERT(cmp->extractOp2().isImm8()); - - Imm8 imm8 = Imm8(heapLength); - MOZ_ASSERT(!imm8.invalid); - - *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always); - // NOTE: we don't update the Auto Flush Cache! this function is currently - // only called from within ModuleGenerator::finish, which does that - // for us. Don't call this! -} - InstructionIterator::InstructionIterator(Instruction* i_) : i(i_) { // Work around pools with an artificial pool guard and around nop-fill. i = i->skipPool(); } uint32_t Assembler::NopFill = 0;
--- a/js/src/jit/arm/Assembler-arm.h +++ b/js/src/jit/arm/Assembler-arm.h @@ -1963,17 +1963,16 @@ class Assembler : public AssemblerShared static void ToggleToJmp(CodeLocationLabel inst_); static void ToggleToCmp(CodeLocationLabel inst_); static uint8_t* BailoutTableStart(uint8_t* code); static size_t ToggledCallSize(uint8_t* code); static void ToggleCall(CodeLocationLabel inst_, bool enabled); - static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength); void processCodeLabels(uint8_t* rawCode); bool bailed() { return m_buffer.bail(); } void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, const Disassembler::HeapAccess& heapAccess)
--- a/js/src/jit/arm/CodeGenerator-arm.cpp +++ b/js/src/jit/arm/CodeGenerator-arm.cpp @@ -2235,138 +2235,113 @@ CodeGeneratorARM::visitWasmCallI64(LWasm { emitWasmCall(ins); } void CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) { const MAsmJSLoadHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + + const LAllocation* ptr = ins->ptr(); + bool isSigned; int size; bool isFloat = false; switch (mir->accessType()) { case Scalar::Int8: isSigned = true; size = 8; break; case Scalar::Uint8: isSigned = false; size = 8; break; case Scalar::Int16: isSigned = true; size = 16; break; case Scalar::Uint16: isSigned = false; size = 16; break; case Scalar::Int32: case Scalar::Uint32: isSigned = true; size = 32; break; case Scalar::Float64: isFloat = true; size = 64; break; case Scalar::Float32: isFloat = true; size = 32; break; default: MOZ_CRASH("unexpected array type"); } - memoryBarrier(mir->barrierBefore()); - - const LAllocation* ptr = ins->ptr(); - if (ptr->isConstant()) { MOZ_ASSERT(!mir->needsBoundsCheck()); int32_t ptrImm = ptr->toConstant()->toInt32(); MOZ_ASSERT(ptrImm >= 0); if (isFloat) { VFPRegister vd(ToFloatRegister(ins->output())); if (size == 32) masm.ma_vldr(Address(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always); else masm.ma_vldr(Address(HeapReg, ptrImm), vd, Assembler::Always); } else { masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm), ToRegister(ins->output()), Offset, Assembler::Always); } } else { Register ptrReg = ToRegister(ptr); - if (isFloat) - masm.ma_loadHeapAsmJS(ptrReg, size, mir->needsBoundsCheck(), - /*faultOnOOB=*/false, ToFloatRegister(ins->output())); - else - masm.ma_loadHeapAsmJS(ptrReg, size, isSigned, mir->needsBoundsCheck(), - mir->isAtomicAccess(), ToRegister(ins->output())); - } - - memoryBarrier(mir->barrierAfter()); -} - -void -CodeGeneratorARM::visitWasmBoundsCheck(LWasmBoundsCheck* ins) -{ - MWasmBoundsCheck* mir = ins->mir(); - - MOZ_ASSERT(mir->offset() <= INT32_MAX); - - if 
(!mir->isRedundant()) { - // No guarantee that heapBase + endOffset can be properly encoded in - // the cmp immediate in ma_BoundsCheck, so use an explicit add instead. - uint32_t endOffset = mir->endOffset(); - - Register ptr = ToRegister(ins->ptr()); - - ScratchRegisterScope ptrPlusOffset(masm); - masm.move32(Imm32(endOffset), ptrPlusOffset); - masm.ma_add(ptr, ptrPlusOffset, SetCC); - - // Detect unsigned overflow by checking the carry bit. - masm.as_b(wasm::JumpTarget::OutOfBounds, Assembler::CarrySet); - - uint32_t cmpOffset = masm.ma_BoundsCheck(ptrPlusOffset).getOffset(); - masm.append(wasm::BoundsCheck(cmpOffset)); - masm.as_b(wasm::JumpTarget::OutOfBounds, Assembler::Above); - } else { -#ifdef DEBUG - Label ok1, ok2; - uint32_t endOffset = mir->endOffset(); - - Register ptr = ToRegister(ins->ptr()); - - ScratchRegisterScope ptrPlusOffset(masm); - masm.move32(Imm32(endOffset), ptrPlusOffset); - masm.ma_add(ptr, ptrPlusOffset, SetCC); - - // Detect unsigned overflow by checking the carry bit. - masm.as_b(&ok1, Assembler::CarryClear); - masm.assumeUnreachable("Redundant bounds check failed!"); - masm.bind(&ok1); - - uint32_t cmpOffset = masm.ma_BoundsCheck(ptrPlusOffset).getOffset(); - masm.append(wasm::BoundsCheck(cmpOffset)); - masm.as_b(&ok2, Assembler::BelowOrEqual); - masm.assumeUnreachable("Redundant bounds check failed!"); - masm.bind(&ok2); -#endif + if (isFloat) { + FloatRegister output = ToFloatRegister(ins->output()); + if (size == 32) + output = output.singleOverlay(); + + Assembler::Condition cond = Assembler::Always; + if (mir->needsBoundsCheck()) { + BufferOffset cmp = masm.as_cmp(ptrReg, Imm8(0)); + masm.append(wasm::BoundsCheck(cmp.getOffset())); + + size_t nanOffset = size == 32 ? 
wasm::NaN32GlobalDataOffset : wasm::NaN64GlobalDataOffset; + masm.ma_vldr(Address(GlobalReg, nanOffset - AsmJSGlobalRegBias), output, + Assembler::AboveOrEqual); + cond = Assembler::Below; + } + + masm.ma_vldr(output, HeapReg, ptrReg, 0, cond); + } else { + Register output = ToRegister(ins->output()); + + Assembler::Condition cond = Assembler::Always; + if (mir->needsBoundsCheck()) { + uint32_t cmpOffset = masm.as_cmp(ptrReg, Imm8(0)).getOffset(); + masm.append(wasm::BoundsCheck(cmpOffset)); + + masm.ma_mov(Imm32(0), output, Assembler::AboveOrEqual); + cond = Assembler::Below; + } + + masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, Offset, cond); + } } } template <typename T> void CodeGeneratorARM::emitWasmLoad(T* lir) { const MWasmLoad* mir = lir->mir(); - MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI"); - uint32_t offset = mir->offset(); - MOZ_ASSERT(offset <= INT32_MAX); + MOZ_ASSERT(offset < wasm::OffsetGuardLimit); Register ptr = ToRegister(lir->ptr()); Scalar::Type type = mir->accessType(); // Maybe add the offset. 
if (offset || type == Scalar::Int64) { Register ptrPlusOffset = ToRegister(lir->ptrCopy()); - masm.ma_add(Imm32(offset), ptrPlusOffset); + if (offset) + masm.ma_add(Imm32(offset), ptrPlusOffset); ptr = ptrPlusOffset; } else { MOZ_ASSERT(lir->ptrCopy()->isBogusTemp()); } bool isSigned = type == Scalar::Int8 || type == Scalar::Int16 || type == Scalar::Int32 || type == Scalar::Int64; unsigned byteSize = mir->byteSize(); + memoryBarrier(mir->barrierBefore()); + if (mir->type() == MIRType::Int64) { Register64 output = ToOutRegister64(lir); if (type == Scalar::Int64) { MOZ_ASSERT(INT64LOW_OFFSET == 0); masm.ma_dataTransferN(IsLoad, 32, /* signed = */ false, HeapReg, ptr, output.low); masm.ma_add(Imm32(INT64HIGH_OFFSET), ptr); masm.ma_dataTransferN(IsLoad, 32, isSigned, HeapReg, ptr, output.high); } else { @@ -2383,16 +2358,18 @@ CodeGeneratorARM::emitWasmLoad(T* lir) MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle()); ScratchRegisterScope scratch(masm); masm.ma_add(HeapReg, ptr, scratch); masm.ma_vldr(Address(scratch, 0), output.fpu()); } else { masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr()); } } + + memoryBarrier(mir->barrierAfter()); } void CodeGeneratorARM::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); } @@ -2403,20 +2380,18 @@ CodeGeneratorARM::visitWasmLoadI64(LWasm } template<typename T> void CodeGeneratorARM::emitWasmUnalignedLoad(T* lir) { const MWasmLoad* mir = lir->mir(); - MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI"); - uint32_t offset = mir->offset(); - MOZ_ASSERT(offset <= INT32_MAX); + MOZ_ASSERT(offset < wasm::OffsetGuardLimit); Register ptr = ToRegister(lir->ptrCopy()); if (offset) masm.ma_add(Imm32(offset), ptr); // Add HeapReg to ptr, so we can use base+index addressing in the byte loads. 
masm.ma_add(HeapReg, ptr); @@ -2435,16 +2410,18 @@ CodeGeneratorARM::emitWasmUnalignedLoad( else if (mirType == MIRType::Int64) low = ToOutRegister64(lir).low; else low = ToRegister(lir->output()); MOZ_ASSERT(low != tmp); MOZ_ASSERT(low != ptr); + memoryBarrier(mir->barrierBefore()); + masm.emitUnalignedLoad(isSigned, Min(byteSize, 4u), ptr, tmp, low); if (IsFloatingPointType(mirType)) { FloatRegister output = ToFloatRegister(lir->output()); if (byteSize == 4) { MOZ_ASSERT(output.isSingle()); masm.ma_vxfer(low, output); } else { @@ -2463,54 +2440,68 @@ CodeGeneratorARM::emitWasmUnalignedLoad( MOZ_ASSERT(byteSize <= 4); // Propagate sign. if (isSigned) masm.ma_asr(Imm32(31), output.low, output.high); else masm.ma_mov(Imm32(0), output.high); } } + + memoryBarrier(mir->barrierAfter()); } void CodeGeneratorARM::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir) { emitWasmUnalignedLoad(lir); } void CodeGeneratorARM::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir) { emitWasmUnalignedLoad(lir); } +void +CodeGeneratorARM::visitWasmAddOffset(LWasmAddOffset* lir) +{ + MWasmAddOffset* mir = lir->mir(); + Register base = ToRegister(lir->base()); + Register out = ToRegister(lir->output()); + + masm.ma_add(base, Imm32(mir->offset()), out, SetCC); + masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::CarrySet); +} + template <typename T> void CodeGeneratorARM::emitWasmStore(T* lir) { const MWasmStore* mir = lir->mir(); - MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI"); - uint32_t offset = mir->offset(); - MOZ_ASSERT(offset <= INT32_MAX); + MOZ_ASSERT(offset < wasm::OffsetGuardLimit); Register ptr = ToRegister(lir->ptr()); unsigned byteSize = mir->byteSize(); Scalar::Type type = mir->accessType(); // Maybe add the offset. 
if (offset || type == Scalar::Int64) { Register ptrPlusOffset = ToRegister(lir->ptrCopy()); - masm.ma_add(Imm32(offset), ptrPlusOffset); + if (offset) + masm.ma_add(Imm32(offset), ptrPlusOffset); ptr = ptrPlusOffset; } else { MOZ_ASSERT(lir->ptrCopy()->isBogusTemp()); } + memoryBarrier(mir->barrierBefore()); + if (type == Scalar::Int64) { MOZ_ASSERT(INT64LOW_OFFSET == 0); Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex)); masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ false, HeapReg, ptr, value.low); masm.ma_add(Imm32(INT64HIGH_OFFSET), ptr); masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ true, HeapReg, ptr, value.high); } else { @@ -2522,16 +2513,18 @@ CodeGeneratorARM::emitWasmStore(T* lir) masm.ma_add(HeapReg, ptr, scratch); masm.ma_vstr(val, Address(scratch, 0)); } else { bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // see AsmJSStoreHeap; Register val = value.gpr(); masm.ma_dataTransferN(IsStore, 8 * byteSize /* bits */, isSigned, HeapReg, ptr, val); } } + + memoryBarrier(mir->barrierAfter()); } void CodeGeneratorARM::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); } @@ -2542,30 +2535,30 @@ CodeGeneratorARM::visitWasmStoreI64(LWas } template<typename T> void CodeGeneratorARM::emitWasmUnalignedStore(T* lir) { const MWasmStore* mir = lir->mir(); - MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI"); - uint32_t offset = mir->offset(); - MOZ_ASSERT(offset <= INT32_MAX); + MOZ_ASSERT(offset < wasm::OffsetGuardLimit); Register ptr = ToRegister(lir->ptrCopy()); if (offset) masm.ma_add(Imm32(offset), ptr); // Add HeapReg to ptr, so we can use base+index addressing in the byte loads. 
masm.ma_add(HeapReg, ptr); MIRType mirType = mir->value()->type(); + memoryBarrier(mir->barrierAfter()); + Register val = ToRegister(lir->valueHelper()); if (IsFloatingPointType(mirType)) { masm.ma_vxfer(ToFloatRegister(lir->getOperand(LWasmUnalignedStore::ValueIndex)), val); } else if (mirType == MIRType::Int64) { Register64 input = ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex)); if (input.low != val) masm.ma_mov(input.low, val); } @@ -2581,17 +2574,19 @@ CodeGeneratorARM::emitWasmUnalignedStore MOZ_ASSERT(fp.isDouble()); ScratchRegisterScope scratch(masm); masm.ma_vxfer(fp, scratch, val); } else { MOZ_ASSERT(mirType == MIRType::Int64); masm.ma_mov(ToRegister64(lir->getInt64Operand(LWasmUnalignedStoreI64::ValueIndex)).high, val); } masm.emitUnalignedStore(4, ptr, val, /* offset */ 4); - } + } + + memoryBarrier(mir->barrierBefore()); } void CodeGeneratorARM::visitWasmUnalignedStore(LWasmUnalignedStore* lir) { emitWasmUnalignedStore(lir); } @@ -2600,35 +2595,35 @@ CodeGeneratorARM::visitWasmUnalignedStor { emitWasmUnalignedStore(lir); } void CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) { const MAsmJSStoreHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + + const LAllocation* ptr = ins->ptr(); + bool isSigned; int size; bool isFloat = false; switch (mir->accessType()) { case Scalar::Int8: case Scalar::Uint8: isSigned = false; size = 8; break; case Scalar::Int16: case Scalar::Uint16: isSigned = false; size = 16; break; case Scalar::Int32: case Scalar::Uint32: isSigned = true; size = 32; break; case Scalar::Float64: isFloat = true; size = 64; break; case Scalar::Float32: isFloat = true; size = 32; break; default: MOZ_CRASH("unexpected array type"); } - memoryBarrier(mir->barrierBefore()); - - const LAllocation* ptr = ins->ptr(); - if (ptr->isConstant()) { MOZ_ASSERT(!mir->needsBoundsCheck()); int32_t ptrImm = ptr->toConstant()->toInt32(); MOZ_ASSERT(ptrImm >= 0); if (isFloat) { VFPRegister 
vd(ToFloatRegister(ins->value())); Address addr(HeapReg, ptrImm); if (size == 32) @@ -2636,54 +2631,64 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LA else masm.storeDouble(vd, addr); } else { masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm), ToRegister(ins->value()), Offset, Assembler::Always); } } else { Register ptrReg = ToRegister(ptr); - if (isFloat) - masm.ma_storeHeapAsmJS(ptrReg, size, mir->needsBoundsCheck(), /*faultOnOOB=*/false, - ToFloatRegister(ins->value())); - else - masm.ma_storeHeapAsmJS(ptrReg, size, isSigned, mir->needsBoundsCheck(), - mir->isAtomicAccess(), ToRegister(ins->value())); + + Assembler::Condition cond = Assembler::Always; + if (mir->needsBoundsCheck()) { + BufferOffset cmp = masm.as_cmp(ptrReg, Imm8(0)); + masm.append(wasm::BoundsCheck(cmp.getOffset())); + + cond = Assembler::Below; + } + + if (isFloat) { + FloatRegister value = ToFloatRegister(ins->value()); + if (size == 32) + value = value.singleOverlay(); + + masm.ma_vstr(value, HeapReg, ptrReg, 0, 0, Assembler::Below); + } else { + Register value = ToRegister(ins->value()); + masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, Offset, cond); + } } - - memoryBarrier(mir->barrierAfter()); } void CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins) { MAsmJSCompareExchangeHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + Scalar::Type vt = mir->accessType(); const LAllocation* ptr = ins->ptr(); Register ptrReg = ToRegister(ptr); BaseIndex srcAddr(HeapReg, ptrReg, TimesOne); MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); Register oldval = ToRegister(ins->oldValue()); Register newval = ToRegister(ins->newValue()); - if (mir->needsBoundsCheck()) { - uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset(); - masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual); - masm.append(wasm::BoundsCheck(cmpOffset)); - } masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? 
Scalar::Int32 : vt, srcAddr, oldval, newval, InvalidReg, ToAnyRegister(ins->output())); } void CodeGeneratorARM::visitAsmJSCompareExchangeCallout(LAsmJSCompareExchangeCallout* ins) { const MAsmJSCompareExchangeHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + Register ptr = ToRegister(ins->ptr()); Register oldval = ToRegister(ins->oldval()); Register newval = ToRegister(ins->newval()); Register tls = ToRegister(ins->tls()); Register instance = ToRegister(ins->getTemp(0)); Register viewType = ToRegister(ins->getTemp(1)); MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg); @@ -2699,36 +2704,34 @@ CodeGeneratorARM::visitAsmJSCompareExcha masm.passABIArg(newval); masm.callWithABI(wasm::SymbolicAddress::AtomicCmpXchg); } void CodeGeneratorARM::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins) { MAsmJSAtomicExchangeHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + Scalar::Type vt = mir->accessType(); Register ptrReg = ToRegister(ins->ptr()); Register value = ToRegister(ins->value()); BaseIndex srcAddr(HeapReg, ptrReg, TimesOne); MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); - if (mir->needsBoundsCheck()) { - uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset(); - masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual); - masm.append(wasm::BoundsCheck(cmpOffset)); - } - masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? 
Scalar::Int32 : vt, srcAddr, value, InvalidReg, ToAnyRegister(ins->output())); } void CodeGeneratorARM::visitAsmJSAtomicExchangeCallout(LAsmJSAtomicExchangeCallout* ins) { const MAsmJSAtomicExchangeHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + Register ptr = ToRegister(ins->ptr()); Register value = ToRegister(ins->value()); Register tls = ToRegister(ins->tls()); Register instance = ToRegister(ins->getTemp(0)); Register viewType = ToRegister(ins->getTemp(1)); MOZ_ASSERT(ToRegister(ins->output()) == ReturnReg); @@ -2741,75 +2744,68 @@ CodeGeneratorARM::visitAsmJSAtomicExchan masm.passABIArg(ptr); masm.passABIArg(value); masm.callWithABI(wasm::SymbolicAddress::AtomicXchg); } void CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins) { - MOZ_ASSERT(ins->mir()->hasUses()); - MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); - MAsmJSAtomicBinopHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + MOZ_ASSERT(mir->hasUses()); + Scalar::Type vt = mir->accessType(); Register ptrReg = ToRegister(ins->ptr()); Register flagTemp = ToRegister(ins->flagTemp()); const LAllocation* value = ins->value(); AtomicOp op = mir->operation(); + MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); BaseIndex srcAddr(HeapReg, ptrReg, TimesOne); - if (mir->needsBoundsCheck()) { - uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset(); - masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual); - masm.append(wasm::BoundsCheck(cmpOffset)); - } - - if (value->isConstant()) + if (value->isConstant()) { atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt, Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg, ToAnyRegister(ins->output())); - else + } else { atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? 
Scalar::Int32 : vt, ToRegister(value), srcAddr, flagTemp, InvalidReg, ToAnyRegister(ins->output())); + } } void CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins) { - MOZ_ASSERT(!ins->mir()->hasUses()); - MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); - MAsmJSAtomicBinopHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + MOZ_ASSERT(!mir->hasUses()); + Scalar::Type vt = mir->accessType(); Register ptrReg = ToRegister(ins->ptr()); Register flagTemp = ToRegister(ins->flagTemp()); const LAllocation* value = ins->value(); AtomicOp op = mir->operation(); + MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); BaseIndex srcAddr(HeapReg, ptrReg, TimesOne); - if (mir->needsBoundsCheck()) { - uint32_t cmpOffset = masm.ma_BoundsCheck(ptrReg).getOffset(); - masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual); - masm.append(wasm::BoundsCheck(cmpOffset)); - } - if (value->isConstant()) atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp); else atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp); } void CodeGeneratorARM::visitAsmJSAtomicBinopCallout(LAsmJSAtomicBinopCallout* ins) { const MAsmJSAtomicBinopHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + Register ptr = ToRegister(ins->ptr()); Register value = ToRegister(ins->value()); Register tls = ToRegister(ins->tls()); Register instance = ToRegister(ins->getTemp(0)); Register viewType = ToRegister(ins->getTemp(1)); masm.loadPtr(Address(tls, offsetof(wasm::TlsData, instance)), instance); masm.move32(Imm32(mir->accessType()), viewType);
--- a/js/src/jit/arm/CodeGenerator-arm.h +++ b/js/src/jit/arm/CodeGenerator-arm.h @@ -235,21 +235,21 @@ class CodeGeneratorARM : public CodeGene void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir); void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir); void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir); void visitAsmSelect(LAsmSelect* ins); void visitAsmReinterpret(LAsmReinterpret* ins); void emitWasmCall(LWasmCallBase* ins); void visitWasmCall(LWasmCall* ins); void visitWasmCallI64(LWasmCallI64* ins); - void visitWasmBoundsCheck(LWasmBoundsCheck* ins); void visitWasmLoad(LWasmLoad* ins); void visitWasmLoadI64(LWasmLoadI64* ins); void visitWasmUnalignedLoad(LWasmUnalignedLoad* ins); void visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* ins); + void visitWasmAddOffset(LWasmAddOffset* ins); void visitWasmStore(LWasmStore* ins); void visitWasmStoreI64(LWasmStoreI64* ins); void visitWasmUnalignedStore(LWasmUnalignedStore* ins); void visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* ins); void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins); void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins); void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins); void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
--- a/js/src/jit/arm/Lowering-arm.cpp +++ b/js/src/jit/arm/Lowering-arm.cpp @@ -604,27 +604,16 @@ void LIRGeneratorARM::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins) { MOZ_ASSERT(ins->input()->type() == MIRType::Int32); LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input())); define(lir, ins); } void -LIRGeneratorARM::visitWasmBoundsCheck(MWasmBoundsCheck* ins) -{ - MDefinition* input = ins->input(); - MOZ_ASSERT(input->type() == MIRType::Int32); - - LAllocation baseAlloc = useRegisterAtStart(input); - auto* lir = new(alloc()) LWasmBoundsCheck(baseAlloc); - add(lir, ins); -} - -void LIRGeneratorARM::visitWasmLoad(MWasmLoad* ins) { MDefinition* base = ins->base(); MOZ_ASSERT(base->type() == MIRType::Int32); LAllocation ptr = useRegisterAtStart(base); if (ins->isUnaligned()) { @@ -718,19 +707,19 @@ LIRGeneratorARM::visitWasmStore(MWasmSto void LIRGeneratorARM::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) { MOZ_ASSERT(ins->offset() == 0); MDefinition* base = ins->base(); MOZ_ASSERT(base->type() == MIRType::Int32); - LAllocation baseAlloc; // For the ARM it is best to keep the 'base' in a register if a bounds check is needed. + LAllocation baseAlloc; if (base->isConstant() && !ins->needsBoundsCheck()) { // A bounds check is only skipped for a positive index. MOZ_ASSERT(base->toConstant()->toInt32() >= 0); baseAlloc = LAllocation(base->toConstant()); } else { baseAlloc = useRegisterAtStart(base); }
--- a/js/src/jit/arm/Lowering-arm.h +++ b/js/src/jit/arm/Lowering-arm.h @@ -100,17 +100,16 @@ class LIRGeneratorARM : public LIRGenera void visitUnbox(MUnbox* unbox); void visitReturn(MReturn* ret); void lowerPhi(MPhi* phi); void visitGuardShape(MGuardShape* ins); void visitGuardObjectGroup(MGuardObjectGroup* ins); void visitAsmSelect(MAsmSelect* ins); void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins); void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins); - void visitWasmBoundsCheck(MWasmBoundsCheck* ins); void visitWasmLoad(MWasmLoad* ins); void visitWasmStore(MWasmStore* ins); void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins); void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins); void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins); void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins); void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins); void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
--- a/js/src/jit/arm/MacroAssembler-arm-inl.h +++ b/js/src/jit/arm/MacroAssembler-arm-inl.h @@ -1445,22 +1445,22 @@ MacroAssembler::branchTruncateDoubleToIn ma_vcvt_F64_I32(src, scratchSIntReg); ma_vxfer(scratchSIntReg, dest); ma_cmp(dest, Imm32(0x7fffffff)); ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual); ma_b(fail, Assembler::Equal); } -template <typename T> +template <typename T, typename L> void -MacroAssembler::branchAdd32(Condition cond, T src, Register dest, Label* label) +MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label) { add32(src, dest); - j(cond, label); + as_b(label, cond); } template <typename T> void MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* label) { ma_sub(src, dest, SetCC); j(cond, label); @@ -1966,16 +1966,48 @@ MacroAssembler::clampIntToUint8(Register // Look at (reg >> 8) if it is 0, then reg shouldn't be clamped if it is // <0, then we want to clamp to 0, otherwise, we wish to clamp to 255 ScratchRegisterScope scratch(*this); as_mov(scratch, asr(reg, 8), SetCC); ma_mov(Imm32(0xff), reg, NotEqual); ma_mov(Imm32(0), reg, Signed); } +// ======================================================================== +// wasm support + +template <class L> +void +MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label) +{ + BufferOffset bo = as_cmp(index, Imm8(0)); + append(wasm::BoundsCheck(bo.getOffset())); + + as_b(label, cond); +} + +void +MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) +{ + Instruction* inst = (Instruction*) patchAt; + MOZ_ASSERT(inst->is<InstCMP>()); + InstCMP* cmp = inst->as<InstCMP>(); + + Register index; + cmp->extractOp1(&index); + + MOZ_ASSERT(cmp->extractOp2().isImm8()); + + Imm8 imm8 = Imm8(limit); + MOZ_RELEASE_ASSERT(!imm8.invalid); + + *inst = InstALU(InvalidReg, index, imm8, OpCmp, SetCC, Always); + // Don't call Auto Flush Cache; the wasm caller has done this for us. 
+} + //}}} check_macroassembler_style // =============================================================== void MacroAssemblerARMCompat::incrementInt32Value(const Address& addr) { asMasm().add32(Imm32(1), ToPayload(addr)); }
--- a/js/src/jit/arm/MacroAssembler-arm.cpp +++ b/js/src/jit/arm/MacroAssembler-arm.cpp @@ -2126,65 +2126,16 @@ MacroAssemblerARMCompat::loadFloat32(con uint32_t scale = Imm32::ShiftOf(src.scale).value; int32_t offset = src.offset; ScratchRegisterScope scratch(asMasm()); as_add(scratch, base, lsl(index, scale)); ma_vldr(Address(scratch, offset), VFPRegister(dest).singleOverlay()); } - -void -MacroAssemblerARMCompat::ma_loadHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck, - bool faultOnOOB, FloatRegister output) -{ - if (size == 32) - output = output.singleOverlay(); - - if (!needsBoundsCheck) { - ma_vldr(output, HeapReg, ptrReg, 0, Assembler::Always); - } else { - uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset(); - append(wasm::BoundsCheck(cmpOffset)); - - if (faultOnOOB) { - ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual); - } - else { - size_t nanOffset = - size == 32 ? wasm::NaN32GlobalDataOffset : wasm::NaN64GlobalDataOffset; - ma_vldr(Address(GlobalReg, nanOffset - AsmJSGlobalRegBias), output, - Assembler::AboveOrEqual); - } - ma_vldr(output, HeapReg, ptrReg, 0, Assembler::Below); - } -} - -void -MacroAssemblerARMCompat::ma_loadHeapAsmJS(Register ptrReg, int size, bool isSigned, - bool needsBoundsCheck, bool faultOnOOB, - Register output) -{ - if (!needsBoundsCheck) { - ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, Offset, - Assembler::Always); - return; - } - - uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset(); - append(wasm::BoundsCheck(cmpOffset)); - - if (faultOnOOB) - ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual); - else - ma_mov(Imm32(0), output, Assembler::AboveOrEqual); - - ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, Offset, Assembler::Below); -} - void MacroAssemblerARMCompat::store8(Imm32 imm, const Address& address) { AutoRegisterScope scratch2(asMasm(), secondScratchReg_); ma_mov(imm, scratch2); store8(scratch2, address); } @@ -2351,61 +2302,16 @@ 
MacroAssemblerARMCompat::storePtr(Regist void MacroAssemblerARMCompat::storePtr(Register src, AbsoluteAddress dest) { ScratchRegisterScope scratch(asMasm()); movePtr(ImmWord(uintptr_t(dest.addr)), scratch); storePtr(src, Address(scratch, 0)); } -void -MacroAssemblerARMCompat::ma_storeHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck, - bool faultOnOOB, FloatRegister value) -{ - if (!needsBoundsCheck) { - BaseIndex addr(HeapReg, ptrReg, TimesOne, 0); - if (size == 32) - asMasm().storeFloat32(value, addr); - else - asMasm().storeDouble(value, addr); - } else { - uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset(); - append(wasm::BoundsCheck(cmpOffset)); - - if (faultOnOOB) - ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual); - - if (size == 32) - value = value.singleOverlay(); - - ma_vstr(value, HeapReg, ptrReg, 0, 0, Assembler::Below); - } -} - -void -MacroAssemblerARMCompat::ma_storeHeapAsmJS(Register ptrReg, int size, bool isSigned, - bool needsBoundsCheck, bool faultOnOOB, - Register value) -{ - if (!needsBoundsCheck) { - ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, Offset, - Assembler::Always); - return; - } - - uint32_t cmpOffset = ma_BoundsCheck(ptrReg).getOffset(); - append(wasm::BoundsCheck(cmpOffset)); - - if (faultOnOOB) - ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual); - - ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, Offset, - Assembler::Below); -} - // Note: this function clobbers the input register. void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) { if (HasVFPv3()) { Label notSplit; { ScratchDoubleScope scratchDouble(*this);
--- a/js/src/jit/arm/MacroAssembler-arm.h +++ b/js/src/jit/arm/MacroAssembler-arm.h @@ -976,21 +976,16 @@ class MacroAssemblerARMCompat : public M // Load a float value into a register, then expand it to a double. void loadFloatAsDouble(const Address& addr, FloatRegister dest); void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest); void loadFloat32(const Address& addr, FloatRegister dest); void loadFloat32(const BaseIndex& src, FloatRegister dest); - void ma_loadHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck, bool faultOnOOB, - FloatRegister output); - void ma_loadHeapAsmJS(Register ptrReg, int size, bool isSigned, bool needsBoundsCheck, - bool faultOnOOB, Register output); - void store8(Register src, const Address& address); void store8(Imm32 imm, const Address& address); void store8(Register src, const BaseIndex& address); void store8(Imm32 imm, const BaseIndex& address); void store16(Register src, const Address& address); void store16(Imm32 imm, const Address& address); void store16(Register src, const BaseIndex& address); @@ -1017,21 +1012,16 @@ class MacroAssemblerARMCompat : public M template <typename T> void storePtr(ImmGCPtr imm, T address); void storePtr(Register src, const Address& address); void storePtr(Register src, const BaseIndex& address); void storePtr(Register src, AbsoluteAddress dest); void moveDouble(FloatRegister src, FloatRegister dest, Condition cc = Always) { ma_vmov(src, dest, cc); } - void ma_storeHeapAsmJS(Register ptrReg, int size, bool needsBoundsCheck, bool faultOnOOB, - FloatRegister value); - void ma_storeHeapAsmJS(Register ptrReg, int size, bool isSigned, bool needsBoundsCheck, - bool faultOnOOB, Register value); - private: template<typename T> Register computePointer(const T& src, Register r); template<typename T> void compareExchangeARMv6(int nbytes, bool signExtend, const T& mem, Register oldval, Register newval, Register output); @@ -1439,19 +1429,16 @@ class MacroAssemblerARMCompat : public M void 
abiret() { as_bx(lr); } void ma_storeImm(Imm32 c, const Address& dest) { ma_mov(c, lr); ma_str(lr, dest); } - BufferOffset ma_BoundsCheck(Register bounded) { - return as_cmp(bounded, Imm8(0)); - } void moveFloat32(FloatRegister src, FloatRegister dest, Condition cc = Always) { as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc); } void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) { loadPtr(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), dest); }
--- a/js/src/jit/arm64/Architecture-arm64.h +++ b/js/src/jit/arm64/Architecture-arm64.h @@ -451,15 +451,12 @@ hasUnaliasedDouble() // ARM prior to ARMv8 also has doubles that alias multiple floats. // Again, ARMv8 is in the clear. inline bool hasMultiAlias() { return false; } -static const size_t WasmCheckedImmediateRange = 0; -static const size_t WasmImmediateRange = 0; - } // namespace jit } // namespace js #endif // jit_arm64_Architecture_arm64_h
--- a/js/src/jit/arm64/Assembler-arm64.cpp +++ b/js/src/jit/arm64/Assembler-arm64.cpp @@ -632,30 +632,16 @@ Assembler::FixupNurseryObjects(JSContext void Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm) { MOZ_CRASH("PatchInstructionImmediate()"); } void -Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength) -{ - Instruction* inst = (Instruction*) patchAt; - int32_t mask = ~(heapLength - 1); - unsigned n, imm_s, imm_r; - if (!IsImmLogical(mask, 32, &n, &imm_s, &imm_r)) - MOZ_CRASH("Could not encode immediate!?"); - - inst->SetImmR(imm_r); - inst->SetImmS(imm_s); - inst->SetBitN(n); -} - -void Assembler::retarget(Label* label, Label* target) { if (label->used()) { if (target->bound()) { bind(label, BufferOffset(target)); } else if (target->used()) { // The target is not bound but used. Prepend label's branch list // onto target's.
--- a/js/src/jit/arm64/Assembler-arm64.h +++ b/js/src/jit/arm64/Assembler-arm64.h @@ -372,18 +372,16 @@ class Assembler : public vixl::Assembler return reinterpret_cast<Instruction*>(&ldr); } }; // Offset of the patchable target for the given entry. static const size_t OffsetOfJumpTableEntryPointer = 8; public: - static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength); - void writeCodePointer(AbsoluteLabel* absoluteLabel) { MOZ_ASSERT(!absoluteLabel->bound()); uintptr_t x = LabelBase::INVALID_OFFSET; BufferOffset off = EmitData(&x, sizeof(uintptr_t)); // The x86/x64 makes general use of AbsoluteLabel and weaves a linked list // of uses of an AbsoluteLabel through the assembly. ARM only uses labels // for the case statements of switch jump tables. Thus, for simplicity, we
--- a/js/src/jit/arm64/Lowering-arm64.cpp +++ b/js/src/jit/arm64/Lowering-arm64.cpp @@ -334,22 +334,16 @@ LIRGeneratorARM64::visitRandom(MRandom* void LIRGeneratorARM64::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) { MOZ_CRASH("NY"); } void -LIRGeneratorARM64::visitWasmBoundsCheck(MWasmBoundsCheck* ins) -{ - MOZ_CRASH("NY"); -} - -void LIRGeneratorARM64::visitWasmLoad(MWasmLoad* ins) { MOZ_CRASH("NY"); } void LIRGeneratorARM64::visitWasmStore(MWasmStore* ins) {
--- a/js/src/jit/arm64/Lowering-arm64.h +++ b/js/src/jit/arm64/Lowering-arm64.h @@ -112,17 +112,16 @@ class LIRGeneratorARM64 : public LIRGene void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins); void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins); void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins); void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins); void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins); void visitSubstr(MSubstr* ins); void visitRandom(MRandom* ins); void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins); - void visitWasmBoundsCheck(MWasmBoundsCheck* ins); void visitWasmLoad(MWasmLoad* ins); void visitWasmStore(MWasmStore* ins); void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins); void visitCopySign(MCopySign* ins); void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins); }; typedef LIRGeneratorARM64 LIRGeneratorSpecific;
--- a/js/src/jit/arm64/MacroAssembler-arm64-inl.h +++ b/js/src/jit/arm64/MacroAssembler-arm64-inl.h @@ -1005,22 +1005,22 @@ MacroAssembler::branchTruncateDoubleMayb } void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail) { convertDoubleToInt32(src, dest, fail); } -template <typename T> +template <typename T, typename L> void -MacroAssembler::branchAdd32(Condition cond, T src, Register dest, Label* label) +MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label) { adds32(src, dest); - branch(cond, label); + B(label, cond); } template <typename T> void MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* label) { subs32(src, dest); branch(cond, label); @@ -1540,16 +1540,32 @@ MacroAssembler::clampIntToUint8(Register MOZ_ASSERT(!scratch32.Is(reg32)); Cmp(reg32, Operand(reg32, vixl::UXTB)); Csel(reg32, reg32, vixl::wzr, Assembler::GreaterThanOrEqual); Mov(scratch32, Operand(0xff)); Csel(reg32, reg32, scratch32, Assembler::LessThanOrEqual); } +// ======================================================================== +// wasm support + +template <class L> +void +MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label) +{ + MOZ_CRASH("NYI"); +} + +void +MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) +{ + MOZ_CRASH("NYI"); +} + //}}} check_macroassembler_style // =============================================================== template <typename T> void MacroAssemblerCompat::addToStackPtr(T t) { asMasm().addPtr(t, getStackPointer());
--- a/js/src/jit/arm64/vixl/Assembler-vixl.h +++ b/js/src/jit/arm64/vixl/Assembler-vixl.h @@ -853,17 +853,19 @@ class Assembler : public MozBaseAssemble #define COPYENUM(v) static const Condition v = vixl::v #define COPYENUM_(v) static const Condition v = vixl::v##_ COPYENUM(Equal); COPYENUM(Zero); COPYENUM(NotEqual); COPYENUM(NonZero); COPYENUM(AboveOrEqual); + COPYENUM(CarrySet); COPYENUM(Below); + COPYENUM(CarryClear); COPYENUM(Signed); COPYENUM(NotSigned); COPYENUM(Overflow); COPYENUM(NoOverflow); COPYENUM(Above); COPYENUM(BelowOrEqual); COPYENUM_(GreaterThanOrEqual); COPYENUM_(LessThan);
--- a/js/src/jit/arm64/vixl/Constants-vixl.h +++ b/js/src/jit/arm64/vixl/Constants-vixl.h @@ -229,18 +229,18 @@ enum Condition { // Aliases. hs = cs, // C set Unsigned higher or same. lo = cc, // C clear Unsigned lower. // Mozilla expanded aliases. Equal = 0, Zero = 0, NotEqual = 1, NonZero = 1, - AboveOrEqual = 2, - Below = 3, + AboveOrEqual = 2, CarrySet = 2, + Below = 3, CarryClear = 3, Signed = 4, NotSigned = 5, Overflow = 6, NoOverflow = 7, Above = 8, BelowOrEqual = 9, GreaterThanOrEqual_ = 10, LessThan_ = 11,
--- a/js/src/jit/mips-shared/Architecture-mips-shared.h +++ b/js/src/jit/mips-shared/Architecture-mips-shared.h @@ -327,18 +327,12 @@ hasUnaliasedDouble() { // On MIPS, fn-double aliases both fn-float32 and fn+1-float32, so if you need // to convert a float32 to a double as a temporary, you need a temporary // double register. inline bool hasMultiAlias() { return true; } -// See MIRGenerator::foldableOffsetRange for more info. -// TODO: Implement this for MIPS. Note that it requires Codegen to respect the -// offset field of AsmJSHeapAccess. -static const size_t WasmCheckedImmediateRange = 0; -static const size_t WasmImmediateRange = 0; - } // namespace jit } // namespace js #endif /* jit_mips_shared_Architecture_mips_shared_h */
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp +++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp @@ -1656,38 +1656,16 @@ CodeGeneratorMIPSShared::visitWasmCall(L void CodeGeneratorMIPSShared::visitWasmCallI64(LWasmCallI64* ins) { emitWasmCallBase(ins); } void -CodeGeneratorMIPSShared::visitWasmBoundsCheck(LWasmBoundsCheck* ins) -{ - MWasmBoundsCheck* mir = ins->mir(); - - uint32_t offset = mir->offset(); - MOZ_ASSERT(offset <= INT32_MAX); - - uint32_t endOffset = mir->endOffset(); - Register ptr = ToRegister(ins->ptr()); - - masm.move32(Imm32(endOffset), SecondScratchReg); - masm.addPtr(ptr, SecondScratchReg); - - // Detect unsigned overflow. - masm.ma_b(SecondScratchReg, ptr, wasm::JumpTarget::OutOfBounds, Assembler::LessThan); - - BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister); - masm.ma_b(SecondScratchReg, ScratchRegister, wasm::JumpTarget::OutOfBounds, Assembler::Above); - masm.append(wasm::BoundsCheck(bo.getOffset())); -} - -void CodeGeneratorMIPSShared::visitWasmLoad(LWasmLoad* lir) { const MWasmLoad* mir = lir->mir(); MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI"); uint32_t offset = mir->offset(); MOZ_ASSERT(offset <= INT32_MAX);
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.h +++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h @@ -191,17 +191,16 @@ class CodeGeneratorMIPSShared : public C void visitNegI(LNegI* lir); void visitNegD(LNegD* lir); void visitNegF(LNegF* lir); void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins); void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins); void visitWasmCall(LWasmCall* ins); void visitWasmCallI64(LWasmCallI64* ins); - void visitWasmBoundsCheck(LWasmBoundsCheck* ins); void visitWasmLoad(LWasmLoad* ins); void visitWasmStore(LWasmStore* ins); void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins); void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins); void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins); void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins); void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins); void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp +++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp @@ -289,27 +289,16 @@ LIRGeneratorMIPSShared::visitAsmJSNeg(MA define(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins); } else { MOZ_ASSERT(ins->type() == MIRType::Double); define(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins); } } void -LIRGeneratorMIPSShared::visitWasmBoundsCheck(MWasmBoundsCheck* ins) -{ - if (!gen->needsBoundsCheckBranch(ins)) - return; - - MDefinition* index = ins->input(); - auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(index)); - add(lir, ins); -} - -void LIRGeneratorMIPSShared::visitWasmLoad(MWasmLoad* ins) { MDefinition* base = ins->base(); MOZ_ASSERT(base->type() == MIRType::Int32); #ifdef JS_CODEGEN_MIPS64 if (ins->type() == MIRType::Int64) { auto* lir = new(alloc()) LWasmLoadI64(useRegisterAtStart(base));
--- a/js/src/jit/mips-shared/Lowering-mips-shared.h +++ b/js/src/jit/mips-shared/Lowering-mips-shared.h @@ -71,17 +71,16 @@ class LIRGeneratorMIPSShared : public LI void lowerModI(MMod* mod); void lowerDivI64(MDiv* div); void lowerModI64(MMod* mod); void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs); void lowerUDiv(MDiv* div); void lowerUMod(MMod* mod); void visitPowHalf(MPowHalf* ins); void visitAsmJSNeg(MAsmJSNeg* ins); - void visitWasmBoundsCheck(MWasmBoundsCheck* ins); void visitWasmLoad(MWasmLoad* ins); void visitWasmStore(MWasmStore* ins); void visitAsmSelect(MAsmSelect* ins); LTableSwitch* newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy, MTableSwitch* ins); LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h +++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h @@ -618,19 +618,19 @@ MacroAssembler::branchTruncateDoubleMayb } void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail) { convertDoubleToInt32(src, dest, fail); } -template <typename T> +template <typename T, typename L> void -MacroAssembler::branchAdd32(Condition cond, T src, Register dest, Label* overflow) +MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L overflow) { switch (cond) { case Overflow: ma_addTestOverflow(dest, dest, src, overflow); break; default: MOZ_CRASH("NYI"); }
--- a/js/src/jit/mips32/Assembler-mips32.cpp +++ b/js/src/jit/mips32/Assembler-mips32.cpp @@ -478,19 +478,8 @@ Assembler::ToggleCall(CodeLocationLabel *i2 = jalr; } else { InstNOP nop; *i2 = nop; } AutoFlushICache::flush(uintptr_t(i2), 4); } - -void -Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength) -{ - Instruction* inst = (Instruction*) patchAt; - InstImm* i0 = (InstImm*) inst; - InstImm* i1 = (InstImm*) i0->next(); - - // Replace with new value - Assembler::UpdateLuiOriValue(i0, i1, heapLength); -}
--- a/js/src/jit/mips32/Assembler-mips32.h +++ b/js/src/jit/mips32/Assembler-mips32.h @@ -167,18 +167,16 @@ class Assembler : public AssemblerMIPSSh ImmPtr expectedValue); static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, PatchedImmPtr expectedValue); static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm); static uint32_t ExtractInstructionImmediate(uint8_t* code); static void ToggleCall(CodeLocationLabel inst_, bool enabled); - - static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength); }; // Assembler static const uint32_t NumIntArgRegs = 4; static inline bool GetIntArgReg(uint32_t usedArgSlots, Register* out) { if (usedArgSlots < NumIntArgRegs) {
--- a/js/src/jit/mips32/MacroAssembler-mips32-inl.h +++ b/js/src/jit/mips32/MacroAssembler-mips32-inl.h @@ -447,16 +447,37 @@ MacroAssembler::storeUncanonicalizedFloa } void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr) { MOZ_ASSERT(addr.offset == 0); ma_ss(src, addr); } +// ======================================================================== +// wasm support + +template <class L> +void +MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label) +{ + MOZ_CRASH("NYI"); +} + +void +MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) +{ + Instruction* inst = (Instruction*) patchAt; + InstImm* i0 = (InstImm*) inst; + InstImm* i1 = (InstImm*) i0->next(); + + // Replace with new value + Assembler::UpdateLuiOriValue(i0, i1, limit); +} + //}}} check_macroassembler_style // =============================================================== void MacroAssemblerMIPSCompat::incrementInt32Value(const Address& addr) { asMasm().add32(Imm32(1), ToPayload(addr)); }
--- a/js/src/jit/mips64/Assembler-mips64.cpp +++ b/js/src/jit/mips64/Assembler-mips64.cpp @@ -523,15 +523,8 @@ Assembler::ToggleCall(CodeLocationLabel *i4 = jalr; } else { InstNOP nop; *i4 = nop; } AutoFlushICache::flush(uintptr_t(i4), sizeof(uint32_t)); } - -void -Assembler::UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength) -{ - // Replace with new value - Assembler::UpdateLoad64Value((Instruction*) patchAt, heapLength); -}
--- a/js/src/jit/mips64/Assembler-mips64.h +++ b/js/src/jit/mips64/Assembler-mips64.h @@ -169,18 +169,16 @@ class Assembler : public AssemblerMIPSSh ImmPtr expectedValue); static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, PatchedImmPtr expectedValue); static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm); static uint64_t ExtractInstructionImmediate(uint8_t* code); static void ToggleCall(CodeLocationLabel inst_, bool enabled); - - static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength); }; // Assembler static const uint32_t NumIntArgRegs = 8; static const uint32_t NumFloatArgRegs = NumIntArgRegs; static inline bool GetIntArgReg(uint32_t usedArgSlots, Register* out) {
--- a/js/src/jit/mips64/MacroAssembler-mips64-inl.h +++ b/js/src/jit/mips64/MacroAssembler-mips64-inl.h @@ -404,16 +404,33 @@ MacroAssembler::storeUncanonicalizedFloa } void MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr) { MOZ_ASSERT(addr.offset == 0); ma_ss(src, addr); } +// ======================================================================== +// wasm support + +template <class L> +void +MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label) +{ + MOZ_CRASH("NYI"); +} + +void +MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) +{ + // Replace with new value + Assembler::UpdateLoad64Value((Instruction*) patchAt, limit); +} + //}}} check_macroassembler_style // =============================================================== void MacroAssemblerMIPS64Compat::incrementInt32Value(const Address& addr) { asMasm().add32(Imm32(1), addr); }
--- a/js/src/jit/none/Architecture-none.h +++ b/js/src/jit/none/Architecture-none.h @@ -146,15 +146,12 @@ inline bool hasMultiAlias() { MOZ_CRASH( static const uint32_t ShadowStackSpace = 0; static const uint32_t JumpImmediateRange = INT32_MAX; #ifdef JS_NUNBOX32 static const int32_t NUNBOX32_TYPE_OFFSET = 4; static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0; #endif -static const size_t WasmCheckedImmediateRange = 0; -static const size_t WasmImmediateRange = 0; - } // namespace jit } // namespace js #endif /* jit_none_Architecture_none_h */
--- a/js/src/jit/none/MacroAssembler-none.h +++ b/js/src/jit/none/MacroAssembler-none.h @@ -142,18 +142,16 @@ class Assembler : public AssemblerShared static void PatchWrite_NearCall(CodeLocationLabel, CodeLocationLabel) { MOZ_CRASH(); } static uint32_t PatchWrite_NearCallSize() { MOZ_CRASH(); } static void PatchInstructionImmediate(uint8_t*, PatchedImmPtr) { MOZ_CRASH(); } static void ToggleToJmp(CodeLocationLabel) { MOZ_CRASH(); } static void ToggleToCmp(CodeLocationLabel) { MOZ_CRASH(); } static void ToggleCall(CodeLocationLabel, bool) { MOZ_CRASH(); } - static void UpdateBoundsCheck(uint8_t*, uint32_t) { MOZ_CRASH(); } - static uintptr_t GetPointer(uint8_t*) { MOZ_CRASH(); } void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, const Disassembler::HeapAccess& heapAccess) { MOZ_CRASH(); } };
--- a/js/src/jit/shared/CodeGenerator-shared-inl.h +++ b/js/src/jit/shared/CodeGenerator-shared-inl.h @@ -342,32 +342,27 @@ CodeGeneratorShared::restoreLiveVolatile LSafepoint* safepoint = ins->safepoint(); LiveRegisterSet regs; regs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(), RegisterSet::Volatile()); masm.PopRegsInMask(regs); } void CodeGeneratorShared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad, - bool isInt64, Scalar::Type type, unsigned numElems, - const Operand& mem, LAllocation alloc) + Scalar::Type type, Operand mem, LAllocation alloc) { #ifdef DEBUG using namespace Disassembler; Disassembler::HeapAccess::Kind kind = isLoad ? HeapAccess::Load : HeapAccess::Store; switch (type) { case Scalar::Int8: case Scalar::Int16: if (kind == HeapAccess::Load) - kind = isInt64 ? HeapAccess::LoadSext64 : HeapAccess::LoadSext32; - break; - case Scalar::Int32: - if (isInt64 && kind == HeapAccess::Load) - kind = HeapAccess::LoadSext64; + kind = HeapAccess::LoadSext32; break; default: break; } OtherOperand op; switch (type) { case Scalar::Int8: @@ -376,17 +371,17 @@ CodeGeneratorShared::verifyHeapAccessDis case Scalar::Uint16: case Scalar::Int32: case Scalar::Uint32: if (!alloc.isConstant()) { op = OtherOperand(ToRegister(alloc).encoding()); } else { // x86 doesn't allow encoding an imm64 to memory move; the value // is wrapped anyways. - int32_t i = isInt64 ? int32_t(ToInt64(&alloc)) : ToInt32(&alloc); + int32_t i = ToInt32(&alloc); // Sign-extend the immediate value out to 32 bits. We do this even // for unsigned element types so that we match what the disassembly // code does, as it doesn't know about signedness of stores. 
unsigned shift = 32 - TypedArrayElemSize(type) * 8; i = i << shift >> shift; op = OtherOperand(i); } @@ -403,38 +398,33 @@ CodeGeneratorShared::verifyHeapAccessDis case Scalar::Int32x4: op = OtherOperand(ToFloatRegister(alloc).encoding()); break; case Scalar::Uint8Clamped: case Scalar::MaxTypedArrayViewType: MOZ_CRASH("Unexpected array type"); } - size_t size = Scalar::isSimdType(type) - ? Scalar::scalarByteSize(type) * numElems - : TypedArrayElemSize(type); - masm.verifyHeapAccessDisassembly(begin, end, - HeapAccess(kind, size, ComplexAddress(mem), op)); + HeapAccess access(kind, TypedArrayElemSize(type), ComplexAddress(mem), op); + masm.verifyHeapAccessDisassembly(begin, end, access); #endif } void -CodeGeneratorShared::verifyLoadDisassembly(uint32_t begin, uint32_t end, bool isInt64, - Scalar::Type type, unsigned numElems, const Operand& mem, - LAllocation alloc) +CodeGeneratorShared::verifyLoadDisassembly(uint32_t begin, uint32_t end, Scalar::Type type, + Operand mem, LAllocation alloc) { - verifyHeapAccessDisassembly(begin, end, true, isInt64, type, numElems, mem, alloc); + verifyHeapAccessDisassembly(begin, end, true, type, mem, alloc); } void -CodeGeneratorShared::verifyStoreDisassembly(uint32_t begin, uint32_t end, bool isInt64, - Scalar::Type type, unsigned numElems, - const Operand& mem, LAllocation alloc) +CodeGeneratorShared::verifyStoreDisassembly(uint32_t begin, uint32_t end, Scalar::Type type, + Operand mem, LAllocation alloc) { - verifyHeapAccessDisassembly(begin, end, false, isInt64, type, numElems, mem, alloc); + verifyHeapAccessDisassembly(begin, end, false, type, mem, alloc); } inline bool CodeGeneratorShared::isGlobalObject(JSObject* object) { // Calling object->is<GlobalObject>() is racy because this relies on // checking the group and this can be changed while we are compiling off the // main thread.
--- a/js/src/jit/shared/CodeGenerator-shared.h +++ b/js/src/jit/shared/CodeGenerator-shared.h @@ -541,25 +541,24 @@ class CodeGeneratorShared : public LElem void emitTracelogIonStop() { #ifdef JS_TRACE_LOGGING emitTracelogStopEvent(TraceLogger_IonMonkey); emitTracelogScriptStop(); #endif } protected: - inline void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad, bool isInt64, - Scalar::Type type, unsigned numElems, - const Operand& mem, LAllocation alloc); + inline void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad, + Scalar::Type type, Operand mem, LAllocation alloc); public: - inline void verifyLoadDisassembly(uint32_t begin, uint32_t end, bool isInt64, Scalar::Type type, - unsigned numElems, const Operand& mem, LAllocation alloc); - inline void verifyStoreDisassembly(uint32_t begin, uint32_t end, bool isInt64, Scalar::Type type, - unsigned numElems, const Operand& mem, LAllocation alloc); + inline void verifyLoadDisassembly(uint32_t begin, uint32_t end, Scalar::Type type, + Operand mem, LAllocation alloc); + inline void verifyStoreDisassembly(uint32_t begin, uint32_t end, Scalar::Type type, + Operand mem, LAllocation alloc); bool isGlobalObject(JSObject* object); }; // An out-of-line path is generated at the end of the function. class OutOfLineCode : public TempObject { Label entry_;
--- a/js/src/jit/shared/LIR-shared.h +++ b/js/src/jit/shared/LIR-shared.h @@ -7834,16 +7834,31 @@ class LAsmSelectI64 : public LAsmSelectB const LInt64Allocation falseExpr() { return getInt64Operand(FalseExprIndex); } const LAllocation* condExpr() { return getOperand(CondIndex); } }; +class LWasmAddOffset : public LInstructionHelper<1, 1, 0> +{ + public: + LIR_HEADER(WasmAddOffset); + explicit LWasmAddOffset(const LAllocation& base) { + setOperand(0, base); + } + MWasmAddOffset* mir() const { + return mir_->toWasmAddOffset(); + } + const LAllocation* base() { + return getOperand(0); + } +}; + class LWasmBoundsCheck : public LInstructionHelper<0, 1, 0> { public: LIR_HEADER(WasmBoundsCheck); explicit LWasmBoundsCheck(const LAllocation& ptr) { setOperand(0, ptr); } MWasmBoundsCheck* mir() const {
--- a/js/src/jit/shared/LOpcodes-shared.h +++ b/js/src/jit/shared/LOpcodes-shared.h @@ -383,21 +383,22 @@ _(CallDOMNative) \ _(IsCallable) \ _(IsConstructor) \ _(IsObject) \ _(IsObjectAndBranch) \ _(HasClass) \ _(AsmSelect) \ _(AsmSelectI64) \ + _(WasmBoundsCheck) \ + _(WasmAddOffset) \ _(WasmLoad) \ _(WasmLoadI64) \ _(WasmStore) \ _(WasmStoreI64) \ - _(WasmBoundsCheck) \ _(WasmLoadGlobalVar) \ _(WasmLoadGlobalVarI64) \ _(WasmStoreGlobalVar) \ _(WasmStoreGlobalVarI64) \ _(AsmJSLoadHeap) \ _(AsmJSStoreHeap) \ _(AsmJSParameter) \ _(AsmJSParameterI64) \
--- a/js/src/jit/x64/CodeGenerator-x64.cpp +++ b/js/src/jit/x64/CodeGenerator-x64.cpp @@ -408,149 +408,71 @@ CodeGeneratorX64::visitWasmCallI64(LWasm void CodeGeneratorX64::memoryBarrier(MemoryBarrierBits barrier) { if (barrier & MembarStoreLoad) masm.storeLoadFence(); } void -CodeGeneratorX64::loadSimd(Scalar::Type type, unsigned numElems, const Operand& srcAddr, - FloatRegister out) +CodeGeneratorX64::wasmStore(Scalar::Type type, unsigned numSimdElems, const LAllocation* value, + Operand dstAddr) { - switch (type) { - case Scalar::Float32x4: { - switch (numElems) { - // In memory-to-register mode, movss zeroes out the high lanes. - case 1: masm.loadFloat32(srcAddr, out); break; - // See comment above, which also applies to movsd. - case 2: masm.loadDouble(srcAddr, out); break; - case 4: masm.loadUnalignedSimd128Float(srcAddr, out); break; - default: MOZ_CRASH("unexpected size for partial load"); - } - break; - } - case Scalar::Int32x4: { - switch (numElems) { - // In memory-to-register mode, movd zeroes out the high lanes. - case 1: masm.vmovd(srcAddr, out); break; - // See comment above, which also applies to movq. - case 2: masm.vmovq(srcAddr, out); break; - case 4: masm.loadUnalignedSimd128Int(srcAddr, out); break; - default: MOZ_CRASH("unexpected size for partial load"); + if (value->isConstant()) { + const MConstant* mir = value->toConstant(); + Imm32 cst = Imm32(mir->type() == MIRType::Int32 ? 
mir->toInt32() : mir->toInt64()); + switch (type) { + case Scalar::Int8: + case Scalar::Uint8: + masm.movb(cst, dstAddr); + break; + case Scalar::Int16: + case Scalar::Uint16: + masm.movw(cst, dstAddr); + break; + case Scalar::Int32: + case Scalar::Uint32: + masm.movl(cst, dstAddr); + break; + case Scalar::Int64: + case Scalar::Float32: + case Scalar::Float64: + case Scalar::Float32x4: + case Scalar::Int8x16: + case Scalar::Int16x8: + case Scalar::Int32x4: + case Scalar::Uint8Clamped: + case Scalar::MaxTypedArrayViewType: + MOZ_CRASH("unexpected array type"); } - break; - } - case Scalar::Int8x16: - MOZ_ASSERT(numElems == 16, "unexpected partial load"); - masm.loadUnalignedSimd128Int(srcAddr, out); - break; - case Scalar::Int16x8: - MOZ_ASSERT(numElems == 8, "unexpected partial load"); - masm.loadUnalignedSimd128Int(srcAddr, out); - break; - case Scalar::Int8: - case Scalar::Uint8: - case Scalar::Int16: - case Scalar::Uint16: - case Scalar::Int32: - case Scalar::Uint32: - case Scalar::Int64: - case Scalar::Float32: - case Scalar::Float64: - case Scalar::Uint8Clamped: - case Scalar::MaxTypedArrayViewType: - MOZ_CRASH("should only handle SIMD types"); - } -} - -static wasm::MemoryAccess -AsmJSMemoryAccess(uint32_t before, wasm::MemoryAccess::OutOfBoundsBehavior throwBehavior, - uint32_t offsetWithinWholeSimdVector = 0) -{ - return wasm::MemoryAccess(before, throwBehavior, wasm::MemoryAccess::WrapOffset, - offsetWithinWholeSimdVector); -} - -void -CodeGeneratorX64::load(Scalar::Type type, const Operand& srcAddr, AnyRegister out) -{ - switch (type) { - case Scalar::Int8: masm.movsbl(srcAddr, out.gpr()); break; - case Scalar::Uint8: masm.movzbl(srcAddr, out.gpr()); break; - case Scalar::Int16: masm.movswl(srcAddr, out.gpr()); break; - case Scalar::Uint16: masm.movzwl(srcAddr, out.gpr()); break; - case Scalar::Int32: - case Scalar::Uint32: masm.movl(srcAddr, out.gpr()); break; - case Scalar::Float32: masm.loadFloat32(srcAddr, out.fpu()); break; - case Scalar::Float64: 
masm.loadDouble(srcAddr, out.fpu()); break; - case Scalar::Float32x4: - case Scalar::Int8x16: - case Scalar::Int16x8: - case Scalar::Int32x4: - MOZ_CRASH("SIMD loads should be handled in emitSimdLoad"); - case Scalar::Int64: - MOZ_CRASH("int64 loads must use load64"); - case Scalar::Uint8Clamped: - case Scalar::MaxTypedArrayViewType: - MOZ_CRASH("unexpected array type"); - } -} - -void -CodeGeneratorX64::loadI64(Scalar::Type type, const Operand& srcAddr, Register64 out) -{ - switch (type) { - case Scalar::Int8: masm.movsbq(srcAddr, out.reg); break; - case Scalar::Uint8: masm.movzbq(srcAddr, out.reg); break; - case Scalar::Int16: masm.movswq(srcAddr, out.reg); break; - case Scalar::Uint16: masm.movzwq(srcAddr, out.reg); break; - case Scalar::Int32: masm.movslq(srcAddr, out.reg); break; - // Int32 to int64 moves zero-extend by default. - case Scalar::Uint32: masm.movl(srcAddr, out.reg); break; - case Scalar::Int64: masm.movq(srcAddr, out.reg); break; - case Scalar::Float32: - case Scalar::Float64: - case Scalar::Float32x4: - case Scalar::Int8x16: - case Scalar::Int16x8: - case Scalar::Int32x4: - MOZ_CRASH("non-int64 loads should use load()"); - case Scalar::Uint8Clamped: - case Scalar::MaxTypedArrayViewType: - MOZ_CRASH("unexpected array type"); + } else { + masm.wasmStore(type, numSimdElems, ToAnyRegister(value), dstAddr); } } template <typename T> void CodeGeneratorX64::emitWasmLoad(T* ins) { const MWasmLoad* mir = ins->mir(); - bool isInt64 = mir->type() == MIRType::Int64; - - Scalar::Type accessType = mir->accessType(); - MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI"); - MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI"); - MOZ_ASSERT(mir->offset() <= INT32_MAX); + MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit); const LAllocation* ptr = ins->ptr(); Operand srcAddr = ptr->isBogus() ? 
Operand(HeapReg, mir->offset()) : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset()); - uint32_t before = masm.size(); - if (isInt64) - loadI64(accessType, srcAddr, ToOutRegister64(ins)); + memoryBarrier(mir->barrierBefore()); + + if (mir->type() == MIRType::Int64) + masm.wasmLoadI64(mir->accessType(), srcAddr, ToOutRegister64(ins)); else - load(accessType, srcAddr, ToAnyRegister(ins->output())); - uint32_t after = masm.size(); + masm.wasmLoad(mir->accessType(), mir->numSimdElems(), srcAddr, ToAnyRegister(ins->output())); - verifyLoadDisassembly(before, after, isInt64, accessType, /* numElems */ 0, srcAddr, - *ins->output()->output()); + memoryBarrier(mir->barrierAfter()); } void CodeGeneratorX64::visitWasmLoad(LWasmLoad* ins) { emitWasmLoad(ins); } @@ -560,463 +482,184 @@ CodeGeneratorX64::visitWasmLoadI64(LWasm emitWasmLoad(ins); } template <typename T> void CodeGeneratorX64::emitWasmStore(T* ins) { const MWasmStore* mir = ins->mir(); - - Scalar::Type accessType = mir->accessType(); - MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI"); - MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI"); - MOZ_ASSERT(mir->offset() <= INT32_MAX); + MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit); const LAllocation* value = ins->getOperand(ins->ValueIndex); const LAllocation* ptr = ins->ptr(); Operand dstAddr = ptr->isBogus() ? 
Operand(HeapReg, mir->offset()) : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset()); - uint32_t before = masm.size(); - store(accessType, value, dstAddr); - uint32_t after = masm.size(); - - verifyStoreDisassembly(before, after, mir->value()->type() == MIRType::Int64, - accessType, /* numElems */ 0, dstAddr, *value); + memoryBarrier(mir->barrierBefore()); + wasmStore(mir->accessType(), mir->numSimdElems(), value, dstAddr); + memoryBarrier(mir->barrierAfter()); } void CodeGeneratorX64::visitWasmStore(LWasmStore* ins) { emitWasmStore(ins); } void CodeGeneratorX64::visitWasmStoreI64(LWasmStoreI64* ins) { emitWasmStore(ins); } void -CodeGeneratorX64::emitSimdLoad(LAsmJSLoadHeap* ins) -{ - const MAsmJSLoadHeap* mir = ins->mir(); - Scalar::Type type = mir->accessType(); - FloatRegister out = ToFloatRegister(ins->output()); - const LAllocation* ptr = ins->ptr(); - Operand srcAddr = ptr->isBogus() - ? Operand(HeapReg, mir->offset()) - : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset()); - - bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr); - - unsigned numElems = mir->numSimdElems(); - if (numElems == 3) { - MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4); - - Operand srcAddrZ = - ptr->isBogus() - ? Operand(HeapReg, 2 * sizeof(float) + mir->offset()) - : Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float) + mir->offset()); - - // Load XY - uint32_t before = masm.size(); - loadSimd(type, 2, srcAddr, out); - uint32_t after = masm.size(); - verifyLoadDisassembly(before, after, /* isInt64 */ false, type, 2, srcAddr, - *ins->output()->output()); - masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw)); - - // Load Z (W is zeroed) - // This is still in bounds, as we've checked with a manual bounds check - // or we had enough space for sure when removing the bounds check. 
- before = after; - loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg); - after = masm.size(); - verifyLoadDisassembly(before, after, /* isInt64 */ false, type, 1, srcAddrZ, - LFloatReg(ScratchSimd128Reg)); - masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw, 8)); - - // Move ZW atop XY - masm.vmovlhps(ScratchSimd128Reg, out, out); - } else { - uint32_t before = masm.size(); - loadSimd(type, numElems, srcAddr, out); - uint32_t after = masm.size(); - verifyLoadDisassembly(before, after, /* isInt64 */ true, type, numElems, srcAddr, - *ins->output()->output()); - masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw)); - } - - if (hasBoundsCheck) - cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr)); -} - -void CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) { const MAsmJSLoadHeap* mir = ins->mir(); - Scalar::Type accessType = mir->accessType(); - - if (Scalar::isSimdType(accessType)) - return emitSimdLoad(ins); + MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit); const LAllocation* ptr = ins->ptr(); const LDefinition* out = ins->output(); + + Scalar::Type accessType = mir->accessType(); + MOZ_ASSERT(!Scalar::isSimdType(accessType)); + Operand srcAddr = ptr->isBogus() ? Operand(HeapReg, mir->offset()) : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset()); - memoryBarrier(mir->barrierBefore()); - - OutOfLineLoadTypedArrayOutOfBounds* ool; - DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool); - uint32_t before = masm.size(); - load(accessType, srcAddr, ToAnyRegister(out)); + masm.wasmLoad(accessType, 0, srcAddr, ToAnyRegister(out)); uint32_t after = masm.size(); - verifyLoadDisassembly(before, after, /* isInt64 */ false, accessType, 0, srcAddr, - *out->output()); - - if (ool) { - MOZ_ASSERT(hasBoundsCheck); - cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr)); - masm.bind(ool->rejoin()); - } - - memoryBarrier(mir->barrierAfter()); - - // We cannot emulate atomic accesses currently. 
- masm.append(AsmJSMemoryAccess(before, (mir->isAtomicAccess() ? - wasm::MemoryAccess::Throw : - wasm::MemoryAccess::CarryOn))); -} - -void -CodeGeneratorX64::store(Scalar::Type type, const LAllocation* value, const Operand& dstAddr) -{ - if (value->isConstant()) { - const MConstant* mir = value->toConstant(); - Imm32 cst = Imm32(mir->type() == MIRType::Int32 ? mir->toInt32() : mir->toInt64()); - switch (type) { - case Scalar::Int8: - case Scalar::Uint8: masm.movb(cst, dstAddr); break; - case Scalar::Int16: - case Scalar::Uint16: masm.movw(cst, dstAddr); break; - case Scalar::Int32: - case Scalar::Uint32: masm.movl(cst, dstAddr); break; - case Scalar::Int64: - case Scalar::Float32: - case Scalar::Float64: - case Scalar::Float32x4: - case Scalar::Int8x16: - case Scalar::Int16x8: - case Scalar::Int32x4: - case Scalar::Uint8Clamped: - case Scalar::MaxTypedArrayViewType: - MOZ_CRASH("unexpected array type"); - } - } else { - switch (type) { - case Scalar::Int8: - case Scalar::Uint8: - masm.movb(ToRegister(value), dstAddr); - break; - case Scalar::Int16: - case Scalar::Uint16: - masm.movw(ToRegister(value), dstAddr); - break; - case Scalar::Int32: - case Scalar::Uint32: - masm.movl(ToRegister(value), dstAddr); - break; - case Scalar::Int64: - masm.movq(ToRegister(value), dstAddr); - break; - case Scalar::Float32: - masm.storeUncanonicalizedFloat32(ToFloatRegister(value), dstAddr); - break; - case Scalar::Float64: - masm.storeUncanonicalizedDouble(ToFloatRegister(value), dstAddr); - break; - case Scalar::Float32x4: - case Scalar::Int8x16: - case Scalar::Int16x8: - case Scalar::Int32x4: - MOZ_CRASH("SIMD stores must be handled in emitSimdStore"); - case Scalar::Uint8Clamped: - case Scalar::MaxTypedArrayViewType: - MOZ_CRASH("unexpected array type"); - } - } -} - -void -CodeGeneratorX64::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in, - const Operand& dstAddr) -{ - switch (type) { - case Scalar::Float32x4: { - switch (numElems) { - // In 
memory-to-register mode, movss zeroes out the high lanes. - case 1: masm.storeUncanonicalizedFloat32(in, dstAddr); break; - // See comment above, which also applies to movsd. - case 2: masm.storeUncanonicalizedDouble(in, dstAddr); break; - case 4: masm.storeUnalignedSimd128Float(in, dstAddr); break; - default: MOZ_CRASH("unexpected size for partial load"); - } - break; - } - case Scalar::Int32x4: { - switch (numElems) { - // In memory-to-register mode, movd zeroes out the high lanes. - case 1: masm.vmovd(in, dstAddr); break; - // See comment above, which also applies to movq. - case 2: masm.vmovq(in, dstAddr); break; - case 4: masm.storeUnalignedSimd128Int(in, dstAddr); break; - default: MOZ_CRASH("unexpected size for partial load"); - } - break; - } - case Scalar::Int8x16: - MOZ_ASSERT(numElems == 16, "unexpected partial store"); - masm.storeUnalignedSimd128Int(in, dstAddr); - break; - case Scalar::Int16x8: - MOZ_ASSERT(numElems == 8, "unexpected partial store"); - masm.storeUnalignedSimd128Int(in, dstAddr); - break; - case Scalar::Int8: - case Scalar::Uint8: - case Scalar::Int16: - case Scalar::Uint16: - case Scalar::Int32: - case Scalar::Uint32: - case Scalar::Int64: - case Scalar::Float32: - case Scalar::Float64: - case Scalar::Uint8Clamped: - case Scalar::MaxTypedArrayViewType: - MOZ_CRASH("should only handle SIMD types"); - } -} - -void -CodeGeneratorX64::emitSimdStore(LAsmJSStoreHeap* ins) -{ - const MAsmJSStoreHeap* mir = ins->mir(); - Scalar::Type type = mir->accessType(); - FloatRegister in = ToFloatRegister(ins->value()); - const LAllocation* ptr = ins->ptr(); - Operand dstAddr = ptr->isBogus() - ? Operand(HeapReg, mir->offset()) - : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset()); - - bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr); - - unsigned numElems = mir->numSimdElems(); - if (numElems == 3) { - MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4); - - Operand dstAddrZ = - ptr->isBogus() - ? 
Operand(HeapReg, 2 * sizeof(float) + mir->offset()) - : Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float) + mir->offset()); - - // It's possible that the Z could be out of bounds when the XY is in - // bounds. To avoid storing the XY before the exception is thrown, we - // store the Z first, and record its offset in the MemoryAccess so - // that the signal handler knows to check the bounds of the full - // access, rather than just the Z. - masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg); - uint32_t before = masm.size(); - storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ); - uint32_t after = masm.size(); - verifyStoreDisassembly(before, after, /* int64 */ false, type, 1, dstAddrZ, - LFloatReg(ScratchSimd128Reg)); - masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw, 8)); - - // Store XY - before = after; - storeSimd(type, 2, in, dstAddr); - after = masm.size(); - verifyStoreDisassembly(before, after, /* int64 */ false, type, 2, dstAddr, - *ins->value()); - masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw)); - } else { - uint32_t before = masm.size(); - storeSimd(type, numElems, in, dstAddr); - uint32_t after = masm.size(); - verifyStoreDisassembly(before, after, /* int64 */ false, type, numElems, dstAddr, - *ins->value()); - masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::Throw)); - } - - if (hasBoundsCheck) - cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr)); + verifyLoadDisassembly(before, after, accessType, srcAddr, *out->output()); + masm.append(wasm::MemoryAccess(before)); } void CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) { const MAsmJSStoreHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit); + + const LAllocation* ptr = ins->ptr(); + const LAllocation* value = ins->value(); + Scalar::Type accessType = mir->accessType(); - const LAllocation* value = ins->value(); + MOZ_ASSERT(!Scalar::isSimdType(accessType)); canonicalizeIfDeterministic(accessType, value); - 
if (Scalar::isSimdType(accessType)) - return emitSimdStore(ins); - - const LAllocation* ptr = ins->ptr(); Operand dstAddr = ptr->isBogus() ? Operand(HeapReg, mir->offset()) : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset()); - memoryBarrier(mir->barrierBefore()); - - Label* rejoin; - DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin); - uint32_t before = masm.size(); - store(accessType, value, dstAddr); + wasmStore(accessType, 0, value, dstAddr); uint32_t after = masm.size(); - verifyStoreDisassembly(before, after, /* int64 */ false, accessType, 0, dstAddr, *value); - - if (rejoin) { - MOZ_ASSERT(hasBoundsCheck); - cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr)); - masm.bind(rejoin); - } - - memoryBarrier(mir->barrierAfter()); - - // See comment in visitAsmJSLoadHeap - masm.append(AsmJSMemoryAccess(before, (mir->isAtomicAccess() ? - wasm::MemoryAccess::Throw : - wasm::MemoryAccess::CarryOn))); + verifyStoreDisassembly(before, after, accessType, dstAddr, *value); + masm.append(wasm::MemoryAccess(before)); } void CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins) { - MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); - MAsmJSCompareExchangeHeap* mir = ins->mir(); - Scalar::Type accessType = mir->accessType(); + MOZ_ASSERT(mir->offset() == 0); Register ptr = ToRegister(ins->ptr()); - BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset()); Register oldval = ToRegister(ins->oldValue()); Register newval = ToRegister(ins->newValue()); + MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); - // Note that we can't use the same machinery as normal asm.js loads/stores - // since signal-handler bounds checking is not yet implemented for atomic - // accesses. - maybeEmitWasmBoundsCheckBranch(mir, ptr); + Scalar::Type accessType = mir->accessType(); + BaseIndex srcAddr(HeapReg, ptr, TimesOne); masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? 
Scalar::Int32 : accessType, srcAddr, oldval, newval, InvalidReg, ToAnyRegister(ins->output())); - MOZ_ASSERT(mir->offset() == 0, - "The AsmJS signal handler doesn't yet support emulating " - "atomic accesses in the case of a fault from an unwrapped offset"); } void CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins) { - MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); - MOZ_ASSERT(ins->mir()->accessType() <= Scalar::Uint32); - MAsmJSAtomicExchangeHeap* mir = ins->mir(); - Scalar::Type accessType = mir->accessType(); + MOZ_ASSERT(mir->offset() == 0); Register ptr = ToRegister(ins->ptr()); - BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset()); Register value = ToRegister(ins->value()); + MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); - // See comment in visitAsmJSCompareExchangeHeap. - maybeEmitWasmBoundsCheckBranch(mir, ptr); + Scalar::Type accessType = mir->accessType(); + MOZ_ASSERT(accessType <= Scalar::Uint32); + + BaseIndex srcAddr(HeapReg, ptr, TimesOne); masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType, srcAddr, value, InvalidReg, ToAnyRegister(ins->output())); - MOZ_ASSERT(mir->offset() == 0, - "The AsmJS signal handler doesn't yet support emulating " - "atomic accesses in the case of a fault from an unwrapped offset"); } void CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins) { - MOZ_ASSERT(ins->mir()->hasUses()); - MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); - MAsmJSAtomicBinopHeap* mir = ins->mir(); - Scalar::Type accessType = mir->accessType(); - accessType = accessType == Scalar::Uint32 ? Scalar::Int32 : accessType; - AtomicOp op = mir->operation(); + MOZ_ASSERT(mir->offset() == 0); + MOZ_ASSERT(mir->hasUses()); Register ptr = ToRegister(ins->ptr()); + const LAllocation* value = ins->value(); Register temp = ins->temp()->isBogusTemp() ? 
InvalidReg : ToRegister(ins->temp()); - BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset()); - - const LAllocation* value = ins->value(); + AnyRegister output = ToAnyRegister(ins->output()); + MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); - // See comment in visitAsmJSCompareExchangeHeap. - maybeEmitWasmBoundsCheckBranch(mir, ptr); + Scalar::Type accessType = mir->accessType(); + if (accessType == Scalar::Uint32) + accessType = Scalar::Int32; - AnyRegister output = ToAnyRegister(ins->output()); + AtomicOp op = mir->operation(); + BaseIndex srcAddr(HeapReg, ptr, TimesOne); + if (value->isConstant()) { atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr, temp, InvalidReg, output); } else { atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr, temp, InvalidReg, output); } - - MOZ_ASSERT(mir->offset() == 0, - "The AsmJS signal handler doesn't yet support emulating " - "atomic accesses in the case of a fault from an unwrapped offset"); } void CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins) { - MOZ_ASSERT(!ins->mir()->hasUses()); + MAsmJSAtomicBinopHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + MOZ_ASSERT(!mir->hasUses()); + + Register ptr = ToRegister(ins->ptr()); + const LAllocation* value = ins->value(); MOZ_ASSERT(ins->addrTemp()->isBogusTemp()); - MAsmJSAtomicBinopHeap* mir = ins->mir(); Scalar::Type accessType = mir->accessType(); AtomicOp op = mir->operation(); - Register ptr = ToRegister(ins->ptr()); - BaseIndex srcAddr(HeapReg, ptr, TimesOne, mir->offset()); - const LAllocation* value = ins->value(); - - // See comment in visitAsmJSCompareExchangeHeap. 
- maybeEmitWasmBoundsCheckBranch(mir, ptr); + BaseIndex srcAddr(HeapReg, ptr, TimesOne); if (value->isConstant()) atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr); else atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr); - MOZ_ASSERT(mir->offset() == 0, - "The AsmJS signal handler doesn't yet support emulating " - "atomic accesses in the case of a fault from an unwrapped offset"); } void CodeGeneratorX64::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins) { MWasmLoadGlobalVar* mir = ins->mir(); MIRType type = mir->type();
--- a/js/src/jit/x64/CodeGenerator-x64.h +++ b/js/src/jit/x64/CodeGenerator-x64.h @@ -22,31 +22,20 @@ class CodeGeneratorX64 : public CodeGene ValueOperand ToValue(LInstruction* ins, size_t pos); ValueOperand ToOutValue(LInstruction* ins); ValueOperand ToTempValue(LInstruction* ins, size_t pos); void storeUnboxedValue(const LAllocation* value, MIRType valueType, Operand dest, MIRType slotType); void memoryBarrier(MemoryBarrierBits barrier); - void load(Scalar::Type type, const Operand& srcAddr, AnyRegister out); - void loadI64(Scalar::Type type, const Operand& srcAddr, Register64 out); - - void store(Scalar::Type type, const LAllocation* value, const Operand& dstAddr); - - void loadSimd(Scalar::Type type, unsigned numElems, const Operand& srcAddr, FloatRegister out); - void storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in, const Operand& dstAddr); + void wasmStore(Scalar::Type type, unsigned numSimdElems, const LAllocation* value, Operand dstAddr); + template <typename T> void emitWasmLoad(T* ins); + template <typename T> void emitWasmStore(T* ins); - void emitSimdLoad(LAsmJSLoadHeap* ins); - void emitSimdStore(LAsmJSStoreHeap* ins); - - template <typename T> - void emitWasmLoad(T* ins); - template <typename T> - void emitWasmStore(T* ins); public: CodeGeneratorX64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm); public: void visitValue(LValue* value); void visitBox(LBox* box); void visitUnbox(LUnbox* unbox); void visitCompareB(LCompareB* lir);
--- a/js/src/jit/x64/Lowering-x64.cpp +++ b/js/src/jit/x64/Lowering-x64.cpp @@ -189,16 +189,31 @@ void LIRGeneratorX64::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins) { MOZ_ASSERT(ins->input()->type() == MIRType::Int32); LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input())); define(lir, ins); } void +LIRGeneratorX64::visitWasmLoad(MWasmLoad* ins) +{ + if (ins->type() != MIRType::Int64) { + lowerWasmLoad(ins); + return; + } + + MDefinition* base = ins->base(); + MOZ_ASSERT(base->type() == MIRType::Int32); + + auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base)); + defineInt64(lir, ins); +} + +void LIRGeneratorX64::visitWasmStore(MWasmStore* ins) { MDefinition* base = ins->base(); MOZ_ASSERT(base->type() == MIRType::Int32); MDefinition* value = ins->value(); LAllocation valueAlloc; switch (ins->accessType()) { @@ -236,54 +251,44 @@ LIRGeneratorX64::visitWasmStore(MWasmSto } void LIRGeneratorX64::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) { MDefinition* base = ins->base(); MOZ_ASSERT(base->type() == MIRType::Int32); - // For simplicity, require a register if we're going to emit a bounds-check - // branch, so that we don't have special cases for constants. - LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins) - ? useRegisterAtStart(base) - : useRegisterOrZeroAtStart(base); - - define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins); + define(new(alloc()) LAsmJSLoadHeap(useRegisterOrZeroAtStart(base)), ins); } void LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) { MDefinition* base = ins->base(); MOZ_ASSERT(base->type() == MIRType::Int32); - // For simplicity, require a register if we're going to emit a bounds-check - // branch, so that we don't have special cases for constants. - LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins) - ? 
useRegisterAtStart(base) - : useRegisterOrZeroAtStart(base); - LAsmJSStoreHeap* lir = nullptr; // initialize to silence GCC warning switch (ins->accessType()) { case Scalar::Int8: case Scalar::Uint8: case Scalar::Int16: case Scalar::Uint16: case Scalar::Int32: case Scalar::Uint32: - lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterOrConstantAtStart(ins->value())); + lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base), + useRegisterOrConstantAtStart(ins->value())); break; case Scalar::Float32: case Scalar::Float64: case Scalar::Float32x4: case Scalar::Int8x16: case Scalar::Int16x8: case Scalar::Int32x4: - lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value())); + lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base), + useRegisterAtStart(ins->value())); break; case Scalar::Int64: case Scalar::Uint8Clamped: case Scalar::MaxTypedArrayViewType: MOZ_CRASH("unexpected array type"); } add(lir, ins); } @@ -459,31 +464,16 @@ LIRGeneratorX64::lowerUModI64(MMod* mod) { LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), tempFixed(rax)); defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx)))); } void -LIRGeneratorX64::visitWasmLoad(MWasmLoad* ins) -{ - if (ins->type() != MIRType::Int64) { - lowerWasmLoad(ins); - return; - } - - MDefinition* base = ins->base(); - MOZ_ASSERT(base->type() == MIRType::Int32); - - auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base)); - defineInt64(lir, ins); -} - -void LIRGeneratorX64::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) { MDefinition* opd = ins->input(); MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32); LDefinition maybeTemp = ins->isUnsigned() ? tempDouble() : LDefinition::BogusTemp(); defineInt64(new(alloc()) LWasmTruncateToInt64(useRegister(opd), maybeTemp), ins); }
--- a/js/src/jit/x64/Lowering-x64.h +++ b/js/src/jit/x64/Lowering-x64.h @@ -44,30 +44,30 @@ class LIRGeneratorX64 : public LIRGenera bool needTempForPostBarrier() { return true; } void lowerDivI64(MDiv* div); void lowerModI64(MMod* mod); void lowerUDivI64(MDiv* div); void lowerUModI64(MMod* mod); public: - void visitWasmLoad(MWasmLoad* ins); void visitBox(MBox* box); void visitUnbox(MUnbox* unbox); void visitReturn(MReturn* ret); void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins); void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins); void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins); void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins); void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins); void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins); void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins); void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins); void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins); void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins); + void visitWasmLoad(MWasmLoad* ins); void visitWasmStore(MWasmStore* ins); void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins); void visitSubstr(MSubstr* ins); void visitRandom(MRandom* ins); void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins); void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins); void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins); };
--- a/js/src/jit/x64/MacroAssembler-x64-inl.h +++ b/js/src/jit/x64/MacroAssembler-x64-inl.h @@ -794,16 +794,32 @@ MacroAssembler::truncateDoubleToUInt64(A loadPtr(dest, temp); or64(Imm64(0x8000000000000000), Register64(temp)); storePtr(temp, dest); bind(&done); } +// ======================================================================== +// wasm support + +template <class L> +void +MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label) +{ + MOZ_CRASH("x64 should never emit a bounds check"); +} + +void +MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) +{ + MOZ_CRASH("x64 should never emit a bounds check"); +} + //}}} check_macroassembler_style // =============================================================== void MacroAssemblerX64::incrementInt32Value(const Address& addr) { asMasm().addPtr(Imm32(1), addr); }
--- a/js/src/jit/x64/MacroAssembler-x64.cpp +++ b/js/src/jit/x64/MacroAssembler-x64.cpp @@ -624,16 +624,180 @@ MacroAssembler::storeUnboxedValue(Consta template void MacroAssembler::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const Address& dest, MIRType slotType); template void MacroAssembler::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const BaseIndex& dest, MIRType slotType); +// ======================================================================== +// wasm support + +void +MacroAssembler::wasmLoad(Scalar::Type type, unsigned numSimdElems, Operand srcAddr, AnyRegister out) +{ + switch (type) { + case Scalar::Int8: + movsbl(srcAddr, out.gpr()); + break; + case Scalar::Uint8: + movzbl(srcAddr, out.gpr()); + break; + case Scalar::Int16: + movswl(srcAddr, out.gpr()); + break; + case Scalar::Uint16: + movzwl(srcAddr, out.gpr()); + break; + case Scalar::Int32: + case Scalar::Uint32: + movl(srcAddr, out.gpr()); + break; + case Scalar::Float32: + loadFloat32(srcAddr, out.fpu()); + break; + case Scalar::Float64: + loadDouble(srcAddr, out.fpu()); + break; + case Scalar::Float32x4: + switch (numSimdElems) { + // In memory-to-register mode, movss zeroes out the high lanes. + case 1: loadFloat32(srcAddr, out.fpu()); break; + // See comment above, which also applies to movsd. + case 2: loadDouble(srcAddr, out.fpu()); break; + case 4: loadUnalignedSimd128Float(srcAddr, out.fpu()); break; + default: MOZ_CRASH("unexpected size for partial load"); + } + break; + case Scalar::Int32x4: + switch (numSimdElems) { + // In memory-to-register mode, movd zeroes out the high lanes. + case 1: vmovd(srcAddr, out.fpu()); break; + // See comment above, which also applies to movq. 
+ case 2: vmovq(srcAddr, out.fpu()); break; + case 4: loadUnalignedSimd128Int(srcAddr, out.fpu()); break; + default: MOZ_CRASH("unexpected size for partial load"); + } + break; + case Scalar::Int8x16: + MOZ_ASSERT(numSimdElems == 16, "unexpected partial load"); + loadUnalignedSimd128Int(srcAddr, out.fpu()); + break; + case Scalar::Int16x8: + MOZ_ASSERT(numSimdElems == 8, "unexpected partial load"); + loadUnalignedSimd128Int(srcAddr, out.fpu()); + break; + case Scalar::Int64: + MOZ_CRASH("int64 loads must use load64"); + case Scalar::Uint8Clamped: + case Scalar::MaxTypedArrayViewType: + MOZ_CRASH("unexpected array type"); + } +} + +void +MacroAssembler::wasmLoadI64(Scalar::Type type, Operand srcAddr, Register64 out) +{ + switch (type) { + case Scalar::Int8: + movsbq(srcAddr, out.reg); + break; + case Scalar::Uint8: + movzbq(srcAddr, out.reg); + break; + case Scalar::Int16: + movswq(srcAddr, out.reg); + break; + case Scalar::Uint16: + movzwq(srcAddr, out.reg); + break; + case Scalar::Int32: + movslq(srcAddr, out.reg); + break; + // Int32 to int64 moves zero-extend by default. 
+ case Scalar::Uint32: + movl(srcAddr, out.reg); + break; + case Scalar::Int64: + movq(srcAddr, out.reg); + break; + case Scalar::Float32: + case Scalar::Float64: + case Scalar::Float32x4: + case Scalar::Int8x16: + case Scalar::Int16x8: + case Scalar::Int32x4: + MOZ_CRASH("non-int64 loads should use load()"); + case Scalar::Uint8Clamped: + case Scalar::MaxTypedArrayViewType: + MOZ_CRASH("unexpected array type"); + } +} + +void +MacroAssembler::wasmStore(Scalar::Type type, unsigned numSimdElems, AnyRegister value, + Operand dstAddr) +{ + switch (type) { + case Scalar::Int8: + case Scalar::Uint8: + movb(value.gpr(), dstAddr); + break; + case Scalar::Int16: + case Scalar::Uint16: + movw(value.gpr(), dstAddr); + break; + case Scalar::Int32: + case Scalar::Uint32: + movl(value.gpr(), dstAddr); + break; + case Scalar::Int64: + movq(value.gpr(), dstAddr); + break; + case Scalar::Float32: + storeUncanonicalizedFloat32(value.fpu(), dstAddr); + break; + case Scalar::Float64: + storeUncanonicalizedDouble(value.fpu(), dstAddr); + break; + case Scalar::Float32x4: + switch (numSimdElems) { + // In memory-to-register mode, movss zeroes out the high lanes. + case 1: storeUncanonicalizedFloat32(value.fpu(), dstAddr); break; + // See comment above, which also applies to movsd. + case 2: storeUncanonicalizedDouble(value.fpu(), dstAddr); break; + case 4: storeUnalignedSimd128Float(value.fpu(), dstAddr); break; + default: MOZ_CRASH("unexpected size for partial load"); + } + break; + case Scalar::Int32x4: + switch (numSimdElems) { + // In memory-to-register mode, movd zeroes out the high lanes. + case 1: vmovd(value.fpu(), dstAddr); break; + // See comment above, which also applies to movq. 
+ case 2: vmovq(value.fpu(), dstAddr); break; + case 4: storeUnalignedSimd128Int(value.fpu(), dstAddr); break; + default: MOZ_CRASH("unexpected size for partial load"); + } + break; + case Scalar::Int8x16: + MOZ_ASSERT(numSimdElems == 16, "unexpected partial store"); + storeUnalignedSimd128Int(value.fpu(), dstAddr); + break; + case Scalar::Int16x8: + MOZ_ASSERT(numSimdElems == 8, "unexpected partial store"); + storeUnalignedSimd128Int(value.fpu(), dstAddr); + break; + case Scalar::Uint8Clamped: + case Scalar::MaxTypedArrayViewType: + MOZ_CRASH("unexpected array type"); + } +} + void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry) { vcvttsd2sq(input, output); // Check that the result is in the uint32_t range. ScratchRegisterScope scratch(*this); move32(Imm32(0xffffffff), scratch);
--- a/js/src/jit/x86-shared/Architecture-x86-shared.h +++ b/js/src/jit/x86-shared/Architecture-x86-shared.h @@ -452,16 +452,12 @@ hasUnaliasedDouble() // On ARM, Dn aliases both S2n and S2n+1, so if you need to convert a float32 // to a double as a temporary, you need a temporary double register. inline bool hasMultiAlias() { return false; } -// See MIRGenerator::foldableOffsetRange for more info. -static const size_t WasmCheckedImmediateRange = 4096; -static const size_t WasmImmediateRange = UINT32_C(0x80000000); - } // namespace jit } // namespace js #endif /* jit_x86_shared_Architecture_x86_h */
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h +++ b/js/src/jit/x86-shared/Assembler-x86-shared.h @@ -299,16 +299,18 @@ class AssemblerX86Shared : public Assemb AboveOrEqual = X86Encoding::ConditionAE, Below = X86Encoding::ConditionB, BelowOrEqual = X86Encoding::ConditionBE, GreaterThan = X86Encoding::ConditionG, GreaterThanOrEqual = X86Encoding::ConditionGE, LessThan = X86Encoding::ConditionL, LessThanOrEqual = X86Encoding::ConditionLE, Overflow = X86Encoding::ConditionO, + CarrySet = X86Encoding::ConditionC, + CarryClear = X86Encoding::ConditionNC, Signed = X86Encoding::ConditionS, NotSigned = X86Encoding::ConditionNS, Zero = X86Encoding::ConditionE, NonZero = X86Encoding::ConditionNE, Parity = X86Encoding::ConditionP, NoParity = X86Encoding::ConditionNP }; @@ -1076,44 +1078,16 @@ class AssemblerX86Shared : public Assemb } static void patchTwoByteNopToJump(uint8_t* jump, uint8_t* target) { X86Encoding::BaseAssembler::patchTwoByteNopToJump(jump, target); } static void patchJumpToTwoByteNop(uint8_t* jump) { X86Encoding::BaseAssembler::patchJumpToTwoByteNop(jump); } - static void UpdateBoundsCheck(uint8_t* patchAt, uint32_t heapLength) { - // On x64, even with signal handling being used for most bounds checks, - // there may be atomic operations that depend on explicit checks. All - // accesses that have been recorded are the only ones that need bound - // checks. - // - // An access is out-of-bounds iff - // ptr + offset + data-type-byte-size > heapLength - // i.e ptr + offset + data-type-byte-size - 1 >= heapLength - // i.e. ptr >= heapLength - data-type-byte-size - offset + 1. - // - // before := data-type-byte-size + offset - 1 - uint32_t before = reinterpret_cast<uint32_t*>(patchAt)[-1]; - uint32_t after = before + heapLength; - - // If the computed index `before` already is out of bounds, - // we need to make sure the bounds check will fail all the time. 
- // For bounds checks, the sequence of instructions we use is: - // cmp(ptrReg, #before) - // jae(OutOfBounds) - // so replace the cmp immediate with 0. - if (after > heapLength) - after = 0; - - MOZ_ASSERT_IF(after, int32_t(after) >= int32_t(before)); - reinterpret_cast<uint32_t*>(patchAt)[-1] = after; - } - void breakpoint() { masm.int3(); } static bool HasSSE2() { return CPUInfo::IsSSE2Present(); } static bool HasSSE3() { return CPUInfo::IsSSE3Present(); } static bool HasSSSE3() { return CPUInfo::IsSSSE3Present(); } static bool HasSSE41() { return CPUInfo::IsSSE41Present(); }
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp +++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp @@ -419,92 +419,26 @@ CodeGeneratorX86Shared::visitOutOfLineLo Register destReg = ool->dest().gpr(); masm.mov(ImmWord(0), destReg); break; } masm.jmp(ool->rejoin()); } void -CodeGeneratorX86Shared::visitOffsetBoundsCheck(OffsetBoundsCheck* oolCheck) -{ - // The access is heap[ptr + offset]. The inline code checks that - // ptr < heap.length - offset. We get here when that fails. We need to check - // for the case where ptr + offset >= 0, in which case the access is still - // in bounds. - MOZ_ASSERT(oolCheck->offset() != 0, - "An access without a constant offset doesn't need a separate OffsetBoundsCheck"); - masm.cmp32(oolCheck->ptrReg(), Imm32(-uint32_t(oolCheck->offset()))); - if (oolCheck->maybeOutOfBounds()) - masm.j(Assembler::Below, oolCheck->maybeOutOfBounds()); - else - masm.j(Assembler::Below, wasm::JumpTarget::OutOfBounds); - -#ifdef JS_CODEGEN_X64 - // In order to get the offset to wrap properly, we must sign-extend the - // pointer to 32-bits. We'll zero out the sign extension immediately - // after the access to restore asm.js invariants. - masm.movslq(oolCheck->ptrReg(), oolCheck->ptrReg()); -#endif - - masm.jmp(oolCheck->rejoin()); -} - -void -CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access, - const MInstruction* mir, - Register ptr, Label* maybeFail) +CodeGeneratorX86Shared::visitWasmAddOffset(LWasmAddOffset* lir) { - // Emit a bounds-checking branch for |access|. - - MOZ_ASSERT(gen->needsBoundsCheckBranch(access)); - - Label* pass = nullptr; - - // If we have a non-zero offset, it's possible that |ptr| itself is out of - // bounds, while adding the offset computes an in-bounds address. To catch - // this case, we need a second branch, which we emit out of line since it's - // unlikely to be needed in normal programs. 
- if (access->offset() != 0) { - auto oolCheck = new(alloc()) OffsetBoundsCheck(maybeFail, ptr, access->offset()); - maybeFail = oolCheck->entry(); - pass = oolCheck->rejoin(); - addOutOfLineCode(oolCheck, mir); - } - - // The bounds check is a comparison with an immediate value. The asm.js - // module linking process will add the length of the heap to the immediate - // field, so -access->endOffset() will turn into - // (heapLength - access->endOffset()), allowing us to test whether the end - // of the access is beyond the end of the heap. - MOZ_ASSERT(access->endOffset() >= 1, - "need to subtract 1 to use JAE, see also AssemblerX86Shared::UpdateBoundsCheck"); - - uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(1 - access->endOffset())).offset(); - if (maybeFail) - masm.j(Assembler::AboveOrEqual, maybeFail); - else - masm.j(Assembler::AboveOrEqual, wasm::JumpTarget::OutOfBounds); - - if (pass) - masm.bind(pass); - - masm.append(wasm::BoundsCheck(cmpOffset)); -} - -void -CodeGeneratorX86Shared::visitWasmBoundsCheck(LWasmBoundsCheck* ins) -{ - const MWasmBoundsCheck* mir = ins->mir(); - - MOZ_ASSERT(gen->needsBoundsCheckBranch(mir)); - MOZ_ASSERT(mir->offset() <= INT32_MAX); - - Register ptrReg = ToRegister(ins->ptr()); - maybeEmitWasmBoundsCheckBranch(mir, ptrReg, mir->isRedundant()); + MWasmAddOffset* mir = lir->mir(); + Register base = ToRegister(lir->base()); + Register out = ToRegister(lir->output()); + + if (base != out) + masm.move32(base, out); + masm.add32(Imm32(mir->offset()), out); + masm.j(Assembler::CarrySet, wasm::JumpTarget::OutOfBounds); } void CodeGeneratorX86Shared::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) { FloatRegister input = ToFloatRegister(lir->input()); Register output = ToRegister(lir->output()); @@ -532,112 +466,16 @@ CodeGeneratorX86Shared::visitWasmTruncat else if (inputType == MIRType::Float32) masm.wasmTruncateFloat32ToInt32(input, output, oolEntry); else MOZ_CRASH("unexpected type"); masm.bind(ool->rejoin()); } -void 
-CodeGeneratorX86Shared::maybeEmitWasmBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr, - bool redundant) -{ - if (!gen->needsBoundsCheckBranch(mir)) - return; - - MOZ_ASSERT(mir->endOffset() >= 1, - "need to subtract 1 to use JAE, see also AssemblerX86Shared::UpdateBoundsCheck"); - - // TODO: See 1287224 Unify MWasmBoundsCheck::redunant_ and needsBoundsCheck - if (!redundant) { - uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(1 - mir->endOffset())).offset(); - masm.j(Assembler::AboveOrEqual, wasm::JumpTarget::OutOfBounds); - masm.append(wasm::BoundsCheck(cmpOffset)); - } else { -#ifdef DEBUG - Label ok; - uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(1 - mir->endOffset())).offset(); - masm.j(Assembler::Below, &ok); - masm.assumeUnreachable("Redundant bounds check failed!"); - masm.bind(&ok); - masm.append(wasm::BoundsCheck(cmpOffset)); -#endif - } -} - -bool -CodeGeneratorX86Shared::maybeEmitThrowingAsmJSBoundsCheck(const MWasmMemoryAccess* access, - const MInstruction* mir, - const LAllocation* ptr) -{ - if (!gen->needsBoundsCheckBranch(access)) - return false; - - emitAsmJSBoundsCheckBranch(access, mir, ToRegister(ptr), nullptr); - return true; -} - -bool -CodeGeneratorX86Shared::maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins, - OutOfLineLoadTypedArrayOutOfBounds** ool) -{ - MOZ_ASSERT(!Scalar::isSimdType(mir->accessType())); - *ool = nullptr; - - if (!gen->needsBoundsCheckBranch(mir)) - return false; - - Label* rejoin = nullptr; - if (!mir->isAtomicAccess()) { - *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(ins->output()), - mir->accessType()); - addOutOfLineCode(*ool, mir); - rejoin = (*ool)->entry(); - } - - emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), rejoin); - return true; -} - -bool -CodeGeneratorX86Shared::maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins, - Label** rejoin) -{ - MOZ_ASSERT(!Scalar::isSimdType(mir->accessType())); - - 
*rejoin = nullptr; - if (!gen->needsBoundsCheckBranch(mir)) - return false; - - if (!mir->isAtomicAccess()) - *rejoin = alloc().lifoAlloc()->newInfallible<Label>(); - - emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ins->ptr()), *rejoin); - return true; -} - -void -CodeGeneratorX86Shared::cleanupAfterAsmJSBoundsCheckBranch(const MWasmMemoryAccess* access, - Register ptr) -{ - // Clean up after performing a heap access checked by a branch. - - MOZ_ASSERT(gen->needsBoundsCheckBranch(access)); - -#ifdef JS_CODEGEN_X64 - // If the offset is 0, we don't use an OffsetBoundsCheck. - if (access->offset() != 0) { - // Zero out the high 32 bits, in case the OffsetBoundsCheck code had to - // sign-extend (movslq) the pointer value to get wraparound to work. - masm.movl(ptr, ptr); - } -#endif -} - bool CodeGeneratorX86Shared::generateOutOfLineCode() { if (!CodeGeneratorShared::generateOutOfLineCode()) return false; if (deoptLabel_.used()) { // All non-table-based bailouts will go here.
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h +++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h @@ -45,35 +45,16 @@ class CodeGeneratorX86Shared : public Co AnyRegister dest() const { return dest_; } Scalar::Type viewType() const { return viewType_; } void accept(CodeGeneratorX86Shared* codegen) { codegen->visitOutOfLineLoadTypedArrayOutOfBounds(this); } }; - // Additional bounds checking for heap accesses with constant offsets. - class OffsetBoundsCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared> - { - Label* maybeOutOfBounds_; - Register ptrReg_; - int32_t offset_; - public: - OffsetBoundsCheck(Label* maybeOutOfBounds, Register ptrReg, int32_t offset) - : maybeOutOfBounds_(maybeOutOfBounds), ptrReg_(ptrReg), offset_(offset) - {} - - Label* maybeOutOfBounds() const { return maybeOutOfBounds_; } - Register ptrReg() const { return ptrReg_; } - int32_t offset() const { return offset_; } - void accept(CodeGeneratorX86Shared* codegen) { - codegen->visitOffsetBoundsCheck(this); - } - }; - // Additional bounds check for vector Float to Int conversion, when the // undefined pattern is seen. Might imply a bailout. 
class OutOfLineSimdFloatToIntCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared> { Register temp_; FloatRegister input_; LInstruction* ins_; @@ -86,39 +67,17 @@ class CodeGeneratorX86Shared : public Co FloatRegister input() const { return input_; } LInstruction* ins() const { return ins_; } void accept(CodeGeneratorX86Shared* codegen) { codegen->visitOutOfLineSimdFloatToIntCheck(this); } }; - private: - void emitAsmJSBoundsCheckBranch(const MWasmMemoryAccess* mir, const MInstruction* ins, - Register ptr, Label* fail); - - protected: - void maybeEmitWasmBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr, - bool redundant = false); - public: - // For SIMD and atomic loads and stores (which throw on out-of-bounds): - bool maybeEmitThrowingAsmJSBoundsCheck(const MWasmMemoryAccess* mir, const MInstruction* ins, - const LAllocation* ptr); - - // For asm.js plain and atomic loads that possibly require a bounds check: - bool maybeEmitAsmJSLoadBoundsCheck(const MAsmJSLoadHeap* mir, LAsmJSLoadHeap* ins, - OutOfLineLoadTypedArrayOutOfBounds** ool); - - // For asm.js plain and atomic stores that possibly require a bounds check: - bool maybeEmitAsmJSStoreBoundsCheck(const MAsmJSStoreHeap* mir, LAsmJSStoreHeap* ins, - Label** rejoin); - - void cleanupAfterAsmJSBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr); - NonAssertingLabel deoptLabel_; Operand ToOperand(const LAllocation& a); Operand ToOperand(const LAllocation* a); Operand ToOperand(const LDefinition* def); #ifdef JS_PUNBOX64 Operand ToOperandOrRegister64(const LInt64Allocation input); @@ -279,29 +238,28 @@ class CodeGeneratorX86Shared : public Co virtual void visitGuardClass(LGuardClass* guard); virtual void visitEffectiveAddress(LEffectiveAddress* ins); virtual void visitUDivOrMod(LUDivOrMod* ins); virtual void visitUDivOrModConstant(LUDivOrModConstant *ins); virtual void visitAsmJSPassStackArg(LAsmJSPassStackArg* ins); virtual void visitAsmJSPassStackArgI64(LAsmJSPassStackArgI64* ins); 
virtual void visitAsmSelect(LAsmSelect* ins); virtual void visitAsmReinterpret(LAsmReinterpret* lir); - virtual void visitWasmBoundsCheck(LWasmBoundsCheck* ins); virtual void visitMemoryBarrier(LMemoryBarrier* ins); + virtual void visitWasmAddOffset(LWasmAddOffset* lir); virtual void visitWasmTruncateToInt32(LWasmTruncateToInt32* lir); virtual void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir); virtual void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir); virtual void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir); virtual void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir); virtual void visitCopySignD(LCopySignD* lir); virtual void visitCopySignF(LCopySignF* lir); virtual void visitRotateI64(LRotateI64* lir); void visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds* ool); - void visitOffsetBoundsCheck(OffsetBoundsCheck* oolCheck); void visitNegI(LNegI* lir); void visitNegD(LNegD* lir); void visitNegF(LNegF* lir); void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool); // SIMD operators
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp +++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp @@ -322,27 +322,16 @@ LIRGeneratorX86Shared::visitAsmJSNeg(MAs defineReuseInput(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins, 0); break; default: MOZ_CRASH(); } } void -LIRGeneratorX86Shared::visitWasmBoundsCheck(MWasmBoundsCheck* ins) -{ - if (!gen->needsBoundsCheckBranch(ins)) - return; - - MDefinition* index = ins->input(); - auto* lir = new(alloc()) LWasmBoundsCheck(useRegisterAtStart(index)); - add(lir, ins); -} - -void LIRGeneratorX86Shared::lowerWasmLoad(MWasmLoad* ins) { MOZ_ASSERT(ins->type() != MIRType::Int64); MDefinition* base = ins->base(); MOZ_ASSERT(base->type() == MIRType::Int32); auto* lir = new(alloc()) LWasmLoad(useRegisterOrZeroAtStart(base));
--- a/js/src/jit/x86-shared/Lowering-x86-shared.h +++ b/js/src/jit/x86-shared/Lowering-x86-shared.h @@ -41,17 +41,16 @@ class LIRGeneratorX86Shared : public LIR MDefinition* rhs); void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir, MDefinition* lhs, MDefinition* rhs); void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir, MDefinition* lhs, MDefinition* rhs); void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir, MDefinition* lhs, MDefinition* rhs); void visitAsmJSNeg(MAsmJSNeg* ins); - void visitWasmBoundsCheck(MWasmBoundsCheck* ins); void lowerWasmLoad(MWasmLoad* ins); void visitAsmSelect(MAsmSelect* ins); void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs); void lowerDivI(MDiv* div); void lowerModI(MMod* mod); void lowerUDiv(MDiv* div); void lowerUMod(MMod* mod); void lowerUrshD(MUrsh* mir);
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h +++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h @@ -635,19 +635,19 @@ MacroAssembler::branchDouble(DoubleCondi j(Parity, label); return; } MOZ_ASSERT(!(cond & DoubleConditionBitSpecial)); j(ConditionFromDoubleCondition(cond), label); } -template <typename T> +template <typename T, typename L> void -MacroAssembler::branchAdd32(Condition cond, T src, Register dest, Label* label) +MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L label) { addl(src, dest); j(cond, label); } template <typename T> void MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* label)
--- a/js/src/jit/x86/CodeGenerator-x86.cpp +++ b/js/src/jit/x86/CodeGenerator-x86.cpp @@ -252,149 +252,59 @@ CodeGeneratorX86::visitAsmJSUInt32ToFloa if (input != temp) masm.mov(input, temp); // Beware: convertUInt32ToFloat32 clobbers input. masm.convertUInt32ToFloat32(temp, output); } void -CodeGeneratorX86::load(Scalar::Type accessType, const Operand& srcAddr, const LDefinition* out) -{ - switch (accessType) { - case Scalar::Int8: masm.movsblWithPatch(srcAddr, ToRegister(out)); break; - case Scalar::Uint8Clamped: - case Scalar::Uint8: masm.movzblWithPatch(srcAddr, ToRegister(out)); break; - case Scalar::Int16: masm.movswlWithPatch(srcAddr, ToRegister(out)); break; - case Scalar::Uint16: masm.movzwlWithPatch(srcAddr, ToRegister(out)); break; - case Scalar::Int32: - case Scalar::Uint32: masm.movlWithPatch(srcAddr, ToRegister(out)); break; - case Scalar::Float32: masm.vmovssWithPatch(srcAddr, ToFloatRegister(out)); break; - case Scalar::Float64: masm.vmovsdWithPatch(srcAddr, ToFloatRegister(out)); break; - case Scalar::Float32x4: - case Scalar::Int8x16: - case Scalar::Int16x8: - case Scalar::Int32x4: MOZ_CRASH("SIMD load should be handled in their own function"); - case Scalar::Int64: MOZ_CRASH("should be handled in a separate function (2 regs)"); - case Scalar::MaxTypedArrayViewType: MOZ_CRASH("unexpected type"); - } - masm.append(wasm::MemoryAccess(masm.size())); -} - -void -CodeGeneratorX86::loadI64(Scalar::Type type, const Operand& srcAddr, Register64 out) -{ - switch (type) { - case Scalar::Int8: - MOZ_ASSERT(out == Register64(edx, eax)); - masm.movsblWithPatch(srcAddr, out.low); - masm.append(wasm::MemoryAccess(masm.size())); - masm.cdq(); - break; - case Scalar::Uint8: - masm.movzblWithPatch(srcAddr, out.low); - masm.append(wasm::MemoryAccess(masm.size())); - masm.xorl(out.high, out.high); - break; - case Scalar::Int16: - MOZ_ASSERT(out == Register64(edx, eax)); - masm.movswlWithPatch(srcAddr, out.low); - masm.append(wasm::MemoryAccess(masm.size())); - 
masm.cdq(); - break; - case Scalar::Uint16: - masm.movzwlWithPatch(srcAddr, out.low); - masm.append(wasm::MemoryAccess(masm.size())); - masm.xorl(out.high, out.high); - break; - case Scalar::Int32: - MOZ_ASSERT(out == Register64(edx, eax)); - masm.movlWithPatch(srcAddr, out.low); - masm.append(wasm::MemoryAccess(masm.size())); - masm.cdq(); - break; - case Scalar::Uint32: - masm.movlWithPatch(srcAddr, out.low); - masm.append(wasm::MemoryAccess(masm.size())); - masm.xorl(out.high, out.high); - break; - case Scalar::Int64: - if (srcAddr.kind() == Operand::MEM_ADDRESS32) { - Operand low(PatchedAbsoluteAddress(uint32_t(srcAddr.address()) + INT64LOW_OFFSET)); - Operand high(PatchedAbsoluteAddress(uint32_t(srcAddr.address()) + INT64HIGH_OFFSET)); - - masm.movlWithPatch(low, out.low); - masm.append(wasm::MemoryAccess(masm.size())); - masm.movlWithPatch(high, out.high); - masm.append(wasm::MemoryAccess(masm.size())); - } else { - MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP); - Address addr = srcAddr.toAddress(); - Operand low(addr.base, addr.offset + INT64LOW_OFFSET); - Operand high(addr.base, addr.offset + INT64HIGH_OFFSET); - - if (addr.base != out.low) { - masm.movlWithPatch(low, out.low); - masm.append(wasm::MemoryAccess(masm.size())); - masm.movlWithPatch(high, out.high); - masm.append(wasm::MemoryAccess(masm.size())); - } else { - MOZ_ASSERT(addr.base != out.high); - masm.movlWithPatch(high, out.high); - masm.append(wasm::MemoryAccess(masm.size())); - masm.movlWithPatch(low, out.low); - masm.append(wasm::MemoryAccess(masm.size())); - } - } - break; - case Scalar::Float32: - case Scalar::Float64: - case Scalar::Float32x4: - case Scalar::Int8x16: - case Scalar::Int16x8: - case Scalar::Int32x4: - MOZ_CRASH("non-int64 loads should use load()"); - case Scalar::Uint8Clamped: - case Scalar::MaxTypedArrayViewType: - MOZ_CRASH("unexpected array type"); - } -} - -void CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins) { const 
MLoadTypedArrayElementStatic* mir = ins->mir(); Scalar::Type accessType = mir->accessType(); MOZ_ASSERT_IF(accessType == Scalar::Float32, mir->type() == MIRType::Float32); Register ptr = ToRegister(ins->ptr()); - const LDefinition* out = ins->output(); + AnyRegister out = ToAnyRegister(ins->output()); OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr; uint32_t offset = mir->offset(); if (mir->needsBoundsCheck()) { MOZ_ASSERT(offset == 0); if (!mir->fallible()) { - ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType); + ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType); addOutOfLineCode(ool, ins->mir()); } masm.cmpPtr(ptr, ImmWord(mir->length())); if (ool) masm.j(Assembler::AboveOrEqual, ool->entry()); else bailoutIf(Assembler::AboveOrEqual, ins->snapshot()); } Operand srcAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset)); - load(accessType, srcAddr, out); + switch (accessType) { + case Scalar::Int8: masm.movsblWithPatch(srcAddr, out.gpr()); break; + case Scalar::Uint8: masm.movzblWithPatch(srcAddr, out.gpr()); break; + case Scalar::Int16: masm.movswlWithPatch(srcAddr, out.gpr()); break; + case Scalar::Uint16: masm.movzwlWithPatch(srcAddr, out.gpr()); break; + case Scalar::Int32: + case Scalar::Uint32: masm.movlWithPatch(srcAddr, out.gpr()); break; + case Scalar::Float32: masm.vmovssWithPatch(srcAddr, out.fpu()); break; + case Scalar::Float64: masm.vmovsdWithPatch(srcAddr, out.fpu()); break; + default: MOZ_CRASH("Unexpected type"); + } + if (accessType == Scalar::Float64) - masm.canonicalizeDouble(ToFloatRegister(out)); + masm.canonicalizeDouble(out.fpu()); if (accessType == Scalar::Float32) - masm.canonicalizeFloat(ToFloatRegister(out)); + masm.canonicalizeFloat(out.fpu()); + if (ool) masm.bind(ool->rejoin()); } void CodeGeneratorX86::emitWasmCall(LWasmCallBase* ins) { MWasmCall* mir = ins->mir(); @@ -433,87 +343,37 @@ CodeGeneratorX86::visitWasmCallI64(LWasm void 
CodeGeneratorX86::memoryBarrier(MemoryBarrierBits barrier) { if (barrier & MembarStoreLoad) masm.storeLoadFence(); } -void -CodeGeneratorX86::loadSimd(Scalar::Type type, unsigned numElems, const Operand& srcAddr, - FloatRegister out) -{ - switch (type) { - case Scalar::Float32x4: { - switch (numElems) { - // In memory-to-register mode, movss zeroes out the high lanes. - case 1: masm.vmovssWithPatch(srcAddr, out); break; - // See comment above, which also applies to movsd. - case 2: masm.vmovsdWithPatch(srcAddr, out); break; - case 4: masm.vmovupsWithPatch(srcAddr, out); break; - default: MOZ_CRASH("unexpected size for partial load"); - } - break; - } - case Scalar::Int32x4: { - switch (numElems) { - // In memory-to-register mode, movd zeroes out the high lanes. - case 1: masm.vmovdWithPatch(srcAddr, out); break; - // See comment above, which also applies to movq. - case 2: masm.vmovqWithPatch(srcAddr, out); break; - case 4: masm.vmovdquWithPatch(srcAddr, out); break; - default: MOZ_CRASH("unexpected size for partial load"); - } - break; - } - case Scalar::Int8x16: - MOZ_ASSERT(numElems == 16, "unexpected partial load"); - masm.vmovdquWithPatch(srcAddr, out); - break; - case Scalar::Int16x8: - MOZ_ASSERT(numElems == 8, "unexpected partial load"); - masm.vmovdquWithPatch(srcAddr, out); - break; - case Scalar::Int8: - case Scalar::Uint8: - case Scalar::Int16: - case Scalar::Uint16: - case Scalar::Int32: - case Scalar::Uint32: - case Scalar::Int64: - case Scalar::Float32: - case Scalar::Float64: - case Scalar::Uint8Clamped: - case Scalar::MaxTypedArrayViewType: - MOZ_CRASH("should only handle SIMD types"); - } - masm.append(wasm::MemoryAccess(masm.size())); -} - template <typename T> void CodeGeneratorX86::emitWasmLoad(T* ins) { const MWasmLoad* mir = ins->mir(); - - Scalar::Type accessType = mir->accessType(); - MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI"); - MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI"); - 
MOZ_ASSERT(mir->offset() <= INT32_MAX); + MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit); const LAllocation* ptr = ins->ptr(); + Operand srcAddr = ptr->isBogus() ? Operand(PatchedAbsoluteAddress(mir->offset())) : Operand(ToRegister(ptr), mir->offset()); + memoryBarrier(mir->barrierBefore()); + if (mir->type() == MIRType::Int64) - loadI64(accessType, srcAddr, ToOutRegister64(ins)); + masm.wasmLoadI64(mir->accessType(), srcAddr, ToOutRegister64(ins)); else - load(accessType, srcAddr, ins->output()); + masm.wasmLoad(mir->accessType(), mir->numSimdElems(), srcAddr, ToAnyRegister(ins->output())); + + memoryBarrier(mir->barrierAfter()); } void CodeGeneratorX86::visitWasmLoad(LWasmLoad* ins) { emitWasmLoad(ins); } @@ -523,416 +383,228 @@ CodeGeneratorX86::visitWasmLoadI64(LWasm emitWasmLoad(ins); } template <typename T> void CodeGeneratorX86::emitWasmStore(T* ins) { const MWasmStore* mir = ins->mir(); - - Scalar::Type accessType = mir->accessType(); - MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD NYI"); - MOZ_ASSERT(!mir->barrierBefore() && !mir->barrierAfter(), "atomics NYI"); - MOZ_ASSERT(mir->offset() <= INT32_MAX); + MOZ_ASSERT(mir->offset() < wasm::OffsetGuardLimit); const LAllocation* ptr = ins->ptr(); Operand dstAddr = ptr->isBogus() ? 
Operand(PatchedAbsoluteAddress(mir->offset())) : Operand(ToRegister(ptr), mir->offset()); - if (accessType == Scalar::Int64) - storeI64(accessType, ins->getInt64Operand(LWasmStoreI64::ValueIndex), dstAddr); - else - store(accessType, ins->getOperand(LWasmStore::ValueIndex), dstAddr); + memoryBarrier(mir->barrierBefore()); + + if (mir->accessType() == Scalar::Int64) { + Register64 value = ToRegister64(ins->getInt64Operand(LWasmStoreI64::ValueIndex)); + masm.wasmStoreI64(value, dstAddr); + } else { + AnyRegister value = ToAnyRegister(ins->getOperand(LWasmStore::ValueIndex)); + masm.wasmStore(mir->accessType(), mir->numSimdElems(), value, dstAddr); + } + + memoryBarrier(mir->barrierBefore()); } - void CodeGeneratorX86::visitWasmStore(LWasmStore* ins) { emitWasmStore(ins); } void CodeGeneratorX86::visitWasmStoreI64(LWasmStoreI64* ins) { emitWasmStore(ins); } void -CodeGeneratorX86::emitSimdLoad(LAsmJSLoadHeap* ins) -{ - const MAsmJSLoadHeap* mir = ins->mir(); - Scalar::Type type = mir->accessType(); - FloatRegister out = ToFloatRegister(ins->output()); - const LAllocation* ptr = ins->ptr(); - Operand srcAddr = ptr->isBogus() - ? Operand(PatchedAbsoluteAddress(mir->offset())) - : Operand(ToRegister(ptr), mir->offset()); - - bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr); - - unsigned numElems = mir->numSimdElems(); - if (numElems == 3) { - MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4); - - Operand srcAddrZ = - ptr->isBogus() - ? Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset())) - : Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset()); - - // Load XY - loadSimd(type, 2, srcAddr, out); - - // Load Z (W is zeroed) - // This is still in bounds, as we've checked with a manual bounds check - // or we had enough space for sure when removing the bounds check. 
- loadSimd(type, 1, srcAddrZ, ScratchSimd128Reg); - - // Move ZW atop XY - masm.vmovlhps(ScratchSimd128Reg, out, out); - } else { - loadSimd(type, numElems, srcAddr, out); - } - - if (hasBoundsCheck) - cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr)); -} - -void CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) { const MAsmJSLoadHeap* mir = ins->mir(); - Scalar::Type accessType = mir->accessType(); - - if (Scalar::isSimdType(accessType)) - return emitSimdLoad(ins); + MOZ_ASSERT(mir->offset() == 0); const LAllocation* ptr = ins->ptr(); - const LDefinition* out = ins->output(); - Operand srcAddr = ptr->isBogus() - ? Operand(PatchedAbsoluteAddress(mir->offset())) - : Operand(ToRegister(ptr), mir->offset()); + AnyRegister out = ToAnyRegister(ins->output()); - memoryBarrier(mir->barrierBefore()); + Scalar::Type accessType = mir->accessType(); + MOZ_ASSERT(!Scalar::isSimdType(accessType)); - OutOfLineLoadTypedArrayOutOfBounds* ool; - DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSLoadBoundsCheck(mir, ins, &ool); - - load(accessType, srcAddr, out); + OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr; + if (mir->needsBoundsCheck()) { + ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType); + addOutOfLineCode(ool, mir); - if (ool) { - MOZ_ASSERT(hasBoundsCheck); - cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr)); - masm.bind(ool->rejoin()); + masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), ool->entry()); } - memoryBarrier(mir->barrierAfter()); -} - -void -CodeGeneratorX86::store(Scalar::Type accessType, const LAllocation* value, const Operand& dstAddr) -{ - switch (accessType) { - case Scalar::Int8: - case Scalar::Uint8Clamped: - case Scalar::Uint8: - masm.movbWithPatch(ToRegister(value), dstAddr); - break; - - case Scalar::Int16: - case Scalar::Uint16: - masm.movwWithPatch(ToRegister(value), dstAddr); - break; - - case Scalar::Int32: - case Scalar::Uint32: - masm.movlWithPatch(ToRegister(value), dstAddr); - break; - - 
case Scalar::Float32: - masm.vmovssWithPatch(ToFloatRegister(value), dstAddr); - break; - - case Scalar::Float64: - masm.vmovsdWithPatch(ToFloatRegister(value), dstAddr); - break; - - case Scalar::Int64: - MOZ_CRASH("Should be handled in storeI64."); - - case Scalar::Float32x4: - case Scalar::Int8x16: - case Scalar::Int16x8: - case Scalar::Int32x4: - MOZ_CRASH("SIMD stores should be handled in emitSimdStore"); + Operand srcAddr = ptr->isBogus() + ? Operand(PatchedAbsoluteAddress()) + : Operand(ToRegister(ptr), 0); - case Scalar::MaxTypedArrayViewType: - MOZ_CRASH("unexpected type"); - } - masm.append(wasm::MemoryAccess(masm.size())); -} - -void -CodeGeneratorX86::storeI64(Scalar::Type accessType, const LInt64Allocation value, - const Operand& dstAddr) -{ - Register64 input = ToRegister64(value); - MOZ_ASSERT(accessType == Scalar::Int64); - if (dstAddr.kind() == Operand::MEM_ADDRESS32) { - Operand low(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64LOW_OFFSET)); - Operand high(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64HIGH_OFFSET)); + masm.wasmLoad(accessType, 0, srcAddr, out); - masm.movlWithPatch(input.low, low); - masm.append(wasm::MemoryAccess(masm.size())); - masm.movlWithPatch(input.high, high); - masm.append(wasm::MemoryAccess(masm.size())); - } else { - MOZ_ASSERT(dstAddr.kind() == Operand::MEM_REG_DISP); - Address addr = dstAddr.toAddress(); - Operand low(addr.base, addr.offset + INT64LOW_OFFSET); - Operand high(addr.base, addr.offset + INT64HIGH_OFFSET); - - if (addr.base != input.low) { - masm.movlWithPatch(input.low, low); - masm.append(wasm::MemoryAccess(masm.size())); - masm.movlWithPatch(input.high, high); - masm.append(wasm::MemoryAccess(masm.size())); - } else { - MOZ_ASSERT(addr.base != input.high); - masm.movlWithPatch(input.high, high); - masm.append(wasm::MemoryAccess(masm.size())); - masm.movlWithPatch(input.low, low); - masm.append(wasm::MemoryAccess(masm.size())); - } - } + if (ool) + masm.bind(ool->rejoin()); } void 
CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins) { MStoreTypedArrayElementStatic* mir = ins->mir(); Scalar::Type accessType = mir->accessType(); Register ptr = ToRegister(ins->ptr()); const LAllocation* value = ins->value(); canonicalizeIfDeterministic(accessType, value); uint32_t offset = mir->offset(); - if (!mir->needsBoundsCheck()) { - Operand dstAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset)); - store(accessType, value, dstAddr); - return; + MOZ_ASSERT_IF(mir->needsBoundsCheck(), offset == 0); + + Label rejoin; + if (mir->needsBoundsCheck()) { + MOZ_ASSERT(offset == 0); + masm.cmpPtr(ptr, ImmWord(mir->length())); + masm.j(Assembler::AboveOrEqual, &rejoin); } - MOZ_ASSERT(offset == 0); - masm.cmpPtr(ptr, ImmWord(mir->length())); - Label rejoin; - masm.j(Assembler::AboveOrEqual, &rejoin); - - Operand dstAddr(ptr, int32_t(mir->base().asValue())); - store(accessType, value, dstAddr); - masm.bind(&rejoin); -} - -void -CodeGeneratorX86::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in, - const Operand& dstAddr) -{ - switch (type) { - case Scalar::Float32x4: { - switch (numElems) { - // In memory-to-register mode, movss zeroes out the high lanes. - case 1: masm.vmovssWithPatch(in, dstAddr); break; - // See comment above, which also applies to movsd. - case 2: masm.vmovsdWithPatch(in, dstAddr); break; - case 4: masm.vmovupsWithPatch(in, dstAddr); break; - default: MOZ_CRASH("unexpected size for partial load"); - } + Operand dstAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset)); + switch (accessType) { + case Scalar::Int8: + case Scalar::Uint8Clamped: + case Scalar::Uint8: + masm.movbWithPatch(ToRegister(value), dstAddr); break; - } - case Scalar::Int32x4: { - switch (numElems) { - // In memory-to-register mode, movd zeroes out the high lanes. - case 1: masm.vmovdWithPatch(in, dstAddr); break; - // See comment above, which also applies to movsd. 
- case 2: masm.vmovqWithPatch(in, dstAddr); break; - case 4: masm.vmovdquWithPatch(in, dstAddr); break; - default: MOZ_CRASH("unexpected size for partial load"); - } - break; - } - case Scalar::Int8x16: - MOZ_ASSERT(numElems == 16, "unexpected partial store"); - masm.vmovdquWithPatch(in, dstAddr); - break; - case Scalar::Int16x8: - MOZ_ASSERT(numElems == 8, "unexpected partial store"); - masm.vmovdquWithPatch(in, dstAddr); - break; - case Scalar::Int8: - case Scalar::Uint8: case Scalar::Int16: case Scalar::Uint16: + masm.movwWithPatch(ToRegister(value), dstAddr); + break; case Scalar::Int32: case Scalar::Uint32: - case Scalar::Int64: + masm.movlWithPatch(ToRegister(value), dstAddr); + break; case Scalar::Float32: + masm.vmovssWithPatch(ToFloatRegister(value), dstAddr); + break; case Scalar::Float64: - case Scalar::Uint8Clamped: - case Scalar::MaxTypedArrayViewType: - MOZ_CRASH("should only handle SIMD types"); - } - masm.append(wasm::MemoryAccess(masm.size())); -} - -void -CodeGeneratorX86::emitSimdStore(LAsmJSStoreHeap* ins) -{ - const MAsmJSStoreHeap* mir = ins->mir(); - Scalar::Type type = mir->accessType(); - FloatRegister in = ToFloatRegister(ins->value()); - const LAllocation* ptr = ins->ptr(); - Operand dstAddr = ptr->isBogus() - ? Operand(PatchedAbsoluteAddress(mir->offset())) - : Operand(ToRegister(ptr), mir->offset()); - - bool hasBoundsCheck = maybeEmitThrowingAsmJSBoundsCheck(mir, mir, ptr); - - unsigned numElems = mir->numSimdElems(); - if (numElems == 3) { - MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4); - - Operand dstAddrZ = - ptr->isBogus() - ? 
Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset())) - : Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset()); - - // Store XY - storeSimd(type, 2, in, dstAddr); - - masm.vmovhlps(in, ScratchSimd128Reg, ScratchSimd128Reg); - - // Store Z (W is zeroed) - // This is still in bounds, as we've checked with a manual bounds check - // or we had enough space for sure when removing the bounds check. - storeSimd(type, 1, ScratchSimd128Reg, dstAddrZ); - } else { - storeSimd(type, numElems, in, dstAddr); + masm.vmovsdWithPatch(ToFloatRegister(value), dstAddr); + break; + default: + MOZ_CRASH("unexpected type"); } - if (hasBoundsCheck) - cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr)); + if (rejoin.used()) + masm.bind(&rejoin); } void CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) { const MAsmJSStoreHeap* mir = ins->mir(); - Scalar::Type accessType = mir->accessType(); - const LAllocation* value = ins->value(); - - canonicalizeIfDeterministic(accessType, value); - - if (Scalar::isSimdType(accessType)) - return emitSimdStore(ins); + MOZ_ASSERT(mir->offset() == 0); const LAllocation* ptr = ins->ptr(); - Operand dstAddr = ptr->isBogus() - ? Operand(PatchedAbsoluteAddress(mir->offset())) - : Operand(ToRegister(ptr), mir->offset()); + const LAllocation* value = ins->value(); - memoryBarrier(mir->barrierBefore()); - - Label* rejoin; - DebugOnly<bool> hasBoundsCheck = maybeEmitAsmJSStoreBoundsCheck(mir, ins, &rejoin); + Scalar::Type accessType = mir->accessType(); + MOZ_ASSERT(!Scalar::isSimdType(accessType)); + canonicalizeIfDeterministic(accessType, value); - store(accessType, value, dstAddr); + Operand dstAddr = ptr->isBogus() + ? 
Operand(PatchedAbsoluteAddress()) + : Operand(ToRegister(ptr), 0); - if (rejoin) { - MOZ_ASSERT(hasBoundsCheck); - cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr)); - masm.bind(rejoin); - } + Label rejoin; + if (mir->needsBoundsCheck()) + masm.wasmBoundsCheck(Assembler::AboveOrEqual, ToRegister(ptr), &rejoin); - memoryBarrier(mir->barrierAfter()); + masm.wasmStore(accessType, 0, ToAnyRegister(value), dstAddr); + + if (rejoin.used()) + masm.bind(&rejoin); } // Perform bounds checking on the access if necessary; if it fails, // jump to out-of-line code that throws. If the bounds check passes, // set up the heap address in addrTemp. void -CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg, - const MWasmMemoryAccess* mir) +CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg) { - maybeEmitWasmBoundsCheckBranch(mir, ptrReg); - // Add in the actual heap pointer explicitly, to avoid opening up // the abstraction that is atomicBinopToTypedIntArray at this time. masm.movl(ptrReg, addrTemp); - masm.addlWithPatch(Imm32(mir->offset()), addrTemp); + masm.addlWithPatch(Imm32(0), addrTemp); masm.append(wasm::MemoryAccess(masm.size())); } void CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins) { MAsmJSCompareExchangeHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + Scalar::Type accessType = mir->accessType(); Register ptrReg = ToRegister(ins->ptr()); Register oldval = ToRegister(ins->oldValue()); Register newval = ToRegister(ins->newValue()); Register addrTemp = ToRegister(ins->addrTemp()); - asmJSAtomicComputeAddress(addrTemp, ptrReg, mir); + asmJSAtomicComputeAddress(addrTemp, ptrReg); - Address memAddr(addrTemp, mir->offset()); + Address memAddr(addrTemp, 0); masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? 
Scalar::Int32 : accessType, memAddr, oldval, newval, InvalidReg, ToAnyRegister(ins->output())); } void CodeGeneratorX86::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins) { MAsmJSAtomicExchangeHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + Scalar::Type accessType = mir->accessType(); Register ptrReg = ToRegister(ins->ptr()); Register value = ToRegister(ins->value()); Register addrTemp = ToRegister(ins->addrTemp()); - asmJSAtomicComputeAddress(addrTemp, ptrReg, mir); + asmJSAtomicComputeAddress(addrTemp, ptrReg); - Address memAddr(addrTemp, mir->offset()); + Address memAddr(addrTemp, 0); masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType, memAddr, value, InvalidReg, ToAnyRegister(ins->output())); } void CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins) { MAsmJSAtomicBinopHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + Scalar::Type accessType = mir->accessType(); Register ptrReg = ToRegister(ins->ptr()); Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp()); Register addrTemp = ToRegister(ins->addrTemp()); const LAllocation* value = ins->value(); AtomicOp op = mir->operation(); - asmJSAtomicComputeAddress(addrTemp, ptrReg, mir); + asmJSAtomicComputeAddress(addrTemp, ptrReg); - Address memAddr(addrTemp, mir->offset()); + Address memAddr(addrTemp, 0); if (value->isConstant()) { atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? 
Scalar::Int32 : accessType, Imm32(ToInt32(value)), memAddr, temp, InvalidReg, ToAnyRegister(ins->output())); } else { @@ -944,27 +616,28 @@ CodeGeneratorX86::visitAsmJSAtomicBinopH ToAnyRegister(ins->output())); } } void CodeGeneratorX86::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins) { MAsmJSAtomicBinopHeap* mir = ins->mir(); + MOZ_ASSERT(mir->offset() == 0); + MOZ_ASSERT(!mir->hasUses()); + Scalar::Type accessType = mir->accessType(); Register ptrReg = ToRegister(ins->ptr()); Register addrTemp = ToRegister(ins->addrTemp()); const LAllocation* value = ins->value(); AtomicOp op = mir->operation(); - MOZ_ASSERT(!mir->hasUses()); + asmJSAtomicComputeAddress(addrTemp, ptrReg); - asmJSAtomicComputeAddress(addrTemp, ptrReg, mir); - - Address memAddr(addrTemp, mir->offset()); + Address memAddr(addrTemp, 0); if (value->isConstant()) atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), memAddr); else atomicBinopToTypedIntArray(op, accessType, ToRegister(value), memAddr); } void CodeGeneratorX86::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
--- a/js/src/jit/x86/CodeGenerator-x86.h +++ b/js/src/jit/x86/CodeGenerator-x86.h @@ -23,33 +23,20 @@ class CodeGeneratorX86 : public CodeGene return this; } protected: ValueOperand ToValue(LInstruction* ins, size_t pos); ValueOperand ToOutValue(LInstruction* ins); ValueOperand ToTempValue(LInstruction* ins, size_t pos); - void load(Scalar::Type vt, const Operand& srcAddr, const LDefinition* out); - void loadI64(Scalar::Type vt, const Operand& srcAddr, const Register64 out); - void store(Scalar::Type vt, const LAllocation* value, const Operand& dstAddr); - void storeI64(Scalar::Type vt, const LInt64Allocation value, const Operand& dstAddr); - - void loadSimd(Scalar::Type type, unsigned numElems, const Operand& srcAddr, FloatRegister out); - void emitSimdLoad(LAsmJSLoadHeap* ins); - - void storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in, const Operand& dstAddr); - void emitSimdStore(LAsmJSStoreHeap* ins); - void memoryBarrier(MemoryBarrierBits barrier); - template <typename T> - void emitWasmLoad(T* ins); - template <typename T> - void emitWasmStore(T* ins); + template <typename T> void emitWasmLoad(T* ins); + template <typename T> void emitWasmStore(T* ins); public: CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm); public: void visitBox(LBox* box); void visitBoxFloatingPoint(LBoxFloatingPoint* box); void visitUnbox(LUnbox* unbox); @@ -97,18 +84,17 @@ class CodeGeneratorX86 : public CodeGene void visitClzI64(LClzI64* lir); void visitCtzI64(LCtzI64* lir); void visitNotI64(LNotI64* lir); void visitWasmTruncateToInt64(LWasmTruncateToInt64* lir); void visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir); void visitTestI64AndBranch(LTestI64AndBranch* lir); private: - void asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg, - const MWasmMemoryAccess* access); + void asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg); }; typedef CodeGeneratorX86 CodeGeneratorSpecific; } // namespace jit } // namespace js 
#endif /* jit_x86_CodeGenerator_x86_h */
--- a/js/src/jit/x86/Lowering-x86.cpp +++ b/js/src/jit/x86/Lowering-x86.cpp @@ -269,16 +269,40 @@ void LIRGeneratorX86::visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins) { MOZ_ASSERT(ins->input()->type() == MIRType::Int32); LAsmJSUInt32ToFloat32* lir = new(alloc()) LAsmJSUInt32ToFloat32(useRegisterAtStart(ins->input()), temp()); define(lir, ins); } void +LIRGeneratorX86::visitWasmLoad(MWasmLoad* ins) +{ + if (ins->type() != MIRType::Int64) { + lowerWasmLoad(ins); + return; + } + + MDefinition* base = ins->base(); + MOZ_ASSERT(base->type() == MIRType::Int32); + + auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base)); + + Scalar::Type accessType = ins->accessType(); + if (accessType == Scalar::Int8 || accessType == Scalar::Int16 || accessType == Scalar::Int32) { + // We use cdq to sign-extend the result and cdq demands these registers. + defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(edx)), + LAllocation(AnyRegister(eax)))); + return; + } + + defineInt64(lir, ins); +} + +void LIRGeneratorX86::visitWasmStore(MWasmStore* ins) { MDefinition* base = ins->base(); MOZ_ASSERT(base->type() == MIRType::Int32); LAllocation baseAlloc = useRegisterOrZeroAtStart(base); LAllocation valueAlloc; @@ -316,32 +340,32 @@ LIRGeneratorX86::visitWasmStore(MWasmSto void LIRGeneratorX86::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) { MDefinition* base = ins->base(); MOZ_ASSERT(base->type() == MIRType::Int32); // For simplicity, require a register if we're going to emit a bounds-check // branch, so that we don't have special cases for constants. - LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins) + LAllocation baseAlloc = ins->needsBoundsCheck() ? 
useRegisterAtStart(base) : useRegisterOrZeroAtStart(base); define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins); } void LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) { MDefinition* base = ins->base(); MOZ_ASSERT(base->type() == MIRType::Int32); // For simplicity, require a register if we're going to emit a bounds-check // branch, so that we don't have special cases for constants. - LAllocation baseAlloc = gen->needsBoundsCheckBranch(ins) + LAllocation baseAlloc = ins->needsBoundsCheck() ? useRegisterAtStart(base) : useRegisterOrZeroAtStart(base); LAsmJSStoreHeap* lir = nullptr; switch (ins->accessType()) { case Scalar::Int8: case Scalar::Uint8: // See comment for LIRGeneratorX86::useByteOpRegister. lir = new(alloc()) LAsmJSStoreHeap(baseAlloc, useFixed(ins->value(), eax)); @@ -567,40 +591,16 @@ void LIRGeneratorX86::lowerUModI64(MMod* mod) { LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs())); defineReturn(lir, mod); } void -LIRGeneratorX86::visitWasmLoad(MWasmLoad* ins) -{ - if (ins->type() != MIRType::Int64) { - lowerWasmLoad(ins); - return; - } - - MDefinition* base = ins->base(); - MOZ_ASSERT(base->type() == MIRType::Int32); - - auto* lir = new(alloc()) LWasmLoadI64(useRegisterOrZeroAtStart(base)); - - Scalar::Type accessType = ins->accessType(); - if (accessType == Scalar::Int8 || accessType == Scalar::Int16 || accessType == Scalar::Int32) { - // We use cdq to sign-extend the result and cdq demands these registers. - defineInt64Fixed(lir, ins, LInt64Allocation(LAllocation(AnyRegister(edx)), - LAllocation(AnyRegister(eax)))); - return; - } - - defineInt64(lir, ins); -} - -void LIRGeneratorX86::visitSubstr(MSubstr* ins) { // Due to lack of registers on x86, we reuse the string register as // temporary. As a result we only need two temporary registers and take a // bugos temporary as fifth argument. 
LSubstr* lir = new (alloc()) LSubstr(useRegister(ins->string()), useRegister(ins->begin()), useRegister(ins->length()),
--- a/js/src/jit/x86/Lowering-x86.h +++ b/js/src/jit/x86/Lowering-x86.h @@ -51,30 +51,30 @@ class LIRGeneratorX86 : public LIRGenera void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs); void lowerDivI64(MDiv* div); void lowerModI64(MMod* mod); void lowerUDivI64(MDiv* div); void lowerUModI64(MMod* mod); public: - void visitWasmLoad(MWasmLoad* ins); void visitBox(MBox* box); void visitUnbox(MUnbox* unbox); void visitReturn(MReturn* ret); void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins); void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins); void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins); void visitAsmJSUnsignedToDouble(MAsmJSUnsignedToDouble* ins); void visitAsmJSUnsignedToFloat32(MAsmJSUnsignedToFloat32* ins); void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins); void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins); void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins); void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins); void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins); + void visitWasmLoad(MWasmLoad* ins); void visitWasmStore(MWasmStore* ins); void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins); void visitSubstr(MSubstr* ins); void visitRandom(MRandom* ins); void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins); void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins); void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins); void lowerPhi(MPhi* phi);
--- a/js/src/jit/x86/MacroAssembler-x86-inl.h +++ b/js/src/jit/x86/MacroAssembler-x86-inl.h @@ -950,16 +950,35 @@ MacroAssembler::truncateDoubleToUInt64(A load32(Address(dest.base, dest.offset + INT64HIGH_OFFSET), temp); orl(Imm32(0x80000000), temp); store32(temp, Address(dest.base, dest.offset + INT64HIGH_OFFSET)); bind(&done); } +// ======================================================================== +// wasm support + +template <class L> +void +MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label) +{ + CodeOffset off = cmp32WithPatch(index, Imm32(0)); + append(wasm::BoundsCheck(off.offset())); + + j(cond, label); +} + +void +MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) +{ + reinterpret_cast<uint32_t*>(patchAt)[-1] = limit; +} + //}}} check_macroassembler_style // =============================================================== // Note: this function clobbers the source register. void MacroAssemblerX86::convertUInt32ToDouble(Register src, FloatRegister dest) { // src is [0, 2^32-1]
--- a/js/src/jit/x86/MacroAssembler-x86.cpp +++ b/js/src/jit/x86/MacroAssembler-x86.cpp @@ -519,16 +519,250 @@ MacroAssembler::storeUnboxedValue(Consta template void MacroAssembler::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const Address& dest, MIRType slotType); template void MacroAssembler::storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const BaseIndex& dest, MIRType slotType); // wasm specific methods, used in both the wasm baseline compiler and ion. + +void +MacroAssembler::wasmLoad(Scalar::Type type, unsigned numSimdElems, Operand srcAddr, AnyRegister out) +{ + switch (type) { + case Scalar::Int8: + movsblWithPatch(srcAddr, out.gpr()); + break; + case Scalar::Uint8: + movzblWithPatch(srcAddr, out.gpr()); + break; + case Scalar::Int16: + movswlWithPatch(srcAddr, out.gpr()); + break; + case Scalar::Uint16: + movzwlWithPatch(srcAddr, out.gpr()); + break; + case Scalar::Int32: + case Scalar::Uint32: + movlWithPatch(srcAddr, out.gpr()); + break; + case Scalar::Float32: + vmovssWithPatch(srcAddr, out.fpu()); + break; + case Scalar::Float64: + vmovsdWithPatch(srcAddr, out.fpu()); + break; + case Scalar::Float32x4: + switch (numSimdElems) { + // In memory-to-register mode, movss zeroes out the high lanes. + case 1: vmovssWithPatch(srcAddr, out.fpu()); break; + // See comment above, which also applies to movsd. + case 2: vmovsdWithPatch(srcAddr, out.fpu()); break; + case 4: vmovupsWithPatch(srcAddr, out.fpu()); break; + default: MOZ_CRASH("unexpected size for partial load"); + } + break; + case Scalar::Int32x4: + switch (numSimdElems) { + // In memory-to-register mode, movd zeroes out the high lanes. + case 1: vmovdWithPatch(srcAddr, out.fpu()); break; + // See comment above, which also applies to movq. 
+ case 2: vmovqWithPatch(srcAddr, out.fpu()); break; + case 4: vmovdquWithPatch(srcAddr, out.fpu()); break; + default: MOZ_CRASH("unexpected size for partial load"); + } + break; + case Scalar::Int8x16: + MOZ_ASSERT(numSimdElems == 16, "unexpected partial load"); + vmovdquWithPatch(srcAddr, out.fpu()); + break; + case Scalar::Int16x8: + MOZ_ASSERT(numSimdElems == 8, "unexpected partial load"); + vmovdquWithPatch(srcAddr, out.fpu()); + break; + case Scalar::Int64: + case Scalar::Uint8Clamped: + case Scalar::MaxTypedArrayViewType: + MOZ_CRASH("unexpected type"); + } + append(wasm::MemoryAccess(size())); +} + +void +MacroAssembler::wasmLoadI64(Scalar::Type type, Operand srcAddr, Register64 out) +{ + switch (type) { + case Scalar::Int8: + MOZ_ASSERT(out == Register64(edx, eax)); + movsblWithPatch(srcAddr, out.low); + append(wasm::MemoryAccess(size())); + cdq(); + break; + case Scalar::Uint8: + movzblWithPatch(srcAddr, out.low); + append(wasm::MemoryAccess(size())); + xorl(out.high, out.high); + break; + case Scalar::Int16: + MOZ_ASSERT(out == Register64(edx, eax)); + movswlWithPatch(srcAddr, out.low); + append(wasm::MemoryAccess(size())); + cdq(); + break; + case Scalar::Uint16: + movzwlWithPatch(srcAddr, out.low); + append(wasm::MemoryAccess(size())); + xorl(out.high, out.high); + break; + case Scalar::Int32: + MOZ_ASSERT(out == Register64(edx, eax)); + movlWithPatch(srcAddr, out.low); + append(wasm::MemoryAccess(size())); + cdq(); + break; + case Scalar::Uint32: + movlWithPatch(srcAddr, out.low); + append(wasm::MemoryAccess(size())); + xorl(out.high, out.high); + break; + case Scalar::Int64: + if (srcAddr.kind() == Operand::MEM_ADDRESS32) { + Operand low(PatchedAbsoluteAddress(uint32_t(srcAddr.address()) + INT64LOW_OFFSET)); + Operand high(PatchedAbsoluteAddress(uint32_t(srcAddr.address()) + INT64HIGH_OFFSET)); + + movlWithPatch(low, out.low); + append(wasm::MemoryAccess(size())); + movlWithPatch(high, out.high); + append(wasm::MemoryAccess(size())); + } else { + 
MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP); + Address addr = srcAddr.toAddress(); + Operand low(addr.base, addr.offset + INT64LOW_OFFSET); + Operand high(addr.base, addr.offset + INT64HIGH_OFFSET); + + if (addr.base != out.low) { + movlWithPatch(low, out.low); + append(wasm::MemoryAccess(size())); + movlWithPatch(high, out.high); + append(wasm::MemoryAccess(size())); + } else { + MOZ_ASSERT(addr.base != out.high); + movlWithPatch(high, out.high); + append(wasm::MemoryAccess(size())); + movlWithPatch(low, out.low); + append(wasm::MemoryAccess(size())); + } + } + break; + case Scalar::Float32: + case Scalar::Float64: + case Scalar::Float32x4: + case Scalar::Int8x16: + case Scalar::Int16x8: + case Scalar::Int32x4: + MOZ_CRASH("non-int64 loads should use load()"); + case Scalar::Uint8Clamped: + case Scalar::MaxTypedArrayViewType: + MOZ_CRASH("unexpected array type"); + } +} + +void +MacroAssembler::wasmStore(Scalar::Type type, unsigned numSimdElems, AnyRegister value, + Operand dstAddr) +{ + switch (type) { + case Scalar::Int8: + case Scalar::Uint8Clamped: + case Scalar::Uint8: + movbWithPatch(value.gpr(), dstAddr); + break; + case Scalar::Int16: + case Scalar::Uint16: + movwWithPatch(value.gpr(), dstAddr); + break; + case Scalar::Int32: + case Scalar::Uint32: + movlWithPatch(value.gpr(), dstAddr); + break; + case Scalar::Float32: + vmovssWithPatch(value.fpu(), dstAddr); + break; + case Scalar::Float64: + vmovsdWithPatch(value.fpu(), dstAddr); + break; + case Scalar::Float32x4: + switch (numSimdElems) { + // In memory-to-register mode, movss zeroes out the high lanes. + case 1: vmovssWithPatch(value.fpu(), dstAddr); break; + // See comment above, which also applies to movsd. 
+ case 2: vmovsdWithPatch(value.fpu(), dstAddr); break; + case 4: vmovupsWithPatch(value.fpu(), dstAddr); break; + default: MOZ_CRASH("unexpected size for partial load"); + } + break; + case Scalar::Int32x4: + switch (numSimdElems) { + // In memory-to-register mode, movd zeroes out the high lanes. + case 1: vmovdWithPatch(value.fpu(), dstAddr); break; + // See comment above, which also applies to movsd. + case 2: vmovqWithPatch(value.fpu(), dstAddr); break; + case 4: vmovdquWithPatch(value.fpu(), dstAddr); break; + default: MOZ_CRASH("unexpected size for partial load"); + } + break; + case Scalar::Int8x16: + MOZ_ASSERT(numSimdElems == 16, "unexpected partial store"); + vmovdquWithPatch(value.fpu(), dstAddr); + break; + case Scalar::Int16x8: + MOZ_ASSERT(numSimdElems == 8, "unexpected partial store"); + vmovdquWithPatch(value.fpu(), dstAddr); + break; + case Scalar::Int64: + MOZ_CRASH("Should be handled in storeI64."); + case Scalar::MaxTypedArrayViewType: + MOZ_CRASH("unexpected type"); + } + append(wasm::MemoryAccess(size())); +} + +void +MacroAssembler::wasmStoreI64(Register64 value, Operand dstAddr) +{ + if (dstAddr.kind() == Operand::MEM_ADDRESS32) { + Operand low(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64LOW_OFFSET)); + Operand high(PatchedAbsoluteAddress(uint32_t(dstAddr.address()) + INT64HIGH_OFFSET)); + + movlWithPatch(value.low, low); + append(wasm::MemoryAccess(size())); + movlWithPatch(value.high, high); + append(wasm::MemoryAccess(size())); + } else { + MOZ_ASSERT(dstAddr.kind() == Operand::MEM_REG_DISP); + Address addr = dstAddr.toAddress(); + Operand low(addr.base, addr.offset + INT64LOW_OFFSET); + Operand high(addr.base, addr.offset + INT64HIGH_OFFSET); + + if (addr.base != value.low) { + movlWithPatch(value.low, low); + append(wasm::MemoryAccess(size())); + movlWithPatch(value.high, high); + append(wasm::MemoryAccess(size())); + } else { + MOZ_ASSERT(addr.base != value.high); + movlWithPatch(value.high, high); + 
append(wasm::MemoryAccess(size())); + movlWithPatch(value.low, low); + append(wasm::MemoryAccess(size())); + } + } +} + void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry) { Label done; vcvttsd2si(input, output); branch32(Assembler::Condition::NotSigned, output, Imm32(0), &done); loadConstantDouble(double(int32_t(0x80000000)), ScratchDoubleReg);
--- a/js/src/jsapi-tests/testMappedArrayBuffer.cpp +++ b/js/src/jsapi-tests/testMappedArrayBuffer.cpp @@ -1,25 +1,28 @@ /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- * vim: set ts=8 sts=4 et sw=4 tw=99: */ -#ifdef XP_UNIX #include <fcntl.h> #include <stdio.h> -#include <string.h> -#include <sys/stat.h> -#include <sys/types.h> -#include <unistd.h> #include "jsfriendapi.h" #include "js/StructuredClone.h" #include "jsapi-tests/tests.h" #include "vm/ArrayBufferObject.h" +#ifdef XP_WIN +# include <io.h> +# define GET_OS_FD(a) int(_get_osfhandle(a)) +#else +# include <unistd.h> +# define GET_OS_FD(a) (a) +#endif + const char test_data[] = "1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; const char test_filename[] = "temp-bug945152_MappedArrayBuffer"; BEGIN_TEST(testMappedArrayBuffer_bug945152) { TempFile test_file; FILE* test_stream = test_file.open(test_filename); CHECK(fputs(test_data, test_stream) != EOF); @@ -47,25 +50,28 @@ BEGIN_TEST(testMappedArrayBuffer_bug9451 CHECK(TestCloneObject()); // Steal mapped array buffer contents. CHECK(TestStealContents()); // Transfer mapped array buffer contents. CHECK(TestTransferObject()); + // GC so we can remove the file we created. 
+ GC(cx); + test_file.remove(); return true; } JSObject* CreateNewObject(const int offset, const int length) { int fd = open(test_filename, O_RDONLY); - void* ptr = JS_CreateMappedArrayBufferContents(fd, offset, length); + void* ptr = JS_CreateMappedArrayBufferContents(GET_OS_FD(fd), offset, length); close(fd); if (!ptr) return nullptr; JSObject* obj = JS_NewMappedArrayBufferWithContents(cx, length, ptr); if (!obj) { JS_ReleaseMappedArrayBufferContents(ptr, length); return nullptr; } @@ -98,17 +104,17 @@ bool TestCreateObject(uint32_t offset, u CHECK(VerifyObject(obj, offset, length, true)); return true; } bool TestReleaseContents() { int fd = open(test_filename, O_RDONLY); - void* ptr = JS_CreateMappedArrayBufferContents(fd, 0, 12); + void* ptr = JS_CreateMappedArrayBufferContents(GET_OS_FD(fd), 0, 12); close(fd); if (!ptr) return false; JS_ReleaseMappedArrayBufferContents(ptr, 12); return true; } @@ -178,9 +184,10 @@ bool TestTransferObject() static void GC(JSContext* cx) { JS_GC(cx); // Trigger another to wait for background finalization to end. JS_GC(cx); } END_TEST(testMappedArrayBuffer_bug945152) -#endif + +#undef GET_OS_FD
--- a/js/src/jsapi.cpp +++ b/js/src/jsapi.cpp @@ -6207,16 +6207,19 @@ JS_SetGlobalJitCompilerOption(JSContext* jit::DefaultJitOptions defaultValues; value = defaultValues.jumpThreshold; } jit::JitOptions.jumpThreshold = value; break; case JSJITCOMPILER_WASM_TEST_MODE: jit::JitOptions.wasmTestMode = !!value; break; + case JSJITCOMPILER_WASM_FOLD_OFFSETS: + jit::JitOptions.wasmFoldOffsets = !!value; + break; case JSJITCOMPILER_ION_INTERRUPT_WITHOUT_SIGNAL: jit::JitOptions.ionInterruptWithoutSignals = !!value; break; default: break; } } @@ -6237,16 +6240,18 @@ JS_GetGlobalJitCompilerOption(JSContext* case JSJITCOMPILER_ION_ENABLE: return JS::ContextOptionsRef(cx).ion(); case JSJITCOMPILER_BASELINE_ENABLE: return JS::ContextOptionsRef(cx).baseline(); case JSJITCOMPILER_OFFTHREAD_COMPILATION_ENABLE: return rt->canUseOffthreadIonCompilation(); case JSJITCOMPILER_WASM_TEST_MODE: return jit::JitOptions.wasmTestMode ? 1 : 0; + case JSJITCOMPILER_WASM_FOLD_OFFSETS: + return jit::JitOptions.wasmFoldOffsets ? 1 : 0; case JSJITCOMPILER_ION_INTERRUPT_WITHOUT_SIGNAL: return jit::JitOptions.ionInterruptWithoutSignals ? 1 : 0; default: break; } #endif return 0; }
--- a/js/src/jsapi.h +++ b/js/src/jsapi.h @@ -5650,17 +5650,18 @@ JS_SetOffthreadIonCompilationEnabled(JSC Register(ION_WARMUP_TRIGGER, "ion.warmup.trigger") \ Register(ION_GVN_ENABLE, "ion.gvn.enable") \ Register(ION_FORCE_IC, "ion.forceinlineCaches") \ Register(ION_ENABLE, "ion.enable") \ Register(ION_INTERRUPT_WITHOUT_SIGNAL, "ion.interrupt-without-signals") \ Register(BASELINE_ENABLE, "baseline.enable") \ Register(OFFTHREAD_COMPILATION_ENABLE, "offthread-compilation.enable") \ Register(JUMP_THRESHOLD, "jump-threshold") \ - Register(WASM_TEST_MODE, "wasm.test-mode") + Register(WASM_TEST_MODE, "wasm.test-mode") \ + Register(WASM_FOLD_OFFSETS, "wasm.fold-offsets") typedef enum JSJitCompilerOption { #define JIT_COMPILER_DECLARE(key, str) \ JSJITCOMPILER_ ## key, JIT_COMPILER_OPTIONS(JIT_COMPILER_DECLARE) #undef JIT_COMPILER_DECLARE
--- a/js/src/shell/js.cpp +++ b/js/src/shell/js.cpp @@ -1075,16 +1075,22 @@ Version(JSContext* cx, unsigned argc, Va return false; } JS_SetVersionForCompartment(js::GetContextCompartment(cx), JSVersion(v)); args.rval().setInt32(origVersion); } return true; } +#ifdef XP_WIN +# define GET_FD_FROM_FILE(a) int(_get_osfhandle(fileno(a))) +#else +# define GET_FD_FROM_FILE(a) fileno(a) +#endif + static bool CreateMappedArrayBuffer(JSContext* cx, unsigned argc, Value* vp) { CallArgs args = CallArgsFromVp(argc, vp); if (args.length() < 1 || args.length() > 3) { JS_ReportErrorNumber(cx, my_GetErrorMessage, nullptr, args.length() < 1 ? JSSMSG_NOT_ENOUGH_ARGS : JSSMSG_TOO_MANY_ARGS, @@ -1142,30 +1148,32 @@ CreateMappedArrayBuffer(JSContext* cx, u if (st.st_size < off_t(offset)) { JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_ARG_INDEX_OUT_OF_RANGE, "2"); return false; } size = st.st_size - offset; } - void* contents = JS_CreateMappedArrayBufferContents(fileno(file), offset, size); + void* contents = JS_CreateMappedArrayBufferContents(GET_FD_FROM_FILE(file), offset, size); if (!contents) { JS_ReportError(cx, "failed to allocate mapped array buffer contents (possibly due to bad alignment)"); return false; } RootedObject obj(cx, JS_NewMappedArrayBufferWithContents(cx, size, contents)); if (!obj) return false; args.rval().setObject(*obj); return true; } +#undef GET_FD_FROM_FILE + static bool AddPromiseReactions(JSContext* cx, unsigned argc, Value* vp) { CallArgs args = CallArgsFromVp(argc, vp); if (args.length() != 3) { JS_ReportErrorNumber(cx, my_GetErrorMessage, nullptr, args.length() < 3 ? JSSMSG_NOT_ENOUGH_ARGS : JSSMSG_TOO_MANY_ARGS,
new file mode 100644 --- /dev/null +++ b/js/src/tests/ecma_6/LexicalEnvironment/nondefinable-function-same-script.js @@ -0,0 +1,24 @@ +// |reftest| skip-if(!xulRuntime.shell) + +function assertEvaluateAndIndirectEvalThrows(str) { + assertThrowsInstanceOf(() => evaluate(str), TypeError); + assertThrowsInstanceOf(() => (1,eval)(str), TypeError); +} + +// Regular vars +assertEvaluateAndIndirectEvalThrows(`var NaN; function NaN() {}`); + +// for-of vars +assertEvaluateAndIndirectEvalThrows(`for (var NaN of []); function NaN() {}`); + +// Annex B.3.3 synthesized vars +assertEvaluateAndIndirectEvalThrows(`{ function NaN() {} } function NaN() {}`); + +// Non-data properties +Object.defineProperty(this, 'foo', { set: function() {} }); +assertEvaluateAndIndirectEvalThrows(`var foo; function foo() {}`); +assertEvaluateAndIndirectEvalThrows(`for (var foo of []); function foo() {}`); +assertEvaluateAndIndirectEvalThrows(`{ function foo() {} } function foo() {}`); + +if (typeof reportCompare === "function") + reportCompare(true, true);
--- a/js/src/vm/ArrayBufferObject-inl.h +++ b/js/src/vm/ArrayBufferObject-inl.h @@ -44,30 +44,38 @@ AnyArrayBufferByteLength(const ArrayBuff } inline size_t WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf) { if (buf->is<ArrayBufferObject>()) return buf->as<ArrayBufferObject>().wasmMappedSize(); #ifdef WASM_HUGE_MEMORY - return wasm::MappedSize; + return wasm::HugeMappedSize; #else return buf->as<SharedArrayBufferObject>().byteLength(); #endif } inline uint32_t WasmArrayBufferActualByteLength(const ArrayBufferObjectMaybeShared* buf) { if (buf->is<ArrayBufferObject>()) return buf->as<ArrayBufferObject>().wasmActualByteLength(); return buf->as<SharedArrayBufferObject>().byteLength(); } +inline mozilla::Maybe<uint32_t> +WasmArrayBufferMaxSize(const ArrayBufferObjectMaybeShared* buf) +{ + if (buf->is<ArrayBufferObject>()) + return buf->as<ArrayBufferObject>().wasmMaxSize(); + return mozilla::Some(buf->as<SharedArrayBufferObject>().byteLength()); +} + inline ArrayBufferObjectMaybeShared& AsAnyArrayBuffer(HandleValue val) { if (val.toObject().is<ArrayBufferObject>()) return val.toObject().as<ArrayBufferObject>(); return val.toObject().as<SharedArrayBufferObject>(); }
--- a/js/src/vm/ArrayBufferObject.cpp +++ b/js/src/vm/ArrayBufferObject.cpp @@ -55,16 +55,17 @@ #include "vm/Shape-inl.h" using JS::ToInt32; using mozilla::DebugOnly; using mozilla::CheckedInt; using mozilla::Some; using mozilla::Maybe; +using mozilla::Nothing; using namespace js; using namespace js::gc; /* * Convert |v| to an array index for an array of length |length| per * the Typed Array Specification section 7.0, |subarray|. If successful, * the output value is in the range [0, length]. @@ -381,27 +382,27 @@ ArrayBufferObject::changeContents(JSCont /* * Wasm Raw Buf Linear Memory Structure * * The linear heap in Wasm is an mmaped array buffer. Several * constants manage its lifetime: * * - length - the wasm-visible current length of the buffer. Acesses in the - * range [0, length] succeed. May only increase + * range [0, length] succeed. May only increase * - * - boundsCheckLimit - size against which we perform bounds checks. It is - * always a constant offset smaller than mapped_size. Currently that constant - * offset is 0. + * - boundsCheckLimit - when !WASM_HUGE_MEMORY, the size against which we + * perform bounds checks. It is always a constant offset smaller than + * mappedSize. Currently that constant offset is 0. * * - max - the optional declared limit on how much length can grow. * - * - mapped_size - the actual mmaped size. Access in the range - * [0, mapped_size] will either succeed, or be handled by the wasm signal - * handlers. + * - mappedSize - the actual mmaped size. Access in the range + * [0, mappedSize] will either succeed, or be handled by the wasm signal + * handlers. * * The below diagram shows the layout of the wams heap. The wasm-visible * portion of the heap starts at 0. There is one extra page prior to the * start of the wasm heap which contains the WasmArrayRawBuffer struct at * its end. (i.e. right before the start of the WASM heap). 
* * WasmArrayRawBuffer * \ ArrayBufferObject::dataPointer() @@ -489,93 +490,104 @@ class js::WasmArrayRawBuffer Maybe<uint32_t> maxSize() const { return maxSize_; } size_t allocatedBytes() const { return mappedSize_ + gc::SystemPageSize(); } +#ifndef WASM_HUGE_MEMORY uint32_t boundsCheckLimit() const { -#ifdef WASM_HUGE_MEMORY - MOZ_CRASH(); - return 0; -#else - return (uint32_t) mappedSize_; + MOZ_ASSERT(mappedSize_ <= UINT32_MAX); + MOZ_ASSERT(mappedSize_ >= wasm::GuardSize); + MOZ_ASSERT(wasm::IsValidBoundsCheckImmediate(mappedSize_ - wasm::GuardSize)); + return mappedSize_ - wasm::GuardSize; + } #endif - } - MOZ_MUST_USE bool growLength(uint32_t deltaLength) - { - // This should be guaranteed by Instance::growMemory - MOZ_ASSERT(maxSize_); - MOZ_ASSERT(deltaLength % wasm::PageSize == 0); + MOZ_MUST_USE bool growToSizeInPlace(uint32_t newSize) { + MOZ_ASSERT(newSize >= actualByteLength()); + MOZ_ASSERT_IF(maxSize(), newSize <= maxSize().value()); + MOZ_ASSERT(newSize <= mappedSize()); - CheckedInt<uint32_t> curLength = actualByteLength(); - CheckedInt<uint32_t> newLength = curLength + deltaLength; - MOZ_RELEASE_ASSERT(newLength.isValid()); - MOZ_ASSERT(newLength.value() <= maxSize_.value()); + uint32_t delta = newSize - actualByteLength(); + MOZ_ASSERT(delta % wasm::PageSize == 0); - uint8_t* dataEnd = dataPointer() + curLength.value(); - MOZ_ASSERT(((intptr_t)dataEnd) % gc::SystemPageSize() == 0); + uint8_t* dataEnd = dataPointer() + actualByteLength(); + MOZ_ASSERT(uintptr_t(dataEnd) % gc::SystemPageSize() == 0); # ifdef XP_WIN - if (deltaLength && !VirtualAlloc(dataEnd, deltaLength, MEM_COMMIT, PAGE_READWRITE)) + if (delta && !VirtualAlloc(dataEnd, delta, MEM_COMMIT, PAGE_READWRITE)) return false; # else // XP_WIN - if (deltaLength && mprotect(dataEnd, deltaLength, PROT_READ | PROT_WRITE)) + if (delta && mprotect(dataEnd, delta, PROT_READ | PROT_WRITE)) return false; # endif // !XP_WIN # if defined(MOZ_VALGRIND) && 
defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE) - VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, deltaLength); + VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, delta); # endif - MemProfiler::SampleNative(dataEnd, deltaLength); + MemProfiler::SampleNative(dataEnd, delta); - length_ = newLength.value(); + length_ = newSize; return true; } - // Try and grow the mapped region of memory. Does not changes current or - // max size. Does not move memory if no space to grow. - void tryGrowMaxSize(uint32_t deltaMaxSize) - { - MOZ_ASSERT(maxSize_); - MOZ_RELEASE_ASSERT(deltaMaxSize % wasm::PageSize == 0); - - CheckedInt<uint32_t> curMax = maxSize_.value(); - CheckedInt<uint32_t> newMax = curMax + deltaMaxSize; - MOZ_RELEASE_ASSERT(newMax.isValid()); - MOZ_RELEASE_ASSERT(newMax.value() % wasm::PageSize == 0); - - size_t newMapped = wasm::LegalizeMapLength(newMax.value()); +#ifndef WASM_HUGE_MEMORY + bool extendMappedSize(uint32_t maxSize) { + size_t newMappedSize = wasm::ComputeMappedSize(maxSize); + MOZ_ASSERT(mappedSize_ <= newMappedSize); + if (mappedSize_ == newMappedSize) + return true; # ifdef XP_WIN - if (!VirtualAlloc(dataPointer(), newMapped, MEM_RESERVE, PAGE_NOACCESS)) - return; + uint8_t* mappedEnd = dataPointer() + mappedSize_; + uint32_t delta = newMappedSize - mappedSize_; + if (!VirtualAlloc(mappedEnd, delta, MEM_RESERVE, PAGE_NOACCESS)) + return false; # elif defined(XP_LINUX) // Note this will not move memory (no MREMAP_MAYMOVE specified) - if (MAP_FAILED == mremap(dataPointer(), mappedSize_, newMapped, 0)) - return; + if (MAP_FAILED == mremap(dataPointer(), mappedSize_, newMappedSize, 0)) + return false; # else - // No mechanism for remapping on MaxOS. Luckily shouldn't need it here - // as most MacOS configs are 64 bit - return; -# endif // !XP_WIN + // No mechanism for remapping on MacOS and other Unices. Luckily + // shouldn't need it here as most of these are 64-bit. 
+ return false; +# endif + + mappedSize_ = newMappedSize; + return true; + } - mappedSize_ = newMapped; - maxSize_ = Some(newMax.value()); - return; + // Try and grow the mapped region of memory. Does not changes current size. + // Does not move memory if no space to grow. + void tryGrowMaxSizeInPlace(uint32_t deltaMaxSize) { + CheckedInt<uint32_t> newMaxSize = maxSize_.value(); + newMaxSize += deltaMaxSize; + MOZ_ASSERT(newMaxSize.isValid()); + MOZ_ASSERT(newMaxSize.value() % wasm::PageSize == 0); + + if (!extendMappedSize(newMaxSize.value())) + return; + + maxSize_ = Some(newMaxSize.value()); } +#endif // WASM_HUGE_MEMORY }; /* static */ WasmArrayRawBuffer* WasmArrayRawBuffer::Allocate(uint32_t numBytes, Maybe<uint32_t> maxSize) { - size_t mappedSize = wasm::LegalizeMapLength(maxSize.valueOr(numBytes)); + size_t mappedSize; +#ifdef WASM_HUGE_MEMORY + mappedSize = wasm::HugeMappedSize; +#else + mappedSize = wasm::ComputeMappedSize(maxSize.valueOr(numBytes)); +#endif MOZ_RELEASE_ASSERT(mappedSize <= SIZE_MAX - gc::SystemPageSize()); MOZ_RELEASE_ASSERT(numBytes <= maxSize.valueOr(UINT32_MAX)); MOZ_ASSERT(numBytes % gc::SystemPageSize() == 0); MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize(); uint64_t numBytesWithHeader = numBytes + gc::SystemPageSize(); @@ -640,117 +652,135 @@ ArrayBufferObject::BufferContents::wasmB { MOZ_RELEASE_ASSERT(kind_ == WASM_MAPPED); return (WasmArrayRawBuffer*)(data_ - sizeof(WasmArrayRawBuffer)); } #define ROUND_UP(v, a) ((v) % (a) == 0 ? 
(v) : v + a - ((v) % (a))) /* static */ ArrayBufferObject* -ArrayBufferObject::createForWasm(JSContext* cx, uint32_t numBytes, Maybe<uint32_t> maxSize) +ArrayBufferObject::createForWasm(JSContext* cx, uint32_t initialSize, Maybe<uint32_t> maxSize) { - MOZ_ASSERT(numBytes % wasm::PageSize == 0); + MOZ_ASSERT(initialSize % wasm::PageSize == 0); MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers()); - // First try to map the maximum requested memory - WasmArrayRawBuffer* wasmBuf = WasmArrayRawBuffer::Allocate(numBytes, maxSize); + // Prevent applications specifying a large max (like UINT32_MAX) from + // unintentially OOMing the browser on 32-bit: they just want "a lot of + // memory". Maintain the invariant that initialSize <= maxSize. + if (sizeof(void*) == 4 && maxSize) { + static const uint32_t OneGiB = 1 << 30; + uint32_t clamp = Max(OneGiB, initialSize); + maxSize = Some(Min(clamp, maxSize.value())); + } + + // Try to reserve the maximum requested memory + WasmArrayRawBuffer* wasmBuf = WasmArrayRawBuffer::Allocate(initialSize, maxSize); if (!wasmBuf) { #ifdef WASM_HUGE_MEMORY ReportOutOfMemory(cx); return nullptr; #else // If we fail, and have a maxSize, try to reserve the biggest chunk in - // the range [numBytes, maxSize) using log backoff. + // the range [initialSize, maxSize) using log backoff. if (!maxSize) { ReportOutOfMemory(cx); return nullptr; } uint32_t cur = maxSize.value() / 2; - for (; cur > numBytes; cur = cur / 2) { - wasmBuf = WasmArrayRawBuffer::Allocate(numBytes, Some(ROUND_UP(cur, wasm::PageSize))); + for (; cur > initialSize; cur /= 2) { + wasmBuf = WasmArrayRawBuffer::Allocate(initialSize, Some(ROUND_UP(cur, wasm::PageSize))); if (wasmBuf) break; } if (!wasmBuf) { ReportOutOfMemory(cx); return nullptr; } // Try to grow our chunk as much as possible. 
for (size_t d = cur / 2; d >= wasm::PageSize; d /= 2) - wasmBuf->tryGrowMaxSize(ROUND_UP(d, wasm::PageSize)); + wasmBuf->tryGrowMaxSizeInPlace(ROUND_UP(d, wasm::PageSize)); #endif } void *data = wasmBuf->dataPointer(); BufferContents contents = BufferContents::create<WASM_MAPPED>(data); - ArrayBufferObject* buffer = ArrayBufferObject::create(cx, numBytes, contents); + ArrayBufferObject* buffer = ArrayBufferObject::create(cx, initialSize, contents); if (!buffer) { - ReportOutOfMemory(cx); WasmArrayRawBuffer::Release(data); return nullptr; } return buffer; } /* static */ bool -ArrayBufferObject::prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer) +ArrayBufferObject::prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer, bool needGuard) { +#ifdef WASM_HUGE_MEMORY + MOZ_ASSERT(needGuard); +#endif MOZ_ASSERT(buffer->byteLength() % wasm::PageSize == 0); MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers()); if (buffer->forInlineTypedObject()) { JS_ReportError(cx, "ArrayBuffer can't be used by asm.js"); return false; } -#ifdef WASM_HUGE_MEMORY - if (buffer->isWasmMapped()) + // Since asm.js doesn't grow, assume max is same as length. + uint32_t length = buffer->byteLength(); + uint32_t maxSize = length; + + if (needGuard) { + if (buffer->isWasmMapped()) + return true; + + if (buffer->isAsmJSMalloced()) { + // needGuard is only set for SIMD.js (which isn't shipping, so this + // error isn't content-visible). + JS_ReportError(cx, "ArrayBuffer can't be prepared for asm.js with SIMD.js"); + return false; + } + + WasmArrayRawBuffer* wasmBuf = WasmArrayRawBuffer::Allocate(length, Some(maxSize)); + if (!wasmBuf) { + // Note - we don't need the same backoff search as in WASM, since we don't over-map to + // allow growth in asm.js + ReportOutOfMemory(cx); + return false; + } + + // Copy over the current contents of the typed array. 
+ void* data = wasmBuf->dataPointer(); + memcpy(data, buffer->dataPointer(), length); + + // Swap the new elements into the ArrayBufferObject. Mark the + // ArrayBufferObject so we don't do this again. + BufferContents newContents = BufferContents::create<WASM_MAPPED>(data); + buffer->changeContents(cx, newContents); + MOZ_ASSERT(data == buffer->dataPointer()); + return true; + } + + if (buffer->isAsmJSMalloced()) return true; - uint32_t length = buffer->byteLength(); - // Since asm.js doesn't grow, assume max is same as length. - WasmArrayRawBuffer* wasmBuf = WasmArrayRawBuffer::Allocate(length, Some(length)); - void* data = wasmBuf->dataPointer(); - - if (!data) { - // Note - we don't need the same backoff search as in WASM, since we don't over-map to - // allow growth in asm.js - ReportOutOfMemory(cx); - return false; - } - - // Copy over the current contents of the typed array. - memcpy(data, buffer->dataPointer(), length); - - // Swap the new elements into the ArrayBufferObject. Mark the - // ArrayBufferObject so we don't do this again. - BufferContents newContents = BufferContents::create<WASM_MAPPED>(data); - buffer->changeContents(cx, newContents); - MOZ_ASSERT(data == buffer->dataPointer()); - return true; -#else if (!buffer->ownsData()) { BufferContents contents = AllocateArrayBufferContents(cx, buffer->byteLength()); if (!contents) return false; memcpy(contents.data(), buffer->dataPointer(), buffer->byteLength()); buffer->changeContents(cx, contents); } buffer->setIsAsmJSMalloced(); - - // On non-x64 architectures we can't yet emulate asm.js heap access. 
- MOZ_RELEASE_ASSERT(buffer->wasmActualByteLength() == buffer->wasmMappedSize()); - MOZ_RELEASE_ASSERT(buffer->wasmActualByteLength() == buffer->wasmBoundsCheckLimit()); -#endif return true; } ArrayBufferObject::BufferContents ArrayBufferObject::createMappedContents(int fd, size_t offset, size_t length) { void* data = AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT); MemProfiler::SampleNative(data, length); @@ -833,59 +863,69 @@ ArrayBufferObject::wasmMaxSize() const { if (isWasmMapped()) return contents().wasmBuffer()->maxSize(); else return Some<uint32_t>(byteLength()); } uint32_t +ArrayBufferObject::wasmActualByteLength() const +{ + if (isWasmMapped()) + return contents().wasmBuffer()->actualByteLength(); + else + return byteLength(); +} + +bool +ArrayBufferObject::wasmGrowToSizeInPlace(uint32_t newSize) +{ + return contents().wasmBuffer()->growToSizeInPlace(newSize); +} + +#ifndef WASM_HUGE_MEMORY +bool +ArrayBufferObject::wasmMovingGrowToSize(uint32_t newSize) +{ + WasmArrayRawBuffer* curBuf = contents().wasmBuffer(); + + if (newSize <= curBuf->boundsCheckLimit() || curBuf->extendMappedSize(newSize)) + return curBuf->growToSizeInPlace(newSize); + + WasmArrayRawBuffer* newBuf = WasmArrayRawBuffer::Allocate(newSize, Nothing()); + if (!newBuf) + return false; + + void* newData = newBuf->dataPointer(); + memcpy(newData, curBuf->dataPointer(), curBuf->actualByteLength()); + + BufferContents newContents = BufferContents::create<WASM_MAPPED>(newData); + changeContents(GetJSContextFromMainThread(), newContents); + return true; +} + +uint32_t ArrayBufferObject::wasmBoundsCheckLimit() const { if (isWasmMapped()) return contents().wasmBuffer()->boundsCheckLimit(); else return byteLength(); } uint32_t -ArrayBufferObject::wasmActualByteLength() const -{ - if (isWasmMapped()) - return contents().wasmBuffer()->actualByteLength(); - else - return byteLength(); -} - -uint32_t ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const { - if 
(this->is<ArrayBufferObject>()) - return this->as<ArrayBufferObject>().wasmBoundsCheckLimit(); - - // TODO: When SharedArrayBuffer can be used from wasm, this should be - // replaced by SharedArrayBufferObject::wasmBoundsCheckLimit(). - return wasmMappedSize(); -} + if (is<ArrayBufferObject>()) + return as<ArrayBufferObject>().wasmBoundsCheckLimit(); -bool -ArrayBufferObject::growForWasm(uint32_t delta) -{ - MOZ_ASSERT(isWasmMapped()); - - if (delta == 0) - return true; - - // Should be guaranteed by Instance::growMemory - CheckedInt<uint32_t> curSize = wasmActualByteLength(); - CheckedInt<uint32_t> newSize = curSize + CheckedInt<uint32_t>(delta) * wasm ::PageSize; - MOZ_RELEASE_ASSERT(newSize.isValid()); - - return contents().wasmBuffer()->growLength(delta * wasm::PageSize); + return as<SharedArrayBufferObject>().byteLength(); } +#endif uint32_t ArrayBufferObject::flags() const { return uint32_t(getSlot(FLAGS_SLOT).toInt32()); } void
--- a/js/src/vm/ArrayBufferObject.h +++ b/js/src/vm/ArrayBufferObject.h @@ -70,39 +70,48 @@ class WasmArrayRawBuffer; // // During a minor GC, (3) and (4) may move. During a compacting GC, (2), (3), // and (4) may move. class ArrayBufferObjectMaybeShared; uint32_t AnyArrayBufferByteLength(const ArrayBufferObjectMaybeShared* buf); uint32_t WasmArrayBufferActualByteLength(const ArrayBufferObjectMaybeShared* buf); +mozilla::Maybe<uint32_t> WasmArrayBufferMaxSize(const ArrayBufferObjectMaybeShared* buf); size_t WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf); bool WasmArrayBufferGrowForWasm(ArrayBufferObjectMaybeShared* buf, uint32_t delta); ArrayBufferObjectMaybeShared& AsAnyArrayBuffer(HandleValue val); class ArrayBufferObjectMaybeShared : public NativeObject { public: uint32_t byteLength() { return AnyArrayBufferByteLength(this); } + inline bool isDetached() const; + + inline SharedMem<uint8_t*> dataPointerEither(); + + // WebAssembly support: + // Note: the eventual goal is to remove this from ArrayBuffer and have + // (Shared)ArrayBuffers alias memory owned by some wasm::Memory object. 
+ + uint32_t wasmActualByteLength() const { + return WasmArrayBufferActualByteLength(this); + } + mozilla::Maybe<uint32_t> wasmMaxSize() const { + return WasmArrayBufferMaxSize(this); + } size_t wasmMappedSize() const { return WasmArrayBufferMappedSize(this); } - +#ifndef WASM_HUGE_MEMORY uint32_t wasmBoundsCheckLimit() const; - uint32_t wasmActualByteLength() const { - return WasmArrayBufferActualByteLength(this); - } - - inline bool isDetached() const; - - inline SharedMem<uint8_t*> dataPointerEither(); +#endif }; typedef Rooted<ArrayBufferObjectMaybeShared*> RootedArrayBufferObjectMaybeShared; typedef Handle<ArrayBufferObjectMaybeShared*> HandleArrayBufferObjectMaybeShared; typedef MutableHandle<ArrayBufferObjectMaybeShared*> MutableHandleArrayBufferObjectMaybeShared; /* * ArrayBufferObject @@ -300,21 +309,17 @@ class ArrayBufferObject : public ArrayBu void setFirstView(ArrayBufferViewObject* view); uint8_t* inlineDataPointer() const; public: uint8_t* dataPointer() const; SharedMem<uint8_t*> dataPointerShared() const; uint32_t byteLength() const; - uint32_t wasmActualByteLength() const; - size_t wasmMappedSize() const; - uint32_t wasmBoundsCheckLimit() const; - mozilla::Maybe<uint32_t> wasmMaxSize() const; - MOZ_MUST_USE bool growForWasm(uint32_t delta); + BufferContents contents() const { return BufferContents(dataPointer(), bufferKind()); } bool hasInlineData() const { return dataPointer() == inlineDataPointer(); } void releaseData(FreeOp* fop); @@ -330,19 +335,28 @@ class ArrayBufferObject : public ArrayBu BufferKind bufferKind() const { return BufferKind(flags() & BUFFER_KIND_MASK); } bool isPlain() const { return bufferKind() == PLAIN; } bool isWasmMapped() const { return bufferKind() == WASM_MAPPED; } bool isAsmJSMalloced() const { return bufferKind() == ASMJS_MALLOCED; } bool isWasm() const { return isWasmMapped() || isAsmJSMalloced(); } bool isMapped() const { return bufferKind() == MAPPED; } bool isDetached() const { return flags() & DETACHED; } - 
static ArrayBufferObject* createForWasm(JSContext* cx, uint32_t numBytes, + // WebAssembly support: + static ArrayBufferObject* createForWasm(JSContext* cx, uint32_t initialSize, mozilla::Maybe<uint32_t> maxSize); - static bool prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer); + static bool prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer, bool needGuard); + uint32_t wasmActualByteLength() const; + size_t wasmMappedSize() const; + mozilla::Maybe<uint32_t> wasmMaxSize() const; + MOZ_MUST_USE bool wasmGrowToSizeInPlace(uint32_t newSize); +#ifndef WASM_HUGE_MEMORY + MOZ_MUST_USE bool wasmMovingGrowToSize(uint32_t newSize); + uint32_t wasmBoundsCheckLimit() const; +#endif static void finalize(FreeOp* fop, JSObject* obj); static BufferContents createMappedContents(int fd, size_t offset, size_t length); static size_t offsetOfFlagsSlot() { return getFixedSlotOffset(FLAGS_SLOT); }
--- a/js/src/vm/EnvironmentObject.cpp +++ b/js/src/vm/EnvironmentObject.cpp @@ -3115,17 +3115,17 @@ js::GetThisValueForDebuggerMaybeOptimize continue; BindingLocation loc = bi.location(); if (loc.kind() == BindingLocation::Kind::Environment) { RootedObject callObj(cx, &ei.environment().as<CallObject>()); return GetProperty(cx, callObj, callObj, bi.name()->asPropertyName(), res); } - if (loc.kind() == BindingLocation::Kind::Frame) + if (loc.kind() == BindingLocation::Kind::Frame && ei.withinInitialFrame()) res.set(frame.unaliasedLocal(bi.location().slot())); else res.setMagic(JS_OPTIMIZED_OUT); return true; } MOZ_CRASH("'this' binding must be found"); @@ -3196,18 +3196,18 @@ bool js::CheckCanDeclareGlobalBinding(JSContext* cx, Handle<GlobalObject*> global, HandlePropertyName name, bool isFunction) { RootedId id(cx, NameToId(name)); Rooted<PropertyDescriptor> desc(cx); if (!GetOwnPropertyDescriptor(cx, global, id, &desc)) return false; - // ES 8.1.14.15 CanDeclareGlobalVar - // ES 8.1.14.16 CanDeclareGlobalFunction + // ES 8.1.1.4.15 CanDeclareGlobalVar + // ES 8.1.1.4.16 CanDeclareGlobalFunction // Step 4. if (!desc.object()) { // 8.1.14.15 step 6. // 8.1.14.16 step 5. if (global->nonProxyIsExtensible()) return true;
--- a/js/src/vm/Interpreter.cpp +++ b/js/src/vm/Interpreter.cpp @@ -5040,19 +5040,27 @@ js::ThrowCheckIsObject(JSContext* cx, Ch bool js::ThrowUninitializedThis(JSContext* cx, AbstractFramePtr frame) { RootedFunction fun(cx); if (frame.isFunctionFrame()) { fun = frame.callee(); } else { - MOZ_ASSERT(frame.isEvalFrame()); - MOZ_ASSERT(frame.script()->isDirectEvalInFunction()); - for (ScopeIter si(frame.script()->enclosingScope()); si; si++) { + Scope* startingScope; + if (frame.isDebuggerEvalFrame()) { + AbstractFramePtr evalInFramePrev = frame.asInterpreterFrame()->evalInFramePrev(); + startingScope = evalInFramePrev.script()->bodyScope(); + } else { + MOZ_ASSERT(frame.isEvalFrame()); + MOZ_ASSERT(frame.script()->isDirectEvalInFunction()); + startingScope = frame.script()->enclosingScope(); + } + + for (ScopeIter si(startingScope); si; si++) { if (si.scope()->is<FunctionScope>()) { fun = si.scope()->as<FunctionScope>().canonicalFunction(); break; } } MOZ_ASSERT(fun); }
--- a/js/src/vm/SharedArrayObject.cpp +++ b/js/src/vm/SharedArrayObject.cpp @@ -68,39 +68,41 @@ MarkValidRegion(void* addr, size_t len) return true; #else if (mprotect(addr, len, PROT_READ | PROT_WRITE)) return false; return true; #endif } -#if defined(WASM_HUGE_MEMORY) // Since this SharedArrayBuffer will likely be used for asm.js code, prepare it // for asm.js by mapping the 4gb protected zone described in WasmTypes.h. // Since we want to put the SharedArrayBuffer header immediately before the // heap but keep the heap page-aligned, allocate an extra page before the heap. static uint64_t -SharedArrayMappedSize() +SharedArrayMappedSize(uint32_t allocSize) { MOZ_RELEASE_ASSERT(jit::JitOptions.wasmTestMode); MOZ_RELEASE_ASSERT(sizeof(SharedArrayRawBuffer) < gc::SystemPageSize()); - return wasm::MappedSize + gc::SystemPageSize(); +#ifdef WASM_HUGE_MEMORY + return wasm::HugeMappedSize + gc::SystemPageSize(); +#else + return allocSize + wasm::GuardSize; +#endif } // If there are too many 4GB buffers live we run up against system resource // exhaustion (address space or number of memory map descriptors), see // bug 1068684, bug 1073934 for details. The limiting case seems to be // Windows Vista Home 64-bit, where the per-process address space is limited // to 8TB. Thus we track the number of live objects, and set a limit of // 1000 live objects per process; we run synchronous GC if necessary; and // we throw an OOM error if the per-process limit is exceeded. static mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numLive; static const uint32_t maxLive = 1000; -#endif static uint32_t SharedArrayAllocSize(uint32_t length) { return AlignBytes(length + gc::SystemPageSize(), gc::SystemPageSize()); } SharedArrayRawBuffer* @@ -113,50 +115,50 @@ SharedArrayRawBuffer::New(JSContext* cx, // Add a page for the header and round to a page boundary. 
uint32_t allocSize = SharedArrayAllocSize(length); if (allocSize <= length) return nullptr; bool preparedForAsmJS = jit::JitOptions.wasmTestMode && IsValidAsmJSHeapLength(length); void* p = nullptr; -#ifdef WASM_HUGE_MEMORY if (preparedForAsmJS) { // Test >= to guard against the case where multiple extant runtimes // race to allocate. if (++numLive >= maxLive) { JSRuntime* rt = cx->runtime(); if (rt->largeAllocationFailureCallback) rt->largeAllocationFailureCallback(rt->largeAllocationFailureCallbackData); if (numLive >= maxLive) { numLive--; return nullptr; } } + + uint32_t mappedSize = SharedArrayMappedSize(allocSize); + // Get the entire reserved region (with all pages inaccessible) - p = MapMemory(SharedArrayMappedSize(), false); + p = MapMemory(mappedSize, false); if (!p) { numLive--; return nullptr; } if (!MarkValidRegion(p, allocSize)) { - UnmapMemory(p, SharedArrayMappedSize()); + UnmapMemory(p, mappedSize); numLive--; return nullptr; } # if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE) // Tell Valgrind/Memcheck to not report accesses in the inaccessible region. VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)p + allocSize, - SharedArrayMappedSize() - allocSize); + mappedSize - allocSize); # endif - } else -#endif - { + } else { p = MapMemory(allocSize, true); if (!p) return nullptr; } uint8_t* buffer = reinterpret_cast<uint8_t*>(p) + gc::SystemPageSize(); uint8_t* base = buffer - sizeof(SharedArrayRawBuffer); SharedArrayRawBuffer* rawbuf = new (base) SharedArrayRawBuffer(buffer, length, preparedForAsmJS); @@ -182,28 +184,28 @@ SharedArrayRawBuffer::dropReference() // If this was the final reference, release the buffer. 
SharedMem<uint8_t*> p = this->dataPointerShared() - gc::SystemPageSize(); MOZ_ASSERT(p.asValue() % gc::SystemPageSize() == 0); uint8_t* address = p.unwrap(/*safe - only reference*/); uint32_t allocSize = SharedArrayAllocSize(this->length); -#if defined(WASM_HUGE_MEMORY) if (this->preparedForAsmJS) { numLive--; - UnmapMemory(address, SharedArrayMappedSize()); + + uint32_t mappedSize = SharedArrayMappedSize(allocSize); + UnmapMemory(address, mappedSize); + # if defined(MOZ_VALGRIND) && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE) // Tell Valgrind/Memcheck to recommence reporting accesses in the // previously-inaccessible region. - VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(address, SharedArrayMappedSize()); + VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(address, mappedSize); # endif - } else -#endif - { + } else { UnmapMemory(address, allocSize); } } const JSFunctionSpec SharedArrayBufferObject::jsfuncs[] = { /* Nothing yet */ JS_FS_END };
--- a/js/src/vm/Stack.cpp +++ b/js/src/vm/Stack.cpp @@ -37,30 +37,28 @@ InterpreterFrame::initExecuteFrame(JSCon { flags_ = 0; script_ = script; // newTarget = NullValue is an initial sentinel for "please fill me in from the stack". // It should never be passed from Ion code. RootedValue newTarget(cx, newTargetValue); if (script->isDirectEvalInFunction()) { - if (evalInFramePrev) { - if (newTarget.isNull() && - evalInFramePrev.script()->bodyScope()->hasOnChain(ScopeKind::Function)) - { - newTarget = evalInFramePrev.newTarget(); - } - } else { - FrameIter iter(cx); - MOZ_ASSERT(!iter.isWasm()); - if (newTarget.isNull() && - iter.script()->bodyScope()->hasOnChain(ScopeKind::Function)) - { - newTarget = iter.newTarget(); - } + FrameIter iter(cx); + MOZ_ASSERT(!iter.isWasm()); + if (newTarget.isNull() && + iter.script()->bodyScope()->hasOnChain(ScopeKind::Function)) + { + newTarget = iter.newTarget(); + } + } else if (evalInFramePrev) { + if (newTarget.isNull() && + evalInFramePrev.script()->bodyScope()->hasOnChain(ScopeKind::Function)) + { + newTarget = evalInFramePrev.newTarget(); } } Value* dstvp = (Value*)this - 1; dstvp[0] = newTarget; envChain_ = envChain.get(); prev_ = nullptr;
--- a/layout/base/DottedCornerFinder.cpp +++ b/layout/base/DottedCornerFinder.cpp @@ -52,17 +52,18 @@ DottedCornerFinder::DottedCornerFinder(c const Point& aCn, Float aRn, const Size& aCornerDim) : mOuterBezier(aOuterBezier), mInnerBezier(aInnerBezier), mCorner(aCorner), mNormalSign((aCorner == C_TL || aCorner == C_BR) ? -1.0f : 1.0f), mC0(aC0), mCn(aCn), mR0(aR0), mRn(aRn), mMaxR(std::max(aR0, aRn)), - mCurveOrigin(mC0.x, mCn.y), + mCenterCurveOrigin(mC0.x, mCn.y), + mInnerCurveOrigin(mInnerBezier.mPoints[0].x, mInnerBezier.mPoints[3].y), mBestOverlap(0.0f), mHasZeroBorderWidth(false), mHasMore(true), mMaxCount(aCornerDim.width + aCornerDim.height), mType(OTHER), mI(0), mCount(0) { NS_ASSERTION(mR0 > 0.0f || mRn > 0.0f, "At least one side should have non-zero radius."); @@ -180,18 +181,18 @@ DottedCornerFinder::Next(void) } else if (mCorner == C_TR) { phi = -M_PI / 2.0f + phi; } else if (mCorner == C_BR) { phi = M_PI / 2.0f - phi; } else { phi = M_PI / 2.0f + phi; } - Point C(mCurveOrigin.x + mCenterCurveR * cos(phi), - mCurveOrigin.y + mCenterCurveR * sin(phi)); + Point C(mCenterCurveOrigin.x + mCenterCurveR * cos(phi), + mCenterCurveOrigin.y + mCenterCurveR * sin(phi)); return DottedCornerFinder::Result(C, mR0); } // Find unfilled and filled circles. (void)FindNext(mBestOverlap); (void)FindNext(mBestOverlap); return Result(mLastC, mLastR); } @@ -283,17 +284,17 @@ DottedCornerFinder::FindNext(Float overl // Basically this shouldn't happen. // If differential is 0, we cannot calculate tangent circle, // skip this point. t = (t + upper) / 2.0f; continue; } Point normal = PointRotateCCW90(Diff / DiffLength) * (-mNormalSign); - r = CalculateDistanceToEllipticArc(C, normal, mCurveOrigin, + r = CalculateDistanceToEllipticArc(C, normal, mInnerCurveOrigin, mInnerWidth, mInnerHeight); // Check overlap along arc. 
circlesDist = GetBezierLength(mCenterBezier, mLastT, t); expectedDist = (r + mLastR) * factor; if (circlesDist < expectedDist - DIST_MARGIN) { lower = t; } else if (circlesDist > expectedDist + DIST_MARGIN) {
--- a/layout/base/DottedCornerFinder.h +++ b/layout/base/DottedCornerFinder.h @@ -52,17 +52,19 @@ public: struct Result { // Center point of dot and its radius. Point C; Float r; Result(const Point& aC, Float aR) : C(aC), r(aR) - {} + { + MOZ_ASSERT(aR >= 0); + } }; // aBorderRadiusX // aCornerDim.width // |<----------------->| // | | v // --+-------------___---+-- // ^ | __-- | | @@ -184,42 +186,64 @@ private: // Point mC0; Point mCn; Float mR0; Float mRn; Float mMaxR; // Parameters for the center curve with perfect circle and the inner curve. + // The center curve doesn't necessarily share the origin with others. // // ___---+ // __-- | // _- | - // / __---+ + // / __-+ | // / __-- | // | / | // | / __--+-- // | | _- | ^ // | | / | | // | | / | | // | | | | | // | | | | | mInnerHeight // | | | | | - // | | | | | - // | | | | v - // +----+----+---------+ - // | | | mCurveOrigin - // | |<------->| - // | mInnerWidth | - // | | - // |<------------>| - // mCenterCurveR + // | + | | | + // | | | v + // +---------+---------+ + // | | mInnerCurveOrigin + // |<------->| + // mInnerWidth // - Point mCurveOrigin; + // ___---+ + // __-- + // _- + // / __-+ + // / __-- | + // | / | + // | / __--+ + // | | _- | + // | | / | + // | | / | + // | | | | + // | | | | + // | | | | + // | +--- | ------+ + // | | | | mCenterCurveOrigin + // + | + | + // | | + // | | + // | | + // | | + // |<---------->| + // mCenterCurveR + // + Point mCenterCurveOrigin; Float mCenterCurveR; + Point mInnerCurveOrigin; Float mInnerWidth; Float mInnerHeight; Point mLastC; Float mLastR; Float mLastT; // Overlap between two circles.
--- a/layout/base/FrameLayerBuilder.cpp +++ b/layout/base/FrameLayerBuilder.cpp @@ -3759,24 +3759,25 @@ ContainerState::SetupMaskLayerForScrolle */ void ContainerState::ProcessDisplayItems(nsDisplayList* aList) { PROFILER_LABEL("ContainerState", "ProcessDisplayItems", js::ProfileEntry::Category::GRAPHICS); AnimatedGeometryRoot* lastAnimatedGeometryRoot = mContainerAnimatedGeometryRoot; + nsPoint lastAGRTopLeft; nsPoint topLeft(0,0); // When NO_COMPONENT_ALPHA is set, items will be flattened into a single // layer, so we need to choose which active scrolled root to use for all // items. if (mFlattenToSingleLayer) { if (ChooseAnimatedGeometryRoot(*aList, &lastAnimatedGeometryRoot)) { - topLeft = (*lastAnimatedGeometryRoot)->GetOffsetToCrossDoc(mContainerReferenceFrame); + lastAGRTopLeft = (*lastAnimatedGeometryRoot)->GetOffsetToCrossDoc(mContainerReferenceFrame); } } int32_t maxLayers = gfxPrefs::MaxActiveLayers(); int layerCount = 0; nsDisplayList savedItems; nsDisplayItem* item; @@ -3838,16 +3839,17 @@ ContainerState::ProcessDisplayItems(nsDi } bool forceInactive; AnimatedGeometryRoot* animatedGeometryRoot; AnimatedGeometryRoot* animatedGeometryRootForClip = nullptr; if (mFlattenToSingleLayer && layerState != LAYER_ACTIVE_FORCE) { forceInactive = true; animatedGeometryRoot = lastAnimatedGeometryRoot; + topLeft = lastAGRTopLeft; } else { forceInactive = false; if (mManager->IsWidgetLayerManager()) { animatedGeometryRoot = item->GetAnimatedGeometryRoot(); animatedGeometryRootForClip = item->AnimatedGeometryRootForScrollMetadata(); } else { // For inactive layer subtrees, splitting content into PaintedLayers // based on animated geometry roots is pointless. It's more efficient
--- a/layout/base/nsPresShell.cpp +++ b/layout/base/nsPresShell.cpp @@ -8516,16 +8516,19 @@ PresShell::HandleEventInternal(WidgetEve } } if (Telemetry::CanRecordBase() && !aEvent->mTimeStamp.IsNull() && aEvent->AsInputEvent()) { double millis = (TimeStamp::Now() - aEvent->mTimeStamp).ToMilliseconds(); Telemetry::Accumulate(Telemetry::INPUT_EVENT_RESPONSE_MS, millis); + if (mDocument && mDocument->GetReadyStateEnum() != nsIDocument::READYSTATE_COMPLETE) { + Telemetry::Accumulate(Telemetry::LOAD_INPUT_EVENT_RESPONSE_MS, millis); + } } return rv; } void nsIPresShell::DispatchGotOrLostPointerCaptureEvent(bool aIsGotCapture, uint32_t aPointerId,
--- a/layout/forms/nsTextControlFrame.cpp +++ b/layout/forms/nsTextControlFrame.cpp @@ -345,18 +345,18 @@ nsTextControlFrame::CreateAnonymousConte nsContentUtils::RemoveNewlines(placeholderTxt); mUsePlaceholder = !placeholderTxt.IsEmpty(); // Create the placeholder anonymous content if needed. if (mUsePlaceholder) { nsIContent* placeholderNode = txtCtrl->CreatePlaceholderNode(); NS_ENSURE_TRUE(placeholderNode, NS_ERROR_OUT_OF_MEMORY); - // Associate ::-moz-placeholder pseudo-element with the placeholder node. - CSSPseudoElementType pseudoType = CSSPseudoElementType::mozPlaceholder; + // Associate ::placeholder pseudo-element with the placeholder node. + CSSPseudoElementType pseudoType = CSSPseudoElementType::placeholder; // If this is a text input inside a number input then we want to use the // main number input as the source of style for the placeholder frame. nsIFrame* mainInputFrame = this; if (StyleContext()->GetPseudoType() == CSSPseudoElementType::mozNumberText) { do { mainInputFrame = mainInputFrame->GetParent(); } while (mainInputFrame && @@ -1459,17 +1459,17 @@ nsTextControlFrame::BuildDisplayList(nsD } kid = kid->GetNextSibling(); } } mozilla::dom::Element* nsTextControlFrame::GetPseudoElement(CSSPseudoElementType aType) { - if (aType == CSSPseudoElementType::mozPlaceholder) { + if (aType == CSSPseudoElementType::placeholder) { nsCOMPtr<nsITextControlElement> txtCtrl = do_QueryInterface(GetContent()); return txtCtrl->GetPlaceholderNode(); } return nsContainerFrame::GetPseudoElement(aType); } NS_IMETHODIMP
--- a/layout/inspector/tests/test_getCSSPseudoElementNames.html +++ b/layout/inspector/tests/test_getCSSPseudoElementNames.html @@ -10,16 +10,17 @@ .getService(SpecialPowers.Ci.inIDOMUtils); let expected = new Set([ ":after", ":before", ":backdrop", ":first-letter", ":first-line", + ":placeholder", ":-moz-color-swatch", ":-moz-focus-inner", ":-moz-focus-outer", ":-moz-list-bullet", ":-moz-list-number", ":-moz-math-anonymous", ":-moz-meter-bar", ":-moz-placeholder",
--- a/layout/reftests/css-placeholder/css-background.html +++ b/layout/reftests/css-placeholder/css-background.html @@ -1,21 +1,21 @@ <!DOCTYPE html> <html> <!-- - This test checks that setting a background on a ::-moz-placeholder works. + This test checks that setting a background on a ::placeholder works. However, given that it is not possible to do an actualy equality check, we have te check that setting a background doesn't produce the same result as not setting it. --> <style> - :-moz-any(input, textarea).color::-moz-placeholder { + :-moz-any(input, textarea).color::placeholder { background-color: blue; } - :-moz-any(input, textarea).gradient::-moz-placeholder { + :-moz-any(input, textarea).gradient::placeholder { background: linear-gradient(to right, blue, white, red); } </style> <body> <input class="color" placeholder='foo'> <textarea class="color" placeholder='foo'></textarea> <input class="gradient" placeholder='foo'> <textarea class="gradient" placeholder='foo'></textarea>
--- a/layout/reftests/css-placeholder/css-restrictions.html +++ b/layout/reftests/css-placeholder/css-restrictions.html @@ -1,12 +1,12 @@ <!DOCTYPE html> <html> <style> - :-moz-any(input, textarea)::-moz-placeholder { + :-moz-any(input, textarea)::placeholder { visibility: hidden; padding: 20px; float: right; overflow: visible; white-space: pre; /* * This list could be endless given that all non-whitelisted properties
--- a/layout/reftests/css-placeholder/css-simple-styling-ref.html +++ b/layout/reftests/css-placeholder/css-simple-styling-ref.html @@ -8,10 +8,12 @@ color: blue; word-spacing: 5px; text-shadow: 1px 1px 1px red; } </style> <body> <input value='foo bar'> <textarea>foo bar</textarea> + <input value='bar foo'> + <textarea>bar foo</textarea> </body> </html>
--- a/layout/reftests/css-placeholder/css-simple-styling.html +++ b/layout/reftests/css-placeholder/css-simple-styling.html @@ -1,32 +1,35 @@ <!DOCTYPE html> <html> <!-- This test checks that a few allowed CSS properties have an effect when - applied to ::-moz-placeholder. + applied to ::placeholder or ::-moz-placeholder. --> <style> /* * We need to set some properties on the <input> because its size will * depend on its font. */ input, textarea { font: -moz-list; font-family: mono; font-style: italic; } - :-moz-any(input, textarea)::-moz-placeholder { + [placeholder="foo bar"]::placeholder, + [placeholder="bar foo"]::-moz-placeholder { opacity: 1.0; font: -moz-list; font-family: mono; font-style: italic; color: blue; word-spacing: 5px; text-shadow: 1px 1px 1px red; } </style> <body> <input placeholder='foo bar'> <textarea placeholder='foo bar'></textarea> + <input placeholder='bar foo'> + <textarea placeholder='bar foo'></textarea> </body> </html>
--- a/layout/reftests/css-placeholder/ignore-pseudo-class.html +++ b/layout/reftests/css-placeholder/ignore-pseudo-class.html @@ -1,12 +1,15 @@ <!DOCTYPE html> <html> <style> :-moz-placeholder { color: red; } + :placeholder { + color: red; + } </style> <body> <input placeholder='foobar'> <textarea placeholder='foobar'></textarea> </body> </html>
--- a/layout/reftests/css-placeholder/input/css-display.html +++ b/layout/reftests/css-placeholder/input/css-display.html @@ -1,12 +1,14 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <style> - input::-moz-placeholder { + [bar]::-moz-placeholder, + [foo]::placeholder { display: inline; } </style> <body> <input placeholder='foo'> + <input placeholder='bar'> </body> </html>
--- a/layout/reftests/css-placeholder/input/css-opacity.html +++ b/layout/reftests/css-placeholder/input/css-opacity.html @@ -1,12 +1,14 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <style> - input::-moz-placeholder { + [placeholder="bar"]::-moz-placeholder, + [placeholder="foo"]::placeholder { opacity: 0.1; } </style> <body> <input placeholder='foo'> + <input placeholder='bar'> </body> </html>
--- a/layout/reftests/css-placeholder/input/css-text-align.html +++ b/layout/reftests/css-placeholder/input/css-text-align.html @@ -1,12 +1,14 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <style> - input::-moz-placeholder { + [placeholder="bar"]::-moz-placeholder, + [placeholder="foo"]::placeholder { text-align: center; } </style> <body> <input placeholder='foo'> + <input placeholder='bar'> </body> </html>
--- a/layout/reftests/css-placeholder/input/input-ref.html +++ b/layout/reftests/css-placeholder/input/input-ref.html @@ -1,6 +1,7 @@ <!DOCTYPE html> <html> <body> <input> + <input> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-add.html +++ b/layout/reftests/css-placeholder/input/placeholder-add.html @@ -1,14 +1,16 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { + document.getElementById('moz').placeholder='bar'; document.getElementById('i').placeholder='foo'; document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <input id='i'> + <input id='moz'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-blur.html +++ b/layout/reftests/css-placeholder/input/placeholder-blur.html @@ -7,15 +7,24 @@ document.getElementById('i').focus(); } function focusHandler() { document.getElementById('i').blur(); } function blurHandler() { + document.getElementById('moz').focus(); + } + function focusHandlerMoz() + { + document.getElementById('moz').blur(); + } + function blurHandlerMoz() + { document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <input id='i' placeholder='foo' onfocus='focusHandler();' onblur='blurHandler();'> + <input id='moz' placeholder='bar' onfocus='focusHandlerMoz();' onblur='blurHandlerMoz();'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-button-ref.html +++ b/layout/reftests/css-placeholder/input/placeholder-button-ref.html @@ -1,8 +1,9 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <body> <input type='button'> + <input type='button'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-focus-ref.html +++ b/layout/reftests/css-placeholder/input/placeholder-focus-ref.html @@ -3,15 +3,20 @@ <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('i').focus(); } function focusHandler() { + document.getElementById('moz').focus(); + } + function focusHandlerMoz() + { document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <input id='i' class='ref' value='foo' onfocus='focusHandler();'> + <input id='moz' class='ref' value='bar' onfocus='focusHandlerMoz();'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-focus.html +++ b/layout/reftests/css-placeholder/input/placeholder-focus.html @@ -3,15 +3,20 @@ <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('i').focus(); } function focusHandler() { + document.getElementById('moz').focus(); + } + function focusHandlerMoz() + { document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <input id='i' placeholder='foo' onfocus='focusHandler();'> + <input id='moz' placeholder='bar' onfocus='focusHandlerMoz();'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-removal.html +++ b/layout/reftests/css-placeholder/input/placeholder-removal.html @@ -1,14 +1,16 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('i').removeAttribute('placeholder'); + document.getElementById('moz').removeAttribute('placeholder'); document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <input id='i' placeholder='foo'> + <input id='moz' placeholder='bar'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-simple-ref.html +++ b/layout/reftests/css-placeholder/input/placeholder-simple-ref.html @@ -1,7 +1,8 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <body> <input class='ref' value='foo'> + <input class='ref' value='bar'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-simple.html +++ b/layout/reftests/css-placeholder/input/placeholder-simple.html @@ -1,7 +1,8 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <body> <input placeholder='foo'> + <input placeholder='bar'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-type-change-1.html +++ b/layout/reftests/css-placeholder/input/placeholder-type-change-1.html @@ -1,14 +1,16 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('i').type = 'text'; + document.getElementById('moz').type = 'text'; document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <input type='button' id='i' placeholder='foo'> + <input type='button' id='moz' placeholder='bar'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-type-change-2.html +++ b/layout/reftests/css-placeholder/input/placeholder-type-change-2.html @@ -1,14 +1,16 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('i').type = 'button'; + document.getElementById('moz').type = 'button'; document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <input id='i' placeholder='foo'> + <input id='moz' placeholder='bar'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-value-ref.html +++ b/layout/reftests/css-placeholder/input/placeholder-value-ref.html @@ -1,6 +1,7 @@ <!DOCTYPE html> <html> <body> <input value='bar'> + <input value='foo'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-value-reset.html +++ b/layout/reftests/css-placeholder/input/placeholder-value-reset.html @@ -1,17 +1,19 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('i').value = 'bar'; + document.getElementById('moz').value = 'foo'; document.forms[0].reset(); document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <form> <input id='i' placeholder='foo'> + <input id='moz' placeholder='bar'> </form> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-value-set.html +++ b/layout/reftests/css-placeholder/input/placeholder-value-set.html @@ -1,14 +1,16 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('i').value = 'bar'; + document.getElementById('moz').value = 'foo'; document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <input id='i' placeholder='foo'> + <input id='moz' placeholder='bar'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-value-unset.html +++ b/layout/reftests/css-placeholder/input/placeholder-value-unset.html @@ -1,14 +1,16 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('i').value = ''; + document.getElementById('moz').value = ''; document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <input id='i' placeholder='foo' value='bar'> + <input id='moz' placeholder='bar' value='foo'> </body> </html>
--- a/layout/reftests/css-placeholder/input/placeholder-value.html +++ b/layout/reftests/css-placeholder/input/placeholder-value.html @@ -1,7 +1,8 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <body> <input placeholder='foo' value='bar'> + <input placeholder='bar' value='foo'> </body> </html>
--- a/layout/reftests/css-placeholder/input/style-shown.css +++ b/layout/reftests/css-placeholder/input/style-shown.css @@ -1,9 +1,9 @@ -input::-moz-placeholder { +input::placeholder { color: green; opacity: 1.0; } input:placeholder-shown { -moz-appearance: none; color: -moz-FieldText; background-color: red;
--- a/layout/reftests/css-placeholder/input/style.css +++ b/layout/reftests/css-placeholder/input/style.css @@ -1,5 +1,6 @@ -input::-moz-placeholder, +[placeholder="bar"]::-moz-placeholder, +[placeholder="foo"]::placeholder, input.ref { color: green; opacity: 1.0; }
--- a/layout/reftests/css-placeholder/textarea/css-display.html +++ b/layout/reftests/css-placeholder/textarea/css-display.html @@ -1,12 +1,14 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <style> - textarea::-moz-placeholder { + textarea::-moz-placeholder, + textarea::placeholder { display: inline; } </style> <body> <textarea placeholder='foo'></textarea> + <textarea placeholder='bar'></textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/css-opacity.html +++ b/layout/reftests/css-placeholder/textarea/css-opacity.html @@ -1,12 +1,14 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <style> - textarea::-moz-placeholder { + textarea::-moz-placeholder, + textarea::placeholder { opacity: 0.1; } </style> <body> <textarea placeholder='foo'></textarea> + <textarea placeholder='bar'></textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/css-resize-ref.html +++ b/layout/reftests/css-placeholder/textarea/css-resize-ref.html @@ -3,10 +3,11 @@ <link rel='stylesheet' type='text/css' href='style.css'> <style> textarea { resize: none; } </style> <body> <textarea class='ref'>foo</textarea> + <textarea class='ref'>bar</textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/css-resize.html +++ b/layout/reftests/css-placeholder/textarea/css-resize.html @@ -1,18 +1,20 @@ <!DOCTYPE html> <html> <!-- - resize shouldn't be allowed on the ::-moz-placeholder. + resize shouldn't be allowed on the ::placeholder. --> <link rel='stylesheet' type='text/css' href='style.css'> <style> textarea { resize: none; } - textarea::-moz-placeholder { + textarea::-moz-placeholder, + textarea::placeholder { resize: both; } </style> <body> <textarea placeholder='foo'></textarea> + <textarea placeholder='bar'></textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-add.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-add.html @@ -1,14 +1,16 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('t').placeholder='foo'; + document.getElementById('moz').placeholder='bar'; document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <textarea id='t'></textarea> + <textarea id='moz'></textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-blur.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-blur.html @@ -7,15 +7,24 @@ document.getElementById('t').focus(); } function focusHandler() { document.getElementById('t').blur(); } function blurHandler() { + document.getElementById('moz').focus(); + } + function focusHandlerMoz() + { + document.getElementById('moz').blur(); + } + function blurHandlerMoz() + { document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <textarea id='t' placeholder='foo' onfocus='focusHandler();' onblur='blurHandler();'></textarea> + <textarea id='moz' placeholder='bar' onfocus='focusHandlerMoz();' onblur='blurHandlerMoz();'></textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-focus-ref.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-focus-ref.html @@ -3,15 +3,20 @@ <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('t').focus(); } function focusHandler() { + document.getElementById('moz').focus(); + } + function focusHandlerMoz() + { document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <textarea id='t' class='ref' onfocus='focusHandler();'>foo</textarea> + <textarea id='moz' class='ref' onfocus='focusHandlerMoz();'>bar</textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-focus.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-focus.html @@ -3,15 +3,20 @@ <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('t').focus(); } function focusHandler() { + document.getElementById('moz').focus(); + } + function focusHandlerMoz() + { document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <textarea id='t' placeholder='foo' onfocus='focusHandler();'></textarea> + <textarea id='moz' placeholder='bar' onfocus='focusHandlerMoz();'></textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-removal.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-removal.html @@ -1,14 +1,16 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('t').removeAttribute('placeholder'); + document.getElementById('moz').removeAttribute('placeholder'); document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <textarea id='t' placeholder='foo'></textarea> + <textarea id='moz' placeholder='bar'></textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-simple-ref.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-simple-ref.html @@ -1,7 +1,8 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <body> <textarea class='ref'>foo</textarea> + <textarea class='ref'>bar</textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-simple.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-simple.html @@ -1,7 +1,8 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <body> <textarea placeholder='foo'></textarea> + <textarea placeholder='bar'></textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-value-ref.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-value-ref.html @@ -1,6 +1,7 @@ <!DOCTYPE html> <html> <body> <textarea>bar</textarea> + <textarea>foo</textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-value-reset.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-value-reset.html @@ -1,17 +1,19 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('t').value = 'bar'; + document.getElementById('moz').value = 'foo'; document.forms[0].reset(); document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <form> <textarea id='t' placeholder='foo'></textarea> + <textarea id='moz' placeholder='bar'></textarea> </form> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-value-set.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-value-set.html @@ -1,14 +1,16 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('t').value = 'bar'; + document.getElementById('moz').value = 'foo'; document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <textarea id='t' placeholder='foo'></textarea> + <textarea id='moz' placeholder='bar'></textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-value-unset.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-value-unset.html @@ -1,14 +1,16 @@ <!DOCTYPE html> <html class='reftest-wait'> <link rel='stylesheet' type='text/css' href='style.css'> <script> function loadHandler() { document.getElementById('t').value = ''; + document.getElementById('moz').value = ''; document.documentElement.className = ''; } </script> <body onload='loadHandler();'> <textarea id='t' placeholder='foo'>bar</textarea> + <textarea id='moz' placeholder='bar'>foo</textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/placeholder-value.html +++ b/layout/reftests/css-placeholder/textarea/placeholder-value.html @@ -1,7 +1,8 @@ <!DOCTYPE html> <html> <link rel='stylesheet' type='text/css' href='style.css'> <body> <textarea placeholder='foo'>bar</textarea> + <textarea placeholder='bar'>foo</textarea> </body> </html>
--- a/layout/reftests/css-placeholder/textarea/style-shown.css +++ b/layout/reftests/css-placeholder/textarea/style-shown.css @@ -1,9 +1,9 @@ -textarea::-moz-placeholder { +textarea::placeholder { color: green; opacity: 1.0; } textarea:placeholder-shown { -moz-appearance: none; color: -moz-FieldText; background-color: red;
--- a/layout/reftests/css-placeholder/textarea/style.css +++ b/layout/reftests/css-placeholder/textarea/style.css @@ -1,5 +1,6 @@ textarea::-moz-placeholder, +textarea::placeholder, textarea.ref { color: green; opacity: 1.0; }
--- a/layout/reftests/css-placeholder/textarea/textarea-ref.html +++ b/layout/reftests/css-placeholder/textarea/textarea-ref.html @@ -1,6 +1,7 @@ <!DOCTYPE html> <html> <body> <textarea></textarea> + <textarea></textarea> </body> </html>
--- a/layout/reftests/forms/input/number/number-placeholder-ref.html +++ b/layout/reftests/forms/input/number/number-placeholder-ref.html @@ -1,19 +1,17 @@ <!DOCTYPE html> <html> <head> <style> input { width: 15em; -} - -input[type=text]::-moz-placeholder { color: blue; + font-style: italic; } </style> </head> <body> - <input type="text" placeholder="placeholder"> + <input type="number" value="123"> </body> </html>
--- a/layout/reftests/forms/input/number/number-placeholder.html +++ b/layout/reftests/forms/input/number/number-placeholder.html @@ -1,20 +1,24 @@ <!DOCTYPE html> <html> <head> <style> input { width: 15em; - -moz-appearance: textfield; +} + +input[type=number]::placeholder { + color: blue; + opacity: 1.0; } input[type=number]::-moz-placeholder { - color: blue; + font-style: italic; + opacity: 1.0; } - </style> </head> <body> - <input type="number" placeholder="placeholder"> + <input type="number" placeholder="123"> </body> </html>
--- a/layout/reftests/forms/placeholder/placeholder-style.css +++ b/layout/reftests/forms/placeholder/placeholder-style.css @@ -1,10 +1,10 @@ textarea.placeholder, input.placeholder, -input::-moz-placeholder, -textarea::-moz-placeholder { +input::placeholder, +textarea::placeholder { /* Unfortunately, we can't style the <input> or <textarea> text so we can't set it to opacity: 0.54; which means we have to revert the style to something that doesn't include an opacity. */ color: GrayText; opacity: 1.0; }
--- a/layout/reftests/text-overflow/anonymous-block.html +++ b/layout/reftests/text-overflow/anonymous-block.html @@ -43,21 +43,21 @@ span { } .t1 {width:6em;} .t2 {width:2em;} .t3 {width:25em;} .t4 {width:17.5em;} input { font-family:DejaVuSansMono; } -input::-moz-placeholder { +input::placeholder { overflow:hidden; text-overflow:ellipsis; } -input.t5::-moz-placeholder { +input.t5::placeholder { text-overflow:"X"; } </style> </head><body>
--- a/layout/style/crashtests/989965-1.html +++ b/layout/style/crashtests/989965-1.html @@ -1,9 +1,9 @@ <!DOCTYPE html> <body> <style> -::-moz-placeholder { color: red; } -::-moz-placeholder:focus { color: green; } +::placeholder { color: red; } +::placeholder:focus { color: green; } </style> <script> -window.getComputedStyle(document.body, "::-moz-placeholder").color; +window.getComputedStyle(document.body, "::placeholder").color; </script>
--- a/layout/style/nsCSSParser.cpp +++ b/layout/style/nsCSSParser.cpp @@ -5956,19 +5956,20 @@ CSSParserImpl::ParsePseudoSelector(int32 if (nsCSSAnonBoxes::IsNonElement(pseudo)) { // Non-element anonymous boxes should not match any rule. REPORT_UNEXPECTED_TOKEN(PEPseudoSelUnknown); UngetToken(); return eSelectorParsingStatus_Error; } - // We currently allow :-moz-placeholder and ::-moz-placeholder. We have to - // be a bit stricter regarding the pseudo-element parsing rules. - if (pseudoElementType == CSSPseudoElementType::mozPlaceholder && + // We currently allow :-moz-placeholder and ::-moz-placeholder and + // ::placeholder. We have to be a bit stricter regarding the + // pseudo-element parsing rules. + if (pseudoElementType == CSSPseudoElementType::placeholder && pseudoClassType == CSSPseudoClassType::mozPlaceholder) { if (parsingPseudoElement) { pseudoClassType = CSSPseudoClassType::NotPseudo; } else { pseudoElementType = CSSPseudoElementType::NotPseudo; } }
--- a/layout/style/nsCSSProps.h +++ b/layout/style/nsCSSProps.h @@ -199,17 +199,17 @@ static_assert((CSS_PROPERTY_PARSE_PROPER #define CSS_PROPERTY_HASHLESS_COLOR_QUIRK (1<<15) // Does this property support the unitless length quirk in quirks mode? #define CSS_PROPERTY_UNITLESS_LENGTH_QUIRK (1<<16) // Is this property (which must be a shorthand) really an alias? #define CSS_PROPERTY_IS_ALIAS (1<<17) -// Does the property apply to ::-moz-placeholder? +// Does the property apply to ::placeholder? #define CSS_PROPERTY_APPLIES_TO_PLACEHOLDER (1<<18) // This property is allowed in an @page rule. #define CSS_PROPERTY_APPLIES_TO_PAGE_RULE (1<<19) // This property's getComputedStyle implementation requires layout to be // flushed. #define CSS_PROPERTY_GETCS_NEEDS_LAYOUT_FLUSH (1<<20)
--- a/layout/style/nsCSSPseudoElementList.h +++ b/layout/style/nsCSSPseudoElementList.h @@ -75,11 +75,13 @@ CSS_PSEUDO_ELEMENT(mozRangeTrack, ":-moz CSS_PSEUDO_ELEMENT(mozRangeProgress, ":-moz-range-progress", CSS_PSEUDO_ELEMENT_SUPPORTS_USER_ACTION_STATE) CSS_PSEUDO_ELEMENT(mozRangeThumb, ":-moz-range-thumb", CSS_PSEUDO_ELEMENT_SUPPORTS_USER_ACTION_STATE) CSS_PSEUDO_ELEMENT(mozMeterBar, ":-moz-meter-bar", CSS_PSEUDO_ELEMENT_SUPPORTS_USER_ACTION_STATE) CSS_PSEUDO_ELEMENT(mozPlaceholder, ":-moz-placeholder", CSS_PSEUDO_ELEMENT_SUPPORTS_USER_ACTION_STATE) +CSS_PSEUDO_ELEMENT(placeholder, ":placeholder", + CSS_PSEUDO_ELEMENT_SUPPORTS_USER_ACTION_STATE) CSS_PSEUDO_ELEMENT(mozColorSwatch, ":-moz-color-swatch", CSS_PSEUDO_ELEMENT_SUPPORTS_STYLE_ATTRIBUTE | CSS_PSEUDO_ELEMENT_SUPPORTS_USER_ACTION_STATE)
--- a/layout/style/nsCSSPseudoElements.cpp +++ b/layout/style/nsCSSPseudoElements.cpp @@ -78,16 +78,20 @@ nsCSSPseudoElements::IsCSS2PseudoElement /* static */ CSSPseudoElementType nsCSSPseudoElements::GetPseudoType(nsIAtom *aAtom, EnabledState aEnabledState) { for (CSSPseudoElementTypeBase i = 0; i < ArrayLength(CSSPseudoElements_info); ++i) { if (*CSSPseudoElements_info[i].mAtom == aAtom) { auto type = static_cast<Type>(i); + // ::moz-placeholder is an alias for ::placeholder + if (type == CSSPseudoElementType::mozPlaceholder) { + type = CSSPseudoElementType::placeholder; + } return IsEnabled(type, aEnabledState) ? type : Type::NotPseudo; } } if (nsCSSAnonBoxes::IsAnonBox(aAtom)) { #ifdef MOZ_XUL if (nsCSSAnonBoxes::IsTreePseudoElement(aAtom)) { return Type::XULTree;
--- a/layout/style/nsCSSRuleProcessor.cpp +++ b/layout/style/nsCSSRuleProcessor.cpp @@ -2529,19 +2529,19 @@ void ContentEnumFunc(const RuleValue& va nsCSSSelector* selector = aSelector; if (selector->IsPseudoElement()) { PseudoElementRuleProcessorData* pdata = static_cast<PseudoElementRuleProcessorData*>(data); if (!pdata->mPseudoElement && selector->mPseudoClassList) { // We can get here when calling getComputedStyle(aElt, aPseudo) if: // // * aPseudo is a pseudo-element that supports a user action - // pseudo-class, like "::-moz-placeholder"; + // pseudo-class, like "::placeholder"; // * there is a style rule that uses a pseudo-class on this - // pseudo-element in the document, like ::-moz-placeholder:hover; and + // pseudo-element in the document, like ::placeholder:hover; and // * aElt does not have such a pseudo-element. // // We know that the selector can't match, since there is no element for // the user action pseudo-class to match against. return; } if (!StateSelectorMatches(pdata->mPseudoElement, aSelector, nodeContext, data->mTreeMatchContext,
--- a/layout/style/nsRuleNode.cpp +++ b/layout/style/nsRuleNode.cpp @@ -2214,17 +2214,17 @@ GetPseudoRestriction(nsStyleContext *aCo // This needs to match nsStyleSet::WalkRestrictionRule. uint32_t pseudoRestriction = 0; nsIAtom *pseudoType = aContext->GetPseudo(); if (pseudoType) { if (pseudoType == nsCSSPseudoElements::firstLetter) { pseudoRestriction = CSS_PROPERTY_APPLIES_TO_FIRST_LETTER; } else if (pseudoType == nsCSSPseudoElements::firstLine) { pseudoRestriction = CSS_PROPERTY_APPLIES_TO_FIRST_LINE; - } else if (pseudoType == nsCSSPseudoElements::mozPlaceholder) { + } else if (pseudoType == nsCSSPseudoElements::placeholder) { pseudoRestriction = CSS_PROPERTY_APPLIES_TO_PLACEHOLDER; } } return pseudoRestriction; } static void UnsetPropertiesWithoutFlags(const nsStyleStructID aSID,
--- a/layout/style/nsStyleSet.cpp +++ b/layout/style/nsStyleSet.cpp @@ -1800,17 +1800,17 @@ nsStyleSet::WalkRestrictionRule(CSSPseud nsRuleWalker* aRuleWalker) { // This needs to match GetPseudoRestriction in nsRuleNode.cpp. aRuleWalker->SetLevel(SheetType::Agent, false, false); if (aPseudoType == CSSPseudoElementType::firstLetter) aRuleWalker->Forward(mFirstLetterRule); else if (aPseudoType == CSSPseudoElementType::firstLine) aRuleWalker->Forward(mFirstLineRule); - else if (aPseudoType == CSSPseudoElementType::mozPlaceholder) + else if (aPseudoType == CSSPseudoElementType::placeholder) aRuleWalker->Forward(mPlaceholderRule); } void nsStyleSet::WalkDisableTextZoomRule(Element* aElement, nsRuleWalker* aRuleWalker) { aRuleWalker->SetLevel(SheetType::Agent, false, false); if (aElement->IsSVGElement(nsGkAtoms::text))
--- a/layout/style/res/forms.css +++ b/layout/style/res/forms.css @@ -97,17 +97,17 @@ input { -moz-binding: url("chrome://global/content/platformHTMLBindings.xml#inputFields"); text-indent: 0; -moz-user-select: text; text-shadow: none; overflow-clip-box: content-box; } input > .anonymous-div, -input::-moz-placeholder { +input::placeholder { word-wrap: normal !important; /* Make the line-height equal to the available height */ line-height: -moz-block-height; } @-moz-document url-prefix(chrome://) { input.uri-element-right-align:-moz-locale-dir(rtl) { direction: ltr !important; @@ -151,47 +151,47 @@ textarea { } textarea > scrollbar { cursor: default; } textarea > .anonymous-div, input > .anonymous-div, -input::-moz-placeholder, -textarea::-moz-placeholder { +input::placeholder, +textarea::placeholder { overflow: auto; border: 0px !important; padding: inherit !important; margin: 0px; text-decoration: inherit; text-decoration-color: inherit; text-decoration-style: inherit; display: inline-block; ime-mode: inherit; resize: inherit; -moz-control-character-visibility: visible; overflow-clip-box: inherit; } input > .anonymous-div, -input::-moz-placeholder { +input::placeholder { white-space: pre; } input > .anonymous-div.wrap { white-space: pre-wrap; } textarea > .anonymous-div.inherit-overflow, input > .anonymous-div.inherit-overflow { overflow: inherit; } -input::-moz-placeholder, -textarea::-moz-placeholder { +input::placeholder, +textarea::placeholder { /* * Changing display to inline can leads to broken behaviour and will assert. */ display: inline-block !important; /* * Changing resize would display a broken behaviour and will assert. */ @@ -203,17 +203,17 @@ textarea::-moz-placeholder { * The placeholder should be ignored by pointer otherwise, we might have some * unexpected behavior like the resize handle not being selectable. 
*/ pointer-events: none !important; opacity: 0.54; } -textarea::-moz-placeholder { +textarea::placeholder { white-space: pre-wrap !important; } input:-moz-read-write, textarea:-moz-read-write { -moz-user-modify: read-write !important; } @@ -770,18 +770,18 @@ input[type="submit"]:disabled { * <select>: * inherit into the ':-moz-display-comboboxcontrol-frame' pseudo-element and * the <optgroup>'s ':before' pseudo-element, which is where the label of * the <optgroup> gets displayed. The <option>s don't use anonymous boxes, * so they need no special rules. */ textarea > .anonymous-div, input > .anonymous-div, -input::-moz-placeholder, -textarea::-moz-placeholder, +input::placeholder, +textarea::placeholder, *|*::-moz-button-content, *|*::-moz-display-comboboxcontrol-frame, optgroup:before { unicode-bidi: inherit; text-overflow: inherit; } /**
--- a/layout/style/test/test_pseudoelement_state.html +++ b/layout/style/test/test_pseudoelement_state.html @@ -70,17 +70,17 @@ var gTests = [ { markup: '<input type="range" value="50" min="0" max="100">', pseudoelement: '::-moz-range-thumb', common_style: 'body input { -moz-appearance: none; } input::-moz-range-thumb { background: black; }', hover_test_style: 'body input::-moz-range-thumb:hover { background: green; }', hover_reference_style: 'body input::-moz-range-thumb { background: green; }', active_test_style: 'body input::-moz-range-thumb:active { background: lime; }', active_reference_style: 'body input::-moz-range-thumb { background: lime; }' }, - // ::-moz-placeholder can't be tested, since the UA style sheet sets it to + // ::placeholder can't be tested, since the UA style sheet sets it to // be pointer-events:none. ]; function countPixelDifferences(aCanvas1, aCanvas2) { var ctx1 = aCanvas1.getContext("2d"); var ctx2 = aCanvas2.getContext("2d"); var data1 = ctx1.getImageData(0, 0, aCanvas1.width, aCanvas1.height); var data2 = ctx2.getImageData(0, 0, aCanvas2.width, aCanvas2.height);
--- a/layout/style/test/test_selectors.html +++ b/layout/style/test/test_selectors.html @@ -900,16 +900,18 @@ function run() { test_selector_in_html(".a > .b ~ .nomatch", '<div class="a"><div class="b"></div></div><div class="nomatch"></div>', emptyset, classset(["a", "b", "nomatch"])); // Test serialization of pseudo-elements. should_serialize_to("p::first-letter", "p::first-letter"); should_serialize_to("p:first-letter", "p::first-letter"); should_serialize_to("div>p:first-letter", "div > p::first-letter"); should_serialize_to("span +div:first-line", "span + div::first-line"); + should_serialize_to("input::placeholder", "input::placeholder"); + should_serialize_to("input:placeholder-shown", "input:placeholder-shown"); // Test serialization of non CSS2 pseudo-element. should_serialize_to("input::-moz-placeholder", "input::-moz-placeholder"); // Test default namespaces, including inside :not(). var html_default_ns = "@namespace url(http://www.w3.org/1999/xhtml);"; var html_ns = "@namespace html url(http://www.w3.org/1999/xhtml);"; var xul_default_ns = "@namespace url(http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul);";
--- a/media/libpng/CHANGES +++ b/media/libpng/CHANGES @@ -5670,16 +5670,44 @@ Version 1.6.24rc02 [August 1, 2016] Version 1.6.24rc03 [August 2, 2016] Conditionally compile ARM_NEON headers in pngpriv.h Updated contrib/intel/intel_sse.patch Version 1.6.24[August 4, 2016] No changes. +Version 1.6.25beta01 [August 12, 2016] + Reject oversized iCCP profile immediately. + Cleaned up PNG_DEBUG compile of pngtest.c. + Conditionally compile png_inflate(). + +Version 1.6.25beta02 [August 18, 2016] + Don't install pngcp; it conflicts with pngcp in the pngtools package. + Minor editing of INSTALL, (whitespace, added copyright line) + +Version 1.6.25rc01 [August 24, 2016] + No changes. + +Version 1.6.25rc02 [August 29, 2016] + Added MIPS support (Mandar Sahastrabuddhe <Mandar.Sahastrabuddhe@imgtec.com>). + Only the UP filter is currently implemented. + +Version 1.6.25rc03 [August 29, 2016] + Rebased contrib/intel/intel_sse.patch after the MIPS implementation. + +Version 1.6.25rc04 [August 30, 2016] + Added MIPS support for SUB, AVG, and PAETH filters (Mandar Sahastrabuddhe). + +Version 1.6.25rc05 [August 30, 2016] + Rebased contrib/intel/intel_sse.patch after the MIPS implementation update.. + +Version 1.6.25 [September 1, 2016] + No changes. + Send comments/corrections/commendations to png-mng-implement at lists.sf.net (subscription required; visit https://lists.sourceforge.net/lists/listinfo/png-mng-implement to subscribe) or to glennrp at users.sourceforge.net Glenn R-P #endif
--- a/media/libpng/LICENSE +++ b/media/libpng/LICENSE @@ -3,31 +3,35 @@ This copy of the libpng notices is provi any discrepancy between this copy and the notices in the file png.h that is included in the libpng distribution, the latter shall prevail. COPYRIGHT NOTICE, DISCLAIMER, and LICENSE: If you modify libpng you may insert additional notices immediately following this sentence. +pnglibconf.h and moz.build are distributed under the Mozilla Public License, +v. 2.0. If a copy of the MPL was not distributed with this file, You can +obtain one at http://mozilla.org/MPL/2.0/. + This modified version of libpng code adds animated PNG support and is released under the libpng license described below. The modifications are Copyright (c) 2006-2007 Andrew Smith, Copyright (c) 2008-2016 Max Stepin, and are delimited by "#ifdef PNG_APNG_SUPPORTED / #endif" directives surrounding them in the modified libpng source files. This modified version of libpng code adds Intel-SSE support and is released under the libpng license described below. The modifications are Copyright (c) 2016 Google, Inc., and consist of the source files in the "sse2" subdirectory and added code in pngpriv.h delimited by #ifndef PNG_INTEL_SSE_OPT / #endif directives. This code is released under the libpng license. -libpng versions 1.0.7, July 1, 2000 through 1.6.24, August 4, 2016 are +libpng versions 1.0.7, July 1, 2000 through 1.6.25, September 1, 2016 are Copyright (c) 2000-2002, 2004, 2006-2016 Glenn Randers-Pehrson, are derived from libpng-1.0.6, and are distributed according to the same disclaimer and license as libpng-1.0.6 with the following individuals added to the list of Contributing Authors: Simon-Pierre Cadieux Eric S. 
Raymond Mans Rullgard @@ -134,9 +138,9 @@ The Copyright owner believes that the Ex Number (ECCN) for libpng is EAR99, which means not subject to export controls or International Traffic in Arms Regulations (ITAR) because it is open source, publicly available software, that does not contain any encryption software. See the EAR, paragraphs 734.3(b)(3) and 734.7(b). Glenn Randers-Pehrson glennrp at users.sourceforge.net -August 4, 2016 +September 1, 2016
--- a/media/libpng/MOZCHANGES +++ b/media/libpng/MOZCHANGES @@ -1,11 +1,13 @@ Changes made to pristine libpng source by mozilla.org developers. +2016/09/01 -- Synced with libpng-1.6.25 (bug #1299590). + 2016/08/11 -- Enabled SSE2 support (bug #1276127). 2016/08/06 -- Synced with libpng-1.6.24 (bug #1291986). 2016/06/09 -- Synced with libpng-1.6.23 (bug #1275901). 2016/01/16 -- Synced with libpng-1.6.21 (bug #1230757).
--- a/media/libpng/README +++ b/media/libpng/README @@ -1,9 +1,9 @@ -README for libpng version 1.6.24 - August 4, 2016 (shared library 16.0) +README for libpng version 1.6.25 - September 1, 2016 (shared library 16.0) See the note about version numbers near the top of png.h See INSTALL for instructions on how to install libpng. Libpng comes in several distribution formats. Get libpng-*.tar.gz or libpng-*.tar.xz or if you want UNIX-style line endings in the text files, or lpng*.7z or lpng*.zip if you want DOS-style line endings. @@ -175,25 +175,28 @@ Files in this distribution: pngtest.png => Library test sample image pngtrans.c => Common data transformation functions pngwio.c => Lowest-level write I/O functions pngwrite.c => High-level write functions pngwtran.c => Write data transformations pngwutil.c => Write utility functions arm => Contains optimized code for the ARM platform contrib => Contributions + arm-neon => Optimized code for ARM-NEON platform examples => Example programs gregbook => source code for PNG reading and writing, from Greg Roelofs' "PNG: The Definitive Guide", O'Reilly, 1999 + intel => Optimized code for INTEL-SSE2 platform libtests => Test programs pngminim => Minimal decoder, encoder, and progressive decoder programs demonstrating use of pngusr.dfa pngminus => Simple pnm2png and png2pnm programs pngsuite => Test images + testpngs tools => Various tools visupng => Contains a MSVC workspace for VisualPng projects => Contains project files and workspaces for building a DLL owatcom => Contains a WATCOM project for building libpng visualc71 => Contains a Microsoft Visual C++ (MSVC) workspace for building libpng and zlib vstudio => Contains a Microsoft Visual C++ (MSVC)
--- a/media/libpng/apng.patch +++ b/media/libpng/apng.patch @@ -9,17 +9,17 @@ Index: LICENSE +This modified version of libpng code adds animated PNG support and is +released under the libpng license described below. The modifications are +Copyright (c) 2006-2007 Andrew Smith, Copyright (c) 2008-2016 Max Stepin, +and are delimited by "#ifdef PNG_APNG_SUPPORTED / #endif" directives +surrounding them in the modified libpng source files. + This code is released under the libpng license. - libpng versions 1.0.7, July 1, 2000 through 1.6.24, August 4, 2016 are + libpng versions 1.0.7, July 1, 2000 through 1.6.25, September 1, 2016 are Index: pngread.c =================================================================== --- pngread.c +++ pngread.c @@ -161,6 +161,9 @@ else if (chunk_name == png_IDAT) { @@ -294,29 +294,29 @@ Index: pngget.c Index: png.c =================================================================== --- png.c +++ png.c @@ -775,17 +775,21 @@ #else # ifdef __STDC__ return PNG_STRING_NEWLINE \ -- "libpng version 1.6.24 - August 4, 2016" PNG_STRING_NEWLINE \ -+ "libpng version 1.6.24+apng - August 4, 2016" PNG_STRING_NEWLINE \ +- "libpng version 1.6.25 - September 1, 2016" PNG_STRING_NEWLINE \ ++ "libpng version 1.6.25+apng - September 1, 2016" PNG_STRING_NEWLINE \ "Copyright (c) 1998-2002,2004,2006-2016 Glenn Randers-Pehrson" \ PNG_STRING_NEWLINE \ "Copyright (c) 1996-1997 Andreas Dilger" PNG_STRING_NEWLINE \ "Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc." 
\ - PNG_STRING_NEWLINE; + PNG_STRING_NEWLINE \ + "Portions Copyright (c) 2006-2007 Andrew Smith" PNG_STRING_NEWLINE \ + "Portions Copyright (c) 2008-2016 Max Stepin" PNG_STRING_NEWLINE ; # else -- return "libpng version 1.6.24 - August 4, 2016\ -+ return "libpng version 1.6.24+apng - August 4, 2016\ +- return "libpng version 1.6.25 - September 1, 2016\ ++ return "libpng version 1.6.25+apng - September 1, 2016\ Copyright (c) 1998-2002,2004,2006-2016 Glenn Randers-Pehrson\ Copyright (c) 1996-1997 Andreas Dilger\ - Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc."; + Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.\ + Portions Copyright (c) 2006-2007 Andrew Smith\ + Portions Copyright (c) 2008-2016 Max Stepin"; # endif #endif @@ -333,40 +333,40 @@ Index: png.h + * released under the libpng license described below. The modifications are + * Copyright (c) 2006-2007 Andrew Smith, Copyright (c) 2008-2016 Max Stepin, + * and are delimited by "#ifdef PNG_APNG_SUPPORTED / #endif" directives + * surrounding them in the modified libpng source files. + * * This code is released under the libpng license. 
* * Some files in the "contrib" directory and some configure-generated -@@ -313,8 +319,9 @@ +@@ -314,8 +320,9 @@ */ /* Version information for png.h - this should match the version in png.c */ --#define PNG_LIBPNG_VER_STRING "1.6.24" --#define PNG_HEADER_VERSION_STRING " libpng version 1.6.24 - August 4, 2016\n" -+#define PNG_LIBPNG_VER_STRING "1.6.24+apng" +-#define PNG_LIBPNG_VER_STRING "1.6.25" +-#define PNG_HEADER_VERSION_STRING " libpng version 1.6.25 - September 1, 2016\n" ++#define PNG_LIBPNG_VER_STRING "1.6.25+apng" +#define PNG_HEADER_VERSION_STRING \ -+ " libpng version 1.6.24+apng - August 4, 2016\n" ++ " libpng version 1.6.25+apng - September 1, 2016\n" #define PNG_LIBPNG_VER_SONUM 16 #define PNG_LIBPNG_VER_DLLNUM 16 -@@ -365,6 +372,10 @@ +@@ -366,6 +373,10 @@ # include "pnglibconf.h" #endif +#define PNG_APNG_SUPPORTED +#define PNG_READ_APNG_SUPPORTED +#define PNG_WRITE_APNG_SUPPORTED + #ifndef PNG_VERSION_INFO_ONLY /* Machine specific configuration. */ # include "pngconf.h" -@@ -460,6 +471,17 @@ +@@ -461,6 +472,17 @@ * See pngconf.h for base types that vary by machine/system */ +#ifdef PNG_APNG_SUPPORTED +/* dispose_op flags from inside fcTL */ +#define PNG_DISPOSE_OP_NONE 0x00 +#define PNG_DISPOSE_OP_BACKGROUND 0x01 +#define PNG_DISPOSE_OP_PREVIOUS 0x02 @@ -374,39 +374,39 @@ Index: png.h +/* blend_op flags from inside fcTL */ +#define PNG_BLEND_OP_SOURCE 0x00 +#define PNG_BLEND_OP_OVER 0x01 +#endif /* APNG */ + /* This triggers a compiler error in png.c, if png.c and png.h * do not agree upon the version number. */ -@@ -780,6 +802,10 @@ +@@ -781,6 +803,10 @@ #define PNG_INFO_sPLT 0x2000U /* ESR, 1.0.6 */ #define PNG_INFO_sCAL 0x4000U /* ESR, 1.0.6 */ #define PNG_INFO_IDAT 0x8000U /* ESR, 1.0.6 */ +#ifdef PNG_APNG_SUPPORTED +#define PNG_INFO_acTL 0x10000U +#define PNG_INFO_fcTL 0x20000U +#endif /* This is used for the transformation routines, as some of them * change these values for the row. 
It also should enable using -@@ -817,6 +843,10 @@ +@@ -818,6 +844,10 @@ #ifdef PNG_PROGRESSIVE_READ_SUPPORTED typedef PNG_CALLBACK(void, *png_progressive_info_ptr, (png_structp, png_infop)); typedef PNG_CALLBACK(void, *png_progressive_end_ptr, (png_structp, png_infop)); +#ifdef PNG_APNG_SUPPORTED +typedef PNG_CALLBACK(void, *png_progressive_frame_ptr, (png_structp, + png_uint_32)); +#endif /* The following callback receives png_uint_32 row_number, int pass for the * png_bytep data of the row. When transforming an interlaced image the -@@ -3242,6 +3272,75 @@ +@@ -3246,6 +3276,75 @@ * END OF HARDWARE AND SOFTWARE OPTIONS ******************************************************************************/ +#ifdef PNG_APNG_SUPPORTED +PNG_EXPORT(246, png_uint_32, png_get_acTL, (png_structp png_ptr, + png_infop info_ptr, png_uint_32 *num_frames, png_uint_32 *num_plays)); + +PNG_EXPORT(247, png_uint_32, png_set_acTL, (png_structp png_ptr, @@ -472,61 +472,61 @@ Index: png.h +PNG_EXPORT(265, void, png_write_frame_tail, (png_structp png_ptr, + png_infop info_ptr)); +#endif /* WRITE_APNG */ +#endif /* APNG */ + /* Maintainer: Put new public prototypes here ^, in libpng.3, in project * defs, and in scripts/symbols.def. */ -@@ -3250,7 +3349,11 @@ +@@ -3254,7 +3353,11 @@ * one to use is one more than this.) 
*/ #ifdef PNG_EXPORT_LAST_ORDINAL +#ifdef PNG_APNG_SUPPORTED + PNG_EXPORT_LAST_ORDINAL(265); +#else PNG_EXPORT_LAST_ORDINAL(245); +#endif /* APNG */ #endif #ifdef __cplusplus Index: pngpriv.h =================================================================== --- pngpriv.h +++ pngpriv.h -@@ -537,6 +537,10 @@ +@@ -566,6 +566,10 @@ #define PNG_HAVE_CHUNK_AFTER_IDAT 0x2000 /* Have another chunk after IDAT */ /* 0x4000 (unused) */ #define PNG_IS_READ_STRUCT 0x8000 /* Else is a write struct */ +#ifdef PNG_APNG_SUPPORTED +#define PNG_HAVE_acTL 0x10000 +#define PNG_HAVE_fcTL 0x20000 +#endif /* Flags for the transformations the PNG library does on the image data */ #define PNG_BGR 0x0001 -@@ -754,6 +758,16 @@ +@@ -783,6 +787,16 @@ #define png_tRNS PNG_U32(116, 82, 78, 83) #define png_zTXt PNG_U32(122, 84, 88, 116) +#ifdef PNG_APNG_SUPPORTED +#define png_acTL PNG_U32( 97, 99, 84, 76) +#define png_fcTL PNG_U32(102, 99, 84, 76) +#define png_fdAT PNG_U32(102, 100, 65, 84) + +/* For png_struct.apng_flags: */ +#define PNG_FIRST_FRAME_HIDDEN 0x0001 +#define PNG_APNG_APP 0x0002 +#endif + /* The following will work on (signed char*) strings, whereas the get_uint_32 * macro will fail on top-bit-set values because of the sign extension. */ -@@ -1443,6 +1457,49 @@ +@@ -1489,6 +1503,49 @@ #endif /* PROGRESSIVE_READ */ +#ifdef PNG_APNG_SUPPORTED +PNG_INTERNAL_FUNCTION(void,png_ensure_fcTL_is_valid,(png_structp png_ptr, + png_uint_32 width, png_uint_32 height, + png_uint_32 x_offset, png_uint_32 y_offset, + png_uint_16 delay_num, png_uint_16 delay_den,
--- a/media/libpng/arm/filter_neon.S +++ b/media/libpng/arm/filter_neon.S @@ -5,16 +5,22 @@ * Written by Mans Rullgard, 2011. * Last changed in libpng 1.6.16 [December 22, 2014] * * This code is released under the libpng license. * For conditions of distribution and use, see the disclaimer * and license in png.h */ +/* These are required because Mozilla's moz.build system doesn't pass + * -DDefined macros to the assembler. + */ +#define PNG_READ_SUPPORTED +#define MOZ_PNG_USE_ARM_NEON + /* This is required to get the symbol renames, which are #defines, and the * definitions (or not) of PNG_ARM_NEON_OPT and PNG_ARM_NEON_IMPLEMENTATION. */ #define PNG_VERSION_INFO_ONLY #include "../pngpriv.h" #if defined(__linux__) && defined(__ELF__) .section .note.GNU-stack,"",%progbits /* mark stack as non-executable */
--- a/media/libpng/libpng-manual.txt +++ b/media/libpng/libpng-manual.txt @@ -1,22 +1,22 @@ libpng-manual.txt - A description on how to use and modify libpng - libpng version 1.6.24 - August 4, 2016 + libpng version 1.6.25 - September 1, 2016 Updated and distributed by Glenn Randers-Pehrson <glennrp at users.sourceforge.net> Copyright (c) 1998-2016 Glenn Randers-Pehrson This document is released under the libpng license. For conditions of distribution and use, see the disclaimer and license in png.h Based on: - libpng versions 0.97, January 1998, through 1.6.24 - August 4, 2016 + libpng versions 0.97, January 1998, through 1.6.25 - September 1, 2016 Updated and distributed by Glenn Randers-Pehrson Copyright (c) 1998-2016 Glenn Randers-Pehrson libpng 1.0 beta 6 - version 0.96 - May 28, 1997 Updated and distributed by Andreas Dilger Copyright (c) 1996, 1997 Andreas Dilger libpng 1.0 beta 2 - version 0.88 - January 26, 1996 @@ -5345,17 +5345,17 @@ Lines do not exceed 80 characters. Other rules can be inferred by inspecting the libpng source. XVI. Y2K Compliance in libpng Since the PNG Development group is an ad-hoc body, we can't make an official declaration. This is your unofficial assurance that libpng from version 0.71 and -upward through 1.6.24 are Y2K compliant. It is my belief that earlier +upward through 1.6.25 are Y2K compliant. It is my belief that earlier versions were also Y2K compliant. Libpng only has two year fields. One is a 2-byte unsigned integer that will hold years up to 65535. The other, which is deprecated, holds the date in text format, and will hold years up to 9999. The integer is "png_uint_16 year" in png_time_struct.
--- a/media/libpng/moz.build +++ b/media/libpng/moz.build @@ -23,28 +23,28 @@ UNIFIED_SOURCES += [ 'pngset.c', 'pngtrans.c', 'pngwio.c', 'pngwrite.c', 'pngwutil.c' ] if CONFIG['CPU_ARCH'] == 'arm': - DEFINES['MOZ_PNG_HAVE_ARM_NEON'] = True + DEFINES['MOZ_PNG_USE_ARM_NEON'] = True UNIFIED_SOURCES += [ 'arm/arm_init.c', 'arm/filter_neon_intrinsics.c' ] SOURCES += [ 'arm/filter_neon.S' ] if CONFIG['INTEL_ARCHITECTURE']: - DEFINES['PNG_INTEL_SSE'] = True + DEFINES['MOZ_PNG_USE_INTEL_SSE'] = True UNIFIED_SOURCES += [ 'sse2/filter_sse2_intrinsics.c', 'sse2/intel_init.c' ] Library('mozpng') FINAL_LIBRARY = 'gkmedias'
--- a/media/libpng/png.c +++ b/media/libpng/png.c @@ -1,25 +1,25 @@ /* png.c - location for general purpose libpng functions * - * Last changed in libpng 1.6.24 [August 4, 2016] - * Copyright (c) 1998-2002,2004,2006-2015 Glenn Randers-Pehrson + * Last changed in libpng 1.6.25 [September 1, 2016] + * Copyright (c) 1998-2002,2004,2006-2016 Glenn Randers-Pehrson * (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) * (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) * * This code is released under the libpng license. * For conditions of distribution and use, see the disclaimer * and license in png.h */ #include "pngpriv.h" /* Generate a compiler error if there is an old png.h in the search path. */ -typedef png_libpng_version_1_6_24 Your_png_h_is_not_version_1_6_24; +typedef png_libpng_version_1_6_25 Your_png_h_is_not_version_1_6_25; /* Tells libpng that we have already handled the first "num_bytes" bytes * of the PNG file signature. If the PNG data is embedded into another * stream we can set num_bytes = 8 so that libpng will not attempt to read * or write any of the magic bytes before it starts on the IHDR. */ #ifdef PNG_READ_SUPPORTED @@ -770,26 +770,26 @@ png_const_charp PNGAPI png_get_copyright(png_const_structrp png_ptr) { PNG_UNUSED(png_ptr) /* Silence compiler warning about unused png_ptr */ #ifdef PNG_STRING_COPYRIGHT return PNG_STRING_COPYRIGHT #else # ifdef __STDC__ return PNG_STRING_NEWLINE \ - "libpng version 1.6.24+apng - August 4, 2016" PNG_STRING_NEWLINE \ + "libpng version 1.6.25+apng - September 1, 2016" PNG_STRING_NEWLINE \ "Copyright (c) 1998-2002,2004,2006-2016 Glenn Randers-Pehrson" \ PNG_STRING_NEWLINE \ "Copyright (c) 1996-1997 Andreas Dilger" PNG_STRING_NEWLINE \ "Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc." 
\ PNG_STRING_NEWLINE \ "Portions Copyright (c) 2006-2007 Andrew Smith" PNG_STRING_NEWLINE \ "Portions Copyright (c) 2008-2016 Max Stepin" PNG_STRING_NEWLINE ; # else - return "libpng version 1.6.24+apng - August 4, 2016\ + return "libpng version 1.6.25+apng - September 1, 2016\ Copyright (c) 1998-2002,2004,2006-2016 Glenn Randers-Pehrson\ Copyright (c) 1996-1997 Andreas Dilger\ Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc.\ Portions Copyright (c) 2006-2007 Andrew Smith\ Portions Copyright (c) 2008-2016 Max Stepin"; # endif #endif } @@ -1930,27 +1930,61 @@ png_colorspace_set_sRGB(png_const_struct /* Encoded value of D50 as an ICC XYZNumber. From the ICC 2010 spec the value * is XYZ(0.9642,1.0,0.8249), which scales to: * * (63189.8112, 65536, 54060.6464) */ static const png_byte D50_nCIEXYZ[12] = { 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d }; -int /* PRIVATE */ -png_icc_check_length(png_const_structrp png_ptr, png_colorspacerp colorspace, +static int /* bool */ +icc_check_length(png_const_structrp png_ptr, png_colorspacerp colorspace, png_const_charp name, png_uint_32 profile_length) { if (profile_length < 132) return png_icc_profile_error(png_ptr, colorspace, name, profile_length, "too short"); return 1; } +#ifdef PNG_READ_iCCP_SUPPORTED +int /* PRIVATE */ +png_icc_check_length(png_const_structrp png_ptr, png_colorspacerp colorspace, + png_const_charp name, png_uint_32 profile_length) +{ + if (!icc_check_length(png_ptr, colorspace, name, profile_length)) + return 0; + + /* This needs to be here because the 'normal' check is in + * png_decompress_chunk, yet this happens after the attempt to + * png_malloc_base the required data. We only need this on read; on write + * the caller supplies the profile buffer so libpng doesn't allocate it. See + * the call to icc_check_length below (the write case). 
+ */ +# ifdef PNG_SET_USER_LIMITS_SUPPORTED + else if (png_ptr->user_chunk_malloc_max > 0 && + png_ptr->user_chunk_malloc_max < profile_length) + return png_icc_profile_error(png_ptr, colorspace, name, profile_length, + "exceeds application limits"); +# elif PNG_USER_CHUNK_MALLOC_MAX > 0 + else if (PNG_USER_CHUNK_MALLOC_MAX < profile_length) + return png_icc_profile_error(png_ptr, colorspace, name, profile_length, + "exceeds libpng limits"); +# else /* !SET_USER_LIMITS */ + /* This will get compiled out on all 32-bit and better systems. */ + else if (PNG_SIZE_MAX < profile_length) + return png_icc_profile_error(png_ptr, colorspace, name, profile_length, + "exceeds system limits"); +# endif /* !SET_USER_LIMITS */ + + return 1; +} +#endif /* READ_iCCP */ + int /* PRIVATE */ png_icc_check_header(png_const_structrp png_ptr, png_colorspacerp colorspace, png_const_charp name, png_uint_32 profile_length, png_const_bytep profile/* first 132 bytes only */, int color_type) { png_uint_32 temp; /* Length check; this cannot be ignored in this code because profile_length @@ -2376,17 +2410,17 @@ png_icc_set_sRGB(png_const_structrp png_ int /* PRIVATE */ png_colorspace_set_ICC(png_const_structrp png_ptr, png_colorspacerp colorspace, png_const_charp name, png_uint_32 profile_length, png_const_bytep profile, int color_type) { if ((colorspace->flags & PNG_COLORSPACE_INVALID) != 0) return 0; - if (png_icc_check_length(png_ptr, colorspace, name, profile_length) != 0 && + if (icc_check_length(png_ptr, colorspace, name, profile_length) != 0 && png_icc_check_header(png_ptr, colorspace, name, profile_length, profile, color_type) != 0 && png_icc_check_tag_table(png_ptr, colorspace, name, profile_length, profile) != 0) { # if defined(PNG_sRGB_SUPPORTED) && PNG_sRGB_PROFILE_CHECKS >= 0 /* If no sRGB support, don't try storing sRGB information */ png_icc_set_sRGB(png_ptr, colorspace, profile, 0);
--- a/media/libpng/png.h +++ b/media/libpng/png.h @@ -1,23 +1,23 @@ /* png.h - header file for PNG reference library * - * libpng version 1.6.24, August 4, 2016 + * libpng version 1.6.25, September 1, 2016 * * Copyright (c) 1998-2002,2004,2006-2016 Glenn Randers-Pehrson * (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) * (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) * * This code is released under the libpng license (See LICENSE, below) * * Authors and maintainers: * libpng versions 0.71, May 1995, through 0.88, January 1996: Guy Schalnat * libpng versions 0.89, June 1996, through 0.96, May 1997: Andreas Dilger - * libpng versions 0.97, January 1998, through 1.6.24, August 4, 2016: + * libpng versions 0.97, January 1998, through 1.6.25, September 1, 2016: * Glenn Randers-Pehrson. * See also "Contributing Authors", below. */ /* * COPYRIGHT NOTICE, DISCLAIMER, and LICENSE: * * If you modify libpng you may insert additional notices immediately following @@ -36,28 +36,29 @@ * #ifndef PNG_INTEL_SSE_OPT / #endif directives. * * This code is released under the libpng license. * * Some files in the "contrib" directory and some configure-generated * files that are distributed with libpng have other copyright owners and * are released under other open source licenses. * - * libpng versions 1.0.7, July 1, 2000 through 1.6.24, August 4, 2016 are + * libpng versions 1.0.7, July 1, 2000 through 1.6.25, September 1, 2016 are * Copyright (c) 2000-2002, 2004, 2006-2016 Glenn Randers-Pehrson, are * derived from libpng-1.0.6, and are distributed according to the same * disclaimer and license as libpng-1.0.6 with the following individuals * added to the list of Contributing Authors: * * Simon-Pierre Cadieux * Eric S. 
Raymond * Mans Rullgard * Cosmin Truta * Gilles Vollant * James Yu + * Mandar Sahastrabuddhe * * and with the following additions to the disclaimer: * * There is no warranty against interference with your enjoyment of the * library or against infringement. There is no warranty that our * efforts or the library will fulfill any of your particular purposes * or needs. This library is provided with all faults, and the entire * risk of satisfactory quality, performance, accuracy, and effort is with @@ -224,17 +225,17 @@ * 1.0.7 1 10007 (still compatible) * ... * 1.0.19 10 10019 10.so.0.19[.0] * ... * 1.2.56 13 10256 12.so.0.56[.0] * ... * 1.5.27 15 10527 15.so.15.27[.0] * ... - * 1.6.24 16 10624 16.so.16.24[.0] + * 1.6.25 16 10625 16.so.16.25[.0] * * Henceforth the source version will match the shared-library major * and minor numbers; the shared-library major version number will be * used for changes in backward compatibility, as it is intended. The * PNG_LIBPNG_VER macro, which is not used within libpng but is available * for applications, is an unsigned integer of the form xyyzz corresponding * to the source version x.y.z (leading zeros in y and z). Beta versions * were given the previous public release number plus a letter, until @@ -252,23 +253,23 @@ * is available as a W3C Recommendation and as an ISO Specification, * <http://www.w3.org/TR/2003/REC-PNG-20031110/ */ /* * Y2K compliance in libpng: * ========================= * - * August 4, 2016 + * September 1, 2016 * * Since the PNG Development group is an ad-hoc body, we can't make * an official declaration. * * This is your unofficial assurance that libpng from version 0.71 and - * upward through 1.6.24 are Y2K compliant. It is my belief that + * upward through 1.6.25 are Y2K compliant. It is my belief that * earlier versions were also Y2K compliant. * * Libpng only has two year fields. One is a 2-byte unsigned integer * that will hold years up to 65535. 
The other, which is deprecated, * holds the date in text format, and will hold years up to 9999. * * The integer is * "png_uint_16 year" in png_time_struct. @@ -320,27 +321,27 @@ * file has been stripped from your copy of libpng, you can find it at * <http://www.libpng.org/pub/png/libpng-manual.txt> * * If you just need to read a PNG file and don't want to read the documentation * skip to the end of this file and read the section entitled 'simplified API'. */ /* Version information for png.h - this should match the version in png.c */ -#define PNG_LIBPNG_VER_STRING "1.6.24+apng" +#define PNG_LIBPNG_VER_STRING "1.6.25+apng" #define PNG_HEADER_VERSION_STRING \ - " libpng version 1.6.24+apng - August 4, 2016\n" + " libpng version 1.6.25+apng - September 1, 2016\n" #define PNG_LIBPNG_VER_SONUM 16 #define PNG_LIBPNG_VER_DLLNUM 16 /* These should match the first 3 components of PNG_LIBPNG_VER_STRING: */ #define PNG_LIBPNG_VER_MAJOR 1 #define PNG_LIBPNG_VER_MINOR 6 -#define PNG_LIBPNG_VER_RELEASE 24 +#define PNG_LIBPNG_VER_RELEASE 25 /* This should match the numeric part of the final component of * PNG_LIBPNG_VER_STRING, omitting any leading zero: */ #define PNG_LIBPNG_VER_BUILD 0 /* Release Status */ @@ -361,17 +362,17 @@ #define PNG_LIBPNG_BUILD_BASE_TYPE PNG_LIBPNG_BUILD_STABLE /* Careful here. At one time, Guy wanted to use 082, but that would be octal. * We must not include leading zeros. * Versions 0.7 through 1.0.0 were in the range 0 to 100 here (only * version 1.0.0 was mis-numbered 100 instead of 10000). From * version 1.0.1 it's xxyyzz, where x=major, y=minor, z=release */ -#define PNG_LIBPNG_VER 10624 /* 1.6.24 */ +#define PNG_LIBPNG_VER 10625 /* 1.6.25 */ /* Library configuration: these options cannot be changed after * the library has been built. 
*/ #ifndef PNGLCONF_H /* If pnglibconf.h is missing, you can * copy scripts/pnglibconf.h.prebuilt to pnglibconf.h */ @@ -486,17 +487,17 @@ extern "C" { /* blend_op flags from inside fcTL */ #define PNG_BLEND_OP_SOURCE 0x00 #define PNG_BLEND_OP_OVER 0x01 #endif /* APNG */ /* This triggers a compiler error in png.c, if png.c and png.h * do not agree upon the version number. */ -typedef char* png_libpng_version_1_6_24; +typedef char* png_libpng_version_1_6_25; /* Basic control structions. Read libpng-manual.txt or libpng.3 for more info. * * png_struct is the cache of information used while reading or writing a single * PNG file. One of these is always required, although the simplified API * (below) hides the creation and destruction of it. */ typedef struct png_struct_def png_struct; @@ -3257,17 +3258,20 @@ PNG_EXPORT(245, int, png_image_write_to_ * selected at run time. */ #ifdef PNG_SET_OPTION_SUPPORTED #ifdef PNG_ARM_NEON_API_SUPPORTED # define PNG_ARM_NEON 0 /* HARDWARE: ARM Neon SIMD instructions supported */ #endif #define PNG_MAXIMUM_INFLATE_WINDOW 2 /* SOFTWARE: force maximum window */ #define PNG_SKIP_sRGB_CHECK_PROFILE 4 /* SOFTWARE: Check ICC profile for sRGB */ -#define PNG_OPTION_NEXT 6 /* Next option - numbers must be even */ +#ifdef PNG_MIPS_MSA_API_SUPPORTED +# define PNG_MIPS_MSA 6 /* HARDWARE: MIPS Msa SIMD instructions supported */ +#endif +#define PNG_OPTION_NEXT 8 /* Next option - numbers must be even */ /* Return values: NOTE: there are four values and 'off' is *not* zero */ #define PNG_OPTION_UNSET 0 /* Unset - defaults to off */ #define PNG_OPTION_INVALID 1 /* Option number out of range */ #define PNG_OPTION_OFF 2 #define PNG_OPTION_ON 3 PNG_EXPORT(244, int, png_set_option, (png_structrp png_ptr, int option,
--- a/media/libpng/pngconf.h +++ b/media/libpng/pngconf.h @@ -1,12 +1,12 @@ /* pngconf.h - machine configurable file for libpng * - * libpng version 1.6.24, August 4, 2016 + * libpng version 1.6.25, September 1, 2016 * * Copyright (c) 1998-2002,2004,2006-2016 Glenn Randers-Pehrson * (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) * (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) * * This code is released under the libpng license. * For conditions of distribution and use, see the disclaimer * and license in png.h
--- a/media/libpng/pnglibconf.h +++ b/media/libpng/pnglibconf.h @@ -38,34 +38,38 @@ #ifdef _MSC_VER /* The PNG_PEDANTIC_WARNINGS (attributes) fail to build with some MSC * compilers; we'll play it safe and disable them for all MSC compilers. */ #define PNG_NO_PEDANTIC_WARNINGS #endif -#undef PNG_ARM_NEON_OPT /* This may have been defined in pngpriv.h */ -#ifdef __ARM_NEON__ -# ifdef MOZ_PNG_HAVE_ARM_NEON -# ifdef MOZ_PNG_HAVE_ARM_NEON_CHECK -# define PNG_ARM_NEON_CHECK_SUPPORTED -# define PNG_ARM_NEON_OPT 1 -# else -# define PNG_ARM_NEON_OPT 2 -# endif -# define PNG_ALIGNED_MEMORY_SUPPORTED - /* Accept the PNG_ARM_NEON_IMPLEMENTATION setting from pngpriv.h. */ -# else -# define PNG_ARM_NEON_OPT 0 -# endif +#ifdef MOZ_PNG_USE_ARM_NEON +# undef PNG_ARM_NEON_OPT /* Let libpng decide */ +# define PNG_ALIGNED_MEMORY_SUPPORTED #else # define PNG_ARM_NEON_OPT 0 #endif +#ifdef MOZ_PNG_USE_MIPS_MSA +# undef PNG_MIPS_MSA_OPT +# define PNG_ALIGNED_MEMORY_SUPPORTED +#else +# define PNG_MIPS_MSA_OPT 0 +#endif + +#ifdef MOZ_PNG_USE_INTEL_SSE +# undef PNG_INTEL_SSE_OPT +# define PNG_INTEL_SSE +# define PNG_ALIGNED_MEMORY_SUPPORTED +#else +# define PNG_INTEL_SSE_OPT 0 +#endif + #define PNG_READ_SUPPORTED #define PNG_PROGRESSIVE_READ_SUPPORTED #define PNG_READ_APNG_SUPPORTED #define PNG_READ_cHRM_SUPPORTED #define PNG_READ_gAMA_SUPPORTED #define PNG_READ_iCCP_SUPPORTED #define PNG_READ_sRGB_SUPPORTED #define PNG_READ_tRNS_SUPPORTED
--- a/media/libpng/pngmem.c +++ b/media/libpng/pngmem.c @@ -1,12 +1,12 @@ /* pngmem.c - stub functions for memory allocation * - * Last changed in libpng 1.6.24 [August 4, 2016] + * Last changed in libpng 1.6.25 [September 1, 2016] * Copyright (c) 1998-2002,2004,2006-2014,2016 Glenn Randers-Pehrson * (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) * (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) * * This code is released under the libpng license. * For conditions of distribution and use, see the disclaimer * and license in png.h *
--- a/media/libpng/pngpriv.h +++ b/media/libpng/pngpriv.h @@ -1,12 +1,12 @@ /* pngpriv.h - private declarations for use inside libpng * - * Last changed in libpng 1.6.24 [August 4, 2016] + * Last changed in libpng 1.6.25 [September 1, 2016] * Copyright (c) 1998-2002,2004,2006-2016 Glenn Randers-Pehrson * (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) * (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) * * This code is released under the libpng license. * For conditions of distribution and use, see the disclaimer * and license in png.h */ @@ -177,16 +177,24 @@ # endif /* !PNG_ARM_NEON_IMPLEMENTATION */ # ifndef PNG_ARM_NEON_IMPLEMENTATION /* Use the intrinsics code by default. */ # define PNG_ARM_NEON_IMPLEMENTATION 1 # endif #endif /* PNG_ARM_NEON_OPT > 0 */ +#ifndef PNG_MIPS_MSA_OPT +# if defined(__mips_msa) && (__mips_isa_rev >= 5) && defined(PNG_ALIGNED_MEMORY_SUPPORTED) +# define PNG_MIPS_MSA_OPT 2 +# else +# define PNG_MIPS_MSA_OPT 0 +# endif +#endif + #ifndef PNG_INTEL_SSE_OPT # ifdef PNG_INTEL_SSE /* Only check for SSE if the build configuration has been modified to * enable SSE optimizations. This means that these optimizations will * be off by default. See contrib/intel for more details. 
*/ # if defined(__SSE4_1__) || defined(__AVX__) || defined(__SSSE3__) || \ defined(__SSE2__) || defined(_M_X64) || defined(_M_AMD64) || \ @@ -213,16 +221,37 @@ # endif # endif # if PNG_INTEL_SSE_IMPLEMENTATION > 0 # define PNG_FILTER_OPTIMIZATIONS png_init_filter_functions_sse2 # endif #endif +#if PNG_MIPS_MSA_OPT > 0 +# define PNG_FILTER_OPTIMIZATIONS png_init_filter_functions_msa +# ifndef PNG_MIPS_MSA_IMPLEMENTATION +# if defined(__mips_msa) +# if defined(__clang__) +# elif defined(__GNUC__) +# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7) +# define PNG_MIPS_MSA_IMPLEMENTATION 2 +# endif /* no GNUC support */ +# endif /* __GNUC__ */ +# else /* !defined __mips_msa */ +# define PNG_MIPS_MSA_IMPLEMENTATION 2 +# endif /* __mips_msa */ +# endif /* !PNG_MIPS_MSA_IMPLEMENTATION */ + +# ifndef PNG_MIPS_MSA_IMPLEMENTATION +# define PNG_MIPS_MSA_IMPLEMENTATION 1 +# endif +#endif /* PNG_MIPS_MSA_OPT > 0 */ + + /* Is this a build of a DLL where compilation of the object modules requires * different preprocessor settings to those required for a simple library? If * so PNG_BUILD_DLL must be set. * * If libpng is used inside a DLL but that DLL does not export the libpng APIs * PNG_BUILD_DLL must not be set. To avoid the code below kicking in build a * static library of libpng then link the DLL against that. 
*/ @@ -1235,17 +1264,34 @@ PNG_INTERNAL_FUNCTION(void,png_read_filt row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); PNG_INTERNAL_FUNCTION(void,png_read_filter_row_avg4_neon,(png_row_infop row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth3_neon,(png_row_infop row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth4_neon,(png_row_infop row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); #endif - + +#if PNG_MIPS_MSA_OPT > 0 +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_up_msa,(png_row_infop row_info, + png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_sub3_msa,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_sub4_msa,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_avg3_msa,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_avg4_msa,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth3_msa,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth4_msa,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +#endif + #if PNG_INTEL_SSE_IMPLEMENTATION > 0 PNG_INTERNAL_FUNCTION(void,png_read_filter_row_sub3_sse2,(png_row_infop row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); PNG_INTERNAL_FUNCTION(void,png_read_filter_row_sub4_sse2,(png_row_infop row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); PNG_INTERNAL_FUNCTION(void,png_read_filter_row_avg3_sse2,(png_row_infop row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); 
PNG_INTERNAL_FUNCTION(void,png_read_filter_row_avg4_sse2,(png_row_infop @@ -1597,19 +1643,21 @@ PNG_INTERNAL_FUNCTION(int,png_colorspace #ifdef PNG_iCCP_SUPPORTED PNG_INTERNAL_FUNCTION(int,png_colorspace_set_ICC,(png_const_structrp png_ptr, png_colorspacerp colorspace, png_const_charp name, png_uint_32 profile_length, png_const_bytep profile, int color_type), PNG_EMPTY); /* The 'name' is used for information only */ /* Routines for checking parts of an ICC profile. */ +#ifdef PNG_READ_iCCP_SUPPORTED PNG_INTERNAL_FUNCTION(int,png_icc_check_length,(png_const_structrp png_ptr, png_colorspacerp colorspace, png_const_charp name, png_uint_32 profile_length), PNG_EMPTY); +#endif /* READ_iCCP */ PNG_INTERNAL_FUNCTION(int,png_icc_check_header,(png_const_structrp png_ptr, png_colorspacerp colorspace, png_const_charp name, png_uint_32 profile_length, png_const_bytep profile /* first 132 bytes only */, int color_type), PNG_EMPTY); PNG_INTERNAL_FUNCTION(int,png_icc_check_tag_table,(png_const_structrp png_ptr, png_colorspacerp colorspace, png_const_charp name, png_uint_32 profile_length, @@ -2021,21 +2069,27 @@ PNG_INTERNAL_FUNCTION(void, PNG_FILTER_O #else /* List *all* the possible optimizations here - this branch is required if * the builder of libpng passes the definition of PNG_FILTER_OPTIMIZATIONS in * CFLAGS in place of CPPFLAGS *and* uses symbol prefixing. 
*/ # if PNG_ARM_NEON_OPT > 0 PNG_INTERNAL_FUNCTION(void, png_init_filter_functions_neon, (png_structp png_ptr, unsigned int bpp), PNG_EMPTY); -# endif -# if PNG_INTEL_SSE_IMPLEMENTATION > 0 +#endif + +#if PNG_MIPS_MSA_OPT > 0 +PNG_INTERNAL_FUNCTION(void, png_init_filter_functions_msa, + (png_structp png_ptr, unsigned int bpp), PNG_EMPTY); +#endif + +# if PNG_INTEL_SSE_IMPLEMENTATION > 0 PNG_INTERNAL_FUNCTION(void, png_init_filter_functions_sse2, (png_structp png_ptr, unsigned int bpp), PNG_EMPTY); -# endif +# endif #endif PNG_INTERNAL_FUNCTION(png_uint_32, png_check_keyword, (png_structrp png_ptr, png_const_charp key, png_bytep new_key), PNG_EMPTY); /* Maintainer: Put new private prototypes here ^ */ #include "pngdebug.h"
--- a/media/libpng/pngrutil.c +++ b/media/libpng/pngrutil.c @@ -1,12 +1,12 @@ /* pngrutil.c - utilities to read a PNG file * - * Last changed in libpng 1.6.24 [August 4, 2016] + * Last changed in libpng 1.6.25 [September 1, 2016] * Copyright (c) 1998-2002,2004,2006-2016 Glenn Randers-Pehrson * (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) * (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) * * This code is released under the libpng license. * For conditions of distribution and use, see the disclaimer * and license in png.h * @@ -456,16 +456,17 @@ png_zlib_inflate(png_structrp png_ptr, i png_ptr->zstream_start = 0; } return inflate(&png_ptr->zstream, flush); } #endif /* Zlib >= 1.2.4 */ #ifdef PNG_READ_COMPRESSED_TEXT_SUPPORTED +#if defined(PNG_READ_zTXt_SUPPORTED) || defined (PNG_READ_iTXt_SUPPORTED) /* png_inflate now returns zlib error codes including Z_OK and Z_STREAM_END to * allow the caller to do multiple calls if required. If the 'finish' flag is * set Z_FINISH will be passed to the final inflate() call and Z_STREAM_END must * be returned or there has been a problem, otherwise Z_SYNC_FLUSH is used and * Z_OK or Z_STREAM_END will be returned on success. * * The input and output sizes are updated to the actual amounts of data consumed * or written, not the amount available (as in a z_stream). The data pointers @@ -585,17 +586,16 @@ png_inflate(png_structrp png_ptr, png_ui * pointer, which is not owned by the caller, but this is safe; it's only * used on errors! */ png_ptr->zstream.msg = PNGZ_MSG_CAST("zstream unclaimed"); return Z_STREAM_ERROR; } } -#if defined(PNG_READ_zTXt_SUPPORTED) || defined (PNG_READ_iTXt_SUPPORTED) /* * Decompress trailing data in a chunk. The assumption is that read_buffer * points at an allocated area holding the contents of a chunk with a * trailing compressed part. 
What we get back is an allocated area * holding the original prefix part and an uncompressed version of the * trailing part (the malloc area passed in is freed). */ static int
--- a/memory/replace/logalloc/LogAlloc.cpp +++ b/memory/replace/logalloc/LogAlloc.cpp @@ -33,16 +33,32 @@ prefork() { sLock.Acquire(); } static void postfork() { sLock.Release(); } +static size_t +GetPid() +{ + return size_t(getpid()); +} + +static size_t +GetTid() +{ +#if defined(_WIN32) + return size_t(GetCurrentThreadId()); +#else + return size_t(pthread_self()); +#endif +} + #ifdef ANDROID /* See mozglue/android/APKOpen.cpp */ extern "C" MOZ_EXPORT __attribute__((weak)) void* __dso_handle; /* Android doesn't have pthread_atfork defined in pthread.h */ extern "C" MOZ_EXPORT int pthread_atfork(void (*)(void), void (*)(void), void (*)(void)); @@ -65,17 +81,38 @@ replace_init(const malloc_table_t* aTabl #ifndef _WIN32 /* When another thread has acquired a lock before forking, the child * process will inherit the lock state but the thread, being nonexistent * in the child process, will never release it, leading to a dead-lock * whenever the child process gets the lock. We thus need to ensure no * other thread is holding the lock before forking, by acquiring it * ourselves, and releasing it after forking, both in the parent and child * processes. - * Windows doesn't have this problem since there is no fork(). */ + * Windows doesn't have this problem since there is no fork(). + * The real allocator, however, might be doing the same thing (jemalloc + * does). But pthread_atfork `prepare` handlers (first argument) are + * processed in reverse order they were established. But replace_init + * runs before the real allocator has had any chance to initialize and + * call pthread_atfork itself. This leads to its prefork running before + * ours. This leads to a race condition that can lead to a deadlock like + * the following: + * - thread A forks. + * - libc calls real allocator's prefork, so thread A holds the real + * allocator lock. + * - thread B calls malloc, which calls our replace_malloc. + * - consequently, thread B holds our lock. 
+ * - thread B then proceeds to call the real allocator's malloc, and + * waits for the real allocator's lock, which thread A holds. + * - libc calls our prefork, so thread A waits for our lock, which + * thread B holds. + * To avoid this race condition, the real allocator's prefork must be + * called after ours, which means it needs to be registered before ours. + * So trick the real allocator into initializing itself without more side + * effects by calling malloc with a size it can't possibly allocate. */ + sFuncs->malloc(-1); pthread_atfork(prefork, postfork, postfork); #endif /* Initialize output file descriptor from the MALLOC_LOG environment * variable. Numbers up to 9999 are considered as a preopened file * descriptor number. Other values are considered as a file name. */ char* log = getenv("MALLOC_LOG"); if (log && *log) { @@ -139,100 +176,101 @@ replace_get_bridge() */ void* replace_malloc(size_t aSize) { AutoLock lock(sLock); void* ptr = sFuncs->malloc(aSize); if (ptr) { - FdPrintf(sFd, "%zu malloc(%zu)=%p\n", size_t(getpid()), aSize, ptr); + FdPrintf(sFd, "%zu %zu malloc(%zu)=%p\n", GetPid(), GetTid(), aSize, ptr); } return ptr; } int replace_posix_memalign(void** aPtr, size_t aAlignment, size_t aSize) { AutoLock lock(sLock); int ret = sFuncs->posix_memalign(aPtr, aAlignment, aSize); if (ret == 0) { - FdPrintf(sFd, "%zu posix_memalign(%zu,%zu)=%p\n", size_t(getpid()), + FdPrintf(sFd, "%zu %zu posix_memalign(%zu,%zu)=%p\n", GetPid(), GetTid(), aAlignment, aSize, *aPtr); } return ret; } void* replace_aligned_alloc(size_t aAlignment, size_t aSize) { AutoLock lock(sLock); void* ptr = sFuncs->aligned_alloc(aAlignment, aSize); if (ptr) { - FdPrintf(sFd, "%zu aligned_alloc(%zu,%zu)=%p\n", size_t(getpid()), + FdPrintf(sFd, "%zu %zu aligned_alloc(%zu,%zu)=%p\n", GetPid(), GetTid(), aAlignment, aSize, ptr); } return ptr; } void* replace_calloc(size_t aNum, size_t aSize) { AutoLock lock(sLock); void* ptr = sFuncs->calloc(aNum, aSize); if (ptr) { - FdPrintf(sFd, 
"%zu calloc(%zu,%zu)=%p\n", size_t(getpid()), aNum, aSize, ptr); + FdPrintf(sFd, "%zu %zu calloc(%zu,%zu)=%p\n", GetPid(), GetTid(), aNum, + aSize, ptr); } return ptr; } void* replace_realloc(void* aPtr, size_t aSize) { AutoLock lock(sLock); void* new_ptr = sFuncs->realloc(aPtr, aSize); if (new_ptr || !aSize) { - FdPrintf(sFd, "%zu realloc(%p,%zu)=%p\n", size_t(getpid()), aPtr, aSize, - new_ptr); + FdPrintf(sFd, "%zu %zu realloc(%p,%zu)=%p\n", GetPid(), GetTid(), aPtr, + aSize, new_ptr); } return new_ptr; } void replace_free(void* aPtr) { AutoLock lock(sLock); if (aPtr) { - FdPrintf(sFd, "%zu free(%p)\n", size_t(getpid()), aPtr); + FdPrintf(sFd, "%zu %zu free(%p)\n", GetPid(), GetTid(), aPtr); } sFuncs->free(aPtr); } void* replace_memalign(size_t aAlignment, size_t aSize) { AutoLock lock(sLock); void* ptr = sFuncs->memalign(aAlignment, aSize); if (ptr) { - FdPrintf(sFd, "%zu memalign(%zu,%zu)=%p\n", size_t(getpid()), aAlignment, - aSize, ptr); + FdPrintf(sFd, "%zu %zu memalign(%zu,%zu)=%p\n", GetPid(), GetTid(), + aAlignment, aSize, ptr); } return ptr; } void* replace_valloc(size_t aSize) { AutoLock lock(sLock); void* ptr = sFuncs->valloc(aSize); if (ptr) { - FdPrintf(sFd, "%zu valloc(%zu)=%p\n", size_t(getpid()), aSize, ptr); + FdPrintf(sFd, "%zu %zu valloc(%zu)=%p\n", GetPid(), GetTid(), aSize, ptr); } return ptr; } void replace_jemalloc_stats(jemalloc_stats_t* aStats) { AutoLock lock(sLock); sFuncs->jemalloc_stats(aStats); - FdPrintf(sFd, "%zu jemalloc_stats()\n", size_t(getpid())); + FdPrintf(sFd, "%zu %zu jemalloc_stats()\n", GetPid(), GetTid()); }
--- a/memory/replace/logalloc/README +++ b/memory/replace/logalloc/README @@ -37,71 +37,71 @@ by shell redirections, such as: MALLOC_LOG=3 firefox 3>&1 1>&2 | gzip -c > log.gz (3>&1 copies the `| gzip` pipe file descriptor to file descriptor #3, 1>&2 then copies stderr to stdout. This leads to: fd1 and fd2 sending to stderr of the parent process (the shell), and fd3 sending to gzip.) Each line of the allocations log is formatted as follows: - <pid> <function>([<args>])[=<result>] + <pid> <tid> <function>([<args>])[=<result>] where <args> is a comma separated list of values. The number of <args> and the presence of <result> depend on the <function>. Example log: - 18545 malloc(32)=0x7f90495120e0 - 18545 calloc(1,148)=0x7f9049537480 - 18545 realloc(0x7f90495120e0,64)=0x7f9049536680 - 18545 posix_memalign(256,240)=0x7f9049583300 - 18545 jemalloc_stats() - 18545 free(0x7f9049536680) + 18545 18545 malloc(32)=0x7f90495120e0 + 18545 18545 calloc(1,148)=0x7f9049537480 + 18545 18545 realloc(0x7f90495120e0,64)=0x7f9049536680 + 18545 18545 posix_memalign(256,240)=0x7f9049583300 + 18545 18545 jemalloc_stats() + 18545 18545 free(0x7f9049536680) This log can be replayed with the logalloc-replay tool in memory/replace/logalloc/replay. However, as the goal of that tool is to reproduce the recorded memory allocations, it needs to avoid as much as possible doing its own allocations for bookkeeping. Reading the logs as they are would require data structures and memory allocations. As a consequence, the logs need to be preprocessed beforehand. The logalloc_munge.py script is responsible for that preprocessing. It simply takes a raw log on its stdin, and outputs the preprocessed log on its stdout. It replaces pointer addresses with indexes the logalloc-replay tool can use in a large (almost) linear array of allocation tracking slots (prefixed with '#'). It also replaces the pids with numbers starting from 1 (such as the first seen pid number is 1, the second is 2, etc.). 
The above example log would become the following, once preprocessed: - 1 malloc(32)=#1 - 1 calloc(1,148)=#2 - 1 realloc(#1,64)=#1 - 1 posix_memalign(256,240)=#3 - 1 jemalloc_stats() - 1 free(#1) + 1 1 malloc(32)=#1 + 1 1 calloc(1,148)=#2 + 1 1 realloc(#1,64)=#1 + 1 1 posix_memalign(256,240)=#3 + 1 1 jemalloc_stats() + 1 1 free(#1) The logalloc-replay tool then takes the preprocessed log on its stdin and replays the allocations printed there, but will only replay those with the same process id as the first line (which normally is 1). As the log files are simple text files, though, it is easy to separate out the different processes log with e.g. grep, and feed the separate processes logs to logalloc-replay. The logalloc-replay program won't output anything unless jemalloc_stats records appears in the log. You can expect those to be recorded when going to about:memory in Firefox, but they can also be added after preprocessing. Here is an example of what one can do: gunzip -c log.gz | python logalloc_munge.py | \ - awk '$1 == "2" { print $0 } !(NR % 10000) { print "2 jemalloc_stats()" }' | \ + awk '$1 == "2" { print $0 } !(NR % 10000) { print "2 1 jemalloc_stats()" }' | \ ./logalloc-replay The above command replays the allocations of process #2, with some stats output every 10000 records. The logalloc-replay tool itself being hooked with replace-malloc, it is possible to set LD_PRELOAD/DYLD_INSERT_LIBRARIES/MOZ_REPLACE_MALLOC_LIB and replay a log through a different allocator. For example: LD_PRELOAD=libreplace_jemalloc.so logalloc-replay < log -Will replay the log against jemalloc3 (which is, as of writing, what +Will replay the log against jemalloc4 (which is, as of writing, what libreplace_jemalloc.so contains).
--- a/memory/replace/logalloc/replay/Replay.cpp +++ b/memory/replace/logalloc/replay/Replay.cpp @@ -504,16 +504,20 @@ main() } /* The log may contain data for several processes, only entries for the * very first that appears are treated. */ if (first_pid != pid) { continue; } + /* The log contains thread ids for manual analysis, but we just ignore them + * for now. */ + parseNumber(line.SplitChar(' ')); + Buffer func = line.SplitChar('('); Buffer args = line.SplitChar(')'); /* jemalloc_stats and free are functions with no result. */ if (func == Buffer("jemalloc_stats")) { replay.jemalloc_stats(args); continue; } else if (func == Buffer("free")) {
--- a/memory/replace/logalloc/replay/logalloc_munge.py +++ b/memory/replace/logalloc/replay/logalloc_munge.py @@ -56,26 +56,34 @@ class IdMapping(object): return value == 0 or value in self._values class Ignored(Exception): pass def split_log_line(line): try: + # The format for each line is: + # <pid> [<tid>] <function>([<args>])[=<result>] + # + # The original format didn't include the tid, so we try to parse + # lines whether they have one or not. pid, func_call = line.split(' ', 1) - # func_call format is <function>([<args>])[=<result>] call, result = func_call.split(')') func, args = call.split('(') args = args.split(',') if args else [] if result: if result[0] != '=': raise Ignored('Malformed input') result = result[1:] - return pid, func, args, result + if ' ' in func: + tid, func = func.split(' ', 1) + else: + tid = pid + return pid, tid, func, args, result except: raise Ignored('Malformed input') NUM_ARGUMENTS = { 'jemalloc_stats': 0, 'free': 1, 'malloc': 1, @@ -86,24 +94,26 @@ NUM_ARGUMENTS = { 'memalign': 2, 'valloc': 1, } def main(): process_pointers = defaultdict(IdMapping) pids = IdMapping() + tids = IdMapping() for line in sys.stdin: line = line.strip() try: - pid, func, args, result = split_log_line(line) + pid, tid, func, args, result = split_log_line(line) # Replace pid with an id. pid = pids[int(pid)] + tid = tids[int(tid)] pointers = process_pointers[pid] if func not in NUM_ARGUMENTS: raise Ignored('Unknown function') if len(args) != NUM_ARGUMENTS[func]: raise Ignored('Malformed input') @@ -119,17 +129,17 @@ def main(): del pointers[ptr] if result: result = int(result, 16) if not result: raise Ignored('Result is NULL') result = "#%d" % pointers[result] - print('%d %s(%s)%s' % (pid, func, ','.join(args), + print('%d %d %s(%s)%s' % (pid, tid, func, ','.join(args), '=%s' % result if result else '')) except Exception as e: print('Ignored "%s": %s' % (line, e.message), file=sys.stderr) if __name__ == '__main__': main()
--- a/memory/replace/logalloc/replay/replay.log +++ b/memory/replace/logalloc/replay/replay.log @@ -1,17 +1,17 @@ -1 malloc(42)=#1 -1 malloc(24)=#2 -2 malloc(42)=#1 -1 free(#1) -1 posix_memalign(4096,1024)=#1 -1 calloc(4,42)=#3 -1 free(#2) -1 realloc(#3,84)=#2 -1 aligned_alloc(512,1024)=#3 -1 memalign(512,1024)=#4 -1 valloc(1024)=#5 -1 jemalloc_stats() -1 free(#5) -1 free(#4) -1 free(#3) -1 free(#2) -1 free(#1) +1 1 malloc(42)=#1 +1 1 malloc(24)=#2 +2 2 malloc(42)=#1 +1 1 free(#1) +1 1 posix_memalign(4096,1024)=#1 +1 1 calloc(4,42)=#3 +1 1 free(#2) +1 1 realloc(#3,84)=#2 +1 1 aligned_alloc(512,1024)=#3 +1 1 memalign(512,1024)=#4 +1 1 valloc(1024)=#5 +1 1 jemalloc_stats() +1 1 free(#5) +1 1 free(#4) +1 1 free(#3) +1 1 free(#2) +1 1 free(#1)
--- a/mobile/android/base/java/org/mozilla/gecko/GeckoApp.java +++ b/mobile/android/base/java/org/mozilla/gecko/GeckoApp.java @@ -1719,43 +1719,41 @@ public abstract class GeckoApp String sessionString = getProfile().readSessionFile(useBackup); if (sessionString == null) { throw new SessionRestoreException("Could not read from session file"); } // If we are doing an OOM restore, parse the session data and // stub the restored tabs immediately. This allows the UI to be // updated before Gecko has restored. - if (mShouldRestore) { - final JSONArray tabs = new JSONArray(); - final JSONObject windowObject = new JSONObject(); - final boolean sessionDataValid; - - LastSessionParser parser = new LastSessionParser(tabs, windowObject, isExternalURL); - - if (mPrivateBrowsingSession == null) { - sessionDataValid = parser.parse(sessionString); - } else { - sessionDataValid = parser.parse(sessionString, mPrivateBrowsingSession); + final JSONArray tabs = new JSONArray(); + final JSONObject windowObject = new JSONObject(); + final boolean sessionDataValid; + + LastSessionParser parser = new LastSessionParser(tabs, windowObject, isExternalURL); + + if (mPrivateBrowsingSession == null) { + sessionDataValid = parser.parse(sessionString); + } else { + sessionDataValid = parser.parse(sessionString, mPrivateBrowsingSession); + } + + if (tabs.length() > 0) { + windowObject.put("tabs", tabs); + sessionString = new JSONObject().put("windows", new JSONArray().put(windowObject)).toString(); + } else { + if (parser.allTabsSkipped() || sessionDataValid) { + // If we intentionally skipped all tabs we've read from the session file, we + // set mShouldRestore back to false at this point already, so the calling code + // can infer that the exception wasn't due to a damaged session store file. + // The same applies if the session file was syntactically valid and + // simply didn't contain any tabs. 
+ mShouldRestore = false; } - - if (tabs.length() > 0) { - windowObject.put("tabs", tabs); - sessionString = new JSONObject().put("windows", new JSONArray().put(windowObject)).toString(); - } else { - if (parser.allTabsSkipped() || sessionDataValid) { - // If we intentionally skipped all tabs we've read from the session file, we - // set mShouldRestore back to false at this point already, so the calling code - // can infer that the exception wasn't due to a damaged session store file. - // The same applies if the session file was syntactically valid and - // simply didn't contain any tabs. - mShouldRestore = false; - } - throw new SessionRestoreException("No tabs could be read from session file"); - } + throw new SessionRestoreException("No tabs could be read from session file"); } JSONObject restoreData = new JSONObject(); restoreData.put("sessionString", sessionString); return restoreData.toString(); } catch (JSONException e) { throw new SessionRestoreException(e); }
--- a/mobile/android/themes/core/config.css +++ b/mobile/android/themes/core/config.css @@ -55,17 +55,17 @@ body { display: inline-block; width: 12em; min-width: 0; color: #000000; opacity: 1; flex: 1 1 auto; } -#filter-input:-moz-placeholder { +#filter-input::placeholder { color: rgba(255,255,255,0.5); } .toolbar input { display: inline-block; height: 100%; min-width: 3em; box-sizing: border-box;
--- a/old-configure.in +++ b/old-configure.in @@ -41,17 +41,17 @@ dnl ==================================== _SUBDIR_HOST_CFLAGS="$HOST_CFLAGS" _SUBDIR_HOST_CXXFLAGS="$HOST_CXXFLAGS" _SUBDIR_HOST_LDFLAGS="$HOST_LDFLAGS" _SUBDIR_CONFIG_ARGS="$ac_configure_args" dnl Set the version number of the libs included with mozilla dnl ======================================================== MOZJPEG=62 -MOZPNG=10624 +MOZPNG=10625 NSPR_VERSION=4 NSPR_MINVER=4.12 NSS_VERSION=3 dnl Set the minimum version of toolkit libs used by mozilla dnl ======================================================== GLIB_VERSION=2.22 # 2_26 is the earliest version we can set GLIB_VERSION_MIN_REQUIRED.
--- a/python/gdbpp/gdbpp/smartptr.py +++ b/python/gdbpp/gdbpp/smartptr.py @@ -39,23 +39,25 @@ class smartptr_printer(object): else: type_name = str(self.value.dereference().dynamic_type.pointer()) return '[(%s) %s]' % (type_name, str(self.value)) @GeckoPrettyPrinter('mozilla::StyleSheetHandle::RefPtr', '^mozilla::HandleRefPtr<mozilla::StyleSheetHandle>$') class sheetptr_printer(object): def __init__(self, value): - self.value = value['mHandle'] - if self.value and self.value['mPtr'] and self.value['mPtr']['mValue']: - self.value = self.value['mPtr']['mValue'] + self.value = 0 + if (value['mHandle'] and + value['mHandle']['mPtr'] and + value['mHandle']['mPtr']['mValue']): + self.value = int(value['mHandle']['mPtr']['mValue']) def to_string(self): - if not self.value: - type_name = str(self.value.type) + if self.value == 0: + type_name = 'mozilla::StyleSheet *' value = 0 else: value = int(self.value) if value & 0x1: value = value & ~0x1 type_name = 'mozilla::ServoStyleSheet *' else: type_name = 'mozilla::CSSStyleSheet *'
--- a/security/manager/tools/getHSTSPreloadList.js +++ b/security/manager/tools/getHSTSPreloadList.js @@ -176,17 +176,16 @@ RedirectAndAuthStopper.prototype = { function getHSTSStatus(host, resultList) { var req = Cc["@mozilla.org/xmlextras/xmlhttprequest;1"] .createInstance(Ci.nsIXMLHttpRequest); var inResultList = false; var uri = "https://" + host.name + "/"; req.open("GET", uri, true); req.timeout = REQUEST_TIMEOUT; - req.channel.notificationCallbacks = new RedirectAndAuthStopper(); let errorhandler = (evt) => { dump(`ERROR: error making request to ${host.name} (type=${evt.type})\n`); if (!inResultList) { inResultList = true; resultList.push(processStsHeader(host, null, req.status, req.channel.securityInfo)); } @@ -200,16 +199,17 @@ function getHSTSStatus(host, resultList) inResultList = true; var header = req.getResponseHeader("strict-transport-security"); resultList.push(processStsHeader(host, header, req.status, req.channel.securityInfo)); } }; try { + req.channel.notificationCallbacks = new RedirectAndAuthStopper(); req.send(); } catch (e) { dump("ERROR: exception making request to " + host.name + ": " + e + "\n"); } } function compareHSTSStatus(a, b) {
deleted file mode 100644 --- a/testing/web-platform/meta/XMLHttpRequest/send-after-setting-document-domain.htm.ini +++ /dev/null @@ -1,6 +0,0 @@ -[send-after-setting-document-domain.htm] - type: testharness - expected: ERROR - [loading documents from the origin document.domain was set to should throw] - expected: FAIL -
deleted file mode 100644 --- a/testing/web-platform/meta/XMLHttpRequest/send-authentication-basic-cors-not-enabled.htm.ini +++ /dev/null @@ -1,5 +0,0 @@ -[send-authentication-basic-cors-not-enabled.htm] - type: testharness - [XMLHttpRequest: send() - "Basic" authenticated CORS requests with user name and password passed to open() (asserts failure)] - expected: FAIL -
deleted file mode 100644 --- a/testing/web-platform/meta/XMLHttpRequest/send-non-same-origin.sub.htm.ini +++ /dev/null @@ -1,17 +0,0 @@ -[send-non-same-origin.sub.htm] - type: testharness - [XMLHttpRequest: send() - non same-origin (mailto:test@example.org)] - expected: FAIL - - [XMLHttpRequest: send() - non same-origin (tel:+31600000000)] - expected: FAIL - - [XMLHttpRequest: send() - non same-origin (http://www2.web-platform.test:8000/)] - expected: FAIL - - [XMLHttpRequest: send() - non same-origin (javascript:alert('FAIL'))] - expected: FAIL - - [XMLHttpRequest: send() - non same-origin (folder.txt)] - expected: FAIL -
deleted file mode 100644 --- a/testing/web-platform/meta/XMLHttpRequest/send-redirect-bogus-sync.htm.ini +++ /dev/null @@ -1,17 +0,0 @@ -[send-redirect-bogus-sync.htm] - type: testharness - [XMLHttpRequest: send() - Redirects (bogus Location header; sync) (301: foobar://abcd)] - expected: FAIL - - [XMLHttpRequest: send() - Redirects (bogus Location header; sync) (302: http://z)] - expected: FAIL - - [XMLHttpRequest: send() - Redirects (bogus Location header; sync) (302: mailto:someone@example.org)] - expected: FAIL - - [XMLHttpRequest: send() - Redirects (bogus Location header; sync) (303: http://z)] - expected: FAIL - - [XMLHttpRequest: send() - Redirects (bogus Location header; sync) (303: tel:1234567890)] - expected: FAIL -
deleted file mode 100644 --- a/testing/web-platform/meta/XMLHttpRequest/send-redirect-infinite-sync.htm.ini +++ /dev/null @@ -1,5 +0,0 @@ -[send-redirect-infinite-sync.htm] - type: testharness - [XMLHttpRequest: send() - Redirects (infinite loop; sync) (301)] - expected: FAIL -
deleted file mode 100644 --- a/testing/web-platform/meta/XMLHttpRequest/xmlhttprequest-network-error-sync.htm.ini +++ /dev/null @@ -1,5 +0,0 @@ -[xmlhttprequest-network-error-sync.htm] - type: testharness - [XMLHttpRequest: members during network errors (sync)] - expected: FAIL -
new file mode 100644 --- /dev/null +++ b/testing/web-platform/tests/XMLHttpRequest/resources/send-after-setting-document-domain-window-1.htm @@ -0,0 +1,23 @@ +<!doctype html> +<html> + <head> + <title>XMLHttpRequest: send() with document.domain set: loading documents from original origin after setting document.domain</title> + <script src="send-after-setting-document-domain-window-helper.js"></script> + <link rel="help" href="https://xhr.spec.whatwg.org/#the-open()-method" data-tested-assertations="following::ol[1]/li[2]/ol[1]/li[3]" /> + </head> + <body> + <script> + run_test(function() { + document.domain = document.domain; // this is not a noop, it does actually change the security context + var client = new XMLHttpRequest(); + client.open("GET", "status.py?content=hello", false); + client.send(null); + assert_equals(client.responseText, "hello"); + document.domain = document.domain.replace(/^\w+\./, ""); + client.open("GET", "status.py?content=hello2", false); + client.send(null); + assert_equals(client.responseText, "hello2"); + }, "loading documents from original origin after setting document.domain"); + </script> + </body> +</html>
new file mode 100644 --- /dev/null +++ b/testing/web-platform/tests/XMLHttpRequest/resources/send-after-setting-document-domain-window-2.htm @@ -0,0 +1,20 @@ +<!doctype html> +<html> + <head> + <title>XMLHttpRequest: send() with document.domain set: loading documents from the origin document.domain was set to should throw</title> + <script src="send-after-setting-document-domain-window-helper.js"></script> + <link rel="help" href="https://xhr.spec.whatwg.org/#the-open()-method" data-tested-assertations="following::ol[1]/li[2]/ol[1]/li[3]" /> + </head> + <body> + <script> + run_test(function() { + document.domain = document.domain.replace(/^\w+\./, ""); + var client = new XMLHttpRequest(); + client.open("GET", location.protocol + "//" + document.domain + location.pathname.replace(/[^\/]*$/, "") + "status.py?content=hello3", false); + assert_throws("NetworkError", function() { + client.send(null); + }); + }, "loading documents from the origin document.domain was set to should throw"); + </script> + </body> +</html>
new file mode 100644 --- /dev/null +++ b/testing/web-platform/tests/XMLHttpRequest/resources/send-after-setting-document-domain-window-helper.js @@ -0,0 +1,29 @@ +function assert_equals(value, expected) { + if (value != expected) { + throw "Got wrong value.\nExpected '" + expected + "',\ngot '" + value + "'"; + } +} + +function assert_throws(expected_exc, func) { + try { + func.call(this); + } catch(e) { + var actual = e.name || e.type; + if (actual != expected_exc) { + throw "Got wrong exception.\nExpected '" + expected_exc + "',\ngot '" + actual + "'."; + } + return; + } + throw "Expected exception, but none was thrown"; +} + +function run_test(test, name) { + var result = {passed: true, message: null, name: name}; + try { + test(); + } catch(e) { + result.passed = false; + result.message = e + ""; + } + opener.postMessage(result, "*"); +}
--- a/testing/web-platform/tests/XMLHttpRequest/send-after-setting-document-domain.htm +++ b/testing/web-platform/tests/XMLHttpRequest/send-after-setting-document-domain.htm @@ -1,38 +1,39 @@ <!doctype html> <html> <head> <title>XMLHttpRequest: send() with document.domain set</title> <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> - <!-- The spec doesn't seem to explicitly cover this case (as of June 2013) --> <link rel="help" href="https://xhr.spec.whatwg.org/#the-open()-method" data-tested-assertations="following::ol[1]/li[2]/ol[1]/li[3]" /> </head> <body> <div id="log"></div> <script> - // first make sure we actually run off a domain with at least three parts, in order to be able to shorten it.. - if (location.hostname.split(/\./).length < 3) { - location.href = location.protocol+'//www2.'+location.host+location.pathname - } + var test_base_url = location.protocol+'//www2.'+location.host+"/XMLHttpRequest/resources/", + test_windows = [ + window.open(test_base_url + "send-after-setting-document-domain-window-1.htm"), + window.open(test_base_url + "send-after-setting-document-domain-window-2.htm"), + ], + num_tests_left = test_windows.length; - test(function() { - document.domain = document.domain // this is not a noop, it does actually change the security context - var client = new XMLHttpRequest() - client.open("GET", "resources/status.py?content=hello", false) - client.send(null) - assert_equals(client.responseText, "hello") - document.domain = document.domain.replace(/^\w+\./, '') - client.open("GET", "resources/status.py?content=hello2", false) - client.send(null) - assert_equals(client.responseText, "hello2") - }, "loading documents from original origin after setting document.domain") - // try to load a document from the origin document.domain was set to - test(function () { - var client = new XMLHttpRequest() - client.open("GET", location.protocol + '//' + document.domain + 
location.pathname.replace(/[^\/]*$/, '') + "resources/status.py?content=hello3", false) - // AFAIK this should throw - assert_throws('NetworkError', function(){client.send(null)}) - }, "loading documents from the origin document.domain was set to should throw") + async_test(function(wrapper_test) { + window.addEventListener("message", function(evt) { + // run a shadow test that just forwards the results + async_test(function(test) { + assert_true(evt.data.passed, evt.data.message); + test.done(); + }, evt.data.name); + + // after last result comes in, close all test + // windows and complete the wrapper test. + if (--num_tests_left == 0) { + for (var i=0; i<test_windows.length; ++i) { + test_windows[i].close(); + } + wrapper_test.done(); + } + }, false); + }, "All tests ran"); </script> </body> </html>
--- a/toolkit/components/telemetry/Histograms.json +++ b/toolkit/components/telemetry/Histograms.json @@ -4718,16 +4718,25 @@ "alert_emails": ["perf-telemetry-alerts@mozilla.com"], "bug_numbers": [1235908], "expires_in_version": "never", "kind": "exponential", "high": 10000, "n_buckets": 50, "description": "Time (ms) from the Input event being created to the end of it being handled" }, + "LOAD_INPUT_EVENT_RESPONSE_MS": { + "alert_emails": ["perf-telemetry-alerts@mozilla.com"], + "bug_numbers": [1298101], + "expires_in_version": "never", + "kind": "exponential", + "high": 10000, + "n_buckets": 50, + "description": "Time (ms) from the Input event being created to the end of it being handled for events handling during page load only" + }, "EVENTLOOP_UI_ACTIVITY_EXP_MS": { "alert_emails": ["perf-telemetry-alerts@mozilla.com"], "bug_numbers": [1198196], "expires_in_version": "never", "kind": "exponential", "high": 60000, "n_buckets": 50, "description": "Widget: Time it takes for the message before a UI message (ms)" @@ -10241,17 +10250,17 @@ "description": "When restoring tabs on startup, reading from sessionstore.js failed, but sessionstore.bak was read successfully.", "cpp_guard": "ANDROID" }, "NUMBER_OF_PROFILES": { "alert_emails": ["amarchesini@mozilla.com"], "expires_in_version": "58", "bug_numbers": [1296606], "kind": "count", - "description": "Number of named profiles." + "description": "Number of named browser profiles for the current user, as reported by the profile service at startup." }, "WEB_PERMISSION_CLEARED": { "alert_emails": ["firefox-dev@mozilla.org"], "bug_numbers": [1286118], "expires_in_version": "55", "kind": "enumerated", "keyed": true, "n_values": 6,
--- a/toolkit/content/textbox.css +++ b/toolkit/content/textbox.css @@ -16,20 +16,20 @@ html|*.textbox-input { html|*.textbox-textarea { -moz-appearance: none !important; text-shadow: inherit; box-sizing: border-box; -moz-box-flex: 1; } /* -html|*.textbox-input::-moz-placeholder, -html|*.textbox-textarea::-moz-placeholder { +html|*.textbox-input::placeholder, +html|*.textbox-textarea::placeholder { text-align: left; direction: ltr; } -html|*.textbox-input::-moz-placeholder:-moz-locale-dir(rtl), -html|*.textbox-textarea::-moz-placeholder:-moz-locale-dir(rtl) { +html|*.textbox-input::placeholder:-moz-locale-dir(rtl), +html|*.textbox-textarea::placeholder:-moz-locale-dir(rtl) { text-align: right; direction: rtl; } */
--- a/toolkit/mozapps/extensions/internal/ProductAddonChecker.jsm +++ b/toolkit/mozapps/extensions/internal/ProductAddonChecker.jsm @@ -216,20 +216,20 @@ function downloadFile(url) { reject(ex); }; xhr.addEventListener("error", fail); xhr.addEventListener("abort", fail); xhr.responseType = "arraybuffer"; try { xhr.open("GET", url); + xhr.send(null); } catch (ex) { reject(ex); } - xhr.send(null); }); } /** * Convert a string containing binary values to hex. */ function binaryToHex(input) { let result = "";
--- a/toolkit/themes/windows/global/textbox.css +++ b/toolkit/themes/windows/global/textbox.css @@ -33,17 +33,17 @@ html|*.textbox-textarea { padding: 0px 1px !important; background-color: inherit; color: inherit; font: inherit; } @media (-moz-windows-default-theme) { @media not all and (-moz-os-version: windows-xp) { - textbox html|*.textbox-input::-moz-placeholder { + textbox html|*.textbox-input::placeholder { font-style: italic; } } } .textbox-contextmenu { cursor: default; }
--- a/toolkit/xre/test/win/TestDllInterceptor.cpp +++ b/toolkit/xre/test/win/TestDllInterceptor.cpp @@ -157,15 +157,18 @@ int main() TestHook("kernel32.dll", "VirtualAlloc") && TestHook("kernel32.dll", "MapViewOfFile") && TestHook("gdi32.dll", "CreateDIBSection") && TestHook("kernel32.dll", "CreateFileW") && #endif TestHook("imm32.dll", "ImmGetContext") && TestHook("imm32.dll", "ImmGetCompositionStringW") && TestHook("imm32.dll", "ImmSetCandidateWindow") && +#ifdef _M_X64 + TestHook("user32.dll", "GetKeyState") && +#endif TestDetour("ntdll.dll", "LdrLoadDll")) { printf("TEST-PASS | WindowsDllInterceptor | all checks passed\n"); return 0; } return 1; }
--- a/tools/profiler/core/platform.cpp +++ b/tools/profiler/core/platform.cpp @@ -1074,16 +1074,27 @@ void mozilla_sampler_sleep_end() { PseudoStack *stack = tlsPseudoStack.get(); if (stack == nullptr) { return; } stack->setSleeping(0); } +bool mozilla_sampler_is_sleeping() { + if (sInitCount == 0) { + return false; + } + PseudoStack *stack = tlsPseudoStack.get(); + if (stack == nullptr) { + return false; + } + return stack->isSleeping(); +} + double mozilla_sampler_time(const mozilla::TimeStamp& aTime) { mozilla::TimeDuration delta = aTime - sStartTime; return delta.ToMilliseconds(); } double mozilla_sampler_time() {
--- a/tools/profiler/public/GeckoProfiler.h +++ b/tools/profiler/public/GeckoProfiler.h @@ -44,16 +44,18 @@ * application is responding to the event loop. Lower is better. * 't' - Elapse time since recording started. * */ #ifndef SAMPLER_H #define SAMPLER_H +#include "mozilla/Assertions.h" +#include "mozilla/Attributes.h" #ifndef SPS_STANDALONE #include "js/TypeDecls.h" #endif #include "mozilla/UniquePtr.h" #include "mozilla/Vector.h" namespace mozilla { class TimeStamp; @@ -227,16 +229,17 @@ static inline void profiler_unlock() {} static inline void profiler_register_thread(const char* name, void* guessStackTop) {} static inline void profiler_unregister_thread() {} // These functions tell the profiler that a thread went to sleep so that we can avoid // sampling it while it's sleeping. Calling profiler_sleep_start() twice without // profiler_sleep_end() is an error. static inline void profiler_sleep_start() {} static inline void profiler_sleep_end() {} +static inline bool profiler_is_sleeping() { return false; } // Call by the JSRuntime's operation callback. This is used to enable // profiling on auxilerary threads. 
static inline void profiler_js_operation_callback() {} static inline double profiler_time() { return 0; } static inline double profiler_time(const mozilla::TimeStamp& aTime) { return 0; } @@ -246,29 +249,52 @@ static inline void profiler_log(const ch static inline void profiler_log(const char *fmt, va_list args) {} #else #include "GeckoProfilerImpl.h" #endif -class GeckoProfilerInitRAII { +class MOZ_RAII GeckoProfilerInitRAII { public: explicit GeckoProfilerInitRAII(void* stackTop) { profiler_init(stackTop); } ~GeckoProfilerInitRAII() { profiler_shutdown(); } }; -class GeckoProfilerSleepRAII { +class MOZ_RAII GeckoProfilerSleepRAII { public: GeckoProfilerSleepRAII() { profiler_sleep_start(); } ~GeckoProfilerSleepRAII() { profiler_sleep_end(); } }; +/** + * Temporarily wake up the profiler while servicing events such as + * Asynchronous Procedure Calls (APCs). + */ +class MOZ_RAII GeckoProfilerWakeRAII { +public: + GeckoProfilerWakeRAII() + : mIssuedWake(profiler_is_sleeping()) + { + if (mIssuedWake) { + profiler_sleep_end(); + } + } + ~GeckoProfilerWakeRAII() { + if (mIssuedWake) { + MOZ_ASSERT(!profiler_is_sleeping()); + profiler_sleep_start(); + } + } +private: + bool mIssuedWake; +}; + #endif // ifndef SAMPLER_H
--- a/tools/profiler/public/GeckoProfilerFunc.h +++ b/tools/profiler/public/GeckoProfilerFunc.h @@ -102,16 +102,17 @@ void mozilla_sampler_lock(); void mozilla_sampler_unlock(); // Register/unregister threads with the profiler bool mozilla_sampler_register_thread(const char* name, void* stackTop); void mozilla_sampler_unregister_thread(); void mozilla_sampler_sleep_start(); void mozilla_sampler_sleep_end(); +bool mozilla_sampler_is_sleeping(); double mozilla_sampler_time(); double mozilla_sampler_time(const mozilla::TimeStamp& aTime); void mozilla_sampler_tracing(const char* aCategory, const char* aInfo, TracingMetadata aMetaData); void mozilla_sampler_tracing(const char* aCategory, const char* aInfo,
--- a/tools/profiler/public/GeckoProfilerImpl.h +++ b/tools/profiler/public/GeckoProfilerImpl.h @@ -221,16 +221,22 @@ void profiler_sleep_start() } static inline void profiler_sleep_end() { mozilla_sampler_sleep_end(); } +static inline +bool profiler_is_sleeping() +{ + return mozilla_sampler_is_sleeping(); +} + #ifndef SPS_STANDALONE static inline void profiler_js_operation_callback() { PseudoStack *stack = tlsPseudoStack.get(); if (!stack) { return; }
--- a/tools/profiler/public/PseudoStack.h +++ b/tools/profiler/public/PseudoStack.h @@ -442,16 +442,20 @@ public: // Call this whenever the current thread sleeps or wakes up // Calling setSleeping with the same value twice in a row is an error void setSleeping(int sleeping) { MOZ_ASSERT(mSleeping != sleeping); mSleepId++; mSleeping = sleeping; } + bool isSleeping() { + return !!mSleeping; + } + void ref() { ++mRefCnt; } void deref() { int newValue = --mRefCnt; if (newValue == 0) { delete this;
--- a/widget/windows/nsWindow.cpp +++ b/widget/windows/nsWindow.cpp @@ -4893,20 +4893,27 @@ nsWindow::ProcessMessage(UINT msg, WPARA // be used to specify portions of the source and destination window // rectangles that are valid and should be preserved. We opt not to // implement an elaborate client-area preservation technique, and // simply return 0, which means "preserve the entire old client area // and align it with the upper-left corner of our new client area". RECT *clientRect = wParam ? &(reinterpret_cast<NCCALCSIZE_PARAMS*>(lParam))->rgrc[0] : (reinterpret_cast<RECT*>(lParam)); - clientRect->top += (mCaptionHeight - mNonClientOffset.top); - clientRect->left += (mHorResizeMargin - mNonClientOffset.left); - clientRect->right -= (mHorResizeMargin - mNonClientOffset.right); - clientRect->bottom -= (mVertResizeMargin - mNonClientOffset.bottom); + double scale = WinUtils::IsPerMonitorDPIAware() + ? WinUtils::LogToPhysFactor(mWnd) / WinUtils::SystemScaleFactor() + : 1.0; + clientRect->top += + NSToIntRound((mCaptionHeight - mNonClientOffset.top) * scale); + clientRect->left += + NSToIntRound((mHorResizeMargin - mNonClientOffset.left) * scale); + clientRect->right -= + NSToIntRound((mHorResizeMargin - mNonClientOffset.right) * scale); + clientRect->bottom -= + NSToIntRound((mVertResizeMargin - mNonClientOffset.bottom) * scale); result = true; *aRetValue = 0; } break; } case WM_NCHITTEST:
--- a/xpcom/build/nsWindowsDllInterceptor.h +++ b/xpcom/build/nsWindowsDllInterceptor.h @@ -1,16 +1,17 @@ /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef NS_WINDOWS_DLL_INTERCEPTOR_H_ #define NS_WINDOWS_DLL_INTERCEPTOR_H_ + #include <windows.h> #include <winternl.h> /* * Simple function interception. * * We have two separate mechanisms for intercepting a function: We can use the * built-in nop space, if it exists, or we can create a detour. @@ -61,16 +62,67 @@ * */ #include <stdint.h> namespace mozilla { namespace internal { +// Get op length of '/r' +static size_t +GetOpLengthByModRM(const uint8_t* aOp) +{ + uint8_t mod = *aOp >> 6; + uint8_t rm = *aOp & 0x7; + + switch (mod) { + case 0: + if (rm == 4) { + // SIB + if ((*(aOp + 1) & 0x7) == 5) { + // disp32 + return 6; + } + return 2; + } else if (rm == 5) { + // [RIP/EIP + disp32] + // Since we don't modify relative offset, we should mark as impossible + // code. + return 0; + } + // [r/m] + return 1; + + case 1: + if (rm == 4) { + // [SIB + imm8] + return 3; + } + // [r/m + imm8] + return 2; + + case 2: + if (rm == 4) { + // [SIB + imm32] + return 6; + } + // [r/m + imm32] + return 5; + + case 3: + // r/w + return 1; + + default: + break; + } + return 0; +} + class AutoVirtualProtect { public: AutoVirtualProtect(void* aFunc, size_t aSize, DWORD aProtect) : mFunc(aFunc), mSize(aSize), mNewProtect(aProtect), mOldProtect(0), mSuccess(false) {} @@ -639,16 +691,34 @@ protected: } } else { // not support yet! return; } } else if ((origBytes[nBytes] & 0xf0) == 0x50) { // 1-byte push/pop nBytes++; + } else if (origBytes[nBytes] == 0x65) { + // GS prefix + // + // The entry of GetKeyState on Windows 10 has the following code. 
+ // 65 48 8b 04 25 30 00 00 00 mov rax,qword ptr gs:[30h] + // (GS prefix + REX + MOV (0x8b) ...) + if (origBytes[nBytes + 1] == 0x48 && + (origBytes[nBytes + 2] >= 0x88 && origBytes[nBytes + 2] <= 0x8b)) { + nBytes += 3; + size_t len = GetOpLengthByModRM(origBytes + nBytes); + if (!len) { + // no way to support this yet. + return; + } + nBytes += len; + } else { + return; + } } else if (origBytes[nBytes] == 0x90) { // nop nBytes++; } else if (origBytes[nBytes] == 0xb8) { // MOV 0xB8: http://ref.x86asm.net/coder32.html#xB8 nBytes += 5; } else if (origBytes[nBytes] == 0xc3) { // ret